cmd/compile: Tinkering with schedule for debug and regalloc

This adds a proper heap-based priority queue to the
scheduler, which made it relatively easy to test quite a
few heuristics that "ought to work well".  For the Go tools
themselves (which may not be representative) the heuristic
that works best is to schedule (1) in line-number order,
then (2) from more args to fewer args, then (3) in value ID
order.  Trying to improve this with information about uses
at the ends of blocks turned out to be fruitless -- all of
my naive attempts at using that information did worse than
ignoring it.  I can confirm that the stores-early heuristic
tends to help; removing it makes the results slightly worse.
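
For reference, the tie-breaking order described above amounts
to roughly the comparator sketched below.  This is only a
sketch written against the ssa package's *Value -- the real
comparator added to schedule.go orders by score first and
skips the argument-count test for phis, and laterInBlock is
just an illustrative name:

	// Reports whether x should be placed later in the block than y.
	func laterInBlock(x, y *Value) bool {
		if x.Line != y.Line {
			return x.Line > y.Line // (1) keep line numbers in order
		}
		if len(x.Args) != len(y.Args) {
			return len(x.Args) < len(y.Args) // (2) more args go earlier, fewer go later
		}
		return x.ID > y.ID // (3) fall back to value ID order
	}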

My metric is code size reduction, which I take as a proxy
for fewer spills from register allocation.  The effect is
not uniform.  Here are the endpoints for "vet" from one set
of pretty-good heuristics (this is at least representative);
the columns are byte delta, function, new size, old size,
and the delta as a percentage of the old size.

-2208 time.parse 13472 15680 -14.081633%
-1514 runtime.pclntab 1002058 1003572 -0.150861%
-352 time.Time.AppendFormat 9952 10304 -3.416149%
-112 runtime.runGCProg 1984 2096 -5.343511%
-64 regexp/syntax.(*parser).factor 7264 7328 -0.873362%
-44 go.string.alldata 238630 238674 -0.018435%

48 math/big.(*Float).round 1376 1328 3.614458%
48 text/tabwriter.(*Writer).writeLines 1232 1184 4.054054%
48 math/big.shr 832 784 6.122449%
88 go.func.* 75174 75086 0.117199%
96 time.Date 1968 1872 5.128205%

Overall there appears to be a 0.1% decrease in text size.
No timings yet; given the distribution of the size
reductions, it might make sense to wait on those.

addr2line  text (code) = -4392 bytes (-0.156273%)
api  text (code) = -5502 bytes (-0.147644%)
asm  text (code) = -5254 bytes (-0.187810%)
cgo  text (code) = -4886 bytes (-0.148846%)
compile  text (code) = -1577 bytes (-0.019346%) * changed
cover  text (code) = -5236 bytes (-0.137992%)
dist  text (code) = -5015 bytes (-0.167829%)
doc  text (code) = -5180 bytes (-0.182121%)
fix  text (code) = -5000 bytes (-0.215148%)
link  text (code) = -5092 bytes (-0.152712%)
newlink  text (code) = -5204 bytes (-0.196986%)
nm  text (code) = -4398 bytes (-0.156018%)
objdump  text (code) = -4582 bytes (-0.155046%)
pack  text (code) = -4503 bytes (-0.294287%)
pprof  text (code) = -6314 bytes (-0.085177%)
trace  text (code) = -5856 bytes (-0.097818%)
vet  text (code) = -5696 bytes (-0.117334%)
yacc  text (code) = -4971 bytes (-0.213817%)

This leaves me sorely tempted to look into a "real" scheduler
to try to do a better job, but I think it might make more
sense to look into getting loop information into the
register allocator instead.

Fixes #14577.

Change-Id: I5238b83284ce76dea1eb94084a8cd47277db6827
Reviewed-on: https://go-review.googlesource.com/20240
Run-TryBot: David Chase <drchase@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
diff --git a/src/cmd/compile/internal/ssa/schedule.go b/src/cmd/compile/internal/ssa/schedule.go
index f47f93c..8124823 100644
--- a/src/cmd/compile/internal/ssa/schedule.go
+++ b/src/cmd/compile/internal/ssa/schedule.go
@@ -4,6 +4,8 @@
 
 package ssa
 
+import "container/heap"
+
 const (
 	ScorePhi = iota // towards top of block
 	ScoreVarDef
@@ -11,10 +13,31 @@
 	ScoreDefault
 	ScoreFlags
 	ScoreControl // towards bottom of block
-
-	ScoreCount // not a real score
 )
 
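+// A ValHeap is a heap of *Value, ordered by a less function chosen by the scheduler.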
+type ValHeap struct {
+	a    []*Value
+	less func(a, b *Value) bool
+}
+
+func (h ValHeap) Len() int      { return len(h.a) }
+func (h ValHeap) Swap(i, j int) { a := h.a; a[i], a[j] = a[j], a[i] }
+
+func (h *ValHeap) Push(x interface{}) {
+	// Push and Pop use pointer receivers because they modify the slice's length,
+	// not just its contents.
+	v := x.(*Value)
+	h.a = append(h.a, v)
+}
+func (h *ValHeap) Pop() interface{} {
+	old := h.a
+	n := len(old)
+	x := old[n-1]
+	h.a = old[0 : n-1]
+	return x
+}
+func (h ValHeap) Less(i, j int) bool { return h.less(h.a[i], h.a[j]) }
+
 // Schedule the Values in each Block. After this phase returns, the
 // order of b.Values matters and is the order in which those values
 // will appear in the assembly output. For now it generates a
@@ -23,23 +46,55 @@
 func schedule(f *Func) {
 	// For each value, the number of times it is used in the block
 	// by values that have not been scheduled yet.
-	uses := make([]int, f.NumValues())
+	uses := make([]int32, f.NumValues())
 
 	// "priority" for a value
-	score := make([]uint8, f.NumValues())
+	score := make([]int8, f.NumValues())
 
 	// scheduling order. We queue values in this list in reverse order.
 	var order []*Value
 
-	// priority queue of legally schedulable (0 unscheduled uses) values
-	var priq [ScoreCount][]*Value
-
 	// maps mem values to the next live memory value
 	nextMem := make([]*Value, f.NumValues())
 	// additional pretend arguments for each Value. Used to enforce load/store ordering.
 	additionalArgs := make([][]*Value, f.NumValues())
 
 	for _, b := range f.Blocks {
+		// Compute score. Larger numbers are scheduled closer to the end of the block.
+		for _, v := range b.Values {
+			switch {
+			case v.Op == OpAMD64LoweredGetClosurePtr:
+				// We also score GetLoweredClosurePtr as early as possible to ensure that the
+				// context register is not stomped. GetLoweredClosurePtr should only appear
+				// in the entry block where there are no phi functions, so there is no
+				// conflict or ambiguity here.
+				if b != f.Entry {
+					f.Fatalf("LoweredGetClosurePtr appeared outside of entry block, b=%s", b.String())
+				}
+				score[v.ID] = ScorePhi
+			case v.Op == OpPhi:
+				// We want all the phis first.
+				score[v.ID] = ScorePhi
+			case v.Op == OpVarDef:
+				// We want all the vardefs next.
+				score[v.ID] = ScoreVarDef
+			case v.Type.IsMemory():
+				// Schedule stores as early as possible. This tends to
+				// reduce register pressure. It also helps make sure
+				// VARDEF ops are scheduled before the corresponding LEA.
+				score[v.ID] = ScoreMemory
+			case v.Type.IsFlags():
+				// Schedule flag register generation as late as possible.
+				// This makes sure that we only have one live flags
+				// value at a time.
+				score[v.ID] = ScoreFlags
+			default:
+				score[v.ID] = ScoreDefault
+			}
+		}
+	}
+
+	for _, b := range f.Blocks {
 		// Find store chain for block.
 		// Store chains for different blocks overwrite each other, so
 		// the calculated store chain is good only for this block.
@@ -77,38 +132,7 @@
 				uses[v.ID]++
 			}
 		}
-		// Compute score. Larger numbers are scheduled closer to the end of the block.
-		for _, v := range b.Values {
-			switch {
-			case v.Op == OpAMD64LoweredGetClosurePtr:
-				// We also score GetLoweredClosurePtr as early as possible to ensure that the
-				// context register is not stomped. GetLoweredClosurePtr should only appear
-				// in the entry block where there are no phi functions, so there is no
-				// conflict or ambiguity here.
-				if b != f.Entry {
-					f.Fatalf("LoweredGetClosurePtr appeared outside of entry block, b=%s", b.String())
-				}
-				score[v.ID] = ScorePhi
-			case v.Op == OpPhi:
-				// We want all the phis first.
-				score[v.ID] = ScorePhi
-			case v.Op == OpVarDef:
-				// We want all the vardefs next.
-				score[v.ID] = ScoreVarDef
-			case v.Type.IsMemory():
-				// Schedule stores as early as possible. This tends to
-				// reduce register pressure. It also helps make sure
-				// VARDEF ops are scheduled before the corresponding LEA.
-				score[v.ID] = ScoreMemory
-			case v.Type.IsFlags():
-				// Schedule flag register generation as late as possible.
-				// This makes sure that we only have one live flags
-				// value at a time.
-				score[v.ID] = ScoreFlags
-			default:
-				score[v.ID] = ScoreDefault
-			}
-		}
+
 		if b.Control != nil && b.Control.Op != OpPhi {
 			// Force the control value to be scheduled at the end,
 			// unless it is a phi value (which must be first).
@@ -130,14 +154,32 @@
 			}
 		}
 
-		// Initialize priority queue with schedulable values.
-		for i := range priq {
-			priq[i] = priq[i][:0]
+		// Put schedulable values into a priority queue.
+		// Values that should come last in the block are least (the schedule is assembled backwards).
+		priq := &ValHeap{
+			a: make([]*Value, 0, 8), // TODO allocate once and reuse.
+			less: func(x, y *Value) bool {
+				sx := score[x.ID]
+				sy := score[y.ID]
+				if c := sx - sy; c != 0 {
+					return c > 0 // higher score comes later.
+				}
+				if x.Line != y.Line { // Favor in-order line stepping
+					return x.Line > y.Line
+				}
+				if x.Op != OpPhi {
+					if c := len(x.Args) - len(y.Args); c != 0 {
+						return c < 0 // fewer args come later
+					}
+				}
+				return x.ID > y.ID
+			},
 		}
+
+		// Initialize priority queue with schedulable values.
 		for _, v := range b.Values {
 			if uses[v.ID] == 0 {
-				s := score[v.ID]
-				priq[s] = append(priq[s], v)
+				heap.Push(priq, v)
 			}
 		}
 
@@ -145,19 +187,13 @@
 		order = order[:0]
 		for {
 			// Find highest priority schedulable value.
-			var v *Value
-			for i := len(priq) - 1; i >= 0; i-- {
-				n := len(priq[i])
-				if n == 0 {
-					continue
-				}
-				v = priq[i][n-1]
-				priq[i] = priq[i][:n-1]
+			// Note that the schedule is assembled backwards.
+
+			if priq.Len() == 0 {
 				break
 			}
-			if v == nil {
-				break
-			}
+
+			v := heap.Pop(priq).(*Value)
 
 			// Add it to the schedule.
 			order = append(order, v)
@@ -170,16 +206,14 @@
 				uses[w.ID]--
 				if uses[w.ID] == 0 {
 					// All uses scheduled, w is now schedulable.
-					s := score[w.ID]
-					priq[s] = append(priq[s], w)
+					heap.Push(priq, w)
 				}
 			}
 			for _, w := range additionalArgs[v.ID] {
 				uses[w.ID]--
 				if uses[w.ID] == 0 {
 					// All uses scheduled, w is now schedulable.
-					s := score[w.ID]
-					priq[s] = append(priq[s], w)
+					heap.Push(priq, w)
 				}
 			}
 		}