cmd/compile: do not allow regalloc to LoadReg G register

On architectures where G is stored in a register, it is
possible for a variable to be allocated to that register, and
subsequently that variable may be spilled and reloaded, for
example because of an intervening call.  If such an allocation
reaches a join point and it is the primary predecessor, it
becomes the target of a reload, which is not always correct:
a LoadReg into the G register clobbers the current goroutine
pointer.
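
For reference, a pseudo-Go sketch of the shape that triggers
this, reconstructed from the test added below (getg() and *g
stand in for the compiler-internal goroutine access and type,
and fff is an arbitrary function whose call forces a spill):

	func fff3(i int) *g {
		gee := getg() // gee may be allocated to the G register
		if i == 0 {
			fff() // call forces gee to be spilled
		}
		return gee // join point: the reload must not target G
	}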

Fix: guard all the LoadReg ops so that a reload targeting the
G register fails loudly, and spill the value in the G register
(if any) before merges (in the same way that 387 FP registers
are freed between blocks).
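
The merge-point spill, paraphrasing the regalloc.go change
below (hasGReg, GReg, regs, and freeReg are the existing
regalloc state used there):

	// Before leaving a block with a single successor (a
	// potential merge point), make sure nothing is still
	// allocated to the G register.
	if s.f.Config.hasGReg && s.regs[s.GReg].v != nil {
		s.freeReg(s.GReg) // spill the value in G before any merge
	}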

Includes test.

Fixes #25504.

Change-Id: I0482a53e20970c7315bf09c0e407ae5bba2fe05d
Reviewed-on: https://go-review.googlesource.com/114695
Run-TryBot: David Chase <drchase@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
diff --git a/src/cmd/compile/internal/ssa/export_test.go b/src/cmd/compile/internal/ssa/export_test.go
index be9f19b..5832050 100644
--- a/src/cmd/compile/internal/ssa/export_test.go
+++ b/src/cmd/compile/internal/ssa/export_test.go
@@ -28,6 +28,7 @@
 
 func testConfig(tb testing.TB) *Conf      { return testConfigArch(tb, "amd64") }
 func testConfigS390X(tb testing.TB) *Conf { return testConfigArch(tb, "s390x") }
+func testConfigARM64(tb testing.TB) *Conf { return testConfigArch(tb, "arm64") }
 
 func testConfigArch(tb testing.TB, arch string) *Conf {
 	ctxt, ok := testCtxts[arch]
diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go
index 080ad0f..bbf1932 100644
--- a/src/cmd/compile/internal/ssa/regalloc.go
+++ b/src/cmd/compile/internal/ssa/regalloc.go
@@ -532,6 +532,9 @@
 	}
 
 	s.assignReg(r, v, c)
+	if c.Op == OpLoadReg && s.isGReg(r) {
+		s.f.Fatalf("allocValToReg.OpLoadReg targeting g: " + c.LongString())
+	}
 	if nospill {
 		s.nospill |= regMask(1) << r
 	}
@@ -809,6 +812,10 @@
 	return opcodeTable[op].reg
 }
 
+func (s *regAllocState) isGReg(r register) bool {
+	return s.f.Config.hasGReg && s.GReg == r
+}
+
 func (s *regAllocState) regalloc(f *Func) {
 	regValLiveSet := f.newSparseSet(f.NumValues()) // set of values that may be live in register
 	defer f.retSparseSet(regValLiveSet)
@@ -951,6 +958,7 @@
 			// Majority vote? Deepest nesting level?
 			phiRegs = phiRegs[:0]
 			var phiUsed regMask
+
 			for _, v := range phis {
 				if !s.values[v.ID].needReg {
 					phiRegs = append(phiRegs, noRegister)
@@ -1516,6 +1524,9 @@
 		// predecessor of it, find live values that we use soon after
 		// the merge point and promote them to registers now.
 		if len(b.Succs) == 1 {
+			if s.f.Config.hasGReg && s.regs[s.GReg].v != nil {
+				s.freeReg(s.GReg) // Spill value in G register before any merge.
+			}
 			// For this to be worthwhile, the loop must have no calls in it.
 			top := b.Succs[0].b
 			loop := s.loopnest.b2l[top.ID]
@@ -1996,6 +2007,9 @@
 			c = e.p.NewValue1(pos, OpLoadReg, c.Type, c)
 		}
 		e.set(r, vid, c, false, pos)
+		if c.Op == OpLoadReg && e.s.isGReg(register(r.(*Register).num)) {
+			e.s.f.Fatalf("process.OpLoadReg targeting g: " + c.LongString())
+		}
 	}
 }
 
@@ -2110,6 +2124,9 @@
 		}
 	}
 	e.set(loc, vid, x, true, pos)
+	if x.Op == OpLoadReg && e.s.isGReg(register(loc.(*Register).num)) {
+		e.s.f.Fatalf("processDest.OpLoadReg targeting g: " + x.LongString())
+	}
 	if splice != nil {
 		(*splice).Uses--
 		*splice = x
diff --git a/src/cmd/compile/internal/ssa/regalloc_test.go b/src/cmd/compile/internal/ssa/regalloc_test.go
index 02751a9..bb8be5e 100644
--- a/src/cmd/compile/internal/ssa/regalloc_test.go
+++ b/src/cmd/compile/internal/ssa/regalloc_test.go
@@ -36,6 +36,55 @@
 	checkFunc(f.f)
 }
 
+// Test to make sure G register is never reloaded from spill (spill of G is okay)
+// See #25504
+func TestNoGetgLoadReg(t *testing.T) {
+	/*
+		Original:
+		func fff3(i int) *g {
+			gee := getg()
+			if i == 0 {
+				fff()
+			}
+			return gee // here
+		}
+	*/
+	c := testConfigARM64(t)
+	f := c.Fun("b1",
+		Bloc("b1",
+			Valu("v1", OpInitMem, types.TypeMem, 0, nil),
+			Valu("v6", OpArg, c.config.Types.Int64, 0, c.Frontend().Auto(src.NoXPos, c.config.Types.Int64)),
+			Valu("v8", OpGetG, c.config.Types.Int64.PtrTo(), 0, nil, "v1"),
+			Valu("v11", OpARM64CMPconst, types.TypeFlags, 0, nil, "v6"),
+			Eq("v11", "b2", "b4"),
+		),
+		Bloc("b4",
+			Goto("b3"),
+		),
+		Bloc("b3",
+			Valu("v14", OpPhi, types.TypeMem, 0, nil, "v1", "v12"),
+			Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+			Valu("v16", OpARM64MOVDstore, types.TypeMem, 0, nil, "v8", "sb", "v14"),
+			Exit("v16"),
+		),
+		Bloc("b2",
+			Valu("v12", OpARM64CALLstatic, types.TypeMem, 0, nil, "v1"),
+			Goto("b3"),
+		),
+	)
+	regalloc(f.f)
+	checkFunc(f.f)
+	// Double-check that we never restore to the G register. Regalloc should catch it, but check again anyway.
+	r := f.f.RegAlloc
+	for _, b := range f.blocks {
+		for _, v := range b.Values {
+			if v.Op == OpLoadReg && r[v.ID].String() == "g" {
+				t.Errorf("Saw OpLoadReg targeting g register: %s", v.LongString())
+			}
+		}
+	}
+}
+
 // Test to make sure we don't push spills into loops.
 // See issue #19595.
 func TestSpillWithLoop(t *testing.T) {