[dev.ssa] Merge remote-tracking branch 'origin/master' into mergebranch

Conflicts:
	src/cmd/compile/internal/gc/racewalk.go
	src/cmd/internal/obj/stack.go
	src/cmd/internal/obj/x86/obj6.go
	src/runtime/stack.go
	test/nilptr3.go
	test/nosplit.go

Change-Id: Ie6053eb1577fd73e8243651f25c0f1fc765ae660
diff --git a/src/cmd/compile/internal/amd64/ggen.go b/src/cmd/compile/internal/amd64/ggen.go
index a4f1ec9..0cd3473 100644
--- a/src/cmd/compile/internal/amd64/ggen.go
+++ b/src/cmd/compile/internal/amd64/ggen.go
@@ -192,7 +192,7 @@
  *	res = nl % nr
  * according to op.
  */
-func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+func dodiv(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 	// Have to be careful about handling
 	// most negative int divided by -1 correctly.
 	// The hardware will trap.
@@ -335,7 +335,8 @@
 		x.Type = gc.Types[gc.TINT64]
 		gmove(x, oldx)
 		x.Type = t
-		oldx.Etype = r // squirrel away old r value
+		// TODO(marvin): Fix Node.EType type union.
+		oldx.Etype = gc.EType(r) // squirrel away old r value
 		gc.SetReg(dr, 1)
 	}
 }
@@ -389,7 +390,7 @@
  *	res = nl << nr
  *	res = nl >> nr
  */
-func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 	a := optoas(op, nl.Type)
 
 	if nr.Op == gc.OLITERAL {
@@ -508,7 +509,7 @@
  * there is no 2-operand byte multiply instruction so
  * we do a full-width multiplication and truncate afterwards.
  */
-func cgen_bmul(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) bool {
+func cgen_bmul(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) bool {
 	if optoas(op, nl.Type) != x86.AIMULB {
 		return false
 	}
diff --git a/src/cmd/compile/internal/amd64/gsubr.go b/src/cmd/compile/internal/amd64/gsubr.go
index 7b57902..003b0ad 100644
--- a/src/cmd/compile/internal/amd64/gsubr.go
+++ b/src/cmd/compile/internal/amd64/gsubr.go
@@ -100,7 +100,7 @@
 	gins(as, &n1, n2)
 }
 
-func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
+func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
 	if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && gc.Smallintconst(n1) && n2.Op != gc.OLITERAL {
 		// Reverse comparison to place constant last.
 		op = gc.Brrev(op)
@@ -108,7 +108,15 @@
 	}
 	// General case.
 	var r1, r2, g1, g2 gc.Node
-	if n1.Op == gc.ONAME && n1.Class&gc.PHEAP == 0 || n1.Op == gc.OINDREG {
+
+	// A special case to make write barriers more efficient.
+	// Comparing the first field of a named struct can be done directly.
+	base := n1
+	if n1.Op == gc.ODOT && n1.Left.Type.Etype == gc.TSTRUCT && n1.Left.Type.Type.Sym == n1.Right.Sym {
+		base = n1.Left
+	}
+
+	if base.Op == gc.ONAME && base.Class&gc.PHEAP == 0 || n1.Op == gc.OINDREG {
 		r1 = *n1
 	} else {
 		gc.Regalloc(&r1, t, n1)
@@ -673,514 +681,547 @@
 /*
  * return Axxx for Oxxx on type t.
  */
-func optoas(op int, t *gc.Type) int {
+func optoas(op gc.Op, t *gc.Type) int {
 	if t == nil {
 		gc.Fatalf("optoas: t is nil")
 	}
 
+	// avoid constant conversions in switches below
+	const (
+		OMINUS_  = uint32(gc.OMINUS) << 16
+		OLSH_    = uint32(gc.OLSH) << 16
+		ORSH_    = uint32(gc.ORSH) << 16
+		OADD_    = uint32(gc.OADD) << 16
+		OSUB_    = uint32(gc.OSUB) << 16
+		OMUL_    = uint32(gc.OMUL) << 16
+		ODIV_    = uint32(gc.ODIV) << 16
+		OMOD_    = uint32(gc.OMOD) << 16
+		OOR_     = uint32(gc.OOR) << 16
+		OAND_    = uint32(gc.OAND) << 16
+		OXOR_    = uint32(gc.OXOR) << 16
+		OEQ_     = uint32(gc.OEQ) << 16
+		ONE_     = uint32(gc.ONE) << 16
+		OLT_     = uint32(gc.OLT) << 16
+		OLE_     = uint32(gc.OLE) << 16
+		OGE_     = uint32(gc.OGE) << 16
+		OGT_     = uint32(gc.OGT) << 16
+		OCMP_    = uint32(gc.OCMP) << 16
+		OPS_     = uint32(gc.OPS) << 16
+		OPC_     = uint32(gc.OPC) << 16
+		OAS_     = uint32(gc.OAS) << 16
+		OHMUL_   = uint32(gc.OHMUL) << 16
+		OSQRT_   = uint32(gc.OSQRT) << 16
+		OADDR_   = uint32(gc.OADDR) << 16
+		OINC_    = uint32(gc.OINC) << 16
+		ODEC_    = uint32(gc.ODEC) << 16
+		OLROT_   = uint32(gc.OLROT) << 16
+		ORROTC_  = uint32(gc.ORROTC) << 16
+		OEXTEND_ = uint32(gc.OEXTEND) << 16
+	)
+
 	a := obj.AXXX
 	switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
 	default:
 		gc.Fatalf("optoas: no entry %v-%v", gc.Oconv(int(op), 0), t)
 
-	case gc.OADDR<<16 | gc.TPTR32:
+	case OADDR_ | gc.TPTR32:
 		a = x86.ALEAL
 
-	case gc.OADDR<<16 | gc.TPTR64:
+	case OADDR_ | gc.TPTR64:
 		a = x86.ALEAQ
 
-	case gc.OEQ<<16 | gc.TBOOL,
-		gc.OEQ<<16 | gc.TINT8,
-		gc.OEQ<<16 | gc.TUINT8,
-		gc.OEQ<<16 | gc.TINT16,
-		gc.OEQ<<16 | gc.TUINT16,
-		gc.OEQ<<16 | gc.TINT32,
-		gc.OEQ<<16 | gc.TUINT32,
-		gc.OEQ<<16 | gc.TINT64,
-		gc.OEQ<<16 | gc.TUINT64,
-		gc.OEQ<<16 | gc.TPTR32,
-		gc.OEQ<<16 | gc.TPTR64,
-		gc.OEQ<<16 | gc.TFLOAT32,
-		gc.OEQ<<16 | gc.TFLOAT64:
+	case OEQ_ | gc.TBOOL,
+		OEQ_ | gc.TINT8,
+		OEQ_ | gc.TUINT8,
+		OEQ_ | gc.TINT16,
+		OEQ_ | gc.TUINT16,
+		OEQ_ | gc.TINT32,
+		OEQ_ | gc.TUINT32,
+		OEQ_ | gc.TINT64,
+		OEQ_ | gc.TUINT64,
+		OEQ_ | gc.TPTR32,
+		OEQ_ | gc.TPTR64,
+		OEQ_ | gc.TFLOAT32,
+		OEQ_ | gc.TFLOAT64:
 		a = x86.AJEQ
 
-	case gc.ONE<<16 | gc.TBOOL,
-		gc.ONE<<16 | gc.TINT8,
-		gc.ONE<<16 | gc.TUINT8,
-		gc.ONE<<16 | gc.TINT16,
-		gc.ONE<<16 | gc.TUINT16,
-		gc.ONE<<16 | gc.TINT32,
-		gc.ONE<<16 | gc.TUINT32,
-		gc.ONE<<16 | gc.TINT64,
-		gc.ONE<<16 | gc.TUINT64,
-		gc.ONE<<16 | gc.TPTR32,
-		gc.ONE<<16 | gc.TPTR64,
-		gc.ONE<<16 | gc.TFLOAT32,
-		gc.ONE<<16 | gc.TFLOAT64:
+	case ONE_ | gc.TBOOL,
+		ONE_ | gc.TINT8,
+		ONE_ | gc.TUINT8,
+		ONE_ | gc.TINT16,
+		ONE_ | gc.TUINT16,
+		ONE_ | gc.TINT32,
+		ONE_ | gc.TUINT32,
+		ONE_ | gc.TINT64,
+		ONE_ | gc.TUINT64,
+		ONE_ | gc.TPTR32,
+		ONE_ | gc.TPTR64,
+		ONE_ | gc.TFLOAT32,
+		ONE_ | gc.TFLOAT64:
 		a = x86.AJNE
 
-	case gc.OPS<<16 | gc.TBOOL,
-		gc.OPS<<16 | gc.TINT8,
-		gc.OPS<<16 | gc.TUINT8,
-		gc.OPS<<16 | gc.TINT16,
-		gc.OPS<<16 | gc.TUINT16,
-		gc.OPS<<16 | gc.TINT32,
-		gc.OPS<<16 | gc.TUINT32,
-		gc.OPS<<16 | gc.TINT64,
-		gc.OPS<<16 | gc.TUINT64,
-		gc.OPS<<16 | gc.TPTR32,
-		gc.OPS<<16 | gc.TPTR64,
-		gc.OPS<<16 | gc.TFLOAT32,
-		gc.OPS<<16 | gc.TFLOAT64:
+	case OPS_ | gc.TBOOL,
+		OPS_ | gc.TINT8,
+		OPS_ | gc.TUINT8,
+		OPS_ | gc.TINT16,
+		OPS_ | gc.TUINT16,
+		OPS_ | gc.TINT32,
+		OPS_ | gc.TUINT32,
+		OPS_ | gc.TINT64,
+		OPS_ | gc.TUINT64,
+		OPS_ | gc.TPTR32,
+		OPS_ | gc.TPTR64,
+		OPS_ | gc.TFLOAT32,
+		OPS_ | gc.TFLOAT64:
 		a = x86.AJPS
 
-	case gc.OPC<<16 | gc.TBOOL,
-		gc.OPC<<16 | gc.TINT8,
-		gc.OPC<<16 | gc.TUINT8,
-		gc.OPC<<16 | gc.TINT16,
-		gc.OPC<<16 | gc.TUINT16,
-		gc.OPC<<16 | gc.TINT32,
-		gc.OPC<<16 | gc.TUINT32,
-		gc.OPC<<16 | gc.TINT64,
-		gc.OPC<<16 | gc.TUINT64,
-		gc.OPC<<16 | gc.TPTR32,
-		gc.OPC<<16 | gc.TPTR64,
-		gc.OPC<<16 | gc.TFLOAT32,
-		gc.OPC<<16 | gc.TFLOAT64:
+	case OPC_ | gc.TBOOL,
+		OPC_ | gc.TINT8,
+		OPC_ | gc.TUINT8,
+		OPC_ | gc.TINT16,
+		OPC_ | gc.TUINT16,
+		OPC_ | gc.TINT32,
+		OPC_ | gc.TUINT32,
+		OPC_ | gc.TINT64,
+		OPC_ | gc.TUINT64,
+		OPC_ | gc.TPTR32,
+		OPC_ | gc.TPTR64,
+		OPC_ | gc.TFLOAT32,
+		OPC_ | gc.TFLOAT64:
 		a = x86.AJPC
 
-	case gc.OLT<<16 | gc.TINT8,
-		gc.OLT<<16 | gc.TINT16,
-		gc.OLT<<16 | gc.TINT32,
-		gc.OLT<<16 | gc.TINT64:
+	case OLT_ | gc.TINT8,
+		OLT_ | gc.TINT16,
+		OLT_ | gc.TINT32,
+		OLT_ | gc.TINT64:
 		a = x86.AJLT
 
-	case gc.OLT<<16 | gc.TUINT8,
-		gc.OLT<<16 | gc.TUINT16,
-		gc.OLT<<16 | gc.TUINT32,
-		gc.OLT<<16 | gc.TUINT64:
+	case OLT_ | gc.TUINT8,
+		OLT_ | gc.TUINT16,
+		OLT_ | gc.TUINT32,
+		OLT_ | gc.TUINT64:
 		a = x86.AJCS
 
-	case gc.OLE<<16 | gc.TINT8,
-		gc.OLE<<16 | gc.TINT16,
-		gc.OLE<<16 | gc.TINT32,
-		gc.OLE<<16 | gc.TINT64:
+	case OLE_ | gc.TINT8,
+		OLE_ | gc.TINT16,
+		OLE_ | gc.TINT32,
+		OLE_ | gc.TINT64:
 		a = x86.AJLE
 
-	case gc.OLE<<16 | gc.TUINT8,
-		gc.OLE<<16 | gc.TUINT16,
-		gc.OLE<<16 | gc.TUINT32,
-		gc.OLE<<16 | gc.TUINT64:
+	case OLE_ | gc.TUINT8,
+		OLE_ | gc.TUINT16,
+		OLE_ | gc.TUINT32,
+		OLE_ | gc.TUINT64:
 		a = x86.AJLS
 
-	case gc.OGT<<16 | gc.TINT8,
-		gc.OGT<<16 | gc.TINT16,
-		gc.OGT<<16 | gc.TINT32,
-		gc.OGT<<16 | gc.TINT64:
+	case OGT_ | gc.TINT8,
+		OGT_ | gc.TINT16,
+		OGT_ | gc.TINT32,
+		OGT_ | gc.TINT64:
 		a = x86.AJGT
 
-	case gc.OGT<<16 | gc.TUINT8,
-		gc.OGT<<16 | gc.TUINT16,
-		gc.OGT<<16 | gc.TUINT32,
-		gc.OGT<<16 | gc.TUINT64,
-		gc.OLT<<16 | gc.TFLOAT32,
-		gc.OLT<<16 | gc.TFLOAT64:
+	case OGT_ | gc.TUINT8,
+		OGT_ | gc.TUINT16,
+		OGT_ | gc.TUINT32,
+		OGT_ | gc.TUINT64,
+		OLT_ | gc.TFLOAT32,
+		OLT_ | gc.TFLOAT64:
 		a = x86.AJHI
 
-	case gc.OGE<<16 | gc.TINT8,
-		gc.OGE<<16 | gc.TINT16,
-		gc.OGE<<16 | gc.TINT32,
-		gc.OGE<<16 | gc.TINT64:
+	case OGE_ | gc.TINT8,
+		OGE_ | gc.TINT16,
+		OGE_ | gc.TINT32,
+		OGE_ | gc.TINT64:
 		a = x86.AJGE
 
-	case gc.OGE<<16 | gc.TUINT8,
-		gc.OGE<<16 | gc.TUINT16,
-		gc.OGE<<16 | gc.TUINT32,
-		gc.OGE<<16 | gc.TUINT64,
-		gc.OLE<<16 | gc.TFLOAT32,
-		gc.OLE<<16 | gc.TFLOAT64:
+	case OGE_ | gc.TUINT8,
+		OGE_ | gc.TUINT16,
+		OGE_ | gc.TUINT32,
+		OGE_ | gc.TUINT64,
+		OLE_ | gc.TFLOAT32,
+		OLE_ | gc.TFLOAT64:
 		a = x86.AJCC
 
-	case gc.OCMP<<16 | gc.TBOOL,
-		gc.OCMP<<16 | gc.TINT8,
-		gc.OCMP<<16 | gc.TUINT8:
+	case OCMP_ | gc.TBOOL,
+		OCMP_ | gc.TINT8,
+		OCMP_ | gc.TUINT8:
 		a = x86.ACMPB
 
-	case gc.OCMP<<16 | gc.TINT16,
-		gc.OCMP<<16 | gc.TUINT16:
+	case OCMP_ | gc.TINT16,
+		OCMP_ | gc.TUINT16:
 		a = x86.ACMPW
 
-	case gc.OCMP<<16 | gc.TINT32,
-		gc.OCMP<<16 | gc.TUINT32,
-		gc.OCMP<<16 | gc.TPTR32:
+	case OCMP_ | gc.TINT32,
+		OCMP_ | gc.TUINT32,
+		OCMP_ | gc.TPTR32:
 		a = x86.ACMPL
 
-	case gc.OCMP<<16 | gc.TINT64,
-		gc.OCMP<<16 | gc.TUINT64,
-		gc.OCMP<<16 | gc.TPTR64:
+	case OCMP_ | gc.TINT64,
+		OCMP_ | gc.TUINT64,
+		OCMP_ | gc.TPTR64:
 		a = x86.ACMPQ
 
-	case gc.OCMP<<16 | gc.TFLOAT32:
+	case OCMP_ | gc.TFLOAT32:
 		a = x86.AUCOMISS
 
-	case gc.OCMP<<16 | gc.TFLOAT64:
+	case OCMP_ | gc.TFLOAT64:
 		a = x86.AUCOMISD
 
-	case gc.OAS<<16 | gc.TBOOL,
-		gc.OAS<<16 | gc.TINT8,
-		gc.OAS<<16 | gc.TUINT8:
+	case OAS_ | gc.TBOOL,
+		OAS_ | gc.TINT8,
+		OAS_ | gc.TUINT8:
 		a = x86.AMOVB
 
-	case gc.OAS<<16 | gc.TINT16,
-		gc.OAS<<16 | gc.TUINT16:
+	case OAS_ | gc.TINT16,
+		OAS_ | gc.TUINT16:
 		a = x86.AMOVW
 
-	case gc.OAS<<16 | gc.TINT32,
-		gc.OAS<<16 | gc.TUINT32,
-		gc.OAS<<16 | gc.TPTR32:
+	case OAS_ | gc.TINT32,
+		OAS_ | gc.TUINT32,
+		OAS_ | gc.TPTR32:
 		a = x86.AMOVL
 
-	case gc.OAS<<16 | gc.TINT64,
-		gc.OAS<<16 | gc.TUINT64,
-		gc.OAS<<16 | gc.TPTR64:
+	case OAS_ | gc.TINT64,
+		OAS_ | gc.TUINT64,
+		OAS_ | gc.TPTR64:
 		a = x86.AMOVQ
 
-	case gc.OAS<<16 | gc.TFLOAT32:
+	case OAS_ | gc.TFLOAT32:
 		a = x86.AMOVSS
 
-	case gc.OAS<<16 | gc.TFLOAT64:
+	case OAS_ | gc.TFLOAT64:
 		a = x86.AMOVSD
 
-	case gc.OADD<<16 | gc.TINT8,
-		gc.OADD<<16 | gc.TUINT8:
+	case OADD_ | gc.TINT8,
+		OADD_ | gc.TUINT8:
 		a = x86.AADDB
 
-	case gc.OADD<<16 | gc.TINT16,
-		gc.OADD<<16 | gc.TUINT16:
+	case OADD_ | gc.TINT16,
+		OADD_ | gc.TUINT16:
 		a = x86.AADDW
 
-	case gc.OADD<<16 | gc.TINT32,
-		gc.OADD<<16 | gc.TUINT32,
-		gc.OADD<<16 | gc.TPTR32:
+	case OADD_ | gc.TINT32,
+		OADD_ | gc.TUINT32,
+		OADD_ | gc.TPTR32:
 		a = x86.AADDL
 
-	case gc.OADD<<16 | gc.TINT64,
-		gc.OADD<<16 | gc.TUINT64,
-		gc.OADD<<16 | gc.TPTR64:
+	case OADD_ | gc.TINT64,
+		OADD_ | gc.TUINT64,
+		OADD_ | gc.TPTR64:
 		a = x86.AADDQ
 
-	case gc.OADD<<16 | gc.TFLOAT32:
+	case OADD_ | gc.TFLOAT32:
 		a = x86.AADDSS
 
-	case gc.OADD<<16 | gc.TFLOAT64:
+	case OADD_ | gc.TFLOAT64:
 		a = x86.AADDSD
 
-	case gc.OSUB<<16 | gc.TINT8,
-		gc.OSUB<<16 | gc.TUINT8:
+	case OSUB_ | gc.TINT8,
+		OSUB_ | gc.TUINT8:
 		a = x86.ASUBB
 
-	case gc.OSUB<<16 | gc.TINT16,
-		gc.OSUB<<16 | gc.TUINT16:
+	case OSUB_ | gc.TINT16,
+		OSUB_ | gc.TUINT16:
 		a = x86.ASUBW
 
-	case gc.OSUB<<16 | gc.TINT32,
-		gc.OSUB<<16 | gc.TUINT32,
-		gc.OSUB<<16 | gc.TPTR32:
+	case OSUB_ | gc.TINT32,
+		OSUB_ | gc.TUINT32,
+		OSUB_ | gc.TPTR32:
 		a = x86.ASUBL
 
-	case gc.OSUB<<16 | gc.TINT64,
-		gc.OSUB<<16 | gc.TUINT64,
-		gc.OSUB<<16 | gc.TPTR64:
+	case OSUB_ | gc.TINT64,
+		OSUB_ | gc.TUINT64,
+		OSUB_ | gc.TPTR64:
 		a = x86.ASUBQ
 
-	case gc.OSUB<<16 | gc.TFLOAT32:
+	case OSUB_ | gc.TFLOAT32:
 		a = x86.ASUBSS
 
-	case gc.OSUB<<16 | gc.TFLOAT64:
+	case OSUB_ | gc.TFLOAT64:
 		a = x86.ASUBSD
 
-	case gc.OINC<<16 | gc.TINT8,
-		gc.OINC<<16 | gc.TUINT8:
+	case OINC_ | gc.TINT8,
+		OINC_ | gc.TUINT8:
 		a = x86.AINCB
 
-	case gc.OINC<<16 | gc.TINT16,
-		gc.OINC<<16 | gc.TUINT16:
+	case OINC_ | gc.TINT16,
+		OINC_ | gc.TUINT16:
 		a = x86.AINCW
 
-	case gc.OINC<<16 | gc.TINT32,
-		gc.OINC<<16 | gc.TUINT32,
-		gc.OINC<<16 | gc.TPTR32:
+	case OINC_ | gc.TINT32,
+		OINC_ | gc.TUINT32,
+		OINC_ | gc.TPTR32:
 		a = x86.AINCL
 
-	case gc.OINC<<16 | gc.TINT64,
-		gc.OINC<<16 | gc.TUINT64,
-		gc.OINC<<16 | gc.TPTR64:
+	case OINC_ | gc.TINT64,
+		OINC_ | gc.TUINT64,
+		OINC_ | gc.TPTR64:
 		a = x86.AINCQ
 
-	case gc.ODEC<<16 | gc.TINT8,
-		gc.ODEC<<16 | gc.TUINT8:
+	case ODEC_ | gc.TINT8,
+		ODEC_ | gc.TUINT8:
 		a = x86.ADECB
 
-	case gc.ODEC<<16 | gc.TINT16,
-		gc.ODEC<<16 | gc.TUINT16:
+	case ODEC_ | gc.TINT16,
+		ODEC_ | gc.TUINT16:
 		a = x86.ADECW
 
-	case gc.ODEC<<16 | gc.TINT32,
-		gc.ODEC<<16 | gc.TUINT32,
-		gc.ODEC<<16 | gc.TPTR32:
+	case ODEC_ | gc.TINT32,
+		ODEC_ | gc.TUINT32,
+		ODEC_ | gc.TPTR32:
 		a = x86.ADECL
 
-	case gc.ODEC<<16 | gc.TINT64,
-		gc.ODEC<<16 | gc.TUINT64,
-		gc.ODEC<<16 | gc.TPTR64:
+	case ODEC_ | gc.TINT64,
+		ODEC_ | gc.TUINT64,
+		ODEC_ | gc.TPTR64:
 		a = x86.ADECQ
 
-	case gc.OMINUS<<16 | gc.TINT8,
-		gc.OMINUS<<16 | gc.TUINT8:
+	case OMINUS_ | gc.TINT8,
+		OMINUS_ | gc.TUINT8:
 		a = x86.ANEGB
 
-	case gc.OMINUS<<16 | gc.TINT16,
-		gc.OMINUS<<16 | gc.TUINT16:
+	case OMINUS_ | gc.TINT16,
+		OMINUS_ | gc.TUINT16:
 		a = x86.ANEGW
 
-	case gc.OMINUS<<16 | gc.TINT32,
-		gc.OMINUS<<16 | gc.TUINT32,
-		gc.OMINUS<<16 | gc.TPTR32:
+	case OMINUS_ | gc.TINT32,
+		OMINUS_ | gc.TUINT32,
+		OMINUS_ | gc.TPTR32:
 		a = x86.ANEGL
 
-	case gc.OMINUS<<16 | gc.TINT64,
-		gc.OMINUS<<16 | gc.TUINT64,
-		gc.OMINUS<<16 | gc.TPTR64:
+	case OMINUS_ | gc.TINT64,
+		OMINUS_ | gc.TUINT64,
+		OMINUS_ | gc.TPTR64:
 		a = x86.ANEGQ
 
-	case gc.OAND<<16 | gc.TBOOL,
-		gc.OAND<<16 | gc.TINT8,
-		gc.OAND<<16 | gc.TUINT8:
+	case OAND_ | gc.TBOOL,
+		OAND_ | gc.TINT8,
+		OAND_ | gc.TUINT8:
 		a = x86.AANDB
 
-	case gc.OAND<<16 | gc.TINT16,
-		gc.OAND<<16 | gc.TUINT16:
+	case OAND_ | gc.TINT16,
+		OAND_ | gc.TUINT16:
 		a = x86.AANDW
 
-	case gc.OAND<<16 | gc.TINT32,
-		gc.OAND<<16 | gc.TUINT32,
-		gc.OAND<<16 | gc.TPTR32:
+	case OAND_ | gc.TINT32,
+		OAND_ | gc.TUINT32,
+		OAND_ | gc.TPTR32:
 		a = x86.AANDL
 
-	case gc.OAND<<16 | gc.TINT64,
-		gc.OAND<<16 | gc.TUINT64,
-		gc.OAND<<16 | gc.TPTR64:
+	case OAND_ | gc.TINT64,
+		OAND_ | gc.TUINT64,
+		OAND_ | gc.TPTR64:
 		a = x86.AANDQ
 
-	case gc.OOR<<16 | gc.TBOOL,
-		gc.OOR<<16 | gc.TINT8,
-		gc.OOR<<16 | gc.TUINT8:
+	case OOR_ | gc.TBOOL,
+		OOR_ | gc.TINT8,
+		OOR_ | gc.TUINT8:
 		a = x86.AORB
 
-	case gc.OOR<<16 | gc.TINT16,
-		gc.OOR<<16 | gc.TUINT16:
+	case OOR_ | gc.TINT16,
+		OOR_ | gc.TUINT16:
 		a = x86.AORW
 
-	case gc.OOR<<16 | gc.TINT32,
-		gc.OOR<<16 | gc.TUINT32,
-		gc.OOR<<16 | gc.TPTR32:
+	case OOR_ | gc.TINT32,
+		OOR_ | gc.TUINT32,
+		OOR_ | gc.TPTR32:
 		a = x86.AORL
 
-	case gc.OOR<<16 | gc.TINT64,
-		gc.OOR<<16 | gc.TUINT64,
-		gc.OOR<<16 | gc.TPTR64:
+	case OOR_ | gc.TINT64,
+		OOR_ | gc.TUINT64,
+		OOR_ | gc.TPTR64:
 		a = x86.AORQ
 
-	case gc.OXOR<<16 | gc.TINT8,
-		gc.OXOR<<16 | gc.TUINT8:
+	case OXOR_ | gc.TINT8,
+		OXOR_ | gc.TUINT8:
 		a = x86.AXORB
 
-	case gc.OXOR<<16 | gc.TINT16,
-		gc.OXOR<<16 | gc.TUINT16:
+	case OXOR_ | gc.TINT16,
+		OXOR_ | gc.TUINT16:
 		a = x86.AXORW
 
-	case gc.OXOR<<16 | gc.TINT32,
-		gc.OXOR<<16 | gc.TUINT32,
-		gc.OXOR<<16 | gc.TPTR32:
+	case OXOR_ | gc.TINT32,
+		OXOR_ | gc.TUINT32,
+		OXOR_ | gc.TPTR32:
 		a = x86.AXORL
 
-	case gc.OXOR<<16 | gc.TINT64,
-		gc.OXOR<<16 | gc.TUINT64,
-		gc.OXOR<<16 | gc.TPTR64:
+	case OXOR_ | gc.TINT64,
+		OXOR_ | gc.TUINT64,
+		OXOR_ | gc.TPTR64:
 		a = x86.AXORQ
 
-	case gc.OLROT<<16 | gc.TINT8,
-		gc.OLROT<<16 | gc.TUINT8:
+	case OLROT_ | gc.TINT8,
+		OLROT_ | gc.TUINT8:
 		a = x86.AROLB
 
-	case gc.OLROT<<16 | gc.TINT16,
-		gc.OLROT<<16 | gc.TUINT16:
+	case OLROT_ | gc.TINT16,
+		OLROT_ | gc.TUINT16:
 		a = x86.AROLW
 
-	case gc.OLROT<<16 | gc.TINT32,
-		gc.OLROT<<16 | gc.TUINT32,
-		gc.OLROT<<16 | gc.TPTR32:
+	case OLROT_ | gc.TINT32,
+		OLROT_ | gc.TUINT32,
+		OLROT_ | gc.TPTR32:
 		a = x86.AROLL
 
-	case gc.OLROT<<16 | gc.TINT64,
-		gc.OLROT<<16 | gc.TUINT64,
-		gc.OLROT<<16 | gc.TPTR64:
+	case OLROT_ | gc.TINT64,
+		OLROT_ | gc.TUINT64,
+		OLROT_ | gc.TPTR64:
 		a = x86.AROLQ
 
-	case gc.OLSH<<16 | gc.TINT8,
-		gc.OLSH<<16 | gc.TUINT8:
+	case OLSH_ | gc.TINT8,
+		OLSH_ | gc.TUINT8:
 		a = x86.ASHLB
 
-	case gc.OLSH<<16 | gc.TINT16,
-		gc.OLSH<<16 | gc.TUINT16:
+	case OLSH_ | gc.TINT16,
+		OLSH_ | gc.TUINT16:
 		a = x86.ASHLW
 
-	case gc.OLSH<<16 | gc.TINT32,
-		gc.OLSH<<16 | gc.TUINT32,
-		gc.OLSH<<16 | gc.TPTR32:
+	case OLSH_ | gc.TINT32,
+		OLSH_ | gc.TUINT32,
+		OLSH_ | gc.TPTR32:
 		a = x86.ASHLL
 
-	case gc.OLSH<<16 | gc.TINT64,
-		gc.OLSH<<16 | gc.TUINT64,
-		gc.OLSH<<16 | gc.TPTR64:
+	case OLSH_ | gc.TINT64,
+		OLSH_ | gc.TUINT64,
+		OLSH_ | gc.TPTR64:
 		a = x86.ASHLQ
 
-	case gc.ORSH<<16 | gc.TUINT8:
+	case ORSH_ | gc.TUINT8:
 		a = x86.ASHRB
 
-	case gc.ORSH<<16 | gc.TUINT16:
+	case ORSH_ | gc.TUINT16:
 		a = x86.ASHRW
 
-	case gc.ORSH<<16 | gc.TUINT32,
-		gc.ORSH<<16 | gc.TPTR32:
+	case ORSH_ | gc.TUINT32,
+		ORSH_ | gc.TPTR32:
 		a = x86.ASHRL
 
-	case gc.ORSH<<16 | gc.TUINT64,
-		gc.ORSH<<16 | gc.TPTR64:
+	case ORSH_ | gc.TUINT64,
+		ORSH_ | gc.TPTR64:
 		a = x86.ASHRQ
 
-	case gc.ORSH<<16 | gc.TINT8:
+	case ORSH_ | gc.TINT8:
 		a = x86.ASARB
 
-	case gc.ORSH<<16 | gc.TINT16:
+	case ORSH_ | gc.TINT16:
 		a = x86.ASARW
 
-	case gc.ORSH<<16 | gc.TINT32:
+	case ORSH_ | gc.TINT32:
 		a = x86.ASARL
 
-	case gc.ORSH<<16 | gc.TINT64:
+	case ORSH_ | gc.TINT64:
 		a = x86.ASARQ
 
-	case gc.ORROTC<<16 | gc.TINT8,
-		gc.ORROTC<<16 | gc.TUINT8:
+	case ORROTC_ | gc.TINT8,
+		ORROTC_ | gc.TUINT8:
 		a = x86.ARCRB
 
-	case gc.ORROTC<<16 | gc.TINT16,
-		gc.ORROTC<<16 | gc.TUINT16:
+	case ORROTC_ | gc.TINT16,
+		ORROTC_ | gc.TUINT16:
 		a = x86.ARCRW
 
-	case gc.ORROTC<<16 | gc.TINT32,
-		gc.ORROTC<<16 | gc.TUINT32:
+	case ORROTC_ | gc.TINT32,
+		ORROTC_ | gc.TUINT32:
 		a = x86.ARCRL
 
-	case gc.ORROTC<<16 | gc.TINT64,
-		gc.ORROTC<<16 | gc.TUINT64:
+	case ORROTC_ | gc.TINT64,
+		ORROTC_ | gc.TUINT64:
 		a = x86.ARCRQ
 
-	case gc.OHMUL<<16 | gc.TINT8,
-		gc.OMUL<<16 | gc.TINT8,
-		gc.OMUL<<16 | gc.TUINT8:
+	case OHMUL_ | gc.TINT8,
+		OMUL_ | gc.TINT8,
+		OMUL_ | gc.TUINT8:
 		a = x86.AIMULB
 
-	case gc.OHMUL<<16 | gc.TINT16,
-		gc.OMUL<<16 | gc.TINT16,
-		gc.OMUL<<16 | gc.TUINT16:
+	case OHMUL_ | gc.TINT16,
+		OMUL_ | gc.TINT16,
+		OMUL_ | gc.TUINT16:
 		a = x86.AIMULW
 
-	case gc.OHMUL<<16 | gc.TINT32,
-		gc.OMUL<<16 | gc.TINT32,
-		gc.OMUL<<16 | gc.TUINT32,
-		gc.OMUL<<16 | gc.TPTR32:
+	case OHMUL_ | gc.TINT32,
+		OMUL_ | gc.TINT32,
+		OMUL_ | gc.TUINT32,
+		OMUL_ | gc.TPTR32:
 		a = x86.AIMULL
 
-	case gc.OHMUL<<16 | gc.TINT64,
-		gc.OMUL<<16 | gc.TINT64,
-		gc.OMUL<<16 | gc.TUINT64,
-		gc.OMUL<<16 | gc.TPTR64:
+	case OHMUL_ | gc.TINT64,
+		OMUL_ | gc.TINT64,
+		OMUL_ | gc.TUINT64,
+		OMUL_ | gc.TPTR64:
 		a = x86.AIMULQ
 
-	case gc.OHMUL<<16 | gc.TUINT8:
+	case OHMUL_ | gc.TUINT8:
 		a = x86.AMULB
 
-	case gc.OHMUL<<16 | gc.TUINT16:
+	case OHMUL_ | gc.TUINT16:
 		a = x86.AMULW
 
-	case gc.OHMUL<<16 | gc.TUINT32,
-		gc.OHMUL<<16 | gc.TPTR32:
+	case OHMUL_ | gc.TUINT32,
+		OHMUL_ | gc.TPTR32:
 		a = x86.AMULL
 
-	case gc.OHMUL<<16 | gc.TUINT64,
-		gc.OHMUL<<16 | gc.TPTR64:
+	case OHMUL_ | gc.TUINT64,
+		OHMUL_ | gc.TPTR64:
 		a = x86.AMULQ
 
-	case gc.OMUL<<16 | gc.TFLOAT32:
+	case OMUL_ | gc.TFLOAT32:
 		a = x86.AMULSS
 
-	case gc.OMUL<<16 | gc.TFLOAT64:
+	case OMUL_ | gc.TFLOAT64:
 		a = x86.AMULSD
 
-	case gc.ODIV<<16 | gc.TINT8,
-		gc.OMOD<<16 | gc.TINT8:
+	case ODIV_ | gc.TINT8,
+		OMOD_ | gc.TINT8:
 		a = x86.AIDIVB
 
-	case gc.ODIV<<16 | gc.TUINT8,
-		gc.OMOD<<16 | gc.TUINT8:
+	case ODIV_ | gc.TUINT8,
+		OMOD_ | gc.TUINT8:
 		a = x86.ADIVB
 
-	case gc.ODIV<<16 | gc.TINT16,
-		gc.OMOD<<16 | gc.TINT16:
+	case ODIV_ | gc.TINT16,
+		OMOD_ | gc.TINT16:
 		a = x86.AIDIVW
 
-	case gc.ODIV<<16 | gc.TUINT16,
-		gc.OMOD<<16 | gc.TUINT16:
+	case ODIV_ | gc.TUINT16,
+		OMOD_ | gc.TUINT16:
 		a = x86.ADIVW
 
-	case gc.ODIV<<16 | gc.TINT32,
-		gc.OMOD<<16 | gc.TINT32:
+	case ODIV_ | gc.TINT32,
+		OMOD_ | gc.TINT32:
 		a = x86.AIDIVL
 
-	case gc.ODIV<<16 | gc.TUINT32,
-		gc.ODIV<<16 | gc.TPTR32,
-		gc.OMOD<<16 | gc.TUINT32,
-		gc.OMOD<<16 | gc.TPTR32:
+	case ODIV_ | gc.TUINT32,
+		ODIV_ | gc.TPTR32,
+		OMOD_ | gc.TUINT32,
+		OMOD_ | gc.TPTR32:
 		a = x86.ADIVL
 
-	case gc.ODIV<<16 | gc.TINT64,
-		gc.OMOD<<16 | gc.TINT64:
+	case ODIV_ | gc.TINT64,
+		OMOD_ | gc.TINT64:
 		a = x86.AIDIVQ
 
-	case gc.ODIV<<16 | gc.TUINT64,
-		gc.ODIV<<16 | gc.TPTR64,
-		gc.OMOD<<16 | gc.TUINT64,
-		gc.OMOD<<16 | gc.TPTR64:
+	case ODIV_ | gc.TUINT64,
+		ODIV_ | gc.TPTR64,
+		OMOD_ | gc.TUINT64,
+		OMOD_ | gc.TPTR64:
 		a = x86.ADIVQ
 
-	case gc.OEXTEND<<16 | gc.TINT16:
+	case OEXTEND_ | gc.TINT16:
 		a = x86.ACWD
 
-	case gc.OEXTEND<<16 | gc.TINT32:
+	case OEXTEND_ | gc.TINT32:
 		a = x86.ACDQ
 
-	case gc.OEXTEND<<16 | gc.TINT64:
+	case OEXTEND_ | gc.TINT64:
 		a = x86.ACQO
 
-	case gc.ODIV<<16 | gc.TFLOAT32:
+	case ODIV_ | gc.TFLOAT32:
 		a = x86.ADIVSS
 
-	case gc.ODIV<<16 | gc.TFLOAT64:
+	case ODIV_ | gc.TFLOAT64:
 		a = x86.ADIVSD
 
-	case gc.OSQRT<<16 | gc.TFLOAT64:
+	case OSQRT_ | gc.TFLOAT64:
 		a = x86.ASQRTSD
 	}
 
diff --git a/src/cmd/compile/internal/amd64/peep.go b/src/cmd/compile/internal/amd64/peep.go
index 130f369..452f954 100644
--- a/src/cmd/compile/internal/amd64/peep.go
+++ b/src/cmd/compile/internal/amd64/peep.go
@@ -823,6 +823,10 @@
 		return 2
 	}
 
+	if (p.Info.Reguse|p.Info.Regset)&FtoB(int(v.Reg)) != 0 {
+		return 2
+	}
+
 	if p.Info.Flags&gc.LeftAddr != 0 {
 		if copyas(&p.From, v) {
 			return 2
diff --git a/src/cmd/compile/internal/amd64/prog.go b/src/cmd/compile/internal/amd64/prog.go
index 9502cf9..b4cc781 100644
--- a/src/cmd/compile/internal/amd64/prog.go
+++ b/src/cmd/compile/internal/amd64/prog.go
@@ -141,7 +141,7 @@
 	x86.AMOVSL:     {Flags: gc.OK, Reguse: DI | SI, Regset: DI | SI},
 	x86.AMOVSQ:     {Flags: gc.OK, Reguse: DI | SI, Regset: DI | SI},
 	x86.AMOVSW:     {Flags: gc.OK, Reguse: DI | SI, Regset: DI | SI},
-	obj.ADUFFCOPY:  {Flags: gc.OK, Reguse: DI | SI, Regset: DI | SI | CX},
+	obj.ADUFFCOPY:  {Flags: gc.OK, Reguse: DI | SI, Regset: DI | SI | X0},
 	x86.AMOVSD:     {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move},
 	x86.AMOVSS:     {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Move},
 
@@ -228,7 +228,7 @@
 	x86.ASTOSL:    {Flags: gc.OK, Reguse: AX | DI, Regset: DI},
 	x86.ASTOSQ:    {Flags: gc.OK, Reguse: AX | DI, Regset: DI},
 	x86.ASTOSW:    {Flags: gc.OK, Reguse: AX | DI, Regset: DI},
-	obj.ADUFFZERO: {Flags: gc.OK, Reguse: AX | DI, Regset: DI},
+	obj.ADUFFZERO: {Flags: gc.OK, Reguse: X0 | DI, Regset: DI},
 	x86.ASUBB:     {Flags: gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry},
 	x86.ASUBL:     {Flags: gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry},
 	x86.ASUBQ:     {Flags: gc.SizeQ | gc.LeftRead | RightRdwr | gc.SetCarry},
diff --git a/src/cmd/compile/internal/amd64/reg.go b/src/cmd/compile/internal/amd64/reg.go
index 8fab639..60822fb 100644
--- a/src/cmd/compile/internal/amd64/reg.go
+++ b/src/cmd/compile/internal/amd64/reg.go
@@ -107,6 +107,7 @@
 	DI  = 1 << (x86.REG_DI - x86.REG_AX)
 	SI  = 1 << (x86.REG_SI - x86.REG_AX)
 	R15 = 1 << (x86.REG_R15 - x86.REG_AX)
+	X0  = 1 << 16
 )
 
 func RtoB(r int) uint64 {
diff --git a/src/cmd/compile/internal/arm/cgen64.go b/src/cmd/compile/internal/arm/cgen64.go
index a9fe77b..d46d5a8 100644
--- a/src/cmd/compile/internal/arm/cgen64.go
+++ b/src/cmd/compile/internal/arm/cgen64.go
@@ -741,9 +741,9 @@
 		gins(arm.AMOVW, &lo1, &al)
 		gins(arm.AMOVW, &hi1, &ah)
 		gins(arm.AMOVW, &lo2, &n1)
-		gins(optoas(int(n.Op), lo1.Type), &n1, &al)
+		gins(optoas(n.Op, lo1.Type), &n1, &al)
 		gins(arm.AMOVW, &hi2, &n1)
-		gins(optoas(int(n.Op), lo1.Type), &n1, &ah)
+		gins(optoas(n.Op, lo1.Type), &n1, &ah)
 		gc.Regfree(&n1)
 	}
 
@@ -767,7 +767,7 @@
  * generate comparison of nl, nr, both 64-bit.
  * nl is memory; nr is constant or memory.
  */
-func cmp64(nl *gc.Node, nr *gc.Node, op int, likely int, to *obj.Prog) {
+func cmp64(nl *gc.Node, nr *gc.Node, op gc.Op, likely int, to *obj.Prog) {
 	var lo1 gc.Node
 	var hi1 gc.Node
 	var lo2 gc.Node
diff --git a/src/cmd/compile/internal/arm/ggen.go b/src/cmd/compile/internal/arm/ggen.go
index 193d4af..517b4f4 100644
--- a/src/cmd/compile/internal/arm/ggen.go
+++ b/src/cmd/compile/internal/arm/ggen.go
@@ -173,7 +173,7 @@
  *	res = nl << nr
  *	res = nl >> nr
  */
-func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 	if nl.Type.Width > 4 {
 		gc.Fatalf("cgen_shift %v", nl.Type)
 	}
@@ -477,7 +477,7 @@
 	gc.Regfree(&n2)
 }
 
-func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
+func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
 	if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && n1.Int() == 0 && n2.Op != gc.OLITERAL {
 		op = gc.Brrev(op)
 		n1, n2 = n2, n1
diff --git a/src/cmd/compile/internal/arm/gsubr.go b/src/cmd/compile/internal/arm/gsubr.go
index acc6765..108d78a 100644
--- a/src/cmd/compile/internal/arm/gsubr.go
+++ b/src/cmd/compile/internal/arm/gsubr.go
@@ -757,11 +757,36 @@
 /*
  * return Axxx for Oxxx on type t.
  */
-func optoas(op int, t *gc.Type) int {
+func optoas(op gc.Op, t *gc.Type) int {
 	if t == nil {
 		gc.Fatalf("optoas: t is nil")
 	}
 
+	// avoid constant conversions in switches below
+	const (
+		OMINUS_ = uint32(gc.OMINUS) << 16
+		OLSH_   = uint32(gc.OLSH) << 16
+		ORSH_   = uint32(gc.ORSH) << 16
+		OADD_   = uint32(gc.OADD) << 16
+		OSUB_   = uint32(gc.OSUB) << 16
+		OMUL_   = uint32(gc.OMUL) << 16
+		ODIV_   = uint32(gc.ODIV) << 16
+		OMOD_   = uint32(gc.OMOD) << 16
+		OOR_    = uint32(gc.OOR) << 16
+		OAND_   = uint32(gc.OAND) << 16
+		OXOR_   = uint32(gc.OXOR) << 16
+		OEQ_    = uint32(gc.OEQ) << 16
+		ONE_    = uint32(gc.ONE) << 16
+		OLT_    = uint32(gc.OLT) << 16
+		OLE_    = uint32(gc.OLE) << 16
+		OGE_    = uint32(gc.OGE) << 16
+		OGT_    = uint32(gc.OGT) << 16
+		OCMP_   = uint32(gc.OCMP) << 16
+		OPS_    = uint32(gc.OPS) << 16
+		OAS_    = uint32(gc.OAS) << 16
+		OSQRT_  = uint32(gc.OSQRT) << 16
+	)
+
 	a := obj.AXXX
 	switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
 	default:
@@ -776,261 +801,261 @@
 				break;
 		*/
 	// TODO(kaib): make sure the conditional branches work on all edge cases
-	case gc.OEQ<<16 | gc.TBOOL,
-		gc.OEQ<<16 | gc.TINT8,
-		gc.OEQ<<16 | gc.TUINT8,
-		gc.OEQ<<16 | gc.TINT16,
-		gc.OEQ<<16 | gc.TUINT16,
-		gc.OEQ<<16 | gc.TINT32,
-		gc.OEQ<<16 | gc.TUINT32,
-		gc.OEQ<<16 | gc.TINT64,
-		gc.OEQ<<16 | gc.TUINT64,
-		gc.OEQ<<16 | gc.TPTR32,
-		gc.OEQ<<16 | gc.TPTR64,
-		gc.OEQ<<16 | gc.TFLOAT32,
-		gc.OEQ<<16 | gc.TFLOAT64:
+	case OEQ_ | gc.TBOOL,
+		OEQ_ | gc.TINT8,
+		OEQ_ | gc.TUINT8,
+		OEQ_ | gc.TINT16,
+		OEQ_ | gc.TUINT16,
+		OEQ_ | gc.TINT32,
+		OEQ_ | gc.TUINT32,
+		OEQ_ | gc.TINT64,
+		OEQ_ | gc.TUINT64,
+		OEQ_ | gc.TPTR32,
+		OEQ_ | gc.TPTR64,
+		OEQ_ | gc.TFLOAT32,
+		OEQ_ | gc.TFLOAT64:
 		a = arm.ABEQ
 
-	case gc.ONE<<16 | gc.TBOOL,
-		gc.ONE<<16 | gc.TINT8,
-		gc.ONE<<16 | gc.TUINT8,
-		gc.ONE<<16 | gc.TINT16,
-		gc.ONE<<16 | gc.TUINT16,
-		gc.ONE<<16 | gc.TINT32,
-		gc.ONE<<16 | gc.TUINT32,
-		gc.ONE<<16 | gc.TINT64,
-		gc.ONE<<16 | gc.TUINT64,
-		gc.ONE<<16 | gc.TPTR32,
-		gc.ONE<<16 | gc.TPTR64,
-		gc.ONE<<16 | gc.TFLOAT32,
-		gc.ONE<<16 | gc.TFLOAT64:
+	case ONE_ | gc.TBOOL,
+		ONE_ | gc.TINT8,
+		ONE_ | gc.TUINT8,
+		ONE_ | gc.TINT16,
+		ONE_ | gc.TUINT16,
+		ONE_ | gc.TINT32,
+		ONE_ | gc.TUINT32,
+		ONE_ | gc.TINT64,
+		ONE_ | gc.TUINT64,
+		ONE_ | gc.TPTR32,
+		ONE_ | gc.TPTR64,
+		ONE_ | gc.TFLOAT32,
+		ONE_ | gc.TFLOAT64:
 		a = arm.ABNE
 
-	case gc.OLT<<16 | gc.TINT8,
-		gc.OLT<<16 | gc.TINT16,
-		gc.OLT<<16 | gc.TINT32,
-		gc.OLT<<16 | gc.TINT64,
-		gc.OLT<<16 | gc.TFLOAT32,
-		gc.OLT<<16 | gc.TFLOAT64:
+	case OLT_ | gc.TINT8,
+		OLT_ | gc.TINT16,
+		OLT_ | gc.TINT32,
+		OLT_ | gc.TINT64,
+		OLT_ | gc.TFLOAT32,
+		OLT_ | gc.TFLOAT64:
 		a = arm.ABLT
 
-	case gc.OLT<<16 | gc.TUINT8,
-		gc.OLT<<16 | gc.TUINT16,
-		gc.OLT<<16 | gc.TUINT32,
-		gc.OLT<<16 | gc.TUINT64:
+	case OLT_ | gc.TUINT8,
+		OLT_ | gc.TUINT16,
+		OLT_ | gc.TUINT32,
+		OLT_ | gc.TUINT64:
 		a = arm.ABLO
 
-	case gc.OLE<<16 | gc.TINT8,
-		gc.OLE<<16 | gc.TINT16,
-		gc.OLE<<16 | gc.TINT32,
-		gc.OLE<<16 | gc.TINT64,
-		gc.OLE<<16 | gc.TFLOAT32,
-		gc.OLE<<16 | gc.TFLOAT64:
+	case OLE_ | gc.TINT8,
+		OLE_ | gc.TINT16,
+		OLE_ | gc.TINT32,
+		OLE_ | gc.TINT64,
+		OLE_ | gc.TFLOAT32,
+		OLE_ | gc.TFLOAT64:
 		a = arm.ABLE
 
-	case gc.OLE<<16 | gc.TUINT8,
-		gc.OLE<<16 | gc.TUINT16,
-		gc.OLE<<16 | gc.TUINT32,
-		gc.OLE<<16 | gc.TUINT64:
+	case OLE_ | gc.TUINT8,
+		OLE_ | gc.TUINT16,
+		OLE_ | gc.TUINT32,
+		OLE_ | gc.TUINT64:
 		a = arm.ABLS
 
-	case gc.OGT<<16 | gc.TINT8,
-		gc.OGT<<16 | gc.TINT16,
-		gc.OGT<<16 | gc.TINT32,
-		gc.OGT<<16 | gc.TINT64,
-		gc.OGT<<16 | gc.TFLOAT32,
-		gc.OGT<<16 | gc.TFLOAT64:
+	case OGT_ | gc.TINT8,
+		OGT_ | gc.TINT16,
+		OGT_ | gc.TINT32,
+		OGT_ | gc.TINT64,
+		OGT_ | gc.TFLOAT32,
+		OGT_ | gc.TFLOAT64:
 		a = arm.ABGT
 
-	case gc.OGT<<16 | gc.TUINT8,
-		gc.OGT<<16 | gc.TUINT16,
-		gc.OGT<<16 | gc.TUINT32,
-		gc.OGT<<16 | gc.TUINT64:
+	case OGT_ | gc.TUINT8,
+		OGT_ | gc.TUINT16,
+		OGT_ | gc.TUINT32,
+		OGT_ | gc.TUINT64:
 		a = arm.ABHI
 
-	case gc.OGE<<16 | gc.TINT8,
-		gc.OGE<<16 | gc.TINT16,
-		gc.OGE<<16 | gc.TINT32,
-		gc.OGE<<16 | gc.TINT64,
-		gc.OGE<<16 | gc.TFLOAT32,
-		gc.OGE<<16 | gc.TFLOAT64:
+	case OGE_ | gc.TINT8,
+		OGE_ | gc.TINT16,
+		OGE_ | gc.TINT32,
+		OGE_ | gc.TINT64,
+		OGE_ | gc.TFLOAT32,
+		OGE_ | gc.TFLOAT64:
 		a = arm.ABGE
 
-	case gc.OGE<<16 | gc.TUINT8,
-		gc.OGE<<16 | gc.TUINT16,
-		gc.OGE<<16 | gc.TUINT32,
-		gc.OGE<<16 | gc.TUINT64:
+	case OGE_ | gc.TUINT8,
+		OGE_ | gc.TUINT16,
+		OGE_ | gc.TUINT32,
+		OGE_ | gc.TUINT64:
 		a = arm.ABHS
 
-	case gc.OCMP<<16 | gc.TBOOL,
-		gc.OCMP<<16 | gc.TINT8,
-		gc.OCMP<<16 | gc.TUINT8,
-		gc.OCMP<<16 | gc.TINT16,
-		gc.OCMP<<16 | gc.TUINT16,
-		gc.OCMP<<16 | gc.TINT32,
-		gc.OCMP<<16 | gc.TUINT32,
-		gc.OCMP<<16 | gc.TPTR32:
+	case OCMP_ | gc.TBOOL,
+		OCMP_ | gc.TINT8,
+		OCMP_ | gc.TUINT8,
+		OCMP_ | gc.TINT16,
+		OCMP_ | gc.TUINT16,
+		OCMP_ | gc.TINT32,
+		OCMP_ | gc.TUINT32,
+		OCMP_ | gc.TPTR32:
 		a = arm.ACMP
 
-	case gc.OCMP<<16 | gc.TFLOAT32:
+	case OCMP_ | gc.TFLOAT32:
 		a = arm.ACMPF
 
-	case gc.OCMP<<16 | gc.TFLOAT64:
+	case OCMP_ | gc.TFLOAT64:
 		a = arm.ACMPD
 
-	case gc.OPS<<16 | gc.TFLOAT32,
-		gc.OPS<<16 | gc.TFLOAT64:
+	case OPS_ | gc.TFLOAT32,
+		OPS_ | gc.TFLOAT64:
 		a = arm.ABVS
 
-	case gc.OAS<<16 | gc.TBOOL:
+	case OAS_ | gc.TBOOL:
 		a = arm.AMOVB
 
-	case gc.OAS<<16 | gc.TINT8:
+	case OAS_ | gc.TINT8:
 		a = arm.AMOVBS
 
-	case gc.OAS<<16 | gc.TUINT8:
+	case OAS_ | gc.TUINT8:
 		a = arm.AMOVBU
 
-	case gc.OAS<<16 | gc.TINT16:
+	case OAS_ | gc.TINT16:
 		a = arm.AMOVHS
 
-	case gc.OAS<<16 | gc.TUINT16:
+	case OAS_ | gc.TUINT16:
 		a = arm.AMOVHU
 
-	case gc.OAS<<16 | gc.TINT32,
-		gc.OAS<<16 | gc.TUINT32,
-		gc.OAS<<16 | gc.TPTR32:
+	case OAS_ | gc.TINT32,
+		OAS_ | gc.TUINT32,
+		OAS_ | gc.TPTR32:
 		a = arm.AMOVW
 
-	case gc.OAS<<16 | gc.TFLOAT32:
+	case OAS_ | gc.TFLOAT32:
 		a = arm.AMOVF
 
-	case gc.OAS<<16 | gc.TFLOAT64:
+	case OAS_ | gc.TFLOAT64:
 		a = arm.AMOVD
 
-	case gc.OADD<<16 | gc.TINT8,
-		gc.OADD<<16 | gc.TUINT8,
-		gc.OADD<<16 | gc.TINT16,
-		gc.OADD<<16 | gc.TUINT16,
-		gc.OADD<<16 | gc.TINT32,
-		gc.OADD<<16 | gc.TUINT32,
-		gc.OADD<<16 | gc.TPTR32:
+	case OADD_ | gc.TINT8,
+		OADD_ | gc.TUINT8,
+		OADD_ | gc.TINT16,
+		OADD_ | gc.TUINT16,
+		OADD_ | gc.TINT32,
+		OADD_ | gc.TUINT32,
+		OADD_ | gc.TPTR32:
 		a = arm.AADD
 
-	case gc.OADD<<16 | gc.TFLOAT32:
+	case OADD_ | gc.TFLOAT32:
 		a = arm.AADDF
 
-	case gc.OADD<<16 | gc.TFLOAT64:
+	case OADD_ | gc.TFLOAT64:
 		a = arm.AADDD
 
-	case gc.OSUB<<16 | gc.TINT8,
-		gc.OSUB<<16 | gc.TUINT8,
-		gc.OSUB<<16 | gc.TINT16,
-		gc.OSUB<<16 | gc.TUINT16,
-		gc.OSUB<<16 | gc.TINT32,
-		gc.OSUB<<16 | gc.TUINT32,
-		gc.OSUB<<16 | gc.TPTR32:
+	case OSUB_ | gc.TINT8,
+		OSUB_ | gc.TUINT8,
+		OSUB_ | gc.TINT16,
+		OSUB_ | gc.TUINT16,
+		OSUB_ | gc.TINT32,
+		OSUB_ | gc.TUINT32,
+		OSUB_ | gc.TPTR32:
 		a = arm.ASUB
 
-	case gc.OSUB<<16 | gc.TFLOAT32:
+	case OSUB_ | gc.TFLOAT32:
 		a = arm.ASUBF
 
-	case gc.OSUB<<16 | gc.TFLOAT64:
+	case OSUB_ | gc.TFLOAT64:
 		a = arm.ASUBD
 
-	case gc.OMINUS<<16 | gc.TINT8,
-		gc.OMINUS<<16 | gc.TUINT8,
-		gc.OMINUS<<16 | gc.TINT16,
-		gc.OMINUS<<16 | gc.TUINT16,
-		gc.OMINUS<<16 | gc.TINT32,
-		gc.OMINUS<<16 | gc.TUINT32,
-		gc.OMINUS<<16 | gc.TPTR32:
+	case OMINUS_ | gc.TINT8,
+		OMINUS_ | gc.TUINT8,
+		OMINUS_ | gc.TINT16,
+		OMINUS_ | gc.TUINT16,
+		OMINUS_ | gc.TINT32,
+		OMINUS_ | gc.TUINT32,
+		OMINUS_ | gc.TPTR32:
 		a = arm.ARSB
 
-	case gc.OAND<<16 | gc.TINT8,
-		gc.OAND<<16 | gc.TUINT8,
-		gc.OAND<<16 | gc.TINT16,
-		gc.OAND<<16 | gc.TUINT16,
-		gc.OAND<<16 | gc.TINT32,
-		gc.OAND<<16 | gc.TUINT32,
-		gc.OAND<<16 | gc.TPTR32:
+	case OAND_ | gc.TINT8,
+		OAND_ | gc.TUINT8,
+		OAND_ | gc.TINT16,
+		OAND_ | gc.TUINT16,
+		OAND_ | gc.TINT32,
+		OAND_ | gc.TUINT32,
+		OAND_ | gc.TPTR32:
 		a = arm.AAND
 
-	case gc.OOR<<16 | gc.TINT8,
-		gc.OOR<<16 | gc.TUINT8,
-		gc.OOR<<16 | gc.TINT16,
-		gc.OOR<<16 | gc.TUINT16,
-		gc.OOR<<16 | gc.TINT32,
-		gc.OOR<<16 | gc.TUINT32,
-		gc.OOR<<16 | gc.TPTR32:
+	case OOR_ | gc.TINT8,
+		OOR_ | gc.TUINT8,
+		OOR_ | gc.TINT16,
+		OOR_ | gc.TUINT16,
+		OOR_ | gc.TINT32,
+		OOR_ | gc.TUINT32,
+		OOR_ | gc.TPTR32:
 		a = arm.AORR
 
-	case gc.OXOR<<16 | gc.TINT8,
-		gc.OXOR<<16 | gc.TUINT8,
-		gc.OXOR<<16 | gc.TINT16,
-		gc.OXOR<<16 | gc.TUINT16,
-		gc.OXOR<<16 | gc.TINT32,
-		gc.OXOR<<16 | gc.TUINT32,
-		gc.OXOR<<16 | gc.TPTR32:
+	case OXOR_ | gc.TINT8,
+		OXOR_ | gc.TUINT8,
+		OXOR_ | gc.TINT16,
+		OXOR_ | gc.TUINT16,
+		OXOR_ | gc.TINT32,
+		OXOR_ | gc.TUINT32,
+		OXOR_ | gc.TPTR32:
 		a = arm.AEOR
 
-	case gc.OLSH<<16 | gc.TINT8,
-		gc.OLSH<<16 | gc.TUINT8,
-		gc.OLSH<<16 | gc.TINT16,
-		gc.OLSH<<16 | gc.TUINT16,
-		gc.OLSH<<16 | gc.TINT32,
-		gc.OLSH<<16 | gc.TUINT32,
-		gc.OLSH<<16 | gc.TPTR32:
+	case OLSH_ | gc.TINT8,
+		OLSH_ | gc.TUINT8,
+		OLSH_ | gc.TINT16,
+		OLSH_ | gc.TUINT16,
+		OLSH_ | gc.TINT32,
+		OLSH_ | gc.TUINT32,
+		OLSH_ | gc.TPTR32:
 		a = arm.ASLL
 
-	case gc.ORSH<<16 | gc.TUINT8,
-		gc.ORSH<<16 | gc.TUINT16,
-		gc.ORSH<<16 | gc.TUINT32,
-		gc.ORSH<<16 | gc.TPTR32:
+	case ORSH_ | gc.TUINT8,
+		ORSH_ | gc.TUINT16,
+		ORSH_ | gc.TUINT32,
+		ORSH_ | gc.TPTR32:
 		a = arm.ASRL
 
-	case gc.ORSH<<16 | gc.TINT8,
-		gc.ORSH<<16 | gc.TINT16,
-		gc.ORSH<<16 | gc.TINT32:
+	case ORSH_ | gc.TINT8,
+		ORSH_ | gc.TINT16,
+		ORSH_ | gc.TINT32:
 		a = arm.ASRA
 
-	case gc.OMUL<<16 | gc.TUINT8,
-		gc.OMUL<<16 | gc.TUINT16,
-		gc.OMUL<<16 | gc.TUINT32,
-		gc.OMUL<<16 | gc.TPTR32:
+	case OMUL_ | gc.TUINT8,
+		OMUL_ | gc.TUINT16,
+		OMUL_ | gc.TUINT32,
+		OMUL_ | gc.TPTR32:
 		a = arm.AMULU
 
-	case gc.OMUL<<16 | gc.TINT8,
-		gc.OMUL<<16 | gc.TINT16,
-		gc.OMUL<<16 | gc.TINT32:
+	case OMUL_ | gc.TINT8,
+		OMUL_ | gc.TINT16,
+		OMUL_ | gc.TINT32:
 		a = arm.AMUL
 
-	case gc.OMUL<<16 | gc.TFLOAT32:
+	case OMUL_ | gc.TFLOAT32:
 		a = arm.AMULF
 
-	case gc.OMUL<<16 | gc.TFLOAT64:
+	case OMUL_ | gc.TFLOAT64:
 		a = arm.AMULD
 
-	case gc.ODIV<<16 | gc.TUINT8,
-		gc.ODIV<<16 | gc.TUINT16,
-		gc.ODIV<<16 | gc.TUINT32,
-		gc.ODIV<<16 | gc.TPTR32:
+	case ODIV_ | gc.TUINT8,
+		ODIV_ | gc.TUINT16,
+		ODIV_ | gc.TUINT32,
+		ODIV_ | gc.TPTR32:
 		a = arm.ADIVU
 
-	case gc.ODIV<<16 | gc.TINT8,
-		gc.ODIV<<16 | gc.TINT16,
-		gc.ODIV<<16 | gc.TINT32:
+	case ODIV_ | gc.TINT8,
+		ODIV_ | gc.TINT16,
+		ODIV_ | gc.TINT32:
 		a = arm.ADIV
 
-	case gc.OMOD<<16 | gc.TUINT8,
-		gc.OMOD<<16 | gc.TUINT16,
-		gc.OMOD<<16 | gc.TUINT32,
-		gc.OMOD<<16 | gc.TPTR32:
+	case OMOD_ | gc.TUINT8,
+		OMOD_ | gc.TUINT16,
+		OMOD_ | gc.TUINT32,
+		OMOD_ | gc.TPTR32:
 		a = arm.AMODU
 
-	case gc.OMOD<<16 | gc.TINT8,
-		gc.OMOD<<16 | gc.TINT16,
-		gc.OMOD<<16 | gc.TINT32:
+	case OMOD_ | gc.TINT8,
+		OMOD_ | gc.TINT16,
+		OMOD_ | gc.TINT32:
 		a = arm.AMOD
 
 		//	case CASE(OEXTEND, TINT16):
@@ -1045,13 +1070,13 @@
 	//		a = ACQO;
 	//		break;
 
-	case gc.ODIV<<16 | gc.TFLOAT32:
+	case ODIV_ | gc.TFLOAT32:
 		a = arm.ADIVF
 
-	case gc.ODIV<<16 | gc.TFLOAT64:
+	case ODIV_ | gc.TFLOAT64:
 		a = arm.ADIVD
 
-	case gc.OSQRT<<16 | gc.TFLOAT64:
+	case OSQRT_ | gc.TFLOAT64:
 		a = arm.ASQRTD
 	}
 
diff --git a/src/cmd/compile/internal/arm64/ggen.go b/src/cmd/compile/internal/arm64/ggen.go
index 2cbd663..c495bbc 100644
--- a/src/cmd/compile/internal/arm64/ggen.go
+++ b/src/cmd/compile/internal/arm64/ggen.go
@@ -140,7 +140,7 @@
  *	res = nl % nr
  * according to op.
  */
-func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+func dodiv(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 	// Have to be careful about handling
 	// most negative int divided by -1 correctly.
 	// The hardware will generate undefined result.
@@ -310,7 +310,7 @@
  *	res = nl << nr
  *	res = nl >> nr
  */
-func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 	a := int(optoas(op, nl.Type))
 
 	if nr.Op == gc.OLITERAL {
diff --git a/src/cmd/compile/internal/arm64/gsubr.go b/src/cmd/compile/internal/arm64/gsubr.go
index 50ff29bf..c0aa45e 100644
--- a/src/cmd/compile/internal/arm64/gsubr.go
+++ b/src/cmd/compile/internal/arm64/gsubr.go
@@ -102,7 +102,7 @@
 	gc.Regfree(&ntmp)
 }
 
-func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
+func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
 	if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && n2.Op != gc.OLITERAL {
 		// Reverse comparison to place constant last.
 		op = gc.Brrev(op)
@@ -590,240 +590,264 @@
 /*
  * return Axxx for Oxxx on type t.
  */
-func optoas(op int, t *gc.Type) int {
+func optoas(op gc.Op, t *gc.Type) int {
 	if t == nil {
 		gc.Fatalf("optoas: t is nil")
 	}
 
+	// avoid constant conversions in switches below
+	const (
+		OMINUS_ = uint32(gc.OMINUS) << 16
+		OLSH_   = uint32(gc.OLSH) << 16
+		ORSH_   = uint32(gc.ORSH) << 16
+		OADD_   = uint32(gc.OADD) << 16
+		OSUB_   = uint32(gc.OSUB) << 16
+		OMUL_   = uint32(gc.OMUL) << 16
+		ODIV_   = uint32(gc.ODIV) << 16
+		OOR_    = uint32(gc.OOR) << 16
+		OAND_   = uint32(gc.OAND) << 16
+		OXOR_   = uint32(gc.OXOR) << 16
+		OEQ_    = uint32(gc.OEQ) << 16
+		ONE_    = uint32(gc.ONE) << 16
+		OLT_    = uint32(gc.OLT) << 16
+		OLE_    = uint32(gc.OLE) << 16
+		OGE_    = uint32(gc.OGE) << 16
+		OGT_    = uint32(gc.OGT) << 16
+		OCMP_   = uint32(gc.OCMP) << 16
+		OAS_    = uint32(gc.OAS) << 16
+		OHMUL_  = uint32(gc.OHMUL) << 16
+		OSQRT_  = uint32(gc.OSQRT) << 16
+	)
+
 	a := int(obj.AXXX)
 	switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
 	default:
 		gc.Fatalf("optoas: no entry for op=%v type=%v", gc.Oconv(int(op), 0), t)
 
-	case gc.OEQ<<16 | gc.TBOOL,
-		gc.OEQ<<16 | gc.TINT8,
-		gc.OEQ<<16 | gc.TUINT8,
-		gc.OEQ<<16 | gc.TINT16,
-		gc.OEQ<<16 | gc.TUINT16,
-		gc.OEQ<<16 | gc.TINT32,
-		gc.OEQ<<16 | gc.TUINT32,
-		gc.OEQ<<16 | gc.TINT64,
-		gc.OEQ<<16 | gc.TUINT64,
-		gc.OEQ<<16 | gc.TPTR32,
-		gc.OEQ<<16 | gc.TPTR64,
-		gc.OEQ<<16 | gc.TFLOAT32,
-		gc.OEQ<<16 | gc.TFLOAT64:
+	case OEQ_ | gc.TBOOL,
+		OEQ_ | gc.TINT8,
+		OEQ_ | gc.TUINT8,
+		OEQ_ | gc.TINT16,
+		OEQ_ | gc.TUINT16,
+		OEQ_ | gc.TINT32,
+		OEQ_ | gc.TUINT32,
+		OEQ_ | gc.TINT64,
+		OEQ_ | gc.TUINT64,
+		OEQ_ | gc.TPTR32,
+		OEQ_ | gc.TPTR64,
+		OEQ_ | gc.TFLOAT32,
+		OEQ_ | gc.TFLOAT64:
 		a = arm64.ABEQ
 
-	case gc.ONE<<16 | gc.TBOOL,
-		gc.ONE<<16 | gc.TINT8,
-		gc.ONE<<16 | gc.TUINT8,
-		gc.ONE<<16 | gc.TINT16,
-		gc.ONE<<16 | gc.TUINT16,
-		gc.ONE<<16 | gc.TINT32,
-		gc.ONE<<16 | gc.TUINT32,
-		gc.ONE<<16 | gc.TINT64,
-		gc.ONE<<16 | gc.TUINT64,
-		gc.ONE<<16 | gc.TPTR32,
-		gc.ONE<<16 | gc.TPTR64,
-		gc.ONE<<16 | gc.TFLOAT32,
-		gc.ONE<<16 | gc.TFLOAT64:
+	case ONE_ | gc.TBOOL,
+		ONE_ | gc.TINT8,
+		ONE_ | gc.TUINT8,
+		ONE_ | gc.TINT16,
+		ONE_ | gc.TUINT16,
+		ONE_ | gc.TINT32,
+		ONE_ | gc.TUINT32,
+		ONE_ | gc.TINT64,
+		ONE_ | gc.TUINT64,
+		ONE_ | gc.TPTR32,
+		ONE_ | gc.TPTR64,
+		ONE_ | gc.TFLOAT32,
+		ONE_ | gc.TFLOAT64:
 		a = arm64.ABNE
 
-	case gc.OLT<<16 | gc.TINT8,
-		gc.OLT<<16 | gc.TINT16,
-		gc.OLT<<16 | gc.TINT32,
-		gc.OLT<<16 | gc.TINT64:
+	case OLT_ | gc.TINT8,
+		OLT_ | gc.TINT16,
+		OLT_ | gc.TINT32,
+		OLT_ | gc.TINT64:
 		a = arm64.ABLT
 
-	case gc.OLT<<16 | gc.TUINT8,
-		gc.OLT<<16 | gc.TUINT16,
-		gc.OLT<<16 | gc.TUINT32,
-		gc.OLT<<16 | gc.TUINT64,
-		gc.OLT<<16 | gc.TFLOAT32,
-		gc.OLT<<16 | gc.TFLOAT64:
+	case OLT_ | gc.TUINT8,
+		OLT_ | gc.TUINT16,
+		OLT_ | gc.TUINT32,
+		OLT_ | gc.TUINT64,
+		OLT_ | gc.TFLOAT32,
+		OLT_ | gc.TFLOAT64:
 		a = arm64.ABLO
 
-	case gc.OLE<<16 | gc.TINT8,
-		gc.OLE<<16 | gc.TINT16,
-		gc.OLE<<16 | gc.TINT32,
-		gc.OLE<<16 | gc.TINT64:
+	case OLE_ | gc.TINT8,
+		OLE_ | gc.TINT16,
+		OLE_ | gc.TINT32,
+		OLE_ | gc.TINT64:
 		a = arm64.ABLE
 
-	case gc.OLE<<16 | gc.TUINT8,
-		gc.OLE<<16 | gc.TUINT16,
-		gc.OLE<<16 | gc.TUINT32,
-		gc.OLE<<16 | gc.TUINT64,
-		gc.OLE<<16 | gc.TFLOAT32,
-		gc.OLE<<16 | gc.TFLOAT64:
+	case OLE_ | gc.TUINT8,
+		OLE_ | gc.TUINT16,
+		OLE_ | gc.TUINT32,
+		OLE_ | gc.TUINT64,
+		OLE_ | gc.TFLOAT32,
+		OLE_ | gc.TFLOAT64:
 		a = arm64.ABLS
 
-	case gc.OGT<<16 | gc.TINT8,
-		gc.OGT<<16 | gc.TINT16,
-		gc.OGT<<16 | gc.TINT32,
-		gc.OGT<<16 | gc.TINT64,
-		gc.OGT<<16 | gc.TFLOAT32,
-		gc.OGT<<16 | gc.TFLOAT64:
+	case OGT_ | gc.TINT8,
+		OGT_ | gc.TINT16,
+		OGT_ | gc.TINT32,
+		OGT_ | gc.TINT64,
+		OGT_ | gc.TFLOAT32,
+		OGT_ | gc.TFLOAT64:
 		a = arm64.ABGT
 
-	case gc.OGT<<16 | gc.TUINT8,
-		gc.OGT<<16 | gc.TUINT16,
-		gc.OGT<<16 | gc.TUINT32,
-		gc.OGT<<16 | gc.TUINT64:
+	case OGT_ | gc.TUINT8,
+		OGT_ | gc.TUINT16,
+		OGT_ | gc.TUINT32,
+		OGT_ | gc.TUINT64:
 		a = arm64.ABHI
 
-	case gc.OGE<<16 | gc.TINT8,
-		gc.OGE<<16 | gc.TINT16,
-		gc.OGE<<16 | gc.TINT32,
-		gc.OGE<<16 | gc.TINT64,
-		gc.OGE<<16 | gc.TFLOAT32,
-		gc.OGE<<16 | gc.TFLOAT64:
+	case OGE_ | gc.TINT8,
+		OGE_ | gc.TINT16,
+		OGE_ | gc.TINT32,
+		OGE_ | gc.TINT64,
+		OGE_ | gc.TFLOAT32,
+		OGE_ | gc.TFLOAT64:
 		a = arm64.ABGE
 
-	case gc.OGE<<16 | gc.TUINT8,
-		gc.OGE<<16 | gc.TUINT16,
-		gc.OGE<<16 | gc.TUINT32,
-		gc.OGE<<16 | gc.TUINT64:
+	case OGE_ | gc.TUINT8,
+		OGE_ | gc.TUINT16,
+		OGE_ | gc.TUINT32,
+		OGE_ | gc.TUINT64:
 		a = arm64.ABHS
 
-	case gc.OCMP<<16 | gc.TBOOL,
-		gc.OCMP<<16 | gc.TINT8,
-		gc.OCMP<<16 | gc.TINT16,
-		gc.OCMP<<16 | gc.TINT32,
-		gc.OCMP<<16 | gc.TPTR32,
-		gc.OCMP<<16 | gc.TINT64,
-		gc.OCMP<<16 | gc.TUINT8,
-		gc.OCMP<<16 | gc.TUINT16,
-		gc.OCMP<<16 | gc.TUINT32,
-		gc.OCMP<<16 | gc.TUINT64,
-		gc.OCMP<<16 | gc.TPTR64:
+	case OCMP_ | gc.TBOOL,
+		OCMP_ | gc.TINT8,
+		OCMP_ | gc.TINT16,
+		OCMP_ | gc.TINT32,
+		OCMP_ | gc.TPTR32,
+		OCMP_ | gc.TINT64,
+		OCMP_ | gc.TUINT8,
+		OCMP_ | gc.TUINT16,
+		OCMP_ | gc.TUINT32,
+		OCMP_ | gc.TUINT64,
+		OCMP_ | gc.TPTR64:
 		a = arm64.ACMP
 
-	case gc.OCMP<<16 | gc.TFLOAT32:
+	case OCMP_ | gc.TFLOAT32:
 		a = arm64.AFCMPS
 
-	case gc.OCMP<<16 | gc.TFLOAT64:
+	case OCMP_ | gc.TFLOAT64:
 		a = arm64.AFCMPD
 
-	case gc.OAS<<16 | gc.TBOOL,
-		gc.OAS<<16 | gc.TINT8:
+	case OAS_ | gc.TBOOL,
+		OAS_ | gc.TINT8:
 		a = arm64.AMOVB
 
-	case gc.OAS<<16 | gc.TUINT8:
+	case OAS_ | gc.TUINT8:
 		a = arm64.AMOVBU
 
-	case gc.OAS<<16 | gc.TINT16:
+	case OAS_ | gc.TINT16:
 		a = arm64.AMOVH
 
-	case gc.OAS<<16 | gc.TUINT16:
+	case OAS_ | gc.TUINT16:
 		a = arm64.AMOVHU
 
-	case gc.OAS<<16 | gc.TINT32:
+	case OAS_ | gc.TINT32:
 		a = arm64.AMOVW
 
-	case gc.OAS<<16 | gc.TUINT32,
-		gc.OAS<<16 | gc.TPTR32:
+	case OAS_ | gc.TUINT32,
+		OAS_ | gc.TPTR32:
 		a = arm64.AMOVWU
 
-	case gc.OAS<<16 | gc.TINT64,
-		gc.OAS<<16 | gc.TUINT64,
-		gc.OAS<<16 | gc.TPTR64:
+	case OAS_ | gc.TINT64,
+		OAS_ | gc.TUINT64,
+		OAS_ | gc.TPTR64:
 		a = arm64.AMOVD
 
-	case gc.OAS<<16 | gc.TFLOAT32:
+	case OAS_ | gc.TFLOAT32:
 		a = arm64.AFMOVS
 
-	case gc.OAS<<16 | gc.TFLOAT64:
+	case OAS_ | gc.TFLOAT64:
 		a = arm64.AFMOVD
 
-	case gc.OADD<<16 | gc.TINT8,
-		gc.OADD<<16 | gc.TUINT8,
-		gc.OADD<<16 | gc.TINT16,
-		gc.OADD<<16 | gc.TUINT16,
-		gc.OADD<<16 | gc.TINT32,
-		gc.OADD<<16 | gc.TUINT32,
-		gc.OADD<<16 | gc.TPTR32,
-		gc.OADD<<16 | gc.TINT64,
-		gc.OADD<<16 | gc.TUINT64,
-		gc.OADD<<16 | gc.TPTR64:
+	case OADD_ | gc.TINT8,
+		OADD_ | gc.TUINT8,
+		OADD_ | gc.TINT16,
+		OADD_ | gc.TUINT16,
+		OADD_ | gc.TINT32,
+		OADD_ | gc.TUINT32,
+		OADD_ | gc.TPTR32,
+		OADD_ | gc.TINT64,
+		OADD_ | gc.TUINT64,
+		OADD_ | gc.TPTR64:
 		a = arm64.AADD
 
-	case gc.OADD<<16 | gc.TFLOAT32:
+	case OADD_ | gc.TFLOAT32:
 		a = arm64.AFADDS
 
-	case gc.OADD<<16 | gc.TFLOAT64:
+	case OADD_ | gc.TFLOAT64:
 		a = arm64.AFADDD
 
-	case gc.OSUB<<16 | gc.TINT8,
-		gc.OSUB<<16 | gc.TUINT8,
-		gc.OSUB<<16 | gc.TINT16,
-		gc.OSUB<<16 | gc.TUINT16,
-		gc.OSUB<<16 | gc.TINT32,
-		gc.OSUB<<16 | gc.TUINT32,
-		gc.OSUB<<16 | gc.TPTR32,
-		gc.OSUB<<16 | gc.TINT64,
-		gc.OSUB<<16 | gc.TUINT64,
-		gc.OSUB<<16 | gc.TPTR64:
+	case OSUB_ | gc.TINT8,
+		OSUB_ | gc.TUINT8,
+		OSUB_ | gc.TINT16,
+		OSUB_ | gc.TUINT16,
+		OSUB_ | gc.TINT32,
+		OSUB_ | gc.TUINT32,
+		OSUB_ | gc.TPTR32,
+		OSUB_ | gc.TINT64,
+		OSUB_ | gc.TUINT64,
+		OSUB_ | gc.TPTR64:
 		a = arm64.ASUB
 
-	case gc.OSUB<<16 | gc.TFLOAT32:
+	case OSUB_ | gc.TFLOAT32:
 		a = arm64.AFSUBS
 
-	case gc.OSUB<<16 | gc.TFLOAT64:
+	case OSUB_ | gc.TFLOAT64:
 		a = arm64.AFSUBD
 
-	case gc.OMINUS<<16 | gc.TINT8,
-		gc.OMINUS<<16 | gc.TUINT8,
-		gc.OMINUS<<16 | gc.TINT16,
-		gc.OMINUS<<16 | gc.TUINT16,
-		gc.OMINUS<<16 | gc.TINT32,
-		gc.OMINUS<<16 | gc.TUINT32,
-		gc.OMINUS<<16 | gc.TPTR32,
-		gc.OMINUS<<16 | gc.TINT64,
-		gc.OMINUS<<16 | gc.TUINT64,
-		gc.OMINUS<<16 | gc.TPTR64:
+	case OMINUS_ | gc.TINT8,
+		OMINUS_ | gc.TUINT8,
+		OMINUS_ | gc.TINT16,
+		OMINUS_ | gc.TUINT16,
+		OMINUS_ | gc.TINT32,
+		OMINUS_ | gc.TUINT32,
+		OMINUS_ | gc.TPTR32,
+		OMINUS_ | gc.TINT64,
+		OMINUS_ | gc.TUINT64,
+		OMINUS_ | gc.TPTR64:
 		a = arm64.ANEG
 
-	case gc.OMINUS<<16 | gc.TFLOAT32:
+	case OMINUS_ | gc.TFLOAT32:
 		a = arm64.AFNEGS
 
-	case gc.OMINUS<<16 | gc.TFLOAT64:
+	case OMINUS_ | gc.TFLOAT64:
 		a = arm64.AFNEGD
 
-	case gc.OAND<<16 | gc.TINT8,
-		gc.OAND<<16 | gc.TUINT8,
-		gc.OAND<<16 | gc.TINT16,
-		gc.OAND<<16 | gc.TUINT16,
-		gc.OAND<<16 | gc.TINT32,
-		gc.OAND<<16 | gc.TUINT32,
-		gc.OAND<<16 | gc.TPTR32,
-		gc.OAND<<16 | gc.TINT64,
-		gc.OAND<<16 | gc.TUINT64,
-		gc.OAND<<16 | gc.TPTR64:
+	case OAND_ | gc.TINT8,
+		OAND_ | gc.TUINT8,
+		OAND_ | gc.TINT16,
+		OAND_ | gc.TUINT16,
+		OAND_ | gc.TINT32,
+		OAND_ | gc.TUINT32,
+		OAND_ | gc.TPTR32,
+		OAND_ | gc.TINT64,
+		OAND_ | gc.TUINT64,
+		OAND_ | gc.TPTR64:
 		a = arm64.AAND
 
-	case gc.OOR<<16 | gc.TINT8,
-		gc.OOR<<16 | gc.TUINT8,
-		gc.OOR<<16 | gc.TINT16,
-		gc.OOR<<16 | gc.TUINT16,
-		gc.OOR<<16 | gc.TINT32,
-		gc.OOR<<16 | gc.TUINT32,
-		gc.OOR<<16 | gc.TPTR32,
-		gc.OOR<<16 | gc.TINT64,
-		gc.OOR<<16 | gc.TUINT64,
-		gc.OOR<<16 | gc.TPTR64:
+	case OOR_ | gc.TINT8,
+		OOR_ | gc.TUINT8,
+		OOR_ | gc.TINT16,
+		OOR_ | gc.TUINT16,
+		OOR_ | gc.TINT32,
+		OOR_ | gc.TUINT32,
+		OOR_ | gc.TPTR32,
+		OOR_ | gc.TINT64,
+		OOR_ | gc.TUINT64,
+		OOR_ | gc.TPTR64:
 		a = arm64.AORR
 
-	case gc.OXOR<<16 | gc.TINT8,
-		gc.OXOR<<16 | gc.TUINT8,
-		gc.OXOR<<16 | gc.TINT16,
-		gc.OXOR<<16 | gc.TUINT16,
-		gc.OXOR<<16 | gc.TINT32,
-		gc.OXOR<<16 | gc.TUINT32,
-		gc.OXOR<<16 | gc.TPTR32,
-		gc.OXOR<<16 | gc.TINT64,
-		gc.OXOR<<16 | gc.TUINT64,
-		gc.OXOR<<16 | gc.TPTR64:
+	case OXOR_ | gc.TINT8,
+		OXOR_ | gc.TUINT8,
+		OXOR_ | gc.TINT16,
+		OXOR_ | gc.TUINT16,
+		OXOR_ | gc.TINT32,
+		OXOR_ | gc.TUINT32,
+		OXOR_ | gc.TPTR32,
+		OXOR_ | gc.TINT64,
+		OXOR_ | gc.TUINT64,
+		OXOR_ | gc.TPTR64:
 		a = arm64.AEOR
 
 		// TODO(minux): handle rotates
@@ -840,30 +864,30 @@
 	//	a = 0//???; RLDC?
 	//	break;
 
-	case gc.OLSH<<16 | gc.TINT8,
-		gc.OLSH<<16 | gc.TUINT8,
-		gc.OLSH<<16 | gc.TINT16,
-		gc.OLSH<<16 | gc.TUINT16,
-		gc.OLSH<<16 | gc.TINT32,
-		gc.OLSH<<16 | gc.TUINT32,
-		gc.OLSH<<16 | gc.TPTR32,
-		gc.OLSH<<16 | gc.TINT64,
-		gc.OLSH<<16 | gc.TUINT64,
-		gc.OLSH<<16 | gc.TPTR64:
+	case OLSH_ | gc.TINT8,
+		OLSH_ | gc.TUINT8,
+		OLSH_ | gc.TINT16,
+		OLSH_ | gc.TUINT16,
+		OLSH_ | gc.TINT32,
+		OLSH_ | gc.TUINT32,
+		OLSH_ | gc.TPTR32,
+		OLSH_ | gc.TINT64,
+		OLSH_ | gc.TUINT64,
+		OLSH_ | gc.TPTR64:
 		a = arm64.ALSL
 
-	case gc.ORSH<<16 | gc.TUINT8,
-		gc.ORSH<<16 | gc.TUINT16,
-		gc.ORSH<<16 | gc.TUINT32,
-		gc.ORSH<<16 | gc.TPTR32,
-		gc.ORSH<<16 | gc.TUINT64,
-		gc.ORSH<<16 | gc.TPTR64:
+	case ORSH_ | gc.TUINT8,
+		ORSH_ | gc.TUINT16,
+		ORSH_ | gc.TUINT32,
+		ORSH_ | gc.TPTR32,
+		ORSH_ | gc.TUINT64,
+		ORSH_ | gc.TPTR64:
 		a = arm64.ALSR
 
-	case gc.ORSH<<16 | gc.TINT8,
-		gc.ORSH<<16 | gc.TINT16,
-		gc.ORSH<<16 | gc.TINT32,
-		gc.ORSH<<16 | gc.TINT64:
+	case ORSH_ | gc.TINT8,
+		ORSH_ | gc.TINT16,
+		ORSH_ | gc.TINT32,
+		ORSH_ | gc.TINT64:
 		a = arm64.AASR
 
 		// TODO(minux): handle rotates
@@ -878,59 +902,59 @@
 	//	a = 0//??? RLDC??
 	//	break;
 
-	case gc.OHMUL<<16 | gc.TINT64:
+	case OHMUL_ | gc.TINT64:
 		a = arm64.ASMULH
 
-	case gc.OHMUL<<16 | gc.TUINT64,
-		gc.OHMUL<<16 | gc.TPTR64:
+	case OHMUL_ | gc.TUINT64,
+		OHMUL_ | gc.TPTR64:
 		a = arm64.AUMULH
 
-	case gc.OMUL<<16 | gc.TINT8,
-		gc.OMUL<<16 | gc.TINT16,
-		gc.OMUL<<16 | gc.TINT32:
+	case OMUL_ | gc.TINT8,
+		OMUL_ | gc.TINT16,
+		OMUL_ | gc.TINT32:
 		a = arm64.ASMULL
 
-	case gc.OMUL<<16 | gc.TINT64:
+	case OMUL_ | gc.TINT64:
 		a = arm64.AMUL
 
-	case gc.OMUL<<16 | gc.TUINT8,
-		gc.OMUL<<16 | gc.TUINT16,
-		gc.OMUL<<16 | gc.TUINT32,
-		gc.OMUL<<16 | gc.TPTR32:
+	case OMUL_ | gc.TUINT8,
+		OMUL_ | gc.TUINT16,
+		OMUL_ | gc.TUINT32,
+		OMUL_ | gc.TPTR32:
+		// don't use word multiply, the high 32 bits are undefined.
 		a = arm64.AUMULL
 
-	case gc.OMUL<<16 | gc.TUINT64,
-		gc.OMUL<<16 | gc.TPTR64:
+	case OMUL_ | gc.TUINT64,
+		OMUL_ | gc.TPTR64:
 		a = arm64.AMUL // for 64-bit multiplies, signedness doesn't matter.
 
-	case gc.OMUL<<16 | gc.TFLOAT32:
+	case OMUL_ | gc.TFLOAT32:
 		a = arm64.AFMULS
 
-	case gc.OMUL<<16 | gc.TFLOAT64:
+	case OMUL_ | gc.TFLOAT64:
 		a = arm64.AFMULD
 
-	case gc.ODIV<<16 | gc.TINT8,
-		gc.ODIV<<16 | gc.TINT16,
-		gc.ODIV<<16 | gc.TINT32,
-		gc.ODIV<<16 | gc.TINT64:
+	case ODIV_ | gc.TINT8,
+		ODIV_ | gc.TINT16,
+		ODIV_ | gc.TINT32,
+		ODIV_ | gc.TINT64:
 		a = arm64.ASDIV
 
-	case gc.ODIV<<16 | gc.TUINT8,
-		gc.ODIV<<16 | gc.TUINT16,
-		gc.ODIV<<16 | gc.TUINT32,
-		gc.ODIV<<16 | gc.TPTR32,
-		gc.ODIV<<16 | gc.TUINT64,
-		gc.ODIV<<16 | gc.TPTR64:
+	case ODIV_ | gc.TUINT8,
+		ODIV_ | gc.TUINT16,
+		ODIV_ | gc.TUINT32,
+		ODIV_ | gc.TPTR32,
+		ODIV_ | gc.TUINT64,
+		ODIV_ | gc.TPTR64:
 		a = arm64.AUDIV
 
-	case gc.ODIV<<16 | gc.TFLOAT32:
+	case ODIV_ | gc.TFLOAT32:
 		a = arm64.AFDIVS
 
-	case gc.ODIV<<16 | gc.TFLOAT64:
+	case ODIV_ | gc.TFLOAT64:
 		a = arm64.AFDIVD
 
-	case gc.OSQRT<<16 | gc.TFLOAT64:
+	case OSQRT_ | gc.TFLOAT64:
 		a = arm64.AFSQRTD
 	}
 
diff --git a/src/cmd/compile/internal/gc/align.go b/src/cmd/compile/internal/gc/align.go
index 741588e..812a8cb 100644
--- a/src/cmd/compile/internal/gc/align.go
+++ b/src/cmd/compile/internal/gc/align.go
@@ -6,12 +6,8 @@
 
 import "cmd/internal/obj"
 
-/*
- * machine size and rounding
- * alignment is dictated around
- * the size of a pointer, set in betypeinit
- * (see ../6g/galign.c).
- */
+// machine size and rounding; alignment is dictated around
+// the size of a pointer, set in betypeinit (see ../amd64/galign.go).
 var defercalc int
 
 func Rnd(o int64, r int64) int64 {
@@ -68,7 +64,7 @@
 		f.Width = o // really offset for TFIELD
 		if f.Nname != nil {
 			// this same stackparam logic is in addrescapes
-			// in typecheck.c.  usually addrescapes runs after
+			// in typecheck.go.  usually addrescapes runs after
 			// widstruct, in which case we could drop this,
 			// but function closure functions are the exception.
 			if f.Nname.Name.Param.Stackparam != nil {
@@ -153,15 +149,15 @@
 	t.Width = -2
 	t.Align = 0
 
-	et := int32(t.Etype)
+	et := t.Etype
 	switch et {
 	case TFUNC, TCHAN, TMAP, TSTRING:
 		break
 
-		/* simtype == 0 during bootstrap */
+	// simtype == 0 during bootstrap
 	default:
 		if Simtype[t.Etype] != 0 {
-			et = int32(Simtype[t.Etype])
+			et = Simtype[t.Etype]
 		}
 	}
 
@@ -170,7 +166,7 @@
 	default:
 		Fatalf("dowidth: unknown type: %v", t)
 
-		/* compiler-specific stuff */
+	// compiler-specific stuff
 	case TINT8, TUINT8, TBOOL:
 		// bool is int8
 		w = 1
@@ -238,7 +234,7 @@
 		}
 		w = 1 // anything will do
 
-		// dummy type; should be replaced before use.
+	// dummy type; should be replaced before use.
 	case TANY:
 		if Debug['A'] == 0 {
 			Fatalf("dowidth any")
@@ -286,7 +282,7 @@
 		}
 		w = widstruct(t, t, 0, 1)
 
-		// make fake type to check later to
+	// make fake type to check later to
 	// trigger function argument computation.
 	case TFUNC:
 		t1 := typ(TFUNCARGS)
@@ -297,7 +293,7 @@
 		// width of func type is pointer
 		w = int64(Widthptr)
 
-		// function is 3 cated structures;
+	// function is 3 concatenated structures;
 	// compute their widths as side-effect.
 	case TFUNCARGS:
 		t1 := t.Type
@@ -333,23 +329,21 @@
 	}
 }
 
-/*
- * when a type's width should be known, we call checkwidth
- * to compute it.  during a declaration like
- *
- *	type T *struct { next T }
- *
- * it is necessary to defer the calculation of the struct width
- * until after T has been initialized to be a pointer to that struct.
- * similarly, during import processing structs may be used
- * before their definition.  in those situations, calling
- * defercheckwidth() stops width calculations until
- * resumecheckwidth() is called, at which point all the
- * checkwidths that were deferred are executed.
- * dowidth should only be called when the type's size
- * is needed immediately.  checkwidth makes sure the
- * size is evaluated eventually.
- */
+// when a type's width should be known, we call checkwidth
+// to compute it.  during a declaration like
+//
+//	type T *struct { next T }
+//
+// it is necessary to defer the calculation of the struct width
+// until after T has been initialized to be a pointer to that struct.
+// similarly, during import processing structs may be used
+// before their definition.  in those situations, calling
+// defercheckwidth() stops width calculations until
+// resumecheckwidth() is called, at which point all the
+// checkwidths that were deferred are executed.
+// dowidth should only be called when the type's size
+// is needed immediately.  checkwidth makes sure the
+// size is evaluated eventually.
 type TypeList struct {
 	t    *Type
 	next *TypeList
@@ -422,8 +416,8 @@
 		Fatalf("typeinit before betypeinit")
 	}
 
-	for i := 0; i < NTYPE; i++ {
-		Simtype[i] = uint8(i)
+	for et := EType(0); et < NTYPE; et++ {
+		Simtype[et] = et
 	}
 
 	Types[TPTR32] = typ(TPTR32)
@@ -445,8 +439,8 @@
 		Tptr = TPTR64
 	}
 
-	for i := TINT8; i <= TUINT64; i++ {
-		Isint[i] = true
+	for et := TINT8; et <= TUINT64; et++ {
+		Isint[et] = true
 	}
 	Isint[TINT] = true
 	Isint[TUINT] = true
@@ -469,39 +463,37 @@
 	Issigned[TINT32] = true
 	Issigned[TINT64] = true
 
-	/*
-	 * initialize okfor
-	 */
-	for i := 0; i < NTYPE; i++ {
-		if Isint[i] || i == TIDEAL {
-			okforeq[i] = true
-			okforcmp[i] = true
-			okforarith[i] = true
-			okforadd[i] = true
-			okforand[i] = true
-			okforconst[i] = true
-			issimple[i] = true
-			Minintval[i] = new(Mpint)
-			Maxintval[i] = new(Mpint)
+	// initialize okfor
+	for et := EType(0); et < NTYPE; et++ {
+		if Isint[et] || et == TIDEAL {
+			okforeq[et] = true
+			okforcmp[et] = true
+			okforarith[et] = true
+			okforadd[et] = true
+			okforand[et] = true
+			okforconst[et] = true
+			issimple[et] = true
+			Minintval[et] = new(Mpint)
+			Maxintval[et] = new(Mpint)
 		}
 
-		if Isfloat[i] {
-			okforeq[i] = true
-			okforcmp[i] = true
-			okforadd[i] = true
-			okforarith[i] = true
-			okforconst[i] = true
-			issimple[i] = true
-			minfltval[i] = newMpflt()
-			maxfltval[i] = newMpflt()
+		if Isfloat[et] {
+			okforeq[et] = true
+			okforcmp[et] = true
+			okforadd[et] = true
+			okforarith[et] = true
+			okforconst[et] = true
+			issimple[et] = true
+			minfltval[et] = newMpflt()
+			maxfltval[et] = newMpflt()
 		}
 
-		if Iscomplex[i] {
-			okforeq[i] = true
-			okforadd[i] = true
-			okforarith[i] = true
-			okforconst[i] = true
-			issimple[i] = true
+		if Iscomplex[et] {
+			okforeq[et] = true
+			okforadd[et] = true
+			okforarith[et] = true
+			okforconst[et] = true
+			issimple[et] = true
 		}
 	}
 
@@ -599,10 +591,10 @@
 	mpatofix(Maxintval[TUINT32], "0xffffffff")
 	mpatofix(Maxintval[TUINT64], "0xffffffffffffffff")
 
-	/* f is valid float if min < f < max.  (min and max are not themselves valid.) */
-	mpatoflt(maxfltval[TFLOAT32], "33554431p103") /* 2^24-1 p (127-23) + 1/2 ulp*/
+	// f is valid float if min < f < max.  (min and max are not themselves valid.)
+	mpatoflt(maxfltval[TFLOAT32], "33554431p103") // 2^24-1 p (127-23) + 1/2 ulp
 	mpatoflt(minfltval[TFLOAT32], "-33554431p103")
-	mpatoflt(maxfltval[TFLOAT64], "18014398509481983p970") /* 2^53-1 p (1023-52) + 1/2 ulp */
+	mpatoflt(maxfltval[TFLOAT64], "18014398509481983p970") // 2^53-1 p (1023-52) + 1/2 ulp
 	mpatoflt(minfltval[TFLOAT64], "-18014398509481983p970")
 
 	maxfltval[TCOMPLEX64] = maxfltval[TFLOAT32]
@@ -610,40 +602,36 @@
 	maxfltval[TCOMPLEX128] = maxfltval[TFLOAT64]
 	minfltval[TCOMPLEX128] = minfltval[TFLOAT64]
 
-	/* for walk to use in error messages */
+	// for walk to use in error messages
 	Types[TFUNC] = functype(nil, nil, nil)
 
-	/* types used in front end */
+	// types used in front end
 	// types[TNIL] got set early in lexinit
 	Types[TIDEAL] = typ(TIDEAL)
 
 	Types[TINTER] = typ(TINTER)
 
-	/* simple aliases */
-	Simtype[TMAP] = uint8(Tptr)
+	// simple aliases
+	Simtype[TMAP] = Tptr
 
-	Simtype[TCHAN] = uint8(Tptr)
-	Simtype[TFUNC] = uint8(Tptr)
-	Simtype[TUNSAFEPTR] = uint8(Tptr)
+	Simtype[TCHAN] = Tptr
+	Simtype[TFUNC] = Tptr
+	Simtype[TUNSAFEPTR] = Tptr
 
-	/* pick up the backend thearch.typedefs */
-	var s1 *Sym
-	var etype int
-	var sameas int
-	var s *Sym
+	// pick up the backend thearch.typedefs
 	for i = range Thearch.Typedefs {
-		s = Lookup(Thearch.Typedefs[i].Name)
-		s1 = Pkglookup(Thearch.Typedefs[i].Name, builtinpkg)
+		s := Lookup(Thearch.Typedefs[i].Name)
+		s1 := Pkglookup(Thearch.Typedefs[i].Name, builtinpkg)
 
-		etype = Thearch.Typedefs[i].Etype
-		if etype < 0 || etype >= len(Types) {
+		etype := Thearch.Typedefs[i].Etype
+		if int(etype) >= len(Types) {
 			Fatalf("typeinit: %s bad etype", s.Name)
 		}
-		sameas = Thearch.Typedefs[i].Sameas
-		if sameas < 0 || sameas >= len(Types) {
+		sameas := Thearch.Typedefs[i].Sameas
+		if int(sameas) >= len(Types) {
 			Fatalf("typeinit: %s bad sameas", s.Name)
 		}
-		Simtype[etype] = uint8(sameas)
+		Simtype[etype] = sameas
 		minfltval[etype] = minfltval[sameas]
 		maxfltval[etype] = maxfltval[sameas]
 		Minintval[etype] = Minintval[sameas]
@@ -678,9 +666,7 @@
 	itable.Type = Types[TUINT8]
 }
 
-/*
- * compute total size of f's in/out arguments.
- */
+// compute total size of f's in/out arguments.
 func Argsize(t *Type) int {
 	var save Iter
 	var x int64
diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go
new file mode 100644
index 0000000..b49f0fb
--- /dev/null
+++ b/src/cmd/compile/internal/gc/bexport.go
@@ -0,0 +1,1041 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Binary package export.
+// Based loosely on x/tools/go/importer.
+// (see fmt.go, go.y as "documentation" for how to use/setup data structures)
+//
+// Use "-newexport" flag to enable.
+
+// TODO(gri):
+// - inlined functions
+
+/*
+Export data encoding:
+
+The export data is a serialized description of the graph of exported
+objects: constants, types, variables, and functions. Only types can
+be re-exported and so we need to know which package they are coming
+from. Therefore, packages are also part of the export graph.
+
+The roots of the graph are the list of constants, variables, functions,
+and eventually types. Types are written last because most of them will
+be written as part of other objects which will reduce the number of
+types that need to be written separately.
+
+The graph is serialized in in-order fashion, starting with the roots.
+Each object in the graph is serialized by writing its fields sequentially.
+If the field is a pointer to another object, that object is serialized,
+recursively. Otherwise the field is written. Non-pointer fields are all
+encoded as either an integer or string value.
+
+Only packages and types may be referred to more than once. When getting
+to a package or type that was not serialized before, a number (index) is
+assigned to it, starting at 0. In this case, the encoding starts with an
+integer tag with a value < 0. The tag value indicates the kind of object
+(package or type) that follows and that this is the first time that we
+see this object. If the package or type was already serialized, the encoding
+starts with the respective package or type index >= 0. An importer can
+trivially determine if a package or type needs to be read in for the first
+time (tag < 0) and entered into the respective package or type table, or
+if the package or type was seen already (index >= 0), in which case the
+index is the table index where the respective object can be found.
+
+Before exporting or importing, the type tables are populated with the
+predeclared types (int, string, error, unsafe.Pointer, etc.). This way
+they are automatically encoded with a known and fixed type index.
+
+Encoding format:
+
+The export data starts with a single byte indicating the encoding format
+(compact, or with debugging information), followed by a version string
+(so we can evolve the encoding if need be), the name of the imported
+package, and a string containing platform-specific information for that
+package.
+
+After this header, the lists of objects follow. After the objects, platform-
+specific data may be found which is not used strictly for type checking.
+
+The encoding of objects is straightforward: Constants, variables, and
+functions start with their name, type, and possibly a value. Named types
+record their name and package so that they can be canonicalized: If the
+same type was imported before via another import, the importer must use
+the previously imported type pointer so that we have exactly one version
+(i.e., one pointer) for each named type (and read but discard the current
+type encoding). Unnamed types simply encode their respective fields.
+
+In the encoding, all lists (of objects, struct fields, methods, parameter
+names, but also the bytes of a string, etc.) start with an integer which
+is the length of the list. This permits an importer to allocate the right
+amount of space to hold the list without the need to grow it later.
+
+All integer values use a variable-length encoding for compact representation.
+
+If debugFormat is set, each integer and string value is preceded by a marker
+and position information in the encoding. This mechanism permits an importer
+to recognize immediately when it is out of sync. The importer recognizes this
+mode automatically (i.e., it can import export data produced with debugging
+support even if debugFormat is not set at the time of import). Using this mode
+will massively increase the size of the export data (by a factor of 2 to 3)
+and is only recommended for debugging.
+
+The exporter and importer are completely symmetric in implementation: For
+each encoding routine there is the matching and symmetric decoding routine.
+This symmetry makes it very easy to change or extend the format: If a new
+field needs to be encoded, a symmetric change can be made to exporter and
+importer.
+*/
+
+package gc
+
+import (
+	"bytes"
+	"cmd/compile/internal/big"
+	"cmd/internal/obj"
+	"encoding/binary"
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// debugging support
+const (
+	debugFormat = false // use debugging format for export data (emits a lot of additional data)
+)
+
+const exportVersion = "v0"
+
+// Set forceNewExport to force the use of the new export format - for testing on the build dashboard.
+// TODO(gri) remove eventually
+const forceNewExport = false
+
+// Export writes the export data for localpkg to out and returns the number of bytes written.
+func Export(out *obj.Biobuf, trace bool) int {
+	p := exporter{
+		out:      out,
+		pkgIndex: make(map[*Pkg]int),
+		typIndex: make(map[*Type]int),
+		trace:    trace,
+	}
+
+	// write low-level encoding format
+	var format byte = 'c' // compact
+	if debugFormat {
+		format = 'd'
+	}
+	p.byte(format)
+
+	// --- generic export data ---
+
+	if p.trace {
+		p.tracef("\n--- generic export data ---\n")
+		if p.indent != 0 {
+			Fatalf("incorrect indentation %d", p.indent)
+		}
+	}
+
+	p.string(exportVersion)
+	if p.trace {
+		p.tracef("\n")
+	}
+
+	// populate type map with predeclared "known" types
+	predecl := predeclared()
+	for index, typ := range predecl {
+		p.typIndex[typ] = index
+	}
+	if len(p.typIndex) != len(predecl) {
+		Fatalf("duplicate entries in type map?")
+	}
+
+	// write package data
+	if localpkg.Path != "" {
+		Fatalf("local package path not empty: %q", localpkg.Path)
+	}
+	p.pkg(localpkg)
+
+	// write compiler-specific flags
+	// go.y:import_safety
+	{
+		var flags string
+		if safemode != 0 {
+			flags = "safe"
+		}
+		p.string(flags)
+	}
+
+	if p.trace {
+		p.tracef("\n")
+	}
+
+	// collect objects to export
+	var consts, vars, funcs []*Sym
+	var types []*Type
+	for _, n := range exportlist {
+		sym := n.Sym
+		// TODO(gri) Closures appear marked as exported.
+		// Investigate and determine if we need this.
+		if sym.Flags&SymExported != 0 {
+			continue
+		}
+		sym.Flags |= SymExported
+
+		// TODO(gri) Closures have dots in their names;
+		// e.g., TestFloatZeroValue.func1 in math/big tests.
+		// We may not need this eventually. See also comment
+		// on sym.Flags&SymExported test above.
+		if strings.Contains(sym.Name, ".") {
+			Fatalf("unexpected export symbol: %v", sym)
+		}
+
+		if sym.Flags&SymExport != 0 {
+			if sym.Def == nil {
+				Fatalf("unknown export symbol: %v", sym)
+			}
+			switch n := sym.Def; n.Op {
+			case OLITERAL:
+				// constant
+				typecheck(&n, Erv)
+				if n == nil || n.Op != OLITERAL {
+					Fatalf("dumpexportconst: oconst nil: %v", sym)
+				}
+				consts = append(consts, sym)
+
+			case ONAME:
+				// variable or function
+				typecheck(&n, Erv|Ecall)
+				if n == nil || n.Type == nil {
+					Fatalf("variable/function exported but not defined: %v", sym)
+				}
+				if n.Type.Etype == TFUNC && n.Class == PFUNC {
+					funcs = append(funcs, sym)
+				} else {
+					vars = append(vars, sym)
+				}
+
+			case OTYPE:
+				// named type
+				t := n.Type
+				if t.Etype == TFORW {
+					Fatalf("export of incomplete type %v", sym)
+				}
+				types = append(types, t)
+
+			default:
+				Fatalf("unexpected export symbol: %v %v", Oconv(int(n.Op), 0), sym)
+			}
+		}
+	}
+	exportlist = nil // match export.go use of exportlist
+
+	// for reproducible output
+	sort.Sort(symByName(consts))
+	sort.Sort(symByName(vars))
+	sort.Sort(symByName(funcs))
+	// sort types later when we have fewer types left
+
+	// write consts
+	p.int(len(consts))
+	for _, sym := range consts {
+		n := sym.Def
+		typ := n.Type // may or may not be specified
+		// Untyped (ideal) constants get their own type. This decouples
+		// the constant type from the encoding of the constant value.
+		if typ == nil || isideal(typ) {
+			typ = untype(n.Val().Ctype())
+		}
+
+		p.string(sym.Name)
+		p.typ(typ)
+		p.value(n.Val())
+	}
+
+	// write vars
+	p.int(len(vars))
+	for _, sym := range vars {
+		p.string(sym.Name)
+		p.typ(sym.Def.Type)
+	}
+
+	// write funcs
+	p.int(len(funcs))
+	for _, sym := range funcs {
+		p.string(sym.Name)
+		// The type can only be a signature for functions. However, by always
+		// writing the complete type specification (rather than just a signature)
+		// we keep the option open of sharing common signatures across multiple
+		// functions as a means to further compress the export data.
+		p.typ(sym.Def.Type)
+		p.int(p.collectInlined(sym.Def))
+	}
+
+	// determine which types are still left to write and sort them
+	i := 0
+	for _, t := range types {
+		if _, ok := p.typIndex[t]; !ok {
+			types[i] = t
+			i++
+		}
+	}
+	types = types[:i]
+	sort.Sort(typByName(types))
+
+	// write types
+	p.int(len(types))
+	for _, t := range types {
+		// Writing a type may further reduce the number of types
+		// that are left to be written, but at this point we don't
+		// care.
+		p.typ(t)
+	}
+
+	if p.trace {
+		p.tracef("\n")
+	}
+
+	// --- compiler-specific export data ---
+
+	if p.trace {
+		p.tracef("\n--- compiler specific export data ---\n")
+		if p.indent != 0 {
+			Fatalf("incorrect indentation")
+		}
+	}
+
+	// write inlined function bodies
+	p.int(len(p.inlined))
+	for i, f := range p.inlined {
+		p.body(i, f)
+	}
+
+	if p.trace {
+		p.tracef("\n")
+	}
+
+	// --- end of export data ---
+
+	return p.written
+}
+
+type symByName []*Sym
+
+func (a symByName) Len() int           { return len(a) }
+func (a symByName) Less(i, j int) bool { return a[i].Name < a[j].Name }
+func (a symByName) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+
+type typByName []*Type
+
+func (a typByName) Len() int           { return len(a) }
+func (a typByName) Less(i, j int) bool { return a[i].Sym.Name < a[j].Sym.Name }
+func (a typByName) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+
+type exporter struct {
+	out      *obj.Biobuf
+	pkgIndex map[*Pkg]int
+	typIndex map[*Type]int
+	inlined  []*Func
+
+	written int // bytes written
+	indent  int // for p.trace
+	trace   bool
+}
+
+func (p *exporter) pkg(pkg *Pkg) {
+	if pkg == nil {
+		Fatalf("unexpected nil pkg")
+	}
+
+	// if we saw the package before, write its index (>= 0)
+	if i, ok := p.pkgIndex[pkg]; ok {
+		p.index('P', i)
+		return
+	}
+
+	// otherwise, remember the package, write the package tag (< 0) and package data
+	if p.trace {
+		p.tracef("P%d = { ", len(p.pkgIndex))
+		defer p.tracef("} ")
+	}
+	p.pkgIndex[pkg] = len(p.pkgIndex)
+
+	p.tag(packageTag)
+	p.string(pkg.Name)
+	p.string(pkg.Path)
+}
+
+func (p *exporter) typ(t *Type) {
+	if t == nil {
+		Fatalf("nil type")
+	}
+
+	// Possible optimization: Anonymous pointer types *T where
+	// T is a named type are common. We could canonicalize all
+	// such types *T to a single type PT = *T. This would lead
+	// to at most one *T entry in typIndex, and all future *T's
+	// would be encoded as the respective index directly. Would
+	// save 1 byte (pointerTag) per *T and reduce the typIndex
+	// size (at the cost of a canonicalization map). We can do
+	// this later, without encoding format change.
+
+	// if we saw the type before, write its index (>= 0)
+	if i, ok := p.typIndex[t]; ok {
+		p.index('T', i)
+		return
+	}
+
+	// otherwise, remember the type, write the type tag (< 0) and type data
+	if p.trace {
+		p.tracef("T%d = {>\n", len(p.typIndex))
+		defer p.tracef("<\n} ")
+	}
+	p.typIndex[t] = len(p.typIndex)
+
+	// pick off named types
+	if sym := t.Sym; sym != nil {
+		// Fields should be exported by p.field().
+		if t.Etype == TFIELD {
+			Fatalf("printing a field/parameter with wrong function")
+		}
+		// Predeclared types should have been found in the type map.
+		if t.Orig == t {
+			Fatalf("predeclared type missing from type map?")
+		}
+		// TODO(gri) The assertion below seems incorrect (crashes during all.bash).
+		// Investigate.
+		/*
+			// we expect the respective definition to point to us
+			if sym.Def.Type != t {
+				Fatalf("type definition doesn't point to us?")
+			}
+		*/
+
+		p.tag(namedTag)
+		p.qualifiedName(sym)
+
+		// write underlying type
+		p.typ(t.Orig)
+
+		// interfaces don't have associated methods
+		if t.Orig.Etype == TINTER {
+			return
+		}
+
+		// sort methods for reproducible export format
+		// TODO(gri) Determine if they are already sorted
+		// in which case we can drop this step.
+		var methods []*Type
+		for m := t.Method; m != nil; m = m.Down {
+			methods = append(methods, m)
+		}
+		sort.Sort(methodbyname(methods))
+		p.int(len(methods))
+
+		if p.trace && t.Method != nil {
+			p.tracef("associated methods {>\n")
+		}
+
+		for _, m := range methods {
+			p.string(m.Sym.Name)
+			p.paramList(getthisx(m.Type))
+			p.paramList(getinargx(m.Type))
+			p.paramList(getoutargx(m.Type))
+			p.int(p.collectInlined(m.Type.Nname))
+
+			if p.trace && m.Down != nil {
+				p.tracef("\n")
+			}
+		}
+
+		if p.trace && t.Method != nil {
+			p.tracef("<\n} ")
+		}
+
+		return
+	}
+
+	// otherwise we have a type literal
+	switch t.Etype {
+	case TARRAY:
+		// TODO(gri) define named constant for the -100
+		if t.Bound >= 0 || t.Bound == -100 {
+			p.tag(arrayTag)
+			p.int64(t.Bound)
+		} else {
+			p.tag(sliceTag)
+		}
+		p.typ(t.Type)
+
+	case T_old_DARRAY:
+		// see p.param use of T_old_DARRAY
+		p.tag(dddTag)
+		p.typ(t.Type)
+
+	case TSTRUCT:
+		p.tag(structTag)
+		p.fieldList(t)
+
+	case TPTR32, TPTR64: // could use Tptr but these are constants
+		p.tag(pointerTag)
+		p.typ(t.Type)
+
+	case TFUNC:
+		p.tag(signatureTag)
+		p.paramList(getinargx(t))
+		p.paramList(getoutargx(t))
+
+	case TINTER:
+		p.tag(interfaceTag)
+
+		// gc doesn't separate between embedded interfaces
+		// and methods declared explicitly with an interface
+		p.int(0) // no embedded interfaces
+		p.methodList(t)
+
+	case TMAP:
+		p.tag(mapTag)
+		p.typ(t.Down) // key
+		p.typ(t.Type) // val
+
+	case TCHAN:
+		p.tag(chanTag)
+		p.int(int(t.Chan))
+		p.typ(t.Type)
+
+	default:
+		Fatalf("unexpected type: %s (Etype = %d)", Tconv(t, 0), t.Etype)
+	}
+}
+
+func (p *exporter) qualifiedName(sym *Sym) {
+	p.string(sym.Name)
+	p.pkg(sym.Pkg)
+}
+
+func (p *exporter) fieldList(t *Type) {
+	if p.trace && t.Type != nil {
+		p.tracef("fields {>\n")
+		defer p.tracef("<\n} ")
+	}
+
+	p.int(countfield(t))
+	for f := t.Type; f != nil; f = f.Down {
+		p.field(f)
+		if p.trace && f.Down != nil {
+			p.tracef("\n")
+		}
+	}
+}
+
+func (p *exporter) field(f *Type) {
+	if f.Etype != TFIELD {
+		Fatalf("field expected")
+	}
+
+	p.fieldName(f)
+	p.typ(f.Type)
+	p.note(f.Note)
+}
+
+func (p *exporter) note(n *string) {
+	var s string
+	if n != nil {
+		s = *n
+	}
+	p.string(s)
+}
+
+func (p *exporter) methodList(t *Type) {
+	if p.trace && t.Type != nil {
+		p.tracef("methods {>\n")
+		defer p.tracef("<\n} ")
+	}
+
+	p.int(countfield(t))
+	for m := t.Type; m != nil; m = m.Down {
+		p.method(m)
+		if p.trace && m.Down != nil {
+			p.tracef("\n")
+		}
+	}
+}
+
+func (p *exporter) method(m *Type) {
+	if m.Etype != TFIELD {
+		Fatalf("method expected")
+	}
+
+	p.fieldName(m)
+	// TODO(gri) For functions signatures, we use p.typ() to export
+	// so we could share the same type with multiple functions. Do
+	// the same here, or never try to do this for functions.
+	p.paramList(getinargx(m.Type))
+	p.paramList(getoutargx(m.Type))
+}
+
+// fieldName is like qualifiedName but it doesn't record the package
+// for blank (_) or exported names.
+func (p *exporter) fieldName(t *Type) {
+	sym := t.Sym
+
+	var name string
+	if t.Embedded == 0 {
+		name = sym.Name
+	} else if bname := basetypeName(t); bname != "" && !exportname(bname) {
+		// anonymous field with unexported base type name: use "?" as field name
+		// (bname != "" per spec, but we are conservative in case of errors)
+		name = "?"
+	}
+
+	p.string(name)
+	if name == "?" || name != "_" && name != "" && !exportname(name) {
+		p.pkg(sym.Pkg)
+	}
+}
+
+func basetypeName(t *Type) string {
+	s := t.Sym
+	if s == nil && Isptr[t.Etype] {
+		s = t.Type.Sym // deref
+	}
+	if s != nil {
+		return s.Name
+	}
+	return ""
+}
+
+func (p *exporter) paramList(params *Type) {
+	if params.Etype != TSTRUCT || !params.Funarg {
+		Fatalf("parameter list expected")
+	}
+
+	// use negative length to indicate unnamed parameters
+	// (look at the first parameter only since either all
+	// names are present or all are absent)
+	n := countfield(params)
+	if n > 0 && parName(params.Type) == "" {
+		n = -n
+	}
+	p.int(n)
+	for q := params.Type; q != nil; q = q.Down {
+		p.param(q, n)
+	}
+}
+
+func (p *exporter) param(q *Type, n int) {
+	if q.Etype != TFIELD {
+		Fatalf("parameter expected")
+	}
+	t := q.Type
+	if q.Isddd {
+		// create a fake type to encode ... just for the p.typ call
+		// (T_old_DARRAY is not used anywhere else in the compiler,
+		// we use it here to communicate between p.param and p.typ.)
+		t = &Type{Etype: T_old_DARRAY, Type: t.Type}
+	}
+	p.typ(t)
+	if n > 0 {
+		p.string(parName(q))
+	}
+	// TODO(gri) This is compiler-specific (escape info).
+	// Move into compiler-specific section eventually?
+	// (Not having escape info causes tests to fail, e.g. runtime GCInfoTest)
+	p.note(q.Note)
+}
+
+func parName(q *Type) string {
+	if q.Sym == nil {
+		return ""
+	}
+	name := q.Sym.Name
+	// undo gc-internal name mangling - we just need the source name
+	if len(name) > 0 && name[0] == '~' {
+		// name is ~b%d or ~r%d
+		switch name[1] {
+		case 'b':
+			return "_"
+		case 'r':
+			return ""
+		default:
+			Fatalf("unexpected parameter name: %s", name)
+		}
+	}
+	// undo gc-internal name specialization
+	if i := strings.Index(name, "·"); i > 0 {
+		name = name[:i] // cut off numbering
+	}
+	return name
+}
+
+func (p *exporter) value(x Val) {
+	if p.trace {
+		p.tracef("= ")
+	}
+
+	switch x := x.U.(type) {
+	case bool:
+		tag := falseTag
+		if x {
+			tag = trueTag
+		}
+		p.tag(tag)
+
+	case *Mpint:
+		if Mpcmpfixfix(Minintval[TINT64], x) <= 0 && Mpcmpfixfix(x, Maxintval[TINT64]) <= 0 {
+			// common case: x fits into an int64 - use compact encoding
+			p.tag(int64Tag)
+			p.int64(Mpgetfix(x))
+			return
+		}
+		// uncommon case: large x - use float encoding
+		// (powers of 2 will be encoded efficiently with exponent)
+		p.tag(floatTag)
+		f := newMpflt()
+		Mpmovefixflt(f, x)
+		p.float(f)
+
+	case *Mpflt:
+		p.tag(floatTag)
+		p.float(x)
+
+	case *Mpcplx:
+		p.tag(complexTag)
+		p.float(&x.Real)
+		p.float(&x.Imag)
+
+	case string:
+		p.tag(stringTag)
+		p.string(x)
+
+	default:
+		Fatalf("unexpected value %v (%T)", x, x)
+	}
+}
+
+func (p *exporter) float(x *Mpflt) {
+	// extract sign (there is no -0)
+	f := &x.Val
+	sign := f.Sign()
+	if sign == 0 {
+		// x == 0
+		p.int(0)
+		return
+	}
+	// x != 0
+
+	// extract exponent such that 0.5 <= m < 1.0
+	var m big.Float
+	exp := f.MantExp(&m)
+
+	// extract mantissa as *big.Int
+	// - set exponent large enough so mant satisfies mant.IsInt()
+	// - get *big.Int from mant
+	m.SetMantExp(&m, int(m.MinPrec()))
+	mant, acc := m.Int(nil)
+	if acc != big.Exact {
+		Fatalf("internal error")
+	}
+
+	p.int(sign)
+	p.int(exp)
+	p.string(string(mant.Bytes()))
+}
+
+// ----------------------------------------------------------------------------
+// Inlined function bodies
+
+// TODO(gri) This section is incomplete. At the moment nothing meaningful
+// is written out for exported functions with inlined function bodies.
+
+func (p *exporter) collectInlined(n *Node) int {
+	if n != nil && n.Func != nil && n.Func.Inl != nil {
+		// when lazily typechecking inlined bodies, some re-exported ones may not have been typechecked yet.
+		// currently that can leave unresolved ONONAMEs in import-dot-ed packages in the wrong package
+		if Debug['l'] < 2 {
+			typecheckinl(n)
+		}
+		p.inlined = append(p.inlined, n.Func)
+		return len(p.inlined) - 1 // index >= 0 => inlined
+	}
+	return -1 // index < 0 => not inlined
+}
+
+func (p *exporter) body(i int, f *Func) {
+	p.int(i)
+	p.block(f.Inl)
+}
+
+func (p *exporter) block(list *NodeList) {
+	p.int(count(list))
+	for q := list; q != nil; q = q.Next {
+		p.stmt(q.N)
+	}
+}
+
+func (p *exporter) stmt(n *Node) {
+	// TODO(gri) do something sensible here
+	p.string("body")
+}
+
+// ----------------------------------------------------------------------------
+// Low-level encoders
+
+func (p *exporter) index(marker byte, index int) {
+	if index < 0 {
+		Fatalf("invalid index < 0")
+	}
+	if debugFormat {
+		p.marker('t')
+	}
+	if p.trace {
+		p.tracef("%c%d ", marker, index)
+	}
+	p.rawInt64(int64(index))
+}
+
+func (p *exporter) tag(tag int) {
+	if tag >= 0 {
+		Fatalf("invalid tag >= 0")
+	}
+	if debugFormat {
+		p.marker('t')
+	}
+	if p.trace {
+		p.tracef("%s ", tagString[-tag])
+	}
+	p.rawInt64(int64(tag))
+}
+
+func (p *exporter) int(x int) {
+	p.int64(int64(x))
+}
+
+func (p *exporter) int64(x int64) {
+	if debugFormat {
+		p.marker('i')
+	}
+	if p.trace {
+		p.tracef("%d ", x)
+	}
+	p.rawInt64(x)
+}
+
+func (p *exporter) string(s string) {
+	if debugFormat {
+		p.marker('s')
+	}
+	if p.trace {
+		p.tracef("%q ", s)
+	}
+	p.rawInt64(int64(len(s)))
+	for i := 0; i < len(s); i++ {
+		p.byte(s[i])
+	}
+}
+
+// marker emits a marker byte and position information which makes
+// it easy for a reader to detect if it is "out of sync". Used for
+// debugFormat format only.
+func (p *exporter) marker(m byte) {
+	p.byte(m)
+	p.rawInt64(int64(p.written))
+}
+
+// rawInt64 should only be used by low-level encoders
+func (p *exporter) rawInt64(x int64) {
+	var tmp [binary.MaxVarintLen64]byte
+	n := binary.PutVarint(tmp[:], x)
+	for i := 0; i < n; i++ {
+		p.byte(tmp[i])
+	}
+}
+
+// byte is the bottleneck interface to write to p.out.
+// byte escapes b as follows (any encoding does that
+// hides '$'):
+//
+//	'$'  => '|' 'S'
+//	'|'  => '|' '|'
+//
+// Necessary so other tools can find the end of the
+// export data by searching for "$$".
+func (p *exporter) byte(b byte) {
+	switch b {
+	case '$':
+		// write '$' as '|' 'S'
+		b = 'S'
+		fallthrough
+	case '|':
+		// write '|' as '|' '|'
+		obj.Bputc(p.out, '|')
+		p.written++
+	}
+	obj.Bputc(p.out, b)
+	p.written++
+}
+
+// tracef is like fmt.Printf but it rewrites the format string
+// to take care of indentation.
+func (p *exporter) tracef(format string, args ...interface{}) {
+	if strings.IndexAny(format, "<>\n") >= 0 {
+		var buf bytes.Buffer
+		for i := 0; i < len(format); i++ {
+			// no need to deal with runes
+			ch := format[i]
+			switch ch {
+			case '>':
+				p.indent++
+				continue
+			case '<':
+				p.indent--
+				continue
+			}
+			buf.WriteByte(ch)
+			if ch == '\n' {
+				for j := p.indent; j > 0; j-- {
+					buf.WriteString(".  ")
+				}
+			}
+		}
+		format = buf.String()
+	}
+	fmt.Printf(format, args...)
+}
+
+// ----------------------------------------------------------------------------
+// Export format
+
+// Tags. Must be < 0.
+const (
+	// Packages
+	packageTag = -(iota + 1)
+
+	// Types
+	namedTag
+	arrayTag
+	sliceTag
+	dddTag
+	structTag
+	pointerTag
+	signatureTag
+	interfaceTag
+	mapTag
+	chanTag
+
+	// Values
+	falseTag
+	trueTag
+	int64Tag
+	floatTag
+	fractionTag // not used by gc
+	complexTag
+	stringTag
+)
+
+// Debugging support.
+// (tagString is only used when tracing is enabled)
+var tagString = [...]string{
+	// Packages:
+	-packageTag: "package",
+
+	// Types:
+	-namedTag:     "named type",
+	-arrayTag:     "array",
+	-sliceTag:     "slice",
+	-dddTag:       "ddd",
+	-structTag:    "struct",
+	-pointerTag:   "pointer",
+	-signatureTag: "signature",
+	-interfaceTag: "interface",
+	-mapTag:       "map",
+	-chanTag:      "chan",
+
+	// Values:
+	-falseTag:    "false",
+	-trueTag:     "true",
+	-int64Tag:    "int64",
+	-floatTag:    "float",
+	-fractionTag: "fraction",
+	-complexTag:  "complex",
+	-stringTag:   "string",
+}
+
+// untype returns the "pseudo" untyped type for a Ctype (import/export use only).
+// (we can't use a pre-initialized array because we must be sure all types are
+// set up)
+func untype(ctype Ctype) *Type {
+	switch ctype {
+	case CTINT:
+		return idealint
+	case CTRUNE:
+		return idealrune
+	case CTFLT:
+		return idealfloat
+	case CTCPLX:
+		return idealcomplex
+	case CTSTR:
+		return idealstring
+	case CTBOOL:
+		return idealbool
+	case CTNIL:
+		return Types[TNIL]
+	}
+	Fatalf("unknown Ctype")
+	return nil
+}
+
+var (
+	idealint     = typ(TIDEAL)
+	idealrune    = typ(TIDEAL)
+	idealfloat   = typ(TIDEAL)
+	idealcomplex = typ(TIDEAL)
+)
+
+var predecl []*Type // initialized lazily
+
+func predeclared() []*Type {
+	if predecl == nil {
+		// initialize lazily to be sure that all
+		// elements have been initialized before
+		predecl = []*Type{
+			// basic types
+			Types[TBOOL],
+			Types[TINT],
+			Types[TINT8],
+			Types[TINT16],
+			Types[TINT32],
+			Types[TINT64],
+			Types[TUINT],
+			Types[TUINT8],
+			Types[TUINT16],
+			Types[TUINT32],
+			Types[TUINT64],
+			Types[TUINTPTR],
+			Types[TFLOAT32],
+			Types[TFLOAT64],
+			Types[TCOMPLEX64],
+			Types[TCOMPLEX128],
+			Types[TSTRING],
+
+			// aliases
+			bytetype,
+			runetype,
+
+			// error
+			errortype,
+
+			// untyped types
+			untype(CTBOOL),
+			untype(CTINT),
+			untype(CTRUNE),
+			untype(CTFLT),
+			untype(CTCPLX),
+			untype(CTSTR),
+			untype(CTNIL),
+
+			// package unsafe
+			Types[TUNSAFEPTR],
+		}
+	}
+	return predecl
+}
diff --git a/src/cmd/compile/internal/gc/bimport.go b/src/cmd/compile/internal/gc/bimport.go
new file mode 100644
index 0000000..731f31b
--- /dev/null
+++ b/src/cmd/compile/internal/gc/bimport.go
@@ -0,0 +1,634 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Binary package import.
+// Based loosely on x/tools/go/importer.
+
+package gc
+
+import (
+	"cmd/compile/internal/big"
+	"cmd/internal/obj"
+	"encoding/binary"
+)
+
+// The overall structure of Import is symmetric to Export: For each
+// export method in bexport.go there is a matching and symmetric method
+// in bimport.go. Changing the export format requires making symmetric
+// changes to bimport.go and bexport.go.
+
+// Import populates importpkg from the serialized package data.
+func Import(in *obj.Biobuf) {
+	p := importer{in: in}
+	p.buf = p.bufarray[:]
+
+	// read low-level encoding format
+	switch format := p.byte(); format {
+	case 'c':
+		// compact format - nothing to do
+	case 'd':
+		p.debugFormat = true
+	default:
+		Fatalf("invalid encoding format in export data: got %q; want 'c' or 'd'", format)
+	}
+
+	// --- generic export data ---
+
+	if v := p.string(); v != exportVersion {
+		Fatalf("unknown export data version: %s", v)
+	}
+
+	// populate typList with predeclared "known" types
+	p.typList = append(p.typList, predeclared()...)
+
+	// read package data
+	p.pkg()
+	if p.pkgList[0] != importpkg {
+		Fatalf("imported package not found in pkgList[0]")
+	}
+
+	// read compiler-specific flags
+	importpkg.Safe = p.string() == "safe"
+
+	// defer some type-checking until all types are read in completely
+	// (go.y:import_there)
+	tcok := typecheckok
+	typecheckok = true
+	defercheckwidth()
+
+	// read consts
+	for i := p.int(); i > 0; i-- {
+		sym := p.localname()
+		typ := p.typ()
+		val := p.value(typ)
+		if isideal(typ) {
+			// canonicalize ideal types
+			typ = Types[TIDEAL]
+		}
+		importconst(sym, typ, nodlit(val))
+	}
+
+	// read vars
+	for i := p.int(); i > 0; i-- {
+		sym := p.localname()
+		typ := p.typ()
+		importvar(sym, typ)
+	}
+
+	// read funcs
+	for i := p.int(); i > 0; i-- {
+		// go.y:hidden_fndcl
+		sym := p.localname()
+		typ := p.typ()
+		// TODO(gri) fix this
+		p.int() // read and discard index of inlined function body for now
+
+		importsym(sym, ONAME)
+		if sym.Def != nil && sym.Def.Op == ONAME && !Eqtype(typ, sym.Def.Type) {
+			Fatalf("inconsistent definition for func %v during import\n\t%v\n\t%v", sym, sym.Def.Type, typ)
+		}
+
+		n := newfuncname(sym)
+		n.Type = typ
+		declare(n, PFUNC)
+		funchdr(n)
+
+		// go.y:hidden_import
+		n.Func.Inl = nil
+		funcbody(n)
+		importlist = append(importlist, n) // TODO(gri) do this only if body is inlineable?
+	}
+
+	// read types
+	for i := p.int(); i > 0; i-- {
+		// name is parsed as part of named type
+		p.typ()
+	}
+
+	// --- compiler-specific export data ---
+
+	for i := p.int(); i > 0; i-- {
+		p.body()
+	}
+
+	// --- end of export data ---
+
+	typecheckok = tcok
+	resumecheckwidth()
+
+	testdclstack() // debugging only
+}
+
+type importer struct {
+	in       *obj.Biobuf
+	buf      []byte   // for reading strings
+	bufarray [64]byte // initial underlying array for buf, large enough to avoid allocation when compiling std lib
+	pkgList  []*Pkg
+	typList  []*Type
+
+	debugFormat bool
+	read        int // bytes read
+}
+
+func (p *importer) pkg() *Pkg {
+	// if the package was seen before, i is its index (>= 0)
+	i := p.tagOrIndex()
+	if i >= 0 {
+		return p.pkgList[i]
+	}
+
+	// otherwise, i is the package tag (< 0)
+	if i != packageTag {
+		Fatalf("expected package tag, found tag = %d", i)
+	}
+
+	// read package data
+	name := p.string()
+	path := p.string()
+
+	// we should never see an empty package name
+	if name == "" {
+		Fatalf("empty package name in import")
+	}
+
+	// we should never see a bad import path
+	if isbadimport(path) {
+		Fatalf("bad path in import: %q", path)
+	}
+
+	// an empty path denotes the package we are currently importing
+	pkg := importpkg
+	if path != "" {
+		pkg = mkpkg(path)
+	}
+	if pkg.Name == "" {
+		pkg.Name = name
+	} else if pkg.Name != name {
+		Fatalf("inconsistent package names: got %s; want %s (path = %s)", pkg.Name, name, path)
+	}
+	p.pkgList = append(p.pkgList, pkg)
+
+	return pkg
+}
+
+func (p *importer) localname() *Sym {
+	// go.y:hidden_importsym
+	name := p.string()
+	if name == "" {
+		Fatalf("unexpected anonymous name")
+	}
+	structpkg = importpkg // go.y:hidden_pkg_importsym
+	return importpkg.Lookup(name)
+}
+
+func (p *importer) newtyp(etype EType) *Type {
+	t := typ(etype)
+	p.typList = append(p.typList, t)
+	return t
+}
+
+func (p *importer) typ() *Type {
+	// if the type was seen before, i is its index (>= 0)
+	i := p.tagOrIndex()
+	if i >= 0 {
+		return p.typList[i]
+	}
+
+	// otherwise, i is the type tag (< 0)
+	var t *Type
+	switch i {
+	case namedTag:
+		// go.y:hidden_importsym
+		tsym := p.qualifiedName()
+
+		// go.y:hidden_pkgtype
+		t = pkgtype(tsym)
+		importsym(tsym, OTYPE)
+		p.typList = append(p.typList, t)
+
+		// read underlying type
+		// go.y:hidden_type
+		t0 := p.typ()
+		importtype(t, t0) // go.y:hidden_import
+
+		// interfaces don't have associated methods
+		if t0.Etype == TINTER {
+			break
+		}
+
+		// read associated methods
+		for i := p.int(); i > 0; i-- {
+			// go.y:hidden_fndcl
+			name := p.string()
+			recv := p.paramList() // TODO(gri) do we need a full param list for the receiver?
+			params := p.paramList()
+			result := p.paramList()
+			// TODO(gri) fix this
+			p.int() // read and discard index of inlined function body for now
+
+			pkg := localpkg
+			if !exportname(name) {
+				pkg = tsym.Pkg
+			}
+			sym := pkg.Lookup(name)
+
+			n := methodname1(newname(sym), recv.N.Right)
+			n.Type = functype(recv.N, params, result)
+			checkwidth(n.Type)
+			// addmethod uses the global variable structpkg to verify consistency
+			{
+				saved := structpkg
+				structpkg = tsym.Pkg
+				addmethod(sym, n.Type, false, nointerface)
+				structpkg = saved
+			}
+			nointerface = false
+			funchdr(n)
+
+			// (comment from go.y)
+			// inl.C's inlnode in on a dotmeth node expects to find the inlineable body as
+			// (dotmeth's type).Nname.Inl, and dotmeth's type has been pulled
+			// out by typecheck's lookdot as this $$.ttype.  So by providing
+			// this back link here we avoid special casing there.
+			n.Type.Nname = n
+
+			// go.y:hidden_import
+			n.Func.Inl = nil
+			funcbody(n)
+			importlist = append(importlist, n) // TODO(gri) do this only if body is inlineable?
+		}
+
+	case arrayTag, sliceTag:
+		t = p.newtyp(TARRAY)
+		t.Bound = -1
+		if i == arrayTag {
+			t.Bound = p.int64()
+		}
+		t.Type = p.typ()
+
+	case dddTag:
+		t = p.newtyp(T_old_DARRAY)
+		t.Bound = -1
+		t.Type = p.typ()
+
+	case structTag:
+		t = p.newtyp(TSTRUCT)
+		tostruct0(t, p.fieldList())
+
+	case pointerTag:
+		t = p.newtyp(Tptr)
+		t.Type = p.typ()
+
+	case signatureTag:
+		t = p.newtyp(TFUNC)
+		params := p.paramList()
+		result := p.paramList()
+		functype0(t, nil, params, result)
+
+	case interfaceTag:
+		t = p.newtyp(TINTER)
+		if p.int() != 0 {
+			Fatalf("unexpected embedded interface")
+		}
+		tointerface0(t, p.methodList())
+
+	case mapTag:
+		t = p.newtyp(TMAP)
+		t.Down = p.typ() // key
+		t.Type = p.typ() // val
+
+	case chanTag:
+		t = p.newtyp(TCHAN)
+		t.Chan = uint8(p.int())
+		t.Type = p.typ()
+
+	default:
+		Fatalf("unexpected type (tag = %d)", i)
+	}
+
+	if t == nil {
+		Fatalf("nil type (type tag = %d)", i)
+	}
+
+	return t
+}
+
+func (p *importer) qualifiedName() *Sym {
+	name := p.string()
+	pkg := p.pkg()
+	return pkg.Lookup(name)
+}
+
+// go.y:hidden_structdcl_list
+func (p *importer) fieldList() *NodeList {
+	i := p.int()
+	if i == 0 {
+		return nil
+	}
+	n := list1(p.field())
+	for i--; i > 0; i-- {
+		n = list(n, p.field())
+	}
+	return n
+}
+
+// go.y:hidden_structdcl
+func (p *importer) field() *Node {
+	sym := p.fieldName()
+	typ := p.typ()
+	note := p.note()
+
+	var n *Node
+	if sym.Name != "" {
+		n = Nod(ODCLFIELD, newname(sym), typenod(typ))
+	} else {
+		// anonymous field - typ must be T or *T and T must be a type name
+		s := typ.Sym
+		if s == nil && Isptr[typ.Etype] {
+			s = typ.Type.Sym // deref
+		}
+		pkg := importpkg
+		if sym != nil {
+			pkg = sym.Pkg
+		}
+		n = embedded(s, pkg)
+		n.Right = typenod(typ)
+	}
+	n.SetVal(note)
+
+	return n
+}
+
+func (p *importer) note() (v Val) {
+	if s := p.string(); s != "" {
+		v.U = s
+	}
+	return
+}
+
+// go.y:hidden_interfacedcl_list
+func (p *importer) methodList() *NodeList {
+	i := p.int()
+	if i == 0 {
+		return nil
+	}
+	n := list1(p.method())
+	for i--; i > 0; i-- {
+		n = list(n, p.method())
+	}
+	return n
+}
+
+// go.y:hidden_interfacedcl
+func (p *importer) method() *Node {
+	sym := p.fieldName()
+	params := p.paramList()
+	result := p.paramList()
+	return Nod(ODCLFIELD, newname(sym), typenod(functype(fakethis(), params, result)))
+}
+
+// go.y:sym,hidden_importsym
+func (p *importer) fieldName() *Sym {
+	name := p.string()
+	pkg := localpkg
+	if name == "_" {
+		// During imports, unqualified non-exported identifiers are from builtinpkg
+		// (see go.y:sym). The binary exporter only exports blank as a non-exported
+		// identifier without qualification.
+		pkg = builtinpkg
+	} else if name == "?" || name != "" && !exportname(name) {
+		if name == "?" {
+			name = ""
+		}
+		pkg = p.pkg()
+	}
+	return pkg.Lookup(name)
+}
+
+// go.y:ohidden_funarg_list
+func (p *importer) paramList() *NodeList {
+	i := p.int()
+	if i == 0 {
+		return nil
+	}
+	// negative length indicates unnamed parameters
+	named := true
+	if i < 0 {
+		i = -i
+		named = false
+	}
+	// i > 0
+	n := list1(p.param(named))
+	i--
+	for ; i > 0; i-- {
+		n = list(n, p.param(named))
+	}
+	return n
+}
+
+// go.y:hidden_funarg
+func (p *importer) param(named bool) *Node {
+	typ := p.typ()
+
+	isddd := false
+	if typ.Etype == T_old_DARRAY {
+		// T_old_DARRAY indicates ... type
+		typ.Etype = TARRAY
+		isddd = true
+	}
+
+	n := Nod(ODCLFIELD, nil, typenod(typ))
+	n.Isddd = isddd
+
+	if named {
+		name := p.string()
+		if name == "" {
+			Fatalf("expected named parameter")
+		}
+		// The parameter package doesn't matter; it's never consulted.
+		// We use the builtinpkg per go.y:sym (line 1181).
+		n.Left = newname(builtinpkg.Lookup(name))
+	}
+
+	// TODO(gri) This is compiler-specific (escape info).
+	// Move into compiler-specific section eventually?
+	n.SetVal(p.note())
+
+	return n
+}
+
+func (p *importer) value(typ *Type) (x Val) {
+	switch tag := p.tagOrIndex(); tag {
+	case falseTag:
+		x.U = false
+	case trueTag:
+		x.U = true
+	case int64Tag:
+		u := new(Mpint)
+		Mpmovecfix(u, p.int64())
+		u.Rune = typ == idealrune
+		x.U = u
+	case floatTag:
+		f := newMpflt()
+		p.float(f)
+		if typ == idealint || Isint[typ.Etype] {
+			// uncommon case: large int encoded as float
+			u := new(Mpint)
+			mpmovefltfix(u, f)
+			x.U = u
+			break
+		}
+		x.U = f
+	case complexTag:
+		u := new(Mpcplx)
+		p.float(&u.Real)
+		p.float(&u.Imag)
+		x.U = u
+	case stringTag:
+		x.U = p.string()
+	default:
+		Fatalf("unexpected value tag %d", tag)
+	}
+
+	// verify ideal type
+	if isideal(typ) && untype(x.Ctype()) != typ {
+		Fatalf("value %v and type %v don't match", x, typ)
+	}
+
+	return
+}
+
+func (p *importer) float(x *Mpflt) {
+	sign := p.int()
+	if sign == 0 {
+		Mpmovecflt(x, 0)
+		return
+	}
+
+	exp := p.int()
+	mant := new(big.Int).SetBytes([]byte(p.string()))
+
+	m := x.Val.SetInt(mant)
+	m.SetMantExp(m, exp-mant.BitLen())
+	if sign < 0 {
+		m.Neg(m)
+	}
+}
+
+// ----------------------------------------------------------------------------
+// Inlined function bodies
+
+func (p *importer) body() {
+	p.int()
+	p.block()
+}
+
+func (p *importer) block() {
+	for i := p.int(); i > 0; i-- {
+		p.stmt()
+	}
+}
+
+func (p *importer) stmt() {
+	// TODO(gri) do something sensible here
+	p.string()
+}
+
+// ----------------------------------------------------------------------------
+// Low-level decoders
+
+func (p *importer) tagOrIndex() int {
+	if p.debugFormat {
+		p.marker('t')
+	}
+
+	return int(p.rawInt64())
+}
+
+func (p *importer) int() int {
+	x := p.int64()
+	if int64(int(x)) != x {
+		Fatalf("exported integer too large")
+	}
+	return int(x)
+}
+
+func (p *importer) int64() int64 {
+	if p.debugFormat {
+		p.marker('i')
+	}
+
+	return p.rawInt64()
+}
+
+func (p *importer) string() string {
+	if p.debugFormat {
+		p.marker('s')
+	}
+
+	if n := int(p.rawInt64()); n > 0 {
+		if cap(p.buf) < n {
+			p.buf = make([]byte, n)
+		} else {
+			p.buf = p.buf[:n]
+		}
+		for i := 0; i < n; i++ {
+			p.buf[i] = p.byte()
+		}
+		return string(p.buf)
+	}
+
+	return ""
+}
+
+func (p *importer) marker(want byte) {
+	if got := p.byte(); got != want {
+		Fatalf("incorrect marker: got %c; want %c (pos = %d)", got, want, p.read)
+	}
+
+	pos := p.read
+	if n := int(p.rawInt64()); n != pos {
+		Fatalf("incorrect position: got %d; want %d", n, pos)
+	}
+}
+
+// rawInt64 should only be used by low-level decoders
+func (p *importer) rawInt64() int64 {
+	i, err := binary.ReadVarint(p)
+	if err != nil {
+		Fatalf("read error: %v", err)
+	}
+	return i
+}
+
+// needed for binary.ReadVarint in rawInt64
+func (p *importer) ReadByte() (byte, error) {
+	return p.byte(), nil
+}
+
+// byte is the bottleneck interface for reading from p.in.
+// It unescapes '|' 'S' to '$' and '|' '|' to '|'.
+func (p *importer) byte() byte {
+	c := obj.Bgetc(p.in)
+	p.read++
+	if c < 0 {
+		Fatalf("read error")
+	}
+	if c == '|' {
+		c = obj.Bgetc(p.in)
+		p.read++
+		if c < 0 {
+			Fatalf("read error")
+		}
+		switch c {
+		case 'S':
+			c = '$'
+		case '|':
+			// nothing to do
+		default:
+			Fatalf("unexpected escape sequence in export data")
+		}
+	}
+	return byte(c)
+}
diff --git a/src/cmd/compile/internal/gc/builtin.go b/src/cmd/compile/internal/gc/builtin.go
index 66f66a7..568ffdd 100644
--- a/src/cmd/compile/internal/gc/builtin.go
+++ b/src/cmd/compile/internal/gc/builtin.go
@@ -4,7 +4,6 @@
 
 const runtimeimport = "" +
 	"package runtime\n" +
-	"import runtime \"runtime\"\n" +
 	"func @\"\".newobject (@\"\".typ·2 *byte) (? *any)\n" +
 	"func @\"\".panicindex ()\n" +
 	"func @\"\".panicslice ()\n" +
@@ -87,7 +86,7 @@
 	"func @\"\".chanrecv2 (@\"\".chanType·2 *byte, @\"\".hchan·3 <-chan any, @\"\".elem·4 *any) (? bool)\n" +
 	"func @\"\".chansend1 (@\"\".chanType·1 *byte, @\"\".hchan·2 chan<- any, @\"\".elem·3 *any)\n" +
 	"func @\"\".closechan (@\"\".hchan·1 any)\n" +
-	"var @\"\".writeBarrierEnabled bool\n" +
+	"var @\"\".writeBarrier struct { @\"\".enabled bool; @\"\".needed bool; @\"\".cgo bool }\n" +
 	"func @\"\".writebarrierptr (@\"\".dst·1 *any, @\"\".src·2 any)\n" +
 	"func @\"\".writebarrierstring (@\"\".dst·1 *any, @\"\".src·2 any)\n" +
 	"func @\"\".writebarrierslice (@\"\".dst·1 *any, @\"\".src·2 any)\n" +
@@ -157,12 +156,13 @@
 	"func @\"\".racewrite (? uintptr)\n" +
 	"func @\"\".racereadrange (@\"\".addr·1 uintptr, @\"\".size·2 uintptr)\n" +
 	"func @\"\".racewriterange (@\"\".addr·1 uintptr, @\"\".size·2 uintptr)\n" +
+	"func @\"\".msanread (@\"\".addr·1 uintptr, @\"\".size·2 uintptr)\n" +
+	"func @\"\".msanwrite (@\"\".addr·1 uintptr, @\"\".size·2 uintptr)\n" +
 	"\n" +
 	"$$\n"
 
 const unsafeimport = "" +
 	"package unsafe\n" +
-	"import runtime \"runtime\"\n" +
 	"type @\"\".Pointer uintptr\n" +
 	"func @\"\".Offsetof (? any) (? uintptr)\n" +
 	"func @\"\".Sizeof (? any) (? uintptr)\n" +
diff --git a/src/cmd/compile/internal/gc/builtin/runtime.go b/src/cmd/compile/internal/gc/builtin/runtime.go
index 43c35ca..07a0c31 100644
--- a/src/cmd/compile/internal/gc/builtin/runtime.go
+++ b/src/cmd/compile/internal/gc/builtin/runtime.go
@@ -108,7 +108,11 @@
 func chansend1(chanType *byte, hchan chan<- any, elem *any)
 func closechan(hchan any)
 
-var writeBarrierEnabled bool
+var writeBarrier struct {
+	enabled bool
+	needed  bool
+	cgo     bool
+}
 
 func writebarrierptr(dst *any, src any)
 func writebarrierstring(dst *any, src any)
@@ -195,3 +199,7 @@
 func racewrite(uintptr)
 func racereadrange(addr, size uintptr)
 func racewriterange(addr, size uintptr)
+
+// memory sanitizer
+func msanread(addr, size uintptr)
+func msanwrite(addr, size uintptr)
diff --git a/src/cmd/compile/internal/gc/bv.go b/src/cmd/compile/internal/gc/bv.go
index b40339e..c19ec81 100644
--- a/src/cmd/compile/internal/gc/bv.go
+++ b/src/cmd/compile/internal/gc/bv.go
@@ -56,7 +56,7 @@
 	return out
 }
 
-/* difference */
+// difference
 func bvandnot(dst Bvec, src1 Bvec, src2 Bvec) {
 	for i, x := range src1.b {
 		dst.b[i] = x &^ src2.b[i]
@@ -151,14 +151,14 @@
 	}
 }
 
-/* union */
+// union
 func bvor(dst Bvec, src1 Bvec, src2 Bvec) {
 	for i, x := range src1.b {
 		dst.b[i] = x | src2.b[i]
 	}
 }
 
-/* intersection */
+// intersection
 func bvand(dst Bvec, src1 Bvec, src2 Bvec) {
 	for i, x := range src1.b {
 		dst.b[i] = x & src2.b[i]
diff --git a/src/cmd/compile/internal/gc/cgen.go b/src/cmd/compile/internal/gc/cgen.go
index d6538a3..6456240 100644
--- a/src/cmd/compile/internal/gc/cgen.go
+++ b/src/cmd/compile/internal/gc/cgen.go
@@ -6,15 +6,14 @@
 
 import (
 	"cmd/internal/obj"
+	"cmd/internal/obj/ppc64"
 	"fmt"
 )
 
-/*
- * generate:
- *	res = n;
- * simplifies and calls Thearch.Gmove.
- * if wb is true, need to emit write barriers.
- */
+// generate:
+//	res = n;
+// simplifies and calls Thearch.Gmove.
+// if wb is true, need to emit write barriers.
 func Cgen(n, res *Node) {
 	cgen_wb(n, res, false)
 }
@@ -190,7 +189,7 @@
 	}
 
 	if wb {
-		if int(Simtype[res.Type.Etype]) != Tptr {
+		if Simtype[res.Type.Etype] != Tptr {
 			Fatalf("cgen_wb of type %v", res.Type)
 		}
 		if n.Ullman >= UINF {
@@ -253,7 +252,7 @@
 		return
 	}
 
-	if Ctxt.Arch.Thechar == '7' || Ctxt.Arch.Thechar == '9' {
+	if Ctxt.Arch.Thechar == '0' || Ctxt.Arch.Thechar == '7' || Ctxt.Arch.Thechar == '9' {
 		// if both are addressable, move
 		if n.Addable {
 			if n.Op == OREGISTER || res.Op == OREGISTER {
@@ -397,7 +396,7 @@
 			goto sbop
 		}
 
-		a := Thearch.Optoas(int(n.Op), nl.Type)
+		a := Thearch.Optoas(n.Op, nl.Type)
 		// unary
 		var n1 Node
 		Regalloc(&n1, nl.Type, res)
@@ -434,15 +433,15 @@
 		OXOR,
 		OADD,
 		OMUL:
-		if n.Op == OMUL && Thearch.Cgen_bmul != nil && Thearch.Cgen_bmul(int(n.Op), nl, nr, res) {
+		if n.Op == OMUL && Thearch.Cgen_bmul != nil && Thearch.Cgen_bmul(n.Op, nl, nr, res) {
 			break
 		}
-		a = Thearch.Optoas(int(n.Op), nl.Type)
+		a = Thearch.Optoas(n.Op, nl.Type)
 		goto sbop
 
 		// asymmetric binary
 	case OSUB:
-		a = Thearch.Optoas(int(n.Op), nl.Type)
+		a = Thearch.Optoas(n.Op, nl.Type)
 		goto abop
 
 	case OHMUL:
@@ -656,7 +655,7 @@
 
 	case OMOD, ODIV:
 		if Isfloat[n.Type.Etype] || Thearch.Dodiv == nil {
-			a = Thearch.Optoas(int(n.Op), nl.Type)
+			a = Thearch.Optoas(n.Op, nl.Type)
 			goto abop
 		}
 
@@ -664,7 +663,7 @@
 			var n1 Node
 			Regalloc(&n1, nl.Type, res)
 			Cgen(nl, &n1)
-			cgen_div(int(n.Op), &n1, nr, res)
+			cgen_div(n.Op, &n1, nr, res)
 			Regfree(&n1)
 		} else {
 			var n2 Node
@@ -675,34 +674,32 @@
 				n2 = *nr
 			}
 
-			cgen_div(int(n.Op), nl, &n2, res)
+			cgen_div(n.Op, nl, &n2, res)
 			if n2.Op != OLITERAL {
 				Regfree(&n2)
 			}
 		}
 
 	case OLSH, ORSH, OLROT:
-		Thearch.Cgen_shift(int(n.Op), n.Bounded, nl, nr, res)
+		Thearch.Cgen_shift(n.Op, n.Bounded, nl, nr, res)
 	}
 
 	return
 
-	/*
-	 * put simplest on right - we'll generate into left
-	 * and then adjust it using the computation of right.
-	 * constants and variables have the same ullman
-	 * count, so look for constants specially.
-	 *
-	 * an integer constant we can use as an immediate
-	 * is simpler than a variable - we can use the immediate
-	 * in the adjustment instruction directly - so it goes
-	 * on the right.
-	 *
-	 * other constants, like big integers or floating point
-	 * constants, require a mov into a register, so those
-	 * might as well go on the left, so we can reuse that
-	 * register for the computation.
-	 */
+	// put simplest on right - we'll generate into left
+	// and then adjust it using the computation of right.
+	// constants and variables have the same ullman
+	// count, so look for constants specially.
+	//
+	// an integer constant we can use as an immediate
+	// is simpler than a variable - we can use the immediate
+	// in the adjustment instruction directly - so it goes
+	// on the right.
+	//
+	// other constants, like big integers or floating point
+	// constants, require a mov into a register, so those
+	// might as well go on the left, so we can reuse that
+	// register for the computation.
 sbop: // symmetric binary
 	if nl.Ullman < nr.Ullman || (nl.Ullman == nr.Ullman && (Smallintconst(nl) || (nr.Op == OLITERAL && !Smallintconst(nr)))) {
 		nl, nr = nr, nl
@@ -755,14 +752,14 @@
 		Regalloc(&n1, nl.Type, res)
 		Cgen(nl, &n1)
 
-		if Smallintconst(nr) && Ctxt.Arch.Thechar != '5' && Ctxt.Arch.Thechar != '7' && Ctxt.Arch.Thechar != '9' { // TODO(rsc): Check opcode for arm
+		if Smallintconst(nr) && Ctxt.Arch.Thechar != '0' && Ctxt.Arch.Thechar != '5' && Ctxt.Arch.Thechar != '7' && Ctxt.Arch.Thechar != '9' { // TODO(rsc): Check opcode for arm
 			n2 = *nr
 		} else {
 			Regalloc(&n2, nr.Type, nil)
 			Cgen(nr, &n2)
 		}
 	} else {
-		if Smallintconst(nr) && Ctxt.Arch.Thechar != '5' && Ctxt.Arch.Thechar != '7' && Ctxt.Arch.Thechar != '9' { // TODO(rsc): Check opcode for arm
+		if Smallintconst(nr) && Ctxt.Arch.Thechar != '0' && Ctxt.Arch.Thechar != '5' && Ctxt.Arch.Thechar != '7' && Ctxt.Arch.Thechar != '9' { // TODO(rsc): Check opcode for arm
 			n2 = *nr
 		} else {
 			Regalloc(&n2, nr.Type, res)
@@ -783,8 +780,13 @@
 var sys_wbptr *Node
 
 func cgen_wbptr(n, res *Node) {
-	if Curfn != nil && Curfn.Func.Nowritebarrier {
-		Yyerror("write barrier prohibited")
+	if Curfn != nil {
+		if Curfn.Func.Nowritebarrier {
+			Yyerror("write barrier prohibited")
+		}
+		if Curfn.Func.WBLineno == 0 {
+			Curfn.Func.WBLineno = lineno
+		}
 	}
 	if Debug_wb > 0 {
 		Warn("write barrier")
@@ -799,7 +801,9 @@
 		Cgenr(n, &src, nil)
 	}
 
-	wbEnabled := syslook("writeBarrierEnabled", 0)
+	wbVar := syslook("writeBarrier", 0)
+	wbEnabled := Nod(ODOT, wbVar, newname(wbVar.Type.Type.Sym))
+	wbEnabled = typecheck(&wbEnabled, Erv)
 	pbr := Thearch.Ginscmp(ONE, Types[TUINT8], wbEnabled, Nodintconst(0), -1)
 	Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), &src, &dst)
 	pjmp := Gbranch(obj.AJMP, nil, 0)
@@ -826,8 +830,13 @@
 }
 
 func cgen_wbfat(n, res *Node) {
-	if Curfn != nil && Curfn.Func.Nowritebarrier {
-		Yyerror("write barrier prohibited")
+	if Curfn != nil {
+		if Curfn.Func.Nowritebarrier {
+			Yyerror("write barrier prohibited")
+		}
+		if Curfn.Func.WBLineno == 0 {
+			Curfn.Func.WBLineno = lineno
+		}
 	}
 	if Debug_wb > 0 {
 		Warn("write barrier")
@@ -909,11 +918,9 @@
 	}
 }
 
-/*
- * allocate a register (reusing res if possible) and generate
- *  a = n
- * The caller must call Regfree(a).
- */
+// allocate a register (reusing res if possible) and generate
+//	a = n
+// The caller must call Regfree(a).
 func Cgenr(n *Node, a *Node, res *Node) {
 	if Debug['g'] != 0 {
 		Dump("cgenr-n", n)
@@ -949,12 +956,10 @@
 	}
 }
 
-/*
- * allocate a register (reusing res if possible) and generate
- * a = &n
- * The caller must call Regfree(a).
- * The generated code checks that the result is not nil.
- */
+// allocate a register (reusing res if possible) and generate
+//	a = &n
+// The caller must call Regfree(a).
+// The generated code checks that the result is not nil.
 func Agenr(n *Node, a *Node, res *Node) {
 	if Debug['g'] != 0 {
 		Dump("\nagenr-n", n)
@@ -1468,11 +1473,9 @@
 	return x
 }
 
-/*
- * generate:
- *	res = &n;
- * The generated code checks that the result is not nil.
- */
+// generate:
+//	res = &n;
+// The generated code checks that the result is not nil.
 func Agen(n *Node, res *Node) {
 	if Debug['g'] != 0 {
 		Dump("\nagen-res", res)
@@ -1829,8 +1832,8 @@
 			// but they don't support direct generation of a bool value yet.
 			// We can fix that as we go.
 			switch Ctxt.Arch.Thechar {
-			case '5', '7', '9':
-				Fatalf("genval 5g, 7g, 9g ONAMES not fully implemented")
+			case '0', '5', '7', '9':
+				Fatalf("genval 0g, 5g, 7g, 9g ONAMES not fully implemented")
 			}
 			Cgen(n, res)
 			if !wantTrue {
@@ -1839,7 +1842,7 @@
 			return
 		}
 
-		if n.Addable && Ctxt.Arch.Thechar != '5' && Ctxt.Arch.Thechar != '7' && Ctxt.Arch.Thechar != '9' {
+		if n.Addable && Ctxt.Arch.Thechar != '0' && Ctxt.Arch.Thechar != '5' && Ctxt.Arch.Thechar != '7' && Ctxt.Arch.Thechar != '9' {
 			// no need for a temporary
 			bgenNonZero(n, nil, wantTrue, likely, to)
 			return
@@ -1912,7 +1915,7 @@
 	// n.Op is one of OEQ, ONE, OLT, OGT, OLE, OGE
 	nl := n.Left
 	nr := n.Right
-	a := int(n.Op)
+	op := n.Op
 
 	if !wantTrue {
 		if Isfloat[nr.Type.Etype] {
@@ -1935,19 +1938,19 @@
 			return
 		}
 
-		a = Brcom(a)
+		op = Brcom(op)
 	}
 	wantTrue = true
 
 	// make simplest on right
 	if nl.Op == OLITERAL || (nl.Ullman < nr.Ullman && nl.Ullman < UINF) {
-		a = Brrev(a)
+		op = Brrev(op)
 		nl, nr = nr, nl
 	}
 
 	if Isslice(nl.Type) || Isinter(nl.Type) {
 		// front end should only leave cmp to literal nil
-		if (a != OEQ && a != ONE) || nr.Op != OLITERAL {
+		if (op != OEQ && op != ONE) || nr.Op != OLITERAL {
 			if Isslice(nl.Type) {
 				Yyerror("illegal slice comparison")
 			} else {
@@ -1966,13 +1969,13 @@
 		Regalloc(&tmp, ptr.Type, &ptr)
 		Cgen(&ptr, &tmp)
 		Regfree(&ptr)
-		bgenNonZero(&tmp, res, a == OEQ != wantTrue, likely, to)
+		bgenNonZero(&tmp, res, op == OEQ != wantTrue, likely, to)
 		Regfree(&tmp)
 		return
 	}
 
 	if Iscomplex[nl.Type.Etype] {
-		complexbool(a, nl, nr, res, wantTrue, likely, to)
+		complexbool(op, nl, nr, res, wantTrue, likely, to)
 		return
 	}
 
@@ -1988,7 +1991,7 @@
 		if !nr.Addable {
 			nr = CgenTemp(nr)
 		}
-		Thearch.Cmp64(nl, nr, a, likely, to)
+		Thearch.Cmp64(nl, nr, op, likely, to)
 		return
 	}
 
@@ -2023,9 +2026,9 @@
 		Cgen(nl, &n1)
 		nl = &n1
 
-		if Smallintconst(nr) && Ctxt.Arch.Thechar != '9' {
+		if Smallintconst(nr) && Ctxt.Arch.Thechar != '0' && Ctxt.Arch.Thechar != '9' {
 			Thearch.Gins(Thearch.Optoas(OCMP, nr.Type), nl, nr)
-			bins(nr.Type, res, a, likely, to)
+			bins(nr.Type, res, op, likely, to)
 			return
 		}
 
@@ -2043,9 +2046,16 @@
 	l, r := nl, nr
 
 	// On x86, only < and <= work right with NaN; reverse if needed
-	if Ctxt.Arch.Thechar == '6' && Isfloat[nl.Type.Etype] && (a == OGT || a == OGE) {
+	if Ctxt.Arch.Thechar == '6' && Isfloat[nl.Type.Etype] && (op == OGT || op == OGE) {
 		l, r = r, l
-		a = Brrev(a)
+		op = Brrev(op)
+	}
+
+	// MIPS does not have CMP instruction
+	if Ctxt.Arch.Thechar == '0' {
+		p := Thearch.Ginscmp(op, nr.Type, l, r, likely)
+		Patch(p, to)
+		return
 	}
 
 	// Do the comparison.
@@ -2062,10 +2072,10 @@
 			switch n.Op {
 			case ONE:
 				Patch(Gbranch(Thearch.Optoas(OPS, nr.Type), nr.Type, likely), to)
-				Patch(Gbranch(Thearch.Optoas(a, nr.Type), nr.Type, likely), to)
+				Patch(Gbranch(Thearch.Optoas(op, nr.Type), nr.Type, likely), to)
 			default:
 				p := Gbranch(Thearch.Optoas(OPS, nr.Type), nr.Type, -likely)
-				Patch(Gbranch(Thearch.Optoas(a, nr.Type), nr.Type, likely), to)
+				Patch(Gbranch(Thearch.Optoas(op, nr.Type), nr.Type, likely), to)
 				Patch(p, Pc)
 			}
 			return
@@ -2111,12 +2121,12 @@
 			// On arm64 and ppc64, <= and >= mishandle NaN. Must decompose into < or > and =.
 			// TODO(josh): Convert a <= b to b > a instead?
 			case OLE, OGE:
-				if a == OLE {
-					a = OLT
+				if op == OLE {
+					op = OLT
 				} else {
-					a = OGT
+					op = OGT
 				}
-				Patch(Gbranch(Thearch.Optoas(a, nr.Type), nr.Type, likely), to)
+				Patch(Gbranch(Thearch.Optoas(op, nr.Type), nr.Type, likely), to)
 				Patch(Gbranch(Thearch.Optoas(OEQ, nr.Type), nr.Type, likely), to)
 				return
 			}
@@ -2124,26 +2134,35 @@
 	}
 
 	// Not a special case. Insert the conditional jump or value gen.
-	bins(nr.Type, res, a, likely, to)
+	bins(nr.Type, res, op, likely, to)
 }
 
 func bgenNonZero(n, res *Node, wantTrue bool, likely int, to *obj.Prog) {
 	// TODO: Optimize on systems that can compare to zero easily.
-	a := ONE
+	var op Op = ONE
 	if !wantTrue {
-		a = OEQ
+		op = OEQ
 	}
+
+	// MIPS does not have CMP instruction
+	if Thearch.Thechar == '0' {
+		p := Gbranch(Thearch.Optoas(op, n.Type), n.Type, likely)
+		Naddr(&p.From, n)
+		Patch(p, to)
+		return
+	}
+
 	var zero Node
 	Nodconst(&zero, n.Type, 0)
 	Thearch.Gins(Thearch.Optoas(OCMP, n.Type), n, &zero)
-	bins(n.Type, res, a, likely, to)
+	bins(n.Type, res, op, likely, to)
 }
 
 // bins inserts an instruction to handle the result of a compare.
 // If res is non-nil, it inserts appropriate value generation instructions.
 // If res is nil, it inserts a branch to to.
-func bins(typ *Type, res *Node, a, likely int, to *obj.Prog) {
-	a = Thearch.Optoas(a, typ)
+func bins(typ *Type, res *Node, op Op, likely int, to *obj.Prog) {
+	a := Thearch.Optoas(op, typ)
 	if res != nil {
 		// value gen
 		Thearch.Ginsboolval(a, res)
@@ -2219,11 +2238,9 @@
 	return -1000 // not on stack
 }
 
-/*
- * block copy:
- *	memmove(&ns, &n, w);
- * if wb is true, needs write barrier.
- */
+// block copy:
+//	memmove(&ns, &n, w);
+// if wb is true, needs write barrier.
 func sgen_wb(n *Node, ns *Node, w int64, wb bool) {
 	if Debug['g'] != 0 {
 		op := "sgen"
@@ -2301,15 +2318,13 @@
 	Thearch.Blockcopy(n, ns, osrc, odst, w)
 }
 
-/*
- * generate:
- *	call f
- *	proc=-1	normal call but no return
- *	proc=0	normal call
- *	proc=1	goroutine run in new proc
- *	proc=2	defer call save away stack
-  *	proc=3	normal call to C pointer (not Go func value)
-*/
+// generate:
+//	call f
+//	proc=-1	normal call but no return
+//	proc=0	normal call
+//	proc=1	goroutine run in new proc
+//	proc=2	defer call save away stack
+//	proc=3	normal call to C pointer (not Go func value)
 func Ginscall(f *Node, proc int) {
 	if f.Type != nil {
 		extra := int32(0)
@@ -2327,15 +2342,39 @@
 		-1: // normal call but no return
 		if f.Op == ONAME && f.Class == PFUNC {
 			if f == Deferreturn {
-				// Deferred calls will appear to be returning to
-				// the CALL deferreturn(SB) that we are about to emit.
-				// However, the stack trace code will show the line
-				// of the instruction byte before the return PC.
-				// To avoid that being an unrelated instruction,
-				// insert an actual hardware NOP that will have the right line number.
-				// This is different from obj.ANOP, which is a virtual no-op
-				// that doesn't make it into the instruction stream.
+				// Deferred calls will appear to be returning to the CALL
+				// deferreturn(SB) that we are about to emit. However, the
+				// stack scanning code will think that the instruction
+				// before the CALL is executing. To avoid the scanning
+				// code making bad assumptions (both cosmetic such as
+				// showing the wrong line number and fatal, such as being
+				// confused over whether a stack slot contains a pointer
+				// or a scalar) insert an actual hardware NOP that will
+				// have the right line number. This is different from
+				// obj.ANOP, which is a virtual no-op that doesn't make it
+				// into the instruction stream.
 				Thearch.Ginsnop()
+
+				if Thearch.Thechar == '9' {
+					// On ppc64, when compiling Go into position
+					// independent code on ppc64le we insert an
+					// instruction to reload the TOC pointer from the
+					// stack as well. See the long comment near
+					// jmpdefer in runtime/asm_ppc64.s for why.
+					// If the MOVD is not needed, insert a hardware NOP
+					// so that the same number of instructions are used
+					// on ppc64 in both shared and non-shared modes.
+					if Ctxt.Flag_shared != 0 {
+						p := Thearch.Gins(ppc64.AMOVD, nil, nil)
+						p.From.Type = obj.TYPE_MEM
+						p.From.Offset = 24
+						p.From.Reg = ppc64.REGSP
+						p.To.Type = obj.TYPE_REG
+						p.To.Reg = ppc64.REG_R2
+					} else {
+						Thearch.Ginsnop()
+					}
+				}
 			}
 
 			p := Thearch.Gins(obj.ACALL, nil, f)
@@ -2395,10 +2434,8 @@
 	}
 }
 
-/*
- * n is call to interface method.
- * generate res = n.
- */
+// n is call to interface method.
+// generate res = n.
 func cgen_callinter(n *Node, res *Node, proc int) {
 	i := n.Left
 	if i.Op != ODOTINTER {
@@ -2468,12 +2505,10 @@
 	Regfree(&nodo)
 }
 
-/*
- * generate function call;
- *	proc=0	normal call
- *	proc=1	goroutine run in new proc
- *	proc=2	defer call save away stack
- */
+// generate function call;
+//	proc=0	normal call
+//	proc=1	goroutine run in new proc
+//	proc=2	defer call save away stack
 func cgen_call(n *Node, proc int) {
 	if n == nil {
 		return
@@ -2519,11 +2554,9 @@
 	Ginscall(n.Left, proc)
 }
 
-/*
- * call to n has already been generated.
- * generate:
- *	res = return value from call.
- */
+// call to n has already been generated.
+// generate:
+//	res = return value from call.
 func cgen_callret(n *Node, res *Node) {
 	t := n.Left.Type
 	if t.Etype == TPTR32 || t.Etype == TPTR64 {
@@ -2546,11 +2579,9 @@
 	Cgen_as(res, &nod)
 }
 
-/*
- * call to n has already been generated.
- * generate:
- *	res = &return value from call.
- */
+// call to n has already been generated.
+// generate:
+//	res = &return value from call.
 func cgen_aret(n *Node, res *Node) {
 	t := n.Left.Type
 	if Isptr[t.Etype] {
@@ -2581,10 +2612,8 @@
 	}
 }
 
-/*
- * generate return.
- * n->left is assignments to return values.
- */
+// generate return.
+// n->left is assignments to return values.
 func cgen_ret(n *Node) {
 	if n != nil {
 		Genlist(n.List) // copy out args
@@ -2601,19 +2630,17 @@
 	}
 }
 
-/*
- * generate division according to op, one of:
- *	res = nl / nr
- *	res = nl % nr
- */
-func cgen_div(op int, nl *Node, nr *Node, res *Node) {
+// generate division according to op, one of:
+//	res = nl / nr
+//	res = nl % nr
+func cgen_div(op Op, nl *Node, nr *Node, res *Node) {
 	var w int
 
 	// TODO(rsc): arm64 needs to support the relevant instructions
 	// in peep and optoas in order to enable this.
 	// TODO(rsc): ppc64 needs to support the relevant instructions
 	// in peep and optoas in order to enable this.
-	if nr.Op != OLITERAL || Ctxt.Arch.Thechar == '7' || Ctxt.Arch.Thechar == '9' {
+	if nr.Op != OLITERAL || Ctxt.Arch.Thechar == '0' || Ctxt.Arch.Thechar == '7' || Ctxt.Arch.Thechar == '9' {
 		goto longdiv
 	}
 	w = int(nl.Type.Width * 8)
diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go
index 8ebdd66..6853e62 100644
--- a/src/cmd/compile/internal/gc/closure.go
+++ b/src/cmd/compile/internal/gc/closure.go
@@ -9,9 +9,7 @@
 	"fmt"
 )
 
-/*
- * function literals aka closures
- */
+// function literals aka closures
 func closurehdr(ntype *Node) {
 	var name *Node
 	var a *Node
@@ -179,10 +177,8 @@
 }
 
 func makeclosure(func_ *Node) *Node {
-	/*
-	 * wrap body in external function
-	 * that begins by reading closure parameters.
-	 */
+	// wrap body in external function
+	// that begins by reading closure parameters.
 	xtype := Nod(OTFUNC, nil, nil)
 
 	xtype.List = func_.List
diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go
index 71b582b..03c52a0 100644
--- a/src/cmd/compile/internal/gc/const.go
+++ b/src/cmd/compile/internal/gc/const.go
@@ -59,10 +59,8 @@
 	return n.Val().U.(bool)
 }
 
-/*
- * truncate float literal fv to 32-bit or 64-bit precision
- * according to type; return truncated value.
- */
+// truncate float literal fv to 32-bit or 64-bit precision
+// according to type; return truncated value.
 func truncfltlit(oldv *Mpflt, t *Type) *Mpflt {
 	if t == nil {
 		return oldv
@@ -90,19 +88,15 @@
 	return fv
 }
 
-/*
- * convert n, if literal, to type t.
- * implicit conversion.
- */
+// convert n, if literal, to type t.
+// implicit conversion.
 func Convlit(np **Node, t *Type) {
 	convlit1(np, t, false)
 }
 
-/*
- * convert n, if literal, to type t.
- * return a new node if necessary
- * (if n is a named constant, can't edit n->type directly).
- */
+// convert n, if literal, to type t.
+// return a new node if necessary
+// (if n is a named constant, can't edit n->type directly).
 func convlit1(np **Node, t *Type, explicit bool) {
 	n := *np
 	if n == nil || t == nil || n.Type == nil || isideal(t) || n.Type == t {
@@ -255,7 +249,7 @@
 		if n.Type.Etype == TUNSAFEPTR && t.Etype != TUINTPTR {
 			goto bad
 		}
-		ct := int(n.Val().Ctype())
+		ct := n.Val().Ctype()
 		if Isint[et] {
 			switch ct {
 			default:
@@ -265,7 +259,6 @@
 				n.SetVal(toint(n.Val()))
 				fallthrough
 
-				// flowthrough
 			case CTINT:
 				overflow(n.Val(), t)
 			}
@@ -278,7 +271,6 @@
 				n.SetVal(toflt(n.Val()))
 				fallthrough
 
-				// flowthrough
 			case CTFLT:
 				n.SetVal(Val{truncfltlit(n.Val().U.(*Mpflt), t)})
 			}
@@ -289,6 +281,7 @@
 
 			case CTFLT, CTINT, CTRUNE:
 				n.SetVal(tocplx(n.Val()))
+				fallthrough
 
 			case CTCPLX:
 				overflow(n.Val(), t)
@@ -474,14 +467,14 @@
 	return v
 }
 
-func consttype(n *Node) int {
+func consttype(n *Node) Ctype {
 	if n == nil || n.Op != OLITERAL {
 		return -1
 	}
-	return int(n.Val().Ctype())
+	return n.Val().Ctype()
 }
 
-func Isconst(n *Node, ct int) bool {
+func Isconst(n *Node, ct Ctype) bool {
 	t := consttype(n)
 
 	// If the caller is asking for CTINT, allow CTRUNE too.
@@ -501,9 +494,7 @@
 	return n.Orig
 }
 
-/*
- * if n is constant, rewrite as OLITERAL node.
- */
+// if n is constant, rewrite as OLITERAL node.
 func evconst(n *Node) {
 	// pick off just the opcodes that can be
 	// constant evaluated.
@@ -596,6 +587,42 @@
 		wl = TIDEAL
 	}
 
+	// avoid constant conversions in switches below
+	const (
+		CTINT_         = uint32(CTINT)
+		CTRUNE_        = uint32(CTRUNE)
+		CTFLT_         = uint32(CTFLT)
+		CTCPLX_        = uint32(CTCPLX)
+		CTSTR_         = uint32(CTSTR)
+		CTBOOL_        = uint32(CTBOOL)
+		CTNIL_         = uint32(CTNIL)
+		OCONV_         = uint32(OCONV) << 16
+		OARRAYBYTESTR_ = uint32(OARRAYBYTESTR) << 16
+		OPLUS_         = uint32(OPLUS) << 16
+		OMINUS_        = uint32(OMINUS) << 16
+		OCOM_          = uint32(OCOM) << 16
+		ONOT_          = uint32(ONOT) << 16
+		OLSH_          = uint32(OLSH) << 16
+		ORSH_          = uint32(ORSH) << 16
+		OADD_          = uint32(OADD) << 16
+		OSUB_          = uint32(OSUB) << 16
+		OMUL_          = uint32(OMUL) << 16
+		ODIV_          = uint32(ODIV) << 16
+		OMOD_          = uint32(OMOD) << 16
+		OOR_           = uint32(OOR) << 16
+		OAND_          = uint32(OAND) << 16
+		OANDNOT_       = uint32(OANDNOT) << 16
+		OXOR_          = uint32(OXOR) << 16
+		OEQ_           = uint32(OEQ) << 16
+		ONE_           = uint32(ONE) << 16
+		OLT_           = uint32(OLT) << 16
+		OLE_           = uint32(OLE) << 16
+		OGE_           = uint32(OGE) << 16
+		OGT_           = uint32(OGT) << 16
+		OOROR_         = uint32(OOROR) << 16
+		OANDAND_       = uint32(OANDAND) << 16
+	)
+
 	nr := n.Right
 	var rv Val
 	var lno int
@@ -617,11 +644,10 @@
 				Yyerror("illegal constant expression %v %v", Oconv(int(n.Op), 0), nl.Type)
 				n.Diag = 1
 			}
-
 			return
 
-		case OCONV<<16 | CTNIL,
-			OARRAYBYTESTR<<16 | CTNIL:
+		case OCONV_ | CTNIL_,
+			OARRAYBYTESTR_ | CTNIL_:
 			if n.Type.Etype == TSTRING {
 				v = tostr(v)
 				nl.Type = n.Type
@@ -630,24 +656,24 @@
 			fallthrough
 
 			// fall through
-		case OCONV<<16 | CTINT,
-			OCONV<<16 | CTRUNE,
-			OCONV<<16 | CTFLT,
-			OCONV<<16 | CTSTR:
+		case OCONV_ | CTINT_,
+			OCONV_ | CTRUNE_,
+			OCONV_ | CTFLT_,
+			OCONV_ | CTSTR_:
 			convlit1(&nl, n.Type, true)
 
 			v = nl.Val()
 
-		case OPLUS<<16 | CTINT,
-			OPLUS<<16 | CTRUNE:
+		case OPLUS_ | CTINT_,
+			OPLUS_ | CTRUNE_:
 			break
 
-		case OMINUS<<16 | CTINT,
-			OMINUS<<16 | CTRUNE:
+		case OMINUS_ | CTINT_,
+			OMINUS_ | CTRUNE_:
 			mpnegfix(v.U.(*Mpint))
 
-		case OCOM<<16 | CTINT,
-			OCOM<<16 | CTRUNE:
+		case OCOM_ | CTINT_,
+			OCOM_ | CTRUNE_:
 			et := Txxx
 			if nl.Type != nil {
 				et = int(nl.Type.Etype)
@@ -673,20 +699,20 @@
 
 			mpxorfixfix(v.U.(*Mpint), &b)
 
-		case OPLUS<<16 | CTFLT:
+		case OPLUS_ | CTFLT_:
 			break
 
-		case OMINUS<<16 | CTFLT:
+		case OMINUS_ | CTFLT_:
 			mpnegflt(v.U.(*Mpflt))
 
-		case OPLUS<<16 | CTCPLX:
+		case OPLUS_ | CTCPLX_:
 			break
 
-		case OMINUS<<16 | CTCPLX:
+		case OMINUS_ | CTCPLX_:
 			mpnegflt(&v.U.(*Mpcplx).Real)
 			mpnegflt(&v.U.(*Mpcplx).Imag)
 
-		case ONOT<<16 | CTBOOL:
+		case ONOT_ | CTBOOL_:
 			if !v.U.(bool) {
 				goto settrue
 			}
@@ -797,20 +823,20 @@
 	default:
 		goto illegal
 
-	case OADD<<16 | CTINT,
-		OADD<<16 | CTRUNE:
+	case OADD_ | CTINT_,
+		OADD_ | CTRUNE_:
 		mpaddfixfix(v.U.(*Mpint), rv.U.(*Mpint), 0)
 
-	case OSUB<<16 | CTINT,
-		OSUB<<16 | CTRUNE:
+	case OSUB_ | CTINT_,
+		OSUB_ | CTRUNE_:
 		mpsubfixfix(v.U.(*Mpint), rv.U.(*Mpint))
 
-	case OMUL<<16 | CTINT,
-		OMUL<<16 | CTRUNE:
+	case OMUL_ | CTINT_,
+		OMUL_ | CTRUNE_:
 		mpmulfixfix(v.U.(*Mpint), rv.U.(*Mpint))
 
-	case ODIV<<16 | CTINT,
-		ODIV<<16 | CTRUNE:
+	case ODIV_ | CTINT_,
+		ODIV_ | CTRUNE_:
 		if mpcmpfixc(rv.U.(*Mpint), 0) == 0 {
 			Yyerror("division by zero")
 			mpsetovf(v.U.(*Mpint))
@@ -819,8 +845,8 @@
 
 		mpdivfixfix(v.U.(*Mpint), rv.U.(*Mpint))
 
-	case OMOD<<16 | CTINT,
-		OMOD<<16 | CTRUNE:
+	case OMOD_ | CTINT_,
+		OMOD_ | CTRUNE_:
 		if mpcmpfixc(rv.U.(*Mpint), 0) == 0 {
 			Yyerror("division by zero")
 			mpsetovf(v.U.(*Mpint))
@@ -829,40 +855,40 @@
 
 		mpmodfixfix(v.U.(*Mpint), rv.U.(*Mpint))
 
-	case OLSH<<16 | CTINT,
-		OLSH<<16 | CTRUNE:
+	case OLSH_ | CTINT_,
+		OLSH_ | CTRUNE_:
 		mplshfixfix(v.U.(*Mpint), rv.U.(*Mpint))
 
-	case ORSH<<16 | CTINT,
-		ORSH<<16 | CTRUNE:
+	case ORSH_ | CTINT_,
+		ORSH_ | CTRUNE_:
 		mprshfixfix(v.U.(*Mpint), rv.U.(*Mpint))
 
-	case OOR<<16 | CTINT,
-		OOR<<16 | CTRUNE:
+	case OOR_ | CTINT_,
+		OOR_ | CTRUNE_:
 		mporfixfix(v.U.(*Mpint), rv.U.(*Mpint))
 
-	case OAND<<16 | CTINT,
-		OAND<<16 | CTRUNE:
+	case OAND_ | CTINT_,
+		OAND_ | CTRUNE_:
 		mpandfixfix(v.U.(*Mpint), rv.U.(*Mpint))
 
-	case OANDNOT<<16 | CTINT,
-		OANDNOT<<16 | CTRUNE:
+	case OANDNOT_ | CTINT_,
+		OANDNOT_ | CTRUNE_:
 		mpandnotfixfix(v.U.(*Mpint), rv.U.(*Mpint))
 
-	case OXOR<<16 | CTINT,
-		OXOR<<16 | CTRUNE:
+	case OXOR_ | CTINT_,
+		OXOR_ | CTRUNE_:
 		mpxorfixfix(v.U.(*Mpint), rv.U.(*Mpint))
 
-	case OADD<<16 | CTFLT:
+	case OADD_ | CTFLT_:
 		mpaddfltflt(v.U.(*Mpflt), rv.U.(*Mpflt))
 
-	case OSUB<<16 | CTFLT:
+	case OSUB_ | CTFLT_:
 		mpsubfltflt(v.U.(*Mpflt), rv.U.(*Mpflt))
 
-	case OMUL<<16 | CTFLT:
+	case OMUL_ | CTFLT_:
 		mpmulfltflt(v.U.(*Mpflt), rv.U.(*Mpflt))
 
-	case ODIV<<16 | CTFLT:
+	case ODIV_ | CTFLT_:
 		if mpcmpfltc(rv.U.(*Mpflt), 0) == 0 {
 			Yyerror("division by zero")
 			Mpmovecflt(v.U.(*Mpflt), 1.0)
@@ -873,7 +899,7 @@
 
 		// The default case above would print 'ideal % ideal',
 	// which is not quite an ideal error.
-	case OMOD<<16 | CTFLT:
+	case OMOD_ | CTFLT_:
 		if n.Diag == 0 {
 			Yyerror("illegal constant expression: floating-point %% operation")
 			n.Diag = 1
@@ -881,18 +907,18 @@
 
 		return
 
-	case OADD<<16 | CTCPLX:
+	case OADD_ | CTCPLX_:
 		mpaddfltflt(&v.U.(*Mpcplx).Real, &rv.U.(*Mpcplx).Real)
 		mpaddfltflt(&v.U.(*Mpcplx).Imag, &rv.U.(*Mpcplx).Imag)
 
-	case OSUB<<16 | CTCPLX:
+	case OSUB_ | CTCPLX_:
 		mpsubfltflt(&v.U.(*Mpcplx).Real, &rv.U.(*Mpcplx).Real)
 		mpsubfltflt(&v.U.(*Mpcplx).Imag, &rv.U.(*Mpcplx).Imag)
 
-	case OMUL<<16 | CTCPLX:
+	case OMUL_ | CTCPLX_:
 		cmplxmpy(v.U.(*Mpcplx), rv.U.(*Mpcplx))
 
-	case ODIV<<16 | CTCPLX:
+	case ODIV_ | CTCPLX_:
 		if mpcmpfltc(&rv.U.(*Mpcplx).Real, 0) == 0 && mpcmpfltc(&rv.U.(*Mpcplx).Imag, 0) == 0 {
 			Yyerror("complex division by zero")
 			Mpmovecflt(&rv.U.(*Mpcplx).Real, 1.0)
@@ -902,157 +928,157 @@
 
 		cmplxdiv(v.U.(*Mpcplx), rv.U.(*Mpcplx))
 
-	case OEQ<<16 | CTNIL:
+	case OEQ_ | CTNIL_:
 		goto settrue
 
-	case ONE<<16 | CTNIL:
+	case ONE_ | CTNIL_:
 		goto setfalse
 
-	case OEQ<<16 | CTINT,
-		OEQ<<16 | CTRUNE:
+	case OEQ_ | CTINT_,
+		OEQ_ | CTRUNE_:
 		if Mpcmpfixfix(v.U.(*Mpint), rv.U.(*Mpint)) == 0 {
 			goto settrue
 		}
 		goto setfalse
 
-	case ONE<<16 | CTINT,
-		ONE<<16 | CTRUNE:
+	case ONE_ | CTINT_,
+		ONE_ | CTRUNE_:
 		if Mpcmpfixfix(v.U.(*Mpint), rv.U.(*Mpint)) != 0 {
 			goto settrue
 		}
 		goto setfalse
 
-	case OLT<<16 | CTINT,
-		OLT<<16 | CTRUNE:
+	case OLT_ | CTINT_,
+		OLT_ | CTRUNE_:
 		if Mpcmpfixfix(v.U.(*Mpint), rv.U.(*Mpint)) < 0 {
 			goto settrue
 		}
 		goto setfalse
 
-	case OLE<<16 | CTINT,
-		OLE<<16 | CTRUNE:
+	case OLE_ | CTINT_,
+		OLE_ | CTRUNE_:
 		if Mpcmpfixfix(v.U.(*Mpint), rv.U.(*Mpint)) <= 0 {
 			goto settrue
 		}
 		goto setfalse
 
-	case OGE<<16 | CTINT,
-		OGE<<16 | CTRUNE:
+	case OGE_ | CTINT_,
+		OGE_ | CTRUNE_:
 		if Mpcmpfixfix(v.U.(*Mpint), rv.U.(*Mpint)) >= 0 {
 			goto settrue
 		}
 		goto setfalse
 
-	case OGT<<16 | CTINT,
-		OGT<<16 | CTRUNE:
+	case OGT_ | CTINT_,
+		OGT_ | CTRUNE_:
 		if Mpcmpfixfix(v.U.(*Mpint), rv.U.(*Mpint)) > 0 {
 			goto settrue
 		}
 		goto setfalse
 
-	case OEQ<<16 | CTFLT:
+	case OEQ_ | CTFLT_:
 		if mpcmpfltflt(v.U.(*Mpflt), rv.U.(*Mpflt)) == 0 {
 			goto settrue
 		}
 		goto setfalse
 
-	case ONE<<16 | CTFLT:
+	case ONE_ | CTFLT_:
 		if mpcmpfltflt(v.U.(*Mpflt), rv.U.(*Mpflt)) != 0 {
 			goto settrue
 		}
 		goto setfalse
 
-	case OLT<<16 | CTFLT:
+	case OLT_ | CTFLT_:
 		if mpcmpfltflt(v.U.(*Mpflt), rv.U.(*Mpflt)) < 0 {
 			goto settrue
 		}
 		goto setfalse
 
-	case OLE<<16 | CTFLT:
+	case OLE_ | CTFLT_:
 		if mpcmpfltflt(v.U.(*Mpflt), rv.U.(*Mpflt)) <= 0 {
 			goto settrue
 		}
 		goto setfalse
 
-	case OGE<<16 | CTFLT:
+	case OGE_ | CTFLT_:
 		if mpcmpfltflt(v.U.(*Mpflt), rv.U.(*Mpflt)) >= 0 {
 			goto settrue
 		}
 		goto setfalse
 
-	case OGT<<16 | CTFLT:
+	case OGT_ | CTFLT_:
 		if mpcmpfltflt(v.U.(*Mpflt), rv.U.(*Mpflt)) > 0 {
 			goto settrue
 		}
 		goto setfalse
 
-	case OEQ<<16 | CTCPLX:
+	case OEQ_ | CTCPLX_:
 		if mpcmpfltflt(&v.U.(*Mpcplx).Real, &rv.U.(*Mpcplx).Real) == 0 && mpcmpfltflt(&v.U.(*Mpcplx).Imag, &rv.U.(*Mpcplx).Imag) == 0 {
 			goto settrue
 		}
 		goto setfalse
 
-	case ONE<<16 | CTCPLX:
+	case ONE_ | CTCPLX_:
 		if mpcmpfltflt(&v.U.(*Mpcplx).Real, &rv.U.(*Mpcplx).Real) != 0 || mpcmpfltflt(&v.U.(*Mpcplx).Imag, &rv.U.(*Mpcplx).Imag) != 0 {
 			goto settrue
 		}
 		goto setfalse
 
-	case OEQ<<16 | CTSTR:
+	case OEQ_ | CTSTR_:
 		if strlit(nl) == strlit(nr) {
 			goto settrue
 		}
 		goto setfalse
 
-	case ONE<<16 | CTSTR:
+	case ONE_ | CTSTR_:
 		if strlit(nl) != strlit(nr) {
 			goto settrue
 		}
 		goto setfalse
 
-	case OLT<<16 | CTSTR:
+	case OLT_ | CTSTR_:
 		if strlit(nl) < strlit(nr) {
 			goto settrue
 		}
 		goto setfalse
 
-	case OLE<<16 | CTSTR:
+	case OLE_ | CTSTR_:
 		if strlit(nl) <= strlit(nr) {
 			goto settrue
 		}
 		goto setfalse
 
-	case OGE<<16 | CTSTR:
+	case OGE_ | CTSTR_:
 		if strlit(nl) >= strlit(nr) {
 			goto settrue
 		}
 		goto setfalse
 
-	case OGT<<16 | CTSTR:
+	case OGT_ | CTSTR_:
 		if strlit(nl) > strlit(nr) {
 			goto settrue
 		}
 		goto setfalse
 
-	case OOROR<<16 | CTBOOL:
+	case OOROR_ | CTBOOL_:
 		if v.U.(bool) || rv.U.(bool) {
 			goto settrue
 		}
 		goto setfalse
 
-	case OANDAND<<16 | CTBOOL:
+	case OANDAND_ | CTBOOL_:
 		if v.U.(bool) && rv.U.(bool) {
 			goto settrue
 		}
 		goto setfalse
 
-	case OEQ<<16 | CTBOOL:
+	case OEQ_ | CTBOOL_:
 		if v.U.(bool) == rv.U.(bool) {
 			goto settrue
 		}
 		goto setfalse
 
-	case ONE<<16 | CTBOOL:
+	case ONE_ | CTBOOL_:
 		if v.U.(bool) != rv.U.(bool) {
 			goto settrue
 		}
@@ -1099,8 +1125,6 @@
 		Yyerror("illegal constant expression: %v %v %v", nl.Type, Oconv(int(n.Op), 0), nr.Type)
 		n.Diag = 1
 	}
-
-	return
 }
 
 func nodlit(v Val) *Node {
@@ -1146,7 +1170,7 @@
 
 // idealkind returns a constant kind like consttype
 // but for an arbitrary "ideal" (untyped constant) expression.
-func idealkind(n *Node) int {
+func idealkind(n *Node) Ctype {
 	if n == nil || !isideal(n.Type) {
 		return CTxxx
 	}
@@ -1156,7 +1180,7 @@
 		return CTxxx
 
 	case OLITERAL:
-		return int(n.Val().Ctype())
+		return n.Val().Ctype()
 
 		// numeric kinds.
 	case OADD,
@@ -1300,12 +1324,10 @@
 	return
 }
 
-/*
- * defaultlit on both nodes simultaneously;
- * if they're both ideal going in they better
- * get the same type going out.
- * force means must assign concrete (non-ideal) type.
- */
+// defaultlit on both nodes simultaneously;
+// if they're both ideal going in they better
+// get the same type going out.
+// force means must assign concrete (non-ideal) type.
 func defaultlit2(lp **Node, rp **Node, force int) {
 	l := *lp
 	r := *rp
@@ -1406,11 +1428,9 @@
 	return -1
 }
 
-/*
- * convert x to type et and back to int64
- * for sign extension and truncation.
- */
-func iconv(x int64, et int) int64 {
+// convert x to type et and back to int64
+// for sign extension and truncation.
+func iconv(x int64, et EType) int64 {
 	switch et {
 	case TINT8:
 		x = int64(int8(x))
diff --git a/src/cmd/compile/internal/gc/cplx.go b/src/cmd/compile/internal/gc/cplx.go
index 9f11b96..b692456 100644
--- a/src/cmd/compile/internal/gc/cplx.go
+++ b/src/cmd/compile/internal/gc/cplx.go
@@ -14,7 +14,7 @@
 	return f.Op == OINDREG && t.Op == OINDREG && f.Xoffset+f.Type.Width >= t.Xoffset && t.Xoffset+t.Type.Width >= f.Xoffset
 }
 
-func complexbool(op int, nl, nr, res *Node, wantTrue bool, likely int, to *obj.Prog) {
+func complexbool(op Op, nl, nr, res *Node, wantTrue bool, likely int, to *obj.Prog) {
 	// make both sides addable in ullman order
 	if nr != nil {
 		if nl.Ullman > nr.Ullman && !nl.Addable {
@@ -130,7 +130,7 @@
 // build and execute tree
 //	real(res) = real(nl) op real(nr)
 //	imag(res) = imag(nl) op imag(nr)
-func complexadd(op int, nl *Node, nr *Node, res *Node) {
+func complexadd(op Op, nl *Node, nr *Node, res *Node) {
 	var n1 Node
 	var n2 Node
 	var n3 Node
@@ -143,14 +143,14 @@
 	subnode(&n5, &n6, res)
 
 	var ra Node
-	ra.Op = uint8(op)
+	ra.Op = op
 	ra.Left = &n1
 	ra.Right = &n3
 	ra.Type = n1.Type
 	Cgen(&ra, &n5)
 
 	ra = Node{}
-	ra.Op = uint8(op)
+	ra.Op = op
 	ra.Left = &n2
 	ra.Right = &n4
 	ra.Type = n2.Type
@@ -293,17 +293,10 @@
 
 	ft := Simsimtype(f.Type)
 	tt := Simsimtype(t.Type)
-	switch uint32(ft)<<16 | uint32(tt) {
-	default:
-		Fatalf("complexmove: unknown conversion: %v -> %v\n", f.Type, t.Type)
-
-		// complex to complex move/convert.
+	// complex to complex move/convert.
 	// make f addable.
 	// also use temporary if possible stack overlap.
-	case TCOMPLEX64<<16 | TCOMPLEX64,
-		TCOMPLEX64<<16 | TCOMPLEX128,
-		TCOMPLEX128<<16 | TCOMPLEX64,
-		TCOMPLEX128<<16 | TCOMPLEX128:
+	if (ft == TCOMPLEX64 || ft == TCOMPLEX128) && (tt == TCOMPLEX64 || tt == TCOMPLEX128) {
 		if !f.Addable || overlap_cplx(f, t) {
 			var tmp Node
 			Tempname(&tmp, f.Type)
@@ -320,6 +313,8 @@
 
 		Cgen(&n1, &n3)
 		Cgen(&n2, &n4)
+	} else {
+		Fatalf("complexmove: unknown conversion: %v -> %v\n", f.Type, t.Type)
 	}
 }
 
@@ -471,7 +466,7 @@
 		complexminus(nl, res)
 
 	case OADD, OSUB:
-		complexadd(int(n.Op), nl, nr, res)
+		complexadd(n.Op, nl, nr, res)
 
 	case OMUL:
 		complexmul(nl, nr, res)
diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go
index 771fe93..c0326c5 100644
--- a/src/cmd/compile/internal/gc/dcl.go
+++ b/src/cmd/compile/internal/gc/dcl.go
@@ -23,9 +23,7 @@
 	return true
 }
 
-/*
- * declaration stack & operations
- */
+// declaration stack & operations
 func dcopy(a *Sym, b *Sym) {
 	a.Pkg = b.Pkg
 	a.Name = b.Name
@@ -149,13 +147,11 @@
 
 var vargen int
 
-/*
- * declare individual names - var, typ, const
- */
+// declare individual names - var, typ, const
 
 var declare_typegen int
 
-func declare(n *Node, ctxt uint8) {
+func declare(n *Node, ctxt Class) {
 	if ctxt == PDISCARD {
 		return
 	}
@@ -221,12 +217,12 @@
 	s.Def = n
 	n.Name.Vargen = int32(gen)
 	n.Name.Funcdepth = Funcdepth
-	n.Class = uint8(ctxt)
+	n.Class = ctxt
 
 	autoexport(n, ctxt)
 }
 
-func addvar(n *Node, t *Type, ctxt uint8) {
+func addvar(n *Node, t *Type, ctxt Class) {
 	if n == nil || n.Sym == nil || (n.Op != ONAME && n.Op != ONONAME) || t == nil {
 		Fatalf("addvar: n=%v t=%v nil", n, t)
 	}
@@ -236,10 +232,8 @@
 	n.Type = t
 }
 
-/*
- * declare variables from grammar
- * new_name_list (type | [type] = expr_list)
- */
+// declare variables from grammar
+// new_name_list (type | [type] = expr_list)
 func variter(vl *NodeList, t *Node, el *NodeList) *NodeList {
 	var init *NodeList
 	doexpr := el != nil
@@ -302,10 +296,8 @@
 	return init
 }
 
-/*
- * declare constants from grammar
- * new_name_list [[type] = expr_list]
- */
+// declare constants from grammar
+// new_name_list [[type] = expr_list]
 func constiter(vl *NodeList, t *Node, cl *NodeList) *NodeList {
 	lno := int32(0) // default is to leave line number alone in listtreecopy
 	if cl == nil {
@@ -350,10 +342,8 @@
 	return vv
 }
 
-/*
- * this generates a new name node,
- * typically for labels or other one-off names.
- */
+// this generates a new name node,
+// typically for labels or other one-off names.
 func newname(s *Sym) *Node {
 	if s == nil {
 		Fatalf("newname nil")
@@ -377,10 +367,8 @@
 	return n
 }
 
-/*
- * this generates a new name node for a name
- * being declared.
- */
+// this generates a new name node for a name
+// being declared.
 func dclname(s *Sym) *Node {
 	n := newname(s)
 	n.Op = ONONAME // caller will correct it
@@ -400,12 +388,10 @@
 	return t.Nod
 }
 
-/*
- * this will return an old name
- * that has already been pushed on the
- * declaration list. a diagnostic is
- * generated if no name has been defined.
- */
+// this will return an old name
+// that has already been pushed on the
+// declaration list. a diagnostic is
+// generated if no name has been defined.
 func oldname(s *Sym) *Node {
 	n := s.Def
 	if n == nil {
@@ -450,9 +436,7 @@
 	return n
 }
 
-/*
- * := declarations
- */
+// := declarations
 func colasname(n *Node) bool {
 	switch n.Op {
 	case ONAME,
@@ -532,10 +516,8 @@
 	return as
 }
 
-/*
- * declare the arguments in an
- * interface field declaration.
- */
+// declare the arguments in an
+// interface field declaration.
 func ifacedcl(n *Node) {
 	if n.Op != ODCLFIELD || n.Right == nil {
 		Fatalf("ifacedcl")
@@ -563,12 +545,10 @@
 	funcbody(n)
 }
 
-/*
- * declare the function proper
- * and declare the arguments.
- * called in extern-declaration context
- * returns in auto-declaration context.
- */
+// declare the function proper
+// and declare the arguments.
+// called in extern-declaration context
+// returns in auto-declaration context.
 func funchdr(n *Node) {
 	// change the declaration context from extern to auto
 	if Funcdepth == 0 && dclcontext != PEXTERN {
@@ -688,11 +668,9 @@
 	}
 }
 
-/*
- * Same as funcargs, except run over an already constructed TFUNC.
- * This happens during import, where the hidden_fndcl rule has
- * used functype directly to parse the function's type.
- */
+// Same as funcargs, except run over an already constructed TFUNC.
+// This happens during import, where the hidden_fndcl rule has
+// used functype directly to parse the function's type.
 func funcargs2(t *Type) {
 	if t.Etype != TFUNC {
 		Fatalf("funcargs2 %v", t)
@@ -735,11 +713,9 @@
 	}
 }
 
-/*
- * finish the body.
- * called in auto-declaration context.
- * returns in extern-declaration context.
- */
+// finish the body.
+// called in auto-declaration context.
+// returns in extern-declaration context.
 func funcbody(n *Node) {
 	// change the declaration context from auto to extern
 	if dclcontext != PAUTO {
@@ -754,9 +730,7 @@
 	}
 }
 
-/*
- * new type being defined with name s.
- */
+// new type being defined with name s.
 func typedcl0(s *Sym) *Node {
 	n := newname(s)
 	n.Op = OTYPE
@@ -764,21 +738,17 @@
 	return n
 }
 
-/*
- * node n, which was returned by typedcl0
- * is being declared to have uncompiled type t.
- * return the ODCLTYPE node to use.
- */
+// node n, which was returned by typedcl0
+// is being declared to have uncompiled type t.
+// return the ODCLTYPE node to use.
 func typedcl1(n *Node, t *Node, local bool) *Node {
 	n.Name.Param.Ntype = t
 	n.Local = local
 	return Nod(ODCLTYPE, n, nil)
 }
 
-/*
- * structs, functions, and methods.
- * they don't belong here, but where do they belong?
- */
+// structs, functions, and methods.
+// they don't belong here, but where do they belong?
 func checkembeddedtype(t *Type) {
 	if t == nil {
 		return
@@ -869,16 +839,21 @@
 	lineno = int32(lno)
 }
 
-/*
- * convert a parsed id/type list into
- * a type for struct/interface/arglist
- */
+// convert a parsed id/type list into
+// a type for struct/interface/arglist
 func tostruct(l *NodeList) *Type {
-	var f *Type
 	t := typ(TSTRUCT)
+	tostruct0(t, l)
+	return t
+}
+
+func tostruct0(t *Type, l *NodeList) {
+	if t == nil || t.Etype != TSTRUCT {
+		Fatalf("struct expected")
+	}
 
 	for tp := &t.Type; l != nil; l = l.Next {
-		f = structfield(l.N)
+		f := structfield(l.N)
 
 		*tp = f
 		tp = &f.Down
@@ -896,8 +871,6 @@
 	if !t.Broke {
 		checkwidth(t)
 	}
-
-	return t
 }
 
 func tofunargs(l *NodeList) *Type {
@@ -910,7 +883,7 @@
 		f = structfield(l.N)
 		f.Funarg = true
 
-		// esc.c needs to find f given a PPARAM to add the tag.
+		// esc.go needs to find f given a PPARAM to add the tag.
 		if l.N.Left != nil && l.N.Left.Class == PPARAM {
 			l.N.Left.Name.Param.Field = f
 		}
@@ -996,18 +969,23 @@
 }
 
 func tointerface(l *NodeList) *Type {
-	var f *Type
-	var t1 *Type
-
 	t := typ(TINTER)
+	tointerface0(t, l)
+	return t
+}
+
+func tointerface0(t *Type, l *NodeList) *Type {
+	if t == nil || t.Etype != TINTER {
+		Fatalf("interface expected")
+	}
 
 	tp := &t.Type
 	for ; l != nil; l = l.Next {
-		f = interfacefield(l.N)
+		f := interfacefield(l.N)
 
 		if l.N.Left == nil && f.Type.Etype == TINTER {
 			// embedded interface, inline methods
-			for t1 = f.Type.Type; t1 != nil; t1 = t1.Down {
+			for t1 := f.Type.Type; t1 != nil; t1 = t1.Down {
 				f = typ(TFIELD)
 				f.Type = t1.Type
 				f.Broke = t1.Broke
@@ -1065,9 +1043,7 @@
 	return n
 }
 
-/*
- * check that the list of declarations is either all anonymous or all named
- */
+// check that the list of declarations is either all anonymous or all named
 func findtype(l *NodeList) *Node {
 	for ; l != nil; l = l.Next {
 		if l.N.Op == OKEY {
@@ -1132,7 +1108,7 @@
 		// declarations, which are parsed by rules that don't
 		// use checkargs, but can happen for func literals in
 		// the inline bodies.
-		// TODO(rsc) this can go when typefmt case TFIELD in exportmode fmt.c prints _ instead of ?
+		// TODO(rsc) this can go when typefmt case TFIELD in exportmode fmt.go prints _ instead of ?
 		if importpkg != nil && n.Sym == nil {
 			n = nil
 		}
@@ -1172,12 +1148,9 @@
 	return n
 }
 
-/*
- * Is this field a method on an interface?
- * Those methods have an anonymous
- * *struct{} as the receiver.
- * (See fakethis above.)
- */
+// Is this field a method on an interface?
+// Those methods have an anonymous *struct{} as the receiver.
+// (See fakethis above.)
 func isifacemethod(f *Type) bool {
 	rcvr := getthisx(f).Type
 	if rcvr.Sym != nil {
@@ -1194,12 +1167,17 @@
 	return true
 }
 
-/*
- * turn a parsed function declaration
- * into a type
- */
+// turn a parsed function declaration into a type
 func functype(this *Node, in *NodeList, out *NodeList) *Type {
 	t := typ(TFUNC)
+	functype0(t, this, in, out)
+	return t
+}
+
+func functype0(t *Type, this *Node, in *NodeList, out *NodeList) {
+	if t == nil || t.Etype != TFUNC {
+		Fatalf("function type expected")
+	}
 
 	var rcvr *NodeList
 	if this != nil {
@@ -1230,8 +1208,6 @@
 			t.Outnamed = true
 		}
 	}
-
-	return t
 }
 
 var methodsym_toppkg *Pkg
@@ -1339,10 +1315,8 @@
 	return n
 }
 
-/*
- * add a method, declared as a function,
- * n is fieldname, pa is base type, t is function type
- */
+// add a method, declared as a function,
+// n is fieldname, pa is base type, t is function type
 func addmethod(sf *Sym, t *Type, local bool, nointerface bool) {
 	// get field sym
 	if sf == nil {
@@ -1501,3 +1475,116 @@
 	s1.Def.Func.Shortname = newname(s)
 	funcsyms = append(funcsyms, s1.Def)
 }
+
+type nowritebarrierrecChecker struct {
+	curfn  *Node
+	stable bool
+
+	// best maps from the ODCLFUNC of each visited function that
+	// recursively invokes a write barrier to the called function
+	// on the shortest path to a write barrier.
+	best map[*Node]nowritebarrierrecCall
+}
+
+type nowritebarrierrecCall struct {
+	target *Node
+	depth  int
+	lineno int32
+}
+
+func checknowritebarrierrec() {
+	c := nowritebarrierrecChecker{
+		best: make(map[*Node]nowritebarrierrecCall),
+	}
+	visitBottomUp(xtop, func(list []*Node, recursive bool) {
+		// Functions with write barriers have depth 0.
+		for _, n := range list {
+			if n.Func.WBLineno != 0 {
+				c.best[n] = nowritebarrierrecCall{target: nil, depth: 0, lineno: n.Func.WBLineno}
+			}
+		}
+
+		// Propagate write barrier depth up from callees. In
+		// the recursive case, we have to update this at most
+		// len(list) times and can stop when we an iteration
+		// that doesn't change anything.
+		for _ = range list {
+			c.stable = false
+			for _, n := range list {
+				if n.Func.WBLineno == 0 {
+					c.curfn = n
+					c.visitcodelist(n.Nbody)
+				}
+			}
+			if c.stable {
+				break
+			}
+		}
+
+		// Check nowritebarrierrec functions.
+		for _, n := range list {
+			if !n.Func.Nowritebarrierrec {
+				continue
+			}
+			call, hasWB := c.best[n]
+			if !hasWB {
+				continue
+			}
+
+			// Build the error message in reverse.
+			err := ""
+			for call.target != nil {
+				err = fmt.Sprintf("\n\t%v: called by %v%s", Ctxt.Line(int(call.lineno)), n.Func.Nname, err)
+				n = call.target
+				call = c.best[n]
+			}
+			err = fmt.Sprintf("write barrier prohibited by caller; %v%s", n.Func.Nname, err)
+			yyerrorl(int(n.Func.WBLineno), err)
+		}
+	})
+}
+
+func (c *nowritebarrierrecChecker) visitcodelist(l *NodeList) {
+	for ; l != nil; l = l.Next {
+		c.visitcode(l.N)
+	}
+}
+
+func (c *nowritebarrierrecChecker) visitcode(n *Node) {
+	if n == nil {
+		return
+	}
+
+	if n.Op == OCALLFUNC || n.Op == OCALLMETH {
+		c.visitcall(n)
+	}
+
+	c.visitcodelist(n.Ninit)
+	c.visitcode(n.Left)
+	c.visitcode(n.Right)
+	c.visitcodelist(n.List)
+	c.visitcodelist(n.Nbody)
+	c.visitcodelist(n.Rlist)
+}
+
+func (c *nowritebarrierrecChecker) visitcall(n *Node) {
+	fn := n.Left
+	if n.Op == OCALLMETH {
+		fn = n.Left.Right.Sym.Def
+	}
+	if fn == nil || fn.Op != ONAME || fn.Class != PFUNC || fn.Name.Defn == nil {
+		return
+	}
+	defn := fn.Name.Defn
+
+	fnbest, ok := c.best[defn]
+	if !ok {
+		return
+	}
+	best, ok := c.best[c.curfn]
+	if ok && fnbest.depth+1 >= best.depth {
+		return
+	}
+	c.best[c.curfn] = nowritebarrierrecCall{target: defn, depth: fnbest.depth + 1, lineno: n.Lineno}
+	c.stable = false
+}
diff --git a/src/cmd/compile/internal/gc/esc.go b/src/cmd/compile/internal/gc/esc.go
index c989f51..293f916 100644
--- a/src/cmd/compile/internal/gc/esc.go
+++ b/src/cmd/compile/internal/gc/esc.go
@@ -7,6 +7,7 @@
 import (
 	"cmd/internal/obj"
 	"fmt"
+	"strconv"
 	"strings"
 )
 
@@ -855,7 +856,7 @@
 		var v *Node
 		for ll := n.Func.Cvars; ll != nil; ll = ll.Next {
 			v = ll.N
-			if v.Op == OXXX { // unnamed out argument; see dcl.c:/^funcargs
+			if v.Op == OXXX { // unnamed out argument; see dcl.go:/^funcargs
 				continue
 			}
 			a = v.Name.Param.Closure
@@ -1124,7 +1125,8 @@
 	if note == nil || !strings.HasPrefix(*note, "esc:") {
 		return EscUnknown
 	}
-	em := uint16(atoi((*note)[4:]))
+	n, _ := strconv.ParseInt((*note)[4:], 0, 0)
+	em := uint16(n)
 	if em == 0 {
 		return EscNone
 	}
diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go
index 878554b..b4182ae 100644
--- a/src/cmd/compile/internal/gc/export.go
+++ b/src/cmd/compile/internal/gc/export.go
@@ -5,6 +5,7 @@
 package gc
 
 import (
+	"bytes"
 	"cmd/internal/obj"
 	"fmt"
 	"sort"
@@ -12,6 +13,20 @@
 	"unicode/utf8"
 )
 
+var (
+	newexport    int // if set, use new export format
+	Debug_export int // if set, print debugging information about export data
+	exportsize   int
+)
+
+func exportf(format string, args ...interface{}) {
+	n, _ := fmt.Fprintf(bout, format, args...)
+	exportsize += n
+	if Debug_export != 0 {
+		fmt.Printf(format, args...)
+	}
+}
+
 var asmlist *NodeList
 
 // Mark n's symbol as exported
@@ -35,8 +50,8 @@
 }
 
 func exportname(s string) bool {
-	if s[0] < utf8.RuneSelf {
-		return 'A' <= s[0] && s[0] <= 'Z'
+	if r := s[0]; r < utf8.RuneSelf {
+		return 'A' <= r && r <= 'Z'
 	}
 	r, _ := utf8.DecodeRuneInString(s)
 	return unicode.IsUpper(r)
@@ -57,7 +72,7 @@
 	return sym.Pkg == localpkg && exportname(sym.Name)
 }
 
-func autoexport(n *Node, ctxt uint8) {
+func autoexport(n *Node, ctxt Class) {
 	if n == nil || n.Sym == nil {
 		return
 	}
@@ -87,7 +102,7 @@
 	if !p.Direct {
 		suffix = " // indirect"
 	}
-	fmt.Fprintf(bout, "\timport %s %q%s\n", p.Name, p.Path, suffix)
+	exportf("\timport %s %q%s\n", p.Name, p.Path, suffix)
 }
 
 // Look for anything we need for the inline body
@@ -128,7 +143,7 @@
 			}
 		}
 
-		// Local variables in the bodies need their type.
+	// Local variables in the bodies need their type.
 	case ODCL:
 		t := n.Left.Type
 
@@ -167,7 +182,7 @@
 			exportlist = append(exportlist, n)
 		}
 
-		// for operations that need a type when rendered, put the type on the export list.
+	// for operations that need a type when rendered, put the type on the export list.
 	case OCONV,
 		OCONVIFACE,
 		OCONVNOP,
@@ -216,9 +231,9 @@
 	dumpexporttype(t)
 
 	if t != nil && !isideal(t) {
-		fmt.Fprintf(bout, "\tconst %v %v = %v\n", Sconv(s, obj.FmtSharp), Tconv(t, obj.FmtSharp), Vconv(n.Val(), obj.FmtSharp))
+		exportf("\tconst %v %v = %v\n", Sconv(s, obj.FmtSharp), Tconv(t, obj.FmtSharp), Vconv(n.Val(), obj.FmtSharp))
 	} else {
-		fmt.Fprintf(bout, "\tconst %v = %v\n", Sconv(s, obj.FmtSharp), Vconv(n.Val(), obj.FmtSharp))
+		exportf("\tconst %v = %v\n", Sconv(s, obj.FmtSharp), Vconv(n.Val(), obj.FmtSharp))
 	}
 }
 
@@ -242,14 +257,14 @@
 			}
 
 			// NOTE: The space after %#S here is necessary for ld's export data parser.
-			fmt.Fprintf(bout, "\tfunc %v %v { %v }\n", Sconv(s, obj.FmtSharp), Tconv(t, obj.FmtShort|obj.FmtSharp), Hconv(n.Func.Inl, obj.FmtSharp))
+			exportf("\tfunc %v %v { %v }\n", Sconv(s, obj.FmtSharp), Tconv(t, obj.FmtShort|obj.FmtSharp), Hconv(n.Func.Inl, obj.FmtSharp))
 
 			reexportdeplist(n.Func.Inl)
 		} else {
-			fmt.Fprintf(bout, "\tfunc %v %v\n", Sconv(s, obj.FmtSharp), Tconv(t, obj.FmtShort|obj.FmtSharp))
+			exportf("\tfunc %v %v\n", Sconv(s, obj.FmtSharp), Tconv(t, obj.FmtShort|obj.FmtSharp))
 		}
 	} else {
-		fmt.Fprintf(bout, "\tvar %v %v\n", Sconv(s, obj.FmtSharp), Tconv(t, obj.FmtSharp))
+		exportf("\tvar %v %v\n", Sconv(s, obj.FmtSharp), Tconv(t, obj.FmtSharp))
 	}
 }
 
@@ -287,10 +302,10 @@
 	}
 	sort.Sort(methodbyname(m))
 
-	fmt.Fprintf(bout, "\ttype %v %v\n", Sconv(t.Sym, obj.FmtSharp), Tconv(t, obj.FmtSharp|obj.FmtLong))
+	exportf("\ttype %v %v\n", Sconv(t.Sym, obj.FmtSharp), Tconv(t, obj.FmtSharp|obj.FmtLong))
 	for _, f := range m {
 		if f.Nointerface {
-			fmt.Fprintf(bout, "\t//go:nointerface\n")
+			exportf("\t//go:nointerface\n")
 		}
 		if f.Type.Nname != nil && f.Type.Nname.Func.Inl != nil { // nname was set by caninl
 
@@ -299,10 +314,10 @@
 			if Debug['l'] < 2 {
 				typecheckinl(f.Type.Nname)
 			}
-			fmt.Fprintf(bout, "\tfunc (%v) %v %v { %v }\n", Tconv(getthisx(f.Type).Type, obj.FmtSharp), Sconv(f.Sym, obj.FmtShort|obj.FmtByte|obj.FmtSharp), Tconv(f.Type, obj.FmtShort|obj.FmtSharp), Hconv(f.Type.Nname.Func.Inl, obj.FmtSharp))
+			exportf("\tfunc (%v) %v %v { %v }\n", Tconv(getthisx(f.Type).Type, obj.FmtSharp), Sconv(f.Sym, obj.FmtShort|obj.FmtByte|obj.FmtSharp), Tconv(f.Type, obj.FmtShort|obj.FmtSharp), Hconv(f.Type.Nname.Func.Inl, obj.FmtSharp))
 			reexportdeplist(f.Type.Nname.Func.Inl)
 		} else {
-			fmt.Fprintf(bout, "\tfunc (%v) %v %v\n", Tconv(getthisx(f.Type).Type, obj.FmtSharp), Sconv(f.Sym, obj.FmtShort|obj.FmtByte|obj.FmtSharp), Tconv(f.Type, obj.FmtShort|obj.FmtSharp))
+			exportf("\tfunc (%v) %v %v\n", Tconv(getthisx(f.Type).Type, obj.FmtSharp), Sconv(f.Sym, obj.FmtShort|obj.FmtByte|obj.FmtSharp), Tconv(f.Type, obj.FmtShort|obj.FmtSharp))
 		}
 	}
 }
@@ -341,44 +356,86 @@
 }
 
 func dumpexport() {
-	lno := lineno
-
 	if buildid != "" {
-		fmt.Fprintf(bout, "build id %q\n", buildid)
+		exportf("build id %q\n", buildid)
 	}
-	fmt.Fprintf(bout, "\n$$\npackage %s", localpkg.Name)
-	if safemode != 0 {
-		fmt.Fprintf(bout, " safe")
-	}
-	fmt.Fprintf(bout, "\n")
 
-	for _, p := range pkgs {
-		if p.Direct {
-			dumppkg(p)
+	size := 0 // size of export section without enclosing markers
+	if forceNewExport || newexport != 0 {
+		// binary export
+		// The linker also looks for the $$ marker - use char after $$ to distinguish format.
+		exportf("\n$$B\n")        // indicate binary format
+		const verifyExport = true // enable to check format changes
+		if verifyExport {
+			// save a copy of the export data
+			var copy bytes.Buffer
+			bcopy := obj.Binitw(&copy)
+			size = Export(bcopy, Debug_export != 0)
+			bcopy.Flush() // flushing to bytes.Buffer cannot fail
+			if n, err := bout.Write(copy.Bytes()); n != size || err != nil {
+				Fatalf("error writing export data: got %d bytes, want %d bytes, err = %v", n, size, err)
+			}
+			// export data must contain no '$' so that we can find the end by searching for "$$"
+			if bytes.IndexByte(copy.Bytes(), '$') >= 0 {
+				Fatalf("export data contains $")
+			}
+
+			// verify that we can read the copied export data back in
+			// (use empty package map to avoid collisions)
+			savedPkgMap := pkgMap
+			savedPkgs := pkgs
+			pkgMap = make(map[string]*Pkg)
+			pkgs = nil
+			importpkg = mkpkg("")
+			Import(obj.Binitr(&copy)) // must not die
+			importpkg = nil
+			pkgs = savedPkgs
+			pkgMap = savedPkgMap
+		} else {
+			size = Export(bout, Debug_export != 0)
 		}
+		exportf("\n$$\n")
+	} else {
+		// textual export
+		lno := lineno
+
+		exportf("\n$$\n") // indicate textual format
+		exportsize = 0
+		exportf("package %s", localpkg.Name)
+		if safemode != 0 {
+			exportf(" safe")
+		}
+		exportf("\n")
+
+		for _, p := range pkgs {
+			if p.Direct {
+				dumppkg(p)
+			}
+		}
+
+		// exportlist grows during iteration - cannot use range
+		for len(exportlist) > 0 {
+			n := exportlist[0]
+			exportlist = exportlist[1:]
+			lineno = n.Lineno
+			dumpsym(n.Sym)
+		}
+
+		size = exportsize
+		exportf("\n$$\n")
+		lineno = lno
 	}
 
-	// exportlist grows during iteration - cannot use range
-	for len(exportlist) > 0 {
-		n := exportlist[0]
-		exportlist = exportlist[1:]
-		lineno = n.Lineno
-		dumpsym(n.Sym)
+	if Debug_export != 0 {
+		fmt.Printf("export data size = %d bytes\n", size)
 	}
-
-	fmt.Fprintf(bout, "\n$$\n")
-	lineno = lno
 }
 
-/*
- * import
- */
+// import
 
-/*
- * return the sym for ss, which should match lexical
- */
-func importsym(s *Sym, op int) *Sym {
-	if s.Def != nil && int(s.Def.Op) != op {
+// return the sym for ss, which should match lexical
+func importsym(s *Sym, op Op) *Sym {
+	if s.Def != nil && s.Def.Op != op {
 		pkgstr := fmt.Sprintf("during import %q", importpkg.Path)
 		redeclare(s, pkgstr)
 	}
@@ -395,9 +452,7 @@
 	return s
 }
 
-/*
- * return the type pkg.name, forward declaring if needed
- */
+// return the type pkg.name, forward declaring if needed
 func pkgtype(s *Sym) *Type {
 	importsym(s, OTYPE)
 	if s.Def == nil || s.Def.Op != OTYPE {
diff --git a/src/cmd/compile/internal/gc/fmt.go b/src/cmd/compile/internal/gc/fmt.go
index 4eb989e..8e0a0e8 100644
--- a/src/cmd/compile/internal/gc/fmt.go
+++ b/src/cmd/compile/internal/gc/fmt.go
@@ -47,9 +47,9 @@
 //		Flags: those of %N
 //			','  separate items with ',' instead of ';'
 //
-//   In mparith1.c:
-//      %B Mpint*	Big integers
-//	%F Mpflt*	Big floats
+//   In mparith2.go and mparith3.go:
+//		%B Mpint*	Big integers
+//		%F Mpflt*	Big floats
 //
 //   %S, %T and %N obey use the following flags to set the format mode:
 const (
@@ -401,8 +401,8 @@
 }
 
 // Fmt "%E": etype
-func Econv(et int, flag int) string {
-	if et >= 0 && et < len(etnames) && etnames[et] != "" {
+func Econv(et EType) string {
+	if int(et) < len(etnames) && etnames[et] != "" {
 		return etnames[et]
 	}
 	return fmt.Sprintf("E-%d", et)
@@ -537,7 +537,7 @@
 
 	if fmtmode == FDbg {
 		fmtmode = 0
-		str := Econv(int(t.Etype), 0) + "-" + typefmt(t, flag)
+		str := Econv(t.Etype) + "-" + typefmt(t, flag)
 		fmtmode = FDbg
 		return str
 	}
@@ -714,7 +714,7 @@
 				}
 			} else if fmtmode == FExp {
 				// TODO(rsc) this breaks on the eliding of unused arguments in the backend
-				// when this is fixed, the special case in dcl.c checkarglist can go.
+				// when this is fixed, the special case in dcl.go checkarglist can go.
 				//if(t->funarg)
 				//	fmtstrcpy(fp, "_ ");
 				//else
@@ -756,15 +756,15 @@
 	}
 
 	if fmtmode == FExp {
-		Fatalf("missing %v case during export", Econv(int(t.Etype), 0))
+		Fatalf("missing %v case during export", Econv(t.Etype))
 	}
 
 	// Don't know how to handle - fall back to detailed prints.
-	return fmt.Sprintf("%v <%v> %v", Econv(int(t.Etype), 0), t.Sym, t.Type)
+	return fmt.Sprintf("%v <%v> %v", Econv(t.Etype), t.Sym, t.Type)
 }
 
 // Statements which may be rendered with a simplestmt as init.
-func stmtwithinit(op int) bool {
+func stmtwithinit(op Op) bool {
 	switch op {
 	case OIF, OFOR, OSWITCH:
 		return true
@@ -782,13 +782,13 @@
 	// block starting with the init statements.
 
 	// if we can just say "for" n->ninit; ... then do so
-	simpleinit := n.Ninit != nil && n.Ninit.Next == nil && n.Ninit.N.Ninit == nil && stmtwithinit(int(n.Op))
+	simpleinit := n.Ninit != nil && n.Ninit.Next == nil && n.Ninit.N.Ninit == nil && stmtwithinit(n.Op)
 
 	// otherwise, print the inits as separate statements
 	complexinit := n.Ninit != nil && !simpleinit && (fmtmode != FErr)
 
 	// but if it was for if/for/switch, put in an extra surrounding block to limit the scope
-	extrablock := complexinit && stmtwithinit(int(n.Op))
+	extrablock := complexinit && stmtwithinit(n.Op)
 
 	if extrablock {
 		f += "{"
@@ -817,7 +817,7 @@
 			f += Nconv(n.Right, 0)
 		}
 
-		// Don't export "v = <N>" initializing statements, hope they're always
+	// Don't export "v = <N>" initializing statements, hope they're always
 	// preceded by the DCL which will be re-parsed and typecheck to reproduce
 	// the "v = <N>" again.
 	case OAS, OASWB:
@@ -833,7 +833,7 @@
 
 	case OASOP:
 		if n.Implicit {
-			if n.Etype == OADD {
+			if Op(n.Etype) == OADD {
 				f += fmt.Sprintf("%v++", n.Left)
 			} else {
 				f += fmt.Sprintf("%v--", n.Left)
@@ -1128,7 +1128,7 @@
 
 		return Vconv(n.Val(), 0)
 
-		// Special case: name used as local variable in export.
+	// Special case: name used as local variable in export.
 	// _ becomes ~b%d internally; print as _ for export
 	case ONAME:
 		if (fmtmode == FExp || fmtmode == FErr) && n.Sym != nil && n.Sym.Name[0] == '~' && n.Sym.Name[1] == 'b' {
@@ -1150,7 +1150,6 @@
 		}
 		fallthrough
 
-		//fallthrough
 	case OPACK, ONONAME:
 		return Sconv(n.Sym, 0)
 
@@ -1444,6 +1443,7 @@
 	case OCMPSTR, OCMPIFACE:
 		var f string
 		f += exprfmt(n.Left, nprec)
+		// TODO(marvin): Fix Node.EType type union.
 		f += fmt.Sprintf(" %v ", Oconv(int(n.Etype), obj.FmtSharp))
 		f += exprfmt(n.Right, nprec+1)
 		return f
diff --git a/src/cmd/compile/internal/gc/gen.go b/src/cmd/compile/internal/gc/gen.go
index 5d24515..c9208d9 100644
--- a/src/cmd/compile/internal/gc/gen.go
+++ b/src/cmd/compile/internal/gc/gen.go
@@ -9,10 +9,8 @@
 	"fmt"
 )
 
-/*
- * portable half of code generator.
- * mainly statements and control flow.
- */
+// portable half of code generator.
+// mainly statements and control flow.
 var labellist *Label
 
 var lastlabel *Label
@@ -213,18 +211,14 @@
 	return nil
 }
 
-/*
- * compile statements
- */
+// compile statements
 func Genlist(l *NodeList) {
 	for ; l != nil; l = l.Next {
 		gen(l.N)
 	}
 }
 
-/*
- * generate code to start new proc running call n.
- */
+// generate code to start new proc running call n.
 func cgen_proc(n *Node, proc int) {
 	switch n.Left.Op {
 	default:
@@ -241,11 +235,9 @@
 	}
 }
 
-/*
- * generate declaration.
- * have to allocate heap copy
- * for escaped variables.
- */
+// generate declaration.
+// have to allocate heap copy
+// for escaped variables.
 func cgen_dcl(n *Node) {
 	if Debug['g'] != 0 {
 		Dump("\ncgen-dcl", n)
@@ -267,9 +259,7 @@
 	Cgen_as(n.Name.Heapaddr, prealloc[n])
 }
 
-/*
- * generate discard of value
- */
+// generate discard of value
 func cgen_discard(nr *Node) {
 	if nr == nil {
 		return
@@ -324,9 +314,7 @@
 	}
 }
 
-/*
- * clearslim generates code to zero a slim node.
- */
+// clearslim generates code to zero a slim node.
 func Clearslim(n *Node) {
 	var z Node
 	z.Op = OLITERAL
@@ -369,17 +357,13 @@
 	Cgen(&z, n)
 }
 
-/*
- * generate:
- *	res = iface{typ, data}
- * n->left is typ
- * n->right is data
- */
+// generate:
+//	res = iface{typ, data}
+// n->left is typ
+// n->right is data
 func Cgen_eface(n *Node, res *Node) {
-	/*
-	 * the right node of an eface may contain function calls that uses res as an argument,
-	 * so it's important that it is done first
-	 */
+	// the right node of an eface may contain function calls that uses res as an argument,
+	// so it's important that it is done first
 
 	tmp := temp(Types[Tptr])
 	Cgen(n.Right, tmp)
@@ -395,13 +379,11 @@
 	Cgen(n.Left, &dst)
 }
 
-/*
- * generate one of:
- *	res, resok = x.(T)
- *	res = x.(T) (when resok == nil)
- * n.Left is x
- * n.Type is T
- */
+// generate one of:
+//	res, resok = x.(T)
+//	res = x.(T) (when resok == nil)
+// n.Left is x
+// n.Type is T
 func cgen_dottype(n *Node, res, resok *Node, wb bool) {
 	if Debug_typeassert > 0 {
 		Warn("type assertion inlined")
@@ -487,12 +469,10 @@
 	}
 }
 
-/*
- * generate:
- *	res, resok = x.(T)
- * n.Left is x
- * n.Type is T
- */
+// generate:
+//	res, resok = x.(T)
+// n.Left is x
+// n.Type is T
 func Cgen_As2dottype(n, res, resok *Node) {
 	if Debug_typeassert > 0 {
 		Warn("type assertion inlined")
@@ -551,11 +531,9 @@
 	Patch(q, Pc)
 }
 
-/*
- * gather series of offsets
- * >=0 is direct addressed field
- * <0 is pointer to next field (+1)
- */
+// gather series of offsets
+// >=0 is direct addressed field
+// <0 is pointer to next field (+1)
 func Dotoffset(n *Node, oary []int64, nn **Node) int {
 	var i int
 
@@ -604,9 +582,7 @@
 	return i
 }
 
-/*
- * make a new off the books
- */
+// make a new off the books
 func Tempname(nn *Node, t *Type) {
 	if Curfn == nil {
 		Fatalf("no curfn for tempname")
@@ -1038,7 +1014,7 @@
 	numPtr := 0
 	visitComponents(nl.Type, 0, func(t *Type, offset int64) bool {
 		n++
-		if int(Simtype[t.Etype]) == Tptr && t != itable {
+		if Simtype[t.Etype] == Tptr && t != itable {
 			numPtr++
 		}
 		return n <= maxMoves && (!wb || numPtr <= 1)
@@ -1155,7 +1131,7 @@
 		ptrOffset int64
 	)
 	visitComponents(nl.Type, 0, func(t *Type, offset int64) bool {
-		if wb && int(Simtype[t.Etype]) == Tptr && t != itable {
+		if wb && Simtype[t.Etype] == Tptr && t != itable {
 			if ptrType != nil {
 				Fatalf("componentgen_wb %v", Tconv(nl.Type, 0))
 			}
diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go
index 7b73380..513b234 100644
--- a/src/cmd/compile/internal/gc/go.go
+++ b/src/cmd/compile/internal/gc/go.go
@@ -10,8 +10,6 @@
 	"cmd/internal/obj"
 )
 
-// avoid <ctype.h>
-
 // The parser's maximum stack size.
 // We have to use a #define macro here since yacc
 // or bison will check for its definition and use
@@ -95,7 +93,7 @@
 
 type NilVal struct{}
 
-func (v Val) Ctype() int {
+func (v Val) Ctype() Ctype {
 	switch x := v.U.(type) {
 	default:
 		Fatalf("unexpected Ctype for %T", v.U)
@@ -153,7 +151,7 @@
 }
 
 type Type struct {
-	Etype       uint8
+	Etype       EType
 	Nointerface bool
 	Noalg       bool
 	Chan        uint8
@@ -260,6 +258,8 @@
 	T     *Type
 }
 
+type EType uint8
+
 const (
 	Txxx = iota
 
@@ -288,7 +288,7 @@
 
 	TFUNC
 	TARRAY
-	T_old_DARRAY
+	T_old_DARRAY // Doesn't seem to be used in existing code. Used now for Isddd export (see bexport.go). TODO(gri) rename.
 	TSTRUCT
 	TCHAN
 	TMAP
@@ -312,8 +312,11 @@
 	NTYPE
 )
 
+// Ctype describes the constant kind of an "ideal" (untyped) constant.
+type Ctype int8
+
 const (
-	CTxxx = iota
+	CTxxx Ctype = iota
 
 	CTINT
 	CTRUNE
@@ -325,27 +328,31 @@
 )
 
 const (
-	/* types of channel */
-	/* must match ../../pkg/nreflect/type.go:/Chandir */
+	// types of channel
+	// must match ../../pkg/nreflect/type.go:/Chandir
 	Cxxx  = 0
 	Crecv = 1 << 0
 	Csend = 1 << 1
 	Cboth = Crecv | Csend
 )
 
-// declaration context
+// The Class of a variable/function describes the "storage class"
+// of a variable or function. During parsing, storage classes are
+// called declaration contexts.
+type Class uint8
+
 const (
-	Pxxx      = uint8(iota)
-	PEXTERN   // global variable
-	PAUTO     // local variables
-	PPARAM    // input arguments
-	PPARAMOUT // output results
-	PPARAMREF // closure variable reference
-	PFUNC     // global function
+	Pxxx      Class = iota
+	PEXTERN         // global variable
+	PAUTO           // local variables
+	PPARAM          // input arguments
+	PPARAMOUT       // output results
+	PPARAMREF       // closure variable reference
+	PFUNC           // global function
 
 	PDISCARD // discard during parse of duplicate import
 
-	PHEAP = uint8(1 << 7) // an extra bit to identify an escaped variable
+	PHEAP = 1 << 7 // an extra bit to identify an escaped variable
 )
 
 const (
@@ -364,8 +371,8 @@
 
 type Typedef struct {
 	Name   string
-	Etype  int
-	Sameas int
+	Etype  EType
+	Sameas EType
 }
 
 type Sig struct {
@@ -399,10 +406,8 @@
 	dir  string
 }
 
-/*
- * argument passing to/from
- * smagic and umagic
- */
+// argument passing to/from
+// smagic and umagic
 type Magic struct {
 	W   int // input for both - width
 	S   int // output for both - shift
@@ -418,17 +423,15 @@
 	Ua int    // output - adder
 }
 
-/*
- * note this is the runtime representation
- * of the compilers arrays.
- *
- * typedef	struct
- * {				// must not move anything
- *	uchar	array[8];	// pointer to data
- *	uchar	nel[4];		// number of elements
- *	uchar	cap[4];		// allocated number of elements
- * } Array;
- */
+// note this is the runtime representation
+// of the compilers arrays.
+//
+// typedef	struct
+// {					// must not move anything
+// 	uchar	array[8];	// pointer to data
+// 	uchar	nel[4];		// number of elements
+// 	uchar	cap[4];		// allocated number of elements
+// } Array;
 var Array_array int // runtime offsetof(Array,array) - same for String
 
 var Array_nel int // runtime offsetof(Array,nel) - same for String
@@ -437,16 +440,14 @@
 
 var sizeof_Array int // runtime sizeof(Array)
 
-/*
- * note this is the runtime representation
- * of the compilers strings.
- *
- * typedef	struct
- * {				// must not move anything
- *	uchar	array[8];	// pointer to data
- *	uchar	nel[4];		// number of elements
- * } String;
- */
+// note this is the runtime representation
+// of the compilers strings.
+//
+// typedef	struct
+// {					// must not move anything
+// 	uchar	array[8];	// pointer to data
+// 	uchar	nel[4];		// number of elements
+// } String;
 var sizeof_String int // runtime sizeof(String)
 
 var dotlist [10]Dlist // size is max depth of embeddeds
@@ -483,8 +484,7 @@
 
 var lexbuf bytes.Buffer
 var strbuf bytes.Buffer
-
-var litbuf string
+var litbuf string // LLITERAL value for use in syntax error messages
 
 var Debug [256]int
 
@@ -511,6 +511,8 @@
 
 var racepkg *Pkg // package runtime/race
 
+var msanpkg *Pkg // package runtime/msan
+
 var typepkg *Pkg // fake package for runtime type info (headers)
 
 var typelinkpkg *Pkg // fake package for runtime type info (data)
@@ -521,7 +523,7 @@
 
 var trackpkg *Pkg // fake package for field tracking
 
-var Tptr int // either TPTR32 or TPTR64
+var Tptr EType // either TPTR32 or TPTR64
 
 var myimportpath string
 
@@ -543,7 +545,7 @@
 
 var errortype *Type
 
-var Simtype [NTYPE]uint8
+var Simtype [NTYPE]EType
 
 var (
 	Isptr     [NTYPE]bool
@@ -591,7 +593,7 @@
 
 var funcsyms []*Node
 
-var dclcontext uint8 // PEXTERN/PAUTO
+var dclcontext Class // PEXTERN/PAUTO
 
 var incannedimport int
 
@@ -625,7 +627,7 @@
 
 var Widthreg int
 
-var typesw *Node
+var typesw *Node // TODO(gri) remove when yacc-based parser is gone
 
 var nblank *Node
 
@@ -645,15 +647,23 @@
 
 var flag_race int
 
+var flag_msan int
+
 var flag_largemodel int
 
+// Whether we are adding any sort of code instrumentation, such as
+// when the race detector is enabled.
+var instrumenting bool
+
 // Pending annotations for next func declaration.
 var (
-	noescape       bool
-	nosplit        bool
-	nowritebarrier bool
-	systemstack    bool
-	norace         bool
+	noescape          bool
+	noinline          bool
+	norace            bool
+	nosplit           bool
+	nowritebarrier    bool
+	nowritebarrierrec bool
+	systemstack       bool
 )
 
 var debuglive int
@@ -706,9 +716,7 @@
 	Rpo []*Flow
 }
 
-/*
- *	interface to back end
- */
+// interface to back end
 
 const (
 	// Pseudo-op, like TEXT, GLOBL, TYPE, PCDATA, FUNCDATA.
@@ -786,14 +794,14 @@
 	Bgen_float   func(*Node, bool, int, *obj.Prog) // optional
 	Cgen64       func(*Node, *Node)                // only on 32-bit systems
 	Cgenindex    func(*Node, *Node, bool) *obj.Prog
-	Cgen_bmul    func(int, *Node, *Node, *Node) bool
+	Cgen_bmul    func(Op, *Node, *Node, *Node) bool
 	Cgen_float   func(*Node, *Node) // optional
 	Cgen_hmul    func(*Node, *Node, *Node)
-	Cgen_shift   func(int, bool, *Node, *Node, *Node)
+	Cgen_shift   func(Op, bool, *Node, *Node, *Node)
 	Clearfat     func(*Node)
-	Cmp64        func(*Node, *Node, int, int, *obj.Prog) // only on 32-bit systems
+	Cmp64        func(*Node, *Node, Op, int, *obj.Prog) // only on 32-bit systems
 	Defframe     func(*obj.Prog)
-	Dodiv        func(int, *Node, *Node, *Node)
+	Dodiv        func(Op, *Node, *Node, *Node)
 	Excise       func(*Flow)
 	Expandchecks func(*obj.Prog)
 	Getg         func(*Node)
@@ -809,7 +817,7 @@
 	// function calls needed during the evaluation, and on 32-bit systems
 	// the values are guaranteed not to be 64-bit values, so no in-memory
 	// temporaries are necessary.
-	Ginscmp func(op int, t *Type, n1, n2 *Node, likely int) *obj.Prog
+	Ginscmp func(op Op, t *Type, n1, n2 *Node, likely int) *obj.Prog
 
 	// Ginsboolval inserts instructions to convert the result
 	// of a just-completed comparison to a boolean value.
@@ -838,7 +846,7 @@
 	FtoB         func(int) uint64
 	BtoR         func(uint64) int
 	BtoF         func(uint64) int
-	Optoas       func(int, *Type) int
+	Optoas       func(Op, *Type) int
 	Doregbits    func(int) uint64
 	Regnames     func(*int) []string
 	Use387       bool // should 8g use 387 FP instructions instead of sse2.
diff --git a/src/cmd/compile/internal/gc/go.y b/src/cmd/compile/internal/gc/go.y
index 599449f..65e8d2d 100644
--- a/src/cmd/compile/internal/gc/go.y
+++ b/src/cmd/compile/internal/gc/go.y
@@ -254,6 +254,7 @@
 			break;
 		}
 		if my.Name == "init" {
+			lineno = int32($1)
 			Yyerror("cannot import package as init - init must be a func");
 			break;
 		}
@@ -315,7 +316,9 @@
 		} else if importpkg.Name != $2.Name {
 			Yyerror("conflicting names %s and %s for package %q", importpkg.Name, $2.Name, importpkg.Path);
 		}
-		importpkg.Direct = true;
+		if incannedimport == 0 {
+			importpkg.Direct = true;
+		}
 		importpkg.Safe = curio.importsafe
 
 		if safemode != 0 && !curio.importsafe {
@@ -487,7 +490,7 @@
 |	expr LASOP expr
 	{
 		$$ = Nod(OASOP, $1, $3);
-		$$.Etype = uint8($2);			// rathole to pass opcode
+		$$.Etype = EType($2);			// rathole to pass opcode
 	}
 |	expr_list '=' expr_list
 	{
@@ -510,7 +513,7 @@
 			}
 			if $1.Next != nil {
 				Yyerror("argument count mismatch: %d = %d", count($1), 1);
-			} else if ($1.N.Op != ONAME && $1.N.Op != OTYPE && $1.N.Op != ONONAME) || isblank($1.N) {
+			} else if ($1.N.Op != ONAME && $1.N.Op != OTYPE && $1.N.Op != ONONAME && ($1.N.Op != OLITERAL || $1.N.Name == nil)) || isblank($1.N) {
 				Yyerror("invalid variable name %s in type switch", $1.N);
 			} else {
 				$$.Left = dclname($1.N.Sym);
@@ -523,13 +526,15 @@
 	{
 		$$ = Nod(OASOP, $1, Nodintconst(1));
 		$$.Implicit = true;
-		$$.Etype = OADD;
+		// TODO(marvin): Fix Node.EType type union.
+		$$.Etype = EType(OADD);
 	}
 |	expr LDEC
 	{
 		$$ = Nod(OASOP, $1, Nodintconst(1));
 		$$.Implicit = true;
-		$$.Etype = OSUB;
+		// TODO(marvin): Fix Node.EType type union.
+		$$.Etype = EType(OSUB);
 	}
 
 case:
@@ -1392,7 +1397,9 @@
 		$$.Noescape = noescape;
 		$$.Func.Norace = norace;
 		$$.Func.Nosplit = nosplit;
+		$$.Func.Noinline = noinline;
 		$$.Func.Nowritebarrier = nowritebarrier;
+		$$.Func.Nowritebarrierrec = nowritebarrierrec;
 		$$.Func.Systemstack = systemstack;
 		funcbody($$);
 	}
@@ -1578,11 +1585,13 @@
 		if nsyntaxerrors == 0 {
 			testdclstack();
 		}
-		nointerface = false
 		noescape = false
+		noinline = false
+		nointerface = false
 		norace = false
 		nosplit = false
 		nowritebarrier = false
+		nowritebarrierrec = false
 		systemstack = false
 	}
 
diff --git a/src/cmd/compile/internal/gc/gsubr.go b/src/cmd/compile/internal/gc/gsubr.go
index b52e14d..f17a701 100644
--- a/src/cmd/compile/internal/gc/gsubr.go
+++ b/src/cmd/compile/internal/gc/gsubr.go
@@ -43,9 +43,7 @@
 
 var dpc *obj.Prog
 
-/*
- * Is this node a memory operand?
- */
+// Is this node a memory operand?
 func Ismem(n *Node) bool {
 	switch n.Op {
 	case OITAB,
@@ -85,7 +83,7 @@
 	p := Prog(as)
 	p.To.Type = obj.TYPE_BRANCH
 	p.To.Val = nil
-	if as != obj.AJMP && likely != 0 && Thearch.Thechar != '9' && Thearch.Thechar != '7' {
+	if as != obj.AJMP && likely != 0 && Thearch.Thechar != '9' && Thearch.Thechar != '7' && Thearch.Thechar != '0' {
 		p.From.Type = obj.TYPE_CONST
 		if likely > 0 {
 			p.From.Offset = 1
@@ -337,7 +335,7 @@
 		// n->left is PHEAP ONAME for stack parameter.
 	// compute address of actual parameter on stack.
 	case OPARAM:
-		a.Etype = Simtype[n.Left.Type.Etype]
+		a.Etype = uint8(Simtype[n.Left.Type.Etype])
 
 		a.Width = n.Left.Type.Width
 		a.Offset = n.Xoffset
@@ -362,7 +360,7 @@
 	case ONAME:
 		a.Etype = 0
 		if n.Type != nil {
-			a.Etype = Simtype[n.Type.Etype]
+			a.Etype = uint8(Simtype[n.Type.Etype])
 		}
 		a.Offset = n.Xoffset
 		s := n.Sym
@@ -406,6 +404,17 @@
 
 		a.Sym = Linksym(s)
 
+	case ODOT:
+		// A special case to make write barriers more efficient.
+		// Taking the address of the first field of a named struct
+		// is the same as taking the address of the struct.
+		if n.Left.Type.Etype != TSTRUCT || n.Left.Type.Type.Sym != n.Right.Sym {
+			Debug['h'] = 1
+			Dump("naddr", n)
+			Fatalf("naddr: bad %v %v", Oconv(int(n.Op), 0), Ctxt.Dconv(a))
+		}
+		Naddr(a, n.Left)
+
 	case OLITERAL:
 		if Thearch.Thechar == '8' {
 			a.Width = 0
@@ -440,7 +449,7 @@
 	case OADDR:
 		Naddr(a, n.Left)
 		a.Etype = uint8(Tptr)
-		if Thearch.Thechar != '5' && Thearch.Thechar != '7' && Thearch.Thechar != '9' { // TODO(rsc): Do this even for arm, ppc64.
+		if Thearch.Thechar != '0' && Thearch.Thechar != '5' && Thearch.Thechar != '7' && Thearch.Thechar != '9' { // TODO(rsc): Do this even for arm, ppc64.
 			a.Width = int64(Widthptr)
 		}
 		if a.Type != obj.TYPE_MEM {
@@ -466,7 +475,7 @@
 		if a.Type == obj.TYPE_CONST && a.Offset == 0 {
 			break // ptr(nil)
 		}
-		a.Etype = Simtype[Tptr]
+		a.Etype = uint8(Simtype[Tptr])
 		a.Offset += int64(Array_array)
 		a.Width = int64(Widthptr)
 
@@ -477,7 +486,7 @@
 		if a.Type == obj.TYPE_CONST && a.Offset == 0 {
 			break // len(nil)
 		}
-		a.Etype = Simtype[TUINT]
+		a.Etype = uint8(Simtype[TUINT])
 		a.Offset += int64(Array_nel)
 		if Thearch.Thechar != '5' { // TODO(rsc): Do this even on arm.
 			a.Width = int64(Widthint)
@@ -490,7 +499,7 @@
 		if a.Type == obj.TYPE_CONST && a.Offset == 0 {
 			break // cap(nil)
 		}
-		a.Etype = Simtype[TUINT]
+		a.Etype = uint8(Simtype[TUINT])
 		a.Offset += int64(Array_cap)
 		if Thearch.Thechar != '5' { // TODO(rsc): Do this even on arm.
 			a.Width = int64(Widthint)
@@ -675,16 +684,14 @@
 	return n > len(Thearch.ReservedRegs)
 }
 
-/*
- * allocate register of type t, leave in n.
- * if o != N, o may be reusable register.
- * caller must Regfree(n).
- */
+// allocate register of type t, leave in n.
+// if o != N, o may be reusable register.
+// caller must Regfree(n).
 func Regalloc(n *Node, t *Type, o *Node) {
 	if t == nil {
 		Fatalf("regalloc: t nil")
 	}
-	et := int(Simtype[t.Etype])
+	et := Simtype[t.Etype]
 	if Ctxt.Arch.Regsize == 4 && (et == TINT64 || et == TUINT64) {
 		Fatalf("regalloc 64bit")
 	}
diff --git a/src/cmd/compile/internal/gc/init.go b/src/cmd/compile/internal/gc/init.go
index 5fbc82d..6071ab4 100644
--- a/src/cmd/compile/internal/gc/init.go
+++ b/src/cmd/compile/internal/gc/init.go
@@ -17,13 +17,11 @@
 //		a->offset += v;
 //		break;
 
-/*
- * a function named init is a special case.
- * it is called by the initialization before
- * main is run. to make it unique within a
- * package and also uncallable, the name,
- * normally "pkg.init", is altered to "pkg.init.1".
- */
+// a function named init is a special case.
+// it is called by the initialization before
+// main is run. to make it unique within a
+// package and also uncallable, the name,
+// normally "pkg.init", is altered to "pkg.init.1".
 
 var renameinit_initgen int
 
@@ -32,24 +30,22 @@
 	return Lookupf("init.%d", renameinit_initgen)
 }
 
-/*
- * hand-craft the following initialization code
- *	var initdone· uint8 				(1)
- *	func init()					(2)
- *		if initdone· != 0 {			(3)
- *			if initdone· == 2		(4)
- *				return
- *			throw();			(5)
- *		}
- *		initdone· = 1;				(6)
- *		// over all matching imported symbols
- *			<pkg>.init()			(7)
- *		{ <init stmts> }			(8)
- *		init.<n>() // if any			(9)
- *		initdone· = 2;				(10)
- *		return					(11)
- *	}
- */
+// hand-craft the following initialization code
+//	var initdone· uint8 				(1)
+//	func init()					(2)
+//		if initdone· != 0 {			(3)
+//			if initdone· == 2		(4)
+//				return
+//			throw();			(5)
+//		}
+//		initdone· = 1;				(6)
+//		// over all matching imported symbols
+//			<pkg>.init()			(7)
+//		{ <init stmts> }			(8)
+//		init.<n>() // if any			(9)
+//		initdone· = 2;				(10)
+//		return					(11)
+//	}
 func anyinit(n *NodeList) bool {
 	// are there any interesting init statements
 	for l := n; l != nil; l = l.Next {
diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go
index 1f9b473..64afd67 100644
--- a/src/cmd/compile/internal/gc/inl.go
+++ b/src/cmd/compile/internal/gc/inl.go
@@ -106,6 +106,11 @@
 		Fatalf("caninl no nname %v", Nconv(fn, obj.FmtSign))
 	}
 
+	// If marked "go:noinline", don't inline
+	if fn.Func.Noinline {
+		return
+	}
+
 	// If fn has no body (is defined outside of Go), cannot inline it.
 	if fn.Nbody == nil {
 		return
@@ -124,13 +129,13 @@
 		}
 	}
 
-	// Runtime package must not be race instrumented.
-	// Racewalk skips runtime package. However, some runtime code can be
+	// Runtime package must not be instrumented.
+	// Instrument skips runtime package. However, some runtime code can be
 	// inlined into other packages and instrumented there. To avoid this,
-	// we disable inlining of runtime functions in race mode.
+	// we disable inlining of runtime functions when instrumenting.
 	// The example that we observed is inlining of LockOSThread,
 	// which lead to false race reports on m contents.
-	if flag_race != 0 && myimportpath == "runtime" {
+	if instrumenting && myimportpath == "runtime" {
 		return
 	}
 
@@ -345,7 +350,8 @@
 	case ODEFER, OPROC:
 		switch n.Left.Op {
 		case OCALLFUNC, OCALLMETH:
-			n.Left.Etype = n.Op
+			// TODO(marvin): Fix Node.EType type union.
+			n.Left.Etype = EType(n.Op)
 		}
 		fallthrough
 
@@ -445,7 +451,8 @@
 	// switch at the top of this function.
 	switch n.Op {
 	case OCALLFUNC, OCALLMETH:
-		if n.Etype == OPROC || n.Etype == ODEFER {
+		// TODO(marvin): Fix Node.EType type union.
+		if n.Etype == EType(OPROC) || n.Etype == EType(ODEFER) {
 			return
 		}
 	}
diff --git a/src/cmd/compile/internal/gc/lex.go b/src/cmd/compile/internal/gc/lex.go
index 340e37f..1f468c1 100644
--- a/src/cmd/compile/internal/gc/lex.go
+++ b/src/cmd/compile/internal/gc/lex.go
@@ -58,6 +58,7 @@
 	{"slice", &Debug_slice},           // print information about slice compilation
 	{"typeassert", &Debug_typeassert}, // print information about type assertion inlining
 	{"wb", &Debug_wb},                 // print information about write barriers
+	{"export", &Debug_export},         // print export data
 }
 
 const (
@@ -200,6 +201,9 @@
 	obj.Flagcount("l", "disable inlining", &Debug['l'])
 	obj.Flagcount("live", "debug liveness analysis", &debuglive)
 	obj.Flagcount("m", "print optimization decisions", &Debug['m'])
+	obj.Flagcount("msan", "build code compatible with C/C++ memory sanitizer", &flag_msan)
+	obj.Flagcount("newexport", "use new export format", &newexport) // TODO(gri) remove eventually (issue 13241)
+	obj.Flagcount("oldparser", "use old parser", &oldparser)        // TODO(gri) remove eventually (issue 13240)
 	obj.Flagcount("nolocalimports", "reject local (relative) imports", &nolocalimports)
 	obj.Flagstr("o", "write output to `file`", &outfile)
 	obj.Flagstr("p", "set expected package import `path`", &myimportpath)
@@ -217,11 +221,15 @@
 	obj.Flagcount("y", "debug declarations in canned imports (with -d)", &Debug['y'])
 	var flag_shared int
 	var flag_dynlink bool
-	if Thearch.Thechar == '6' || Thearch.Thechar == '5' {
+	switch Thearch.Thechar {
+	case '5', '6', '7', '9':
 		obj.Flagcount("shared", "generate code that can be linked into a shared library", &flag_shared)
 	}
 	if Thearch.Thechar == '6' {
 		obj.Flagcount("largemodel", "generate code that assumes a large memory model", &flag_largemodel)
+	}
+	switch Thearch.Thechar {
+	case '5', '6', '7', '8', '9':
 		flag.BoolVar(&flag_dynlink, "dynlink", false, "support references to Go symbols defined in other shared libraries")
 	}
 	obj.Flagstr("cpuprofile", "write cpu profile to `file`", &cpuprofile)
@@ -248,6 +256,15 @@
 		racepkg = mkpkg("runtime/race")
 		racepkg.Name = "race"
 	}
+	if flag_msan != 0 {
+		msanpkg = mkpkg("runtime/msan")
+		msanpkg.Name = "msan"
+	}
+	if flag_race != 0 && flag_msan != 0 {
+		log.Fatal("can not use both -race and -msan")
+	} else if flag_race != 0 || flag_msan != 0 {
+		instrumenting = true
+	}
 
 	// parse -d argument
 	if debugstr != "" {
@@ -301,7 +318,19 @@
 	lexlineno = 1
 	const BOM = 0xFEFF
 
+	// Uncomment the line below to temporarily switch the compiler back
+	// to the yacc-based parser. Short-term work-around for issues with
+	// the new recursive-descent parser for which setting -oldparser is
+	// not sufficient.
+	// TODO(gri) remove this eventually (issue 13240)
+	//
+	// oldparser = 1
+
 	for _, infile = range flag.Args() {
+		if trace && Debug['x'] != 0 && oldparser == 0 {
+			fmt.Printf("--- %s ---\n", infile)
+		}
+
 		linehistpush(infile)
 
 		curio.infile = infile
@@ -463,6 +492,10 @@
 		fninit(xtop)
 	}
 
+	if compiling_runtime != 0 {
+		checknowritebarrierrec()
+	}
+
 	// Phase 9: Check external declarations.
 	for i, n := range externdcl {
 		if n.Op == ONAME {
@@ -521,7 +554,7 @@
 }
 
 func skiptopkgdef(b *obj.Biobuf) bool {
-	/* archive header */
+	// archive header
 	p := obj.Brdline(b, '\n')
 	if p == "" {
 		return false
@@ -533,7 +566,7 @@
 		return false
 	}
 
-	/* symbol table may be first; skip it */
+	// symbol table may be first; skip it
 	sz := arsize(b, "__.GOSYMDEF")
 
 	if sz >= 0 {
@@ -542,7 +575,7 @@
 		obj.Bseek(b, 8, 0)
 	}
 
-	/* package export block is next */
+	// package export block is next
 	sz = arsize(b, "__.PKGDEF")
 
 	if sz <= 0 {
@@ -620,6 +653,9 @@
 		} else if flag_race != 0 {
 			suffixsep = "_"
 			suffix = "race"
+		} else if flag_msan != 0 {
+			suffixsep = "_"
+			suffix = "msan"
 		}
 
 		file = fmt.Sprintf("%s/pkg/%s_%s%s%s/%s.a", goroot, goos, goarch, suffixsep, suffix, name)
@@ -640,6 +676,7 @@
 	cannedimports("fake.o", "$$\n")
 }
 
+// TODO(gri) line argument doesn't appear to be used
 func importfile(f *Val, line int) {
 	if _, ok := f.U.(string); !ok {
 		Yyerror("import statement not a string")
@@ -771,42 +808,69 @@
 	// so don't record the full path.
 	linehistpragma(file[len(file)-len(path_)-2:]) // acts as #pragma lib
 
-	/*
-	 * position the input right
-	 * after $$ and return
-	 */
-	pushedio = curio
+	// In the importfile, if we find:
+	// $$\n  (old format): position the input right after $$\n and return
+	// $$B\n (new format): import directly, then feed the lexer a dummy statement
 
-	curio.bin = imp
-	curio.peekc = 0
-	curio.peekc1 = 0
-	curio.infile = file
-	curio.nlsemi = false
-	typecheckok = true
-
+	// look for $$
+	var c int
 	for {
-		c := getc()
-		if c == EOF {
+		c = obj.Bgetc(imp)
+		if c < 0 {
 			break
 		}
-		if c != '$' {
-			continue
+		if c == '$' {
+			c = obj.Bgetc(imp)
+			if c == '$' || c < 0 {
+				break
+			}
 		}
-		c = getc()
-		if c == EOF {
-			break
-		}
-		if c != '$' {
-			continue
-		}
-		return
 	}
 
-	Yyerror("no import in %q", f.U.(string))
-	unimportfile()
+	// get character after $$
+	if c >= 0 {
+		c = obj.Bgetc(imp)
+	}
+
+	switch c {
+	case '\n':
+		// old export format
+		pushedio = curio
+
+		curio.bin = imp
+		curio.peekc = 0
+		curio.peekc1 = 0
+		curio.infile = file
+		curio.nlsemi = false
+		typecheckok = true
+
+		if oldparser == 0 {
+			push_parser()
+		}
+
+	case 'B':
+		// new export format
+		obj.Bgetc(imp) // skip \n after $$B
+		Import(imp)
+
+		// continue as if the package was imported before (see above)
+		tag := ""
+		if importpkg.Safe {
+			tag = "safe"
+		}
+		p := fmt.Sprintf("package %s %s\n$$\n", importpkg.Name, tag)
+		cannedimports(file, p)
+
+	default:
+		Yyerror("no import in %q", f.U.(string))
+	}
 }
 
 func unimportfile() {
+	if oldparser == 0 {
+		pop_parser()
+	}
+
 	if curio.bin != nil {
 		obj.Bterm(curio.bin)
 		curio.bin = nil
@@ -836,6 +900,10 @@
 
 	typecheckok = true
 	incannedimport = 1
+
+	if oldparser == 0 {
+		push_parser()
+	}
 }
 
 func isSpace(c int) bool {
@@ -909,10 +977,10 @@
 		goto l0
 	}
 
-	lineno = lexlineno /* start of token */
+	lineno = lexlineno // start of token
 
 	if c >= utf8.RuneSelf {
-		/* all multibyte runes are alpha */
+		// all multibyte runes are alpha
 		cp = &lexbuf
 		cp.Reset()
 
@@ -1036,7 +1104,7 @@
 			c1 = '.'
 		}
 
-		/* "..." */
+		// "..."
 	case '"':
 		lexbuf.Reset()
 		lexbuf.WriteString(`"<string>"`)
@@ -1057,7 +1125,7 @@
 
 		goto strlit
 
-		/* `...` */
+		// `...`
 	case '`':
 		lexbuf.Reset()
 		lexbuf.WriteString("`<string>`")
@@ -1083,7 +1151,7 @@
 
 		goto strlit
 
-		/* '.' */
+		// '.'
 	case '\'':
 		if escchar('\'', &escflag, &v) {
 			Yyerror("empty character literal or unescaped ' in character literal")
@@ -1148,14 +1216,14 @@
 		}
 
 		if c1 == '=' {
-			c = ODIV
+			c = int(ODIV)
 			goto asop
 		}
 
 	case ':':
 		c1 = getc()
 		if c1 == '=' {
-			c = LCOLAS
+			c = int(LCOLAS)
 			yylval.i = int(lexlineno)
 			goto lx
 		}
@@ -1163,48 +1231,48 @@
 	case '*':
 		c1 = getc()
 		if c1 == '=' {
-			c = OMUL
+			c = int(OMUL)
 			goto asop
 		}
 
 	case '%':
 		c1 = getc()
 		if c1 == '=' {
-			c = OMOD
+			c = int(OMOD)
 			goto asop
 		}
 
 	case '+':
 		c1 = getc()
 		if c1 == '+' {
-			c = LINC
+			c = int(LINC)
 			goto lx
 		}
 
 		if c1 == '=' {
-			c = OADD
+			c = int(OADD)
 			goto asop
 		}
 
 	case '-':
 		c1 = getc()
 		if c1 == '-' {
-			c = LDEC
+			c = int(LDEC)
 			goto lx
 		}
 
 		if c1 == '=' {
-			c = OSUB
+			c = int(OSUB)
 			goto asop
 		}
 
 	case '>':
 		c1 = getc()
 		if c1 == '>' {
-			c = LRSH
+			c = int(LRSH)
 			c1 = getc()
 			if c1 == '=' {
-				c = ORSH
+				c = int(ORSH)
 				goto asop
 			}
 
@@ -1212,19 +1280,19 @@
 		}
 
 		if c1 == '=' {
-			c = LGE
+			c = int(LGE)
 			goto lx
 		}
 
-		c = LGT
+		c = int(LGT)
 
 	case '<':
 		c1 = getc()
 		if c1 == '<' {
-			c = LLSH
+			c = int(LLSH)
 			c1 = getc()
 			if c1 == '=' {
-				c = OLSH
+				c = int(OLSH)
 				goto asop
 			}
 
@@ -1232,43 +1300,43 @@
 		}
 
 		if c1 == '=' {
-			c = LLE
+			c = int(LLE)
 			goto lx
 		}
 
 		if c1 == '-' {
-			c = LCOMM
+			c = int(LCOMM)
 			goto lx
 		}
 
-		c = LLT
+		c = int(LLT)
 
 	case '=':
 		c1 = getc()
 		if c1 == '=' {
-			c = LEQ
+			c = int(LEQ)
 			goto lx
 		}
 
 	case '!':
 		c1 = getc()
 		if c1 == '=' {
-			c = LNE
+			c = int(LNE)
 			goto lx
 		}
 
 	case '&':
 		c1 = getc()
 		if c1 == '&' {
-			c = LANDAND
+			c = int(LANDAND)
 			goto lx
 		}
 
 		if c1 == '^' {
-			c = LANDNOT
+			c = int(LANDNOT)
 			c1 = getc()
 			if c1 == '=' {
-				c = OANDNOT
+				c = int(OANDNOT)
 				goto asop
 			}
 
@@ -1276,49 +1344,49 @@
 		}
 
 		if c1 == '=' {
-			c = OAND
+			c = int(OAND)
 			goto asop
 		}
 
 	case '|':
 		c1 = getc()
 		if c1 == '|' {
-			c = LOROR
+			c = int(LOROR)
 			goto lx
 		}
 
 		if c1 == '=' {
-			c = OOR
+			c = int(OOR)
 			goto asop
 		}
 
 	case '^':
 		c1 = getc()
 		if c1 == '=' {
-			c = OXOR
+			c = int(OXOR)
 			goto asop
 		}
 
-		/*
-		 * clumsy dance:
-		 * to implement rule that disallows
-		 *	if T{1}[0] { ... }
-		 * but allows
-		 * 	if (T{1}[0]) { ... }
-		 * the block bodies for if/for/switch/select
-		 * begin with an LBODY token, not '{'.
-		 *
-		 * when we see the keyword, the next
-		 * non-parenthesized '{' becomes an LBODY.
-		 * loophack is normally false.
-		 * a keyword sets it to true.
-		 * parens push loophack onto a stack and go back to false.
-		 * a '{' with loophack == true becomes LBODY and disables loophack.
-		 *
-		 * i said it was clumsy.
-		 */
+		// clumsy dance:
+		// to implement rule that disallows
+		//	if T{1}[0] { ... }
+		// but allows
+		// 	if (T{1}[0]) { ... }
+		// the block bodies for if/for/switch/select
+		// begin with an LBODY token, not '{'.
+		//
+		// when we see the keyword, the next
+		// non-parenthesized '{' becomes an LBODY.
+		// loophack is normally false.
+		// a keyword sets it to true.
+		// parens push loophack onto a stack and go back to false.
+		// a '{' with loophack == true becomes LBODY and disables loophack.
+		//
+		// I said it was clumsy.
+		//
+		// We only need the loophack when running with -oldparser.
 	case '(', '[':
-		if loophack || _yylex_lstk != nil {
+		if oldparser != 0 && (loophack || _yylex_lstk != nil) {
 			h = new(Loophack)
 			if h == nil {
 				Flusherrors()
@@ -1335,7 +1403,7 @@
 		goto lx
 
 	case ')', ']':
-		if _yylex_lstk != nil {
+		if oldparser != 0 && _yylex_lstk != nil {
 			h = _yylex_lstk
 			loophack = h.v
 			_yylex_lstk = h.next
@@ -1344,7 +1412,7 @@
 		goto lx
 
 	case '{':
-		if loophack {
+		if oldparser != 0 && loophack {
 			if Debug['x'] != 0 {
 				fmt.Printf("%v lex: LBODY\n", Ctxt.Line(int(lexlineno)))
 			}
@@ -1389,10 +1457,8 @@
 	}
 	return LASOP
 
-	/*
-	 * cp is set to lexbuf and some
-	 * prefix has been stored
-	 */
+	// cp is set to lexbuf and some
+	// prefix has been stored
 talph:
 	for {
 		if c >= utf8.RuneSelf {
@@ -1403,6 +1469,9 @@
 			if !unicode.IsLetter(r) && !unicode.IsDigit(r) && (importpkg == nil || r != 0xb7) {
 				Yyerror("invalid identifier character U+%04x", r)
 			}
+			if cp.Len() == 0 && unicode.IsDigit(r) {
+				Yyerror("identifier cannot begin with digit U+%04x", r)
+			}
 			cp.WriteRune(r)
 		} else if !isAlnum(c) && c != '_' {
 			break
@@ -1421,7 +1490,9 @@
 		goto l0
 
 	case LFOR, LIF, LSWITCH, LSELECT:
-		loophack = true // see comment about loophack above
+		if oldparser != 0 {
+			loophack = true // see comment about loophack above
+		}
 	}
 
 	if Debug['x'] != 0 {
@@ -1500,7 +1571,7 @@
 	mpatoflt(&yylval.val.U.(*Mpcplx).Imag, str)
 	if yylval.val.U.(*Mpcplx).Imag.Val.IsInf() {
 		Yyerror("overflow in imaginary constant")
-		Mpmovecflt(&yylval.val.U.(*Mpcplx).Real, 0.0)
+		Mpmovecflt(&yylval.val.U.(*Mpcplx).Imag, 0.0)
 	}
 
 	if Debug['x'] != 0 {
@@ -1557,12 +1628,10 @@
 	return p != ""
 }
 
-/*
- * read and interpret syntax that looks like
- * //line parse.y:15
- * as a discontinuity in sequential line numbers.
- * the next line of input comes from parse.y:15
- */
+// read and interpret syntax that looks like
+// //line parse.y:15
+// as a discontinuity in sequential line numbers.
+// the next line of input comes from parse.y:15
 func getlinepragma() int {
 	var cmd, verb, name string
 
@@ -1629,6 +1698,11 @@
 			return c
 		}
 
+		if verb == "go:noinline" {
+			noinline = true
+			return c
+		}
+
 		if verb == "go:systemstack" {
 			if compiling_runtime == 0 {
 				Yyerror("//go:systemstack only allowed in runtime")
@@ -1644,6 +1718,15 @@
 			nowritebarrier = true
 			return c
 		}
+
+		if verb == "go:nowritebarrierrec" {
+			if compiling_runtime == 0 {
+				Yyerror("//go:nowritebarrierrec only allowed in runtime")
+			}
+			nowritebarrierrec = true
+			nowritebarrier = true // Implies nowritebarrier
+			return c
+		}
 		return c
 	}
 	if c != 'l' {
@@ -1851,13 +1934,18 @@
 	Yyerror("%s", msg)
 }
 
+var oldparser int // if set, theparser is used (otherwise we use the recursive-descent parser)
 var theparser yyParser
 var parsing bool
 
 func yyparse() {
-	theparser = yyNewParser()
 	parsing = true
-	theparser.Parse(yy{})
+	if oldparser != 0 {
+		theparser = yyNewParser()
+		theparser.Parse(yy{})
+	} else {
+		parse_file()
+	}
 	parsing = false
 }
 
@@ -2121,10 +2209,10 @@
 var syms = []struct {
 	name    string
 	lexical int
-	etype   int
-	op      int
+	etype   EType
+	op      Op
 }{
-	/* basic types */
+	// basic types
 	{"int8", LNAME, TINT8, OXXX},
 	{"int16", LNAME, TINT16, OXXX},
 	{"int32", LNAME, TINT32, OXXX},
@@ -2195,7 +2283,7 @@
 		s1.Lexical = uint16(lex)
 
 		if etype := s.etype; etype != Txxx {
-			if etype < 0 || etype >= len(Types) {
+			if int(etype) >= len(Types) {
 				Fatalf("lexinit: %s bad etype", s.name)
 			}
 			s2 := Pkglookup(s.name, builtinpkg)
@@ -2216,12 +2304,13 @@
 			continue
 		}
 
+		// TODO(marvin): Fix Node.EType type union.
 		if etype := s.op; etype != OXXX {
 			s2 := Pkglookup(s.name, builtinpkg)
 			s2.Lexical = LNAME
 			s2.Def = Nod(ONAME, nil, nil)
 			s2.Def.Sym = s2
-			s2.Def.Etype = uint8(etype)
+			s2.Def.Etype = EType(etype)
 		}
 	}
 
@@ -2330,38 +2419,34 @@
 }
 
 func lexfini() {
-	var s *Sym
-	var lex int
-	var etype int
-	var i int
-
-	for i = 0; i < len(syms); i++ {
-		lex = syms[i].lexical
+	for i := range syms {
+		lex := syms[i].lexical
 		if lex != LNAME {
 			continue
 		}
-		s = Lookup(syms[i].name)
+		s := Lookup(syms[i].name)
 		s.Lexical = uint16(lex)
 
-		etype = syms[i].etype
+		etype := syms[i].etype
 		if etype != Txxx && (etype != TANY || Debug['A'] != 0) && s.Def == nil {
 			s.Def = typenod(Types[etype])
 			s.Def.Name = new(Name)
 			s.Origpkg = builtinpkg
 		}
 
-		etype = syms[i].op
-		if etype != OXXX && s.Def == nil {
+		// TODO(marvin): Fix Node.EType type union.
+		etype = EType(syms[i].op)
+		if etype != EType(OXXX) && s.Def == nil {
 			s.Def = Nod(ONAME, nil, nil)
 			s.Def.Sym = s
-			s.Def.Etype = uint8(etype)
+			s.Def.Etype = etype
 			s.Origpkg = builtinpkg
 		}
 	}
 
 	// backend-specific builtin types (e.g. int).
-	for i = range Thearch.Typedefs {
-		s = Lookup(Thearch.Typedefs[i].Name)
+	for i := range Thearch.Typedefs {
+		s := Lookup(Thearch.Typedefs[i].Name)
 		if s.Def == nil {
 			s.Def = typenod(Types[Thearch.Typedefs[i].Etype])
 			s.Def.Name = new(Name)
@@ -2371,30 +2456,25 @@
 
 	// there's only so much table-driven we can handle.
 	// these are special cases.
-	s = Lookup("byte")
-
-	if s.Def == nil {
+	if s := Lookup("byte"); s.Def == nil {
 		s.Def = typenod(bytetype)
 		s.Def.Name = new(Name)
 		s.Origpkg = builtinpkg
 	}
 
-	s = Lookup("error")
-	if s.Def == nil {
+	if s := Lookup("error"); s.Def == nil {
 		s.Def = typenod(errortype)
 		s.Def.Name = new(Name)
 		s.Origpkg = builtinpkg
 	}
 
-	s = Lookup("rune")
-	if s.Def == nil {
+	if s := Lookup("rune"); s.Def == nil {
 		s.Def = typenod(runetype)
 		s.Def.Name = new(Name)
 		s.Origpkg = builtinpkg
 	}
 
-	s = Lookup("nil")
-	if s.Def == nil {
+	if s := Lookup("nil"); s.Def == nil {
 		var v Val
 		v.U = new(NilVal)
 		s.Def = nodlit(v)
@@ -2403,23 +2483,20 @@
 		s.Origpkg = builtinpkg
 	}
 
-	s = Lookup("iota")
-	if s.Def == nil {
+	if s := Lookup("iota"); s.Def == nil {
 		s.Def = Nod(OIOTA, nil, nil)
 		s.Def.Sym = s
 		s.Origpkg = builtinpkg
 	}
 
-	s = Lookup("true")
-	if s.Def == nil {
+	if s := Lookup("true"); s.Def == nil {
 		s.Def = Nodbool(true)
 		s.Def.Sym = s
 		s.Def.Name = new(Name)
 		s.Origpkg = builtinpkg
 	}
 
-	s = Lookup("false")
-	if s.Def == nil {
+	if s := Lookup("false"); s.Def == nil {
 		s.Def = Nodbool(false)
 		s.Def.Sym = s
 		s.Def.Name = new(Name)
diff --git a/src/cmd/compile/internal/gc/mkbuiltin.go b/src/cmd/compile/internal/gc/mkbuiltin.go
index ea3877f..b1e4458 100644
--- a/src/cmd/compile/internal/gc/mkbuiltin.go
+++ b/src/cmd/compile/internal/gc/mkbuiltin.go
@@ -4,8 +4,9 @@
 
 // +build ignore
 
-// Generate builtin.go from $* (runtime.go and unsafe.go).
-// Run this after changing runtime.go and unsafe.go
+// Generate builtin.go from builtin/runtime.go and builtin/unsafe.go
+// (passed as arguments on the command line by a go:generate comment).
+// Run this after changing builtin/runtime.go and builtin/unsafe.go
 // or after changing the export metadata format in the compiler.
 // Either way, you need to have a working compiler binary first.
 package main
diff --git a/src/cmd/compile/internal/gc/mparith3.go b/src/cmd/compile/internal/gc/mparith3.go
index f91a64b..889c461 100644
--- a/src/cmd/compile/internal/gc/mparith3.go
+++ b/src/cmd/compile/internal/gc/mparith3.go
@@ -113,7 +113,7 @@
 		Yyerror("mpgetflt ovf")
 	}
 
-	return x
+	return x + 0 // avoid -0 (should not be needed, but be conservative)
 }
 
 func mpgetflt32(a *Mpflt) float64 {
@@ -125,7 +125,7 @@
 		Yyerror("mpgetflt32 ovf")
 	}
 
-	return x
+	return x + 0 // avoid -0 (should not be needed, but be conservative)
 }
 
 func Mpmovecflt(a *Mpflt, c float64) {
@@ -133,6 +133,10 @@
 		fmt.Printf("\nconst %g", c)
 	}
 
+	// convert -0 to 0
+	if c == 0 {
+		c = 0
+	}
 	a.Val.SetFloat64(c)
 
 	if Mpdebug {
@@ -141,7 +145,10 @@
 }
 
 func mpnegflt(a *Mpflt) {
-	a.Val.Neg(&a.Val)
+	// avoid -0
+	if a.Val.Sign() != 0 {
+		a.Val.Neg(&a.Val)
+	}
 }
 
 //
@@ -163,15 +170,20 @@
 		// - decimal point and binary point in constant
 		// TODO(gri) use different conversion function or check separately
 		Yyerror("malformed constant: %s", as)
-		a.Val.SetUint64(0)
+		a.Val.SetFloat64(0)
 		return
 	}
 
 	if f.IsInf() {
 		Yyerror("constant too large: %s", as)
-		a.Val.SetUint64(0)
+		a.Val.SetFloat64(0)
 		return
 	}
+
+	// -0 becomes 0
+	if f.Sign() == 0 && f.Signbit() {
+		a.Val.SetFloat64(0)
+	}
 }
 
 func (f *Mpflt) String() string {
@@ -188,13 +200,18 @@
 	// determine sign
 	f := &fvp.Val
 	var sign string
-	if fvp.Val.Signbit() {
+	if f.Sign() < 0 {
 		sign = "-"
 		f = new(big.Float).Abs(f)
 	} else if flag&obj.FmtSign != 0 {
 		sign = "+"
 	}
 
+	// Don't try to convert infinities (will not terminate).
+	if f.IsInf() {
+		return sign + "Inf"
+	}
+
 	// Use fmt formatting if in float64 range (common case).
 	if x, _ := f.Float64(); !math.IsInf(x, 0) {
 		return fmt.Sprintf("%s%.6g", sign, x)
diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go
index 9d35dfd..66549be 100644
--- a/src/cmd/compile/internal/gc/obj.go
+++ b/src/cmd/compile/internal/gc/obj.go
@@ -10,9 +10,7 @@
 	"strconv"
 )
 
-/*
- * architecture-independent object file output
- */
+// architecture-independent object file output
 const (
 	ArhdrSize = 60
 )
@@ -279,7 +277,7 @@
 	a.Sym = Linksym(symdata)
 	a.Node = symdata.Def
 	a.Offset = 0
-	a.Etype = Simtype[TINT]
+	a.Etype = uint8(Simtype[TINT])
 }
 
 func datagostring(sval string, a *obj.Addr) {
@@ -289,7 +287,7 @@
 	a.Sym = Linksym(symhdr)
 	a.Node = symhdr.Def
 	a.Offset = 0
-	a.Etype = TSTRING
+	a.Etype = uint8(TSTRING)
 }
 
 func dgostringptr(s *Sym, off int, str string) int {
@@ -314,7 +312,7 @@
 	p.From3.Offset = int64(Widthptr)
 	datagostring(*lit, &p.To)
 	p.To.Type = obj.TYPE_ADDR
-	p.To.Etype = Simtype[TINT]
+	p.To.Etype = uint8(Simtype[TINT])
 	off += Widthptr
 
 	return off
@@ -375,8 +373,8 @@
 }
 
 func gdatacomplex(nam *Node, cval *Mpcplx) {
-	w := cplxsubtype(int(nam.Type.Etype))
-	w = int(Types[w].Width)
+	cst := cplxsubtype(nam.Type.Etype)
+	w := int(Types[cst].Width)
 
 	p := Thearch.Gins(obj.ADATA, nam, nil)
 	p.From3 = new(obj.Addr)
diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go
index c783d64..d01539e 100644
--- a/src/cmd/compile/internal/gc/order.go
+++ b/src/cmd/compile/internal/gc/order.go
@@ -396,7 +396,7 @@
 // contain m or k. They are usually unnecessary, but in the unnecessary
 // cases they are also typically registerizable, so not much harm done.
 // And this only applies to the multiple-assignment form.
-// We could do a more precise analysis if needed, like in walk.c.
+// We could do a more precise analysis if needed, like in walk.go.
 //
 // Ordermapassign also inserts these temporaries if needed for
 // calling writebarrierfat with a pointer to n->right.
@@ -408,7 +408,7 @@
 	case OAS:
 		order.out = list(order.out, n)
 
-		// We call writebarrierfat only for values > 4 pointers long. See walk.c.
+		// We call writebarrierfat only for values > 4 pointers long. See walk.go.
 		if (n.Left.Op == OINDEXMAP || (needwritebarrier(n.Left, n.Right) && n.Left.Type.Width > int64(4*Widthptr))) && !isaddrokay(n.Right) {
 			m := n.Left
 			n.Left = ordertemp(m.Type, order, false)
@@ -434,7 +434,7 @@
 				a = Nod(OAS, m, l.N)
 				typecheck(&a, Etop)
 				post = list(post, a)
-			} else if flag_race != 0 && n.Op == OAS2FUNC && !isblank(l.N) {
+			} else if instrumenting && n.Op == OAS2FUNC && !isblank(l.N) {
 				m = l.N
 				l.N = ordertemp(m.Type, order, false)
 				a = Nod(OAS, m, l.N)
@@ -509,7 +509,8 @@
 			tmp1.Etype = 0 // now an rvalue not an lvalue
 		}
 		tmp1 = ordercopyexpr(tmp1, n.Left.Type, order, 0)
-		n.Right = Nod(int(n.Etype), tmp1, n.Right)
+		// TODO(marvin): Fix Node.EType type union.
+		n.Right = Nod(Op(n.Etype), tmp1, n.Right)
 		typecheck(&n.Right, Erv)
 		orderexpr(&n.Right, order, nil)
 		n.Etype = 0
@@ -756,7 +757,7 @@
 		ordercallargs(&n.List, order)
 		order.out = list(order.out, n)
 
-		// Special: clean case temporaries in each block entry.
+	// Special: clean case temporaries in each block entry.
 	// Select must enter one of its blocks, so there is no
 	// need for a cleaning at the end.
 	// Doubly special: evaluation order for select is stricter
@@ -1093,7 +1094,7 @@
 		OREAL,
 		ORECOVER:
 		ordercall(n, order)
-		if lhs == nil || lhs.Op != ONAME || flag_race != 0 {
+		if lhs == nil || lhs.Op != ONAME || instrumenting {
 			n = ordercopyexpr(n, n.Type, order, 0)
 		}
 
@@ -1153,7 +1154,7 @@
 		// TODO(rsc): The Isfat is for consistency with componentgen and walkexpr.
 		// It needs to be removed in all three places.
 		// That would allow inlining x.(struct{*int}) the same as x.(*int).
-		if !isdirectiface(n.Type) || Isfat(n.Type) || flag_race != 0 {
+		if !isdirectiface(n.Type) || Isfat(n.Type) || instrumenting {
 			n = ordercopyexpr(n, n.Type, order, 1)
 		}
 
diff --git a/src/cmd/compile/internal/gc/parser.go b/src/cmd/compile/internal/gc/parser.go
new file mode 100644
index 0000000..c8a6826
--- /dev/null
+++ b/src/cmd/compile/internal/gc/parser.go
@@ -0,0 +1,3372 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+const trace = false // if set, parse tracing can be enabled with -x
+
+// TODO(gri) Once we handle imports w/o redirecting the underlying
+// source of the lexer we can get rid of these. They are here for
+// compatibility with the existing yacc-based parser setup (issue 13242).
+var thenewparser parser // the parser in use
+var savedstate []parser // saved parser state, used during import
+
+func push_parser() {
+	savedstate = append(savedstate, thenewparser)
+	thenewparser = parser{}
+	thenewparser.next()
+}
+
+func pop_parser() {
+	n := len(savedstate) - 1
+	thenewparser = savedstate[n]
+	savedstate = savedstate[:n]
+}
+
+func parse_file() {
+	thenewparser = parser{}
+	thenewparser.loadsys()
+	thenewparser.next()
+	thenewparser.file()
+}
+
+// This loads the definitions for the low-level runtime functions,
+// so that the compiler can generate calls to them,
+// but does not make the name "runtime" visible as a package.
+//
+// go.y:loadsys
+func (p *parser) loadsys() {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("loadsys")()
+	}
+
+	importpkg = Runtimepkg
+
+	if Debug['A'] != 0 {
+		cannedimports("runtime.Builtin", "package runtime\n\n$$\n\n")
+	} else {
+		cannedimports("runtime.Builtin", runtimeimport)
+	}
+	curio.importsafe = true
+
+	p.import_package()
+	p.import_there()
+
+	importpkg = nil
+}
+
+type parser struct {
+	tok    int32     // next token (one-token look-ahead)
+	op     Op        // valid if tok == LASOP
+	val    Val       // valid if tok == LLITERAL
+	sym_   *Sym      // valid if tok == LNAME
+	nest   int       // expression nesting level (for complit ambiguity resolution)
+	yy     yySymType // for temporary use by next
+	indent int       // tracing support
+}
+
+func (p *parser) next() {
+	p.tok = yylex(&p.yy)
+	p.op = Op(p.yy.i)
+	p.val = p.yy.val
+	p.sym_ = p.yy.sym
+}
+
+func (p *parser) got(tok int32) bool {
+	if p.tok == tok {
+		p.next()
+		return true
+	}
+	return false
+}
+
+func (p *parser) want(tok int32) {
+	if p.tok != EOF && !p.got(tok) {
+		p.syntax_error("")
+		p.advance()
+	}
+}
+
+// ----------------------------------------------------------------------------
+// Syntax error handling
+
+func (p *parser) syntax_error(msg string) {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("syntax_error (" + msg + ")")()
+	}
+
+	if p.tok == EOF && nerrors > 0 {
+		return // avoid meaningless follow-up errors
+	}
+
+	// add punctuation etc. as needed to msg
+	switch {
+	case msg == "":
+		// nothing to do
+	case strings.HasPrefix(msg, "in"), strings.HasPrefix(msg, "at"), strings.HasPrefix(msg, "after"):
+		msg = " " + msg
+	case strings.HasPrefix(msg, "expecting"):
+		msg = ", " + msg
+	default:
+		// plain error - we don't care about current token
+		Yyerror("syntax error: " + msg)
+		return
+	}
+
+	// determine token string
+	var tok string
+	switch p.tok {
+	case LLITERAL:
+		// this is also done in Yyerror but it's cleaner to do it here
+		tok = litbuf
+	case LNAME:
+		if p.sym_ != nil && p.sym_.Name != "" {
+			tok = p.sym_.Name
+		} else {
+			tok = "name"
+		}
+	case LASOP:
+		tok = goopnames[p.op] + "="
+	default:
+		tok = tokstring(p.tok)
+	}
+
+	Yyerror("syntax error: unexpected " + tok + msg)
+}
+
+// Advance consumes tokens until it finds a token of the stoplist.
+// If the stoplist is empty or no advance was necessary, the next
+// token is consumed.
+func (p *parser) advance(stoplist ...int32) {
+	if len(stoplist) == 0 {
+		p.next()
+		return
+	}
+
+	for n := 0; p.tok != EOF; n++ {
+		for _, stop := range stoplist {
+			if p.tok == stop {
+				if n == 0 {
+					p.next() // consume at least one token
+				}
+				return
+			}
+		}
+		p.next()
+	}
+}
+
+func tokstring(tok int32) string {
+	switch tok {
+	case EOF:
+		return "EOF"
+	case ',':
+		return "comma"
+	case ';':
+		return "semicolon or newline"
+	}
+	if 0 <= tok && tok < 128 {
+		// get invisibles properly backslashed
+		s := strconv.QuoteRune(tok)
+		if n := len(s); n > 0 && s[0] == '\'' && s[n-1] == '\'' {
+			s = s[1 : n-1]
+		}
+		return s
+	}
+	if s := tokstrings[tok]; s != "" {
+		return s
+	}
+	// catchall
+	return yyTokname(int(tok))
+}
+
+// TODO(gri) figure out why yyTokname doesn't work for us as expected
+// (issue 13243)
+var tokstrings = map[int32]string{
+	LLITERAL:           "LLITERAL",
+	LASOP:              "op=",
+	LCOLAS:             ":=",
+	LBREAK:             "break",
+	LCASE:              "case",
+	LCHAN:              "chan",
+	LCONST:             "const",
+	LCONTINUE:          "continue",
+	LDDD:               "...",
+	LDEFAULT:           "default",
+	LDEFER:             "defer",
+	LELSE:              "else",
+	LFALL:              "fallthrough",
+	LFOR:               "for",
+	LFUNC:              "func",
+	LGO:                "go",
+	LGOTO:              "goto",
+	LIF:                "if",
+	LIMPORT:            "import",
+	LINTERFACE:         "interface",
+	LMAP:               "map",
+	LNAME:              "LNAME",
+	LPACKAGE:           "package",
+	LRANGE:             "range",
+	LRETURN:            "return",
+	LSELECT:            "select",
+	LSTRUCT:            "struct",
+	LSWITCH:            "switch",
+	LTYPE:              "type",
+	LVAR:               "var",
+	LANDAND:            "&&",
+	LANDNOT:            "&^",
+	LBODY:              "LBODY", // we should never see this one
+	LCOMM:              "<-",
+	LDEC:               "--",
+	LEQ:                "==",
+	LGE:                ">=",
+	LGT:                ">",
+	LIGNORE:            "LIGNORE", // we should never see this one
+	LINC:               "++",
+	LLE:                "<=",
+	LLSH:               "<<",
+	LLT:                "<",
+	LNE:                "!=",
+	LOROR:              "||",
+	LRSH:               ">>",
+	NotPackage:         "NotPackage",         // we should never see this one
+	NotParen:           "NotParen",           // we should never see this one
+	PreferToRightParen: "PreferToRightParen", // we should never see this one
+}
+
+func (p *parser) print_trace(msg ...interface{}) {
+	const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
+	const n = len(dots)
+	fmt.Printf("%5d: ", lineno)
+
+	// TODO(gri) imports screw up p.indent - fix this
+	// (issue 13243)
+	if p.indent < 0 {
+		p.indent = 0
+	}
+
+	i := 2 * p.indent
+	for i > n {
+		fmt.Print(dots)
+		i -= n
+	}
+	// i <= n
+	fmt.Print(dots[0:i])
+	fmt.Println(msg...)
+}
+
+// usage: defer p.trace(msg)()
+func (p *parser) trace(msg string) func() {
+	p.print_trace(msg, "(")
+	p.indent++
+	return func() {
+		p.indent--
+		if x := recover(); x != nil {
+			panic(x) // skip print_trace
+		}
+		p.print_trace(")")
+	}
+}
+
+// ----------------------------------------------------------------------------
+// Parsing package files
+
+// go.y:file
+func (p *parser) file() {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("file")()
+	}
+
+	p.package_()
+
+	//go.y:imports
+	for p.tok == LIMPORT {
+		p.import_()
+		p.want(';')
+	}
+
+	xtop = concat(xtop, p.xdcl_list())
+}
+
+// go.y:package
+func (p *parser) package_() {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("package_")()
+	}
+
+	if p.got(LPACKAGE) {
+		mkpackage(p.sym().Name)
+		p.want(';')
+	} else {
+		p.syntax_error("package statement must be first")
+		errorexit()
+	}
+}
+
+// go.y:import
+func (p *parser) import_() {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("import_")()
+	}
+
+	p.want(LIMPORT)
+	if p.got('(') {
+		for p.tok != EOF && p.tok != ')' {
+			p.import_stmt()
+			p.osemi()
+		}
+		p.want(')')
+	} else {
+		p.import_stmt()
+	}
+}
+
+// go.y:import_stmt
+// import_stmt parses a single import spec, reads the imported
+// package's export data, and binds the package to a name in the
+// current file's scope (an OPACK node), honoring any local rename,
+// "." (dot) import, or "_" (blank) import.
+func (p *parser) import_stmt() {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("import_stmt")()
+	}
+
+	line := int32(p.import_here())
+	if p.tok == LPACKAGE {
+		p.import_package()
+		p.import_there()
+
+		ipkg := importpkg
+		my := importmyname
+		importpkg = nil
+		importmyname = nil
+
+		// No explicit local name: use the imported package's own name.
+		if my == nil {
+			my = Lookup(ipkg.Name)
+		}
+
+		pack := Nod(OPACK, nil, nil)
+		pack.Sym = my
+		pack.Name.Pkg = ipkg
+		pack.Lineno = line
+
+		if strings.HasPrefix(my.Name, ".") {
+			importdot(ipkg, pack)
+			return
+		}
+		if my.Name == "init" {
+			lineno = line
+			Yyerror("cannot import package as init - init must be a func")
+			return
+		}
+		// Blank import: imported for side effects only; no name declared.
+		if my.Name == "_" {
+			return
+		}
+		if my.Def != nil {
+			lineno = line
+			redeclare(my, "as imported package name")
+		}
+		my.Def = pack
+		my.Lastlineno = line
+		my.Block = 1 // at top level
+
+		return
+	}
+
+	p.import_there()
+	// When an invalid import path is passed to importfile,
+	// it calls Yyerror and then sets up a fake import with
+	// no package statement. This allows us to test more
+	// than one invalid import statement in a single file.
+	if nerrors == 0 {
+		Fatalf("phase error in import")
+	}
+}
+
+// go.y:import_here
+// import_here parses the optional local package name (an identifier
+// or ".") into importmyname, then the quoted import path, and starts
+// reading the package's export data via importfile. It returns the
+// source line of the import for later error reporting.
+func (p *parser) import_here() int {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("import_here")()
+	}
+
+	importmyname = nil
+	switch p.tok {
+	case LNAME, '@', '?':
+		// import with given name
+		importmyname = p.sym()
+
+	case '.':
+		// import into my name space
+		importmyname = Lookup(".")
+		p.next()
+	}
+
+	var path Val
+	if p.tok == LLITERAL {
+		path = p.val
+		p.next()
+	} else {
+		p.syntax_error("missing import path; require quoted string")
+		p.advance(';', ')')
+	}
+
+	line := parserline() // TODO(gri) check correct placement of this (issue 13243)
+	importfile(&path, line)
+	return line
+}
+
+// go.y:import_package
+// import_package parses the "package NAME [safe] ;" header at the
+// start of a package's export data, records the name and safety on
+// importpkg, and reports a conflict if the export data's package name
+// disagrees with one seen earlier for the same path.
+func (p *parser) import_package() {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("import_package")()
+	}
+
+	p.want(LPACKAGE)
+	var name string
+	if p.tok == LNAME {
+		name = p.sym_.Name
+		p.next()
+	} else {
+		p.import_error()
+	}
+
+	// go.y:import_safety
+	if p.tok == LNAME {
+		if p.sym_.Name == "safe" {
+			curio.importsafe = true
+		}
+		p.next()
+	}
+	p.want(';')
+
+	if importpkg.Name == "" {
+		importpkg.Name = name
+		numImport[name]++
+	} else if importpkg.Name != name {
+		Yyerror("conflicting names %s and %s for package %q", importpkg.Name, name, importpkg.Path)
+	}
+	if incannedimport == 0 {
+		importpkg.Direct = true
+	}
+	importpkg.Safe = curio.importsafe
+
+	if safemode != 0 && !curio.importsafe {
+		Yyerror("cannot import unsafe package %q", importpkg.Path)
+	}
+}
+
+// go.y:import_there
+// import_there reads the remainder of a package's export data (the
+// hidden import list) up to the terminating "$$" marker, with type
+// width computation deferred while reading.
+func (p *parser) import_there() {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("import_there")()
+	}
+
+	defercheckwidth()
+
+	p.hidden_import_list()
+	p.want('$')
+	// don't read past 2nd '$'
+	if p.tok != '$' {
+		p.import_error()
+	}
+
+	resumecheckwidth()
+	unimportfile()
+}
+
+// go.y:common_dcl
+// common_dcl parses a var, const, or type declaration — either a
+// single spec or a parenthesized group — dispatching each spec to the
+// appropriate parser via the dcl func value. It resets the iota and
+// lastconst state used by constant declarations before returning.
+func (p *parser) common_dcl() *NodeList {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("common_dcl")()
+	}
+
+	var dcl func() *NodeList
+	switch p.tok {
+	case LVAR:
+		dcl = p.vardcl
+
+	case LCONST:
+		iota_ = 0
+		dcl = p.constdcl
+
+	case LTYPE:
+		dcl = p.typedcl
+
+	default:
+		panic("unreachable")
+	}
+
+	p.next()
+	var l *NodeList
+	if p.got('(') {
+		for p.tok != EOF && p.tok != ')' {
+			l = concat(l, dcl())
+			p.osemi()
+		}
+		p.want(')')
+	} else {
+		l = dcl()
+	}
+
+	iota_ = -100000
+	lastconst = nil
+
+	return l
+}
+
+// go.y:vardcl
+// vardcl parses one variable spec: a name list followed by
+// "= exprs", "type", or "type = exprs". Declaration nodes are
+// built by variter.
+func (p *parser) vardcl() *NodeList {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("vardcl")()
+	}
+
+	names := p.dcl_name_list()
+	var typ *Node
+	var exprs *NodeList
+	if p.got('=') {
+		exprs = p.expr_list()
+	} else {
+		typ = p.ntype()
+		if p.got('=') {
+			exprs = p.expr_list()
+		}
+	}
+
+	return variter(names, typ, exprs)
+}
+
+// go.y:constdcl
+// constdcl parses one constant spec: a name list with optional type
+// and optional "= exprs". Both may be absent (a bare name list
+// repeats the previous spec, handled by constiter).
+func (p *parser) constdcl() *NodeList {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("constdcl")()
+	}
+
+	names := p.dcl_name_list()
+	var typ *Node
+	var exprs *NodeList
+	if p.tok != EOF && p.tok != ';' && p.tok != ')' {
+		if p.tok != '=' {
+			typ = p.ntype()
+		}
+		if p.got('=') {
+			exprs = p.expr_list()
+		}
+	}
+
+	return constiter(names, typ, exprs)
+}
+
+// go.y:typedcl
+// typedcl parses one type spec ("name type"), tolerating a missing
+// type for better error recovery.
+func (p *parser) typedcl() *NodeList {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("typedcl")()
+	}
+
+	name := typedcl0(p.sym())
+
+	// handle case where type is missing
+	var typ *Node
+	if p.tok != ';' {
+		typ = p.ntype()
+	} else {
+		p.syntax_error("in type declaration")
+		p.advance(';', ')')
+	}
+
+	return list1(typedcl1(name, typ, true))
+}
+
+// go.y:simple_stmt
+// simple_stmt parses a simple statement: a bare expression,
+// op-assignment (+= etc.), ++/--, assignment, short variable
+// declaration, labeled statement (when labelOk), or range clause
+// (when rangeOk, for use in for-statement headers).
+// may return missing_stmt if labelOk is set
+func (p *parser) simple_stmt(labelOk, rangeOk bool) *Node {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("simple_stmt")()
+	}
+
+	if rangeOk && p.got(LRANGE) {
+		// LRANGE expr
+		r := Nod(ORANGE, nil, p.expr())
+		r.Etype = 0 // := flag
+		return r
+	}
+
+	lhs := p.expr_list()
+
+	// Single expression and no assignment operator follows:
+	// this is an expression statement, inc/dec, op-assignment, or label.
+	if count(lhs) == 1 && p.tok != '=' && p.tok != LCOLAS && p.tok != LRANGE {
+		// expr
+		lhs := lhs.N
+		switch p.tok {
+		case LASOP:
+			// expr LASOP expr
+			op := p.op
+			p.next()
+			rhs := p.expr()
+
+			stmt := Nod(OASOP, lhs, rhs)
+			stmt.Etype = EType(op) // rathole to pass opcode
+			return stmt
+
+		case LINC:
+			// expr LINC
+			p.next()
+
+			stmt := Nod(OASOP, lhs, Nodintconst(1))
+			stmt.Implicit = true
+			stmt.Etype = EType(OADD)
+			return stmt
+
+		case LDEC:
+			// expr LDEC
+			p.next()
+
+			stmt := Nod(OASOP, lhs, Nodintconst(1))
+			stmt.Implicit = true
+			stmt.Etype = EType(OSUB)
+			return stmt
+
+		case ':':
+			// labelname ':' stmt
+			if labelOk {
+				// If we have a labelname, it was parsed by operand
+				// (calling p.name()) and given an ONAME, ONONAME, or OTYPE node.
+				if lhs.Op == ONAME || lhs.Op == ONONAME || lhs.Op == OTYPE {
+					lhs = newname(lhs.Sym)
+				} else {
+					p.syntax_error("expecting semicolon or newline or }")
+					// we already progressed, no need to advance
+				}
+				lhs := Nod(OLABEL, lhs, nil)
+				lhs.Sym = dclstack // context, for goto restrictions
+				p.next()           // consume ':' after making label node for correct lineno
+				return p.labeled_stmt(lhs)
+			}
+			fallthrough
+
+		default:
+			// expr
+			// These nodes do not carry line numbers.
+			// Since a bare name used as an expression is an error,
+			// introduce a wrapper node to give the correct line.
+			switch lhs.Op {
+			case ONAME, ONONAME, OTYPE, OPACK, OLITERAL:
+				lhs = Nod(OPAREN, lhs, nil)
+				lhs.Implicit = true
+			}
+			return lhs
+		}
+	}
+
+	// expr_list
+	switch p.tok {
+	case '=':
+		p.next()
+		if rangeOk && p.got(LRANGE) {
+			// expr_list '=' LRANGE expr
+			r := Nod(ORANGE, nil, p.expr())
+			r.List = lhs
+			r.Etype = 0 // := flag
+			return r
+		}
+
+		// expr_list '=' expr_list
+		rhs := p.expr_list()
+
+		if lhs.Next == nil && rhs.Next == nil {
+			// simple
+			return Nod(OAS, lhs.N, rhs.N)
+		}
+		// multiple
+		stmt := Nod(OAS2, nil, nil)
+		stmt.List = lhs
+		stmt.Rlist = rhs
+		return stmt
+
+	case LCOLAS:
+		// Save the line of the := now; p.next may advance lineno.
+		line := lineno
+		p.next()
+
+		if rangeOk && p.got(LRANGE) {
+			// expr_list LCOLAS LRANGE expr
+			r := Nod(ORANGE, nil, p.expr())
+			r.List = lhs
+			r.Colas = true
+			colasdefn(lhs, r)
+			return r
+		}
+
+		// expr_list LCOLAS expr_list
+		rhs := p.expr_list()
+
+		if rhs.N.Op == OTYPESW {
+			ts := Nod(OTYPESW, nil, rhs.N.Right)
+			if rhs.Next != nil {
+				Yyerror("expr.(type) must be alone in list")
+			}
+			if lhs.Next != nil {
+				Yyerror("argument count mismatch: %d = %d", count(lhs), 1)
+			} else if (lhs.N.Op != ONAME && lhs.N.Op != OTYPE && lhs.N.Op != ONONAME && (lhs.N.Op != OLITERAL || lhs.N.Name == nil)) || isblank(lhs.N) {
+				Yyerror("invalid variable name %s in type switch", lhs.N)
+			} else {
+				ts.Left = dclname(lhs.N.Sym)
+			} // it's a colas, so must not re-use an oldname
+			return ts
+		}
+		return colas(lhs, rhs, int32(line))
+
+	default:
+		p.syntax_error("expecting := or = or comma")
+		p.advance(';', '}')
+		return nil
+	}
+}
+
+// labeled_stmt parses the statement following a label (already built
+// as an OLABEL node by simple_stmt) and returns the label plus
+// statement as a single list statement.
+// may return missing_stmt
+func (p *parser) labeled_stmt(label *Node) *Node {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("labeled_stmt")()
+	}
+
+	var ls *Node // labeled statement
+	if p.tok != '}' && p.tok != EOF {
+		ls = p.stmt()
+		if ls == missing_stmt {
+			// report error at line of ':' token
+			saved := lexlineno
+			lexlineno = prevlineno
+			p.syntax_error("missing statement after label")
+			// we are already at the end of the labeled statement - no need to advance
+			lexlineno = saved
+			return missing_stmt
+		}
+	}
+
+	label.Name.Defn = ls
+	l := list1(label)
+	if ls != nil {
+		l = list(l, ls)
+	}
+	return liststmt(l)
+}
+
+// go.y:case
+// case_ parses a single "case" or "default" clause of a switch or
+// select statement and returns it as an OXCASE node (converted to
+// OCASE later, in casebody). tswitch, if non-nil, is the guard of the
+// enclosing type switch, whose variable is redeclared in each clause.
+// Every successful clause opens a new scope with markdcl; the
+// matching popdcl is performed by the caller (caseblock).
+func (p *parser) case_(tswitch *Node) *Node {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("case_")()
+	}
+
+	switch p.tok {
+	case LCASE:
+		p.next()
+		cases := p.expr_list() // expr_or_type_list
+		switch p.tok {
+		case ':':
+			// LCASE expr_or_type_list ':'
+
+			// will be converted to OCASE
+			// right will point to next case
+			// done in casebody()
+			markdcl()
+			stmt := Nod(OXCASE, nil, nil)
+			stmt.List = cases
+			if tswitch != nil {
+				if n := tswitch.Left; n != nil {
+					// type switch - declare variable
+					nn := newname(n.Sym)
+					declare(nn, dclcontext)
+					stmt.Rlist = list1(nn)
+
+					// keep track of the instances for reporting unused
+					nn.Name.Defn = tswitch
+				}
+			}
+
+			p.next() // consume ':' after declaring type switch var for correct lineno
+			return stmt
+
+		case '=':
+			// LCASE expr_or_type_list '=' expr ':'
+			p.next()
+			rhs := p.expr()
+
+			// will be converted to OCASE
+			// right will point to next case
+			// done in casebody()
+			markdcl()
+			stmt := Nod(OXCASE, nil, nil)
+			var n *Node
+			if cases.Next == nil {
+				n = Nod(OAS, cases.N, rhs)
+			} else {
+				n = Nod(OAS2, nil, nil)
+				n.List = cases
+				n.Rlist = list1(rhs)
+			}
+			stmt.List = list1(n)
+
+			p.want(':') // consume ':' after declaring select cases for correct lineno
+			return stmt
+
+		case LCOLAS:
+			// LCASE expr_or_type_list LCOLAS expr ':'
+			// Save the line of the := before consuming it, as
+			// simple_stmt does; colas expects a line number here.
+			// (Previously int32(p.op) — an operator code — was
+			// passed by mistake.)
+			line := lineno
+			p.next()
+			rhs := p.expr()
+
+			// will be converted to OCASE
+			// right will point to next case
+			// done in casebody()
+			markdcl()
+			stmt := Nod(OXCASE, nil, nil)
+			stmt.List = list1(colas(cases, list1(rhs), int32(line)))
+
+			p.want(':') // consume ':' after declaring select cases for correct lineno
+			return stmt
+
+		default:
+			p.syntax_error("expecting := or = or : or comma")
+			p.advance(LCASE, LDEFAULT, '}')
+			return nil
+		}
+
+	case LDEFAULT:
+		// LDEFAULT ':'
+		p.next()
+
+		markdcl()
+		stmt := Nod(OXCASE, nil, nil)
+		if tswitch != nil {
+			if n := tswitch.Left; n != nil {
+				// type switch - declare variable
+				nn := newname(n.Sym)
+				declare(nn, dclcontext)
+				stmt.Rlist = list1(nn)
+
+				// keep track of the instances for reporting unused
+				nn.Name.Defn = tswitch
+			}
+		}
+
+		p.want(':') // consume ':' after declaring type switch var for correct lineno
+		return stmt
+
+	default:
+		p.syntax_error("expecting case or default or }")
+		p.advance(LCASE, LDEFAULT, '}')
+		return nil
+	}
+}
+
+// go.y:compound_stmt
+// compound_stmt parses a braced statement block in its own scope and
+// returns it as a single statement (OEMPTY if the block is empty).
+// When else_clause is set, a missing '{' is reported as a malformed
+// else rather than a panic.
+func (p *parser) compound_stmt(else_clause bool) *Node {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("compound_stmt")()
+	}
+
+	if p.tok == '{' {
+		markdcl()
+		p.next() // consume ';' after markdcl() for correct lineno
+	} else if else_clause {
+		p.syntax_error("else must be followed by if or statement block")
+		p.advance('}')
+		return nil
+	} else {
+		panic("unreachable")
+	}
+
+	l := p.stmt_list()
+
+	var stmt *Node
+	if l == nil {
+		stmt = Nod(OEMPTY, nil, nil)
+	} else {
+		stmt = liststmt(l)
+	}
+	popdcl()
+
+	p.want('}') // TODO(gri) is this correct location w/ respect to popdcl()? (issue 13243)
+
+	return stmt
+}
+
+// go.y:caseblock
+// caseblock parses one case clause (via case_) followed by its
+// statement list, and closes the scope that case_ opened.
+func (p *parser) caseblock(tswitch *Node) *Node {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("caseblock")()
+	}
+
+	stmt := p.case_(tswitch)
+
+	// If the last token read by the lexer was consumed
+	// as part of the case, clear it (parser has cleared yychar).
+	// If the last token read by the lexer was the lookahead
+	// leave it alone (parser has it cached in yychar).
+	// This is so that the stmt_list action doesn't look at
+	// the case tokens if the stmt_list is empty.
+	//yylast = yychar;
+	stmt.Xoffset = int64(block)
+
+	stmt.Nbody = p.stmt_list()
+
+	// TODO(gri) what do we need to do here? (issue 13243)
+	// // This is the only place in the language where a statement
+	// // list is not allowed to drop the final semicolon, because
+	// // it's the only place where a statement list is not followed
+	// // by a closing brace.  Handle the error for pedantry.
+
+	// // Find the final token of the statement list.
+	// // yylast is lookahead; yyprev is last of stmt_list
+	// last := yyprev;
+
+	// if last > 0 && last != ';' && yychar != '}' {
+	// 	Yyerror("missing statement after label");
+	// }
+
+	// popdcl matches the markdcl done inside case_.
+	popdcl()
+
+	return stmt
+}
+
+// go.y:caseblock_list
+// caseblock_list parses the braced list of case clauses of a switch
+// or select statement.
+func (p *parser) caseblock_list(tswitch *Node) (l *NodeList) {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("caseblock_list")()
+	}
+
+	if !p.got('{') {
+		p.syntax_error("missing { after switch clause")
+		p.advance('}')
+		return nil
+	}
+
+	for p.tok != '}' {
+		l = list(l, p.caseblock(tswitch))
+	}
+	p.want('}')
+	return
+}
+
+// go.y:loop_body
+// loop_body parses the braced body of a for or if statement in its
+// own scope; context names the construct for error messages.
+func (p *parser) loop_body(context string) *NodeList {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("loop_body")()
+	}
+
+	if p.tok == '{' {
+		markdcl()
+		p.next() // consume ';' after markdcl() for correct lineno
+	} else {
+		p.syntax_error("missing { after " + context)
+		p.advance('}')
+		return nil
+	}
+
+	body := p.stmt_list()
+	popdcl()
+	p.want('}')
+
+	return body
+}
+
+// go.y:for_header
+// for_header parses the header of a for statement and returns an
+// OFOR node (or an ORANGE node for range clauses, which header
+// returns via cond).
+func (p *parser) for_header() *Node {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("for_header")()
+	}
+
+	init, cond, post := p.header(true)
+
+	if init != nil || post != nil {
+		// init ; test ; incr
+		if post != nil && post.Colas {
+			Yyerror("cannot declare in the for-increment")
+		}
+		h := Nod(OFOR, nil, nil)
+		if init != nil {
+			h.Ninit = list1(init)
+		}
+		h.Left = cond
+		h.Right = post
+		return h
+	}
+
+	if cond != nil && cond.Op == ORANGE {
+		// range_stmt - handled by pexpr
+		return cond
+	}
+
+	// normal test
+	h := Nod(OFOR, nil, nil)
+	h.Left = cond
+	return h
+}
+
+// go.y:for_body
+// for_body parses a for statement's header and body and attaches the
+// body to the header node.
+func (p *parser) for_body() *Node {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("for_body")()
+	}
+
+	stmt := p.for_header()
+	body := p.loop_body("for clause")
+
+	stmt.Nbody = concat(stmt.Nbody, body)
+	return stmt
+}
+
+// go.y:for_stmt
+// for_stmt parses a complete for statement; the header and body share
+// one scope, opened here and closed after the body.
+func (p *parser) for_stmt() *Node {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("for_stmt")()
+	}
+
+	p.want(LFOR)
+	markdcl()
+	body := p.for_body()
+	popdcl()
+
+	return body
+}
+
+// header parses the control clause shared by if, for, and switch
+// statements and returns its init, cond, and post parts (post is only
+// produced when for_stmt is set). p.nest is forced negative so that
+// composite literals T{...} are not consumed at the top level of the
+// header expressions (see the complit check in pexpr).
+func (p *parser) header(for_stmt bool) (init, cond, post *Node) {
+	if p.tok == '{' {
+		return
+	}
+
+	nest := p.nest
+	p.nest = -1
+
+	if p.tok != ';' {
+		// accept potential vardcl but complain
+		// (for test/syntax/forvar.go)
+		if for_stmt && p.tok == LVAR {
+			Yyerror("var declaration not allowed in for initializer")
+			p.next()
+		}
+		init = p.simple_stmt(false, for_stmt)
+		// If we have a range clause, we are done.
+		if for_stmt && init.Op == ORANGE {
+			cond = init
+			init = nil
+
+			p.nest = nest
+			return
+		}
+	}
+	if p.got(';') {
+		if for_stmt {
+			if p.tok != ';' {
+				cond = p.simple_stmt(false, false)
+			}
+			p.want(';')
+			if p.tok != '{' {
+				post = p.simple_stmt(false, false)
+			}
+		} else if p.tok != '{' {
+			cond = p.simple_stmt(false, false)
+		}
+	} else {
+		// No semicolon: the single simple statement is the condition.
+		cond = init
+		init = nil
+	}
+
+	p.nest = nest
+
+	return
+}
+
+// go.y:if_header
+// if_header parses the header of an if statement and returns an OIF
+// node with its init list and condition set.
+func (p *parser) if_header() *Node {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("if_header")()
+	}
+
+	init, cond, _ := p.header(false)
+	h := Nod(OIF, nil, nil)
+	h.Ninit = list1(init)
+	h.Left = cond
+	return h
+}
+
+// go.y:if_stmt
+// if_stmt parses an if statement with its else-if chain and optional
+// else, linking each else part into the previous if's Rlist. Each
+// elseif left a scope open (see elseif); the matching popdcls happen
+// in the loop below.
+func (p *parser) if_stmt() *Node {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("if_stmt")()
+	}
+
+	p.want(LIF)
+
+	markdcl()
+
+	stmt := p.if_header()
+	if stmt.Left == nil {
+		Yyerror("missing condition in if statement")
+	}
+
+	stmt.Nbody = p.loop_body("if clause")
+
+	l := p.elseif_list_else()
+
+	n := stmt
+	popdcl()
+	for nn := l; nn != nil; nn = nn.Next {
+		if nn.N.Op == OIF {
+			popdcl()
+		}
+		n.Rlist = list1(nn.N)
+		n = nn.N
+	}
+
+	return stmt
+}
+
+// go.y:elsif
+// elseif parses one "else if" branch (LELSE LIF already consumed by
+// the caller). It opens a scope with markdcl but does not close it;
+// if_stmt pops one scope per OIF node in the result list.
+func (p *parser) elseif() *NodeList {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("elseif")()
+	}
+
+	// LELSE LIF already consumed
+	markdcl()
+
+	stmt := p.if_header()
+	if stmt.Left == nil {
+		Yyerror("missing condition in if statement")
+	}
+
+	stmt.Nbody = p.loop_body("if clause")
+
+	return list1(stmt)
+}
+
+// go.y:elsif_list
+// go.y:else
+// elseif_list_else parses the chain of "else if" branches and the
+// optional final "else" block, returning them as a flat list.
+func (p *parser) elseif_list_else() (l *NodeList) {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("elseif_list_else")()
+	}
+
+	for p.got(LELSE) {
+		if p.got(LIF) {
+			l = concat(l, p.elseif())
+		} else {
+			l = concat(l, p.else_())
+			break
+		}
+	}
+
+	return l
+}
+
+// go.y:else
+// else_ parses a final else block and returns it as a one-element
+// list.
+func (p *parser) else_() *NodeList {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("else")()
+	}
+
+	l := &NodeList{N: p.compound_stmt(true)}
+	l.End = l
+	return l
+
+}
+
+// go.y:switch_stmt
+// switch_stmt parses a switch statement, reusing if_header for the
+// init/condition part and converting the node to OSWITCH. A guard of
+// the form "x := v.(type)" (OTYPESW) is passed down so each case can
+// redeclare the type switch variable.
+func (p *parser) switch_stmt() *Node {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("switch_stmt")()
+	}
+
+	p.want(LSWITCH)
+	markdcl()
+
+	hdr := p.if_header()
+	hdr.Op = OSWITCH
+
+	tswitch := hdr.Left
+	if tswitch != nil && tswitch.Op != OTYPESW {
+		tswitch = nil
+	}
+
+	hdr.List = p.caseblock_list(tswitch)
+	popdcl()
+
+	return hdr
+}
+
+// go.y:select_stmt
+// select_stmt parses a select statement: just a caseblock list, with
+// no header and no type switch guard.
+func (p *parser) select_stmt() *Node {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("select_stmt")()
+	}
+
+	p.want(LSELECT)
+	hdr := Nod(OSELECT, nil, nil)
+	hdr.List = p.caseblock_list(nil)
+	return hdr
+}
+
+// prectab maps a binary operator token to its precedence (larger
+// binds tighter) and the corresponding Op node code; a zero prec
+// means the token is not a binary operator.
+//
+// TODO(gri) should have lexer return this info - no need for separate lookup
+// (issue 13244)
+var prectab = map[int32]struct {
+	prec int // > 0 (0 indicates not found)
+	op   Op
+}{
+	// not an expression anymore, but left in so we can give a good error
+	// message when used in expression context
+	LCOMM: {1, OSEND},
+
+	LOROR: {2, OOROR},
+
+	LANDAND: {3, OANDAND},
+
+	LEQ: {4, OEQ},
+	LNE: {4, ONE},
+	LLE: {4, OLE},
+	LGE: {4, OGE},
+	LLT: {4, OLT},
+	LGT: {4, OGT},
+
+	'+': {5, OADD},
+	'-': {5, OSUB},
+	'|': {5, OOR},
+	'^': {5, OXOR},
+
+	'*':     {6, OMUL},
+	'/':     {6, ODIV},
+	'%':     {6, OMOD},
+	'&':     {6, OAND},
+	LLSH:    {6, OLSH},
+	LRSH:    {6, ORSH},
+	LANDNOT: {6, OANDNOT},
+}
+
+// bexpr parses a binary expression whose operators all have
+// precedence >= prec (precedence climbing). Right operands are parsed
+// at precedence t.prec+1, which yields left associativity; the outer
+// countdown loop re-enters the inner loop whenever the lookahead
+// operator's precedence is at least prec.
+func (p *parser) bexpr(prec int) *Node {
+	// don't trace bexpr - only leads to overly nested trace output
+
+	x := p.uexpr()
+	t := prectab[p.tok]
+	for tprec := t.prec; tprec >= prec; tprec-- {
+		for tprec == prec {
+			p.next()
+			y := p.bexpr(t.prec + 1)
+			x = Nod(t.op, x, y)
+			t = prectab[p.tok]
+			tprec = t.prec
+		}
+	}
+	return x
+}
+
+// go.y:expr
+// expr parses a full expression: a binary expression with minimum
+// operator precedence 1.
+func (p *parser) expr() *Node {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("expr")()
+	}
+
+	return p.bexpr(1)
+}
+
+// go.y:uexpr
+// uexpr parses a unary expression: a prefix operator applied to a
+// uexpr, or a primary expression (pexpr). Special cases: &T{...}
+// is rewritten so the complit allocates (*T), '~' is accepted with an
+// error pointing at '^', and LCOMM is either a receive or the start
+// of a receive-only channel type.
+func (p *parser) uexpr() *Node {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("uexpr")()
+	}
+
+	var op Op
+	switch p.tok {
+	case '*':
+		op = OIND
+
+	case '&':
+		p.next()
+		x := p.uexpr()
+		if x.Op == OCOMPLIT {
+			// Special case for &T{...}: turn into (*T){...}.
+			x.Right = Nod(OIND, x.Right, nil)
+			x.Right.Implicit = true
+		} else {
+			x = Nod(OADDR, x, nil)
+		}
+		return x
+
+	case '+':
+		op = OPLUS
+
+	case '-':
+		op = OMINUS
+
+	case '!':
+		op = ONOT
+
+	case '~':
+		// TODO(gri) do this in the lexer instead (issue 13244)
+		p.next()
+		x := p.uexpr()
+		Yyerror("the bitwise complement operator is ^")
+		return Nod(OCOM, x, nil)
+
+	case '^':
+		op = OCOM
+
+	case LCOMM:
+		// receive operation (<-s2) or receive-only channel type (<-chan s3)
+		p.next()
+		if p.got(LCHAN) {
+			// <-chan T
+			t := Nod(OTCHAN, p.chan_elem(), nil)
+			t.Etype = Crecv
+			return t
+		}
+		return Nod(ORECV, p.uexpr(), nil)
+
+	default:
+		return p.pexpr(false)
+	}
+
+	// simple uexpr
+	p.next()
+	return Nod(op, p.uexpr(), nil)
+}
+
+// call-like statements that can be preceded by 'defer' and 'go'
+//
+// go.y:pseudocall
+// pseudocall parses the operand of a go or defer statement and
+// reports an error if it is not a function call.
+func (p *parser) pseudocall() *Node {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("pseudocall")()
+	}
+
+	x := p.pexpr(true)
+	if x.Op != OCALL {
+		Yyerror("argument to go/defer must be function call")
+	}
+	return x
+}
+
+// go.y:pexpr (partial)
+// operand parses the leading operand of a primary expression:
+// a literal, name, parenthesized expression, function type or
+// literal, or other type. keep_parens forces an OPAREN wrapper
+// around a parenthesized expression.
+func (p *parser) operand(keep_parens bool) *Node {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("operand")()
+	}
+
+	switch p.tok {
+	case LLITERAL:
+		x := nodlit(p.val)
+		p.next()
+		return x
+
+	case LNAME, '@', '?':
+		return p.name()
+
+	case '(':
+		p.next()
+		p.nest++
+		x := p.expr() // expr_or_type
+		p.nest--
+		p.want(')')
+
+		// Need to know on lhs of := whether there are ( ).
+		// Don't bother with the OPAREN in other cases:
+		// it's just a waste of memory and time.
+		//
+		// But if the next token is a { , introduce OPAREN since
+		// we may have a composite literal and we need to know
+		// if there were ()'s'.
+		//
+		// TODO(gri) could simplify this if we parse complits
+		// in operand (see respective comment in pexpr).
+		//
+		// (We can probably not do this because of qualified types
+		// as in pkg.Type{}) (issue 13243).
+		if keep_parens || p.tok == '{' {
+			return Nod(OPAREN, x, nil)
+		}
+		switch x.Op {
+		case ONAME, ONONAME, OPACK, OTYPE, OLITERAL, OTYPESW:
+			return Nod(OPAREN, x, nil)
+		}
+		return x
+
+	case LFUNC:
+		t := p.ntype() // fntype
+		if p.tok == '{' {
+			// fnlitdcl
+			closurehdr(t)
+			// fnliteral
+			p.next() // consume '{'
+			p.nest++
+			body := p.stmt_list()
+			p.nest--
+			p.want('}')
+			return closurebody(body)
+		}
+		return t
+
+	case '[', LCHAN, LMAP, LSTRUCT, LINTERFACE:
+		return p.ntype() // othertype
+
+	case '{':
+		// common case: p.header is missing simple_stmt before { in if, for, switch
+		p.syntax_error("missing operand")
+		// '{' will be consumed in pexpr - no need to consume it here
+		return nil
+
+	default:
+		p.syntax_error("expecting expression")
+		p.advance()
+		return nil
+	}
+}
+
+// go.y:pexpr, pexpr_no_paren
+// pexpr parses a primary expression: an operand followed by any
+// number of postfix parts — selectors, type assertions, index and
+// slice expressions, call/conversion argument lists, and composite
+// literals. Whether a trailing '{' starts a composite literal depends
+// on the operand's Op and on p.nest (negative inside if/for/switch
+// headers, where a bare T{...} is not allowed).
+func (p *parser) pexpr(keep_parens bool) *Node {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("pexpr")()
+	}
+
+	x := p.operand(keep_parens)
+
+loop:
+	for {
+		switch p.tok {
+		case '.':
+			p.next()
+			switch p.tok {
+			case LNAME, '@', '?':
+				// pexpr '.' sym
+				x = p.new_dotname(x)
+
+			case '(':
+				p.next()
+				switch p.tok {
+				default:
+					// pexpr '.' '(' expr_or_type ')'
+					t := p.expr() // expr_or_type
+					p.want(')')
+					x = Nod(ODOTTYPE, x, t)
+
+				case LTYPE:
+					// pexpr '.' '(' LTYPE ')'
+					p.next()
+					p.want(')')
+					x = Nod(OTYPESW, nil, x)
+				}
+
+			default:
+				p.syntax_error("expecting name or (")
+				p.advance(';', '}')
+			}
+
+		case '[':
+			// Index or slice expression: collect up to three
+			// colon-separated index parts, then decide by the
+			// number of colons seen.
+			p.next()
+			p.nest++
+			var index [3]*Node
+			if p.tok != ':' {
+				index[0] = p.expr()
+			}
+			ncol := 0
+			for ncol < len(index)-1 && p.got(':') {
+				ncol++
+				if p.tok != EOF && p.tok != ':' && p.tok != ']' {
+					index[ncol] = p.expr()
+				}
+			}
+			p.nest--
+			p.want(']')
+
+			switch ncol {
+			case 0:
+				i := index[0]
+				if i == nil {
+					Yyerror("missing index in index expression")
+				}
+				x = Nod(OINDEX, x, i)
+			case 1:
+				i := index[0]
+				j := index[1]
+				x = Nod(OSLICE, x, Nod(OKEY, i, j))
+			case 2:
+				i := index[0]
+				j := index[1]
+				k := index[2]
+				if j == nil {
+					Yyerror("middle index required in 3-index slice")
+				}
+				if k == nil {
+					Yyerror("final index required in 3-index slice")
+				}
+				x = Nod(OSLICE3, x, Nod(OKEY, i, Nod(OKEY, j, k)))
+
+			default:
+				panic("unreachable")
+			}
+
+		case '(':
+			// convtype '(' expr ocomma ')'
+			p.next()
+			p.nest++
+			args, ddd := p.arg_list()
+			p.nest--
+			p.want(')')
+
+			// call or conversion
+			x = Nod(OCALL, x, nil)
+			x.List = args
+			x.Isddd = ddd
+
+		case '{':
+			// TODO(gri) should this (complit acceptance) be in operand?
+			// accept ()'s around the complit type but complain if we have a complit
+			// (issue 13243)
+			t := x
+			for t.Op == OPAREN {
+				t = t.Left
+			}
+			// determine if '{' belongs to a complit or a compound_stmt
+			complit_ok := false
+			switch t.Op {
+			case ONAME, ONONAME, OTYPE, OPACK, OXDOT, ODOT:
+				if p.nest >= 0 {
+					// x is considered a comptype
+					complit_ok = true
+				}
+			case OTARRAY, OTSTRUCT, OTMAP:
+				// x is a comptype
+				complit_ok = true
+			}
+			if !complit_ok {
+				break loop
+			}
+			if t != x {
+				p.syntax_error("cannot parenthesize type in composite literal")
+				// already progressed, no need to advance
+			}
+			n := p.complitexpr()
+			n.Right = x
+			x = n
+
+		default:
+			break loop
+		}
+	}
+
+	return x
+}
+
+// go.y:keyval
+// keyval parses one element of a composite literal: an expression,
+// optionally prefixed by "key :" (wrapped in an OKEY node).
+func (p *parser) keyval() *Node {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("keyval")()
+	}
+
+	x := p.bare_complitexpr()
+	if p.got(':') {
+		x = Nod(OKEY, x, p.bare_complitexpr())
+	}
+	return x
+}
+
+// go.y:bare_complitexpr
+// bare_complitexpr parses a composite-literal element value: either a
+// nested elided-type literal "{...}" or a plain expression.
+func (p *parser) bare_complitexpr() *Node {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("bare_complitexpr")()
+	}
+
+	if p.tok == '{' {
+		// '{' start_complit braced_keyval_list '}'
+		return p.complitexpr()
+	}
+
+	x := p.expr()
+
+	// These nodes do not carry line numbers.
+	// Since a composite literal commonly spans several lines,
+	// the line number on errors may be misleading.
+	// Introduce a wrapper node to give the correct line.
+
+	// TODO(gri) This is causing trouble when used for keys. Need to fix complit parsing.
+	// switch x.Op {
+	// case ONAME, ONONAME, OTYPE, OPACK, OLITERAL:
+	// 	x = Nod(OPAREN, x, nil)
+	// 	x.Implicit = true
+	// }
+	// (issue 13243)
+	return x
+}
+
+// go.y:complitexpr
+// complitexpr parses the braced element list of a composite literal
+// and returns it as an OCOMPLIT node; the type is attached to Right
+// by the caller (pexpr).
+func (p *parser) complitexpr() *Node {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("complitexpr")()
+	}
+
+	// make node early so we get the right line number
+	n := Nod(OCOMPLIT, nil, nil)
+
+	p.want('{')
+	p.nest++
+
+	var l *NodeList
+	for p.tok != EOF && p.tok != '}' {
+		l = list(l, p.keyval())
+		p.ocomma("composite literal")
+	}
+
+	p.nest--
+	p.want('}')
+
+	n.List = l
+	return n
+}
+
+// names and types
+//	newname is used before declared
+//	oldname is used after declared
+//
+// go.y:new_name:
+// new_name wraps newname, tolerating a nil symbol (as produced by
+// sym for '?') by returning nil.
+func (p *parser) new_name(sym *Sym) *Node {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("new_name")()
+	}
+
+	if sym == nil {
+		return nil
+	}
+	return newname(sym)
+}
+
+// go.y:onew_name:
+// onew_name parses an optional new name: a symbol token yields a
+// newname node, anything else yields nil without consuming input.
+func (p *parser) onew_name() *Node {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("onew_name")()
+	}
+
+	if p.tok == LNAME || p.tok == '@' || p.tok == '?' {
+		return p.new_name(p.sym())
+	}
+	return nil
+}
+
+// go.y:sym
+// sym parses a symbol: a plain identifier, an exported-data qualified
+// symbol ('@'), or '?' (no name, returns nil). On a syntax error it
+// returns a fresh dummy Sym so callers can proceed.
+func (p *parser) sym() *Sym {
+	switch p.tok {
+	case LNAME:
+		s := p.sym_
+		p.next()
+		// during imports, unqualified non-exported identifiers are from builtinpkg
+		if importpkg != nil && !exportname(s.Name) {
+			s = Pkglookup(s.Name, builtinpkg)
+		}
+		return s
+
+	case '@':
+		return p.hidden_importsym()
+
+	case '?':
+		p.next()
+		return nil
+
+	default:
+		p.syntax_error("")
+		p.advance()
+		return new(Sym)
+	}
+}
+
+// mkname returns the existing declaration node for sym (via oldname),
+// marking the originating package import as used when the name came
+// in through an OPACK.
+func mkname(sym *Sym) *Node {
+	node := oldname(sym)
+	if name := node.Name; name != nil && name.Pack != nil {
+		name.Pack.Used = true
+	}
+	return node
+}
+
+// go.y:name
+// name parses a symbol and resolves it to its existing declaration.
+func (p *parser) name() *Node {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("name")()
+	}
+
+	return mkname(p.sym())
+}
+
+// go.y:dotdotdot
+// dotdotdot parses a "...T" parameter type; a missing element type is
+// an error, recovered by substituting interface{}.
+func (p *parser) dotdotdot() *Node {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("dotdotdot")()
+	}
+
+	p.want(LDDD)
+	switch p.tok {
+	case LCOMM, LFUNC, '[', LCHAN, LMAP, LSTRUCT, LINTERFACE, '*', LNAME, '@', '?', '(':
+		return Nod(ODDD, p.ntype(), nil)
+	}
+
+	Yyerror("final argument in variadic function missing type")
+	return Nod(ODDD, typenod(typ(TINTER)), nil)
+}
+
+// go.y:ntype
+// ntype parses a type: channel, function, array/slice, map, struct,
+// interface, pointer, (possibly qualified) named, or parenthesized
+// type. Returns nil after reporting a syntax error.
+func (p *parser) ntype() *Node {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("ntype")()
+	}
+
+	switch p.tok {
+	case LCOMM:
+		// recvchantype
+		p.next()
+		p.want(LCHAN)
+		t := Nod(OTCHAN, p.chan_elem(), nil)
+		t.Etype = Crecv
+		return t
+
+	case LFUNC:
+		// fntype
+		p.next()
+		params := p.param_list()
+		result := p.fnres()
+		params = checkarglist(params, 1)
+		t := Nod(OTFUNC, nil, nil)
+		t.List = params
+		t.Rlist = result
+		return t
+
+	case '[':
+		// '[' oexpr ']' ntype
+		// '[' LDDD ']' ntype
+		p.next()
+		p.nest++
+		var len *Node
+		if p.tok != ']' {
+			if p.got(LDDD) {
+				len = Nod(ODDD, nil, nil)
+			} else {
+				len = p.expr()
+			}
+		}
+		p.nest--
+		p.want(']')
+		return Nod(OTARRAY, len, p.ntype())
+
+	case LCHAN:
+		// LCHAN non_recvchantype
+		// LCHAN LCOMM ntype
+		p.next()
+		var dir EType = Cboth
+		if p.got(LCOMM) {
+			dir = Csend
+		}
+		t := Nod(OTCHAN, p.chan_elem(), nil)
+		t.Etype = dir
+		return t
+
+	case LMAP:
+		// LMAP '[' ntype ']' ntype
+		p.next()
+		p.want('[')
+		key := p.ntype()
+		p.want(']')
+		val := p.ntype()
+		return Nod(OTMAP, key, val)
+
+	case LSTRUCT:
+		return p.structtype()
+
+	case LINTERFACE:
+		return p.interfacetype()
+
+	case '*':
+		// ptrtype
+		p.next()
+		return Nod(OIND, p.ntype(), nil)
+
+	case LNAME, '@', '?':
+		return p.dotname()
+
+	case '(':
+		p.next()
+		t := p.ntype()
+		p.want(')')
+		return t
+
+	case LDDD:
+		// permit ...T but complain
+		// TODO(gri) introduced for test/fixedbugs/bug228.go - maybe adjust bug or find better solution
+		// (issue 13243)
+		p.syntax_error("")
+		p.advance()
+		return p.ntype()
+
+	default:
+		p.syntax_error("")
+		p.advance()
+		return nil
+	}
+}
+
+// chan_elem parses the element type of a channel type, reporting an
+// error (without consuming input) if no type follows.
+func (p *parser) chan_elem() *Node {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("chan_elem")()
+	}
+
+	switch p.tok {
+	case LCOMM, LFUNC,
+		'[', LCHAN, LMAP, LSTRUCT, LINTERFACE,
+		'*',
+		LNAME, '@', '?',
+		'(',
+		LDDD:
+		return p.ntype()
+
+	default:
+		p.syntax_error("missing channel element type")
+		// assume element type is simply absent - don't advance
+		return nil
+	}
+}
+
+// go.y:fnret_type
+// fnret_type parses a single (unparenthesized) function result type.
+// TODO(gri) only called from fnres - inline and remove this one
+// (issue 13243)
+func (p *parser) fnret_type() *Node {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("fnret_type")()
+	}
+
+	switch p.tok {
+	case LFUNC, // fntype
+		LCOMM,                                 // recvchantype
+		'[', LCHAN, LMAP, LSTRUCT, LINTERFACE, // othertype
+		'*': // ptrtype
+		return p.ntype()
+
+	default:
+		return p.dotname()
+	}
+}
+
+// go.y:dotname (partial)
+// new_dotname parses the selector after a '.': if pkg is a package
+// reference (OPACK) it resolves pkg.sel to that package's symbol and
+// marks the import used; otherwise it builds an OXDOT to be resolved
+// during typechecking.
+func (p *parser) new_dotname(pkg *Node) *Node {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("new_dotname")()
+	}
+
+	sel := p.sym()
+	if pkg.Op == OPACK {
+		s := restrictlookup(sel.Name, pkg.Name.Pkg)
+		pkg.Used = true
+		return oldname(s)
+	}
+	return Nod(OXDOT, pkg, newname(sel))
+
+}
+
+// go.y:dotname
+// dotname parses a possibly qualified name: "name" or "name.sel".
+func (p *parser) dotname() *Node {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("dotname")()
+	}
+
+	name := p.name()
+	if p.got('.') {
+		return p.new_dotname(name)
+	}
+	return name
+}
+
+// go.y:structtype
+// structtype parses a struct type and returns it as an OTSTRUCT node
+// whose List holds the field declarations.
+func (p *parser) structtype() *Node {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("structtype")()
+	}
+
+	p.want(LSTRUCT)
+	p.want('{')
+	var l *NodeList
+	for p.tok != EOF && p.tok != '}' {
+		l = concat(l, p.structdcl())
+		p.osemi()
+	}
+	p.want('}')
+
+	t := Nod(OTSTRUCT, nil, nil)
+	t.List = l
+	return t
+}
+
+// go.y:interfacetype
+// interfacetype parses an interface type and returns it as an OTINTER
+// node whose List holds the method and embedded-interface
+// declarations.
+func (p *parser) interfacetype() *Node {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("interfacetype")()
+	}
+
+	p.want(LINTERFACE)
+	p.want('{')
+	var l *NodeList
+	for p.tok != EOF && p.tok != '}' {
+		l = list(l, p.interfacedcl())
+		p.osemi()
+	}
+	p.want('}')
+
+	t := Nod(OTINTER, nil, nil)
+	t.List = l
+	return t
+}
+
+// Function stuff.
+// All in one place to show how crappy it all is.
+//
+// go.y:xfndcl
+// xfndcl parses a top-level function or method declaration, attaches
+// the body, and transfers the pending //go: pragma flags onto the
+// function node before closing its scope via funcbody.
+func (p *parser) xfndcl() *Node {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("xfndcl")()
+	}
+
+	p.want(LFUNC)
+	f := p.fndcl()
+	body := p.fnbody()
+
+	if f == nil {
+		return nil
+	}
+	if noescape && body != nil {
+		Yyerror("can only use //go:noescape with external func implementations")
+	}
+
+	f.Nbody = body
+	f.Func.Endlineno = lineno
+	f.Noescape = noescape
+	f.Func.Norace = norace
+	f.Func.Nosplit = nosplit
+	f.Func.Noinline = noinline
+	f.Func.Nowritebarrier = nowritebarrier
+	f.Func.Nowritebarrierrec = nowritebarrierrec
+	f.Func.Systemstack = systemstack
+	funcbody(f)
+
+	return f
+}
+
+// go.y:fndcl
+// fndcl parses a function or method header (everything after "func"
+// up to the body), builds and declares the ODCLFUNC node, and opens
+// the function scope via funchdr. Special checks apply to init and
+// to main.main; methods must have exactly one valid receiver.
+func (p *parser) fndcl() *Node {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("fndcl")()
+	}
+
+	switch p.tok {
+	case LNAME, '@', '?':
+		// sym '(' oarg_type_list_ocomma ')' fnres
+		name := p.sym()
+		params := p.param_list()
+		result := p.fnres()
+
+		params = checkarglist(params, 1)
+
+		if name.Name == "init" {
+			name = renameinit()
+			if params != nil || result != nil {
+				Yyerror("func init must have no arguments and no return values")
+			}
+		}
+
+		if localpkg.Name == "main" && name.Name == "main" {
+			if params != nil || result != nil {
+				Yyerror("func main must have no arguments and no return values")
+			}
+		}
+
+		t := Nod(OTFUNC, nil, nil)
+		t.List = params
+		t.Rlist = result
+
+		f := Nod(ODCLFUNC, nil, nil)
+		f.Func.Nname = newfuncname(name)
+		f.Func.Nname.Name.Defn = f
+		f.Func.Nname.Name.Param.Ntype = t // TODO: check if nname already has an ntype
+		declare(f.Func.Nname, PFUNC)
+
+		funchdr(f)
+		return f
+
+	case '(':
+		// '(' oarg_type_list_ocomma ')' sym '(' oarg_type_list_ocomma ')' fnres
+		rparam := p.param_list()
+		name := p.sym()
+		params := p.param_list()
+		result := p.fnres()
+
+		rparam = checkarglist(rparam, 0)
+		params = checkarglist(params, 1)
+
+		if rparam == nil {
+			Yyerror("method has no receiver")
+			return nil
+		}
+
+		if rparam.Next != nil {
+			Yyerror("method has multiple receivers")
+			return nil
+		}
+
+		rcvr := rparam.N
+		if rcvr.Op != ODCLFIELD {
+			Yyerror("bad receiver in method")
+			return nil
+		}
+
+		t := Nod(OTFUNC, rcvr, nil)
+		t.List = params
+		t.Rlist = result
+
+		f := Nod(ODCLFUNC, nil, nil)
+		f.Func.Shortname = newfuncname(name)
+		f.Func.Nname = methodname1(f.Func.Shortname, rcvr.Right)
+		f.Func.Nname.Name.Defn = f
+		f.Func.Nname.Name.Param.Ntype = t
+		f.Func.Nname.Nointerface = nointerface
+		declare(f.Func.Nname, PFUNC)
+
+		funchdr(f)
+		return f
+
+	default:
+		p.syntax_error("expecting name or (")
+		p.advance('{', ';')
+		return nil
+	}
+}
+
+// go.y:hidden_fndcl
+func (p *parser) hidden_fndcl() *Node {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("hidden_fndcl")()
+	}
+
+	switch p.tok {
+	default:
+		// hidden_pkg_importsym '(' ohidden_funarg_list ')' ohidden_funres
+		s1 := p.hidden_pkg_importsym()
+		p.want('(')
+		s3 := p.ohidden_funarg_list()
+		p.want(')')
+		s5 := p.ohidden_funres()
+
+		s := s1
+		t := functype(nil, s3, s5)
+
+		importsym(s, ONAME)
+		if s.Def != nil && s.Def.Op == ONAME {
+			if Eqtype(t, s.Def.Type) {
+				dclcontext = PDISCARD // since we skip funchdr below
+				return nil
+			}
+			Yyerror("inconsistent definition for func %v during import\n\t%v\n\t%v", s, s.Def.Type, t)
+		}
+
+		ss := newfuncname(s)
+		ss.Type = t
+		declare(ss, PFUNC)
+
+		funchdr(ss)
+		return ss
+
+	case '(':
+		// '(' hidden_funarg_list ')' sym '(' ohidden_funarg_list ')' ohidden_funres
+		p.next()
+		s2 := p.hidden_funarg_list()
+		p.want(')')
+		s4 := p.sym()
+		p.want('(')
+		s6 := p.ohidden_funarg_list()
+		p.want(')')
+		s8 := p.ohidden_funres()
+
+		ss := methodname1(newname(s4), s2.N.Right)
+		ss.Type = functype(s2.N, s6, s8)
+
+		checkwidth(ss.Type)
+		addmethod(s4, ss.Type, false, nointerface)
+		nointerface = false
+		funchdr(ss)
+
+		// inl.C's inlnode in on a dotmeth node expects to find the inlineable body as
+		// (dotmeth's type).Nname.Inl, and dotmeth's type has been pulled
+		// out by typecheck's lookdot as this $$.ttype.  So by providing
+		// this back link here we avoid special casing there.
+		ss.Type.Nname = ss
+		return ss
+	}
+}
+
+// go.y:fnbody
+func (p *parser) fnbody() *NodeList {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("fnbody")()
+	}
+
+	if p.got('{') {
+		body := p.stmt_list()
+		p.want('}')
+		if body == nil {
+			body = list1(Nod(OEMPTY, nil, nil))
+		}
+		return body
+	}
+
+	return nil
+}
+
+// go.y:fnres
+func (p *parser) fnres() *NodeList {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("fnres")()
+	}
+
+	switch p.tok {
+	default:
+		return nil
+
+	case LCOMM, LFUNC, '[', LCHAN, LMAP, LSTRUCT, LINTERFACE, '*', LNAME, '@', '?':
+		result := p.fnret_type()
+		return list1(Nod(ODCLFIELD, nil, result))
+
+	case '(':
+		result := p.param_list()
+		return checkarglist(result, 0)
+	}
+}
+
+// go.y:xdcl_list
+func (p *parser) xdcl_list() (l *NodeList) {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("xdcl_list")()
+	}
+
+loop:
+	for p.tok != EOF {
+		switch p.tok {
+		case LVAR, LCONST, LTYPE:
+			l = concat(l, p.common_dcl())
+
+		case LFUNC:
+			l = list(l, p.xfndcl())
+
+		default:
+			if p.tok == '{' && l != nil && l.End.N.Op == ODCLFUNC && l.End.N.Nbody == nil {
+				// opening { of function declaration on next line
+				p.syntax_error("unexpected semicolon or newline before {")
+			} else {
+				p.syntax_error("non-declaration statement outside function body")
+			}
+			p.advance(LVAR, LCONST, LTYPE, LFUNC)
+			goto loop
+		}
+
+		if nsyntaxerrors == 0 {
+			testdclstack()
+		}
+
+		noescape = false
+		noinline = false
+		nointerface = false
+		norace = false
+		nosplit = false
+		nowritebarrier = false
+		nowritebarrierrec = false
+		systemstack = false
+
+		// Consume ';' AFTER resetting the above flags since
+		// it may read the subsequent comment line which may
+		// set the flags for the next function declaration.
+		if p.tok != EOF && !p.got(';') {
+			p.syntax_error("after top level declaration")
+			p.advance(LVAR, LCONST, LTYPE, LFUNC)
+			goto loop
+		}
+	}
+	return
+}
+
+// go.y:structdcl
+func (p *parser) structdcl() *NodeList {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("structdcl")()
+	}
+
+	var sym *Sym
+	switch p.tok {
+	case LNAME:
+		sym = p.sym_
+		p.next()
+		if sym == nil {
+			panic("unreachable") // we must have a sym for LNAME
+		}
+		if p.tok == '.' || p.tok == LLITERAL || p.tok == ';' || p.tok == '}' {
+			// embed oliteral
+			field := p.embed(sym)
+			tag := p.oliteral()
+
+			field.SetVal(tag)
+			return list1(field)
+		}
+
+		// LNAME belongs to first *Sym of new_name_list
+		//
+		// during imports, unqualified non-exported identifiers are from builtinpkg
+		if importpkg != nil && !exportname(sym.Name) {
+			sym = Pkglookup(sym.Name, builtinpkg)
+			if sym == nil {
+				p.import_error()
+			}
+		}
+		fallthrough
+
+	case '@', '?':
+		// new_name_list ntype oliteral
+		fields := p.new_name_list(sym)
+		typ := p.ntype()
+		tag := p.oliteral()
+
+		if l := fields; l == nil || l.N.Sym.Name == "?" {
+			// ? symbol, during import (list1(nil) == nil)
+			n := typ
+			if n.Op == OIND {
+				n = n.Left
+			}
+			n = embedded(n.Sym, importpkg)
+			n.Right = typ
+			n.SetVal(tag)
+			return list1(n)
+		}
+
+		for l := fields; l != nil; l = l.Next {
+			l.N = Nod(ODCLFIELD, l.N, typ)
+			l.N.SetVal(tag)
+		}
+		return fields
+
+	case '(':
+		p.next()
+		if p.got('*') {
+			// '(' '*' embed ')' oliteral
+			field := p.embed(nil)
+			p.want(')')
+			tag := p.oliteral()
+
+			field.Right = Nod(OIND, field.Right, nil)
+			field.SetVal(tag)
+			Yyerror("cannot parenthesize embedded type")
+			return list1(field)
+
+		} else {
+			// '(' embed ')' oliteral
+			field := p.embed(nil)
+			p.want(')')
+			tag := p.oliteral()
+
+			field.SetVal(tag)
+			Yyerror("cannot parenthesize embedded type")
+			return list1(field)
+		}
+
+	case '*':
+		p.next()
+		if p.got('(') {
+			// '*' '(' embed ')' oliteral
+			field := p.embed(nil)
+			p.want(')')
+			tag := p.oliteral()
+
+			field.Right = Nod(OIND, field.Right, nil)
+			field.SetVal(tag)
+			Yyerror("cannot parenthesize embedded type")
+			return list1(field)
+
+		} else {
+			// '*' embed oliteral
+			field := p.embed(nil)
+			tag := p.oliteral()
+
+			field.Right = Nod(OIND, field.Right, nil)
+			field.SetVal(tag)
+			return list1(field)
+		}
+
+	default:
+		p.syntax_error("expecting field name or embedded type")
+		p.advance(';', '}')
+		return nil
+	}
+}
+
+// go.y:oliteral
+func (p *parser) oliteral() (v Val) {
+	if p.tok == LLITERAL {
+		v = p.val
+		p.next()
+	}
+	return
+}
+
+// go.y:packname
+func (p *parser) packname(name *Sym) *Sym {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("embed")()
+	}
+
+	if name != nil {
+		// LNAME was already consumed and is coming in as name
+	} else if p.tok == LNAME {
+		name = p.sym_
+		p.next()
+	} else {
+		p.syntax_error("expecting name")
+		p.advance('.', ';', '}')
+		name = new(Sym)
+	}
+
+	if p.got('.') {
+		// LNAME '.' sym
+		s := p.sym()
+
+		var pkg *Pkg
+		if name.Def == nil || name.Def.Op != OPACK {
+			Yyerror("%v is not a package", name)
+			pkg = localpkg
+		} else {
+			name.Def.Used = true
+			pkg = name.Def.Name.Pkg
+		}
+		return restrictlookup(s.Name, pkg)
+	}
+
+	// LNAME
+	if n := oldname(name); n.Name != nil && n.Name.Pack != nil {
+		n.Name.Pack.Used = true
+	}
+	return name
+}
+
+// go.y:embed
+func (p *parser) embed(sym *Sym) *Node {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("embed")()
+	}
+
+	pkgname := p.packname(sym)
+	return embedded(pkgname, localpkg)
+}
+
+// go.y:interfacedcl
+func (p *parser) interfacedcl() *Node {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("interfacedcl")()
+	}
+
+	switch p.tok {
+	case LNAME:
+		sym := p.sym_
+		p.next()
+
+		// accept potential name list but complain
+		hasNameList := false
+		for p.got(',') {
+			p.sym()
+			hasNameList = true
+		}
+		if hasNameList {
+			p.syntax_error("name list not allowed in interface type")
+			// already progressed, no need to advance
+		}
+
+		if p.tok != '(' {
+			// packname
+			pname := p.packname(sym)
+			return Nod(ODCLFIELD, nil, oldname(pname))
+		}
+
+		// newname indcl
+		mname := newname(sym)
+		sig := p.indcl()
+
+		meth := Nod(ODCLFIELD, mname, sig)
+		ifacedcl(meth)
+		return meth
+
+	case '(':
+		p.next()
+		pname := p.packname(nil)
+		p.want(')')
+		n := Nod(ODCLFIELD, nil, oldname(pname))
+		Yyerror("cannot parenthesize embedded type")
+		return n
+
+	default:
+		p.syntax_error("")
+		p.advance(';', '}')
+		return nil
+	}
+}
+
+// go.y:indcl
+func (p *parser) indcl() *Node {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("indcl")()
+	}
+
+	params := p.param_list()
+	result := p.fnres()
+
+	// without func keyword
+	params = checkarglist(params, 1)
+	t := Nod(OTFUNC, fakethis(), nil)
+	t.List = params
+	t.Rlist = result
+
+	return t
+}
+
+// go.y:arg_type
+func (p *parser) arg_type() *Node {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("arg_type")()
+	}
+
+	switch p.tok {
+	case LNAME, '@', '?':
+		name := p.sym()
+		switch p.tok {
+		case LCOMM, LFUNC, '[', LCHAN, LMAP, LSTRUCT, LINTERFACE, '*', LNAME, '@', '?', '(':
+			// sym name_or_type
+			typ := p.ntype()
+			nn := Nod(ONONAME, nil, nil)
+			nn.Sym = name
+			return Nod(OKEY, nn, typ)
+
+		case LDDD:
+			// sym dotdotdot
+			typ := p.dotdotdot()
+			nn := Nod(ONONAME, nil, nil)
+			nn.Sym = name
+			return Nod(OKEY, nn, typ)
+
+		default:
+			// name_or_type
+			name := mkname(name)
+			// from dotname
+			if p.got('.') {
+				return p.new_dotname(name)
+			}
+			return name
+		}
+
+	case LDDD:
+		// dotdotdot
+		return p.dotdotdot()
+
+	case LCOMM, LFUNC, '[', LCHAN, LMAP, LSTRUCT, LINTERFACE, '*', '(':
+		// name_or_type
+		return p.ntype()
+
+	default:
+		p.syntax_error("expecting )")
+		p.advance(',', ')')
+		return nil
+	}
+}
+
+// go.y:oarg_type_list_ocomma + surrounding ()'s
+func (p *parser) param_list() (l *NodeList) {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("param_list")()
+	}
+
+	p.want('(')
+	for p.tok != EOF && p.tok != ')' {
+		l = list(l, p.arg_type())
+		p.ocomma("parameter list")
+	}
+	p.want(')')
+	return
+}
+
+var missing_stmt = Nod(OXXX, nil, nil)
+
+// go.y:stmt
+// may return missing_stmt
+func (p *parser) stmt() *Node {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("stmt")()
+	}
+
+	switch p.tok {
+	case '{':
+		return p.compound_stmt(false)
+
+	case LVAR, LCONST, LTYPE:
+		return liststmt(p.common_dcl())
+
+	case LNAME, '@', '?', LLITERAL, LFUNC, '(', // operands
+		'[', LSTRUCT, LMAP, LCHAN, LINTERFACE, // composite types
+		'+', '-', '*', '&', '^', '~', LCOMM, '!': // unary operators
+		return p.simple_stmt(true, false)
+
+	case LFOR:
+		return p.for_stmt()
+
+	case LSWITCH:
+		return p.switch_stmt()
+
+	case LSELECT:
+		return p.select_stmt()
+
+	case LIF:
+		return p.if_stmt()
+
+	case LFALL:
+		p.next()
+		// will be converted to OFALL
+		stmt := Nod(OXFALL, nil, nil)
+		stmt.Xoffset = int64(block)
+		return stmt
+
+	case LBREAK:
+		p.next()
+		return Nod(OBREAK, p.onew_name(), nil)
+
+	case LCONTINUE:
+		p.next()
+		return Nod(OCONTINUE, p.onew_name(), nil)
+
+	case LGO:
+		p.next()
+		return Nod(OPROC, p.pseudocall(), nil)
+
+	case LDEFER:
+		p.next()
+		return Nod(ODEFER, p.pseudocall(), nil)
+
+	case LGOTO:
+		p.next()
+		stmt := Nod(OGOTO, p.new_name(p.sym()), nil)
+		stmt.Sym = dclstack // context, for goto restrictions
+		return stmt
+
+	case LRETURN:
+		p.next()
+		var results *NodeList
+		if p.tok != ';' && p.tok != '}' {
+			results = p.expr_list()
+		}
+
+		stmt := Nod(ORETURN, nil, nil)
+		stmt.List = results
+		if stmt.List == nil && Curfn != nil {
+			for l := Curfn.Func.Dcl; l != nil; l = l.Next {
+				if l.N.Class == PPARAM {
+					continue
+				}
+				if l.N.Class != PPARAMOUT {
+					break
+				}
+				if l.N.Sym.Def != l.N {
+					Yyerror("%s is shadowed during return", l.N.Sym.Name)
+				}
+			}
+		}
+
+		return stmt
+
+	case ';':
+		return nil
+
+	default:
+		return missing_stmt
+	}
+}
+
+// go.y:stmt_list
+func (p *parser) stmt_list() (l *NodeList) {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("stmt_list")()
+	}
+
+	for p.tok != EOF && p.tok != '}' && p.tok != LCASE && p.tok != LDEFAULT {
+		s := p.stmt()
+		if s == missing_stmt {
+			break
+		}
+		l = list(l, s)
+		// customized version of osemi:
+		// ';' is optional before a closing ')' or '}'
+		if p.tok == ')' || p.tok == '}' {
+			continue
+		}
+		if !p.got(';') {
+			p.syntax_error("at end of statement")
+			p.advance(';', '}')
+		}
+	}
+	return
+}
+
+// go.y:new_name_list
+// if first != nil we have the first symbol already
+func (p *parser) new_name_list(first *Sym) *NodeList {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("new_name_list")()
+	}
+
+	if first == nil {
+		first = p.sym() // may still be nil
+	}
+	l := list1(p.new_name(first))
+	for p.got(',') {
+		l = list(l, p.new_name(p.sym()))
+	}
+	return l
+}
+
+// go.y:dcl_name_list
+func (p *parser) dcl_name_list() *NodeList {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("dcl_name_list")()
+	}
+
+	l := list1(dclname(p.sym()))
+	for p.got(',') {
+		l = list(l, dclname(p.sym()))
+	}
+	return l
+}
+
+// go.y:expr_list
+func (p *parser) expr_list() *NodeList {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("expr_list")()
+	}
+
+	l := list1(p.expr())
+	for p.got(',') {
+		l = list(l, p.expr())
+	}
+	return l
+}
+
+// go.y:expr_or_type_list
+func (p *parser) arg_list() (l *NodeList, ddd bool) {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("arg_list")()
+	}
+
+	// TODO(gri) make this more tolerant in the presence of LDDD
+	// that is not at the end (issue 13243).
+
+	for p.tok != EOF && p.tok != ')' && !ddd {
+		l = list(l, p.expr()) // expr_or_type
+		ddd = p.got(LDDD)
+		p.ocomma("argument list")
+	}
+
+	return
+}
+
+// go.y:osemi
+func (p *parser) osemi() {
+	// ';' is optional before a closing ')' or '}'
+	if p.tok == ')' || p.tok == '}' {
+		return
+	}
+	p.want(';')
+}
+
+// go.y:ocomma
+func (p *parser) ocomma(context string) {
+	switch p.tok {
+	case ')', '}':
+		// ',' is optional before a closing ')' or '}'
+		return
+	case ';':
+		p.syntax_error("need trailing comma before newline in " + context)
+		p.next() // interpret ';' as comma
+		return
+	}
+	p.want(',')
+}
+
+// ----------------------------------------------------------------------------
+// Importing packages
+
+func (p *parser) import_error() {
+	p.syntax_error("in export data of imported package")
+	p.next()
+}
+
+// The methods below reflect a 1:1 translation of the corresponding go.y yacc
+// productions. They could be simplified significantly and also use better
+// variable names. However, we will be able to delete them once we enable the
+// new export format by default, so it's not worth the effort.
+
+// go.y:hidden_importsym
+func (p *parser) hidden_importsym() *Sym {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("hidden_importsym")()
+	}
+
+	p.want('@')
+	var s2 Val
+	if p.tok == LLITERAL {
+		s2 = p.val
+		p.next()
+	} else {
+		p.import_error()
+	}
+	p.want('.')
+
+	switch p.tok {
+	case LNAME:
+		s4 := p.sym_
+		p.next()
+
+		var p *Pkg
+
+		if s2.U.(string) == "" {
+			p = importpkg
+		} else {
+			if isbadimport(s2.U.(string)) {
+				errorexit()
+			}
+			p = mkpkg(s2.U.(string))
+		}
+		return Pkglookup(s4.Name, p)
+
+	case '?':
+		p.next()
+
+		var p *Pkg
+
+		if s2.U.(string) == "" {
+			p = importpkg
+		} else {
+			if isbadimport(s2.U.(string)) {
+				errorexit()
+			}
+			p = mkpkg(s2.U.(string))
+		}
+		return Pkglookup("?", p)
+
+	default:
+		p.import_error()
+		return nil
+	}
+}
+
+// go.y:ohidden_funarg_list
+func (p *parser) ohidden_funarg_list() *NodeList {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("ohidden_funarg_list")()
+	}
+
+	var ss *NodeList
+	if p.tok != ')' {
+		ss = p.hidden_funarg_list()
+	}
+	return ss
+}
+
+// go.y:ohidden_structdcl_list
+func (p *parser) ohidden_structdcl_list() *NodeList {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("ohidden_structdcl_list")()
+	}
+
+	var ss *NodeList
+	if p.tok != '}' {
+		ss = p.hidden_structdcl_list()
+	}
+	return ss
+}
+
+// go.y:ohidden_interfacedcl_list
+func (p *parser) ohidden_interfacedcl_list() *NodeList {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("ohidden_interfacedcl_list")()
+	}
+
+	var ss *NodeList
+	if p.tok != '}' {
+		ss = p.hidden_interfacedcl_list()
+	}
+	return ss
+}
+
+// import syntax from package header
+//
+// go.y:hidden_import
+func (p *parser) hidden_import() {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("hidden_import")()
+	}
+
+	switch p.tok {
+	case LIMPORT:
+		// LIMPORT LNAME LLITERAL ';'
+		p.next()
+		var s2 *Sym
+		if p.tok == LNAME {
+			s2 = p.sym_
+			p.next()
+		} else {
+			p.import_error()
+		}
+		var s3 Val
+		if p.tok == LLITERAL {
+			s3 = p.val
+			p.next()
+		} else {
+			p.import_error()
+		}
+		p.want(';')
+
+		importimport(s2, s3.U.(string))
+
+	case LVAR:
+		// LVAR hidden_pkg_importsym hidden_type ';'
+		p.next()
+		s2 := p.hidden_pkg_importsym()
+		s3 := p.hidden_type()
+		p.want(';')
+
+		importvar(s2, s3)
+
+	case LCONST:
+		// LCONST hidden_pkg_importsym '=' hidden_constant ';'
+		// LCONST hidden_pkg_importsym hidden_type '=' hidden_constant ';'
+		p.next()
+		s2 := p.hidden_pkg_importsym()
+		var s3 *Type = Types[TIDEAL]
+		if p.tok != '=' {
+			s3 = p.hidden_type()
+		}
+		p.want('=')
+		s4 := p.hidden_constant()
+		p.want(';')
+
+		importconst(s2, s3, s4)
+
+	case LTYPE:
+		// LTYPE hidden_pkgtype hidden_type ';'
+		p.next()
+		s2 := p.hidden_pkgtype()
+		s3 := p.hidden_type()
+		p.want(';')
+
+		importtype(s2, s3)
+
+	case LFUNC:
+		// LFUNC hidden_fndcl fnbody ';'
+		p.next()
+		s2 := p.hidden_fndcl()
+		s3 := p.fnbody()
+		p.want(';')
+
+		if s2 == nil {
+			dclcontext = PEXTERN // since we skip the funcbody below
+			return
+		}
+
+		s2.Func.Inl = s3
+
+		funcbody(s2)
+		importlist = append(importlist, s2)
+
+		if Debug['E'] > 0 {
+			fmt.Printf("import [%q] func %v \n", importpkg.Path, s2)
+			if Debug['m'] > 2 && s2.Func.Inl != nil {
+				fmt.Printf("inl body:%v\n", s2.Func.Inl)
+			}
+		}
+
+	default:
+		p.import_error()
+	}
+}
+
+// go.y:hidden_pkg_importsym
+func (p *parser) hidden_pkg_importsym() *Sym {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("hidden_pkg_importsym")()
+	}
+
+	s1 := p.hidden_importsym()
+
+	ss := s1
+	structpkg = ss.Pkg
+
+	return ss
+}
+
+// go.y:hidden_pkgtype
+func (p *parser) hidden_pkgtype() *Type {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("hidden_pkgtype")()
+	}
+
+	s1 := p.hidden_pkg_importsym()
+
+	ss := pkgtype(s1)
+	importsym(s1, OTYPE)
+
+	return ss
+}
+
+// ----------------------------------------------------------------------------
+// Importing types
+
+// go.y:hidden_type
+func (p *parser) hidden_type() *Type {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("hidden_type")()
+	}
+
+	switch p.tok {
+	default:
+		return p.hidden_type_misc()
+	case LCOMM:
+		return p.hidden_type_recv_chan()
+	case LFUNC:
+		return p.hidden_type_func()
+	}
+}
+
+// go.y:hidden_type_non_recv_chan
+func (p *parser) hidden_type_non_recv_chan() *Type {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("hidden_type_non_recv_chan")()
+	}
+
+	switch p.tok {
+	default:
+		return p.hidden_type_misc()
+	case LFUNC:
+		return p.hidden_type_func()
+	}
+}
+
+// go.y:hidden_type_misc
+func (p *parser) hidden_type_misc() *Type {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("hidden_type_misc")()
+	}
+
+	switch p.tok {
+	case '@':
+		// hidden_importsym
+		s1 := p.hidden_importsym()
+		return pkgtype(s1)
+
+	case LNAME:
+		// LNAME
+		s1 := p.sym_
+		p.next()
+
+		// predefined name like uint8
+		s1 = Pkglookup(s1.Name, builtinpkg)
+		if s1.Def == nil || s1.Def.Op != OTYPE {
+			Yyerror("%s is not a type", s1.Name)
+			return nil
+		} else {
+			return s1.Def.Type
+		}
+
+	case '[':
+		// '[' ']' hidden_type
+		// '[' LLITERAL ']' hidden_type
+		p.next()
+		var s2 *Node
+		if p.tok == LLITERAL {
+			s2 = nodlit(p.val)
+			p.next()
+		}
+		p.want(']')
+		s4 := p.hidden_type()
+
+		return aindex(s2, s4)
+
+	case LMAP:
+		// LMAP '[' hidden_type ']' hidden_type
+		p.next()
+		p.want('[')
+		s3 := p.hidden_type()
+		p.want(']')
+		s5 := p.hidden_type()
+
+		return maptype(s3, s5)
+
+	case LSTRUCT:
+		// LSTRUCT '{' ohidden_structdcl_list '}'
+		p.next()
+		p.want('{')
+		s3 := p.ohidden_structdcl_list()
+		p.want('}')
+
+		return tostruct(s3)
+
+	case LINTERFACE:
+		// LINTERFACE '{' ohidden_interfacedcl_list '}'
+		p.next()
+		p.want('{')
+		s3 := p.ohidden_interfacedcl_list()
+		p.want('}')
+
+		return tointerface(s3)
+
+	case '*':
+		// '*' hidden_type
+		p.next()
+		s2 := p.hidden_type()
+		return Ptrto(s2)
+
+	case LCHAN:
+		p.next()
+		switch p.tok {
+		default:
+			// LCHAN hidden_type_non_recv_chan
+			s2 := p.hidden_type_non_recv_chan()
+			ss := typ(TCHAN)
+			ss.Type = s2
+			ss.Chan = Cboth
+			return ss
+
+		case '(':
+			// LCHAN '(' hidden_type_recv_chan ')'
+			p.next()
+			s3 := p.hidden_type_recv_chan()
+			p.want(')')
+			ss := typ(TCHAN)
+			ss.Type = s3
+			ss.Chan = Cboth
+			return ss
+
+		case LCOMM:
+			// LCHAN hidden_type
+			p.next()
+			s3 := p.hidden_type()
+			ss := typ(TCHAN)
+			ss.Type = s3
+			ss.Chan = Csend
+			return ss
+		}
+
+	default:
+		p.import_error()
+		return nil
+	}
+}
+
+// go.y:hidden_type_recv_chan
+func (p *parser) hidden_type_recv_chan() *Type {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("hidden_type_recv_chan")()
+	}
+
+	p.want(LCOMM)
+	p.want(LCHAN)
+	s3 := p.hidden_type()
+
+	ss := typ(TCHAN)
+	ss.Type = s3
+	ss.Chan = Crecv
+	return ss
+}
+
+// go.y:hidden_type_func
+func (p *parser) hidden_type_func() *Type {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("hidden_type_func")()
+	}
+
+	p.want(LFUNC)
+	p.want('(')
+	s3 := p.ohidden_funarg_list()
+	p.want(')')
+	s5 := p.ohidden_funres()
+
+	return functype(nil, s3, s5)
+}
+
+// go.y:hidden_funarg
+func (p *parser) hidden_funarg() *Node {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("hidden_funarg")()
+	}
+
+	s1 := p.sym()
+	switch p.tok {
+	default:
+		s2 := p.hidden_type()
+		s3 := p.oliteral()
+
+		ss := Nod(ODCLFIELD, nil, typenod(s2))
+		if s1 != nil {
+			ss.Left = newname(s1)
+		}
+		ss.SetVal(s3)
+		return ss
+
+	case LDDD:
+		p.next()
+		s3 := p.hidden_type()
+		s4 := p.oliteral()
+
+		var t *Type
+
+		t = typ(TARRAY)
+		t.Bound = -1
+		t.Type = s3
+
+		ss := Nod(ODCLFIELD, nil, typenod(t))
+		if s1 != nil {
+			ss.Left = newname(s1)
+		}
+		ss.Isddd = true
+		ss.SetVal(s4)
+
+		return ss
+	}
+}
+
+// go.y:hidden_structdcl
+func (p *parser) hidden_structdcl() *Node {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("hidden_structdcl")()
+	}
+
+	s1 := p.sym()
+	s2 := p.hidden_type()
+	s3 := p.oliteral()
+
+	var s *Sym
+	var pkg *Pkg
+
+	var ss *Node
+	if s1 != nil && s1.Name != "?" {
+		ss = Nod(ODCLFIELD, newname(s1), typenod(s2))
+		ss.SetVal(s3)
+	} else {
+		s = s2.Sym
+		if s == nil && Isptr[s2.Etype] {
+			s = s2.Type.Sym
+		}
+		pkg = importpkg
+		if s1 != nil {
+			pkg = s1.Pkg
+		}
+		ss = embedded(s, pkg)
+		ss.Right = typenod(s2)
+		ss.SetVal(s3)
+	}
+
+	return ss
+}
+
+// go.y:hidden_interfacedcl
+func (p *parser) hidden_interfacedcl() *Node {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("hidden_interfacedcl")()
+	}
+
+	// TODO(gri) possible conflict here: both cases may start with '@' per grammar
+	// (issue 13245).
+
+	switch p.tok {
+	case LNAME, '@', '?':
+		s1 := p.sym()
+		p.want('(')
+		s3 := p.ohidden_funarg_list()
+		p.want(')')
+		s5 := p.ohidden_funres()
+
+		return Nod(ODCLFIELD, newname(s1), typenod(functype(fakethis(), s3, s5)))
+
+	default:
+		s1 := p.hidden_type()
+
+		return Nod(ODCLFIELD, nil, typenod(s1))
+	}
+}
+
+// go.y:ohidden_funres
+func (p *parser) ohidden_funres() *NodeList {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("ohidden_funres")()
+	}
+
+	switch p.tok {
+	default:
+		return nil
+
+	case '(', '@', LNAME, '[', LMAP, LSTRUCT, LINTERFACE, '*', LCHAN, LCOMM, LFUNC:
+		return p.hidden_funres()
+	}
+}
+
+// go.y:hidden_funres
+func (p *parser) hidden_funres() *NodeList {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("hidden_funres")()
+	}
+
+	switch p.tok {
+	case '(':
+		p.next()
+		s2 := p.ohidden_funarg_list()
+		p.want(')')
+		return s2
+
+	default:
+		s1 := p.hidden_type()
+		return list1(Nod(ODCLFIELD, nil, typenod(s1)))
+	}
+}
+
+// ----------------------------------------------------------------------------
+// Importing constants
+
+// go.y:hidden_literal
+func (p *parser) hidden_literal() *Node {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("hidden_literal")()
+	}
+
+	switch p.tok {
+	case LLITERAL:
+		ss := nodlit(p.val)
+		p.next()
+		return ss
+
+	case '-':
+		p.next()
+		if p.tok == LLITERAL {
+			ss := nodlit(p.val)
+			p.next()
+			switch ss.Val().Ctype() {
+			case CTINT, CTRUNE:
+				mpnegfix(ss.Val().U.(*Mpint))
+				break
+			case CTFLT:
+				mpnegflt(ss.Val().U.(*Mpflt))
+				break
+			case CTCPLX:
+				mpnegflt(&ss.Val().U.(*Mpcplx).Real)
+				mpnegflt(&ss.Val().U.(*Mpcplx).Imag)
+				break
+			default:
+				Yyerror("bad negated constant")
+			}
+			return ss
+		} else {
+			p.import_error()
+			return nil
+		}
+
+	case LNAME, '@', '?':
+		s1 := p.sym()
+		ss := oldname(Pkglookup(s1.Name, builtinpkg))
+		if ss.Op != OLITERAL {
+			Yyerror("bad constant %v", ss.Sym)
+		}
+		return ss
+
+	default:
+		p.import_error()
+		return nil
+	}
+}
+
+// go.y:hidden_constant
+func (p *parser) hidden_constant() *Node {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("hidden_constant")()
+	}
+
+	switch p.tok {
+	default:
+		return p.hidden_literal()
+	case '(':
+		p.next()
+		s2 := p.hidden_literal()
+		p.want('+')
+		s4 := p.hidden_literal()
+		p.want(')')
+
+		if s2.Val().Ctype() == CTRUNE && s4.Val().Ctype() == CTINT {
+			ss := s2
+			mpaddfixfix(s2.Val().U.(*Mpint), s4.Val().U.(*Mpint), 0)
+			return ss
+		}
+		s4.Val().U.(*Mpcplx).Real = s4.Val().U.(*Mpcplx).Imag
+		Mpmovecflt(&s4.Val().U.(*Mpcplx).Imag, 0.0)
+		return nodcplxlit(s2.Val(), s4.Val())
+	}
+}
+
+// go.y:hidden_import_list
+func (p *parser) hidden_import_list() {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("hidden_import_list")()
+	}
+
+	for p.tok != '$' {
+		p.hidden_import()
+	}
+}
+
+// go.y:hidden_funarg_list
+func (p *parser) hidden_funarg_list() *NodeList {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("hidden_funarg_list")()
+	}
+
+	s1 := p.hidden_funarg()
+	ss := list1(s1)
+	for p.got(',') {
+		s3 := p.hidden_funarg()
+		ss = list(ss, s3)
+	}
+	return ss
+}
+
+// go.y:hidden_structdcl_list
+func (p *parser) hidden_structdcl_list() *NodeList {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("hidden_structdcl_list")()
+	}
+
+	s1 := p.hidden_structdcl()
+	ss := list1(s1)
+	for p.got(';') {
+		s3 := p.hidden_structdcl()
+		ss = list(ss, s3)
+	}
+	return ss
+}
+
+// go.y:hidden_interfacedcl_list
+func (p *parser) hidden_interfacedcl_list() *NodeList {
+	if trace && Debug['x'] != 0 {
+		defer p.trace("hidden_interfacedcl_list")()
+	}
+
+	s1 := p.hidden_interfacedcl()
+	ss := list1(s1)
+	for p.got(';') {
+		s3 := p.hidden_interfacedcl()
+		ss = list(ss, s3)
+	}
+	return ss
+}
diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go
index c8f2059..c0d4a9f 100644
--- a/src/cmd/compile/internal/gc/pgen.go
+++ b/src/cmd/compile/internal/gc/pgen.go
@@ -130,6 +130,9 @@
 }
 
 func emitptrargsmap() {
+	if Curfn.Func.Nname.Sym.Name == "_" {
+		return
+	}
 	sym := Lookup(fmt.Sprintf("%s.args_stackmap", Curfn.Func.Nname.Sym.Name))
 
 	nptr := int(Curfn.Type.Argwid / int64(Widthptr))
@@ -283,7 +286,7 @@
 		if haspointers(n.Type) {
 			stkptrsize = Stksize
 		}
-		if Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9' {
+		if Thearch.Thechar == '0' || Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9' {
 			Stksize = Rnd(Stksize, int64(Widthptr))
 		}
 		if Stksize >= 1<<31 {
@@ -320,7 +323,7 @@
 		Fatalf("bad checknil")
 	}
 
-	if ((Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9') && n.Op != OREGISTER) || !n.Addable || n.Op == OLITERAL {
+	if ((Thearch.Thechar == '0' || Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9') && n.Op != OREGISTER) || !n.Addable || n.Op == OLITERAL {
 		var reg Node
 		Regalloc(&reg, Types[Tptr], n)
 		Cgen(n, &reg)
@@ -406,8 +409,8 @@
 	if nerrors != 0 {
 		goto ret
 	}
-	if flag_race != 0 {
-		racewalk(Curfn)
+	if instrumenting {
+		instrument(Curfn)
 	}
 	if nerrors != 0 {
 		goto ret
diff --git a/src/cmd/compile/internal/gc/plive.go b/src/cmd/compile/internal/gc/plive.go
index a09247b..7765d2d 100644
--- a/src/cmd/compile/internal/gc/plive.go
+++ b/src/cmd/compile/internal/gc/plive.go
@@ -821,7 +821,7 @@
 		return
 	}
 	var a *Node
-	var class uint8
+	var class Class
 	for l := fn.Func.Dcl; l != nil; l = l.Next {
 		a = l.N
 		class = a.Class &^ PHEAP
@@ -1434,7 +1434,14 @@
 						// the PCDATA must begin one instruction early too.
 						// The instruction before a call to deferreturn is always a
 						// no-op, to keep PC-specific data unambiguous.
-						splicebefore(lv, bb, newpcdataprog(p.Opt.(*obj.Prog), pos), p.Opt.(*obj.Prog))
+						prev := p.Opt.(*obj.Prog)
+						if Ctxt.Arch.Thechar == '9' {
+							// On ppc64 there is an additional instruction
+							// (another no-op or reload of toc pointer) before
+							// the call.
+							prev = prev.Opt.(*obj.Prog)
+						}
+						splicebefore(lv, bb, newpcdataprog(prev, pos), prev)
 					} else {
 						splicebefore(lv, bb, newpcdataprog(p, pos), p)
 					}
diff --git a/src/cmd/compile/internal/gc/popt.go b/src/cmd/compile/internal/gc/popt.go
index 985ebb6..4d71ab6 100644
--- a/src/cmd/compile/internal/gc/popt.go
+++ b/src/cmd/compile/internal/gc/popt.go
@@ -88,7 +88,7 @@
 // longer and more difficult to follow during debugging.
 // Remove them.
 
-/* what instruction does a JMP to p eventually land on? */
+// what instruction does a JMP to p eventually land on?
 func chasejmp(p *obj.Prog, jmploop *int) *obj.Prog {
 	n := 0
 	for p != nil && p.As == obj.AJMP && p.To.Type == obj.TYPE_BRANCH {
@@ -104,14 +104,12 @@
 	return p
 }
 
-/*
- * reuse reg pointer for mark/sweep state.
- * leave reg==nil at end because alive==nil.
- */
+// reuse reg pointer for mark/sweep state.
+// leave reg==nil at end because alive==nil.
 var alive interface{} = nil
 var dead interface{} = 1
 
-/* mark all code reachable from firstp as alive */
+// mark all code reachable from firstp as alive
 func mark(firstp *obj.Prog) {
 	for p := firstp; p != nil; p = p.Link {
 		if p.Opt != dead {
@@ -335,21 +333,19 @@
 	}
 }
 
-/*
- * find looping structure
- *
- * 1) find reverse postordering
- * 2) find approximate dominators,
- *	the actual dominators if the flow graph is reducible
- *	otherwise, dominators plus some other non-dominators.
- *	See Matthew S. Hecht and Jeffrey D. Ullman,
- *	"Analysis of a Simple Algorithm for Global Data Flow Problems",
- *	Conf.  Record of ACM Symp. on Principles of Prog. Langs, Boston, Massachusetts,
- *	Oct. 1-3, 1973, pp.  207-217.
- * 3) find all nodes with a predecessor dominated by the current node.
- *	such a node is a loop head.
- *	recursively, all preds with a greater rpo number are in the loop
- */
+// find looping structure
+//
+// 1) find reverse postordering
+// 2) find approximate dominators,
+//	the actual dominators if the flow graph is reducible
+//	otherwise, dominators plus some other non-dominators.
+//	See Matthew S. Hecht and Jeffrey D. Ullman,
+//	"Analysis of a Simple Algorithm for Global Data Flow Problems",
+//	Conf.  Record of ACM Symp. on Principles of Prog. Langs, Boston, Massachusetts,
+//	Oct. 1-3, 1973, pp.  207-217.
+// 3) find all nodes with a predecessor dominated by the current node.
+//	such a node is a loop head.
+//	recursively, all preds with a greater rpo number are in the loop
 func postorder(r *Flow, rpo2r []*Flow, n int32) int32 {
 	r.Rpo = 1
 	r1 := r.S1
@@ -903,7 +899,7 @@
 		return
 	}
 
-	if Debug_checknil > 1 { /* || strcmp(curfn->nname->sym->name, "f1") == 0 */
+	if Debug_checknil > 1 { // || strcmp(curfn->nname->sym->name, "f1") == 0
 		Dumpit("nilopt", g.Start, 0)
 	}
 
diff --git a/src/cmd/compile/internal/gc/racewalk.go b/src/cmd/compile/internal/gc/racewalk.go
index f127dfd..1f9e167 100644
--- a/src/cmd/compile/internal/gc/racewalk.go
+++ b/src/cmd/compile/internal/gc/racewalk.go
@@ -9,13 +9,20 @@
 	"strings"
 )
 
-// The racewalk pass modifies the code tree for the function as follows:
+// The instrument pass modifies the code tree for instrumentation.
+//
+// For flag_race it modifies the function as follows:
 //
 // 1. It inserts a call to racefuncenterfp at the beginning of each function.
 // 2. It inserts a call to racefuncexit at the end of each function.
 // 3. It inserts a call to raceread before each memory read.
 // 4. It inserts a call to racewrite before each memory write.
 //
+// For flag_msan:
+//
+// 1. It inserts a call to msanread before each memory read.
+// 2. It inserts a call to msanwrite before each memory write.
+//
 // The rewriting is not yet complete. Certain nodes are not rewritten
 // but should be.
 
@@ -24,11 +31,11 @@
 
 // Do not instrument the following packages at all,
 // at best instrumentation would cause infinite recursion.
-var omit_pkgs = []string{"runtime", "runtime/race"}
+var omit_pkgs = []string{"runtime/internal/atomic", "runtime/internal/sys", "runtime", "runtime/race", "runtime/msan"}
 
 // Only insert racefuncenterfp/racefuncexit into the following packages.
 // Memory accesses in the packages are either uninteresting or will cause false positives.
-var noinst_pkgs = []string{"sync", "sync/atomic"}
+var norace_inst_pkgs = []string{"sync", "sync/atomic"}
 
 func ispkgin(pkgs []string) bool {
 	if myimportpath != "" {
@@ -42,35 +49,35 @@
 	return false
 }
 
-// TODO(rsc): Remove. Put //go:norace on forkAndExecInChild instead.
-func isforkfunc(fn *Node) bool {
-	// Special case for syscall.forkAndExecInChild.
-	// In the child, this function must not acquire any locks, because
-	// they might have been locked at the time of the fork.  This means
-	// no rescheduling, no malloc calls, and no new stack segments.
-	// Race instrumentation does all of the above.
-	return myimportpath != "" && myimportpath == "syscall" && fn.Func.Nname.Sym.Name == "forkAndExecInChild"
-}
-
-func racewalk(fn *Node) {
-	if ispkgin(omit_pkgs) || isforkfunc(fn) || fn.Func.Norace {
+func instrument(fn *Node) {
+	if ispkgin(omit_pkgs) || fn.Func.Norace {
 		return
 	}
 
-	if !ispkgin(noinst_pkgs) {
-		racewalklist(fn.Nbody, nil)
+	if flag_race == 0 || !ispkgin(norace_inst_pkgs) {
+		instrumentlist(fn.Nbody, nil)
 
 		// nothing interesting for race detector in fn->enter
-		racewalklist(fn.Func.Exit, nil)
+		instrumentlist(fn.Func.Exit, nil)
 	}
 
-	nd := mkcall("racefuncenterfp", nil, nil, Nod(OADDR, nodfp, nil))
-	fn.Func.Enter = concat(list1(nd), fn.Func.Enter)
-	nd = mkcall("racefuncexit", nil, nil)
-	fn.Func.Exit = list(fn.Func.Exit, nd)
+	if flag_race != 0 {
+		// nodpc is the PC of the caller as extracted by
+		// getcallerpc. We use -widthptr(FP) for x86.
+		// BUG: this will not work on arm.
+		nodpc := Nod(OXXX, nil, nil)
+
+		*nodpc = *nodfp
+		nodpc.Type = Types[TUINTPTR]
+		nodpc.Xoffset = int64(-Widthptr)
+		nd := mkcall("racefuncenter", nil, nil, nodpc)
+		fn.Func.Enter = concat(list1(nd), fn.Func.Enter)
+		nd = mkcall("racefuncexit", nil, nil)
+		fn.Func.Exit = list(fn.Func.Exit, nd)
+	}
 
 	if Debug['W'] != 0 {
-		s := fmt.Sprintf("after racewalk %v", fn.Func.Nname.Sym)
+		s := fmt.Sprintf("after instrument %v", fn.Func.Nname.Sym)
 		dumplist(s, fn.Nbody)
 		s = fmt.Sprintf("enter %v", fn.Func.Nname.Sym)
 		dumplist(s, fn.Func.Enter)
@@ -79,12 +86,12 @@
 	}
 }
 
-func racewalklist(l *NodeList, init **NodeList) {
+func instrumentlist(l *NodeList, init **NodeList) {
 	var instr *NodeList
 
 	for ; l != nil; l = l.Next {
 		instr = nil
-		racewalknode(&l.N, &instr, 0, 0)
+		instrumentnode(&l.N, &instr, 0, 0)
 		if init == nil {
 			l.N.Ninit = concat(l.N.Ninit, instr)
 		} else {
@@ -96,7 +103,7 @@
 // walkexpr and walkstmt combined
 // walks the tree and adds calls to the
 // instrumentation code to top-level (statement) nodes' init
-func racewalknode(np **Node, init **NodeList, wr int, skip int) {
+func instrumentnode(np **Node, init **NodeList, wr int, skip int) {
 	n := *np
 
 	if n == nil {
@@ -104,35 +111,35 @@
 	}
 
 	if Debug['w'] > 1 {
-		Dump("racewalk-before", n)
+		Dump("instrument-before", n)
 	}
 	setlineno(n)
 	if init == nil {
-		Fatalf("racewalk: bad init list")
+		Fatalf("instrument: bad init list")
 	}
 	if init == &n.Ninit {
 		// If init == &n->ninit and n->ninit is non-nil,
-		// racewalknode might append it to itself.
+		// instrumentnode might append it to itself.
 		// nil it out and handle it separately before putting it back.
 		l := n.Ninit
 
 		n.Ninit = nil
-		racewalklist(l, nil)
-		racewalknode(&n, &l, wr, skip) // recurse with nil n->ninit
+		instrumentlist(l, nil)
+		instrumentnode(&n, &l, wr, skip) // recurse with nil n->ninit
 		appendinit(&n, l)
 		*np = n
 		return
 	}
 
-	racewalklist(n.Ninit, nil)
+	instrumentlist(n.Ninit, nil)
 
 	switch n.Op {
 	default:
-		Fatalf("racewalk: unknown node type %v", Oconv(int(n.Op), 0))
+		Fatalf("instrument: unknown node type %v", Oconv(int(n.Op), 0))
 
 	case OAS, OASWB, OAS2FUNC:
-		racewalknode(&n.Left, init, 1, 0)
-		racewalknode(&n.Right, init, 0, 0)
+		instrumentnode(&n.Left, init, 1, 0)
+		instrumentnode(&n.Right, init, 0, 0)
 		goto ret
 
 		// can't matter
@@ -144,7 +151,7 @@
 		for l := n.List; l != nil; l = l.Next {
 			switch l.N.Op {
 			case OCALLFUNC, OCALLMETH, OCALLINTER:
-				racewalknode(&l.N, &out, 0, 0)
+				instrumentnode(&l.N, &out, 0, 0)
 				out = list(out, l.N)
 				// Scan past OAS nodes copying results off stack.
 				// Those must not be instrumented, because the
@@ -156,7 +163,7 @@
 					out = list(out, l.N)
 				}
 			default:
-				racewalknode(&l.N, &out, 0, 0)
+				instrumentnode(&l.N, &out, 0, 0)
 				out = list(out, l.N)
 			}
 		}
@@ -164,22 +171,22 @@
 		goto ret
 
 	case ODEFER:
-		racewalknode(&n.Left, init, 0, 0)
+		instrumentnode(&n.Left, init, 0, 0)
 		goto ret
 
 	case OPROC:
-		racewalknode(&n.Left, init, 0, 0)
+		instrumentnode(&n.Left, init, 0, 0)
 		goto ret
 
 	case OCALLINTER:
-		racewalknode(&n.Left, init, 0, 0)
+		instrumentnode(&n.Left, init, 0, 0)
 		goto ret
 
 		// Instrument dst argument of runtime.writebarrier* calls
 	// as we do not instrument runtime code.
 	// typedslicecopy is instrumented in runtime.
 	case OCALLFUNC:
-		racewalknode(&n.Left, init, 0, 0)
+		instrumentnode(&n.Left, init, 0, 0)
 		goto ret
 
 	case ONOT,
@@ -189,32 +196,32 @@
 		OIMAG,
 		OCOM,
 		OSQRT:
-		racewalknode(&n.Left, init, wr, 0)
+		instrumentnode(&n.Left, init, wr, 0)
 		goto ret
 
 	case ODOTINTER:
-		racewalknode(&n.Left, init, 0, 0)
+		instrumentnode(&n.Left, init, 0, 0)
 		goto ret
 
 	case ODOT:
-		racewalknode(&n.Left, init, 0, 1)
+		instrumentnode(&n.Left, init, 0, 1)
 		callinstr(&n, init, wr, skip)
 		goto ret
 
 	case ODOTPTR: // dst = (*x).f with implicit *; otherwise it's ODOT+OIND
-		racewalknode(&n.Left, init, 0, 0)
+		instrumentnode(&n.Left, init, 0, 0)
 
 		callinstr(&n, init, wr, skip)
 		goto ret
 
 	case OIND: // *p
-		racewalknode(&n.Left, init, 0, 0)
+		instrumentnode(&n.Left, init, 0, 0)
 
 		callinstr(&n, init, wr, skip)
 		goto ret
 
 	case OSPTR, OLEN, OCAP:
-		racewalknode(&n.Left, init, 0, 0)
+		instrumentnode(&n.Left, init, 0, 0)
 		if Istype(n.Left.Type, TMAP) {
 			n1 := Nod(OCONVNOP, n.Left, nil)
 			n1.Type = Ptrto(Types[TUINT8])
@@ -243,18 +250,18 @@
 		OGT,
 		OADD,
 		OCOMPLEX:
-		racewalknode(&n.Left, init, wr, 0)
-		racewalknode(&n.Right, init, wr, 0)
+		instrumentnode(&n.Left, init, wr, 0)
+		instrumentnode(&n.Right, init, wr, 0)
 		goto ret
 
 	case OANDAND, OOROR:
-		racewalknode(&n.Left, init, wr, 0)
+		instrumentnode(&n.Left, init, wr, 0)
 
 		// walk has ensured the node has moved to a location where
 		// side effects are safe.
 		// n->right may not be executed,
 		// so instrumentation goes to n->right->ninit, not init.
-		racewalknode(&n.Right, &n.Right.Ninit, wr, 0)
+		instrumentnode(&n.Right, &n.Right.Ninit, wr, 0)
 
 		goto ret
 
@@ -263,57 +270,57 @@
 		goto ret
 
 	case OCONV:
-		racewalknode(&n.Left, init, wr, 0)
+		instrumentnode(&n.Left, init, wr, 0)
 		goto ret
 
 	case OCONVNOP:
-		racewalknode(&n.Left, init, wr, 0)
+		instrumentnode(&n.Left, init, wr, 0)
 		goto ret
 
 	case ODIV, OMOD:
-		racewalknode(&n.Left, init, wr, 0)
-		racewalknode(&n.Right, init, wr, 0)
+		instrumentnode(&n.Left, init, wr, 0)
+		instrumentnode(&n.Right, init, wr, 0)
 		goto ret
 
 	case OINDEX:
 		if !Isfixedarray(n.Left.Type) {
-			racewalknode(&n.Left, init, 0, 0)
+			instrumentnode(&n.Left, init, 0, 0)
 		} else if !islvalue(n.Left) {
 			// index of unaddressable array, like Map[k][i].
-			racewalknode(&n.Left, init, wr, 0)
+			instrumentnode(&n.Left, init, wr, 0)
 
-			racewalknode(&n.Right, init, 0, 0)
+			instrumentnode(&n.Right, init, 0, 0)
 			goto ret
 		}
 
-		racewalknode(&n.Right, init, 0, 0)
+		instrumentnode(&n.Right, init, 0, 0)
 		if n.Left.Type.Etype != TSTRING {
 			callinstr(&n, init, wr, skip)
 		}
 		goto ret
 
 	case OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR, OSLICESTR:
-		racewalknode(&n.Left, init, 0, 0)
-		racewalknode(&n.Right, init, 0, 0)
+		instrumentnode(&n.Left, init, 0, 0)
+		instrumentnode(&n.Right, init, 0, 0)
 		goto ret
 
 	case OKEY:
-		racewalknode(&n.Left, init, 0, 0)
-		racewalknode(&n.Right, init, 0, 0)
+		instrumentnode(&n.Left, init, 0, 0)
+		instrumentnode(&n.Right, init, 0, 0)
 		goto ret
 
 	case OADDR:
-		racewalknode(&n.Left, init, 0, 1)
+		instrumentnode(&n.Left, init, 0, 1)
 		goto ret
 
 		// n->left is Type* which is not interesting.
 	case OEFACE:
-		racewalknode(&n.Right, init, 0, 0)
+		instrumentnode(&n.Right, init, 0, 0)
 
 		goto ret
 
 	case OITAB:
-		racewalknode(&n.Left, init, 0, 0)
+		instrumentnode(&n.Left, init, 0, 0)
 		goto ret
 
 		// should not appear in AST by now
@@ -357,31 +364,31 @@
 		OAS2RECV,
 		OAS2MAPR,
 		OASOP:
-		Yyerror("racewalk: %v must be lowered by now", Oconv(int(n.Op), 0))
+		Yyerror("instrument: %v must be lowered by now", Oconv(int(n.Op), 0))
 
 		goto ret
 
 		// impossible nodes: only appear in backend.
 	case ORROTC, OEXTEND:
-		Yyerror("racewalk: %v cannot exist now", Oconv(int(n.Op), 0))
+		Yyerror("instrument: %v cannot exist now", Oconv(int(n.Op), 0))
 		goto ret
 
 	case OGETG:
-		Yyerror("racewalk: OGETG can happen only in runtime which we don't instrument")
+		Yyerror("instrument: OGETG can happen only in runtime which we don't instrument")
 		goto ret
 
 	case OFOR:
 		if n.Left != nil {
-			racewalknode(&n.Left, &n.Left.Ninit, 0, 0)
+			instrumentnode(&n.Left, &n.Left.Ninit, 0, 0)
 		}
 		if n.Right != nil {
-			racewalknode(&n.Right, &n.Right.Ninit, 0, 0)
+			instrumentnode(&n.Right, &n.Right.Ninit, 0, 0)
 		}
 		goto ret
 
 	case OIF, OSWITCH:
 		if n.Left != nil {
-			racewalknode(&n.Left, &n.Left.Ninit, 0, 0)
+			instrumentnode(&n.Left, &n.Left.Ninit, 0, 0)
 		}
 		goto ret
 
@@ -418,16 +425,17 @@
 
 ret:
 	if n.Op != OBLOCK { // OBLOCK is handled above in a special way.
-		racewalklist(n.List, init)
+		instrumentlist(n.List, init)
 	}
-	racewalklist(n.Nbody, nil)
-	racewalklist(n.Rlist, nil)
+	instrumentlist(n.Nbody, nil)
+	instrumentlist(n.Rlist, nil)
 	*np = n
 }
 
 func isartificial(n *Node) bool {
 	// compiler-emitted artificial things that we do not want to instrument,
-	// cant' possibly participate in a data race.
+	// can't possibly participate in a data race.
+	// can't be seen by C/C++ and therefore irrelevant for msan.
 	if n.Op == ONAME && n.Sym != nil && n.Sym.Name != "" {
 		if n.Sym.Name == "_" {
 			return true
@@ -489,13 +497,31 @@
 		n = treecopy(n, 0)
 		makeaddable(n)
 		var f *Node
-		if t.Etype == TSTRUCT || Isfixedarray(t) {
+		if flag_msan != 0 {
+			name := "msanread"
+			if wr != 0 {
+				name = "msanwrite"
+			}
+			// dowidth may not have been called for PEXTERN.
+			dowidth(t)
+			w := t.Width
+			if w == BADWIDTH {
+				Fatalf("instrument: %v badwidth", t)
+			}
+			f = mkcall(name, nil, init, uintptraddr(n), Nodintconst(w))
+		} else if flag_race != 0 && (t.Etype == TSTRUCT || Isfixedarray(t)) {
 			name := "racereadrange"
 			if wr != 0 {
 				name = "racewriterange"
 			}
-			f = mkcall(name, nil, init, uintptraddr(n), Nodintconst(t.Width))
-		} else {
+			// dowidth may not have been called for PEXTERN.
+			dowidth(t)
+			w := t.Width
+			if w == BADWIDTH {
+				Fatalf("instrument: %v badwidth", t)
+			}
+			f = mkcall(name, nil, init, uintptraddr(n), Nodintconst(w))
+		} else if flag_race != 0 {
 			name := "raceread"
 			if wr != 0 {
 				name = "racewrite"
diff --git a/src/cmd/compile/internal/gc/range.go b/src/cmd/compile/internal/gc/range.go
index 59f7d40..4386bcf 100644
--- a/src/cmd/compile/internal/gc/range.go
+++ b/src/cmd/compile/internal/gc/range.go
@@ -6,9 +6,7 @@
 
 import "cmd/internal/obj"
 
-/*
- * range
- */
+// range
 func typecheckrange(n *Node) {
 	var toomany int
 	var why string
@@ -340,7 +338,7 @@
 //
 // Parameters are as in walkrange: "for v1, v2 = range a".
 func memclrrange(n, v1, v2, a *Node) bool {
-	if Debug['N'] != 0 || flag_race != 0 {
+	if Debug['N'] != 0 || instrumenting {
 		return false
 	}
 	if v1 == nil || v2 != nil {
diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go
index e7138d9..deaeb46 100644
--- a/src/cmd/compile/internal/gc/reflect.go
+++ b/src/cmd/compile/internal/gc/reflect.go
@@ -12,9 +12,7 @@
 	"sort"
 )
 
-/*
- * runtime interface and reflection data structures
- */
+// runtime interface and reflection data structures
 var signatlist *NodeList
 
 // byMethodNameAndPackagePath sorts method signatures by name, then package path.
@@ -237,10 +235,8 @@
 	return i
 }
 
-/*
- * f is method type, with receiver.
- * return function type, receiver as first argument (or not).
- */
+// f is method type, with receiver.
+// return function type, receiver as first argument (or not).
 func methodfunc(f *Type, receiver *Type) *Type {
 	var in *NodeList
 	if receiver != nil {
@@ -477,10 +473,8 @@
 	return dsymptr(s, ot, pkg.Pathsym, 0)
 }
 
-/*
- * uncommonType
- * ../../runtime/type.go:/uncommonType
- */
+// uncommonType
+// ../../runtime/type.go:/uncommonType
 func dextratype(sym *Sym, off int, t *Type, ptroff int) int {
 	m := methods(t)
 	if t.Sym == nil && len(m) == 0 {
@@ -686,10 +680,8 @@
 	}
 }
 
-/*
- * commonType
- * ../../runtime/type.go:/commonType
- */
+// commonType
+// ../../runtime/type.go:/commonType
 
 var dcommontype_algarray *Sym
 
@@ -997,7 +989,7 @@
 		dupok = obj.DUPOK
 	}
 
-	if compiling_runtime != 0 && (tbase == Types[tbase.Etype] || tbase == bytetype || tbase == runetype || tbase == errortype) { // int, float, etc
+	if localpkg.Name == "runtime" && (tbase == Types[tbase.Etype] || tbase == bytetype || tbase == runetype || tbase == errortype) { // int, float, etc
 		goto ok
 	}
 
@@ -1040,7 +1032,7 @@
 			ot = dsymptr(s, ot, s1, 0)
 		}
 
-		// ../../runtime/type.go:/ChanType
+	// ../../runtime/type.go:/ChanType
 	case TCHAN:
 		s1 := dtypesym(t.Type)
 
@@ -1114,7 +1106,7 @@
 			ot = dsymptr(s, ot, dtypesym(a.type_), 0)
 		}
 
-		// ../../../runtime/type.go:/MapType
+	// ../../../runtime/type.go:/MapType
 	case TMAP:
 		s1 := dtypesym(t.Down)
 
@@ -1162,7 +1154,7 @@
 		xt = ot - 2*Widthptr
 		ot = dsymptr(s, ot, s1, 0)
 
-		// ../../runtime/type.go:/StructType
+	// ../../runtime/type.go:/StructType
 	// for security, only the exported fields.
 	case TSTRUCT:
 		n := 0
@@ -1188,7 +1180,8 @@
 				}
 			} else {
 				ot = dgostringptr(s, ot, "")
-				if t1.Type.Sym != nil && t1.Type.Sym.Pkg == builtinpkg {
+				if t1.Type.Sym != nil &&
+					(t1.Type.Sym.Pkg == builtinpkg || !exportname(t1.Type.Sym.Name)) {
 					ot = dgopkgpath(s, ot, localpkg)
 				} else {
 					ot = dgostringptr(s, ot, "")
@@ -1273,8 +1266,8 @@
 	// so this is as good as any.
 	// another possible choice would be package main,
 	// but using runtime means fewer copies in .6 files.
-	if compiling_runtime != 0 {
-		for i := 1; i <= TBOOL; i++ {
+	if localpkg.Name == "runtime" {
+		for i := EType(1); i <= TBOOL; i++ {
 			dtypesym(Ptrto(Types[i]))
 		}
 		dtypesym(Ptrto(Types[TSTRING]))
@@ -1292,6 +1285,9 @@
 		if flag_race != 0 {
 			dimportpath(racepkg)
 		}
+		if flag_msan != 0 {
+			dimportpath(msanpkg)
+		}
 		dimportpath(mkpkg("main"))
 	}
 }
diff --git a/src/cmd/compile/internal/gc/reg.go b/src/cmd/compile/internal/gc/reg.go
index ff6ec32..f575094 100644
--- a/src/cmd/compile/internal/gc/reg.go
+++ b/src/cmd/compile/internal/gc/reg.go
@@ -48,7 +48,7 @@
 	width      int
 	id         int // index in vars
 	name       int8
-	etype      int8
+	etype      EType
 	addr       int8
 }
 
@@ -218,10 +218,8 @@
 	}
 }
 
-/*
- * add mov b,rn
- * just after r
- */
+// add mov b,rn
+// just after r
 func addmove(r *Flow, bn int, rn int, f int) {
 	p1 := Ctxt.NewProg()
 	Clearp(p1)
@@ -251,7 +249,7 @@
 	p1.As = int16(Thearch.Optoas(OAS, Types[uint8(v.etype)]))
 
 	// TODO(rsc): Remove special case here.
-	if (Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9') && v.etype == TBOOL {
+	if (Thearch.Thechar == '0' || Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9') && v.etype == TBOOL {
 		p1.As = int16(Thearch.Optoas(OAS, Types[TUINT8]))
 	}
 	p1.From.Type = obj.TYPE_REG
@@ -282,9 +280,7 @@
 }
 
 func mkvar(f *Flow, a *obj.Addr) Bits {
-	/*
-	 * mark registers used
-	 */
+	// mark registers used
 	if a.Type == obj.TYPE_NONE {
 		return zbits
 	}
@@ -306,7 +302,7 @@
 		// TODO(rsc): Remove special case here.
 	case obj.TYPE_ADDR:
 		var bit Bits
-		if Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9' {
+		if Thearch.Thechar == '0' || Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9' {
 			goto memcase
 		}
 		a.Type = obj.TYPE_MEM
@@ -356,7 +352,7 @@
 	if node.Sym == nil || node.Sym.Name[0] == '.' {
 		return zbits
 	}
-	et := int(a.Etype)
+	et := EType(a.Etype)
 	o := a.Offset
 	w := a.Width
 	if w < 0 {
@@ -369,7 +365,7 @@
 		v = &vars[i]
 		if v.node == node && int(v.name) == n {
 			if v.offset == o {
-				if int(v.etype) == et {
+				if v.etype == et {
 					if int64(v.width) == w {
 						// TODO(rsc): Remove special case for arm here.
 						if flag == 0 || Thearch.Thechar != '5' {
@@ -423,7 +419,7 @@
 	v.id = i
 	v.offset = o
 	v.name = int8(n)
-	v.etype = int8(et)
+	v.etype = et
 	v.width = int(w)
 	v.addr = int8(flag) // funny punning
 	v.node = node
@@ -460,7 +456,7 @@
 	}
 
 	// Treat values with their address taken as live at calls,
-	// because the garbage collector's liveness analysis in ../gc/plive.c does.
+	// because the garbage collector's liveness analysis in plive.go does.
 	// These must be consistent or else we will elide stores and the garbage
 	// collector will see uninitialized data.
 	// The typical case where our own analysis is out of sync is when the
@@ -473,7 +469,7 @@
 	// sets addrtaken, even though it ends up not being actually shared.
 	// If we were better about _ elision, _ = &x would suffice too.
 	// The broader := in a closure problem is mentioned in a comment in
-	// closure.c:/^typecheckclosure and dcl.c:/^oldname.
+	// closure.go:/^typecheckclosure and dcl.go:/^oldname.
 	if node.Addrtaken {
 		v.addr = 1
 	}
@@ -491,7 +487,7 @@
 	}
 
 	if Debug['R'] != 0 {
-		fmt.Printf("bit=%2d et=%v w=%d+%d %v %v flag=%d\n", i, Econv(int(et), 0), o, w, Nconv(node, obj.FmtSharp), Ctxt.Dconv(a), v.addr)
+		fmt.Printf("bit=%2d et=%v w=%d+%d %v %v flag=%d\n", i, Econv(et), o, w, Nconv(node, obj.FmtSharp), Ctxt.Dconv(a), v.addr)
 	}
 	Ostats.Nvar++
 
@@ -655,7 +651,7 @@
 	r.regno = 0
 	switch v.etype {
 	default:
-		Fatalf("unknown etype %d/%v", Bitno(b), Econv(int(v.etype), 0))
+		Fatalf("unknown etype %d/%v", Bitno(b), Econv(v.etype))
 
 	case TINT8,
 		TUINT8,
@@ -1036,11 +1032,9 @@
 func regopt(firstp *obj.Prog) {
 	mergetemp(firstp)
 
-	/*
-	 * control flow is more complicated in generated go code
-	 * than in generated c code.  define pseudo-variables for
-	 * registers, so we have complete register usage information.
-	 */
+	// control flow is more complicated in generated go code
+	// than in generated c code.  define pseudo-variables for
+	// registers, so we have complete register usage information.
 	var nreg int
 	regnames := Thearch.Regnames(&nreg)
 
@@ -1063,12 +1057,10 @@
 	ivar = zbits
 	ovar = zbits
 
-	/*
-	 * pass 1
-	 * build aux data structure
-	 * allocate pcs
-	 * find use and set of variables
-	 */
+	// pass 1
+	// build aux data structure
+	// allocate pcs
+	// find use and set of variables
 	g := Flowstart(firstp, func() interface{} { return new(Reg) })
 	if g == nil {
 		for i := 0; i < nvar; i++ {
@@ -1151,7 +1143,7 @@
 		}
 
 		if Debug['R'] != 0 && Debug['v'] != 0 {
-			fmt.Printf("bit=%2d addr=%d et=%v w=%-2d s=%v + %d\n", i, v.addr, Econv(int(v.etype), 0), v.width, v.node, v.offset)
+			fmt.Printf("bit=%2d addr=%d et=%v w=%-2d s=%v + %d\n", i, v.addr, Econv(v.etype), v.width, v.node, v.offset)
 		}
 	}
 
@@ -1159,23 +1151,19 @@
 		Dumpit("pass1", firstf, 1)
 	}
 
-	/*
-	 * pass 2
-	 * find looping structure
-	 */
+	// pass 2
+	// find looping structure
 	flowrpo(g)
 
 	if Debug['R'] != 0 && Debug['v'] != 0 {
 		Dumpit("pass2", firstf, 1)
 	}
 
-	/*
-	 * pass 2.5
-	 * iterate propagating fat vardef covering forward
-	 * r->act records vars with a VARDEF since the last CALL.
-	 * (r->act will be reused in pass 5 for something else,
-	 * but we'll be done with it by then.)
-	 */
+	// pass 2.5
+	// iterate propagating fat vardef covering forward
+	// r->act records vars with a VARDEF since the last CALL.
+	// (r->act will be reused in pass 5 for something else,
+	// but we'll be done with it by then.)
 	active := 0
 
 	for f := firstf; f != nil; f = f.Link {
@@ -1192,11 +1180,9 @@
 		}
 	}
 
-	/*
-	 * pass 3
-	 * iterate propagating usage
-	 * 	back until flow graph is complete
-	 */
+	// pass 3
+	// iterate propagating usage
+	// 	back until flow graph is complete
 	var f1 *Flow
 	var i int
 	var f *Flow
@@ -1212,7 +1198,7 @@
 		}
 	}
 
-	/* pick up unreachable code */
+	// pick up unreachable code
 loop11:
 	i = 0
 
@@ -1235,11 +1221,9 @@
 		Dumpit("pass3", firstf, 1)
 	}
 
-	/*
-	 * pass 4
-	 * iterate propagating register/variable synchrony
-	 * 	forward until graph is complete
-	 */
+	// pass 4
+	// iterate propagating register/variable synchrony
+	// 	forward until graph is complete
 loop2:
 	change = 0
 
@@ -1255,10 +1239,8 @@
 		Dumpit("pass4", firstf, 1)
 	}
 
-	/*
-	 * pass 4.5
-	 * move register pseudo-variables into regu.
-	 */
+	// pass 4.5
+	// move register pseudo-variables into regu.
 	mask := uint64((1 << uint(nreg)) - 1)
 	for f := firstf; f != nil; f = f.Link {
 		r := f.Data.(*Reg)
@@ -1278,11 +1260,9 @@
 		Dumpit("pass4.5", firstf, 1)
 	}
 
-	/*
-	 * pass 5
-	 * isolate regions
-	 * calculate costs (paint1)
-	 */
+	// pass 5
+	// isolate regions
+	// calculate costs (paint1)
 	var bit Bits
 	if f := firstf; f != nil {
 		r := f.Data.(*Reg)
@@ -1358,11 +1338,9 @@
 		Dumpit("pass5", firstf, 1)
 	}
 
-	/*
-	 * pass 6
-	 * determine used registers (paint2)
-	 * replace code (paint3)
-	 */
+	// pass 6
+	// determine used registers (paint2)
+	// replace code (paint3)
 	if Debug['R'] != 0 && Debug['v'] != 0 {
 		fmt.Printf("\nregisterizing\n")
 	}
@@ -1379,16 +1357,14 @@
 		if rgp.regno != 0 {
 			if Debug['R'] != 0 && Debug['v'] != 0 {
 				v := &vars[rgp.varno]
-				fmt.Printf("registerize %v+%d (bit=%2d et=%v) in %v usedreg=%#x vreg=%#x\n", v.node, v.offset, rgp.varno, Econv(int(v.etype), 0), obj.Rconv(int(rgp.regno)), usedreg, vreg)
+				fmt.Printf("registerize %v+%d (bit=%2d et=%v) in %v usedreg=%#x vreg=%#x\n", v.node, v.offset, rgp.varno, Econv(v.etype), obj.Rconv(int(rgp.regno)), usedreg, vreg)
 			}
 
 			paint3(rgp.enter, int(rgp.varno), vreg, int(rgp.regno))
 		}
 	}
 
-	/*
-	 * free aux structures. peep allocates new ones.
-	 */
+	// free aux structures. peep allocates new ones.
 	for i := 0; i < nvar; i++ {
 		vars[i].node.SetOpt(nil)
 	}
@@ -1404,17 +1380,13 @@
 		firstf = nil
 	}
 
-	/*
-	 * pass 7
-	 * peep-hole on basic block
-	 */
+	// pass 7
+	// peep-hole on basic block
 	if Debug['R'] == 0 || Debug['P'] != 0 {
 		Thearch.Peep(firstp)
 	}
 
-	/*
-	 * eliminate nops
-	 */
+	// eliminate nops
 	for p := firstp; p != nil; p = p.Link {
 		for p.Link != nil && p.Link.As == obj.ANOP {
 			p.Link = p.Link.Link
diff --git a/src/cmd/compile/internal/gc/select.go b/src/cmd/compile/internal/gc/select.go
index 83f53c1..e770c8f 100644
--- a/src/cmd/compile/internal/gc/select.go
+++ b/src/cmd/compile/internal/gc/select.go
@@ -4,9 +4,7 @@
 
 package gc
 
-/*
- * select
- */
+// select
 func typecheckselect(sel *Node) {
 	var ncase *Node
 	var n *Node
@@ -109,7 +107,7 @@
 	}
 
 	// optimization: one-case select: single op.
-	// TODO(rsc): Reenable optimization once order.c can handle it.
+	// TODO(rsc): Reenable optimization once order.go can handle it.
 	// golang.org/issue/7672.
 	if i == 1 {
 		cas := sel.List.N
diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go
index c1165cd..3a7560b 100644
--- a/src/cmd/compile/internal/gc/sinit.go
+++ b/src/cmd/compile/internal/gc/sinit.go
@@ -9,9 +9,7 @@
 	"fmt"
 )
 
-/*
- * static initialization
- */
+// static initialization
 const (
 	InitNotStarted = 0
 	InitDone       = 1
@@ -248,10 +246,8 @@
 	return lout
 }
 
-/*
- * compilation of top-level (static) assignments
- * into DATA statements if at all possible.
- */
+// compilation of top-level (static) assignments
+// into DATA statements if at all possible.
 func staticinit(n *Node, out **NodeList) bool {
 	if n.Op != ONAME || n.Class != PEXTERN || n.Name.Defn == nil || n.Name.Defn.Op != OAS {
 		Fatalf("staticinit")
@@ -489,13 +485,11 @@
 	return false
 }
 
-/*
- * from here down is the walk analysis
- * of composite literals.
- * most of the work is to generate
- * data statements for the constant
- * part of the composite literal.
- */
+// from here down is the walk analysis
+// of composite literals.
+// most of the work is to generate
+// data statements for the constant
+// part of the composite literal.
 func staticname(t *Type, ctxt int) *Node {
 	n := newname(Lookupf("statictmp_%.4d", statuniqgen))
 	statuniqgen++
@@ -765,7 +759,7 @@
 	// set auto to point at new temp or heap (3 assign)
 	var a *Node
 	if x := prealloc[n]; x != nil {
-		// temp allocated during order.c for dddarg
+		// temp allocated during order.go for dddarg
 		x.Type = t
 
 		if vstat == nil {
diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go
index 4cdfa5c..4604fa6 100644
--- a/src/cmd/compile/internal/gc/ssa.go
+++ b/src/cmd/compile/internal/gc/ssa.go
@@ -134,9 +134,6 @@
 			s.Unimplementedf("local variable with class %s%s unimplemented", classnames[n.Class&^PHEAP], str)
 		}
 	}
-	// nodfp is a special argument which is the function's FP.
-	aux := &ssa.ArgSymbol{Typ: Types[TUINTPTR], Node: nodfp}
-	s.decladdrs[nodfp] = s.entryNewValue1A(ssa.OpAddr, Types[TUINTPTR], aux, s.sp)
 
 	// Convert the AST-based IR to the SSA-based IR
 	s.stmtList(fn.Func.Enter)
@@ -847,8 +844,8 @@
 }
 
 type opAndType struct {
-	op    uint8
-	etype uint8
+	op    Op
+	etype EType
 }
 
 var opToSSA = map[opAndType]ssa.Op{
@@ -1061,7 +1058,7 @@
 	opAndType{OSQRT, TFLOAT64}: ssa.OpSqrt,
 }
 
-func (s *state) concreteEtype(t *Type) uint8 {
+func (s *state) concreteEtype(t *Type) EType {
 	e := t.Etype
 	switch e {
 	default:
@@ -1084,11 +1081,11 @@
 	}
 }
 
-func (s *state) ssaOp(op uint8, t *Type) ssa.Op {
+func (s *state) ssaOp(op Op, t *Type) ssa.Op {
 	etype := s.concreteEtype(t)
 	x, ok := opToSSA[opAndType{op, etype}]
 	if !ok {
-		s.Unimplementedf("unhandled binary op %s %s", opnames[op], Econv(int(etype), 0))
+		s.Unimplementedf("unhandled binary op %s %s", opnames[op], Econv(etype))
 	}
 	return x
 }
@@ -1102,20 +1099,20 @@
 }
 
 type opAndTwoTypes struct {
-	op     uint8
-	etype1 uint8
-	etype2 uint8
+	op     Op
+	etype1 EType
+	etype2 EType
 }
 
 type twoTypes struct {
-	etype1 uint8
-	etype2 uint8
+	etype1 EType
+	etype2 EType
 }
 
 type twoOpsAndType struct {
 	op1              ssa.Op
 	op2              ssa.Op
-	intermediateType uint8
+	intermediateType EType
 }
 
 var fpConvOpToSSA = map[twoTypes]twoOpsAndType{
@@ -1241,21 +1238,21 @@
 	opAndTwoTypes{ORSH, TUINT64, TUINT64}: ssa.OpRsh64Ux64,
 }
 
-func (s *state) ssaShiftOp(op uint8, t *Type, u *Type) ssa.Op {
+func (s *state) ssaShiftOp(op Op, t *Type, u *Type) ssa.Op {
 	etype1 := s.concreteEtype(t)
 	etype2 := s.concreteEtype(u)
 	x, ok := shiftOpToSSA[opAndTwoTypes{op, etype1, etype2}]
 	if !ok {
-		s.Unimplementedf("unhandled shift op %s etype=%s/%s", opnames[op], Econv(int(etype1), 0), Econv(int(etype2), 0))
+		s.Unimplementedf("unhandled shift op %s etype=%s/%s", opnames[op], Econv(etype1), Econv(etype2))
 	}
 	return x
 }
 
-func (s *state) ssaRotateOp(op uint8, t *Type) ssa.Op {
+func (s *state) ssaRotateOp(op Op, t *Type) ssa.Op {
 	etype1 := s.concreteEtype(t)
 	x, ok := opToSSA[opAndType{op, etype1}]
 	if !ok {
-		s.Unimplementedf("unhandled rotate op %s etype=%s", opnames[op], Econv(int(etype1), 0))
+		s.Unimplementedf("unhandled rotate op %s etype=%s", opnames[op], Econv(etype1))
 	}
 	return x
 }
@@ -1402,7 +1399,7 @@
 			return nil
 		}
 		if etypesign(from.Etype) != etypesign(to.Etype) {
-			s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, Econv(int(from.Etype), 0), to, Econv(int(to.Etype), 0))
+			s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, Econv(from.Etype), to, Econv(to.Etype))
 			return nil
 		}
 
@@ -1547,7 +1544,7 @@
 				s.newValue1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x)))
 		}
 
-		s.Unimplementedf("unhandled OCONV %s -> %s", Econv(int(n.Left.Type.Etype), 0), Econv(int(n.Type.Etype), 0))
+		s.Unimplementedf("unhandled OCONV %s -> %s", Econv(n.Left.Type.Etype), Econv(n.Type.Etype))
 		return nil
 
 	case ODOTTYPE:
@@ -1990,7 +1987,7 @@
 			}
 			if haspointers(et) {
 				// TODO: just one write barrier call for all of these writes?
-				// TODO: maybe just one writeBarrierEnabled check?
+				// TODO: maybe just one writeBarrier.enabled check?
 				s.insertWB(et, addr, n.Lineno)
 			}
 		}
@@ -2263,7 +2260,7 @@
 
 // etypesign returns the signed-ness of e, for integer/pointer etypes.
 // -1 means signed, +1 means unsigned, 0 means non-integer/non-pointer.
-func etypesign(e uint8) int8 {
+func etypesign(e EType) int8 {
 	switch e {
 	case TINT8, TINT16, TINT32, TINT64, TINT:
 		return -1
@@ -2313,13 +2310,17 @@
 		case PPARAM:
 			// parameter slot
 			v := s.decladdrs[n]
-			if v == nil {
-				if flag_race != 0 && n.String() == ".fp" {
-					s.Unimplementedf("race detector mishandles nodfp")
-				}
-				s.Fatalf("addr of undeclared ONAME %v. declared: %v", n, s.decladdrs)
+			if v != nil {
+				return v
 			}
-			return v
+			if n.String() == ".fp" {
+				// Special arg that points to the frame pointer.
+				// (Used by the race detector, others?)
+				aux := s.lookupSymbol(n, &ssa.ArgSymbol{Typ: n.Type, Node: n})
+				return s.entryNewValue1A(ssa.OpAddr, t, aux, s.sp)
+			}
+			s.Fatalf("addr of undeclared ONAME %v. declared: %v", n, s.decladdrs)
+			return nil
 		case PAUTO:
 			// We need to regenerate the address of autos
 			// at every use.  This prevents LEA instructions
@@ -2609,13 +2610,14 @@
 // Note: there must be no GC suspension points between the write and
 // the call that this function inserts.
 func (s *state) insertWB(t *Type, p *ssa.Value, line int32) {
-	// if writeBarrierEnabled {
+	// if writeBarrier.enabled {
 	//   typedmemmove_nostore(&t, p)
 	// }
 	bThen := s.f.NewBlock(ssa.BlockPlain)
 
-	aux := &ssa.ExternSymbol{Types[TBOOL], syslook("writeBarrierEnabled", 0).Sym}
+	aux := &ssa.ExternSymbol{Types[TBOOL], syslook("writeBarrier", 0).Sym}
 	flagaddr := s.newValue1A(ssa.OpAddr, Ptrto(Types[TBOOL]), aux, s.sb)
+	// TODO: select the .enabled field.  It is currently first, so not needed for now.
 	flag := s.newValue2(ssa.OpLoad, Types[TBOOL], flagaddr, s.mem())
 	b := s.endBlock()
 	b.Kind = ssa.BlockIf
diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go
index df6b6f6..573a6e4 100644
--- a/src/cmd/compile/internal/gc/subr.go
+++ b/src/cmd/compile/internal/gc/subr.go
@@ -34,7 +34,7 @@
 }
 
 func parserline() int {
-	if parsing && theparser.Lookahead() > 0 {
+	if oldparser != 0 && parsing && theparser.Lookahead() > 0 {
 		// parser has one symbol lookahead
 		return int(prevlineno)
 	}
@@ -347,9 +347,9 @@
 	}
 }
 
-func Nod(op int, nleft *Node, nright *Node) *Node {
+func Nod(op Op, nleft *Node, nright *Node) *Node {
 	n := new(Node)
-	n.Op = uint8(op)
+	n.Op = op
 	n.Left = nleft
 	n.Right = nright
 	n.Lineno = int32(parserline())
@@ -382,7 +382,7 @@
 	if n.Orig != nil {
 		return
 	}
-	norig := Nod(int(n.Op), nil, nil)
+	norig := Nod(n.Op, nil, nil)
 	*norig = *n
 	n.Orig = norig
 }
@@ -546,11 +546,11 @@
 	if key != nil {
 		var bad *Type
 		atype := algtype1(key, &bad)
-		var mtype int
+		var mtype EType
 		if bad == nil {
-			mtype = int(key.Etype)
+			mtype = key.Etype
 		} else {
-			mtype = int(bad.Etype)
+			mtype = bad.Etype
 		}
 		switch mtype {
 		default:
@@ -581,9 +581,9 @@
 	return t
 }
 
-func typ(et int) *Type {
+func typ(et EType) *Type {
 	t := new(Type)
-	t.Etype = uint8(et)
+	t.Etype = et
 	t.Width = BADWIDTH
 	t.Lineno = int(lineno)
 	t.Orig = t
@@ -777,7 +777,7 @@
 	return true
 }
 
-func isptrto(t *Type, et int) bool {
+func isptrto(t *Type, et EType) bool {
 	if t == nil {
 		return false
 	}
@@ -788,14 +788,14 @@
 	if t == nil {
 		return false
 	}
-	if int(t.Etype) != et {
+	if t.Etype != et {
 		return false
 	}
 	return true
 }
 
-func Istype(t *Type, et int) bool {
-	return t != nil && int(t.Etype) == et
+func Istype(t *Type, et EType) bool {
+	return t != nil && t.Etype == et
 }
 
 func Isfixedarray(t *Type) bool {
@@ -846,10 +846,8 @@
 	return false
 }
 
-/*
- * given receiver of type t (t == r or t == *r)
- * return type to hang methods off (r).
- */
+// given receiver of type t (t == r or t == *r)
+// return type to hang methods off (r).
 func methtype(t *Type, mustname int) *Type {
 	if t == nil {
 		return nil
@@ -890,7 +888,7 @@
 	return t
 }
 
-func cplxsubtype(et int) int {
+func cplxsubtype(et EType) EType {
 	switch et {
 	case TCOMPLEX64:
 		return TFLOAT32
@@ -899,7 +897,7 @@
 		return TFLOAT64
 	}
 
-	Fatalf("cplxsubtype: %v\n", Econv(int(et), 0))
+	Fatalf("cplxsubtype: %v\n", Econv(et))
 	return 0
 }
 
@@ -1056,7 +1054,7 @@
 // Is type src assignment compatible to type dst?
 // If so, return op code to use in conversion.
 // If not, return 0.
-func assignop(src *Type, dst *Type, why *string) int {
+func assignop(src *Type, dst *Type, why *string) Op {
 	if why != nil {
 		*why = ""
 	}
@@ -1180,7 +1178,7 @@
 // Can we convert a value of type src to a value of type dst?
 // If so, return op code to use in conversion (maybe OCONVNOP).
 // If not, return 0.
-func convertop(src *Type, dst *Type, why *string) int {
+func convertop(src *Type, dst *Type, why *string) Op {
 	if why != nil {
 		*why = ""
 	}
@@ -1383,9 +1381,7 @@
 	}
 }
 
-/*
- * Is this a 64-bit type?
- */
+// Is this a 64-bit type?
 func Is64(t *Type) bool {
 	if t == nil {
 		return false
@@ -1398,12 +1394,10 @@
 	return false
 }
 
-/*
- * Is a conversion between t1 and t2 a no-op?
- */
+// Is a conversion between t1 and t2 a no-op?
 func Noconv(t1 *Type, t2 *Type) bool {
-	e1 := int(Simtype[t1.Etype])
-	e2 := int(Simtype[t2.Etype])
+	e1 := Simtype[t1.Etype]
+	e2 := Simtype[t2.Etype]
 
 	switch e1 {
 	case TINT8, TUINT8:
@@ -1501,18 +1495,16 @@
 	return n
 }
 
-/*
- * compute a hash value for type t.
- * if t is a method type, ignore the receiver
- * so that the hash can be used in interface checks.
- * %T already contains
- * all the necessary logic to generate a representation
- * of the type that completely describes it.
- * using smprint here avoids duplicating that code.
- * using md5 here is overkill, but i got tired of
- * accidental collisions making the runtime think
- * two types are equal when they really aren't.
- */
+// compute a hash value for type t.
+// if t is a method type, ignore the receiver
+// so that the hash can be used in interface checks.
+// %T already contains
+// all the necessary logic to generate a representation
+// of the type that completely describes it.
+// using smprint here avoids duplicating that code.
+// using md5 here is overkill, but i got tired of
+// accidental collisions making the runtime think
+// two types are equal when they really aren't.
 func typehash(t *Type) uint32 {
 	var p string
 
@@ -1613,12 +1605,10 @@
 	}
 }
 
-/*
- * calculate sethi/ullman number
- * roughly how many registers needed to
- * compile a node. used to compile the
- * hardest side first to minimize registers.
- */
+// calculate sethi/ullman number
+// roughly how many registers needed to
+// compile a node. used to compile the
+// hardest side first to minimize registers.
 func ullmancalc(n *Node) {
 	if n == nil {
 		return
@@ -1643,9 +1633,9 @@
 		ul = UINF
 		goto out
 
-		// hard with race detector
+		// hard with instrumented code
 	case OANDAND, OOROR:
-		if flag_race != 0 {
+		if instrumenting {
 			ul = UINF
 			goto out
 		}
@@ -1673,7 +1663,7 @@
 	n.Ullman = uint8(ul)
 }
 
-func badtype(o int, tl *Type, tr *Type) {
+func badtype(op Op, tl *Type, tr *Type) {
 	fmt_ := ""
 	if tl != nil {
 		fmt_ += fmt.Sprintf("\n\t%v", tl)
@@ -1692,12 +1682,10 @@
 	}
 
 	s := fmt_
-	Yyerror("illegal types for operand: %v%s", Oconv(int(o), 0), s)
+	Yyerror("illegal types for operand: %v%s", Oconv(int(op), 0), s)
 }
 
-/*
- * iterator to walk a structure declaration
- */
+// iterator to walk a structure declaration
 func Structfirst(s *Iter, nn **Type) *Type {
 	var t *Type
 
@@ -1749,9 +1737,7 @@
 	return t
 }
 
-/*
- * iterator to this and inargs in a function
- */
+// iterator to this and inargs in a function
 func funcfirst(s *Iter, t *Type) *Type {
 	var fp *Type
 
@@ -1823,8 +1809,8 @@
 
 // Brcom returns !(op).
 // For example, Brcom(==) is !=.
-func Brcom(a int) int {
-	switch a {
+func Brcom(op Op) Op {
+	switch op {
 	case OEQ:
 		return ONE
 	case ONE:
@@ -1838,14 +1824,14 @@
 	case OGE:
 		return OLT
 	}
-	Fatalf("brcom: no com for %v\n", Oconv(a, 0))
-	return a
+	Fatalf("brcom: no com for %v\n", Oconv(int(op), 0))
+	return op
 }
 
 // Brrev returns reverse(op).
 // For example, Brrev(<) is >.
-func Brrev(a int) int {
-	switch a {
+func Brrev(op Op) Op {
+	switch op {
 	case OEQ:
 		return OEQ
 	case ONE:
@@ -1859,14 +1845,12 @@
 	case OGE:
 		return OLE
 	}
-	Fatalf("brrev: no rev for %v\n", Oconv(a, 0))
-	return a
+	Fatalf("brrev: no rev for %v\n", Oconv(int(op), 0))
+	return op
 }
 
-/*
- * return side effect-free n, appending side effects to init.
- * result is assignable if n is.
- */
+// return side effect-free n, appending side effects to init.
+// result is assignable if n is.
 func safeexpr(n *Node, init **NodeList) *Node {
 	if n == nil {
 		return nil
@@ -1935,10 +1919,8 @@
 	return l
 }
 
-/*
- * return side-effect free and cheap n, appending side effects to init.
- * result may not be assignable.
- */
+// return side-effect free and cheap n, appending side effects to init.
+// result may not be assignable.
 func cheapexpr(n *Node, init **NodeList) *Node {
 	switch n.Op {
 	case ONAME, OLITERAL:
@@ -1963,14 +1945,10 @@
 	}
 }
 
-/*
- * unicode-aware case-insensitive strcmp
- */
+// unicode-aware case-insensitive strcmp
 
-/*
- * code to resolve elided DOTs
- * in embedded types
- */
+// code to resolve elided DOTs
+// in embedded types
 
 // search depth 0 --
 // return count of fields+methods
@@ -2103,16 +2081,14 @@
 	return n
 }
 
-/*
- * code to help generate trampoline
- * functions for methods on embedded
- * subtypes.
- * these are approx the same as
- * the corresponding adddot routines
- * except that they expect to be called
- * with unique tasks and they return
- * the actual methods.
- */
+// code to help generate trampoline
+// functions for methods on embedded
+// subtypes.
+// these are approx the same as
+// the corresponding adddot routines
+// except that they expect to be called
+// with unique tasks and they return
+// the actual methods.
 type Symlink struct {
 	field     *Type
 	link      *Symlink
@@ -2260,9 +2236,7 @@
 	}
 }
 
-/*
- * Given funarg struct list, return list of ODCLFIELD Node fn args.
- */
+// Given funarg struct list, return list of ODCLFIELD Node fn args.
 func structargs(tl **Type, mustname int) *NodeList {
 	var savet Iter
 	var a *Node
@@ -2293,29 +2267,27 @@
 	return args
 }
 
-/*
- * Generate a wrapper function to convert from
- * a receiver of type T to a receiver of type U.
- * That is,
- *
- *	func (t T) M() {
- *		...
- *	}
- *
- * already exists; this function generates
- *
- *	func (u U) M() {
- *		u.M()
- *	}
- *
- * where the types T and U are such that u.M() is valid
- * and calls the T.M method.
- * The resulting function is for use in method tables.
- *
- *	rcvr - U
- *	method - M func (t T)(), a TFIELD type struct
- *	newnam - the eventual mangled name of this function
- */
+// Generate a wrapper function to convert from
+// a receiver of type T to a receiver of type U.
+// That is,
+//
+//	func (t T) M() {
+//		...
+//	}
+//
+// already exists; this function generates
+//
+//	func (u U) M() {
+//		u.M()
+//	}
+//
+// where the types T and U are such that u.M() is valid
+// and calls the T.M method.
+// The resulting function is for use in method tables.
+//
+//	rcvr - U
+//	method - M func (t T)(), a TFIELD type struct
+//	newnam - the eventual mangled name of this function
 
 var genwrapper_linehistdone int = 0
 
@@ -2405,7 +2377,7 @@
 	dot := adddot(Nod(OXDOT, this.Left, newname(method.Sym)))
 
 	// generate call
-	if flag_race == 0 && Isptr[rcvr.Etype] && Isptr[methodrcvr.Etype] && method.Embedded != 0 && !isifacemethod(method.Type) {
+	if !instrumenting && Isptr[rcvr.Etype] && Isptr[methodrcvr.Etype] && method.Embedded != 0 && !isifacemethod(method.Type) {
 		// generate tail call: adjust pointer receiver and jump to embedded method.
 		dot = dot.Left // skip final .M
 		if !Isptr[dotlist[0].field.Type.Etype] {
@@ -2511,9 +2483,7 @@
 	return n
 }
 
-/*
- * Generate a helper function to compute the hash of a value of type t.
- */
+// Generate a helper function to compute the hash of a value of type t.
 func genhash(sym *Sym, t *Type) {
 	if Debug['r'] != 0 {
 		fmt.Printf("genhash %v %v\n", sym, t)
@@ -2748,9 +2718,7 @@
 	return nif
 }
 
-/*
- * Generate a helper function to check equality of two values of type t.
- */
+// Generate a helper function to check equality of two values of type t.
 func geneq(sym *Sym, t *Type) {
 	if Debug['r'] != 0 {
 		fmt.Printf("geneq %v %v\n", sym, t)
@@ -3020,17 +2988,15 @@
 	return true
 }
 
-/*
- * even simpler simtype; get rid of ptr, bool.
- * assuming that the front end has rejected
- * all the invalid conversions (like ptr -> bool)
- */
-func Simsimtype(t *Type) int {
+// even simpler simtype; get rid of ptr, bool.
+// assuming that the front end has rejected
+// all the invalid conversions (like ptr -> bool)
+func Simsimtype(t *Type) EType {
 	if t == nil {
 		return 0
 	}
 
-	et := int(Simtype[t.Etype])
+	et := Simtype[t.Etype]
 	switch et {
 	case TPTR32:
 		et = TUINT32
@@ -3062,9 +3028,7 @@
 	return n
 }
 
-/*
- * return nelem of list
- */
+// return nelem of list
 func structcount(t *Type) int {
 	var s Iter
 
@@ -3075,11 +3039,9 @@
 	return v
 }
 
-/*
- * return power of 2 of the constant
- * operand. -1 if it is not a power of 2.
- * 1000+ if it is a -(power of 2)
- */
+// return power of 2 of the constant
+// operand. -1 if it is not a power of 2.
+// 1000+ if it is a -(power of 2)
 func powtwo(n *Node) int {
 	if n == nil || n.Op != OLITERAL || n.Type == nil {
 		return -1
@@ -3113,12 +3075,10 @@
 	return -1
 }
 
-/*
- * return the unsigned type for
- * a signed integer type.
- * returns T if input is not a
- * signed integer type.
- */
+// return the unsigned type for
+// a signed integer type.
+// returns T if input is not a
+// signed integer type.
 func tounsigned(t *Type) *Type {
 	// this is types[et+1], but not sure
 	// that this relation is immutable
@@ -3146,10 +3106,8 @@
 	return t
 }
 
-/*
- * magic number for signed division
- * see hacker's delight chapter 10
- */
+// magic number for signed division
+// see hacker's delight chapter 10
 func Smagic(m *Magic) {
 	var mask uint64
 
@@ -3243,10 +3201,8 @@
 	m.S = p - m.W
 }
 
-/*
- * magic number for unsigned division
- * see hacker's delight chapter 10
- */
+// magic number for unsigned division
+// see hacker's delight chapter 10
 func Umagic(m *Magic) {
 	var mask uint64
 
@@ -3353,15 +3309,13 @@
 	return nil
 }
 
-/*
- * Convert raw string to the prefix that will be used in the symbol
- * table.  All control characters, space, '%' and '"', as well as
- * non-7-bit clean bytes turn into %xx.  The period needs escaping
- * only in the last segment of the path, and it makes for happier
- * users if we escape that as little as possible.
- *
- * If you edit this, edit ../../debug/goobj/read.go:/importPathToPrefix too.
- */
+// Convert raw string to the prefix that will be used in the symbol
+// table.  All control characters, space, '%' and '"', as well as
+// non-7-bit clean bytes turn into %xx.  The period needs escaping
+// only in the last segment of the path, and it makes for happier
+// users if we escape that as little as possible.
+//
+// If you edit this, edit ../../debug/goobj/read.go:/importPathToPrefix too.
 func pathtoprefix(s string) string {
 	slash := strings.LastIndex(s, "/")
 	for i := 0; i < len(s); i++ {
@@ -3479,10 +3433,8 @@
 	*init = list(*init, n)
 }
 
-/*
- * Can this type be stored directly in an interface word?
- * Yes, if the representation is a single pointer.
- */
+// Can this type be stored directly in an interface word?
+// Yes, if the representation is a single pointer.
 func isdirectiface(t *Type) bool {
 	switch t.Etype {
 	case TPTR32,
diff --git a/src/cmd/compile/internal/gc/swt.go b/src/cmd/compile/internal/gc/swt.go
index 9ed30b2..3600e18 100644
--- a/src/cmd/compile/internal/gc/swt.go
+++ b/src/cmd/compile/internal/gc/swt.go
@@ -744,11 +744,11 @@
 	n2 := c2.node.Left
 
 	// sort by type (for switches on interface)
-	ct := int(n1.Val().Ctype())
-	if ct > int(n2.Val().Ctype()) {
+	ct := n1.Val().Ctype()
+	if ct > n2.Val().Ctype() {
 		return +1
 	}
-	if ct < int(n2.Val().Ctype()) {
+	if ct < n2.Val().Ctype() {
 		return -1
 	}
 	if !Eqtype(n1.Type, n2.Type) {
diff --git a/src/cmd/compile/internal/gc/syntax.go b/src/cmd/compile/internal/gc/syntax.go
index 6ef0cb0..da23e05 100644
--- a/src/cmd/compile/internal/gc/syntax.go
+++ b/src/cmd/compile/internal/gc/syntax.go
@@ -42,13 +42,13 @@
 
 	Esc uint16 // EscXXX
 
-	Op          uint8
+	Op          Op
 	Nointerface bool
 	Ullman      uint8 // sethi/ullman number
 	Addable     bool  // addressable
-	Etype       uint8 // op for OASOP, etype for OTYPE, exclam for export, 6g saved reg
+	Etype       EType // op for OASOP, etype for OTYPE, exclam for export, 6g saved reg
 	Bounded     bool  // bounds check unnecessary
-	Class       uint8 // PPARAM, PAUTO, PEXTERN, etc
+	Class       Class // PPARAM, PAUTO, PEXTERN, etc
 	Embedded    uint8 // ODCLFIELD embedded type
 	Colas       bool  // OAS resulting from :=
 	Diag        uint8 // already printed error about this
@@ -169,18 +169,24 @@
 
 	Endlineno int32
 
-	Norace         bool // func must not have race detector annotations
-	Nosplit        bool // func should not execute on separate stack
-	Nowritebarrier bool // emit compiler error instead of write barrier
-	Dupok          bool // duplicate definitions ok
-	Wrapper        bool // is method wrapper
-	Needctxt       bool // function uses context register (has closure variables)
-	Systemstack    bool // must run on system stack
+	Norace            bool // func must not have race detector annotations
+	Nosplit           bool // func should not execute on separate stack
+	Noinline          bool // func should not be inlined
+	Nowritebarrier    bool // emit compiler error instead of write barrier
+	Nowritebarrierrec bool // error on write barrier in this or recursive callees
+	Dupok             bool // duplicate definitions ok
+	Wrapper           bool // is method wrapper
+	Needctxt          bool // function uses context register (has closure variables)
+	Systemstack       bool // must run on system stack
+
+	WBLineno int32 // line number of first write barrier
 }
 
+type Op uint8
+
 // Node ops.
 const (
-	OXXX = iota
+	OXXX = Op(iota)
 
 	// names
 	ONAME    // var, const or func name
diff --git a/src/cmd/compile/internal/gc/type.go b/src/cmd/compile/internal/gc/type.go
index 483ebd9..3f218ee 100644
--- a/src/cmd/compile/internal/gc/type.go
+++ b/src/cmd/compile/internal/gc/type.go
@@ -24,7 +24,7 @@
 }
 
 func (t *Type) SimpleString() string {
-	return Econv(int(t.Etype), 0)
+	return Econv(t.Etype)
 }
 
 func (t *Type) Equal(u ssa.Type) bool {
diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go
index f30d071..354a2fa 100644
--- a/src/cmd/compile/internal/gc/typecheck.go
+++ b/src/cmd/compile/internal/gc/typecheck.go
@@ -11,18 +11,14 @@
 	"strings"
 )
 
-/*
- * type check the whole tree of an expression.
- * calculates expression types.
- * evaluates compile time constants.
- * marks variables that escape the local frame.
- * rewrites n->op to be more specific in some cases.
- */
+// type check the whole tree of an expression.
+// calculates expression types.
+// evaluates compile time constants.
+// marks variables that escape the local frame.
+// rewrites n->op to be more specific in some cases.
 var typecheckdefstack []*Node
 
-/*
- * resolve ONONAME to definition, if any.
- */
+// resolve ONONAME to definition, if any.
 func resolve(n *Node) *Node {
 	if n != nil && n.Op == ONONAME && n.Sym != nil {
 		r := n.Sym.Def
@@ -79,8 +75,8 @@
 	if Isslice(t) {
 		return "slice"
 	}
-	et := int(t.Etype)
-	if 0 <= et && et < len(_typekind) {
+	et := t.Etype
+	if int(et) < len(_typekind) {
 		s := _typekind[et]
 		if s != "" {
 			return s
@@ -194,9 +190,7 @@
 	return n
 }
 
-/*
- * does n contain a call or receive operation?
- */
+// does n contain a call or receive operation?
 func callrecv(n *Node) bool {
 	if n == nil {
 		return false
@@ -275,9 +269,7 @@
 
 		Fatalf("typecheck %v", Oconv(int(n.Op), 0))
 
-		/*
-		 * names
-		 */
+	// names
 	case OLITERAL:
 		ok |= Erv
 
@@ -327,9 +319,7 @@
 	case ODDD:
 		break
 
-		/*
-		 * types (OIND is with exprs)
-		 */
+	// types (OIND is with exprs)
 	case OTYPE:
 		ok |= Etype
 
@@ -420,7 +410,8 @@
 		}
 		t := typ(TCHAN)
 		t.Type = l.Type
-		t.Chan = n.Etype
+		// TODO(marvin): Fix Node.EType type union.
+		t.Chan = uint8(n.Etype)
 		n.Op = OTYPE
 		n.Type = t
 		n.Left = nil
@@ -454,9 +445,7 @@
 			return
 		}
 
-		/*
-		 * type or expr
-		 */
+	// type or expr
 	case OIND:
 		ntop := Erv | Etype
 
@@ -492,9 +481,7 @@
 		n.Type = t.Type
 		break OpSwitch
 
-		/*
-		 * arithmetic exprs
-		 */
+	// arithmetic exprs
 	case OASOP,
 		OADD,
 		OAND,
@@ -517,7 +504,7 @@
 		OSUB,
 		OXOR:
 		var l *Node
-		var op int
+		var op Op
 		var r *Node
 		if n.Op == OASOP {
 			ok |= Etop
@@ -528,7 +515,8 @@
 				n.Type = nil
 				return
 			}
-			op = int(n.Etype)
+			// TODO(marvin): Fix Node.EType type union.
+			op = Op(n.Etype)
 		} else {
 			ok |= Erv
 			l = typecheck(&n.Left, Erv|top&Eiota)
@@ -537,7 +525,7 @@
 				n.Type = nil
 				return
 			}
-			op = int(n.Op)
+			op = n.Op
 		}
 		if op == OLSH || op == ORSH {
 			defaultlit(&r, Types[TUINT])
@@ -576,11 +564,11 @@
 		if t.Etype == TIDEAL {
 			t = r.Type
 		}
-		et := int(t.Etype)
+		et := t.Etype
 		if et == TIDEAL {
 			et = TINT
 		}
-		aop := 0
+		var aop Op = OXXX
 		if iscmp[n.Op] && t.Etype != TIDEAL && !Eqtype(l.Type, r.Type) {
 			// comparison is okay as long as one side is
 			// assignable to the other.  convert so they have
@@ -633,7 +621,7 @@
 			}
 
 		converted:
-			et = int(t.Etype)
+			et = t.Etype
 		}
 
 		if t.Etype != TIDEAL && !Eqtype(l.Type, r.Type) {
@@ -715,7 +703,8 @@
 
 		if et == TSTRING {
 			if iscmp[n.Op] {
-				n.Etype = n.Op
+				// TODO(marvin): Fix Node.EType type union.
+				n.Etype = EType(n.Op)
 				n.Op = OCMPSTR
 			} else if n.Op == OADD {
 				// create OADDSTR node with list of strings in x + y + z + (w + v) + ...
@@ -745,7 +734,8 @@
 			} else if r.Op == OLITERAL && r.Val().Ctype() == CTNIL {
 			} else // leave alone for back end
 			if Isinter(r.Type) == Isinter(l.Type) {
-				n.Etype = n.Op
+				// TODO(marvin): Fix Node.EType type union.
+				n.Etype = EType(n.Op)
 				n.Op = OCMPIFACE
 			}
 		}
@@ -778,9 +768,7 @@
 		n.Type = t
 		break OpSwitch
 
-		/*
-		 * exprs
-		 */
+	// exprs
 	case OADDR:
 		ok |= Erv
 
@@ -1247,9 +1235,7 @@
 		}
 		break OpSwitch
 
-		/*
-		 * call and call like
-		 */
+	// call and call like
 	case OCALL:
 		l := n.Left
 
@@ -1269,12 +1255,14 @@
 		n.Diag |= n.Left.Diag
 		l = n.Left
 		if l.Op == ONAME && l.Etype != 0 {
-			if n.Isddd && l.Etype != OAPPEND {
+			// TODO(marvin): Fix Node.EType type union.
+			if n.Isddd && Op(l.Etype) != OAPPEND {
 				Yyerror("invalid use of ... with builtin %v", l)
 			}
 
 			// builtin: OLEN, OCAP, etc.
-			n.Op = l.Etype
+			// TODO(marvin): Fix Node.EType type union.
+			n.Op = Op(l.Etype)
 
 			n.Left = n.Right
 			n.Right = nil
@@ -1426,7 +1414,7 @@
 				n.Orig = r
 			}
 
-			n.Type = Types[cplxsubtype(int(t.Etype))]
+			n.Type = Types[cplxsubtype(t.Etype)]
 			break OpSwitch
 		}
 
@@ -1751,8 +1739,8 @@
 			return
 		}
 		var why string
-		n.Op = uint8(convertop(t, n.Type, &why))
-		if (n.Op) == 0 {
+		n.Op = convertop(t, n.Type, &why)
+		if n.Op == 0 {
 			if n.Diag == 0 && !n.Type.Broke {
 				Yyerror("cannot convert %v to type %v%s", Nconv(n.Left, obj.FmtLong), n.Type, why)
 				n.Diag = 1
@@ -2024,9 +2012,7 @@
 		typecheck(&n.Left, Erv)
 		break OpSwitch
 
-		/*
-		 * statements
-		 */
+	// statements
 	case OAS:
 		ok |= Etop
 
@@ -2462,7 +2448,7 @@
 }
 
 func derefall(t *Type) *Type {
-	for t != nil && int(t.Etype) == Tptr {
+	for t != nil && t.Etype == Tptr {
 		t = t.Type
 	}
 	return t
@@ -2534,20 +2520,20 @@
 		dowidth(tt)
 		rcvr := getthisx(f2.Type).Type.Type
 		if !Eqtype(rcvr, tt) {
-			if int(rcvr.Etype) == Tptr && Eqtype(rcvr.Type, tt) {
+			if rcvr.Etype == Tptr && Eqtype(rcvr.Type, tt) {
 				checklvalue(n.Left, "call pointer method on")
 				n.Left = Nod(OADDR, n.Left, nil)
 				n.Left.Implicit = true
 				typecheck(&n.Left, Etype|Erv)
-			} else if int(tt.Etype) == Tptr && int(rcvr.Etype) != Tptr && Eqtype(tt.Type, rcvr) {
+			} else if tt.Etype == Tptr && rcvr.Etype != Tptr && Eqtype(tt.Type, rcvr) {
 				n.Left = Nod(OIND, n.Left, nil)
 				n.Left.Implicit = true
 				typecheck(&n.Left, Etype|Erv)
-			} else if int(tt.Etype) == Tptr && int(tt.Type.Etype) == Tptr && Eqtype(derefall(tt), derefall(rcvr)) {
+			} else if tt.Etype == Tptr && tt.Type.Etype == Tptr && Eqtype(derefall(tt), derefall(rcvr)) {
 				Yyerror("calling method %v with receiver %v requires explicit dereference", n.Right, Nconv(n.Left, obj.FmtLong))
-				for int(tt.Etype) == Tptr {
+				for tt.Etype == Tptr {
 					// Stop one level early for method with pointer receiver.
-					if int(rcvr.Etype) == Tptr && int(tt.Type.Etype) != Tptr {
+					if rcvr.Etype == Tptr && tt.Type.Etype != Tptr {
 						break
 					}
 					n.Left = Nod(OIND, n.Left, nil)
@@ -2605,6 +2591,8 @@
 	return false
 }
 
+// downcount is the same as countfield
+// TODO decide if we want both (for semantic reasons)
 func downcount(t *Type) int {
 	n := 0
 	for tl := t.Type; tl != nil; tl = tl.Down {
@@ -2614,10 +2602,8 @@
 	return n
 }
 
-/*
- * typecheck assignment: type list = expression list
- */
-func typecheckaste(op int, call *Node, isddd bool, tstruct *Type, nl *NodeList, desc func() string) {
+// typecheck assignment: type list = expression list
+func typecheckaste(op Op, call *Node, isddd bool, tstruct *Type, nl *NodeList, desc func() string) {
 	var t *Type
 	var n *Node
 	var n1 int
@@ -2793,9 +2779,7 @@
 	goto out
 }
 
-/*
- * type check composite
- */
+// type check composite
 func fielddup(n *Node, hash map[string]bool) {
 	if n.Op != ONAME {
 		Fatalf("fielddup: not ONAME")
@@ -2937,12 +2921,12 @@
 	}
 
 	// Save original node (including n->right)
-	norig := Nod(int(n.Op), nil, nil)
+	norig := Nod(n.Op, nil, nil)
 
 	*norig = *n
 
 	setlineno(n.Right)
-	l := typecheck(&n.Right, Etype|Ecomplit) /* sic */
+	l := typecheck(&n.Right, Etype|Ecomplit) // sic
 	t := l.Type
 	if t == nil {
 		n.Type = nil
@@ -3178,9 +3162,7 @@
 	return
 }
 
-/*
- * lvalue etc
- */
+// lvalue etc
 func islvalue(n *Node) bool {
 	switch n.Op {
 	case OINDEX:
@@ -3279,11 +3261,9 @@
 	return false
 }
 
-/*
- * type check assignment.
- * if this assignment is the definition of a var on the left side,
- * fill in the var's type.
- */
+// type check assignment.
+// if this assignment is the definition of a var on the left side,
+// fill in the var's type.
 func typecheckas(n *Node) {
 	// delicate little dance.
 	// the definition of n may refer to this assignment
@@ -3452,9 +3432,7 @@
 	}
 }
 
-/*
- * type check function definition
- */
+// type check function definition
 func typecheckfunc(n *Node) {
 	typecheck(&n.Func.Nname, Erv|Easgn)
 	t := n.Func.Nname.Type
diff --git a/src/cmd/compile/internal/gc/unsafe.go b/src/cmd/compile/internal/gc/unsafe.go
index 44a658f..8884374 100644
--- a/src/cmd/compile/internal/gc/unsafe.go
+++ b/src/cmd/compile/internal/gc/unsafe.go
@@ -6,13 +6,11 @@
 
 import "cmd/internal/obj"
 
-/*
- * look for
- *	unsafe.Sizeof
- *	unsafe.Offsetof
- *	unsafe.Alignof
- * rewrite with a constant
- */
+// look for
+//	unsafe.Sizeof
+//	unsafe.Offsetof
+//	unsafe.Alignof
+// rewrite with a constant
 func unsafenmagic(nn *Node) *Node {
 	fn := nn.Left
 	args := nn.List
diff --git a/src/cmd/compile/internal/gc/util.go b/src/cmd/compile/internal/gc/util.go
index 6533c9a..7ed3b39 100644
--- a/src/cmd/compile/internal/gc/util.go
+++ b/src/cmd/compile/internal/gc/util.go
@@ -4,19 +4,12 @@
 	"os"
 	"runtime"
 	"runtime/pprof"
-	"strconv"
 )
 
 func (n *Node) Line() string {
 	return Ctxt.LineHist.LineString(int(n.Lineno))
 }
 
-func atoi(s string) int {
-	// NOTE: Not strconv.Atoi, accepts hex and octal prefixes.
-	n, _ := strconv.ParseInt(s, 0, 0)
-	return int(n)
-}
-
 var atExitFuncs []func()
 
 func AtExit(f func()) {
diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go
index 22d4478..6dfe969 100644
--- a/src/cmd/compile/internal/gc/walk.go
+++ b/src/cmd/compile/internal/gc/walk.go
@@ -237,7 +237,7 @@
 			walkprintfunc(&n.Left, &n.Ninit)
 
 		case OCOPY:
-			n.Left = copyany(n.Left, &n.Ninit, 1)
+			n.Left = copyany(n.Left, &n.Ninit, true)
 
 		default:
 			walkexpr(&n.Left, &n.Ninit)
@@ -269,7 +269,7 @@
 			walkprintfunc(&n.Left, &n.Ninit)
 
 		case OCOPY:
-			n.Left = copyany(n.Left, &n.Ninit, 1)
+			n.Left = copyany(n.Left, &n.Ninit, true)
 
 		default:
 			walkexpr(&n.Left, &n.Ninit)
@@ -288,7 +288,7 @@
 			// so that reorder3 can fix up conflicts
 			var rl *NodeList
 
-			var cl uint8
+			var cl Class
 			for ll := Curfn.Func.Dcl; ll != nil; ll = ll.Next {
 				cl = ll.N.Class &^ PHEAP
 				if cl == PAUTO {
@@ -313,19 +313,19 @@
 				if f.Op != OCALLFUNC && f.Op != OCALLMETH && f.Op != OCALLINTER {
 					Fatalf("expected return of call, have %v", f)
 				}
-				n.List = concat(list1(f), ascompatet(int(n.Op), rl, &f.Type, 0, &n.Ninit))
+				n.List = concat(list1(f), ascompatet(n.Op, rl, &f.Type, 0, &n.Ninit))
 				break
 			}
 
 			// move function calls out, to make reorder3's job easier.
 			walkexprlistsafe(n.List, &n.Ninit)
 
-			ll := ascompatee(int(n.Op), rl, n.List, &n.Ninit)
+			ll := ascompatee(n.Op, rl, n.List, &n.Ninit)
 			n.List = reorder3(ll)
 			break
 		}
 
-		ll := ascompatte(int(n.Op), nil, false, Getoutarg(Curfn.Type), n.List, 1, &n.Ninit)
+		ll := ascompatte(n.Op, nil, false, Getoutarg(Curfn.Type), n.List, 1, &n.Ninit)
 		n.List = ll
 
 	case ORETJMP:
@@ -366,13 +366,11 @@
 	return Smallintconst(l) && Smallintconst(r) && (t.Type.Width == 0 || Mpgetfix(r.Val().U.(*Mpint)) < (1<<16)/t.Type.Width)
 }
 
-/*
- * walk the whole tree of the body of an
- * expression or simple statement.
- * the types expressions are calculated.
- * compile-time constants are evaluated.
- * complex side effects like statements are appended to init
- */
+// walk the whole tree of the body of an
+// expression or simple statement.
+// the types expressions are calculated.
+// compile-time constants are evaluated.
+// complex side effects like statements are appended to init
 func walkexprlist(l *NodeList, init **NodeList) {
 	for ; l != nil; l = l.Next {
 		walkexpr(&l.N, init)
@@ -430,6 +428,7 @@
 		Fatalf("missed typecheck: %v\n", Nconv(n, obj.FmtSign))
 	}
 
+opswitch:
 	switch n.Op {
 	default:
 		Dump("walk", n)
@@ -441,7 +440,6 @@
 		OEMPTY,
 		OPARAM,
 		OGETG:
-		goto ret
 
 	case ONOT,
 		OMINUS,
@@ -452,16 +450,13 @@
 		ODOTMETH,
 		ODOTINTER:
 		walkexpr(&n.Left, init)
-		goto ret
 
 	case OIND:
 		walkexpr(&n.Left, init)
-		goto ret
 
 	case ODOT:
 		usefield(n)
 		walkexpr(&n.Left, init)
-		goto ret
 
 	case ODOTPTR:
 		usefield(n)
@@ -473,16 +468,13 @@
 		}
 
 		walkexpr(&n.Left, init)
-		goto ret
 
 	case OEFACE:
 		walkexpr(&n.Left, init)
 		walkexpr(&n.Right, init)
-		goto ret
 
 	case OSPTR, OITAB:
 		walkexpr(&n.Left, init)
-		goto ret
 
 	case OLEN, OCAP:
 		walkexpr(&n.Left, init)
@@ -500,8 +492,6 @@
 			n.Typecheck = 1
 		}
 
-		goto ret
-
 	case OLSH, ORSH:
 		walkexpr(&n.Left, init)
 		walkexpr(&n.Right, init)
@@ -510,7 +500,6 @@
 		if Debug['m'] != 0 && n.Etype != 0 && !Isconst(n.Right, CTINT) {
 			Warn("shift bounds check elided")
 		}
-		goto ret
 
 		// Use results from call expression as arguments for complex.
 	case OAND,
@@ -530,13 +519,11 @@
 
 		walkexpr(&n.Left, init)
 		walkexpr(&n.Right, init)
-		goto ret
 
 	case OOR, OXOR:
 		walkexpr(&n.Left, init)
 		walkexpr(&n.Right, init)
 		walkrotate(&n)
-		goto ret
 
 	case OEQ, ONE:
 		walkexpr(&n.Left, init)
@@ -552,7 +539,6 @@
 		safemode = 0
 		walkcompare(&n, init)
 		safemode = old_safemode
-		goto ret
 
 	case OANDAND, OOROR:
 		walkexpr(&n.Left, init)
@@ -564,45 +550,37 @@
 
 		walkexpr(&n.Right, &ll)
 		addinit(&n.Right, ll)
-		goto ret
 
 	case OPRINT, OPRINTN:
 		walkexprlist(n.List, init)
 		n = walkprint(n, init)
-		goto ret
 
 	case OPANIC:
 		n = mkcall("gopanic", nil, init, n.Left)
-		goto ret
 
 	case ORECOVER:
 		n = mkcall("gorecover", n.Type, init, Nod(OADDR, nodfp, nil))
-		goto ret
 
 	case OLITERAL:
 		n.Addable = true
-		goto ret
 
 	case OCLOSUREVAR, OCFUNC:
 		n.Addable = true
-		goto ret
 
 	case ONAME:
 		if n.Class&PHEAP == 0 && n.Class != PPARAMREF {
 			n.Addable = true
 		}
-		goto ret
 
 	case OCALLINTER:
 		t := n.Left.Type
 		if n.List != nil && n.List.N.Op == OAS {
-			goto ret
+			break
 		}
 		walkexpr(&n.Left, init)
 		walkexprlist(n.List, init)
-		ll := ascompatte(int(n.Op), n, n.Isddd, getinarg(t), n.List, 0, init)
+		ll := ascompatte(n.Op, n, n.Isddd, getinarg(t), n.List, 0, init)
 		n.List = reorder1(ll)
-		goto ret
 
 	case OCALLFUNC:
 		if n.Left.Op == OCLOSURE {
@@ -632,7 +610,7 @@
 
 		t := n.Left.Type
 		if n.List != nil && n.List.N.Op == OAS {
-			goto ret
+			break
 		}
 
 		walkexpr(&n.Left, init)
@@ -644,28 +622,26 @@
 				n.Op = OSQRT
 				n.Left = n.List.N
 				n.List = nil
-				goto ret
+				break opswitch
 			}
 		}
 
-		ll := ascompatte(int(n.Op), n, n.Isddd, getinarg(t), n.List, 0, init)
+		ll := ascompatte(n.Op, n, n.Isddd, getinarg(t), n.List, 0, init)
 		n.List = reorder1(ll)
-		goto ret
 
 	case OCALLMETH:
 		t := n.Left.Type
 		if n.List != nil && n.List.N.Op == OAS {
-			goto ret
+			break
 		}
 		walkexpr(&n.Left, init)
 		walkexprlist(n.List, init)
-		ll := ascompatte(int(n.Op), n, false, getthis(t), list1(n.Left.Left), 0, init)
-		lr := ascompatte(int(n.Op), n, n.Isddd, getinarg(t), n.List, 0, init)
+		ll := ascompatte(n.Op, n, false, getthis(t), list1(n.Left.Left), 0, init)
+		lr := ascompatte(n.Op, n, n.Isddd, getinarg(t), n.List, 0, init)
 		ll = concat(ll, lr)
 		n.Left.Left = nil
 		ullmancalc(n.Left)
 		n.List = reorder1(ll)
-		goto ret
 
 	case OAS:
 		*init = concat(*init, n.Ninit)
@@ -675,11 +651,11 @@
 		n.Left = safeexpr(n.Left, init)
 
 		if oaslit(n, init) {
-			goto ret
+			break
 		}
 
-		if n.Right == nil || iszero(n.Right) && flag_race == 0 {
-			goto ret
+		if n.Right == nil || iszero(n.Right) && !instrumenting {
+			break
 		}
 
 		switch n.Right.Op {
@@ -690,7 +666,7 @@
 			// TODO(rsc): The Isfat is for consistency with componentgen and orderexpr.
 			// It needs to be removed in all three places.
 			// That would allow inlining x.(struct{*int}) the same as x.(*int).
-			if isdirectiface(n.Right.Type) && !Isfat(n.Right.Type) && flag_race == 0 {
+			if isdirectiface(n.Right.Type) && !Isfat(n.Right.Type) && !instrumenting {
 				// handled directly during cgen
 				walkexpr(&n.Right, init)
 				break
@@ -713,7 +689,7 @@
 
 			n = mkcall1(fn, nil, init, typename(r.Type), r.Left, n1)
 			walkexpr(&n, init)
-			goto ret
+			break opswitch
 
 		case ORECV:
 			// x = <-c; n.Left is x, n.Right.Left is c.
@@ -724,7 +700,7 @@
 			r := n.Right.Left // the channel
 			n = mkcall1(chanfn("chanrecv1", 2, r.Type), nil, init, typename(r.Type), r, n1)
 			walkexpr(&n, init)
-			goto ret
+			break opswitch
 
 		case OAPPEND:
 			// x = append(...)
@@ -738,7 +714,7 @@
 			if r.Op == OAPPEND {
 				// Left in place for back end.
 				// Do not add a new write barrier.
-				goto ret
+				break opswitch
 			}
 			// Otherwise, lowered for race detector.
 			// Treat as ordinary assignment.
@@ -751,8 +727,6 @@
 			n = applywritebarrier(n, init)
 		}
 
-		goto ret
-
 	case OAS2:
 		*init = concat(*init, n.Ninit)
 		n.Ninit = nil
@@ -764,7 +738,6 @@
 			lr.N = applywritebarrier(lr.N, init)
 		}
 		n = liststmt(ll)
-		goto ret
 
 		// a,b,... = fn()
 	case OAS2FUNC:
@@ -775,12 +748,11 @@
 		walkexprlistsafe(n.List, init)
 		walkexpr(&r, init)
 
-		ll := ascompatet(int(n.Op), n.List, &r.Type, 0, init)
+		ll := ascompatet(n.Op, n.List, &r.Type, 0, init)
 		for lr := ll; lr != nil; lr = lr.Next {
 			lr.N = applywritebarrier(lr.N, init)
 		}
 		n = liststmt(concat(list1(r), ll))
-		goto ret
 
 		// x, y = <-c
 	// orderstmt made sure x is addressable.
@@ -802,7 +774,6 @@
 		r = mkcall1(fn, n.List.Next.N.Type, init, typename(r.Left.Type), r.Left, n1)
 		n = Nod(OAS, n.List.Next.N, r)
 		typecheck(&n, Etop)
-		goto ret
 
 		// a,b = m[i];
 	case OAS2MAPR:
@@ -873,7 +844,6 @@
 		walkexpr(&n, init)
 
 		// TODO: ptr is always non-nil, so disable nil check for this OIND op.
-		goto ret
 
 	case ODELETE:
 		*init = concat(*init, n.Ninit)
@@ -888,18 +858,17 @@
 
 		t := map_.Type
 		n = mkcall1(mapfndel("mapdelete", t), nil, init, typename(t), map_, key)
-		goto ret
 
 	case OAS2DOTTYPE:
 		e := n.Rlist.N // i.(T)
 		// TODO(rsc): The Isfat is for consistency with componentgen and orderexpr.
 		// It needs to be removed in all three places.
 		// That would allow inlining x.(struct{*int}) the same as x.(*int).
-		if isdirectiface(e.Type) && !Isfat(e.Type) && flag_race == 0 {
+		if isdirectiface(e.Type) && !Isfat(e.Type) && !instrumenting {
 			// handled directly during gen.
 			walkexprlistsafe(n.List, init)
 			walkexpr(&e.Left, init)
-			goto ret
+			break
 		}
 
 		// res, ok = i.(T)
@@ -943,7 +912,7 @@
 				}
 				n = Nod(OAS, ok, fast)
 				typecheck(&n, Etop)
-				goto ret
+				break
 			}
 		}
 
@@ -964,14 +933,12 @@
 		call := mkcall1(fn, oktype, init, typename(t), from, resptr)
 		n = Nod(OAS, ok, call)
 		typecheck(&n, Etop)
-		goto ret
 
 	case ODOTTYPE, ODOTTYPE2:
 		if !isdirectiface(n.Type) || Isfat(n.Type) {
 			Fatalf("walkexpr ODOTTYPE") // should see inside OAS only
 		}
 		walkexpr(&n.Left, init)
-		goto ret
 
 	case OCONVIFACE:
 		walkexpr(&n.Left, init)
@@ -982,7 +949,7 @@
 			l.Type = n.Type
 			l.Typecheck = n.Typecheck
 			n = l
-			goto ret
+			break
 		}
 
 		// Build name of function: convI2E etc.
@@ -1015,16 +982,15 @@
 			ll = list(ll, l)
 
 			if isdirectiface(n.Left.Type) {
-				/* For pointer types, we can make a special form of optimization
-				 *
-				 * These statements are put onto the expression init list:
-				 * 	Itab *tab = atomicloadtype(&cache);
-				 * 	if(tab == nil)
-				 * 		tab = typ2Itab(type, itype, &cache);
-				 *
-				 * The CONVIFACE expression is replaced with this:
-				 * 	OEFACE{tab, ptr};
-				 */
+				// For pointer types, we can make a special form of optimization
+				//
+				// These statements are put onto the expression init list:
+				// 	Itab *tab = atomicloadtype(&cache);
+				// 	if(tab == nil)
+				// 		tab = typ2Itab(type, itype, &cache);
+				//
+				// The CONVIFACE expression is replaced with this:
+				// 	OEFACE{tab, ptr};
 				l := temp(Ptrto(Types[TUINT8]))
 
 				n1 := Nod(OAS, l, sym.Def)
@@ -1048,7 +1014,7 @@
 				l.Typecheck = n.Typecheck
 				l.Type = n.Type
 				n = l
-				goto ret
+				break
 			}
 		}
 
@@ -1090,37 +1056,35 @@
 		n.List = ll
 		typecheck(&n, Erv)
 		walkexpr(&n, init)
-		goto ret
 
 	case OCONV, OCONVNOP:
 		if Thearch.Thechar == '5' {
 			if Isfloat[n.Left.Type.Etype] {
 				if n.Type.Etype == TINT64 {
 					n = mkcall("float64toint64", n.Type, init, conv(n.Left, Types[TFLOAT64]))
-					goto ret
+					break
 				}
 
 				if n.Type.Etype == TUINT64 {
 					n = mkcall("float64touint64", n.Type, init, conv(n.Left, Types[TFLOAT64]))
-					goto ret
+					break
 				}
 			}
 
 			if Isfloat[n.Type.Etype] {
 				if n.Left.Type.Etype == TINT64 {
 					n = mkcall("int64tofloat64", n.Type, init, conv(n.Left, Types[TINT64]))
-					goto ret
+					break
 				}
 
 				if n.Left.Type.Etype == TUINT64 {
 					n = mkcall("uint64tofloat64", n.Type, init, conv(n.Left, Types[TUINT64]))
-					goto ret
+					break
 				}
 			}
 		}
 
 		walkexpr(&n.Left, init)
-		goto ret
 
 	case OANDNOT:
 		walkexpr(&n.Left, init)
@@ -1128,46 +1092,40 @@
 		n.Right = Nod(OCOM, n.Right, nil)
 		typecheck(&n.Right, Erv)
 		walkexpr(&n.Right, init)
-		goto ret
 
 	case OMUL:
 		walkexpr(&n.Left, init)
 		walkexpr(&n.Right, init)
 		walkmul(&n, init)
-		goto ret
 
 	case ODIV, OMOD:
 		walkexpr(&n.Left, init)
 		walkexpr(&n.Right, init)
 
-		/*
-		 * rewrite complex div into function call.
-		 */
-		et := int(n.Left.Type.Etype)
+		// rewrite complex div into function call.
+		et := n.Left.Type.Etype
 
 		if Iscomplex[et] && n.Op == ODIV {
 			t := n.Type
 			n = mkcall("complex128div", Types[TCOMPLEX128], init, conv(n.Left, Types[TCOMPLEX128]), conv(n.Right, Types[TCOMPLEX128]))
 			n = conv(n, t)
-			goto ret
+			break
 		}
 
 		// Nothing to do for float divisions.
 		if Isfloat[et] {
-			goto ret
+			break
 		}
 
 		// Try rewriting as shifts or magic multiplies.
 		walkdiv(&n, init)
 
-		/*
-		 * rewrite 64-bit div and mod into function calls
-		 * on 32-bit architectures.
-		 */
+		// rewrite 64-bit div and mod into function calls
+		// on 32-bit architectures.
 		switch n.Op {
 		case OMOD, ODIV:
 			if Widthreg >= 8 || (et != TUINT64 && et != TINT64) {
-				goto ret
+				break opswitch
 			}
 			var fn string
 			if et == TINT64 {
@@ -1181,13 +1139,8 @@
 				fn += "mod"
 			}
 			n = mkcall(fn, n.Type, init, conv(n.Left, Types[et]), conv(n.Right, Types[et]))
-
-		default:
-			break
 		}
 
-		goto ret
-
 	case OINDEX:
 		walkexpr(&n.Left, init)
 
@@ -1200,7 +1153,7 @@
 		// if range of type cannot exceed static array bound,
 		// disable bounds check.
 		if n.Bounded {
-			goto ret
+			break
 		}
 		t := n.Left.Type
 		if t != nil && Isptr[t.Etype] {
@@ -1239,11 +1192,10 @@
 				Yyerror("index out of bounds")
 			}
 		}
-		goto ret
 
 	case OINDEXMAP:
 		if n.Etype == 1 {
-			goto ret
+			break
 		}
 		walkexpr(&n.Left, init)
 		walkexpr(&n.Right, init)
@@ -1280,8 +1232,6 @@
 		n.Type = t.Type
 		n.Typecheck = 1
 
-		goto ret
-
 	case ORECV:
 		Fatalf("walkexpr ORECV") // should see inside OAS only
 
@@ -1294,7 +1244,6 @@
 		}
 		walkexpr(&n.Right.Right, init)
 		n = reduceSlice(n)
-		goto ret
 
 	case OSLICE3, OSLICE3ARR:
 		walkexpr(&n.Left, init)
@@ -1316,13 +1265,10 @@
 				n.Op = OSLICEARR
 			}
 			n = reduceSlice(n)
-			goto ret
 		}
-		goto ret
 
 	case OADDR:
 		walkexpr(&n.Left, init)
-		goto ret
 
 	case ONEW:
 		if n.Esc == EscNone {
@@ -1340,33 +1286,34 @@
 			n = callnew(n.Type.Type)
 		}
 
-		goto ret
-
 		// If one argument to the comparison is an empty string,
 	// comparing the lengths instead will yield the same result
 	// without the function call.
 	case OCMPSTR:
 		if (Isconst(n.Left, CTSTR) && len(n.Left.Val().U.(string)) == 0) || (Isconst(n.Right, CTSTR) && len(n.Right.Val().U.(string)) == 0) {
-			r := Nod(int(n.Etype), Nod(OLEN, n.Left, nil), Nod(OLEN, n.Right, nil))
+			// TODO(marvin): Fix Node.EType type union.
+			r := Nod(Op(n.Etype), Nod(OLEN, n.Left, nil), Nod(OLEN, n.Right, nil))
 			typecheck(&r, Erv)
 			walkexpr(&r, init)
 			r.Type = n.Type
 			n = r
-			goto ret
+			break
 		}
 
 		// s + "badgerbadgerbadger" == "badgerbadgerbadger"
-		if (n.Etype == OEQ || n.Etype == ONE) && Isconst(n.Right, CTSTR) && n.Left.Op == OADDSTR && count(n.Left.List) == 2 && Isconst(n.Left.List.Next.N, CTSTR) && strlit(n.Right) == strlit(n.Left.List.Next.N) {
-			r := Nod(int(n.Etype), Nod(OLEN, n.Left.List.N, nil), Nodintconst(0))
+		if (Op(n.Etype) == OEQ || Op(n.Etype) == ONE) && Isconst(n.Right, CTSTR) && n.Left.Op == OADDSTR && count(n.Left.List) == 2 && Isconst(n.Left.List.Next.N, CTSTR) && strlit(n.Right) == strlit(n.Left.List.Next.N) {
+			// TODO(marvin): Fix Node.EType type union.
+			r := Nod(Op(n.Etype), Nod(OLEN, n.Left.List.N, nil), Nodintconst(0))
 			typecheck(&r, Erv)
 			walkexpr(&r, init)
 			r.Type = n.Type
 			n = r
-			goto ret
+			break
 		}
 
 		var r *Node
-		if n.Etype == OEQ || n.Etype == ONE {
+		// TODO(marvin): Fix Node.EType type union.
+		if Op(n.Etype) == OEQ || Op(n.Etype) == ONE {
 			// prepare for rewrite below
 			n.Left = cheapexpr(n.Left, init)
 
@@ -1376,7 +1323,8 @@
 
 			// quick check of len before full compare for == or !=
 			// eqstring assumes that the lengths are equal
-			if n.Etype == OEQ {
+			// TODO(marvin): Fix Node.EType type union.
+			if Op(n.Etype) == OEQ {
 				// len(left) == len(right) && eqstring(left, right)
 				r = Nod(OANDAND, Nod(OEQ, Nod(OLEN, n.Left, nil), Nod(OLEN, n.Right, nil)), r)
 			} else {
@@ -1392,7 +1340,8 @@
 			// sys_cmpstring(s1, s2) :: 0
 			r = mkcall("cmpstring", Types[TINT], init, conv(n.Left, Types[TSTRING]), conv(n.Right, Types[TSTRING]))
 
-			r = Nod(int(n.Etype), r, Nodintconst(0))
+			// TODO(marvin): Fix Node.EType type union.
+			r = Nod(Op(n.Etype), r, Nodintconst(0))
 		}
 
 		typecheck(&r, Erv)
@@ -1401,19 +1350,16 @@
 		}
 		r.Type = n.Type
 		n = r
-		goto ret
 
 	case OADDSTR:
 		n = addstr(n, init)
-		goto ret
 
 	case OAPPEND:
 		// order should make sure we only see OAS(node, OAPPEND), which we handle above.
 		Fatalf("append outside assignment")
 
 	case OCOPY:
-		n = copyany(n, init, flag_race)
-		goto ret
+		n = copyany(n, init, instrumenting)
 
 		// cannot use chanfn - closechan takes any, not chan any
 	case OCLOSE:
@@ -1421,11 +1367,9 @@
 
 		substArgTypes(fn, n.Left.Type)
 		n = mkcall1(fn, nil, init, n.Left)
-		goto ret
 
 	case OMAKECHAN:
 		n = mkcall1(chanfn("makechan", 1, n.Type), n.Type, init, typename(n.Type), conv(n.Left, Types[TINT64]))
-		goto ret
 
 	case OMAKEMAP:
 		t := n.Type
@@ -1456,7 +1400,6 @@
 
 		substArgTypes(fn, hmap(t), mapbucket(t), t.Down, t.Type)
 		n = mkcall1(fn, n.Type, init, typename(n.Type), conv(n.Left, Types[TINT64]), a, r)
-		goto ret
 
 	case OMAKESLICE:
 		l := n.Left
@@ -1490,8 +1433,6 @@
 			n = mkcall1(fn, n.Type, init, typename(n.Type), conv(l, Types[TINT64]), conv(r, Types[TINT64]))
 		}
 
-		goto ret
-
 	case ORUNESTR:
 		a := nodnil()
 		if n.Esc == EscNone {
@@ -1503,8 +1444,6 @@
 		// intstring(*[4]byte, rune)
 		n = mkcall("intstring", n.Type, init, a, conv(n.Left, Types[TINT64]))
 
-		goto ret
-
 	case OARRAYBYTESTR:
 		a := nodnil()
 		if n.Esc == EscNone {
@@ -1517,14 +1456,10 @@
 		// slicebytetostring(*[32]byte, []byte) string;
 		n = mkcall("slicebytetostring", n.Type, init, a, n.Left)
 
-		goto ret
-
 		// slicebytetostringtmp([]byte) string;
 	case OARRAYBYTESTRTMP:
 		n = mkcall("slicebytetostringtmp", n.Type, init, n.Left)
 
-		goto ret
-
 		// slicerunetostring(*[32]byte, []rune) string;
 	case OARRAYRUNESTR:
 		a := nodnil()
@@ -1537,7 +1472,6 @@
 		}
 
 		n = mkcall("slicerunetostring", n.Type, init, a, n.Left)
-		goto ret
 
 		// stringtoslicebyte(*32[byte], string) []byte;
 	case OSTRARRAYBYTE:
@@ -1551,14 +1485,11 @@
 		}
 
 		n = mkcall("stringtoslicebyte", n.Type, init, a, conv(n.Left, Types[TSTRING]))
-		goto ret
 
 		// stringtoslicebytetmp(string) []byte;
 	case OSTRARRAYBYTETMP:
 		n = mkcall("stringtoslicebytetmp", n.Type, init, conv(n.Left, Types[TSTRING]))
 
-		goto ret
-
 		// stringtoslicerune(*[32]rune, string) []rune
 	case OSTRARRAYRUNE:
 		a := nodnil()
@@ -1571,7 +1502,6 @@
 		}
 
 		n = mkcall("stringtoslicerune", n.Type, init, a, n.Left)
-		goto ret
 
 		// ifaceeq(i1 any-1, i2 any-2) (ret bool);
 	case OCMPIFACE:
@@ -1589,12 +1519,14 @@
 		n.Left = cheapexpr(n.Left, init)
 		substArgTypes(fn, n.Right.Type, n.Left.Type)
 		r := mkcall1(fn, n.Type, init, n.Left, n.Right)
-		if n.Etype == ONE {
+		// TODO(marvin): Fix Node.EType type union.
+		if Op(n.Etype) == ONE {
 			r = Nod(ONOT, r, nil)
 		}
 
 		// check itable/type before full compare.
-		if n.Etype == OEQ {
+		// TODO(marvin): Fix Node.EType type union.
+		if Op(n.Etype) == OEQ {
 			r = Nod(OANDAND, Nod(OEQ, Nod(OITAB, n.Left, nil), Nod(OITAB, n.Right, nil)), r)
 		} else {
 			r = Nod(OOROR, Nod(ONE, Nod(OITAB, n.Left, nil), Nod(OITAB, n.Right, nil)), r)
@@ -1603,13 +1535,11 @@
 		walkexpr(&r, init)
 		r.Type = n.Type
 		n = r
-		goto ret
 
 	case OARRAYLIT, OMAPLIT, OSTRUCTLIT, OPTRLIT:
 		var_ := temp(n.Type)
 		anylit(0, n, var_, init)
 		n = var_
-		goto ret
 
 	case OSEND:
 		n1 := n.Right
@@ -1617,25 +1547,19 @@
 		walkexpr(&n1, init)
 		n1 = Nod(OADDR, n1, nil)
 		n = mkcall1(chanfn("chansend1", 2, n.Left.Type), nil, init, typename(n.Left.Type), n.Left, n1)
-		goto ret
 
 	case OCLOSURE:
 		n = walkclosure(n, init)
-		goto ret
 
 	case OCALLPART:
 		n = walkpartialcall(n, init)
-		goto ret
 	}
 
-	Fatalf("missing switch %v", Oconv(int(n.Op), 0))
-
 	// Expressions that are constant at run time but not
 	// considered const by the language spec are not turned into
 	// constants until walk. For example, if n is y%1 == 0, the
 	// walk of y%1 may have replaced it by 0.
 	// Check whether n with its updated args is itself now a constant.
-ret:
 	t := n.Type
 
 	evconst(n)
@@ -1670,7 +1594,7 @@
 	return n
 }
 
-func ascompatee1(op int, l *Node, r *Node, init **NodeList) *Node {
+func ascompatee1(op Op, l *Node, r *Node, init **NodeList) *Node {
 	// convas will turn map assigns into function calls,
 	// making it impossible for reorder3 to work.
 	n := Nod(OAS, l, r)
@@ -1682,12 +1606,10 @@
 	return convas(n, init)
 }
 
-func ascompatee(op int, nl *NodeList, nr *NodeList, init **NodeList) *NodeList {
-	/*
-	 * check assign expression list to
-	 * a expression list. called in
-	 *	expr-list = expr-list
-	 */
+func ascompatee(op Op, nl *NodeList, nr *NodeList, init **NodeList) *NodeList {
+	// check assign expression list to
+	// a expression list. called in
+	//	expr-list = expr-list
 
 	// ensure order of evaluation for function calls
 	for ll := nl; ll != nil; ll = ll.Next {
@@ -1715,12 +1637,10 @@
 	return nn
 }
 
-/*
- * l is an lv and rt is the type of an rv
- * return 1 if this implies a function call
- * evaluating the lv or a function call
- * in the conversion of the types
- */
+// l is an lv and rt is the type of an rv
+// return 1 if this implies a function call
+// evaluating the lv or a function call
+// in the conversion of the types
 func fncall(l *Node, rt *Type) bool {
 	if l.Ullman >= UINF || l.Op == OINDEXMAP {
 		return true
@@ -1735,18 +1655,16 @@
 	return true
 }
 
-func ascompatet(op int, nl *NodeList, nr **Type, fp int, init **NodeList) *NodeList {
+func ascompatet(op Op, nl *NodeList, nr **Type, fp int, init **NodeList) *NodeList {
 	var l *Node
 	var tmp *Node
 	var a *Node
 	var ll *NodeList
 	var saver Iter
 
-	/*
-	 * check assign type list to
-	 * a expression list. called in
-	 *	expr-list = func()
-	 */
+	// check assign type list to
+	// a expression list. called in
+	//	expr-list = func()
 	r := Structfirst(&saver, nr)
 
 	var nn *NodeList
@@ -1796,9 +1714,7 @@
 	return concat(nn, mm)
 }
 
-/*
-* package all the arguments that match a ... T parameter into a []T.
- */
+// package all the arguments that match a ... T parameter into a []T.
 func mkdotargslice(lr0 *NodeList, nn *NodeList, l *Type, fp int, init **NodeList, ddd *Node) *NodeList {
 	esc := uint16(EscUnknown)
 	if ddd != nil {
@@ -1832,9 +1748,7 @@
 	return nn
 }
 
-/*
- * helpers for shape errors
- */
+// helpers for shape errors
 func dumptypes(nl **Type, what string) string {
 	var savel Iter
 
@@ -1878,13 +1792,11 @@
 	return fmt_
 }
 
-/*
- * check assign expression list to
- * a type list. called in
- *	return expr-list
- *	func(expr-list)
- */
-func ascompatte(op int, call *Node, isddd bool, nl **Type, lr *NodeList, fp int, init **NodeList) *NodeList {
+// check assign expression list to
+// a type list. called in
+//	return expr-list
+//	func(expr-list)
+func ascompatte(op Op, call *Node, isddd bool, nl **Type, lr *NodeList, fp int, init **NodeList) *NodeList {
 	var savel Iter
 
 	lr0 := lr
@@ -1997,9 +1909,9 @@
 	var n *Node
 	var on *Node
 	var t *Type
-	var et int
+	var et EType
 
-	op := int(nn.Op)
+	op := nn.Op
 	all := nn.List
 	var calls *NodeList
 	notfirst := false
@@ -2040,7 +1952,7 @@
 		}
 
 		t = n.Type
-		et = int(n.Type.Etype)
+		et = n.Type.Etype
 		if Isinter(n.Type) {
 			if isnilinter(n.Type) {
 				on = syslook("printeface", 1)
@@ -2286,14 +2198,12 @@
 	return n
 }
 
-/*
- * from ascompat[te]
- * evaluating actual function arguments.
- *	f(a,b)
- * if there is exactly one function expr,
- * then it is done first. otherwise must
- * make temp variables
- */
+// from ascompat[te]
+// evaluating actual function arguments.
+//	f(a,b)
+// if there is exactly one function expr,
+// then it is done first. otherwise must
+// make temp variables
 func reorder1(all *NodeList) *NodeList {
 	var n *Node
 
@@ -2350,14 +2260,12 @@
 	return concat(g, r)
 }
 
-/*
- * from ascompat[ee]
- *	a,b = c,d
- * simultaneous assignment. there cannot
- * be later use of an earlier lvalue.
- *
- * function calls have been removed.
- */
+// from ascompat[ee]
+//	a,b = c,d
+// simultaneous assignment. there cannot
+// be later use of an earlier lvalue.
+//
+// function calls have been removed.
 func reorder3(all *NodeList) *NodeList {
 	var l *Node
 
@@ -2413,12 +2321,10 @@
 	return concat(early, all)
 }
 
-/*
- * if the evaluation of *np would be affected by the
- * assignments in all up to but not including stop,
- * copy into a temporary during *early and
- * replace *np with that temp.
- */
+// if the evaluation of *np would be affected by the
+// assignments in all up to but not including stop,
+// copy into a temporary during *early and
+// replace *np with that temp.
 func reorder3save(np **Node, all *NodeList, stop *NodeList, early **NodeList) {
 	n := *np
 	if !aliased(n, all, stop) {
@@ -2432,10 +2338,8 @@
 	*np = q.Left
 }
 
-/*
- * what's the outer value that a write to n affects?
- * outer value means containing struct or array.
- */
+// what's the outer value that a write to n affects?
+// outer value means containing struct or array.
 func outervalue(n *Node) *Node {
 	for {
 		if n.Op == OXDOT {
@@ -2457,10 +2361,8 @@
 	return n
 }
 
-/*
- * Is it possible that the computation of n might be
- * affected by writes in as up to but not including stop?
- */
+// Is it possible that the computation of n might be
+// affected by writes in as up to but not including stop?
 func aliased(n *Node, all *NodeList, stop *NodeList) bool {
 	if n == nil {
 		return false
@@ -2521,11 +2423,9 @@
 	return true
 }
 
-/*
- * does the evaluation of n only refer to variables
- * whose addresses have not been taken?
- * (and no other memory)
- */
+// does the evaluation of n only refer to variables
+// whose addresses have not been taken?
+// (and no other memory)
 func varexpr(n *Node) bool {
 	if n == nil {
 		return true
@@ -2574,9 +2474,7 @@
 	return false
 }
 
-/*
- * is the name l mentioned in r?
- */
+// is the name l mentioned in r?
 func vmatch2(l *Node, r *Node) bool {
 	if r == nil {
 		return false
@@ -2604,14 +2502,10 @@
 	return false
 }
 
-/*
- * is any name mentioned in l also mentioned in r?
- * called by sinit.go
- */
+// is any name mentioned in l also mentioned in r?
+// called by sinit.go
 func vmatch1(l *Node, r *Node) bool {
-	/*
-	 * isolate all left sides
-	 */
+	// isolate all left sides
 	if l == nil || r == nil {
 		return false
 	}
@@ -2649,11 +2543,9 @@
 	return false
 }
 
-/*
- * walk through argin parameters.
- * generate and return code to allocate
- * copies of escaped parameters to the heap.
- */
+// walk through argin parameters.
+// generate and return code to allocate
+// copies of escaped parameters to the heap.
 func paramstoheap(argin **Type, out int) *NodeList {
 	var savet Iter
 	var v *Node
@@ -2699,9 +2591,7 @@
 	return nn
 }
 
-/*
- * walk through argout parameters copying back to stack
- */
+// walk through argout parameters copying back to stack
 func returnsfromheap(argin **Type) *NodeList {
 	var savet Iter
 	var v *Node
@@ -2718,11 +2608,9 @@
 	return nn
 }
 
-/*
- * take care of migrating any function in/out args
- * between the stack and the heap.  adds code to
- * curfn's before and after lists.
- */
+// take care of migrating any function in/out args
+// between the stack and the heap.  adds code to
+// curfn's before and after lists.
 func heapmoves() {
 	lno := lineno
 	lineno = Curfn.Lineno
@@ -2938,7 +2826,7 @@
 		substArgTypes(fn, l1.Type, l2.Type)
 		nt := mkcall1(fn, Types[TINT], &l, typename(l1.Type.Type), nptr1, nptr2)
 		l = list(l, nt)
-	} else if flag_race != 0 {
+	} else if instrumenting {
 		// rely on runtime to instrument copy.
 		// copy(s[len(l1):len(l1)+len(l2)], l2)
 		nptr1 := Nod(OSLICE, s, Nod(OKEY, Nod(OLEN, l1, nil), Nod(OADD, Nod(OLEN, l1, nil), Nod(OLEN, l2, nil))))
@@ -3037,8 +2925,8 @@
 	}
 
 	// General case, with no function calls left as arguments.
-	// Leave for gen, except that race detector requires old form
-	if flag_race == 0 {
+	// Leave for gen, except that instrumentation requires old form.
+	if !instrumenting {
 		return n
 	}
 
@@ -3091,13 +2979,13 @@
 //
 // Also works if b is a string.
 //
-func copyany(n *Node, init **NodeList, runtimecall int) *Node {
+func copyany(n *Node, init **NodeList, runtimecall bool) *Node {
 	if haspointers(n.Left.Type.Type) {
 		fn := writebarrierfn("typedslicecopy", n.Left.Type, n.Right.Type)
 		return mkcall1(fn, n.Type, init, typename(n.Left.Type.Type), n.Left, n.Right)
 	}
 
-	if runtimecall != 0 {
+	if runtimecall {
 		var fn *Node
 		if n.Right.Type.Etype == TSTRING {
 			fn = syslook("slicestringcopy", 1)
@@ -3281,7 +3169,7 @@
 	typecheck(&a, Etop)
 	*init = list(*init, a)
 
-	andor := OANDAND
+	var andor Op = OANDAND
 	if n.Op == ONE {
 		andor = OOROR
 	}
@@ -3295,7 +3183,7 @@
 		for i := 0; int64(i) < t.Bound; i++ {
 			li = Nod(OINDEX, l, Nodintconst(int64(i)))
 			ri = Nod(OINDEX, r, Nodintconst(int64(i)))
-			a = Nod(int(n.Op), li, ri)
+			a = Nod(n.Op, li, ri)
 			if expr == nil {
 				expr = a
 			} else {
@@ -3321,7 +3209,7 @@
 			}
 			li = Nod(OXDOT, l, newname(t1.Sym))
 			ri = Nod(OXDOT, r, newname(t1.Sym))
-			a = Nod(int(n.Op), li, ri)
+			a = Nod(n.Op, li, ri)
 			if expr == nil {
 				expr = a
 			} else {
@@ -3402,7 +3290,7 @@
 }
 
 func walkrotate(np **Node) {
-	if Thearch.Thechar == '7' || Thearch.Thechar == '9' {
+	if Thearch.Thechar == '0' || Thearch.Thechar == '7' || Thearch.Thechar == '9' {
 		return
 	}
 
@@ -3455,9 +3343,7 @@
 	return
 }
 
-/*
- * walkmul rewrites integer multiplication by powers of two as shifts.
- */
+// walkmul rewrites integer multiplication by powers of two as shifts.
 func walkmul(np **Node, init **NodeList) {
 	n := *np
 	if !Isint[n.Type.Etype] {
@@ -3526,15 +3412,13 @@
 	*np = n
 }
 
-/*
- * walkdiv rewrites division by a constant as less expensive
- * operations.
- */
+// walkdiv rewrites division by a constant as less expensive
+// operations.
 func walkdiv(np **Node, init **NodeList) {
 	// if >= 0, nr is 1<<pow // 1 if nr is negative.
 
 	// TODO(minux)
-	if Thearch.Thechar == '7' || Thearch.Thechar == '9' {
+	if Thearch.Thechar == '0' || Thearch.Thechar == '7' || Thearch.Thechar == '9' {
 		return
 	}
 
@@ -4040,7 +3924,7 @@
 	Curfn = nil
 	funchdr(fn)
 
-	a = Nod(int(n.Op), nil, nil)
+	a = Nod(n.Op, nil, nil)
 	a.List = printargs
 	typecheck(&a, Etop)
 	walkstmt(&a)
diff --git a/src/cmd/compile/internal/gc/y.go b/src/cmd/compile/internal/gc/y.go
index dfb0fa4..c400830 100644
--- a/src/cmd/compile/internal/gc/y.go
+++ b/src/cmd/compile/internal/gc/y.go
@@ -154,7 +154,7 @@
 const yyErrCode = 2
 const yyMaxDepth = 200
 
-//line go.y:2308
+//line go.y:2317
 func fixlbrace(lbr int) {
 	// If the opening brace was an LBODY,
 	// set up for another one now that we're done.
@@ -1291,6 +1291,7 @@
 				break
 			}
 			if my.Name == "init" {
+				lineno = int32(yyDollar[1].i)
 				Yyerror("cannot import package as init - init must be a func")
 				break
 			}
@@ -1307,7 +1308,7 @@
 		}
 	case 12:
 		yyDollar = yyS[yypt-2 : yypt+1]
-		//line go.y:272
+		//line go.y:273
 		{
 			// When an invalid import path is passed to importfile,
 			// it calls Yyerror and then sets up a fake import with
@@ -1319,7 +1320,7 @@
 		}
 	case 15:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:288
+		//line go.y:289
 		{
 			// import with original name
 			yyVAL.i = parserline()
@@ -1328,7 +1329,7 @@
 		}
 	case 16:
 		yyDollar = yyS[yypt-2 : yypt+1]
-		//line go.y:295
+		//line go.y:296
 		{
 			// import with given name
 			yyVAL.i = parserline()
@@ -1337,7 +1338,7 @@
 		}
 	case 17:
 		yyDollar = yyS[yypt-2 : yypt+1]
-		//line go.y:302
+		//line go.y:303
 		{
 			// import into my name space
 			yyVAL.i = parserline()
@@ -1346,7 +1347,7 @@
 		}
 	case 18:
 		yyDollar = yyS[yypt-4 : yypt+1]
-		//line go.y:311
+		//line go.y:312
 		{
 			if importpkg.Name == "" {
 				importpkg.Name = yyDollar[2].sym.Name
@@ -1354,7 +1355,9 @@
 			} else if importpkg.Name != yyDollar[2].sym.Name {
 				Yyerror("conflicting names %s and %s for package %q", importpkg.Name, yyDollar[2].sym.Name, importpkg.Path)
 			}
-			importpkg.Direct = true
+			if incannedimport == 0 {
+				importpkg.Direct = true
+			}
 			importpkg.Safe = curio.importsafe
 
 			if safemode != 0 && !curio.importsafe {
@@ -1363,7 +1366,7 @@
 		}
 	case 20:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:328
+		//line go.y:331
 		{
 			if yyDollar[1].sym.Name == "safe" {
 				curio.importsafe = true
@@ -1371,64 +1374,64 @@
 		}
 	case 21:
 		yyDollar = yyS[yypt-0 : yypt+1]
-		//line go.y:335
+		//line go.y:338
 		{
 			defercheckwidth()
 		}
 	case 22:
 		yyDollar = yyS[yypt-4 : yypt+1]
-		//line go.y:339
+		//line go.y:342
 		{
 			resumecheckwidth()
 			unimportfile()
 		}
 	case 23:
 		yyDollar = yyS[yypt-0 : yypt+1]
-		//line go.y:348
+		//line go.y:351
 		{
 			Yyerror("empty top-level declaration")
 			yyVAL.list = nil
 		}
 	case 25:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:354
+		//line go.y:357
 		{
 			yyVAL.list = list1(yyDollar[1].node)
 		}
 	case 26:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:358
+		//line go.y:361
 		{
 			Yyerror("non-declaration statement outside function body")
 			yyVAL.list = nil
 		}
 	case 27:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:363
+		//line go.y:366
 		{
 			yyVAL.list = nil
 		}
 	case 28:
 		yyDollar = yyS[yypt-2 : yypt+1]
-		//line go.y:369
+		//line go.y:372
 		{
 			yyVAL.list = yyDollar[2].list
 		}
 	case 29:
 		yyDollar = yyS[yypt-5 : yypt+1]
-		//line go.y:373
+		//line go.y:376
 		{
 			yyVAL.list = yyDollar[3].list
 		}
 	case 30:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:377
+		//line go.y:380
 		{
 			yyVAL.list = nil
 		}
 	case 31:
 		yyDollar = yyS[yypt-2 : yypt+1]
-		//line go.y:381
+		//line go.y:384
 		{
 			yyVAL.list = yyDollar[2].list
 			iota_ = -100000
@@ -1436,7 +1439,7 @@
 		}
 	case 32:
 		yyDollar = yyS[yypt-5 : yypt+1]
-		//line go.y:387
+		//line go.y:390
 		{
 			yyVAL.list = yyDollar[3].list
 			iota_ = -100000
@@ -1444,7 +1447,7 @@
 		}
 	case 33:
 		yyDollar = yyS[yypt-7 : yypt+1]
-		//line go.y:393
+		//line go.y:396
 		{
 			yyVAL.list = concat(yyDollar[3].list, yyDollar[5].list)
 			iota_ = -100000
@@ -1452,80 +1455,80 @@
 		}
 	case 34:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:399
+		//line go.y:402
 		{
 			yyVAL.list = nil
 			iota_ = -100000
 		}
 	case 35:
 		yyDollar = yyS[yypt-2 : yypt+1]
-		//line go.y:404
+		//line go.y:407
 		{
 			yyVAL.list = list1(yyDollar[2].node)
 		}
 	case 36:
 		yyDollar = yyS[yypt-5 : yypt+1]
-		//line go.y:408
+		//line go.y:411
 		{
 			yyVAL.list = yyDollar[3].list
 		}
 	case 37:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:412
+		//line go.y:415
 		{
 			yyVAL.list = nil
 		}
 	case 38:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:418
+		//line go.y:421
 		{
 			iota_ = 0
 		}
 	case 39:
 		yyDollar = yyS[yypt-2 : yypt+1]
-		//line go.y:424
+		//line go.y:427
 		{
 			yyVAL.list = variter(yyDollar[1].list, yyDollar[2].node, nil)
 		}
 	case 40:
 		yyDollar = yyS[yypt-4 : yypt+1]
-		//line go.y:428
+		//line go.y:431
 		{
 			yyVAL.list = variter(yyDollar[1].list, yyDollar[2].node, yyDollar[4].list)
 		}
 	case 41:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:432
+		//line go.y:435
 		{
 			yyVAL.list = variter(yyDollar[1].list, nil, yyDollar[3].list)
 		}
 	case 42:
 		yyDollar = yyS[yypt-4 : yypt+1]
-		//line go.y:438
+		//line go.y:441
 		{
 			yyVAL.list = constiter(yyDollar[1].list, yyDollar[2].node, yyDollar[4].list)
 		}
 	case 43:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:442
+		//line go.y:445
 		{
 			yyVAL.list = constiter(yyDollar[1].list, nil, yyDollar[3].list)
 		}
 	case 45:
 		yyDollar = yyS[yypt-2 : yypt+1]
-		//line go.y:449
+		//line go.y:452
 		{
 			yyVAL.list = constiter(yyDollar[1].list, yyDollar[2].node, nil)
 		}
 	case 46:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:453
+		//line go.y:456
 		{
 			yyVAL.list = constiter(yyDollar[1].list, nil, nil)
 		}
 	case 47:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:459
+		//line go.y:462
 		{
 			// different from dclname because the name
 			// becomes visible right here, not at the end
@@ -1534,13 +1537,13 @@
 		}
 	case 48:
 		yyDollar = yyS[yypt-2 : yypt+1]
-		//line go.y:468
+		//line go.y:471
 		{
 			yyVAL.node = typedcl1(yyDollar[1].node, yyDollar[2].node, true)
 		}
 	case 49:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:474
+		//line go.y:477
 		{
 			yyVAL.node = yyDollar[1].node
 
@@ -1556,14 +1559,14 @@
 		}
 	case 50:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:488
+		//line go.y:491
 		{
 			yyVAL.node = Nod(OASOP, yyDollar[1].node, yyDollar[3].node)
-			yyVAL.node.Etype = uint8(yyDollar[2].i) // rathole to pass opcode
+			yyVAL.node.Etype = EType(yyDollar[2].i) // rathole to pass opcode
 		}
 	case 51:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:493
+		//line go.y:496
 		{
 			if yyDollar[1].list.Next == nil && yyDollar[3].list.Next == nil {
 				// simple
@@ -1577,7 +1580,7 @@
 		}
 	case 52:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:505
+		//line go.y:508
 		{
 			if yyDollar[3].list.N.Op == OTYPESW {
 				yyVAL.node = Nod(OTYPESW, nil, yyDollar[3].list.N.Right)
@@ -1586,7 +1589,7 @@
 				}
 				if yyDollar[1].list.Next != nil {
 					Yyerror("argument count mismatch: %d = %d", count(yyDollar[1].list), 1)
-				} else if (yyDollar[1].list.N.Op != ONAME && yyDollar[1].list.N.Op != OTYPE && yyDollar[1].list.N.Op != ONONAME) || isblank(yyDollar[1].list.N) {
+				} else if (yyDollar[1].list.N.Op != ONAME && yyDollar[1].list.N.Op != OTYPE && yyDollar[1].list.N.Op != ONONAME && (yyDollar[1].list.N.Op != OLITERAL || yyDollar[1].list.N.Name == nil)) || isblank(yyDollar[1].list.N) {
 					Yyerror("invalid variable name %s in type switch", yyDollar[1].list.N)
 				} else {
 					yyVAL.node.Left = dclname(yyDollar[1].list.N.Sym)
@@ -1597,23 +1600,25 @@
 		}
 	case 53:
 		yyDollar = yyS[yypt-2 : yypt+1]
-		//line go.y:523
+		//line go.y:526
 		{
 			yyVAL.node = Nod(OASOP, yyDollar[1].node, Nodintconst(1))
 			yyVAL.node.Implicit = true
-			yyVAL.node.Etype = OADD
+			// TODO(marvin): Fix Node.EType type union.
+			yyVAL.node.Etype = EType(OADD)
 		}
 	case 54:
 		yyDollar = yyS[yypt-2 : yypt+1]
-		//line go.y:529
+		//line go.y:533
 		{
 			yyVAL.node = Nod(OASOP, yyDollar[1].node, Nodintconst(1))
 			yyVAL.node.Implicit = true
-			yyVAL.node.Etype = OSUB
+			// TODO(marvin): Fix Node.EType type union.
+			yyVAL.node.Etype = EType(OSUB)
 		}
 	case 55:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:537
+		//line go.y:542
 		{
 			var n, nn *Node
 
@@ -1638,7 +1643,7 @@
 		}
 	case 56:
 		yyDollar = yyS[yypt-5 : yypt+1]
-		//line go.y:560
+		//line go.y:565
 		{
 			var n *Node
 
@@ -1658,7 +1663,7 @@
 		}
 	case 57:
 		yyDollar = yyS[yypt-5 : yypt+1]
-		//line go.y:578
+		//line go.y:583
 		{
 			// will be converted to OCASE
 			// right will point to next case
@@ -1669,7 +1674,7 @@
 		}
 	case 58:
 		yyDollar = yyS[yypt-2 : yypt+1]
-		//line go.y:587
+		//line go.y:592
 		{
 			var n, nn *Node
 
@@ -1690,13 +1695,13 @@
 		}
 	case 59:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:608
+		//line go.y:613
 		{
 			markdcl()
 		}
 	case 60:
 		yyDollar = yyS[yypt-4 : yypt+1]
-		//line go.y:612
+		//line go.y:617
 		{
 			if yyDollar[3].list == nil {
 				yyVAL.node = Nod(OEMPTY, nil, nil)
@@ -1707,7 +1712,7 @@
 		}
 	case 61:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:623
+		//line go.y:628
 		{
 			// If the last token read by the lexer was consumed
 			// as part of the case, clear it (parser has cleared yychar).
@@ -1720,7 +1725,7 @@
 		}
 	case 62:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:634
+		//line go.y:639
 		{
 			// This is the only place in the language where a statement
 			// list is not allowed to drop the final semicolon, because
@@ -1740,32 +1745,32 @@
 		}
 	case 63:
 		yyDollar = yyS[yypt-0 : yypt+1]
-		//line go.y:653
+		//line go.y:658
 		{
 			yyVAL.list = nil
 		}
 	case 64:
 		yyDollar = yyS[yypt-2 : yypt+1]
-		//line go.y:657
+		//line go.y:662
 		{
 			yyVAL.list = list(yyDollar[1].list, yyDollar[2].node)
 		}
 	case 65:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:663
+		//line go.y:668
 		{
 			markdcl()
 		}
 	case 66:
 		yyDollar = yyS[yypt-4 : yypt+1]
-		//line go.y:667
+		//line go.y:672
 		{
 			yyVAL.list = yyDollar[3].list
 			popdcl()
 		}
 	case 67:
 		yyDollar = yyS[yypt-4 : yypt+1]
-		//line go.y:674
+		//line go.y:679
 		{
 			yyVAL.node = Nod(ORANGE, nil, yyDollar[4].node)
 			yyVAL.node.List = yyDollar[1].list
@@ -1773,7 +1778,7 @@
 		}
 	case 68:
 		yyDollar = yyS[yypt-4 : yypt+1]
-		//line go.y:680
+		//line go.y:685
 		{
 			yyVAL.node = Nod(ORANGE, nil, yyDollar[4].node)
 			yyVAL.node.List = yyDollar[1].list
@@ -1782,14 +1787,14 @@
 		}
 	case 69:
 		yyDollar = yyS[yypt-2 : yypt+1]
-		//line go.y:687
+		//line go.y:692
 		{
 			yyVAL.node = Nod(ORANGE, nil, yyDollar[2].node)
 			yyVAL.node.Etype = 0 // := flag
 		}
 	case 70:
 		yyDollar = yyS[yypt-5 : yypt+1]
-		//line go.y:694
+		//line go.y:699
 		{
 			// init ; test ; incr
 			if yyDollar[5].node != nil && yyDollar[5].node.Colas {
@@ -1804,7 +1809,7 @@
 		}
 	case 71:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:707
+		//line go.y:712
 		{
 			// normal test
 			yyVAL.node = Nod(OFOR, nil, nil)
@@ -1812,27 +1817,27 @@
 		}
 	case 73:
 		yyDollar = yyS[yypt-2 : yypt+1]
-		//line go.y:716
+		//line go.y:721
 		{
 			yyVAL.node = yyDollar[1].node
 			yyVAL.node.Nbody = concat(yyVAL.node.Nbody, yyDollar[2].list)
 		}
 	case 74:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:723
+		//line go.y:728
 		{
 			markdcl()
 		}
 	case 75:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:727
+		//line go.y:732
 		{
 			yyVAL.node = yyDollar[3].node
 			popdcl()
 		}
 	case 76:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:734
+		//line go.y:739
 		{
 			// test
 			yyVAL.node = Nod(OIF, nil, nil)
@@ -1840,7 +1845,7 @@
 		}
 	case 77:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:740
+		//line go.y:745
 		{
 			// init ; test
 			yyVAL.node = Nod(OIF, nil, nil)
@@ -1851,13 +1856,13 @@
 		}
 	case 78:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:752
+		//line go.y:757
 		{
 			markdcl()
 		}
 	case 79:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:756
+		//line go.y:761
 		{
 			if yyDollar[3].node.Left == nil {
 				Yyerror("missing condition in if statement")
@@ -1865,13 +1870,13 @@
 		}
 	case 80:
 		yyDollar = yyS[yypt-5 : yypt+1]
-		//line go.y:762
+		//line go.y:767
 		{
 			yyDollar[3].node.Nbody = yyDollar[5].list
 		}
 	case 81:
 		yyDollar = yyS[yypt-8 : yypt+1]
-		//line go.y:766
+		//line go.y:771
 		{
 			var n *Node
 			var nn *NodeList
@@ -1889,13 +1894,13 @@
 		}
 	case 82:
 		yyDollar = yyS[yypt-2 : yypt+1]
-		//line go.y:784
+		//line go.y:789
 		{
 			markdcl()
 		}
 	case 83:
 		yyDollar = yyS[yypt-5 : yypt+1]
-		//line go.y:788
+		//line go.y:793
 		{
 			if yyDollar[4].node.Left == nil {
 				Yyerror("missing condition in if statement")
@@ -1905,25 +1910,25 @@
 		}
 	case 84:
 		yyDollar = yyS[yypt-0 : yypt+1]
-		//line go.y:797
+		//line go.y:802
 		{
 			yyVAL.list = nil
 		}
 	case 85:
 		yyDollar = yyS[yypt-2 : yypt+1]
-		//line go.y:801
+		//line go.y:806
 		{
 			yyVAL.list = concat(yyDollar[1].list, yyDollar[2].list)
 		}
 	case 86:
 		yyDollar = yyS[yypt-0 : yypt+1]
-		//line go.y:806
+		//line go.y:811
 		{
 			yyVAL.list = nil
 		}
 	case 87:
 		yyDollar = yyS[yypt-2 : yypt+1]
-		//line go.y:810
+		//line go.y:815
 		{
 			l := &NodeList{N: yyDollar[2].node}
 			l.End = l
@@ -1931,13 +1936,13 @@
 		}
 	case 88:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:818
+		//line go.y:823
 		{
 			markdcl()
 		}
 	case 89:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:822
+		//line go.y:827
 		{
 			var n *Node
 			n = yyDollar[3].node.Left
@@ -1948,7 +1953,7 @@
 		}
 	case 90:
 		yyDollar = yyS[yypt-7 : yypt+1]
-		//line go.y:831
+		//line go.y:836
 		{
 			yyVAL.node = yyDollar[3].node
 			yyVAL.node.Op = OSWITCH
@@ -1958,13 +1963,13 @@
 		}
 	case 91:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:841
+		//line go.y:846
 		{
 			typesw = Nod(OXXX, typesw, nil)
 		}
 	case 92:
 		yyDollar = yyS[yypt-5 : yypt+1]
-		//line go.y:845
+		//line go.y:850
 		{
 			yyVAL.node = Nod(OSELECT, nil, nil)
 			yyVAL.node.Lineno = typesw.Lineno
@@ -1973,133 +1978,133 @@
 		}
 	case 94:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:858
+		//line go.y:863
 		{
 			yyVAL.node = Nod(OOROR, yyDollar[1].node, yyDollar[3].node)
 		}
 	case 95:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:862
+		//line go.y:867
 		{
 			yyVAL.node = Nod(OANDAND, yyDollar[1].node, yyDollar[3].node)
 		}
 	case 96:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:866
+		//line go.y:871
 		{
 			yyVAL.node = Nod(OEQ, yyDollar[1].node, yyDollar[3].node)
 		}
 	case 97:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:870
+		//line go.y:875
 		{
 			yyVAL.node = Nod(ONE, yyDollar[1].node, yyDollar[3].node)
 		}
 	case 98:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:874
+		//line go.y:879
 		{
 			yyVAL.node = Nod(OLT, yyDollar[1].node, yyDollar[3].node)
 		}
 	case 99:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:878
+		//line go.y:883
 		{
 			yyVAL.node = Nod(OLE, yyDollar[1].node, yyDollar[3].node)
 		}
 	case 100:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:882
+		//line go.y:887
 		{
 			yyVAL.node = Nod(OGE, yyDollar[1].node, yyDollar[3].node)
 		}
 	case 101:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:886
+		//line go.y:891
 		{
 			yyVAL.node = Nod(OGT, yyDollar[1].node, yyDollar[3].node)
 		}
 	case 102:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:890
+		//line go.y:895
 		{
 			yyVAL.node = Nod(OADD, yyDollar[1].node, yyDollar[3].node)
 		}
 	case 103:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:894
+		//line go.y:899
 		{
 			yyVAL.node = Nod(OSUB, yyDollar[1].node, yyDollar[3].node)
 		}
 	case 104:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:898
+		//line go.y:903
 		{
 			yyVAL.node = Nod(OOR, yyDollar[1].node, yyDollar[3].node)
 		}
 	case 105:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:902
+		//line go.y:907
 		{
 			yyVAL.node = Nod(OXOR, yyDollar[1].node, yyDollar[3].node)
 		}
 	case 106:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:906
+		//line go.y:911
 		{
 			yyVAL.node = Nod(OMUL, yyDollar[1].node, yyDollar[3].node)
 		}
 	case 107:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:910
+		//line go.y:915
 		{
 			yyVAL.node = Nod(ODIV, yyDollar[1].node, yyDollar[3].node)
 		}
 	case 108:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:914
+		//line go.y:919
 		{
 			yyVAL.node = Nod(OMOD, yyDollar[1].node, yyDollar[3].node)
 		}
 	case 109:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:918
+		//line go.y:923
 		{
 			yyVAL.node = Nod(OAND, yyDollar[1].node, yyDollar[3].node)
 		}
 	case 110:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:922
+		//line go.y:927
 		{
 			yyVAL.node = Nod(OANDNOT, yyDollar[1].node, yyDollar[3].node)
 		}
 	case 111:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:926
+		//line go.y:931
 		{
 			yyVAL.node = Nod(OLSH, yyDollar[1].node, yyDollar[3].node)
 		}
 	case 112:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:930
+		//line go.y:935
 		{
 			yyVAL.node = Nod(ORSH, yyDollar[1].node, yyDollar[3].node)
 		}
 	case 113:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:935
+		//line go.y:940
 		{
 			yyVAL.node = Nod(OSEND, yyDollar[1].node, yyDollar[3].node)
 		}
 	case 115:
 		yyDollar = yyS[yypt-2 : yypt+1]
-		//line go.y:942
+		//line go.y:947
 		{
 			yyVAL.node = Nod(OIND, yyDollar[2].node, nil)
 		}
 	case 116:
 		yyDollar = yyS[yypt-2 : yypt+1]
-		//line go.y:946
+		//line go.y:951
 		{
 			if yyDollar[2].node.Op == OCOMPLIT {
 				// Special case for &T{...}: turn into (*T){...}.
@@ -2112,57 +2117,57 @@
 		}
 	case 117:
 		yyDollar = yyS[yypt-2 : yypt+1]
-		//line go.y:957
+		//line go.y:962
 		{
 			yyVAL.node = Nod(OPLUS, yyDollar[2].node, nil)
 		}
 	case 118:
 		yyDollar = yyS[yypt-2 : yypt+1]
-		//line go.y:961
+		//line go.y:966
 		{
 			yyVAL.node = Nod(OMINUS, yyDollar[2].node, nil)
 		}
 	case 119:
 		yyDollar = yyS[yypt-2 : yypt+1]
-		//line go.y:965
+		//line go.y:970
 		{
 			yyVAL.node = Nod(ONOT, yyDollar[2].node, nil)
 		}
 	case 120:
 		yyDollar = yyS[yypt-2 : yypt+1]
-		//line go.y:969
+		//line go.y:974
 		{
 			Yyerror("the bitwise complement operator is ^")
 			yyVAL.node = Nod(OCOM, yyDollar[2].node, nil)
 		}
 	case 121:
 		yyDollar = yyS[yypt-2 : yypt+1]
-		//line go.y:974
+		//line go.y:979
 		{
 			yyVAL.node = Nod(OCOM, yyDollar[2].node, nil)
 		}
 	case 122:
 		yyDollar = yyS[yypt-2 : yypt+1]
-		//line go.y:978
+		//line go.y:983
 		{
 			yyVAL.node = Nod(ORECV, yyDollar[2].node, nil)
 		}
 	case 123:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:988
+		//line go.y:993
 		{
 			yyVAL.node = Nod(OCALL, yyDollar[1].node, nil)
 		}
 	case 124:
 		yyDollar = yyS[yypt-5 : yypt+1]
-		//line go.y:992
+		//line go.y:997
 		{
 			yyVAL.node = Nod(OCALL, yyDollar[1].node, nil)
 			yyVAL.node.List = yyDollar[3].list
 		}
 	case 125:
 		yyDollar = yyS[yypt-6 : yypt+1]
-		//line go.y:997
+		//line go.y:1002
 		{
 			yyVAL.node = Nod(OCALL, yyDollar[1].node, nil)
 			yyVAL.node.List = yyDollar[3].list
@@ -2170,13 +2175,13 @@
 		}
 	case 126:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:1005
+		//line go.y:1010
 		{
 			yyVAL.node = nodlit(yyDollar[1].val)
 		}
 	case 128:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:1010
+		//line go.y:1015
 		{
 			if yyDollar[1].node.Op == OPACK {
 				var s *Sym
@@ -2189,31 +2194,31 @@
 		}
 	case 129:
 		yyDollar = yyS[yypt-5 : yypt+1]
-		//line go.y:1021
+		//line go.y:1026
 		{
 			yyVAL.node = Nod(ODOTTYPE, yyDollar[1].node, yyDollar[4].node)
 		}
 	case 130:
 		yyDollar = yyS[yypt-5 : yypt+1]
-		//line go.y:1025
+		//line go.y:1030
 		{
 			yyVAL.node = Nod(OTYPESW, nil, yyDollar[1].node)
 		}
 	case 131:
 		yyDollar = yyS[yypt-4 : yypt+1]
-		//line go.y:1029
+		//line go.y:1034
 		{
 			yyVAL.node = Nod(OINDEX, yyDollar[1].node, yyDollar[3].node)
 		}
 	case 132:
 		yyDollar = yyS[yypt-6 : yypt+1]
-		//line go.y:1033
+		//line go.y:1038
 		{
 			yyVAL.node = Nod(OSLICE, yyDollar[1].node, Nod(OKEY, yyDollar[3].node, yyDollar[5].node))
 		}
 	case 133:
 		yyDollar = yyS[yypt-8 : yypt+1]
-		//line go.y:1037
+		//line go.y:1042
 		{
 			if yyDollar[5].node == nil {
 				Yyerror("middle index required in 3-index slice")
@@ -2225,7 +2230,7 @@
 		}
 	case 135:
 		yyDollar = yyS[yypt-5 : yypt+1]
-		//line go.y:1048
+		//line go.y:1053
 		{
 			// conversion
 			yyVAL.node = Nod(OCALL, yyDollar[1].node, nil)
@@ -2233,7 +2238,7 @@
 		}
 	case 136:
 		yyDollar = yyS[yypt-5 : yypt+1]
-		//line go.y:1054
+		//line go.y:1059
 		{
 			yyVAL.node = yyDollar[3].node
 			yyVAL.node.Right = yyDollar[1].node
@@ -2242,7 +2247,7 @@
 		}
 	case 137:
 		yyDollar = yyS[yypt-5 : yypt+1]
-		//line go.y:1061
+		//line go.y:1066
 		{
 			yyVAL.node = yyDollar[3].node
 			yyVAL.node.Right = yyDollar[1].node
@@ -2250,7 +2255,7 @@
 		}
 	case 138:
 		yyDollar = yyS[yypt-7 : yypt+1]
-		//line go.y:1067
+		//line go.y:1072
 		{
 			Yyerror("cannot parenthesize type in composite literal")
 			yyVAL.node = yyDollar[5].node
@@ -2259,7 +2264,7 @@
 		}
 	case 140:
 		yyDollar = yyS[yypt-0 : yypt+1]
-		//line go.y:1076
+		//line go.y:1081
 		{
 			// composite expression.
 			// make node early so we get the right line number.
@@ -2267,13 +2272,13 @@
 		}
 	case 141:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:1084
+		//line go.y:1089
 		{
 			yyVAL.node = Nod(OKEY, yyDollar[1].node, yyDollar[3].node)
 		}
 	case 142:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:1090
+		//line go.y:1095
 		{
 			// These nodes do not carry line numbers.
 			// Since a composite literal commonly spans several lines,
@@ -2288,21 +2293,21 @@
 		}
 	case 143:
 		yyDollar = yyS[yypt-4 : yypt+1]
-		//line go.y:1103
+		//line go.y:1108
 		{
 			yyVAL.node = yyDollar[2].node
 			yyVAL.node.List = yyDollar[3].list
 		}
 	case 145:
 		yyDollar = yyS[yypt-4 : yypt+1]
-		//line go.y:1111
+		//line go.y:1116
 		{
 			yyVAL.node = yyDollar[2].node
 			yyVAL.node.List = yyDollar[3].list
 		}
 	case 147:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:1119
+		//line go.y:1124
 		{
 			yyVAL.node = yyDollar[2].node
 
@@ -2316,19 +2321,19 @@
 		}
 	case 151:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:1140
+		//line go.y:1145
 		{
 			yyVAL.i = LBODY
 		}
 	case 152:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:1144
+		//line go.y:1149
 		{
 			yyVAL.i = '{'
 		}
 	case 153:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:1155
+		//line go.y:1160
 		{
 			if yyDollar[1].sym == nil {
 				yyVAL.node = nil
@@ -2338,19 +2343,19 @@
 		}
 	case 154:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:1165
+		//line go.y:1170
 		{
 			yyVAL.node = dclname(yyDollar[1].sym)
 		}
 	case 155:
 		yyDollar = yyS[yypt-0 : yypt+1]
-		//line go.y:1170
+		//line go.y:1175
 		{
 			yyVAL.node = nil
 		}
 	case 157:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:1177
+		//line go.y:1182
 		{
 			yyVAL.sym = yyDollar[1].sym
 			// during imports, unqualified non-exported identifiers are from builtinpkg
@@ -2360,13 +2365,13 @@
 		}
 	case 159:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:1186
+		//line go.y:1191
 		{
 			yyVAL.sym = nil
 		}
 	case 160:
 		yyDollar = yyS[yypt-4 : yypt+1]
-		//line go.y:1192
+		//line go.y:1197
 		{
 			var p *Pkg
 
@@ -2382,7 +2387,7 @@
 		}
 	case 161:
 		yyDollar = yyS[yypt-4 : yypt+1]
-		//line go.y:1206
+		//line go.y:1211
 		{
 			var p *Pkg
 
@@ -2398,7 +2403,7 @@
 		}
 	case 162:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:1222
+		//line go.y:1227
 		{
 			yyVAL.node = oldname(yyDollar[1].sym)
 			if yyVAL.node.Name != nil && yyVAL.node.Name.Pack != nil {
@@ -2407,38 +2412,38 @@
 		}
 	case 164:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:1243
+		//line go.y:1248
 		{
 			Yyerror("final argument in variadic function missing type")
 			yyVAL.node = Nod(ODDD, typenod(typ(TINTER)), nil)
 		}
 	case 165:
 		yyDollar = yyS[yypt-2 : yypt+1]
-		//line go.y:1248
+		//line go.y:1253
 		{
 			yyVAL.node = Nod(ODDD, yyDollar[2].node, nil)
 		}
 	case 171:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:1259
+		//line go.y:1264
 		{
 			yyVAL.node = yyDollar[2].node
 		}
 	case 175:
 		yyDollar = yyS[yypt-2 : yypt+1]
-		//line go.y:1268
+		//line go.y:1273
 		{
 			yyVAL.node = Nod(OIND, yyDollar[2].node, nil)
 		}
 	case 180:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:1278
+		//line go.y:1283
 		{
 			yyVAL.node = yyDollar[2].node
 		}
 	case 190:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:1299
+		//line go.y:1304
 		{
 			if yyDollar[1].node.Op == OPACK {
 				var s *Sym
@@ -2451,53 +2456,53 @@
 		}
 	case 191:
 		yyDollar = yyS[yypt-4 : yypt+1]
-		//line go.y:1312
+		//line go.y:1317
 		{
 			yyVAL.node = Nod(OTARRAY, yyDollar[2].node, yyDollar[4].node)
 		}
 	case 192:
 		yyDollar = yyS[yypt-4 : yypt+1]
-		//line go.y:1316
+		//line go.y:1321
 		{
 			// array literal of nelem
 			yyVAL.node = Nod(OTARRAY, Nod(ODDD, nil, nil), yyDollar[4].node)
 		}
 	case 193:
 		yyDollar = yyS[yypt-2 : yypt+1]
-		//line go.y:1321
+		//line go.y:1326
 		{
 			yyVAL.node = Nod(OTCHAN, yyDollar[2].node, nil)
 			yyVAL.node.Etype = Cboth
 		}
 	case 194:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:1326
+		//line go.y:1331
 		{
 			yyVAL.node = Nod(OTCHAN, yyDollar[3].node, nil)
 			yyVAL.node.Etype = Csend
 		}
 	case 195:
 		yyDollar = yyS[yypt-5 : yypt+1]
-		//line go.y:1331
+		//line go.y:1336
 		{
 			yyVAL.node = Nod(OTMAP, yyDollar[3].node, yyDollar[5].node)
 		}
 	case 198:
 		yyDollar = yyS[yypt-2 : yypt+1]
-		//line go.y:1339
+		//line go.y:1344
 		{
 			yyVAL.node = Nod(OIND, yyDollar[2].node, nil)
 		}
 	case 199:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:1345
+		//line go.y:1350
 		{
 			yyVAL.node = Nod(OTCHAN, yyDollar[3].node, nil)
 			yyVAL.node.Etype = Crecv
 		}
 	case 200:
 		yyDollar = yyS[yypt-5 : yypt+1]
-		//line go.y:1352
+		//line go.y:1357
 		{
 			yyVAL.node = Nod(OTSTRUCT, nil, nil)
 			yyVAL.node.List = yyDollar[3].list
@@ -2505,14 +2510,14 @@
 		}
 	case 201:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:1358
+		//line go.y:1363
 		{
 			yyVAL.node = Nod(OTSTRUCT, nil, nil)
 			fixlbrace(yyDollar[2].i)
 		}
 	case 202:
 		yyDollar = yyS[yypt-5 : yypt+1]
-		//line go.y:1365
+		//line go.y:1370
 		{
 			yyVAL.node = Nod(OTINTER, nil, nil)
 			yyVAL.node.List = yyDollar[3].list
@@ -2520,14 +2525,14 @@
 		}
 	case 203:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:1371
+		//line go.y:1376
 		{
 			yyVAL.node = Nod(OTINTER, nil, nil)
 			fixlbrace(yyDollar[2].i)
 		}
 	case 204:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:1382
+		//line go.y:1387
 		{
 			yyVAL.node = yyDollar[2].node
 			if yyVAL.node == nil {
@@ -2541,13 +2546,15 @@
 			yyVAL.node.Noescape = noescape
 			yyVAL.node.Func.Norace = norace
 			yyVAL.node.Func.Nosplit = nosplit
+			yyVAL.node.Func.Noinline = noinline
 			yyVAL.node.Func.Nowritebarrier = nowritebarrier
+			yyVAL.node.Func.Nowritebarrierrec = nowritebarrierrec
 			yyVAL.node.Func.Systemstack = systemstack
 			funcbody(yyVAL.node)
 		}
 	case 205:
 		yyDollar = yyS[yypt-5 : yypt+1]
-		//line go.y:1402
+		//line go.y:1409
 		{
 			var t *Node
 
@@ -2580,7 +2587,7 @@
 		}
 	case 206:
 		yyDollar = yyS[yypt-8 : yypt+1]
-		//line go.y:1433
+		//line go.y:1440
 		{
 			var rcvr, t *Node
 
@@ -2618,7 +2625,7 @@
 		}
 	case 207:
 		yyDollar = yyS[yypt-5 : yypt+1]
-		//line go.y:1471
+		//line go.y:1478
 		{
 			var s *Sym
 			var t *Type
@@ -2645,7 +2652,7 @@
 		}
 	case 208:
 		yyDollar = yyS[yypt-8 : yypt+1]
-		//line go.y:1496
+		//line go.y:1503
 		{
 			yyVAL.node = methodname1(newname(yyDollar[4].sym), yyDollar[2].list.N.Right)
 			yyVAL.node.Type = functype(yyDollar[2].list.N, yyDollar[6].list, yyDollar[8].list)
@@ -2663,7 +2670,7 @@
 		}
 	case 209:
 		yyDollar = yyS[yypt-5 : yypt+1]
-		//line go.y:1514
+		//line go.y:1521
 		{
 			yyDollar[3].list = checkarglist(yyDollar[3].list, 1)
 			yyVAL.node = Nod(OTFUNC, nil, nil)
@@ -2672,13 +2679,13 @@
 		}
 	case 210:
 		yyDollar = yyS[yypt-0 : yypt+1]
-		//line go.y:1522
+		//line go.y:1529
 		{
 			yyVAL.list = nil
 		}
 	case 211:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:1526
+		//line go.y:1533
 		{
 			yyVAL.list = yyDollar[2].list
 			if yyVAL.list == nil {
@@ -2687,108 +2694,110 @@
 		}
 	case 212:
 		yyDollar = yyS[yypt-0 : yypt+1]
-		//line go.y:1535
+		//line go.y:1542
 		{
 			yyVAL.list = nil
 		}
 	case 213:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:1539
+		//line go.y:1546
 		{
 			yyVAL.list = list1(Nod(ODCLFIELD, nil, yyDollar[1].node))
 		}
 	case 214:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:1543
+		//line go.y:1550
 		{
 			yyDollar[2].list = checkarglist(yyDollar[2].list, 0)
 			yyVAL.list = yyDollar[2].list
 		}
 	case 215:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:1550
+		//line go.y:1557
 		{
 			closurehdr(yyDollar[1].node)
 		}
 	case 216:
 		yyDollar = yyS[yypt-4 : yypt+1]
-		//line go.y:1556
+		//line go.y:1563
 		{
 			yyVAL.node = closurebody(yyDollar[3].list)
 			fixlbrace(yyDollar[2].i)
 		}
 	case 217:
 		yyDollar = yyS[yypt-2 : yypt+1]
-		//line go.y:1561
+		//line go.y:1568
 		{
 			yyVAL.node = closurebody(nil)
 		}
 	case 218:
 		yyDollar = yyS[yypt-0 : yypt+1]
-		//line go.y:1572
+		//line go.y:1579
 		{
 			yyVAL.list = nil
 		}
 	case 219:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:1576
+		//line go.y:1583
 		{
 			yyVAL.list = concat(yyDollar[1].list, yyDollar[2].list)
 			if nsyntaxerrors == 0 {
 				testdclstack()
 			}
-			nointerface = false
 			noescape = false
+			noinline = false
+			nointerface = false
 			norace = false
 			nosplit = false
 			nowritebarrier = false
+			nowritebarrierrec = false
 			systemstack = false
 		}
 	case 221:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:1592
+		//line go.y:1601
 		{
 			yyVAL.list = concat(yyDollar[1].list, yyDollar[3].list)
 		}
 	case 223:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:1599
+		//line go.y:1608
 		{
 			yyVAL.list = concat(yyDollar[1].list, yyDollar[3].list)
 		}
 	case 224:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:1605
+		//line go.y:1614
 		{
 			yyVAL.list = list1(yyDollar[1].node)
 		}
 	case 225:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:1609
+		//line go.y:1618
 		{
 			yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
 		}
 	case 227:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:1616
+		//line go.y:1625
 		{
 			yyVAL.list = concat(yyDollar[1].list, yyDollar[3].list)
 		}
 	case 228:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:1622
+		//line go.y:1631
 		{
 			yyVAL.list = list1(yyDollar[1].node)
 		}
 	case 229:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:1626
+		//line go.y:1635
 		{
 			yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
 		}
 	case 230:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:1632
+		//line go.y:1641
 		{
 			var l *NodeList
 
@@ -2814,14 +2823,14 @@
 		}
 	case 231:
 		yyDollar = yyS[yypt-2 : yypt+1]
-		//line go.y:1656
+		//line go.y:1665
 		{
 			yyDollar[1].node.SetVal(yyDollar[2].val)
 			yyVAL.list = list1(yyDollar[1].node)
 		}
 	case 232:
 		yyDollar = yyS[yypt-4 : yypt+1]
-		//line go.y:1661
+		//line go.y:1670
 		{
 			yyDollar[2].node.SetVal(yyDollar[4].val)
 			yyVAL.list = list1(yyDollar[2].node)
@@ -2829,7 +2838,7 @@
 		}
 	case 233:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:1667
+		//line go.y:1676
 		{
 			yyDollar[2].node.Right = Nod(OIND, yyDollar[2].node.Right, nil)
 			yyDollar[2].node.SetVal(yyDollar[3].val)
@@ -2837,7 +2846,7 @@
 		}
 	case 234:
 		yyDollar = yyS[yypt-5 : yypt+1]
-		//line go.y:1673
+		//line go.y:1682
 		{
 			yyDollar[3].node.Right = Nod(OIND, yyDollar[3].node.Right, nil)
 			yyDollar[3].node.SetVal(yyDollar[5].val)
@@ -2846,7 +2855,7 @@
 		}
 	case 235:
 		yyDollar = yyS[yypt-5 : yypt+1]
-		//line go.y:1680
+		//line go.y:1689
 		{
 			yyDollar[3].node.Right = Nod(OIND, yyDollar[3].node.Right, nil)
 			yyDollar[3].node.SetVal(yyDollar[5].val)
@@ -2855,7 +2864,7 @@
 		}
 	case 236:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:1689
+		//line go.y:1698
 		{
 			var n *Node
 
@@ -2867,7 +2876,7 @@
 		}
 	case 237:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:1699
+		//line go.y:1708
 		{
 			var pkg *Pkg
 
@@ -2882,33 +2891,33 @@
 		}
 	case 238:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:1714
+		//line go.y:1723
 		{
 			yyVAL.node = embedded(yyDollar[1].sym, localpkg)
 		}
 	case 239:
 		yyDollar = yyS[yypt-2 : yypt+1]
-		//line go.y:1720
+		//line go.y:1729
 		{
 			yyVAL.node = Nod(ODCLFIELD, yyDollar[1].node, yyDollar[2].node)
 			ifacedcl(yyVAL.node)
 		}
 	case 240:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:1725
+		//line go.y:1734
 		{
 			yyVAL.node = Nod(ODCLFIELD, nil, oldname(yyDollar[1].sym))
 		}
 	case 241:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:1729
+		//line go.y:1738
 		{
 			yyVAL.node = Nod(ODCLFIELD, nil, oldname(yyDollar[2].sym))
 			Yyerror("cannot parenthesize embedded type")
 		}
 	case 242:
 		yyDollar = yyS[yypt-4 : yypt+1]
-		//line go.y:1736
+		//line go.y:1745
 		{
 			// without func keyword
 			yyDollar[2].list = checkarglist(yyDollar[2].list, 1)
@@ -2918,7 +2927,7 @@
 		}
 	case 244:
 		yyDollar = yyS[yypt-2 : yypt+1]
-		//line go.y:1750
+		//line go.y:1759
 		{
 			yyVAL.node = Nod(ONONAME, nil, nil)
 			yyVAL.node.Sym = yyDollar[1].sym
@@ -2926,7 +2935,7 @@
 		}
 	case 245:
 		yyDollar = yyS[yypt-2 : yypt+1]
-		//line go.y:1756
+		//line go.y:1765
 		{
 			yyVAL.node = Nod(ONONAME, nil, nil)
 			yyVAL.node.Sym = yyDollar[1].sym
@@ -2934,56 +2943,56 @@
 		}
 	case 247:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:1765
+		//line go.y:1774
 		{
 			yyVAL.list = list1(yyDollar[1].node)
 		}
 	case 248:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:1769
+		//line go.y:1778
 		{
 			yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
 		}
 	case 249:
 		yyDollar = yyS[yypt-0 : yypt+1]
-		//line go.y:1774
+		//line go.y:1783
 		{
 			yyVAL.list = nil
 		}
 	case 250:
 		yyDollar = yyS[yypt-2 : yypt+1]
-		//line go.y:1778
+		//line go.y:1787
 		{
 			yyVAL.list = yyDollar[1].list
 		}
 	case 251:
 		yyDollar = yyS[yypt-0 : yypt+1]
-		//line go.y:1786
+		//line go.y:1795
 		{
 			yyVAL.node = nil
 		}
 	case 253:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:1791
+		//line go.y:1800
 		{
 			yyVAL.node = liststmt(yyDollar[1].list)
 		}
 	case 255:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:1796
+		//line go.y:1805
 		{
 			yyVAL.node = nil
 		}
 	case 261:
 		yyDollar = yyS[yypt-2 : yypt+1]
-		//line go.y:1807
+		//line go.y:1816
 		{
 			yyDollar[1].node = Nod(OLABEL, yyDollar[1].node, nil)
 			yyDollar[1].node.Sym = dclstack // context, for goto restrictions
 		}
 	case 262:
 		yyDollar = yyS[yypt-4 : yypt+1]
-		//line go.y:1812
+		//line go.y:1821
 		{
 			var l *NodeList
 
@@ -2996,7 +3005,7 @@
 		}
 	case 263:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:1823
+		//line go.y:1832
 		{
 			// will be converted to OFALL
 			yyVAL.node = Nod(OXFALL, nil, nil)
@@ -3004,38 +3013,38 @@
 		}
 	case 264:
 		yyDollar = yyS[yypt-2 : yypt+1]
-		//line go.y:1829
+		//line go.y:1838
 		{
 			yyVAL.node = Nod(OBREAK, yyDollar[2].node, nil)
 		}
 	case 265:
 		yyDollar = yyS[yypt-2 : yypt+1]
-		//line go.y:1833
+		//line go.y:1842
 		{
 			yyVAL.node = Nod(OCONTINUE, yyDollar[2].node, nil)
 		}
 	case 266:
 		yyDollar = yyS[yypt-2 : yypt+1]
-		//line go.y:1837
+		//line go.y:1846
 		{
 			yyVAL.node = Nod(OPROC, yyDollar[2].node, nil)
 		}
 	case 267:
 		yyDollar = yyS[yypt-2 : yypt+1]
-		//line go.y:1841
+		//line go.y:1850
 		{
 			yyVAL.node = Nod(ODEFER, yyDollar[2].node, nil)
 		}
 	case 268:
 		yyDollar = yyS[yypt-2 : yypt+1]
-		//line go.y:1845
+		//line go.y:1854
 		{
 			yyVAL.node = Nod(OGOTO, yyDollar[2].node, nil)
 			yyVAL.node.Sym = dclstack // context, for goto restrictions
 		}
 	case 269:
 		yyDollar = yyS[yypt-2 : yypt+1]
-		//line go.y:1850
+		//line go.y:1859
 		{
 			yyVAL.node = Nod(ORETURN, nil, nil)
 			yyVAL.node.List = yyDollar[2].list
@@ -3057,7 +3066,7 @@
 		}
 	case 270:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:1872
+		//line go.y:1881
 		{
 			yyVAL.list = nil
 			if yyDollar[1].node != nil {
@@ -3066,7 +3075,7 @@
 		}
 	case 271:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:1879
+		//line go.y:1888
 		{
 			yyVAL.list = yyDollar[1].list
 			if yyDollar[3].node != nil {
@@ -3075,163 +3084,163 @@
 		}
 	case 272:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:1888
+		//line go.y:1897
 		{
 			yyVAL.list = list1(yyDollar[1].node)
 		}
 	case 273:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:1892
+		//line go.y:1901
 		{
 			yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
 		}
 	case 274:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:1898
+		//line go.y:1907
 		{
 			yyVAL.list = list1(yyDollar[1].node)
 		}
 	case 275:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:1902
+		//line go.y:1911
 		{
 			yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
 		}
 	case 276:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:1908
+		//line go.y:1917
 		{
 			yyVAL.list = list1(yyDollar[1].node)
 		}
 	case 277:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:1912
+		//line go.y:1921
 		{
 			yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
 		}
 	case 278:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:1918
+		//line go.y:1927
 		{
 			yyVAL.list = list1(yyDollar[1].node)
 		}
 	case 279:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:1922
+		//line go.y:1931
 		{
 			yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
 		}
 	case 280:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:1931
+		//line go.y:1940
 		{
 			yyVAL.list = list1(yyDollar[1].node)
 		}
 	case 281:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:1935
+		//line go.y:1944
 		{
 			yyVAL.list = list1(yyDollar[1].node)
 		}
 	case 282:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:1939
+		//line go.y:1948
 		{
 			yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
 		}
 	case 283:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:1943
+		//line go.y:1952
 		{
 			yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
 		}
 	case 284:
 		yyDollar = yyS[yypt-0 : yypt+1]
-		//line go.y:1948
+		//line go.y:1957
 		{
 			yyVAL.list = nil
 		}
 	case 285:
 		yyDollar = yyS[yypt-2 : yypt+1]
-		//line go.y:1952
+		//line go.y:1961
 		{
 			yyVAL.list = yyDollar[1].list
 		}
 	case 290:
 		yyDollar = yyS[yypt-0 : yypt+1]
-		//line go.y:1966
+		//line go.y:1975
 		{
 			yyVAL.node = nil
 		}
 	case 292:
 		yyDollar = yyS[yypt-0 : yypt+1]
-		//line go.y:1972
+		//line go.y:1981
 		{
 			yyVAL.list = nil
 		}
 	case 294:
 		yyDollar = yyS[yypt-0 : yypt+1]
-		//line go.y:1978
+		//line go.y:1987
 		{
 			yyVAL.node = nil
 		}
 	case 296:
 		yyDollar = yyS[yypt-0 : yypt+1]
-		//line go.y:1984
+		//line go.y:1993
 		{
 			yyVAL.list = nil
 		}
 	case 298:
 		yyDollar = yyS[yypt-0 : yypt+1]
-		//line go.y:1990
+		//line go.y:1999
 		{
 			yyVAL.list = nil
 		}
 	case 300:
 		yyDollar = yyS[yypt-0 : yypt+1]
-		//line go.y:1996
+		//line go.y:2005
 		{
 			yyVAL.list = nil
 		}
 	case 302:
 		yyDollar = yyS[yypt-0 : yypt+1]
-		//line go.y:2002
+		//line go.y:2011
 		{
 			yyVAL.val.U = nil
 		}
 	case 304:
 		yyDollar = yyS[yypt-4 : yypt+1]
-		//line go.y:2012
+		//line go.y:2021
 		{
 			importimport(yyDollar[2].sym, yyDollar[3].val.U.(string))
 		}
 	case 305:
 		yyDollar = yyS[yypt-4 : yypt+1]
-		//line go.y:2016
+		//line go.y:2025
 		{
 			importvar(yyDollar[2].sym, yyDollar[3].typ)
 		}
 	case 306:
 		yyDollar = yyS[yypt-5 : yypt+1]
-		//line go.y:2020
+		//line go.y:2029
 		{
 			importconst(yyDollar[2].sym, Types[TIDEAL], yyDollar[4].node)
 		}
 	case 307:
 		yyDollar = yyS[yypt-6 : yypt+1]
-		//line go.y:2024
+		//line go.y:2033
 		{
 			importconst(yyDollar[2].sym, yyDollar[3].typ, yyDollar[5].node)
 		}
 	case 308:
 		yyDollar = yyS[yypt-4 : yypt+1]
-		//line go.y:2028
+		//line go.y:2037
 		{
 			importtype(yyDollar[2].typ, yyDollar[3].typ)
 		}
 	case 309:
 		yyDollar = yyS[yypt-4 : yypt+1]
-		//line go.y:2032
+		//line go.y:2041
 		{
 			if yyDollar[2].node == nil {
 				dclcontext = PEXTERN // since we skip the funcbody below
@@ -3252,27 +3261,27 @@
 		}
 	case 310:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:2053
+		//line go.y:2062
 		{
 			yyVAL.sym = yyDollar[1].sym
 			structpkg = yyVAL.sym.Pkg
 		}
 	case 311:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:2060
+		//line go.y:2069
 		{
 			yyVAL.typ = pkgtype(yyDollar[1].sym)
 			importsym(yyDollar[1].sym, OTYPE)
 		}
 	case 317:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:2080
+		//line go.y:2089
 		{
 			yyVAL.typ = pkgtype(yyDollar[1].sym)
 		}
 	case 318:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:2084
+		//line go.y:2093
 		{
 			// predefined name like uint8
 			yyDollar[1].sym = Pkglookup(yyDollar[1].sym.Name, builtinpkg)
@@ -3285,43 +3294,43 @@
 		}
 	case 319:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:2095
+		//line go.y:2104
 		{
 			yyVAL.typ = aindex(nil, yyDollar[3].typ)
 		}
 	case 320:
 		yyDollar = yyS[yypt-4 : yypt+1]
-		//line go.y:2099
+		//line go.y:2108
 		{
 			yyVAL.typ = aindex(nodlit(yyDollar[2].val), yyDollar[4].typ)
 		}
 	case 321:
 		yyDollar = yyS[yypt-5 : yypt+1]
-		//line go.y:2103
+		//line go.y:2112
 		{
 			yyVAL.typ = maptype(yyDollar[3].typ, yyDollar[5].typ)
 		}
 	case 322:
 		yyDollar = yyS[yypt-4 : yypt+1]
-		//line go.y:2107
+		//line go.y:2116
 		{
 			yyVAL.typ = tostruct(yyDollar[3].list)
 		}
 	case 323:
 		yyDollar = yyS[yypt-4 : yypt+1]
-		//line go.y:2111
+		//line go.y:2120
 		{
 			yyVAL.typ = tointerface(yyDollar[3].list)
 		}
 	case 324:
 		yyDollar = yyS[yypt-2 : yypt+1]
-		//line go.y:2115
+		//line go.y:2124
 		{
 			yyVAL.typ = Ptrto(yyDollar[2].typ)
 		}
 	case 325:
 		yyDollar = yyS[yypt-2 : yypt+1]
-		//line go.y:2119
+		//line go.y:2128
 		{
 			yyVAL.typ = typ(TCHAN)
 			yyVAL.typ.Type = yyDollar[2].typ
@@ -3329,7 +3338,7 @@
 		}
 	case 326:
 		yyDollar = yyS[yypt-4 : yypt+1]
-		//line go.y:2125
+		//line go.y:2134
 		{
 			yyVAL.typ = typ(TCHAN)
 			yyVAL.typ.Type = yyDollar[3].typ
@@ -3337,7 +3346,7 @@
 		}
 	case 327:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:2131
+		//line go.y:2140
 		{
 			yyVAL.typ = typ(TCHAN)
 			yyVAL.typ.Type = yyDollar[3].typ
@@ -3345,7 +3354,7 @@
 		}
 	case 328:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:2139
+		//line go.y:2148
 		{
 			yyVAL.typ = typ(TCHAN)
 			yyVAL.typ.Type = yyDollar[3].typ
@@ -3353,13 +3362,13 @@
 		}
 	case 329:
 		yyDollar = yyS[yypt-5 : yypt+1]
-		//line go.y:2147
+		//line go.y:2156
 		{
 			yyVAL.typ = functype(nil, yyDollar[3].list, yyDollar[5].list)
 		}
 	case 330:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:2153
+		//line go.y:2162
 		{
 			yyVAL.node = Nod(ODCLFIELD, nil, typenod(yyDollar[2].typ))
 			if yyDollar[1].sym != nil {
@@ -3369,7 +3378,7 @@
 		}
 	case 331:
 		yyDollar = yyS[yypt-4 : yypt+1]
-		//line go.y:2161
+		//line go.y:2170
 		{
 			var t *Type
 
@@ -3386,7 +3395,7 @@
 		}
 	case 332:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:2178
+		//line go.y:2187
 		{
 			var s *Sym
 			var p *Pkg
@@ -3410,43 +3419,43 @@
 		}
 	case 333:
 		yyDollar = yyS[yypt-5 : yypt+1]
-		//line go.y:2202
+		//line go.y:2211
 		{
 			yyVAL.node = Nod(ODCLFIELD, newname(yyDollar[1].sym), typenod(functype(fakethis(), yyDollar[3].list, yyDollar[5].list)))
 		}
 	case 334:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:2206
+		//line go.y:2215
 		{
 			yyVAL.node = Nod(ODCLFIELD, nil, typenod(yyDollar[1].typ))
 		}
 	case 335:
 		yyDollar = yyS[yypt-0 : yypt+1]
-		//line go.y:2211
+		//line go.y:2220
 		{
 			yyVAL.list = nil
 		}
 	case 337:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:2218
+		//line go.y:2227
 		{
 			yyVAL.list = yyDollar[2].list
 		}
 	case 338:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:2222
+		//line go.y:2231
 		{
 			yyVAL.list = list1(Nod(ODCLFIELD, nil, typenod(yyDollar[1].typ)))
 		}
 	case 339:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:2232
+		//line go.y:2241
 		{
 			yyVAL.node = nodlit(yyDollar[1].val)
 		}
 	case 340:
 		yyDollar = yyS[yypt-2 : yypt+1]
-		//line go.y:2236
+		//line go.y:2245
 		{
 			yyVAL.node = nodlit(yyDollar[2].val)
 			switch yyVAL.node.Val().Ctype() {
@@ -3466,7 +3475,7 @@
 		}
 	case 341:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:2254
+		//line go.y:2263
 		{
 			yyVAL.node = oldname(Pkglookup(yyDollar[1].sym.Name, builtinpkg))
 			if yyVAL.node.Op != OLITERAL {
@@ -3475,7 +3484,7 @@
 		}
 	case 343:
 		yyDollar = yyS[yypt-5 : yypt+1]
-		//line go.y:2264
+		//line go.y:2273
 		{
 			if yyDollar[2].node.Val().Ctype() == CTRUNE && yyDollar[4].node.Val().Ctype() == CTINT {
 				yyVAL.node = yyDollar[2].node
@@ -3488,37 +3497,37 @@
 		}
 	case 346:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:2280
+		//line go.y:2289
 		{
 			yyVAL.list = list1(yyDollar[1].node)
 		}
 	case 347:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:2284
+		//line go.y:2293
 		{
 			yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
 		}
 	case 348:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:2290
+		//line go.y:2299
 		{
 			yyVAL.list = list1(yyDollar[1].node)
 		}
 	case 349:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:2294
+		//line go.y:2303
 		{
 			yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
 		}
 	case 350:
 		yyDollar = yyS[yypt-1 : yypt+1]
-		//line go.y:2300
+		//line go.y:2309
 		{
 			yyVAL.list = list1(yyDollar[1].node)
 		}
 	case 351:
 		yyDollar = yyS[yypt-3 : yypt+1]
-		//line go.y:2304
+		//line go.y:2313
 		{
 			yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
 		}
diff --git a/src/cmd/compile/internal/mips64/cgen.go b/src/cmd/compile/internal/mips64/cgen.go
index 4f3092c..434bfc7 100644
--- a/src/cmd/compile/internal/mips64/cgen.go
+++ b/src/cmd/compile/internal/mips64/cgen.go
@@ -2,12 +2,12 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package ppc64
+package mips64
 
 import (
 	"cmd/compile/internal/gc"
 	"cmd/internal/obj"
-	"cmd/internal/obj/ppc64"
+	"cmd/internal/obj/mips"
 )
 
 func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
@@ -23,16 +23,16 @@
 		gc.Fatalf("sgen: invalid alignment %d for %v", align, n.Type)
 
 	case 1:
-		op = ppc64.AMOVBU
+		op = mips.AMOVB
 
 	case 2:
-		op = ppc64.AMOVHU
+		op = mips.AMOVH
 
 	case 4:
-		op = ppc64.AMOVWZU // there is no lwau, only lwaux
+		op = mips.AMOVW
 
 	case 8:
-		op = ppc64.AMOVDU
+		op = mips.AMOVV
 	}
 
 	if w%int64(align) != 0 {
@@ -53,7 +53,7 @@
 	if n.Ullman >= res.Ullman {
 		gc.Agenr(n, &dst, res) // temporarily use dst
 		gc.Regalloc(&src, gc.Types[gc.Tptr], nil)
-		gins(ppc64.AMOVD, &dst, &src)
+		gins(mips.AMOVV, &dst, &src)
 		if res.Op == gc.ONAME {
 			gc.Gvardef(res)
 		}
@@ -76,28 +76,28 @@
 	if dir < 0 {
 		if c >= 4 {
 			gc.Regalloc(&nend, gc.Types[gc.Tptr], nil)
-			gins(ppc64.AMOVD, &src, &nend)
+			gins(mips.AMOVV, &src, &nend)
 		}
 
-		p := gins(ppc64.AADD, nil, &src)
+		p := gins(mips.AADDV, nil, &src)
 		p.From.Type = obj.TYPE_CONST
 		p.From.Offset = w
 
-		p = gins(ppc64.AADD, nil, &dst)
+		p = gins(mips.AADDV, nil, &dst)
 		p.From.Type = obj.TYPE_CONST
 		p.From.Offset = w
 	} else {
-		p := gins(ppc64.AADD, nil, &src)
+		p := gins(mips.AADDV, nil, &src)
 		p.From.Type = obj.TYPE_CONST
 		p.From.Offset = int64(-dir)
 
-		p = gins(ppc64.AADD, nil, &dst)
+		p = gins(mips.AADDV, nil, &dst)
 		p.From.Type = obj.TYPE_CONST
 		p.From.Offset = int64(-dir)
 
 		if c >= 4 {
 			gc.Regalloc(&nend, gc.Types[gc.Tptr], nil)
-			p := gins(ppc64.AMOVD, &src, &nend)
+			p := gins(mips.AMOVV, &src, &nend)
 			p.From.Type = obj.TYPE_ADDR
 			p.From.Offset = w
 		}
@@ -111,35 +111,43 @@
 		p.From.Offset = int64(dir)
 		ploop := p
 
+		p = gins(mips.AADDV, nil, &src)
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = int64(dir)
+
 		p = gins(op, &tmp, &dst)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Offset = int64(dir)
 
-		p = gins(ppc64.ACMP, &src, &nend)
+		p = gins(mips.AADDV, nil, &dst)
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = int64(dir)
 
-		gc.Patch(gc.Gbranch(ppc64.ABNE, nil, 0), ploop)
+		gc.Patch(ginsbranch(mips.ABNE, nil, &src, &nend, 0), ploop)
 		gc.Regfree(&nend)
 	} else {
-		// TODO(austin): Instead of generating ADD $-8,R8; ADD
-		// $-8,R7; n*(MOVDU 8(R8),R9; MOVDU R9,8(R7);) just
-		// generate the offsets directly and eliminate the
-		// ADDs.  That will produce shorter, more
+		// TODO: Instead of generating ADDV $-8,R8; ADDV
+		// $-8,R7; n*(MOVV 8(R8),R9; ADDV $8,R8; MOVV R9,8(R7);
+		// ADDV $8,R7;) just generate the offsets directly and
+		// eliminate the ADDs.  That will produce shorter, more
 		// pipeline-able code.
 		var p *obj.Prog
-		for {
-			tmp14 := c
-			c--
-			if tmp14 <= 0 {
-				break
-			}
-
+		for ; c > 0; c-- {
 			p = gins(op, &src, &tmp)
 			p.From.Type = obj.TYPE_MEM
 			p.From.Offset = int64(dir)
 
+			p = gins(mips.AADDV, nil, &src)
+			p.From.Type = obj.TYPE_CONST
+			p.From.Offset = int64(dir)
+
 			p = gins(op, &tmp, &dst)
 			p.To.Type = obj.TYPE_MEM
 			p.To.Offset = int64(dir)
+
+			p = gins(mips.AADDV, nil, &dst)
+			p.From.Type = obj.TYPE_CONST
+			p.From.Offset = int64(dir)
 		}
 	}
 
diff --git a/src/cmd/compile/internal/mips64/galign.go b/src/cmd/compile/internal/mips64/galign.go
index 16509da..00ffe17 100644
--- a/src/cmd/compile/internal/mips64/galign.go
+++ b/src/cmd/compile/internal/mips64/galign.go
@@ -2,27 +2,27 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package ppc64
+package mips64
 
 import (
 	"cmd/compile/internal/gc"
 	"cmd/internal/obj"
-	"cmd/internal/obj/ppc64"
+	"cmd/internal/obj/mips"
 )
 
-var thechar int = '9'
+var thechar int = '0'
 
-var thestring string = "ppc64"
+var thestring string = "mips64"
 
 var thelinkarch *obj.LinkArch
 
 func linkarchinit() {
 	thestring = obj.Getgoarch()
 	gc.Thearch.Thestring = thestring
-	if thestring == "ppc64le" {
-		thelinkarch = &ppc64.Linkppc64le
+	if thestring == "mips64le" {
+		thelinkarch = &mips.Linkmips64le
 	} else {
-		thelinkarch = &ppc64.Linkppc64
+		thelinkarch = &mips.Linkmips64
 	}
 	gc.Thearch.Thelinkarch = thelinkarch
 }
@@ -50,15 +50,15 @@
 	gc.Thearch.Thestring = thestring
 	gc.Thearch.Thelinkarch = thelinkarch
 	gc.Thearch.Typedefs = typedefs
-	gc.Thearch.REGSP = ppc64.REGSP
-	gc.Thearch.REGCTXT = ppc64.REGCTXT
-	gc.Thearch.REGCALLX = ppc64.REG_R3
-	gc.Thearch.REGCALLX2 = ppc64.REG_R4
-	gc.Thearch.REGRETURN = ppc64.REG_R3
-	gc.Thearch.REGMIN = ppc64.REG_R0
-	gc.Thearch.REGMAX = ppc64.REG_R31
-	gc.Thearch.FREGMIN = ppc64.REG_F0
-	gc.Thearch.FREGMAX = ppc64.REG_F31
+	gc.Thearch.REGSP = mips.REGSP
+	gc.Thearch.REGCTXT = mips.REGCTXT
+	gc.Thearch.REGCALLX = mips.REG_R1
+	gc.Thearch.REGCALLX2 = mips.REG_R2
+	gc.Thearch.REGRETURN = mips.REGRET
+	gc.Thearch.REGMIN = mips.REG_R0
+	gc.Thearch.REGMAX = mips.REG_R31
+	gc.Thearch.FREGMIN = mips.REG_F0
+	gc.Thearch.FREGMAX = mips.REG_F31
 	gc.Thearch.MAXWIDTH = MAXWIDTH
 	gc.Thearch.ReservedRegs = resvd
 
diff --git a/src/cmd/compile/internal/mips64/ggen.go b/src/cmd/compile/internal/mips64/ggen.go
index 2779140..8c285a2 100644
--- a/src/cmd/compile/internal/mips64/ggen.go
+++ b/src/cmd/compile/internal/mips64/ggen.go
@@ -2,12 +2,12 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package ppc64
+package mips64
 
 import (
 	"cmd/compile/internal/gc"
 	"cmd/internal/obj"
-	"cmd/internal/obj/ppc64"
+	"cmd/internal/obj/mips"
 	"fmt"
 )
 
@@ -69,30 +69,35 @@
 	}
 	if cnt < int64(4*gc.Widthptr) {
 		for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
-			p = appendpp(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, 8+frame+lo+i)
+			p = appendpp(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, 8+frame+lo+i)
 		}
 		// TODO(dfc): https://golang.org/issue/12108
 		// If DUFFZERO is used inside a tail call (see genwrapper) it will
 		// overwrite the link register.
 	} else if false && cnt <= int64(128*gc.Widthptr) {
-		p = appendpp(p, ppc64.AADD, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, ppc64.REGRT1, 0)
-		p.Reg = ppc64.REGSP
+		p = appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, mips.REGRT1, 0)
+		p.Reg = mips.REGSP
 		p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
 		f := gc.Sysfunc("duffzero")
 		gc.Naddr(&p.To, f)
 		gc.Afunclit(&p.To, f)
-		p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
+		p.To.Offset = 8 * (128 - cnt/int64(gc.Widthptr))
 	} else {
-		p = appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, ppc64.REGTMP, 0)
-		p = appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT1, 0)
-		p.Reg = ppc64.REGSP
-		p = appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, ppc64.REGTMP, 0)
-		p = appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
-		p.Reg = ppc64.REGRT1
-		p = appendpp(p, ppc64.AMOVDU, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGRT1, int64(gc.Widthptr))
+		//	ADDV	$(8+frame+lo-8), SP, r1
+		//	ADDV	$cnt, r1, r2
+		// loop:
+		//	MOVV	R0, (Widthptr)r1
+		//	ADDV	$Widthptr, r1
+		//	BNE		r1, r2, loop
+		p = appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, mips.REGRT1, 0)
+		p.Reg = mips.REGSP
+		p = appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0)
+		p.Reg = mips.REGRT1
+		p = appendpp(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGRT1, int64(gc.Widthptr))
 		p1 := p
-		p = appendpp(p, ppc64.ACMP, obj.TYPE_REG, ppc64.REGRT1, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
-		p = appendpp(p, ppc64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
+		p = appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, int64(gc.Widthptr), obj.TYPE_REG, mips.REGRT1, 0)
+		p = appendpp(p, mips.ABNE, obj.TYPE_REG, mips.REGRT1, 0, obj.TYPE_BRANCH, 0, 0)
+		p.Reg = mips.REGRT2
 		gc.Patch(p, p1)
 	}
 
@@ -117,8 +122,8 @@
 
 func ginsnop() {
 	var reg gc.Node
-	gc.Nodreg(&reg, gc.Types[gc.TINT], ppc64.REG_R0)
-	gins(ppc64.AOR, &reg, &reg)
+	gc.Nodreg(&reg, gc.Types[gc.TINT], mips.REG_R0)
+	gins(mips.ANOR, &reg, &reg)
 }
 
 var panicdiv *gc.Node
@@ -130,26 +135,10 @@
  *	res = nl % nr
  * according to op.
  */
-func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
-	// Have to be careful about handling
-	// most negative int divided by -1 correctly.
-	// The hardware will generate undefined result.
-	// Also need to explicitly trap on division on zero,
-	// the hardware will silently generate undefined result.
-	// DIVW will leave unpredicable result in higher 32-bit,
-	// so always use DIVD/DIVDU.
+func dodiv(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 	t := nl.Type
 
 	t0 := t
-	check := 0
-	if gc.Issigned[t.Etype] {
-		check = 1
-		if gc.Isconst(nl, gc.CTINT) && nl.Int() != -(1<<uint64(t.Width*8-1)) {
-			check = 0
-		} else if gc.Isconst(nr, gc.CTINT) && nr.Int() != -1 {
-			check = 0
-		}
-	}
 
 	if t.Width < 8 {
 		if gc.Issigned[t.Etype] {
@@ -157,7 +146,6 @@
 		} else {
 			t = gc.Types[gc.TUINT64]
 		}
-		check = 0
 	}
 
 	a := optoas(gc.ODIV, t)
@@ -186,65 +174,26 @@
 	}
 
 	// Handle divide-by-zero panic.
-	p1 := gins(optoas(gc.OCMP, t), &tr, nil)
-
-	p1.To.Type = obj.TYPE_REG
-	p1.To.Reg = ppc64.REGZERO
-	p1 = gc.Gbranch(optoas(gc.ONE, t), nil, +1)
+	p1 := ginsbranch(mips.ABNE, nil, &tr, nil, 0)
 	if panicdiv == nil {
 		panicdiv = gc.Sysfunc("panicdivide")
 	}
 	gc.Ginscall(panicdiv, -1)
 	gc.Patch(p1, gc.Pc)
 
-	var p2 *obj.Prog
-	if check != 0 {
-		var nm1 gc.Node
-		gc.Nodconst(&nm1, t, -1)
-		gins(optoas(gc.OCMP, t), &tr, &nm1)
-		p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
-		if op == gc.ODIV {
-			// a / (-1) is -a.
-			gins(optoas(gc.OMINUS, t), nil, &tl)
-
-			gmove(&tl, res)
-		} else {
-			// a % (-1) is 0.
-			var nz gc.Node
-			gc.Nodconst(&nz, t, 0)
-
-			gmove(&nz, res)
-		}
-
-		p2 = gc.Gbranch(obj.AJMP, nil, 0)
-		gc.Patch(p1, gc.Pc)
-	}
-
-	p1 = gins(a, &tr, &tl)
+	gins3(a, &tr, &tl, nil)
+	gc.Regfree(&tr)
 	if op == gc.ODIV {
-		gc.Regfree(&tr)
-		gmove(&tl, res)
-	} else {
-		// A%B = A-(A/B*B)
-		var tm gc.Node
-		gc.Regalloc(&tm, t, nil)
-
-		// patch div to use the 3 register form
-		// TODO(minux): add gins3?
-		p1.Reg = p1.To.Reg
-
-		p1.To.Reg = tm.Reg
-		gins(optoas(gc.OMUL, t), &tr, &tm)
-		gc.Regfree(&tr)
-		gins(optoas(gc.OSUB, t), &tm, &tl)
-		gc.Regfree(&tm)
-		gmove(&tl, res)
+		var lo gc.Node
+		gc.Nodreg(&lo, gc.Types[gc.TUINT64], mips.REG_LO)
+		gins(mips.AMOVV, &lo, &tl)
+	} else { // remainder in REG_HI
+		var hi gc.Node
+		gc.Nodreg(&hi, gc.Types[gc.TUINT64], mips.REG_HI)
+		gins(mips.AMOVV, &hi, &tl)
 	}
-
+	gmove(&tl, res)
 	gc.Regfree(&tl)
-	if check != 0 {
-		gc.Patch(p2, gc.Pc)
-	}
 }
 
 /*
@@ -254,9 +203,7 @@
 func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
 	// largest ullman on left.
 	if nl.Ullman < nr.Ullman {
-		tmp := (*gc.Node)(nl)
-		nl = nr
-		nr = tmp
+		nl, nr = nr, nl
 	}
 
 	t := (*gc.Type)(nl.Type)
@@ -269,26 +216,35 @@
 	case gc.TINT8,
 		gc.TINT16,
 		gc.TINT32:
-		gins(optoas(gc.OMUL, t), &n2, &n1)
-		p := (*obj.Prog)(gins(ppc64.ASRAD, nil, &n1))
+		gins3(optoas(gc.OMUL, t), &n2, &n1, nil)
+		var lo gc.Node
+		gc.Nodreg(&lo, gc.Types[gc.TUINT64], mips.REG_LO)
+		gins(mips.AMOVV, &lo, &n1)
+		p := (*obj.Prog)(gins(mips.ASRAV, nil, &n1))
 		p.From.Type = obj.TYPE_CONST
 		p.From.Offset = int64(w)
 
 	case gc.TUINT8,
 		gc.TUINT16,
 		gc.TUINT32:
-		gins(optoas(gc.OMUL, t), &n2, &n1)
-		p := (*obj.Prog)(gins(ppc64.ASRD, nil, &n1))
+		gins3(optoas(gc.OMUL, t), &n2, &n1, nil)
+		var lo gc.Node
+		gc.Nodreg(&lo, gc.Types[gc.TUINT64], mips.REG_LO)
+		gins(mips.AMOVV, &lo, &n1)
+		p := (*obj.Prog)(gins(mips.ASRLV, nil, &n1))
 		p.From.Type = obj.TYPE_CONST
 		p.From.Offset = int64(w)
 
 	case gc.TINT64,
 		gc.TUINT64:
 		if gc.Issigned[t.Etype] {
-			gins(ppc64.AMULHD, &n2, &n1)
+			gins3(mips.AMULV, &n2, &n1, nil)
 		} else {
-			gins(ppc64.AMULHDU, &n2, &n1)
+			gins3(mips.AMULVU, &n2, &n1, nil)
 		}
+		var hi gc.Node
+		gc.Nodreg(&hi, gc.Types[gc.TUINT64], mips.REG_HI)
+		gins(mips.AMOVV, &hi, &n1)
 
 	default:
 		gc.Fatalf("cgen_hmul %v", t)
@@ -304,7 +260,7 @@
  *	res = nl << nr
  *	res = nl >> nr
  */
-func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 	a := int(optoas(op, nl.Type))
 
 	if nr.Op == gc.OLITERAL {
@@ -372,9 +328,11 @@
 
 	// test and fix up large shifts
 	if !bounded {
+		var rtmp gc.Node
+		gc.Nodreg(&rtmp, tcount, mips.REGTMP)
 		gc.Nodconst(&n3, tcount, nl.Type.Width*8)
-		gins(optoas(gc.OCMP, tcount), &n1, &n3)
-		p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, tcount), nil, +1))
+		gins3(mips.ASGTU, &n3, &n1, &rtmp)
+		p1 := ginsbranch(mips.ABNE, nil, &rtmp, nil, 0)
 		if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
 			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
 			gins(a, &n3, &n2)
@@ -410,61 +368,64 @@
 	c := uint64(w % 8) // bytes
 	q := uint64(w / 8) // dwords
 
-	if gc.Reginuse(ppc64.REGRT1) {
-		gc.Fatalf("%v in use during clearfat", obj.Rconv(ppc64.REGRT1))
+	if gc.Reginuse(mips.REGRT1) {
+		gc.Fatalf("%v in use during clearfat", obj.Rconv(mips.REGRT1))
 	}
 
 	var r0 gc.Node
-	gc.Nodreg(&r0, gc.Types[gc.TUINT64], ppc64.REGZERO)
+	gc.Nodreg(&r0, gc.Types[gc.TUINT64], mips.REGZERO)
 	var dst gc.Node
-	gc.Nodreg(&dst, gc.Types[gc.Tptr], ppc64.REGRT1)
+	gc.Nodreg(&dst, gc.Types[gc.Tptr], mips.REGRT1)
 	gc.Regrealloc(&dst)
 	gc.Agen(nl, &dst)
 
 	var boff uint64
 	if q > 128 {
-		p := gins(ppc64.ASUB, nil, &dst)
+		p := gins(mips.ASUBV, nil, &dst)
 		p.From.Type = obj.TYPE_CONST
 		p.From.Offset = 8
 
 		var end gc.Node
 		gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
-		p = gins(ppc64.AMOVD, &dst, &end)
+		p = gins(mips.AMOVV, &dst, &end)
 		p.From.Type = obj.TYPE_ADDR
 		p.From.Offset = int64(q * 8)
 
-		p = gins(ppc64.AMOVDU, &r0, &dst)
+		p = gins(mips.AMOVV, &r0, &dst)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Offset = 8
 		pl := (*obj.Prog)(p)
 
-		p = gins(ppc64.ACMP, &dst, &end)
-		gc.Patch(gc.Gbranch(ppc64.ABNE, nil, 0), pl)
+		p = gins(mips.AADDV, nil, &dst)
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = 8
+
+		gc.Patch(ginsbranch(mips.ABNE, nil, &dst, &end, 0), pl)
 
 		gc.Regfree(&end)
 
-		// The loop leaves R3 on the last zeroed dword
+		// The loop leaves R1 on the last zeroed dword
 		boff = 8
 		// TODO(dfc): https://golang.org/issue/12108
 		// If DUFFZERO is used inside a tail call (see genwrapper) it will
 		// overwrite the link register.
 	} else if false && q >= 4 {
-		p := gins(ppc64.ASUB, nil, &dst)
+		p := gins(mips.ASUBV, nil, &dst)
 		p.From.Type = obj.TYPE_CONST
 		p.From.Offset = 8
 		f := (*gc.Node)(gc.Sysfunc("duffzero"))
 		p = gins(obj.ADUFFZERO, nil, f)
 		gc.Afunclit(&p.To, f)
 
-		// 4 and 128 = magic constants: see ../../runtime/asm_ppc64x.s
-		p.To.Offset = int64(4 * (128 - q))
+		// 8 and 128 = magic constants: see ../../runtime/asm_mips64x.s
+		p.To.Offset = int64(8 * (128 - q))
 
-		// duffzero leaves R3 on the last zeroed dword
+		// duffzero leaves R1 on the last zeroed dword
 		boff = 8
 	} else {
 		var p *obj.Prog
 		for t := uint64(0); t < q; t++ {
-			p = gins(ppc64.AMOVD, &r0, &dst)
+			p = gins(mips.AMOVV, &r0, &dst)
 			p.To.Type = obj.TYPE_MEM
 			p.To.Offset = int64(8 * t)
 		}
@@ -474,7 +435,7 @@
 
 	var p *obj.Prog
 	for t := uint64(0); t < c; t++ {
-		p = gins(ppc64.AMOVB, &r0, &dst)
+		p = gins(mips.AMOVB, &r0, &dst)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Offset = int64(t + boff)
 	}
@@ -486,7 +447,6 @@
 // Expand CHECKNIL pseudo-op into actual nil pointer check.
 func expandchecks(firstp *obj.Prog) {
 	var p1 *obj.Prog
-	var p2 *obj.Prog
 
 	for p := (*obj.Prog)(firstp); p != nil; p = p.Link {
 		if gc.Debug_checknil != 0 && gc.Ctxt.Debugvlog != 0 {
@@ -502,63 +462,33 @@
 			gc.Fatalf("invalid nil check %v\n", p)
 		}
 
-		/*
-			// check is
-			//	TD $4, R0, arg (R0 is always zero)
-			// eqv. to:
-			// 	tdeq r0, arg
-			// NOTE: this needs special runtime support to make SIGTRAP recoverable.
-			reg = p->from.reg;
-			p->as = ATD;
-			p->from = p->to = p->from3 = zprog.from;
-			p->from.type = TYPE_CONST;
-			p->from.offset = 4;
-			p->from.reg = 0;
-			p->reg = REGZERO;
-			p->to.type = TYPE_REG;
-			p->to.reg = reg;
-		*/
 		// check is
-		//	CMP arg, R0
-		//	BNE 2(PC) [likely]
-		//	MOVD R0, 0(R0)
+		//	BNE arg, 2(PC)
+		//	MOVV R0, 0(R0)
 		p1 = gc.Ctxt.NewProg()
-
-		p2 = gc.Ctxt.NewProg()
 		gc.Clearp(p1)
-		gc.Clearp(p2)
-		p1.Link = p2
-		p2.Link = p.Link
+		p1.Link = p.Link
 		p.Link = p1
 		p1.Lineno = p.Lineno
-		p2.Lineno = p.Lineno
 		p1.Pc = 9999
-		p2.Pc = 9999
-		p.As = ppc64.ACMP
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = ppc64.REGZERO
-		p1.As = ppc64.ABNE
 
-		//p1->from.type = TYPE_CONST;
-		//p1->from.offset = 1; // likely
-		p1.To.Type = obj.TYPE_BRANCH
-
-		p1.To.Val = p2.Link
+		p.As = mips.ABNE
+		p.To.Type = obj.TYPE_BRANCH
+		p.To.Val = p1.Link
 
 		// crash by write to memory address 0.
-		p2.As = ppc64.AMOVD
-
-		p2.From.Type = obj.TYPE_REG
-		p2.From.Reg = ppc64.REGZERO
-		p2.To.Type = obj.TYPE_MEM
-		p2.To.Reg = ppc64.REGZERO
-		p2.To.Offset = 0
+		p1.As = mips.AMOVV
+		p1.From.Type = obj.TYPE_REG
+		p1.From.Reg = mips.REGZERO
+		p1.To.Type = obj.TYPE_MEM
+		p1.To.Reg = mips.REGZERO
+		p1.To.Offset = 0
 	}
 }
 
 // res = runtime.getg()
 func getg(res *gc.Node) {
 	var n1 gc.Node
-	gc.Nodreg(&n1, res.Type, ppc64.REGG)
+	gc.Nodreg(&n1, res.Type, mips.REGG)
 	gmove(&n1, res)
 }
diff --git a/src/cmd/compile/internal/mips64/gsubr.go b/src/cmd/compile/internal/mips64/gsubr.go
index dde05c4..d2065d9 100644
--- a/src/cmd/compile/internal/mips64/gsubr.go
+++ b/src/cmd/compile/internal/mips64/gsubr.go
@@ -28,34 +28,28 @@
 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 // THE SOFTWARE.
 
-package ppc64
+package mips64
 
 import (
 	"cmd/compile/internal/big"
 	"cmd/compile/internal/gc"
 	"cmd/internal/obj"
-	"cmd/internal/obj/ppc64"
+	"cmd/internal/obj/mips"
 	"fmt"
 )
 
 var resvd = []int{
-	ppc64.REGZERO,
-	ppc64.REGSP, // reserved for SP
-	// We need to preserve the C ABI TLS pointer because sigtramp
-	// may happen during C code and needs to access the g.  C
-	// clobbers REGG, so if Go were to clobber REGTLS, sigtramp
-	// won't know which convention to use.  By preserving REGTLS,
-	// we can just retrieve g from TLS when we aren't sure.
-	ppc64.REGTLS,
-
-	// TODO(austin): Consolidate REGTLS and REGG?
-	ppc64.REGG,
-	ppc64.REGTMP, // REGTMP
-	ppc64.FREGCVI,
-	ppc64.FREGZERO,
-	ppc64.FREGHALF,
-	ppc64.FREGONE,
-	ppc64.FREGTWO,
+	mips.REGZERO,
+	mips.REGSP,   // reserved for SP
+	mips.REGLINK, // reserved for link
+	mips.REGG,
+	mips.REGTMP,
+	mips.REG_R26, // kernel
+	mips.REG_R27, // kernel
+	mips.FREGZERO,
+	mips.FREGHALF,
+	mips.FREGONE,
+	mips.FREGTWO,
 }
 
 /*
@@ -67,13 +61,13 @@
 
 	gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
 
-	if as != ppc64.AMOVD && (c < -ppc64.BIG || c > ppc64.BIG) || n2.Op != gc.OREGISTER || as == ppc64.AMULLD {
+	if as != mips.AMOVV && (c < -mips.BIG || c > mips.BIG) || n2.Op != gc.OREGISTER || as == mips.AMUL || as == mips.AMULU || as == mips.AMULV || as == mips.AMULVU {
 		// cannot have more than 16-bit of immediate in ADD, etc.
 		// instead, MOV into register first.
 		var ntmp gc.Node
 		gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)
 
-		rawgins(ppc64.AMOVD, &n1, &ntmp)
+		rawgins(mips.AMOVV, &n1, &ntmp)
 		rawgins(as, &ntmp, n2)
 		gc.Regfree(&ntmp)
 		return
@@ -82,67 +76,116 @@
 	rawgins(as, &n1, n2)
 }
 
-/*
- * generate
- *	as n, $c (CMP/CMPU)
- */
-func ginscon2(as int, n2 *gc.Node, c int64) {
-	var n1 gc.Node
-
-	gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
-
-	switch as {
-	default:
-		gc.Fatalf("ginscon2")
-
-	case ppc64.ACMP:
-		if -ppc64.BIG <= c && c <= ppc64.BIG {
-			rawgins(as, n2, &n1)
-			return
-		}
-
-	case ppc64.ACMPU:
-		if 0 <= c && c <= 2*ppc64.BIG {
-			rawgins(as, n2, &n1)
-			return
-		}
+// generate branch
+// n1, n2 are registers
+func ginsbranch(as int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
+	p := gc.Gbranch(as, t, likely)
+	gc.Naddr(&p.From, n1)
+	if n2 != nil {
+		p.Reg = n2.Reg
 	}
-
-	// MOV n1 into register first
-	var ntmp gc.Node
-	gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)
-
-	rawgins(ppc64.AMOVD, &n1, &ntmp)
-	rawgins(as, n2, &ntmp)
-	gc.Regfree(&ntmp)
+	return p
 }
 
-func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
-	if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && n2.Op != gc.OLITERAL {
-		// Reverse comparison to place constant last.
-		op = gc.Brrev(op)
+func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
+	if !gc.Isfloat[t.Etype] && (op == gc.OLT || op == gc.OGE) {
+		// swap nodes to fit SGT instruction
 		n1, n2 = n2, n1
 	}
+	if gc.Isfloat[t.Etype] && (op == gc.OLT || op == gc.OLE) {
+		// swap nodes to fit CMPGT, CMPGE instructions and reverse relation
+		n1, n2 = n2, n1
+		if op == gc.OLT {
+			op = gc.OGT
+		} else {
+			op = gc.OGE
+		}
+	}
 
 	var r1, r2, g1, g2 gc.Node
 	gc.Regalloc(&r1, t, n1)
 	gc.Regalloc(&g1, n1.Type, &r1)
 	gc.Cgen(n1, &g1)
 	gmove(&g1, &r1)
-	if gc.Isint[t.Etype] && gc.Isconst(n2, gc.CTINT) {
-		ginscon2(optoas(gc.OCMP, t), &r1, n2.Int())
-	} else {
-		gc.Regalloc(&r2, t, n2)
-		gc.Regalloc(&g2, n1.Type, &r2)
-		gc.Cgen(n2, &g2)
-		gmove(&g2, &r2)
-		rawgins(optoas(gc.OCMP, t), &r1, &r2)
-		gc.Regfree(&g2)
-		gc.Regfree(&r2)
+
+	gc.Regalloc(&r2, t, n2)
+	gc.Regalloc(&g2, n1.Type, &r2)
+	gc.Cgen(n2, &g2)
+	gmove(&g2, &r2)
+
+	var p *obj.Prog
+	var ntmp gc.Node
+	gc.Nodreg(&ntmp, gc.Types[gc.TINT], mips.REGTMP)
+
+	switch gc.Simtype[t.Etype] {
+	case gc.TINT8,
+		gc.TINT16,
+		gc.TINT32,
+		gc.TINT64:
+		if op == gc.OEQ || op == gc.ONE {
+			p = ginsbranch(optoas(op, t), nil, &r1, &r2, likely)
+		} else {
+			gins3(mips.ASGT, &r1, &r2, &ntmp)
+
+			p = ginsbranch(optoas(op, t), nil, &ntmp, nil, likely)
+		}
+
+	case gc.TBOOL,
+		gc.TUINT8,
+		gc.TUINT16,
+		gc.TUINT32,
+		gc.TUINT64,
+		gc.TPTR32,
+		gc.TPTR64:
+		if op == gc.OEQ || op == gc.ONE {
+			p = ginsbranch(optoas(op, t), nil, &r1, &r2, likely)
+		} else {
+			gins3(mips.ASGTU, &r1, &r2, &ntmp)
+
+			p = ginsbranch(optoas(op, t), nil, &ntmp, nil, likely)
+		}
+
+	case gc.TFLOAT32:
+		switch op {
+		default:
+			gc.Fatalf("ginscmp: no entry for op=%v type=%v", gc.Oconv(int(op), 0), t)
+
+		case gc.OEQ,
+			gc.ONE:
+			gins3(mips.ACMPEQF, &r1, &r2, nil)
+
+		case gc.OGE:
+			gins3(mips.ACMPGEF, &r1, &r2, nil)
+
+		case gc.OGT:
+			gins3(mips.ACMPGTF, &r1, &r2, nil)
+		}
+		p = gc.Gbranch(optoas(op, t), nil, likely)
+
+	case gc.TFLOAT64:
+		switch op {
+		default:
+			gc.Fatalf("ginscmp: no entry for op=%v type=%v", gc.Oconv(int(op), 0), t)
+
+		case gc.OEQ,
+			gc.ONE:
+			gins3(mips.ACMPEQD, &r1, &r2, nil)
+
+		case gc.OGE:
+			gins3(mips.ACMPGED, &r1, &r2, nil)
+
+		case gc.OGT:
+			gins3(mips.ACMPGTD, &r1, &r2, nil)
+		}
+		p = gc.Gbranch(optoas(op, t), nil, likely)
 	}
+
+	gc.Regfree(&g2)
+	gc.Regfree(&r2)
 	gc.Regfree(&g1)
 	gc.Regfree(&r1)
-	return gc.Gbranch(optoas(op, t), nil, likely)
+
+	return p
 }
 
 // set up nodes representing 2^63
@@ -209,7 +252,7 @@
 			f.Convconst(&con, gc.Types[gc.TINT64])
 			var r1 gc.Node
 			gc.Regalloc(&r1, con.Type, t)
-			gins(ppc64.AMOVD, &con, &r1)
+			gins(mips.AMOVV, &con, &r1)
 			gmove(&r1, t)
 			gc.Regfree(&r1)
 			return
@@ -221,7 +264,7 @@
 			f.Convconst(&con, gc.Types[gc.TUINT64])
 			var r1 gc.Node
 			gc.Regalloc(&r1, con.Type, t)
-			gins(ppc64.AMOVD, &con, &r1)
+			gins(mips.AMOVV, &con, &r1)
 			gmove(&r1, t)
 			gc.Regfree(&r1)
 			return
@@ -236,21 +279,13 @@
 		}
 	}
 
-	// float constants come from memory.
-	//if(isfloat[tt])
-	//	goto hard;
-
-	// 64-bit immediates are also from memory.
-	//if(isint[tt])
-	//	goto hard;
-	//// 64-bit immediates are really 32-bit sign-extended
-	//// unless moving into a register.
-	//if(isint[tt]) {
-	//	if(mpcmpfixfix(con.val.u.xval, minintval[TINT32]) < 0)
-	//		goto hard;
-	//	if(mpcmpfixfix(con.val.u.xval, maxintval[TINT32]) > 0)
-	//		goto hard;
-	//}
+	// value -> value copy, first operand in memory.
+	// any floating point operand requires register
+	// src, so goto hard to copy to register first.
+	if gc.Ismem(f) && ft != tt && (gc.Isfloat[ft] || gc.Isfloat[tt]) {
+		cvt = gc.Types[ft]
+		goto hard
+	}
 
 	// value -> value copy, only one memory operand.
 	// figure out the instruction to use.
@@ -268,62 +303,57 @@
 		 */
 	case gc.TINT8<<16 | gc.TINT8, // same size
 		gc.TUINT8<<16 | gc.TINT8,
-		gc.TINT16<<16 | gc.TINT8,
-		// truncate
+		gc.TINT16<<16 | gc.TINT8, // truncate
 		gc.TUINT16<<16 | gc.TINT8,
 		gc.TINT32<<16 | gc.TINT8,
 		gc.TUINT32<<16 | gc.TINT8,
 		gc.TINT64<<16 | gc.TINT8,
 		gc.TUINT64<<16 | gc.TINT8:
-		a = ppc64.AMOVB
+		a = mips.AMOVB
 
 	case gc.TINT8<<16 | gc.TUINT8, // same size
 		gc.TUINT8<<16 | gc.TUINT8,
-		gc.TINT16<<16 | gc.TUINT8,
-		// truncate
+		gc.TINT16<<16 | gc.TUINT8, // truncate
 		gc.TUINT16<<16 | gc.TUINT8,
 		gc.TINT32<<16 | gc.TUINT8,
 		gc.TUINT32<<16 | gc.TUINT8,
 		gc.TINT64<<16 | gc.TUINT8,
 		gc.TUINT64<<16 | gc.TUINT8:
-		a = ppc64.AMOVBZ
+		a = mips.AMOVBU
 
 	case gc.TINT16<<16 | gc.TINT16, // same size
 		gc.TUINT16<<16 | gc.TINT16,
-		gc.TINT32<<16 | gc.TINT16,
-		// truncate
+		gc.TINT32<<16 | gc.TINT16, // truncate
 		gc.TUINT32<<16 | gc.TINT16,
 		gc.TINT64<<16 | gc.TINT16,
 		gc.TUINT64<<16 | gc.TINT16:
-		a = ppc64.AMOVH
+		a = mips.AMOVH
 
 	case gc.TINT16<<16 | gc.TUINT16, // same size
 		gc.TUINT16<<16 | gc.TUINT16,
-		gc.TINT32<<16 | gc.TUINT16,
-		// truncate
+		gc.TINT32<<16 | gc.TUINT16, // truncate
 		gc.TUINT32<<16 | gc.TUINT16,
 		gc.TINT64<<16 | gc.TUINT16,
 		gc.TUINT64<<16 | gc.TUINT16:
-		a = ppc64.AMOVHZ
+		a = mips.AMOVHU
 
 	case gc.TINT32<<16 | gc.TINT32, // same size
 		gc.TUINT32<<16 | gc.TINT32,
-		gc.TINT64<<16 | gc.TINT32,
-		// truncate
+		gc.TINT64<<16 | gc.TINT32, // truncate
 		gc.TUINT64<<16 | gc.TINT32:
-		a = ppc64.AMOVW
+		a = mips.AMOVW
 
 	case gc.TINT32<<16 | gc.TUINT32, // same size
 		gc.TUINT32<<16 | gc.TUINT32,
-		gc.TINT64<<16 | gc.TUINT32,
+		gc.TINT64<<16 | gc.TUINT32, // truncate
 		gc.TUINT64<<16 | gc.TUINT32:
-		a = ppc64.AMOVWZ
+		a = mips.AMOVWU
 
 	case gc.TINT64<<16 | gc.TINT64, // same size
 		gc.TINT64<<16 | gc.TUINT64,
 		gc.TUINT64<<16 | gc.TINT64,
 		gc.TUINT64<<16 | gc.TUINT64:
-		a = ppc64.AMOVD
+		a = mips.AMOVV
 
 		/*
 		 * integer up-conversions
@@ -334,7 +364,7 @@
 		gc.TINT8<<16 | gc.TUINT32,
 		gc.TINT8<<16 | gc.TINT64,
 		gc.TINT8<<16 | gc.TUINT64:
-		a = ppc64.AMOVB
+		a = mips.AMOVB
 
 		goto rdst
 
@@ -344,7 +374,7 @@
 		gc.TUINT8<<16 | gc.TUINT32,
 		gc.TUINT8<<16 | gc.TINT64,
 		gc.TUINT8<<16 | gc.TUINT64:
-		a = ppc64.AMOVBZ
+		a = mips.AMOVBU
 
 		goto rdst
 
@@ -352,7 +382,7 @@
 		gc.TINT16<<16 | gc.TUINT32,
 		gc.TINT16<<16 | gc.TINT64,
 		gc.TINT16<<16 | gc.TUINT64:
-		a = ppc64.AMOVH
+		a = mips.AMOVH
 
 		goto rdst
 
@@ -360,19 +390,19 @@
 		gc.TUINT16<<16 | gc.TUINT32,
 		gc.TUINT16<<16 | gc.TINT64,
 		gc.TUINT16<<16 | gc.TUINT64:
-		a = ppc64.AMOVHZ
+		a = mips.AMOVHU
 
 		goto rdst
 
 	case gc.TINT32<<16 | gc.TINT64, // sign extend int32
 		gc.TINT32<<16 | gc.TUINT64:
-		a = ppc64.AMOVW
+		a = mips.AMOVW
 
 		goto rdst
 
 	case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
 		gc.TUINT32<<16 | gc.TUINT64:
-		a = ppc64.AMOVWZ
+		a = mips.AMOVWU
 
 		goto rdst
 
@@ -402,49 +432,39 @@
 		gc.TFLOAT64<<16 | gc.TUINT64:
 		bignodes()
 
-		var r1 gc.Node
-		gc.Regalloc(&r1, gc.Types[ft], f)
+		gc.Regalloc(&r1, gc.Types[gc.TFLOAT64], nil)
 		gmove(f, &r1)
 		if tt == gc.TUINT64 {
 			gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], nil)
 			gmove(&bigf, &r2)
-			gins(ppc64.AFCMPU, &r1, &r2)
-			p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1))
-			gins(ppc64.AFSUB, &r2, &r1)
+			gins3(mips.ACMPGED, &r1, &r2, nil)
+			p1 := gc.Gbranch(mips.ABFPF, nil, 0)
+			gins(mips.ASUBD, &r2, &r1)
 			gc.Patch(p1, gc.Pc)
 			gc.Regfree(&r2)
 		}
 
-		gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], nil)
-		var r3 gc.Node
-		gc.Regalloc(&r3, gc.Types[gc.TINT64], t)
-		gins(ppc64.AFCTIDZ, &r1, &r2)
-		p1 := (*obj.Prog)(gins(ppc64.AFMOVD, &r2, nil))
-		p1.To.Type = obj.TYPE_MEM
-		p1.To.Reg = ppc64.REGSP
-		p1.To.Offset = -8
-		p1 = gins(ppc64.AMOVD, nil, &r3)
-		p1.From.Type = obj.TYPE_MEM
-		p1.From.Reg = ppc64.REGSP
-		p1.From.Offset = -8
-		gc.Regfree(&r2)
+		gc.Regalloc(&r2, gc.Types[gc.TINT64], t)
+		gins(mips.ATRUNCDV, &r1, &r1)
+		gins(mips.AMOVV, &r1, &r2)
 		gc.Regfree(&r1)
+
 		if tt == gc.TUINT64 {
-			p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1)) // use CR0 here again
-			gc.Nodreg(&r1, gc.Types[gc.TINT64], ppc64.REGTMP)
-			gins(ppc64.AMOVD, &bigi, &r1)
-			gins(ppc64.AADD, &r1, &r3)
+			p1 := gc.Gbranch(mips.ABFPF, nil, 0) // use FCR0 here again
+			gc.Nodreg(&r1, gc.Types[gc.TINT64], mips.REGTMP)
+			gmove(&bigi, &r1)
+			gins(mips.AADDVU, &r1, &r2)
 			gc.Patch(p1, gc.Pc)
 		}
 
-		gmove(&r3, t)
-		gc.Regfree(&r3)
+		gmove(&r2, t)
+		gc.Regfree(&r2)
 		return
 
 		//warn("gmove: convert int to float not implemented: %N -> %N\n", f, t);
 	//return;
 	// algorithm is:
-	//	if small enough, use native int64 -> uint64 conversion.
+	//	if small enough, use native int64 -> float64 conversion.
 	//	otherwise, halve (rounding to odd?), convert, and double.
 	/*
 	 * integer to float
@@ -467,35 +487,29 @@
 		gc.TUINT64<<16 | gc.TFLOAT64:
 		bignodes()
 
-		var r1 gc.Node
+		var rtmp gc.Node
 		gc.Regalloc(&r1, gc.Types[gc.TINT64], nil)
 		gmove(f, &r1)
 		if ft == gc.TUINT64 {
-			gc.Nodreg(&r2, gc.Types[gc.TUINT64], ppc64.REGTMP)
-			gmove(&bigi, &r2)
-			gins(ppc64.ACMPU, &r1, &r2)
-			p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1))
-			p2 := (*obj.Prog)(gins(ppc64.ASRD, nil, &r1))
+			gc.Nodreg(&rtmp, gc.Types[gc.TUINT64], mips.REGTMP)
+			gmove(&bigi, &rtmp)
+			gins(mips.AAND, &r1, &rtmp)
+			p1 := ginsbranch(mips.ABEQ, nil, &rtmp, nil, 0)
+			p2 := gins(mips.ASRLV, nil, &r1)
 			p2.From.Type = obj.TYPE_CONST
 			p2.From.Offset = 1
 			gc.Patch(p1, gc.Pc)
 		}
 
 		gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], t)
-		p1 := (*obj.Prog)(gins(ppc64.AMOVD, &r1, nil))
-		p1.To.Type = obj.TYPE_MEM
-		p1.To.Reg = ppc64.REGSP
-		p1.To.Offset = -8
-		p1 = gins(ppc64.AFMOVD, nil, &r2)
-		p1.From.Type = obj.TYPE_MEM
-		p1.From.Reg = ppc64.REGSP
-		p1.From.Offset = -8
-		gins(ppc64.AFCFID, &r2, &r2)
+		gins(mips.AMOVV, &r1, &r2)
+		gins(mips.AMOVVD, &r2, &r2)
 		gc.Regfree(&r1)
+
 		if ft == gc.TUINT64 {
-			p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1)) // use CR0 here again
-			gc.Nodreg(&r1, gc.Types[gc.TFLOAT64], ppc64.FREGTWO)
-			gins(ppc64.AFMUL, &r1, &r2)
+			p1 := ginsbranch(mips.ABEQ, nil, &rtmp, nil, 0)
+			gc.Nodreg(&r1, gc.Types[gc.TFLOAT64], mips.FREGTWO)
+			gins(mips.AMULD, &r1, &r2)
 			gc.Patch(p1, gc.Pc)
 		}
 
@@ -507,17 +521,17 @@
 		 * float to float
 		 */
 	case gc.TFLOAT32<<16 | gc.TFLOAT32:
-		a = ppc64.AFMOVS
+		a = mips.AMOVF
 
 	case gc.TFLOAT64<<16 | gc.TFLOAT64:
-		a = ppc64.AFMOVD
+		a = mips.AMOVD
 
 	case gc.TFLOAT32<<16 | gc.TFLOAT64:
-		a = ppc64.AFMOVS
+		a = mips.AMOVFD
 		goto rdst
 
 	case gc.TFLOAT64<<16 | gc.TFLOAT32:
-		a = ppc64.AFRSP
+		a = mips.AMOVDF
 		goto rdst
 	}
 
@@ -555,17 +569,24 @@
 			return nil // caller must not use
 		}
 	}
-	if as == ppc64.ACMP || as == ppc64.ACMPU {
-		if x, ok := t.IntLiteral(); ok {
-			ginscon2(as, f, x)
-			return nil // caller must not use
-		}
-	}
 	return rawgins(as, f, t)
 }
 
 /*
  * generate one instruction:
+ *	as f, r, t
+ * r must be register, if not nil
+ */
+func gins3(as int, f, r, t *gc.Node) *obj.Prog {
+	p := rawgins(as, f, t)
+	if r != nil {
+		p.Reg = r.Reg
+	}
+	return p
+}
+
+/*
+ * generate one instruction:
  *	as f, t
  */
 func rawgins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
@@ -578,37 +599,52 @@
 
 	switch as {
 	case obj.ACALL:
-		if p.To.Type == obj.TYPE_REG && p.To.Reg != ppc64.REG_CTR {
-			// Allow front end to emit CALL REG, and rewrite into MOV REG, CTR; CALL CTR.
-			pp := gc.Prog(as)
-			pp.From = p.From
-			pp.To.Type = obj.TYPE_REG
-			pp.To.Reg = ppc64.REG_CTR
-
-			p.As = ppc64.AMOVD
-			p.From = p.To
-			p.To.Type = obj.TYPE_REG
-			p.To.Reg = ppc64.REG_CTR
+		if p.To.Type == obj.TYPE_REG {
+			// Allow front end to emit CALL REG, and rewrite into CALL (REG).
+			p.From = obj.Addr{}
+			p.To.Type = obj.TYPE_MEM
+			p.To.Offset = 0
 
 			if gc.Debug['g'] != 0 {
 				fmt.Printf("%v\n", p)
-				fmt.Printf("%v\n", pp)
 			}
 
-			return pp
+			return p
 		}
 
 	// Bad things the front end has done to us. Crash to find call stack.
-	case ppc64.AAND, ppc64.AMULLD:
+	case mips.AAND:
 		if p.From.Type == obj.TYPE_CONST {
 			gc.Debug['h'] = 1
 			gc.Fatalf("bad inst: %v", p)
 		}
-	case ppc64.ACMP, ppc64.ACMPU:
+	case mips.ASGT, mips.ASGTU:
 		if p.From.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_MEM {
 			gc.Debug['h'] = 1
 			gc.Fatalf("bad inst: %v", p)
 		}
+
+	// Special cases
+	case mips.AMUL, mips.AMULU, mips.AMULV, mips.AMULVU:
+		if p.From.Type == obj.TYPE_CONST {
+			gc.Debug['h'] = 1
+			gc.Fatalf("bad inst: %v", p)
+		}
+
+		pp := gc.Prog(mips.AMOVV)
+		pp.From.Type = obj.TYPE_REG
+		pp.From.Reg = mips.REG_LO
+		pp.To = p.To
+
+		p.Reg = p.To.Reg
+		p.To = obj.Addr{}
+
+	case mips.ASUBVU:
+		// unary
+		if f == nil {
+			p.From = p.To
+			p.Reg = mips.REGZERO
+		}
 	}
 
 	if gc.Debug['g'] != 0 {
@@ -617,26 +653,19 @@
 
 	w := int32(0)
 	switch as {
-	case ppc64.AMOVB,
-		ppc64.AMOVBU,
-		ppc64.AMOVBZ,
-		ppc64.AMOVBZU:
+	case mips.AMOVB,
+		mips.AMOVBU:
 		w = 1
 
-	case ppc64.AMOVH,
-		ppc64.AMOVHU,
-		ppc64.AMOVHZ,
-		ppc64.AMOVHZU:
+	case mips.AMOVH,
+		mips.AMOVHU:
 		w = 2
 
-	case ppc64.AMOVW,
-		ppc64.AMOVWU,
-		ppc64.AMOVWZ,
-		ppc64.AMOVWZU:
+	case mips.AMOVW,
+		mips.AMOVWU:
 		w = 4
 
-	case ppc64.AMOVD,
-		ppc64.AMOVDU:
+	case mips.AMOVV:
 		if p.From.Type == obj.TYPE_CONST || p.From.Type == obj.TYPE_ADDR {
 			break
 		}
@@ -655,230 +684,247 @@
 /*
  * return Axxx for Oxxx on type t.
  */
-func optoas(op int, t *gc.Type) int {
+func optoas(op gc.Op, t *gc.Type) int {
 	if t == nil {
 		gc.Fatalf("optoas: t is nil")
 	}
 
+	// avoid constant conversions in switches below
+	const (
+		OMINUS_ = uint32(gc.OMINUS) << 16
+		OLSH_   = uint32(gc.OLSH) << 16
+		ORSH_   = uint32(gc.ORSH) << 16
+		OADD_   = uint32(gc.OADD) << 16
+		OSUB_   = uint32(gc.OSUB) << 16
+		OMUL_   = uint32(gc.OMUL) << 16
+		ODIV_   = uint32(gc.ODIV) << 16
+		OOR_    = uint32(gc.OOR) << 16
+		OAND_   = uint32(gc.OAND) << 16
+		OXOR_   = uint32(gc.OXOR) << 16
+		OEQ_    = uint32(gc.OEQ) << 16
+		ONE_    = uint32(gc.ONE) << 16
+		OLT_    = uint32(gc.OLT) << 16
+		OLE_    = uint32(gc.OLE) << 16
+		OGE_    = uint32(gc.OGE) << 16
+		OGT_    = uint32(gc.OGT) << 16
+		OCMP_   = uint32(gc.OCMP) << 16
+		OAS_    = uint32(gc.OAS) << 16
+		OHMUL_  = uint32(gc.OHMUL) << 16
+	)
+
 	a := int(obj.AXXX)
 	switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
 	default:
 		gc.Fatalf("optoas: no entry for op=%v type=%v", gc.Oconv(int(op), 0), t)
 
-	case gc.OEQ<<16 | gc.TBOOL,
-		gc.OEQ<<16 | gc.TINT8,
-		gc.OEQ<<16 | gc.TUINT8,
-		gc.OEQ<<16 | gc.TINT16,
-		gc.OEQ<<16 | gc.TUINT16,
-		gc.OEQ<<16 | gc.TINT32,
-		gc.OEQ<<16 | gc.TUINT32,
-		gc.OEQ<<16 | gc.TINT64,
-		gc.OEQ<<16 | gc.TUINT64,
-		gc.OEQ<<16 | gc.TPTR32,
-		gc.OEQ<<16 | gc.TPTR64,
-		gc.OEQ<<16 | gc.TFLOAT32,
-		gc.OEQ<<16 | gc.TFLOAT64:
-		a = ppc64.ABEQ
+	case OEQ_ | gc.TBOOL,
+		OEQ_ | gc.TINT8,
+		OEQ_ | gc.TUINT8,
+		OEQ_ | gc.TINT16,
+		OEQ_ | gc.TUINT16,
+		OEQ_ | gc.TINT32,
+		OEQ_ | gc.TUINT32,
+		OEQ_ | gc.TINT64,
+		OEQ_ | gc.TUINT64,
+		OEQ_ | gc.TPTR32,
+		OEQ_ | gc.TPTR64:
+		a = mips.ABEQ
 
-	case gc.ONE<<16 | gc.TBOOL,
-		gc.ONE<<16 | gc.TINT8,
-		gc.ONE<<16 | gc.TUINT8,
-		gc.ONE<<16 | gc.TINT16,
-		gc.ONE<<16 | gc.TUINT16,
-		gc.ONE<<16 | gc.TINT32,
-		gc.ONE<<16 | gc.TUINT32,
-		gc.ONE<<16 | gc.TINT64,
-		gc.ONE<<16 | gc.TUINT64,
-		gc.ONE<<16 | gc.TPTR32,
-		gc.ONE<<16 | gc.TPTR64,
-		gc.ONE<<16 | gc.TFLOAT32,
-		gc.ONE<<16 | gc.TFLOAT64:
-		a = ppc64.ABNE
+	case OEQ_ | gc.TFLOAT32, // ACMPEQF
+		OEQ_ | gc.TFLOAT64: // ACMPEQD
+		a = mips.ABFPT
 
-	case gc.OLT<<16 | gc.TINT8, // ACMP
-		gc.OLT<<16 | gc.TINT16,
-		gc.OLT<<16 | gc.TINT32,
-		gc.OLT<<16 | gc.TINT64,
-		gc.OLT<<16 | gc.TUINT8,
-		// ACMPU
-		gc.OLT<<16 | gc.TUINT16,
-		gc.OLT<<16 | gc.TUINT32,
-		gc.OLT<<16 | gc.TUINT64,
-		gc.OLT<<16 | gc.TFLOAT32,
-		// AFCMPU
-		gc.OLT<<16 | gc.TFLOAT64:
-		a = ppc64.ABLT
+	case ONE_ | gc.TBOOL,
+		ONE_ | gc.TINT8,
+		ONE_ | gc.TUINT8,
+		ONE_ | gc.TINT16,
+		ONE_ | gc.TUINT16,
+		ONE_ | gc.TINT32,
+		ONE_ | gc.TUINT32,
+		ONE_ | gc.TINT64,
+		ONE_ | gc.TUINT64,
+		ONE_ | gc.TPTR32,
+		ONE_ | gc.TPTR64:
+		a = mips.ABNE
 
-	case gc.OLE<<16 | gc.TINT8, // ACMP
-		gc.OLE<<16 | gc.TINT16,
-		gc.OLE<<16 | gc.TINT32,
-		gc.OLE<<16 | gc.TINT64,
-		gc.OLE<<16 | gc.TUINT8,
-		// ACMPU
-		gc.OLE<<16 | gc.TUINT16,
-		gc.OLE<<16 | gc.TUINT32,
-		gc.OLE<<16 | gc.TUINT64:
-		// No OLE for floats, because it mishandles NaN.
-		// Front end must reverse comparison or use OLT and OEQ together.
-		a = ppc64.ABLE
+	case ONE_ | gc.TFLOAT32, // ACMPEQF
+		ONE_ | gc.TFLOAT64: // ACMPEQD
+		a = mips.ABFPF
 
-	case gc.OGT<<16 | gc.TINT8,
-		gc.OGT<<16 | gc.TINT16,
-		gc.OGT<<16 | gc.TINT32,
-		gc.OGT<<16 | gc.TINT64,
-		gc.OGT<<16 | gc.TUINT8,
-		gc.OGT<<16 | gc.TUINT16,
-		gc.OGT<<16 | gc.TUINT32,
-		gc.OGT<<16 | gc.TUINT64,
-		gc.OGT<<16 | gc.TFLOAT32,
-		gc.OGT<<16 | gc.TFLOAT64:
-		a = ppc64.ABGT
+	case OLT_ | gc.TINT8, // ASGT
+		OLT_ | gc.TINT16,
+		OLT_ | gc.TINT32,
+		OLT_ | gc.TINT64,
+		OLT_ | gc.TUINT8, // ASGTU
+		OLT_ | gc.TUINT16,
+		OLT_ | gc.TUINT32,
+		OLT_ | gc.TUINT64:
+		a = mips.ABNE
 
-	case gc.OGE<<16 | gc.TINT8,
-		gc.OGE<<16 | gc.TINT16,
-		gc.OGE<<16 | gc.TINT32,
-		gc.OGE<<16 | gc.TINT64,
-		gc.OGE<<16 | gc.TUINT8,
-		gc.OGE<<16 | gc.TUINT16,
-		gc.OGE<<16 | gc.TUINT32,
-		gc.OGE<<16 | gc.TUINT64:
-		// No OGE for floats, because it mishandles NaN.
-		// Front end must reverse comparison or use OLT and OEQ together.
-		a = ppc64.ABGE
+	case OLT_ | gc.TFLOAT32, // ACMPGEF
+		OLT_ | gc.TFLOAT64: // ACMPGED
+		a = mips.ABFPT
 
-	case gc.OCMP<<16 | gc.TBOOL,
-		gc.OCMP<<16 | gc.TINT8,
-		gc.OCMP<<16 | gc.TINT16,
-		gc.OCMP<<16 | gc.TINT32,
-		gc.OCMP<<16 | gc.TPTR32,
-		gc.OCMP<<16 | gc.TINT64:
-		a = ppc64.ACMP
+	case OLE_ | gc.TINT8, // ASGT
+		OLE_ | gc.TINT16,
+		OLE_ | gc.TINT32,
+		OLE_ | gc.TINT64,
+		OLE_ | gc.TUINT8, // ASGTU
+		OLE_ | gc.TUINT16,
+		OLE_ | gc.TUINT32,
+		OLE_ | gc.TUINT64:
+		a = mips.ABEQ
 
-	case gc.OCMP<<16 | gc.TUINT8,
-		gc.OCMP<<16 | gc.TUINT16,
-		gc.OCMP<<16 | gc.TUINT32,
-		gc.OCMP<<16 | gc.TUINT64,
-		gc.OCMP<<16 | gc.TPTR64:
-		a = ppc64.ACMPU
+	case OLE_ | gc.TFLOAT32, // ACMPGTF
+		OLE_ | gc.TFLOAT64: // ACMPGTD
+		a = mips.ABFPT
 
-	case gc.OCMP<<16 | gc.TFLOAT32,
-		gc.OCMP<<16 | gc.TFLOAT64:
-		a = ppc64.AFCMPU
+	case OGT_ | gc.TINT8, // ASGT
+		OGT_ | gc.TINT16,
+		OGT_ | gc.TINT32,
+		OGT_ | gc.TINT64,
+		OGT_ | gc.TUINT8, // ASGTU
+		OGT_ | gc.TUINT16,
+		OGT_ | gc.TUINT32,
+		OGT_ | gc.TUINT64:
+		a = mips.ABNE
 
-	case gc.OAS<<16 | gc.TBOOL,
-		gc.OAS<<16 | gc.TINT8:
-		a = ppc64.AMOVB
+	case OGT_ | gc.TFLOAT32, // ACMPGTF
+		OGT_ | gc.TFLOAT64: // ACMPGTD
+		a = mips.ABFPT
 
-	case gc.OAS<<16 | gc.TUINT8:
-		a = ppc64.AMOVBZ
+	case OGE_ | gc.TINT8, // ASGT
+		OGE_ | gc.TINT16,
+		OGE_ | gc.TINT32,
+		OGE_ | gc.TINT64,
+		OGE_ | gc.TUINT8, // ASGTU
+		OGE_ | gc.TUINT16,
+		OGE_ | gc.TUINT32,
+		OGE_ | gc.TUINT64:
+		a = mips.ABEQ
 
-	case gc.OAS<<16 | gc.TINT16:
-		a = ppc64.AMOVH
+	case OGE_ | gc.TFLOAT32, // ACMPGEF
+		OGE_ | gc.TFLOAT64: // ACMPGED
+		a = mips.ABFPT
 
-	case gc.OAS<<16 | gc.TUINT16:
-		a = ppc64.AMOVHZ
+	case OAS_ | gc.TBOOL,
+		OAS_ | gc.TINT8:
+		a = mips.AMOVB
 
-	case gc.OAS<<16 | gc.TINT32:
-		a = ppc64.AMOVW
+	case OAS_ | gc.TUINT8:
+		a = mips.AMOVBU
 
-	case gc.OAS<<16 | gc.TUINT32,
-		gc.OAS<<16 | gc.TPTR32:
-		a = ppc64.AMOVWZ
+	case OAS_ | gc.TINT16:
+		a = mips.AMOVH
 
-	case gc.OAS<<16 | gc.TINT64,
-		gc.OAS<<16 | gc.TUINT64,
-		gc.OAS<<16 | gc.TPTR64:
-		a = ppc64.AMOVD
+	case OAS_ | gc.TUINT16:
+		a = mips.AMOVHU
 
-	case gc.OAS<<16 | gc.TFLOAT32:
-		a = ppc64.AFMOVS
+	case OAS_ | gc.TINT32:
+		a = mips.AMOVW
 
-	case gc.OAS<<16 | gc.TFLOAT64:
-		a = ppc64.AFMOVD
+	case OAS_ | gc.TUINT32,
+		OAS_ | gc.TPTR32:
+		a = mips.AMOVWU
 
-	case gc.OADD<<16 | gc.TINT8,
-		gc.OADD<<16 | gc.TUINT8,
-		gc.OADD<<16 | gc.TINT16,
-		gc.OADD<<16 | gc.TUINT16,
-		gc.OADD<<16 | gc.TINT32,
-		gc.OADD<<16 | gc.TUINT32,
-		gc.OADD<<16 | gc.TPTR32,
-		gc.OADD<<16 | gc.TINT64,
-		gc.OADD<<16 | gc.TUINT64,
-		gc.OADD<<16 | gc.TPTR64:
-		a = ppc64.AADD
+	case OAS_ | gc.TINT64,
+		OAS_ | gc.TUINT64,
+		OAS_ | gc.TPTR64:
+		a = mips.AMOVV
 
-	case gc.OADD<<16 | gc.TFLOAT32:
-		a = ppc64.AFADDS
+	case OAS_ | gc.TFLOAT32:
+		a = mips.AMOVF
 
-	case gc.OADD<<16 | gc.TFLOAT64:
-		a = ppc64.AFADD
+	case OAS_ | gc.TFLOAT64:
+		a = mips.AMOVD
 
-	case gc.OSUB<<16 | gc.TINT8,
-		gc.OSUB<<16 | gc.TUINT8,
-		gc.OSUB<<16 | gc.TINT16,
-		gc.OSUB<<16 | gc.TUINT16,
-		gc.OSUB<<16 | gc.TINT32,
-		gc.OSUB<<16 | gc.TUINT32,
-		gc.OSUB<<16 | gc.TPTR32,
-		gc.OSUB<<16 | gc.TINT64,
-		gc.OSUB<<16 | gc.TUINT64,
-		gc.OSUB<<16 | gc.TPTR64:
-		a = ppc64.ASUB
+	case OADD_ | gc.TINT8,
+		OADD_ | gc.TUINT8,
+		OADD_ | gc.TINT16,
+		OADD_ | gc.TUINT16,
+		OADD_ | gc.TINT32,
+		OADD_ | gc.TUINT32,
+		OADD_ | gc.TPTR32:
+		a = mips.AADDU
 
-	case gc.OSUB<<16 | gc.TFLOAT32:
-		a = ppc64.AFSUBS
+	case OADD_ | gc.TINT64,
+		OADD_ | gc.TUINT64,
+		OADD_ | gc.TPTR64:
+		a = mips.AADDVU
 
-	case gc.OSUB<<16 | gc.TFLOAT64:
-		a = ppc64.AFSUB
+	case OADD_ | gc.TFLOAT32:
+		a = mips.AADDF
 
-	case gc.OMINUS<<16 | gc.TINT8,
-		gc.OMINUS<<16 | gc.TUINT8,
-		gc.OMINUS<<16 | gc.TINT16,
-		gc.OMINUS<<16 | gc.TUINT16,
-		gc.OMINUS<<16 | gc.TINT32,
-		gc.OMINUS<<16 | gc.TUINT32,
-		gc.OMINUS<<16 | gc.TPTR32,
-		gc.OMINUS<<16 | gc.TINT64,
-		gc.OMINUS<<16 | gc.TUINT64,
-		gc.OMINUS<<16 | gc.TPTR64:
-		a = ppc64.ANEG
+	case OADD_ | gc.TFLOAT64:
+		a = mips.AADDD
 
-	case gc.OAND<<16 | gc.TINT8,
-		gc.OAND<<16 | gc.TUINT8,
-		gc.OAND<<16 | gc.TINT16,
-		gc.OAND<<16 | gc.TUINT16,
-		gc.OAND<<16 | gc.TINT32,
-		gc.OAND<<16 | gc.TUINT32,
-		gc.OAND<<16 | gc.TPTR32,
-		gc.OAND<<16 | gc.TINT64,
-		gc.OAND<<16 | gc.TUINT64,
-		gc.OAND<<16 | gc.TPTR64:
-		a = ppc64.AAND
+	case OSUB_ | gc.TINT8,
+		OSUB_ | gc.TUINT8,
+		OSUB_ | gc.TINT16,
+		OSUB_ | gc.TUINT16,
+		OSUB_ | gc.TINT32,
+		OSUB_ | gc.TUINT32,
+		OSUB_ | gc.TPTR32:
+		a = mips.ASUBU
 
-	case gc.OOR<<16 | gc.TINT8,
-		gc.OOR<<16 | gc.TUINT8,
-		gc.OOR<<16 | gc.TINT16,
-		gc.OOR<<16 | gc.TUINT16,
-		gc.OOR<<16 | gc.TINT32,
-		gc.OOR<<16 | gc.TUINT32,
-		gc.OOR<<16 | gc.TPTR32,
-		gc.OOR<<16 | gc.TINT64,
-		gc.OOR<<16 | gc.TUINT64,
-		gc.OOR<<16 | gc.TPTR64:
-		a = ppc64.AOR
+	case OSUB_ | gc.TINT64,
+		OSUB_ | gc.TUINT64,
+		OSUB_ | gc.TPTR64:
+		a = mips.ASUBVU
 
-	case gc.OXOR<<16 | gc.TINT8,
-		gc.OXOR<<16 | gc.TUINT8,
-		gc.OXOR<<16 | gc.TINT16,
-		gc.OXOR<<16 | gc.TUINT16,
-		gc.OXOR<<16 | gc.TINT32,
-		gc.OXOR<<16 | gc.TUINT32,
-		gc.OXOR<<16 | gc.TPTR32,
-		gc.OXOR<<16 | gc.TINT64,
-		gc.OXOR<<16 | gc.TUINT64,
-		gc.OXOR<<16 | gc.TPTR64:
-		a = ppc64.AXOR
+	case OSUB_ | gc.TFLOAT32:
+		a = mips.ASUBF
+
+	case OSUB_ | gc.TFLOAT64:
+		a = mips.ASUBD
+
+	case OMINUS_ | gc.TINT8,
+		OMINUS_ | gc.TUINT8,
+		OMINUS_ | gc.TINT16,
+		OMINUS_ | gc.TUINT16,
+		OMINUS_ | gc.TINT32,
+		OMINUS_ | gc.TUINT32,
+		OMINUS_ | gc.TPTR32,
+		OMINUS_ | gc.TINT64,
+		OMINUS_ | gc.TUINT64,
+		OMINUS_ | gc.TPTR64:
+		a = mips.ASUBVU
+
+	case OAND_ | gc.TINT8,
+		OAND_ | gc.TUINT8,
+		OAND_ | gc.TINT16,
+		OAND_ | gc.TUINT16,
+		OAND_ | gc.TINT32,
+		OAND_ | gc.TUINT32,
+		OAND_ | gc.TPTR32,
+		OAND_ | gc.TINT64,
+		OAND_ | gc.TUINT64,
+		OAND_ | gc.TPTR64:
+		a = mips.AAND
+
+	case OOR_ | gc.TINT8,
+		OOR_ | gc.TUINT8,
+		OOR_ | gc.TINT16,
+		OOR_ | gc.TUINT16,
+		OOR_ | gc.TINT32,
+		OOR_ | gc.TUINT32,
+		OOR_ | gc.TPTR32,
+		OOR_ | gc.TINT64,
+		OOR_ | gc.TUINT64,
+		OOR_ | gc.TPTR64:
+		a = mips.AOR
+
+	case OXOR_ | gc.TINT8,
+		OXOR_ | gc.TUINT8,
+		OXOR_ | gc.TINT16,
+		OXOR_ | gc.TUINT16,
+		OXOR_ | gc.TINT32,
+		OXOR_ | gc.TUINT32,
+		OXOR_ | gc.TPTR32,
+		OXOR_ | gc.TINT64,
+		OXOR_ | gc.TUINT64,
+		OXOR_ | gc.TPTR64:
+		a = mips.AXOR
 
 		// TODO(minux): handle rotates
 	//case CASE(OLROT, TINT8):
@@ -894,31 +940,31 @@
 	//	a = 0//???; RLDC?
 	//	break;
 
-	case gc.OLSH<<16 | gc.TINT8,
-		gc.OLSH<<16 | gc.TUINT8,
-		gc.OLSH<<16 | gc.TINT16,
-		gc.OLSH<<16 | gc.TUINT16,
-		gc.OLSH<<16 | gc.TINT32,
-		gc.OLSH<<16 | gc.TUINT32,
-		gc.OLSH<<16 | gc.TPTR32,
-		gc.OLSH<<16 | gc.TINT64,
-		gc.OLSH<<16 | gc.TUINT64,
-		gc.OLSH<<16 | gc.TPTR64:
-		a = ppc64.ASLD
+	case OLSH_ | gc.TINT8,
+		OLSH_ | gc.TUINT8,
+		OLSH_ | gc.TINT16,
+		OLSH_ | gc.TUINT16,
+		OLSH_ | gc.TINT32,
+		OLSH_ | gc.TUINT32,
+		OLSH_ | gc.TPTR32,
+		OLSH_ | gc.TINT64,
+		OLSH_ | gc.TUINT64,
+		OLSH_ | gc.TPTR64:
+		a = mips.ASLLV
 
-	case gc.ORSH<<16 | gc.TUINT8,
-		gc.ORSH<<16 | gc.TUINT16,
-		gc.ORSH<<16 | gc.TUINT32,
-		gc.ORSH<<16 | gc.TPTR32,
-		gc.ORSH<<16 | gc.TUINT64,
-		gc.ORSH<<16 | gc.TPTR64:
-		a = ppc64.ASRD
+	case ORSH_ | gc.TUINT8,
+		ORSH_ | gc.TUINT16,
+		ORSH_ | gc.TUINT32,
+		ORSH_ | gc.TPTR32,
+		ORSH_ | gc.TUINT64,
+		ORSH_ | gc.TPTR64:
+		a = mips.ASRLV
 
-	case gc.ORSH<<16 | gc.TINT8,
-		gc.ORSH<<16 | gc.TINT16,
-		gc.ORSH<<16 | gc.TINT32,
-		gc.ORSH<<16 | gc.TINT64:
-		a = ppc64.ASRAD
+	case ORSH_ | gc.TINT8,
+		ORSH_ | gc.TINT16,
+		ORSH_ | gc.TINT32,
+		ORSH_ | gc.TINT64:
+		a = mips.ASRAV
 
 		// TODO(minux): handle rotates
 	//case CASE(ORROTC, TINT8):
@@ -932,54 +978,52 @@
 	//	a = 0//??? RLDC??
 	//	break;
 
-	case gc.OHMUL<<16 | gc.TINT64:
-		a = ppc64.AMULHD
+	case OHMUL_ | gc.TINT64:
+		a = mips.AMULV
 
-	case gc.OHMUL<<16 | gc.TUINT64,
-		gc.OHMUL<<16 | gc.TPTR64:
-		a = ppc64.AMULHDU
+	case OHMUL_ | gc.TUINT64,
+		OHMUL_ | gc.TPTR64:
+		a = mips.AMULVU
 
-	case gc.OMUL<<16 | gc.TINT8,
-		gc.OMUL<<16 | gc.TINT16,
-		gc.OMUL<<16 | gc.TINT32,
-		gc.OMUL<<16 | gc.TINT64:
-		a = ppc64.AMULLD
+	case OMUL_ | gc.TINT8,
+		OMUL_ | gc.TINT16,
+		OMUL_ | gc.TINT32,
+		OMUL_ | gc.TINT64:
+		a = mips.AMULV
 
-	case gc.OMUL<<16 | gc.TUINT8,
-		gc.OMUL<<16 | gc.TUINT16,
-		gc.OMUL<<16 | gc.TUINT32,
-		gc.OMUL<<16 | gc.TPTR32,
-		// don't use word multiply, the high 32-bit are undefined.
-		gc.OMUL<<16 | gc.TUINT64,
-		gc.OMUL<<16 | gc.TPTR64:
-		// for 64-bit multiplies, signedness doesn't matter.
-		a = ppc64.AMULLD
+	case OMUL_ | gc.TUINT8,
+		OMUL_ | gc.TUINT16,
+		OMUL_ | gc.TUINT32,
+		OMUL_ | gc.TPTR32,
+		OMUL_ | gc.TUINT64,
+		OMUL_ | gc.TPTR64:
+		a = mips.AMULVU
 
-	case gc.OMUL<<16 | gc.TFLOAT32:
-		a = ppc64.AFMULS
+	case OMUL_ | gc.TFLOAT32:
+		a = mips.AMULF
 
-	case gc.OMUL<<16 | gc.TFLOAT64:
-		a = ppc64.AFMUL
+	case OMUL_ | gc.TFLOAT64:
+		a = mips.AMULD
 
-	case gc.ODIV<<16 | gc.TINT8,
-		gc.ODIV<<16 | gc.TINT16,
-		gc.ODIV<<16 | gc.TINT32,
-		gc.ODIV<<16 | gc.TINT64:
-		a = ppc64.ADIVD
+	case ODIV_ | gc.TINT8,
+		ODIV_ | gc.TINT16,
+		ODIV_ | gc.TINT32,
+		ODIV_ | gc.TINT64:
+		a = mips.ADIVV
 
-	case gc.ODIV<<16 | gc.TUINT8,
-		gc.ODIV<<16 | gc.TUINT16,
-		gc.ODIV<<16 | gc.TUINT32,
-		gc.ODIV<<16 | gc.TPTR32,
-		gc.ODIV<<16 | gc.TUINT64,
-		gc.ODIV<<16 | gc.TPTR64:
-		a = ppc64.ADIVDU
+	case ODIV_ | gc.TUINT8,
+		ODIV_ | gc.TUINT16,
+		ODIV_ | gc.TUINT32,
+		ODIV_ | gc.TPTR32,
+		ODIV_ | gc.TUINT64,
+		ODIV_ | gc.TPTR64:
+		a = mips.ADIVVU
 
-	case gc.ODIV<<16 | gc.TFLOAT32:
-		a = ppc64.AFDIVS
+	case ODIV_ | gc.TFLOAT32:
+		a = mips.ADIVF
 
-	case gc.ODIV<<16 | gc.TFLOAT64:
-		a = ppc64.AFDIV
+	case ODIV_ | gc.TFLOAT64:
+		a = mips.ADIVD
 	}
 
 	return a
diff --git a/src/cmd/compile/internal/mips64/opt.go b/src/cmd/compile/internal/mips64/opt.go
deleted file mode 100644
index 1704f63..0000000
--- a/src/cmd/compile/internal/mips64/opt.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2014 The Go Authors.  All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ppc64
-
-// Many Power ISA arithmetic and logical instructions come in four
-// standard variants.  These bits let us map between variants.
-const (
-	V_CC = 1 << 0 // xCC (affect CR field 0 flags)
-	V_V  = 1 << 1 // xV (affect SO and OV flags)
-)
diff --git a/src/cmd/compile/internal/mips64/peep.go b/src/cmd/compile/internal/mips64/peep.go
index 9c3f1ed..681a31c 100644
--- a/src/cmd/compile/internal/mips64/peep.go
+++ b/src/cmd/compile/internal/mips64/peep.go
@@ -28,12 +28,12 @@
 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 // THE SOFTWARE.
 
-package ppc64
+package mips64
 
 import (
 	"cmd/compile/internal/gc"
 	"cmd/internal/obj"
-	"cmd/internal/obj/ppc64"
+	"cmd/internal/obj/mips"
 	"fmt"
 )
 
@@ -64,11 +64,11 @@
 		// can eliminate moves that don't care without
 		// breaking moves that do care.  This might let us
 		// simplify or remove the next peep loop, too.
-		if p.As == ppc64.AMOVD || p.As == ppc64.AFMOVD {
+		if p.As == mips.AMOVV || p.As == mips.AMOVF || p.As == mips.AMOVD {
 			if regtyp(&p.To) {
 				// Try to eliminate reg->reg moves
 				if regtyp(&p.From) {
-					if p.From.Type == p.To.Type {
+					if isfreg(&p.From) == isfreg(&p.To) {
 						if copyprop(r) {
 							excise(r)
 							t++
@@ -82,9 +82,9 @@
 				// Convert uses to $0 to uses of R0 and
 				// propagate R0
 				if regzer(&p.From) != 0 {
-					if p.To.Type == obj.TYPE_REG {
+					if p.To.Type == obj.TYPE_REG && !isfreg(&p.To) {
 						p.From.Type = obj.TYPE_REG
-						p.From.Reg = ppc64.REGZERO
+						p.From.Reg = mips.REGZERO
 						if copyprop(r) {
 							excise(r)
 							t++
@@ -113,12 +113,12 @@
 		default:
 			continue
 
-		case ppc64.AMOVH,
-			ppc64.AMOVHZ,
-			ppc64.AMOVB,
-			ppc64.AMOVBZ,
-			ppc64.AMOVW,
-			ppc64.AMOVWZ:
+		case mips.AMOVH,
+			mips.AMOVHU,
+			mips.AMOVB,
+			mips.AMOVBU,
+			mips.AMOVW,
+			mips.AMOVWU:
 			if p.To.Type != obj.TYPE_REG {
 				continue
 			}
@@ -141,209 +141,6 @@
 		excise(r1)
 	}
 
-	if gc.Debug['D'] > 1 {
-		goto ret /* allow following code improvement to be suppressed */
-	}
-
-	/*
-	 * look for OP x,y,R; CMP R, $0 -> OPCC x,y,R
-	 * when OP can set condition codes correctly
-	 */
-	for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
-		p = r.Prog
-		switch p.As {
-		case ppc64.ACMP,
-			ppc64.ACMPW: /* always safe? */
-			if regzer(&p.To) == 0 {
-				continue
-			}
-			r1 = r.S1
-			if r1 == nil {
-				continue
-			}
-			switch r1.Prog.As {
-			default:
-				continue
-
-				/* the conditions can be complex and these are currently little used */
-			case ppc64.ABCL,
-				ppc64.ABC:
-				continue
-
-			case ppc64.ABEQ,
-				ppc64.ABGE,
-				ppc64.ABGT,
-				ppc64.ABLE,
-				ppc64.ABLT,
-				ppc64.ABNE,
-				ppc64.ABVC,
-				ppc64.ABVS:
-				break
-			}
-
-			r1 = r
-			for {
-				r1 = gc.Uniqp(r1)
-				if r1 == nil || r1.Prog.As != obj.ANOP {
-					break
-				}
-			}
-
-			if r1 == nil {
-				continue
-			}
-			p1 = r1.Prog
-			if p1.To.Type != obj.TYPE_REG || p1.To.Reg != p.From.Reg {
-				continue
-			}
-			switch p1.As {
-			/* irregular instructions */
-			case ppc64.ASUB,
-				ppc64.AADD,
-				ppc64.AXOR,
-				ppc64.AOR:
-				if p1.From.Type == obj.TYPE_CONST || p1.From.Type == obj.TYPE_ADDR {
-					continue
-				}
-			}
-
-			switch p1.As {
-			default:
-				continue
-
-			case ppc64.AMOVW,
-				ppc64.AMOVD:
-				if p1.From.Type != obj.TYPE_REG {
-					continue
-				}
-				continue
-
-			case ppc64.AANDCC,
-				ppc64.AANDNCC,
-				ppc64.AORCC,
-				ppc64.AORNCC,
-				ppc64.AXORCC,
-				ppc64.ASUBCC,
-				ppc64.ASUBECC,
-				ppc64.ASUBMECC,
-				ppc64.ASUBZECC,
-				ppc64.AADDCC,
-				ppc64.AADDCCC,
-				ppc64.AADDECC,
-				ppc64.AADDMECC,
-				ppc64.AADDZECC,
-				ppc64.ARLWMICC,
-				ppc64.ARLWNMCC,
-				/* don't deal with floating point instructions for now */
-				/*
-					case AFABS:
-					case AFADD:
-					case AFADDS:
-					case AFCTIW:
-					case AFCTIWZ:
-					case AFDIV:
-					case AFDIVS:
-					case AFMADD:
-					case AFMADDS:
-					case AFMOVD:
-					case AFMSUB:
-					case AFMSUBS:
-					case AFMUL:
-					case AFMULS:
-					case AFNABS:
-					case AFNEG:
-					case AFNMADD:
-					case AFNMADDS:
-					case AFNMSUB:
-					case AFNMSUBS:
-					case AFRSP:
-					case AFSUB:
-					case AFSUBS:
-					case ACNTLZW:
-					case AMTFSB0:
-					case AMTFSB1:
-				*/
-				ppc64.AADD,
-				ppc64.AADDV,
-				ppc64.AADDC,
-				ppc64.AADDCV,
-				ppc64.AADDME,
-				ppc64.AADDMEV,
-				ppc64.AADDE,
-				ppc64.AADDEV,
-				ppc64.AADDZE,
-				ppc64.AADDZEV,
-				ppc64.AAND,
-				ppc64.AANDN,
-				ppc64.ADIVW,
-				ppc64.ADIVWV,
-				ppc64.ADIVWU,
-				ppc64.ADIVWUV,
-				ppc64.ADIVD,
-				ppc64.ADIVDV,
-				ppc64.ADIVDU,
-				ppc64.ADIVDUV,
-				ppc64.AEQV,
-				ppc64.AEXTSB,
-				ppc64.AEXTSH,
-				ppc64.AEXTSW,
-				ppc64.AMULHW,
-				ppc64.AMULHWU,
-				ppc64.AMULLW,
-				ppc64.AMULLWV,
-				ppc64.AMULHD,
-				ppc64.AMULHDU,
-				ppc64.AMULLD,
-				ppc64.AMULLDV,
-				ppc64.ANAND,
-				ppc64.ANEG,
-				ppc64.ANEGV,
-				ppc64.ANOR,
-				ppc64.AOR,
-				ppc64.AORN,
-				ppc64.AREM,
-				ppc64.AREMV,
-				ppc64.AREMU,
-				ppc64.AREMUV,
-				ppc64.AREMD,
-				ppc64.AREMDV,
-				ppc64.AREMDU,
-				ppc64.AREMDUV,
-				ppc64.ARLWMI,
-				ppc64.ARLWNM,
-				ppc64.ASLW,
-				ppc64.ASRAW,
-				ppc64.ASRW,
-				ppc64.ASLD,
-				ppc64.ASRAD,
-				ppc64.ASRD,
-				ppc64.ASUB,
-				ppc64.ASUBV,
-				ppc64.ASUBC,
-				ppc64.ASUBCV,
-				ppc64.ASUBME,
-				ppc64.ASUBMEV,
-				ppc64.ASUBE,
-				ppc64.ASUBEV,
-				ppc64.ASUBZE,
-				ppc64.ASUBZEV,
-				ppc64.AXOR:
-				t = variant2as(int(p1.As), as2variant(int(p1.As))|V_CC)
-			}
-
-			if gc.Debug['D'] != 0 {
-				fmt.Printf("cmp %v; %v -> ", p1, p)
-			}
-			p1.As = int16(t)
-			if gc.Debug['D'] != 0 {
-				fmt.Printf("%v\n", p1)
-			}
-			excise(r)
-			continue
-		}
-	}
-
-ret:
 	gc.Flowend(g)
 }
 
@@ -368,7 +165,7 @@
 		}
 	}
 	if a.Type == obj.TYPE_REG {
-		if a.Reg == ppc64.REGZERO {
+		if a.Reg == mips.REGZERO {
 			return 1
 		}
 	}
@@ -377,7 +174,11 @@
 
 func regtyp(a *obj.Addr) bool {
 	// TODO(rsc): Floating point register exclusions?
-	return a.Type == obj.TYPE_REG && ppc64.REG_R0 <= a.Reg && a.Reg <= ppc64.REG_F31 && a.Reg != ppc64.REGZERO
+	return a.Type == obj.TYPE_REG && mips.REG_R0 <= a.Reg && a.Reg <= mips.REG_F31 && a.Reg != mips.REGZERO
+}
+
+func isfreg(a *obj.Addr) bool {
+	return mips.REG_F0 <= a.Reg && a.Reg <= mips.REG_F31
 }
 
 /*
@@ -607,7 +408,7 @@
 //	0 otherwise (not touched)
 func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
 	if p.From3Type() != obj.TYPE_NONE {
-		// 9g never generates a from3
+		// never generates a from3
 		fmt.Printf("copyu: from3 (%v) not implemented\n", gc.Ctxt.Dconv(p.From3))
 	}
 
@@ -617,34 +418,29 @@
 		return 2
 
 	case obj.ANOP, /* read p->from, write p->to */
-		ppc64.AMOVH,
-		ppc64.AMOVHZ,
-		ppc64.AMOVB,
-		ppc64.AMOVBZ,
-		ppc64.AMOVW,
-		ppc64.AMOVWZ,
-		ppc64.AMOVD,
-		ppc64.ANEG,
-		ppc64.ANEGCC,
-		ppc64.AADDME,
-		ppc64.AADDMECC,
-		ppc64.AADDZE,
-		ppc64.AADDZECC,
-		ppc64.ASUBME,
-		ppc64.ASUBMECC,
-		ppc64.ASUBZE,
-		ppc64.ASUBZECC,
-		ppc64.AFCTIW,
-		ppc64.AFCTIWZ,
-		ppc64.AFCTID,
-		ppc64.AFCTIDZ,
-		ppc64.AFCFID,
-		ppc64.AFCFIDCC,
-		ppc64.AFMOVS,
-		ppc64.AFMOVD,
-		ppc64.AFRSP,
-		ppc64.AFNEG,
-		ppc64.AFNEGCC:
+		mips.AMOVV,
+		mips.AMOVF,
+		mips.AMOVD,
+		mips.AMOVH,
+		mips.AMOVHU,
+		mips.AMOVB,
+		mips.AMOVBU,
+		mips.AMOVW,
+		mips.AMOVWU,
+		mips.AMOVFD,
+		mips.AMOVDF,
+		mips.AMOVDW,
+		mips.AMOVWD,
+		mips.AMOVFW,
+		mips.AMOVWF,
+		mips.AMOVDV,
+		mips.AMOVVD,
+		mips.AMOVFV,
+		mips.AMOVVF,
+		mips.ATRUNCFV,
+		mips.ATRUNCDV,
+		mips.ATRUNCFW,
+		mips.ATRUNCDW:
 		if s != nil {
 			if copysub(&p.From, v, s, 1) != 0 {
 				return 1
@@ -680,103 +476,37 @@
 
 		return 0
 
-	case ppc64.AMOVBU, /* rar p->from, write p->to or read p->from, rar p->to */
-		ppc64.AMOVBZU,
-		ppc64.AMOVHU,
-		ppc64.AMOVHZU,
-		ppc64.AMOVWZU,
-		ppc64.AMOVDU:
-		if p.From.Type == obj.TYPE_MEM {
-			if copyas(&p.From, v) {
-				// No s!=nil check; need to fail
-				// anyway in that case
-				return 2
-			}
+	case mips.ASGT, /* read p->from, read p->reg, write p->to */
+		mips.ASGTU,
 
-			if s != nil {
-				if copysub(&p.To, v, s, 1) != 0 {
-					return 1
-				}
-				return 0
-			}
+		mips.AADD,
+		mips.AADDU,
+		mips.ASUB,
+		mips.ASUBU,
+		mips.ASLL,
+		mips.ASRL,
+		mips.ASRA,
+		mips.AOR,
+		mips.ANOR,
+		mips.AAND,
+		mips.AXOR,
 
-			if copyas(&p.To, v) {
-				return 3
-			}
-		} else if p.To.Type == obj.TYPE_MEM {
-			if copyas(&p.To, v) {
-				return 2
-			}
-			if s != nil {
-				if copysub(&p.From, v, s, 1) != 0 {
-					return 1
-				}
-				return 0
-			}
+		mips.AADDV,
+		mips.AADDVU,
+		mips.ASUBV,
+		mips.ASUBVU,
+		mips.ASLLV,
+		mips.ASRLV,
+		mips.ASRAV,
 
-			if copyau(&p.From, v) {
-				return 1
-			}
-		} else {
-			fmt.Printf("copyu: bad %v\n", p)
-		}
-
-		return 0
-
-	case ppc64.ARLWMI, /* read p->from, read p->reg, rar p->to */
-		ppc64.ARLWMICC:
-		if copyas(&p.To, v) {
-			return 2
-		}
-		fallthrough
-
-		/* fall through */
-	case ppc64.AADD,
-		/* read p->from, read p->reg, write p->to */
-		ppc64.AADDC,
-		ppc64.AADDE,
-		ppc64.ASUB,
-		ppc64.ASLW,
-		ppc64.ASRW,
-		ppc64.ASRAW,
-		ppc64.ASLD,
-		ppc64.ASRD,
-		ppc64.ASRAD,
-		ppc64.AOR,
-		ppc64.AORCC,
-		ppc64.AORN,
-		ppc64.AORNCC,
-		ppc64.AAND,
-		ppc64.AANDCC,
-		ppc64.AANDN,
-		ppc64.AANDNCC,
-		ppc64.ANAND,
-		ppc64.ANANDCC,
-		ppc64.ANOR,
-		ppc64.ANORCC,
-		ppc64.AXOR,
-		ppc64.AMULHW,
-		ppc64.AMULHWU,
-		ppc64.AMULLW,
-		ppc64.AMULLD,
-		ppc64.ADIVW,
-		ppc64.ADIVD,
-		ppc64.ADIVWU,
-		ppc64.ADIVDU,
-		ppc64.AREM,
-		ppc64.AREMU,
-		ppc64.AREMD,
-		ppc64.AREMDU,
-		ppc64.ARLWNM,
-		ppc64.ARLWNMCC,
-		ppc64.AFADDS,
-		ppc64.AFADD,
-		ppc64.AFSUBS,
-		ppc64.AFSUB,
-		ppc64.AFMULS,
-		ppc64.AFMUL,
-		ppc64.AFDIVS,
-		ppc64.AFDIV:
+		mips.AADDF,
+		mips.AADDD,
+		mips.ASUBF,
+		mips.ASUBD,
+		mips.AMULF,
+		mips.AMULD,
+		mips.ADIVF,
+		mips.ADIVD:
 		if s != nil {
 			if copysub(&p.From, v, s, 1) != 0 {
 				return 1
@@ -822,42 +552,47 @@
 		}
 		return 0
 
-	case ppc64.ABEQ,
-		ppc64.ABGT,
-		ppc64.ABGE,
-		ppc64.ABLT,
-		ppc64.ABLE,
-		ppc64.ABNE,
-		ppc64.ABVC,
-		ppc64.ABVS:
-		return 0
-
 	case obj.ACHECKNIL, /* read p->from */
-		ppc64.ACMP, /* read p->from, read p->to */
-		ppc64.ACMPU,
-		ppc64.ACMPW,
-		ppc64.ACMPWU,
-		ppc64.AFCMPO,
-		ppc64.AFCMPU:
+		mips.ABEQ, /* read p->from, read p->reg */
+		mips.ABNE,
+		mips.ABGTZ,
+		mips.ABGEZ,
+		mips.ABLTZ,
+		mips.ABLEZ,
+
+		mips.ACMPEQD,
+		mips.ACMPEQF,
+		mips.ACMPGED,
+		mips.ACMPGEF,
+		mips.ACMPGTD,
+		mips.ACMPGTF,
+		mips.ABFPF,
+		mips.ABFPT,
+
+		mips.AMUL,
+		mips.AMULU,
+		mips.ADIV,
+		mips.ADIVU,
+		mips.AMULV,
+		mips.AMULVU,
+		mips.ADIVV,
+		mips.ADIVVU:
 		if s != nil {
 			if copysub(&p.From, v, s, 1) != 0 {
 				return 1
 			}
-			return copysub(&p.To, v, s, 1)
+			return copysub1(p, v, s, 1)
 		}
 
 		if copyau(&p.From, v) {
 			return 1
 		}
-		if copyau(&p.To, v) {
+		if copyau1(p, v) {
 			return 1
 		}
 		return 0
 
-		// 9g never generates a branch to a GPR (this isn't
-	// even a normal instruction; liblink turns it in to a
-	// mov and a branch).
-	case ppc64.ABR: /* read p->to */
+	case mips.AJMP: /* read p->to */
 		if s != nil {
 			if copysub(&p.To, v, s, 1) != 0 {
 				return 1
@@ -870,7 +605,7 @@
 		}
 		return 0
 
-	case obj.ARET: /* funny */
+	case mips.ARET: /* funny */
 		if s != nil {
 			return 0
 		}
@@ -879,20 +614,20 @@
 		// everything is set (and not used).
 		return 3
 
-	case ppc64.ABL: /* funny */
+	case mips.AJAL: /* funny */
 		if v.Type == obj.TYPE_REG {
 			// TODO(rsc): REG_R0 and REG_F0 used to be
 			// (when register numbers started at 0) exregoffset and exfregoffset,
 			// which are unset entirely.
 			// It's strange that this handles R0 and F0 differently from the other
 			// registers. Possible failure to optimize?
-			if ppc64.REG_R0 < v.Reg && v.Reg <= ppc64.REGEXT {
+			if mips.REG_R0 < v.Reg && v.Reg <= mips.REG_R31 {
 				return 2
 			}
-			if v.Reg == ppc64.REGARG {
+			if v.Reg == mips.REGARG {
 				return 2
 			}
-			if ppc64.REG_F0 < v.Reg && v.Reg <= ppc64.FREGEXT {
+			if mips.REG_F0 < v.Reg && v.Reg <= mips.REG_F31 {
 				return 2
 			}
 		}
@@ -913,28 +648,28 @@
 		}
 		return 3
 
-		// R0 is zero, used by DUFFZERO, cannot be substituted.
-	// R3 is ptr to memory, used and set, cannot be substituted.
+	// R0 is zero, used by DUFFZERO, cannot be substituted.
+	// R1 is ptr to memory, used and set, cannot be substituted.
 	case obj.ADUFFZERO:
 		if v.Type == obj.TYPE_REG {
 			if v.Reg == 0 {
 				return 1
 			}
-			if v.Reg == 3 {
+			if v.Reg == 1 {
 				return 2
 			}
 		}
 
 		return 0
 
-		// R3, R4 are ptr to src, dst, used and set, cannot be substituted.
-	// R5 is scratch, set by DUFFCOPY, cannot be substituted.
+	// R1, R2 are ptr to src, dst, used and set, cannot be substituted.
+	// R3 is scratch, set by DUFFCOPY, cannot be substituted.
 	case obj.ADUFFCOPY:
 		if v.Type == obj.TYPE_REG {
-			if v.Reg == 3 || v.Reg == 4 {
+			if v.Reg == 1 || v.Reg == 2 {
 				return 2
 			}
-			if v.Reg == 5 {
+			if v.Reg == 3 {
 				return 3
 			}
 		}
@@ -943,7 +678,7 @@
 
 	case obj.ATEXT: /* funny */
 		if v.Type == obj.TYPE_REG {
-			if v.Reg == ppc64.REGARG {
+			if v.Reg == mips.REGARG {
 				return 3
 			}
 		}
@@ -1006,7 +741,7 @@
 }
 
 // copysub replaces v with s in a if f!=0 or indicates it if could if f==0.
-// Returns 1 on failure to substitute (it always succeeds on ppc64).
+// Returns 1 on failure to substitute (it always succeeds on mips).
 func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f int) int {
 	if f != 0 {
 		if copyau(a, v) {
@@ -1017,7 +752,7 @@
 }
 
 // copysub1 replaces v with s in p1->reg if f!=0 or indicates if it could if f==0.
-// Returns 1 on failure to substitute (it always succeeds on ppc64).
+// Returns 1 on failure to substitute (it always succeeds on mips).
 func copysub1(p1 *obj.Prog, v *obj.Addr, s *obj.Addr, f int) int {
 	if f != 0 {
 		if copyau1(p1, v) {
@@ -1047,5 +782,5 @@
 }
 
 func stackaddr(a *obj.Addr) bool {
-	return a.Type == obj.TYPE_REG && a.Reg == ppc64.REGSP
+	return a.Type == obj.TYPE_REG && a.Reg == mips.REGSP
 }
diff --git a/src/cmd/compile/internal/mips64/prog.go b/src/cmd/compile/internal/mips64/prog.go
index 9b8719b..bf13d82 100644
--- a/src/cmd/compile/internal/mips64/prog.go
+++ b/src/cmd/compile/internal/mips64/prog.go
@@ -2,12 +2,12 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package ppc64
+package mips64
 
 import (
 	"cmd/compile/internal/gc"
 	"cmd/internal/obj"
-	"cmd/internal/obj/ppc64"
+	"cmd/internal/obj/mips"
 )
 
 const (
@@ -24,7 +24,7 @@
 // size variants of an operation even if we just use a subset.
 //
 // The table is formatted for 8-space tabs.
-var progtable = [ppc64.ALAST]obj.ProgInfo{
+var progtable = [mips.ALAST]obj.ProgInfo{
 	obj.ATYPE:     {Flags: gc.Pseudo | gc.Skip},
 	obj.ATEXT:     {Flags: gc.Pseudo},
 	obj.AFUNCDATA: {Flags: gc.Pseudo},
@@ -36,106 +36,105 @@
 	obj.AVARKILL:  {Flags: gc.Pseudo | gc.RightWrite},
 
 	// NOP is an internal no-op that also stands
-	// for USED and SET annotations, not the Power opcode.
+	// for USED and SET annotations, not the MIPS opcode.
 	obj.ANOP: {Flags: gc.LeftRead | gc.RightWrite},
 
 	// Integer
-	ppc64.AADD:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.ASUB:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.ANEG:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.AAND:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.AOR:     {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.AXOR:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.AMULLD:  {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.AMULLW:  {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.AMULHD:  {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.AMULHDU: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.ADIVD:   {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.ADIVDU:  {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.ASLD:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.ASRD:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.ASRAD:   {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.ACMP:    {Flags: gc.SizeQ | gc.LeftRead | gc.RightRead},
-	ppc64.ACMPU:   {Flags: gc.SizeQ | gc.LeftRead | gc.RightRead},
-	ppc64.ATD:     {Flags: gc.SizeQ | gc.RightRead},
+	mips.AADD:   {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	mips.AADDU:  {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	mips.AADDV:  {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	mips.AADDVU: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	mips.ASUB:   {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	mips.ASUBU:  {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	mips.ASUBV:  {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	mips.ASUBVU: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	mips.AAND:   {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	mips.AOR:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	mips.AXOR:   {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	mips.ANOR:   {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	mips.AMUL:   {Flags: gc.SizeL | gc.LeftRead | gc.RegRead},
+	mips.AMULU:  {Flags: gc.SizeL | gc.LeftRead | gc.RegRead},
+	mips.AMULV:  {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead},
+	mips.AMULVU: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead},
+	mips.ADIV:   {Flags: gc.SizeL | gc.LeftRead | gc.RegRead},
+	mips.ADIVU:  {Flags: gc.SizeL | gc.LeftRead | gc.RegRead},
+	mips.ADIVV:  {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead},
+	mips.ADIVVU: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead},
+	mips.AREM:   {Flags: gc.SizeL | gc.LeftRead | gc.RegRead},
+	mips.AREMU:  {Flags: gc.SizeL | gc.LeftRead | gc.RegRead},
+	mips.AREMV:  {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead},
+	mips.AREMVU: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead},
+	mips.ASLL:   {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	mips.ASLLV:  {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	mips.ASRA:   {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	mips.ASRAV:  {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	mips.ASRL:   {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	mips.ASRLV:  {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	mips.ASGT:   {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	mips.ASGTU:  {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
 
 	// Floating point.
-	ppc64.AFADD:   {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.AFADDS:  {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.AFSUB:   {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.AFSUBS:  {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.AFMUL:   {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.AFMULS:  {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.AFDIV:   {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.AFDIVS:  {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.AFCTIDZ: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.AFCFID:  {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.AFCMPU:  {Flags: gc.SizeD | gc.LeftRead | gc.RightRead},
-	ppc64.AFRSP:   {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
+	mips.AADDF:    {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	mips.AADDD:    {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	mips.ASUBF:    {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	mips.ASUBD:    {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	mips.AMULF:    {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	mips.AMULD:    {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	mips.ADIVF:    {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	mips.ADIVD:    {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	mips.AABSF:    {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite},
+	mips.AABSD:    {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite},
+	mips.ANEGF:    {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite},
+	mips.ANEGD:    {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite},
+	mips.ACMPEQF:  {Flags: gc.SizeF | gc.LeftRead | gc.RegRead},
+	mips.ACMPEQD:  {Flags: gc.SizeD | gc.LeftRead | gc.RegRead},
+	mips.ACMPGTF:  {Flags: gc.SizeF | gc.LeftRead | gc.RegRead},
+	mips.ACMPGTD:  {Flags: gc.SizeD | gc.LeftRead | gc.RegRead},
+	mips.ACMPGEF:  {Flags: gc.SizeF | gc.LeftRead | gc.RegRead},
+	mips.ACMPGED:  {Flags: gc.SizeD | gc.LeftRead | gc.RegRead},
+	mips.AMOVFD:   {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
+	mips.AMOVDF:   {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv},
+	mips.AMOVFW:   {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
+	mips.AMOVWF:   {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv},
+	mips.AMOVDW:   {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
+	mips.AMOVWD:   {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
+	mips.AMOVFV:   {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv},
+	mips.AMOVVF:   {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv},
+	mips.AMOVDV:   {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv},
+	mips.AMOVVD:   {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
+	mips.ATRUNCFW: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
+	mips.ATRUNCDW: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
+	mips.ATRUNCFV: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv},
+	mips.ATRUNCDV: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv},
 
 	// Moves
-	ppc64.AMOVB:  {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
-	ppc64.AMOVBU: {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv | gc.PostInc},
-	ppc64.AMOVBZ: {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
-	ppc64.AMOVH:  {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
-	ppc64.AMOVHU: {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv | gc.PostInc},
-	ppc64.AMOVHZ: {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
-	ppc64.AMOVW:  {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
-
-	// there is no AMOVWU.
-	ppc64.AMOVWZU: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv | gc.PostInc},
-	ppc64.AMOVWZ:  {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
-	ppc64.AMOVD:   {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move},
-	ppc64.AMOVDU:  {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move | gc.PostInc},
-	ppc64.AFMOVS:  {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
-	ppc64.AFMOVD:  {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move},
+	mips.AMOVB:  {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
+	mips.AMOVBU: {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
+	mips.AMOVH:  {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
+	mips.AMOVHU: {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
+	mips.AMOVW:  {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
+	mips.AMOVWU: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
+	mips.AMOVV:  {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move},
+	mips.AMOVF:  {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
+	mips.AMOVD:  {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move},
 
 	// Jumps
-	ppc64.ABR:     {Flags: gc.Jump | gc.Break},
-	ppc64.ABL:     {Flags: gc.Call},
-	ppc64.ABEQ:    {Flags: gc.Cjmp},
-	ppc64.ABNE:    {Flags: gc.Cjmp},
-	ppc64.ABGE:    {Flags: gc.Cjmp},
-	ppc64.ABLT:    {Flags: gc.Cjmp},
-	ppc64.ABGT:    {Flags: gc.Cjmp},
-	ppc64.ABLE:    {Flags: gc.Cjmp},
-	obj.ARET:      {Flags: gc.Break},
+	mips.AJMP:     {Flags: gc.Jump | gc.Break},
+	mips.AJAL:     {Flags: gc.Call},
+	mips.ABEQ:     {Flags: gc.Cjmp},
+	mips.ABNE:     {Flags: gc.Cjmp},
+	mips.ABGEZ:    {Flags: gc.Cjmp},
+	mips.ABLTZ:    {Flags: gc.Cjmp},
+	mips.ABGTZ:    {Flags: gc.Cjmp},
+	mips.ABLEZ:    {Flags: gc.Cjmp},
+	mips.ABFPF:    {Flags: gc.Cjmp},
+	mips.ABFPT:    {Flags: gc.Cjmp},
+	mips.ARET:     {Flags: gc.Break},
 	obj.ADUFFZERO: {Flags: gc.Call},
 	obj.ADUFFCOPY: {Flags: gc.Call},
 }
 
-var initproginfo_initialized int
-
-func initproginfo() {
-	var addvariant = []int{V_CC, V_V, V_CC | V_V}
-
-	if initproginfo_initialized != 0 {
-		return
-	}
-	initproginfo_initialized = 1
-
-	// Perform one-time expansion of instructions in progtable to
-	// their CC, V, and VCC variants
-	var as2 int
-	var i int
-	var variant int
-	for as := int(0); as < len(progtable); as++ {
-		if progtable[as].Flags == 0 {
-			continue
-		}
-		variant = as2variant(as)
-		for i = 0; i < len(addvariant); i++ {
-			as2 = variant2as(as, variant|addvariant[i])
-			if as2 != 0 && progtable[as2].Flags == 0 {
-				progtable[as2] = progtable[as]
-			}
-		}
-	}
-}
-
 func proginfo(p *obj.Prog) {
-	initproginfo()
-
 	info := &p.Info
 	*info = progtable[p.As]
 	if info.Flags == 0 {
@@ -149,16 +148,10 @@
 
 	if (p.From.Type == obj.TYPE_MEM || p.From.Type == obj.TYPE_ADDR) && p.From.Reg != 0 {
 		info.Regindex |= RtoB(int(p.From.Reg))
-		if info.Flags&gc.PostInc != 0 {
-			info.Regset |= RtoB(int(p.From.Reg))
-		}
 	}
 
 	if (p.To.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_ADDR) && p.To.Reg != 0 {
 		info.Regindex |= RtoB(int(p.To.Reg))
-		if info.Flags&gc.PostInc != 0 {
-			info.Regset |= RtoB(int(p.To.Reg))
-		}
 	}
 
 	if p.From.Type == obj.TYPE_ADDR && p.From.Sym != nil && (info.Flags&gc.LeftRead != 0) {
@@ -167,148 +160,14 @@
 	}
 
 	if p.As == obj.ADUFFZERO {
-		info.Reguse |= 1<<0 | RtoB(ppc64.REG_R3)
-		info.Regset |= RtoB(ppc64.REG_R3)
+		info.Reguse |= 1<<0 | RtoB(mips.REGRT1)
+		info.Regset |= RtoB(mips.REGRT1)
 	}
 
 	if p.As == obj.ADUFFCOPY {
 		// TODO(austin) Revisit when duffcopy is implemented
-		info.Reguse |= RtoB(ppc64.REG_R3) | RtoB(ppc64.REG_R4) | RtoB(ppc64.REG_R5)
+		info.Reguse |= RtoB(mips.REGRT1) | RtoB(mips.REGRT2) | RtoB(mips.REG_R3)
 
-		info.Regset |= RtoB(ppc64.REG_R3) | RtoB(ppc64.REG_R4)
+		info.Regset |= RtoB(mips.REGRT1) | RtoB(mips.REGRT2)
 	}
 }
-
-// Instruction variants table.  Initially this contains entries only
-// for the "base" form of each instruction.  On the first call to
-// as2variant or variant2as, we'll add the variants to the table.
-var varianttable = [ppc64.ALAST][4]int{
-	ppc64.AADD:     [4]int{ppc64.AADD, ppc64.AADDCC, ppc64.AADDV, ppc64.AADDVCC},
-	ppc64.AADDC:    [4]int{ppc64.AADDC, ppc64.AADDCCC, ppc64.AADDCV, ppc64.AADDCVCC},
-	ppc64.AADDE:    [4]int{ppc64.AADDE, ppc64.AADDECC, ppc64.AADDEV, ppc64.AADDEVCC},
-	ppc64.AADDME:   [4]int{ppc64.AADDME, ppc64.AADDMECC, ppc64.AADDMEV, ppc64.AADDMEVCC},
-	ppc64.AADDZE:   [4]int{ppc64.AADDZE, ppc64.AADDZECC, ppc64.AADDZEV, ppc64.AADDZEVCC},
-	ppc64.AAND:     [4]int{ppc64.AAND, ppc64.AANDCC, 0, 0},
-	ppc64.AANDN:    [4]int{ppc64.AANDN, ppc64.AANDNCC, 0, 0},
-	ppc64.ACNTLZD:  [4]int{ppc64.ACNTLZD, ppc64.ACNTLZDCC, 0, 0},
-	ppc64.ACNTLZW:  [4]int{ppc64.ACNTLZW, ppc64.ACNTLZWCC, 0, 0},
-	ppc64.ADIVD:    [4]int{ppc64.ADIVD, ppc64.ADIVDCC, ppc64.ADIVDV, ppc64.ADIVDVCC},
-	ppc64.ADIVDU:   [4]int{ppc64.ADIVDU, ppc64.ADIVDUCC, ppc64.ADIVDUV, ppc64.ADIVDUVCC},
-	ppc64.ADIVW:    [4]int{ppc64.ADIVW, ppc64.ADIVWCC, ppc64.ADIVWV, ppc64.ADIVWVCC},
-	ppc64.ADIVWU:   [4]int{ppc64.ADIVWU, ppc64.ADIVWUCC, ppc64.ADIVWUV, ppc64.ADIVWUVCC},
-	ppc64.AEQV:     [4]int{ppc64.AEQV, ppc64.AEQVCC, 0, 0},
-	ppc64.AEXTSB:   [4]int{ppc64.AEXTSB, ppc64.AEXTSBCC, 0, 0},
-	ppc64.AEXTSH:   [4]int{ppc64.AEXTSH, ppc64.AEXTSHCC, 0, 0},
-	ppc64.AEXTSW:   [4]int{ppc64.AEXTSW, ppc64.AEXTSWCC, 0, 0},
-	ppc64.AFABS:    [4]int{ppc64.AFABS, ppc64.AFABSCC, 0, 0},
-	ppc64.AFADD:    [4]int{ppc64.AFADD, ppc64.AFADDCC, 0, 0},
-	ppc64.AFADDS:   [4]int{ppc64.AFADDS, ppc64.AFADDSCC, 0, 0},
-	ppc64.AFCFID:   [4]int{ppc64.AFCFID, ppc64.AFCFIDCC, 0, 0},
-	ppc64.AFCTID:   [4]int{ppc64.AFCTID, ppc64.AFCTIDCC, 0, 0},
-	ppc64.AFCTIDZ:  [4]int{ppc64.AFCTIDZ, ppc64.AFCTIDZCC, 0, 0},
-	ppc64.AFCTIW:   [4]int{ppc64.AFCTIW, ppc64.AFCTIWCC, 0, 0},
-	ppc64.AFCTIWZ:  [4]int{ppc64.AFCTIWZ, ppc64.AFCTIWZCC, 0, 0},
-	ppc64.AFDIV:    [4]int{ppc64.AFDIV, ppc64.AFDIVCC, 0, 0},
-	ppc64.AFDIVS:   [4]int{ppc64.AFDIVS, ppc64.AFDIVSCC, 0, 0},
-	ppc64.AFMADD:   [4]int{ppc64.AFMADD, ppc64.AFMADDCC, 0, 0},
-	ppc64.AFMADDS:  [4]int{ppc64.AFMADDS, ppc64.AFMADDSCC, 0, 0},
-	ppc64.AFMOVD:   [4]int{ppc64.AFMOVD, ppc64.AFMOVDCC, 0, 0},
-	ppc64.AFMSUB:   [4]int{ppc64.AFMSUB, ppc64.AFMSUBCC, 0, 0},
-	ppc64.AFMSUBS:  [4]int{ppc64.AFMSUBS, ppc64.AFMSUBSCC, 0, 0},
-	ppc64.AFMUL:    [4]int{ppc64.AFMUL, ppc64.AFMULCC, 0, 0},
-	ppc64.AFMULS:   [4]int{ppc64.AFMULS, ppc64.AFMULSCC, 0, 0},
-	ppc64.AFNABS:   [4]int{ppc64.AFNABS, ppc64.AFNABSCC, 0, 0},
-	ppc64.AFNEG:    [4]int{ppc64.AFNEG, ppc64.AFNEGCC, 0, 0},
-	ppc64.AFNMADD:  [4]int{ppc64.AFNMADD, ppc64.AFNMADDCC, 0, 0},
-	ppc64.AFNMADDS: [4]int{ppc64.AFNMADDS, ppc64.AFNMADDSCC, 0, 0},
-	ppc64.AFNMSUB:  [4]int{ppc64.AFNMSUB, ppc64.AFNMSUBCC, 0, 0},
-	ppc64.AFNMSUBS: [4]int{ppc64.AFNMSUBS, ppc64.AFNMSUBSCC, 0, 0},
-	ppc64.AFRES:    [4]int{ppc64.AFRES, ppc64.AFRESCC, 0, 0},
-	ppc64.AFRSP:    [4]int{ppc64.AFRSP, ppc64.AFRSPCC, 0, 0},
-	ppc64.AFRSQRTE: [4]int{ppc64.AFRSQRTE, ppc64.AFRSQRTECC, 0, 0},
-	ppc64.AFSEL:    [4]int{ppc64.AFSEL, ppc64.AFSELCC, 0, 0},
-	ppc64.AFSQRT:   [4]int{ppc64.AFSQRT, ppc64.AFSQRTCC, 0, 0},
-	ppc64.AFSQRTS:  [4]int{ppc64.AFSQRTS, ppc64.AFSQRTSCC, 0, 0},
-	ppc64.AFSUB:    [4]int{ppc64.AFSUB, ppc64.AFSUBCC, 0, 0},
-	ppc64.AFSUBS:   [4]int{ppc64.AFSUBS, ppc64.AFSUBSCC, 0, 0},
-	ppc64.AMTFSB0:  [4]int{ppc64.AMTFSB0, ppc64.AMTFSB0CC, 0, 0},
-	ppc64.AMTFSB1:  [4]int{ppc64.AMTFSB1, ppc64.AMTFSB1CC, 0, 0},
-	ppc64.AMULHD:   [4]int{ppc64.AMULHD, ppc64.AMULHDCC, 0, 0},
-	ppc64.AMULHDU:  [4]int{ppc64.AMULHDU, ppc64.AMULHDUCC, 0, 0},
-	ppc64.AMULHW:   [4]int{ppc64.AMULHW, ppc64.AMULHWCC, 0, 0},
-	ppc64.AMULHWU:  [4]int{ppc64.AMULHWU, ppc64.AMULHWUCC, 0, 0},
-	ppc64.AMULLD:   [4]int{ppc64.AMULLD, ppc64.AMULLDCC, ppc64.AMULLDV, ppc64.AMULLDVCC},
-	ppc64.AMULLW:   [4]int{ppc64.AMULLW, ppc64.AMULLWCC, ppc64.AMULLWV, ppc64.AMULLWVCC},
-	ppc64.ANAND:    [4]int{ppc64.ANAND, ppc64.ANANDCC, 0, 0},
-	ppc64.ANEG:     [4]int{ppc64.ANEG, ppc64.ANEGCC, ppc64.ANEGV, ppc64.ANEGVCC},
-	ppc64.ANOR:     [4]int{ppc64.ANOR, ppc64.ANORCC, 0, 0},
-	ppc64.AOR:      [4]int{ppc64.AOR, ppc64.AORCC, 0, 0},
-	ppc64.AORN:     [4]int{ppc64.AORN, ppc64.AORNCC, 0, 0},
-	ppc64.AREM:     [4]int{ppc64.AREM, ppc64.AREMCC, ppc64.AREMV, ppc64.AREMVCC},
-	ppc64.AREMD:    [4]int{ppc64.AREMD, ppc64.AREMDCC, ppc64.AREMDV, ppc64.AREMDVCC},
-	ppc64.AREMDU:   [4]int{ppc64.AREMDU, ppc64.AREMDUCC, ppc64.AREMDUV, ppc64.AREMDUVCC},
-	ppc64.AREMU:    [4]int{ppc64.AREMU, ppc64.AREMUCC, ppc64.AREMUV, ppc64.AREMUVCC},
-	ppc64.ARLDC:    [4]int{ppc64.ARLDC, ppc64.ARLDCCC, 0, 0},
-	ppc64.ARLDCL:   [4]int{ppc64.ARLDCL, ppc64.ARLDCLCC, 0, 0},
-	ppc64.ARLDCR:   [4]int{ppc64.ARLDCR, ppc64.ARLDCRCC, 0, 0},
-	ppc64.ARLDMI:   [4]int{ppc64.ARLDMI, ppc64.ARLDMICC, 0, 0},
-	ppc64.ARLWMI:   [4]int{ppc64.ARLWMI, ppc64.ARLWMICC, 0, 0},
-	ppc64.ARLWNM:   [4]int{ppc64.ARLWNM, ppc64.ARLWNMCC, 0, 0},
-	ppc64.ASLD:     [4]int{ppc64.ASLD, ppc64.ASLDCC, 0, 0},
-	ppc64.ASLW:     [4]int{ppc64.ASLW, ppc64.ASLWCC, 0, 0},
-	ppc64.ASRAD:    [4]int{ppc64.ASRAD, ppc64.ASRADCC, 0, 0},
-	ppc64.ASRAW:    [4]int{ppc64.ASRAW, ppc64.ASRAWCC, 0, 0},
-	ppc64.ASRD:     [4]int{ppc64.ASRD, ppc64.ASRDCC, 0, 0},
-	ppc64.ASRW:     [4]int{ppc64.ASRW, ppc64.ASRWCC, 0, 0},
-	ppc64.ASUB:     [4]int{ppc64.ASUB, ppc64.ASUBCC, ppc64.ASUBV, ppc64.ASUBVCC},
-	ppc64.ASUBC:    [4]int{ppc64.ASUBC, ppc64.ASUBCCC, ppc64.ASUBCV, ppc64.ASUBCVCC},
-	ppc64.ASUBE:    [4]int{ppc64.ASUBE, ppc64.ASUBECC, ppc64.ASUBEV, ppc64.ASUBEVCC},
-	ppc64.ASUBME:   [4]int{ppc64.ASUBME, ppc64.ASUBMECC, ppc64.ASUBMEV, ppc64.ASUBMEVCC},
-	ppc64.ASUBZE:   [4]int{ppc64.ASUBZE, ppc64.ASUBZECC, ppc64.ASUBZEV, ppc64.ASUBZEVCC},
-	ppc64.AXOR:     [4]int{ppc64.AXOR, ppc64.AXORCC, 0, 0},
-}
-
-var initvariants_initialized int
-
-func initvariants() {
-	if initvariants_initialized != 0 {
-		return
-	}
-	initvariants_initialized = 1
-
-	var j int
-	for i := int(0); i < len(varianttable); i++ {
-		if varianttable[i][0] == 0 {
-			// Instruction has no variants
-			varianttable[i][0] = i
-
-			continue
-		}
-
-		// Copy base form to other variants
-		if varianttable[i][0] == i {
-			for j = 0; j < len(varianttable[i]); j++ {
-				varianttable[varianttable[i][j]] = varianttable[i]
-			}
-		}
-	}
-}
-
-// as2variant returns the variant (V_*) flags of instruction as.
-func as2variant(as int) int {
-	initvariants()
-	for i := int(0); i < len(varianttable[as]); i++ {
-		if varianttable[as][i] == as {
-			return i
-		}
-	}
-	gc.Fatalf("as2variant: instruction %v is not a variant of itself", obj.Aconv(as))
-	return 0
-}
-
-// variant2as returns the instruction as with the given variant (V_*) flags.
-// If no such variant exists, this returns 0.
-func variant2as(as int, flags int) int {
-	initvariants()
-	return varianttable[as][flags]
-}
diff --git a/src/cmd/compile/internal/mips64/reg.go b/src/cmd/compile/internal/mips64/reg.go
index fa1cb71..5c46588 100644
--- a/src/cmd/compile/internal/mips64/reg.go
+++ b/src/cmd/compile/internal/mips64/reg.go
@@ -28,9 +28,9 @@
 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 // THE SOFTWARE.
 
-package ppc64
+package mips64
 
-import "cmd/internal/obj/ppc64"
+import "cmd/internal/obj/mips"
 import "cmd/compile/internal/gc"
 
 const (
@@ -111,10 +111,10 @@
 
 func excludedregs() uint64 {
 	// Exclude registers with fixed functions
-	regbits := uint64(1<<0 | RtoB(ppc64.REGSP) | RtoB(ppc64.REGG) | RtoB(ppc64.REGTLS))
+	regbits := uint64(1<<0 | RtoB(mips.REGSP) | RtoB(mips.REGG) | RtoB(mips.REGTMP) | RtoB(mips.REGLINK) | RtoB(mips.REG_R26) | RtoB(mips.REG_R27))
 
 	// Also exclude floating point registers with fixed constants
-	regbits |= RtoB(ppc64.REG_F27) | RtoB(ppc64.REG_F28) | RtoB(ppc64.REG_F29) | RtoB(ppc64.REG_F30) | RtoB(ppc64.REG_F31)
+	regbits |= RtoB(mips.FREGZERO) | RtoB(mips.FREGHALF) | RtoB(mips.FREGONE) | RtoB(mips.FREGTWO)
 
 	return regbits
 }
@@ -136,11 +136,11 @@
  *	32+31	F31
  */
 func RtoB(r int) uint64 {
-	if r > ppc64.REG_R0 && r <= ppc64.REG_R31 {
-		return 1 << uint(r-ppc64.REG_R0)
+	if r > mips.REG_R0 && r <= mips.REG_R31 {
+		return 1 << uint(r-mips.REG_R0)
 	}
-	if r >= ppc64.REG_F0 && r <= ppc64.REG_F31 {
-		return 1 << uint(32+r-ppc64.REG_F0)
+	if r >= mips.REG_F0 && r <= mips.REG_F31 {
+		return 1 << uint(32+r-mips.REG_F0)
 	}
 	return 0
 }
@@ -150,7 +150,7 @@
 	if b == 0 {
 		return 0
 	}
-	return gc.Bitno(b) + ppc64.REG_R0
+	return gc.Bitno(b) + mips.REG_R0
 }
 
 func BtoF(b uint64) int {
@@ -158,5 +158,5 @@
 	if b == 0 {
 		return 0
 	}
-	return gc.Bitno(b) + ppc64.REG_F0
+	return gc.Bitno(b) + mips.REG_F0
 }
diff --git a/src/cmd/compile/internal/ppc64/galign.go b/src/cmd/compile/internal/ppc64/galign.go
index 16509da..2bd49fd 100644
--- a/src/cmd/compile/internal/ppc64/galign.go
+++ b/src/cmd/compile/internal/ppc64/galign.go
@@ -43,6 +43,11 @@
 	gc.Widthptr = 8
 	gc.Widthint = 8
 	gc.Widthreg = 8
+
+	if gc.Ctxt.Flag_shared != 0 {
+		gc.Thearch.ReservedRegs = append(gc.Thearch.ReservedRegs, ppc64.REG_R2)
+		gc.Thearch.ReservedRegs = append(gc.Thearch.ReservedRegs, ppc64.REG_R12)
+	}
 }
 
 func Main() {
diff --git a/src/cmd/compile/internal/ppc64/ggen.go b/src/cmd/compile/internal/ppc64/ggen.go
index d0bdebb..28fcecf 100644
--- a/src/cmd/compile/internal/ppc64/ggen.go
+++ b/src/cmd/compile/internal/ppc64/ggen.go
@@ -127,7 +127,7 @@
  *	res = nl % nr
  * according to op.
  */
-func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+func dodiv(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 	// Have to be careful about handling
 	// most negative int divided by -1 correctly.
 	// The hardware will generate undefined result.
@@ -299,7 +299,7 @@
  *	res = nl << nr
  *	res = nl >> nr
  */
-func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 	a := int(optoas(op, nl.Type))
 
 	if nr.Op == gc.OLITERAL {
diff --git a/src/cmd/compile/internal/ppc64/gsubr.go b/src/cmd/compile/internal/ppc64/gsubr.go
index dde05c4..534ea62 100644
--- a/src/cmd/compile/internal/ppc64/gsubr.go
+++ b/src/cmd/compile/internal/ppc64/gsubr.go
@@ -117,7 +117,7 @@
 	gc.Regfree(&ntmp)
 }
 
-func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
+func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
 	if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && n2.Op != gc.OLITERAL {
 		// Reverse comparison to place constant last.
 		op = gc.Brrev(op)
@@ -580,6 +580,18 @@
 	case obj.ACALL:
 		if p.To.Type == obj.TYPE_REG && p.To.Reg != ppc64.REG_CTR {
 			// Allow front end to emit CALL REG, and rewrite into MOV REG, CTR; CALL CTR.
+			if gc.Ctxt.Flag_shared != 0 {
+				// Make sure function pointer is in R12 as well when
+				// compiling Go into PIC.
+				// TODO(mwhudson): it would obviously be better to
+				// change the register allocation to put the value in
+				// R12 already, but I don't know how to do that.
+				q := gc.Prog(as)
+				q.As = ppc64.AMOVD
+				q.From = p.To
+				q.To.Type = obj.TYPE_REG
+				q.To.Reg = ppc64.REG_R12
+			}
 			pp := gc.Prog(as)
 			pp.From = p.From
 			pp.To.Type = obj.TYPE_REG
@@ -590,6 +602,19 @@
 			p.To.Type = obj.TYPE_REG
 			p.To.Reg = ppc64.REG_CTR
 
+			if gc.Ctxt.Flag_shared != 0 {
+				// When compiling Go into PIC, the function we just
+				// called via pointer might have been implemented in
+				// a separate module and so overwritten the TOC
+				// pointer in R2; reload it.
+				q := gc.Prog(ppc64.AMOVD)
+				q.From.Type = obj.TYPE_MEM
+				q.From.Offset = 24
+				q.From.Reg = ppc64.REGSP
+				q.To.Type = obj.TYPE_REG
+				q.To.Reg = ppc64.REG_R2
+			}
+
 			if gc.Debug['g'] != 0 {
 				fmt.Printf("%v\n", p)
 				fmt.Printf("%v\n", pp)
@@ -655,229 +680,252 @@
 /*
  * return Axxx for Oxxx on type t.
  */
-func optoas(op int, t *gc.Type) int {
+func optoas(op gc.Op, t *gc.Type) int {
 	if t == nil {
 		gc.Fatalf("optoas: t is nil")
 	}
 
+	// avoid constant conversions in switches below
+	const (
+		OMINUS_ = uint32(gc.OMINUS) << 16
+		OLSH_   = uint32(gc.OLSH) << 16
+		ORSH_   = uint32(gc.ORSH) << 16
+		OADD_   = uint32(gc.OADD) << 16
+		OSUB_   = uint32(gc.OSUB) << 16
+		OMUL_   = uint32(gc.OMUL) << 16
+		ODIV_   = uint32(gc.ODIV) << 16
+		OOR_    = uint32(gc.OOR) << 16
+		OAND_   = uint32(gc.OAND) << 16
+		OXOR_   = uint32(gc.OXOR) << 16
+		OEQ_    = uint32(gc.OEQ) << 16
+		ONE_    = uint32(gc.ONE) << 16
+		OLT_    = uint32(gc.OLT) << 16
+		OLE_    = uint32(gc.OLE) << 16
+		OGE_    = uint32(gc.OGE) << 16
+		OGT_    = uint32(gc.OGT) << 16
+		OCMP_   = uint32(gc.OCMP) << 16
+		OAS_    = uint32(gc.OAS) << 16
+		OHMUL_  = uint32(gc.OHMUL) << 16
+	)
+
 	a := int(obj.AXXX)
 	switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
 	default:
 		gc.Fatalf("optoas: no entry for op=%v type=%v", gc.Oconv(int(op), 0), t)
 
-	case gc.OEQ<<16 | gc.TBOOL,
-		gc.OEQ<<16 | gc.TINT8,
-		gc.OEQ<<16 | gc.TUINT8,
-		gc.OEQ<<16 | gc.TINT16,
-		gc.OEQ<<16 | gc.TUINT16,
-		gc.OEQ<<16 | gc.TINT32,
-		gc.OEQ<<16 | gc.TUINT32,
-		gc.OEQ<<16 | gc.TINT64,
-		gc.OEQ<<16 | gc.TUINT64,
-		gc.OEQ<<16 | gc.TPTR32,
-		gc.OEQ<<16 | gc.TPTR64,
-		gc.OEQ<<16 | gc.TFLOAT32,
-		gc.OEQ<<16 | gc.TFLOAT64:
+	case OEQ_ | gc.TBOOL,
+		OEQ_ | gc.TINT8,
+		OEQ_ | gc.TUINT8,
+		OEQ_ | gc.TINT16,
+		OEQ_ | gc.TUINT16,
+		OEQ_ | gc.TINT32,
+		OEQ_ | gc.TUINT32,
+		OEQ_ | gc.TINT64,
+		OEQ_ | gc.TUINT64,
+		OEQ_ | gc.TPTR32,
+		OEQ_ | gc.TPTR64,
+		OEQ_ | gc.TFLOAT32,
+		OEQ_ | gc.TFLOAT64:
 		a = ppc64.ABEQ
 
-	case gc.ONE<<16 | gc.TBOOL,
-		gc.ONE<<16 | gc.TINT8,
-		gc.ONE<<16 | gc.TUINT8,
-		gc.ONE<<16 | gc.TINT16,
-		gc.ONE<<16 | gc.TUINT16,
-		gc.ONE<<16 | gc.TINT32,
-		gc.ONE<<16 | gc.TUINT32,
-		gc.ONE<<16 | gc.TINT64,
-		gc.ONE<<16 | gc.TUINT64,
-		gc.ONE<<16 | gc.TPTR32,
-		gc.ONE<<16 | gc.TPTR64,
-		gc.ONE<<16 | gc.TFLOAT32,
-		gc.ONE<<16 | gc.TFLOAT64:
+	case ONE_ | gc.TBOOL,
+		ONE_ | gc.TINT8,
+		ONE_ | gc.TUINT8,
+		ONE_ | gc.TINT16,
+		ONE_ | gc.TUINT16,
+		ONE_ | gc.TINT32,
+		ONE_ | gc.TUINT32,
+		ONE_ | gc.TINT64,
+		ONE_ | gc.TUINT64,
+		ONE_ | gc.TPTR32,
+		ONE_ | gc.TPTR64,
+		ONE_ | gc.TFLOAT32,
+		ONE_ | gc.TFLOAT64:
 		a = ppc64.ABNE
 
-	case gc.OLT<<16 | gc.TINT8, // ACMP
-		gc.OLT<<16 | gc.TINT16,
-		gc.OLT<<16 | gc.TINT32,
-		gc.OLT<<16 | gc.TINT64,
-		gc.OLT<<16 | gc.TUINT8,
+	case OLT_ | gc.TINT8, // ACMP
+		OLT_ | gc.TINT16,
+		OLT_ | gc.TINT32,
+		OLT_ | gc.TINT64,
+		OLT_ | gc.TUINT8,
 		// ACMPU
-		gc.OLT<<16 | gc.TUINT16,
-		gc.OLT<<16 | gc.TUINT32,
-		gc.OLT<<16 | gc.TUINT64,
-		gc.OLT<<16 | gc.TFLOAT32,
+		OLT_ | gc.TUINT16,
+		OLT_ | gc.TUINT32,
+		OLT_ | gc.TUINT64,
+		OLT_ | gc.TFLOAT32,
 		// AFCMPU
-		gc.OLT<<16 | gc.TFLOAT64:
+		OLT_ | gc.TFLOAT64:
 		a = ppc64.ABLT
 
-	case gc.OLE<<16 | gc.TINT8, // ACMP
-		gc.OLE<<16 | gc.TINT16,
-		gc.OLE<<16 | gc.TINT32,
-		gc.OLE<<16 | gc.TINT64,
-		gc.OLE<<16 | gc.TUINT8,
+	case OLE_ | gc.TINT8, // ACMP
+		OLE_ | gc.TINT16,
+		OLE_ | gc.TINT32,
+		OLE_ | gc.TINT64,
+		OLE_ | gc.TUINT8,
 		// ACMPU
-		gc.OLE<<16 | gc.TUINT16,
-		gc.OLE<<16 | gc.TUINT32,
-		gc.OLE<<16 | gc.TUINT64:
+		OLE_ | gc.TUINT16,
+		OLE_ | gc.TUINT32,
+		OLE_ | gc.TUINT64:
 		// No OLE for floats, because it mishandles NaN.
 		// Front end must reverse comparison or use OLT and OEQ together.
 		a = ppc64.ABLE
 
-	case gc.OGT<<16 | gc.TINT8,
-		gc.OGT<<16 | gc.TINT16,
-		gc.OGT<<16 | gc.TINT32,
-		gc.OGT<<16 | gc.TINT64,
-		gc.OGT<<16 | gc.TUINT8,
-		gc.OGT<<16 | gc.TUINT16,
-		gc.OGT<<16 | gc.TUINT32,
-		gc.OGT<<16 | gc.TUINT64,
-		gc.OGT<<16 | gc.TFLOAT32,
-		gc.OGT<<16 | gc.TFLOAT64:
+	case OGT_ | gc.TINT8,
+		OGT_ | gc.TINT16,
+		OGT_ | gc.TINT32,
+		OGT_ | gc.TINT64,
+		OGT_ | gc.TUINT8,
+		OGT_ | gc.TUINT16,
+		OGT_ | gc.TUINT32,
+		OGT_ | gc.TUINT64,
+		OGT_ | gc.TFLOAT32,
+		OGT_ | gc.TFLOAT64:
 		a = ppc64.ABGT
 
-	case gc.OGE<<16 | gc.TINT8,
-		gc.OGE<<16 | gc.TINT16,
-		gc.OGE<<16 | gc.TINT32,
-		gc.OGE<<16 | gc.TINT64,
-		gc.OGE<<16 | gc.TUINT8,
-		gc.OGE<<16 | gc.TUINT16,
-		gc.OGE<<16 | gc.TUINT32,
-		gc.OGE<<16 | gc.TUINT64:
+	case OGE_ | gc.TINT8,
+		OGE_ | gc.TINT16,
+		OGE_ | gc.TINT32,
+		OGE_ | gc.TINT64,
+		OGE_ | gc.TUINT8,
+		OGE_ | gc.TUINT16,
+		OGE_ | gc.TUINT32,
+		OGE_ | gc.TUINT64:
 		// No OGE for floats, because it mishandles NaN.
 		// Front end must reverse comparison or use OLT and OEQ together.
 		a = ppc64.ABGE
 
-	case gc.OCMP<<16 | gc.TBOOL,
-		gc.OCMP<<16 | gc.TINT8,
-		gc.OCMP<<16 | gc.TINT16,
-		gc.OCMP<<16 | gc.TINT32,
-		gc.OCMP<<16 | gc.TPTR32,
-		gc.OCMP<<16 | gc.TINT64:
+	case OCMP_ | gc.TBOOL,
+		OCMP_ | gc.TINT8,
+		OCMP_ | gc.TINT16,
+		OCMP_ | gc.TINT32,
+		OCMP_ | gc.TPTR32,
+		OCMP_ | gc.TINT64:
 		a = ppc64.ACMP
 
-	case gc.OCMP<<16 | gc.TUINT8,
-		gc.OCMP<<16 | gc.TUINT16,
-		gc.OCMP<<16 | gc.TUINT32,
-		gc.OCMP<<16 | gc.TUINT64,
-		gc.OCMP<<16 | gc.TPTR64:
+	case OCMP_ | gc.TUINT8,
+		OCMP_ | gc.TUINT16,
+		OCMP_ | gc.TUINT32,
+		OCMP_ | gc.TUINT64,
+		OCMP_ | gc.TPTR64:
 		a = ppc64.ACMPU
 
-	case gc.OCMP<<16 | gc.TFLOAT32,
-		gc.OCMP<<16 | gc.TFLOAT64:
+	case OCMP_ | gc.TFLOAT32,
+		OCMP_ | gc.TFLOAT64:
 		a = ppc64.AFCMPU
 
-	case gc.OAS<<16 | gc.TBOOL,
-		gc.OAS<<16 | gc.TINT8:
+	case OAS_ | gc.TBOOL,
+		OAS_ | gc.TINT8:
 		a = ppc64.AMOVB
 
-	case gc.OAS<<16 | gc.TUINT8:
+	case OAS_ | gc.TUINT8:
 		a = ppc64.AMOVBZ
 
-	case gc.OAS<<16 | gc.TINT16:
+	case OAS_ | gc.TINT16:
 		a = ppc64.AMOVH
 
-	case gc.OAS<<16 | gc.TUINT16:
+	case OAS_ | gc.TUINT16:
 		a = ppc64.AMOVHZ
 
-	case gc.OAS<<16 | gc.TINT32:
+	case OAS_ | gc.TINT32:
 		a = ppc64.AMOVW
 
-	case gc.OAS<<16 | gc.TUINT32,
-		gc.OAS<<16 | gc.TPTR32:
+	case OAS_ | gc.TUINT32,
+		OAS_ | gc.TPTR32:
 		a = ppc64.AMOVWZ
 
-	case gc.OAS<<16 | gc.TINT64,
-		gc.OAS<<16 | gc.TUINT64,
-		gc.OAS<<16 | gc.TPTR64:
+	case OAS_ | gc.TINT64,
+		OAS_ | gc.TUINT64,
+		OAS_ | gc.TPTR64:
 		a = ppc64.AMOVD
 
-	case gc.OAS<<16 | gc.TFLOAT32:
+	case OAS_ | gc.TFLOAT32:
 		a = ppc64.AFMOVS
 
-	case gc.OAS<<16 | gc.TFLOAT64:
+	case OAS_ | gc.TFLOAT64:
 		a = ppc64.AFMOVD
 
-	case gc.OADD<<16 | gc.TINT8,
-		gc.OADD<<16 | gc.TUINT8,
-		gc.OADD<<16 | gc.TINT16,
-		gc.OADD<<16 | gc.TUINT16,
-		gc.OADD<<16 | gc.TINT32,
-		gc.OADD<<16 | gc.TUINT32,
-		gc.OADD<<16 | gc.TPTR32,
-		gc.OADD<<16 | gc.TINT64,
-		gc.OADD<<16 | gc.TUINT64,
-		gc.OADD<<16 | gc.TPTR64:
+	case OADD_ | gc.TINT8,
+		OADD_ | gc.TUINT8,
+		OADD_ | gc.TINT16,
+		OADD_ | gc.TUINT16,
+		OADD_ | gc.TINT32,
+		OADD_ | gc.TUINT32,
+		OADD_ | gc.TPTR32,
+		OADD_ | gc.TINT64,
+		OADD_ | gc.TUINT64,
+		OADD_ | gc.TPTR64:
 		a = ppc64.AADD
 
-	case gc.OADD<<16 | gc.TFLOAT32:
+	case OADD_ | gc.TFLOAT32:
 		a = ppc64.AFADDS
 
-	case gc.OADD<<16 | gc.TFLOAT64:
+	case OADD_ | gc.TFLOAT64:
 		a = ppc64.AFADD
 
-	case gc.OSUB<<16 | gc.TINT8,
-		gc.OSUB<<16 | gc.TUINT8,
-		gc.OSUB<<16 | gc.TINT16,
-		gc.OSUB<<16 | gc.TUINT16,
-		gc.OSUB<<16 | gc.TINT32,
-		gc.OSUB<<16 | gc.TUINT32,
-		gc.OSUB<<16 | gc.TPTR32,
-		gc.OSUB<<16 | gc.TINT64,
-		gc.OSUB<<16 | gc.TUINT64,
-		gc.OSUB<<16 | gc.TPTR64:
+	case OSUB_ | gc.TINT8,
+		OSUB_ | gc.TUINT8,
+		OSUB_ | gc.TINT16,
+		OSUB_ | gc.TUINT16,
+		OSUB_ | gc.TINT32,
+		OSUB_ | gc.TUINT32,
+		OSUB_ | gc.TPTR32,
+		OSUB_ | gc.TINT64,
+		OSUB_ | gc.TUINT64,
+		OSUB_ | gc.TPTR64:
 		a = ppc64.ASUB
 
-	case gc.OSUB<<16 | gc.TFLOAT32:
+	case OSUB_ | gc.TFLOAT32:
 		a = ppc64.AFSUBS
 
-	case gc.OSUB<<16 | gc.TFLOAT64:
+	case OSUB_ | gc.TFLOAT64:
 		a = ppc64.AFSUB
 
-	case gc.OMINUS<<16 | gc.TINT8,
-		gc.OMINUS<<16 | gc.TUINT8,
-		gc.OMINUS<<16 | gc.TINT16,
-		gc.OMINUS<<16 | gc.TUINT16,
-		gc.OMINUS<<16 | gc.TINT32,
-		gc.OMINUS<<16 | gc.TUINT32,
-		gc.OMINUS<<16 | gc.TPTR32,
-		gc.OMINUS<<16 | gc.TINT64,
-		gc.OMINUS<<16 | gc.TUINT64,
-		gc.OMINUS<<16 | gc.TPTR64:
+	case OMINUS_ | gc.TINT8,
+		OMINUS_ | gc.TUINT8,
+		OMINUS_ | gc.TINT16,
+		OMINUS_ | gc.TUINT16,
+		OMINUS_ | gc.TINT32,
+		OMINUS_ | gc.TUINT32,
+		OMINUS_ | gc.TPTR32,
+		OMINUS_ | gc.TINT64,
+		OMINUS_ | gc.TUINT64,
+		OMINUS_ | gc.TPTR64:
 		a = ppc64.ANEG
 
-	case gc.OAND<<16 | gc.TINT8,
-		gc.OAND<<16 | gc.TUINT8,
-		gc.OAND<<16 | gc.TINT16,
-		gc.OAND<<16 | gc.TUINT16,
-		gc.OAND<<16 | gc.TINT32,
-		gc.OAND<<16 | gc.TUINT32,
-		gc.OAND<<16 | gc.TPTR32,
-		gc.OAND<<16 | gc.TINT64,
-		gc.OAND<<16 | gc.TUINT64,
-		gc.OAND<<16 | gc.TPTR64:
+	case OAND_ | gc.TINT8,
+		OAND_ | gc.TUINT8,
+		OAND_ | gc.TINT16,
+		OAND_ | gc.TUINT16,
+		OAND_ | gc.TINT32,
+		OAND_ | gc.TUINT32,
+		OAND_ | gc.TPTR32,
+		OAND_ | gc.TINT64,
+		OAND_ | gc.TUINT64,
+		OAND_ | gc.TPTR64:
 		a = ppc64.AAND
 
-	case gc.OOR<<16 | gc.TINT8,
-		gc.OOR<<16 | gc.TUINT8,
-		gc.OOR<<16 | gc.TINT16,
-		gc.OOR<<16 | gc.TUINT16,
-		gc.OOR<<16 | gc.TINT32,
-		gc.OOR<<16 | gc.TUINT32,
-		gc.OOR<<16 | gc.TPTR32,
-		gc.OOR<<16 | gc.TINT64,
-		gc.OOR<<16 | gc.TUINT64,
-		gc.OOR<<16 | gc.TPTR64:
+	case OOR_ | gc.TINT8,
+		OOR_ | gc.TUINT8,
+		OOR_ | gc.TINT16,
+		OOR_ | gc.TUINT16,
+		OOR_ | gc.TINT32,
+		OOR_ | gc.TUINT32,
+		OOR_ | gc.TPTR32,
+		OOR_ | gc.TINT64,
+		OOR_ | gc.TUINT64,
+		OOR_ | gc.TPTR64:
 		a = ppc64.AOR
 
-	case gc.OXOR<<16 | gc.TINT8,
-		gc.OXOR<<16 | gc.TUINT8,
-		gc.OXOR<<16 | gc.TINT16,
-		gc.OXOR<<16 | gc.TUINT16,
-		gc.OXOR<<16 | gc.TINT32,
-		gc.OXOR<<16 | gc.TUINT32,
-		gc.OXOR<<16 | gc.TPTR32,
-		gc.OXOR<<16 | gc.TINT64,
-		gc.OXOR<<16 | gc.TUINT64,
-		gc.OXOR<<16 | gc.TPTR64:
+	case OXOR_ | gc.TINT8,
+		OXOR_ | gc.TUINT8,
+		OXOR_ | gc.TINT16,
+		OXOR_ | gc.TUINT16,
+		OXOR_ | gc.TINT32,
+		OXOR_ | gc.TUINT32,
+		OXOR_ | gc.TPTR32,
+		OXOR_ | gc.TINT64,
+		OXOR_ | gc.TUINT64,
+		OXOR_ | gc.TPTR64:
 		a = ppc64.AXOR
 
 		// TODO(minux): handle rotates
@@ -894,30 +942,30 @@
 	//	a = 0//???; RLDC?
 	//	break;
 
-	case gc.OLSH<<16 | gc.TINT8,
-		gc.OLSH<<16 | gc.TUINT8,
-		gc.OLSH<<16 | gc.TINT16,
-		gc.OLSH<<16 | gc.TUINT16,
-		gc.OLSH<<16 | gc.TINT32,
-		gc.OLSH<<16 | gc.TUINT32,
-		gc.OLSH<<16 | gc.TPTR32,
-		gc.OLSH<<16 | gc.TINT64,
-		gc.OLSH<<16 | gc.TUINT64,
-		gc.OLSH<<16 | gc.TPTR64:
+	case OLSH_ | gc.TINT8,
+		OLSH_ | gc.TUINT8,
+		OLSH_ | gc.TINT16,
+		OLSH_ | gc.TUINT16,
+		OLSH_ | gc.TINT32,
+		OLSH_ | gc.TUINT32,
+		OLSH_ | gc.TPTR32,
+		OLSH_ | gc.TINT64,
+		OLSH_ | gc.TUINT64,
+		OLSH_ | gc.TPTR64:
 		a = ppc64.ASLD
 
-	case gc.ORSH<<16 | gc.TUINT8,
-		gc.ORSH<<16 | gc.TUINT16,
-		gc.ORSH<<16 | gc.TUINT32,
-		gc.ORSH<<16 | gc.TPTR32,
-		gc.ORSH<<16 | gc.TUINT64,
-		gc.ORSH<<16 | gc.TPTR64:
+	case ORSH_ | gc.TUINT8,
+		ORSH_ | gc.TUINT16,
+		ORSH_ | gc.TUINT32,
+		ORSH_ | gc.TPTR32,
+		ORSH_ | gc.TUINT64,
+		ORSH_ | gc.TPTR64:
 		a = ppc64.ASRD
 
-	case gc.ORSH<<16 | gc.TINT8,
-		gc.ORSH<<16 | gc.TINT16,
-		gc.ORSH<<16 | gc.TINT32,
-		gc.ORSH<<16 | gc.TINT64:
+	case ORSH_ | gc.TINT8,
+		ORSH_ | gc.TINT16,
+		ORSH_ | gc.TINT32,
+		ORSH_ | gc.TINT64:
 		a = ppc64.ASRAD
 
 		// TODO(minux): handle rotates
@@ -932,53 +980,53 @@
 	//	a = 0//??? RLDC??
 	//	break;
 
-	case gc.OHMUL<<16 | gc.TINT64:
+	case OHMUL_ | gc.TINT64:
 		a = ppc64.AMULHD
 
-	case gc.OHMUL<<16 | gc.TUINT64,
-		gc.OHMUL<<16 | gc.TPTR64:
+	case OHMUL_ | gc.TUINT64,
+		OHMUL_ | gc.TPTR64:
 		a = ppc64.AMULHDU
 
-	case gc.OMUL<<16 | gc.TINT8,
-		gc.OMUL<<16 | gc.TINT16,
-		gc.OMUL<<16 | gc.TINT32,
-		gc.OMUL<<16 | gc.TINT64:
+	case OMUL_ | gc.TINT8,
+		OMUL_ | gc.TINT16,
+		OMUL_ | gc.TINT32,
+		OMUL_ | gc.TINT64:
 		a = ppc64.AMULLD
 
-	case gc.OMUL<<16 | gc.TUINT8,
-		gc.OMUL<<16 | gc.TUINT16,
-		gc.OMUL<<16 | gc.TUINT32,
-		gc.OMUL<<16 | gc.TPTR32,
+	case OMUL_ | gc.TUINT8,
+		OMUL_ | gc.TUINT16,
+		OMUL_ | gc.TUINT32,
+		OMUL_ | gc.TPTR32,
 		// don't use word multiply, the high 32-bit are undefined.
-		gc.OMUL<<16 | gc.TUINT64,
-		gc.OMUL<<16 | gc.TPTR64:
+		OMUL_ | gc.TUINT64,
+		OMUL_ | gc.TPTR64:
 		// for 64-bit multiplies, signedness doesn't matter.
 		a = ppc64.AMULLD
 
-	case gc.OMUL<<16 | gc.TFLOAT32:
+	case OMUL_ | gc.TFLOAT32:
 		a = ppc64.AFMULS
 
-	case gc.OMUL<<16 | gc.TFLOAT64:
+	case OMUL_ | gc.TFLOAT64:
 		a = ppc64.AFMUL
 
-	case gc.ODIV<<16 | gc.TINT8,
-		gc.ODIV<<16 | gc.TINT16,
-		gc.ODIV<<16 | gc.TINT32,
-		gc.ODIV<<16 | gc.TINT64:
+	case ODIV_ | gc.TINT8,
+		ODIV_ | gc.TINT16,
+		ODIV_ | gc.TINT32,
+		ODIV_ | gc.TINT64:
 		a = ppc64.ADIVD
 
-	case gc.ODIV<<16 | gc.TUINT8,
-		gc.ODIV<<16 | gc.TUINT16,
-		gc.ODIV<<16 | gc.TUINT32,
-		gc.ODIV<<16 | gc.TPTR32,
-		gc.ODIV<<16 | gc.TUINT64,
-		gc.ODIV<<16 | gc.TPTR64:
+	case ODIV_ | gc.TUINT8,
+		ODIV_ | gc.TUINT16,
+		ODIV_ | gc.TUINT32,
+		ODIV_ | gc.TPTR32,
+		ODIV_ | gc.TUINT64,
+		ODIV_ | gc.TPTR64:
 		a = ppc64.ADIVDU
 
-	case gc.ODIV<<16 | gc.TFLOAT32:
+	case ODIV_ | gc.TFLOAT32:
 		a = ppc64.AFDIVS
 
-	case gc.ODIV<<16 | gc.TFLOAT64:
+	case ODIV_ | gc.TFLOAT64:
 		a = ppc64.AFDIV
 	}
 
diff --git a/src/cmd/compile/internal/ppc64/reg.go b/src/cmd/compile/internal/ppc64/reg.go
index a301836..da3f34a 100644
--- a/src/cmd/compile/internal/ppc64/reg.go
+++ b/src/cmd/compile/internal/ppc64/reg.go
@@ -113,6 +113,12 @@
 	// Exclude registers with fixed functions
 	regbits := uint64(1<<0 | RtoB(ppc64.REGSP) | RtoB(ppc64.REGG) | RtoB(ppc64.REGTLS) | RtoB(ppc64.REGTMP))
 
+	if gc.Ctxt.Flag_shared != 0 {
+		// When compiling Go into PIC, R2 is reserved to be the TOC pointer
+		// and R12 so that calls via function pointer can stomp on it.
+		regbits |= RtoB(ppc64.REG_R2)
+		regbits |= RtoB(ppc64.REG_R12)
+	}
 	// Also exclude floating point registers with fixed constants
 	regbits |= RtoB(ppc64.REG_F27) | RtoB(ppc64.REG_F28) | RtoB(ppc64.REG_F29) | RtoB(ppc64.REG_F30) | RtoB(ppc64.REG_F31)
 
diff --git a/src/cmd/compile/internal/x86/cgen64.go b/src/cmd/compile/internal/x86/cgen64.go
index f1e570d..7e40a32 100644
--- a/src/cmd/compile/internal/x86/cgen64.go
+++ b/src/cmd/compile/internal/x86/cgen64.go
@@ -113,19 +113,18 @@
 		gins(x86.ASUBL, &lo2, &ax)
 		gins(x86.ASBBL, &hi2, &dx)
 
-		// let's call the next two EX and FX.
 	case gc.OMUL:
-		var ex gc.Node
+		// let's call the next three EX, FX and GX
+		var ex, fx, gx gc.Node
 		gc.Regalloc(&ex, gc.Types[gc.TPTR32], nil)
-
-		var fx gc.Node
 		gc.Regalloc(&fx, gc.Types[gc.TPTR32], nil)
+		gc.Regalloc(&gx, gc.Types[gc.TPTR32], nil)
 
-		// load args into DX:AX and EX:CX.
+		// load args into DX:AX and EX:GX.
 		gins(x86.AMOVL, &lo1, &ax)
 
 		gins(x86.AMOVL, &hi1, &dx)
-		gins(x86.AMOVL, &lo2, &cx)
+		gins(x86.AMOVL, &lo2, &gx)
 		gins(x86.AMOVL, &hi2, &ex)
 
 		// if DX and EX are zero, use 32 x 32 -> 64 unsigned multiply.
@@ -133,25 +132,26 @@
 
 		gins(x86.AORL, &ex, &fx)
 		p1 := gc.Gbranch(x86.AJNE, nil, 0)
-		gins(x86.AMULL, &cx, nil) // implicit &ax
+		gins(x86.AMULL, &gx, nil) // implicit &ax
 		p2 := gc.Gbranch(obj.AJMP, nil, 0)
 		gc.Patch(p1, gc.Pc)
 
 		// full 64x64 -> 64, from 32x32 -> 64.
-		gins(x86.AIMULL, &cx, &dx)
+		gins(x86.AIMULL, &gx, &dx)
 
 		gins(x86.AMOVL, &ax, &fx)
 		gins(x86.AIMULL, &ex, &fx)
 		gins(x86.AADDL, &dx, &fx)
-		gins(x86.AMOVL, &cx, &dx)
+		gins(x86.AMOVL, &gx, &dx)
 		gins(x86.AMULL, &dx, nil) // implicit &ax
 		gins(x86.AADDL, &fx, &dx)
 		gc.Patch(p2, gc.Pc)
 
 		gc.Regfree(&ex)
 		gc.Regfree(&fx)
+		gc.Regfree(&gx)
 
-		// We only rotate by a constant c in [0,64).
+	// We only rotate by a constant c in [0,64).
 	// if c >= 32:
 	//	lo, hi = hi, lo
 	//	c -= 32
@@ -486,8 +486,8 @@
 
 		gins(x86.AMOVL, &lo1, &ax)
 		gins(x86.AMOVL, &hi1, &dx)
-		gins(optoas(int(n.Op), lo1.Type), &lo2, &ax)
-		gins(optoas(int(n.Op), lo1.Type), &hi2, &dx)
+		gins(optoas(n.Op, lo1.Type), &lo2, &ax)
+		gins(optoas(n.Op, lo1.Type), &hi2, &dx)
 	}
 
 	if gc.Is64(r.Type) {
@@ -505,7 +505,7 @@
  * generate comparison of nl, nr, both 64-bit.
  * nl is memory; nr is constant or memory.
  */
-func cmp64(nl *gc.Node, nr *gc.Node, op int, likely int, to *obj.Prog) {
+func cmp64(nl *gc.Node, nr *gc.Node, op gc.Op, likely int, to *obj.Prog) {
 	var lo1 gc.Node
 	var hi1 gc.Node
 	var lo2 gc.Node
diff --git a/src/cmd/compile/internal/x86/ggen.go b/src/cmd/compile/internal/x86/ggen.go
index 85ae808..4e72dcb 100644
--- a/src/cmd/compile/internal/x86/ggen.go
+++ b/src/cmd/compile/internal/x86/ggen.go
@@ -191,7 +191,7 @@
  *	res = nl % nr
  * according to op.
  */
-func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node, ax *gc.Node, dx *gc.Node) {
+func dodiv(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node, ax *gc.Node, dx *gc.Node) {
 	// Have to be careful about handling
 	// most negative int divided by -1 correctly.
 	// The hardware will trap.
@@ -338,7 +338,7 @@
  *	res = nl / nr
  *	res = nl % nr
  */
-func cgen_div(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+func cgen_div(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 	if gc.Is64(nl.Type) {
 		gc.Fatalf("cgen_div %v", nl.Type)
 	}
@@ -365,7 +365,7 @@
  *	res = nl << nr
  *	res = nl >> nr
  */
-func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 	if nl.Type.Width > 4 {
 		gc.Fatalf("cgen_shift %v", nl.Type)
 	}
@@ -489,7 +489,7 @@
  * there is no 2-operand byte multiply instruction so
  * we do a full-width multiplication and truncate afterwards.
  */
-func cgen_bmul(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) bool {
+func cgen_bmul(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) bool {
 	if optoas(op, nl.Type) != x86.AIMULB {
 		return false
 	}
@@ -628,18 +628,18 @@
 		if nl.Ullman >= nr.Ullman {
 			gc.Cgen(nl, &f0)
 			if nr.Addable {
-				gins(foptoas(int(n.Op), n.Type, 0), nr, &f0)
+				gins(foptoas(n.Op, n.Type, 0), nr, &f0)
 			} else {
 				gc.Cgen(nr, &f0)
-				gins(foptoas(int(n.Op), n.Type, Fpop), &f0, &f1)
+				gins(foptoas(n.Op, n.Type, Fpop), &f0, &f1)
 			}
 		} else {
 			gc.Cgen(nr, &f0)
 			if nl.Addable {
-				gins(foptoas(int(n.Op), n.Type, Frev), nl, &f0)
+				gins(foptoas(n.Op, n.Type, Frev), nl, &f0)
 			} else {
 				gc.Cgen(nl, &f0)
-				gins(foptoas(int(n.Op), n.Type, Frev|Fpop), &f0, &f1)
+				gins(foptoas(n.Op, n.Type, Frev|Fpop), &f0, &f1)
 			}
 		}
 
@@ -651,7 +651,7 @@
 	gc.Cgen(nl, &f0)
 
 	if n.Op != gc.OCONV && n.Op != gc.OPLUS {
-		gins(foptoas(int(n.Op), n.Type, 0), nil, nil)
+		gins(foptoas(n.Op, n.Type, 0), nil, nil)
 	}
 	gmove(&f0, res)
 	return
@@ -678,7 +678,7 @@
 		// symmetric binary
 	case gc.OADD,
 		gc.OMUL:
-		a = foptoas(int(n.Op), nl.Type, 0)
+		a = foptoas(n.Op, nl.Type, 0)
 
 		goto sbop
 
@@ -686,7 +686,7 @@
 	case gc.OSUB,
 		gc.OMOD,
 		gc.ODIV:
-		a = foptoas(int(n.Op), nl.Type, 0)
+		a = foptoas(n.Op, nl.Type, 0)
 
 		goto abop
 	}
@@ -729,7 +729,7 @@
 func bgen_float(n *gc.Node, wantTrue bool, likely int, to *obj.Prog) {
 	nl := n.Left
 	nr := n.Right
-	a := int(n.Op)
+	op := n.Op
 	if !wantTrue {
 		// brcom is not valid on floats when NaN is involved.
 		p1 := gc.Gbranch(obj.AJMP, nil, 0)
@@ -745,11 +745,11 @@
 	}
 
 	if gc.Thearch.Use387 {
-		a = gc.Brrev(a) // because the args are stacked
-		if a == gc.OGE || a == gc.OGT {
+		op = gc.Brrev(op) // because the args are stacked
+		if op == gc.OGE || op == gc.OGT {
 			// only < and <= work right with NaN; reverse if needed
 			nl, nr = nr, nl
-			a = gc.Brrev(a)
+			op = gc.Brrev(op)
 		}
 
 		var ax, n2, tmp gc.Node
@@ -808,10 +808,10 @@
 			nl = &n3
 		}
 
-		if a == gc.OGE || a == gc.OGT {
-			// only < and <= work right with NaN; reverse if needed
+		if op == gc.OGE || op == gc.OGT {
+			// only < and <= work right with NaN; reverse if needed
 			nl, nr = nr, nl
-			a = gc.Brrev(a)
+			op = gc.Brrev(op)
 		}
 
 		gins(foptoas(gc.OCMP, nr.Type, 0), nl, nr)
@@ -821,7 +821,7 @@
 		gc.Regfree(nr)
 	}
 
-	switch a {
+	switch op {
 	case gc.OEQ:
 		// neither NE nor P
 		p1 := gc.Gbranch(x86.AJNE, nil, -likely)
@@ -834,7 +834,7 @@
 		gc.Patch(gc.Gbranch(x86.AJNE, nil, likely), to)
 		gc.Patch(gc.Gbranch(x86.AJPS, nil, likely), to)
 	default:
-		gc.Patch(gc.Gbranch(optoas(a, nr.Type), nil, likely), to)
+		gc.Patch(gc.Gbranch(optoas(op, nr.Type), nil, likely), to)
 	}
 }
 
diff --git a/src/cmd/compile/internal/x86/gsubr.go b/src/cmd/compile/internal/x86/gsubr.go
index f57bbcb..5127bb2 100644
--- a/src/cmd/compile/internal/x86/gsubr.go
+++ b/src/cmd/compile/internal/x86/gsubr.go
@@ -53,402 +53,443 @@
 /*
  * return Axxx for Oxxx on type t.
  */
-func optoas(op int, t *gc.Type) int {
+func optoas(op gc.Op, t *gc.Type) int {
 	if t == nil {
 		gc.Fatalf("optoas: t is nil")
 	}
 
+	// avoid constant conversions in switches below
+	const (
+		OMINUS_  = uint32(gc.OMINUS) << 16
+		OLSH_    = uint32(gc.OLSH) << 16
+		ORSH_    = uint32(gc.ORSH) << 16
+		OADD_    = uint32(gc.OADD) << 16
+		OSUB_    = uint32(gc.OSUB) << 16
+		OMUL_    = uint32(gc.OMUL) << 16
+		ODIV_    = uint32(gc.ODIV) << 16
+		OMOD_    = uint32(gc.OMOD) << 16
+		OOR_     = uint32(gc.OOR) << 16
+		OAND_    = uint32(gc.OAND) << 16
+		OXOR_    = uint32(gc.OXOR) << 16
+		OEQ_     = uint32(gc.OEQ) << 16
+		ONE_     = uint32(gc.ONE) << 16
+		OLT_     = uint32(gc.OLT) << 16
+		OLE_     = uint32(gc.OLE) << 16
+		OGE_     = uint32(gc.OGE) << 16
+		OGT_     = uint32(gc.OGT) << 16
+		OCMP_    = uint32(gc.OCMP) << 16
+		OAS_     = uint32(gc.OAS) << 16
+		OHMUL_   = uint32(gc.OHMUL) << 16
+		OADDR_   = uint32(gc.OADDR) << 16
+		OINC_    = uint32(gc.OINC) << 16
+		ODEC_    = uint32(gc.ODEC) << 16
+		OLROT_   = uint32(gc.OLROT) << 16
+		OEXTEND_ = uint32(gc.OEXTEND) << 16
+		OCOM_    = uint32(gc.OCOM) << 16
+	)
+
 	a := obj.AXXX
 	switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
 	default:
 		gc.Fatalf("optoas: no entry %v-%v", gc.Oconv(int(op), 0), t)
 
-	case gc.OADDR<<16 | gc.TPTR32:
+	case OADDR_ | gc.TPTR32:
 		a = x86.ALEAL
 
-	case gc.OEQ<<16 | gc.TBOOL,
-		gc.OEQ<<16 | gc.TINT8,
-		gc.OEQ<<16 | gc.TUINT8,
-		gc.OEQ<<16 | gc.TINT16,
-		gc.OEQ<<16 | gc.TUINT16,
-		gc.OEQ<<16 | gc.TINT32,
-		gc.OEQ<<16 | gc.TUINT32,
-		gc.OEQ<<16 | gc.TINT64,
-		gc.OEQ<<16 | gc.TUINT64,
-		gc.OEQ<<16 | gc.TPTR32,
-		gc.OEQ<<16 | gc.TPTR64,
-		gc.OEQ<<16 | gc.TFLOAT32,
-		gc.OEQ<<16 | gc.TFLOAT64:
+	case OEQ_ | gc.TBOOL,
+		OEQ_ | gc.TINT8,
+		OEQ_ | gc.TUINT8,
+		OEQ_ | gc.TINT16,
+		OEQ_ | gc.TUINT16,
+		OEQ_ | gc.TINT32,
+		OEQ_ | gc.TUINT32,
+		OEQ_ | gc.TINT64,
+		OEQ_ | gc.TUINT64,
+		OEQ_ | gc.TPTR32,
+		OEQ_ | gc.TPTR64,
+		OEQ_ | gc.TFLOAT32,
+		OEQ_ | gc.TFLOAT64:
 		a = x86.AJEQ
 
-	case gc.ONE<<16 | gc.TBOOL,
-		gc.ONE<<16 | gc.TINT8,
-		gc.ONE<<16 | gc.TUINT8,
-		gc.ONE<<16 | gc.TINT16,
-		gc.ONE<<16 | gc.TUINT16,
-		gc.ONE<<16 | gc.TINT32,
-		gc.ONE<<16 | gc.TUINT32,
-		gc.ONE<<16 | gc.TINT64,
-		gc.ONE<<16 | gc.TUINT64,
-		gc.ONE<<16 | gc.TPTR32,
-		gc.ONE<<16 | gc.TPTR64,
-		gc.ONE<<16 | gc.TFLOAT32,
-		gc.ONE<<16 | gc.TFLOAT64:
+	case ONE_ | gc.TBOOL,
+		ONE_ | gc.TINT8,
+		ONE_ | gc.TUINT8,
+		ONE_ | gc.TINT16,
+		ONE_ | gc.TUINT16,
+		ONE_ | gc.TINT32,
+		ONE_ | gc.TUINT32,
+		ONE_ | gc.TINT64,
+		ONE_ | gc.TUINT64,
+		ONE_ | gc.TPTR32,
+		ONE_ | gc.TPTR64,
+		ONE_ | gc.TFLOAT32,
+		ONE_ | gc.TFLOAT64:
 		a = x86.AJNE
 
-	case gc.OLT<<16 | gc.TINT8,
-		gc.OLT<<16 | gc.TINT16,
-		gc.OLT<<16 | gc.TINT32,
-		gc.OLT<<16 | gc.TINT64:
+	case OLT_ | gc.TINT8,
+		OLT_ | gc.TINT16,
+		OLT_ | gc.TINT32,
+		OLT_ | gc.TINT64:
 		a = x86.AJLT
 
-	case gc.OLT<<16 | gc.TUINT8,
-		gc.OLT<<16 | gc.TUINT16,
-		gc.OLT<<16 | gc.TUINT32,
-		gc.OLT<<16 | gc.TUINT64:
+	case OLT_ | gc.TUINT8,
+		OLT_ | gc.TUINT16,
+		OLT_ | gc.TUINT32,
+		OLT_ | gc.TUINT64:
 		a = x86.AJCS
 
-	case gc.OLE<<16 | gc.TINT8,
-		gc.OLE<<16 | gc.TINT16,
-		gc.OLE<<16 | gc.TINT32,
-		gc.OLE<<16 | gc.TINT64:
+	case OLE_ | gc.TINT8,
+		OLE_ | gc.TINT16,
+		OLE_ | gc.TINT32,
+		OLE_ | gc.TINT64:
 		a = x86.AJLE
 
-	case gc.OLE<<16 | gc.TUINT8,
-		gc.OLE<<16 | gc.TUINT16,
-		gc.OLE<<16 | gc.TUINT32,
-		gc.OLE<<16 | gc.TUINT64:
+	case OLE_ | gc.TUINT8,
+		OLE_ | gc.TUINT16,
+		OLE_ | gc.TUINT32,
+		OLE_ | gc.TUINT64:
 		a = x86.AJLS
 
-	case gc.OGT<<16 | gc.TINT8,
-		gc.OGT<<16 | gc.TINT16,
-		gc.OGT<<16 | gc.TINT32,
-		gc.OGT<<16 | gc.TINT64:
+	case OGT_ | gc.TINT8,
+		OGT_ | gc.TINT16,
+		OGT_ | gc.TINT32,
+		OGT_ | gc.TINT64:
 		a = x86.AJGT
 
-	case gc.OGT<<16 | gc.TUINT8,
-		gc.OGT<<16 | gc.TUINT16,
-		gc.OGT<<16 | gc.TUINT32,
-		gc.OGT<<16 | gc.TUINT64,
-		gc.OLT<<16 | gc.TFLOAT32,
-		gc.OLT<<16 | gc.TFLOAT64:
+	case OGT_ | gc.TUINT8,
+		OGT_ | gc.TUINT16,
+		OGT_ | gc.TUINT32,
+		OGT_ | gc.TUINT64,
+		OLT_ | gc.TFLOAT32,
+		OLT_ | gc.TFLOAT64:
 		a = x86.AJHI
 
-	case gc.OGE<<16 | gc.TINT8,
-		gc.OGE<<16 | gc.TINT16,
-		gc.OGE<<16 | gc.TINT32,
-		gc.OGE<<16 | gc.TINT64:
+	case OGE_ | gc.TINT8,
+		OGE_ | gc.TINT16,
+		OGE_ | gc.TINT32,
+		OGE_ | gc.TINT64:
 		a = x86.AJGE
 
-	case gc.OGE<<16 | gc.TUINT8,
-		gc.OGE<<16 | gc.TUINT16,
-		gc.OGE<<16 | gc.TUINT32,
-		gc.OGE<<16 | gc.TUINT64,
-		gc.OLE<<16 | gc.TFLOAT32,
-		gc.OLE<<16 | gc.TFLOAT64:
+	case OGE_ | gc.TUINT8,
+		OGE_ | gc.TUINT16,
+		OGE_ | gc.TUINT32,
+		OGE_ | gc.TUINT64,
+		OLE_ | gc.TFLOAT32,
+		OLE_ | gc.TFLOAT64:
 		a = x86.AJCC
 
-	case gc.OCMP<<16 | gc.TBOOL,
-		gc.OCMP<<16 | gc.TINT8,
-		gc.OCMP<<16 | gc.TUINT8:
+	case OCMP_ | gc.TBOOL,
+		OCMP_ | gc.TINT8,
+		OCMP_ | gc.TUINT8:
 		a = x86.ACMPB
 
-	case gc.OCMP<<16 | gc.TINT16,
-		gc.OCMP<<16 | gc.TUINT16:
+	case OCMP_ | gc.TINT16,
+		OCMP_ | gc.TUINT16:
 		a = x86.ACMPW
 
-	case gc.OCMP<<16 | gc.TINT32,
-		gc.OCMP<<16 | gc.TUINT32,
-		gc.OCMP<<16 | gc.TPTR32:
+	case OCMP_ | gc.TINT32,
+		OCMP_ | gc.TUINT32,
+		OCMP_ | gc.TPTR32:
 		a = x86.ACMPL
 
-	case gc.OAS<<16 | gc.TBOOL,
-		gc.OAS<<16 | gc.TINT8,
-		gc.OAS<<16 | gc.TUINT8:
+	case OAS_ | gc.TBOOL,
+		OAS_ | gc.TINT8,
+		OAS_ | gc.TUINT8:
 		a = x86.AMOVB
 
-	case gc.OAS<<16 | gc.TINT16,
-		gc.OAS<<16 | gc.TUINT16:
+	case OAS_ | gc.TINT16,
+		OAS_ | gc.TUINT16:
 		a = x86.AMOVW
 
-	case gc.OAS<<16 | gc.TINT32,
-		gc.OAS<<16 | gc.TUINT32,
-		gc.OAS<<16 | gc.TPTR32:
+	case OAS_ | gc.TINT32,
+		OAS_ | gc.TUINT32,
+		OAS_ | gc.TPTR32:
 		a = x86.AMOVL
 
-	case gc.OAS<<16 | gc.TFLOAT32:
+	case OAS_ | gc.TFLOAT32:
 		a = x86.AMOVSS
 
-	case gc.OAS<<16 | gc.TFLOAT64:
+	case OAS_ | gc.TFLOAT64:
 		a = x86.AMOVSD
 
-	case gc.OADD<<16 | gc.TINT8,
-		gc.OADD<<16 | gc.TUINT8:
+	case OADD_ | gc.TINT8,
+		OADD_ | gc.TUINT8:
 		a = x86.AADDB
 
-	case gc.OADD<<16 | gc.TINT16,
-		gc.OADD<<16 | gc.TUINT16:
+	case OADD_ | gc.TINT16,
+		OADD_ | gc.TUINT16:
 		a = x86.AADDW
 
-	case gc.OADD<<16 | gc.TINT32,
-		gc.OADD<<16 | gc.TUINT32,
-		gc.OADD<<16 | gc.TPTR32:
+	case OADD_ | gc.TINT32,
+		OADD_ | gc.TUINT32,
+		OADD_ | gc.TPTR32:
 		a = x86.AADDL
 
-	case gc.OSUB<<16 | gc.TINT8,
-		gc.OSUB<<16 | gc.TUINT8:
+	case OSUB_ | gc.TINT8,
+		OSUB_ | gc.TUINT8:
 		a = x86.ASUBB
 
-	case gc.OSUB<<16 | gc.TINT16,
-		gc.OSUB<<16 | gc.TUINT16:
+	case OSUB_ | gc.TINT16,
+		OSUB_ | gc.TUINT16:
 		a = x86.ASUBW
 
-	case gc.OSUB<<16 | gc.TINT32,
-		gc.OSUB<<16 | gc.TUINT32,
-		gc.OSUB<<16 | gc.TPTR32:
+	case OSUB_ | gc.TINT32,
+		OSUB_ | gc.TUINT32,
+		OSUB_ | gc.TPTR32:
 		a = x86.ASUBL
 
-	case gc.OINC<<16 | gc.TINT8,
-		gc.OINC<<16 | gc.TUINT8:
+	case OINC_ | gc.TINT8,
+		OINC_ | gc.TUINT8:
 		a = x86.AINCB
 
-	case gc.OINC<<16 | gc.TINT16,
-		gc.OINC<<16 | gc.TUINT16:
+	case OINC_ | gc.TINT16,
+		OINC_ | gc.TUINT16:
 		a = x86.AINCW
 
-	case gc.OINC<<16 | gc.TINT32,
-		gc.OINC<<16 | gc.TUINT32,
-		gc.OINC<<16 | gc.TPTR32:
+	case OINC_ | gc.TINT32,
+		OINC_ | gc.TUINT32,
+		OINC_ | gc.TPTR32:
 		a = x86.AINCL
 
-	case gc.ODEC<<16 | gc.TINT8,
-		gc.ODEC<<16 | gc.TUINT8:
+	case ODEC_ | gc.TINT8,
+		ODEC_ | gc.TUINT8:
 		a = x86.ADECB
 
-	case gc.ODEC<<16 | gc.TINT16,
-		gc.ODEC<<16 | gc.TUINT16:
+	case ODEC_ | gc.TINT16,
+		ODEC_ | gc.TUINT16:
 		a = x86.ADECW
 
-	case gc.ODEC<<16 | gc.TINT32,
-		gc.ODEC<<16 | gc.TUINT32,
-		gc.ODEC<<16 | gc.TPTR32:
+	case ODEC_ | gc.TINT32,
+		ODEC_ | gc.TUINT32,
+		ODEC_ | gc.TPTR32:
 		a = x86.ADECL
 
-	case gc.OCOM<<16 | gc.TINT8,
-		gc.OCOM<<16 | gc.TUINT8:
+	case OCOM_ | gc.TINT8,
+		OCOM_ | gc.TUINT8:
 		a = x86.ANOTB
 
-	case gc.OCOM<<16 | gc.TINT16,
-		gc.OCOM<<16 | gc.TUINT16:
+	case OCOM_ | gc.TINT16,
+		OCOM_ | gc.TUINT16:
 		a = x86.ANOTW
 
-	case gc.OCOM<<16 | gc.TINT32,
-		gc.OCOM<<16 | gc.TUINT32,
-		gc.OCOM<<16 | gc.TPTR32:
+	case OCOM_ | gc.TINT32,
+		OCOM_ | gc.TUINT32,
+		OCOM_ | gc.TPTR32:
 		a = x86.ANOTL
 
-	case gc.OMINUS<<16 | gc.TINT8,
-		gc.OMINUS<<16 | gc.TUINT8:
+	case OMINUS_ | gc.TINT8,
+		OMINUS_ | gc.TUINT8:
 		a = x86.ANEGB
 
-	case gc.OMINUS<<16 | gc.TINT16,
-		gc.OMINUS<<16 | gc.TUINT16:
+	case OMINUS_ | gc.TINT16,
+		OMINUS_ | gc.TUINT16:
 		a = x86.ANEGW
 
-	case gc.OMINUS<<16 | gc.TINT32,
-		gc.OMINUS<<16 | gc.TUINT32,
-		gc.OMINUS<<16 | gc.TPTR32:
+	case OMINUS_ | gc.TINT32,
+		OMINUS_ | gc.TUINT32,
+		OMINUS_ | gc.TPTR32:
 		a = x86.ANEGL
 
-	case gc.OAND<<16 | gc.TINT8,
-		gc.OAND<<16 | gc.TUINT8:
+	case OAND_ | gc.TINT8,
+		OAND_ | gc.TUINT8:
 		a = x86.AANDB
 
-	case gc.OAND<<16 | gc.TINT16,
-		gc.OAND<<16 | gc.TUINT16:
+	case OAND_ | gc.TINT16,
+		OAND_ | gc.TUINT16:
 		a = x86.AANDW
 
-	case gc.OAND<<16 | gc.TINT32,
-		gc.OAND<<16 | gc.TUINT32,
-		gc.OAND<<16 | gc.TPTR32:
+	case OAND_ | gc.TINT32,
+		OAND_ | gc.TUINT32,
+		OAND_ | gc.TPTR32:
 		a = x86.AANDL
 
-	case gc.OOR<<16 | gc.TINT8,
-		gc.OOR<<16 | gc.TUINT8:
+	case OOR_ | gc.TINT8,
+		OOR_ | gc.TUINT8:
 		a = x86.AORB
 
-	case gc.OOR<<16 | gc.TINT16,
-		gc.OOR<<16 | gc.TUINT16:
+	case OOR_ | gc.TINT16,
+		OOR_ | gc.TUINT16:
 		a = x86.AORW
 
-	case gc.OOR<<16 | gc.TINT32,
-		gc.OOR<<16 | gc.TUINT32,
-		gc.OOR<<16 | gc.TPTR32:
+	case OOR_ | gc.TINT32,
+		OOR_ | gc.TUINT32,
+		OOR_ | gc.TPTR32:
 		a = x86.AORL
 
-	case gc.OXOR<<16 | gc.TINT8,
-		gc.OXOR<<16 | gc.TUINT8:
+	case OXOR_ | gc.TINT8,
+		OXOR_ | gc.TUINT8:
 		a = x86.AXORB
 
-	case gc.OXOR<<16 | gc.TINT16,
-		gc.OXOR<<16 | gc.TUINT16:
+	case OXOR_ | gc.TINT16,
+		OXOR_ | gc.TUINT16:
 		a = x86.AXORW
 
-	case gc.OXOR<<16 | gc.TINT32,
-		gc.OXOR<<16 | gc.TUINT32,
-		gc.OXOR<<16 | gc.TPTR32:
+	case OXOR_ | gc.TINT32,
+		OXOR_ | gc.TUINT32,
+		OXOR_ | gc.TPTR32:
 		a = x86.AXORL
 
-	case gc.OLROT<<16 | gc.TINT8,
-		gc.OLROT<<16 | gc.TUINT8:
+	case OLROT_ | gc.TINT8,
+		OLROT_ | gc.TUINT8:
 		a = x86.AROLB
 
-	case gc.OLROT<<16 | gc.TINT16,
-		gc.OLROT<<16 | gc.TUINT16:
+	case OLROT_ | gc.TINT16,
+		OLROT_ | gc.TUINT16:
 		a = x86.AROLW
 
-	case gc.OLROT<<16 | gc.TINT32,
-		gc.OLROT<<16 | gc.TUINT32,
-		gc.OLROT<<16 | gc.TPTR32:
+	case OLROT_ | gc.TINT32,
+		OLROT_ | gc.TUINT32,
+		OLROT_ | gc.TPTR32:
 		a = x86.AROLL
 
-	case gc.OLSH<<16 | gc.TINT8,
-		gc.OLSH<<16 | gc.TUINT8:
+	case OLSH_ | gc.TINT8,
+		OLSH_ | gc.TUINT8:
 		a = x86.ASHLB
 
-	case gc.OLSH<<16 | gc.TINT16,
-		gc.OLSH<<16 | gc.TUINT16:
+	case OLSH_ | gc.TINT16,
+		OLSH_ | gc.TUINT16:
 		a = x86.ASHLW
 
-	case gc.OLSH<<16 | gc.TINT32,
-		gc.OLSH<<16 | gc.TUINT32,
-		gc.OLSH<<16 | gc.TPTR32:
+	case OLSH_ | gc.TINT32,
+		OLSH_ | gc.TUINT32,
+		OLSH_ | gc.TPTR32:
 		a = x86.ASHLL
 
-	case gc.ORSH<<16 | gc.TUINT8:
+	case ORSH_ | gc.TUINT8:
 		a = x86.ASHRB
 
-	case gc.ORSH<<16 | gc.TUINT16:
+	case ORSH_ | gc.TUINT16:
 		a = x86.ASHRW
 
-	case gc.ORSH<<16 | gc.TUINT32,
-		gc.ORSH<<16 | gc.TPTR32:
+	case ORSH_ | gc.TUINT32,
+		ORSH_ | gc.TPTR32:
 		a = x86.ASHRL
 
-	case gc.ORSH<<16 | gc.TINT8:
+	case ORSH_ | gc.TINT8:
 		a = x86.ASARB
 
-	case gc.ORSH<<16 | gc.TINT16:
+	case ORSH_ | gc.TINT16:
 		a = x86.ASARW
 
-	case gc.ORSH<<16 | gc.TINT32:
+	case ORSH_ | gc.TINT32:
 		a = x86.ASARL
 
-	case gc.OHMUL<<16 | gc.TINT8,
-		gc.OMUL<<16 | gc.TINT8,
-		gc.OMUL<<16 | gc.TUINT8:
+	case OHMUL_ | gc.TINT8,
+		OMUL_ | gc.TINT8,
+		OMUL_ | gc.TUINT8:
 		a = x86.AIMULB
 
-	case gc.OHMUL<<16 | gc.TINT16,
-		gc.OMUL<<16 | gc.TINT16,
-		gc.OMUL<<16 | gc.TUINT16:
+	case OHMUL_ | gc.TINT16,
+		OMUL_ | gc.TINT16,
+		OMUL_ | gc.TUINT16:
 		a = x86.AIMULW
 
-	case gc.OHMUL<<16 | gc.TINT32,
-		gc.OMUL<<16 | gc.TINT32,
-		gc.OMUL<<16 | gc.TUINT32,
-		gc.OMUL<<16 | gc.TPTR32:
+	case OHMUL_ | gc.TINT32,
+		OMUL_ | gc.TINT32,
+		OMUL_ | gc.TUINT32,
+		OMUL_ | gc.TPTR32:
 		a = x86.AIMULL
 
-	case gc.OHMUL<<16 | gc.TUINT8:
+	case OHMUL_ | gc.TUINT8:
 		a = x86.AMULB
 
-	case gc.OHMUL<<16 | gc.TUINT16:
+	case OHMUL_ | gc.TUINT16:
 		a = x86.AMULW
 
-	case gc.OHMUL<<16 | gc.TUINT32,
-		gc.OHMUL<<16 | gc.TPTR32:
+	case OHMUL_ | gc.TUINT32,
+		OHMUL_ | gc.TPTR32:
 		a = x86.AMULL
 
-	case gc.ODIV<<16 | gc.TINT8,
-		gc.OMOD<<16 | gc.TINT8:
+	case ODIV_ | gc.TINT8,
+		OMOD_ | gc.TINT8:
 		a = x86.AIDIVB
 
-	case gc.ODIV<<16 | gc.TUINT8,
-		gc.OMOD<<16 | gc.TUINT8:
+	case ODIV_ | gc.TUINT8,
+		OMOD_ | gc.TUINT8:
 		a = x86.ADIVB
 
-	case gc.ODIV<<16 | gc.TINT16,
-		gc.OMOD<<16 | gc.TINT16:
+	case ODIV_ | gc.TINT16,
+		OMOD_ | gc.TINT16:
 		a = x86.AIDIVW
 
-	case gc.ODIV<<16 | gc.TUINT16,
-		gc.OMOD<<16 | gc.TUINT16:
+	case ODIV_ | gc.TUINT16,
+		OMOD_ | gc.TUINT16:
 		a = x86.ADIVW
 
-	case gc.ODIV<<16 | gc.TINT32,
-		gc.OMOD<<16 | gc.TINT32:
+	case ODIV_ | gc.TINT32,
+		OMOD_ | gc.TINT32:
 		a = x86.AIDIVL
 
-	case gc.ODIV<<16 | gc.TUINT32,
-		gc.ODIV<<16 | gc.TPTR32,
-		gc.OMOD<<16 | gc.TUINT32,
-		gc.OMOD<<16 | gc.TPTR32:
+	case ODIV_ | gc.TUINT32,
+		ODIV_ | gc.TPTR32,
+		OMOD_ | gc.TUINT32,
+		OMOD_ | gc.TPTR32:
 		a = x86.ADIVL
 
-	case gc.OEXTEND<<16 | gc.TINT16:
+	case OEXTEND_ | gc.TINT16:
 		a = x86.ACWD
 
-	case gc.OEXTEND<<16 | gc.TINT32:
+	case OEXTEND_ | gc.TINT32:
 		a = x86.ACDQ
 	}
 
 	return a
 }
 
-func foptoas(op int, t *gc.Type, flg int) int {
+func foptoas(op gc.Op, t *gc.Type, flg int) int {
 	a := obj.AXXX
-	et := int(gc.Simtype[t.Etype])
+	et := gc.Simtype[t.Etype]
+
+	// avoid constant conversions in switches below
+	const (
+		OCMP_   = uint32(gc.OCMP) << 16
+		OAS_    = uint32(gc.OAS) << 16
+		OADD_   = uint32(gc.OADD) << 16
+		OSUB_   = uint32(gc.OSUB) << 16
+		OMUL_   = uint32(gc.OMUL) << 16
+		ODIV_   = uint32(gc.ODIV) << 16
+		OMINUS_ = uint32(gc.OMINUS) << 16
+	)
 
 	if !gc.Thearch.Use387 {
 		switch uint32(op)<<16 | uint32(et) {
 		default:
 			gc.Fatalf("foptoas-sse: no entry %v-%v", gc.Oconv(int(op), 0), t)
 
-		case gc.OCMP<<16 | gc.TFLOAT32:
+		case OCMP_ | gc.TFLOAT32:
 			a = x86.AUCOMISS
 
-		case gc.OCMP<<16 | gc.TFLOAT64:
+		case OCMP_ | gc.TFLOAT64:
 			a = x86.AUCOMISD
 
-		case gc.OAS<<16 | gc.TFLOAT32:
+		case OAS_ | gc.TFLOAT32:
 			a = x86.AMOVSS
 
-		case gc.OAS<<16 | gc.TFLOAT64:
+		case OAS_ | gc.TFLOAT64:
 			a = x86.AMOVSD
 
-		case gc.OADD<<16 | gc.TFLOAT32:
+		case OADD_ | gc.TFLOAT32:
 			a = x86.AADDSS
 
-		case gc.OADD<<16 | gc.TFLOAT64:
+		case OADD_ | gc.TFLOAT64:
 			a = x86.AADDSD
 
-		case gc.OSUB<<16 | gc.TFLOAT32:
+		case OSUB_ | gc.TFLOAT32:
 			a = x86.ASUBSS
 
-		case gc.OSUB<<16 | gc.TFLOAT64:
+		case OSUB_ | gc.TFLOAT64:
 			a = x86.ASUBSD
 
-		case gc.OMUL<<16 | gc.TFLOAT32:
+		case OMUL_ | gc.TFLOAT32:
 			a = x86.AMULSS
 
-		case gc.OMUL<<16 | gc.TFLOAT64:
+		case OMUL_ | gc.TFLOAT64:
 			a = x86.AMULSD
 
-		case gc.ODIV<<16 | gc.TFLOAT32:
+		case ODIV_ | gc.TFLOAT32:
 			a = x86.ADIVSS
 
-		case gc.ODIV<<16 | gc.TFLOAT64:
+		case ODIV_ | gc.TFLOAT64:
 			a = x86.ADIVSD
 		}
 
@@ -470,79 +511,79 @@
 	}
 
 	switch uint32(op)<<16 | (uint32(et)<<8 | uint32(flg)) {
-	case gc.OADD<<16 | (gc.TFLOAT32<<8 | 0):
+	case OADD_ | (gc.TFLOAT32<<8 | 0):
 		return x86.AFADDF
 
-	case gc.OADD<<16 | (gc.TFLOAT64<<8 | 0):
+	case OADD_ | (gc.TFLOAT64<<8 | 0):
 		return x86.AFADDD
 
-	case gc.OADD<<16 | (gc.TFLOAT64<<8 | Fpop):
+	case OADD_ | (gc.TFLOAT64<<8 | Fpop):
 		return x86.AFADDDP
 
-	case gc.OSUB<<16 | (gc.TFLOAT32<<8 | 0):
+	case OSUB_ | (gc.TFLOAT32<<8 | 0):
 		return x86.AFSUBF
 
-	case gc.OSUB<<16 | (gc.TFLOAT32<<8 | Frev):
+	case OSUB_ | (gc.TFLOAT32<<8 | Frev):
 		return x86.AFSUBRF
 
-	case gc.OSUB<<16 | (gc.TFLOAT64<<8 | 0):
+	case OSUB_ | (gc.TFLOAT64<<8 | 0):
 		return x86.AFSUBD
 
-	case gc.OSUB<<16 | (gc.TFLOAT64<<8 | Frev):
+	case OSUB_ | (gc.TFLOAT64<<8 | Frev):
 		return x86.AFSUBRD
 
-	case gc.OSUB<<16 | (gc.TFLOAT64<<8 | Fpop):
+	case OSUB_ | (gc.TFLOAT64<<8 | Fpop):
 		return x86.AFSUBDP
 
-	case gc.OSUB<<16 | (gc.TFLOAT64<<8 | (Fpop | Frev)):
+	case OSUB_ | (gc.TFLOAT64<<8 | (Fpop | Frev)):
 		return x86.AFSUBRDP
 
-	case gc.OMUL<<16 | (gc.TFLOAT32<<8 | 0):
+	case OMUL_ | (gc.TFLOAT32<<8 | 0):
 		return x86.AFMULF
 
-	case gc.OMUL<<16 | (gc.TFLOAT64<<8 | 0):
+	case OMUL_ | (gc.TFLOAT64<<8 | 0):
 		return x86.AFMULD
 
-	case gc.OMUL<<16 | (gc.TFLOAT64<<8 | Fpop):
+	case OMUL_ | (gc.TFLOAT64<<8 | Fpop):
 		return x86.AFMULDP
 
-	case gc.ODIV<<16 | (gc.TFLOAT32<<8 | 0):
+	case ODIV_ | (gc.TFLOAT32<<8 | 0):
 		return x86.AFDIVF
 
-	case gc.ODIV<<16 | (gc.TFLOAT32<<8 | Frev):
+	case ODIV_ | (gc.TFLOAT32<<8 | Frev):
 		return x86.AFDIVRF
 
-	case gc.ODIV<<16 | (gc.TFLOAT64<<8 | 0):
+	case ODIV_ | (gc.TFLOAT64<<8 | 0):
 		return x86.AFDIVD
 
-	case gc.ODIV<<16 | (gc.TFLOAT64<<8 | Frev):
+	case ODIV_ | (gc.TFLOAT64<<8 | Frev):
 		return x86.AFDIVRD
 
-	case gc.ODIV<<16 | (gc.TFLOAT64<<8 | Fpop):
+	case ODIV_ | (gc.TFLOAT64<<8 | Fpop):
 		return x86.AFDIVDP
 
-	case gc.ODIV<<16 | (gc.TFLOAT64<<8 | (Fpop | Frev)):
+	case ODIV_ | (gc.TFLOAT64<<8 | (Fpop | Frev)):
 		return x86.AFDIVRDP
 
-	case gc.OCMP<<16 | (gc.TFLOAT32<<8 | 0):
+	case OCMP_ | (gc.TFLOAT32<<8 | 0):
 		return x86.AFCOMF
 
-	case gc.OCMP<<16 | (gc.TFLOAT32<<8 | Fpop):
+	case OCMP_ | (gc.TFLOAT32<<8 | Fpop):
 		return x86.AFCOMFP
 
-	case gc.OCMP<<16 | (gc.TFLOAT64<<8 | 0):
+	case OCMP_ | (gc.TFLOAT64<<8 | 0):
 		return x86.AFCOMD
 
-	case gc.OCMP<<16 | (gc.TFLOAT64<<8 | Fpop):
+	case OCMP_ | (gc.TFLOAT64<<8 | Fpop):
 		return x86.AFCOMDP
 
-	case gc.OCMP<<16 | (gc.TFLOAT64<<8 | Fpop2):
+	case OCMP_ | (gc.TFLOAT64<<8 | Fpop2):
 		return x86.AFCOMDPP
 
-	case gc.OMINUS<<16 | (gc.TFLOAT32<<8 | 0):
+	case OMINUS_ | (gc.TFLOAT32<<8 | 0):
 		return x86.AFCHS
 
-	case gc.OMINUS<<16 | (gc.TFLOAT64<<8 | 0):
+	case OMINUS_ | (gc.TFLOAT64<<8 | 0):
 		return x86.AFCHS
 	}
 
@@ -583,8 +624,8 @@
 	gins(as, &n1, n2)
 }
 
-func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
-	if gc.Isint[t.Etype] || int(t.Etype) == gc.Tptr {
+func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
+	if gc.Isint[t.Etype] || t.Etype == gc.Tptr {
 		if (n1.Op == gc.OLITERAL || n1.Op == gc.OADDR && n1.Left.Op == gc.ONAME) && n2.Op != gc.OLITERAL {
 			// Reverse comparison to place constant (including address constant) last.
 			op = gc.Brrev(op)
@@ -911,15 +952,13 @@
 		} else {
 			// Implementation of conversion-free x = y for int64 or uint64 x.
 			// This is generated by the code that copies small values out of closures,
-			// and that code has DX live, so avoid DX and use CX instead.
+			// and that code has DX live, so avoid DX and just use AX twice.
 			var r1 gc.Node
 			gc.Nodreg(&r1, gc.Types[gc.TUINT32], x86.REG_AX)
-			var r2 gc.Node
-			gc.Nodreg(&r2, gc.Types[gc.TUINT32], x86.REG_CX)
 			gins(x86.AMOVL, &flo, &r1)
-			gins(x86.AMOVL, &fhi, &r2)
 			gins(x86.AMOVL, &r1, &tlo)
-			gins(x86.AMOVL, &r2, &thi)
+			gins(x86.AMOVL, &fhi, &r1)
+			gins(x86.AMOVL, &r1, &thi)
 		}
 
 		splitclean()