cmd/compile, cmd/link: create from 5g, 5l, etc

Trivial merging of 5g, 6g, ... into go tool compile,
and similarly 5l, 6l, ... into go tool link.
The files compile/main.go and link/main.go are new.
Everything else in those directories is a move followed by
change of imports and package name.
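
For example, where one previously invoked the architecture-specific
tools directly (invocations and file suffixes illustrative):

	go tool 6g prog.go
	go tool 6l prog.6

one now invokes:

	go tool compile prog.go
	go tool link prog.o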

This CL breaks the build. Manual fixups are in the next CL.

See golang-dev thread titled "go tool compile, etc" for background.

Change-Id: Id35ff5a5859ad9037c61275d637b1bd51df6828b
Reviewed-on: https://go-review.googlesource.com/10287
Reviewed-by: Dave Cheney <dave@cheney.net>
Reviewed-by: Rob Pike <r@golang.org>
diff --git a/src/cmd/compile/internal/amd64/cgen.go b/src/cmd/compile/internal/amd64/cgen.go
new file mode 100644
index 0000000..71f8f88
--- /dev/null
+++ b/src/cmd/compile/internal/amd64/cgen.go
@@ -0,0 +1,151 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package amd64
+
+import (
+	"cmd/compile/internal/gc"
+	"cmd/internal/obj"
+	"cmd/internal/obj/x86"
+)
+
+func blockcopy(n, ns *gc.Node, osrc, odst, w int64) {
+	var noddi gc.Node
+	gc.Nodreg(&noddi, gc.Types[gc.Tptr], x86.REG_DI)
+	var nodsi gc.Node
+	gc.Nodreg(&nodsi, gc.Types[gc.Tptr], x86.REG_SI)
+
+	var nodl gc.Node
+	var nodr gc.Node
+	if n.Ullman >= ns.Ullman {
+		gc.Agenr(n, &nodr, &nodsi)
+		if ns.Op == gc.ONAME {
+			gc.Gvardef(ns)
+		}
+		gc.Agenr(ns, &nodl, &noddi)
+	} else {
+		if ns.Op == gc.ONAME {
+			gc.Gvardef(ns)
+		}
+		gc.Agenr(ns, &nodl, &noddi)
+		gc.Agenr(n, &nodr, &nodsi)
+	}
+
+	if nodl.Reg != x86.REG_DI {
+		gmove(&nodl, &noddi)
+	}
+	if nodr.Reg != x86.REG_SI {
+		gmove(&nodr, &nodsi)
+	}
+	gc.Regfree(&nodl)
+	gc.Regfree(&nodr)
+
+	c := w % 8 // bytes
+	q := w / 8 // quads
+
+	var oldcx gc.Node
+	var cx gc.Node
+	savex(x86.REG_CX, &cx, &oldcx, nil, gc.Types[gc.TINT64])
+
+	// if we are copying forward on the stack and
+	// the src and dst overlap, then reverse direction
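+	// (for example, osrc=0, odst=8, w=16: a forward copy would
+	// overwrite the source bytes at offsets 8..15 before reading them)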
+	if osrc < odst && odst < osrc+w {
+		// reverse direction
+		gins(x86.ASTD, nil, nil) // set direction flag
+		if c > 0 {
+			gconreg(addptr, w-1, x86.REG_SI)
+			gconreg(addptr, w-1, x86.REG_DI)
+
+			gconreg(movptr, c, x86.REG_CX)
+			gins(x86.AREP, nil, nil)   // repeat
+			gins(x86.AMOVSB, nil, nil) // MOVB *(SI)-,*(DI)-
+		}
+
+		if q > 0 {
+			if c > 0 {
+				gconreg(addptr, -7, x86.REG_SI)
+				gconreg(addptr, -7, x86.REG_DI)
+			} else {
+				gconreg(addptr, w-8, x86.REG_SI)
+				gconreg(addptr, w-8, x86.REG_DI)
+			}
+
+			gconreg(movptr, q, x86.REG_CX)
+			gins(x86.AREP, nil, nil)   // repeat
+			gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)-,*(DI)-
+		}
+
+		// we leave with the flag clear
+		gins(x86.ACLD, nil, nil)
+	} else {
+		// normal direction
+		if q > 128 || (gc.Nacl && q >= 4) {
+			gconreg(movptr, q, x86.REG_CX)
+			gins(x86.AREP, nil, nil)   // repeat
+			gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)+,*(DI)+
+		} else if q >= 4 {
+			p := gins(obj.ADUFFCOPY, nil, nil)
+			p.To.Type = obj.TYPE_ADDR
+			p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))
+
+			// 14 and 128 = magic constants: see ../../runtime/asm_amd64.s
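+			// (each 8-byte copy in duffcopy is 14 bytes of code, and there
+			// are 128 of them; entering at 14*(128-q) runs exactly q copies)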
+			p.To.Offset = 14 * (128 - q)
+		} else if !gc.Nacl && c == 0 {
+			// We don't need the MOVSQ side-effect of updating SI and DI,
+			// and issuing a sequence of MOVQs directly is faster.
+			nodsi.Op = gc.OINDREG
+
+			noddi.Op = gc.OINDREG
+			for q > 0 {
+				gmove(&nodsi, &cx) // MOVQ x+(SI),CX
+				gmove(&cx, &noddi) // MOVQ CX,x+(DI)
+				nodsi.Xoffset += 8
+				noddi.Xoffset += 8
+				q--
+			}
+		} else {
+			for q > 0 {
+				gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)+,*(DI)+
+				q--
+			}
+		}
+
+		// copy the remaining c bytes
+		if w < 4 || c <= 1 || (odst < osrc && osrc < odst+w) {
+			for c > 0 {
+				gins(x86.AMOVSB, nil, nil) // MOVB *(SI)+,*(DI)+
+				c--
+			}
+		} else if w < 8 || c <= 4 {
+			nodsi.Op = gc.OINDREG
+			noddi.Op = gc.OINDREG
+			cx.Type = gc.Types[gc.TINT32]
+			nodsi.Type = gc.Types[gc.TINT32]
+			noddi.Type = gc.Types[gc.TINT32]
+			if c > 4 {
+				nodsi.Xoffset = 0
+				noddi.Xoffset = 0
+				gmove(&nodsi, &cx)
+				gmove(&cx, &noddi)
+			}
+
+			nodsi.Xoffset = c - 4
+			noddi.Xoffset = c - 4
+			gmove(&nodsi, &cx)
+			gmove(&cx, &noddi)
+		} else {
+			nodsi.Op = gc.OINDREG
+			noddi.Op = gc.OINDREG
+			cx.Type = gc.Types[gc.TINT64]
+			nodsi.Type = gc.Types[gc.TINT64]
+			noddi.Type = gc.Types[gc.TINT64]
+			nodsi.Xoffset = c - 8
+			noddi.Xoffset = c - 8
+			gmove(&nodsi, &cx)
+			gmove(&cx, &noddi)
+		}
+	}
+
+	restx(&cx, &oldcx)
+}
diff --git a/src/cmd/compile/internal/amd64/galign.go b/src/cmd/compile/internal/amd64/galign.go
new file mode 100644
index 0000000..79bf94a
--- /dev/null
+++ b/src/cmd/compile/internal/amd64/galign.go
@@ -0,0 +1,129 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package amd64
+
+import (
+	"cmd/compile/internal/gc"
+	"cmd/internal/obj"
+	"cmd/internal/obj/x86"
+)
+
+var (
+	thechar     int           = '6'
+	thestring   string        = "amd64"
+	thelinkarch *obj.LinkArch = &x86.Linkamd64
+)
+
+func linkarchinit() {
+	if obj.Getgoarch() == "amd64p32" {
+		thelinkarch = &x86.Linkamd64p32
+		gc.Thearch.Thelinkarch = thelinkarch
+		thestring = "amd64p32"
+		gc.Thearch.Thestring = "amd64p32"
+	}
+}
+
+var MAXWIDTH int64 = 1 << 50
+
+var (
+	addptr int = x86.AADDQ
+	movptr int = x86.AMOVQ
+	leaptr int = x86.ALEAQ
+	cmpptr int = x86.ACMPQ
+)
+
+/*
+ * go predeclares several platform-specific types:
+ * int, uint, and uintptr
+ */
+var typedefs = []gc.Typedef{
+	gc.Typedef{"int", gc.TINT, gc.TINT64},
+	gc.Typedef{"uint", gc.TUINT, gc.TUINT64},
+	gc.Typedef{"uintptr", gc.TUINTPTR, gc.TUINT64},
+}
+
+func betypeinit() {
+	gc.Widthptr = 8
+	gc.Widthint = 8
+	gc.Widthreg = 8
+	if obj.Getgoarch() == "amd64p32" {
+		gc.Widthptr = 4
+		gc.Widthint = 4
+		addptr = x86.AADDL
+		movptr = x86.AMOVL
+		leaptr = x86.ALEAL
+		cmpptr = x86.ACMPL
+		typedefs[0].Sameas = gc.TINT32
+		typedefs[1].Sameas = gc.TUINT32
+		typedefs[2].Sameas = gc.TUINT32
+	}
+
+	if gc.Ctxt.Flag_dynlink {
+		gc.Thearch.ReservedRegs = append(gc.Thearch.ReservedRegs, x86.REG_R15)
+	}
+}
+
+func Main() {
+	if obj.Getgoos() == "nacl" {
+		resvd = append(resvd, x86.REG_BP, x86.REG_R15)
+	} else if obj.Framepointer_enabled != 0 {
+		resvd = append(resvd, x86.REG_BP)
+	}
+
+	gc.Thearch.Thechar = thechar
+	gc.Thearch.Thestring = thestring
+	gc.Thearch.Thelinkarch = thelinkarch
+	gc.Thearch.Typedefs = typedefs
+	gc.Thearch.REGSP = x86.REGSP
+	gc.Thearch.REGCTXT = x86.REGCTXT
+	gc.Thearch.REGCALLX = x86.REG_BX
+	gc.Thearch.REGCALLX2 = x86.REG_AX
+	gc.Thearch.REGRETURN = x86.REG_AX
+	gc.Thearch.REGMIN = x86.REG_AX
+	gc.Thearch.REGMAX = x86.REG_R15
+	gc.Thearch.FREGMIN = x86.REG_X0
+	gc.Thearch.FREGMAX = x86.REG_X15
+	gc.Thearch.MAXWIDTH = MAXWIDTH
+	gc.Thearch.ReservedRegs = resvd
+
+	gc.Thearch.AddIndex = addindex
+	gc.Thearch.Betypeinit = betypeinit
+	gc.Thearch.Cgen_bmul = cgen_bmul
+	gc.Thearch.Cgen_hmul = cgen_hmul
+	gc.Thearch.Cgen_shift = cgen_shift
+	gc.Thearch.Clearfat = clearfat
+	gc.Thearch.Defframe = defframe
+	gc.Thearch.Dodiv = dodiv
+	gc.Thearch.Excise = excise
+	gc.Thearch.Expandchecks = expandchecks
+	gc.Thearch.Getg = getg
+	gc.Thearch.Gins = gins
+	gc.Thearch.Ginsboolval = ginsboolval
+	gc.Thearch.Ginscmp = ginscmp
+	gc.Thearch.Ginscon = ginscon
+	gc.Thearch.Ginsnop = ginsnop
+	gc.Thearch.Gmove = gmove
+	gc.Thearch.Linkarchinit = linkarchinit
+	gc.Thearch.Peep = peep
+	gc.Thearch.Proginfo = proginfo
+	gc.Thearch.Regtyp = regtyp
+	gc.Thearch.Sameaddr = sameaddr
+	gc.Thearch.Smallindir = smallindir
+	gc.Thearch.Stackaddr = stackaddr
+	gc.Thearch.Blockcopy = blockcopy
+	gc.Thearch.Sudoaddable = sudoaddable
+	gc.Thearch.Sudoclean = sudoclean
+	gc.Thearch.Excludedregs = excludedregs
+	gc.Thearch.RtoB = RtoB
+	gc.Thearch.FtoB = FtoB
+	gc.Thearch.BtoR = BtoR
+	gc.Thearch.BtoF = BtoF
+	gc.Thearch.Optoas = optoas
+	gc.Thearch.Doregbits = doregbits
+	gc.Thearch.Regnames = regnames
+
+	gc.Main()
+	gc.Exit(0)
+}
diff --git a/src/cmd/compile/internal/amd64/ggen.go b/src/cmd/compile/internal/amd64/ggen.go
new file mode 100644
index 0000000..6425633
--- /dev/null
+++ b/src/cmd/compile/internal/amd64/ggen.go
@@ -0,0 +1,743 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package amd64
+
+import (
+	"cmd/compile/internal/gc"
+	"cmd/internal/obj"
+	"cmd/internal/obj/x86"
+)
+
+func defframe(ptxt *obj.Prog) {
+	var n *gc.Node
+
+	// fill in argument size, stack size
+	ptxt.To.Type = obj.TYPE_TEXTSIZE
+
+	ptxt.To.Val = int32(gc.Rnd(gc.Curfn.Type.Argwid, int64(gc.Widthptr)))
+	frame := uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
+	ptxt.To.Offset = int64(frame)
+
+	// insert code to zero ambiguously live variables
+	// so that the garbage collector only sees initialized values
+	// when it looks for pointers.
+	p := ptxt
+
+	hi := int64(0)
+	lo := hi
+	ax := uint32(0)
+
+	// iterate through declarations - they are sorted in decreasing xoffset order.
+	for l := gc.Curfn.Func.Dcl; l != nil; l = l.Next {
+		n = l.N
+		if !n.Name.Needzero {
+			continue
+		}
+		if n.Class != gc.PAUTO {
+			gc.Fatal("needzero class %d", n.Class)
+		}
+		if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
+			gc.Fatal("var %v has size %d offset %d", gc.Nconv(n, obj.FmtLong), int(n.Type.Width), int(n.Xoffset))
+		}
+
+		if lo != hi && n.Xoffset+n.Type.Width >= lo-int64(2*gc.Widthreg) {
+			// merge with range we already have
+			lo = n.Xoffset
+
+			continue
+		}
+
+		// zero old range
+		p = zerorange(p, int64(frame), lo, hi, &ax)
+
+		// set new range
+		hi = n.Xoffset + n.Type.Width
+
+		lo = n.Xoffset
+	}
+
+	// zero final range
+	zerorange(p, int64(frame), lo, hi, &ax)
+}
+
+// DUFFZERO consists of repeated blocks of 4 MOVs + ADD,
+// with 4 STOSQs at the very end.
+// The trailing STOSQs prevent the need for a DI preadjustment
+// for small numbers of words to clear.
+// See runtime/mkduff.go.
+const (
+	dzBlocks    = 31 // number of MOV/ADD blocks
+	dzBlockLen  = 4  // number of clears per block
+	dzBlockSize = 19 // size of instructions in a single block
+	dzMovSize   = 4  // size of single MOV instruction w/ offset
+	dzAddSize   = 4  // size of single ADD instruction
+	dzDIStep    = 8  // number of bytes cleared by each MOV instruction
+
+	dzTailLen  = 4 // number of final STOSQ instructions
+	dzTailSize = 2 // size of single STOSQ instruction
+
+	dzSize = dzBlocks*dzBlockSize + dzTailLen*dzTailSize // total size of DUFFZERO routine
+)
+
+// dzDI returns the pre-adjustment to DI for a call to DUFFZERO.
+// q is the number of words to zero.
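+// A jump that enters DUFFZERO mid-block skips some of that block's MOVs,
+// but the remaining MOVs keep their fixed offsets from DI, so DI must be
+// biased down by dzDIStep for each skipped MOV.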
+func dzDI(q int64) int64 {
+	if q < dzTailLen {
+		return 0
+	}
+	q -= dzTailLen
+	if q%dzBlockLen == 0 {
+		return 0
+	}
+	return -dzDIStep * (dzBlockLen - q%dzBlockLen)
+}
+
+// dzOff returns the offset for a jump into DUFFZERO.
+// q is the number of words to zero.
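+// For example, dzOff(3) = dzSize - 3*dzTailSize: the jump lands on the
+// last three trailing STOSQs, which clear exactly three words.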
+func dzOff(q int64) int64 {
+	off := int64(dzSize)
+	if q < dzTailLen {
+		return off - q*dzTailSize
+	}
+	off -= dzTailLen * dzTailSize
+	q -= dzTailLen
+	blocks, steps := q/dzBlockLen, q%dzBlockLen
+	off -= dzBlockSize * blocks
+	if steps > 0 {
+		off -= dzAddSize + dzMovSize*steps
+	}
+	return off
+}
+
+func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, ax *uint32) *obj.Prog {
+	cnt := hi - lo
+	if cnt == 0 {
+		return p
+	}
+	if *ax == 0 {
+		p = appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
+		*ax = 1
+	}
+
+	if cnt%int64(gc.Widthreg) != 0 {
+		// should only happen with nacl
+		if cnt%int64(gc.Widthptr) != 0 {
+			gc.Fatal("zerorange count not a multiple of widthptr %d", cnt)
+		}
+		p = appendpp(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo)
+		lo += int64(gc.Widthptr)
+		cnt -= int64(gc.Widthptr)
+	}
+
+	if cnt <= int64(4*gc.Widthreg) {
+		for i := int64(0); i < cnt; i += int64(gc.Widthreg) {
+			p = appendpp(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo+i)
+		}
+	} else if !gc.Nacl && (cnt <= int64(128*gc.Widthreg)) {
+		q := cnt / int64(gc.Widthreg)
+		p = appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, frame+lo+dzDI(q), obj.TYPE_REG, x86.REG_DI, 0)
+		p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, dzOff(q))
+		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
+	} else {
+		p = appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, cnt/int64(gc.Widthreg), obj.TYPE_REG, x86.REG_CX, 0)
+		p = appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, frame+lo, obj.TYPE_REG, x86.REG_DI, 0)
+		p = appendpp(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
+		p = appendpp(p, x86.ASTOSQ, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
+	}
+
+	return p
+}
+
+func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int64, ttype int, treg int, toffset int64) *obj.Prog {
+	q := gc.Ctxt.NewProg()
+	gc.Clearp(q)
+	q.As = int16(as)
+	q.Lineno = p.Lineno
+	q.From.Type = int16(ftype)
+	q.From.Reg = int16(freg)
+	q.From.Offset = foffset
+	q.To.Type = int16(ttype)
+	q.To.Reg = int16(treg)
+	q.To.Offset = toffset
+	q.Link = p.Link
+	p.Link = q
+	return q
+}
+
+var panicdiv *gc.Node
+
+/*
+ * generate division.
+ * generates one of:
+ *	res = nl / nr
+ *	res = nl % nr
+ * according to op.
+ */
+func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+	// Have to be careful about handling
+	// most negative int divided by -1 correctly.
+	// The hardware will trap.
+	// Also the byte divide instruction needs AH,
+	// which we otherwise don't have to deal with.
+	// Easiest way to avoid for int8, int16: use int32.
+	// For int32 and int64, use explicit test.
+	// Could use int64 hw for int32.
+	t := nl.Type
+
+	t0 := t
+	check := 0
+	if gc.Issigned[t.Etype] {
+		check = 1
+		if gc.Isconst(nl, gc.CTINT) && nl.Int() != -(1<<uint64(t.Width*8-1)) {
+			check = 0
+		} else if gc.Isconst(nr, gc.CTINT) && nr.Int() != -1 {
+			check = 0
+		}
+	}
+
+	if t.Width < 4 {
+		if gc.Issigned[t.Etype] {
+			t = gc.Types[gc.TINT32]
+		} else {
+			t = gc.Types[gc.TUINT32]
+		}
+		check = 0
+	}
+
+	a := optoas(op, t)
+
+	var n3 gc.Node
+	gc.Regalloc(&n3, t0, nil)
+	var ax gc.Node
+	var oldax gc.Node
+	if nl.Ullman >= nr.Ullman {
+		savex(x86.REG_AX, &ax, &oldax, res, t0)
+		gc.Cgen(nl, &ax)
+		gc.Regalloc(&ax, t0, &ax) // mark ax live during cgen
+		gc.Cgen(nr, &n3)
+		gc.Regfree(&ax)
+	} else {
+		gc.Cgen(nr, &n3)
+		savex(x86.REG_AX, &ax, &oldax, res, t0)
+		gc.Cgen(nl, &ax)
+	}
+
+	if t != t0 {
+		// Convert
+		ax1 := ax
+
+		n31 := n3
+		ax.Type = t
+		n3.Type = t
+		gmove(&ax1, &ax)
+		gmove(&n31, &n3)
+	}
+
+	var n4 gc.Node
+	if gc.Nacl {
+		// Native Client does not relay the divide-by-zero trap
+		// to the executing program, so we must insert a check
+		// for ourselves.
+		gc.Nodconst(&n4, t, 0)
+
+		gins(optoas(gc.OCMP, t), &n3, &n4)
+		p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
+		if panicdiv == nil {
+			panicdiv = gc.Sysfunc("panicdivide")
+		}
+		gc.Ginscall(panicdiv, -1)
+		gc.Patch(p1, gc.Pc)
+	}
+
+	var p2 *obj.Prog
+	if check != 0 {
+		gc.Nodconst(&n4, t, -1)
+		gins(optoas(gc.OCMP, t), &n3, &n4)
+		p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
+		if op == gc.ODIV {
+			// a / (-1) is -a.
+			gins(optoas(gc.OMINUS, t), nil, &ax)
+
+			gmove(&ax, res)
+		} else {
+			// a % (-1) is 0.
+			gc.Nodconst(&n4, t, 0)
+
+			gmove(&n4, res)
+		}
+
+		p2 = gc.Gbranch(obj.AJMP, nil, 0)
+		gc.Patch(p1, gc.Pc)
+	}
+
+	var olddx gc.Node
+	var dx gc.Node
+	savex(x86.REG_DX, &dx, &olddx, res, t)
+	if !gc.Issigned[t.Etype] {
+		gc.Nodconst(&n4, t, 0)
+		gmove(&n4, &dx)
+	} else {
+		gins(optoas(gc.OEXTEND, t), nil, nil)
+	}
+	gins(a, &n3, nil)
+	gc.Regfree(&n3)
+	if op == gc.ODIV {
+		gmove(&ax, res)
+	} else {
+		gmove(&dx, res)
+	}
+	restx(&dx, &olddx)
+	if check != 0 {
+		gc.Patch(p2, gc.Pc)
+	}
+	restx(&ax, &oldax)
+}
+
+/*
+ * register dr is one of the special ones (AX, CX, DI, SI, etc.).
+ * we need to use it.  if it is already allocated as a temporary
+ * (r > 1; can only happen if a routine like sgen passed a
+ * special as cgen's res and then cgen used regalloc to reuse
+ * it as its own temporary), then move it for now to another
+ * register.  caller must call restx to move it back.
+ * the move is not necessary if dr == res, because res is
+ * known to be dead.
+ */
+func savex(dr int, x *gc.Node, oldx *gc.Node, res *gc.Node, t *gc.Type) {
+	r := reg[dr]
+
+	// save current ax and dx if they are live
+	// and not the destination
+	*oldx = gc.Node{}
+
+	gc.Nodreg(x, t, dr)
+	if r > 1 && !gc.Samereg(x, res) {
+		gc.Regalloc(oldx, gc.Types[gc.TINT64], nil)
+		x.Type = gc.Types[gc.TINT64]
+		gmove(x, oldx)
+		x.Type = t
+		oldx.Etype = r // squirrel away old r value
+		reg[dr] = 1
+	}
+}
+
+func restx(x *gc.Node, oldx *gc.Node) {
+	if oldx.Op != 0 {
+		x.Type = gc.Types[gc.TINT64]
+		reg[x.Reg] = oldx.Etype
+		gmove(oldx, x)
+		gc.Regfree(oldx)
+	}
+}
+
+/*
+ * generate high multiply:
+ *   res = (nl*nr) >> width
+ */
+func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
+	t := nl.Type
+	a := optoas(gc.OHMUL, t)
+	if nl.Ullman < nr.Ullman {
+		nl, nr = nr, nl
+	}
+
+	var n1 gc.Node
+	gc.Cgenr(nl, &n1, res)
+	var n2 gc.Node
+	gc.Cgenr(nr, &n2, nil)
+	var ax gc.Node
+	gc.Nodreg(&ax, t, x86.REG_AX)
+	gmove(&n1, &ax)
+	gins(a, &n2, nil)
+	gc.Regfree(&n2)
+	gc.Regfree(&n1)
+
+	var dx gc.Node
+	if t.Width == 1 {
+		// byte multiply behaves differently.
+		gc.Nodreg(&ax, t, x86.REG_AH)
+
+		gc.Nodreg(&dx, t, x86.REG_DX)
+		gmove(&ax, &dx)
+	}
+
+	gc.Nodreg(&dx, t, x86.REG_DX)
+	gmove(&dx, res)
+}
+
+/*
+ * generate shift according to op, one of:
+ *	res = nl << nr
+ *	res = nl >> nr
+ */
+func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+	a := optoas(op, nl.Type)
+
+	if nr.Op == gc.OLITERAL {
+		var n1 gc.Node
+		gc.Regalloc(&n1, nl.Type, res)
+		gc.Cgen(nl, &n1)
+		sc := uint64(nr.Int())
+		if sc >= uint64(nl.Type.Width*8) {
+			// large shift gets 2 shifts by width-1
+			var n3 gc.Node
+			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
+
+			gins(a, &n3, &n1)
+			gins(a, &n3, &n1)
+		} else {
+			gins(a, nr, &n1)
+		}
+		gmove(&n1, res)
+		gc.Regfree(&n1)
+		return
+	}
+
+	if nl.Ullman >= gc.UINF {
+		var n4 gc.Node
+		gc.Tempname(&n4, nl.Type)
+		gc.Cgen(nl, &n4)
+		nl = &n4
+	}
+
+	if nr.Ullman >= gc.UINF {
+		var n5 gc.Node
+		gc.Tempname(&n5, nr.Type)
+		gc.Cgen(nr, &n5)
+		nr = &n5
+	}
+
+	rcx := int(reg[x86.REG_CX])
+	var n1 gc.Node
+	gc.Nodreg(&n1, gc.Types[gc.TUINT32], x86.REG_CX)
+
+	// Allow either uint32 or uint64 as shift type,
+	// to avoid unnecessary conversion from uint32 to uint64
+	// just to do the comparison.
+	tcount := gc.Types[gc.Simtype[nr.Type.Etype]]
+
+	if tcount.Etype < gc.TUINT32 {
+		tcount = gc.Types[gc.TUINT32]
+	}
+
+	gc.Regalloc(&n1, nr.Type, &n1) // to hold the shift type in CX
+	var n3 gc.Node
+	gc.Regalloc(&n3, tcount, &n1) // to clear high bits of CX
+
+	var cx gc.Node
+	gc.Nodreg(&cx, gc.Types[gc.TUINT64], x86.REG_CX)
+
+	var oldcx gc.Node
+	if rcx > 0 && !gc.Samereg(&cx, res) {
+		gc.Regalloc(&oldcx, gc.Types[gc.TUINT64], nil)
+		gmove(&cx, &oldcx)
+	}
+
+	cx.Type = tcount
+
+	var n2 gc.Node
+	if gc.Samereg(&cx, res) {
+		gc.Regalloc(&n2, nl.Type, nil)
+	} else {
+		gc.Regalloc(&n2, nl.Type, res)
+	}
+	if nl.Ullman >= nr.Ullman {
+		gc.Cgen(nl, &n2)
+		gc.Cgen(nr, &n1)
+		gmove(&n1, &n3)
+	} else {
+		gc.Cgen(nr, &n1)
+		gmove(&n1, &n3)
+		gc.Cgen(nl, &n2)
+	}
+
+	gc.Regfree(&n3)
+
+	// test and fix up large shifts
+	if !bounded {
+		gc.Nodconst(&n3, tcount, nl.Type.Width*8)
+		gins(optoas(gc.OCMP, tcount), &n1, &n3)
+		p1 := gc.Gbranch(optoas(gc.OLT, tcount), nil, +1)
+		if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
+			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
+			gins(a, &n3, &n2)
+		} else {
+			gc.Nodconst(&n3, nl.Type, 0)
+			gmove(&n3, &n2)
+		}
+
+		gc.Patch(p1, gc.Pc)
+	}
+
+	gins(a, &n1, &n2)
+
+	if oldcx.Op != 0 {
+		cx.Type = gc.Types[gc.TUINT64]
+		gmove(&oldcx, &cx)
+		gc.Regfree(&oldcx)
+	}
+
+	gmove(&n2, res)
+
+	gc.Regfree(&n1)
+	gc.Regfree(&n2)
+}
+
+/*
+ * generate byte multiply:
+ *	res = nl * nr
+ * there is no 2-operand byte multiply instruction so
+ * we do a full-width multiplication and truncate afterwards.
+ */
+func cgen_bmul(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) bool {
+	if optoas(op, nl.Type) != x86.AIMULB {
+		return false
+	}
+
+	// largest ullman on left.
+	if nl.Ullman < nr.Ullman {
+		nl, nr = nr, nl
+	}
+
+	// generate operands in "8-bit" registers.
+	var n1b gc.Node
+	gc.Regalloc(&n1b, nl.Type, res)
+
+	gc.Cgen(nl, &n1b)
+	var n2b gc.Node
+	gc.Regalloc(&n2b, nr.Type, nil)
+	gc.Cgen(nr, &n2b)
+
+	// perform full-width multiplication.
+	t := gc.Types[gc.TUINT64]
+
+	if gc.Issigned[nl.Type.Etype] {
+		t = gc.Types[gc.TINT64]
+	}
+	var n1 gc.Node
+	gc.Nodreg(&n1, t, int(n1b.Reg))
+	var n2 gc.Node
+	gc.Nodreg(&n2, t, int(n2b.Reg))
+	a := optoas(op, t)
+	gins(a, &n2, &n1)
+
+	// truncate.
+	gmove(&n1, res)
+
+	gc.Regfree(&n1b)
+	gc.Regfree(&n2b)
+	return true
+}
+
+func clearfat(nl *gc.Node) {
+	/* clear a fat object */
+	if gc.Debug['g'] != 0 {
+		gc.Dump("\nclearfat", nl)
+	}
+
+	w := nl.Type.Width
+
+	// Avoid taking the address for simple enough types.
+	if gc.Componentgen(nil, nl) {
+		return
+	}
+
+	c := w % 8 // bytes
+	q := w / 8 // quads
+
+	if q < 4 {
+		// Write sequence of MOV 0, off(base) instead of using STOSQ.
+		// The hope is that although the code will be slightly longer,
+		// the MOVs will have no dependencies and pipeline better
+		// than the unrolled STOSQ loop.
+		// NOTE: Must use agen, not igen, so that optimizer sees address
+		// being taken. We are not writing on field boundaries.
+		var n1 gc.Node
+		gc.Agenr(nl, &n1, nil)
+
+		n1.Op = gc.OINDREG
+		var z gc.Node
+		gc.Nodconst(&z, gc.Types[gc.TUINT64], 0)
+		for ; q > 0; q-- {
+			n1.Type = z.Type
+			gins(x86.AMOVQ, &z, &n1)
+			n1.Xoffset += 8
+		}
+
+		if c >= 4 {
+			gc.Nodconst(&z, gc.Types[gc.TUINT32], 0)
+			n1.Type = z.Type
+			gins(x86.AMOVL, &z, &n1)
+			n1.Xoffset += 4
+			c -= 4
+		}
+
+		gc.Nodconst(&z, gc.Types[gc.TUINT8], 0)
+		for ; c > 0; c-- {
+			n1.Type = z.Type
+			gins(x86.AMOVB, &z, &n1)
+			n1.Xoffset++
+		}
+
+		gc.Regfree(&n1)
+		return
+	}
+
+	var oldn1 gc.Node
+	var n1 gc.Node
+	savex(x86.REG_DI, &n1, &oldn1, nil, gc.Types[gc.Tptr])
+	gc.Agen(nl, &n1)
+
+	var ax gc.Node
+	var oldax gc.Node
+	savex(x86.REG_AX, &ax, &oldax, nil, gc.Types[gc.Tptr])
+	gconreg(x86.AMOVL, 0, x86.REG_AX)
+
+	if q > 128 || gc.Nacl {
+		gconreg(movptr, q, x86.REG_CX)
+		gins(x86.AREP, nil, nil)   // repeat
+		gins(x86.ASTOSQ, nil, nil) // STOQ AL,*(DI)+
+	} else {
+		if di := dzDI(q); di != 0 {
+			gconreg(addptr, di, x86.REG_DI)
+		}
+		p := gins(obj.ADUFFZERO, nil, nil)
+		p.To.Type = obj.TYPE_ADDR
+		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
+		p.To.Offset = dzOff(q)
+	}
+
+	z := ax
+	di := n1
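+	// Clear the c tail bytes with a store that ends exactly at the end
+	// of the block; its front may re-zero bytes already cleared above,
+	// which is harmless.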
+	if w >= 8 && c >= 4 {
+		di.Op = gc.OINDREG
+		z.Type = gc.Types[gc.TINT64]
+		di.Type = z.Type
+		p := gins(x86.AMOVQ, &z, &di)
+		p.To.Scale = 1
+		p.To.Offset = c - 8
+	} else if c >= 4 {
+		di.Op = gc.OINDREG
+		z.Type = gc.Types[gc.TINT32]
+		di.Type = z.Type
+		gins(x86.AMOVL, &z, &di)
+		if c > 4 {
+			p := gins(x86.AMOVL, &z, &di)
+			p.To.Scale = 1
+			p.To.Offset = c - 4
+		}
+	} else {
+		for c > 0 {
+			gins(x86.ASTOSB, nil, nil) // STOB AL,*(DI)+
+			c--
+		}
+	}
+
+	restx(&n1, &oldn1)
+	restx(&ax, &oldax)
+}
+
+// Called after regopt and peep have run.
+// Expand CHECKNIL pseudo-op into actual nil pointer check.
+func expandchecks(firstp *obj.Prog) {
+	var p1 *obj.Prog
+	var p2 *obj.Prog
+
+	for p := firstp; p != nil; p = p.Link {
+		if p.As != obj.ACHECKNIL {
+			continue
+		}
+		if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
+			gc.Warnl(int(p.Lineno), "generated nil check")
+		}
+
+		// check is
+		//	CMP arg, $0
+		//	JNE 2(PC) (likely)
+		//	MOV AX, 0
+		p1 = gc.Ctxt.NewProg()
+
+		p2 = gc.Ctxt.NewProg()
+		gc.Clearp(p1)
+		gc.Clearp(p2)
+		p1.Link = p2
+		p2.Link = p.Link
+		p.Link = p1
+		p1.Lineno = p.Lineno
+		p2.Lineno = p.Lineno
+		p1.Pc = 9999
+		p2.Pc = 9999
+		p.As = int16(cmpptr)
+		p.To.Type = obj.TYPE_CONST
+		p.To.Offset = 0
+		p1.As = x86.AJNE
+		p1.From.Type = obj.TYPE_CONST
+		p1.From.Offset = 1 // likely
+		p1.To.Type = obj.TYPE_BRANCH
+		p1.To.Val = p2.Link
+
+		// crash by write to memory address 0.
+		// if possible, since we know arg is 0, use 0(arg),
+		// which will be shorter to encode than plain 0.
+		p2.As = x86.AMOVL
+
+		p2.From.Type = obj.TYPE_REG
+		p2.From.Reg = x86.REG_AX
+		if regtyp(&p.From) {
+			p2.To.Type = obj.TYPE_MEM
+			p2.To.Reg = p.From.Reg
+		} else {
+			p2.To.Type = obj.TYPE_MEM
+			p2.To.Reg = x86.REG_NONE
+		}
+
+		p2.To.Offset = 0
+	}
+}
+
+// addr += index*width if possible.
+func addindex(index *gc.Node, width int64, addr *gc.Node) bool {
+	switch width {
+	case 1, 2, 4, 8:
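+		// Rewrite the LEAQ operands into scaled-index form:
+		// LEAQ (addr)(index*width), addr.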
+		p1 := gins(x86.ALEAQ, index, addr)
+		p1.From.Type = obj.TYPE_MEM
+		p1.From.Scale = int16(width)
+		p1.From.Index = p1.From.Reg
+		p1.From.Reg = p1.To.Reg
+		return true
+	}
+	return false
+}
+
+// res = runtime.getg()
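+// On amd64 this is two loads: fetch the TLS base into a register,
+// then load g from (reg)(TLS*1).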
+func getg(res *gc.Node) {
+	var n1 gc.Node
+	gc.Regalloc(&n1, res.Type, res)
+	mov := optoas(gc.OAS, gc.Types[gc.Tptr])
+	p := gins(mov, nil, &n1)
+	p.From.Type = obj.TYPE_REG
+	p.From.Reg = x86.REG_TLS
+	p = gins(mov, nil, &n1)
+	p.From = p.To
+	p.From.Type = obj.TYPE_MEM
+	p.From.Index = x86.REG_TLS
+	p.From.Scale = 1
+	gmove(&n1, res)
+	gc.Regfree(&n1)
+}
diff --git a/src/cmd/compile/internal/amd64/gsubr.go b/src/cmd/compile/internal/amd64/gsubr.go
new file mode 100644
index 0000000..a8e4170
--- /dev/null
+++ b/src/cmd/compile/internal/amd64/gsubr.go
@@ -0,0 +1,1380 @@
+// Derived from Inferno utils/6c/txt.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/txt.c
+//
+//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
+//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+//	Portions Copyright © 1997-1999 Vita Nuova Limited
+//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+//	Portions Copyright © 2004,2006 Bruce Ellis
+//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+//	Portions Copyright © 2009 The Go Authors.  All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package amd64
+
+import (
+	"cmd/compile/internal/big"
+	"cmd/compile/internal/gc"
+	"cmd/internal/obj"
+	"cmd/internal/obj/x86"
+	"fmt"
+)
+
+var resvd = []int{
+	x86.REG_DI, // for movstring
+	x86.REG_SI, // for movstring
+
+	x86.REG_AX, // for divide
+	x86.REG_CX, // for shift
+	x86.REG_DX, // for divide
+	x86.REG_SP, // for stack
+}
+
+/*
+ * generate
+ *	as $c, reg
+ */
+func gconreg(as int, c int64, reg int) {
+	var nr gc.Node
+
+	switch as {
+	case x86.AADDL,
+		x86.AMOVL,
+		x86.ALEAL:
+		gc.Nodreg(&nr, gc.Types[gc.TINT32], reg)
+
+	default:
+		gc.Nodreg(&nr, gc.Types[gc.TINT64], reg)
+	}
+
+	ginscon(as, c, &nr)
+}
+
+/*
+ * generate
+ *	as $c, n
+ */
+func ginscon(as int, c int64, n2 *gc.Node) {
+	var n1 gc.Node
+
+	switch as {
+	case x86.AADDL,
+		x86.AMOVL,
+		x86.ALEAL:
+		gc.Nodconst(&n1, gc.Types[gc.TINT32], c)
+
+	default:
+		gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
+	}
+
+	if as != x86.AMOVQ && (c < -(1<<31) || c >= 1<<31) {
+		// cannot have 64-bit immediate in ADD, etc.
+		// instead, MOV into register first.
+		var ntmp gc.Node
+		gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)
+
+		gins(x86.AMOVQ, &n1, &ntmp)
+		gins(as, &ntmp, n2)
+		gc.Regfree(&ntmp)
+		return
+	}
+
+	gins(as, &n1, n2)
+}
+
+func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
+	if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && gc.Smallintconst(n1) && n2.Op != gc.OLITERAL {
+		// Reverse comparison to place constant last.
+		op = gc.Brrev(op)
+		n1, n2 = n2, n1
+	}
+	// General case.
+	var r1, r2, g1, g2 gc.Node
+	if n1.Op == gc.ONAME && n1.Class&gc.PHEAP == 0 || n1.Op == gc.OINDREG {
+		r1 = *n1
+	} else {
+		gc.Regalloc(&r1, t, n1)
+		gc.Regalloc(&g1, n1.Type, &r1)
+		gc.Cgen(n1, &g1)
+		gmove(&g1, &r1)
+	}
+	if n2.Op == gc.OLITERAL && gc.Isint[t.Etype] && gc.Smallintconst(n2) {
+		r2 = *n2
+	} else {
+		gc.Regalloc(&r2, t, n2)
+		gc.Regalloc(&g2, n1.Type, &r2)
+		gc.Cgen(n2, &g2)
+		gmove(&g2, &r2)
+	}
+	gins(optoas(gc.OCMP, t), &r1, &r2)
+	if r1.Op == gc.OREGISTER {
+		gc.Regfree(&g1)
+		gc.Regfree(&r1)
+	}
+	if r2.Op == gc.OREGISTER {
+		gc.Regfree(&g2)
+		gc.Regfree(&r2)
+	}
+	return gc.Gbranch(optoas(op, t), nil, likely)
+}
+
+func ginsboolval(a int, n *gc.Node) {
+	gins(jmptoset(a), nil, n)
+}
+
+// set up nodes representing 2^63
+var (
+	bigi         gc.Node
+	bigf         gc.Node
+	bignodes_did bool
+)
+
+func bignodes() {
+	if bignodes_did {
+		return
+	}
+	bignodes_did = true
+
+	var i big.Int
+	i.SetInt64(1)
+	i.Lsh(&i, 63)
+
+	gc.Nodconst(&bigi, gc.Types[gc.TUINT64], 0)
+	bigi.SetBigInt(&i)
+
+	bigi.Convconst(&bigf, gc.Types[gc.TFLOAT64])
+}
+
+/*
+ * generate move:
+ *	t = f
+ * hard part is conversions.
+ */
+func gmove(f *gc.Node, t *gc.Node) {
+	if gc.Debug['M'] != 0 {
+		fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, obj.FmtLong), gc.Nconv(t, obj.FmtLong))
+	}
+
+	ft := gc.Simsimtype(f.Type)
+	tt := gc.Simsimtype(t.Type)
+	cvt := t.Type
+
+	if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
+		gc.Complexmove(f, t)
+		return
+	}
+
+	// cannot have two memory operands
+	var a int
+	if gc.Ismem(f) && gc.Ismem(t) {
+		goto hard
+	}
+
+	// convert constant to desired type
+	if f.Op == gc.OLITERAL {
+		var con gc.Node
+		f.Convconst(&con, t.Type)
+		f = &con
+		ft = tt // so big switch will choose a simple mov
+
+		// some constants can't move directly to memory.
+		if gc.Ismem(t) {
+			// float constants come from memory.
+			if gc.Isfloat[tt] {
+				goto hard
+			}
+
+			// 64-bit immediates are really 32-bit sign-extended
+			// unless moving into a register.
+			if gc.Isint[tt] {
+				if i := con.Int(); int64(int32(i)) != i {
+					goto hard
+				}
+			}
+		}
+	}
+
+	// value -> value copy, only one memory operand.
+	// figure out the instruction to use.
+	// break out of switch for one-instruction gins.
+	// goto rdst for "destination must be register".
+	// goto hard for "convert to cvt type first".
+	// otherwise handle and return.
+
+	switch uint32(ft)<<16 | uint32(tt) {
+	default:
+		gc.Fatal("gmove %v -> %v", gc.Tconv(f.Type, obj.FmtLong), gc.Tconv(t.Type, obj.FmtLong))
+
+		/*
+		 * integer copy and truncate
+		 */
+	case gc.TINT8<<16 | gc.TINT8, // same size
+		gc.TINT8<<16 | gc.TUINT8,
+		gc.TUINT8<<16 | gc.TINT8,
+		gc.TUINT8<<16 | gc.TUINT8,
+		gc.TINT16<<16 | gc.TINT8,
+		// truncate
+		gc.TUINT16<<16 | gc.TINT8,
+		gc.TINT32<<16 | gc.TINT8,
+		gc.TUINT32<<16 | gc.TINT8,
+		gc.TINT64<<16 | gc.TINT8,
+		gc.TUINT64<<16 | gc.TINT8,
+		gc.TINT16<<16 | gc.TUINT8,
+		gc.TUINT16<<16 | gc.TUINT8,
+		gc.TINT32<<16 | gc.TUINT8,
+		gc.TUINT32<<16 | gc.TUINT8,
+		gc.TINT64<<16 | gc.TUINT8,
+		gc.TUINT64<<16 | gc.TUINT8:
+		a = x86.AMOVB
+
+	case gc.TINT16<<16 | gc.TINT16, // same size
+		gc.TINT16<<16 | gc.TUINT16,
+		gc.TUINT16<<16 | gc.TINT16,
+		gc.TUINT16<<16 | gc.TUINT16,
+		gc.TINT32<<16 | gc.TINT16,
+		// truncate
+		gc.TUINT32<<16 | gc.TINT16,
+		gc.TINT64<<16 | gc.TINT16,
+		gc.TUINT64<<16 | gc.TINT16,
+		gc.TINT32<<16 | gc.TUINT16,
+		gc.TUINT32<<16 | gc.TUINT16,
+		gc.TINT64<<16 | gc.TUINT16,
+		gc.TUINT64<<16 | gc.TUINT16:
+		a = x86.AMOVW
+
+	case gc.TINT32<<16 | gc.TINT32, // same size
+		gc.TINT32<<16 | gc.TUINT32,
+		gc.TUINT32<<16 | gc.TINT32,
+		gc.TUINT32<<16 | gc.TUINT32:
+		a = x86.AMOVL
+
+	case gc.TINT64<<16 | gc.TINT32, // truncate
+		gc.TUINT64<<16 | gc.TINT32,
+		gc.TINT64<<16 | gc.TUINT32,
+		gc.TUINT64<<16 | gc.TUINT32:
+		a = x86.AMOVQL
+
+	case gc.TINT64<<16 | gc.TINT64, // same size
+		gc.TINT64<<16 | gc.TUINT64,
+		gc.TUINT64<<16 | gc.TINT64,
+		gc.TUINT64<<16 | gc.TUINT64:
+		a = x86.AMOVQ
+
+		/*
+		 * integer up-conversions
+		 */
+	case gc.TINT8<<16 | gc.TINT16, // sign extend int8
+		gc.TINT8<<16 | gc.TUINT16:
+		a = x86.AMOVBWSX
+
+		goto rdst
+
+	case gc.TINT8<<16 | gc.TINT32,
+		gc.TINT8<<16 | gc.TUINT32:
+		a = x86.AMOVBLSX
+		goto rdst
+
+	case gc.TINT8<<16 | gc.TINT64,
+		gc.TINT8<<16 | gc.TUINT64:
+		a = x86.AMOVBQSX
+		goto rdst
+
+	case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8
+		gc.TUINT8<<16 | gc.TUINT16:
+		a = x86.AMOVBWZX
+
+		goto rdst
+
+	case gc.TUINT8<<16 | gc.TINT32,
+		gc.TUINT8<<16 | gc.TUINT32:
+		a = x86.AMOVBLZX
+		goto rdst
+
+	case gc.TUINT8<<16 | gc.TINT64,
+		gc.TUINT8<<16 | gc.TUINT64:
+		a = x86.AMOVBQZX
+		goto rdst
+
+	case gc.TINT16<<16 | gc.TINT32, // sign extend int16
+		gc.TINT16<<16 | gc.TUINT32:
+		a = x86.AMOVWLSX
+
+		goto rdst
+
+	case gc.TINT16<<16 | gc.TINT64,
+		gc.TINT16<<16 | gc.TUINT64:
+		a = x86.AMOVWQSX
+		goto rdst
+
+	case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16
+		gc.TUINT16<<16 | gc.TUINT32:
+		a = x86.AMOVWLZX
+
+		goto rdst
+
+	case gc.TUINT16<<16 | gc.TINT64,
+		gc.TUINT16<<16 | gc.TUINT64:
+		a = x86.AMOVWQZX
+		goto rdst
+
+	case gc.TINT32<<16 | gc.TINT64, // sign extend int32
+		gc.TINT32<<16 | gc.TUINT64:
+		a = x86.AMOVLQSX
+
+		goto rdst
+
+		// AMOVL into a register zeros the top of the register,
+	// so this is not always necessary, but if we rely on AMOVL
+	// the optimizer is almost certain to screw with us.
+	case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
+		gc.TUINT32<<16 | gc.TUINT64:
+		a = x86.AMOVLQZX
+
+		goto rdst
+
+		/*
+		* float to integer
+		 */
+	case gc.TFLOAT32<<16 | gc.TINT32:
+		a = x86.ACVTTSS2SL
+
+		goto rdst
+
+	case gc.TFLOAT64<<16 | gc.TINT32:
+		a = x86.ACVTTSD2SL
+		goto rdst
+
+	case gc.TFLOAT32<<16 | gc.TINT64:
+		a = x86.ACVTTSS2SQ
+		goto rdst
+
+	case gc.TFLOAT64<<16 | gc.TINT64:
+		a = x86.ACVTTSD2SQ
+		goto rdst
+
+		// convert via int32.
+	case gc.TFLOAT32<<16 | gc.TINT16,
+		gc.TFLOAT32<<16 | gc.TINT8,
+		gc.TFLOAT32<<16 | gc.TUINT16,
+		gc.TFLOAT32<<16 | gc.TUINT8,
+		gc.TFLOAT64<<16 | gc.TINT16,
+		gc.TFLOAT64<<16 | gc.TINT8,
+		gc.TFLOAT64<<16 | gc.TUINT16,
+		gc.TFLOAT64<<16 | gc.TUINT8:
+		cvt = gc.Types[gc.TINT32]
+
+		goto hard
+
+		// convert via int64.
+	case gc.TFLOAT32<<16 | gc.TUINT32,
+		gc.TFLOAT64<<16 | gc.TUINT32:
+		cvt = gc.Types[gc.TINT64]
+
+		goto hard
+
+		// algorithm is:
+	//	if small enough, use native float64 -> int64 conversion.
+	//	otherwise, subtract 2^63, convert, and add it back.
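+	//	(the "add it back" is done as XORQ with bigi = 2^63: the
+	//	converted value fits in 63 bits, so XOR and add agree)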
+	case gc.TFLOAT32<<16 | gc.TUINT64,
+		gc.TFLOAT64<<16 | gc.TUINT64:
+		a := x86.ACVTTSS2SQ
+
+		if ft == gc.TFLOAT64 {
+			a = x86.ACVTTSD2SQ
+		}
+		bignodes()
+		var r1 gc.Node
+		gc.Regalloc(&r1, gc.Types[ft], nil)
+		var r2 gc.Node
+		gc.Regalloc(&r2, gc.Types[tt], t)
+		var r3 gc.Node
+		gc.Regalloc(&r3, gc.Types[ft], nil)
+		var r4 gc.Node
+		gc.Regalloc(&r4, gc.Types[tt], nil)
+		gins(optoas(gc.OAS, f.Type), f, &r1)
+		gins(optoas(gc.OCMP, f.Type), &bigf, &r1)
+		p1 := gc.Gbranch(optoas(gc.OLE, f.Type), nil, +1)
+		gins(a, &r1, &r2)
+		p2 := gc.Gbranch(obj.AJMP, nil, 0)
+		gc.Patch(p1, gc.Pc)
+		gins(optoas(gc.OAS, f.Type), &bigf, &r3)
+		gins(optoas(gc.OSUB, f.Type), &r3, &r1)
+		gins(a, &r1, &r2)
+		gins(x86.AMOVQ, &bigi, &r4)
+		gins(x86.AXORQ, &r4, &r2)
+		gc.Patch(p2, gc.Pc)
+		gmove(&r2, t)
+		gc.Regfree(&r4)
+		gc.Regfree(&r3)
+		gc.Regfree(&r2)
+		gc.Regfree(&r1)
+		return
+
+		/*
+		 * integer to float
+		 */
+	case gc.TINT32<<16 | gc.TFLOAT32:
+		a = x86.ACVTSL2SS
+
+		goto rdst
+
+	case gc.TINT32<<16 | gc.TFLOAT64:
+		a = x86.ACVTSL2SD
+		goto rdst
+
+	case gc.TINT64<<16 | gc.TFLOAT32:
+		a = x86.ACVTSQ2SS
+		goto rdst
+
+	case gc.TINT64<<16 | gc.TFLOAT64:
+		a = x86.ACVTSQ2SD
+		goto rdst
+
+		// convert via int32
+	case gc.TINT16<<16 | gc.TFLOAT32,
+		gc.TINT16<<16 | gc.TFLOAT64,
+		gc.TINT8<<16 | gc.TFLOAT32,
+		gc.TINT8<<16 | gc.TFLOAT64,
+		gc.TUINT16<<16 | gc.TFLOAT32,
+		gc.TUINT16<<16 | gc.TFLOAT64,
+		gc.TUINT8<<16 | gc.TFLOAT32,
+		gc.TUINT8<<16 | gc.TFLOAT64:
+		cvt = gc.Types[gc.TINT32]
+
+		goto hard
+
+		// convert via int64.
+	case gc.TUINT32<<16 | gc.TFLOAT32,
+		gc.TUINT32<<16 | gc.TFLOAT64:
+		cvt = gc.Types[gc.TINT64]
+
+		goto hard
+
+		// algorithm is:
+	//	if small enough, use native int64 -> float conversion.
+	//	otherwise, halve (rounding to odd?), convert, and double.
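+	//	(the shifted-out low bit is ORed back into the halved value,
+	//	"round to odd", so the halve-then-convert double rounding
+	//	cannot compound into a wrong final result)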
+	case gc.TUINT64<<16 | gc.TFLOAT32,
+		gc.TUINT64<<16 | gc.TFLOAT64:
+		a := x86.ACVTSQ2SS
+
+		if tt == gc.TFLOAT64 {
+			a = x86.ACVTSQ2SD
+		}
+		var zero gc.Node
+		gc.Nodconst(&zero, gc.Types[gc.TUINT64], 0)
+		var one gc.Node
+		gc.Nodconst(&one, gc.Types[gc.TUINT64], 1)
+		var r1 gc.Node
+		gc.Regalloc(&r1, f.Type, f)
+		var r2 gc.Node
+		gc.Regalloc(&r2, t.Type, t)
+		var r3 gc.Node
+		gc.Regalloc(&r3, f.Type, nil)
+		var r4 gc.Node
+		gc.Regalloc(&r4, f.Type, nil)
+		gmove(f, &r1)
+		gins(x86.ACMPQ, &r1, &zero)
+		p1 := gc.Gbranch(x86.AJLT, nil, +1)
+		gins(a, &r1, &r2)
+		p2 := gc.Gbranch(obj.AJMP, nil, 0)
+		gc.Patch(p1, gc.Pc)
+		gmove(&r1, &r3)
+		gins(x86.ASHRQ, &one, &r3)
+		gmove(&r1, &r4)
+		gins(x86.AANDL, &one, &r4)
+		gins(x86.AORQ, &r4, &r3)
+		gins(a, &r3, &r2)
+		gins(optoas(gc.OADD, t.Type), &r2, &r2)
+		gc.Patch(p2, gc.Pc)
+		gmove(&r2, t)
+		gc.Regfree(&r4)
+		gc.Regfree(&r3)
+		gc.Regfree(&r2)
+		gc.Regfree(&r1)
+		return
+
+		/*
+		 * float to float
+		 */
+	case gc.TFLOAT32<<16 | gc.TFLOAT32:
+		a = x86.AMOVSS
+
+	case gc.TFLOAT64<<16 | gc.TFLOAT64:
+		a = x86.AMOVSD
+
+	case gc.TFLOAT32<<16 | gc.TFLOAT64:
+		a = x86.ACVTSS2SD
+		goto rdst
+
+	case gc.TFLOAT64<<16 | gc.TFLOAT32:
+		a = x86.ACVTSD2SS
+		goto rdst
+	}
+
+	gins(a, f, t)
+	return
+
+	// requires register destination
+rdst:
+	{
+		var r1 gc.Node
+		gc.Regalloc(&r1, t.Type, t)
+
+		gins(a, f, &r1)
+		gmove(&r1, t)
+		gc.Regfree(&r1)
+		return
+	}
+
+	// requires register intermediate
+hard:
+	var r1 gc.Node
+	gc.Regalloc(&r1, cvt, t)
+
+	gmove(f, &r1)
+	gmove(&r1, t)
+	gc.Regfree(&r1)
+	return
+}
+
+func samaddr(f *gc.Node, t *gc.Node) bool {
+	if f.Op != t.Op {
+		return false
+	}
+
+	switch f.Op {
+	case gc.OREGISTER:
+		if f.Reg != t.Reg {
+			break
+		}
+		return true
+	}
+
+	return false
+}
+
+/*
+ * generate one instruction:
+ *	as f, t
+ */
+func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
+	//	Node nod;
+
+	//	if(f != N && f->op == OINDEX) {
+	//		gc.Regalloc(&nod, &regnode, Z);
+	//		v = constnode.vconst;
+	//		gc.Cgen(f->right, &nod);
+	//		constnode.vconst = v;
+	//		idx.reg = nod.reg;
+	//		gc.Regfree(&nod);
+	//	}
+	//	if(t != N && t->op == OINDEX) {
+	//		gc.Regalloc(&nod, &regnode, Z);
+	//		v = constnode.vconst;
+	//		gc.Cgen(t->right, &nod);
+	//		constnode.vconst = v;
+	//		idx.reg = nod.reg;
+	//		gc.Regfree(&nod);
+	//	}
+
+	if f != nil && f.Op == gc.OADDR && (as == x86.AMOVL || as == x86.AMOVQ) {
+		// Turn MOVL $xxx into LEAL xxx.
+		// These should be equivalent but most of the backend
+		// only expects to see LEAL, because that's what we had
+		// historically generated. Various hidden assumptions are baked in by now.
+		if as == x86.AMOVL {
+			as = x86.ALEAL
+		} else {
+			as = x86.ALEAQ
+		}
+		f = f.Left
+	}
+
+	switch as {
+	case x86.AMOVB,
+		x86.AMOVW,
+		x86.AMOVL,
+		x86.AMOVQ,
+		x86.AMOVSS,
+		x86.AMOVSD:
+		if f != nil && t != nil && samaddr(f, t) {
+			return nil
+		}
+
+	case x86.ALEAQ:
+		if f != nil && gc.Isconst(f, gc.CTNIL) {
+			gc.Fatal("gins LEAQ nil %v", f.Type)
+		}
+	}
+
+	p := gc.Prog(as)
+	gc.Naddr(&p.From, f)
+	gc.Naddr(&p.To, t)
+
+	if gc.Debug['g'] != 0 {
+		fmt.Printf("%v\n", p)
+	}
+
+	w := int32(0)
+	switch as {
+	case x86.AMOVB:
+		w = 1
+
+	case x86.AMOVW:
+		w = 2
+
+	case x86.AMOVL:
+		w = 4
+
+	case x86.AMOVQ:
+		w = 8
+	}
+
+	if w != 0 && ((f != nil && p.From.Width < int64(w)) || (t != nil && p.To.Width > int64(w))) {
+		gc.Dump("f", f)
+		gc.Dump("t", t)
+		gc.Fatal("bad width: %v (%d, %d)\n", p, p.From.Width, p.To.Width)
+	}
+
+	if p.To.Type == obj.TYPE_ADDR && w > 0 {
+		gc.Fatal("bad use of addr: %v", p)
+	}
+
+	return p
+}
+
+func ginsnop() {
+	// This is actually not the x86 NOP anymore,
+	// but at the point where it gets used, AX is dead
+	// so it's okay if we lose the high bits.
+	var reg gc.Node
+	gc.Nodreg(&reg, gc.Types[gc.TINT], x86.REG_AX)
+	gins(x86.AXCHGL, &reg, &reg)
+}
+
+/*
+ * return Axxx for Oxxx on type t.
+ */
+func optoas(op int, t *gc.Type) int {
+	if t == nil {
+		gc.Fatal("optoas: t is nil")
+	}
+
+	a := obj.AXXX
+	switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
+	default:
+		gc.Fatal("optoas: no entry %v-%v", gc.Oconv(int(op), 0), t)
+
+	case gc.OADDR<<16 | gc.TPTR32:
+		a = x86.ALEAL
+
+	case gc.OADDR<<16 | gc.TPTR64:
+		a = x86.ALEAQ
+
+	case gc.OEQ<<16 | gc.TBOOL,
+		gc.OEQ<<16 | gc.TINT8,
+		gc.OEQ<<16 | gc.TUINT8,
+		gc.OEQ<<16 | gc.TINT16,
+		gc.OEQ<<16 | gc.TUINT16,
+		gc.OEQ<<16 | gc.TINT32,
+		gc.OEQ<<16 | gc.TUINT32,
+		gc.OEQ<<16 | gc.TINT64,
+		gc.OEQ<<16 | gc.TUINT64,
+		gc.OEQ<<16 | gc.TPTR32,
+		gc.OEQ<<16 | gc.TPTR64,
+		gc.OEQ<<16 | gc.TFLOAT32,
+		gc.OEQ<<16 | gc.TFLOAT64:
+		a = x86.AJEQ
+
+	case gc.ONE<<16 | gc.TBOOL,
+		gc.ONE<<16 | gc.TINT8,
+		gc.ONE<<16 | gc.TUINT8,
+		gc.ONE<<16 | gc.TINT16,
+		gc.ONE<<16 | gc.TUINT16,
+		gc.ONE<<16 | gc.TINT32,
+		gc.ONE<<16 | gc.TUINT32,
+		gc.ONE<<16 | gc.TINT64,
+		gc.ONE<<16 | gc.TUINT64,
+		gc.ONE<<16 | gc.TPTR32,
+		gc.ONE<<16 | gc.TPTR64,
+		gc.ONE<<16 | gc.TFLOAT32,
+		gc.ONE<<16 | gc.TFLOAT64:
+		a = x86.AJNE
+
+	case gc.OPS<<16 | gc.TBOOL,
+		gc.OPS<<16 | gc.TINT8,
+		gc.OPS<<16 | gc.TUINT8,
+		gc.OPS<<16 | gc.TINT16,
+		gc.OPS<<16 | gc.TUINT16,
+		gc.OPS<<16 | gc.TINT32,
+		gc.OPS<<16 | gc.TUINT32,
+		gc.OPS<<16 | gc.TINT64,
+		gc.OPS<<16 | gc.TUINT64,
+		gc.OPS<<16 | gc.TPTR32,
+		gc.OPS<<16 | gc.TPTR64,
+		gc.OPS<<16 | gc.TFLOAT32,
+		gc.OPS<<16 | gc.TFLOAT64:
+		a = x86.AJPS
+
+	case gc.OPC<<16 | gc.TBOOL,
+		gc.OPC<<16 | gc.TINT8,
+		gc.OPC<<16 | gc.TUINT8,
+		gc.OPC<<16 | gc.TINT16,
+		gc.OPC<<16 | gc.TUINT16,
+		gc.OPC<<16 | gc.TINT32,
+		gc.OPC<<16 | gc.TUINT32,
+		gc.OPC<<16 | gc.TINT64,
+		gc.OPC<<16 | gc.TUINT64,
+		gc.OPC<<16 | gc.TPTR32,
+		gc.OPC<<16 | gc.TPTR64,
+		gc.OPC<<16 | gc.TFLOAT32,
+		gc.OPC<<16 | gc.TFLOAT64:
+		a = x86.AJPC
+
+	case gc.OLT<<16 | gc.TINT8,
+		gc.OLT<<16 | gc.TINT16,
+		gc.OLT<<16 | gc.TINT32,
+		gc.OLT<<16 | gc.TINT64:
+		a = x86.AJLT
+
+	case gc.OLT<<16 | gc.TUINT8,
+		gc.OLT<<16 | gc.TUINT16,
+		gc.OLT<<16 | gc.TUINT32,
+		gc.OLT<<16 | gc.TUINT64:
+		a = x86.AJCS
+
+	case gc.OLE<<16 | gc.TINT8,
+		gc.OLE<<16 | gc.TINT16,
+		gc.OLE<<16 | gc.TINT32,
+		gc.OLE<<16 | gc.TINT64:
+		a = x86.AJLE
+
+	case gc.OLE<<16 | gc.TUINT8,
+		gc.OLE<<16 | gc.TUINT16,
+		gc.OLE<<16 | gc.TUINT32,
+		gc.OLE<<16 | gc.TUINT64:
+		a = x86.AJLS
+
+	case gc.OGT<<16 | gc.TINT8,
+		gc.OGT<<16 | gc.TINT16,
+		gc.OGT<<16 | gc.TINT32,
+		gc.OGT<<16 | gc.TINT64:
+		a = x86.AJGT
+
+	case gc.OGT<<16 | gc.TUINT8,
+		gc.OGT<<16 | gc.TUINT16,
+		gc.OGT<<16 | gc.TUINT32,
+		gc.OGT<<16 | gc.TUINT64,
+		gc.OLT<<16 | gc.TFLOAT32,
+		gc.OLT<<16 | gc.TFLOAT64:
+		a = x86.AJHI
+
+	case gc.OGE<<16 | gc.TINT8,
+		gc.OGE<<16 | gc.TINT16,
+		gc.OGE<<16 | gc.TINT32,
+		gc.OGE<<16 | gc.TINT64:
+		a = x86.AJGE
+
+	case gc.OGE<<16 | gc.TUINT8,
+		gc.OGE<<16 | gc.TUINT16,
+		gc.OGE<<16 | gc.TUINT32,
+		gc.OGE<<16 | gc.TUINT64,
+		gc.OLE<<16 | gc.TFLOAT32,
+		gc.OLE<<16 | gc.TFLOAT64:
+		a = x86.AJCC
+
+	case gc.OCMP<<16 | gc.TBOOL,
+		gc.OCMP<<16 | gc.TINT8,
+		gc.OCMP<<16 | gc.TUINT8:
+		a = x86.ACMPB
+
+	case gc.OCMP<<16 | gc.TINT16,
+		gc.OCMP<<16 | gc.TUINT16:
+		a = x86.ACMPW
+
+	case gc.OCMP<<16 | gc.TINT32,
+		gc.OCMP<<16 | gc.TUINT32,
+		gc.OCMP<<16 | gc.TPTR32:
+		a = x86.ACMPL
+
+	case gc.OCMP<<16 | gc.TINT64,
+		gc.OCMP<<16 | gc.TUINT64,
+		gc.OCMP<<16 | gc.TPTR64:
+		a = x86.ACMPQ
+
+	case gc.OCMP<<16 | gc.TFLOAT32:
+		a = x86.AUCOMISS
+
+	case gc.OCMP<<16 | gc.TFLOAT64:
+		a = x86.AUCOMISD
+
+	case gc.OAS<<16 | gc.TBOOL,
+		gc.OAS<<16 | gc.TINT8,
+		gc.OAS<<16 | gc.TUINT8:
+		a = x86.AMOVB
+
+	case gc.OAS<<16 | gc.TINT16,
+		gc.OAS<<16 | gc.TUINT16:
+		a = x86.AMOVW
+
+	case gc.OAS<<16 | gc.TINT32,
+		gc.OAS<<16 | gc.TUINT32,
+		gc.OAS<<16 | gc.TPTR32:
+		a = x86.AMOVL
+
+	case gc.OAS<<16 | gc.TINT64,
+		gc.OAS<<16 | gc.TUINT64,
+		gc.OAS<<16 | gc.TPTR64:
+		a = x86.AMOVQ
+
+	case gc.OAS<<16 | gc.TFLOAT32:
+		a = x86.AMOVSS
+
+	case gc.OAS<<16 | gc.TFLOAT64:
+		a = x86.AMOVSD
+
+	case gc.OADD<<16 | gc.TINT8,
+		gc.OADD<<16 | gc.TUINT8:
+		a = x86.AADDB
+
+	case gc.OADD<<16 | gc.TINT16,
+		gc.OADD<<16 | gc.TUINT16:
+		a = x86.AADDW
+
+	case gc.OADD<<16 | gc.TINT32,
+		gc.OADD<<16 | gc.TUINT32,
+		gc.OADD<<16 | gc.TPTR32:
+		a = x86.AADDL
+
+	case gc.OADD<<16 | gc.TINT64,
+		gc.OADD<<16 | gc.TUINT64,
+		gc.OADD<<16 | gc.TPTR64:
+		a = x86.AADDQ
+
+	case gc.OADD<<16 | gc.TFLOAT32:
+		a = x86.AADDSS
+
+	case gc.OADD<<16 | gc.TFLOAT64:
+		a = x86.AADDSD
+
+	case gc.OSUB<<16 | gc.TINT8,
+		gc.OSUB<<16 | gc.TUINT8:
+		a = x86.ASUBB
+
+	case gc.OSUB<<16 | gc.TINT16,
+		gc.OSUB<<16 | gc.TUINT16:
+		a = x86.ASUBW
+
+	case gc.OSUB<<16 | gc.TINT32,
+		gc.OSUB<<16 | gc.TUINT32,
+		gc.OSUB<<16 | gc.TPTR32:
+		a = x86.ASUBL
+
+	case gc.OSUB<<16 | gc.TINT64,
+		gc.OSUB<<16 | gc.TUINT64,
+		gc.OSUB<<16 | gc.TPTR64:
+		a = x86.ASUBQ
+
+	case gc.OSUB<<16 | gc.TFLOAT32:
+		a = x86.ASUBSS
+
+	case gc.OSUB<<16 | gc.TFLOAT64:
+		a = x86.ASUBSD
+
+	case gc.OINC<<16 | gc.TINT8,
+		gc.OINC<<16 | gc.TUINT8:
+		a = x86.AINCB
+
+	case gc.OINC<<16 | gc.TINT16,
+		gc.OINC<<16 | gc.TUINT16:
+		a = x86.AINCW
+
+	case gc.OINC<<16 | gc.TINT32,
+		gc.OINC<<16 | gc.TUINT32,
+		gc.OINC<<16 | gc.TPTR32:
+		a = x86.AINCL
+
+	case gc.OINC<<16 | gc.TINT64,
+		gc.OINC<<16 | gc.TUINT64,
+		gc.OINC<<16 | gc.TPTR64:
+		a = x86.AINCQ
+
+	case gc.ODEC<<16 | gc.TINT8,
+		gc.ODEC<<16 | gc.TUINT8:
+		a = x86.ADECB
+
+	case gc.ODEC<<16 | gc.TINT16,
+		gc.ODEC<<16 | gc.TUINT16:
+		a = x86.ADECW
+
+	case gc.ODEC<<16 | gc.TINT32,
+		gc.ODEC<<16 | gc.TUINT32,
+		gc.ODEC<<16 | gc.TPTR32:
+		a = x86.ADECL
+
+	case gc.ODEC<<16 | gc.TINT64,
+		gc.ODEC<<16 | gc.TUINT64,
+		gc.ODEC<<16 | gc.TPTR64:
+		a = x86.ADECQ
+
+	case gc.OMINUS<<16 | gc.TINT8,
+		gc.OMINUS<<16 | gc.TUINT8:
+		a = x86.ANEGB
+
+	case gc.OMINUS<<16 | gc.TINT16,
+		gc.OMINUS<<16 | gc.TUINT16:
+		a = x86.ANEGW
+
+	case gc.OMINUS<<16 | gc.TINT32,
+		gc.OMINUS<<16 | gc.TUINT32,
+		gc.OMINUS<<16 | gc.TPTR32:
+		a = x86.ANEGL
+
+	case gc.OMINUS<<16 | gc.TINT64,
+		gc.OMINUS<<16 | gc.TUINT64,
+		gc.OMINUS<<16 | gc.TPTR64:
+		a = x86.ANEGQ
+
+	case gc.OAND<<16 | gc.TBOOL,
+		gc.OAND<<16 | gc.TINT8,
+		gc.OAND<<16 | gc.TUINT8:
+		a = x86.AANDB
+
+	case gc.OAND<<16 | gc.TINT16,
+		gc.OAND<<16 | gc.TUINT16:
+		a = x86.AANDW
+
+	case gc.OAND<<16 | gc.TINT32,
+		gc.OAND<<16 | gc.TUINT32,
+		gc.OAND<<16 | gc.TPTR32:
+		a = x86.AANDL
+
+	case gc.OAND<<16 | gc.TINT64,
+		gc.OAND<<16 | gc.TUINT64,
+		gc.OAND<<16 | gc.TPTR64:
+		a = x86.AANDQ
+
+	case gc.OOR<<16 | gc.TBOOL,
+		gc.OOR<<16 | gc.TINT8,
+		gc.OOR<<16 | gc.TUINT8:
+		a = x86.AORB
+
+	case gc.OOR<<16 | gc.TINT16,
+		gc.OOR<<16 | gc.TUINT16:
+		a = x86.AORW
+
+	case gc.OOR<<16 | gc.TINT32,
+		gc.OOR<<16 | gc.TUINT32,
+		gc.OOR<<16 | gc.TPTR32:
+		a = x86.AORL
+
+	case gc.OOR<<16 | gc.TINT64,
+		gc.OOR<<16 | gc.TUINT64,
+		gc.OOR<<16 | gc.TPTR64:
+		a = x86.AORQ
+
+	case gc.OXOR<<16 | gc.TINT8,
+		gc.OXOR<<16 | gc.TUINT8:
+		a = x86.AXORB
+
+	case gc.OXOR<<16 | gc.TINT16,
+		gc.OXOR<<16 | gc.TUINT16:
+		a = x86.AXORW
+
+	case gc.OXOR<<16 | gc.TINT32,
+		gc.OXOR<<16 | gc.TUINT32,
+		gc.OXOR<<16 | gc.TPTR32:
+		a = x86.AXORL
+
+	case gc.OXOR<<16 | gc.TINT64,
+		gc.OXOR<<16 | gc.TUINT64,
+		gc.OXOR<<16 | gc.TPTR64:
+		a = x86.AXORQ
+
+	case gc.OLROT<<16 | gc.TINT8,
+		gc.OLROT<<16 | gc.TUINT8:
+		a = x86.AROLB
+
+	case gc.OLROT<<16 | gc.TINT16,
+		gc.OLROT<<16 | gc.TUINT16:
+		a = x86.AROLW
+
+	case gc.OLROT<<16 | gc.TINT32,
+		gc.OLROT<<16 | gc.TUINT32,
+		gc.OLROT<<16 | gc.TPTR32:
+		a = x86.AROLL
+
+	case gc.OLROT<<16 | gc.TINT64,
+		gc.OLROT<<16 | gc.TUINT64,
+		gc.OLROT<<16 | gc.TPTR64:
+		a = x86.AROLQ
+
+	case gc.OLSH<<16 | gc.TINT8,
+		gc.OLSH<<16 | gc.TUINT8:
+		a = x86.ASHLB
+
+	case gc.OLSH<<16 | gc.TINT16,
+		gc.OLSH<<16 | gc.TUINT16:
+		a = x86.ASHLW
+
+	case gc.OLSH<<16 | gc.TINT32,
+		gc.OLSH<<16 | gc.TUINT32,
+		gc.OLSH<<16 | gc.TPTR32:
+		a = x86.ASHLL
+
+	case gc.OLSH<<16 | gc.TINT64,
+		gc.OLSH<<16 | gc.TUINT64,
+		gc.OLSH<<16 | gc.TPTR64:
+		a = x86.ASHLQ
+
+	case gc.ORSH<<16 | gc.TUINT8:
+		a = x86.ASHRB
+
+	case gc.ORSH<<16 | gc.TUINT16:
+		a = x86.ASHRW
+
+	case gc.ORSH<<16 | gc.TUINT32,
+		gc.ORSH<<16 | gc.TPTR32:
+		a = x86.ASHRL
+
+	case gc.ORSH<<16 | gc.TUINT64,
+		gc.ORSH<<16 | gc.TPTR64:
+		a = x86.ASHRQ
+
+	case gc.ORSH<<16 | gc.TINT8:
+		a = x86.ASARB
+
+	case gc.ORSH<<16 | gc.TINT16:
+		a = x86.ASARW
+
+	case gc.ORSH<<16 | gc.TINT32:
+		a = x86.ASARL
+
+	case gc.ORSH<<16 | gc.TINT64:
+		a = x86.ASARQ
+
+	case gc.ORROTC<<16 | gc.TINT8,
+		gc.ORROTC<<16 | gc.TUINT8:
+		a = x86.ARCRB
+
+	case gc.ORROTC<<16 | gc.TINT16,
+		gc.ORROTC<<16 | gc.TUINT16:
+		a = x86.ARCRW
+
+	case gc.ORROTC<<16 | gc.TINT32,
+		gc.ORROTC<<16 | gc.TUINT32:
+		a = x86.ARCRL
+
+	case gc.ORROTC<<16 | gc.TINT64,
+		gc.ORROTC<<16 | gc.TUINT64:
+		a = x86.ARCRQ
+
+	case gc.OHMUL<<16 | gc.TINT8,
+		gc.OMUL<<16 | gc.TINT8,
+		gc.OMUL<<16 | gc.TUINT8:
+		a = x86.AIMULB
+
+	case gc.OHMUL<<16 | gc.TINT16,
+		gc.OMUL<<16 | gc.TINT16,
+		gc.OMUL<<16 | gc.TUINT16:
+		a = x86.AIMULW
+
+	case gc.OHMUL<<16 | gc.TINT32,
+		gc.OMUL<<16 | gc.TINT32,
+		gc.OMUL<<16 | gc.TUINT32,
+		gc.OMUL<<16 | gc.TPTR32:
+		a = x86.AIMULL
+
+	case gc.OHMUL<<16 | gc.TINT64,
+		gc.OMUL<<16 | gc.TINT64,
+		gc.OMUL<<16 | gc.TUINT64,
+		gc.OMUL<<16 | gc.TPTR64:
+		a = x86.AIMULQ
+
+	case gc.OHMUL<<16 | gc.TUINT8:
+		a = x86.AMULB
+
+	case gc.OHMUL<<16 | gc.TUINT16:
+		a = x86.AMULW
+
+	case gc.OHMUL<<16 | gc.TUINT32,
+		gc.OHMUL<<16 | gc.TPTR32:
+		a = x86.AMULL
+
+	case gc.OHMUL<<16 | gc.TUINT64,
+		gc.OHMUL<<16 | gc.TPTR64:
+		a = x86.AMULQ
+
+	case gc.OMUL<<16 | gc.TFLOAT32:
+		a = x86.AMULSS
+
+	case gc.OMUL<<16 | gc.TFLOAT64:
+		a = x86.AMULSD
+
+	case gc.ODIV<<16 | gc.TINT8,
+		gc.OMOD<<16 | gc.TINT8:
+		a = x86.AIDIVB
+
+	case gc.ODIV<<16 | gc.TUINT8,
+		gc.OMOD<<16 | gc.TUINT8:
+		a = x86.ADIVB
+
+	case gc.ODIV<<16 | gc.TINT16,
+		gc.OMOD<<16 | gc.TINT16:
+		a = x86.AIDIVW
+
+	case gc.ODIV<<16 | gc.TUINT16,
+		gc.OMOD<<16 | gc.TUINT16:
+		a = x86.ADIVW
+
+	case gc.ODIV<<16 | gc.TINT32,
+		gc.OMOD<<16 | gc.TINT32:
+		a = x86.AIDIVL
+
+	case gc.ODIV<<16 | gc.TUINT32,
+		gc.ODIV<<16 | gc.TPTR32,
+		gc.OMOD<<16 | gc.TUINT32,
+		gc.OMOD<<16 | gc.TPTR32:
+		a = x86.ADIVL
+
+	case gc.ODIV<<16 | gc.TINT64,
+		gc.OMOD<<16 | gc.TINT64:
+		a = x86.AIDIVQ
+
+	case gc.ODIV<<16 | gc.TUINT64,
+		gc.ODIV<<16 | gc.TPTR64,
+		gc.OMOD<<16 | gc.TUINT64,
+		gc.OMOD<<16 | gc.TPTR64:
+		a = x86.ADIVQ
+
+	case gc.OEXTEND<<16 | gc.TINT16:
+		a = x86.ACWD
+
+	case gc.OEXTEND<<16 | gc.TINT32:
+		a = x86.ACDQ
+
+	case gc.OEXTEND<<16 | gc.TINT64:
+		a = x86.ACQO
+
+	case gc.ODIV<<16 | gc.TFLOAT32:
+		a = x86.ADIVSS
+
+	case gc.ODIV<<16 | gc.TFLOAT64:
+		a = x86.ADIVSD
+
+	case gc.OSQRT<<16 | gc.TFLOAT64:
+		a = x86.ASQRTSD
+	}
+
+	return a
+}
+
+// jmptoset returns ASETxx for AJxx.
+func jmptoset(jmp int) int {
+	switch jmp {
+	case x86.AJEQ:
+		return x86.ASETEQ
+	case x86.AJNE:
+		return x86.ASETNE
+	case x86.AJLT:
+		return x86.ASETLT
+	case x86.AJCS:
+		return x86.ASETCS
+	case x86.AJLE:
+		return x86.ASETLE
+	case x86.AJLS:
+		return x86.ASETLS
+	case x86.AJGT:
+		return x86.ASETGT
+	case x86.AJHI:
+		return x86.ASETHI
+	case x86.AJGE:
+		return x86.ASETGE
+	case x86.AJCC:
+		return x86.ASETCC
+	case x86.AJMI:
+		return x86.ASETMI
+	case x86.AJOC:
+		return x86.ASETOC
+	case x86.AJOS:
+		return x86.ASETOS
+	case x86.AJPC:
+		return x86.ASETPC
+	case x86.AJPL:
+		return x86.ASETPL
+	case x86.AJPS:
+		return x86.ASETPS
+	}
+	gc.Fatal("jmptoset: no entry for %v", gc.Oconv(jmp, 0))
+	panic("unreachable")
+}
+
+const (
+	ODynam   = 1 << 0
+	OAddable = 1 << 1
+)
+
+var clean [20]gc.Node
+
+var cleani int = 0
+
+func sudoclean() {
+	if clean[cleani-1].Op != gc.OEMPTY {
+		gc.Regfree(&clean[cleani-1])
+	}
+	if clean[cleani-2].Op != gc.OEMPTY {
+		gc.Regfree(&clean[cleani-2])
+	}
+	cleani -= 2
+}
+
+/*
+ * generate code to compute address of n,
+ * a reference to a (perhaps nested) field inside
+ * an array or struct.
+ * return false on failure, true on success.
+ * on success, leaves usable address in a.
+ *
+ * caller is responsible for calling sudoclean
+ * after successful sudoaddable,
+ * to release the register used for a.
+ */
+func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
+	if n.Type == nil {
+		return false
+	}
+
+	*a = obj.Addr{}
+
+	switch n.Op {
+	case gc.OLITERAL:
+		if !gc.Isconst(n, gc.CTINT) {
+			break
+		}
+		v := n.Int()
+		if v >= 32000 || v <= -32000 {
+			break
+		}
+		switch as {
+		default:
+			return false
+
+		case x86.AADDB,
+			x86.AADDW,
+			x86.AADDL,
+			x86.AADDQ,
+			x86.ASUBB,
+			x86.ASUBW,
+			x86.ASUBL,
+			x86.ASUBQ,
+			x86.AANDB,
+			x86.AANDW,
+			x86.AANDL,
+			x86.AANDQ,
+			x86.AORB,
+			x86.AORW,
+			x86.AORL,
+			x86.AORQ,
+			x86.AXORB,
+			x86.AXORW,
+			x86.AXORL,
+			x86.AXORQ,
+			x86.AINCB,
+			x86.AINCW,
+			x86.AINCL,
+			x86.AINCQ,
+			x86.ADECB,
+			x86.ADECW,
+			x86.ADECL,
+			x86.ADECQ,
+			x86.AMOVB,
+			x86.AMOVW,
+			x86.AMOVL,
+			x86.AMOVQ:
+			break
+		}
+
+		cleani += 2
+		reg := &clean[cleani-1]
+		reg1 := &clean[cleani-2]
+		reg.Op = gc.OEMPTY
+		reg1.Op = gc.OEMPTY
+		gc.Naddr(a, n)
+		return true
+
+	case gc.ODOT,
+		gc.ODOTPTR:
+		cleani += 2
+		reg := &clean[cleani-1]
+		reg1 := &clean[cleani-2]
+		reg.Op = gc.OEMPTY
+		reg1.Op = gc.OEMPTY
+		var nn *gc.Node
+		var oary [10]int64
+		o := gc.Dotoffset(n, oary[:], &nn)
+		if nn == nil {
+			sudoclean()
+			return false
+		}
+
+		if nn.Addable && o == 1 && oary[0] >= 0 {
+			// directly addressable set of DOTs
+			n1 := *nn
+
+			n1.Type = n.Type
+			n1.Xoffset += oary[0]
+			gc.Naddr(a, &n1)
+			return true
+		}
+
+		gc.Regalloc(reg, gc.Types[gc.Tptr], nil)
+		n1 := *reg
+		n1.Op = gc.OINDREG
+		if oary[0] >= 0 {
+			gc.Agen(nn, reg)
+			n1.Xoffset = oary[0]
+		} else {
+			gc.Cgen(nn, reg)
+			gc.Cgen_checknil(reg)
+			n1.Xoffset = -(oary[0] + 1)
+		}
+
+		for i := 1; i < o; i++ {
+			if oary[i] >= 0 {
+				gc.Fatal("can't happen")
+			}
+			gins(movptr, &n1, reg)
+			gc.Cgen_checknil(reg)
+			n1.Xoffset = -(oary[i] + 1)
+		}
+
+		a.Type = obj.TYPE_NONE
+		a.Index = obj.TYPE_NONE
+		gc.Fixlargeoffset(&n1)
+		gc.Naddr(a, &n1)
+		return true
+
+	case gc.OINDEX:
+		return false
+	}
+
+	return false
+}
diff --git a/src/cmd/compile/internal/amd64/peep.go b/src/cmd/compile/internal/amd64/peep.go
new file mode 100644
index 0000000..19db68e9
--- /dev/null
+++ b/src/cmd/compile/internal/amd64/peep.go
@@ -0,0 +1,1038 @@
+// Derived from Inferno utils/6c/peep.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/peep.c
+//
+//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
+//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+//	Portions Copyright © 1997-1999 Vita Nuova Limited
+//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+//	Portions Copyright © 2004,2006 Bruce Ellis
+//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+//	Portions Copyright © 2009 The Go Authors.  All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package amd64
+
+import (
+	"cmd/compile/internal/gc"
+	"cmd/internal/obj"
+	"cmd/internal/obj/x86"
+	"fmt"
+)
+
+var gactive uint32
+
+const (
+	exregoffset = x86.REG_R15
+)
+
+// needc reports whether some instruction from p onward reads the carry
+// bit before it is next set or killed.
+func needc(p *obj.Prog) bool {
+	for p != nil {
+		flags := progcarryflags(p)
+		if flags&gc.UseCarry != 0 {
+			return true
+		}
+		if flags&(gc.SetCarry|gc.KillCarry) != 0 {
+			return false
+		}
+		p = p.Link
+	}
+
+	return false
+}
+
+func rnops(r *gc.Flow) *gc.Flow {
+	if r != nil {
+		var p *obj.Prog
+		var r1 *gc.Flow
+		for {
+			p = r.Prog
+			if p.As != obj.ANOP || p.From.Type != obj.TYPE_NONE || p.To.Type != obj.TYPE_NONE {
+				break
+			}
+			r1 = gc.Uniqs(r)
+			if r1 == nil {
+				break
+			}
+			r = r1
+		}
+	}
+
+	return r
+}
+
+func peep(firstp *obj.Prog) {
+	g := (*gc.Graph)(gc.Flowstart(firstp, nil))
+	if g == nil {
+		return
+	}
+	gactive = 0
+
+	// byte, word arithmetic elimination.
+	elimshortmov(g)
+
+	// constant propagation
+	// find MOV $con,R followed by
+	// another MOV $con,R without
+	// setting R in the interim
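+	// e.g. (illustrative):
+	//	MOVQ	$0, AX
+	//	...             // no write to AX in between
+	//	MOVQ	$0, AX  // redundant; deleted by conprop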
+	var p *obj.Prog
+	for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
+		p = r.Prog
+		switch p.As {
+		case x86.ALEAL,
+			x86.ALEAQ:
+			if regtyp(&p.To) {
+				if p.From.Sym != nil {
+					if p.From.Index == x86.REG_NONE {
+						conprop(r)
+					}
+				}
+			}
+
+		case x86.AMOVB,
+			x86.AMOVW,
+			x86.AMOVL,
+			x86.AMOVQ,
+			x86.AMOVSS,
+			x86.AMOVSD:
+			if regtyp(&p.To) {
+				if p.From.Type == obj.TYPE_CONST || p.From.Type == obj.TYPE_FCONST {
+					conprop(r)
+				}
+			}
+		}
+	}
+
+	var r *gc.Flow
+	var r1 *gc.Flow
+	var p1 *obj.Prog
+	var t int
+loop1:
+	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+		gc.Dumpit("loop1", g.Start, 0)
+	}
+
+	t = 0
+	for r = g.Start; r != nil; r = r.Link {
+		p = r.Prog
+		switch p.As {
+		case x86.AMOVL,
+			x86.AMOVQ,
+			x86.AMOVSS,
+			x86.AMOVSD:
+			if regtyp(&p.To) {
+				if regtyp(&p.From) {
+					if copyprop(g, r) {
+						excise(r)
+						t++
+					} else if subprop(r) && copyprop(g, r) {
+						excise(r)
+						t++
+					}
+				}
+			}
+
+		case x86.AMOVBLZX,
+			x86.AMOVWLZX,
+			x86.AMOVBLSX,
+			x86.AMOVWLSX:
+			if regtyp(&p.To) {
+				r1 = rnops(gc.Uniqs(r))
+				if r1 != nil {
+					p1 = r1.Prog
+					if p.As == p1.As && p.To.Type == p1.From.Type && p.To.Reg == p1.From.Reg {
+						p1.As = x86.AMOVL
+						t++
+					}
+				}
+			}
+
+		case x86.AMOVBQSX,
+			x86.AMOVBQZX,
+			x86.AMOVWQSX,
+			x86.AMOVWQZX,
+			x86.AMOVLQSX,
+			x86.AMOVLQZX,
+			x86.AMOVQL:
+			if regtyp(&p.To) {
+				r1 = rnops(gc.Uniqs(r))
+				if r1 != nil {
+					p1 = r1.Prog
+					if p.As == p1.As && p.To.Type == p1.From.Type && p.To.Reg == p1.From.Reg {
+						p1.As = x86.AMOVQ
+						t++
+					}
+				}
+			}
+
+		case x86.AADDL,
+			x86.AADDQ,
+			x86.AADDW:
+			if p.From.Type != obj.TYPE_CONST || needc(p.Link) {
+				break
+			}
+			if p.From.Offset == -1 {
+				if p.As == x86.AADDQ {
+					p.As = x86.ADECQ
+				} else if p.As == x86.AADDL {
+					p.As = x86.ADECL
+				} else {
+					p.As = x86.ADECW
+				}
+				p.From = obj.Addr{}
+				break
+			}
+
+			if p.From.Offset == 1 {
+				if p.As == x86.AADDQ {
+					p.As = x86.AINCQ
+				} else if p.As == x86.AADDL {
+					p.As = x86.AINCL
+				} else {
+					p.As = x86.AINCW
+				}
+				p.From = obj.Addr{}
+				break
+			}
+
+		case x86.ASUBL,
+			x86.ASUBQ,
+			x86.ASUBW:
+			if p.From.Type != obj.TYPE_CONST || needc(p.Link) {
+				break
+			}
+			if p.From.Offset == -1 {
+				if p.As == x86.ASUBQ {
+					p.As = x86.AINCQ
+				} else if p.As == x86.ASUBL {
+					p.As = x86.AINCL
+				} else {
+					p.As = x86.AINCW
+				}
+				p.From = obj.Addr{}
+				break
+			}
+
+			if p.From.Offset == 1 {
+				if p.As == x86.ASUBQ {
+					p.As = x86.ADECQ
+				} else if p.As == x86.ASUBL {
+					p.As = x86.ADECL
+				} else {
+					p.As = x86.ADECW
+				}
+				p.From = obj.Addr{}
+				break
+			}
+		}
+	}
+
+	if t != 0 {
+		goto loop1
+	}
+
+	// MOVLQZX removal.
+	// The MOVLQZX exists to avoid being confused for a
+	// MOVL that is just copying 32-bit data around during
+	// copyprop.  Now that copyprop is done, remove MOVLQZX R1, R2
+	// if it is dominated by an earlier ADDL/MOVL/etc into R1 that
+	// will have already cleared the high bits.
+	//
+	// MOVSD removal.
+	// We never use packed registers, so a MOVSD between registers
+	// can be replaced by MOVAPD, which moves the pair of float64s
+	// instead of just the lower one.  We only use the lower one, but
+	// the processor can do better if we do moves using both.
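+	//
+	// e.g. (illustrative):
+	//	MOVL	(BX), AX    // any L-sized write already zeroes AX's high 32 bits
+	//	MOVLQZX	AX, AX      // dominated by the MOVL above; excised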
+	for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
+		p = r.Prog
+		if p.As == x86.AMOVLQZX {
+			if regtyp(&p.From) {
+				if p.From.Type == p.To.Type && p.From.Reg == p.To.Reg {
+					if prevl(r, int(p.From.Reg)) {
+						excise(r)
+					}
+				}
+			}
+		}
+
+		if p.As == x86.AMOVSD {
+			if regtyp(&p.From) {
+				if regtyp(&p.To) {
+					p.As = x86.AMOVAPD
+				}
+			}
+		}
+	}
+
+	// load pipelining
+	// push any load from memory as early as possible
+	// to give it time to complete before use.
+	for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
+		p = r.Prog
+		switch p.As {
+		case x86.AMOVB,
+			x86.AMOVW,
+			x86.AMOVL,
+			x86.AMOVQ,
+			x86.AMOVLQZX:
+			if regtyp(&p.To) && !regconsttyp(&p.From) {
+				pushback(r)
+			}
+		}
+	}
+
+	gc.Flowend(g)
+}
+
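+// pushback tries to move the load/constant instruction r0 as early as
+// possible in its basic block, stopping at CALLs and at any instruction
+// whose operands conflict with r0's source or destination.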
+func pushback(r0 *gc.Flow) {
+	var r *gc.Flow
+	var p *obj.Prog
+
+	var b *gc.Flow
+	p0 := (*obj.Prog)(r0.Prog)
+	for r = gc.Uniqp(r0); r != nil && gc.Uniqs(r) != nil; r = gc.Uniqp(r) {
+		p = r.Prog
+		if p.As != obj.ANOP {
+			if !regconsttyp(&p.From) || !regtyp(&p.To) {
+				break
+			}
+			if copyu(p, &p0.To, nil) != 0 || copyu(p0, &p.To, nil) != 0 {
+				break
+			}
+		}
+
+		if p.As == obj.ACALL {
+			break
+		}
+		b = r
+	}
+
+	if b == nil {
+		if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+			fmt.Printf("no pushback: %v\n", r0.Prog)
+			if r != nil {
+				fmt.Printf("\t%v [%v]\n", r.Prog, gc.Uniqs(r) != nil)
+			}
+		}
+
+		return
+	}
+
+	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+		fmt.Printf("pushback\n")
+		for r := (*gc.Flow)(b); ; r = r.Link {
+			fmt.Printf("\t%v\n", r.Prog)
+			if r == r0 {
+				break
+			}
+		}
+	}
+
+	t := obj.Prog(*r0.Prog)
+	for r = gc.Uniqp(r0); ; r = gc.Uniqp(r) {
+		p0 = r.Link.Prog
+		p = r.Prog
+		p0.As = p.As
+		p0.Lineno = p.Lineno
+		p0.From = p.From
+		p0.To = p.To
+
+		if r == b {
+			break
+		}
+	}
+
+	p0 = r.Prog
+	p0.As = t.As
+	p0.Lineno = t.Lineno
+	p0.From = t.From
+	p0.To = t.To
+
+	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+		fmt.Printf("\tafter\n")
+		for r := (*gc.Flow)(b); ; r = r.Link {
+			fmt.Printf("\t%v\n", r.Prog)
+			if r == r0 {
+				break
+			}
+		}
+	}
+}
+
+func excise(r *gc.Flow) {
+	p := (*obj.Prog)(r.Prog)
+	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+		fmt.Printf("%v ===delete===\n", p)
+	}
+
+	obj.Nopout(p)
+
+	gc.Ostats.Ndelmov++
+}
+
+func regtyp(a *obj.Addr) bool {
+	return a.Type == obj.TYPE_REG && (x86.REG_AX <= a.Reg && a.Reg <= x86.REG_R15 || x86.REG_X0 <= a.Reg && a.Reg <= x86.REG_X15)
+}
+
+// movb elimination.
+// movb is simulated by the linker
+// when a register other than ax, bx, cx, dx
+// is used, so rewrite to other instructions
+// when possible.  a movb into a register
+// can smash the entire 32-bit register without
+// causing any trouble.
+//
+// TODO: Using the Q forms here instead of the L forms
+// seems unnecessary, and it makes the instructions longer.
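+//
+// For example (illustrative):
+//	MOVB	$9, DX   ->  MOVQ	$9, DX
+//	ADDB	CX, DX   ->  ADDQ	CX, DX  (only if the carry result is dead)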
+func elimshortmov(g *gc.Graph) {
+	var p *obj.Prog
+
+	for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
+		p = r.Prog
+		if regtyp(&p.To) {
+			switch p.As {
+			case x86.AINCB,
+				x86.AINCW:
+				p.As = x86.AINCQ
+
+			case x86.ADECB,
+				x86.ADECW:
+				p.As = x86.ADECQ
+
+			case x86.ANEGB,
+				x86.ANEGW:
+				p.As = x86.ANEGQ
+
+			case x86.ANOTB,
+				x86.ANOTW:
+				p.As = x86.ANOTQ
+			}
+
+			if regtyp(&p.From) || p.From.Type == obj.TYPE_CONST {
+				// a move or arithmetic into a partial register,
+				// from another register or constant, can be movl.
+				// we don't switch to 64-bit arithmetic if it can
+				// change how the carry bit is set (and the carry bit is needed).
+				switch p.As {
+				case x86.AMOVB,
+					x86.AMOVW:
+					p.As = x86.AMOVQ
+
+				case x86.AADDB,
+					x86.AADDW:
+					if !needc(p.Link) {
+						p.As = x86.AADDQ
+					}
+
+				case x86.ASUBB,
+					x86.ASUBW:
+					if !needc(p.Link) {
+						p.As = x86.ASUBQ
+					}
+
+				case x86.AMULB,
+					x86.AMULW:
+					p.As = x86.AMULQ
+
+				case x86.AIMULB,
+					x86.AIMULW:
+					p.As = x86.AIMULQ
+
+				case x86.AANDB,
+					x86.AANDW:
+					p.As = x86.AANDQ
+
+				case x86.AORB,
+					x86.AORW:
+					p.As = x86.AORQ
+
+				case x86.AXORB,
+					x86.AXORW:
+					p.As = x86.AXORQ
+
+				case x86.ASHLB,
+					x86.ASHLW:
+					p.As = x86.ASHLQ
+				}
+			} else if p.From.Type != obj.TYPE_REG {
+				// explicit zero extension, but don't
+				// do that if source is a byte register
+				// (only AH can occur and it's forbidden).
+				switch p.As {
+				case x86.AMOVB:
+					p.As = x86.AMOVBQZX
+
+				case x86.AMOVW:
+					p.As = x86.AMOVWQZX
+				}
+			}
+		}
+	}
+}
+
+// is 'a' a register or constant?
+func regconsttyp(a *obj.Addr) bool {
+	if regtyp(a) {
+		return true
+	}
+	switch a.Type {
+	case obj.TYPE_CONST,
+		obj.TYPE_FCONST,
+		obj.TYPE_SCONST,
+		obj.TYPE_ADDR: // TODO(rsc): Not all TYPE_ADDRs are constants.
+		return true
+	}
+
+	return false
+}
+
+// is reg guaranteed to be truncated by a previous L instruction?
+func prevl(r0 *gc.Flow, reg int) bool {
+	for r := (*gc.Flow)(gc.Uniqp(r0)); r != nil; r = gc.Uniqp(r) {
+		p := r.Prog
+		if p.To.Type == obj.TYPE_REG && int(p.To.Reg) == reg {
+			flags := progflags(p)
+			if flags&gc.RightWrite != 0 {
+				if flags&gc.SizeL != 0 {
+					return true
+				}
+				return false
+			}
+		}
+	}
+
+	return false
+}
+
+/*
+ * the idea is to substitute
+ * one register for another
+ * from one MOV to another
+ *	MOV	a, R0
+ *	ADD	b, R0	/ no use of R1
+ *	MOV	R0, R1
+ * would be converted to
+ *	MOV	a, R1
+ *	ADD	b, R1
+ *	MOV	R1, R0
+ * hopefully, then the former or latter MOV
+ * will be eliminated by copy propagation.
+ */
+func subprop(r0 *gc.Flow) bool {
+	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+		fmt.Printf("subprop %v\n", r0.Prog)
+	}
+	p := (*obj.Prog)(r0.Prog)
+	v1 := (*obj.Addr)(&p.From)
+	if !regtyp(v1) {
+		if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+			fmt.Printf("\tnot regtype %v; return 0\n", gc.Ctxt.Dconv(v1))
+		}
+		return false
+	}
+
+	v2 := (*obj.Addr)(&p.To)
+	if !regtyp(v2) {
+		if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+			fmt.Printf("\tnot regtype %v; return 0\n", gc.Ctxt.Dconv(v2))
+		}
+		return false
+	}
+
+	for r := gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
+		if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+			fmt.Printf("\t? %v\n", r.Prog)
+		}
+		if gc.Uniqs(r) == nil {
+			if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+				fmt.Printf("\tno unique successor\n")
+			}
+			break
+		}
+
+		p = r.Prog
+		if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
+			continue
+		}
+		if p.Info.Flags&gc.Call != 0 {
+			if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+				fmt.Printf("\tfound %v; return 0\n", p)
+			}
+			return false
+		}
+
+		if p.Info.Reguse|p.Info.Regset != 0 {
+			if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+				fmt.Printf("\tfound %v; return 0\n", p)
+			}
+			return false
+		}
+
+		if (p.Info.Flags&gc.Move != 0) && (p.Info.Flags&(gc.SizeL|gc.SizeQ|gc.SizeF|gc.SizeD) != 0) && p.To.Type == v1.Type && p.To.Reg == v1.Reg {
+			copysub(&p.To, v1, v2, 1)
+			if gc.Debug['P'] != 0 {
+				fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
+				if p.From.Type == v2.Type && p.From.Reg == v2.Reg {
+					fmt.Printf(" excise")
+				}
+				fmt.Printf("\n")
+			}
+
+			for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
+				p = r.Prog
+				copysub(&p.From, v1, v2, 1)
+				copysub(&p.To, v1, v2, 1)
+				if gc.Debug['P'] != 0 {
+					fmt.Printf("%v\n", r.Prog)
+				}
+			}
+
+			t := int(int(v1.Reg))
+			v1.Reg = v2.Reg
+			v2.Reg = int16(t)
+			if gc.Debug['P'] != 0 {
+				fmt.Printf("%v last\n", r.Prog)
+			}
+			return true
+		}
+
+		if copyau(&p.From, v2) || copyau(&p.To, v2) {
+			if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+				fmt.Printf("\tcopyau %v failed\n", gc.Ctxt.Dconv(v2))
+			}
+			break
+		}
+
+		if copysub(&p.From, v1, v2, 0) != 0 || copysub(&p.To, v1, v2, 0) != 0 {
+			if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+				fmt.Printf("\tcopysub failed\n")
+			}
+			break
+		}
+	}
+
+	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+		fmt.Printf("\tran off end; return 0\n")
+	}
+	return false
+}
+
+/*
+ * The idea is to remove redundant copies.
+ *	v1->v2	F=0
+ *	(use v2	s/v2/v1/)*
+ *	set v1	F=1
+ *	use v2	return fail
+ *	-----------------
+ *	v1->v2	F=0
+ *	(use v2	s/v2/v1/)*
+ *	set v1	F=1
+ *	set v2	return success
+ */
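+//
+// For example (illustrative), with r0 = "MOVQ R1, R2":
+//	ADDQ	R2, R3   // use of v2: rewritten to ADDQ R1, R3
+//	MOVQ	$0, R2   // v2 set again: success; the original copy is now dead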
+func copyprop(g *gc.Graph, r0 *gc.Flow) bool {
+	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+		fmt.Printf("copyprop %v\n", r0.Prog)
+	}
+	p := (*obj.Prog)(r0.Prog)
+	v1 := (*obj.Addr)(&p.From)
+	v2 := (*obj.Addr)(&p.To)
+	if copyas(v1, v2) {
+		return true
+	}
+	gactive++
+	return copy1(v1, v2, r0.S1, 0)
+}
+
+func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) bool {
+	if uint32(r.Active) == gactive {
+		if gc.Debug['P'] != 0 {
+			fmt.Printf("act set; return 1\n")
+		}
+		return true
+	}
+
+	r.Active = int32(gactive)
+	if gc.Debug['P'] != 0 {
+		fmt.Printf("copy %v->%v f=%d\n", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), f)
+	}
+	var t int
+	var p *obj.Prog
+	for ; r != nil; r = r.S1 {
+		p = r.Prog
+		if gc.Debug['P'] != 0 {
+			fmt.Printf("%v", p)
+		}
+		if f == 0 && gc.Uniqp(r) == nil {
+			f = 1
+			if gc.Debug['P'] != 0 {
+				fmt.Printf("; merge; f=%d", f)
+			}
+		}
+
+		t = copyu(p, v2, nil)
+		switch t {
+		case 2: /* rar, can't split */
+			if gc.Debug['P'] != 0 {
+				fmt.Printf("; %v rar; return 0\n", gc.Ctxt.Dconv(v2))
+			}
+			return false
+
+		case 3: /* set */
+			if gc.Debug['P'] != 0 {
+				fmt.Printf("; %v set; return 1\n", gc.Ctxt.Dconv(v2))
+			}
+			return true
+
+		case 1, /* used, substitute */
+			4: /* use and set */
+			if f != 0 {
+				if gc.Debug['P'] == 0 {
+					return false
+				}
+				if t == 4 {
+					fmt.Printf("; %v used+set and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
+				} else {
+					fmt.Printf("; %v used and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
+				}
+				return false
+			}
+
+			if copyu(p, v2, v1) != 0 {
+				if gc.Debug['P'] != 0 {
+					fmt.Printf("; sub fail; return 0\n")
+				}
+				return false
+			}
+
+			if gc.Debug['P'] != 0 {
+				fmt.Printf("; sub %v/%v", gc.Ctxt.Dconv(v2), gc.Ctxt.Dconv(v1))
+			}
+			if t == 4 {
+				if gc.Debug['P'] != 0 {
+					fmt.Printf("; %v used+set; return 1\n", gc.Ctxt.Dconv(v2))
+				}
+				return true
+			}
+		}
+
+		if f == 0 {
+			t = copyu(p, v1, nil)
+			if f == 0 && (t == 2 || t == 3 || t == 4) {
+				f = 1
+				if gc.Debug['P'] != 0 {
+					fmt.Printf("; %v set and !f; f=%d", gc.Ctxt.Dconv(v1), f)
+				}
+			}
+		}
+
+		if gc.Debug['P'] != 0 {
+			fmt.Printf("\n")
+		}
+		if r.S2 != nil {
+			if !copy1(v1, v2, r.S2, f) {
+				return false
+			}
+		}
+	}
+
+	return true
+}
+
+/*
+ * return
+ * 1 if v only used (and substitute),
+ * 2 if read-alter-rewrite
+ * 3 if set
+ * 4 if set and used
+ * 0 otherwise (not touched)
+ */
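+//
+// e.g. (illustrative) for p = "ADDQ R1, R2":
+//	copyu(p, R2, nil) == 2   // R2 is read and written back in place
+//	copyu(p, R1, nil) == 1   // R1 is only read, so it could be substituted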
+func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
+	switch p.As {
+	case obj.AJMP:
+		if s != nil {
+			if copysub(&p.To, v, s, 1) != 0 {
+				return 1
+			}
+			return 0
+		}
+
+		if copyau(&p.To, v) {
+			return 1
+		}
+		return 0
+
+	case obj.ARET:
+		if s != nil {
+			return 1
+		}
+		return 3
+
+	case obj.ACALL:
+		if x86.REGEXT != 0 /*TypeKind(100016)*/ && v.Type == obj.TYPE_REG && v.Reg <= x86.REGEXT && v.Reg > exregoffset {
+			return 2
+		}
+		if x86.REGARG >= 0 && v.Type == obj.TYPE_REG && v.Reg == x86.REGARG {
+			return 2
+		}
+		if v.Type == p.From.Type && v.Reg == p.From.Reg {
+			return 2
+		}
+
+		if s != nil {
+			if copysub(&p.To, v, s, 1) != 0 {
+				return 1
+			}
+			return 0
+		}
+
+		if copyau(&p.To, v) {
+			return 4
+		}
+		return 3
+
+	case obj.ATEXT:
+		if x86.REGARG >= 0 && v.Type == obj.TYPE_REG && v.Reg == x86.REGARG {
+			return 3
+		}
+		return 0
+	}
+
+	if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
+		return 0
+	}
+
+	if (p.Info.Reguse|p.Info.Regset)&RtoB(int(v.Reg)) != 0 {
+		return 2
+	}
+
+	if p.Info.Flags&gc.LeftAddr != 0 {
+		if copyas(&p.From, v) {
+			return 2
+		}
+	}
+
+	if p.Info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightRead|gc.RightWrite {
+		if copyas(&p.To, v) {
+			return 2
+		}
+	}
+
+	if p.Info.Flags&gc.RightWrite != 0 {
+		if copyas(&p.To, v) {
+			if s != nil {
+				return copysub(&p.From, v, s, 1)
+			}
+			if copyau(&p.From, v) {
+				return 4
+			}
+			return 3
+		}
+	}
+
+	if p.Info.Flags&(gc.LeftAddr|gc.LeftRead|gc.LeftWrite|gc.RightAddr|gc.RightRead|gc.RightWrite) != 0 {
+		if s != nil {
+			if copysub(&p.From, v, s, 1) != 0 {
+				return 1
+			}
+			return copysub(&p.To, v, s, 1)
+		}
+
+		if copyau(&p.From, v) {
+			return 1
+		}
+		if copyau(&p.To, v) {
+			return 1
+		}
+	}
+
+	return 0
+}
+
+/*
+ * direct reference,
+ * could be set/use depending on
+ * semantics
+ */
+func copyas(a *obj.Addr, v *obj.Addr) bool {
+	if x86.REG_AL <= a.Reg && a.Reg <= x86.REG_R15B {
+		gc.Fatal("use of byte register")
+	}
+	if x86.REG_AL <= v.Reg && v.Reg <= x86.REG_R15B {
+		gc.Fatal("use of byte register")
+	}
+
+	if a.Type != v.Type || a.Name != v.Name || a.Reg != v.Reg {
+		return false
+	}
+	if regtyp(v) {
+		return true
+	}
+	if v.Type == obj.TYPE_MEM && (v.Name == obj.NAME_AUTO || v.Name == obj.NAME_PARAM) {
+		if v.Offset == a.Offset {
+			return true
+		}
+	}
+	return false
+}
+
+func sameaddr(a *obj.Addr, v *obj.Addr) bool {
+	if a.Type != v.Type || a.Name != v.Name || a.Reg != v.Reg {
+		return false
+	}
+	if regtyp(v) {
+		return true
+	}
+	if v.Type == obj.TYPE_MEM && (v.Name == obj.NAME_AUTO || v.Name == obj.NAME_PARAM) {
+		if v.Offset == a.Offset {
+			return true
+		}
+	}
+	return false
+}
+
+/*
+ * either direct or indirect
+ */
+func copyau(a *obj.Addr, v *obj.Addr) bool {
+	if copyas(a, v) {
+		if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+			fmt.Printf("\tcopyau: copyas returned 1\n")
+		}
+		return true
+	}
+
+	if regtyp(v) {
+		if a.Type == obj.TYPE_MEM && a.Reg == v.Reg {
+			if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+				fmt.Printf("\tcopyau: found indir use - return 1\n")
+			}
+			return true
+		}
+
+		if a.Index == v.Reg {
+			if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+				fmt.Printf("\tcopyau: found index use - return 1\n")
+			}
+			return true
+		}
+	}
+
+	return false
+}
+
+/*
+ * substitute s for v in a
+ * return 1 if the substitution is unsafe (BP/R13 base with an
+ * index register), 0 otherwise
+ */
+func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f int) int {
+	if copyas(a, v) {
+		reg := int(int(s.Reg))
+		if reg >= x86.REG_AX && reg <= x86.REG_R15 || reg >= x86.REG_X0 && reg <= x86.REG_X0+15 {
+			if f != 0 {
+				a.Reg = int16(reg)
+			}
+		}
+
+		return 0
+	}
+
+	if regtyp(v) {
+		reg := int(int(v.Reg))
+		if a.Type == obj.TYPE_MEM && int(a.Reg) == reg {
+			if (s.Reg == x86.REG_BP || s.Reg == x86.REG_R13) && a.Index != x86.REG_NONE {
+				return 1 /* can't use BP-base with index */
+			}
+			if f != 0 {
+				a.Reg = s.Reg
+			}
+		}
+
+		if int(a.Index) == reg {
+			if f != 0 {
+				a.Index = s.Reg
+			}
+			return 0
+		}
+
+		return 0
+	}
+
+	return 0
+}
+
+func conprop(r0 *gc.Flow) {
+	var p *obj.Prog
+	var t int
+
+	p0 := (*obj.Prog)(r0.Prog)
+	v0 := (*obj.Addr)(&p0.To)
+	r := (*gc.Flow)(r0)
+
+loop:
+	r = gc.Uniqs(r)
+	if r == nil || r == r0 {
+		return
+	}
+	if gc.Uniqp(r) == nil {
+		return
+	}
+
+	p = r.Prog
+	t = copyu(p, v0, nil)
+	switch t {
+	case 0, // miss
+		1: // use
+		goto loop
+
+	case 2, // rar
+		4: // use and set
+		break
+
+	case 3: // set
+		// for FCONST operands the constant values must also match.
+		if p.As == p0.As &&
+			p.From.Type == p0.From.Type &&
+			p.From.Reg == p0.From.Reg &&
+			p.From.Node == p0.From.Node &&
+			p.From.Offset == p0.From.Offset &&
+			p.From.Scale == p0.From.Scale &&
+			(p.From.Type != obj.TYPE_FCONST || p.From.Val.(float64) == p0.From.Val.(float64)) &&
+			p.From.Index == p0.From.Index {
+			excise(r)
+			goto loop
+		}
+	}
+}
+
+func smallindir(a *obj.Addr, reg *obj.Addr) bool {
+	return regtyp(reg) && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && a.Index == x86.REG_NONE && 0 <= a.Offset && a.Offset < 4096
+}
+
+func stackaddr(a *obj.Addr) bool {
+	return a.Type == obj.TYPE_REG && a.Reg == x86.REG_SP
+}
diff --git a/src/cmd/compile/internal/amd64/prog.go b/src/cmd/compile/internal/amd64/prog.go
new file mode 100644
index 0000000..00918c8
--- /dev/null
+++ b/src/cmd/compile/internal/amd64/prog.go
@@ -0,0 +1,308 @@
+// Copyright 2013 The Go Authors.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package amd64
+
+import (
+	"cmd/compile/internal/gc"
+	"cmd/internal/obj"
+	"cmd/internal/obj/x86"
+)
+
+const (
+	LeftRdwr  uint32 = gc.LeftRead | gc.LeftWrite
+	RightRdwr uint32 = gc.RightRead | gc.RightWrite
+)
+
+// This table gives the basic information about the instructions
+// generated by the compiler and processed in the optimizer.
+// See opt.h for bit definitions.
+//
+// Instructions not generated need not be listed.
+// As an exception to that rule, we typically write down all the
+// size variants of an operation even if we just use a subset.
+//
+// The table is formatted for 8-space tabs.
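+//
+// For example, the x86.AADDQ entry below reads as: a 64-bit (SizeQ)
+// instruction that reads its left operand, reads and writes its right
+// operand, and sets the carry bit; the three trailing zeros mean it has
+// no fixed implicit register uses, sets, or index registers.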
+var progtable = [x86.ALAST]obj.ProgInfo{
+	obj.ATYPE:     {gc.Pseudo | gc.Skip, 0, 0, 0},
+	obj.ATEXT:     {gc.Pseudo, 0, 0, 0},
+	obj.AFUNCDATA: {gc.Pseudo, 0, 0, 0},
+	obj.APCDATA:   {gc.Pseudo, 0, 0, 0},
+	obj.AUNDEF:    {gc.Break, 0, 0, 0},
+	obj.AUSEFIELD: {gc.OK, 0, 0, 0},
+	obj.ACHECKNIL: {gc.LeftRead, 0, 0, 0},
+	obj.AVARDEF:   {gc.Pseudo | gc.RightWrite, 0, 0, 0},
+	obj.AVARKILL:  {gc.Pseudo | gc.RightWrite, 0, 0, 0},
+
+	// NOP is an internal no-op that also stands
+	// for USED and SET annotations, not the Intel opcode.
+	obj.ANOP:       {gc.LeftRead | gc.RightWrite, 0, 0, 0},
+	x86.AADCL:      {gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+	x86.AADCQ:      {gc.SizeQ | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+	x86.AADCW:      {gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+	x86.AADDB:      {gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+	x86.AADDL:      {gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+	x86.AADDW:      {gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+	x86.AADDQ:      {gc.SizeQ | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+	x86.AADDSD:     {gc.SizeD | gc.LeftRead | RightRdwr, 0, 0, 0},
+	x86.AADDSS:     {gc.SizeF | gc.LeftRead | RightRdwr, 0, 0, 0},
+	x86.AANDB:      {gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+	x86.AANDL:      {gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+	x86.AANDQ:      {gc.SizeQ | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+	x86.AANDW:      {gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+	obj.ACALL:      {gc.RightAddr | gc.Call | gc.KillCarry, 0, 0, 0},
+	x86.ACDQ:       {gc.OK, AX, AX | DX, 0},
+	x86.ACQO:       {gc.OK, AX, AX | DX, 0},
+	x86.ACWD:       {gc.OK, AX, AX | DX, 0},
+	x86.ACLD:       {gc.OK, 0, 0, 0},
+	x86.ASTD:       {gc.OK, 0, 0, 0},
+	x86.ACMPB:      {gc.SizeB | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+	x86.ACMPL:      {gc.SizeL | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+	x86.ACMPQ:      {gc.SizeQ | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+	x86.ACMPW:      {gc.SizeW | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+	x86.ACOMISD:    {gc.SizeD | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+	x86.ACOMISS:    {gc.SizeF | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+	x86.ACVTSD2SL:  {gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	x86.ACVTSD2SQ:  {gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	x86.ACVTSD2SS:  {gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	x86.ACVTSL2SD:  {gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	x86.ACVTSL2SS:  {gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	x86.ACVTSQ2SD:  {gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	x86.ACVTSQ2SS:  {gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	x86.ACVTSS2SD:  {gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	x86.ACVTSS2SL:  {gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	x86.ACVTSS2SQ:  {gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	x86.ACVTTSD2SL: {gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	x86.ACVTTSD2SQ: {gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	x86.ACVTTSS2SL: {gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	x86.ACVTTSS2SQ: {gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	x86.ADECB:      {gc.SizeB | RightRdwr, 0, 0, 0},
+	x86.ADECL:      {gc.SizeL | RightRdwr, 0, 0, 0},
+	x86.ADECQ:      {gc.SizeQ | RightRdwr, 0, 0, 0},
+	x86.ADECW:      {gc.SizeW | RightRdwr, 0, 0, 0},
+	x86.ADIVB:      {gc.SizeB | gc.LeftRead | gc.SetCarry, AX, AX, 0},
+	x86.ADIVL:      {gc.SizeL | gc.LeftRead | gc.SetCarry, AX | DX, AX | DX, 0},
+	x86.ADIVQ:      {gc.SizeQ | gc.LeftRead | gc.SetCarry, AX | DX, AX | DX, 0},
+	x86.ADIVW:      {gc.SizeW | gc.LeftRead | gc.SetCarry, AX | DX, AX | DX, 0},
+	x86.ADIVSD:     {gc.SizeD | gc.LeftRead | RightRdwr, 0, 0, 0},
+	x86.ADIVSS:     {gc.SizeF | gc.LeftRead | RightRdwr, 0, 0, 0},
+	x86.AIDIVB:     {gc.SizeB | gc.LeftRead | gc.SetCarry, AX, AX, 0},
+	x86.AIDIVL:     {gc.SizeL | gc.LeftRead | gc.SetCarry, AX | DX, AX | DX, 0},
+	x86.AIDIVQ:     {gc.SizeQ | gc.LeftRead | gc.SetCarry, AX | DX, AX | DX, 0},
+	x86.AIDIVW:     {gc.SizeW | gc.LeftRead | gc.SetCarry, AX | DX, AX | DX, 0},
+	x86.AIMULB:     {gc.SizeB | gc.LeftRead | gc.SetCarry, AX, AX, 0},
+	x86.AIMULL:     {gc.SizeL | gc.LeftRead | gc.ImulAXDX | gc.SetCarry, 0, 0, 0},
+	x86.AIMULQ:     {gc.SizeQ | gc.LeftRead | gc.ImulAXDX | gc.SetCarry, 0, 0, 0},
+	x86.AIMULW:     {gc.SizeW | gc.LeftRead | gc.ImulAXDX | gc.SetCarry, 0, 0, 0},
+	x86.AINCB:      {gc.SizeB | RightRdwr, 0, 0, 0},
+	x86.AINCL:      {gc.SizeL | RightRdwr, 0, 0, 0},
+	x86.AINCQ:      {gc.SizeQ | RightRdwr, 0, 0, 0},
+	x86.AINCW:      {gc.SizeW | RightRdwr, 0, 0, 0},
+	x86.AJCC:       {gc.Cjmp | gc.UseCarry, 0, 0, 0},
+	x86.AJCS:       {gc.Cjmp | gc.UseCarry, 0, 0, 0},
+	x86.AJEQ:       {gc.Cjmp | gc.UseCarry, 0, 0, 0},
+	x86.AJGE:       {gc.Cjmp | gc.UseCarry, 0, 0, 0},
+	x86.AJGT:       {gc.Cjmp | gc.UseCarry, 0, 0, 0},
+	x86.AJHI:       {gc.Cjmp | gc.UseCarry, 0, 0, 0},
+	x86.AJLE:       {gc.Cjmp | gc.UseCarry, 0, 0, 0},
+	x86.AJLS:       {gc.Cjmp | gc.UseCarry, 0, 0, 0},
+	x86.AJLT:       {gc.Cjmp | gc.UseCarry, 0, 0, 0},
+	x86.AJMI:       {gc.Cjmp | gc.UseCarry, 0, 0, 0},
+	x86.AJNE:       {gc.Cjmp | gc.UseCarry, 0, 0, 0},
+	x86.AJOC:       {gc.Cjmp | gc.UseCarry, 0, 0, 0},
+	x86.AJOS:       {gc.Cjmp | gc.UseCarry, 0, 0, 0},
+	x86.AJPC:       {gc.Cjmp | gc.UseCarry, 0, 0, 0},
+	x86.AJPL:       {gc.Cjmp | gc.UseCarry, 0, 0, 0},
+	x86.AJPS:       {gc.Cjmp | gc.UseCarry, 0, 0, 0},
+	obj.AJMP:       {gc.Jump | gc.Break | gc.KillCarry, 0, 0, 0},
+	x86.ALEAL:      {gc.LeftAddr | gc.RightWrite, 0, 0, 0},
+	x86.ALEAQ:      {gc.LeftAddr | gc.RightWrite, 0, 0, 0},
+	x86.AMOVBLSX:   {gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	x86.AMOVBLZX:   {gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	x86.AMOVBQSX:   {gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	x86.AMOVBQZX:   {gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	x86.AMOVBWSX:   {gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	x86.AMOVBWZX:   {gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	x86.AMOVLQSX:   {gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	x86.AMOVLQZX:   {gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	x86.AMOVWLSX:   {gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	x86.AMOVWLZX:   {gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	x86.AMOVWQSX:   {gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	x86.AMOVWQZX:   {gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	x86.AMOVQL:     {gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	x86.AMOVB:      {gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+	x86.AMOVL:      {gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+	x86.AMOVQ:      {gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+	x86.AMOVW:      {gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+	x86.AMOVSB:     {gc.OK, DI | SI, DI | SI, 0},
+	x86.AMOVSL:     {gc.OK, DI | SI, DI | SI, 0},
+	x86.AMOVSQ:     {gc.OK, DI | SI, DI | SI, 0},
+	x86.AMOVSW:     {gc.OK, DI | SI, DI | SI, 0},
+	obj.ADUFFCOPY:  {gc.OK, DI | SI, DI | SI | CX, 0},
+	x86.AMOVSD:     {gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+	x86.AMOVSS:     {gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+
+	// We use MOVAPD as a faster synonym for MOVSD.
+	x86.AMOVAPD:   {gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+	x86.AMULB:     {gc.SizeB | gc.LeftRead | gc.SetCarry, AX, AX, 0},
+	x86.AMULL:     {gc.SizeL | gc.LeftRead | gc.SetCarry, AX, AX | DX, 0},
+	x86.AMULQ:     {gc.SizeQ | gc.LeftRead | gc.SetCarry, AX, AX | DX, 0},
+	x86.AMULW:     {gc.SizeW | gc.LeftRead | gc.SetCarry, AX, AX | DX, 0},
+	x86.AMULSD:    {gc.SizeD | gc.LeftRead | RightRdwr, 0, 0, 0},
+	x86.AMULSS:    {gc.SizeF | gc.LeftRead | RightRdwr, 0, 0, 0},
+	x86.ANEGB:     {gc.SizeB | RightRdwr | gc.SetCarry, 0, 0, 0},
+	x86.ANEGL:     {gc.SizeL | RightRdwr | gc.SetCarry, 0, 0, 0},
+	x86.ANEGQ:     {gc.SizeQ | RightRdwr | gc.SetCarry, 0, 0, 0},
+	x86.ANEGW:     {gc.SizeW | RightRdwr | gc.SetCarry, 0, 0, 0},
+	x86.ANOTB:     {gc.SizeB | RightRdwr, 0, 0, 0},
+	x86.ANOTL:     {gc.SizeL | RightRdwr, 0, 0, 0},
+	x86.ANOTQ:     {gc.SizeQ | RightRdwr, 0, 0, 0},
+	x86.ANOTW:     {gc.SizeW | RightRdwr, 0, 0, 0},
+	x86.AORB:      {gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+	x86.AORL:      {gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+	x86.AORQ:      {gc.SizeQ | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+	x86.AORW:      {gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+	x86.APOPQ:     {gc.SizeQ | gc.RightWrite, 0, 0, 0},
+	x86.APUSHQ:    {gc.SizeQ | gc.LeftRead, 0, 0, 0},
+	x86.ARCLB:     {gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+	x86.ARCLL:     {gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+	x86.ARCLQ:     {gc.SizeQ | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+	x86.ARCLW:     {gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+	x86.ARCRB:     {gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+	x86.ARCRL:     {gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+	x86.ARCRQ:     {gc.SizeQ | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+	x86.ARCRW:     {gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+	x86.AREP:      {gc.OK, CX, CX, 0},
+	x86.AREPN:     {gc.OK, CX, CX, 0},
+	obj.ARET:      {gc.Break | gc.KillCarry, 0, 0, 0},
+	x86.AROLB:     {gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+	x86.AROLL:     {gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+	x86.AROLQ:     {gc.SizeQ | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+	x86.AROLW:     {gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+	x86.ARORB:     {gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+	x86.ARORL:     {gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+	x86.ARORQ:     {gc.SizeQ | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+	x86.ARORW:     {gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+	x86.ASALB:     {gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+	x86.ASALL:     {gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+	x86.ASALQ:     {gc.SizeQ | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+	x86.ASALW:     {gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+	x86.ASARB:     {gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+	x86.ASARL:     {gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+	x86.ASARQ:     {gc.SizeQ | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+	x86.ASARW:     {gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+	x86.ASBBB:     {gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+	x86.ASBBL:     {gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+	x86.ASBBQ:     {gc.SizeQ | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+	x86.ASBBW:     {gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+	x86.ASETCC:    {gc.SizeB | gc.RightWrite | gc.UseCarry, 0, 0, 0},
+	x86.ASETCS:    {gc.SizeB | gc.RightWrite | gc.UseCarry, 0, 0, 0},
+	x86.ASETEQ:    {gc.SizeB | gc.RightWrite | gc.UseCarry, 0, 0, 0},
+	x86.ASETGE:    {gc.SizeB | gc.RightWrite | gc.UseCarry, 0, 0, 0},
+	x86.ASETGT:    {gc.SizeB | gc.RightWrite | gc.UseCarry, 0, 0, 0},
+	x86.ASETHI:    {gc.SizeB | gc.RightWrite | gc.UseCarry, 0, 0, 0},
+	x86.ASETLE:    {gc.SizeB | gc.RightWrite | gc.UseCarry, 0, 0, 0},
+	x86.ASETLS:    {gc.SizeB | gc.RightWrite | gc.UseCarry, 0, 0, 0},
+	x86.ASETLT:    {gc.SizeB | gc.RightWrite | gc.UseCarry, 0, 0, 0},
+	x86.ASETMI:    {gc.SizeB | gc.RightWrite | gc.UseCarry, 0, 0, 0},
+	x86.ASETNE:    {gc.SizeB | gc.RightWrite | gc.UseCarry, 0, 0, 0},
+	x86.ASETOC:    {gc.SizeB | gc.RightWrite | gc.UseCarry, 0, 0, 0},
+	x86.ASETOS:    {gc.SizeB | gc.RightWrite | gc.UseCarry, 0, 0, 0},
+	x86.ASETPC:    {gc.SizeB | gc.RightWrite | gc.UseCarry, 0, 0, 0},
+	x86.ASETPL:    {gc.SizeB | gc.RightWrite | gc.UseCarry, 0, 0, 0},
+	x86.ASETPS:    {gc.SizeB | gc.RightWrite | gc.UseCarry, 0, 0, 0},
+	x86.ASHLB:     {gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+	x86.ASHLL:     {gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+	x86.ASHLQ:     {gc.SizeQ | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+	x86.ASHLW:     {gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+	x86.ASHRB:     {gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+	x86.ASHRL:     {gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+	x86.ASHRQ:     {gc.SizeQ | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+	x86.ASHRW:     {gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+	x86.ASQRTSD:   {gc.SizeD | gc.LeftRead | RightRdwr, 0, 0, 0},
+	x86.ASTOSB:    {gc.OK, AX | DI, DI, 0},
+	x86.ASTOSL:    {gc.OK, AX | DI, DI, 0},
+	x86.ASTOSQ:    {gc.OK, AX | DI, DI, 0},
+	x86.ASTOSW:    {gc.OK, AX | DI, DI, 0},
+	obj.ADUFFZERO: {gc.OK, AX | DI, DI, 0},
+	x86.ASUBB:     {gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+	x86.ASUBL:     {gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+	x86.ASUBQ:     {gc.SizeQ | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+	x86.ASUBW:     {gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+	x86.ASUBSD:    {gc.SizeD | gc.LeftRead | RightRdwr, 0, 0, 0},
+	x86.ASUBSS:    {gc.SizeF | gc.LeftRead | RightRdwr, 0, 0, 0},
+	x86.ATESTB:    {gc.SizeB | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+	x86.ATESTL:    {gc.SizeL | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+	x86.ATESTQ:    {gc.SizeQ | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+	x86.ATESTW:    {gc.SizeW | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+	x86.AUCOMISD:  {gc.SizeD | gc.LeftRead | gc.RightRead, 0, 0, 0},
+	x86.AUCOMISS:  {gc.SizeF | gc.LeftRead | gc.RightRead, 0, 0, 0},
+	x86.AXCHGB:    {gc.SizeB | LeftRdwr | RightRdwr, 0, 0, 0},
+	x86.AXCHGL:    {gc.SizeL | LeftRdwr | RightRdwr, 0, 0, 0},
+	x86.AXCHGQ:    {gc.SizeQ | LeftRdwr | RightRdwr, 0, 0, 0},
+	x86.AXCHGW:    {gc.SizeW | LeftRdwr | RightRdwr, 0, 0, 0},
+	x86.AXORB:     {gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+	x86.AXORL:     {gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+	x86.AXORQ:     {gc.SizeQ | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+	x86.AXORW:     {gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+}
+
+func progflags(p *obj.Prog) uint32 {
+	flags := progtable[p.As].Flags
+	if flags&gc.ImulAXDX != 0 && p.To.Type != obj.TYPE_NONE {
+		flags |= RightRdwr
+	}
+	return flags
+}
+
+func progcarryflags(p *obj.Prog) uint32 {
+	return progtable[p.As].Flags
+}
+
+func proginfo(p *obj.Prog) {
+	info := &p.Info
+	*info = progtable[p.As]
+	if info.Flags == 0 {
+		gc.Fatal("unknown instruction %v", p)
+	}
+
+	if (info.Flags&gc.ShiftCX != 0) && p.From.Type != obj.TYPE_CONST {
+		info.Reguse |= CX
+	}
+
+	if info.Flags&gc.ImulAXDX != 0 {
+		if p.To.Type == obj.TYPE_NONE {
+			info.Reguse |= AX
+			info.Regset |= AX | DX
+		} else {
+			info.Flags |= RightRdwr
+		}
+	}
+
+	// Addressing makes some registers used.
+	if p.From.Type == obj.TYPE_MEM && p.From.Name == obj.NAME_NONE {
+		info.Regindex |= RtoB(int(p.From.Reg))
+	}
+	if p.From.Index != x86.REG_NONE {
+		info.Regindex |= RtoB(int(p.From.Index))
+	}
+	if p.To.Type == obj.TYPE_MEM && p.To.Name == obj.NAME_NONE {
+		info.Regindex |= RtoB(int(p.To.Reg))
+	}
+	if p.To.Index != x86.REG_NONE {
+		info.Regindex |= RtoB(int(p.To.Index))
+	}
+	if gc.Ctxt.Flag_dynlink {
+		// When -dynlink is passed, many operations on external names (and
+		// also calling duffzero/duffcopy) use R15 as a scratch register.
+		if p.As == x86.ALEAQ || info.Flags == gc.Pseudo || p.As == obj.ACALL || p.As == obj.ARET || p.As == obj.AJMP {
+			return
+		}
+		if p.As == obj.ADUFFZERO || p.As == obj.ADUFFCOPY || (p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local) || (p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local) {
+			info.Reguse |= R15
+			info.Regset |= R15
+			return
+		}
+	}
+}
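
The carry-bit flags above drive needc in peep.go: a rewrite that clobbers
the carry is only legal if no later instruction consumes it. A minimal,
self-contained sketch of that forward scan (toy flag values, not gc's real
bit assignments):

	package main

	import "fmt"

	// Toy stand-ins for gc.UseCarry/gc.SetCarry/gc.KillCarry.
	const (
		useCarry uint32 = 1 << iota
		setCarry
		killCarry
	)

	// needCarry mirrors needc: scan forward until the carry bit is
	// used (still live) or set/killed (dead).
	func needCarry(flags []uint32) bool {
		for _, f := range flags {
			if f&useCarry != 0 {
				return true
			}
			if f&(setCarry|killCarry) != 0 {
				return false
			}
		}
		return false
	}

	func main() {
		// An ADC-style op both uses and sets the carry: it is still live.
		fmt.Println(needCarry([]uint32{useCarry | setCarry})) // true
		// A plain ADD overwrites it first: safe to rewrite ADDQ $1 -> INCQ.
		fmt.Println(needCarry([]uint32{setCarry})) // false
	}
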
diff --git a/src/cmd/compile/internal/amd64/reg.go b/src/cmd/compile/internal/amd64/reg.go
new file mode 100644
index 0000000..7d4f406
--- /dev/null
+++ b/src/cmd/compile/internal/amd64/reg.go
@@ -0,0 +1,154 @@
+// Derived from Inferno utils/6c/reg.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/reg.c
+//
+//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
+//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+//	Portions Copyright © 1997-1999 Vita Nuova Limited
+//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+//	Portions Copyright © 2004,2006 Bruce Ellis
+//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+//	Portions Copyright © 2009 The Go Authors.  All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package amd64
+
+import (
+	"cmd/compile/internal/gc"
+	"cmd/internal/obj"
+	"cmd/internal/obj/x86"
+)
+
+const (
+	NREGVAR = 32
+)
+
+var reg [x86.MAXREG]uint8
+
+var regname = []string{
+	".AX",
+	".CX",
+	".DX",
+	".BX",
+	".SP",
+	".BP",
+	".SI",
+	".DI",
+	".R8",
+	".R9",
+	".R10",
+	".R11",
+	".R12",
+	".R13",
+	".R14",
+	".R15",
+	".X0",
+	".X1",
+	".X2",
+	".X3",
+	".X4",
+	".X5",
+	".X6",
+	".X7",
+	".X8",
+	".X9",
+	".X10",
+	".X11",
+	".X12",
+	".X13",
+	".X14",
+	".X15",
+}
+
+func regnames(n *int) []string {
+	*n = NREGVAR
+	return regname
+}
+
+func excludedregs() uint64 {
+	return RtoB(x86.REG_SP)
+}
+
+func doregbits(r int) uint64 {
+	b := uint64(0)
+	if r >= x86.REG_AX && r <= x86.REG_R15 {
+		b |= RtoB(r)
+	} else if r >= x86.REG_AL && r <= x86.REG_R15B {
+		b |= RtoB(r - x86.REG_AL + x86.REG_AX)
+	} else if r >= x86.REG_AH && r <= x86.REG_BH {
+		b |= RtoB(r - x86.REG_AH + x86.REG_AX)
+	} else if r >= x86.REG_X0 && r <= x86.REG_X0+15 {
+		b |= FtoB(r)
+	}
+	return b
+}
+
+// For ProgInfo.
+const (
+	AX  = 1 << (x86.REG_AX - x86.REG_AX)
+	BX  = 1 << (x86.REG_BX - x86.REG_AX)
+	CX  = 1 << (x86.REG_CX - x86.REG_AX)
+	DX  = 1 << (x86.REG_DX - x86.REG_AX)
+	DI  = 1 << (x86.REG_DI - x86.REG_AX)
+	SI  = 1 << (x86.REG_SI - x86.REG_AX)
+	R15 = 1 << (x86.REG_R15 - x86.REG_AX)
+)
+
+func RtoB(r int) uint64 {
+	if r < x86.REG_AX || r > x86.REG_R15 {
+		return 0
+	}
+	return 1 << uint(r-x86.REG_AX)
+}
+
+func BtoR(b uint64) int {
+	b &= 0xffff
+	if gc.Nacl {
+		b &^= (1<<(x86.REG_BP-x86.REG_AX) | 1<<(x86.REG_R15-x86.REG_AX))
+	} else if obj.Framepointer_enabled != 0 {
+		// BP is part of the calling convention if framepointer_enabled.
+		b &^= (1 << (x86.REG_BP - x86.REG_AX))
+	}
+	if b == 0 {
+		return 0
+	}
+	return gc.Bitno(b) + x86.REG_AX
+}
+
+/*
+ *	bit	reg
+ *	16	X0
+ *	...
+ *	31	X15
+ */
+func FtoB(f int) uint64 {
+	if f < x86.REG_X0 || f > x86.REG_X15 {
+		return 0
+	}
+	return 1 << uint(f-x86.REG_X0+16)
+}
+
+func BtoF(b uint64) int {
+	b &= 0xFFFF0000
+	if b == 0 {
+		return 0
+	}
+	return gc.Bitno(b) - 16 + x86.REG_X0
+}
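
The bitmap layout documented above splits one uint64 word: bits 0-15 for
the integer registers AX..R15 and bits 16-31 for X0..X15. A self-contained
sketch of the two mappings (local stand-in constants, not the real
x86.REG_* values):

	package main

	import "fmt"

	const (
		regAX = 100 // stand-in for x86.REG_AX; only the offsets matter
		regX0 = 200 // stand-in for x86.REG_X0
	)

	// rtoB maps an integer register to one of bits 0..15, cf. RtoB.
	func rtoB(r int) uint64 {
		if r < regAX || r > regAX+15 {
			return 0
		}
		return 1 << uint(r-regAX)
	}

	// ftoB maps an SSE register to one of bits 16..31, cf. FtoB.
	func ftoB(f int) uint64 {
		if f < regX0 || f > regX0+15 {
			return 0
		}
		return 1 << uint(f-regX0+16)
	}

	func main() {
		fmt.Printf("%#x %#x\n", rtoB(regAX+3), ftoB(regX0+2)) // 0x8 0x40000
	}
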
diff --git a/src/cmd/compile/internal/arm/cgen.go b/src/cmd/compile/internal/arm/cgen.go
new file mode 100644
index 0000000..8ea6c5f
--- /dev/null
+++ b/src/cmd/compile/internal/arm/cgen.go
@@ -0,0 +1,229 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package arm
+
+import (
+	"cmd/compile/internal/gc"
+	"cmd/internal/obj"
+	"cmd/internal/obj/arm"
+)
+
+/*
+ * generate array index into res.
+ * n might be any size; res is 32-bit.
+ * returns Prog* to patch to panic call.
+ */
+func cgenindex(n *gc.Node, res *gc.Node, bounded bool) *obj.Prog {
+	if !gc.Is64(n.Type) {
+		gc.Cgen(n, res)
+		return nil
+	}
+
+	var tmp gc.Node
+	gc.Tempname(&tmp, gc.Types[gc.TINT64])
+	gc.Cgen(n, &tmp)
+	var lo gc.Node
+	var hi gc.Node
+	split64(&tmp, &lo, &hi)
+	gmove(&lo, res)
+	if bounded {
+		splitclean()
+		return nil
+	}
+
+	var n1 gc.Node
+	gc.Regalloc(&n1, gc.Types[gc.TINT32], nil)
+	var n2 gc.Node
+	gc.Regalloc(&n2, gc.Types[gc.TINT32], nil)
+	var zero gc.Node
+	gc.Nodconst(&zero, gc.Types[gc.TINT32], 0)
+	gmove(&hi, &n1)
+	gmove(&zero, &n2)
+	gins(arm.ACMP, &n1, &n2)
+	gc.Regfree(&n2)
+	gc.Regfree(&n1)
+	splitclean()
+	return gc.Gbranch(arm.ABNE, nil, -1)
+}
+
+func igenindex(n *gc.Node, res *gc.Node, bounded bool) *obj.Prog {
+	gc.Tempname(res, n.Type)
+	return cgenindex(n, res, bounded)
+}
+
+func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
+	// determine alignment.
+	// want to avoid unaligned access, so have to use
+	// smaller operations for less aligned types.
+	// for example moving [4]byte must use 4 MOVB not 1 MOVW.
+	align := int(n.Type.Align)
+
+	var op int
+	switch align {
+	default:
+		gc.Fatal("sgen: invalid alignment %d for %v", align, n.Type)
+
+	case 1:
+		op = arm.AMOVB
+
+	case 2:
+		op = arm.AMOVH
+
+	case 4:
+		op = arm.AMOVW
+	}
+
+	if w%int64(align) != 0 {
+		gc.Fatal("sgen: unaligned size %d (align=%d) for %v", w, align, n.Type)
+	}
+	c := int32(w / int64(align))
+
+	if osrc%int64(align) != 0 || odst%int64(align) != 0 {
+		gc.Fatal("sgen: unaligned offset src %d or dst %d (align %d)", osrc, odst, align)
+	}
+
+	// if we are copying forward on the stack and
+	// the src and dst overlap, then reverse direction
+	// (the overlap test is restated in the sketch after this function)
+	dir := align
+	if osrc < odst && int64(odst) < int64(osrc)+w {
+		dir = -dir
+	}
+
+	if op == arm.AMOVW && !gc.Nacl && dir > 0 && c >= 4 && c <= 128 {
+		var r0 gc.Node
+		r0.Op = gc.OREGISTER
+		r0.Reg = arm.REG_R0
+		var r1 gc.Node
+		r1.Op = gc.OREGISTER
+		r1.Reg = arm.REG_R0 + 1
+		var r2 gc.Node
+		r2.Op = gc.OREGISTER
+		r2.Reg = arm.REG_R0 + 2
+
+		var src gc.Node
+		gc.Regalloc(&src, gc.Types[gc.Tptr], &r1)
+		var dst gc.Node
+		gc.Regalloc(&dst, gc.Types[gc.Tptr], &r2)
+		if n.Ullman >= res.Ullman {
+			// eval n first
+			gc.Agen(n, &src)
+
+			if res.Op == gc.ONAME {
+				gc.Gvardef(res)
+			}
+			gc.Agen(res, &dst)
+		} else {
+			// eval res first
+			if res.Op == gc.ONAME {
+				gc.Gvardef(res)
+			}
+			gc.Agen(res, &dst)
+			gc.Agen(n, &src)
+		}
+
+		var tmp gc.Node
+		gc.Regalloc(&tmp, gc.Types[gc.Tptr], &r0)
+		f := gc.Sysfunc("duffcopy")
+		p := gins(obj.ADUFFCOPY, nil, f)
+		gc.Afunclit(&p.To, f)
+
+		// 8 and 128 = magic constants: see ../../runtime/asm_arm.s
+		p.To.Offset = 8 * (128 - int64(c))
+
+		gc.Regfree(&tmp)
+		gc.Regfree(&src)
+		gc.Regfree(&dst)
+		return
+	}
+
+	var dst gc.Node
+	var src gc.Node
+	if n.Ullman >= res.Ullman {
+		gc.Agenr(n, &dst, res) // temporarily use dst
+		gc.Regalloc(&src, gc.Types[gc.Tptr], nil)
+		gins(arm.AMOVW, &dst, &src)
+		if res.Op == gc.ONAME {
+			gc.Gvardef(res)
+		}
+		gc.Agen(res, &dst)
+	} else {
+		if res.Op == gc.ONAME {
+			gc.Gvardef(res)
+		}
+		gc.Agenr(res, &dst, res)
+		gc.Agenr(n, &src, nil)
+	}
+
+	var tmp gc.Node
+	gc.Regalloc(&tmp, gc.Types[gc.TUINT32], nil)
+
+	// set up end marker
+	var nend gc.Node
+
+	if c >= 4 {
+		gc.Regalloc(&nend, gc.Types[gc.TUINT32], nil)
+
+		p := gins(arm.AMOVW, &src, &nend)
+		p.From.Type = obj.TYPE_ADDR
+		if dir < 0 {
+			p.From.Offset = int64(dir)
+		} else {
+			p.From.Offset = w
+		}
+	}
+
+	// move src and dest to the end of block if necessary
+	if dir < 0 {
+		p := gins(arm.AMOVW, &src, &src)
+		p.From.Type = obj.TYPE_ADDR
+		p.From.Offset = w + int64(dir)
+
+		p = gins(arm.AMOVW, &dst, &dst)
+		p.From.Type = obj.TYPE_ADDR
+		p.From.Offset = w + int64(dir)
+	}
+
+	// move
+	if c >= 4 {
+		p := gins(op, &src, &tmp)
+		p.From.Type = obj.TYPE_MEM
+		p.From.Offset = int64(dir)
+		p.Scond |= arm.C_PBIT
+		ploop := p
+
+		p = gins(op, &tmp, &dst)
+		p.To.Type = obj.TYPE_MEM
+		p.To.Offset = int64(dir)
+		p.Scond |= arm.C_PBIT
+
+		p = gins(arm.ACMP, &src, nil)
+		raddr(&nend, p)
+
+		gc.Patch(gc.Gbranch(arm.ABNE, nil, 0), ploop)
+		gc.Regfree(&nend)
+	} else {
+		var p *obj.Prog
+		for ; c > 0; c-- {
+			p = gins(op, &src, &tmp)
+			p.From.Type = obj.TYPE_MEM
+			p.From.Offset = int64(dir)
+			p.Scond |= arm.C_PBIT
+
+			p = gins(op, &tmp, &dst)
+			p.To.Type = obj.TYPE_MEM
+			p.To.Offset = int64(dir)
+			p.Scond |= arm.C_PBIT
+		}
+	}
+
+	gc.Regfree(&dst)
+	gc.Regfree(&src)
+	gc.Regfree(&tmp)
+}
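
The forward/backward decision in blockcopy reduces to one overlap test. A
self-contained restatement (illustrative only):

	package main

	import "fmt"

	// mustReverse reports whether copying w bytes forward from stack
	// offset osrc to odst would clobber not-yet-copied source bytes,
	// i.e. whether the destination starts inside [osrc, osrc+w).
	func mustReverse(osrc, odst, w int64) bool {
		return osrc < odst && odst < osrc+w
	}

	func main() {
		fmt.Println(mustReverse(0, 4, 16))  // true: overlap, copy backward
		fmt.Println(mustReverse(0, 32, 16)) // false: disjoint, copy forward
	}
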
diff --git a/src/cmd/compile/internal/arm/cgen64.go b/src/cmd/compile/internal/arm/cgen64.go
new file mode 100644
index 0000000..6c88b76
--- /dev/null
+++ b/src/cmd/compile/internal/arm/cgen64.go
@@ -0,0 +1,859 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package arm
+
+import (
+	"cmd/compile/internal/gc"
+	"cmd/internal/obj"
+	"cmd/internal/obj/arm"
+)
+
+/*
+ * generate 64-bit
+ *	res = n
+ * unhandled ops are fatal errors.
+ */
+func cgen64(n *gc.Node, res *gc.Node) {
+	if res.Op != gc.OINDREG && res.Op != gc.ONAME {
+		gc.Dump("n", n)
+		gc.Dump("res", res)
+		gc.Fatal("cgen64 %v of %v", gc.Oconv(int(n.Op), 0), gc.Oconv(int(res.Op), 0))
+	}
+
+	l := n.Left
+	var t1 gc.Node
+	if !l.Addable {
+		gc.Tempname(&t1, l.Type)
+		gc.Cgen(l, &t1)
+		l = &t1
+	}
+
+	var hi1 gc.Node
+	var lo1 gc.Node
+	split64(l, &lo1, &hi1)
+	switch n.Op {
+	default:
+		gc.Fatal("cgen64 %v", gc.Oconv(int(n.Op), 0))
+
+	case gc.OMINUS:
+		var lo2 gc.Node
+		var hi2 gc.Node
+		split64(res, &lo2, &hi2)
+
+		gc.Regalloc(&t1, lo1.Type, nil)
+		var al gc.Node
+		gc.Regalloc(&al, lo1.Type, nil)
+		var ah gc.Node
+		gc.Regalloc(&ah, hi1.Type, nil)
+
+		gins(arm.AMOVW, &lo1, &al)
+		gins(arm.AMOVW, &hi1, &ah)
+
+		gmove(ncon(0), &t1)
+		p1 := gins(arm.ASUB, &al, &t1)
+		p1.Scond |= arm.C_SBIT
+		gins(arm.AMOVW, &t1, &lo2)
+
+		gmove(ncon(0), &t1)
+		gins(arm.ASBC, &ah, &t1)
+		gins(arm.AMOVW, &t1, &hi2)
+
+		gc.Regfree(&t1)
+		gc.Regfree(&al)
+		gc.Regfree(&ah)
+		splitclean()
+		splitclean()
+		return
+
+	case gc.OCOM:
+		gc.Regalloc(&t1, lo1.Type, nil)
+		gmove(ncon(^uint32(0)), &t1)
+
+		var lo2 gc.Node
+		var hi2 gc.Node
+		split64(res, &lo2, &hi2)
+		var n1 gc.Node
+		gc.Regalloc(&n1, lo1.Type, nil)
+
+		gins(arm.AMOVW, &lo1, &n1)
+		gins(arm.AEOR, &t1, &n1)
+		gins(arm.AMOVW, &n1, &lo2)
+
+		gins(arm.AMOVW, &hi1, &n1)
+		gins(arm.AEOR, &t1, &n1)
+		gins(arm.AMOVW, &n1, &hi2)
+
+		gc.Regfree(&t1)
+		gc.Regfree(&n1)
+		splitclean()
+		splitclean()
+		return
+
+		// binary operators.
+	// common setup below.
+	case gc.OADD,
+		gc.OSUB,
+		gc.OMUL,
+		gc.OLSH,
+		gc.ORSH,
+		gc.OAND,
+		gc.OOR,
+		gc.OXOR,
+		gc.OLROT:
+		break
+	}
+
+	// setup for binary operators
+	r := n.Right
+
+	if r != nil && !r.Addable {
+		var t2 gc.Node
+		gc.Tempname(&t2, r.Type)
+		gc.Cgen(r, &t2)
+		r = &t2
+	}
+
+	var hi2 gc.Node
+	var lo2 gc.Node
+	if gc.Is64(r.Type) {
+		split64(r, &lo2, &hi2)
+	}
+
+	var al gc.Node
+	gc.Regalloc(&al, lo1.Type, nil)
+	var ah gc.Node
+	gc.Regalloc(&ah, hi1.Type, nil)
+
+	// Do op.  Leave result in ah:al.
+	switch n.Op {
+	default:
+		gc.Fatal("cgen64: not implemented: %v\n", n)
+
+		// TODO: Constants
+	case gc.OADD:
+		var bl gc.Node
+		gc.Regalloc(&bl, gc.Types[gc.TPTR32], nil)
+
+		var bh gc.Node
+		gc.Regalloc(&bh, gc.Types[gc.TPTR32], nil)
+		gins(arm.AMOVW, &hi1, &ah)
+		gins(arm.AMOVW, &lo1, &al)
+		gins(arm.AMOVW, &hi2, &bh)
+		gins(arm.AMOVW, &lo2, &bl)
+		p1 := gins(arm.AADD, &bl, &al)
+		p1.Scond |= arm.C_SBIT
+		gins(arm.AADC, &bh, &ah)
+		gc.Regfree(&bl)
+		gc.Regfree(&bh)
+
+		// TODO: Constants.
+	case gc.OSUB:
+		var bl gc.Node
+		gc.Regalloc(&bl, gc.Types[gc.TPTR32], nil)
+
+		var bh gc.Node
+		gc.Regalloc(&bh, gc.Types[gc.TPTR32], nil)
+		gins(arm.AMOVW, &lo1, &al)
+		gins(arm.AMOVW, &hi1, &ah)
+		gins(arm.AMOVW, &lo2, &bl)
+		gins(arm.AMOVW, &hi2, &bh)
+		p1 := gins(arm.ASUB, &bl, &al)
+		p1.Scond |= arm.C_SBIT
+		gins(arm.ASBC, &bh, &ah)
+		gc.Regfree(&bl)
+		gc.Regfree(&bh)
+
+		// TODO(kaib): this can be done with 4 regs and does not need 6
+	case gc.OMUL:
+		var bl gc.Node
+		gc.Regalloc(&bl, gc.Types[gc.TPTR32], nil)
+
+		var bh gc.Node
+		gc.Regalloc(&bh, gc.Types[gc.TPTR32], nil)
+		var cl gc.Node
+		gc.Regalloc(&cl, gc.Types[gc.TPTR32], nil)
+		var ch gc.Node
+		gc.Regalloc(&ch, gc.Types[gc.TPTR32], nil)
+
+		// load args into bh:bl and ch:cl.
+		gins(arm.AMOVW, &hi1, &bh)
+
+		gins(arm.AMOVW, &lo1, &bl)
+		gins(arm.AMOVW, &hi2, &ch)
+		gins(arm.AMOVW, &lo2, &cl)
+
+		// bl * cl -> ah al
+		p1 := gins(arm.AMULLU, nil, nil)
+
+		p1.From.Type = obj.TYPE_REG
+		p1.From.Reg = bl.Reg
+		p1.Reg = cl.Reg
+		p1.To.Type = obj.TYPE_REGREG
+		p1.To.Reg = ah.Reg
+		p1.To.Offset = int64(al.Reg)
+
+		//print("%P\n", p1);
+
+		// bl * ch + ah -> ah
+		p1 = gins(arm.AMULA, nil, nil)
+
+		p1.From.Type = obj.TYPE_REG
+		p1.From.Reg = bl.Reg
+		p1.Reg = ch.Reg
+		p1.To.Type = obj.TYPE_REGREG2
+		p1.To.Reg = ah.Reg
+		p1.To.Offset = int64(ah.Reg)
+
+		//print("%P\n", p1);
+
+		// bh * cl + ah -> ah
+		p1 = gins(arm.AMULA, nil, nil)
+
+		p1.From.Type = obj.TYPE_REG
+		p1.From.Reg = bh.Reg
+		p1.Reg = cl.Reg
+		p1.To.Type = obj.TYPE_REGREG2
+		p1.To.Reg = ah.Reg
+		p1.To.Offset = int64(ah.Reg)
+
+		//print("%P\n", p1);
+
+		gc.Regfree(&bh)
+
+		gc.Regfree(&bl)
+		gc.Regfree(&ch)
+		gc.Regfree(&cl)
+
+		// We only rotate by a constant c in [0,64).
+	// if c >= 32:
+	//	lo, hi = hi, lo
+	//	c -= 32
+	// if c == 0:
+	//	no-op
+	// else:
+	//	t = hi
+	//	shld hi:lo, c
+	//	shld lo:t, c
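+	//
+	// On ARM the rotate below is built from shifted operands rather than
+	// SHLD; for 1 <= v <= 31 the code computes (illustrative reading):
+	//	al = bl<<v | bh>>(32-v)
+	//	ah = bh<<v | bl>>(32-v)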
+	case gc.OLROT:
+		v := uint64(r.Int())
+
+		var bl gc.Node
+		gc.Regalloc(&bl, lo1.Type, nil)
+		var bh gc.Node
+		gc.Regalloc(&bh, hi1.Type, nil)
+		if v >= 32 {
+			// reverse during load to do the first 32 bits of rotate
+			v -= 32
+
+			gins(arm.AMOVW, &hi1, &bl)
+			gins(arm.AMOVW, &lo1, &bh)
+		} else {
+			gins(arm.AMOVW, &hi1, &bh)
+			gins(arm.AMOVW, &lo1, &bl)
+		}
+
+		if v == 0 {
+			gins(arm.AMOVW, &bh, &ah)
+			gins(arm.AMOVW, &bl, &al)
+		} else {
+			// rotate by 1 <= v <= 31
+			//	MOVW	bl<<v, al
+			//	MOVW	bh<<v, ah
+			//	OR		bl>>(32-v), ah
+			//	OR		bh>>(32-v), al
+			gshift(arm.AMOVW, &bl, arm.SHIFT_LL, int32(v), &al)
+
+			gshift(arm.AMOVW, &bh, arm.SHIFT_LL, int32(v), &ah)
+			gshift(arm.AORR, &bl, arm.SHIFT_LR, int32(32-v), &ah)
+			gshift(arm.AORR, &bh, arm.SHIFT_LR, int32(32-v), &al)
+		}
+
+		gc.Regfree(&bl)
+		gc.Regfree(&bh)
+
+	case gc.OLSH:
+		var bl gc.Node
+		gc.Regalloc(&bl, lo1.Type, nil)
+		var bh gc.Node
+		gc.Regalloc(&bh, hi1.Type, nil)
+		gins(arm.AMOVW, &hi1, &bh)
+		gins(arm.AMOVW, &lo1, &bl)
+
+		var p6 *obj.Prog
+		var s gc.Node
+		var n1 gc.Node
+		var creg gc.Node
+		var p1 *obj.Prog
+		var p2 *obj.Prog
+		var p3 *obj.Prog
+		var p4 *obj.Prog
+		var p5 *obj.Prog
+		if r.Op == gc.OLITERAL {
+			v := uint64(r.Int())
+			if v >= 64 {
+				// TODO(kaib): replace with gins(AMOVW, nodintconst(0), &al)
+				// here and below (verify it optimizes to EOR)
+				gins(arm.AEOR, &al, &al)
+
+				gins(arm.AEOR, &ah, &ah)
+			} else if v > 32 {
+				gins(arm.AEOR, &al, &al)
+
+				//	MOVW	bl<<(v-32), ah
+				gshift(arm.AMOVW, &bl, arm.SHIFT_LL, int32(v-32), &ah)
+			} else if v == 32 {
+				gins(arm.AEOR, &al, &al)
+				gins(arm.AMOVW, &bl, &ah)
+			} else if v > 0 {
+				//	MOVW	bl<<v, al
+				gshift(arm.AMOVW, &bl, arm.SHIFT_LL, int32(v), &al)
+
+				//	MOVW	bh<<v, ah
+				gshift(arm.AMOVW, &bh, arm.SHIFT_LL, int32(v), &ah)
+
+				//	OR		bl>>(32-v), ah
+				gshift(arm.AORR, &bl, arm.SHIFT_LR, int32(32-v), &ah)
+			} else {
+				gins(arm.AMOVW, &bl, &al)
+				gins(arm.AMOVW, &bh, &ah)
+			}
+
+			goto olsh_break
+		}
+
+		gc.Regalloc(&s, gc.Types[gc.TUINT32], nil)
+		gc.Regalloc(&creg, gc.Types[gc.TUINT32], nil)
+		if gc.Is64(r.Type) {
+			// shift is >= 1<<32
+			var cl gc.Node
+			var ch gc.Node
+			split64(r, &cl, &ch)
+
+			gmove(&ch, &s)
+			gins(arm.ATST, &s, nil)
+			p6 = gc.Gbranch(arm.ABNE, nil, 0)
+			gmove(&cl, &s)
+			splitclean()
+		} else {
+			gmove(r, &s)
+			p6 = nil
+		}
+
+		gins(arm.ATST, &s, nil)
+
+		// shift == 0
+		p1 = gins(arm.AMOVW, &bl, &al)
+
+		p1.Scond = arm.C_SCOND_EQ
+		p1 = gins(arm.AMOVW, &bh, &ah)
+		p1.Scond = arm.C_SCOND_EQ
+		p2 = gc.Gbranch(arm.ABEQ, nil, 0)
+
+		// shift is < 32
+		gc.Nodconst(&n1, gc.Types[gc.TUINT32], 32)
+
+		gmove(&n1, &creg)
+		gins(arm.ACMP, &s, &creg)
+
+		//	MOVW.LO		bl<<s, al
+		p1 = gregshift(arm.AMOVW, &bl, arm.SHIFT_LL, &s, &al)
+
+		p1.Scond = arm.C_SCOND_LO
+
+		//	MOVW.LO		bh<<s, ah
+		p1 = gregshift(arm.AMOVW, &bh, arm.SHIFT_LL, &s, &ah)
+
+		p1.Scond = arm.C_SCOND_LO
+
+		//	SUB.LO		s, creg
+		p1 = gins(arm.ASUB, &s, &creg)
+
+		p1.Scond = arm.C_SCOND_LO
+
+		//	OR.LO		bl>>creg, ah
+		p1 = gregshift(arm.AORR, &bl, arm.SHIFT_LR, &creg, &ah)
+
+		p1.Scond = arm.C_SCOND_LO
+
+		//	BLO	end
+		p3 = gc.Gbranch(arm.ABLO, nil, 0)
+
+		// shift == 32
+		p1 = gins(arm.AEOR, &al, &al)
+
+		p1.Scond = arm.C_SCOND_EQ
+		p1 = gins(arm.AMOVW, &bl, &ah)
+		p1.Scond = arm.C_SCOND_EQ
+		p4 = gc.Gbranch(arm.ABEQ, nil, 0)
+
+		// shift is < 64
+		gc.Nodconst(&n1, gc.Types[gc.TUINT32], 64)
+
+		gmove(&n1, &creg)
+		gins(arm.ACMP, &s, &creg)
+
+		//	EOR.LO	al, al
+		p1 = gins(arm.AEOR, &al, &al)
+
+		p1.Scond = arm.C_SCOND_LO
+
+		//	MOVW.LO		creg>>1, creg
+		p1 = gshift(arm.AMOVW, &creg, arm.SHIFT_LR, 1, &creg)
+
+		p1.Scond = arm.C_SCOND_LO
+
+		//	SUB.LO		creg, s
+		p1 = gins(arm.ASUB, &creg, &s)
+
+		p1.Scond = arm.C_SCOND_LO
+
+		//	MOVW	bl<<s, ah
+		p1 = gregshift(arm.AMOVW, &bl, arm.SHIFT_LL, &s, &ah)
+
+		p1.Scond = arm.C_SCOND_LO
+
+		p5 = gc.Gbranch(arm.ABLO, nil, 0)
+
+		// shift >= 64
+		if p6 != nil {
+			gc.Patch(p6, gc.Pc)
+		}
+		gins(arm.AEOR, &al, &al)
+		gins(arm.AEOR, &ah, &ah)
+
+		gc.Patch(p2, gc.Pc)
+		gc.Patch(p3, gc.Pc)
+		gc.Patch(p4, gc.Pc)
+		gc.Patch(p5, gc.Pc)
+		gc.Regfree(&s)
+		gc.Regfree(&creg)
+
+	olsh_break:
+		gc.Regfree(&bl)
+		gc.Regfree(&bh)
+
+	case gc.ORSH:
+		var bl gc.Node
+		gc.Regalloc(&bl, lo1.Type, nil)
+		var bh gc.Node
+		gc.Regalloc(&bh, hi1.Type, nil)
+		gins(arm.AMOVW, &hi1, &bh)
+		gins(arm.AMOVW, &lo1, &bl)
+
+		var p4 *obj.Prog
+		var p5 *obj.Prog
+		var n1 gc.Node
+		var p6 *obj.Prog
+		var s gc.Node
+		var p1 *obj.Prog
+		var p2 *obj.Prog
+		var creg gc.Node
+		var p3 *obj.Prog
+		if r.Op == gc.OLITERAL {
+			v := uint64(r.Int())
+			if v >= 64 {
+				if bh.Type.Etype == gc.TINT32 {
+					//	MOVW	bh->31, al
+					gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &al)
+
+					//	MOVW	bh->31, ah
+					gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &ah)
+				} else {
+					gins(arm.AEOR, &al, &al)
+					gins(arm.AEOR, &ah, &ah)
+				}
+			} else if v > 32 {
+				if bh.Type.Etype == gc.TINT32 {
+					//	MOVW	bh->(v-32), al
+					gshift(arm.AMOVW, &bh, arm.SHIFT_AR, int32(v-32), &al)
+
+					//	MOVW	bh->31, ah
+					gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &ah)
+				} else {
+					//	MOVW	bh>>(v-32), al
+					gshift(arm.AMOVW, &bh, arm.SHIFT_LR, int32(v-32), &al)
+
+					gins(arm.AEOR, &ah, &ah)
+				}
+			} else if v == 32 {
+				gins(arm.AMOVW, &bh, &al)
+				if bh.Type.Etype == gc.TINT32 {
+					//	MOVW	bh->31, ah
+					gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &ah)
+				} else {
+					gins(arm.AEOR, &ah, &ah)
+				}
+			} else if v > 0 {
+				//	MOVW	bl>>v, al
+				gshift(arm.AMOVW, &bl, arm.SHIFT_LR, int32(v), &al)
+
+				//	OR		bh<<(32-v), al
+				gshift(arm.AORR, &bh, arm.SHIFT_LL, int32(32-v), &al)
+
+				if bh.Type.Etype == gc.TINT32 {
+					//	MOVW	bh->v, ah
+					gshift(arm.AMOVW, &bh, arm.SHIFT_AR, int32(v), &ah)
+				} else {
+					//	MOVW	bh>>v, ah
+					gshift(arm.AMOVW, &bh, arm.SHIFT_LR, int32(v), &ah)
+				}
+			} else {
+				gins(arm.AMOVW, &bl, &al)
+				gins(arm.AMOVW, &bh, &ah)
+			}
+
+			goto orsh_break
+		}
+
+		gc.Regalloc(&s, gc.Types[gc.TUINT32], nil)
+		gc.Regalloc(&creg, gc.Types[gc.TUINT32], nil)
+		if gc.Is64(r.Type) {
+			// shift is >= 1<<32
+			var ch gc.Node
+			var cl gc.Node
+			split64(r, &cl, &ch)
+
+			gmove(&ch, &s)
+			gins(arm.ATST, &s, nil)
+			var p1 *obj.Prog
+			if bh.Type.Etype == gc.TINT32 {
+				p1 = gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &ah)
+			} else {
+				p1 = gins(arm.AEOR, &ah, &ah)
+			}
+			p1.Scond = arm.C_SCOND_NE
+			p6 = gc.Gbranch(arm.ABNE, nil, 0)
+			gmove(&cl, &s)
+			splitclean()
+		} else {
+			gmove(r, &s)
+			p6 = nil
+		}
+
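+		// Same run-time dispatch on s as in OLSH above, but shifting
+		// right and, for signed operands, filling with the sign bit.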
+		gins(arm.ATST, &s, nil)
+
+		// shift == 0
+		p1 = gins(arm.AMOVW, &bl, &al)
+
+		p1.Scond = arm.C_SCOND_EQ
+		p1 = gins(arm.AMOVW, &bh, &ah)
+		p1.Scond = arm.C_SCOND_EQ
+		p2 = gc.Gbranch(arm.ABEQ, nil, 0)
+
+		// check if shift is < 32
+		gc.Nodconst(&n1, gc.Types[gc.TUINT32], 32)
+
+		gmove(&n1, &creg)
+		gins(arm.ACMP, &s, &creg)
+
+		//	MOVW.LO		bl>>s, al
+		p1 = gregshift(arm.AMOVW, &bl, arm.SHIFT_LR, &s, &al)
+
+		p1.Scond = arm.C_SCOND_LO
+
+		//	SUB.LO		s,creg
+		p1 = gins(arm.ASUB, &s, &creg)
+
+		p1.Scond = arm.C_SCOND_LO
+
+		//	OR.LO		bh<<(32-s), al
+		p1 = gregshift(arm.AORR, &bh, arm.SHIFT_LL, &creg, &al)
+
+		p1.Scond = arm.C_SCOND_LO
+
+		if bh.Type.Etype == gc.TINT32 {
+			//	MOVW	bh->s, ah
+			p1 = gregshift(arm.AMOVW, &bh, arm.SHIFT_AR, &s, &ah)
+		} else {
+			//	MOVW	bh>>s, ah
+			p1 = gregshift(arm.AMOVW, &bh, arm.SHIFT_LR, &s, &ah)
+		}
+
+		p1.Scond = arm.C_SCOND_LO
+
+		//	BLO	end
+		p3 = gc.Gbranch(arm.ABLO, nil, 0)
+
+		// shift == 32
+		p1 = gins(arm.AMOVW, &bh, &al)
+
+		p1.Scond = arm.C_SCOND_EQ
+		if bh.Type.Etype == gc.TINT32 {
+			gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &ah)
+		} else {
+			gins(arm.AEOR, &ah, &ah)
+		}
+		p4 = gc.Gbranch(arm.ABEQ, nil, 0)
+
+		// check if shift is < 64
+		gc.Nodconst(&n1, gc.Types[gc.TUINT32], 64)
+
+		gmove(&n1, &creg)
+		gins(arm.ACMP, &s, &creg)
+
+		//	MOVW.LO		creg>>1, creg
+		p1 = gshift(arm.AMOVW, &creg, arm.SHIFT_LR, 1, &creg)
+
+		p1.Scond = arm.C_SCOND_LO
+
+		//	SUB.LO		creg, s
+		p1 = gins(arm.ASUB, &creg, &s)
+
+		p1.Scond = arm.C_SCOND_LO
+
+		if bh.Type.Etype == gc.TINT32 {
+			//	MOVW	bh->(s-32), al
+			p1 := gregshift(arm.AMOVW, &bh, arm.SHIFT_AR, &s, &al)
+
+			p1.Scond = arm.C_SCOND_LO
+		} else {
+			//	MOVW	bh>>(v-32), al
+			p1 := gregshift(arm.AMOVW, &bh, arm.SHIFT_LR, &s, &al)
+
+			p1.Scond = arm.C_SCOND_LO
+		}
+
+		//	BLO	end
+		p5 = gc.Gbranch(arm.ABLO, nil, 0)
+
+		// s >= 64
+		if p6 != nil {
+			gc.Patch(p6, gc.Pc)
+		}
+		if bh.Type.Etype == gc.TINT32 {
+			//	MOVW	bh->31, al
+			gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &al)
+		} else {
+			gins(arm.AEOR, &al, &al)
+		}
+
+		gc.Patch(p2, gc.Pc)
+		gc.Patch(p3, gc.Pc)
+		gc.Patch(p4, gc.Pc)
+		gc.Patch(p5, gc.Pc)
+		gc.Regfree(&s)
+		gc.Regfree(&creg)
+
+	orsh_break:
+		gc.Regfree(&bl)
+		gc.Regfree(&bh)
+
+		// TODO(kaib): literal optimizations
+	// make constant the right side (it usually is anyway).
+	//		if(lo1.op == OLITERAL) {
+	//			nswap(&lo1, &lo2);
+	//			nswap(&hi1, &hi2);
+	//		}
+	//		if(lo2.op == OLITERAL) {
+	//			// special cases for constants.
+	//			lv = mpgetfix(lo2.val.u.xval);
+	//			hv = mpgetfix(hi2.val.u.xval);
+	//			splitclean();	// right side
+	//			split64(res, &lo2, &hi2);
+	//			switch(n->op) {
+	//			case OXOR:
+	//				gmove(&lo1, &lo2);
+	//				gmove(&hi1, &hi2);
+	//				switch(lv) {
+	//				case 0:
+	//					break;
+	//				case 0xffffffffu:
+	//					gins(ANOTL, N, &lo2);
+	//					break;
+	//				default:
+	//					gins(AXORL, ncon(lv), &lo2);
+	//					break;
+	//				}
+	//				switch(hv) {
+	//				case 0:
+	//					break;
+	//				case 0xffffffffu:
+	//					gins(ANOTL, N, &hi2);
+	//					break;
+	//				default:
+	//					gins(AXORL, ncon(hv), &hi2);
+	//					break;
+	//				}
+	//				break;
+
+	//			case OAND:
+	//				switch(lv) {
+	//				case 0:
+	//					gins(AMOVL, ncon(0), &lo2);
+	//					break;
+	//				default:
+	//					gmove(&lo1, &lo2);
+	//					if(lv != 0xffffffffu)
+	//						gins(AANDL, ncon(lv), &lo2);
+	//					break;
+	//				}
+	//				switch(hv) {
+	//				case 0:
+	//					gins(AMOVL, ncon(0), &hi2);
+	//					break;
+	//				default:
+	//					gmove(&hi1, &hi2);
+	//					if(hv != 0xffffffffu)
+	//						gins(AANDL, ncon(hv), &hi2);
+	//					break;
+	//				}
+	//				break;
+
+	//			case OOR:
+	//				switch(lv) {
+	//				case 0:
+	//					gmove(&lo1, &lo2);
+	//					break;
+	//				case 0xffffffffu:
+	//					gins(AMOVL, ncon(0xffffffffu), &lo2);
+	//					break;
+	//				default:
+	//					gmove(&lo1, &lo2);
+	//					gins(AORL, ncon(lv), &lo2);
+	//					break;
+	//				}
+	//				switch(hv) {
+	//				case 0:
+	//					gmove(&hi1, &hi2);
+	//					break;
+	//				case 0xffffffffu:
+	//					gins(AMOVL, ncon(0xffffffffu), &hi2);
+	//					break;
+	//				default:
+	//					gmove(&hi1, &hi2);
+	//					gins(AORL, ncon(hv), &hi2);
+	//					break;
+	//				}
+	//				break;
+	//			}
+	//			splitclean();
+	//			splitclean();
+	//			goto out;
+	//		}
+	case gc.OXOR,
+		gc.OAND,
+		gc.OOR:
+		var n1 gc.Node
+		gc.Regalloc(&n1, lo1.Type, nil)
+
+		gins(arm.AMOVW, &lo1, &al)
+		gins(arm.AMOVW, &hi1, &ah)
+		gins(arm.AMOVW, &lo2, &n1)
+		gins(optoas(int(n.Op), lo1.Type), &n1, &al)
+		gins(arm.AMOVW, &hi2, &n1)
+		gins(optoas(int(n.Op), lo1.Type), &n1, &ah)
+		gc.Regfree(&n1)
+	}
+
+	if gc.Is64(r.Type) {
+		splitclean()
+	}
+	splitclean()
+
+	split64(res, &lo1, &hi1)
+	gins(arm.AMOVW, &al, &lo1)
+	gins(arm.AMOVW, &ah, &hi1)
+	splitclean()
+
+	//out:
+	gc.Regfree(&al)
+
+	gc.Regfree(&ah)
+}
+
+/*
+ * generate comparison of nl, nr, both 64-bit.
+ * nl is memory; nr is constant or memory.
+ */
+func cmp64(nl *gc.Node, nr *gc.Node, op int, likely int, to *obj.Prog) {
+	var lo1 gc.Node
+	var hi1 gc.Node
+	var lo2 gc.Node
+	var hi2 gc.Node
+	var r1 gc.Node
+	var r2 gc.Node
+
+	split64(nl, &lo1, &hi1)
+	split64(nr, &lo2, &hi2)
+
+	// compare most significant word;
+	// if they differ, we're done.
+	t := hi1.Type
+
+	gc.Regalloc(&r1, gc.Types[gc.TINT32], nil)
+	gc.Regalloc(&r2, gc.Types[gc.TINT32], nil)
+	gins(arm.AMOVW, &hi1, &r1)
+	gins(arm.AMOVW, &hi2, &r2)
+	gins(arm.ACMP, &r1, &r2)
+	gc.Regfree(&r1)
+	gc.Regfree(&r2)
+
+	var br *obj.Prog
+	switch op {
+	default:
+		gc.Fatal("cmp64 %v %v", gc.Oconv(int(op), 0), t)
+
+		// cmp hi
+	// bne L
+	// cmp lo
+	// beq to
+	// L:
+	case gc.OEQ:
+		br = gc.Gbranch(arm.ABNE, nil, -likely)
+
+		// cmp hi
+	// bne to
+	// cmp lo
+	// bne to
+	case gc.ONE:
+		gc.Patch(gc.Gbranch(arm.ABNE, nil, likely), to)
+
+		// cmp hi
+	// bgt to
+	// blt L
+	// cmp lo
+	// bge to (or bgt to)
+	// L:
+	case gc.OGE,
+		gc.OGT:
+		gc.Patch(gc.Gbranch(optoas(gc.OGT, t), nil, likely), to)
+
+		br = gc.Gbranch(optoas(gc.OLT, t), nil, -likely)
+
+		// cmp hi
+	// blt to
+	// bgt L
+	// cmp lo
+	// ble to (or blt to)
+	// L:
+	case gc.OLE,
+		gc.OLT:
+		gc.Patch(gc.Gbranch(optoas(gc.OLT, t), nil, likely), to)
+
+		br = gc.Gbranch(optoas(gc.OGT, t), nil, -likely)
+	}
+
+	// compare least significant word
+	t = lo1.Type
+
+	gc.Regalloc(&r1, gc.Types[gc.TINT32], nil)
+	gc.Regalloc(&r2, gc.Types[gc.TINT32], nil)
+	gins(arm.AMOVW, &lo1, &r1)
+	gins(arm.AMOVW, &lo2, &r2)
+	gins(arm.ACMP, &r1, &r2)
+	gc.Regfree(&r1)
+	gc.Regfree(&r2)
+
+	// jump again
+	gc.Patch(gc.Gbranch(optoas(op, t), nil, likely), to)
+
+	// point first branch down here if appropriate
+	if br != nil {
+		gc.Patch(br, gc.Pc)
+	}
+
+	splitclean()
+	splitclean()
+}
diff --git a/src/cmd/compile/internal/arm/galign.go b/src/cmd/compile/internal/arm/galign.go
new file mode 100644
index 0000000..60a39d3
--- /dev/null
+++ b/src/cmd/compile/internal/arm/galign.go
@@ -0,0 +1,94 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package arm
+
+import (
+	"cmd/compile/internal/gc"
+	"cmd/internal/obj"
+	"cmd/internal/obj/arm"
+)
+
+var thechar int = '5'
+
+var thestring string = "arm"
+
+var thelinkarch *obj.LinkArch = &arm.Linkarm
+
+func linkarchinit() {
+}
+
+var MAXWIDTH int64 = (1 << 32) - 1
+
+/*
+ * Go declares several platform-specific type aliases:
+ * int, uint, and uintptr
+ */
+var typedefs = []gc.Typedef{
+	gc.Typedef{"int", gc.TINT, gc.TINT32},
+	gc.Typedef{"uint", gc.TUINT, gc.TUINT32},
+	gc.Typedef{"uintptr", gc.TUINTPTR, gc.TUINT32},
+}
+
+func betypeinit() {
+	gc.Widthptr = 4
+	gc.Widthint = 4
+	gc.Widthreg = 4
+}
+
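+// Main wires the arm back end into the portable front end by filling in
+// the Thearch table with its registers and code-generation hooks, then
+// runs the compiler proper.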
+func Main() {
+	gc.Thearch.Thechar = thechar
+	gc.Thearch.Thestring = thestring
+	gc.Thearch.Thelinkarch = thelinkarch
+	gc.Thearch.Typedefs = typedefs
+	gc.Thearch.REGSP = arm.REGSP
+	gc.Thearch.REGCTXT = arm.REGCTXT
+	gc.Thearch.REGCALLX = arm.REG_R1
+	gc.Thearch.REGCALLX2 = arm.REG_R2
+	gc.Thearch.REGRETURN = arm.REG_R0
+	gc.Thearch.REGMIN = arm.REG_R0
+	gc.Thearch.REGMAX = arm.REGEXT
+	gc.Thearch.FREGMIN = arm.REG_F0
+	gc.Thearch.FREGMAX = arm.FREGEXT
+	gc.Thearch.MAXWIDTH = MAXWIDTH
+	gc.Thearch.ReservedRegs = resvd
+
+	gc.Thearch.Betypeinit = betypeinit
+	gc.Thearch.Cgen64 = cgen64
+	gc.Thearch.Cgen_hmul = cgen_hmul
+	gc.Thearch.Cgen_shift = cgen_shift
+	gc.Thearch.Clearfat = clearfat
+	gc.Thearch.Cmp64 = cmp64
+	gc.Thearch.Defframe = defframe
+	gc.Thearch.Excise = excise
+	gc.Thearch.Expandchecks = expandchecks
+	gc.Thearch.Getg = getg
+	gc.Thearch.Gins = gins
+	gc.Thearch.Ginscmp = ginscmp
+	gc.Thearch.Ginscon = ginscon
+	gc.Thearch.Ginsnop = ginsnop
+	gc.Thearch.Gmove = gmove
+	gc.Thearch.Cgenindex = cgenindex
+	gc.Thearch.Linkarchinit = linkarchinit
+	gc.Thearch.Peep = peep
+	gc.Thearch.Proginfo = proginfo
+	gc.Thearch.Regtyp = regtyp
+	gc.Thearch.Sameaddr = sameaddr
+	gc.Thearch.Smallindir = smallindir
+	gc.Thearch.Stackaddr = stackaddr
+	gc.Thearch.Blockcopy = blockcopy
+	gc.Thearch.Sudoaddable = sudoaddable
+	gc.Thearch.Sudoclean = sudoclean
+	gc.Thearch.Excludedregs = excludedregs
+	gc.Thearch.RtoB = RtoB
+	gc.Thearch.FtoB = RtoB
+	gc.Thearch.BtoR = BtoR
+	gc.Thearch.BtoF = BtoF
+	gc.Thearch.Optoas = optoas
+	gc.Thearch.Doregbits = doregbits
+	gc.Thearch.Regnames = regnames
+
+	gc.Main()
+	gc.Exit(0)
+}
diff --git a/src/cmd/compile/internal/arm/ggen.go b/src/cmd/compile/internal/arm/ggen.go
new file mode 100644
index 0000000..6633351
--- /dev/null
+++ b/src/cmd/compile/internal/arm/ggen.go
@@ -0,0 +1,529 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package arm
+
+import (
+	"cmd/compile/internal/gc"
+	"cmd/internal/obj"
+	"cmd/internal/obj/arm"
+)
+
+func defframe(ptxt *obj.Prog) {
+	var n *gc.Node
+
+	// fill in argument size, stack size
+	ptxt.To.Type = obj.TYPE_TEXTSIZE
+
+	ptxt.To.Val = int32(gc.Rnd(gc.Curfn.Type.Argwid, int64(gc.Widthptr)))
+	frame := uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
+	ptxt.To.Offset = int64(frame)
+
+	// insert code to contain ambiguously live variables
+	// so that garbage collector only sees initialized values
+	// when it looks for pointers.
+	p := ptxt
+
+	hi := int64(0)
+	lo := hi
+	r0 := uint32(0)
+	for l := gc.Curfn.Func.Dcl; l != nil; l = l.Next {
+		n = l.N
+		if !n.Name.Needzero {
+			continue
+		}
+		if n.Class != gc.PAUTO {
+			gc.Fatal("needzero class %d", n.Class)
+		}
+		if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
+			gc.Fatal("var %v has size %d offset %d", gc.Nconv(n, obj.FmtLong), int(n.Type.Width), int(n.Xoffset))
+		}
+		if lo != hi && n.Xoffset+n.Type.Width >= lo-int64(2*gc.Widthptr) {
+			// merge with range we already have
+			lo = gc.Rnd(n.Xoffset, int64(gc.Widthptr))
+
+			continue
+		}
+
+		// zero old range
+		p = zerorange(p, int64(frame), lo, hi, &r0)
+
+		// set new range
+		hi = n.Xoffset + n.Type.Width
+
+		lo = n.Xoffset
+	}
+
+	// zero final range
+	zerorange(p, int64(frame), lo, hi, &r0)
+}
+
+func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, r0 *uint32) *obj.Prog {
+	cnt := hi - lo
+	if cnt == 0 {
+		return p
+	}
+	if *r0 == 0 {
+		p = appendpp(p, arm.AMOVW, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, arm.REG_R0, 0)
+		*r0 = 1
+	}
+
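+	// Strategy by size: unrolled MOVW stores for small ranges, a call
+	// into duffzero for medium ones (not under NaCl), and an explicit
+	// store/compare/branch loop for everything else.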
+	if cnt < int64(4*gc.Widthptr) {
+		for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
+			p = appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REGSP, int32(4+frame+lo+i))
+		}
+	} else if !gc.Nacl && (cnt <= int64(128*gc.Widthptr)) {
+		p = appendpp(p, arm.AADD, obj.TYPE_CONST, 0, int32(4+frame+lo), obj.TYPE_REG, arm.REG_R1, 0)
+		p.Reg = arm.REGSP
+		p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
+		f := gc.Sysfunc("duffzero")
+		gc.Naddr(&p.To, f)
+		gc.Afunclit(&p.To, f)
+		p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
+	} else {
+		p = appendpp(p, arm.AADD, obj.TYPE_CONST, 0, int32(4+frame+lo), obj.TYPE_REG, arm.REG_R1, 0)
+		p.Reg = arm.REGSP
+		p = appendpp(p, arm.AADD, obj.TYPE_CONST, 0, int32(cnt), obj.TYPE_REG, arm.REG_R2, 0)
+		p.Reg = arm.REG_R1
+		p = appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REG_R1, 4)
+		p1 := p
+		p.Scond |= arm.C_PBIT
+		p = appendpp(p, arm.ACMP, obj.TYPE_REG, arm.REG_R1, 0, obj.TYPE_NONE, 0, 0)
+		p.Reg = arm.REG_R2
+		p = appendpp(p, arm.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
+		gc.Patch(p, p1)
+	}
+
+	return p
+}
+
+func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int32, ttype int, treg int, toffset int32) *obj.Prog {
+	q := gc.Ctxt.NewProg()
+	gc.Clearp(q)
+	q.As = int16(as)
+	q.Lineno = p.Lineno
+	q.From.Type = int16(ftype)
+	q.From.Reg = int16(freg)
+	q.From.Offset = int64(foffset)
+	q.To.Type = int16(ttype)
+	q.To.Reg = int16(treg)
+	q.To.Offset = int64(toffset)
+	q.Link = p.Link
+	p.Link = q
+	return q
+}
+
+/*
+ * generate high multiply
+ *  res = (nl * nr) >> wordsize
+ */
+func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
+	if nl.Ullman < nr.Ullman {
+		tmp := nl
+		nl = nr
+		nr = tmp
+	}
+
+	t := nl.Type
+	w := int(t.Width * 8)
+	var n1 gc.Node
+	gc.Regalloc(&n1, t, res)
+	gc.Cgen(nl, &n1)
+	var n2 gc.Node
+	gc.Regalloc(&n2, t, nil)
+	gc.Cgen(nr, &n2)
+	switch gc.Simtype[t.Etype] {
+	case gc.TINT8,
+		gc.TINT16:
+		gins(optoas(gc.OMUL, t), &n2, &n1)
+		gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(w), &n1)
+
+	case gc.TUINT8,
+		gc.TUINT16:
+		gins(optoas(gc.OMUL, t), &n2, &n1)
+		gshift(arm.AMOVW, &n1, arm.SHIFT_LR, int32(w), &n1)
+
+		// perform a long multiplication.
+	case gc.TINT32,
+		gc.TUINT32:
+		var p *obj.Prog
+		if gc.Issigned[t.Etype] {
+			p = gins(arm.AMULL, &n2, nil)
+		} else {
+			p = gins(arm.AMULLU, &n2, nil)
+		}
+
+		// n2 * n1 -> (n1 n2)
+		p.Reg = n1.Reg
+
+		p.To.Type = obj.TYPE_REGREG
+		p.To.Reg = n1.Reg
+		p.To.Offset = int64(n2.Reg)
+
+	default:
+		gc.Fatal("cgen_hmul %v", t)
+	}
+
+	gc.Cgen(&n1, res)
+	gc.Regfree(&n1)
+	gc.Regfree(&n2)
+}
+
+/*
+ * generate shift according to op, one of:
+ *	res = nl << nr
+ *	res = nl >> nr
+ */
+func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+	if nl.Type.Width > 4 {
+		gc.Fatal("cgen_shift %v", nl.Type)
+	}
+
+	w := int(nl.Type.Width * 8)
+
+	if op == gc.OLROT {
+		v := nr.Int()
+		var n1 gc.Node
+		gc.Regalloc(&n1, nl.Type, res)
+		if w == 32 {
+			gc.Cgen(nl, &n1)
+			gshift(arm.AMOVW, &n1, arm.SHIFT_RR, int32(w)-int32(v), &n1)
+		} else {
+			var n2 gc.Node
+			gc.Regalloc(&n2, nl.Type, nil)
+			gc.Cgen(nl, &n2)
+			gshift(arm.AMOVW, &n2, arm.SHIFT_LL, int32(v), &n1)
+			gshift(arm.AORR, &n2, arm.SHIFT_LR, int32(w)-int32(v), &n1)
+			gc.Regfree(&n2)
+
+			// Ensure sign/zero-extended result.
+			gins(optoas(gc.OAS, nl.Type), &n1, &n1)
+		}
+
+		gmove(&n1, res)
+		gc.Regfree(&n1)
+		return
+	}
+
+	if nr.Op == gc.OLITERAL {
+		var n1 gc.Node
+		gc.Regalloc(&n1, nl.Type, res)
+		gc.Cgen(nl, &n1)
+		sc := uint64(nr.Int())
+		if sc == 0 {
+			// nothing to do
+		} else if sc >= uint64(nl.Type.Width*8) {
+			if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
+				gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(w), &n1)
+			} else {
+				gins(arm.AEOR, &n1, &n1)
+			}
+		} else {
+			if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
+				gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(sc), &n1)
+			} else if op == gc.ORSH {
+				gshift(arm.AMOVW, &n1, arm.SHIFT_LR, int32(sc), &n1)
+			} else { // OLSH
+				gshift(arm.AMOVW, &n1, arm.SHIFT_LL, int32(sc), &n1)
+			}
+		}
+
+		if w < 32 && op == gc.OLSH {
+			gins(optoas(gc.OAS, nl.Type), &n1, &n1)
+		}
+		gmove(&n1, res)
+		gc.Regfree(&n1)
+		return
+	}
+
+	tr := nr.Type
+	var t gc.Node
+	var n1 gc.Node
+	var n2 gc.Node
+	var n3 gc.Node
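+	// A shift count wider than 32 bits cannot be used directly: if its
+	// high word is nonzero the count is at least 1<<32 >= w, so the count
+	// register is simply replaced with w and falls into the oversized-shift
+	// fixup below.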
+	if tr.Width > 4 {
+		var nt gc.Node
+		gc.Tempname(&nt, nr.Type)
+		if nl.Ullman >= nr.Ullman {
+			gc.Regalloc(&n2, nl.Type, res)
+			gc.Cgen(nl, &n2)
+			gc.Cgen(nr, &nt)
+			n1 = nt
+		} else {
+			gc.Cgen(nr, &nt)
+			gc.Regalloc(&n2, nl.Type, res)
+			gc.Cgen(nl, &n2)
+		}
+
+		var hi gc.Node
+		var lo gc.Node
+		split64(&nt, &lo, &hi)
+		gc.Regalloc(&n1, gc.Types[gc.TUINT32], nil)
+		gc.Regalloc(&n3, gc.Types[gc.TUINT32], nil)
+		gmove(&lo, &n1)
+		gmove(&hi, &n3)
+		splitclean()
+		gins(arm.ATST, &n3, nil)
+		gc.Nodconst(&t, gc.Types[gc.TUINT32], int64(w))
+		p1 := gins(arm.AMOVW, &t, &n1)
+		p1.Scond = arm.C_SCOND_NE
+		tr = gc.Types[gc.TUINT32]
+		gc.Regfree(&n3)
+	} else {
+		if nl.Ullman >= nr.Ullman {
+			gc.Regalloc(&n2, nl.Type, res)
+			gc.Cgen(nl, &n2)
+			gc.Regalloc(&n1, nr.Type, nil)
+			gc.Cgen(nr, &n1)
+		} else {
+			gc.Regalloc(&n1, nr.Type, nil)
+			gc.Cgen(nr, &n1)
+			gc.Regalloc(&n2, nl.Type, res)
+			gc.Cgen(nl, &n2)
+		}
+	}
+
+	// test for shift being 0
+	gins(arm.ATST, &n1, nil)
+
+	p3 := gc.Gbranch(arm.ABEQ, nil, -1)
+
+	// test and fix up large shifts
+	// TODO: if(!bounded), don't emit some of this.
+	gc.Regalloc(&n3, tr, nil)
+
+	gc.Nodconst(&t, gc.Types[gc.TUINT32], int64(w))
+	gmove(&t, &n3)
+	gins(arm.ACMP, &n1, &n3)
+	if op == gc.ORSH {
+		var p1 *obj.Prog
+		var p2 *obj.Prog
+		if gc.Issigned[nl.Type.Etype] {
+			p1 = gshift(arm.AMOVW, &n2, arm.SHIFT_AR, int32(w)-1, &n2)
+			p2 = gregshift(arm.AMOVW, &n2, arm.SHIFT_AR, &n1, &n2)
+		} else {
+			p1 = gins(arm.AEOR, &n2, &n2)
+			p2 = gregshift(arm.AMOVW, &n2, arm.SHIFT_LR, &n1, &n2)
+		}
+
+		p1.Scond = arm.C_SCOND_HS
+		p2.Scond = arm.C_SCOND_LO
+	} else {
+		p1 := gins(arm.AEOR, &n2, &n2)
+		p2 := gregshift(arm.AMOVW, &n2, arm.SHIFT_LL, &n1, &n2)
+		p1.Scond = arm.C_SCOND_HS
+		p2.Scond = arm.C_SCOND_LO
+	}
+
+	gc.Regfree(&n3)
+
+	gc.Patch(p3, gc.Pc)
+
+	// Left-shift of smaller word must be sign/zero-extended.
+	if w < 32 && op == gc.OLSH {
+		gins(optoas(gc.OAS, nl.Type), &n2, &n2)
+	}
+	gmove(&n2, res)
+
+	gc.Regfree(&n1)
+	gc.Regfree(&n2)
+}
+
+func clearfat(nl *gc.Node) {
+	/* clear a fat object */
+	if gc.Debug['g'] != 0 {
+		gc.Dump("\nclearfat", nl)
+	}
+
+	w := uint32(nl.Type.Width)
+
+	// Avoid taking the address for simple enough types.
+	if gc.Componentgen(nil, nl) {
+		return
+	}
+
+	c := w % 4 // bytes
+	q := w / 4 // words
+
+	var r0 gc.Node
+	r0.Op = gc.OREGISTER
+
+	r0.Reg = arm.REG_R0
+	var r1 gc.Node
+	r1.Op = gc.OREGISTER
+	r1.Reg = arm.REG_R1
+	var dst gc.Node
+	gc.Regalloc(&dst, gc.Types[gc.Tptr], &r1)
+	gc.Agen(nl, &dst)
+	var nc gc.Node
+	gc.Nodconst(&nc, gc.Types[gc.TUINT32], 0)
+	var nz gc.Node
+	gc.Regalloc(&nz, gc.Types[gc.TUINT32], &r0)
+	gc.Cgen(&nc, &nz)
+
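+	// As in zerorange: an explicit store loop for very large objects,
+	// duffzero for medium ones (not under NaCl), and per-word stores
+	// otherwise; trailing bytes are cleared one MOVB at a time.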
+	if q > 128 {
+		var end gc.Node
+		gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
+		p := gins(arm.AMOVW, &dst, &end)
+		p.From.Type = obj.TYPE_ADDR
+		p.From.Offset = int64(q) * 4
+
+		p = gins(arm.AMOVW, &nz, &dst)
+		p.To.Type = obj.TYPE_MEM
+		p.To.Offset = 4
+		p.Scond |= arm.C_PBIT
+		pl := p
+
+		p = gins(arm.ACMP, &dst, nil)
+		raddr(&end, p)
+		gc.Patch(gc.Gbranch(arm.ABNE, nil, 0), pl)
+
+		gc.Regfree(&end)
+	} else if q >= 4 && !gc.Nacl {
+		f := gc.Sysfunc("duffzero")
+		p := gins(obj.ADUFFZERO, nil, f)
+		gc.Afunclit(&p.To, f)
+
+		// 4 and 128 = magic constants: see ../../runtime/asm_arm.s
+		p.To.Offset = 4 * (128 - int64(q))
+	} else {
+		var p *obj.Prog
+		for q > 0 {
+			p = gins(arm.AMOVW, &nz, &dst)
+			p.To.Type = obj.TYPE_MEM
+			p.To.Offset = 4
+			p.Scond |= arm.C_PBIT
+
+			//print("1. %P\n", p);
+			q--
+		}
+	}
+
+	var p *obj.Prog
+	for c > 0 {
+		p = gins(arm.AMOVB, &nz, &dst)
+		p.To.Type = obj.TYPE_MEM
+		p.To.Offset = 1
+		p.Scond |= arm.C_PBIT
+
+		//print("2. %P\n", p);
+		c--
+	}
+
+	gc.Regfree(&dst)
+	gc.Regfree(&nz)
+}
+
+// Called after regopt and peep have run.
+// Expand CHECKNIL pseudo-op into actual nil pointer check.
+func expandchecks(firstp *obj.Prog) {
+	var reg int
+	var p1 *obj.Prog
+
+	for p := firstp; p != nil; p = p.Link {
+		if p.As != obj.ACHECKNIL {
+			continue
+		}
+		if gc.Debug_checknil != 0 && p.Lineno > 1 { // p.Lineno==1 in generated wrappers
+			gc.Warnl(int(p.Lineno), "generated nil check")
+		}
+		if p.From.Type != obj.TYPE_REG {
+			gc.Fatal("invalid nil check %v", p)
+		}
+		reg = int(p.From.Reg)
+
+		// check is
+		//	CMP arg, $0
+		//	MOV.EQ arg, 0(arg)
+		p1 = gc.Ctxt.NewProg()
+
+		gc.Clearp(p1)
+		p1.Link = p.Link
+		p.Link = p1
+		p1.Lineno = p.Lineno
+		p1.Pc = 9999
+		p1.As = arm.AMOVW
+		p1.From.Type = obj.TYPE_REG
+		p1.From.Reg = int16(reg)
+		p1.To.Type = obj.TYPE_MEM
+		p1.To.Reg = int16(reg)
+		p1.To.Offset = 0
+		p1.Scond = arm.C_SCOND_EQ
+		p.As = arm.ACMP
+		p.From.Type = obj.TYPE_CONST
+		p.From.Reg = 0
+		p.From.Offset = 0
+		p.Reg = int16(reg)
+	}
+}
+
+func ginsnop() {
+	var r gc.Node
+	gc.Nodreg(&r, gc.Types[gc.TINT], arm.REG_R0)
+	p := gins(arm.AAND, &r, &r)
+	p.Scond = arm.C_SCOND_EQ
+}
+
+/*
+ * generate
+ *	as $c, n
+ */
+func ginscon(as int, c int64, n *gc.Node) {
+	var n1 gc.Node
+	gc.Nodconst(&n1, gc.Types[gc.TINT32], c)
+	var n2 gc.Node
+	gc.Regalloc(&n2, gc.Types[gc.TINT32], nil)
+	gmove(&n1, &n2)
+	gins(as, &n2, n)
+	gc.Regfree(&n2)
+}
+
+func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
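+	// A zero literal compares more cheaply on the right-hand side, so
+	// swap the operands and reverse the condition to match.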
+	if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && n1.Int() == 0 && n2.Op != gc.OLITERAL {
+		op = gc.Brrev(op)
+		n1, n2 = n2, n1
+	}
+	var r1, r2, g1, g2 gc.Node
+	gc.Regalloc(&r1, t, n1)
+	gc.Regalloc(&g1, n1.Type, &r1)
+	gc.Cgen(n1, &g1)
+	gmove(&g1, &r1)
+	if gc.Isint[t.Etype] && n2.Op == gc.OLITERAL && n2.Int() == 0 {
+		gins(arm.ACMP, &r1, n2)
+	} else {
+		gc.Regalloc(&r2, t, n2)
+		gc.Regalloc(&g2, n1.Type, &r2)
+		gc.Cgen(n2, &g2)
+		gmove(&g2, &r2)
+		gins(optoas(gc.OCMP, t), &r1, &r2)
+		gc.Regfree(&g2)
+		gc.Regfree(&r2)
+	}
+	gc.Regfree(&g1)
+	gc.Regfree(&r1)
+	return gc.Gbranch(optoas(op, t), nil, likely)
+}
+
+// addr += index*width if possible.
+func addindex(index *gc.Node, width int64, addr *gc.Node) bool {
+	switch width {
+	case 2:
+		gshift(arm.AADD, index, arm.SHIFT_LL, 1, addr)
+		return true
+	case 4:
+		gshift(arm.AADD, index, arm.SHIFT_LL, 2, addr)
+		return true
+	case 8:
+		gshift(arm.AADD, index, arm.SHIFT_LL, 3, addr)
+		return true
+	}
+	return false
+}
+
+// res = runtime.getg()
+func getg(res *gc.Node) {
+	var n1 gc.Node
+	gc.Nodreg(&n1, res.Type, arm.REGG)
+	gmove(&n1, res)
+}
diff --git a/src/cmd/compile/internal/arm/gsubr.go b/src/cmd/compile/internal/arm/gsubr.go
new file mode 100644
index 0000000..5263f15
--- /dev/null
+++ b/src/cmd/compile/internal/arm/gsubr.go
@@ -0,0 +1,1209 @@
+// Derived from Inferno utils/5c/txt.c
+// http://code.google.com/p/inferno-os/source/browse/utils/5c/txt.c
+//
+//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
+//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+//	Portions Copyright © 1997-1999 Vita Nuova Limited
+//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+//	Portions Copyright © 2004,2006 Bruce Ellis
+//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+//	Portions Copyright © 2009 The Go Authors.  All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package arm
+
+import (
+	"cmd/compile/internal/gc"
+	"cmd/internal/obj"
+	"cmd/internal/obj/arm"
+	"fmt"
+)
+
+var resvd = []int{
+	arm.REG_R9,  // formerly reserved for m; might be okay to reuse now; not sure about NaCl
+	arm.REG_R10, // reserved for g
+}
+
+/*
+ * return a node for the constant i.
+ * the node is overwritten by the next call, so it is only safe to pass
+ * straight to gins.
+ */
+
+var ncon_n gc.Node
+
+func ncon(i uint32) *gc.Node {
+	if ncon_n.Type == nil {
+		gc.Nodconst(&ncon_n, gc.Types[gc.TUINT32], 0)
+	}
+	ncon_n.SetInt(int64(i))
+	return &ncon_n
+}
+
+var sclean [10]gc.Node
+
+var nsclean int
+
+/*
+ * n is a 64-bit value.  fill in lo and hi to refer to its 32-bit halves.
+ */
+func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
+	if !gc.Is64(n.Type) {
+		gc.Fatal("split64 %v", n.Type)
+	}
+
+	if nsclean >= len(sclean) {
+		gc.Fatal("split64 clean")
+	}
+	sclean[nsclean].Op = gc.OEMPTY
+	nsclean++
+	switch n.Op {
+	default:
+		switch n.Op {
+		default:
+			var n1 gc.Node
+			if !dotaddable(n, &n1) {
+				gc.Igen(n, &n1, nil)
+				sclean[nsclean-1] = n1
+			}
+
+			n = &n1
+
+		case gc.ONAME:
+			if n.Class == gc.PPARAMREF {
+				var n1 gc.Node
+				gc.Cgen(n.Name.Heapaddr, &n1)
+				sclean[nsclean-1] = n1
+				n = &n1
+			}
+
+			// nothing
+		case gc.OINDREG:
+			break
+		}
+
+		*lo = *n
+		*hi = *n
+		lo.Type = gc.Types[gc.TUINT32]
+		if n.Type.Etype == gc.TINT64 {
+			hi.Type = gc.Types[gc.TINT32]
+		} else {
+			hi.Type = gc.Types[gc.TUINT32]
+		}
+		hi.Xoffset += 4
+
+	case gc.OLITERAL:
+		var n1 gc.Node
+		n.Convconst(&n1, n.Type)
+		i := n1.Int()
+		gc.Nodconst(lo, gc.Types[gc.TUINT32], int64(uint32(i)))
+		i >>= 32
+		if n.Type.Etype == gc.TINT64 {
+			gc.Nodconst(hi, gc.Types[gc.TINT32], int64(int32(i)))
+		} else {
+			gc.Nodconst(hi, gc.Types[gc.TUINT32], int64(uint32(i)))
+		}
+	}
+}
+
+func splitclean() {
+	if nsclean <= 0 {
+		gc.Fatal("splitclean")
+	}
+	nsclean--
+	if sclean[nsclean].Op != gc.OEMPTY {
+		gc.Regfree(&sclean[nsclean])
+	}
+}
+
+func gmove(f *gc.Node, t *gc.Node) {
+	if gc.Debug['M'] != 0 {
+		fmt.Printf("gmove %v -> %v\n", f, t)
+	}
+
+	ft := gc.Simsimtype(f.Type)
+	tt := gc.Simsimtype(t.Type)
+	cvt := t.Type
+
+	if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
+		gc.Complexmove(f, t)
+		return
+	}
+
+	// cannot have two memory operands;
+	// except 64-bit, which always copies via registers anyway.
+	var a int
+	var r1 gc.Node
+	if !gc.Is64(f.Type) && !gc.Is64(t.Type) && gc.Ismem(f) && gc.Ismem(t) {
+		goto hard
+	}
+
+	// convert constant to desired type
+	if f.Op == gc.OLITERAL {
+		var con gc.Node
+		switch tt {
+		default:
+			f.Convconst(&con, t.Type)
+
+		case gc.TINT16,
+			gc.TINT8:
+			var con gc.Node
+			f.Convconst(&con, gc.Types[gc.TINT32])
+			var r1 gc.Node
+			gc.Regalloc(&r1, con.Type, t)
+			gins(arm.AMOVW, &con, &r1)
+			gmove(&r1, t)
+			gc.Regfree(&r1)
+			return
+
+		case gc.TUINT16,
+			gc.TUINT8:
+			var con gc.Node
+			f.Convconst(&con, gc.Types[gc.TUINT32])
+			var r1 gc.Node
+			gc.Regalloc(&r1, con.Type, t)
+			gins(arm.AMOVW, &con, &r1)
+			gmove(&r1, t)
+			gc.Regfree(&r1)
+			return
+		}
+
+		f = &con
+		ft = gc.Simsimtype(con.Type)
+
+		// constants can't move directly to memory
+		if gc.Ismem(t) && !gc.Is64(t.Type) {
+			goto hard
+		}
+	}
+
+	// value -> value copy, only one memory operand.
+	// figure out the instruction to use.
+	// break out of switch for one-instruction gins.
+	// goto rdst for "destination must be register".
+	// goto hard for "convert to cvt type first".
+	// otherwise handle and return.
+
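+	// The from/to types are packed into a single value so that one switch
+	// can dispatch on the pair.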
+	switch uint32(ft)<<16 | uint32(tt) {
+	default:
+		// should not happen
+		gc.Fatal("gmove %v -> %v", f, t)
+		return
+
+		/*
+		 * integer copy and truncate
+		 */
+	case gc.TINT8<<16 | gc.TINT8: // same size
+		if !gc.Ismem(f) {
+			a = arm.AMOVB
+			break
+		}
+		fallthrough
+
+	case gc.TUINT8<<16 | gc.TINT8,
+		gc.TINT16<<16 | gc.TINT8, // truncate
+		gc.TUINT16<<16 | gc.TINT8,
+		gc.TINT32<<16 | gc.TINT8,
+		gc.TUINT32<<16 | gc.TINT8:
+		a = arm.AMOVBS
+
+	case gc.TUINT8<<16 | gc.TUINT8:
+		if !gc.Ismem(f) {
+			a = arm.AMOVB
+			break
+		}
+		fallthrough
+
+	case gc.TINT8<<16 | gc.TUINT8,
+		gc.TINT16<<16 | gc.TUINT8,
+		gc.TUINT16<<16 | gc.TUINT8,
+		gc.TINT32<<16 | gc.TUINT8,
+		gc.TUINT32<<16 | gc.TUINT8:
+		a = arm.AMOVBU
+
+	case gc.TINT64<<16 | gc.TINT8, // truncate low word
+		gc.TUINT64<<16 | gc.TINT8:
+		a = arm.AMOVBS
+
+		goto trunc64
+
+	case gc.TINT64<<16 | gc.TUINT8,
+		gc.TUINT64<<16 | gc.TUINT8:
+		a = arm.AMOVBU
+		goto trunc64
+
+	case gc.TINT16<<16 | gc.TINT16: // same size
+		if !gc.Ismem(f) {
+			a = arm.AMOVH
+			break
+		}
+		fallthrough
+
+	case gc.TUINT16<<16 | gc.TINT16,
+		gc.TINT32<<16 | gc.TINT16, // truncate
+		gc.TUINT32<<16 | gc.TINT16:
+		a = arm.AMOVHS
+
+	case gc.TUINT16<<16 | gc.TUINT16:
+		if !gc.Ismem(f) {
+			a = arm.AMOVH
+			break
+		}
+		fallthrough
+
+	case gc.TINT16<<16 | gc.TUINT16,
+		gc.TINT32<<16 | gc.TUINT16,
+		gc.TUINT32<<16 | gc.TUINT16:
+		a = arm.AMOVHU
+
+	case gc.TINT64<<16 | gc.TINT16, // truncate low word
+		gc.TUINT64<<16 | gc.TINT16:
+		a = arm.AMOVHS
+
+		goto trunc64
+
+	case gc.TINT64<<16 | gc.TUINT16,
+		gc.TUINT64<<16 | gc.TUINT16:
+		a = arm.AMOVHU
+		goto trunc64
+
+	case gc.TINT32<<16 | gc.TINT32, // same size
+		gc.TINT32<<16 | gc.TUINT32,
+		gc.TUINT32<<16 | gc.TINT32,
+		gc.TUINT32<<16 | gc.TUINT32:
+		a = arm.AMOVW
+
+	case gc.TINT64<<16 | gc.TINT32, // truncate
+		gc.TUINT64<<16 | gc.TINT32,
+		gc.TINT64<<16 | gc.TUINT32,
+		gc.TUINT64<<16 | gc.TUINT32:
+		var flo gc.Node
+		var fhi gc.Node
+		split64(f, &flo, &fhi)
+
+		var r1 gc.Node
+		gc.Regalloc(&r1, t.Type, nil)
+		gins(arm.AMOVW, &flo, &r1)
+		gins(arm.AMOVW, &r1, t)
+		gc.Regfree(&r1)
+		splitclean()
+		return
+
+	case gc.TINT64<<16 | gc.TINT64, // same size
+		gc.TINT64<<16 | gc.TUINT64,
+		gc.TUINT64<<16 | gc.TINT64,
+		gc.TUINT64<<16 | gc.TUINT64:
+		var fhi gc.Node
+		var flo gc.Node
+		split64(f, &flo, &fhi)
+
+		var tlo gc.Node
+		var thi gc.Node
+		split64(t, &tlo, &thi)
+		var r1 gc.Node
+		gc.Regalloc(&r1, flo.Type, nil)
+		var r2 gc.Node
+		gc.Regalloc(&r2, fhi.Type, nil)
+		gins(arm.AMOVW, &flo, &r1)
+		gins(arm.AMOVW, &fhi, &r2)
+		gins(arm.AMOVW, &r1, &tlo)
+		gins(arm.AMOVW, &r2, &thi)
+		gc.Regfree(&r1)
+		gc.Regfree(&r2)
+		splitclean()
+		splitclean()
+		return
+
+		/*
+		 * integer up-conversions
+		 */
+	case gc.TINT8<<16 | gc.TINT16, // sign extend int8
+		gc.TINT8<<16 | gc.TUINT16,
+		gc.TINT8<<16 | gc.TINT32,
+		gc.TINT8<<16 | gc.TUINT32:
+		a = arm.AMOVBS
+
+		goto rdst
+
+	case gc.TINT8<<16 | gc.TINT64, // convert via int32
+		gc.TINT8<<16 | gc.TUINT64:
+		cvt = gc.Types[gc.TINT32]
+
+		goto hard
+
+	case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8
+		gc.TUINT8<<16 | gc.TUINT16,
+		gc.TUINT8<<16 | gc.TINT32,
+		gc.TUINT8<<16 | gc.TUINT32:
+		a = arm.AMOVBU
+
+		goto rdst
+
+	case gc.TUINT8<<16 | gc.TINT64, // convert via uint32
+		gc.TUINT8<<16 | gc.TUINT64:
+		cvt = gc.Types[gc.TUINT32]
+
+		goto hard
+
+	case gc.TINT16<<16 | gc.TINT32, // sign extend int16
+		gc.TINT16<<16 | gc.TUINT32:
+		a = arm.AMOVHS
+
+		goto rdst
+
+	case gc.TINT16<<16 | gc.TINT64, // convert via int32
+		gc.TINT16<<16 | gc.TUINT64:
+		cvt = gc.Types[gc.TINT32]
+
+		goto hard
+
+	case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16
+		gc.TUINT16<<16 | gc.TUINT32:
+		a = arm.AMOVHU
+
+		goto rdst
+
+	case gc.TUINT16<<16 | gc.TINT64, // convert via uint32
+		gc.TUINT16<<16 | gc.TUINT64:
+		cvt = gc.Types[gc.TUINT32]
+
+		goto hard
+
+	case gc.TINT32<<16 | gc.TINT64, // sign extend int32
+		gc.TINT32<<16 | gc.TUINT64:
+		var tlo gc.Node
+		var thi gc.Node
+		split64(t, &tlo, &thi)
+
+		var r1 gc.Node
+		gc.Regalloc(&r1, tlo.Type, nil)
+		var r2 gc.Node
+		gc.Regalloc(&r2, thi.Type, nil)
+		gmove(f, &r1)
+		p1 := gins(arm.AMOVW, &r1, &r2)
+		p1.From.Type = obj.TYPE_SHIFT
+		p1.From.Offset = 2<<5 | 31<<7 | int64(r1.Reg)&15 // r1->31
+		p1.From.Reg = 0
+
+		//print("gmove: %P\n", p1);
+		gins(arm.AMOVW, &r1, &tlo)
+
+		gins(arm.AMOVW, &r2, &thi)
+		gc.Regfree(&r1)
+		gc.Regfree(&r2)
+		splitclean()
+		return
+
+	case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
+		gc.TUINT32<<16 | gc.TUINT64:
+		var thi gc.Node
+		var tlo gc.Node
+		split64(t, &tlo, &thi)
+
+		gmove(f, &tlo)
+		var r1 gc.Node
+		gc.Regalloc(&r1, thi.Type, nil)
+		gins(arm.AMOVW, ncon(0), &r1)
+		gins(arm.AMOVW, &r1, &thi)
+		gc.Regfree(&r1)
+		splitclean()
+		return
+
+		//	case CASE(TFLOAT64, TUINT64):
+		/*
+		 * float to integer
+		 */
+	case gc.TFLOAT32<<16 | gc.TINT8,
+		gc.TFLOAT32<<16 | gc.TUINT8,
+		gc.TFLOAT32<<16 | gc.TINT16,
+		gc.TFLOAT32<<16 | gc.TUINT16,
+		gc.TFLOAT32<<16 | gc.TINT32,
+		gc.TFLOAT32<<16 | gc.TUINT32,
+
+		//	case CASE(TFLOAT32, TUINT64):
+
+		gc.TFLOAT64<<16 | gc.TINT8,
+		gc.TFLOAT64<<16 | gc.TUINT8,
+		gc.TFLOAT64<<16 | gc.TINT16,
+		gc.TFLOAT64<<16 | gc.TUINT16,
+		gc.TFLOAT64<<16 | gc.TINT32,
+		gc.TFLOAT64<<16 | gc.TUINT32:
+		fa := arm.AMOVF
+
+		a := arm.AMOVFW
+		if ft == gc.TFLOAT64 {
+			fa = arm.AMOVD
+			a = arm.AMOVDW
+		}
+
+		ta := arm.AMOVW
+		switch tt {
+		case gc.TINT8:
+			ta = arm.AMOVBS
+
+		case gc.TUINT8:
+			ta = arm.AMOVBU
+
+		case gc.TINT16:
+			ta = arm.AMOVHS
+
+		case gc.TUINT16:
+			ta = arm.AMOVHU
+		}
+
+		var r1 gc.Node
+		gc.Regalloc(&r1, gc.Types[ft], f)
+		var r2 gc.Node
+		gc.Regalloc(&r2, gc.Types[tt], t)
+		gins(fa, f, &r1)        // load to fpu
+		p1 := gins(a, &r1, &r1) // convert to w
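+		// For unsigned targets, the U bit marks the float-to-word
+		// conversion as unsigned.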
+		switch tt {
+		case gc.TUINT8,
+			gc.TUINT16,
+			gc.TUINT32:
+			p1.Scond |= arm.C_UBIT
+		}
+
+		gins(arm.AMOVW, &r1, &r2) // copy to cpu
+		gins(ta, &r2, t)          // store
+		gc.Regfree(&r1)
+		gc.Regfree(&r2)
+		return
+
+		/*
+		 * integer to float
+		 */
+	case gc.TINT8<<16 | gc.TFLOAT32,
+		gc.TUINT8<<16 | gc.TFLOAT32,
+		gc.TINT16<<16 | gc.TFLOAT32,
+		gc.TUINT16<<16 | gc.TFLOAT32,
+		gc.TINT32<<16 | gc.TFLOAT32,
+		gc.TUINT32<<16 | gc.TFLOAT32,
+		gc.TINT8<<16 | gc.TFLOAT64,
+		gc.TUINT8<<16 | gc.TFLOAT64,
+		gc.TINT16<<16 | gc.TFLOAT64,
+		gc.TUINT16<<16 | gc.TFLOAT64,
+		gc.TINT32<<16 | gc.TFLOAT64,
+		gc.TUINT32<<16 | gc.TFLOAT64:
+		fa := arm.AMOVW
+
+		switch ft {
+		case gc.TINT8:
+			fa = arm.AMOVBS
+
+		case gc.TUINT8:
+			fa = arm.AMOVBU
+
+		case gc.TINT16:
+			fa = arm.AMOVHS
+
+		case gc.TUINT16:
+			fa = arm.AMOVHU
+		}
+
+		a := arm.AMOVWF
+		ta := arm.AMOVF
+		if tt == gc.TFLOAT64 {
+			a = arm.AMOVWD
+			ta = arm.AMOVD
+		}
+
+		var r1 gc.Node
+		gc.Regalloc(&r1, gc.Types[ft], f)
+		var r2 gc.Node
+		gc.Regalloc(&r2, gc.Types[tt], t)
+		gins(fa, f, &r1)          // load to cpu
+		gins(arm.AMOVW, &r1, &r2) // copy to fpu
+		p1 := gins(a, &r2, &r2)   // convert
+		switch ft {
+		case gc.TUINT8,
+			gc.TUINT16,
+			gc.TUINT32:
+			p1.Scond |= arm.C_UBIT
+		}
+
+		gins(ta, &r2, t) // store
+		gc.Regfree(&r1)
+		gc.Regfree(&r2)
+		return
+
+	case gc.TUINT64<<16 | gc.TFLOAT32,
+		gc.TUINT64<<16 | gc.TFLOAT64:
+		gc.Fatal("gmove UINT64, TFLOAT not implemented")
+		return
+
+		/*
+		 * float to float
+		 */
+	case gc.TFLOAT32<<16 | gc.TFLOAT32:
+		a = arm.AMOVF
+
+	case gc.TFLOAT64<<16 | gc.TFLOAT64:
+		a = arm.AMOVD
+
+	case gc.TFLOAT32<<16 | gc.TFLOAT64:
+		var r1 gc.Node
+		gc.Regalloc(&r1, gc.Types[gc.TFLOAT64], t)
+		gins(arm.AMOVF, f, &r1)
+		gins(arm.AMOVFD, &r1, &r1)
+		gins(arm.AMOVD, &r1, t)
+		gc.Regfree(&r1)
+		return
+
+	case gc.TFLOAT64<<16 | gc.TFLOAT32:
+		var r1 gc.Node
+		gc.Regalloc(&r1, gc.Types[gc.TFLOAT64], t)
+		gins(arm.AMOVD, f, &r1)
+		gins(arm.AMOVDF, &r1, &r1)
+		gins(arm.AMOVF, &r1, t)
+		gc.Regfree(&r1)
+		return
+	}
+
+	gins(a, f, t)
+	return
+
+	// TODO(kaib): we almost always require a register dest anyway, this can probably be
+	// removed.
+	// requires register destination
+rdst:
+	{
+		gc.Regalloc(&r1, t.Type, t)
+
+		gins(a, f, &r1)
+		gmove(&r1, t)
+		gc.Regfree(&r1)
+		return
+	}
+
+	// requires register intermediate
+hard:
+	gc.Regalloc(&r1, cvt, t)
+
+	gmove(f, &r1)
+	gmove(&r1, t)
+	gc.Regfree(&r1)
+	return
+
+	// truncate 64 bit integer
+trunc64:
+	var fhi gc.Node
+	var flo gc.Node
+	split64(f, &flo, &fhi)
+
+	gc.Regalloc(&r1, t.Type, nil)
+	gins(a, &flo, &r1)
+	gins(a, &r1, t)
+	gc.Regfree(&r1)
+	splitclean()
+	return
+}
+
+func samaddr(f *gc.Node, t *gc.Node) bool {
+	if f.Op != t.Op {
+		return false
+	}
+
+	switch f.Op {
+	case gc.OREGISTER:
+		if f.Reg != t.Reg {
+			break
+		}
+		return true
+	}
+
+	return false
+}
+
+/*
+ * generate one instruction:
+ *	as f, t
+ */
+func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
+	//	Node nod;
+	//	int32 v;
+
+	if f != nil && f.Op == gc.OINDEX {
+		gc.Fatal("gins OINDEX not implemented")
+	}
+
+	//		gc.Regalloc(&nod, &regnode, Z);
+	//		v = constnode.vconst;
+	//		gc.Cgen(f->right, &nod);
+	//		constnode.vconst = v;
+	//		idx.reg = nod.reg;
+	//		gc.Regfree(&nod);
+	if t != nil && t.Op == gc.OINDEX {
+		gc.Fatal("gins OINDEX not implemented")
+	}
+
+	//		gc.Regalloc(&nod, &regnode, Z);
+	//		v = constnode.vconst;
+	//		gc.Cgen(t->right, &nod);
+	//		constnode.vconst = v;
+	//		idx.reg = nod.reg;
+	//		gc.Regfree(&nod);
+
+	p := gc.Prog(as)
+	gc.Naddr(&p.From, f)
+	gc.Naddr(&p.To, t)
+
+	switch as {
+	case arm.ABL:
+		if p.To.Type == obj.TYPE_REG {
+			p.To.Type = obj.TYPE_MEM
+		}
+
+	case arm.ACMP, arm.ACMPF, arm.ACMPD:
+		if t != nil {
+			if f.Op != gc.OREGISTER {
+				/* generate a comparison
+				TODO(kaib): one of the args can actually be a small constant. relax the constraint and fix call sites.
+				*/
+				gc.Fatal("bad operands to gcmp")
+			}
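+			// ARM comparisons take no destination: shift the second
+			// operand into From and fold f into the Reg field.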
+			p.From = p.To
+			p.To = obj.Addr{}
+			raddr(f, p)
+		}
+
+	case arm.AMULU:
+		if f != nil && f.Op != gc.OREGISTER {
+			gc.Fatal("bad operands to mul")
+		}
+
+	case arm.AMOVW:
+		if (p.From.Type == obj.TYPE_MEM || p.From.Type == obj.TYPE_ADDR || p.From.Type == obj.TYPE_CONST) && (p.To.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_ADDR) {
+			gc.Fatal("gins double memory")
+		}
+
+	case arm.AADD:
+		if p.To.Type == obj.TYPE_MEM {
+			gc.Fatal("gins arith to mem")
+		}
+
+	case arm.ARSB:
+		if p.From.Type == obj.TYPE_NONE {
+			gc.Fatal("rsb with no from")
+		}
+	}
+
+	if gc.Debug['g'] != 0 {
+		fmt.Printf("%v\n", p)
+	}
+	return p
+}
+
+/*
+ * insert n into reg slot of p
+ */
+func raddr(n *gc.Node, p *obj.Prog) {
+	var a obj.Addr
+	gc.Naddr(&a, n)
+	if a.Type != obj.TYPE_REG {
+		if n != nil {
+			gc.Fatal("bad in raddr: %v", gc.Oconv(int(n.Op), 0))
+		} else {
+			gc.Fatal("bad in raddr: <null>")
+		}
+		p.Reg = 0
+	} else {
+		p.Reg = a.Reg
+	}
+}
+
+/*
+ * generate a constant shift.
+ * arm encodes a shift by 32 as 0, so asking for a 0 shift is illegal.
+ */
+func gshift(as int, lhs *gc.Node, stype int32, sval int32, rhs *gc.Node) *obj.Prog {
+	if sval <= 0 || sval > 32 {
+		gc.Fatal("bad shift value: %d", sval)
+	}
+
+	sval = sval & 0x1f
+
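+	// Build an ARM shifted-register operand: bits 0-3 name the register
+	// being shifted, bits 5-6 the shift type (stype arrives pre-shifted,
+	// e.g. SHIFT_AR is 2<<5), and bits 7-11 the 5-bit count, which is why
+	// a count of 32 is encoded as 0.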
+	p := gins(as, nil, rhs)
+	p.From.Type = obj.TYPE_SHIFT
+	p.From.Offset = int64(stype) | int64(sval)<<7 | int64(lhs.Reg)&15
+	return p
+}
+
+/*
+ * generate a register shift.
+ */
+func gregshift(as int, lhs *gc.Node, stype int32, reg *gc.Node, rhs *gc.Node) *obj.Prog {
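+	// Same operand encoding as gshift, but in register-shift form: bit 4
+	// is set and the register holding the count goes in bits 8-11.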
+	p := gins(as, nil, rhs)
+	p.From.Type = obj.TYPE_SHIFT
+	p.From.Offset = int64(stype) | (int64(reg.Reg)&15)<<8 | 1<<4 | int64(lhs.Reg)&15
+	return p
+}
+
+/*
+ * return Axxx for Oxxx on type t.
+ */
+func optoas(op int, t *gc.Type) int {
+	if t == nil {
+		gc.Fatal("optoas: t is nil")
+	}
+
+	a := obj.AXXX
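+	// As in gmove, the (op, simplified type) pair is packed into a single
+	// switch value; the CASE macro played the same role in the C original.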
+	switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
+	default:
+		gc.Fatal("optoas: no entry %v-%v etype %v simtype %v", gc.Oconv(int(op), 0), t, gc.Types[t.Etype], gc.Types[gc.Simtype[t.Etype]])
+
+		/*	case CASE(OADDR, TPTR32):
+				a = ALEAL;
+				break;
+
+			case CASE(OADDR, TPTR64):
+				a = ALEAQ;
+				break;
+		*/
+	// TODO(kaib): make sure the conditional branches work on all edge cases
+	case gc.OEQ<<16 | gc.TBOOL,
+		gc.OEQ<<16 | gc.TINT8,
+		gc.OEQ<<16 | gc.TUINT8,
+		gc.OEQ<<16 | gc.TINT16,
+		gc.OEQ<<16 | gc.TUINT16,
+		gc.OEQ<<16 | gc.TINT32,
+		gc.OEQ<<16 | gc.TUINT32,
+		gc.OEQ<<16 | gc.TINT64,
+		gc.OEQ<<16 | gc.TUINT64,
+		gc.OEQ<<16 | gc.TPTR32,
+		gc.OEQ<<16 | gc.TPTR64,
+		gc.OEQ<<16 | gc.TFLOAT32,
+		gc.OEQ<<16 | gc.TFLOAT64:
+		a = arm.ABEQ
+
+	case gc.ONE<<16 | gc.TBOOL,
+		gc.ONE<<16 | gc.TINT8,
+		gc.ONE<<16 | gc.TUINT8,
+		gc.ONE<<16 | gc.TINT16,
+		gc.ONE<<16 | gc.TUINT16,
+		gc.ONE<<16 | gc.TINT32,
+		gc.ONE<<16 | gc.TUINT32,
+		gc.ONE<<16 | gc.TINT64,
+		gc.ONE<<16 | gc.TUINT64,
+		gc.ONE<<16 | gc.TPTR32,
+		gc.ONE<<16 | gc.TPTR64,
+		gc.ONE<<16 | gc.TFLOAT32,
+		gc.ONE<<16 | gc.TFLOAT64:
+		a = arm.ABNE
+
+	case gc.OLT<<16 | gc.TINT8,
+		gc.OLT<<16 | gc.TINT16,
+		gc.OLT<<16 | gc.TINT32,
+		gc.OLT<<16 | gc.TINT64,
+		gc.OLT<<16 | gc.TFLOAT32,
+		gc.OLT<<16 | gc.TFLOAT64:
+		a = arm.ABLT
+
+	case gc.OLT<<16 | gc.TUINT8,
+		gc.OLT<<16 | gc.TUINT16,
+		gc.OLT<<16 | gc.TUINT32,
+		gc.OLT<<16 | gc.TUINT64:
+		a = arm.ABLO
+
+	case gc.OLE<<16 | gc.TINT8,
+		gc.OLE<<16 | gc.TINT16,
+		gc.OLE<<16 | gc.TINT32,
+		gc.OLE<<16 | gc.TINT64,
+		gc.OLE<<16 | gc.TFLOAT32,
+		gc.OLE<<16 | gc.TFLOAT64:
+		a = arm.ABLE
+
+	case gc.OLE<<16 | gc.TUINT8,
+		gc.OLE<<16 | gc.TUINT16,
+		gc.OLE<<16 | gc.TUINT32,
+		gc.OLE<<16 | gc.TUINT64:
+		a = arm.ABLS
+
+	case gc.OGT<<16 | gc.TINT8,
+		gc.OGT<<16 | gc.TINT16,
+		gc.OGT<<16 | gc.TINT32,
+		gc.OGT<<16 | gc.TINT64,
+		gc.OGT<<16 | gc.TFLOAT32,
+		gc.OGT<<16 | gc.TFLOAT64:
+		a = arm.ABGT
+
+	case gc.OGT<<16 | gc.TUINT8,
+		gc.OGT<<16 | gc.TUINT16,
+		gc.OGT<<16 | gc.TUINT32,
+		gc.OGT<<16 | gc.TUINT64:
+		a = arm.ABHI
+
+	case gc.OGE<<16 | gc.TINT8,
+		gc.OGE<<16 | gc.TINT16,
+		gc.OGE<<16 | gc.TINT32,
+		gc.OGE<<16 | gc.TINT64,
+		gc.OGE<<16 | gc.TFLOAT32,
+		gc.OGE<<16 | gc.TFLOAT64:
+		a = arm.ABGE
+
+	case gc.OGE<<16 | gc.TUINT8,
+		gc.OGE<<16 | gc.TUINT16,
+		gc.OGE<<16 | gc.TUINT32,
+		gc.OGE<<16 | gc.TUINT64:
+		a = arm.ABHS
+
+	case gc.OCMP<<16 | gc.TBOOL,
+		gc.OCMP<<16 | gc.TINT8,
+		gc.OCMP<<16 | gc.TUINT8,
+		gc.OCMP<<16 | gc.TINT16,
+		gc.OCMP<<16 | gc.TUINT16,
+		gc.OCMP<<16 | gc.TINT32,
+		gc.OCMP<<16 | gc.TUINT32,
+		gc.OCMP<<16 | gc.TPTR32:
+		a = arm.ACMP
+
+	case gc.OCMP<<16 | gc.TFLOAT32:
+		a = arm.ACMPF
+
+	case gc.OCMP<<16 | gc.TFLOAT64:
+		a = arm.ACMPD
+
+	case gc.OPS<<16 | gc.TFLOAT32,
+		gc.OPS<<16 | gc.TFLOAT64:
+		a = arm.ABVS
+
+	case gc.OAS<<16 | gc.TBOOL:
+		a = arm.AMOVB
+
+	case gc.OAS<<16 | gc.TINT8:
+		a = arm.AMOVBS
+
+	case gc.OAS<<16 | gc.TUINT8:
+		a = arm.AMOVBU
+
+	case gc.OAS<<16 | gc.TINT16:
+		a = arm.AMOVHS
+
+	case gc.OAS<<16 | gc.TUINT16:
+		a = arm.AMOVHU
+
+	case gc.OAS<<16 | gc.TINT32,
+		gc.OAS<<16 | gc.TUINT32,
+		gc.OAS<<16 | gc.TPTR32:
+		a = arm.AMOVW
+
+	case gc.OAS<<16 | gc.TFLOAT32:
+		a = arm.AMOVF
+
+	case gc.OAS<<16 | gc.TFLOAT64:
+		a = arm.AMOVD
+
+	case gc.OADD<<16 | gc.TINT8,
+		gc.OADD<<16 | gc.TUINT8,
+		gc.OADD<<16 | gc.TINT16,
+		gc.OADD<<16 | gc.TUINT16,
+		gc.OADD<<16 | gc.TINT32,
+		gc.OADD<<16 | gc.TUINT32,
+		gc.OADD<<16 | gc.TPTR32:
+		a = arm.AADD
+
+	case gc.OADD<<16 | gc.TFLOAT32:
+		a = arm.AADDF
+
+	case gc.OADD<<16 | gc.TFLOAT64:
+		a = arm.AADDD
+
+	case gc.OSUB<<16 | gc.TINT8,
+		gc.OSUB<<16 | gc.TUINT8,
+		gc.OSUB<<16 | gc.TINT16,
+		gc.OSUB<<16 | gc.TUINT16,
+		gc.OSUB<<16 | gc.TINT32,
+		gc.OSUB<<16 | gc.TUINT32,
+		gc.OSUB<<16 | gc.TPTR32:
+		a = arm.ASUB
+
+	case gc.OSUB<<16 | gc.TFLOAT32:
+		a = arm.ASUBF
+
+	case gc.OSUB<<16 | gc.TFLOAT64:
+		a = arm.ASUBD
+
+	case gc.OMINUS<<16 | gc.TINT8,
+		gc.OMINUS<<16 | gc.TUINT8,
+		gc.OMINUS<<16 | gc.TINT16,
+		gc.OMINUS<<16 | gc.TUINT16,
+		gc.OMINUS<<16 | gc.TINT32,
+		gc.OMINUS<<16 | gc.TUINT32,
+		gc.OMINUS<<16 | gc.TPTR32:
+		a = arm.ARSB
+
+	case gc.OAND<<16 | gc.TINT8,
+		gc.OAND<<16 | gc.TUINT8,
+		gc.OAND<<16 | gc.TINT16,
+		gc.OAND<<16 | gc.TUINT16,
+		gc.OAND<<16 | gc.TINT32,
+		gc.OAND<<16 | gc.TUINT32,
+		gc.OAND<<16 | gc.TPTR32:
+		a = arm.AAND
+
+	case gc.OOR<<16 | gc.TINT8,
+		gc.OOR<<16 | gc.TUINT8,
+		gc.OOR<<16 | gc.TINT16,
+		gc.OOR<<16 | gc.TUINT16,
+		gc.OOR<<16 | gc.TINT32,
+		gc.OOR<<16 | gc.TUINT32,
+		gc.OOR<<16 | gc.TPTR32:
+		a = arm.AORR
+
+	case gc.OXOR<<16 | gc.TINT8,
+		gc.OXOR<<16 | gc.TUINT8,
+		gc.OXOR<<16 | gc.TINT16,
+		gc.OXOR<<16 | gc.TUINT16,
+		gc.OXOR<<16 | gc.TINT32,
+		gc.OXOR<<16 | gc.TUINT32,
+		gc.OXOR<<16 | gc.TPTR32:
+		a = arm.AEOR
+
+	case gc.OLSH<<16 | gc.TINT8,
+		gc.OLSH<<16 | gc.TUINT8,
+		gc.OLSH<<16 | gc.TINT16,
+		gc.OLSH<<16 | gc.TUINT16,
+		gc.OLSH<<16 | gc.TINT32,
+		gc.OLSH<<16 | gc.TUINT32,
+		gc.OLSH<<16 | gc.TPTR32:
+		a = arm.ASLL
+
+	case gc.ORSH<<16 | gc.TUINT8,
+		gc.ORSH<<16 | gc.TUINT16,
+		gc.ORSH<<16 | gc.TUINT32,
+		gc.ORSH<<16 | gc.TPTR32:
+		a = arm.ASRL
+
+	case gc.ORSH<<16 | gc.TINT8,
+		gc.ORSH<<16 | gc.TINT16,
+		gc.ORSH<<16 | gc.TINT32:
+		a = arm.ASRA
+
+	case gc.OMUL<<16 | gc.TUINT8,
+		gc.OMUL<<16 | gc.TUINT16,
+		gc.OMUL<<16 | gc.TUINT32,
+		gc.OMUL<<16 | gc.TPTR32:
+		a = arm.AMULU
+
+	case gc.OMUL<<16 | gc.TINT8,
+		gc.OMUL<<16 | gc.TINT16,
+		gc.OMUL<<16 | gc.TINT32:
+		a = arm.AMUL
+
+	case gc.OMUL<<16 | gc.TFLOAT32:
+		a = arm.AMULF
+
+	case gc.OMUL<<16 | gc.TFLOAT64:
+		a = arm.AMULD
+
+	case gc.ODIV<<16 | gc.TUINT8,
+		gc.ODIV<<16 | gc.TUINT16,
+		gc.ODIV<<16 | gc.TUINT32,
+		gc.ODIV<<16 | gc.TPTR32:
+		a = arm.ADIVU
+
+	case gc.ODIV<<16 | gc.TINT8,
+		gc.ODIV<<16 | gc.TINT16,
+		gc.ODIV<<16 | gc.TINT32:
+		a = arm.ADIV
+
+	case gc.OMOD<<16 | gc.TUINT8,
+		gc.OMOD<<16 | gc.TUINT16,
+		gc.OMOD<<16 | gc.TUINT32,
+		gc.OMOD<<16 | gc.TPTR32:
+		a = arm.AMODU
+
+	case gc.OMOD<<16 | gc.TINT8,
+		gc.OMOD<<16 | gc.TINT16,
+		gc.OMOD<<16 | gc.TINT32:
+		a = arm.AMOD
+
+		//	case CASE(OEXTEND, TINT16):
+	//		a = ACWD;
+	//		break;
+
+	//	case CASE(OEXTEND, TINT32):
+	//		a = ACDQ;
+	//		break;
+
+	//	case CASE(OEXTEND, TINT64):
+	//		a = ACQO;
+	//		break;
+
+	case gc.ODIV<<16 | gc.TFLOAT32:
+		a = arm.ADIVF
+
+	case gc.ODIV<<16 | gc.TFLOAT64:
+		a = arm.ADIVD
+
+	case gc.OSQRT<<16 | gc.TFLOAT64:
+		a = arm.ASQRTD
+	}
+
+	return a
+}
+
+const (
+	ODynam = 1 << 0
+	OPtrto = 1 << 1
+)
+
+var clean [20]gc.Node
+
+var cleani int = 0
+
+func sudoclean() {
+	if clean[cleani-1].Op != gc.OEMPTY {
+		gc.Regfree(&clean[cleani-1])
+	}
+	if clean[cleani-2].Op != gc.OEMPTY {
+		gc.Regfree(&clean[cleani-2])
+	}
+	cleani -= 2
+}
+
+func dotaddable(n *gc.Node, n1 *gc.Node) bool {
+	if n.Op != gc.ODOT {
+		return false
+	}
+
+	var oary [10]int64
+	var nn *gc.Node
+	o := gc.Dotoffset(n, oary[:], &nn)
+	if nn != nil && nn.Addable && o == 1 && oary[0] >= 0 {
+		*n1 = *nn
+		n1.Type = n.Type
+		n1.Xoffset += oary[0]
+		return true
+	}
+
+	return false
+}
+
+/*
+ * generate code to compute address of n,
+ * a reference to a (perhaps nested) field inside
+ * an array or struct.
+ * return false on failure, true on success.
+ * on success, leaves usable address in a.
+ *
+ * caller is responsible for calling sudoclean
+ * after successful sudoaddable,
+ * to release the register used for a.
+ */
+func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
+	if n.Type == nil {
+		return false
+	}
+
+	*a = obj.Addr{}
+
+	switch n.Op {
+	case gc.OLITERAL:
+		if !gc.Isconst(n, gc.CTINT) {
+			break
+		}
+		v := n.Int()
+		if v >= 32000 || v <= -32000 {
+			break
+		}
+		switch as {
+		default:
+			return false
+
+		case arm.AADD,
+			arm.ASUB,
+			arm.AAND,
+			arm.AORR,
+			arm.AEOR,
+			arm.AMOVB,
+			arm.AMOVBS,
+			arm.AMOVBU,
+			arm.AMOVH,
+			arm.AMOVHS,
+			arm.AMOVHU,
+			arm.AMOVW:
+			break
+		}
+
+		cleani += 2
+		reg := &clean[cleani-1]
+		reg1 := &clean[cleani-2]
+		reg.Op = gc.OEMPTY
+		reg1.Op = gc.OEMPTY
+		gc.Naddr(a, n)
+		return true
+
+	case gc.ODOT,
+		gc.ODOTPTR:
+		cleani += 2
+		reg := &clean[cleani-1]
+		reg1 := &clean[cleani-2]
+		reg.Op = gc.OEMPTY
+		reg1.Op = gc.OEMPTY
+		var nn *gc.Node
+		var oary [10]int64
+		o := gc.Dotoffset(n, oary[:], &nn)
+		if nn == nil {
+			sudoclean()
+			return false
+		}
+
+		if nn.Addable && o == 1 && oary[0] >= 0 {
+			// directly addressable set of DOTs
+			n1 := *nn
+
+			n1.Type = n.Type
+			n1.Xoffset += oary[0]
+			gc.Naddr(a, &n1)
+			return true
+		}
+
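+		// Dotoffset encodes the path to the field: a non-negative entry
+		// is a direct offset, while a negative entry -(off+1) marks a
+		// pointer indirection at offset off, requiring a load and a nil
+		// check.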
+		gc.Regalloc(reg, gc.Types[gc.Tptr], nil)
+		n1 := *reg
+		n1.Op = gc.OINDREG
+		if oary[0] >= 0 {
+			gc.Agen(nn, reg)
+			n1.Xoffset = oary[0]
+		} else {
+			gc.Cgen(nn, reg)
+			gc.Cgen_checknil(reg)
+			n1.Xoffset = -(oary[0] + 1)
+		}
+
+		for i := 1; i < o; i++ {
+			if oary[i] >= 0 {
+				gc.Fatal("can't happen")
+			}
+			gins(arm.AMOVW, &n1, reg)
+			gc.Cgen_checknil(reg)
+			n1.Xoffset = -(oary[i] + 1)
+		}
+
+		a.Type = obj.TYPE_NONE
+		a.Name = obj.NAME_NONE
+		n1.Type = n.Type
+		gc.Naddr(a, &n1)
+		return true
+
+	case gc.OINDEX:
+		return false
+	}
+
+	return false
+}
diff --git a/src/cmd/compile/internal/arm/peep.go b/src/cmd/compile/internal/arm/peep.go
new file mode 100644
index 0000000..66eba41
--- /dev/null
+++ b/src/cmd/compile/internal/arm/peep.go
@@ -0,0 +1,1748 @@
+// Inferno utils/5c/peep.c
+// http://code.google.com/p/inferno-os/source/browse/utils/5c/peep.c
+//
+//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
+//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+//	Portions Copyright © 1997-1999 Vita Nuova Limited
+//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+//	Portions Copyright © 2004,2006 Bruce Ellis
+//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+//	Portions Copyright © 2009 The Go Authors.  All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package arm
+
+import (
+	"cmd/compile/internal/gc"
+	"cmd/internal/obj"
+	"cmd/internal/obj/arm"
+	"fmt"
+)
+
+var gactive uint32
+
+func peep(firstp *obj.Prog) {
+	g := gc.Flowstart(firstp, nil)
+	if g == nil {
+		return
+	}
+	gactive = 0
+
+	var r *gc.Flow
+	var p *obj.Prog
+	var t int
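+	// Run copy propagation to a fixed point: t counts the rewrites made
+	// in one pass, and the scan repeats until a pass changes nothing.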
+loop1:
+	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+		gc.Dumpit("loop1", g.Start, 0)
+	}
+
+	t = 0
+	for r = g.Start; r != nil; r = r.Link {
+		p = r.Prog
+		switch p.As {
+		/*
+		 * elide shift into TYPE_SHIFT operand of subsequent instruction
+		 */
+		//			if(shiftprop(r)) {
+		//				excise(r);
+		//				t++;
+		//				break;
+		//			}
+		case arm.ASLL,
+			arm.ASRL,
+			arm.ASRA:
+			break
+
+		case arm.AMOVB,
+			arm.AMOVH,
+			arm.AMOVW,
+			arm.AMOVF,
+			arm.AMOVD:
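+			// An unconditional register-to-register move is a
+			// candidate for copy propagation; if that fails, subprop
+			// may rename the source register so that it succeeds.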
+			if regtyp(&p.From) {
+				if p.From.Type == p.To.Type && isfloatreg(&p.From) == isfloatreg(&p.To) {
+					if p.Scond == arm.C_SCOND_NONE {
+						if copyprop(g, r) {
+							excise(r)
+							t++
+							break
+						}
+
+						if subprop(r) && copyprop(g, r) {
+							excise(r)
+							t++
+							break
+						}
+					}
+				}
+			}
+
+		case arm.AMOVHS,
+			arm.AMOVHU,
+			arm.AMOVBS,
+			arm.AMOVBU:
+			if p.From.Type == obj.TYPE_REG {
+				if shortprop(r) {
+					t++
+				}
+			}
+		}
+	}
+
+	/*
+		if(p->scond == C_SCOND_NONE)
+		if(regtyp(&p->to))
+		if(isdconst(&p->from)) {
+			constprop(&p->from, &p->to, r->s1);
+		}
+		break;
+	*/
+	if t != 0 {
+		goto loop1
+	}
+
+	for r := g.Start; r != nil; r = r.Link {
+		p = r.Prog
+		switch p.As {
+		/*
+		 * EOR -1,x,y => MVN x,y
+		 */
+		case arm.AEOR:
+			if isdconst(&p.From) && p.From.Offset == -1 {
+				p.As = arm.AMVN
+				p.From.Type = obj.TYPE_REG
+				if p.Reg != 0 {
+					p.From.Reg = p.Reg
+				} else {
+					p.From.Reg = p.To.Reg
+				}
+				p.Reg = 0
+			}
+		}
+	}
+
+	for r := g.Start; r != nil; r = r.Link {
+		p = r.Prog
+		switch p.As {
+		case arm.AMOVW,
+			arm.AMOVB,
+			arm.AMOVBS,
+			arm.AMOVBU:
+			if p.From.Type == obj.TYPE_MEM && p.From.Offset == 0 {
+				xtramodes(g, r, &p.From)
+			} else if p.To.Type == obj.TYPE_MEM && p.To.Offset == 0 {
+				xtramodes(g, r, &p.To)
+			} else {
+				continue
+			}
+		}
+	}
+
+	//		case ACMP:
+	//			/*
+	//			 * elide CMP $0,x if calculation of x can set condition codes
+	//			 */
+	//			if(isdconst(&p->from) || p->from.offset != 0)
+	//				continue;
+	//			r2 = r->s1;
+	//			if(r2 == nil)
+	//				continue;
+	//			t = r2->prog->as;
+	//			switch(t) {
+	//			default:
+	//				continue;
+	//			case ABEQ:
+	//			case ABNE:
+	//			case ABMI:
+	//			case ABPL:
+	//				break;
+	//			case ABGE:
+	//				t = ABPL;
+	//				break;
+	//			case ABLT:
+	//				t = ABMI;
+	//				break;
+	//			case ABHI:
+	//				t = ABNE;
+	//				break;
+	//			case ABLS:
+	//				t = ABEQ;
+	//				break;
+	//			}
+	//			r1 = r;
+	//			do
+	//				r1 = uniqp(r1);
+	//			while (r1 != nil && r1->prog->as == ANOP);
+	//			if(r1 == nil)
+	//				continue;
+	//			p1 = r1->prog;
+	//			if(p1->to.type != TYPE_REG)
+	//				continue;
+	//			if(p1->to.reg != p->reg)
+	//			if(!(p1->as == AMOVW && p1->from.type == TYPE_REG && p1->from.reg == p->reg))
+	//				continue;
+	//
+	//			switch(p1->as) {
+	//			default:
+	//				continue;
+	//			case AMOVW:
+	//				if(p1->from.type != TYPE_REG)
+	//					continue;
+	//			case AAND:
+	//			case AEOR:
+	//			case AORR:
+	//			case ABIC:
+	//			case AMVN:
+	//			case ASUB:
+	//			case ARSB:
+	//			case AADD:
+	//			case AADC:
+	//			case ASBC:
+	//			case ARSC:
+	//				break;
+	//			}
+	//			p1->scond |= C_SBIT;
+	//			r2->prog->as = t;
+	//			excise(r);
+	//			continue;
+
+	//	predicate(g);
+
+	gc.Flowend(g)
+}
+
+func regtyp(a *obj.Addr) bool {
+	return a.Type == obj.TYPE_REG && (arm.REG_R0 <= a.Reg && a.Reg <= arm.REG_R15 || arm.REG_F0 <= a.Reg && a.Reg <= arm.REG_F15)
+}
+
+/*
+ * the idea is to substitute
+ * one register for another
+ * from one MOV to another
+ *	MOV	a, R0
+ *	ADD	b, R0	/ no use of R1
+ *	MOV	R0, R1
+ * would be converted to
+ *	MOV	a, R1
+ *	ADD	b, R1
+ *	MOV	R1, R0
+ * hopefully, then the former or latter MOV
+ * will be eliminated by copy propagation.
+ */
+func subprop(r0 *gc.Flow) bool {
+	p := (*obj.Prog)(r0.Prog)
+	v1 := (*obj.Addr)(&p.From)
+	if !regtyp(v1) {
+		return false
+	}
+	v2 := (*obj.Addr)(&p.To)
+	if !regtyp(v2) {
+		return false
+	}
+	for r := gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
+		if gc.Uniqs(r) == nil {
+			break
+		}
+		p = r.Prog
+		if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
+			continue
+		}
+		if p.Info.Flags&gc.Call != 0 {
+			return false
+		}
+
+		// TODO(rsc): Whatever invalidated the info should have done this call.
+		proginfo(p)
+
+		if (p.Info.Flags&gc.CanRegRead != 0) && p.To.Type == obj.TYPE_REG {
+			p.Info.Flags |= gc.RegRead
+			p.Info.Flags &^= (gc.CanRegRead | gc.RightRead)
+			p.Reg = p.To.Reg
+		}
+
+		switch p.As {
+		case arm.AMULLU,
+			arm.AMULA,
+			arm.AMVN:
+			return false
+		}
+
+		if p.Info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightWrite {
+			if p.To.Type == v1.Type {
+				if p.To.Reg == v1.Reg {
+					if p.Scond == arm.C_SCOND_NONE {
+						copysub(&p.To, v1, v2, 1)
+						if gc.Debug['P'] != 0 {
+							fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
+							if p.From.Type == v2.Type {
+								fmt.Printf(" excise")
+							}
+							fmt.Printf("\n")
+						}
+
+						for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
+							p = r.Prog
+							copysub(&p.From, v1, v2, 1)
+							copysub1(p, v1, v2, 1)
+							copysub(&p.To, v1, v2, 1)
+							if gc.Debug['P'] != 0 {
+								fmt.Printf("%v\n", r.Prog)
+							}
+						}
+
+						t := int(v1.Reg)
+						v1.Reg = v2.Reg
+						v2.Reg = int16(t)
+						if gc.Debug['P'] != 0 {
+							fmt.Printf("%v last\n", r.Prog)
+						}
+						return true
+					}
+				}
+			}
+		}
+
+		if copyau(&p.From, v2) || copyau1(p, v2) || copyau(&p.To, v2) {
+			break
+		}
+		if copysub(&p.From, v1, v2, 0) != 0 || copysub1(p, v1, v2, 0) != 0 || copysub(&p.To, v1, v2, 0) != 0 {
+			break
+		}
+	}
+
+	return false
+}
+
+/*
+ * The idea is to remove redundant copies.
+ *	v1->v2	F=0
+ *	(use v2	s/v2/v1/)*
+ *	set v1	F=1
+ *	use v2	return fail
+ *	-----------------
+ *	v1->v2	F=0
+ *	(use v2	s/v2/v1/)*
+ *	set v1	F=1
+ *	set v2	return success
+ */
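+// For example (a hypothetical sequence, in the notation above):
+//	MOVW	R1, R2		// v1->v2
+//	ADD	R2, R3, R4	// use of v2, rewritten to use R1
+//	MOVW	$0, R2		// v2 set again: the original copy is now dead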
+func copyprop(g *gc.Graph, r0 *gc.Flow) bool {
+	p := (*obj.Prog)(r0.Prog)
+	v1 := (*obj.Addr)(&p.From)
+	v2 := (*obj.Addr)(&p.To)
+	if copyas(v1, v2) {
+		return true
+	}
+	gactive++
+	return copy1(v1, v2, r0.S1, 0)
+}
+
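+// copy1 walks the flow graph from r checking that every use of v2 can
+// be replaced by v1.  f becomes non-zero once the walk passes a merge
+// point or an instruction that may change v1; after that any further
+// use of v2 makes the propagation fail.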
+func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) bool {
+	if uint32(r.Active) == gactive {
+		if gc.Debug['P'] != 0 {
+			fmt.Printf("act set; return 1\n")
+		}
+		return true
+	}
+
+	r.Active = int32(gactive)
+	if gc.Debug['P'] != 0 {
+		fmt.Printf("copy %v->%v f=%d\n", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), f)
+	}
+	var t int
+	var p *obj.Prog
+	for ; r != nil; r = r.S1 {
+		p = r.Prog
+		if gc.Debug['P'] != 0 {
+			fmt.Printf("%v", p)
+		}
+		if f == 0 && gc.Uniqp(r) == nil {
+			f = 1
+			if gc.Debug['P'] != 0 {
+				fmt.Printf("; merge; f=%d", f)
+			}
+		}
+
+		t = copyu(p, v2, nil)
+		switch t {
+		case 2: /* rar, can't split */
+			if gc.Debug['P'] != 0 {
+				fmt.Printf("; %vrar; return 0\n", gc.Ctxt.Dconv(v2))
+			}
+			return false
+
+		case 3: /* set */
+			if gc.Debug['P'] != 0 {
+				fmt.Printf("; %vset; return 1\n", gc.Ctxt.Dconv(v2))
+			}
+			return true
+
+		case 1, /* used, substitute */
+			4: /* use and set */
+			if f != 0 {
+				if gc.Debug['P'] == 0 {
+					return false
+				}
+				if t == 4 {
+					fmt.Printf("; %vused+set and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
+				} else {
+					fmt.Printf("; %vused and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
+				}
+				return false
+			}
+
+			if copyu(p, v2, v1) != 0 {
+				if gc.Debug['P'] != 0 {
+					fmt.Printf("; sub fail; return 0\n")
+				}
+				return false
+			}
+
+			if gc.Debug['P'] != 0 {
+				fmt.Printf("; sub%v/%v", gc.Ctxt.Dconv(v2), gc.Ctxt.Dconv(v1))
+			}
+			if t == 4 {
+				if gc.Debug['P'] != 0 {
+					fmt.Printf("; %vused+set; return 1\n", gc.Ctxt.Dconv(v2))
+				}
+				return true
+			}
+		}
+
+		if f == 0 {
+			t = copyu(p, v1, nil)
+			if f == 0 && (t == 2 || t == 3 || t == 4) {
+				f = 1
+				if gc.Debug['P'] != 0 {
+					fmt.Printf("; %vset and !f; f=%d", gc.Ctxt.Dconv(v1), f)
+				}
+			}
+		}
+
+		if gc.Debug['P'] != 0 {
+			fmt.Printf("\n")
+		}
+		if r.S2 != nil {
+			if !copy1(v1, v2, r.S2, f) {
+				return false
+			}
+		}
+	}
+
+	return true
+}
+
+// UNUSED
+/*
+ * The idea is to remove redundant constants.
+ *	$c1->v1
+ *	($c1->v2 s/$c1/v1)*
+ *	set v1  return
+ * The v1->v2 should be eliminated by copy propagation.
+ */
+func constprop(c1 *obj.Addr, v1 *obj.Addr, r *gc.Flow) {
+	if gc.Debug['P'] != 0 {
+		fmt.Printf("constprop %v->%v\n", gc.Ctxt.Dconv(c1), gc.Ctxt.Dconv(v1))
+	}
+	var p *obj.Prog
+	for ; r != nil; r = r.S1 {
+		p = r.Prog
+		if gc.Debug['P'] != 0 {
+			fmt.Printf("%v", p)
+		}
+		if gc.Uniqp(r) == nil {
+			if gc.Debug['P'] != 0 {
+				fmt.Printf("; merge; return\n")
+			}
+			return
+		}
+
+		if p.As == arm.AMOVW && copyas(&p.From, c1) {
+			if gc.Debug['P'] != 0 {
+				fmt.Printf("; sub%v/%v", gc.Ctxt.Dconv(&p.From), gc.Ctxt.Dconv(v1))
+			}
+			p.From = *v1
+		} else if copyu(p, v1, nil) > 1 {
+			if gc.Debug['P'] != 0 {
+				fmt.Printf("; %vset; return\n", gc.Ctxt.Dconv(v1))
+			}
+			return
+		}
+
+		if gc.Debug['P'] != 0 {
+			fmt.Printf("\n")
+		}
+		if r.S2 != nil {
+			constprop(c1, v1, r.S2)
+		}
+	}
+}
+
+/*
+ * shortprop eliminates redundant zero/sign extensions.
+ *
+ *   MOVBS x, R
+ *   <no use R>
+ *   MOVBS R, R'
+ *
+ * changed to
+ *
+ *   MOVBS x, R
+ *   ...
+ *   MOVB  R, R' (compiled to mov)
+ *
+ * MOVBS above can be a MOVBS, MOVBU, MOVHS or MOVHU.
+ */
+func shortprop(r *gc.Flow) bool {
+	p := (*obj.Prog)(r.Prog)
+	r1 := (*gc.Flow)(findpre(r, &p.From))
+	if r1 == nil {
+		return false
+	}
+
+	p1 := (*obj.Prog)(r1.Prog)
+	if p1.As == p.As {
+		// Two consecutive extensions.
+		goto gotit
+	}
+
+	if p1.As == arm.AMOVW && isdconst(&p1.From) && p1.From.Offset >= 0 && p1.From.Offset < 128 {
+		// Loaded an immediate.
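+		// (values in [0,128) survive every byte/halfword sign- or
+		// zero-extension unchanged, so re-extending is redundant)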
+		goto gotit
+	}
+
+	return false
+
+gotit:
+	if gc.Debug['P'] != 0 {
+		fmt.Printf("shortprop\n%v\n%v", p1, p)
+	}
+	switch p.As {
+	case arm.AMOVBS,
+		arm.AMOVBU:
+		p.As = arm.AMOVB
+
+	case arm.AMOVHS,
+		arm.AMOVHU:
+		p.As = arm.AMOVH
+	}
+
+	if gc.Debug['P'] != 0 {
+		fmt.Printf(" => %v\n", obj.Aconv(int(p.As)))
+	}
+	return true
+}
+
+// UNUSED
+/*
+ * ASLL x,y,w
+ * .. (not use w, not set x y w)
+ * AXXX w,a,b (a != w)
+ * .. (not use w)
+ * (set w)
+ * ----------- changed to
+ * ..
+ * AXXX (x<<y),a,b
+ * ..
+ */
+func shiftprop(r *gc.Flow) bool {
+	p := (*obj.Prog)(r.Prog)
+	if p.To.Type != obj.TYPE_REG {
+		if gc.Debug['P'] != 0 {
+			fmt.Printf("\tBOTCH: result not reg; FAILURE\n")
+		}
+		return false
+	}
+
+	n := int(p.To.Reg)
+	a := obj.Addr{}
+	if p.Reg != 0 && p.Reg != p.To.Reg {
+		a.Type = obj.TYPE_REG
+		a.Reg = p.Reg
+	}
+
+	if gc.Debug['P'] != 0 {
+		fmt.Printf("shiftprop\n%v", p)
+	}
+	r1 := (*gc.Flow)(r)
+	var p1 *obj.Prog
+	for {
+		/* find first use of shift result; abort if shift operands or result are changed */
+		r1 = gc.Uniqs(r1)
+
+		if r1 == nil {
+			if gc.Debug['P'] != 0 {
+				fmt.Printf("\tbranch; FAILURE\n")
+			}
+			return false
+		}
+
+		if gc.Uniqp(r1) == nil {
+			if gc.Debug['P'] != 0 {
+				fmt.Printf("\tmerge; FAILURE\n")
+			}
+			return false
+		}
+
+		p1 = r1.Prog
+		if gc.Debug['P'] != 0 {
+			fmt.Printf("\n%v", p1)
+		}
+		switch copyu(p1, &p.To, nil) {
+		case 0: /* not used or set */
+			if (p.From.Type == obj.TYPE_REG && copyu(p1, &p.From, nil) > 1) || (a.Type == obj.TYPE_REG && copyu(p1, &a, nil) > 1) {
+				if gc.Debug['P'] != 0 {
+					fmt.Printf("\targs modified; FAILURE\n")
+				}
+				return false
+			}
+
+			continue
+		case 3: /* set, not used */
+			{
+				if gc.Debug['P'] != 0 {
+					fmt.Printf("\tBOTCH: noref; FAILURE\n")
+				}
+				return false
+			}
+		}
+
+		break
+	}
+
+	/* check whether substitution can be done */
+	switch p1.As {
+	default:
+		if gc.Debug['P'] != 0 {
+			fmt.Printf("\tnon-dpi; FAILURE\n")
+		}
+		return false
+
+	case arm.AAND,
+		arm.AEOR,
+		arm.AADD,
+		arm.AADC,
+		arm.AORR,
+		arm.ASUB,
+		arm.ASBC,
+		arm.ARSB,
+		arm.ARSC:
+		if int(p1.Reg) == n || (p1.Reg == 0 && p1.To.Type == obj.TYPE_REG && int(p1.To.Reg) == n) {
+			if p1.From.Type != obj.TYPE_REG {
+				if gc.Debug['P'] != 0 {
+					fmt.Printf("\tcan't swap; FAILURE\n")
+				}
+				return false
+			}
+
+			p1.Reg = p1.From.Reg
+			p1.From.Reg = int16(n)
+			switch p1.As {
+			case arm.ASUB:
+				p1.As = arm.ARSB
+
+			case arm.ARSB:
+				p1.As = arm.ASUB
+
+			case arm.ASBC:
+				p1.As = arm.ARSC
+
+			case arm.ARSC:
+				p1.As = arm.ASBC
+			}
+
+			if gc.Debug['P'] != 0 {
+				fmt.Printf("\t=>%v", p1)
+			}
+		}
+		fallthrough
+
+	case arm.ABIC,
+		arm.ATST,
+		arm.ACMP,
+		arm.ACMN:
+		if int(p1.Reg) == n {
+			if gc.Debug['P'] != 0 {
+				fmt.Printf("\tcan't swap; FAILURE\n")
+			}
+			return false
+		}
+
+		if p1.Reg == 0 && int(p1.To.Reg) == n {
+			if gc.Debug['P'] != 0 {
+				fmt.Printf("\tshift result used twice; FAILURE\n")
+			}
+			return false
+		}
+
+		//	case AMVN:
+		if p1.From.Type == obj.TYPE_SHIFT {
+			if gc.Debug['P'] != 0 {
+				fmt.Printf("\tshift result used in shift; FAILURE\n")
+			}
+			return false
+		}
+
+		if p1.From.Type != obj.TYPE_REG || int(p1.From.Reg) != n {
+			if gc.Debug['P'] != 0 {
+				fmt.Printf("\tBOTCH: where is it used?; FAILURE\n")
+			}
+			return false
+		}
+	}
+
+	/* check whether shift result is used subsequently */
+	p2 := (*obj.Prog)(p1)
+
+	if int(p1.To.Reg) != n {
+		var p1 *obj.Prog
+		for {
+			r1 = gc.Uniqs(r1)
+			if r1 == nil {
+				if gc.Debug['P'] != 0 {
+					fmt.Printf("\tinconclusive; FAILURE\n")
+				}
+				return false
+			}
+
+			p1 = r1.Prog
+			if gc.Debug['P'] != 0 {
+				fmt.Printf("\n%v", p1)
+			}
+			switch copyu(p1, &p.To, nil) {
+			case 0: /* not used or set */
+				continue
+
+			case 3: /* set, not used */
+				break
+
+			default: /* used */
+				if gc.Debug['P'] != 0 {
+					fmt.Printf("\treused; FAILURE\n")
+				}
+				return false
+			}
+
+			break
+		}
+	}
+
+	/* make the substitution */
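+	// The TYPE_SHIFT offset word built below mirrors the ARM
+	// data-processing "operand 2" encoding: bits 0-3 name Rm, bit 4
+	// selects a register-specified shift, bits 5-6 pick the shift type
+	// (00=LSL, 01=LSR, 10=ASR), and the shift amount sits in bits 7-11
+	// (immediate) or bits 8-11 (shift register).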
+	p2.From.Reg = 0
+
+	o := int(p.Reg)
+	if o == 0 {
+		o = int(p.To.Reg)
+	}
+	o &= 15
+
+	switch p.From.Type {
+	case obj.TYPE_CONST:
+		o |= int((p.From.Offset & 0x1f) << 7)
+
+	case obj.TYPE_REG:
+		o |= 1<<4 | (int(p.From.Reg)&15)<<8
+	}
+
+	switch p.As {
+	case arm.ASLL:
+		o |= 0 << 5
+
+	case arm.ASRL:
+		o |= 1 << 5
+
+	case arm.ASRA:
+		o |= 2 << 5
+	}
+
+	p2.From = obj.Addr{}
+	p2.From.Type = obj.TYPE_SHIFT
+	p2.From.Offset = int64(o)
+	if gc.Debug['P'] != 0 {
+		fmt.Printf("\t=>%v\tSUCCEED\n", p2)
+	}
+	return true
+}
+
+/*
+ * findpre returns the last instruction mentioning v
+ * before r. It must be a set, and there must be
+ * a unique path from that instruction to r.
+ */
+func findpre(r *gc.Flow, v *obj.Addr) *gc.Flow {
+	var r1 *gc.Flow
+
+	for r1 = gc.Uniqp(r); r1 != nil; r, r1 = r1, gc.Uniqp(r1) {
+		if gc.Uniqs(r1) != r {
+			return nil
+		}
+		switch copyu(r1.Prog, v, nil) {
+		case 1, /* used */
+			2: /* read-alter-rewrite */
+			return nil
+
+		case 3, /* set */
+			4: /* set and used */
+			return r1
+		}
+	}
+
+	return nil
+}
+
+/*
+ * findinc finds ADD instructions with a constant
+ * argument which falls within the immed_12 range.
+ */
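+// (here a constant strictly between -4096 and 4096, whose magnitude
+// fits the 12-bit offset field of an ARM load/store instruction)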
+func findinc(r *gc.Flow, r2 *gc.Flow, v *obj.Addr) *gc.Flow {
+	var r1 *gc.Flow
+	var p *obj.Prog
+
+	for r1 = gc.Uniqs(r); r1 != nil && r1 != r2; r, r1 = r1, gc.Uniqs(r1) {
+		if gc.Uniqp(r1) != r {
+			return nil
+		}
+		switch copyu(r1.Prog, v, nil) {
+		case 0: /* not touched */
+			continue
+
+		case 4: /* set and used */
+			p = r1.Prog
+
+			if p.As == arm.AADD {
+				if isdconst(&p.From) {
+					if p.From.Offset > -4096 && p.From.Offset < 4096 {
+						return r1
+					}
+				}
+			}
+			fallthrough
+
+		default:
+			return nil
+		}
+	}
+
+	return nil
+}
+
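+// nochange reports whether none of the registers p reads (its middle
+// register, the base of a shift operand, or its from register) is
+// modified on the straight-line path from r up to r2.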
+func nochange(r *gc.Flow, r2 *gc.Flow, p *obj.Prog) bool {
+	if r == r2 {
+		return true
+	}
+	n := int(0)
+	var a [3]obj.Addr
+	if p.Reg != 0 && p.Reg != p.To.Reg {
+		a[n].Type = obj.TYPE_REG
+		a[n].Reg = p.Reg
+		n++
+	}
+
+	switch p.From.Type {
+	case obj.TYPE_SHIFT:
+		a[n].Type = obj.TYPE_REG
+		a[n].Reg = int16(arm.REG_R0 + (p.From.Offset & 0xf))
+		n++
+		fallthrough
+
+	case obj.TYPE_REG:
+		a[n].Type = obj.TYPE_REG
+		a[n].Reg = p.From.Reg
+		n++
+	}
+
+	if n == 0 {
+		return true
+	}
+	var i int
+	for ; r != nil && r != r2; r = gc.Uniqs(r) {
+		p = r.Prog
+		for i = 0; i < n; i++ {
+			if copyu(p, &a[i], nil) > 1 {
+				return false
+			}
+		}
+	}
+
+	return true
+}
+
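+// findu1 is the depth-first worker for finduse, using the Active field
+// as a visited mark; it reports whether v is used on some path from r
+// before being set.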
+func findu1(r *gc.Flow, v *obj.Addr) bool {
+	for ; r != nil; r = r.S1 {
+		if r.Active != 0 {
+			return false
+		}
+		r.Active = 1
+		switch copyu(r.Prog, v, nil) {
+		case 1, /* used */
+			2, /* read-alter-rewrite */
+			4: /* set and used */
+			return true
+
+		case 3: /* set */
+			return false
+		}
+
+		if r.S2 != nil {
+			if findu1(r.S2, v) {
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+func finduse(g *gc.Graph, r *gc.Flow, v *obj.Addr) bool {
+	for r1 := (*gc.Flow)(g.Start); r1 != nil; r1 = r1.Link {
+		r1.Active = 0
+	}
+	return findu1(r, v)
+}
+
+/*
+ * xtramodes enables the ARM post increment and
+ * shift offset addressing modes to transform
+ *   MOVW   0(R3),R1
+ *   ADD    $4,R3,R3
+ * into
+ *   MOVW.P 4(R3),R1
+ * and
+ *   ADD    R0,R1
+ *   MOVBU  0(R1),R0
+ * into
+ *   MOVBU  R0<<0(R1),R0
+ */
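+// In the transformed instructions, C_PBIT marks the post-indexed form
+// (MOVW.P) and C_WBIT requests writeback for the pre-indexed form.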
+func xtramodes(g *gc.Graph, r *gc.Flow, a *obj.Addr) bool {
+	p := (*obj.Prog)(r.Prog)
+	v := obj.Addr(*a)
+	v.Type = obj.TYPE_REG
+	r1 := (*gc.Flow)(findpre(r, &v))
+	if r1 != nil {
+		p1 := r1.Prog
+		if p1.To.Type == obj.TYPE_REG && p1.To.Reg == v.Reg {
+			switch p1.As {
+			case arm.AADD:
+				if p1.Scond&arm.C_SBIT != 0 {
+					// avoid altering ADD.S/ADC sequences.
+					break
+				}
+
+				if p1.From.Type == obj.TYPE_REG || (p1.From.Type == obj.TYPE_SHIFT && p1.From.Offset&(1<<4) == 0 && ((p.As != arm.AMOVB && p.As != arm.AMOVBS) || (a == &p.From && p1.From.Offset&^0xf == 0))) || ((p1.From.Type == obj.TYPE_ADDR || p1.From.Type == obj.TYPE_CONST) && p1.From.Offset > -4096 && p1.From.Offset < 4096) {
+					if nochange(gc.Uniqs(r1), r, p1) {
+						if a != &p.From || v.Reg != p.To.Reg {
+							if finduse(g, r.S1, &v) {
+								if p1.Reg == 0 || p1.Reg == v.Reg {
+									/* pre-indexing */
+									p.Scond |= arm.C_WBIT
+								} else {
+									return false
+								}
+							}
+						}
+
+						switch p1.From.Type {
+						/* register offset */
+						case obj.TYPE_REG:
+							if gc.Nacl {
+								return false
+							}
+							*a = obj.Addr{}
+							a.Type = obj.TYPE_SHIFT
+							a.Offset = int64(p1.From.Reg) & 15
+
+							/* scaled register offset */
+						case obj.TYPE_SHIFT:
+							if gc.Nacl {
+								return false
+							}
+							*a = obj.Addr{}
+							a.Type = obj.TYPE_SHIFT
+							fallthrough
+
+							/* immediate offset */
+						case obj.TYPE_CONST,
+							obj.TYPE_ADDR:
+							a.Offset = p1.From.Offset
+						}
+
+						if p1.Reg != 0 {
+							a.Reg = p1.Reg
+						}
+						excise(r1)
+						return true
+					}
+				}
+
+			case arm.AMOVW:
+				if p1.From.Type == obj.TYPE_REG {
+					r2 := (*gc.Flow)(findinc(r1, r, &p1.From))
+					if r2 != nil {
+						var r3 *gc.Flow
+						for r3 = gc.Uniqs(r2); r3.Prog.As == obj.ANOP; r3 = gc.Uniqs(r3) {
+						}
+						if r3 == r {
+							/* post-indexing */
+							p1 := r2.Prog
+
+							a.Reg = p1.To.Reg
+							a.Offset = p1.From.Offset
+							p.Scond |= arm.C_PBIT
+							if !finduse(g, r, &r1.Prog.To) {
+								excise(r1)
+							}
+							excise(r2)
+							return true
+						}
+					}
+				}
+			}
+		}
+	}
+
+	if a != &p.From || a.Reg != p.To.Reg {
+		r1 := (*gc.Flow)(findinc(r, nil, &v))
+		if r1 != nil {
+			/* post-indexing */
+			p1 := r1.Prog
+
+			a.Offset = p1.From.Offset
+			p.Scond |= arm.C_PBIT
+			excise(r1)
+			return true
+		}
+	}
+
+	return false
+}
+
+/*
+ * return
+ * 1 if v only used (and substitute),
+ * 2 if read-alter-rewrite
+ * 3 if set
+ * 4 if set and used
+ * 0 otherwise (not touched)
+ */
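+// When s is non-nil, copyu also substitutes s for each use of v that
+// it finds in p, and a non-zero result then means the substitution
+// could not be done safely.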
+func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
+	switch p.As {
+	default:
+		fmt.Printf("copyu: can't find %v\n", obj.Aconv(int(p.As)))
+		return 2
+
+	case arm.AMOVM:
+		if v.Type != obj.TYPE_REG {
+			return 0
+		}
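+		// The MOVM register list is a bitmask in the constant operand:
+		// bit n set means Rn participates, so membership tests shift
+		// relative to REG_R0.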
+		if p.From.Type == obj.TYPE_CONST { /* read reglist, read/rar */
+			if s != nil {
+				if p.From.Offset&(1<<uint(v.Reg-arm.REG_R0)) != 0 {
+					return 1
+				}
+				if copysub(&p.To, v, s, 1) != 0 {
+					return 1
+				}
+				return 0
+			}
+
+			if copyau(&p.To, v) {
+				if p.Scond&arm.C_WBIT != 0 {
+					return 2
+				}
+				return 1
+			}
+
+			if p.From.Offset&(1<<uint(v.Reg-arm.REG_R0)) != 0 {
+				return 1 /* read/rar, write reglist */
+			}
+		} else {
+			if s != nil {
+				if p.To.Offset&(1<<uint(v.Reg-arm.REG_R0)) != 0 {
+					return 1
+				}
+				if copysub(&p.From, v, s, 1) != 0 {
+					return 1
+				}
+				return 0
+			}
+
+			if copyau(&p.From, v) {
+				if p.Scond&arm.C_WBIT != 0 {
+					return 2
+				}
+				if p.To.Offset&(1<<uint(v.Reg-arm.REG_R0)) != 0 {
+					return 4
+				}
+				return 1
+			}
+
+			if p.To.Offset&(1<<uint(v.Reg-arm.REG_R0)) != 0 {
+				return 3
+			}
+		}
+
+		return 0
+
+	case obj.ANOP, /* read,, write */
+		arm.ASQRTD,
+		arm.AMOVW,
+		arm.AMOVF,
+		arm.AMOVD,
+		arm.AMOVH,
+		arm.AMOVHS,
+		arm.AMOVHU,
+		arm.AMOVB,
+		arm.AMOVBS,
+		arm.AMOVBU,
+		arm.AMOVFW,
+		arm.AMOVWF,
+		arm.AMOVDW,
+		arm.AMOVWD,
+		arm.AMOVFD,
+		arm.AMOVDF:
+		if p.Scond&(arm.C_WBIT|arm.C_PBIT) != 0 {
+			if v.Type == obj.TYPE_REG {
+				if p.From.Type == obj.TYPE_MEM || p.From.Type == obj.TYPE_SHIFT {
+					if p.From.Reg == v.Reg {
+						return 2
+					}
+				} else {
+					if p.To.Reg == v.Reg {
+						return 2
+					}
+				}
+			}
+		}
+
+		if s != nil {
+			if copysub(&p.From, v, s, 1) != 0 {
+				return 1
+			}
+			if !copyas(&p.To, v) {
+				if copysub(&p.To, v, s, 1) != 0 {
+					return 1
+				}
+			}
+			return 0
+		}
+
+		if copyas(&p.To, v) {
+			if p.Scond != arm.C_SCOND_NONE {
+				return 2
+			}
+			if copyau(&p.From, v) {
+				return 4
+			}
+			return 3
+		}
+
+		if copyau(&p.From, v) {
+			return 1
+		}
+		if copyau(&p.To, v) {
+			return 1
+		}
+		return 0
+
+	case arm.AMULLU, /* read, read, write, write */
+		arm.AMULL,
+		arm.AMULA,
+		arm.AMVN:
+		return 2
+
+	case arm.AADD, /* read, read, write */
+		arm.AADC,
+		arm.ASUB,
+		arm.ASBC,
+		arm.ARSB,
+		arm.ASLL,
+		arm.ASRL,
+		arm.ASRA,
+		arm.AORR,
+		arm.AAND,
+		arm.AEOR,
+		arm.AMUL,
+		arm.AMULU,
+		arm.ADIV,
+		arm.ADIVU,
+		arm.AMOD,
+		arm.AMODU,
+		arm.AADDF,
+		arm.AADDD,
+		arm.ASUBF,
+		arm.ASUBD,
+		arm.AMULF,
+		arm.AMULD,
+		arm.ADIVF,
+		arm.ADIVD,
+		obj.ACHECKNIL,
+		/* read */
+		arm.ACMPF, /* read, read, */
+		arm.ACMPD,
+		arm.ACMP,
+		arm.ACMN,
+		arm.ACASE,
+		arm.ATST:
+		/* read,, */
+		if s != nil {
+			if copysub(&p.From, v, s, 1) != 0 {
+				return 1
+			}
+			if copysub1(p, v, s, 1) != 0 {
+				return 1
+			}
+			if !copyas(&p.To, v) {
+				if copysub(&p.To, v, s, 1) != 0 {
+					return 1
+				}
+			}
+			return 0
+		}
+
+		if copyas(&p.To, v) {
+			if p.Scond != arm.C_SCOND_NONE {
+				return 2
+			}
+			if p.Reg == 0 {
+				p.Reg = p.To.Reg
+			}
+			if copyau(&p.From, v) {
+				return 4
+			}
+			if copyau1(p, v) {
+				return 4
+			}
+			return 3
+		}
+
+		if copyau(&p.From, v) {
+			return 1
+		}
+		if copyau1(p, v) {
+			return 1
+		}
+		if copyau(&p.To, v) {
+			return 1
+		}
+		return 0
+
+	case arm.ABEQ, /* read, read */
+		arm.ABNE,
+		arm.ABCS,
+		arm.ABHS,
+		arm.ABCC,
+		arm.ABLO,
+		arm.ABMI,
+		arm.ABPL,
+		arm.ABVS,
+		arm.ABVC,
+		arm.ABHI,
+		arm.ABLS,
+		arm.ABGE,
+		arm.ABLT,
+		arm.ABGT,
+		arm.ABLE:
+		if s != nil {
+			if copysub(&p.From, v, s, 1) != 0 {
+				return 1
+			}
+			return copysub1(p, v, s, 1)
+		}
+
+		if copyau(&p.From, v) {
+			return 1
+		}
+		if copyau1(p, v) {
+			return 1
+		}
+		return 0
+
+	case arm.AB: /* funny */
+		if s != nil {
+			if copysub(&p.To, v, s, 1) != 0 {
+				return 1
+			}
+			return 0
+		}
+
+		if copyau(&p.To, v) {
+			return 1
+		}
+		return 0
+
+	case obj.ARET: /* funny */
+		if s != nil {
+			return 1
+		}
+		return 3
+
+	case arm.ABL: /* funny */
+		if v.Type == obj.TYPE_REG {
+			// TODO(rsc): REG_R0 and REG_F0 used to be
+			// (when register numbers started at 0) exregoffset and exfregoffset,
+			// which are unset entirely.
+			// It's strange that this handles R0 and F0 differently from the other
+			// registers. Possible failure to optimize?
+			if arm.REG_R0 < v.Reg && v.Reg <= arm.REGEXT {
+				return 2
+			}
+			if v.Reg == arm.REGARG {
+				return 2
+			}
+			if arm.REG_F0 < v.Reg && v.Reg <= arm.FREGEXT {
+				return 2
+			}
+		}
+
+		if p.From.Type == obj.TYPE_REG && v.Type == obj.TYPE_REG && p.From.Reg == v.Reg {
+			return 2
+		}
+
+		if s != nil {
+			if copysub(&p.To, v, s, 1) != 0 {
+				return 1
+			}
+			return 0
+		}
+
+		if copyau(&p.To, v) {
+			return 4
+		}
+		return 3
+
+	// R0 is zero, used by DUFFZERO, cannot be substituted.
+	// R1 is ptr to memory, used and set, cannot be substituted.
+	case obj.ADUFFZERO:
+		if v.Type == obj.TYPE_REG {
+			if v.Reg == arm.REG_R0 {
+				return 1
+			}
+			if v.Reg == arm.REG_R0+1 {
+				return 2
+			}
+		}
+
+		return 0
+
+	// R0 is scratch, set by DUFFCOPY, cannot be substituted.
+	// R1, R2 are ptr to src, dst, used and set, cannot be substituted.
+	case obj.ADUFFCOPY:
+		if v.Type == obj.TYPE_REG {
+			if v.Reg == arm.REG_R0 {
+				return 3
+			}
+			if v.Reg == arm.REG_R0+1 || v.Reg == arm.REG_R0+2 {
+				return 2
+			}
+		}
+
+		return 0
+
+	case obj.ATEXT: /* funny */
+		if v.Type == obj.TYPE_REG {
+			if v.Reg == arm.REGARG {
+				return 3
+			}
+		}
+		return 0
+
+	case obj.APCDATA,
+		obj.AFUNCDATA,
+		obj.AVARDEF,
+		obj.AVARKILL:
+		return 0
+	}
+}
+
+/*
+ * direct reference,
+ * could be set/use depending on
+ * semantics
+ */
+func copyas(a *obj.Addr, v *obj.Addr) bool {
+	if regtyp(v) {
+		if a.Type == v.Type {
+			if a.Reg == v.Reg {
+				return true
+			}
+		}
+	} else if v.Type == obj.TYPE_CONST { /* for constprop */
+		if a.Type == v.Type {
+			if a.Name == v.Name {
+				if a.Sym == v.Sym {
+					if a.Reg == v.Reg {
+						if a.Offset == v.Offset {
+							return true
+						}
+					}
+				}
+			}
+		}
+	}
+
+	return false
+}
+
+func sameaddr(a *obj.Addr, v *obj.Addr) bool {
+	if a.Type != v.Type {
+		return false
+	}
+	if regtyp(v) && a.Reg == v.Reg {
+		return true
+	}
+
+	// TODO(rsc): Change v->type to v->name and enable.
+	//if(v->type == NAME_AUTO || v->type == NAME_PARAM) {
+	//	if(v->offset == a->offset)
+	//		return 1;
+	//}
+	return false
+}
+
+/*
+ * either direct or indirect
+ */
+func copyau(a *obj.Addr, v *obj.Addr) bool {
+	if copyas(a, v) {
+		return true
+	}
+	if v.Type == obj.TYPE_REG {
+		if a.Type == obj.TYPE_ADDR && a.Reg != 0 {
+			if a.Reg == v.Reg {
+				return true
+			}
+		} else if a.Type == obj.TYPE_MEM {
+			if a.Reg == v.Reg {
+				return true
+			}
+		} else if a.Type == obj.TYPE_REGREG || a.Type == obj.TYPE_REGREG2 {
+			if a.Reg == v.Reg {
+				return true
+			}
+			if a.Offset == int64(v.Reg) {
+				return true
+			}
+		} else if a.Type == obj.TYPE_SHIFT {
+			if a.Offset&0xf == int64(v.Reg-arm.REG_R0) {
+				return true
+			}
+			if (a.Offset&(1<<4) != 0) && (a.Offset>>8)&0xf == int64(v.Reg-arm.REG_R0) {
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+/*
+ * compare v to the center
+ * register in p (p->reg)
+ */
+func copyau1(p *obj.Prog, v *obj.Addr) bool {
+	if v.Type == obj.TYPE_REG && v.Reg == 0 {
+		return false
+	}
+	return p.Reg == v.Reg
+}
+
+/*
+ * substitute s for v in a.
+ * returns 0 unconditionally; the callers establish that the
+ * substitution is safe via copyu before asking for it.
+ */
+func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f int) int {
+	if f != 0 {
+		if copyau(a, v) {
+			if a.Type == obj.TYPE_SHIFT {
+				if a.Offset&0xf == int64(v.Reg-arm.REG_R0) {
+					a.Offset = a.Offset&^0xf | int64(s.Reg)&0xf
+				}
+				if (a.Offset&(1<<4) != 0) && (a.Offset>>8)&0xf == int64(v.Reg-arm.REG_R0) {
+					a.Offset = a.Offset&^(0xf<<8) | (int64(s.Reg)&0xf)<<8
+				}
+			} else if a.Type == obj.TYPE_REGREG || a.Type == obj.TYPE_REGREG2 {
+				if a.Offset == int64(v.Reg) {
+					a.Offset = int64(s.Reg)
+				}
+				if a.Reg == v.Reg {
+					a.Reg = s.Reg
+				}
+			} else {
+				a.Reg = s.Reg
+			}
+		}
+	}
+
+	return 0
+}
+
+func copysub1(p1 *obj.Prog, v *obj.Addr, s *obj.Addr, f int) int {
+	if f != 0 {
+		if copyau1(p1, v) {
+			p1.Reg = s.Reg
+		}
+	}
+	return 0
+}
+
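+// predinfo pairs each conditional branch with its inverse and with the
+// 4-bit ARM condition-code field values for the condition and its
+// negation (EQ=0x0, NE=0x1, ..., GT=0xC, LE=0xD); applypred stamps
+// these codes onto instructions when it predicates a short block.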
+var predinfo = []struct {
+	opcode    int
+	notopcode int
+	scond     int
+	notscond  int
+}{
+	{arm.ABEQ, arm.ABNE, 0x0, 0x1},
+	{arm.ABNE, arm.ABEQ, 0x1, 0x0},
+	{arm.ABCS, arm.ABCC, 0x2, 0x3},
+	{arm.ABHS, arm.ABLO, 0x2, 0x3},
+	{arm.ABCC, arm.ABCS, 0x3, 0x2},
+	{arm.ABLO, arm.ABHS, 0x3, 0x2},
+	{arm.ABMI, arm.ABPL, 0x4, 0x5},
+	{arm.ABPL, arm.ABMI, 0x5, 0x4},
+	{arm.ABVS, arm.ABVC, 0x6, 0x7},
+	{arm.ABVC, arm.ABVS, 0x7, 0x6},
+	{arm.ABHI, arm.ABLS, 0x8, 0x9},
+	{arm.ABLS, arm.ABHI, 0x9, 0x8},
+	{arm.ABGE, arm.ABLT, 0xA, 0xB},
+	{arm.ABLT, arm.ABGE, 0xB, 0xA},
+	{arm.ABGT, arm.ABLE, 0xC, 0xD},
+	{arm.ABLE, arm.ABGT, 0xD, 0xC},
+}
+
+type Joininfo struct {
+	start *gc.Flow
+	last  *gc.Flow
+	end   *gc.Flow
+	len   int
+}
+
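+// Ways the chain found by joinsplit can end: at a control-flow join
+// or split, at the end of the function, at a branch, at an instruction
+// that sets the condition codes, or by exceeding the length worth
+// predicating.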
+const (
+	Join = iota
+	Split
+	End
+	Branch
+	Setcond
+	Toolong
+)
+
+const (
+	Falsecond = iota
+	Truecond
+	Delbranch
+	Keepbranch
+)
+
+func isbranch(p *obj.Prog) bool {
+	return (arm.ABEQ <= p.As) && (p.As <= arm.ABLE)
+}
+
+func predicable(p *obj.Prog) bool {
+	switch p.As {
+	case obj.ANOP,
+		obj.AXXX,
+		obj.ADATA,
+		obj.AGLOBL,
+		obj.ATEXT,
+		arm.AWORD,
+		arm.ABCASE,
+		arm.ACASE:
+		return false
+	}
+
+	if isbranch(p) {
+		return false
+	}
+	return true
+}
+
+/*
+ * Depends on an analysis of the encodings performed by 5l.
+ * These seem to be all of the opcodes that lead to the "S" bit
+ * being set in the instruction encodings.
+ *
+ * C_SBIT may also have been set explicitly in p->scond.
+ */
+func modifiescpsr(p *obj.Prog) bool {
+	switch p.As {
+	case arm.AMULLU,
+		arm.AMULA,
+		arm.AMULU,
+		arm.ADIVU,
+		arm.ATEQ,
+		arm.ACMN,
+		arm.ATST,
+		arm.ACMP,
+		arm.AMUL,
+		arm.ADIV,
+		arm.AMOD,
+		arm.AMODU,
+		arm.ABL:
+		return true
+	}
+
+	if p.Scond&arm.C_SBIT != 0 {
+		return true
+	}
+	return false
+}
+
+/*
+ * Find the maximal chain of instructions starting with r which could
+ * be executed conditionally
+ */
+func joinsplit(r *gc.Flow, j *Joininfo) int {
+	j.start = r
+	j.last = r
+	j.len = 0
+	for {
+		if r.P2 != nil && (r.P1 != nil || r.P2.P2link != nil) {
+			j.end = r
+			return Join
+		}
+
+		if r.S1 != nil && r.S2 != nil {
+			j.end = r
+			return Split
+		}
+
+		j.last = r
+		if r.Prog.As != obj.ANOP {
+			j.len++
+		}
+		if r.S1 == nil && r.S2 == nil {
+			j.end = r.Link
+			return End
+		}
+
+		if r.S2 != nil {
+			j.end = r.S2
+			return Branch
+		}
+
+		if modifiescpsr(r.Prog) {
+			j.end = r.S1
+			return Setcond
+		}
+
+		r = r.S1
+		if j.len >= 4 {
+			break
+		}
+	}
+
+	j.end = r
+	return Toolong
+}
+
+func successor(r *gc.Flow) *gc.Flow {
+	if r.S1 != nil {
+		return r.S1
+	} else {
+		return r.S2
+	}
+}
+
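+// applypred stamps the condition (or its negation) onto every
+// instruction of the chain j; a trailing B is deleted or converted to
+// the corresponding conditional branch as requested by branch.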
+func applypred(rstart *gc.Flow, j *Joininfo, cond int, branch int) {
+	if j.len == 0 {
+		return
+	}
+	var pred int
+	if cond == Truecond {
+		pred = predinfo[rstart.Prog.As-arm.ABEQ].scond
+	} else {
+		pred = predinfo[rstart.Prog.As-arm.ABEQ].notscond
+	}
+
+	for r := (*gc.Flow)(j.start); ; r = successor(r) {
+		if r.Prog.As == arm.AB {
+			if r != j.last || branch == Delbranch {
+				excise(r)
+			} else {
+				if cond == Truecond {
+					r.Prog.As = int16(predinfo[rstart.Prog.As-arm.ABEQ].opcode)
+				} else {
+					r.Prog.As = int16(predinfo[rstart.Prog.As-arm.ABEQ].notopcode)
+				}
+			}
+		} else if predicable(r.Prog) {
+			r.Prog.Scond = uint8(int(r.Prog.Scond&^arm.C_SCOND) | pred)
+		}
+		if r.S1 != r.Link {
+			r.S1 = r.Link
+			r.Link.P1 = r
+		}
+
+		if r == j.last {
+			break
+		}
+	}
+}
+
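+// predicate rewrites short branch-over sequences into conditionally
+// executed instructions, eliminating the branch itself when both arms
+// can be predicated.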
+func predicate(g *gc.Graph) {
+	var t1 int
+	var t2 int
+	var j1 Joininfo
+	var j2 Joininfo
+
+	for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
+		if isbranch(r.Prog) {
+			t1 = joinsplit(r.S1, &j1)
+			t2 = joinsplit(r.S2, &j2)
+			if j1.last.Link != j2.start {
+				continue
+			}
+			if j1.end == j2.end {
+				if (t1 == Branch && (t2 == Join || t2 == Setcond)) || (t2 == Join && (t1 == Join || t1 == Setcond)) {
+					applypred(r, &j1, Falsecond, Delbranch)
+					applypred(r, &j2, Truecond, Delbranch)
+					excise(r)
+					continue
+				}
+			}
+
+			if t1 == End || t1 == Branch {
+				applypred(r, &j1, Falsecond, Keepbranch)
+				excise(r)
+				continue
+			}
+		}
+	}
+}
+
+func isdconst(a *obj.Addr) bool {
+	return a.Type == obj.TYPE_CONST
+}
+
+func isfloatreg(a *obj.Addr) bool {
+	return arm.REG_F0 <= a.Reg && a.Reg <= arm.REG_F15
+}
+
+func stackaddr(a *obj.Addr) bool {
+	return regtyp(a) && a.Reg == arm.REGSP
+}
+
+func smallindir(a *obj.Addr, reg *obj.Addr) bool {
+	return reg.Type == obj.TYPE_REG && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && 0 <= a.Offset && a.Offset < 4096
+}
+
+func excise(r *gc.Flow) {
+	p := (*obj.Prog)(r.Prog)
+	obj.Nopout(p)
+}
diff --git a/src/cmd/compile/internal/arm/prog.go b/src/cmd/compile/internal/arm/prog.go
new file mode 100644
index 0000000..cdf9d29
--- /dev/null
+++ b/src/cmd/compile/internal/arm/prog.go
@@ -0,0 +1,165 @@
+// Copyright 2013 The Go Authors.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package arm
+
+import (
+	"cmd/compile/internal/gc"
+	"cmd/internal/obj"
+	"cmd/internal/obj/arm"
+)
+
+const (
+	RightRdwr = gc.RightRead | gc.RightWrite
+)
+
+// This table gives the basic information about instructions
+// generated by the compiler and processed in the optimizer.
+// See opt.h for bit definitions.
+//
+// Instructions not generated need not be listed.
+// As an exception to that rule, we typically write down all the
+// size variants of an operation even if we just use a subset.
+//
+// The table is formatted for 8-space tabs.
+var progtable = [arm.ALAST]obj.ProgInfo{
+	obj.ATYPE:     {gc.Pseudo | gc.Skip, 0, 0, 0},
+	obj.ATEXT:     {gc.Pseudo, 0, 0, 0},
+	obj.AFUNCDATA: {gc.Pseudo, 0, 0, 0},
+	obj.APCDATA:   {gc.Pseudo, 0, 0, 0},
+	obj.AUNDEF:    {gc.Break, 0, 0, 0},
+	obj.AUSEFIELD: {gc.OK, 0, 0, 0},
+	obj.ACHECKNIL: {gc.LeftRead, 0, 0, 0},
+	obj.AVARDEF:   {gc.Pseudo | gc.RightWrite, 0, 0, 0},
+	obj.AVARKILL:  {gc.Pseudo | gc.RightWrite, 0, 0, 0},
+
+	// NOP is an internal no-op that also stands
+	// for USED and SET annotations; it is not a machine instruction.
+	obj.ANOP: {gc.LeftRead | gc.RightWrite, 0, 0, 0},
+
+	// Integer.
+	arm.AADC:    {gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	arm.AADD:    {gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	arm.AAND:    {gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	arm.ABIC:    {gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	arm.ACMN:    {gc.SizeL | gc.LeftRead | gc.RightRead, 0, 0, 0},
+	arm.ACMP:    {gc.SizeL | gc.LeftRead | gc.RightRead, 0, 0, 0},
+	arm.ADIVU:   {gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	arm.ADIV:    {gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	arm.AEOR:    {gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	arm.AMODU:   {gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	arm.AMOD:    {gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	arm.AMULALU: {gc.SizeL | gc.LeftRead | gc.RegRead | RightRdwr, 0, 0, 0},
+	arm.AMULAL:  {gc.SizeL | gc.LeftRead | gc.RegRead | RightRdwr, 0, 0, 0},
+	arm.AMULA:   {gc.SizeL | gc.LeftRead | gc.RegRead | RightRdwr, 0, 0, 0},
+	arm.AMULU:   {gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	arm.AMUL:    {gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	arm.AMULL:   {gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	arm.AMULLU:  {gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	arm.AMVN:    {gc.SizeL | gc.LeftRead | gc.RightWrite, 0, 0, 0},
+	arm.AORR:    {gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	arm.ARSB:    {gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	arm.ARSC:    {gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	arm.ASBC:    {gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	arm.ASLL:    {gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	arm.ASRA:    {gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	arm.ASRL:    {gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	arm.ASUB:    {gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	arm.ATEQ:    {gc.SizeL | gc.LeftRead | gc.RightRead, 0, 0, 0},
+	arm.ATST:    {gc.SizeL | gc.LeftRead | gc.RightRead, 0, 0, 0},
+
+	// Floating point.
+	arm.AADDD:  {gc.SizeD | gc.LeftRead | RightRdwr, 0, 0, 0},
+	arm.AADDF:  {gc.SizeF | gc.LeftRead | RightRdwr, 0, 0, 0},
+	arm.ACMPD:  {gc.SizeD | gc.LeftRead | gc.RightRead, 0, 0, 0},
+	arm.ACMPF:  {gc.SizeF | gc.LeftRead | gc.RightRead, 0, 0, 0},
+	arm.ADIVD:  {gc.SizeD | gc.LeftRead | RightRdwr, 0, 0, 0},
+	arm.ADIVF:  {gc.SizeF | gc.LeftRead | RightRdwr, 0, 0, 0},
+	arm.AMULD:  {gc.SizeD | gc.LeftRead | RightRdwr, 0, 0, 0},
+	arm.AMULF:  {gc.SizeF | gc.LeftRead | RightRdwr, 0, 0, 0},
+	arm.ASUBD:  {gc.SizeD | gc.LeftRead | RightRdwr, 0, 0, 0},
+	arm.ASUBF:  {gc.SizeF | gc.LeftRead | RightRdwr, 0, 0, 0},
+	arm.ASQRTD: {gc.SizeD | gc.LeftRead | RightRdwr, 0, 0, 0},
+
+	// Conversions.
+	arm.AMOVWD: {gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	arm.AMOVWF: {gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	arm.AMOVDF: {gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	arm.AMOVDW: {gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	arm.AMOVFD: {gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	arm.AMOVFW: {gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+
+	// Moves.
+	arm.AMOVB: {gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+	arm.AMOVD: {gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+	arm.AMOVF: {gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+	arm.AMOVH: {gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+	arm.AMOVW: {gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+
+	// In addition, duffzero reads R0,R1 and writes R1.  This fact is
+	// encoded in peep.go (see copyu's ADUFFZERO case).
+	obj.ADUFFZERO: {gc.Call, 0, 0, 0},
+
+	// In addition, duffcopy reads R1,R2 and writes R0,R1,R2.  This fact is
+	// encoded in peep.go (see copyu's ADUFFCOPY case).
+	obj.ADUFFCOPY: {gc.Call, 0, 0, 0},
+
+	// These should be split into the two different conversions instead
+	// of overloading the one.
+	arm.AMOVBS: {gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	arm.AMOVBU: {gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	arm.AMOVHS: {gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	arm.AMOVHU: {gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+
+	// Jumps.
+	arm.AB:   {gc.Jump | gc.Break, 0, 0, 0},
+	arm.ABL:  {gc.Call, 0, 0, 0},
+	arm.ABEQ: {gc.Cjmp, 0, 0, 0},
+	arm.ABNE: {gc.Cjmp, 0, 0, 0},
+	arm.ABCS: {gc.Cjmp, 0, 0, 0},
+	arm.ABHS: {gc.Cjmp, 0, 0, 0},
+	arm.ABCC: {gc.Cjmp, 0, 0, 0},
+	arm.ABLO: {gc.Cjmp, 0, 0, 0},
+	arm.ABMI: {gc.Cjmp, 0, 0, 0},
+	arm.ABPL: {gc.Cjmp, 0, 0, 0},
+	arm.ABVS: {gc.Cjmp, 0, 0, 0},
+	arm.ABVC: {gc.Cjmp, 0, 0, 0},
+	arm.ABHI: {gc.Cjmp, 0, 0, 0},
+	arm.ABLS: {gc.Cjmp, 0, 0, 0},
+	arm.ABGE: {gc.Cjmp, 0, 0, 0},
+	arm.ABLT: {gc.Cjmp, 0, 0, 0},
+	arm.ABGT: {gc.Cjmp, 0, 0, 0},
+	arm.ABLE: {gc.Cjmp, 0, 0, 0},
+	obj.ARET: {gc.Break, 0, 0, 0},
+}
+
+func proginfo(p *obj.Prog) {
+	info := &p.Info
+	*info = progtable[p.As]
+	if info.Flags == 0 {
+		gc.Fatal("unknown instruction %v", p)
+	}
+
+	if p.From.Type == obj.TYPE_ADDR && p.From.Sym != nil && (info.Flags&gc.LeftRead != 0) {
+		info.Flags &^= gc.LeftRead
+		info.Flags |= gc.LeftAddr
+	}
+
+	if (info.Flags&gc.RegRead != 0) && p.Reg == 0 {
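+		// A two-operand form such as ADD R1, R2 has no middle
+		// register; the destination doubles as the second source,
+		// so mark it read as well.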
+		info.Flags &^= gc.RegRead
+		info.Flags |= gc.CanRegRead | gc.RightRead
+	}
+
+	if (p.Scond&arm.C_SCOND != arm.C_SCOND_NONE) && (info.Flags&gc.RightWrite != 0) {
+		info.Flags |= gc.RightRead
+	}
+
+	switch p.As {
+	case arm.ADIV,
+		arm.ADIVU,
+		arm.AMOD,
+		arm.AMODU:
+		info.Regset |= RtoB(arm.REG_R12)
+	}
+}
diff --git a/src/cmd/compile/internal/arm/reg.go b/src/cmd/compile/internal/arm/reg.go
new file mode 100644
index 0000000..b72ccc9
--- /dev/null
+++ b/src/cmd/compile/internal/arm/reg.go
@@ -0,0 +1,136 @@
+// Inferno utils/5c/reg.c
+// http://code.google.com/p/inferno-os/source/browse/utils/5c/reg.c
+//
+//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
+//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+//	Portions Copyright © 1997-1999 Vita Nuova Limited
+//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+//	Portions Copyright © 2004,2006 Bruce Ellis
+//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+//	Portions Copyright © 2009 The Go Authors.  All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package arm
+
+import "cmd/internal/obj/arm"
+import "cmd/compile/internal/gc"
+
+const (
+	NREGVAR = 32
+)
+
+var regname = []string{
+	".R0",
+	".R1",
+	".R2",
+	".R3",
+	".R4",
+	".R5",
+	".R6",
+	".R7",
+	".R8",
+	".R9",
+	".R10",
+	".R11",
+	".R12",
+	".R13",
+	".R14",
+	".R15",
+	".F0",
+	".F1",
+	".F2",
+	".F3",
+	".F4",
+	".F5",
+	".F6",
+	".F7",
+	".F8",
+	".F9",
+	".F10",
+	".F11",
+	".F12",
+	".F13",
+	".F14",
+	".F15",
+}
+
+func regnames(n *int) []string {
+	*n = NREGVAR
+	return regname
+}
+
+func excludedregs() uint64 {
+	return RtoB(arm.REGSP) | RtoB(arm.REGLINK) | RtoB(arm.REGPC)
+}
+
+func doregbits(r int) uint64 {
+	return 0
+}
+
+/*
+ *	bit	reg
+ *	0	R0
+ *	1	R1
+ *	...	...
+ *	10	R10
+ *	12	R12
+ *
+ *	bit	reg
+ *	18	F2
+ *	19	F3
+ *	...	...
+ *	31	F15
+ */
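+// For example, RtoB(arm.REG_R3) == 1<<3 and RtoB(arm.REG_F4) == 1<<20,
+// while R9-R11 and R13-R15 (reserved; note R12 is allowed) map to 0.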
+func RtoB(r int) uint64 {
+	if arm.REG_R0 <= r && r <= arm.REG_R15 {
+		if r >= arm.REGTMP-2 && r != arm.REG_R12 { // excluded R9 and R10 for m and g, but not R12
+			return 0
+		}
+		return 1 << uint(r-arm.REG_R0)
+	}
+
+	if arm.REG_F0 <= r && r <= arm.REG_F15 {
+		if r < arm.REG_F2 || r > arm.REG_F0+arm.NFREG-1 {
+			return 0
+		}
+		return 1 << uint((r-arm.REG_F0)+16)
+	}
+
+	return 0
+}
+
+func BtoR(b uint64) int {
+	// TODO Allow R0 and R1, but be careful with a 0 return
+	// TODO Allow R9. Only R10 is reserved now (just g, not m).
+	b &= 0x11fc // excluded R9 and R10 for m and g, but not R12
+	if b == 0 {
+		return 0
+	}
+	return gc.Bitno(b) + arm.REG_R0
+}
+
+func BtoF(b uint64) int {
+	b &= 0xfffc0000
+	if b == 0 {
+		return 0
+	}
+	return gc.Bitno(b) - 16 + arm.REG_F0
+}
diff --git a/src/cmd/compile/internal/arm64/cgen.go b/src/cmd/compile/internal/arm64/cgen.go
new file mode 100644
index 0000000..30326d7
--- /dev/null
+++ b/src/cmd/compile/internal/arm64/cgen.go
@@ -0,0 +1,157 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package arm64
+
+import (
+	"cmd/compile/internal/gc"
+	"cmd/internal/obj"
+	"cmd/internal/obj/arm64"
+)
+
+func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
+	// determine alignment.
+	// want to avoid unaligned access, so have to use
+	// smaller operations for less aligned types.
+	// for example moving [4]byte must use 4 MOVB not 1 MOVW.
+	align := int(n.Type.Align)
+
+	var op int
+	switch align {
+	default:
+		gc.Fatal("sgen: invalid alignment %d for %v", align, n.Type)
+
+	case 1:
+		op = arm64.AMOVB
+
+	case 2:
+		op = arm64.AMOVH
+
+	case 4:
+		op = arm64.AMOVW
+
+	case 8:
+		op = arm64.AMOVD
+	}
+
+	if w%int64(align) != 0 {
+		gc.Fatal("sgen: unaligned size %d (align=%d) for %v", w, align, n.Type)
+	}
+	c := int32(w / int64(align))
+
+	if osrc%int64(align) != 0 || odst%int64(align) != 0 {
+		gc.Fatal("sgen: unaligned offset src %d or dst %d (align %d)", osrc, odst, align)
+	}
+
+	// if we are copying forward on the stack and
+	// the src and dst overlap, then reverse direction
+	dir := align
+
+	if osrc < odst && int64(odst) < int64(osrc)+w {
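+		// dst starts inside the source; copy from the high end
+		// downward so no byte is overwritten before it is read.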
+		dir = -dir
+	}
+
+	var dst gc.Node
+	var src gc.Node
+	if n.Ullman >= res.Ullman {
+		gc.Agenr(n, &dst, res) // temporarily use dst
+		gc.Regalloc(&src, gc.Types[gc.Tptr], nil)
+		gins(arm64.AMOVD, &dst, &src)
+		if res.Op == gc.ONAME {
+			gc.Gvardef(res)
+		}
+		gc.Agen(res, &dst)
+	} else {
+		if res.Op == gc.ONAME {
+			gc.Gvardef(res)
+		}
+		gc.Agenr(res, &dst, res)
+		gc.Agenr(n, &src, nil)
+	}
+
+	var tmp gc.Node
+	gc.Regalloc(&tmp, gc.Types[gc.Tptr], nil)
+
+	// set up end marker
+	var nend gc.Node
+
+	// move src and dest to the end of block if necessary
+	if dir < 0 {
+		if c >= 4 {
+			gc.Regalloc(&nend, gc.Types[gc.Tptr], nil)
+			gins(arm64.AMOVD, &src, &nend)
+		}
+
+		p := gins(arm64.AADD, nil, &src)
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = w
+
+		p = gins(arm64.AADD, nil, &dst)
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = w
+	} else {
+		p := gins(arm64.AADD, nil, &src)
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = int64(-dir)
+
+		p = gins(arm64.AADD, nil, &dst)
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = int64(-dir)
+
+		if c >= 4 {
+			gc.Regalloc(&nend, gc.Types[gc.Tptr], nil)
+			p := gins(arm64.AMOVD, &src, &nend)
+			p.From.Type = obj.TYPE_ADDR
+			p.From.Offset = w
+		}
+	}
+
+	// move
+	// TODO: enable duffcopy for larger copies.
+	if c >= 4 {
+		p := gins(op, &src, &tmp)
+		p.From.Type = obj.TYPE_MEM
+		p.From.Offset = int64(dir)
+		p.Scond = arm64.C_XPRE
+		ploop := p
+
+		p = gins(op, &tmp, &dst)
+		p.To.Type = obj.TYPE_MEM
+		p.To.Offset = int64(dir)
+		p.Scond = arm64.C_XPRE
+
+		p = gcmp(arm64.ACMP, &src, &nend)
+
+		gc.Patch(gc.Gbranch(arm64.ABNE, nil, 0), ploop)
+		gc.Regfree(&nend)
+	} else {
+		// TODO(austin): Instead of generating ADD $-8,R8; ADD
+		// $-8,R7; n*(MOVDU 8(R8),R9; MOVDU R9,8(R7);) just
+		// generate the offsets directly and eliminate the
+		// ADDs.  That will produce shorter, more
+		// pipeline-able code.
+		var p *obj.Prog
+		for ; c > 0; c-- {
+			p = gins(op, &src, &tmp)
+			p.From.Type = obj.TYPE_MEM
+			p.From.Offset = int64(dir)
+			p.Scond = arm64.C_XPRE
+
+			p = gins(op, &tmp, &dst)
+			p.To.Type = obj.TYPE_MEM
+			p.To.Offset = int64(dir)
+			p.Scond = arm64.C_XPRE
+		}
+	}
+
+	gc.Regfree(&dst)
+	gc.Regfree(&src)
+	gc.Regfree(&tmp)
+}
diff --git a/src/cmd/compile/internal/arm64/galign.go b/src/cmd/compile/internal/arm64/galign.go
new file mode 100644
index 0000000..38def8f
--- /dev/null
+++ b/src/cmd/compile/internal/arm64/galign.go
@@ -0,0 +1,93 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package arm64
+
+import (
+	"cmd/compile/internal/gc"
+	"cmd/internal/obj"
+	"cmd/internal/obj/arm64"
+)
+
+var thechar int = '7'
+
+var thestring string = "arm64"
+
+var thelinkarch *obj.LinkArch = &arm64.Linkarm64
+
+func linkarchinit() {
+}
+
+var MAXWIDTH int64 = 1 << 50
+
+/*
+ * go declares several platform-specific type aliases:
+ * int, uint, and uintptr
+ */
+var typedefs = []gc.Typedef{
+	gc.Typedef{"int", gc.TINT, gc.TINT64},
+	gc.Typedef{"uint", gc.TUINT, gc.TUINT64},
+	gc.Typedef{"uintptr", gc.TUINTPTR, gc.TUINT64},
+}
+
+func betypeinit() {
+	gc.Widthptr = 8
+	gc.Widthint = 8
+	gc.Widthreg = 8
+}
+
+func Main() {
+	gc.Thearch.Thechar = thechar
+	gc.Thearch.Thestring = thestring
+	gc.Thearch.Thelinkarch = thelinkarch
+	gc.Thearch.Typedefs = typedefs
+	gc.Thearch.REGSP = arm64.REGSP
+	gc.Thearch.REGCTXT = arm64.REGCTXT
+	gc.Thearch.REGCALLX = arm64.REGRT1
+	gc.Thearch.REGCALLX2 = arm64.REGRT2
+	gc.Thearch.REGRETURN = arm64.REG_R0
+	gc.Thearch.REGMIN = arm64.REG_R0
+	gc.Thearch.REGMAX = arm64.REG_R31
+	gc.Thearch.REGZERO = arm64.REGZERO
+	gc.Thearch.FREGMIN = arm64.REG_F0
+	gc.Thearch.FREGMAX = arm64.REG_F31
+	gc.Thearch.MAXWIDTH = MAXWIDTH
+	gc.Thearch.ReservedRegs = resvd
+
+	gc.Thearch.Betypeinit = betypeinit
+	gc.Thearch.Cgen_hmul = cgen_hmul
+	gc.Thearch.Cgen_shift = cgen_shift
+	gc.Thearch.Clearfat = clearfat
+	gc.Thearch.Defframe = defframe
+	gc.Thearch.Dodiv = dodiv
+	gc.Thearch.Excise = excise
+	gc.Thearch.Expandchecks = expandchecks
+	gc.Thearch.Getg = getg
+	gc.Thearch.Gins = gins
+	gc.Thearch.Ginscmp = ginscmp
+	gc.Thearch.Ginscon = ginscon
+	gc.Thearch.Ginsnop = ginsnop
+	gc.Thearch.Gmove = gmove
+	gc.Thearch.Linkarchinit = linkarchinit
+	gc.Thearch.Peep = peep
+	gc.Thearch.Proginfo = proginfo
+	gc.Thearch.Regtyp = regtyp
+	gc.Thearch.Sameaddr = sameaddr
+	gc.Thearch.Smallindir = smallindir
+	gc.Thearch.Stackaddr = stackaddr
+	gc.Thearch.Blockcopy = blockcopy
+	gc.Thearch.Sudoaddable = sudoaddable
+	gc.Thearch.Sudoclean = sudoclean
+	gc.Thearch.Excludedregs = excludedregs
+	gc.Thearch.RtoB = RtoB
+	gc.Thearch.FtoB = RtoB
+	gc.Thearch.BtoR = BtoR
+	gc.Thearch.BtoF = BtoF
+	gc.Thearch.Optoas = optoas
+	gc.Thearch.Doregbits = doregbits
+	gc.Thearch.Regnames = regnames
+
+	gc.Main()
+	gc.Exit(0)
+}
diff --git a/src/cmd/compile/internal/arm64/ggen.go b/src/cmd/compile/internal/arm64/ggen.go
new file mode 100644
index 0000000..851ca4e
--- /dev/null
+++ b/src/cmd/compile/internal/arm64/ggen.go
@@ -0,0 +1,532 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package arm64
+
+import (
+	"cmd/compile/internal/gc"
+	"cmd/internal/obj"
+	"cmd/internal/obj/arm64"
+	"fmt"
+)
+
+func defframe(ptxt *obj.Prog) {
+	var n *gc.Node
+
+	// fill in argument size, stack size
+	ptxt.To.Type = obj.TYPE_TEXTSIZE
+
+	ptxt.To.Val = int32(gc.Rnd(gc.Curfn.Type.Argwid, int64(gc.Widthptr)))
+	frame := uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
+	ptxt.To.Offset = int64(frame)
+
+	// insert code to zero ambiguously live variables
+	// so that the garbage collector only sees initialized values
+	// when it looks for pointers.
+	p := ptxt
+
+	hi := int64(0)
+	lo := hi
+
+	// iterate through declarations - they are sorted in decreasing xoffset order.
+	for l := gc.Curfn.Func.Dcl; l != nil; l = l.Next {
+		n = l.N
+		if !n.Name.Needzero {
+			continue
+		}
+		if n.Class != gc.PAUTO {
+			gc.Fatal("needzero class %d", n.Class)
+		}
+		if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
+			gc.Fatal("var %v has size %d offset %d", gc.Nconv(n, obj.FmtLong), int(n.Type.Width), int(n.Xoffset))
+		}
+
+		if lo != hi && n.Xoffset+n.Type.Width >= lo-int64(2*gc.Widthreg) {
+			// merge with range we already have
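+			// (declarations are sorted by decreasing offset, so a
+			// variable within two words of lo is cheaper to fold in
+			// than to start a new run)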
+			lo = n.Xoffset
+
+			continue
+		}
+
+		// zero old range
+		p = zerorange(p, int64(frame), lo, hi)
+
+		// set new range
+		hi = n.Xoffset + n.Type.Width
+
+		lo = n.Xoffset
+	}
+
+	// zero final range
+	zerorange(p, int64(frame), lo, hi)
+}
+
+var darwin = obj.Getgoos() == "darwin"
+
+func zerorange(p *obj.Prog, frame int64, lo int64, hi int64) *obj.Prog {
+	cnt := hi - lo
+	if cnt == 0 {
+		return p
+	}
+	if cnt < int64(4*gc.Widthptr) {
+		for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
+			p = appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+frame+lo+i)
+		}
+	} else if cnt <= int64(128*gc.Widthptr) && !darwin { // darwin ld64 cannot handle BR26 reloc with non-zero addend
+		p = appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REGRT1, 0)
+		p = appendpp(p, arm64.AADD, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, arm64.REGRT1, 0)
+		p.Reg = arm64.REGRT1
+		p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
+		f := gc.Sysfunc("duffzero")
+		gc.Naddr(&p.To, f)
+		gc.Afunclit(&p.To, f)
+		p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
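+		// (each duffzero step zeroes one pointer-sized word in one
+		// 4-byte instruction, so entering at byte offset 4*(128-n) of
+		// its 128-step body leaves exactly n words to clear)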
+	} else {
+		p = appendpp(p, arm64.AMOVD, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, arm64.REGTMP, 0)
+		p = appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REGRT1, 0)
+		p = appendpp(p, arm64.AADD, obj.TYPE_REG, arm64.REGTMP, 0, obj.TYPE_REG, arm64.REGRT1, 0)
+		p.Reg = arm64.REGRT1
+		p = appendpp(p, arm64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, arm64.REGTMP, 0)
+		p = appendpp(p, arm64.AADD, obj.TYPE_REG, arm64.REGTMP, 0, obj.TYPE_REG, arm64.REGRT2, 0)
+		p.Reg = arm64.REGRT1
+		p = appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGRT1, int64(gc.Widthptr))
+		p.Scond = arm64.C_XPRE
+		p1 := p
+		p = appendpp(p, arm64.ACMP, obj.TYPE_REG, arm64.REGRT1, 0, obj.TYPE_NONE, 0, 0)
+		p.Reg = arm64.REGRT2
+		p = appendpp(p, arm64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
+		gc.Patch(p, p1)
+	}
+
+	return p
+}
+
+func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int64, ttype int, treg int, toffset int64) *obj.Prog {
+	q := gc.Ctxt.NewProg()
+	gc.Clearp(q)
+	q.As = int16(as)
+	q.Lineno = p.Lineno
+	q.From.Type = int16(ftype)
+	q.From.Reg = int16(freg)
+	q.From.Offset = foffset
+	q.To.Type = int16(ttype)
+	q.To.Reg = int16(treg)
+	q.To.Offset = toffset
+	q.Link = p.Link
+	p.Link = q
+	return q
+}
+
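+// ginsnop emits HINT $0, the architectural no-op encoding on arm64.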
+func ginsnop() {
+	var con gc.Node
+	gc.Nodconst(&con, gc.Types[gc.TINT], 0)
+	gins(arm64.AHINT, &con, nil)
+}
+
+var panicdiv *gc.Node
+
+/*
+ * generate division.
+ * generates one of:
+ *	res = nl / nr
+ *	res = nl % nr
+ * according to op.
+ */
+func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+	// Have to be careful about handling
+	// the most negative int divided by -1 correctly.
+	// The hardware will generate an undefined result.
+	// Also need to explicitly trap on division by zero;
+	// the hardware will silently generate an undefined result.
+	// DIVW would leave an unpredictable result in the upper 32 bits,
+	// so always use DIVD/DIVDU.
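+	// (For example, the check below rewrites a / -1 as -a, which for
+	// the most negative value wraps back to itself, and a % -1 as 0.)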
+	t := nl.Type
+
+	t0 := t
+	check := 0
+	if gc.Issigned[t.Etype] {
+		check = 1
+		if gc.Isconst(nl, gc.CTINT) && nl.Int() != -(1<<uint64(t.Width*8-1)) {
+			check = 0
+		} else if gc.Isconst(nr, gc.CTINT) && nr.Int() != -1 {
+			check = 0
+		}
+	}
+
+	if t.Width < 8 {
+		if gc.Issigned[t.Etype] {
+			t = gc.Types[gc.TINT64]
+		} else {
+			t = gc.Types[gc.TUINT64]
+		}
+		check = 0
+	}
+
+	a := optoas(gc.ODIV, t)
+
+	var tl gc.Node
+	gc.Regalloc(&tl, t0, nil)
+	var tr gc.Node
+	gc.Regalloc(&tr, t0, nil)
+	if nl.Ullman >= nr.Ullman {
+		gc.Cgen(nl, &tl)
+		gc.Cgen(nr, &tr)
+	} else {
+		gc.Cgen(nr, &tr)
+		gc.Cgen(nl, &tl)
+	}
+
+	if t != t0 {
+		// Convert
+		tl2 := tl
+
+		tr2 := tr
+		tl.Type = t
+		tr.Type = t
+		gmove(&tl2, &tl)
+		gmove(&tr2, &tr)
+	}
+
+	// Handle divide-by-zero panic.
+	p1 := gins(optoas(gc.OCMP, t), &tr, nil)
+	p1.Reg = arm64.REGZERO
+	p1 = gc.Gbranch(optoas(gc.ONE, t), nil, +1)
+	if panicdiv == nil {
+		panicdiv = gc.Sysfunc("panicdivide")
+	}
+	gc.Ginscall(panicdiv, -1)
+	gc.Patch(p1, gc.Pc)
+
+	var p2 *obj.Prog
+	if check != 0 {
+		var nm1 gc.Node
+		gc.Nodconst(&nm1, t, -1)
+		gcmp(optoas(gc.OCMP, t), &tr, &nm1)
+		p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
+		if op == gc.ODIV {
+			// a / (-1) is -a.
+			gins(optoas(gc.OMINUS, t), &tl, &tl)
+
+			gmove(&tl, res)
+		} else {
+			// a % (-1) is 0.
+			var nz gc.Node
+			gc.Nodconst(&nz, t, 0)
+
+			gmove(&nz, res)
+		}
+
+		p2 = gc.Gbranch(obj.AJMP, nil, 0)
+		gc.Patch(p1, gc.Pc)
+	}
+
+	p1 = gins(a, &tr, &tl)
+	if op == gc.ODIV {
+		gc.Regfree(&tr)
+		gmove(&tl, res)
+	} else {
+		// A%B = A-(A/B*B)
+		var tm gc.Node
+		gc.Regalloc(&tm, t, nil)
+
+		// patch div to use the 3 register form
+		// TODO(minux): add gins3?
+		p1.Reg = p1.To.Reg
+
+		p1.To.Reg = tm.Reg
+		gins(optoas(gc.OMUL, t), &tr, &tm)
+		gc.Regfree(&tr)
+		gins(optoas(gc.OSUB, t), &tm, &tl)
+		gc.Regfree(&tm)
+		gmove(&tl, res)
+	}
+
+	gc.Regfree(&tl)
+	if check != 0 {
+		gc.Patch(p2, gc.Pc)
+	}
+}
+
+/*
+ * generate high multiply:
+ *   res = (nl*nr) >> width
+ */
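+// For operands narrower than 64 bits, a full-width multiply followed
+// by a shift right by the operand width extracts the high half; for
+// 64-bit operands SMULH/UMULH produce it directly.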
+func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
+	// largest ullman on left.
+	if nl.Ullman < nr.Ullman {
+		tmp := (*gc.Node)(nl)
+		nl = nr
+		nr = tmp
+	}
+
+	t := (*gc.Type)(nl.Type)
+	w := int(t.Width * 8)
+	var n1 gc.Node
+	gc.Cgenr(nl, &n1, res)
+	var n2 gc.Node
+	gc.Cgenr(nr, &n2, nil)
+	switch gc.Simtype[t.Etype] {
+	case gc.TINT8,
+		gc.TINT16,
+		gc.TINT32:
+		gins(optoas(gc.OMUL, t), &n2, &n1)
+		p := (*obj.Prog)(gins(arm64.AASR, nil, &n1))
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = int64(w)
+
+	case gc.TUINT8,
+		gc.TUINT16,
+		gc.TUINT32:
+		gins(optoas(gc.OMUL, t), &n2, &n1)
+		p := (*obj.Prog)(gins(arm64.ALSR, nil, &n1))
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = int64(w)
+
+	case gc.TINT64,
+		gc.TUINT64:
+		if gc.Issigned[t.Etype] {
+			gins(arm64.ASMULH, &n2, &n1)
+		} else {
+			gins(arm64.AUMULH, &n2, &n1)
+		}
+
+	default:
+		gc.Fatal("cgen_hmul %v", t)
+	}
+
+	gc.Cgen(&n1, res)
+	gc.Regfree(&n1)
+	gc.Regfree(&n2)
+}
+
+/*
+ * generate shift according to op, one of:
+ *	res = nl << nr
+ *	res = nl >> nr
+ */
+func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+	a := int(optoas(op, nl.Type))
+
+	if nr.Op == gc.OLITERAL {
+		var n1 gc.Node
+		gc.Regalloc(&n1, nl.Type, res)
+		gc.Cgen(nl, &n1)
+		sc := uint64(nr.Int())
+		if sc >= uint64(nl.Type.Width*8) {
+			// large shift gets 2 shifts by width-1
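+			// (the hardware truncates the shift count, so a single
+			// large shift would be wrong; two shifts by width-1 yield
+			// 0, or all sign bits for an arithmetic right shift, as
+			// Go requires)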
+			var n3 gc.Node
+			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
+
+			gins(a, &n3, &n1)
+			gins(a, &n3, &n1)
+		} else {
+			gins(a, nr, &n1)
+		}
+		gmove(&n1, res)
+		gc.Regfree(&n1)
+		return
+	}
+
+	if nl.Ullman >= gc.UINF {
+		var n4 gc.Node
+		gc.Tempname(&n4, nl.Type)
+		gc.Cgen(nl, &n4)
+		nl = &n4
+	}
+
+	if nr.Ullman >= gc.UINF {
+		var n5 gc.Node
+		gc.Tempname(&n5, nr.Type)
+		gc.Cgen(nr, &n5)
+		nr = &n5
+	}
+
+	// Allow either uint32 or uint64 as shift type,
+	// to avoid unnecessary conversion from uint32 to uint64
+	// just to do the comparison.
+	tcount := gc.Types[gc.Simtype[nr.Type.Etype]]
+
+	if tcount.Etype < gc.TUINT32 {
+		tcount = gc.Types[gc.TUINT32]
+	}
+
+	var n1 gc.Node
+	gc.Regalloc(&n1, nr.Type, nil) // to hold the shift count
+	var n3 gc.Node
+	gc.Regalloc(&n3, tcount, &n1) // to clear high bits of the shift count
+
+	var n2 gc.Node
+	gc.Regalloc(&n2, nl.Type, res)
+
+	if nl.Ullman >= nr.Ullman {
+		gc.Cgen(nl, &n2)
+		gc.Cgen(nr, &n1)
+		gmove(&n1, &n3)
+	} else {
+		gc.Cgen(nr, &n1)
+		gmove(&n1, &n3)
+		gc.Cgen(nl, &n2)
+	}
+
+	gc.Regfree(&n3)
+
+	// test and fix up large shifts
+	if !bounded {
+		gc.Nodconst(&n3, tcount, nl.Type.Width*8)
+		gcmp(optoas(gc.OCMP, tcount), &n1, &n3)
+		p1 := gc.Gbranch(optoas(gc.OLT, tcount), nil, +1)
+		if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
+			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
+			gins(a, &n3, &n2)
+		} else {
+			gc.Nodconst(&n3, nl.Type, 0)
+			gmove(&n3, &n2)
+		}
+
+		gc.Patch(p1, gc.Pc)
+	}
+
+	gins(a, &n1, &n2)
+
+	gmove(&n2, res)
+
+	gc.Regfree(&n1)
+	gc.Regfree(&n2)
+}
+
+func clearfat(nl *gc.Node) {
+	/* clear a fat object */
+	if gc.Debug['g'] != 0 {
+		fmt.Printf("clearfat %v (%v, size: %d)\n", nl, nl.Type, nl.Type.Width)
+	}
+
+	w := uint64(nl.Type.Width)
+
+	// Avoid taking the address for simple enough types.
+	if gc.Componentgen(nil, nl) {
+		return
+	}
+
+	c := uint64(w % 8) // bytes
+	q := uint64(w / 8) // dwords
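+
+	// Three strategies by size: an explicit store loop for very large
+	// objects (q > 128), duffzero for medium ones (q >= 4, except on
+	// darwin), and fully unrolled MOVD stores otherwise; any 1- to
+	// 7-byte tail is cleared with MOVB stores.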
+
+	if reg[arm64.REGRT1-arm64.REG_R0] > 0 {
+		gc.Fatal("R%d in use during clearfat", arm64.REGRT1-arm64.REG_R0)
+	}
+
+	var r0 gc.Node
+	gc.Nodreg(&r0, gc.Types[gc.TUINT64], arm64.REGZERO)
+	var dst gc.Node
+	gc.Nodreg(&dst, gc.Types[gc.Tptr], arm64.REGRT1)
+	reg[arm64.REGRT1-arm64.REG_R0]++
+	gc.Agen(nl, &dst)
+
+	var boff uint64
+	if q > 128 {
+		p := gins(arm64.ASUB, nil, &dst)
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = 8
+
+		var end gc.Node
+		gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
+		p = gins(arm64.AMOVD, &dst, &end)
+		p.From.Type = obj.TYPE_ADDR
+		p.From.Offset = int64(q * 8)
+
+		p = gins(arm64.AMOVD, &r0, &dst)
+		p.To.Type = obj.TYPE_MEM
+		p.To.Offset = 8
+		p.Scond = arm64.C_XPRE
+		pl := p
+
+		p = gcmp(arm64.ACMP, &dst, &end)
+		gc.Patch(gc.Gbranch(arm64.ABNE, nil, 0), pl)
+
+		gc.Regfree(&end)
+
+		// The loop leaves R16 on the last zeroed dword
+		boff = 8
+	} else if q >= 4 && !darwin { // darwin ld64 cannot handle BR26 reloc with non-zero addend
+		p := gins(arm64.ASUB, nil, &dst)
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = 8
+		f := gc.Sysfunc("duffzero")
+		p = gins(obj.ADUFFZERO, nil, f)
+		gc.Afunclit(&p.To, f)
+
+		// 4 and 128 = magic constants: see ../../runtime/asm_arm64.s
+		p.To.Offset = int64(4 * (128 - q))
+
+		// duffzero leaves R16 on the last zeroed dword
+		boff = 8
+	} else {
+		var p *obj.Prog
+		for t := uint64(0); t < q; t++ {
+			p = gins(arm64.AMOVD, &r0, &dst)
+			p.To.Type = obj.TYPE_MEM
+			p.To.Offset = int64(8 * t)
+		}
+
+		boff = 8 * q
+	}
+
+	var p *obj.Prog
+	for t := uint64(0); t < c; t++ {
+		p = gins(arm64.AMOVB, &r0, &dst)
+		p.To.Type = obj.TYPE_MEM
+		p.To.Offset = int64(t + boff)
+	}
+
+	reg[arm64.REGRT1-arm64.REG_R0]--
+}
+
+// Called after regopt and peep have run.
+// Expand CHECKNIL pseudo-op into actual nil pointer check.
+func expandchecks(firstp *obj.Prog) {
+	var p1 *obj.Prog
+
+	for p := firstp; p != nil; p = p.Link {
+		if gc.Debug_checknil != 0 && gc.Ctxt.Debugvlog != 0 {
+			fmt.Printf("expandchecks: %v\n", p)
+		}
+		if p.As != obj.ACHECKNIL {
+			continue
+		}
+		if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
+			gc.Warnl(int(p.Lineno), "generated nil check")
+		}
+		if p.From.Type != obj.TYPE_REG {
+			gc.Fatal("invalid nil check %v\n", p)
+		}
+
+		// check is
+		//	CBNZ arg, 2(PC)
+		//	MOVD ZR, 0(arg)
+		p1 = gc.Ctxt.NewProg()
+		gc.Clearp(p1)
+		p1.Link = p.Link
+		p.Link = p1
+		p1.Lineno = p.Lineno
+		p1.Pc = 9999
+
+		p.As = arm64.ACBNZ
+		p.To.Type = obj.TYPE_BRANCH
+		p.To.Val = p1.Link
+
+		// crash by write to memory address 0.
+		p1.As = arm64.AMOVD
+		p1.From.Type = obj.TYPE_REG
+		p1.From.Reg = arm64.REGZERO
+		p1.To.Type = obj.TYPE_MEM
+		p1.To.Reg = p.From.Reg
+		p1.To.Offset = 0
+	}
+}
+
+// res = runtime.getg()
+func getg(res *gc.Node) {
+	var n1 gc.Node
+	gc.Nodreg(&n1, res.Type, arm64.REGG)
+	gmove(&n1, res)
+}
diff --git a/src/cmd/compile/internal/arm64/gsubr.go b/src/cmd/compile/internal/arm64/gsubr.go
new file mode 100644
index 0000000..0a14654
--- /dev/null
+++ b/src/cmd/compile/internal/arm64/gsubr.go
@@ -0,0 +1,983 @@
+// Derived from Inferno utils/6c/txt.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/txt.c
+//
+//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
+//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+//	Portions Copyright © 1997-1999 Vita Nuova Limited
+//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+//	Portions Copyright © 2004,2006 Bruce Ellis
+//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+//	Portions Copyright © 2009 The Go Authors.  All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package arm64
+
+import (
+	"cmd/compile/internal/gc"
+	"cmd/internal/obj"
+	"cmd/internal/obj/arm64"
+	"fmt"
+)
+
+var resvd = []int{
+	arm64.REGTMP,
+	arm64.REGG,
+	arm64.REGRT1,
+	arm64.REGRT2,
+	arm64.REG_R31, // REGZERO and REGSP
+	arm64.FREGZERO,
+	arm64.FREGHALF,
+	arm64.FREGONE,
+	arm64.FREGTWO,
+}
+
+/*
+ * generate
+ *	as $c, n
+ */
+func ginscon(as int, c int64, n2 *gc.Node) {
+	var n1 gc.Node
+
+	gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
+
+	if as != arm64.AMOVD && (c < -arm64.BIG || c > arm64.BIG) || as == arm64.AMUL || n2 != nil && n2.Op != gc.OREGISTER {
+		// The constant cannot be encoded directly in the instruction
+		// (immediate out of range, or no immediate form at all),
+		// so MOVD it into a register first.
+		var ntmp gc.Node
+		gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)
+
+		gins(arm64.AMOVD, &n1, &ntmp)
+		gins(as, &ntmp, n2)
+		gc.Regfree(&ntmp)
+		return
+	}
+
+	rawgins(as, &n1, n2)
+}
+
+/*
+ * generate
+ *	as n, $c (CMP)
+ */
+func ginscon2(as int, n2 *gc.Node, c int64) {
+	var n1 gc.Node
+
+	gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
+
+	switch as {
+	default:
+		gc.Fatal("ginscon2")
+
+	case arm64.ACMP:
+		if -arm64.BIG <= c && c <= arm64.BIG {
+			gcmp(as, n2, &n1)
+			return
+		}
+	}
+
+	// MOV n1 into register first
+	var ntmp gc.Node
+	gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)
+
+	rawgins(arm64.AMOVD, &n1, &ntmp)
+	gcmp(as, n2, &ntmp)
+	gc.Regfree(&ntmp)
+}
+
+func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
+	if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && n2.Op != gc.OLITERAL {
+		// Reverse comparison to place constant last.
+		op = gc.Brrev(op)
+		n1, n2 = n2, n1
+	}
+
+	var r1, r2, g1, g2 gc.Node
+	gc.Regalloc(&r1, t, n1)
+	gc.Regalloc(&g1, n1.Type, &r1)
+	gc.Cgen(n1, &g1)
+	gmove(&g1, &r1)
+	if gc.Isint[t.Etype] && gc.Isconst(n2, gc.CTINT) {
+		ginscon2(optoas(gc.OCMP, t), &r1, n2.Int())
+	} else {
+		gc.Regalloc(&r2, t, n2)
+		gc.Regalloc(&g2, n1.Type, &r2)
+		gc.Cgen(n2, &g2)
+		gmove(&g2, &r2)
+		gcmp(optoas(gc.OCMP, t), &r1, &r2)
+		gc.Regfree(&g2)
+		gc.Regfree(&r2)
+	}
+	gc.Regfree(&g1)
+	gc.Regfree(&r1)
+	return gc.Gbranch(optoas(op, t), nil, likely)
+}
+
+/*
+ * generate move:
+ *	t = f
+ * hard part is conversions.
+ */
+func gmove(f *gc.Node, t *gc.Node) {
+	if gc.Debug['M'] != 0 {
+		fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, obj.FmtLong), gc.Nconv(t, obj.FmtLong))
+	}
+
+	ft := int(gc.Simsimtype(f.Type))
+	tt := int(gc.Simsimtype(t.Type))
+	cvt := t.Type
+
+	if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
+		gc.Complexmove(f, t)
+		return
+	}
+
+	// cannot have two memory operands
+	var r1 gc.Node
+	var a int
+	if gc.Ismem(f) && gc.Ismem(t) {
+		goto hard
+	}
+
+	// convert constant to desired type
+	if f.Op == gc.OLITERAL {
+		var con gc.Node
+		switch tt {
+		default:
+			f.Convconst(&con, t.Type)
+
+		case gc.TINT32,
+			gc.TINT16,
+			gc.TINT8:
+			var con gc.Node
+			f.Convconst(&con, gc.Types[gc.TINT64])
+			var r1 gc.Node
+			gc.Regalloc(&r1, con.Type, t)
+			gins(arm64.AMOVD, &con, &r1)
+			gmove(&r1, t)
+			gc.Regfree(&r1)
+			return
+
+		case gc.TUINT32,
+			gc.TUINT16,
+			gc.TUINT8:
+			var con gc.Node
+			f.Convconst(&con, gc.Types[gc.TUINT64])
+			var r1 gc.Node
+			gc.Regalloc(&r1, con.Type, t)
+			gins(arm64.AMOVD, &con, &r1)
+			gmove(&r1, t)
+			gc.Regfree(&r1)
+			return
+		}
+
+		f = &con
+		ft = tt // so big switch will choose a simple mov
+
+		// constants can't move directly to memory.
+		if gc.Ismem(t) {
+			goto hard
+		}
+	}
+
+	// value -> value copy, first operand in memory.
+	// any floating point operand requires register
+	// src, so goto hard to copy to register first.
+	if gc.Ismem(f) && ft != tt && (gc.Isfloat[ft] || gc.Isfloat[tt]) {
+		cvt = gc.Types[ft]
+		goto hard
+	}
+
+	// value -> value copy, only one memory operand.
+	// figure out the instruction to use.
+	// break out of switch for one-instruction gins.
+	// goto rdst for "destination must be register".
+	// goto hard for "convert to cvt type first".
+	// otherwise handle and return.
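+	// The switch key packs the source and destination simple types
+	// into a single word (from<<16 | to), so each conversion pair is
+	// one case.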
+
+	switch uint32(ft)<<16 | uint32(tt) {
+	default:
+		gc.Fatal("gmove %v -> %v", gc.Tconv(f.Type, obj.FmtLong), gc.Tconv(t.Type, obj.FmtLong))
+
+		/*
+		 * integer copy and truncate
+		 */
+	case gc.TINT8<<16 | gc.TINT8, // same size
+		gc.TUINT8<<16 | gc.TINT8,
+		gc.TINT16<<16 | gc.TINT8,
+		// truncate
+		gc.TUINT16<<16 | gc.TINT8,
+		gc.TINT32<<16 | gc.TINT8,
+		gc.TUINT32<<16 | gc.TINT8,
+		gc.TINT64<<16 | gc.TINT8,
+		gc.TUINT64<<16 | gc.TINT8:
+		a = arm64.AMOVB
+
+	case gc.TINT8<<16 | gc.TUINT8, // same size
+		gc.TUINT8<<16 | gc.TUINT8,
+		gc.TINT16<<16 | gc.TUINT8,
+		// truncate
+		gc.TUINT16<<16 | gc.TUINT8,
+		gc.TINT32<<16 | gc.TUINT8,
+		gc.TUINT32<<16 | gc.TUINT8,
+		gc.TINT64<<16 | gc.TUINT8,
+		gc.TUINT64<<16 | gc.TUINT8:
+		a = arm64.AMOVBU
+
+	case gc.TINT16<<16 | gc.TINT16, // same size
+		gc.TUINT16<<16 | gc.TINT16,
+		gc.TINT32<<16 | gc.TINT16,
+		// truncate
+		gc.TUINT32<<16 | gc.TINT16,
+		gc.TINT64<<16 | gc.TINT16,
+		gc.TUINT64<<16 | gc.TINT16:
+		a = arm64.AMOVH
+
+	case gc.TINT16<<16 | gc.TUINT16, // same size
+		gc.TUINT16<<16 | gc.TUINT16,
+		gc.TINT32<<16 | gc.TUINT16,
+		// truncate
+		gc.TUINT32<<16 | gc.TUINT16,
+		gc.TINT64<<16 | gc.TUINT16,
+		gc.TUINT64<<16 | gc.TUINT16:
+		a = arm64.AMOVHU
+
+	case gc.TINT32<<16 | gc.TINT32, // same size
+		gc.TUINT32<<16 | gc.TINT32,
+		gc.TINT64<<16 | gc.TINT32,
+		// truncate
+		gc.TUINT64<<16 | gc.TINT32:
+		a = arm64.AMOVW
+
+	case gc.TINT32<<16 | gc.TUINT32, // same size
+		gc.TUINT32<<16 | gc.TUINT32,
+		gc.TINT64<<16 | gc.TUINT32,
+		gc.TUINT64<<16 | gc.TUINT32:
+		a = arm64.AMOVWU
+
+	case gc.TINT64<<16 | gc.TINT64, // same size
+		gc.TINT64<<16 | gc.TUINT64,
+		gc.TUINT64<<16 | gc.TINT64,
+		gc.TUINT64<<16 | gc.TUINT64:
+		a = arm64.AMOVD
+
+		/*
+		 * integer up-conversions
+		 */
+	case gc.TINT8<<16 | gc.TINT16, // sign extend int8
+		gc.TINT8<<16 | gc.TUINT16,
+		gc.TINT8<<16 | gc.TINT32,
+		gc.TINT8<<16 | gc.TUINT32,
+		gc.TINT8<<16 | gc.TINT64,
+		gc.TINT8<<16 | gc.TUINT64:
+		a = arm64.AMOVB
+
+		goto rdst
+
+	case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8
+		gc.TUINT8<<16 | gc.TUINT16,
+		gc.TUINT8<<16 | gc.TINT32,
+		gc.TUINT8<<16 | gc.TUINT32,
+		gc.TUINT8<<16 | gc.TINT64,
+		gc.TUINT8<<16 | gc.TUINT64:
+		a = arm64.AMOVBU
+
+		goto rdst
+
+	case gc.TINT16<<16 | gc.TINT32, // sign extend int16
+		gc.TINT16<<16 | gc.TUINT32,
+		gc.TINT16<<16 | gc.TINT64,
+		gc.TINT16<<16 | gc.TUINT64:
+		a = arm64.AMOVH
+
+		goto rdst
+
+	case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16
+		gc.TUINT16<<16 | gc.TUINT32,
+		gc.TUINT16<<16 | gc.TINT64,
+		gc.TUINT16<<16 | gc.TUINT64:
+		a = arm64.AMOVHU
+
+		goto rdst
+
+	case gc.TINT32<<16 | gc.TINT64, // sign extend int32
+		gc.TINT32<<16 | gc.TUINT64:
+		a = arm64.AMOVW
+
+		goto rdst
+
+	case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
+		gc.TUINT32<<16 | gc.TUINT64:
+		a = arm64.AMOVWU
+
+		goto rdst
+
+	/*
+	 * float to integer
+	 */
+	case gc.TFLOAT32<<16 | gc.TINT32:
+		a = arm64.AFCVTZSSW
+		goto rdst
+
+	case gc.TFLOAT64<<16 | gc.TINT32:
+		a = arm64.AFCVTZSDW
+		goto rdst
+
+	case gc.TFLOAT32<<16 | gc.TINT64:
+		a = arm64.AFCVTZSS
+		goto rdst
+
+	case gc.TFLOAT64<<16 | gc.TINT64:
+		a = arm64.AFCVTZSD
+		goto rdst
+
+	case gc.TFLOAT32<<16 | gc.TUINT32:
+		a = arm64.AFCVTZUSW
+		goto rdst
+
+	case gc.TFLOAT64<<16 | gc.TUINT32:
+		a = arm64.AFCVTZUDW
+		goto rdst
+
+	case gc.TFLOAT32<<16 | gc.TUINT64:
+		a = arm64.AFCVTZUS
+		goto rdst
+
+	case gc.TFLOAT64<<16 | gc.TUINT64:
+		a = arm64.AFCVTZUD
+		goto rdst
+
+	case gc.TFLOAT32<<16 | gc.TINT16,
+		gc.TFLOAT32<<16 | gc.TINT8,
+		gc.TFLOAT64<<16 | gc.TINT16,
+		gc.TFLOAT64<<16 | gc.TINT8:
+		cvt = gc.Types[gc.TINT32]
+
+		goto hard
+
+	case gc.TFLOAT32<<16 | gc.TUINT16,
+		gc.TFLOAT32<<16 | gc.TUINT8,
+		gc.TFLOAT64<<16 | gc.TUINT16,
+		gc.TFLOAT64<<16 | gc.TUINT8:
+		cvt = gc.Types[gc.TUINT32]
+
+		goto hard
+
+	/*
+	 * integer to float
+	 */
+	case gc.TINT8<<16 | gc.TFLOAT32,
+		gc.TINT16<<16 | gc.TFLOAT32,
+		gc.TINT32<<16 | gc.TFLOAT32:
+		a = arm64.ASCVTFWS
+
+		goto rdst
+
+	case gc.TINT8<<16 | gc.TFLOAT64,
+		gc.TINT16<<16 | gc.TFLOAT64,
+		gc.TINT32<<16 | gc.TFLOAT64:
+		a = arm64.ASCVTFWD
+
+		goto rdst
+
+	case gc.TINT64<<16 | gc.TFLOAT32:
+		a = arm64.ASCVTFS
+		goto rdst
+
+	case gc.TINT64<<16 | gc.TFLOAT64:
+		a = arm64.ASCVTFD
+		goto rdst
+
+	case gc.TUINT8<<16 | gc.TFLOAT32,
+		gc.TUINT16<<16 | gc.TFLOAT32,
+		gc.TUINT32<<16 | gc.TFLOAT32:
+		a = arm64.AUCVTFWS
+
+		goto rdst
+
+	case gc.TUINT8<<16 | gc.TFLOAT64,
+		gc.TUINT16<<16 | gc.TFLOAT64,
+		gc.TUINT32<<16 | gc.TFLOAT64:
+		a = arm64.AUCVTFWD
+
+		goto rdst
+
+	case gc.TUINT64<<16 | gc.TFLOAT32:
+		a = arm64.AUCVTFS
+		goto rdst
+
+	case gc.TUINT64<<16 | gc.TFLOAT64:
+		a = arm64.AUCVTFD
+		goto rdst
+
+		/*
+		 * float to float
+		 */
+	case gc.TFLOAT32<<16 | gc.TFLOAT32:
+		a = arm64.AFMOVS
+
+	case gc.TFLOAT64<<16 | gc.TFLOAT64:
+		a = arm64.AFMOVD
+
+	case gc.TFLOAT32<<16 | gc.TFLOAT64:
+		a = arm64.AFCVTSD
+		goto rdst
+
+	case gc.TFLOAT64<<16 | gc.TFLOAT32:
+		a = arm64.AFCVTDS
+		goto rdst
+	}
+
+	gins(a, f, t)
+	return
+
+	// requires register destination
+rdst:
+	gc.Regalloc(&r1, t.Type, t)
+
+	gins(a, f, &r1)
+	gmove(&r1, t)
+	gc.Regfree(&r1)
+	return
+
+	// requires register intermediate
+hard:
+	gc.Regalloc(&r1, cvt, t)
+
+	gmove(f, &r1)
+	gmove(&r1, t)
+	gc.Regfree(&r1)
+	return
+}
+
+func intLiteral(n *gc.Node) (x int64, ok bool) {
+	switch {
+	case n == nil:
+		return
+	case gc.Isconst(n, gc.CTINT):
+		return n.Int(), true
+	case gc.Isconst(n, gc.CTBOOL):
+		return int64(obj.Bool2int(n.Bool())), true
+	}
+	return
+}
+
+// gins is called by the front end.
+// It synthesizes some multiple-instruction sequences
+// so the front end can stay simpler.
+func gins(as int, f, t *gc.Node) *obj.Prog {
+	if as >= obj.A_ARCHSPECIFIC {
+		if x, ok := intLiteral(f); ok {
+			ginscon(as, x, t)
+			return nil // caller must not use
+		}
+	}
+	if as == arm64.ACMP {
+		if x, ok := intLiteral(t); ok {
+			ginscon2(as, f, x)
+			return nil // caller must not use
+		}
+	}
+	return rawgins(as, f, t)
+}
+
+/*
+ * generate one instruction:
+ *	as f, t
+ */
+func rawgins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
+	// TODO(austin): Add self-move test like in 6g (but be careful
+	// of truncation moves)
+
+	p := gc.Prog(as)
+	gc.Naddr(&p.From, f)
+	gc.Naddr(&p.To, t)
+
+	switch as {
+	case arm64.ACMP, arm64.AFCMPS, arm64.AFCMPD:
+		if t != nil {
+			if f.Op != gc.OREGISTER {
+				gc.Fatal("bad operands to gcmp")
+			}
+			p.From = p.To
+			p.To = obj.Addr{}
+			raddr(f, p)
+		}
+	}
+
+	// Bad things the front end has done to us. Crash to find call stack.
+	switch as {
+	case arm64.AAND, arm64.AMUL:
+		if p.From.Type == obj.TYPE_CONST {
+			gc.Debug['h'] = 1
+			gc.Fatal("bad inst: %v", p)
+		}
+	case arm64.ACMP:
+		if p.From.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_MEM {
+			gc.Debug['h'] = 1
+			gc.Fatal("bad inst: %v", p)
+		}
+	}
+
+	if gc.Debug['g'] != 0 {
+		fmt.Printf("%v\n", p)
+	}
+
+	w := int32(0)
+	switch as {
+	case arm64.AMOVB,
+		arm64.AMOVBU:
+		w = 1
+
+	case arm64.AMOVH,
+		arm64.AMOVHU:
+		w = 2
+
+	case arm64.AMOVW,
+		arm64.AMOVWU:
+		w = 4
+
+	case arm64.AMOVD:
+		if p.From.Type == obj.TYPE_CONST || p.From.Type == obj.TYPE_ADDR {
+			break
+		}
+		w = 8
+	}
+
+	if w != 0 && ((f != nil && p.From.Width < int64(w)) || (t != nil && p.To.Type != obj.TYPE_REG && p.To.Width > int64(w))) {
+		gc.Dump("f", f)
+		gc.Dump("t", t)
+		gc.Fatal("bad width: %v (%d, %d)\n", p, p.From.Width, p.To.Width)
+	}
+
+	return p
+}
+
+/*
+ * insert n into reg slot of p
+ */
+func raddr(n *gc.Node, p *obj.Prog) {
+	var a obj.Addr
+
+	gc.Naddr(&a, n)
+	if a.Type != obj.TYPE_REG {
+		if n != nil {
+			gc.Fatal("bad in raddr: %v", gc.Oconv(int(n.Op), 0))
+		} else {
+			gc.Fatal("bad in raddr: <null>")
+		}
+		p.Reg = 0
+	} else {
+		p.Reg = a.Reg
+	}
+}
+
+func gcmp(as int, lhs *gc.Node, rhs *gc.Node) *obj.Prog {
+	if lhs.Op != gc.OREGISTER {
+		gc.Fatal("bad operands to gcmp: %v %v", gc.Oconv(int(lhs.Op), 0), gc.Oconv(int(rhs.Op), 0))
+	}
+
+	p := rawgins(as, rhs, nil)
+	raddr(lhs, p)
+	return p
+}
+
+/*
+ * return Axxx for Oxxx on type t.
+ */
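+// For example, optoas(gc.OADD, gc.Types[gc.TINT64]) is arm64.AADD,
+// and optoas(gc.OLT, gc.Types[gc.TUINT32]) is the unsigned branch
+// arm64.ABLO.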
+func optoas(op int, t *gc.Type) int {
+	if t == nil {
+		gc.Fatal("optoas: t is nil")
+	}
+
+	a := int(obj.AXXX)
+	switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
+	default:
+		gc.Fatal("optoas: no entry for op=%v type=%v", gc.Oconv(int(op), 0), t)
+
+	case gc.OEQ<<16 | gc.TBOOL,
+		gc.OEQ<<16 | gc.TINT8,
+		gc.OEQ<<16 | gc.TUINT8,
+		gc.OEQ<<16 | gc.TINT16,
+		gc.OEQ<<16 | gc.TUINT16,
+		gc.OEQ<<16 | gc.TINT32,
+		gc.OEQ<<16 | gc.TUINT32,
+		gc.OEQ<<16 | gc.TINT64,
+		gc.OEQ<<16 | gc.TUINT64,
+		gc.OEQ<<16 | gc.TPTR32,
+		gc.OEQ<<16 | gc.TPTR64,
+		gc.OEQ<<16 | gc.TFLOAT32,
+		gc.OEQ<<16 | gc.TFLOAT64:
+		a = arm64.ABEQ
+
+	case gc.ONE<<16 | gc.TBOOL,
+		gc.ONE<<16 | gc.TINT8,
+		gc.ONE<<16 | gc.TUINT8,
+		gc.ONE<<16 | gc.TINT16,
+		gc.ONE<<16 | gc.TUINT16,
+		gc.ONE<<16 | gc.TINT32,
+		gc.ONE<<16 | gc.TUINT32,
+		gc.ONE<<16 | gc.TINT64,
+		gc.ONE<<16 | gc.TUINT64,
+		gc.ONE<<16 | gc.TPTR32,
+		gc.ONE<<16 | gc.TPTR64,
+		gc.ONE<<16 | gc.TFLOAT32,
+		gc.ONE<<16 | gc.TFLOAT64:
+		a = arm64.ABNE
+
+	case gc.OLT<<16 | gc.TINT8,
+		gc.OLT<<16 | gc.TINT16,
+		gc.OLT<<16 | gc.TINT32,
+		gc.OLT<<16 | gc.TINT64:
+		a = arm64.ABLT
+
+	case gc.OLT<<16 | gc.TUINT8,
+		gc.OLT<<16 | gc.TUINT16,
+		gc.OLT<<16 | gc.TUINT32,
+		gc.OLT<<16 | gc.TUINT64,
+		gc.OLT<<16 | gc.TFLOAT32,
+		gc.OLT<<16 | gc.TFLOAT64:
+		a = arm64.ABLO
+
+	case gc.OLE<<16 | gc.TINT8,
+		gc.OLE<<16 | gc.TINT16,
+		gc.OLE<<16 | gc.TINT32,
+		gc.OLE<<16 | gc.TINT64:
+		a = arm64.ABLE
+
+	case gc.OLE<<16 | gc.TUINT8,
+		gc.OLE<<16 | gc.TUINT16,
+		gc.OLE<<16 | gc.TUINT32,
+		gc.OLE<<16 | gc.TUINT64,
+		gc.OLE<<16 | gc.TFLOAT32,
+		gc.OLE<<16 | gc.TFLOAT64:
+		a = arm64.ABLS
+
+	case gc.OGT<<16 | gc.TINT8,
+		gc.OGT<<16 | gc.TINT16,
+		gc.OGT<<16 | gc.TINT32,
+		gc.OGT<<16 | gc.TINT64,
+		gc.OGT<<16 | gc.TFLOAT32,
+		gc.OGT<<16 | gc.TFLOAT64:
+		a = arm64.ABGT
+
+	case gc.OGT<<16 | gc.TUINT8,
+		gc.OGT<<16 | gc.TUINT16,
+		gc.OGT<<16 | gc.TUINT32,
+		gc.OGT<<16 | gc.TUINT64:
+		a = arm64.ABHI
+
+	case gc.OGE<<16 | gc.TINT8,
+		gc.OGE<<16 | gc.TINT16,
+		gc.OGE<<16 | gc.TINT32,
+		gc.OGE<<16 | gc.TINT64,
+		gc.OGE<<16 | gc.TFLOAT32,
+		gc.OGE<<16 | gc.TFLOAT64:
+		a = arm64.ABGE
+
+	case gc.OGE<<16 | gc.TUINT8,
+		gc.OGE<<16 | gc.TUINT16,
+		gc.OGE<<16 | gc.TUINT32,
+		gc.OGE<<16 | gc.TUINT64:
+		a = arm64.ABHS
+
+	case gc.OCMP<<16 | gc.TBOOL,
+		gc.OCMP<<16 | gc.TINT8,
+		gc.OCMP<<16 | gc.TINT16,
+		gc.OCMP<<16 | gc.TINT32,
+		gc.OCMP<<16 | gc.TPTR32,
+		gc.OCMP<<16 | gc.TINT64,
+		gc.OCMP<<16 | gc.TUINT8,
+		gc.OCMP<<16 | gc.TUINT16,
+		gc.OCMP<<16 | gc.TUINT32,
+		gc.OCMP<<16 | gc.TUINT64,
+		gc.OCMP<<16 | gc.TPTR64:
+		a = arm64.ACMP
+
+	case gc.OCMP<<16 | gc.TFLOAT32:
+		a = arm64.AFCMPS
+
+	case gc.OCMP<<16 | gc.TFLOAT64:
+		a = arm64.AFCMPD
+
+	case gc.OAS<<16 | gc.TBOOL,
+		gc.OAS<<16 | gc.TINT8:
+		a = arm64.AMOVB
+
+	case gc.OAS<<16 | gc.TUINT8:
+		a = arm64.AMOVBU
+
+	case gc.OAS<<16 | gc.TINT16:
+		a = arm64.AMOVH
+
+	case gc.OAS<<16 | gc.TUINT16:
+		a = arm64.AMOVHU
+
+	case gc.OAS<<16 | gc.TINT32:
+		a = arm64.AMOVW
+
+	case gc.OAS<<16 | gc.TUINT32,
+		gc.OAS<<16 | gc.TPTR32:
+		a = arm64.AMOVWU
+
+	case gc.OAS<<16 | gc.TINT64,
+		gc.OAS<<16 | gc.TUINT64,
+		gc.OAS<<16 | gc.TPTR64:
+		a = arm64.AMOVD
+
+	case gc.OAS<<16 | gc.TFLOAT32:
+		a = arm64.AFMOVS
+
+	case gc.OAS<<16 | gc.TFLOAT64:
+		a = arm64.AFMOVD
+
+	case gc.OADD<<16 | gc.TINT8,
+		gc.OADD<<16 | gc.TUINT8,
+		gc.OADD<<16 | gc.TINT16,
+		gc.OADD<<16 | gc.TUINT16,
+		gc.OADD<<16 | gc.TINT32,
+		gc.OADD<<16 | gc.TUINT32,
+		gc.OADD<<16 | gc.TPTR32,
+		gc.OADD<<16 | gc.TINT64,
+		gc.OADD<<16 | gc.TUINT64,
+		gc.OADD<<16 | gc.TPTR64:
+		a = arm64.AADD
+
+	case gc.OADD<<16 | gc.TFLOAT32:
+		a = arm64.AFADDS
+
+	case gc.OADD<<16 | gc.TFLOAT64:
+		a = arm64.AFADDD
+
+	case gc.OSUB<<16 | gc.TINT8,
+		gc.OSUB<<16 | gc.TUINT8,
+		gc.OSUB<<16 | gc.TINT16,
+		gc.OSUB<<16 | gc.TUINT16,
+		gc.OSUB<<16 | gc.TINT32,
+		gc.OSUB<<16 | gc.TUINT32,
+		gc.OSUB<<16 | gc.TPTR32,
+		gc.OSUB<<16 | gc.TINT64,
+		gc.OSUB<<16 | gc.TUINT64,
+		gc.OSUB<<16 | gc.TPTR64:
+		a = arm64.ASUB
+
+	case gc.OSUB<<16 | gc.TFLOAT32:
+		a = arm64.AFSUBS
+
+	case gc.OSUB<<16 | gc.TFLOAT64:
+		a = arm64.AFSUBD
+
+	case gc.OMINUS<<16 | gc.TINT8,
+		gc.OMINUS<<16 | gc.TUINT8,
+		gc.OMINUS<<16 | gc.TINT16,
+		gc.OMINUS<<16 | gc.TUINT16,
+		gc.OMINUS<<16 | gc.TINT32,
+		gc.OMINUS<<16 | gc.TUINT32,
+		gc.OMINUS<<16 | gc.TPTR32,
+		gc.OMINUS<<16 | gc.TINT64,
+		gc.OMINUS<<16 | gc.TUINT64,
+		gc.OMINUS<<16 | gc.TPTR64:
+		a = arm64.ANEG
+
+	case gc.OMINUS<<16 | gc.TFLOAT32:
+		a = arm64.AFNEGS
+
+	case gc.OMINUS<<16 | gc.TFLOAT64:
+		a = arm64.AFNEGD
+
+	case gc.OAND<<16 | gc.TINT8,
+		gc.OAND<<16 | gc.TUINT8,
+		gc.OAND<<16 | gc.TINT16,
+		gc.OAND<<16 | gc.TUINT16,
+		gc.OAND<<16 | gc.TINT32,
+		gc.OAND<<16 | gc.TUINT32,
+		gc.OAND<<16 | gc.TPTR32,
+		gc.OAND<<16 | gc.TINT64,
+		gc.OAND<<16 | gc.TUINT64,
+		gc.OAND<<16 | gc.TPTR64:
+		a = arm64.AAND
+
+	case gc.OOR<<16 | gc.TINT8,
+		gc.OOR<<16 | gc.TUINT8,
+		gc.OOR<<16 | gc.TINT16,
+		gc.OOR<<16 | gc.TUINT16,
+		gc.OOR<<16 | gc.TINT32,
+		gc.OOR<<16 | gc.TUINT32,
+		gc.OOR<<16 | gc.TPTR32,
+		gc.OOR<<16 | gc.TINT64,
+		gc.OOR<<16 | gc.TUINT64,
+		gc.OOR<<16 | gc.TPTR64:
+		a = arm64.AORR
+
+	case gc.OXOR<<16 | gc.TINT8,
+		gc.OXOR<<16 | gc.TUINT8,
+		gc.OXOR<<16 | gc.TINT16,
+		gc.OXOR<<16 | gc.TUINT16,
+		gc.OXOR<<16 | gc.TINT32,
+		gc.OXOR<<16 | gc.TUINT32,
+		gc.OXOR<<16 | gc.TPTR32,
+		gc.OXOR<<16 | gc.TINT64,
+		gc.OXOR<<16 | gc.TUINT64,
+		gc.OXOR<<16 | gc.TPTR64:
+		a = arm64.AEOR
+
+		// TODO(minux): handle rotates
+	//case CASE(OLROT, TINT8):
+	//case CASE(OLROT, TUINT8):
+	//case CASE(OLROT, TINT16):
+	//case CASE(OLROT, TUINT16):
+	//case CASE(OLROT, TINT32):
+	//case CASE(OLROT, TUINT32):
+	//case CASE(OLROT, TPTR32):
+	//case CASE(OLROT, TINT64):
+	//case CASE(OLROT, TUINT64):
+	//case CASE(OLROT, TPTR64):
+	//	a = 0//???; RLDC?
+	//	break;
+
+	case gc.OLSH<<16 | gc.TINT8,
+		gc.OLSH<<16 | gc.TUINT8,
+		gc.OLSH<<16 | gc.TINT16,
+		gc.OLSH<<16 | gc.TUINT16,
+		gc.OLSH<<16 | gc.TINT32,
+		gc.OLSH<<16 | gc.TUINT32,
+		gc.OLSH<<16 | gc.TPTR32,
+		gc.OLSH<<16 | gc.TINT64,
+		gc.OLSH<<16 | gc.TUINT64,
+		gc.OLSH<<16 | gc.TPTR64:
+		a = arm64.ALSL
+
+	case gc.ORSH<<16 | gc.TUINT8,
+		gc.ORSH<<16 | gc.TUINT16,
+		gc.ORSH<<16 | gc.TUINT32,
+		gc.ORSH<<16 | gc.TPTR32,
+		gc.ORSH<<16 | gc.TUINT64,
+		gc.ORSH<<16 | gc.TPTR64:
+		a = arm64.ALSR
+
+	case gc.ORSH<<16 | gc.TINT8,
+		gc.ORSH<<16 | gc.TINT16,
+		gc.ORSH<<16 | gc.TINT32,
+		gc.ORSH<<16 | gc.TINT64:
+		a = arm64.AASR
+
+		// TODO(minux): handle rotates
+	//case CASE(ORROTC, TINT8):
+	//case CASE(ORROTC, TUINT8):
+	//case CASE(ORROTC, TINT16):
+	//case CASE(ORROTC, TUINT16):
+	//case CASE(ORROTC, TINT32):
+	//case CASE(ORROTC, TUINT32):
+	//case CASE(ORROTC, TINT64):
+	//case CASE(ORROTC, TUINT64):
+	//	a = 0//??? RLDC??
+	//	break;
+
+	case gc.OHMUL<<16 | gc.TINT64:
+		a = arm64.ASMULH
+
+	case gc.OHMUL<<16 | gc.TUINT64,
+		gc.OHMUL<<16 | gc.TPTR64:
+		a = arm64.AUMULH
+
+	case gc.OMUL<<16 | gc.TINT8,
+		gc.OMUL<<16 | gc.TINT16,
+		gc.OMUL<<16 | gc.TINT32:
+		a = arm64.ASMULL
+
+	case gc.OMUL<<16 | gc.TINT64:
+		a = arm64.AMUL
+
+	case gc.OMUL<<16 | gc.TUINT8,
+		gc.OMUL<<16 | gc.TUINT16,
+		gc.OMUL<<16 | gc.TUINT32,
+		gc.OMUL<<16 | gc.TPTR32:
+		// don't use word multiply, the high 32-bit are undefined.
+		a = arm64.AUMULL
+
+	case gc.OMUL<<16 | gc.TUINT64,
+		gc.OMUL<<16 | gc.TPTR64:
+		a = arm64.AMUL // for 64-bit multiplies, signedness doesn't matter.
+
+	case gc.OMUL<<16 | gc.TFLOAT32:
+		a = arm64.AFMULS
+
+	case gc.OMUL<<16 | gc.TFLOAT64:
+		a = arm64.AFMULD
+
+	case gc.ODIV<<16 | gc.TINT8,
+		gc.ODIV<<16 | gc.TINT16,
+		gc.ODIV<<16 | gc.TINT32,
+		gc.ODIV<<16 | gc.TINT64:
+		a = arm64.ASDIV
+
+	case gc.ODIV<<16 | gc.TUINT8,
+		gc.ODIV<<16 | gc.TUINT16,
+		gc.ODIV<<16 | gc.TUINT32,
+		gc.ODIV<<16 | gc.TPTR32,
+		gc.ODIV<<16 | gc.TUINT64,
+		gc.ODIV<<16 | gc.TPTR64:
+		a = arm64.AUDIV
+
+	case gc.ODIV<<16 | gc.TFLOAT32:
+		a = arm64.AFDIVS
+
+	case gc.ODIV<<16 | gc.TFLOAT64:
+		a = arm64.AFDIVD
+
+	case gc.OSQRT<<16 | gc.TFLOAT64:
+		a = arm64.AFSQRTD
+	}
+
+	return a
+}
+
+const (
+	ODynam   = 1 << 0
+	OAddable = 1 << 1
+)
+
+func xgen(n *gc.Node, a *gc.Node, o int) bool {
+	// TODO(minux)
+	return true
+}
+
+func sudoclean() {
+	return
+}
+
+/*
+ * generate code to compute address of n,
+ * a reference to a (perhaps nested) field inside
+ * an array or struct.
+ * return false on failure, true on success.
+ * on success, leaves usable address in a.
+ *
+ * caller is responsible for calling sudoclean
+ * after successful sudoaddable,
+ * to release the register used for a.
+ */
+func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
+	// TODO(minux)
+
+	*a = obj.Addr{}
+	return false
+}
diff --git a/src/cmd/compile/internal/arm64/peep.go b/src/cmd/compile/internal/arm64/peep.go
new file mode 100644
index 0000000..1c3b289
--- /dev/null
+++ b/src/cmd/compile/internal/arm64/peep.go
@@ -0,0 +1,809 @@
+// Derived from Inferno utils/6c/peep.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/peep.c
+//
+//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
+//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+//	Portions Copyright © 1997-1999 Vita Nuova Limited
+//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+//	Portions Copyright © 2004,2006 Bruce Ellis
+//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+//	Portions Copyright © 2009 The Go Authors.  All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package arm64
+
+import (
+	"cmd/compile/internal/gc"
+	"cmd/internal/obj"
+	"cmd/internal/obj/arm64"
+	"fmt"
+)
+
+var gactive uint32
+
+func peep(firstp *obj.Prog) {
+	g := gc.Flowstart(firstp, nil)
+	if g == nil {
+		return
+	}
+	gactive = 0
+
+	var p *obj.Prog
+	var r *gc.Flow
+	var t int
+loop1:
+	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+		gc.Dumpit("loop1", g.Start, 0)
+	}
+
+	t = 0
+	for r = g.Start; r != nil; r = r.Link {
+		p = r.Prog
+
+		// TODO(minux) Handle smaller moves. arm and amd64
+		// distinguish between moves that *must* sign/zero
+		// extend and moves that don't care so they
+		// can eliminate moves that don't care without
+		// breaking moves that do care. This might let us
+		// simplify or remove the next peep loop, too.
+		// Try to eliminate reg->reg moves.
+		if (p.As == arm64.AMOVD || p.As == arm64.AFMOVD) &&
+			regtyp(&p.From) && regtyp(&p.To) && p.From.Type == p.To.Type {
+			if copyprop(r) {
+				excise(r)
+				t++
+			} else if subprop(r) && copyprop(r) {
+				excise(r)
+				t++
+			}
+		}
+	}
+
+	if t != 0 {
+		goto loop1
+	}
+
+	/*
+	 * look for MOVB x,R; MOVB R,R (for small MOVs not handled above)
+	 */
+	var p1 *obj.Prog
+	var r1 *gc.Flow
+	for r := g.Start; r != nil; r = r.Link {
+		p = r.Prog
+		switch p.As {
+		default:
+			continue
+
+		case arm64.AMOVH,
+			arm64.AMOVHU,
+			arm64.AMOVB,
+			arm64.AMOVBU,
+			arm64.AMOVW,
+			arm64.AMOVWU:
+			if p.To.Type != obj.TYPE_REG {
+				continue
+			}
+		}
+
+		r1 = r.Link
+		if r1 == nil {
+			continue
+		}
+		p1 = r1.Prog
+		if p1.As != p.As {
+			continue
+		}
+		if p1.From.Type != obj.TYPE_REG || p1.From.Reg != p.To.Reg {
+			continue
+		}
+		if p1.To.Type != obj.TYPE_REG || p1.To.Reg != p.To.Reg {
+			continue
+		}
+		excise(r1)
+	}
+
+	if gc.Debug['D'] > 1 {
+		goto ret /* allow following code improvement to be suppressed */
+	}
+
+	// MOVD $c, R'; ADD R', R (R' unused) -> ADD $c, R
+	for r := g.Start; r != nil; r = r.Link {
+		p = r.Prog
+		switch p.As {
+		default:
+			continue
+
+		case arm64.AMOVD:
+			if p.To.Type != obj.TYPE_REG {
+				continue
+			}
+			if p.From.Type != obj.TYPE_CONST {
+				continue
+			}
+			if p.From.Offset < 0 || 4096 <= p.From.Offset {
+				continue
+			}
+		}
+		r1 = r.Link
+		if r1 == nil {
+			continue
+		}
+		p1 = r1.Prog
+		if p1.As != arm64.AADD && p1.As != arm64.ASUB { // TODO(aram): also logical after we have bimm.
+			continue
+		}
+		if p1.From.Type != obj.TYPE_REG || p1.From.Reg != p.To.Reg {
+			continue
+		}
+		if p1.To.Type != obj.TYPE_REG {
+			continue
+		}
+		if gc.Debug['P'] != 0 {
+			fmt.Printf("encoding $%d directly into %v in:\n%v\n%v\n", p.From.Offset, obj.Aconv(int(p1.As)), p, p1)
+		}
+		p1.From = p.From // p.From is TYPE_CONST, checked above
+		excise(r)
+	}
+
+	/* TODO(minux):
+	 * look for OP x,y,R; CMP R, $0 -> OP.S x,y,R
+	 * when OP can set condition codes correctly
+	 */
+
+ret:
+	gc.Flowend(g)
+}
+
+func excise(r *gc.Flow) {
+	p := r.Prog
+	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+		fmt.Printf("%v ===delete===\n", p)
+	}
+	obj.Nopout(p)
+	gc.Ostats.Ndelmov++
+}
+
+func regtyp(a *obj.Addr) bool {
+	// TODO(rsc): Floating point register exclusions?
+	return a.Type == obj.TYPE_REG && arm64.REG_R0 <= a.Reg && a.Reg <= arm64.REG_F31 && a.Reg != arm64.REGZERO
+}
+
+/*
+ * the idea is to substitute
+ * one register for another
+ * from one MOV to another
+ *	MOV	a, R1
+ *	ADD	b, R1	/ no use of R2
+ *	MOV	R1, R2
+ * would be converted to
+ *	MOV	a, R2
+ *	ADD	b, R2
+ *	MOV	R2, R1
+ * hopefully, then the former or latter MOV
+ * will be eliminated by copy propagation.
+ *
+ * r0 (the argument, not the register) is the MOV at the end of the
+ * above sequences. This returns true if it modified any instructions.
+ */
+func subprop(r0 *gc.Flow) bool {
+	p := r0.Prog
+	v1 := &p.From
+	if !regtyp(v1) {
+		return false
+	}
+	v2 := &p.To
+	if !regtyp(v2) {
+		return false
+	}
+	for r := gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
+		if gc.Uniqs(r) == nil {
+			break
+		}
+		p = r.Prog
+		if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
+			continue
+		}
+		if p.Info.Flags&gc.Call != 0 {
+			return false
+		}
+
+		if p.Info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightWrite {
+			if p.To.Type == v1.Type {
+				if p.To.Reg == v1.Reg {
+					copysub(&p.To, v1, v2, 1)
+					if gc.Debug['P'] != 0 {
+						fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
+						if p.From.Type == v2.Type {
+							fmt.Printf(" excise")
+						}
+						fmt.Printf("\n")
+					}
+
+					for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
+						p = r.Prog
+						copysub(&p.From, v1, v2, 1)
+						copysub1(p, v1, v2, 1)
+						copysub(&p.To, v1, v2, 1)
+						if gc.Debug['P'] != 0 {
+							fmt.Printf("%v\n", r.Prog)
+						}
+					}
+
+					v1.Reg, v2.Reg = v2.Reg, v1.Reg
+					if gc.Debug['P'] != 0 {
+						fmt.Printf("%v last\n", r.Prog)
+					}
+					return true
+				}
+			}
+		}
+
+		if copyau(&p.From, v2) || copyau1(p, v2) || copyau(&p.To, v2) {
+			break
+		}
+		if copysub(&p.From, v1, v2, 0) != 0 || copysub1(p, v1, v2, 0) != 0 || copysub(&p.To, v1, v2, 0) != 0 {
+			break
+		}
+	}
+
+	return false
+}
+
+/*
+ * The idea is to remove redundant copies.
+ *	v1->v2	F=0
+ *	(use v2	s/v2/v1/)*
+ *	set v1	F=1
+ *	use v2	return fail (v1->v2 move must remain)
+ *	-----------------
+ *	v1->v2	F=0
+ *	(use v2	s/v2/v1/)*
+ *	set v1	F=1
+ *	set v2	return success (caller can remove v1->v2 move)
+ */
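+//
+// For example, given
+//	MOVD R1, R2
+//	ADD  R2, R3, R4
+// the ADD is rewritten to read R1 instead of R2; if every use of R2
+// was rewritten, the caller can then delete the MOVD.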
+func copyprop(r0 *gc.Flow) bool {
+	p := r0.Prog
+	v1 := &p.From
+	v2 := &p.To
+	if copyas(v1, v2) {
+		if gc.Debug['P'] != 0 {
+			fmt.Printf("eliminating self-move: %v\n", r0.Prog)
+		}
+		return true
+	}
+
+	gactive++
+	if gc.Debug['P'] != 0 {
+		fmt.Printf("trying to eliminate %v->%v move from:\n%v\n", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r0.Prog)
+	}
+	return copy1(v1, v2, r0.S1, 0)
+}
+
+// copy1 replaces uses of v2 with v1 starting at r and returns true if
+// all uses were rewritten.
+func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) bool {
+	if uint32(r.Active) == gactive {
+		if gc.Debug['P'] != 0 {
+			fmt.Printf("act set; return 1\n")
+		}
+		return true
+	}
+
+	r.Active = int32(gactive)
+	if gc.Debug['P'] != 0 {
+		fmt.Printf("copy1 replace %v with %v f=%d\n", gc.Ctxt.Dconv(v2), gc.Ctxt.Dconv(v1), f)
+	}
+	var t int
+	var p *obj.Prog
+	for ; r != nil; r = r.S1 {
+		p = r.Prog
+		if gc.Debug['P'] != 0 {
+			fmt.Printf("%v", p)
+		}
+		if f == 0 && gc.Uniqp(r) == nil {
+			// Multiple predecessors; conservatively
+			// assume v1 was set on other path
+			f = 1
+
+			if gc.Debug['P'] != 0 {
+				fmt.Printf("; merge; f=%d", f)
+			}
+		}
+
+		t = copyu(p, v2, nil)
+		switch t {
+		case 2: /* rar, can't split */
+			if gc.Debug['P'] != 0 {
+				fmt.Printf("; %v rar; return 0\n", gc.Ctxt.Dconv(v2))
+			}
+			return false
+
+		case 3: /* set */
+			if gc.Debug['P'] != 0 {
+				fmt.Printf("; %v set; return 1\n", gc.Ctxt.Dconv(v2))
+			}
+			return true
+
+		case 1, /* used, substitute */
+			4: /* use and set */
+			if f != 0 {
+				if gc.Debug['P'] == 0 {
+					return false
+				}
+				if t == 4 {
+					fmt.Printf("; %v used+set and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
+				} else {
+					fmt.Printf("; %v used and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
+				}
+				return false
+			}
+
+			if copyu(p, v2, v1) != 0 {
+				if gc.Debug['P'] != 0 {
+					fmt.Printf("; sub fail; return 0\n")
+				}
+				return false
+			}
+
+			if gc.Debug['P'] != 0 {
+				fmt.Printf("; sub %v->%v\n => %v", gc.Ctxt.Dconv(v2), gc.Ctxt.Dconv(v1), p)
+			}
+			if t == 4 {
+				if gc.Debug['P'] != 0 {
+					fmt.Printf("; %v used+set; return 1\n", gc.Ctxt.Dconv(v2))
+				}
+				return true
+			}
+		}
+
+		if f == 0 {
+			t = copyu(p, v1, nil)
+			if f == 0 && (t == 2 || t == 3 || t == 4) {
+				f = 1
+				if gc.Debug['P'] != 0 {
+					fmt.Printf("; %v set and !f; f=%d", gc.Ctxt.Dconv(v1), f)
+				}
+			}
+		}
+
+		if gc.Debug['P'] != 0 {
+			fmt.Printf("\n")
+		}
+		if r.S2 != nil {
+			if !copy1(v1, v2, r.S2, f) {
+				return false
+			}
+		}
+	}
+
+	return true
+}
+
+// If s==nil, copyu returns the set/use of v in p; otherwise, it
+// modifies p to replace reads of v with reads of s and returns 0 for
+// success or non-zero for failure.
+//
+// If s==nil, copyu returns one of the following values:
+//	1 if v only used
+//	2 if v is set and used in one address (read-alter-rewrite;
+//	  can't substitute)
+//	3 if v is only set
+//	4 if v is set in one address and used in another (so addresses
+//	  can be rewritten independently)
+//	0 otherwise (not touched)
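+//
+// For example, for ADD R1, R2, R3 (p.From = R1, p.Reg = R2, p.To = R3),
+// copyu(p, v, nil) returns 1 when v is R1 or R2 and 3 when v is R3.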
+func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
+	if p.From3.Type != obj.TYPE_NONE {
+		// 7g never generates a from3
+		fmt.Printf("copyu: from3 (%v) not implemented\n", gc.Ctxt.Dconv(&p.From3))
+	}
+	if p.To2.Type != obj.TYPE_NONE {
+		// 7g never generates a to2
+		fmt.Printf("copyu: to2 (%v) not implemented\n", gc.Ctxt.Dconv(&p.To2))
+	}
+
+	switch p.As {
+	default:
+		fmt.Printf("copyu: can't find %v\n", obj.Aconv(int(p.As)))
+		return 2
+
+	case obj.ANOP, /* read p->from, write p->to */
+		arm64.ANEG,
+		arm64.AFNEGD,
+		arm64.AFNEGS,
+		arm64.AFSQRTD,
+		arm64.AFCVTZSD,
+		arm64.AFCVTZSS,
+		arm64.AFCVTZSDW,
+		arm64.AFCVTZSSW,
+		arm64.AFCVTZUD,
+		arm64.AFCVTZUS,
+		arm64.AFCVTZUDW,
+		arm64.AFCVTZUSW,
+		arm64.AFCVTSD,
+		arm64.AFCVTDS,
+		arm64.ASCVTFD,
+		arm64.ASCVTFS,
+		arm64.ASCVTFWD,
+		arm64.ASCVTFWS,
+		arm64.AUCVTFD,
+		arm64.AUCVTFS,
+		arm64.AUCVTFWD,
+		arm64.AUCVTFWS,
+		arm64.AMOVB,
+		arm64.AMOVBU,
+		arm64.AMOVH,
+		arm64.AMOVHU,
+		arm64.AMOVW,
+		arm64.AMOVWU,
+		arm64.AMOVD,
+		arm64.AFMOVS,
+		arm64.AFMOVD:
+		if p.Scond == 0 {
+			if s != nil {
+				if copysub(&p.From, v, s, 1) != 0 {
+					return 1
+				}
+
+				// Update only indirect uses of v in p->to
+				if !copyas(&p.To, v) {
+					if copysub(&p.To, v, s, 1) != 0 {
+						return 1
+					}
+				}
+				return 0
+			}
+
+			if copyas(&p.To, v) {
+				// Fix up implicit from
+				if p.From.Type == obj.TYPE_NONE {
+					p.From = p.To
+				}
+				if copyau(&p.From, v) {
+					return 4
+				}
+				return 3
+			}
+
+			if copyau(&p.From, v) {
+				return 1
+			}
+			if copyau(&p.To, v) {
+				// p->to only indirectly uses v
+				return 1
+			}
+
+			return 0
+		}
+
+		/* rar p->from, write p->to or read p->from, rar p->to */
+		if p.From.Type == obj.TYPE_MEM {
+			if copyas(&p.From, v) {
+				// No s!=nil check; need to fail
+				// anyway in that case
+				return 2
+			}
+
+			if s != nil {
+				if copysub(&p.To, v, s, 1) != 0 {
+					return 1
+				}
+				return 0
+			}
+
+			if copyas(&p.To, v) {
+				return 3
+			}
+		} else if p.To.Type == obj.TYPE_MEM {
+			if copyas(&p.To, v) {
+				return 2
+			}
+			if s != nil {
+				if copysub(&p.From, v, s, 1) != 0 {
+					return 1
+				}
+				return 0
+			}
+
+			if copyau(&p.From, v) {
+				return 1
+			}
+		} else {
+			fmt.Printf("copyu: bad %v\n", p)
+		}
+
+		return 0
+
+	case arm64.AADD, /* read p->from, read p->reg, write p->to */
+		arm64.ASUB,
+		arm64.AAND,
+		arm64.AORR,
+		arm64.AEOR,
+		arm64.AMUL,
+		arm64.ASMULL,
+		arm64.AUMULL,
+		arm64.ASMULH,
+		arm64.AUMULH,
+		arm64.ASDIV,
+		arm64.AUDIV,
+		arm64.ALSL,
+		arm64.ALSR,
+		arm64.AASR,
+		arm64.AFADDD,
+		arm64.AFADDS,
+		arm64.AFSUBD,
+		arm64.AFSUBS,
+		arm64.AFMULD,
+		arm64.AFMULS,
+		arm64.AFDIVD,
+		arm64.AFDIVS:
+		if s != nil {
+			if copysub(&p.From, v, s, 1) != 0 {
+				return 1
+			}
+			if copysub1(p, v, s, 1) != 0 {
+				return 1
+			}
+
+			// Update only indirect uses of v in p->to
+			if !copyas(&p.To, v) {
+				if copysub(&p.To, v, s, 1) != 0 {
+					return 1
+				}
+			}
+			return 0
+		}
+
+		if copyas(&p.To, v) {
+			if p.Reg == 0 {
+				// Fix up implicit reg (e.g., ADD
+				// R3,R4 -> ADD R3,R4,R4) so we can
+				// update reg and to separately.
+				p.Reg = p.To.Reg
+			}
+
+			if copyau(&p.From, v) {
+				return 4
+			}
+			if copyau1(p, v) {
+				return 4
+			}
+			return 3
+		}
+
+		if copyau(&p.From, v) {
+			return 1
+		}
+		if copyau1(p, v) {
+			return 1
+		}
+		if copyau(&p.To, v) {
+			return 1
+		}
+		return 0
+
+	case arm64.ABEQ,
+		arm64.ABNE,
+		arm64.ABGE,
+		arm64.ABLT,
+		arm64.ABGT,
+		arm64.ABLE,
+		arm64.ABLO,
+		arm64.ABLS,
+		arm64.ABHI,
+		arm64.ABHS:
+		return 0
+
+	case obj.ACHECKNIL, /* read p->from */
+		arm64.ACMP, /* read p->from, read p->reg */
+		arm64.AFCMPD,
+		arm64.AFCMPS:
+		if s != nil {
+			if copysub(&p.From, v, s, 1) != 0 {
+				return 1
+			}
+			return copysub1(p, v, s, 1)
+		}
+
+		if copyau(&p.From, v) {
+			return 1
+		}
+		if copyau1(p, v) {
+			return 1
+		}
+		return 0
+
+	case arm64.AB: /* read p->to */
+		if s != nil {
+			if copysub(&p.To, v, s, 1) != 0 {
+				return 1
+			}
+			return 0
+		}
+
+		if copyau(&p.To, v) {
+			return 1
+		}
+		return 0
+
+	case obj.ARET: /* funny */
+		if s != nil {
+			return 0
+		}
+
+		// All registers die at this point, so claim
+		// everything is set (and not used).
+		return 3
+
+	case arm64.ABL: /* funny */
+		if p.From.Type == obj.TYPE_REG && v.Type == obj.TYPE_REG && p.From.Reg == v.Reg {
+			return 2
+		}
+
+		if s != nil {
+			if copysub(&p.To, v, s, 1) != 0 {
+				return 1
+			}
+			return 0
+		}
+
+		if copyau(&p.To, v) {
+			return 4
+		}
+		return 3
+
+	// R31 is zero, used by DUFFZERO, cannot be substituted.
+	// R16 is ptr to memory, used and set, cannot be substituted.
+	case obj.ADUFFZERO:
+		if v.Type == obj.TYPE_REG {
+			if v.Reg == arm64.REGZERO {
+				return 1
+			}
+			if v.Reg == arm64.REGRT1 {
+				return 2
+			}
+		}
+
+		return 0
+
+	// R16, R17 are ptr to src, dst, used and set, cannot be substituted.
+	// R27 is scratch, set by DUFFCOPY, cannot be substituted.
+	case obj.ADUFFCOPY:
+		if v.Type == obj.TYPE_REG {
+			if v.Reg == arm64.REGRT1 || v.Reg == arm64.REGRT2 {
+				return 2
+			}
+			if v.Reg == arm64.REGTMP {
+				return 3
+			}
+		}
+
+		return 0
+
+	case arm64.AHINT,
+		obj.ATEXT,
+		obj.APCDATA,
+		obj.AFUNCDATA,
+		obj.AVARDEF,
+		obj.AVARKILL:
+		return 0
+	}
+}
+
+// copyas reports whether a and v address the same register.
+//
+// If a is the from operand, this means this operation reads the
+// register in v. If a is the to operand, this means this operation
+// writes the register in v.
+func copyas(a *obj.Addr, v *obj.Addr) bool {
+	if regtyp(v) {
+		if a.Type == v.Type {
+			if a.Reg == v.Reg {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// copyau reports whether a either directly or indirectly addresses the
+// same register as v.
+//
+// If a is the from operand, this means this operation reads the
+// register in v. If a is the to operand, this means the operation
+// either reads or writes the register in v (if !copyas(a, v), then
+// the operation reads the register in v).
+func copyau(a *obj.Addr, v *obj.Addr) bool {
+	if copyas(a, v) {
+		return true
+	}
+	if v.Type == obj.TYPE_REG {
+		if a.Type == obj.TYPE_MEM || (a.Type == obj.TYPE_ADDR && a.Reg != 0) {
+			if v.Reg == a.Reg {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// copyau1 reports whether p->reg references the same register as v and v
+// is a direct reference.
+func copyau1(p *obj.Prog, v *obj.Addr) bool {
+	if regtyp(v) && v.Reg != 0 {
+		if p.Reg == v.Reg {
+			return true
+		}
+	}
+	return false
+}
+
+// copysub replaces v with s in a if f!=0 or reports whether it could if f==0.
+// Returns 1 on failure to substitute (it always succeeds on arm64).
+func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f int) int {
+	if f != 0 {
+		if copyau(a, v) {
+			a.Reg = s.Reg
+		}
+	}
+	return 0
+}
+
+// copysub1 replaces v with s in p1->reg if f!=0 or reports whether it could if f==0.
+// Returns 1 on failure to substitute (it always succeeds on arm64).
+func copysub1(p1 *obj.Prog, v *obj.Addr, s *obj.Addr, f int) int {
+	if f != 0 {
+		if copyau1(p1, v) {
+			p1.Reg = s.Reg
+		}
+	}
+	return 0
+}
+
+func sameaddr(a *obj.Addr, v *obj.Addr) bool {
+	if a.Type != v.Type {
+		return false
+	}
+	if regtyp(v) && a.Reg == v.Reg {
+		return true
+	}
+	if v.Name == obj.NAME_AUTO || v.Name == obj.NAME_PARAM {
+		if v.Offset == a.Offset {
+			return true
+		}
+	}
+	return false
+}
+
+func smallindir(a *obj.Addr, reg *obj.Addr) bool {
+	return reg.Type == obj.TYPE_REG && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && 0 <= a.Offset && a.Offset < 4096
+}
+
+func stackaddr(a *obj.Addr) bool {
+	return a.Type == obj.TYPE_REG && a.Reg == arm64.REGSP
+}
diff --git a/src/cmd/compile/internal/arm64/prog.go b/src/cmd/compile/internal/arm64/prog.go
new file mode 100644
index 0000000..1106e78
--- /dev/null
+++ b/src/cmd/compile/internal/arm64/prog.go
@@ -0,0 +1,174 @@
+// Copyright 2014 The Go Authors.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package arm64
+
+import (
+	"cmd/compile/internal/gc"
+	"cmd/internal/obj"
+	"cmd/internal/obj/arm64"
+)
+
+const (
+	LeftRdwr  uint32 = gc.LeftRead | gc.LeftWrite
+	RightRdwr uint32 = gc.RightRead | gc.RightWrite
+)
+
+// This table gives the basic information about instruction
+// generated by the compiler and processed in the optimizer.
+// See opt.h for bit definitions.
+//
+// Instructions not generated need not be listed.
+// As an exception to that rule, we typically write down all the
+// size variants of an operation even if we just use a subset.
+//
+// The table is formatted for 8-space tabs.
+var progtable = [arm64.ALAST]obj.ProgInfo{
+	obj.ATYPE:     {gc.Pseudo | gc.Skip, 0, 0, 0},
+	obj.ATEXT:     {gc.Pseudo, 0, 0, 0},
+	obj.AFUNCDATA: {gc.Pseudo, 0, 0, 0},
+	obj.APCDATA:   {gc.Pseudo, 0, 0, 0},
+	obj.AUNDEF:    {gc.Break, 0, 0, 0},
+	obj.AUSEFIELD: {gc.OK, 0, 0, 0},
+	obj.ACHECKNIL: {gc.LeftRead, 0, 0, 0},
+	obj.AVARDEF:   {gc.Pseudo | gc.RightWrite, 0, 0, 0},
+	obj.AVARKILL:  {gc.Pseudo | gc.RightWrite, 0, 0, 0},
+
+	// NOP is an internal no-op that also stands
+	// for USED and SET annotations, not a machine opcode.
+	obj.ANOP:    {gc.LeftRead | gc.RightWrite, 0, 0, 0},
+	arm64.AHINT: {gc.OK, 0, 0, 0},
+
+	// Integer
+	arm64.AADD:   {gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	arm64.ASUB:   {gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	arm64.ANEG:   {gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	arm64.AAND:   {gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	arm64.AORR:   {gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	arm64.AEOR:   {gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	arm64.AMUL:   {gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	arm64.ASMULL: {gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	arm64.AUMULL: {gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	arm64.ASMULH: {gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	arm64.AUMULH: {gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	arm64.ASDIV:  {gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	arm64.AUDIV:  {gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	arm64.ALSL:   {gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	arm64.ALSR:   {gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	arm64.AASR:   {gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	arm64.ACMP:   {gc.SizeQ | gc.LeftRead | gc.RegRead, 0, 0, 0},
+
+	// Floating point.
+	arm64.AFADDD:  {gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	arm64.AFADDS:  {gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	arm64.AFSUBD:  {gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	arm64.AFSUBS:  {gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	arm64.AFNEGD:  {gc.SizeD | gc.LeftRead | gc.RightWrite, 0, 0, 0},
+	arm64.AFNEGS:  {gc.SizeF | gc.LeftRead | gc.RightWrite, 0, 0, 0},
+	arm64.AFSQRTD: {gc.SizeD | gc.LeftRead | gc.RightWrite, 0, 0, 0},
+	arm64.AFMULD:  {gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	arm64.AFMULS:  {gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	arm64.AFDIVD:  {gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	arm64.AFDIVS:  {gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	arm64.AFCMPD:  {gc.SizeD | gc.LeftRead | gc.RegRead, 0, 0, 0},
+	arm64.AFCMPS:  {gc.SizeF | gc.LeftRead | gc.RegRead, 0, 0, 0},
+
+	// float -> integer
+	arm64.AFCVTZSD:  {gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	arm64.AFCVTZSS:  {gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	arm64.AFCVTZSDW: {gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	arm64.AFCVTZSSW: {gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	arm64.AFCVTZUD:  {gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	arm64.AFCVTZUS:  {gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	arm64.AFCVTZUDW: {gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	arm64.AFCVTZUSW: {gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+
+	// float -> float
+	arm64.AFCVTSD: {gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	arm64.AFCVTDS: {gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+
+	// integer -> float
+	arm64.ASCVTFD:  {gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	arm64.ASCVTFS:  {gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	arm64.ASCVTFWD: {gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	arm64.ASCVTFWS: {gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	arm64.AUCVTFD:  {gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	arm64.AUCVTFS:  {gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	arm64.AUCVTFWD: {gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	arm64.AUCVTFWS: {gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+
+	// Moves
+	arm64.AMOVB:  {gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv, 0, 0, 0},
+	arm64.AMOVBU: {gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv, 0, 0, 0},
+	arm64.AMOVH:  {gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv, 0, 0, 0},
+	arm64.AMOVHU: {gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv, 0, 0, 0},
+	arm64.AMOVW:  {gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv, 0, 0, 0},
+	arm64.AMOVWU: {gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv, 0, 0, 0},
+	arm64.AMOVD:  {gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+	arm64.AFMOVS: {gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv, 0, 0, 0},
+	arm64.AFMOVD: {gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+
+	// Jumps
+	arm64.AB:      {gc.Jump | gc.Break, 0, 0, 0},
+	arm64.ABL:     {gc.Call, 0, 0, 0},
+	arm64.ABEQ:    {gc.Cjmp, 0, 0, 0},
+	arm64.ABNE:    {gc.Cjmp, 0, 0, 0},
+	arm64.ABGE:    {gc.Cjmp, 0, 0, 0},
+	arm64.ABLT:    {gc.Cjmp, 0, 0, 0},
+	arm64.ABGT:    {gc.Cjmp, 0, 0, 0},
+	arm64.ABLE:    {gc.Cjmp, 0, 0, 0},
+	arm64.ABLO:    {gc.Cjmp, 0, 0, 0},
+	arm64.ABLS:    {gc.Cjmp, 0, 0, 0},
+	arm64.ABHI:    {gc.Cjmp, 0, 0, 0},
+	arm64.ABHS:    {gc.Cjmp, 0, 0, 0},
+	arm64.ACBZ:    {gc.Cjmp, 0, 0, 0},
+	arm64.ACBNZ:   {gc.Cjmp, 0, 0, 0},
+	obj.ARET:      {gc.Break, 0, 0, 0},
+	obj.ADUFFZERO: {gc.Call, 0, 0, 0},
+	obj.ADUFFCOPY: {gc.Call, 0, 0, 0},
+}
+
+func proginfo(p *obj.Prog) {
+	info := &p.Info
+	*info = progtable[p.As]
+	if info.Flags == 0 {
+		gc.Fatal("proginfo: unknown instruction %v", p)
+	}
+
+	if (info.Flags&gc.RegRead != 0) && p.Reg == 0 {
+		info.Flags &^= gc.RegRead
+		info.Flags |= gc.RightRead /*CanRegRead |*/
+	}
+
+	if (p.From.Type == obj.TYPE_MEM || p.From.Type == obj.TYPE_ADDR) && p.From.Reg != 0 {
+		info.Regindex |= RtoB(int(p.From.Reg))
+		if p.Scond != 0 {
+			info.Regset |= RtoB(int(p.From.Reg))
+		}
+	}
+
+	if (p.To.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_ADDR) && p.To.Reg != 0 {
+		info.Regindex |= RtoB(int(p.To.Reg))
+		if p.Scond != 0 {
+			info.Regset |= RtoB(int(p.To.Reg))
+		}
+	}
+
+	if p.From.Type == obj.TYPE_ADDR && p.From.Sym != nil && (info.Flags&gc.LeftRead != 0) {
+		info.Flags &^= gc.LeftRead
+		info.Flags |= gc.LeftAddr
+	}
+
+	if p.As == obj.ADUFFZERO {
+		info.Reguse |= RtoB(arm64.REGRT1)
+		info.Regset |= RtoB(arm64.REGRT1)
+	}
+
+	if p.As == obj.ADUFFCOPY {
+		// TODO(austin) Revisit when duffcopy is implemented
+		info.Reguse |= RtoB(arm64.REGRT1) | RtoB(arm64.REGRT2) | RtoB(arm64.REG_R5)
+
+		info.Regset |= RtoB(arm64.REGRT1) | RtoB(arm64.REGRT2)
+	}
+}
diff --git a/src/cmd/compile/internal/arm64/reg.go b/src/cmd/compile/internal/arm64/reg.go
new file mode 100644
index 0000000..7bc756b
--- /dev/null
+++ b/src/cmd/compile/internal/arm64/reg.go
@@ -0,0 +1,171 @@
+// Derived from Inferno utils/6c/reg.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/reg.c
+//
+//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
+//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+//	Portions Copyright © 1997-1999 Vita Nuova Limited
+//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+//	Portions Copyright © 2004,2006 Bruce Ellis
+//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+//	Portions Copyright © 2009 The Go Authors.  All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package arm64
+
+import (
+	"cmd/compile/internal/gc"
+	"cmd/internal/obj/arm64"
+)
+
+const (
+	NREGVAR = 64 /* 32 general + 32 floating */
+)
+
+var reg [arm64.NREG + arm64.NFREG]uint8
+
+var regname = []string{
+	".R0",
+	".R1",
+	".R2",
+	".R3",
+	".R4",
+	".R5",
+	".R6",
+	".R7",
+	".R8",
+	".R9",
+	".R10",
+	".R11",
+	".R12",
+	".R13",
+	".R14",
+	".R15",
+	".R16",
+	".R17",
+	".R18",
+	".R19",
+	".R20",
+	".R21",
+	".R22",
+	".R23",
+	".R24",
+	".R25",
+	".R26",
+	".R27",
+	".R28",
+	".R29",
+	".R30",
+	".R31",
+	".F0",
+	".F1",
+	".F2",
+	".F3",
+	".F4",
+	".F5",
+	".F6",
+	".F7",
+	".F8",
+	".F9",
+	".F10",
+	".F11",
+	".F12",
+	".F13",
+	".F14",
+	".F15",
+	".F16",
+	".F17",
+	".F18",
+	".F19",
+	".F20",
+	".F21",
+	".F22",
+	".F23",
+	".F24",
+	".F25",
+	".F26",
+	".F27",
+	".F28",
+	".F29",
+	".F30",
+	".F31",
+}
+
+func regnames(n *int) []string {
+	*n = NREGVAR
+	return regname
+}
+
+func excludedregs() uint64 {
+	// Exclude registers with fixed functions
+	regbits := uint64(RtoB(arm64.REGRT1) | RtoB(arm64.REGRT2) | RtoB(arm64.REGPR))
+
+	// Exclude R26 - R31.
+	for r := arm64.REGMAX + 1; r <= arm64.REGZERO; r++ {
+		regbits |= RtoB(r)
+	}
+
+	// Also exclude floating point registers with fixed constants
+	regbits |= RtoB(arm64.REG_F27) | RtoB(arm64.REG_F28) | RtoB(arm64.REG_F29) | RtoB(arm64.REG_F30) | RtoB(arm64.REG_F31)
+
+	return regbits
+}
+
+func doregbits(r int) uint64 {
+	return 0
+}
+
+/*
+ * track register variables including external registers:
+ *	bit	reg
+ *	0	R0
+ *	1	R1
+ *	...	...
+ *	31	R31
+ *	32+0	F0
+ *	32+1	F1
+ *	...	...
+ *	32+31	F31
+ */
+func RtoB(r int) uint64 {
+	if r >= arm64.REG_R0 && r <= arm64.REG_R31 {
+		return 1 << uint(r-arm64.REG_R0)
+	}
+	if r >= arm64.REG_F0 && r <= arm64.REG_F31 {
+		return 1 << uint(32+r-arm64.REG_F0)
+	}
+	return 0
+}
+
+func BtoR(b uint64) int {
+	b &= 0xffffffff
+	if b == 0 {
+		return 0
+	}
+	return gc.Bitno(b) + arm64.REG_R0
+}
+
+func BtoF(b uint64) int {
+	b >>= 32
+	if b == 0 {
+		return 0
+	}
+	return gc.Bitno(b) + arm64.REG_F0
+}
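
[Editor's note: As in the other back ends, RtoB, BtoR, and BtoF map between register numbers and bit positions in a 64-bit mask: R0-R31 occupy bits 0-31 and F0-F31 occupy bits 32-63, so a register set is just a word of bits. A minimal standalone sketch of the same mapping — the plain 0-31 numbering here is illustrative; the real code offsets by arm64.REG_R0/REG_F0 and uses gc.Bitno:

	package main

	import "fmt"

	// rtoB mirrors RtoB for integer registers: register r (0..31)
	// becomes bit r of the mask.
	func rtoB(r int) uint64 {
		if 0 <= r && r < 32 {
			return 1 << uint(r)
		}
		return 0
	}

	// btoR mirrors BtoR: it returns the lowest-numbered register in the
	// integer half of the mask, or -1 if that half is empty.
	func btoR(b uint64) int {
		b &= 0xffffffff // integer registers live in the low 32 bits
		if b == 0 {
			return -1
		}
		n := 0
		for b&1 == 0 {
			b >>= 1
			n++
		}
		return n
	}

	func main() {
		m := rtoB(3) | rtoB(17)
		fmt.Printf("mask=%#x lowest=R%d\n", m, btoR(m)) // mask=0x20008 lowest=R3
	}
]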
diff --git a/src/cmd/compile/internal/big/accuracy_string.go b/src/cmd/compile/internal/big/accuracy_string.go
new file mode 100644
index 0000000..24ef7f1
--- /dev/null
+++ b/src/cmd/compile/internal/big/accuracy_string.go
@@ -0,0 +1,17 @@
+// generated by stringer -type=Accuracy; DO NOT EDIT
+
+package big
+
+import "fmt"
+
+const _Accuracy_name = "BelowExactAbove"
+
+var _Accuracy_index = [...]uint8{0, 5, 10, 15}
+
+func (i Accuracy) String() string {
+	i -= -1
+	if i < 0 || i+1 >= Accuracy(len(_Accuracy_index)) {
+		return fmt.Sprintf("Accuracy(%d)", i+-1)
+	}
+	return _Accuracy_name[_Accuracy_index[i]:_Accuracy_index[i+1]]
+}
diff --git a/src/cmd/compile/internal/big/arith.go b/src/cmd/compile/internal/big/arith.go
new file mode 100644
index 0000000..328c85c
--- /dev/null
+++ b/src/cmd/compile/internal/big/arith.go
@@ -0,0 +1,291 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file provides Go implementations of elementary multi-precision
+// arithmetic operations on word vectors. Needed for platforms without
+// assembly implementations of these routines.
+
+package big
+
+// A Word represents a single digit of a multi-precision unsigned integer.
+type Word uintptr
+
+const (
+	// Compute the size _S of a Word in bytes.
+	_m    = ^Word(0)
+	_logS = _m>>8&1 + _m>>16&1 + _m>>32&1
+	_S    = 1 << _logS
+
+	_W = _S << 3 // word size in bits
+	_B = 1 << _W // digit base
+	_M = _B - 1  // digit mask
+
+	_W2 = _W / 2   // half word size in bits
+	_B2 = 1 << _W2 // half digit base
+	_M2 = _B2 - 1  // half digit mask
+)
+
+// ----------------------------------------------------------------------------
+// Elementary operations on words
+//
+// These operations are used by the vector operations below.
+
+// z1<<_W + z0 = x+y+c, with c == 0 or 1
+func addWW_g(x, y, c Word) (z1, z0 Word) {
+	yc := y + c
+	z0 = x + yc
+	if z0 < x || yc < y {
+		z1 = 1
+	}
+	return
+}
+
+// z1<<_W + z0 = x-y-c, with c == 0 or 1
+func subWW_g(x, y, c Word) (z1, z0 Word) {
+	yc := y + c
+	z0 = x - yc
+	if z0 > x || yc < y {
+		z1 = 1
+	}
+	return
+}
+
+// z1<<_W + z0 = x*y
+// Adapted from Warren, Hacker's Delight, p. 132.
+func mulWW_g(x, y Word) (z1, z0 Word) {
+	x0 := x & _M2
+	x1 := x >> _W2
+	y0 := y & _M2
+	y1 := y >> _W2
+	w0 := x0 * y0
+	t := x1*y0 + w0>>_W2
+	w1 := t & _M2
+	w2 := t >> _W2
+	w1 += x0 * y1
+	z1 = x1*y1 + w2 + w1>>_W2
+	z0 = x * y
+	return
+}
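
[Editor's note: The half-word decomposition in mulWW_g is the classical schoolbook scheme: split each word into high and low halves, form the partial products, and recombine, with no intermediate value overflowing a word. A toy version on 16-bit "words" with 8-bit halves, small enough to check against a full 32-bit multiply:

	package main

	import "fmt"

	// mul16 returns the high and low halves of x*y using only 16-bit
	// arithmetic on 8-bit halves, mirroring the structure of mulWW_g.
	func mul16(x, y uint16) (z1, z0 uint16) {
		const halfW, halfM = 8, 0xff
		x0, x1 := x&halfM, x>>halfW
		y0, y1 := y&halfM, y>>halfW
		w0 := x0 * y0
		t := x1*y0 + w0>>halfW
		w1 := t&halfM + x0*y1
		w2 := t >> halfW
		z1 = x1*y1 + w2 + w1>>halfW
		z0 = x * y
		return
	}

	func main() {
		z1, z0 := mul16(0xabcd, 0x1234)
		full := uint32(0xabcd) * 0x1234
		fmt.Printf("hi=%#x lo=%#x full=%#x\n", z1, z0, full) // hi=0xc37 lo=0x4fa4 full=0xc374fa4
	}
]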
+
+// z1<<_W + z0 = x*y + c
+func mulAddWWW_g(x, y, c Word) (z1, z0 Word) {
+	z1, zz0 := mulWW_g(x, y)
+	if z0 = zz0 + c; z0 < zz0 {
+		z1++
+	}
+	return
+}
+
+// Length of x in bits.
+func bitLen_g(x Word) (n int) {
+	for ; x >= 0x8000; x >>= 16 {
+		n += 16
+	}
+	if x >= 0x80 {
+		x >>= 8
+		n += 8
+	}
+	if x >= 0x8 {
+		x >>= 4
+		n += 4
+	}
+	if x >= 0x2 {
+		x >>= 2
+		n += 2
+	}
+	if x >= 0x1 {
+		n++
+	}
+	return
+}
+
+// log2 computes the integer binary logarithm of x.
+// The result is the integer n for which 2^n <= x < 2^(n+1).
+// If x == 0, the result is -1.
+func log2(x Word) int {
+	return bitLen(x) - 1
+}
+
+// Number of leading zeros in x.
+func leadingZeros(x Word) uint {
+	return uint(_W - bitLen(x))
+}
+
+// q = (u1<<_W + u0 - r)/v
+// Adapted from Warren, Hacker's Delight, p. 152.
+func divWW_g(u1, u0, v Word) (q, r Word) {
+	if u1 >= v {
+		return 1<<_W - 1, 1<<_W - 1
+	}
+
+	s := leadingZeros(v)
+	v <<= s
+
+	vn1 := v >> _W2
+	vn0 := v & _M2
+	un32 := u1<<s | u0>>(_W-s)
+	un10 := u0 << s
+	un1 := un10 >> _W2
+	un0 := un10 & _M2
+	q1 := un32 / vn1
+	rhat := un32 - q1*vn1
+
+	for q1 >= _B2 || q1*vn0 > _B2*rhat+un1 {
+		q1--
+		rhat += vn1
+		if rhat >= _B2 {
+			break
+		}
+	}
+
+	un21 := un32*_B2 + un1 - q1*v
+	q0 := un21 / vn1
+	rhat = un21 - q0*vn1
+
+	for q0 >= _B2 || q0*vn0 > _B2*rhat+un0 {
+		q0--
+		rhat += vn1
+		if rhat >= _B2 {
+			break
+		}
+	}
+
+	return q1*_B2 + q0, (un21*_B2 + un0 - q0*v) >> s
+}
+
+// Keep for performance debugging.
+// Using addWW_g is likely slower.
+const use_addWW_g = false
+
+// The resulting carry c is either 0 or 1.
+func addVV_g(z, x, y []Word) (c Word) {
+	if use_addWW_g {
+		for i := range z {
+			c, z[i] = addWW_g(x[i], y[i], c)
+		}
+		return
+	}
+
+	for i, xi := range x[:len(z)] {
+		yi := y[i]
+		zi := xi + yi + c
+		z[i] = zi
+		// see "Hacker's Delight", section 2-12 (overflow detection)
+		c = (xi&yi | (xi|yi)&^zi) >> (_W - 1)
+	}
+	return
+}
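
[Editor's note: The loop above uses the branch-free carry trick from Hacker's Delight, section 2-12: the carry out of xi+yi+c can be read off the sign bits as (xi&yi | (xi|yi)&^zi) >> (_W-1), avoiding a data-dependent branch. A self-contained sketch on uint64 words:

	package main

	import "fmt"

	// addVV adds equal-length word vectors x and y into z and returns the
	// final carry, using the same branch-free carry computation as addVV_g.
	func addVV(z, x, y []uint64) (c uint64) {
		for i := range z {
			xi, yi := x[i], y[i]
			zi := xi + yi + c
			z[i] = zi
			c = (xi&yi | (xi|yi)&^zi) >> 63
		}
		return
	}

	func main() {
		// (2^64-1) + 1: the low word wraps to 0 and the carry ripples up.
		x := []uint64{^uint64(0), 0}
		y := []uint64{1, 0}
		z := make([]uint64, 2)
		c := addVV(z, x, y)
		fmt.Println(z, c) // [0 1] 0
	}
]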
+
+// The resulting carry c is either 0 or 1.
+func subVV_g(z, x, y []Word) (c Word) {
+	if use_addWW_g {
+		for i := range z {
+			c, z[i] = subWW_g(x[i], y[i], c)
+		}
+		return
+	}
+
+	for i, xi := range x[:len(z)] {
+		yi := y[i]
+		zi := xi - yi - c
+		z[i] = zi
+		// see "Hacker's Delight", section 2-12 (overflow detection)
+		c = (yi&^xi | (yi|^xi)&zi) >> (_W - 1)
+	}
+	return
+}
+
+// Argument y must be either 0 or 1.
+// The resulting carry c is either 0 or 1.
+func addVW_g(z, x []Word, y Word) (c Word) {
+	if use_addWW_g {
+		c = y
+		for i := range z {
+			c, z[i] = addWW_g(x[i], c, 0)
+		}
+		return
+	}
+
+	c = y
+	for i, xi := range x[:len(z)] {
+		zi := xi + c
+		z[i] = zi
+		c = xi &^ zi >> (_W - 1)
+	}
+	return
+}
+
+func subVW_g(z, x []Word, y Word) (c Word) {
+	if use_addWW_g {
+		c = y
+		for i := range z {
+			c, z[i] = subWW_g(x[i], c, 0)
+		}
+		return
+	}
+
+	c = y
+	for i, xi := range x[:len(z)] {
+		zi := xi - c
+		z[i] = zi
+		c = (zi &^ xi) >> (_W - 1)
+	}
+	return
+}
+
+func shlVU_g(z, x []Word, s uint) (c Word) {
+	if n := len(z); n > 0 {
+		ŝ := _W - s
+		w1 := x[n-1]
+		c = w1 >> ŝ
+		for i := n - 1; i > 0; i-- {
+			w := w1
+			w1 = x[i-1]
+			z[i] = w<<s | w1>>ŝ
+		}
+		z[0] = w1 << s
+	}
+	return
+}
+
+func shrVU_g(z, x []Word, s uint) (c Word) {
+	if n := len(z); n > 0 {
+		ŝ := _W - s
+		w1 := x[0]
+		c = w1 << ŝ
+		for i := 0; i < n-1; i++ {
+			w := w1
+			w1 = x[i+1]
+			z[i] = w>>s | w1<<ŝ
+		}
+		z[n-1] = w1 >> s
+	}
+	return
+}
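
[Editor's note: shlVU_g and shrVU_g shift across word boundaries by combining each word with its neighbor; the bits shifted out of the vector come back in the carry word. A sketch of the right-shift case on uint64 words, assuming 0 < s < 64 as the callers guarantee:

	package main

	import "fmt"

	// shrV shifts x right by s bits into z and returns the bits shifted
	// out of the low end, packed into the high bits of c, as in shrVU_g.
	func shrV(z, x []uint64, s uint) (c uint64) {
		n := len(z)
		if n == 0 {
			return
		}
		ŝ := 64 - s
		w1 := x[0]
		c = w1 << ŝ // low s bits of x[0] fall off the end
		for i := 0; i < n-1; i++ {
			w := w1
			w1 = x[i+1]
			z[i] = w>>s | w1<<ŝ // splice in the neighbor's low bits
		}
		z[n-1] = w1 >> s
		return
	}

	func main() {
		x := []uint64{0x1, 0xf5} // least-significant word first
		z := make([]uint64, 2)
		c := shrV(z, x, 4)
		fmt.Printf("z=%#x c=%#x\n", z, c) // z=[0x5000000000000000 0xf] c=0x1000000000000000
	}
]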
+
+func mulAddVWW_g(z, x []Word, y, r Word) (c Word) {
+	c = r
+	for i := range z {
+		c, z[i] = mulAddWWW_g(x[i], y, c)
+	}
+	return
+}
+
+// TODO(gri) Remove use of addWW_g here and then we can remove addWW_g and subWW_g.
+func addMulVVW_g(z, x []Word, y Word) (c Word) {
+	for i := range z {
+		z1, z0 := mulAddWWW_g(x[i], y, z[i])
+		c, z[i] = addWW_g(z0, c, 0)
+		c += z1
+	}
+	return
+}
+
+func divWVW_g(z []Word, xn Word, x []Word, y Word) (r Word) {
+	r = xn
+	for i := len(z) - 1; i >= 0; i-- {
+		z[i], r = divWW_g(r, x[i], y)
+	}
+	return
+}
diff --git a/src/cmd/compile/internal/big/arith_decl.go b/src/cmd/compile/internal/big/arith_decl.go
new file mode 100644
index 0000000..fe13577
--- /dev/null
+++ b/src/cmd/compile/internal/big/arith_decl.go
@@ -0,0 +1,53 @@
+// Copyright 2015 The Go Authors.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package big
+
+func mulWW(x, y Word) (z1, z0 Word) {
+	return mulWW_g(x, y)
+}
+
+func divWW(x1, x0, y Word) (q, r Word) {
+	return divWW_g(x1, x0, y)
+}
+
+func addVV(z, x, y []Word) (c Word) {
+	return addVV_g(z, x, y)
+}
+
+func subVV(z, x, y []Word) (c Word) {
+	return subVV_g(z, x, y)
+}
+
+func addVW(z, x []Word, y Word) (c Word) {
+	return addVW_g(z, x, y)
+}
+
+func subVW(z, x []Word, y Word) (c Word) {
+	return subVW_g(z, x, y)
+}
+
+func shlVU(z, x []Word, s uint) (c Word) {
+	return shlVU_g(z, x, s)
+}
+
+func shrVU(z, x []Word, s uint) (c Word) {
+	return shrVU_g(z, x, s)
+}
+
+func mulAddVWW(z, x []Word, y, r Word) (c Word) {
+	return mulAddVWW_g(z, x, y, r)
+}
+
+func addMulVVW(z, x []Word, y Word) (c Word) {
+	return addMulVVW_g(z, x, y)
+}
+
+func divWVW(z []Word, xn Word, x []Word, y Word) (r Word) {
+	return divWVW_g(z, xn, x, y)
+}
+
+func bitLen(x Word) (n int) {
+	return bitLen_g(x)
+}
diff --git a/src/cmd/compile/internal/big/arith_test.go b/src/cmd/compile/internal/big/arith_test.go
new file mode 100644
index 0000000..cd92dd7
--- /dev/null
+++ b/src/cmd/compile/internal/big/arith_test.go
@@ -0,0 +1,456 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package big
+
+import (
+	"math/rand"
+	"testing"
+)
+
+type funWW func(x, y, c Word) (z1, z0 Word)
+type argWW struct {
+	x, y, c, z1, z0 Word
+}
+
+var sumWW = []argWW{
+	{0, 0, 0, 0, 0},
+	{0, 1, 0, 0, 1},
+	{0, 0, 1, 0, 1},
+	{0, 1, 1, 0, 2},
+	{12345, 67890, 0, 0, 80235},
+	{12345, 67890, 1, 0, 80236},
+	{_M, 1, 0, 1, 0},
+	{_M, 0, 1, 1, 0},
+	{_M, 1, 1, 1, 1},
+	{_M, _M, 0, 1, _M - 1},
+	{_M, _M, 1, 1, _M},
+}
+
+func testFunWW(t *testing.T, msg string, f funWW, a argWW) {
+	z1, z0 := f(a.x, a.y, a.c)
+	if z1 != a.z1 || z0 != a.z0 {
+		t.Errorf("%s%+v\n\tgot z1:z0 = %#x:%#x; want %#x:%#x", msg, a, z1, z0, a.z1, a.z0)
+	}
+}
+
+func TestFunWW(t *testing.T) {
+	for _, a := range sumWW {
+		arg := a
+		testFunWW(t, "addWW_g", addWW_g, arg)
+
+		arg = argWW{a.y, a.x, a.c, a.z1, a.z0}
+		testFunWW(t, "addWW_g symmetric", addWW_g, arg)
+
+		arg = argWW{a.z0, a.x, a.c, a.z1, a.y}
+		testFunWW(t, "subWW_g", subWW_g, arg)
+
+		arg = argWW{a.z0, a.y, a.c, a.z1, a.x}
+		testFunWW(t, "subWW_g symmetric", subWW_g, arg)
+	}
+}
+
+type funVV func(z, x, y []Word) (c Word)
+type argVV struct {
+	z, x, y nat
+	c       Word
+}
+
+var sumVV = []argVV{
+	{},
+	{nat{0}, nat{0}, nat{0}, 0},
+	{nat{1}, nat{1}, nat{0}, 0},
+	{nat{0}, nat{_M}, nat{1}, 1},
+	{nat{80235}, nat{12345}, nat{67890}, 0},
+	{nat{_M - 1}, nat{_M}, nat{_M}, 1},
+	{nat{0, 0, 0, 0}, nat{_M, _M, _M, _M}, nat{1, 0, 0, 0}, 1},
+	{nat{0, 0, 0, _M}, nat{_M, _M, _M, _M - 1}, nat{1, 0, 0, 0}, 0},
+	{nat{0, 0, 0, 0}, nat{_M, 0, _M, 0}, nat{1, _M, 0, _M}, 1},
+}
+
+func testFunVV(t *testing.T, msg string, f funVV, a argVV) {
+	z := make(nat, len(a.z))
+	c := f(z, a.x, a.y)
+	for i, zi := range z {
+		if zi != a.z[i] {
+			t.Errorf("%s%+v\n\tgot z[%d] = %#x; want %#x", msg, a, i, zi, a.z[i])
+			break
+		}
+	}
+	if c != a.c {
+		t.Errorf("%s%+v\n\tgot c = %#x; want %#x", msg, a, c, a.c)
+	}
+}
+
+func TestFunVV(t *testing.T) {
+	for _, a := range sumVV {
+		arg := a
+		testFunVV(t, "addVV_g", addVV_g, arg)
+		testFunVV(t, "addVV", addVV, arg)
+
+		arg = argVV{a.z, a.y, a.x, a.c}
+		testFunVV(t, "addVV_g symmetric", addVV_g, arg)
+		testFunVV(t, "addVV symmetric", addVV, arg)
+
+		arg = argVV{a.x, a.z, a.y, a.c}
+		testFunVV(t, "subVV_g", subVV_g, arg)
+		testFunVV(t, "subVV", subVV, arg)
+
+		arg = argVV{a.y, a.z, a.x, a.c}
+		testFunVV(t, "subVV_g symmetric", subVV_g, arg)
+		testFunVV(t, "subVV symmetric", subVV, arg)
+	}
+}
+
+// Always the same seed for reproducible results.
+var rnd = rand.New(rand.NewSource(0))
+
+func rndW() Word {
+	return Word(rnd.Int63()<<1 | rnd.Int63n(2))
+}
+
+func rndV(n int) []Word {
+	v := make([]Word, n)
+	for i := range v {
+		v[i] = rndW()
+	}
+	return v
+}
+
+func benchmarkFunVV(b *testing.B, f funVV, n int) {
+	x := rndV(n)
+	y := rndV(n)
+	z := make([]Word, n)
+	b.SetBytes(int64(n * _W))
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		f(z, x, y)
+	}
+}
+
+func BenchmarkAddVV_1(b *testing.B)   { benchmarkFunVV(b, addVV, 1) }
+func BenchmarkAddVV_2(b *testing.B)   { benchmarkFunVV(b, addVV, 2) }
+func BenchmarkAddVV_3(b *testing.B)   { benchmarkFunVV(b, addVV, 3) }
+func BenchmarkAddVV_4(b *testing.B)   { benchmarkFunVV(b, addVV, 4) }
+func BenchmarkAddVV_5(b *testing.B)   { benchmarkFunVV(b, addVV, 5) }
+func BenchmarkAddVV_1e1(b *testing.B) { benchmarkFunVV(b, addVV, 1e1) }
+func BenchmarkAddVV_1e2(b *testing.B) { benchmarkFunVV(b, addVV, 1e2) }
+func BenchmarkAddVV_1e3(b *testing.B) { benchmarkFunVV(b, addVV, 1e3) }
+func BenchmarkAddVV_1e4(b *testing.B) { benchmarkFunVV(b, addVV, 1e4) }
+func BenchmarkAddVV_1e5(b *testing.B) { benchmarkFunVV(b, addVV, 1e5) }
+
+type funVW func(z, x []Word, y Word) (c Word)
+type argVW struct {
+	z, x nat
+	y    Word
+	c    Word
+}
+
+var sumVW = []argVW{
+	{},
+	{nil, nil, 2, 2},
+	{nat{0}, nat{0}, 0, 0},
+	{nat{1}, nat{0}, 1, 0},
+	{nat{1}, nat{1}, 0, 0},
+	{nat{0}, nat{_M}, 1, 1},
+	{nat{0, 0, 0, 0}, nat{_M, _M, _M, _M}, 1, 1},
+}
+
+var prodVW = []argVW{
+	{},
+	{nat{0}, nat{0}, 0, 0},
+	{nat{0}, nat{_M}, 0, 0},
+	{nat{0}, nat{0}, _M, 0},
+	{nat{1}, nat{1}, 1, 0},
+	{nat{22793}, nat{991}, 23, 0},
+	{nat{0, 0, 0, 22793}, nat{0, 0, 0, 991}, 23, 0},
+	{nat{0, 0, 0, 0}, nat{7893475, 7395495, 798547395, 68943}, 0, 0},
+	{nat{0, 0, 0, 0}, nat{0, 0, 0, 0}, 894375984, 0},
+	{nat{_M << 1 & _M}, nat{_M}, 1 << 1, _M >> (_W - 1)},
+	{nat{_M << 7 & _M}, nat{_M}, 1 << 7, _M >> (_W - 7)},
+	{nat{_M << 7 & _M, _M, _M, _M}, nat{_M, _M, _M, _M}, 1 << 7, _M >> (_W - 7)},
+}
+
+var lshVW = []argVW{
+	{},
+	{nat{0}, nat{0}, 0, 0},
+	{nat{0}, nat{0}, 1, 0},
+	{nat{0}, nat{0}, 20, 0},
+
+	{nat{_M}, nat{_M}, 0, 0},
+	{nat{_M << 1 & _M}, nat{_M}, 1, 1},
+	{nat{_M << 20 & _M}, nat{_M}, 20, _M >> (_W - 20)},
+
+	{nat{_M, _M, _M}, nat{_M, _M, _M}, 0, 0},
+	{nat{_M << 1 & _M, _M, _M}, nat{_M, _M, _M}, 1, 1},
+	{nat{_M << 20 & _M, _M, _M}, nat{_M, _M, _M}, 20, _M >> (_W - 20)},
+}
+
+var rshVW = []argVW{
+	{},
+	{nat{0}, nat{0}, 0, 0},
+	{nat{0}, nat{0}, 1, 0},
+	{nat{0}, nat{0}, 20, 0},
+
+	{nat{_M}, nat{_M}, 0, 0},
+	{nat{_M >> 1}, nat{_M}, 1, _M << (_W - 1) & _M},
+	{nat{_M >> 20}, nat{_M}, 20, _M << (_W - 20) & _M},
+
+	{nat{_M, _M, _M}, nat{_M, _M, _M}, 0, 0},
+	{nat{_M, _M, _M >> 1}, nat{_M, _M, _M}, 1, _M << (_W - 1) & _M},
+	{nat{_M, _M, _M >> 20}, nat{_M, _M, _M}, 20, _M << (_W - 20) & _M},
+}
+
+func testFunVW(t *testing.T, msg string, f funVW, a argVW) {
+	z := make(nat, len(a.z))
+	c := f(z, a.x, a.y)
+	for i, zi := range z {
+		if zi != a.z[i] {
+			t.Errorf("%s%+v\n\tgot z[%d] = %#x; want %#x", msg, a, i, zi, a.z[i])
+			break
+		}
+	}
+	if c != a.c {
+		t.Errorf("%s%+v\n\tgot c = %#x; want %#x", msg, a, c, a.c)
+	}
+}
+
+func makeFunVW(f func(z, x []Word, s uint) (c Word)) funVW {
+	return func(z, x []Word, s Word) (c Word) {
+		return f(z, x, uint(s))
+	}
+}
+
+func TestFunVW(t *testing.T) {
+	for _, a := range sumVW {
+		arg := a
+		testFunVW(t, "addVW_g", addVW_g, arg)
+		testFunVW(t, "addVW", addVW, arg)
+
+		arg = argVW{a.x, a.z, a.y, a.c}
+		testFunVW(t, "subVW_g", subVW_g, arg)
+		testFunVW(t, "subVW", subVW, arg)
+	}
+
+	shlVW_g := makeFunVW(shlVU_g)
+	shlVW := makeFunVW(shlVU)
+	for _, a := range lshVW {
+		arg := a
+		testFunVW(t, "shlVU_g", shlVW_g, arg)
+		testFunVW(t, "shlVU", shlVW, arg)
+	}
+
+	shrVW_g := makeFunVW(shrVU_g)
+	shrVW := makeFunVW(shrVU)
+	for _, a := range rshVW {
+		arg := a
+		testFunVW(t, "shrVU_g", shrVW_g, arg)
+		testFunVW(t, "shrVU", shrVW, arg)
+	}
+}
+
+func benchmarkFunVW(b *testing.B, f funVW, n int) {
+	x := rndV(n)
+	y := rndW()
+	z := make([]Word, n)
+	b.SetBytes(int64(n * _S))
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		f(z, x, y)
+	}
+}
+
+func BenchmarkAddVW_1(b *testing.B)   { benchmarkFunVW(b, addVW, 1) }
+func BenchmarkAddVW_2(b *testing.B)   { benchmarkFunVW(b, addVW, 2) }
+func BenchmarkAddVW_3(b *testing.B)   { benchmarkFunVW(b, addVW, 3) }
+func BenchmarkAddVW_4(b *testing.B)   { benchmarkFunVW(b, addVW, 4) }
+func BenchmarkAddVW_5(b *testing.B)   { benchmarkFunVW(b, addVW, 5) }
+func BenchmarkAddVW_1e1(b *testing.B) { benchmarkFunVW(b, addVW, 1e1) }
+func BenchmarkAddVW_1e2(b *testing.B) { benchmarkFunVW(b, addVW, 1e2) }
+func BenchmarkAddVW_1e3(b *testing.B) { benchmarkFunVW(b, addVW, 1e3) }
+func BenchmarkAddVW_1e4(b *testing.B) { benchmarkFunVW(b, addVW, 1e4) }
+func BenchmarkAddVW_1e5(b *testing.B) { benchmarkFunVW(b, addVW, 1e5) }
+
+type funVWW func(z, x []Word, y, r Word) (c Word)
+type argVWW struct {
+	z, x nat
+	y, r Word
+	c    Word
+}
+
+var prodVWW = []argVWW{
+	{},
+	{nat{0}, nat{0}, 0, 0, 0},
+	{nat{991}, nat{0}, 0, 991, 0},
+	{nat{0}, nat{_M}, 0, 0, 0},
+	{nat{991}, nat{_M}, 0, 991, 0},
+	{nat{0}, nat{0}, _M, 0, 0},
+	{nat{991}, nat{0}, _M, 991, 0},
+	{nat{1}, nat{1}, 1, 0, 0},
+	{nat{992}, nat{1}, 1, 991, 0},
+	{nat{22793}, nat{991}, 23, 0, 0},
+	{nat{22800}, nat{991}, 23, 7, 0},
+	{nat{0, 0, 0, 22793}, nat{0, 0, 0, 991}, 23, 0, 0},
+	{nat{7, 0, 0, 22793}, nat{0, 0, 0, 991}, 23, 7, 0},
+	{nat{0, 0, 0, 0}, nat{7893475, 7395495, 798547395, 68943}, 0, 0, 0},
+	{nat{991, 0, 0, 0}, nat{7893475, 7395495, 798547395, 68943}, 0, 991, 0},
+	{nat{0, 0, 0, 0}, nat{0, 0, 0, 0}, 894375984, 0, 0},
+	{nat{991, 0, 0, 0}, nat{0, 0, 0, 0}, 894375984, 991, 0},
+	{nat{_M << 1 & _M}, nat{_M}, 1 << 1, 0, _M >> (_W - 1)},
+	{nat{_M<<1&_M + 1}, nat{_M}, 1 << 1, 1, _M >> (_W - 1)},
+	{nat{_M << 7 & _M}, nat{_M}, 1 << 7, 0, _M >> (_W - 7)},
+	{nat{_M<<7&_M + 1<<6}, nat{_M}, 1 << 7, 1 << 6, _M >> (_W - 7)},
+	{nat{_M << 7 & _M, _M, _M, _M}, nat{_M, _M, _M, _M}, 1 << 7, 0, _M >> (_W - 7)},
+	{nat{_M<<7&_M + 1<<6, _M, _M, _M}, nat{_M, _M, _M, _M}, 1 << 7, 1 << 6, _M >> (_W - 7)},
+}
+
+func testFunVWW(t *testing.T, msg string, f funVWW, a argVWW) {
+	z := make(nat, len(a.z))
+	c := f(z, a.x, a.y, a.r)
+	for i, zi := range z {
+		if zi != a.z[i] {
+			t.Errorf("%s%+v\n\tgot z[%d] = %#x; want %#x", msg, a, i, zi, a.z[i])
+			break
+		}
+	}
+	if c != a.c {
+		t.Errorf("%s%+v\n\tgot c = %#x; want %#x", msg, a, c, a.c)
+	}
+}
+
+// TODO(gri) mulAddVWW and divWVW are symmetric operations but
+//           their signature is not symmetric. Try to unify.
+
+type funWVW func(z []Word, xn Word, x []Word, y Word) (r Word)
+type argWVW struct {
+	z  nat
+	xn Word
+	x  nat
+	y  Word
+	r  Word
+}
+
+func testFunWVW(t *testing.T, msg string, f funWVW, a argWVW) {
+	z := make(nat, len(a.z))
+	r := f(z, a.xn, a.x, a.y)
+	for i, zi := range z {
+		if zi != a.z[i] {
+			t.Errorf("%s%+v\n\tgot z[%d] = %#x; want %#x", msg, a, i, zi, a.z[i])
+			break
+		}
+	}
+	if r != a.r {
+		t.Errorf("%s%+v\n\tgot r = %#x; want %#x", msg, a, r, a.r)
+	}
+}
+
+func TestFunVWW(t *testing.T) {
+	for _, a := range prodVWW {
+		arg := a
+		testFunVWW(t, "mulAddVWW_g", mulAddVWW_g, arg)
+		testFunVWW(t, "mulAddVWW", mulAddVWW, arg)
+
+		if a.y != 0 && a.r < a.y {
+			arg := argWVW{a.x, a.c, a.z, a.y, a.r}
+			testFunWVW(t, "divWVW_g", divWVW_g, arg)
+			testFunWVW(t, "divWVW", divWVW, arg)
+		}
+	}
+}
+
+var mulWWTests = []struct {
+	x, y Word
+	q, r Word
+}{
+	{_M, _M, _M - 1, 1},
+	// 32 bit only: {0xc47dfa8c, 50911, 0x98a4, 0x998587f4},
+}
+
+func TestMulWW(t *testing.T) {
+	for i, test := range mulWWTests {
+		q, r := mulWW_g(test.x, test.y)
+		if q != test.q || r != test.r {
+			t.Errorf("#%d got (%x, %x) want (%x, %x)", i, q, r, test.q, test.r)
+		}
+	}
+}
+
+var mulAddWWWTests = []struct {
+	x, y, c Word
+	q, r    Word
+}{
+	// TODO(agl): These will only work on 64-bit platforms.
+	// {15064310297182388543, 0xe7df04d2d35d5d80, 13537600649892366549, 13644450054494335067, 10832252001440893781},
+	// {15064310297182388543, 0xdab2f18048baa68d, 13644450054494335067, 12869334219691522700, 14233854684711418382},
+	{_M, _M, 0, _M - 1, 1},
+	{_M, _M, _M, _M, 0},
+}
+
+func TestMulAddWWW(t *testing.T) {
+	for i, test := range mulAddWWWTests {
+		q, r := mulAddWWW_g(test.x, test.y, test.c)
+		if q != test.q || r != test.r {
+			t.Errorf("#%d got (%x, %x) want (%x, %x)", i, q, r, test.q, test.r)
+		}
+	}
+}
+
+func benchmarkAddMulVVW(b *testing.B, n int) {
+	x := rndV(n)
+	y := rndW()
+	z := make([]Word, n)
+	b.SetBytes(int64(n * _W))
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		addMulVVW(z, x, y)
+	}
+}
+
+func BenchmarkAddMulVVW_1(b *testing.B)   { benchmarkAddMulVVW(b, 1) }
+func BenchmarkAddMulVVW_2(b *testing.B)   { benchmarkAddMulVVW(b, 2) }
+func BenchmarkAddMulVVW_3(b *testing.B)   { benchmarkAddMulVVW(b, 3) }
+func BenchmarkAddMulVVW_4(b *testing.B)   { benchmarkAddMulVVW(b, 4) }
+func BenchmarkAddMulVVW_5(b *testing.B)   { benchmarkAddMulVVW(b, 5) }
+func BenchmarkAddMulVVW_1e1(b *testing.B) { benchmarkAddMulVVW(b, 1e1) }
+func BenchmarkAddMulVVW_1e2(b *testing.B) { benchmarkAddMulVVW(b, 1e2) }
+func BenchmarkAddMulVVW_1e3(b *testing.B) { benchmarkAddMulVVW(b, 1e3) }
+func BenchmarkAddMulVVW_1e4(b *testing.B) { benchmarkAddMulVVW(b, 1e4) }
+func BenchmarkAddMulVVW_1e5(b *testing.B) { benchmarkAddMulVVW(b, 1e5) }
+
+func testWordBitLen(t *testing.T, fname string, f func(Word) int) {
+	for i := 0; i <= _W; i++ {
+		x := Word(1) << uint(i-1) // i == 0 => x == 0
+		n := f(x)
+		if n != i {
+			t.Errorf("got %d; want %d for %s(%#x)", n, i, fname, x)
+		}
+	}
+}
+
+func TestWordBitLen(t *testing.T) {
+	testWordBitLen(t, "bitLen", bitLen)
+	testWordBitLen(t, "bitLen_g", bitLen_g)
+}
+
+// runs b.N iterations of bitLen called on a Word containing (1 << nbits)-1.
+func benchmarkBitLenN(b *testing.B, nbits uint) {
+	testword := Word((uint64(1) << nbits) - 1)
+	for i := 0; i < b.N; i++ {
+		bitLen(testword)
+	}
+}
+
+// Individual bitLen tests.  Numbers chosen to examine both sides
+// of powers-of-two boundaries.
+func BenchmarkBitLen0(b *testing.B)  { benchmarkBitLenN(b, 0) }
+func BenchmarkBitLen1(b *testing.B)  { benchmarkBitLenN(b, 1) }
+func BenchmarkBitLen2(b *testing.B)  { benchmarkBitLenN(b, 2) }
+func BenchmarkBitLen3(b *testing.B)  { benchmarkBitLenN(b, 3) }
+func BenchmarkBitLen4(b *testing.B)  { benchmarkBitLenN(b, 4) }
+func BenchmarkBitLen5(b *testing.B)  { benchmarkBitLenN(b, 5) }
+func BenchmarkBitLen8(b *testing.B)  { benchmarkBitLenN(b, 8) }
+func BenchmarkBitLen9(b *testing.B)  { benchmarkBitLenN(b, 9) }
+func BenchmarkBitLen16(b *testing.B) { benchmarkBitLenN(b, 16) }
+func BenchmarkBitLen17(b *testing.B) { benchmarkBitLenN(b, 17) }
+func BenchmarkBitLen31(b *testing.B) { benchmarkBitLenN(b, 31) }
diff --git a/src/cmd/compile/internal/big/bits_test.go b/src/cmd/compile/internal/big/bits_test.go
new file mode 100644
index 0000000..3ce2422
--- /dev/null
+++ b/src/cmd/compile/internal/big/bits_test.go
@@ -0,0 +1,224 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements the Bits type used for testing Float operations
+// via an independent (albeit slower) representation of floating-point
+// numbers.
+
+package big
+
+import (
+	"fmt"
+	"sort"
+	"testing"
+)
+
+// A Bits value b represents a finite floating-point number x of the form
+//
+//	x = 2**b[0] + 2**b[1] + ... + 2**b[len(b)-1]
+//
+// The order of slice elements is not significant. Negative elements may be
+// used to form fractions. A Bits value is normalized if each b[i] occurs at
+// most once. For instance Bits{0, 0, 1} is not normalized but represents the
+// same floating-point number as Bits{2}, which is normalized. The zero (nil)
+// value of Bits is a ready to use Bits value and represents the value 0.
+type Bits []int
+
+func (x Bits) add(y Bits) Bits {
+	return append(x, y...)
+}
+
+func (x Bits) mul(y Bits) Bits {
+	var p Bits
+	for _, x := range x {
+		for _, y := range y {
+			p = append(p, x+y)
+		}
+	}
+	return p
+}
+
+func TestMulBits(t *testing.T) {
+	for _, test := range []struct {
+		x, y, want Bits
+	}{
+		{nil, nil, nil},
+		{Bits{}, Bits{}, nil},
+		{Bits{0}, Bits{0}, Bits{0}},
+		{Bits{0}, Bits{1}, Bits{1}},
+		{Bits{1}, Bits{1, 2, 3}, Bits{2, 3, 4}},
+		{Bits{-1}, Bits{1}, Bits{0}},
+		{Bits{-10, -1, 0, 1, 10}, Bits{1, 2, 3}, Bits{-9, -8, -7, 0, 1, 2, 1, 2, 3, 2, 3, 4, 11, 12, 13}},
+	} {
+		got := fmt.Sprintf("%v", test.x.mul(test.y))
+		want := fmt.Sprintf("%v", test.want)
+		if got != want {
+			t.Errorf("%v * %v = %s; want %s", test.x, test.y, got, want)
+		}
+
+	}
+}
+
+// norm returns the normalized bits for x: It removes multiple equal entries
+// by treating them as an addition (e.g., Bits{5, 5} => Bits{6}), and it sorts
+// the result list for reproducible results.
+func (x Bits) norm() Bits {
+	m := make(map[int]bool)
+	for _, b := range x {
+		for m[b] {
+			m[b] = false
+			b++
+		}
+		m[b] = true
+	}
+	var z Bits
+	for b, set := range m {
+		if set {
+			z = append(z, b)
+		}
+	}
+	sort.Ints([]int(z))
+	return z
+}
+
+func TestNormBits(t *testing.T) {
+	for _, test := range []struct {
+		x, want Bits
+	}{
+		{nil, nil},
+		{Bits{}, Bits{}},
+		{Bits{0}, Bits{0}},
+		{Bits{0, 0}, Bits{1}},
+		{Bits{3, 1, 1}, Bits{2, 3}},
+		{Bits{10, 9, 8, 7, 6, 6}, Bits{11}},
+	} {
+		got := fmt.Sprintf("%v", test.x.norm())
+		want := fmt.Sprintf("%v", test.want)
+		if got != want {
+			t.Errorf("normBits(%v) = %s; want %s", test.x, got, want)
+		}
+
+	}
+}
+
+// round returns the Float value corresponding to x after rounding x
+// to prec bits according to mode.
+func (x Bits) round(prec uint, mode RoundingMode) *Float {
+	x = x.norm()
+
+	// determine range
+	var min, max int
+	for i, b := range x {
+		if i == 0 || b < min {
+			min = b
+		}
+		if i == 0 || b > max {
+			max = b
+		}
+	}
+	prec0 := uint(max + 1 - min)
+	if prec >= prec0 {
+		return x.Float()
+	}
+	// prec < prec0
+
+	// determine bit 0, rounding, and sticky bit, and result bits z
+	var bit0, rbit, sbit uint
+	var z Bits
+	r := max - int(prec)
+	for _, b := range x {
+		switch {
+		case b == r:
+			rbit = 1
+		case b < r:
+			sbit = 1
+		default:
+			// b > r
+			if b == r+1 {
+				bit0 = 1
+			}
+			z = append(z, b)
+		}
+	}
+
+	// round
+	f := z.Float() // rounded to zero
+	if mode == ToNearestAway {
+		panic("not yet implemented")
+	}
+	if mode == ToNearestEven && rbit == 1 && (sbit == 1 || sbit == 0 && bit0 != 0) || mode == AwayFromZero {
+		// round away from zero
+		f.SetMode(ToZero).SetPrec(prec)
+		f.Add(f, Bits{int(r) + 1}.Float())
+	}
+	return f
+}
+
+// Float returns the *Float z of the smallest possible precision such that
+// z = sum(2**bits[i]), with i = range bits. If multiple bits[i] are equal,
+// they are added: Bits{0, 1, 0}.Float() == 2**0 + 2**1 + 2**0 = 4.
+func (bits Bits) Float() *Float {
+	// handle 0
+	if len(bits) == 0 {
+		return new(Float)
+	}
+	// len(bits) > 0
+
+	// determine lsb exponent
+	var min int
+	for i, b := range bits {
+		if i == 0 || b < min {
+			min = b
+		}
+	}
+
+	// create bit pattern
+	x := NewInt(0)
+	for _, b := range bits {
+		badj := b - min
+		// propagate carry if necessary
+		for x.Bit(badj) != 0 {
+			x.SetBit(x, badj, 0)
+			badj++
+		}
+		x.SetBit(x, badj, 1)
+	}
+
+	// create corresponding float
+	z := new(Float).SetInt(x) // normalized
+	if e := int64(z.exp) + int64(min); MinExp <= e && e <= MaxExp {
+		z.exp = int32(e)
+	} else {
+		// this should never happen for our test cases
+		panic("exponent out of range")
+	}
+	return z
+}
+
+func TestFromBits(t *testing.T) {
+	for _, test := range []struct {
+		bits Bits
+		want string
+	}{
+		// all different bit numbers
+		{nil, "0"},
+		{Bits{0}, "0x.8p1"},
+		{Bits{1}, "0x.8p2"},
+		{Bits{-1}, "0x.8p0"},
+		{Bits{63}, "0x.8p64"},
+		{Bits{33, -30}, "0x.8000000000000001p34"},
+		{Bits{255, 0}, "0x.8000000000000000000000000000000000000000000000000000000000000001p256"},
+
+		// multiple equal bit numbers
+		{Bits{0, 0}, "0x.8p2"},
+		{Bits{0, 0, 0, 0}, "0x.8p3"},
+		{Bits{0, 1, 0}, "0x.8p3"},
+		{append(Bits{2, 1, 0} /* 7 */, Bits{3, 1} /* 10 */ ...), "0x.88p5" /* 17 */},
+	} {
+		f := test.bits.Float()
+		if got := f.Format('p', 0); got != test.want {
+			t.Errorf("setBits(%v) = %s; want %s", test.bits, got, test.want)
+		}
+	}
+}
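
[Editor's note: The exponent-list encoding makes exact Float test values easy to write by hand: each entry contributes 2**b[i] and duplicates simply add. A standalone illustration of the same idea with float64, which is exact for the small exponents used here:

	package main

	import (
		"fmt"
		"math"
	)

	// value sums 2**b for a Bits-style exponent list.
	func value(bits []int) float64 {
		var x float64
		for _, b := range bits {
			x += math.Ldexp(1, b)
		}
		return x
	}

	func main() {
		fmt.Println(value([]int{0, 0, 1})) // 4: Bits{0, 0, 1} normalizes to Bits{2}
		fmt.Println(value([]int{2}))       // 4
		fmt.Println(value([]int{-1, 1}))   // 2.5
	}
]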
diff --git a/src/cmd/compile/internal/big/calibrate_test.go b/src/cmd/compile/internal/big/calibrate_test.go
new file mode 100644
index 0000000..f69ffbf
--- /dev/null
+++ b/src/cmd/compile/internal/big/calibrate_test.go
@@ -0,0 +1,88 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file prints execution times for the Mul benchmark
+// given different Karatsuba thresholds. The result may be
+// used to manually fine-tune the threshold constant. The
+// results are somewhat fragile; use repeated runs to get
+// a clear picture.
+
+// Usage: go test -run=TestCalibrate -calibrate
+
+package big
+
+import (
+	"flag"
+	"fmt"
+	"testing"
+	"time"
+)
+
+var calibrate = flag.Bool("calibrate", false, "run calibration test")
+
+func karatsubaLoad(b *testing.B) {
+	BenchmarkMul(b)
+}
+
+// measureKaratsuba returns the time to run a Karatsuba-relevant benchmark
+// given Karatsuba threshold th.
+func measureKaratsuba(th int) time.Duration {
+	th, karatsubaThreshold = karatsubaThreshold, th
+	res := testing.Benchmark(karatsubaLoad)
+	karatsubaThreshold = th
+	return time.Duration(res.NsPerOp())
+}
+
+func computeThresholds() {
+	fmt.Printf("Multiplication times for varying Karatsuba thresholds\n")
+	fmt.Printf("(run repeatedly for good results)\n")
+
+	// determine Tb, the workload execution time using basic multiplication
+	Tb := measureKaratsuba(1e9) // th == 1e9 => Karatsuba multiplication disabled
+	fmt.Printf("Tb = %10s\n", Tb)
+
+	// thresholds
+	th := 4
+	th1 := -1
+	th2 := -1
+
+	var deltaOld time.Duration
+	for count := -1; count != 0 && th < 128; count-- {
+		// determine Tk, the workload execution time using Karatsuba multiplication
+		Tk := measureKaratsuba(th)
+
+		// improvement over Tb
+		delta := (Tb - Tk) * 100 / Tb
+
+		fmt.Printf("th = %3d  Tk = %10s  %4d%%", th, Tk, delta)
+
+		// determine break-even point
+		if Tk < Tb && th1 < 0 {
+			th1 = th
+			fmt.Print("  break-even point")
+		}
+
+		// determine diminishing return
+		if 0 < delta && delta < deltaOld && th2 < 0 {
+			th2 = th
+			fmt.Print("  diminishing return")
+		}
+		deltaOld = delta
+
+		fmt.Println()
+
+		// trigger counter
+		if th1 >= 0 && th2 >= 0 && count < 0 {
+			count = 10 // this many extra measurements after we got both thresholds
+		}
+
+		th++
+	}
+}
+
+func TestCalibrate(t *testing.T) {
+	if *calibrate {
+		computeThresholds()
+	}
+}
diff --git a/src/cmd/compile/internal/big/decimal.go b/src/cmd/compile/internal/big/decimal.go
new file mode 100644
index 0000000..3d024dc
--- /dev/null
+++ b/src/cmd/compile/internal/big/decimal.go
@@ -0,0 +1,258 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements multi-precision decimal numbers.
+// The implementation is for float-to-decimal conversion only,
+// not for general-purpose use.
+// The only operations are precise conversion from binary to
+// decimal and rounding.
+//
+// The key observation and some code (shr) is borrowed from
+// strconv/decimal.go: conversion of binary fractional values can be done
+// precisely in multi-precision decimal because 2 divides 10 (required for
+// >> of mantissa); but conversion of decimal floating-point values cannot
+// be done precisely in binary representation.
+//
+// In contrast to strconv/decimal.go, only right shift is implemented in
+// decimal format - left shift can be done precisely in binary format.
+
+package big
+
+// A decimal represents a floating-point number in decimal representation.
+// The value of a decimal x is x.mant * 10 ** x.exp with 0.5 <= x.mant < 1,
+// with the most-significant mantissa digit at index 0.
+type decimal struct {
+	mant []byte // mantissa ASCII digits, big-endian
+	exp  int    // exponent, valid if len(mant) > 0
+}
+
+// Maximum shift amount that can be done in one pass without overflow.
+// A Word has _W bits and (1<<maxShift - 1)*10 + 9 must fit into Word.
+const maxShift = _W - 4
+
+// TODO(gri) Since we know the desired decimal precision when converting
+// a floating-point number, we may be able to limit the number of decimal
+// digits that need to be computed by init by providing an additional
+// precision argument and keeping track of when a number was truncated early
+// (equivalent of "sticky bit" in binary rounding).
+
+// TODO(gri) Along the same lines, enforce some limit to shift magnitudes
+// to avoid "infinitely" long running conversions (until we run out of space).
+
+// Init initializes x to the decimal representation of m << shift (for
+// shift >= 0), or m >> -shift (for shift < 0).
+func (x *decimal) init(m nat, shift int) {
+	// special case 0
+	if len(m) == 0 {
+		x.mant = x.mant[:0]
+		return
+	}
+
+	// Optimization: If we need to shift right, first remove any trailing
+	// zero bits from m to reduce shift amount that needs to be done in
+	// decimal format (since that is likely slower).
+	if shift < 0 {
+		ntz := m.trailingZeroBits()
+		s := uint(-shift)
+		if s >= ntz {
+			s = ntz // shift at most ntz bits
+		}
+		m = nat(nil).shr(m, s)
+		shift += int(s)
+	}
+
+	// Do any shift left in binary representation.
+	if shift > 0 {
+		m = nat(nil).shl(m, uint(shift))
+		shift = 0
+	}
+
+	// Convert mantissa into decimal representation.
+	s := m.decimalString() // TODO(gri) avoid string conversion here
+	n := len(s)
+	x.exp = n
+	// Trim trailing zeros; the exponent tracks the decimal point
+	// independently of the number of digits.
+	for n > 0 && s[n-1] == '0' {
+		n--
+	}
+	x.mant = append(x.mant[:0], s[:n]...)
+
+	// Do any (remaining) shift right in decimal representation.
+	if shift < 0 {
+		for shift < -maxShift {
+			shr(x, maxShift)
+			shift += maxShift
+		}
+		shr(x, uint(-shift))
+	}
+}
+
+// Possible optimization: The current implementation of nat.string takes
+// a charset argument. When a right shift is needed, we could provide
+// "\x00\x01...\x09" instead of "012..9" (as in nat.decimalString) and
+// avoid the repeated +'0' and -'0' operations in decimal.shr (and do a
+// single +'0' pass at the end).
+
+// shr implements x >> s, for s <= maxShift.
+func shr(x *decimal, s uint) {
+	// Division by 1<<s using shift-and-subtract algorithm.
+
+	// pick up enough leading digits to cover first shift
+	r := 0 // read index
+	var n Word
+	for n>>s == 0 && r < len(x.mant) {
+		ch := Word(x.mant[r])
+		r++
+		n = n*10 + ch - '0'
+	}
+	if n == 0 {
+		// x == 0; shouldn't get here, but handle anyway
+		x.mant = x.mant[:0]
+		return
+	}
+	for n>>s == 0 {
+		r++
+		n *= 10
+	}
+	x.exp += 1 - r
+
+	// read a digit, write a digit
+	w := 0 // write index
+	for r < len(x.mant) {
+		ch := Word(x.mant[r])
+		r++
+		d := n >> s
+		n -= d << s
+		x.mant[w] = byte(d + '0')
+		w++
+		n = n*10 + ch - '0'
+	}
+
+	// write extra digits that still fit
+	for n > 0 && w < len(x.mant) {
+		d := n >> s
+		n -= d << s
+		x.mant[w] = byte(d + '0')
+		w++
+		n = n * 10
+	}
+	x.mant = x.mant[:w] // the number may be shorter (e.g. 1024 >> 10)
+
+	// append additional digits that didn't fit
+	for n > 0 {
+		d := n >> s
+		n -= d << s
+		x.mant = append(x.mant, byte(d+'0'))
+		n = n * 10
+	}
+
+	trim(x)
+}
+
+func (x *decimal) String() string {
+	if len(x.mant) == 0 {
+		return "0"
+	}
+
+	var buf []byte
+	switch {
+	case x.exp <= 0:
+		// 0.00ddd
+		buf = append(buf, "0."...)
+		buf = appendZeros(buf, -x.exp)
+		buf = append(buf, x.mant...)
+
+	case /* 0 < */ x.exp < len(x.mant):
+		// dd.ddd
+		buf = append(buf, x.mant[:x.exp]...)
+		buf = append(buf, '.')
+		buf = append(buf, x.mant[x.exp:]...)
+
+	default: // len(x.mant) <= x.exp
+		// ddd00
+		buf = append(buf, x.mant...)
+		buf = appendZeros(buf, x.exp-len(x.mant))
+	}
+
+	return string(buf)
+}
+
+// appendZeros appends n 0 digits to buf and returns buf.
+func appendZeros(buf []byte, n int) []byte {
+	for ; n > 0; n-- {
+		buf = append(buf, '0')
+	}
+	return buf
+}
+
+// shouldRoundUp reports whether x should be rounded up
+// when shortened to n digits. n must be a valid index
+// for x.mant.
+func shouldRoundUp(x *decimal, n int) bool {
+	if x.mant[n] == '5' && n+1 == len(x.mant) {
+		// exactly halfway - round to even
+		return n > 0 && (x.mant[n-1]-'0')&1 != 0
+	}
+	// not halfway - digit tells all (x.mant has no trailing zeros)
+	return x.mant[n] >= '5'
+}
+
+// round sets x to (at most) n mantissa digits by rounding it
+// to the nearest even value with n (or fewer) mantissa digits.
+// If n < 0, x remains unchanged.
+func (x *decimal) round(n int) {
+	if n < 0 || n >= len(x.mant) {
+		return // nothing to do
+	}
+
+	if shouldRoundUp(x, n) {
+		x.roundUp(n)
+	} else {
+		x.roundDown(n)
+	}
+}
+
+func (x *decimal) roundUp(n int) {
+	if n < 0 || n >= len(x.mant) {
+		return // nothing to do
+	}
+	// 0 <= n < len(x.mant)
+
+	// find first digit < '9'
+	for n > 0 && x.mant[n-1] >= '9' {
+		n--
+	}
+
+	if n == 0 {
+		// all digits are '9's => round up to '1' and update exponent
+		x.mant[0] = '1' // ok since len(x.mant) > n
+		x.mant = x.mant[:1]
+		x.exp++
+		return
+	}
+
+	// n > 0 && x.mant[n-1] < '9'
+	x.mant[n-1]++
+	x.mant = x.mant[:n]
+	// x already trimmed
+}
+
+func (x *decimal) roundDown(n int) {
+	if n < 0 || n >= len(x.mant) {
+		return // nothing to do
+	}
+	x.mant = x.mant[:n]
+	trim(x)
+}
+
+// trim cuts off any trailing zeros from x's mantissa;
+// they are meaningless for the value of x.
+func trim(x *decimal) {
+	i := len(x.mant)
+	for i > 0 && x.mant[i-1] == '0' {
+		i--
+	}
+	x.mant = x.mant[:i]
+}
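
[Editor's note: The heart of the conversion is shr's shift-and-subtract loop: dividing a decimal digit string by 2**s digit by digit is exact because 2 divides 10. A cut-down sketch of the same loop producing digits only — the real code also adjusts x.exp to track the decimal point, and s is assumed small enough (s <= maxShift) that n cannot overflow:

	package main

	import "fmt"

	// shrDigits returns the decimal digits of mant / 2**s, mirroring the
	// read-a-digit/write-a-digit loop of decimal.shr. Leading zeros are
	// skipped; the caller would account for them in the exponent.
	func shrDigits(mant string, s uint) string {
		var n uint64
		var out []byte
		started := false
		for i := 0; i < len(mant) || n > 0; i++ {
			if i < len(mant) {
				n = n*10 + uint64(mant[i]-'0')
			} else {
				n *= 10
			}
			d := n >> s // next output digit
			n -= d << s // keep the remainder
			if d != 0 {
				started = true
			}
			if started {
				out = append(out, byte(d+'0'))
			}
		}
		return string(out)
	}

	func main() {
		fmt.Println(shrDigits("1", 4)) // "625": 1/16 = 0.0625
	}
]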
diff --git a/src/cmd/compile/internal/big/decimal_test.go b/src/cmd/compile/internal/big/decimal_test.go
new file mode 100644
index 0000000..81e022a
--- /dev/null
+++ b/src/cmd/compile/internal/big/decimal_test.go
@@ -0,0 +1,106 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package big
+
+import "testing"
+
+func TestDecimalString(t *testing.T) {
+	for _, test := range []struct {
+		x    decimal
+		want string
+	}{
+		{want: "0"},
+		{decimal{nil, 1000}, "0"}, // exponent of 0 is ignored
+		{decimal{[]byte("12345"), 0}, "0.12345"},
+		{decimal{[]byte("12345"), -3}, "0.00012345"},
+		{decimal{[]byte("12345"), +3}, "123.45"},
+		{decimal{[]byte("12345"), +10}, "1234500000"},
+	} {
+		if got := test.x.String(); got != test.want {
+			t.Errorf("%v == %s; want %s", test.x, got, test.want)
+		}
+	}
+}
+
+func TestDecimalInit(t *testing.T) {
+	for _, test := range []struct {
+		x     Word
+		shift int
+		want  string
+	}{
+		{0, 0, "0"},
+		{0, -100, "0"},
+		{0, 100, "0"},
+		{1, 0, "1"},
+		{1, 10, "1024"},
+		{1, 100, "1267650600228229401496703205376"},
+		{1, -100, "0.0000000000000000000000000000007888609052210118054117285652827862296732064351090230047702789306640625"},
+		{12345678, 8, "3160493568"},
+		{12345678, -8, "48225.3046875"},
+		{195312, 9, "99999744"},
+		{1953125, 9, "1000000000"},
+	} {
+		var d decimal
+		d.init(nat{test.x}.norm(), test.shift)
+		if got := d.String(); got != test.want {
+			t.Errorf("%d << %d == %s; want %s", test.x, test.shift, got, test.want)
+		}
+	}
+}
+
+func TestDecimalRounding(t *testing.T) {
+	for _, test := range []struct {
+		x              uint64
+		n              int
+		down, even, up string
+	}{
+		{0, 0, "0", "0", "0"},
+		{0, 1, "0", "0", "0"},
+
+		{1, 0, "0", "0", "10"},
+		{5, 0, "0", "0", "10"},
+		{9, 0, "0", "10", "10"},
+
+		{15, 1, "10", "20", "20"},
+		{45, 1, "40", "40", "50"},
+		{95, 1, "90", "100", "100"},
+
+		{12344999, 4, "12340000", "12340000", "12350000"},
+		{12345000, 4, "12340000", "12340000", "12350000"},
+		{12345001, 4, "12340000", "12350000", "12350000"},
+		{23454999, 4, "23450000", "23450000", "23460000"},
+		{23455000, 4, "23450000", "23460000", "23460000"},
+		{23455001, 4, "23450000", "23460000", "23460000"},
+
+		{99994999, 4, "99990000", "99990000", "100000000"},
+		{99995000, 4, "99990000", "100000000", "100000000"},
+		{99999999, 4, "99990000", "100000000", "100000000"},
+
+		{12994999, 4, "12990000", "12990000", "13000000"},
+		{12995000, 4, "12990000", "13000000", "13000000"},
+		{12999999, 4, "12990000", "13000000", "13000000"},
+	} {
+		x := nat(nil).setUint64(test.x)
+
+		var d decimal
+		d.init(x, 0)
+		d.roundDown(test.n)
+		if got := d.String(); got != test.down {
+			t.Errorf("roundDown(%d, %d) = %s; want %s", test.x, test.n, got, test.down)
+		}
+
+		d.init(x, 0)
+		d.round(test.n)
+		if got := d.String(); got != test.even {
+			t.Errorf("round(%d, %d) = %s; want %s", test.x, test.n, got, test.even)
+		}
+
+		d.init(x, 0)
+		d.roundUp(test.n)
+		if got := d.String(); got != test.up {
+			t.Errorf("roundUp(%d, %d) = %s; want %s", test.x, test.n, got, test.up)
+		}
+	}
+}
diff --git a/src/cmd/compile/internal/big/example_test.go b/src/cmd/compile/internal/big/example_test.go
new file mode 100644
index 0000000..078be47
--- /dev/null
+++ b/src/cmd/compile/internal/big/example_test.go
@@ -0,0 +1,51 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package big_test
+
+import (
+	"fmt"
+	"log"
+	"math/big"
+)
+
+func ExampleRat_SetString() {
+	r := new(big.Rat)
+	r.SetString("355/113")
+	fmt.Println(r.FloatString(3))
+	// Output: 3.142
+}
+
+func ExampleInt_SetString() {
+	i := new(big.Int)
+	i.SetString("644", 8) // octal
+	fmt.Println(i)
+	// Output: 420
+}
+
+func ExampleRat_Scan() {
+	// The Scan function is rarely used directly;
+	// the fmt package recognizes it as an implementation of fmt.Scanner.
+	r := new(big.Rat)
+	_, err := fmt.Sscan("1.5000", r)
+	if err != nil {
+		log.Println("error scanning value:", err)
+	} else {
+		fmt.Println(r)
+	}
+	// Output: 3/2
+}
+
+func ExampleInt_Scan() {
+	// The Scan function is rarely used directly;
+	// the fmt package recognizes it as an implementation of fmt.Scanner.
+	i := new(big.Int)
+	_, err := fmt.Sscan("18446744073709551617", i)
+	if err != nil {
+		log.Println("error scanning value:", err)
+	} else {
+		fmt.Println(i)
+	}
+	// Output: 18446744073709551617
+}
diff --git a/src/cmd/compile/internal/big/float.go b/src/cmd/compile/internal/big/float.go
new file mode 100644
index 0000000..ed55e8e
--- /dev/null
+++ b/src/cmd/compile/internal/big/float.go
@@ -0,0 +1,1681 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements multi-precision floating-point numbers.
+// Like in the GNU MPFR library (http://www.mpfr.org/), operands
+// can be of mixed precision. Unlike MPFR, the rounding mode is
+// not specified with each operation, but with each operand. The
+// rounding mode of the result operand determines the rounding
+// mode of an operation. This is a from-scratch implementation.
+
+package big
+
+import (
+	"fmt"
+	"math"
+)
+
+const debugFloat = true // enable for debugging
+
+// A nonzero finite Float represents a multi-precision floating point number
+//
+//   sign × mantissa × 2**exponent
+//
+// with 0.5 <= mantissa < 1.0, and MinExp <= exponent <= MaxExp.
+// A Float may also be zero (+0, -0) or infinite (+Inf, -Inf).
+// All Floats are ordered, and the ordering of two Floats x and y
+// is defined by x.Cmp(y).
+//
+// Each Float value also has a precision, rounding mode, and accuracy.
+// The precision is the maximum number of mantissa bits available to
+// represent the value. The rounding mode specifies how a result should
+// be rounded to fit into the mantissa bits, and accuracy describes the
+// rounding error with respect to the exact result.
+//
+// Unless specified otherwise, all operations (including setters) that
+// specify a *Float variable for the result (usually via the receiver
+// with the exception of MantExp), round the numeric result according
+// to the precision and rounding mode of the result variable.
+//
+// If the provided result precision is 0 (see below), it is set to the
+// precision of the argument with the largest precision value before any
+// rounding takes place, and the rounding mode remains unchanged. Thus,
+// uninitialized Floats provided as result arguments will have their
+// precision set to a reasonable value determined by the operands, and
+// their mode will be the zero value for RoundingMode (ToNearestEven).
+//
+// By setting the desired precision to 24 or 53 and using matching rounding
+// mode (typically ToNearestEven), Float operations produce the same results
+// as the corresponding float32 or float64 IEEE-754 arithmetic for operands
+// that correspond to normal (i.e., not denormal) float32 or float64 numbers.
+// Exponent underflow and overflow lead to a 0 or an Infinity at different
+// values than IEEE-754 does, because Float exponents have a much larger range.
+//
+// The zero (uninitialized) value for a Float is ready to use and represents
+// the number +0.0 exactly, with precision 0 and rounding mode ToNearestEven.
+//
+type Float struct {
+	prec uint32
+	mode RoundingMode
+	acc  Accuracy
+	form form
+	neg  bool
+	mant nat
+	exp  int32
+}
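
[Editor's note: Since this package is a copy of math/big, the semantics described above can be exercised with the standard library directly: precision and rounding mode are properties of the result operand, not of the operation. A small sketch; the printed forms are illustrative:

	package main

	import (
		"fmt"
		"math/big"
	)

	func main() {
		x := new(big.Float).SetInt64(1)
		y := new(big.Float).SetInt64(3)

		// The result operand carries the precision and rounding mode:
		// 1/3 rounded toward zero to 8 mantissa bits.
		z := new(big.Float).SetPrec(8).SetMode(big.ToZero)
		z.Quo(x, y)
		fmt.Println(z, z.Acc()) // an 8-bit approximation of 1/3, accuracy Below
	}
]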
+
+// Float operations that would lead to a NaN under IEEE-754 rules cause
+// a run-time panic of ErrNaN type.
+type ErrNaN struct {
+	msg string
+}
+
+// NewFloat allocates and returns a new Float set to x,
+// with precision 53 and rounding mode ToNearestEven.
+// NewFloat panics with ErrNaN if x is a NaN.
+func NewFloat(x float64) *Float {
+	if math.IsNaN(x) {
+		panic(ErrNaN{"NewFloat(NaN)"})
+	}
+	return new(Float).SetFloat64(x)
+}
+
+// Exponent and precision limits.
+const (
+	MaxExp  = math.MaxInt32  // largest supported exponent
+	MinExp  = math.MinInt32  // smallest supported exponent
+	MaxPrec = math.MaxUint32 // largest (theoretically) supported precision; likely memory-limited
+)
+
+// Internal representation: The mantissa bits x.mant of a nonzero finite
+// Float x are stored in a nat slice long enough to hold up to x.prec bits;
+// the slice may (but doesn't have to) be shorter if the mantissa contains
+// trailing 0 bits. x.mant is normalized if the msb of x.mant == 1 (i.e.,
+// the msb is shifted all the way "to the left"). Thus, if the mantissa has
+// trailing 0 bits or x.prec is not a multiple of the Word size _W,
+// x.mant[0] has trailing zero bits. The msb of the mantissa corresponds
+// to the value 0.5; the exponent x.exp shifts the binary point as needed.
+//
+// A zero or non-finite Float x ignores x.mant and x.exp.
+//
+// x                 form      neg      mant         exp
+// ----------------------------------------------------------
+// ±0                zero      sign     -            -
+// 0 < |x| < +Inf    finite    sign     mantissa     exponent
+// ±Inf              inf       sign     -            -
+
+// A form value describes the internal representation.
+type form byte
+
+// The form value order is relevant - do not change!
+const (
+	zero form = iota
+	finite
+	inf
+)
+
+// RoundingMode determines how a Float value is rounded to the
+// desired precision. Rounding may change the Float value; the
+// rounding error is described by the Float's Accuracy.
+type RoundingMode byte
+
+// The following rounding modes are supported.
+const (
+	ToNearestEven RoundingMode = iota // == IEEE 754-2008 roundTiesToEven
+	ToNearestAway                     // == IEEE 754-2008 roundTiesToAway
+	ToZero                            // == IEEE 754-2008 roundTowardZero
+	AwayFromZero                      // no IEEE 754-2008 equivalent
+	ToNegativeInf                     // == IEEE 754-2008 roundTowardNegative
+	ToPositiveInf                     // == IEEE 754-2008 roundTowardPositive
+)
+
+//go:generate stringer -type=RoundingMode
+
+// Accuracy describes the rounding error produced by the most recent
+// operation that generated a Float value, relative to the exact value.
+type Accuracy int8
+
+// Constants describing the Accuracy of a Float.
+const (
+	Below Accuracy = -1
+	Exact Accuracy = 0
+	Above Accuracy = +1
+)
+
+//go:generate stringer -type=Accuracy
+
+// SetPrec sets z's precision to prec and returns the (possibly) rounded
+// value of z. Rounding occurs according to z's rounding mode if the mantissa
+// cannot be represented in prec bits without loss of precision.
+// SetPrec(0) maps all finite values to ±0; infinite values remain unchanged.
+// If prec > MaxPrec, it is set to MaxPrec.
+func (z *Float) SetPrec(prec uint) *Float {
+	z.acc = Exact // optimistically assume no rounding is needed
+
+	// special case
+	if prec == 0 {
+		z.prec = 0
+		if z.form == finite {
+			// truncate z to 0
+			z.acc = makeAcc(z.neg)
+			z.form = zero
+		}
+		return z
+	}
+
+	// general case
+	if prec > MaxPrec {
+		prec = MaxPrec
+	}
+	old := z.prec
+	z.prec = uint32(prec)
+	if z.prec < old {
+		z.round(0)
+	}
+	return z
+}
+
+func makeAcc(above bool) Accuracy {
+	if above {
+		return Above
+	}
+	return Below
+}
+
+// SetMode sets z's rounding mode to mode and returns an exact z.
+// z remains unchanged otherwise.
+// z.SetMode(z.Mode()) is a cheap way to set z's accuracy to Exact.
+func (z *Float) SetMode(mode RoundingMode) *Float {
+	z.mode = mode
+	z.acc = Exact
+	return z
+}
+
+// Prec returns the mantissa precision of x in bits.
+// The result may be 0 for |x| == 0 and |x| == Inf.
+func (x *Float) Prec() uint {
+	return uint(x.prec)
+}
+
+// MinPrec returns the minimum precision required to represent x exactly
+// (i.e., the smallest prec before x.SetPrec(prec) would start rounding x).
+// The result is 0 for |x| == 0 and |x| == Inf.
+func (x *Float) MinPrec() uint {
+	if x.form != finite {
+		return 0
+	}
+	return uint(len(x.mant))*_W - x.mant.trailingZeroBits()
+}
+
+// Mode returns the rounding mode of x.
+func (x *Float) Mode() RoundingMode {
+	return x.mode
+}
+
+// Acc returns the accuracy of x produced by the most recent operation.
+func (x *Float) Acc() Accuracy {
+	return x.acc
+}
+
+// Sign returns:
+//
+//	-1 if x <   0
+//	 0 if x is ±0
+//	+1 if x >   0
+//
+func (x *Float) Sign() int {
+	if debugFloat {
+		x.validate()
+	}
+	if x.form == zero {
+		return 0
+	}
+	if x.neg {
+		return -1
+	}
+	return 1
+}
+
+// MantExp breaks x into its mantissa and exponent components
+// and returns the exponent. If a non-nil mant argument is
+// provided its value is set to the mantissa of x, with the
+// same precision and rounding mode as x. The components
+// satisfy x == mant × 2**exp, with 0.5 <= |mant| < 1.0.
+// Calling MantExp with a nil argument is an efficient way to
+// get the exponent of the receiver.
+//
+// Special cases are:
+//
+//	(  ±0).MantExp(mant) = 0, with mant set to   ±0
+//	(±Inf).MantExp(mant) = 0, with mant set to ±Inf
+//
+// x and mant may be the same in which case x is set to its
+// mantissa value.
+func (x *Float) MantExp(mant *Float) (exp int) {
+	if debugFloat {
+		x.validate()
+	}
+	if x.form == finite {
+		exp = int(x.exp)
+	}
+	if mant != nil {
+		mant.Copy(x)
+		if mant.form == finite {
+			mant.exp = 0
+		}
+	}
+	return
+}
+
+func (z *Float) setExpAndRound(exp int64, sbit uint) {
+	if exp < MinExp {
+		// underflow
+		z.acc = makeAcc(z.neg)
+		z.form = zero
+		return
+	}
+
+	if exp > MaxExp {
+		// overflow
+		z.acc = makeAcc(!z.neg)
+		z.form = inf
+		return
+	}
+
+	z.form = finite
+	z.exp = int32(exp)
+	z.round(sbit)
+}
+
+// SetMantExp sets z to mant × 2**exp and returns z.
+// The result z has the same precision and rounding mode
+// as mant. SetMantExp is an inverse of MantExp but does
+// not require 0.5 <= |mant| < 1.0. Specifically:
+//
+//	mant := new(Float)
+//	new(Float).SetMantExp(mant, x.MantExp(mant)).Cmp(x).Eql() is true
+//
+// Special cases are:
+//
+//	z.SetMantExp(  ±0, exp) =   ±0
+//	z.SetMantExp(±Inf, exp) = ±Inf
+//
+// z and mant may be the same in which case z's exponent
+// is set to exp.
+func (z *Float) SetMantExp(mant *Float, exp int) *Float {
+	if debugFloat {
+		z.validate()
+		mant.validate()
+	}
+	z.Copy(mant)
+	if z.form != finite {
+		return z
+	}
+	z.setExpAndRound(int64(z.exp)+int64(exp), 0)
+	return z
+}
+
+// Signbit returns true if x is negative or negative zero.
+func (x *Float) Signbit() bool {
+	return x.neg
+}
+
+// IsInf reports whether x is +Inf or -Inf.
+func (x *Float) IsInf() bool {
+	return x.form == inf
+}
+
+// IsInt reports whether x is an integer.
+// ±Inf values are not integers.
+func (x *Float) IsInt() bool {
+	if debugFloat {
+		x.validate()
+	}
+	// special cases
+	if x.form != finite {
+		return x.form == zero
+	}
+	// x.form == finite
+	if x.exp <= 0 {
+		return false
+	}
+	// x.exp > 0
+	return x.prec <= uint32(x.exp) || x.MinPrec() <= uint(x.exp) // not enough bits for fractional mantissa
+}
+
+// debugging support
+func (x *Float) validate() {
+	if !debugFloat {
+		// avoid performance bugs
+		panic("validate called but debugFloat is not set")
+	}
+	if x.form != finite {
+		return
+	}
+	m := len(x.mant)
+	if m == 0 {
+		panic("nonzero finite number with empty mantissa")
+	}
+	const msb = 1 << (_W - 1)
+	if x.mant[m-1]&msb == 0 {
+		panic(fmt.Sprintf("msb not set in last word %#x of %s", x.mant[m-1], x.Format('p', 0)))
+	}
+	if x.prec == 0 {
+		panic("zero precision finite number")
+	}
+}
+
+// round rounds z according to z.mode to z.prec bits and sets z.acc accordingly.
+// sbit must be 0 or 1 and summarizes any "sticky bit" information one might
+// have before calling round. z's mantissa must be normalized (with the msb set)
+// or empty.
+//
+// CAUTION: The rounding modes ToNegativeInf, ToPositiveInf are affected by the
+// sign of z. For correct rounding, the sign of z must be set correctly before
+// calling round.
+func (z *Float) round(sbit uint) {
+	if debugFloat {
+		z.validate()
+		if z.form > finite {
+			panic(fmt.Sprintf("round called for non-finite value %s", z))
+		}
+	}
+	// z.form <= finite
+
+	z.acc = Exact
+	if z.form == zero {
+		return
+	}
+	// z.form == finite && len(z.mant) > 0
+	// m > 0 implies z.prec > 0 (checked by validate)
+
+	m := uint32(len(z.mant)) // present mantissa length in words
+	bits := m * _W           // present mantissa bits
+	if bits <= z.prec {
+		// mantissa fits => nothing to do
+		return
+	}
+	// bits > z.prec
+
+	n := (z.prec + (_W - 1)) / _W // mantissa length in words for desired precision
+
+	// Rounding is based on two bits: the rounding bit (rbit) and the
+	// sticky bit (sbit). The rbit is the bit immediately before the
+	// z.prec leading mantissa bits (the "0.5"). The sbit is set if any
+	// of the bits before the rbit are set (the "0.25", "0.125", etc.):
+	//
+	//   rbit  sbit  => "fractional part"
+	//
+	//   0     0        == 0
+	//   0     1        >  0  , < 0.5
+	//   1     0        == 0.5
+	//   1     1        >  0.5, < 1.0
+
+	// bits > z.prec: mantissa too large => round
+	r := uint(bits - z.prec - 1) // rounding bit position; r >= 0
+	rbit := z.mant.bit(r)        // rounding bit
+	if sbit == 0 {
+		sbit = z.mant.sticky(r)
+	}
+	if debugFloat && sbit&^1 != 0 {
+		panic(fmt.Sprintf("invalid sbit %#x", sbit))
+	}
+
+	// convert ToXInf rounding modes
+	mode := z.mode
+	switch mode {
+	case ToNegativeInf:
+		mode = ToZero
+		if z.neg {
+			mode = AwayFromZero
+		}
+	case ToPositiveInf:
+		mode = AwayFromZero
+		if z.neg {
+			mode = ToZero
+		}
+	}
+
+	// cut off extra words
+	if m > n {
+		copy(z.mant, z.mant[m-n:]) // move n last words to front
+		z.mant = z.mant[:n]
+	}
+
+	// determine number of trailing zero bits t
+	t := n*_W - z.prec // 0 <= t < _W
+	lsb := Word(1) << t
+
+	// make rounding decision
+	// TODO(gri) This can be simplified (see Bits.round in bits_test.go).
+	switch mode {
+	case ToZero:
+		// nothing to do
+	case ToNearestEven, ToNearestAway:
+		if rbit == 0 {
+			// rounding bits == 0b0x
+			mode = ToZero
+		} else if sbit == 1 {
+			// rounding bits == 0b11
+			mode = AwayFromZero
+		}
+	case AwayFromZero:
+		if rbit|sbit == 0 {
+			mode = ToZero
+		}
+	default:
+		// ToXInf modes have been converted to ToZero or AwayFromZero
+		panic("unreachable")
+	}
+
+	// round and determine accuracy
+	switch mode {
+	case ToZero:
+		if rbit|sbit != 0 {
+			z.acc = Below
+		}
+
+	case ToNearestEven, ToNearestAway:
+		if debugFloat && rbit != 1 {
+			panic("internal error in rounding")
+		}
+		if mode == ToNearestEven && sbit == 0 && z.mant[0]&lsb == 0 {
+			z.acc = Below
+			break
+		}
+		// mode == ToNearestAway || sbit == 1 || z.mant[0]&lsb != 0
+		fallthrough
+
+	case AwayFromZero:
+		// add 1 to mantissa
+		if addVW(z.mant, z.mant, lsb) != 0 {
+			// overflow => shift mantissa right by 1 and add msb
+			shrVU(z.mant, z.mant, 1)
+			z.mant[n-1] |= 1 << (_W - 1)
+			// adjust exponent
+			if z.exp < MaxExp {
+				z.exp++
+			} else {
+				// exponent overflow
+				z.acc = makeAcc(!z.neg)
+				z.form = inf
+				return
+			}
+		}
+		z.acc = Above
+	}
+
+	// zero out trailing bits in least-significant word
+	z.mant[0] &^= lsb - 1
+
+	// update accuracy
+	if z.acc != Exact && z.neg {
+		z.acc = -z.acc
+	}
+
+	if debugFloat {
+		z.validate()
+	}
+
+	return
+}
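
[Editor's note: The rbit/sbit table above fully determines the rounding decision; the rest of round is mechanics. A toy version that rounds an 8-bit value to its top 4 bits with ToNearestEven shows the interesting cases — mantissa overflow, which round handles by shifting right and bumping the exponent, is ignored here:

	package main

	import "fmt"

	// roundTo4 keeps the top 4 bits of x, rounding to nearest with
	// ties to even, using the same rbit/sbit scheme as Float.round.
	func roundTo4(x uint8) uint8 {
		rbit := (x >> 3) & 1 // first discarded bit
		var sbit uint8
		if x&0x07 != 0 { // any bit below the rounding bit
			sbit = 1
		}
		z := x &^ 0x0f // truncate (the ToZero result)
		// round up if rounding bits are 0b11, or 0b10 with an odd kept lsb
		if rbit == 1 && (sbit == 1 || z&0x10 != 0) {
			z += 0x10
		}
		return z
	}

	func main() {
		fmt.Printf("%#x %#x %#x\n", roundTo4(0x57), roundTo4(0x58), roundTo4(0x48))
		// 0x50 (rbit 0: truncate), 0x60 (tie, round to even), 0x40 (tie, already even)
	}
]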
+
+// nlz returns the number of leading zero bits in x.
+func nlz(x Word) uint {
+	return _W - uint(bitLen(x))
+}
+
+func nlz64(x uint64) uint {
+	// TODO(gri) this can be done more nicely
+	if _W == 32 {
+		if x>>32 == 0 {
+			return 32 + nlz(Word(x))
+		}
+		return nlz(Word(x >> 32))
+	}
+	if _W == 64 {
+		return nlz(Word(x))
+	}
+	panic("unreachable")
+}
+
+func (z *Float) setBits64(neg bool, x uint64) *Float {
+	if z.prec == 0 {
+		z.prec = 64
+	}
+	z.acc = Exact
+	z.neg = neg
+	if x == 0 {
+		z.form = zero
+		return z
+	}
+	// x != 0
+	z.form = finite
+	s := nlz64(x)
+	z.mant = z.mant.setUint64(x << s)
+	z.exp = int32(64 - s) // always fits
+	if z.prec < 64 {
+		z.round(0)
+	}
+	return z
+}
+
+// SetUint64 sets z to the (possibly rounded) value of x and returns z.
+// If z's precision is 0, it is changed to 64 (and rounding will have
+// no effect).
+func (z *Float) SetUint64(x uint64) *Float {
+	return z.setBits64(false, x)
+}
+
+// SetInt64 sets z to the (possibly rounded) value of x and returns z.
+// If z's precision is 0, it is changed to 64 (and rounding will have
+// no effect).
+func (z *Float) SetInt64(x int64) *Float {
+	u := x
+	if u < 0 {
+		u = -u
+	}
+	// We cannot simply call z.SetUint64(uint64(u)) and change
+	// the sign afterwards because the sign affects rounding.
+	return z.setBits64(x < 0, uint64(u))
+}
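+
+// A sketch of why the sign must be set before rounding (see the comment
+// in SetInt64 above); illustrative only, using the API in this file.
+// Under ToNegativeInf, +3 truncates to 2 at 1 bit of precision, while
+// -3 rounds away from zero to -4.
+func exampleSignedRounding() {
+	p := new(Float).SetPrec(1).SetMode(ToNegativeInf).SetInt64(3)
+	n := new(Float).SetPrec(1).SetMode(ToNegativeInf).SetInt64(-3)
+	pi, _ := p.Int64()
+	ni, _ := n.Int64()
+	fmt.Println(pi, ni) // 2 -4
+}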
+
+// SetFloat64 sets z to the (possibly rounded) value of x and returns z.
+// If z's precision is 0, it is changed to 53 (and rounding will have
+// no effect). SetFloat64 panics with ErrNaN if x is a NaN.
+func (z *Float) SetFloat64(x float64) *Float {
+	if z.prec == 0 {
+		z.prec = 53
+	}
+	if math.IsNaN(x) {
+		panic(ErrNaN{"Float.SetFloat64(NaN)"})
+	}
+	z.acc = Exact
+	z.neg = math.Signbit(x) // handle -0, -Inf correctly
+	if x == 0 {
+		z.form = zero
+		return z
+	}
+	if math.IsInf(x, 0) {
+		z.form = inf
+		return z
+	}
+	// normalized x != 0
+	z.form = finite
+	fmant, exp := math.Frexp(x) // get normalized mantissa
+	z.mant = z.mant.setUint64(1<<63 | math.Float64bits(fmant)<<11)
+	z.exp = int32(exp) // always fits
+	if z.prec < 53 {
+		z.round(0)
+	}
+	return z
+}
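+
+// A minimal sketch, assuming the exported API above: with a precision of
+// 24 bits set beforehand, SetFloat64 rounds, and for normal values the
+// result matches a float64-to-float32 conversion (TestFloatRound24 below
+// checks this near a rounding boundary).
+func exampleSetFloat64Rounding() {
+	f64 := 1.0 / 3
+	z := new(Float).SetPrec(24).SetFloat64(f64)
+	f, _ := z.Float32()
+	fmt.Println(f == float32(f64)) // true
+}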
+
+// fnorm normalizes mantissa m by shifting it to the left
+// such that the msb of the most-significant word (msw) is 1.
+// It returns the shift amount. It assumes that len(m) != 0.
+func fnorm(m nat) int64 {
+	if debugFloat && (len(m) == 0 || m[len(m)-1] == 0) {
+		panic("msw of mantissa is 0")
+	}
+	s := nlz(m[len(m)-1])
+	if s > 0 {
+		c := shlVU(m, m, s)
+		if debugFloat && c != 0 {
+			panic("nlz or shlVU incorrect")
+		}
+	}
+	return int64(s)
+}
+
+// SetInt sets z to the (possibly rounded) value of x and returns z.
+// If z's precision is 0, it is changed to the larger of x.BitLen()
+// or 64 (and rounding will have no effect).
+func (z *Float) SetInt(x *Int) *Float {
+	// TODO(gri) can be more efficient if z.prec > 0
+	// but small compared to the size of x, or if there
+	// are many trailing 0's.
+	bits := uint32(x.BitLen())
+	if z.prec == 0 {
+		z.prec = umax32(bits, 64)
+	}
+	z.acc = Exact
+	z.neg = x.neg
+	if len(x.abs) == 0 {
+		z.form = zero
+		return z
+	}
+	// x != 0
+	z.mant = z.mant.set(x.abs)
+	fnorm(z.mant)
+	z.setExpAndRound(int64(bits), 0)
+	return z
+}
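+
+// Illustrative sketch: with z.prec == 0, SetInt chooses a precision large
+// enough to hold x exactly, per the doc comment above.
+func exampleSetIntPrec() {
+	var i Int
+	i.SetString("123456789012345678901234567890", 10)
+	var f Float
+	f.SetInt(&i)
+	fmt.Println(f.Prec() >= uint(i.BitLen()), f.Acc()) // true Exact
+}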
+
+// SetRat sets z to the (possibly rounded) value of x and returns z.
+// If z's precision is 0, it is changed to the largest of a.BitLen(),
+// b.BitLen(), or 64, where x = a/b.
+func (z *Float) SetRat(x *Rat) *Float {
+	if x.IsInt() {
+		return z.SetInt(x.Num())
+	}
+	var a, b Float
+	a.SetInt(x.Num())
+	b.SetInt(x.Denom())
+	if z.prec == 0 {
+		z.prec = umax32(a.prec, b.prec)
+	}
+	return z.Quo(&a, &b)
+}
+
+// SetInf sets z to the infinite Float -Inf if signbit is
+// set, or +Inf if signbit is not set, and returns z. The
+// precision of z is unchanged and the result is always
+// Exact.
+func (z *Float) SetInf(signbit bool) *Float {
+	z.acc = Exact
+	z.form = inf
+	z.neg = signbit
+	return z
+}
+
+// Set sets z to the (possibly rounded) value of x and returns z.
+// If z's precision is 0, it is changed to the precision of x
+// before setting z (and rounding will have no effect).
+// Rounding is performed according to z's precision and rounding
+// mode; and z's accuracy reports the result error relative to the
+// exact (not rounded) result.
+func (z *Float) Set(x *Float) *Float {
+	if debugFloat {
+		x.validate()
+	}
+	z.acc = Exact
+	if z != x {
+		z.form = x.form
+		z.neg = x.neg
+		if x.form == finite {
+			z.exp = x.exp
+			z.mant = z.mant.set(x.mant)
+		}
+		if z.prec == 0 {
+			z.prec = x.prec
+		} else if z.prec < x.prec {
+			z.round(0)
+		}
+	}
+	return z
+}
+
+// Copy sets z to x, with the same precision, rounding mode, and
+// accuracy as x, and returns z. x is not changed even if z and
+// x are the same.
+func (z *Float) Copy(x *Float) *Float {
+	if debugFloat {
+		x.validate()
+	}
+	if z != x {
+		z.prec = x.prec
+		z.mode = x.mode
+		z.acc = x.acc
+		z.form = x.form
+		z.neg = x.neg
+		if z.form == finite {
+			z.mant = z.mant.set(x.mant)
+			z.exp = x.exp
+		}
+	}
+	return z
+}
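+
+// A sketch contrasting Set and Copy (illustration only): Set rounds to
+// z's existing precision, while Copy transfers x verbatim, precision,
+// mode, and accuracy included.
+func exampleSetVsCopy() {
+	x := new(Float).SetPrec(64).SetInt64(0x1ffff) // 17 bits
+	z := new(Float).SetPrec(8)
+	z.Set(x) // rounded to 8 bits; the value increases
+	w := new(Float).Copy(x)
+	fmt.Println(z.Acc(), w.Prec()) // Above 64
+}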
+
+func high32(x nat) uint32 {
+	// TODO(gri) This can be done more efficiently on 32bit platforms.
+	return uint32(high64(x) >> 32)
+}
+
+func high64(x nat) uint64 {
+	i := len(x)
+	if i == 0 {
+		return 0
+	}
+	// i > 0
+	v := uint64(x[i-1])
+	if _W == 32 {
+		v <<= 32
+		if i > 1 {
+			v |= uint64(x[i-2])
+		}
+	}
+	return v
+}
+
+// Uint64 returns the unsigned integer resulting from truncating x
+// towards zero. If 0 <= x <= math.MaxUint64, the result is Exact
+// if x is an integer and Below otherwise.
+// The result is (0, Above) for x < 0, and (math.MaxUint64, Below)
+// for x > math.MaxUint64.
+func (x *Float) Uint64() (uint64, Accuracy) {
+	if debugFloat {
+		x.validate()
+	}
+
+	switch x.form {
+	case finite:
+		if x.neg {
+			return 0, Above
+		}
+		// 0 < x < +Inf
+		if x.exp <= 0 {
+			// 0 < x < 1
+			return 0, Below
+		}
+		// 1 <= x < Inf
+		if x.exp <= 64 {
+			// u = trunc(x) fits into a uint64
+			u := high64(x.mant) >> (64 - uint32(x.exp))
+			if x.MinPrec() <= 64 {
+				return u, Exact
+			}
+			return u, Below // x truncated
+		}
+		// x too large
+		return math.MaxUint64, Below
+
+	case zero:
+		return 0, Exact
+
+	case inf:
+		if x.neg {
+			return 0, Above
+		}
+		return math.MaxUint64, Below
+	}
+
+	panic("unreachable")
+}
+
+// Int64 returns the integer resulting from truncating x towards zero.
+// If math.MinInt64 <= x <= math.MaxInt64, the result is Exact if x is
+// an integer, and Above (x < 0) or Below (x > 0) otherwise.
+// The result is (math.MinInt64, Above) for x < math.MinInt64,
+// and (math.MaxInt64, Below) for x > math.MaxInt64.
+func (x *Float) Int64() (int64, Accuracy) {
+	if debugFloat {
+		x.validate()
+	}
+
+	switch x.form {
+	case finite:
+		// 0 < |x| < +Inf
+		acc := makeAcc(x.neg)
+		if x.exp <= 0 {
+			// 0 < |x| < 1
+			return 0, acc
+		}
+		// x.exp > 0
+
+		// 1 <= |x| < +Inf
+		if x.exp <= 63 {
+			// i = trunc(x) fits into an int64 (excluding math.MinInt64)
+			i := int64(high64(x.mant) >> (64 - uint32(x.exp)))
+			if x.neg {
+				i = -i
+			}
+			if x.MinPrec() <= uint(x.exp) {
+				return i, Exact
+			}
+			return i, acc // x truncated
+		}
+		if x.neg {
+			// check for special case x == math.MinInt64 (i.e., x == -(0.5 << 64))
+			if x.exp == 64 && x.MinPrec() == 1 {
+				acc = Exact
+			}
+			return math.MinInt64, acc
+		}
+		// x too large
+		return math.MaxInt64, Below
+
+	case zero:
+		return 0, Exact
+
+	case inf:
+		if x.neg {
+			return math.MinInt64, Above
+		}
+		return math.MaxInt64, Below
+	}
+
+	panic("unreachable")
+}
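+
+// A small sketch of the documented accuracy results: for both integer
+// conversions, the accuracy reports the direction of truncation relative
+// to the exact value.
+func exampleIntegerConversions() {
+	x := new(Float).SetFloat64(-1.5)
+	u, uacc := x.Uint64()
+	i, iacc := x.Int64()
+	fmt.Println(u, uacc, i, iacc) // 0 Above -1 Above
+}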
+
+// TODO(gri) Float32 and Float64 are very similar internally but for the
+// floatxx parameters and some conversions. Should factor out shared code.
+
+// Float32 returns the float32 value nearest to x. If x is too small to be
+// represented by a float32 (|x| < math.SmallestNonzeroFloat32), the result
+// is (0, Below) or (-0, Above), respectively, depending on the sign of x.
+// If x is too large to be represented by a float32 (|x| > math.MaxFloat32),
+// the result is (+Inf, Above) or (-Inf, Below), depending on the sign of x.
+func (x *Float) Float32() (float32, Accuracy) {
+	if debugFloat {
+		x.validate()
+	}
+
+	switch x.form {
+	case finite:
+		// 0 < |x| < +Inf
+
+		const (
+			fbits = 32                //        float size
+			mbits = 23                //        mantissa size (excluding implicit msb)
+			ebits = fbits - mbits - 1 //     8  exponent size
+			bias  = 1<<(ebits-1) - 1  //   127  exponent bias
+			dmin  = 1 - bias - mbits  //  -149  smallest unbiased exponent (denormal)
+			emin  = 1 - bias          //  -126  smallest unbiased exponent (normal)
+			emax  = bias              //   127  largest unbiased exponent (normal)
+		)
+
+		// Float mantissae m have an explicit msb and are in the range 0.5 <= m < 1.0.
+		// floatxx mantissae have an implicit msb and are in the range 1.0 <= m < 2.0.
+		// For a given mantissa m, we need to add 1 to a floatxx exponent to get the
+		// corresponding Float exponent.
+		// (see also implementation of math.Ldexp for similar code)
+
+		if x.exp < dmin+1 {
+			// underflow
+			if x.neg {
+				var z float32
+				return -z, Above
+			}
+			return 0.0, Below
+		}
+		// x.exp >= dmin+1
+
+		var r Float
+		r.prec = mbits + 1 // +1 for implicit msb
+		if x.exp < emin+1 {
+			// denormal number - round to fewer bits
+			r.prec = uint32(x.exp - dmin)
+		}
+		r.Set(x)
+
+		// Rounding may have caused r to overflow to ±Inf
+		// (rounding never causes underflows to 0).
+		if r.form == inf {
+			r.exp = emax + 2 // cause overflow below
+		}
+
+		if r.exp > emax+1 {
+			// overflow
+			if x.neg {
+				return float32(math.Inf(-1)), Below
+			}
+			return float32(math.Inf(+1)), Above
+		}
+		// dmin+1 <= r.exp <= emax+1
+
+		var s uint32
+		if r.neg {
+			s = 1 << (fbits - 1)
+		}
+
+		m := high32(r.mant) >> ebits & (1<<mbits - 1) // cut off msb (implicit 1 bit)
+
+		// Rounding may have caused a denormal number to
+		// become normal. Check again.
+		c := float32(1.0)
+		if r.exp < emin+1 {
+			// denormal number
+			r.exp += mbits
+			c = 1.0 / (1 << mbits) // 2**-mbits
+		}
+		// emin+1 <= r.exp <= emax+1
+		e := uint32(r.exp-emin) << mbits
+
+		return c * math.Float32frombits(s|e|m), r.acc
+
+	case zero:
+		if x.neg {
+			var z float32
+			return -z, Exact
+		}
+		return 0.0, Exact
+
+	case inf:
+		if x.neg {
+			return float32(math.Inf(-1)), Exact
+		}
+		return float32(math.Inf(+1)), Exact
+	}
+
+	panic("unreachable")
+}
+
+// Float64 returns the float64 value nearest to x. If x is too small to be
+// represented by a float64 (|x| < math.SmallestNonzeroFloat64), the result
+// is (0, Below) or (-0, Above), respectively, depending on the sign of x.
+// If x is too large to be represented by a float64 (|x| > math.MaxFloat64),
+// the result is (+Inf, Above) or (-Inf, Below), depending on the sign of x.
+func (x *Float) Float64() (float64, Accuracy) {
+	if debugFloat {
+		x.validate()
+	}
+
+	switch x.form {
+	case finite:
+		// 0 < |x| < +Inf
+
+		const (
+			fbits = 64                //        float size
+			mbits = 52                //        mantissa size (excluding implicit msb)
+			ebits = fbits - mbits - 1 //    11  exponent size
+			bias  = 1<<(ebits-1) - 1  //  1023  exponent bias
+			dmin  = 1 - bias - mbits  // -1074  smallest unbiased exponent (denormal)
+			emin  = 1 - bias          // -1022  smallest unbiased exponent (normal)
+			emax  = bias              //  1023  largest unbiased exponent (normal)
+		)
+
+		// Float mantissae m have an explicit msb and are in the range 0.5 <= m < 1.0.
+		// floatxx mantissae have an implicit msb and are in the range 1.0 <= m < 2.0.
+		// For a given mantissa m, we need to add 1 to a floatxx exponent to get the
+		// corresponding Float exponent.
+		// (see also implementation of math.Ldexp for similar code)
+
+		if x.exp < dmin+1 {
+			// underflow
+			if x.neg {
+				var z float64
+				return -z, Above
+			}
+			return 0.0, Below
+		}
+		// x.exp >= dmin+1
+
+		var r Float
+		r.prec = mbits + 1 // +1 for implicit msb
+		if x.exp < emin+1 {
+			// denormal number - round to fewer bits
+			r.prec = uint32(x.exp - dmin)
+		}
+		r.Set(x)
+
+		// Rounding may have caused r to overflow to ±Inf
+		// (rounding never causes underflows to 0).
+		if r.form == inf {
+			r.exp = emax + 2 // cause overflow below
+		}
+
+		if r.exp > emax+1 {
+			// overflow
+			if x.neg {
+				return math.Inf(-1), Below
+			}
+			return math.Inf(+1), Above
+		}
+		// dmin+1 <= r.exp <= emax+1
+
+		var s uint64
+		if r.neg {
+			s = 1 << (fbits - 1)
+		}
+
+		m := high64(r.mant) >> ebits & (1<<mbits - 1) // cut off msb (implicit 1 bit)
+
+		// Rounding may have caused a denormal number to
+		// become normal. Check again.
+		c := 1.0
+		if r.exp < emin+1 {
+			// denormal number
+			r.exp += mbits
+			c = 1.0 / (1 << mbits) // 2**-mbits
+		}
+		// emin+1 <= r.exp <= emax+1
+		e := uint64(r.exp-emin) << mbits
+
+		return c * math.Float64frombits(s|e|m), r.acc
+
+	case zero:
+		if x.neg {
+			var z float64
+			return -z, Exact
+		}
+		return 0.0, Exact
+
+	case inf:
+		if x.neg {
+			return math.Inf(-1), Exact
+		}
+		return math.Inf(+1), Exact
+	}
+
+	panic("unreachable")
+}
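+
+// Sketch (assumes Float.SetString from this package's conversion code):
+// values beyond the float64 range saturate to ±Inf, with the accuracy
+// recording the overflow direction.
+func exampleFloat64Overflow() {
+	x, _ := new(Float).SetPrec(64).SetString("1e1000")
+	f, acc := x.Float64()
+	fmt.Println(f, acc) // +Inf Above
+}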
+
+// Int returns the result of truncating x towards zero;
+// or nil if x is an infinity.
+// The result is Exact if x.IsInt(); otherwise it is Below
+// for x > 0, and Above for x < 0.
+// If a non-nil *Int argument z is provided, Int stores
+// the result in z instead of allocating a new Int.
+func (x *Float) Int(z *Int) (*Int, Accuracy) {
+	if debugFloat {
+		x.validate()
+	}
+
+	if z == nil && x.form <= finite {
+		z = new(Int)
+	}
+
+	switch x.form {
+	case finite:
+		// 0 < |x| < +Inf
+		acc := makeAcc(x.neg)
+		if x.exp <= 0 {
+			// 0 < |x| < 1
+			return z.SetInt64(0), acc
+		}
+		// x.exp > 0
+
+		// 1 <= |x| < +Inf
+		// determine minimum required precision for x
+		allBits := uint(len(x.mant)) * _W
+		exp := uint(x.exp)
+		if x.MinPrec() <= exp {
+			acc = Exact
+		}
+		// shift mantissa as needed
+		if z == nil {
+			z = new(Int)
+		}
+		z.neg = x.neg
+		switch {
+		case exp > allBits:
+			z.abs = z.abs.shl(x.mant, exp-allBits)
+		default:
+			// exp == allBits: the mantissa is already the integer value
+			z.abs = z.abs.set(x.mant)
+		case exp < allBits:
+			z.abs = z.abs.shr(x.mant, allBits-exp)
+		}
+		return z, acc
+
+	case zero:
+		return z.SetInt64(0), Exact
+
+	case inf:
+		return nil, makeAcc(x.neg)
+	}
+
+	panic("unreachable")
+}
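+
+// Sketch of the documented truncation behavior: Int truncates toward
+// zero, so a negative non-integer truncates to a value above it.
+func exampleIntTruncation() {
+	i, acc := new(Float).SetFloat64(-1.23).Int(nil)
+	fmt.Println(i, acc) // -1 Above
+}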
+
+// Rat returns the rational number corresponding to x;
+// or nil if x is an infinity.
+// The result is Exact if x is not an Inf.
+// If a non-nil *Rat argument z is provided, Rat stores
+// the result in z instead of allocating a new Rat.
+func (x *Float) Rat(z *Rat) (*Rat, Accuracy) {
+	if debugFloat {
+		x.validate()
+	}
+
+	if z == nil && x.form <= finite {
+		z = new(Rat)
+	}
+
+	switch x.form {
+	case finite:
+		// 0 < |x| < +Inf
+		allBits := int32(len(x.mant)) * _W
+		// build up numerator and denominator
+		z.a.neg = x.neg
+		switch {
+		case x.exp > allBits:
+			z.a.abs = z.a.abs.shl(x.mant, uint(x.exp-allBits))
+			z.b.abs = z.b.abs[:0] // == 1 (see Rat)
+			// z already in normal form
+		default:
+			// x.exp == allBits
+			z.a.abs = z.a.abs.set(x.mant)
+			z.b.abs = z.b.abs[:0] // == 1 (see Rat)
+			// z already in normal form
+		case x.exp < allBits:
+			z.a.abs = z.a.abs.set(x.mant)
+			t := z.b.abs.setUint64(1)
+			z.b.abs = t.shl(t, uint(allBits-x.exp))
+			z.norm()
+		}
+		return z, Exact
+
+	case zero:
+		return z.SetInt64(0), Exact
+
+	case inf:
+		return nil, makeAcc(x.neg)
+	}
+
+	panic("unreachable")
+}
+
+// Abs sets z to the (possibly rounded) value |x| (the absolute value of x)
+// and returns z.
+func (z *Float) Abs(x *Float) *Float {
+	z.Set(x)
+	z.neg = false
+	return z
+}
+
+// Neg sets z to the (possibly rounded) value of x with its sign negated,
+// and returns z.
+func (z *Float) Neg(x *Float) *Float {
+	z.Set(x)
+	z.neg = !z.neg
+	return z
+}
+
+func validateBinaryOperands(x, y *Float) {
+	if !debugFloat {
+		// avoid performance bugs
+		panic("validateBinaryOperands called but debugFloat is not set")
+	}
+	if len(x.mant) == 0 {
+		panic("empty mantissa for x")
+	}
+	if len(y.mant) == 0 {
+		panic("empty mantissa for y")
+	}
+}
+
+// z = x + y, ignoring signs of x and y for the addition
+// but using the sign of z for rounding the result.
+// x and y must have a non-empty mantissa and valid exponent.
+func (z *Float) uadd(x, y *Float) {
+	// Note: This implementation requires 2 shifts most of the
+	// time. It is also inefficient if exponents or precisions
+	// differ by wide margins. The following article describes
+	// an efficient (but much more complicated) implementation
+	// compatible with the internal representation used here:
+	//
+	// Vincent Lefèvre: "The Generic Multiple-Precision Floating-
+	// Point Addition With Exact Rounding (as in the MPFR Library)"
+	// http://www.vinc17.net/research/papers/rnc6.pdf
+
+	if debugFloat {
+		validateBinaryOperands(x, y)
+	}
+
+	// compute exponents ex, ey for mantissa with "binary point"
+	// on the right (mantissa.0) - use int64 to avoid overflow
+	ex := int64(x.exp) - int64(len(x.mant))*_W
+	ey := int64(y.exp) - int64(len(y.mant))*_W
+
+	// TODO(gri) having a combined add-and-shift primitive
+	//           could make this code significantly faster
+	switch {
+	case ex < ey:
+		// cannot re-use z.mant w/o testing for aliasing
+		t := nat(nil).shl(y.mant, uint(ey-ex))
+		z.mant = z.mant.add(x.mant, t)
+	default:
+		// ex == ey, no shift needed
+		z.mant = z.mant.add(x.mant, y.mant)
+	case ex > ey:
+		// cannot re-use z.mant w/o testing for aliasing
+		t := nat(nil).shl(x.mant, uint(ex-ey))
+		z.mant = z.mant.add(t, y.mant)
+		ex = ey
+	}
+	// len(z.mant) > 0
+
+	z.setExpAndRound(ex+int64(len(z.mant))*_W-fnorm(z.mant), 0)
+}
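+
+// A sketch, via the exported API, of the alignment uadd performs:
+// operands with different exponents force a mantissa shift before the
+// word-wise add, so the exact sum needs bits spanning both magnitudes.
+func exampleAlignment() {
+	x := new(Float).SetPrec(64).SetInt64(1 << 40)
+	y := new(Float).SetPrec(64).SetFloat64(0.5)
+	z := new(Float).SetPrec(64).Add(x, y)
+	fmt.Println(z.MinPrec()) // 42: bits from 2**40 down to 2**-1
+}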
+
+// z = x - y for |x| > |y|, ignoring signs of x and y for the subtraction
+// but using the sign of z for rounding the result.
+// x and y must have a non-empty mantissa and valid exponent.
+func (z *Float) usub(x, y *Float) {
+	// This code is symmetric to uadd.
+	// We have not factored the common code out because
+	// eventually uadd (and usub) should be optimized
+	// by special-casing, and the code will diverge.
+
+	if debugFloat {
+		validateBinaryOperands(x, y)
+	}
+
+	ex := int64(x.exp) - int64(len(x.mant))*_W
+	ey := int64(y.exp) - int64(len(y.mant))*_W
+
+	switch {
+	case ex < ey:
+		// cannot re-use z.mant w/o testing for aliasing
+		t := nat(nil).shl(y.mant, uint(ey-ex))
+		z.mant = t.sub(x.mant, t)
+	default:
+		// ex == ey, no shift needed
+		z.mant = z.mant.sub(x.mant, y.mant)
+	case ex > ey:
+		// cannot re-use z.mant w/o testing for aliasing
+		t := nat(nil).shl(x.mant, uint(ex-ey))
+		z.mant = t.sub(t, y.mant)
+		ex = ey
+	}
+
+	// operands may have cancelled each other out
+	if len(z.mant) == 0 {
+		z.acc = Exact
+		z.form = zero
+		z.neg = false
+		return
+	}
+	// len(z.mant) > 0
+
+	z.setExpAndRound(ex+int64(len(z.mant))*_W-fnorm(z.mant), 0)
+}
+
+// z = x * y, ignoring signs of x and y for the multiplication
+// but using the sign of z for rounding the result.
+// x and y must have a non-empty mantissa and valid exponent.
+func (z *Float) umul(x, y *Float) {
+	if debugFloat {
+		validateBinaryOperands(x, y)
+	}
+
+	// Note: This is doing too much work if the precision
+	// of z is less than the sum of the precisions of x
+	// and y which is often the case (e.g., if all floats
+	// have the same precision).
+	// TODO(gri) Optimize this for the common case.
+
+	e := int64(x.exp) + int64(y.exp)
+	z.mant = z.mant.mul(x.mant, y.mant)
+
+	z.setExpAndRound(e-fnorm(z.mant), 0)
+}
+
+// z = x / y, ignoring signs of x and y for the division
+// but using the sign of z for rounding the result.
+// x and y must have a non-empty mantissa and valid exponent.
+func (z *Float) uquo(x, y *Float) {
+	if debugFloat {
+		validateBinaryOperands(x, y)
+	}
+
+	// mantissa length in words for desired result precision + 1
+	// (at least one extra bit so we get the rounding bit after
+	// the division)
+	n := int(z.prec/_W) + 1
+
+	// compute adjusted x.mant such that we get enough result precision
+	xadj := x.mant
+	if d := n - len(x.mant) + len(y.mant); d > 0 {
+		// d extra words needed => add d "0 digits" to x
+		xadj = make(nat, len(x.mant)+d)
+		copy(xadj[d:], x.mant)
+	}
+	// TODO(gri): If we have too many digits (d < 0), we should be able
+	// to shorten x for faster division. But we must be extra careful
+	// with rounding in that case.
+
+	// Compute d before division since there may be aliasing of x.mant
+	// (via xadj) or y.mant with z.mant.
+	d := len(xadj) - len(y.mant)
+
+	// divide
+	var r nat
+	z.mant, r = z.mant.div(nil, xadj, y.mant)
+	e := int64(x.exp) - int64(y.exp) - int64(d-len(z.mant))*_W
+
+	// The result is long enough to include (at least) the rounding bit.
+	// If there's a non-zero remainder, the corresponding fractional part
+	// (if it were computed) would have a non-zero sticky bit (if it were
+	// zero, it couldn't have a non-zero remainder).
+	var sbit uint
+	if len(r) > 0 {
+		sbit = 1
+	}
+
+	z.setExpAndRound(e-fnorm(z.mant), sbit)
+}
+
+// ucmp returns -1, 0, or +1, depending on whether
+// |x| < |y|, |x| == |y|, or |x| > |y|.
+// x and y must have a non-empty mantissa and valid exponent.
+func (x *Float) ucmp(y *Float) int {
+	if debugFloat {
+		validateBinaryOperands(x, y)
+	}
+
+	switch {
+	case x.exp < y.exp:
+		return -1
+	case x.exp > y.exp:
+		return +1
+	}
+	// x.exp == y.exp
+
+	// compare mantissas
+	i := len(x.mant)
+	j := len(y.mant)
+	for i > 0 || j > 0 {
+		var xm, ym Word
+		if i > 0 {
+			i--
+			xm = x.mant[i]
+		}
+		if j > 0 {
+			j--
+			ym = y.mant[j]
+		}
+		switch {
+		case xm < ym:
+			return -1
+		case xm > ym:
+			return +1
+		}
+	}
+
+	return 0
+}
+
+// Handling of sign bit as defined by IEEE 754-2008, section 6.3:
+//
+// When neither the inputs nor result are NaN, the sign of a product or
+// quotient is the exclusive OR of the operands’ signs; the sign of a sum,
+// or of a difference x−y regarded as a sum x+(−y), differs from at most
+// one of the addends’ signs; and the sign of the result of conversions,
+// the quantize operation, the roundToIntegral operations, and the
+// roundToIntegralExact (see 5.3.1) is the sign of the first or only operand.
+// These rules shall apply even when operands or results are zero or infinite.
+//
+// When the sum of two operands with opposite signs (or the difference of
+// two operands with like signs) is exactly zero, the sign of that sum (or
+// difference) shall be +0 in all rounding-direction attributes except
+// roundTowardNegative; under that attribute, the sign of an exact zero
+// sum (or difference) shall be −0. However, x+x = x−(−x) retains the same
+// sign as x even when x is zero.
+//
+// See also: http://play.golang.org/p/RtH3UCt5IH
+
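+// A sketch of one rule quoted above (illustration only): the sum of two
+// negative zeros keeps the negative sign.
+func exampleZeroSign() {
+	nz := new(Float).Neg(new(Float)) // -0
+	z := new(Float).Add(nz, nz)
+	fmt.Println(z.Signbit()) // true: -0 + -0 == -0
+}
+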
+// Add sets z to the rounded sum x+y and returns z. If z's precision is 0,
+// it is changed to the larger of x's or y's precision before the operation.
+// Rounding is performed according to z's precision and rounding mode; and
+// z's accuracy reports the result error relative to the exact (not rounded)
+// result. Add panics with ErrNaN if x and y are infinities with opposite
+// signs. The value of z is undefined in that case.
+//
+// BUG(gri) When rounding ToNegativeInf, the sign of Float values rounded to 0 is incorrect.
+func (z *Float) Add(x, y *Float) *Float {
+	if debugFloat {
+		x.validate()
+		y.validate()
+	}
+
+	if z.prec == 0 {
+		z.prec = umax32(x.prec, y.prec)
+	}
+
+	if x.form == finite && y.form == finite {
+		// x + y (common case)
+		z.neg = x.neg
+		if x.neg == y.neg {
+			// x + y == x + y
+			// (-x) + (-y) == -(x + y)
+			z.uadd(x, y)
+		} else {
+			// x + (-y) == x - y == -(y - x)
+			// (-x) + y == y - x == -(x - y)
+			if x.ucmp(y) > 0 {
+				z.usub(x, y)
+			} else {
+				z.neg = !z.neg
+				z.usub(y, x)
+			}
+		}
+		return z
+	}
+
+	if x.form == inf && y.form == inf && x.neg != y.neg {
+		// +Inf + -Inf
+		// -Inf + +Inf
+		// value of z is undefined but make sure it's valid
+		z.acc = Exact
+		z.form = zero
+		z.neg = false
+		panic(ErrNaN{"addition of infinities with opposite signs"})
+	}
+
+	if x.form == zero && y.form == zero {
+		// ±0 + ±0
+		z.acc = Exact
+		z.form = zero
+		z.neg = x.neg && y.neg // -0 + -0 == -0
+		return z
+	}
+
+	if x.form == inf || y.form == zero {
+		// ±Inf + y
+		// x + ±0
+		return z.Set(x)
+	}
+
+	// ±0 + y
+	// x + ±Inf
+	return z.Set(y)
+}
+
+// Sub sets z to the rounded difference x-y and returns z.
+// Precision, rounding, and accuracy reporting are as for Add.
+// Sub panics with ErrNaN if x and y are infinities with equal
+// signs. The value of z is undefined in that case.
+func (z *Float) Sub(x, y *Float) *Float {
+	if debugFloat {
+		x.validate()
+		y.validate()
+	}
+
+	if z.prec == 0 {
+		z.prec = umax32(x.prec, y.prec)
+	}
+
+	if x.form == finite && y.form == finite {
+		// x - y (common case)
+		z.neg = x.neg
+		if x.neg != y.neg {
+			// x - (-y) == x + y
+			// (-x) - y == -(x + y)
+			z.uadd(x, y)
+		} else {
+			// x - y == x - y == -(y - x)
+			// (-x) - (-y) == y - x == -(x - y)
+			if x.ucmp(y) > 0 {
+				z.usub(x, y)
+			} else {
+				z.neg = !z.neg
+				z.usub(y, x)
+			}
+		}
+		return z
+	}
+
+	if x.form == inf && y.form == inf && x.neg == y.neg {
+		// +Inf - +Inf
+		// -Inf - -Inf
+		// value of z is undefined but make sure it's valid
+		z.acc = Exact
+		z.form = zero
+		z.neg = false
+		panic(ErrNaN{"subtraction of infinities with equal signs"})
+	}
+
+	if x.form == zero && y.form == zero {
+		// ±0 - ±0
+		z.acc = Exact
+		z.form = zero
+		z.neg = x.neg && !y.neg // -0 - +0 == -0
+		return z
+	}
+
+	if x.form == inf || y.form == zero {
+		// ±Inf - y
+		// x - ±0
+		return z.Set(x)
+	}
+
+	// ±0 - y
+	// x - ±Inf
+	return z.Neg(y)
+}
+
+// Mul sets z to the rounded product x*y and returns z.
+// Precision, rounding, and accuracy reporting are as for Add.
+// Mul panics with ErrNaN if one operand is zero and the other
+// operand an infinity. The value of z is undefined in that case.
+func (z *Float) Mul(x, y *Float) *Float {
+	if debugFloat {
+		x.validate()
+		y.validate()
+	}
+
+	if z.prec == 0 {
+		z.prec = umax32(x.prec, y.prec)
+	}
+
+	z.neg = x.neg != y.neg
+
+	if x.form == finite && y.form == finite {
+		// x * y (common case)
+		z.umul(x, y)
+		return z
+	}
+
+	z.acc = Exact
+	if x.form == zero && y.form == inf || x.form == inf && y.form == zero {
+		// ±0 * ±Inf
+		// ±Inf * ±0
+		// value of z is undefined but make sure it's valid
+		z.form = zero
+		z.neg = false
+		panic(ErrNaN{"multiplication of zero with infinity"})
+	}
+
+	if x.form == inf || y.form == inf {
+		// ±Inf * y
+		// x * ±Inf
+		z.form = inf
+		return z
+	}
+
+	// ±0 * y
+	// x * ±0
+	z.form = zero
+	return z
+}
+
+// Quo sets z to the rounded quotient x/y and returns z.
+// Precision, rounding, and accuracy reporting are as for Add.
+// Quo panics with ErrNaN if both operands are zero or infinities.
+// The value of z is undefined in that case.
+func (z *Float) Quo(x, y *Float) *Float {
+	if debugFloat {
+		x.validate()
+		y.validate()
+	}
+
+	if z.prec == 0 {
+		z.prec = umax32(x.prec, y.prec)
+	}
+
+	z.neg = x.neg != y.neg
+
+	if x.form == finite && y.form == finite {
+		// x / y (common case)
+		z.uquo(x, y)
+		return z
+	}
+
+	z.acc = Exact
+	if x.form == zero && y.form == zero || x.form == inf && y.form == inf {
+		// ±0 / ±0
+		// ±Inf / ±Inf
+		// value of z is undefined but make sure it's valid
+		z.form = zero
+		z.neg = false
+		panic(ErrNaN{"division of zero by zero or infinity by infinity"})
+	}
+
+	if x.form == zero || y.form == inf {
+		// ±0 / y
+		// x / ±Inf
+		z.form = zero
+		return z
+	}
+
+	// x / ±0
+	// ±Inf / y
+	z.form = inf
+	return z
+}
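+
+// Sketch: the invalid operand combinations documented for Add, Sub, Mul,
+// and Quo panic with ErrNaN (declared earlier in this file), which
+// callers may recover like any panic.
+func exampleQuoErrNaN() {
+	defer func() {
+		if err, ok := recover().(ErrNaN); ok {
+			fmt.Println("recovered:", err)
+		}
+	}()
+	var zero Float
+	new(Float).Quo(&zero, &zero) // ±0 / ±0
+}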
+
+// Cmp compares x and y and returns:
+//
+//   -1 if x <  y
+//    0 if x == y (incl. -0 == 0, -Inf == -Inf, and +Inf == +Inf)
+//   +1 if x >  y
+//
+func (x *Float) Cmp(y *Float) int {
+	if debugFloat {
+		x.validate()
+		y.validate()
+	}
+
+	mx := x.ord()
+	my := y.ord()
+	switch {
+	case mx < my:
+		return -1
+	case mx > my:
+		return +1
+	}
+	// mx == my
+
+	// only if |mx| == 1 we have to compare the mantissae
+	switch mx {
+	case -1:
+		return y.ucmp(x)
+	case +1:
+		return x.ucmp(y)
+	}
+
+	return 0
+}
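+
+// Sketch: Cmp treats -0 and +0 as equal; Signbit is needed to tell them
+// apart (compare the alike helper in float_test.go below).
+func exampleCmpZero() {
+	nz := new(Float).Neg(new(Float)) // -0
+	fmt.Println(nz.Cmp(new(Float)), nz.Signbit()) // 0 true
+}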
+
+// ord classifies x and returns:
+//
+//	-2 if -Inf == x
+//	-1 if -Inf < x < 0
+//	 0 if x == 0 (signed or unsigned)
+//	+1 if 0 < x < +Inf
+//	+2 if x == +Inf
+//
+func (x *Float) ord() int {
+	var m int
+	switch x.form {
+	case finite:
+		m = 1
+	case zero:
+		return 0
+	case inf:
+		m = 2
+	}
+	if x.neg {
+		m = -m
+	}
+	return m
+}
+
+func umax32(x, y uint32) uint32 {
+	if x > y {
+		return x
+	}
+	return y
+}
diff --git a/src/cmd/compile/internal/big/float_test.go b/src/cmd/compile/internal/big/float_test.go
new file mode 100644
index 0000000..de79b07
--- /dev/null
+++ b/src/cmd/compile/internal/big/float_test.go
@@ -0,0 +1,1664 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package big
+
+import (
+	"fmt"
+	"math"
+	"strconv"
+	"strings"
+	"testing"
+)
+
+func (x *Float) uint64() uint64 {
+	u, acc := x.Uint64()
+	if acc != Exact {
+		panic(fmt.Sprintf("%s is not a uint64", x.Format('g', 10)))
+	}
+	return u
+}
+
+func (x *Float) int64() int64 {
+	i, acc := x.Int64()
+	if acc != Exact {
+		panic(fmt.Sprintf("%s is not an int64", x.Format('g', 10)))
+	}
+	return i
+}
+
+func TestFloatZeroValue(t *testing.T) {
+	// zero (uninitialized) value is a ready-to-use 0.0
+	var x Float
+	if s := x.Format('f', 1); s != "0.0" {
+		t.Errorf("zero value = %s; want 0.0", s)
+	}
+
+	// zero value has precision 0
+	if prec := x.Prec(); prec != 0 {
+		t.Errorf("prec = %d; want 0", prec)
+	}
+
+	// zero value can be used in any and all positions of binary operations
+	make := func(x int) *Float {
+		var f Float
+		if x != 0 {
+			f.SetInt64(int64(x))
+		}
+		// x == 0 translates into the zero value
+		return &f
+	}
+	for _, test := range []struct {
+		z, x, y, want int
+		opname        rune
+		op            func(z, x, y *Float) *Float
+	}{
+		{0, 0, 0, 0, '+', (*Float).Add},
+		{0, 1, 2, 3, '+', (*Float).Add},
+		{1, 2, 0, 2, '+', (*Float).Add},
+		{2, 0, 1, 1, '+', (*Float).Add},
+
+		{0, 0, 0, 0, '-', (*Float).Sub},
+		{0, 1, 2, -1, '-', (*Float).Sub},
+		{1, 2, 0, 2, '-', (*Float).Sub},
+		{2, 0, 1, -1, '-', (*Float).Sub},
+
+		{0, 0, 0, 0, '*', (*Float).Mul},
+		{0, 1, 2, 2, '*', (*Float).Mul},
+		{1, 2, 0, 0, '*', (*Float).Mul},
+		{2, 0, 1, 0, '*', (*Float).Mul},
+
+		// {0, 0, 0, 0, '/', (*Float).Quo}, // panics
+		{0, 2, 1, 2, '/', (*Float).Quo},
+		{1, 2, 0, 0, '/', (*Float).Quo}, // = +Inf
+		{2, 0, 1, 0, '/', (*Float).Quo},
+	} {
+		z := make(test.z)
+		test.op(z, make(test.x), make(test.y))
+		got := 0
+		if !z.IsInf() {
+			got = int(z.int64())
+		}
+		if got != test.want {
+			t.Errorf("%d %c %d = %d; want %d", test.x, test.opname, test.y, got, test.want)
+		}
+	}
+
+	// TODO(gri) test how precision is set for zero value results
+}
+
+func makeFloat(s string) *Float {
+	var x Float
+
+	switch s {
+	case "0":
+		return &x
+	case "-0":
+		return x.Neg(&x)
+	case "Inf", "+Inf":
+		return x.SetInf(false)
+	case "-Inf":
+		return x.SetInf(true)
+	}
+
+	x.SetPrec(1000)
+	if _, ok := x.SetString(s); !ok {
+		panic(fmt.Sprintf("%q is not a valid float", s))
+	}
+	return &x
+}
+
+func TestFloatSetPrec(t *testing.T) {
+	for _, test := range []struct {
+		x    string
+		prec uint
+		want string
+		acc  Accuracy
+	}{
+		// prec 0
+		{"0", 0, "0", Exact},
+		{"-0", 0, "-0", Exact},
+		{"-Inf", 0, "-Inf", Exact},
+		{"+Inf", 0, "+Inf", Exact},
+		{"123", 0, "0", Below},
+		{"-123", 0, "-0", Above},
+
+		// prec at upper limit
+		{"0", MaxPrec, "0", Exact},
+		{"-0", MaxPrec, "-0", Exact},
+		{"-Inf", MaxPrec, "-Inf", Exact},
+		{"+Inf", MaxPrec, "+Inf", Exact},
+
+		// just a few regular cases - general rounding is tested elsewhere
+		{"1.5", 1, "2", Above},
+		{"-1.5", 1, "-2", Below},
+		{"123", 1e6, "123", Exact},
+		{"-123", 1e6, "-123", Exact},
+	} {
+		x := makeFloat(test.x).SetPrec(test.prec)
+		prec := test.prec
+		if prec > MaxPrec {
+			prec = MaxPrec
+		}
+		if got := x.Prec(); got != prec {
+			t.Errorf("%s.SetPrec(%d).Prec() == %d; want %d", test.x, test.prec, got, prec)
+		}
+		if got, acc := x.String(), x.Acc(); got != test.want || acc != test.acc {
+			t.Errorf("%s.SetPrec(%d) = %s (%s); want %s (%s)", test.x, test.prec, got, acc, test.want, test.acc)
+		}
+	}
+}
+
+func TestFloatMinPrec(t *testing.T) {
+	const max = 100
+	for _, test := range []struct {
+		x    string
+		want uint
+	}{
+		{"0", 0},
+		{"-0", 0},
+		{"+Inf", 0},
+		{"-Inf", 0},
+		{"1", 1},
+		{"2", 1},
+		{"3", 2},
+		{"0x8001", 16},
+		{"0x8001p-1000", 16},
+		{"0x8001p+1000", 16},
+		{"0.1", max},
+	} {
+		x := makeFloat(test.x).SetPrec(max)
+		if got := x.MinPrec(); got != test.want {
+			t.Errorf("%s.MinPrec() = %d; want %d", test.x, got, test.want)
+		}
+	}
+}
+
+func TestFloatSign(t *testing.T) {
+	for _, test := range []struct {
+		x string
+		s int
+	}{
+		{"-Inf", -1},
+		{"-1", -1},
+		{"-0", 0},
+		{"+0", 0},
+		{"+1", +1},
+		{"+Inf", +1},
+	} {
+		x := makeFloat(test.x)
+		s := x.Sign()
+		if s != test.s {
+			t.Errorf("%s.Sign() = %d; want %d", test.x, s, test.s)
+		}
+	}
+}
+
+// alike(x, y) is like x.Cmp(y) == 0 but also considers the sign of 0 (0 != -0).
+func alike(x, y *Float) bool {
+	return x.Cmp(y) == 0 && x.Signbit() == y.Signbit()
+}
+
+func TestFloatMantExp(t *testing.T) {
+	for _, test := range []struct {
+		x    string
+		mant string
+		exp  int
+	}{
+		{"0", "0", 0},
+		{"+0", "0", 0},
+		{"-0", "-0", 0},
+		{"Inf", "+Inf", 0},
+		{"+Inf", "+Inf", 0},
+		{"-Inf", "-Inf", 0},
+		{"1.5", "0.75", 1},
+		{"1.024e3", "0.5", 11},
+		{"-0.125", "-0.5", -2},
+	} {
+		x := makeFloat(test.x)
+		mant := makeFloat(test.mant)
+		m := new(Float)
+		e := x.MantExp(m)
+		if !alike(m, mant) || e != test.exp {
+			t.Errorf("%s.MantExp() = %s, %d; want %s, %d", test.x, m.Format('g', 10), e, test.mant, test.exp)
+		}
+	}
+}
+
+func TestFloatMantExpAliasing(t *testing.T) {
+	x := makeFloat("0.5p10")
+	if e := x.MantExp(x); e != 10 {
+		t.Fatalf("Float.MantExp aliasing error: got %d; want 10", e)
+	}
+	if want := makeFloat("0.5"); !alike(x, want) {
+		t.Fatalf("Float.MantExp aliasing error: got %s; want %s", x.Format('g', 10), want.Format('g', 10))
+	}
+}
+
+func TestFloatSetMantExp(t *testing.T) {
+	for _, test := range []struct {
+		frac string
+		exp  int
+		z    string
+	}{
+		{"0", 0, "0"},
+		{"+0", 0, "0"},
+		{"-0", 0, "-0"},
+		{"Inf", 1234, "+Inf"},
+		{"+Inf", -1234, "+Inf"},
+		{"-Inf", -1234, "-Inf"},
+		{"0", MinExp, "0"},
+		{"0.25", MinExp, "+0"},    // exponent underflow
+		{"-0.25", MinExp, "-0"},   // exponent underflow
+		{"1", MaxExp, "+Inf"},     // exponent overflow
+		{"2", MaxExp - 1, "+Inf"}, // exponent overflow
+		{"0.75", 1, "1.5"},
+		{"0.5", 11, "1024"},
+		{"-0.5", -2, "-0.125"},
+		{"32", 5, "1024"},
+		{"1024", -10, "1"},
+	} {
+		frac := makeFloat(test.frac)
+		want := makeFloat(test.z)
+		var z Float
+		z.SetMantExp(frac, test.exp)
+		if !alike(&z, want) {
+			t.Errorf("SetMantExp(%s, %d) = %s; want %s", test.frac, test.exp, z.Format('g', 10), test.z)
+		}
+		// test inverse property
+		mant := new(Float)
+		if z.SetMantExp(mant, want.MantExp(mant)).Cmp(want) != 0 {
+			t.Errorf("Inverse property not satisfied: got %s; want %s", z.Format('g', 10), test.z)
+		}
+	}
+}
+
+func TestFloatPredicates(t *testing.T) {
+	for _, test := range []struct {
+		x            string
+		sign         int
+		signbit, inf bool
+	}{
+		{x: "-Inf", sign: -1, signbit: true, inf: true},
+		{x: "-1", sign: -1, signbit: true},
+		{x: "-0", signbit: true},
+		{x: "0"},
+		{x: "1", sign: 1},
+		{x: "+Inf", sign: 1, inf: true},
+	} {
+		x := makeFloat(test.x)
+		if got := x.Signbit(); got != test.signbit {
+			t.Errorf("(%s).Signbit() = %v; want %v", test.x, got, test.signbit)
+		}
+		if got := x.Sign(); got != test.sign {
+			t.Errorf("(%s).Sign() = %d; want %d", test.x, got, test.sign)
+		}
+		if got := x.IsInf(); got != test.inf {
+			t.Errorf("(%s).IsInf() = %v; want %v", test.x, got, test.inf)
+		}
+	}
+}
+
+func TestFloatIsInt(t *testing.T) {
+	for _, test := range []string{
+		"0 int",
+		"-0 int",
+		"1 int",
+		"-1 int",
+		"0.5",
+		"1.23",
+		"1.23e1",
+		"1.23e2 int",
+		"0.000000001e+8",
+		"0.000000001e+9 int",
+		"1.2345e200 int",
+		"Inf",
+		"+Inf",
+		"-Inf",
+	} {
+		s := strings.TrimSuffix(test, " int")
+		want := s != test
+		if got := makeFloat(s).IsInt(); got != want {
+			t.Errorf("%s.IsInt() == %t", s, got)
+		}
+	}
+}
+
+func fromBinary(s string) int64 {
+	x, err := strconv.ParseInt(s, 2, 64)
+	if err != nil {
+		panic(err)
+	}
+	return x
+}
+
+func toBinary(x int64) string {
+	return strconv.FormatInt(x, 2)
+}
+
+func testFloatRound(t *testing.T, x, r int64, prec uint, mode RoundingMode) {
+	// verify test data
+	var ok bool
+	switch mode {
+	case ToNearestEven, ToNearestAway:
+		ok = true // nothing to do for now
+	case ToZero:
+		if x < 0 {
+			ok = r >= x
+		} else {
+			ok = r <= x
+		}
+	case AwayFromZero:
+		if x < 0 {
+			ok = r <= x
+		} else {
+			ok = r >= x
+		}
+	case ToNegativeInf:
+		ok = r <= x
+	case ToPositiveInf:
+		ok = r >= x
+	default:
+		panic("unreachable")
+	}
+	if !ok {
+		t.Fatalf("incorrect test data for prec = %d, %s: x = %s, r = %s", prec, mode, toBinary(x), toBinary(r))
+	}
+
+	// compute expected accuracy
+	a := Exact
+	switch {
+	case r < x:
+		a = Below
+	case r > x:
+		a = Above
+	}
+
+	// round
+	f := new(Float).SetMode(mode).SetInt64(x).SetPrec(prec)
+
+	// check result
+	r1 := f.int64()
+	p1 := f.Prec()
+	a1 := f.Acc()
+	if r1 != r || p1 != prec || a1 != a {
+		t.Errorf("round %s (%d bits, %s) incorrect: got %s (%d bits, %s); want %s (%d bits, %s)",
+			toBinary(x), prec, mode,
+			toBinary(r1), p1, a1,
+			toBinary(r), prec, a)
+		return
+	}
+
+	// g and f should be the same
+	// (rounding by SetPrec after SetInt64 using default precision
+	// should be the same as rounding by SetInt64 after setting the
+	// precision)
+	g := new(Float).SetMode(mode).SetPrec(prec).SetInt64(x)
+	if !alike(g, f) {
+		t.Errorf("round %s (%d bits, %s) not symmetric: got %s and %s; want %s",
+			toBinary(x), prec, mode,
+			toBinary(g.int64()),
+			toBinary(r1),
+			toBinary(r),
+		)
+		return
+	}
+
+	// h and f should be the same
+	// (repeated rounding should be idempotent)
+	h := new(Float).SetMode(mode).SetPrec(prec).Set(f)
+	if !alike(h, f) {
+		t.Errorf("round %s (%d bits, %s) not idempotent: got %s and %s; want %s",
+			toBinary(x), prec, mode,
+			toBinary(h.int64()),
+			toBinary(r1),
+			toBinary(r),
+		)
+		return
+	}
+}
+
+// TestFloatRound tests basic rounding.
+func TestFloatRound(t *testing.T) {
+	for _, test := range []struct {
+		prec                        uint
+		x, zero, neven, naway, away string // input, results rounded to prec bits
+	}{
+		{5, "1000", "1000", "1000", "1000", "1000"},
+		{5, "1001", "1001", "1001", "1001", "1001"},
+		{5, "1010", "1010", "1010", "1010", "1010"},
+		{5, "1011", "1011", "1011", "1011", "1011"},
+		{5, "1100", "1100", "1100", "1100", "1100"},
+		{5, "1101", "1101", "1101", "1101", "1101"},
+		{5, "1110", "1110", "1110", "1110", "1110"},
+		{5, "1111", "1111", "1111", "1111", "1111"},
+
+		{4, "1000", "1000", "1000", "1000", "1000"},
+		{4, "1001", "1001", "1001", "1001", "1001"},
+		{4, "1010", "1010", "1010", "1010", "1010"},
+		{4, "1011", "1011", "1011", "1011", "1011"},
+		{4, "1100", "1100", "1100", "1100", "1100"},
+		{4, "1101", "1101", "1101", "1101", "1101"},
+		{4, "1110", "1110", "1110", "1110", "1110"},
+		{4, "1111", "1111", "1111", "1111", "1111"},
+
+		{3, "1000", "1000", "1000", "1000", "1000"},
+		{3, "1001", "1000", "1000", "1010", "1010"},
+		{3, "1010", "1010", "1010", "1010", "1010"},
+		{3, "1011", "1010", "1100", "1100", "1100"},
+		{3, "1100", "1100", "1100", "1100", "1100"},
+		{3, "1101", "1100", "1100", "1110", "1110"},
+		{3, "1110", "1110", "1110", "1110", "1110"},
+		{3, "1111", "1110", "10000", "10000", "10000"},
+
+		{3, "1000001", "1000000", "1000000", "1000000", "1010000"},
+		{3, "1001001", "1000000", "1010000", "1010000", "1010000"},
+		{3, "1010001", "1010000", "1010000", "1010000", "1100000"},
+		{3, "1011001", "1010000", "1100000", "1100000", "1100000"},
+		{3, "1100001", "1100000", "1100000", "1100000", "1110000"},
+		{3, "1101001", "1100000", "1110000", "1110000", "1110000"},
+		{3, "1110001", "1110000", "1110000", "1110000", "10000000"},
+		{3, "1111001", "1110000", "10000000", "10000000", "10000000"},
+
+		{2, "1000", "1000", "1000", "1000", "1000"},
+		{2, "1001", "1000", "1000", "1000", "1100"},
+		{2, "1010", "1000", "1000", "1100", "1100"},
+		{2, "1011", "1000", "1100", "1100", "1100"},
+		{2, "1100", "1100", "1100", "1100", "1100"},
+		{2, "1101", "1100", "1100", "1100", "10000"},
+		{2, "1110", "1100", "10000", "10000", "10000"},
+		{2, "1111", "1100", "10000", "10000", "10000"},
+
+		{2, "1000001", "1000000", "1000000", "1000000", "1100000"},
+		{2, "1001001", "1000000", "1000000", "1000000", "1100000"},
+		{2, "1010001", "1000000", "1100000", "1100000", "1100000"},
+		{2, "1011001", "1000000", "1100000", "1100000", "1100000"},
+		{2, "1100001", "1100000", "1100000", "1100000", "10000000"},
+		{2, "1101001", "1100000", "1100000", "1100000", "10000000"},
+		{2, "1110001", "1100000", "10000000", "10000000", "10000000"},
+		{2, "1111001", "1100000", "10000000", "10000000", "10000000"},
+
+		{1, "1000", "1000", "1000", "1000", "1000"},
+		{1, "1001", "1000", "1000", "1000", "10000"},
+		{1, "1010", "1000", "1000", "1000", "10000"},
+		{1, "1011", "1000", "1000", "1000", "10000"},
+		{1, "1100", "1000", "10000", "10000", "10000"},
+		{1, "1101", "1000", "10000", "10000", "10000"},
+		{1, "1110", "1000", "10000", "10000", "10000"},
+		{1, "1111", "1000", "10000", "10000", "10000"},
+
+		{1, "1000001", "1000000", "1000000", "1000000", "10000000"},
+		{1, "1001001", "1000000", "1000000", "1000000", "10000000"},
+		{1, "1010001", "1000000", "1000000", "1000000", "10000000"},
+		{1, "1011001", "1000000", "1000000", "1000000", "10000000"},
+		{1, "1100001", "1000000", "10000000", "10000000", "10000000"},
+		{1, "1101001", "1000000", "10000000", "10000000", "10000000"},
+		{1, "1110001", "1000000", "10000000", "10000000", "10000000"},
+		{1, "1111001", "1000000", "10000000", "10000000", "10000000"},
+	} {
+		x := fromBinary(test.x)
+		z := fromBinary(test.zero)
+		e := fromBinary(test.neven)
+		n := fromBinary(test.naway)
+		a := fromBinary(test.away)
+		prec := test.prec
+
+		testFloatRound(t, x, z, prec, ToZero)
+		testFloatRound(t, x, e, prec, ToNearestEven)
+		testFloatRound(t, x, n, prec, ToNearestAway)
+		testFloatRound(t, x, a, prec, AwayFromZero)
+
+		testFloatRound(t, x, z, prec, ToNegativeInf)
+		testFloatRound(t, x, a, prec, ToPositiveInf)
+
+		testFloatRound(t, -x, -a, prec, ToNegativeInf)
+		testFloatRound(t, -x, -z, prec, ToPositiveInf)
+	}
+}
+
+// TestFloatRound24 tests that rounding a float64 to 24 bits
+// matches IEEE-754 rounding to nearest when converting a
+// float64 to a float32 (excluding denormal numbers).
+func TestFloatRound24(t *testing.T) {
+	const x0 = 1<<26 - 0x10 // 11...110000 (26 bits)
+	for d := 0; d <= 0x10; d++ {
+		x := float64(x0 + d)
+		f := new(Float).SetPrec(24).SetFloat64(x)
+		got, _ := f.Float32()
+		want := float32(x)
+		if got != want {
+			t.Errorf("Round(%g, 24) = %g; want %g", x, got, want)
+		}
+	}
+}
+
+func TestFloatSetUint64(t *testing.T) {
+	for _, want := range []uint64{
+		0,
+		1,
+		2,
+		10,
+		100,
+		1<<32 - 1,
+		1 << 32,
+		1<<64 - 1,
+	} {
+		var f Float
+		f.SetUint64(want)
+		if got := f.uint64(); got != want {
+			t.Errorf("got %#x (%s); want %#x", got, f.Format('p', 0), want)
+		}
+	}
+
+	// test basic rounding behavior (exhaustive rounding testing is done elsewhere)
+	const x uint64 = 0x8765432187654321 // 64 bits needed
+	for prec := uint(1); prec <= 64; prec++ {
+		f := new(Float).SetPrec(prec).SetMode(ToZero).SetUint64(x)
+		got := f.uint64()
+		want := x &^ (1<<(64-prec) - 1) // cut off (round to zero) low 64-prec bits
+		if got != want {
+			t.Errorf("got %#x (%s); want %#x", got, f.Format('p', 0), want)
+		}
+	}
+}
+
+func TestFloatSetInt64(t *testing.T) {
+	for _, want := range []int64{
+		0,
+		1,
+		2,
+		10,
+		100,
+		1<<32 - 1,
+		1 << 32,
+		1<<63 - 1,
+	} {
+		for i := range [2]int{} {
+			if i&1 != 0 {
+				want = -want
+			}
+			var f Float
+			f.SetInt64(want)
+			if got := f.int64(); got != want {
+				t.Errorf("got %#x (%s); want %#x", got, f.Format('p', 0), want)
+			}
+		}
+	}
+
+	// test basic rounding behavior (exhaustive rounding testing is done elsewhere)
+	const x int64 = 0x7654321076543210 // 63 bits needed
+	for prec := uint(1); prec <= 63; prec++ {
+		f := new(Float).SetPrec(prec).SetMode(ToZero).SetInt64(x)
+		got := f.int64()
+		want := x &^ (1<<(63-prec) - 1) // cut off (round to zero) low 63-prec bits
+		if got != want {
+			t.Errorf("got %#x (%s); want %#x", got, f.Format('p', 0), want)
+		}
+	}
+}
+
+func TestFloatSetFloat64(t *testing.T) {
+	for _, want := range []float64{
+		0,
+		1,
+		2,
+		12345,
+		1e10,
+		1e100,
+		3.14159265e10,
+		2.718281828e-123,
+		1.0 / 3,
+		math.MaxFloat32,
+		math.MaxFloat64,
+		math.SmallestNonzeroFloat32,
+		math.SmallestNonzeroFloat64,
+		math.Inf(-1),
+		math.Inf(0),
+		-math.Inf(1),
+	} {
+		for i := range [2]int{} {
+			if i&1 != 0 {
+				want = -want
+			}
+			var f Float
+			f.SetFloat64(want)
+			if got, acc := f.Float64(); got != want || acc != Exact {
+				t.Errorf("got %g (%s, %s); want %g (Exact)", got, f.Format('p', 0), acc, want)
+			}
+		}
+	}
+
+	// test basic rounding behavior (exhaustive rounding testing is done elsewhere)
+	const x uint64 = 0x8765432143218 // 52 bits needed
+	for prec := uint(1); prec <= 52; prec++ {
+		f := new(Float).SetPrec(prec).SetMode(ToZero).SetFloat64(float64(x))
+		got, _ := f.Float64()
+		want := float64(x &^ (1<<(52-prec) - 1)) // cut off (round to zero) low 52-prec bits
+		if got != want {
+			t.Errorf("got %g (%s); want %g", got, f.Format('p', 0), want)
+		}
+	}
+
+	// test NaN
+	defer func() {
+		if p, ok := recover().(ErrNaN); !ok {
+			t.Errorf("got %v; want ErrNaN panic", p)
+		}
+	}()
+	var f Float
+	f.SetFloat64(math.NaN())
+	// should not reach here
+	t.Errorf("got %s; want ErrNaN panic", f.Format('p', 0))
+}
+
+func TestFloatSetInt(t *testing.T) {
+	for _, want := range []string{
+		"0",
+		"1",
+		"-1",
+		"1234567890",
+		"123456789012345678901234567890",
+		"123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890",
+	} {
+		var x Int
+		_, ok := x.SetString(want, 0)
+		if !ok {
+			t.Errorf("invalid integer %s", want)
+			continue
+		}
+		n := x.BitLen()
+
+		var f Float
+		f.SetInt(&x)
+
+		// check precision
+		if n < 64 {
+			n = 64
+		}
+		if prec := f.Prec(); prec != uint(n) {
+			t.Errorf("got prec = %d; want %d", prec, n)
+		}
+
+		// check value
+		got := f.Format('g', 100)
+		if got != want {
+			t.Errorf("got %s (%s); want %s", got, f.Format('p', 0), want)
+		}
+	}
+
+	// TODO(gri) test basic rounding behavior
+}
+
+func TestFloatSetRat(t *testing.T) {
+	for _, want := range []string{
+		"0",
+		"1",
+		"-1",
+		"1234567890",
+		"123456789012345678901234567890",
+		"123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890",
+		"1.2",
+		"3.14159265",
+		// TODO(gri) expand
+	} {
+		var x Rat
+		_, ok := x.SetString(want)
+		if !ok {
+			t.Errorf("invalid fraction %s", want)
+			continue
+		}
+		n := max(x.Num().BitLen(), x.Denom().BitLen())
+
+		var f1, f2 Float
+		f2.SetPrec(1000)
+		f1.SetRat(&x)
+		f2.SetRat(&x)
+
+		// check precision when set automatically
+		if n < 64 {
+			n = 64
+		}
+		if prec := f1.Prec(); prec != uint(n) {
+			t.Errorf("got prec = %d; want %d", prec, n)
+		}
+
+		got := f2.Format('g', 100)
+		if got != want {
+			t.Errorf("got %s (%s); want %s", got, f2.Format('p', 0), want)
+		}
+	}
+}
+
+func TestFloatSetInf(t *testing.T) {
+	var f Float
+	for _, test := range []struct {
+		signbit bool
+		prec    uint
+		want    string
+	}{
+		{false, 0, "+Inf"},
+		{true, 0, "-Inf"},
+		{false, 10, "+Inf"},
+		{true, 30, "-Inf"},
+	} {
+		x := f.SetPrec(test.prec).SetInf(test.signbit)
+		if got := x.String(); got != test.want || x.Prec() != test.prec {
+			t.Errorf("SetInf(%v) = %s (prec = %d); want %s (prec = %d)", test.signbit, got, x.Prec(), test.want, test.prec)
+		}
+	}
+}
+
+func TestFloatUint64(t *testing.T) {
+	for _, test := range []struct {
+		x   string
+		out uint64
+		acc Accuracy
+	}{
+		{"-Inf", 0, Above},
+		{"-1", 0, Above},
+		{"-1e-1000", 0, Above},
+		{"-0", 0, Exact},
+		{"0", 0, Exact},
+		{"1e-1000", 0, Below},
+		{"1", 1, Exact},
+		{"1.000000000000000000001", 1, Below},
+		{"12345.0", 12345, Exact},
+		{"12345.000000000000000000001", 12345, Below},
+		{"18446744073709551615", 18446744073709551615, Exact},
+		{"18446744073709551615.000000000000000000001", math.MaxUint64, Below},
+		{"18446744073709551616", math.MaxUint64, Below},
+		{"1e10000", math.MaxUint64, Below},
+		{"+Inf", math.MaxUint64, Below},
+	} {
+		x := makeFloat(test.x)
+		out, acc := x.Uint64()
+		if out != test.out || acc != test.acc {
+			t.Errorf("%s: got %d (%s); want %d (%s)", test.x, out, acc, test.out, test.acc)
+		}
+	}
+}
+
+func TestFloatInt64(t *testing.T) {
+	for _, test := range []struct {
+		x   string
+		out int64
+		acc Accuracy
+	}{
+		{"-Inf", math.MinInt64, Above},
+		{"-1e10000", math.MinInt64, Above},
+		{"-9223372036854775809", math.MinInt64, Above},
+		{"-9223372036854775808.000000000000000000001", math.MinInt64, Above},
+		{"-9223372036854775808", -9223372036854775808, Exact},
+		{"-9223372036854775807.000000000000000000001", -9223372036854775807, Above},
+		{"-9223372036854775807", -9223372036854775807, Exact},
+		{"-12345.000000000000000000001", -12345, Above},
+		{"-12345.0", -12345, Exact},
+		{"-1.000000000000000000001", -1, Above},
+		{"-1.5", -1, Above},
+		{"-1", -1, Exact},
+		{"-1e-1000", 0, Above},
+		{"0", 0, Exact},
+		{"1e-1000", 0, Below},
+		{"1", 1, Exact},
+		{"1.000000000000000000001", 1, Below},
+		{"1.5", 1, Below},
+		{"12345.0", 12345, Exact},
+		{"12345.000000000000000000001", 12345, Below},
+		{"9223372036854775807", 9223372036854775807, Exact},
+		{"9223372036854775807.000000000000000000001", math.MaxInt64, Below},
+		{"9223372036854775808", math.MaxInt64, Below},
+		{"1e10000", math.MaxInt64, Below},
+		{"+Inf", math.MaxInt64, Below},
+	} {
+		x := makeFloat(test.x)
+		out, acc := x.Int64()
+		if out != test.out || acc != test.acc {
+			t.Errorf("%s: got %d (%s); want %d (%s)", test.x, out, acc, test.out, test.acc)
+		}
+	}
+}
+
+func TestFloatFloat32(t *testing.T) {
+	for _, test := range []struct {
+		x   string
+		out float32
+		acc Accuracy
+	}{
+		{"-Inf", float32(math.Inf(-1)), Exact},
+		{"-0x1.ffffff0p2147483646", float32(-math.Inf(+1)), Below}, // overflow in rounding
+		{"-1e10000", float32(math.Inf(-1)), Below},                 // overflow
+		{"-0x1p128", float32(math.Inf(-1)), Below},                 // overflow
+		{"-0x1.ffffff0p127", float32(-math.Inf(+1)), Below},        // overflow
+		{"-0x1.fffffe8p127", -math.MaxFloat32, Above},
+		{"-0x1.fffffe0p127", -math.MaxFloat32, Exact},
+		{"-12345.000000000000000000001", -12345, Above},
+		{"-12345.0", -12345, Exact},
+		{"-1.000000000000000000001", -1, Above},
+		{"-1", -1, Exact},
+		{"-0x0.000002p-126", -math.SmallestNonzeroFloat32, Exact},
+		{"-0x0.000002p-127", -0, Above}, // underflow
+		{"-1e-1000", -0, Above},         // underflow
+		{"0", 0, Exact},
+		{"1e-1000", 0, Below},         // underflow
+		{"0x0.000002p-127", 0, Below}, // underflow
+		{"0x0.000002p-126", math.SmallestNonzeroFloat32, Exact},
+		{"1", 1, Exact},
+		{"1.000000000000000000001", 1, Below},
+		{"12345.0", 12345, Exact},
+		{"12345.000000000000000000001", 12345, Below},
+		{"0x1.fffffe0p127", math.MaxFloat32, Exact},
+		{"0x1.fffffe8p127", math.MaxFloat32, Below},
+		{"0x1.ffffff0p127", float32(math.Inf(+1)), Above},        // overflow
+		{"0x1p128", float32(math.Inf(+1)), Above},                // overflow
+		{"1e10000", float32(math.Inf(+1)), Above},                // overflow
+		{"0x1.ffffff0p2147483646", float32(math.Inf(+1)), Above}, // overflow in rounding
+		{"+Inf", float32(math.Inf(+1)), Exact},
+	} {
+		// conversion should match strconv where syntax is agreeable
+		if f, err := strconv.ParseFloat(test.x, 32); err == nil && float32(f) != test.out {
+			t.Errorf("%s: got %g; want %g (incorrect test data)", test.x, f, test.out)
+		}
+
+		x := makeFloat(test.x)
+		out, acc := x.Float32()
+		if out != test.out || acc != test.acc {
+			t.Errorf("%s: got %g (%#x, %s); want %g (%#x, %s)", test.x, out, math.Float32bits(out), acc, test.out, math.Float32bits(test.out), test.acc)
+		}
+
+		// test that x.SetFloat64(float64(f)).Float32() == f
+		var x2 Float
+		out2, acc2 := x2.SetFloat64(float64(out)).Float32()
+		if out2 != out || acc2 != Exact {
+			t.Errorf("idempotency test: got %g (%s); want %g (Exact)", out2, acc2, out)
+		}
+	}
+}
+
+func TestFloatFloat64(t *testing.T) {
+	const smallestNormalFloat64 = 2.2250738585072014e-308 // 1p-1022
+	for _, test := range []struct {
+		x   string
+		out float64
+		acc Accuracy
+	}{
+		{"-Inf", math.Inf(-1), Exact},
+		{"-0x1.fffffffffffff8p2147483646", -math.Inf(+1), Below}, // overflow in rounding
+		{"-1e10000", math.Inf(-1), Below},                        // overflow
+		{"-0x1p1024", math.Inf(-1), Below},                       // overflow
+		{"-0x1.fffffffffffff8p1023", -math.Inf(+1), Below},       // overflow
+		{"-0x1.fffffffffffff4p1023", -math.MaxFloat64, Above},
+		{"-0x1.fffffffffffff0p1023", -math.MaxFloat64, Exact},
+		{"-12345.000000000000000000001", -12345, Above},
+		{"-12345.0", -12345, Exact},
+		{"-1.000000000000000000001", -1, Above},
+		{"-1", -1, Exact},
+		{"-0x0.0000000000001p-1022", -math.SmallestNonzeroFloat64, Exact},
+		{"-0x0.0000000000001p-1023", -0, Above}, // underflow
+		{"-1e-1000", -0, Above},                 // underflow
+		{"0", 0, Exact},
+		{"1e-1000", 0, Below},                 // underflow
+		{"0x0.0000000000001p-1023", 0, Below}, // underflow
+		{"0x0.0000000000001p-1022", math.SmallestNonzeroFloat64, Exact},
+		{"1", 1, Exact},
+		{"1.000000000000000000001", 1, Below},
+		{"12345.0", 12345, Exact},
+		{"12345.000000000000000000001", 12345, Below},
+		{"0x1.fffffffffffff0p1023", math.MaxFloat64, Exact},
+		{"0x1.fffffffffffff4p1023", math.MaxFloat64, Below},
+		{"0x1.fffffffffffff8p1023", math.Inf(+1), Above},       // overflow
+		{"0x1p1024", math.Inf(+1), Above},                      // overflow
+		{"1e10000", math.Inf(+1), Above},                       // overflow
+		{"0x1.fffffffffffff8p2147483646", math.Inf(+1), Above}, // overflow in rounding
+		{"+Inf", math.Inf(+1), Exact},
+
+		// selected denormalized values that were handled incorrectly in the past
+		{"0x.fffffffffffffp-1022", smallestNormalFloat64 - math.SmallestNonzeroFloat64, Exact},
+		{"4503599627370495p-1074", smallestNormalFloat64 - math.SmallestNonzeroFloat64, Exact},
+
+		// http://www.exploringbinary.com/php-hangs-on-numeric-value-2-2250738585072011e-308/
+		{"2.2250738585072011e-308", 2.225073858507201e-308, Below},
+		// http://www.exploringbinary.com/java-hangs-when-converting-2-2250738585072012e-308/
+		{"2.2250738585072012e-308", 2.2250738585072014e-308, Above},
+	} {
+		// conversion should match strconv where syntax is agreeable
+		if f, err := strconv.ParseFloat(test.x, 64); err == nil && f != test.out {
+			t.Errorf("%s: got %g; want %g (incorrect test data)", test.x, f, test.out)
+		}
+
+		x := makeFloat(test.x)
+		out, acc := x.Float64()
+		if out != test.out || acc != test.acc {
+			t.Errorf("%s: got %g (%#x, %s); want %g (%#x, %s)", test.x, out, math.Float64bits(out), acc, test.out, math.Float64bits(test.out), test.acc)
+		}
+
+		// test that x.SetFloat64(f).Float64() == f
+		var x2 Float
+		out2, acc2 := x2.SetFloat64(out).Float64()
+		if out2 != out || acc2 != Exact {
+			t.Errorf("idempotency test: got %g (%s); want %g (Exact)", out2, acc2, out)
+		}
+	}
+}
+
+func TestFloatInt(t *testing.T) {
+	for _, test := range []struct {
+		x    string
+		want string
+		acc  Accuracy
+	}{
+		{"0", "0", Exact},
+		{"+0", "0", Exact},
+		{"-0", "0", Exact},
+		{"Inf", "nil", Below},
+		{"+Inf", "nil", Below},
+		{"-Inf", "nil", Above},
+		{"1", "1", Exact},
+		{"-1", "-1", Exact},
+		{"1.23", "1", Below},
+		{"-1.23", "-1", Above},
+		{"123e-2", "1", Below},
+		{"123e-3", "0", Below},
+		{"123e-4", "0", Below},
+		{"1e-1000", "0", Below},
+		{"-1e-1000", "0", Above},
+		{"1e+10", "10000000000", Exact},
+		{"1e+100", "10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", Exact},
+	} {
+		x := makeFloat(test.x)
+		res, acc := x.Int(nil)
+		got := "nil"
+		if res != nil {
+			got = res.String()
+		}
+		if got != test.want || acc != test.acc {
+			t.Errorf("%s: got %s (%s); want %s (%s)", test.x, got, acc, test.want, test.acc)
+		}
+	}
+
+	// check that supplied *Int is used
+	for _, f := range []string{"0", "1", "-1", "1234"} {
+		x := makeFloat(f)
+		i := new(Int)
+		if res, _ := x.Int(i); res != i {
+			t.Errorf("(%s).Int is not using supplied *Int", f)
+		}
+	}
+}
+
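+// A hypothetical, unused sketch of the truncation behavior checked
+// above: Int truncates toward zero, and the Accuracy reports on which
+// side of the exact value the result lies.
+func exampleFloatInt() {
+	i, acc := makeFloat("-1.23").Int(nil) // i == -1, acc == Above (-1 > -1.23)
+	_, _ = i, acc
+}
+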
+func TestFloatRat(t *testing.T) {
+	for _, test := range []struct {
+		x, want string
+		acc     Accuracy
+	}{
+		{"0", "0/1", Exact},
+		{"+0", "0/1", Exact},
+		{"-0", "0/1", Exact},
+		{"Inf", "nil", Below},
+		{"+Inf", "nil", Below},
+		{"-Inf", "nil", Above},
+		{"1", "1/1", Exact},
+		{"-1", "-1/1", Exact},
+		{"1.25", "5/4", Exact},
+		{"-1.25", "-5/4", Exact},
+		{"1e10", "10000000000/1", Exact},
+		{"1p10", "1024/1", Exact},
+		{"-1p-10", "-1/1024", Exact},
+		{"3.14159265", "7244019449799623199/2305843009213693952", Exact},
+	} {
+		x := makeFloat(test.x).SetPrec(64)
+		res, acc := x.Rat(nil)
+		got := "nil"
+		if res != nil {
+			got = res.String()
+		}
+		if got != test.want {
+			t.Errorf("%s: got %s; want %s", test.x, got, test.want)
+			continue
+		}
+		if acc != test.acc {
+			t.Errorf("%s: got %s; want %s", test.x, acc, test.acc)
+			continue
+		}
+
+		// inverse conversion
+		if res != nil {
+			got := new(Float).SetPrec(64).SetRat(res)
+			if got.Cmp(x) != 0 {
+				t.Errorf("%s: got %s; want %s", test.x, got, x)
+			}
+		}
+	}
+
+	// check that supplied *Rat is used
+	for _, f := range []string{"0", "1", "-1", "1234"} {
+		x := makeFloat(f)
+		r := new(Rat)
+		if res, _ := x.Rat(r); res != r {
+			t.Errorf("(%s).Rat is not using supplied *Rat", f)
+		}
+	}
+}
+
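+// A hypothetical, unused sketch: every finite Float is a binary
+// fraction, so the Rat conversion of a finite value is always Exact.
+func exampleFloatRat() {
+	r, acc := makeFloat("1.25").Rat(nil) // r == 5/4, acc == Exact
+	_, _ = r, acc
+}
+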
+func TestFloatAbs(t *testing.T) {
+	for _, test := range []string{
+		"0",
+		"1",
+		"1234",
+		"1.23e-2",
+		"1e-1000",
+		"1e1000",
+		"Inf",
+	} {
+		p := makeFloat(test)
+		a := new(Float).Abs(p)
+		if !alike(a, p) {
+			t.Errorf("%s: got %s; want %s", test, a.Format('g', 10), test)
+		}
+
+		n := makeFloat("-" + test)
+		a.Abs(n)
+		if !alike(a, p) {
+			t.Errorf("-%s: got %s; want %s", test, a.Format('g', 10), test)
+		}
+	}
+}
+
+func TestFloatNeg(t *testing.T) {
+	for _, test := range []string{
+		"0",
+		"1",
+		"1234",
+		"1.23e-2",
+		"1e-1000",
+		"1e1000",
+		"Inf",
+	} {
+		p1 := makeFloat(test)
+		n1 := makeFloat("-" + test)
+		n2 := new(Float).Neg(p1)
+		p2 := new(Float).Neg(n2)
+		if !alike(n2, n1) {
+			t.Errorf("%s: got %s; want %s", test, n2.Format('g', 10), n1.Format('g', 10))
+		}
+		if !alike(p2, p1) {
+			t.Errorf("%s: got %s; want %s", test, p2.Format('g', 10), p1.Format('g', 10))
+		}
+	}
+}
+
+func TestFloatInc(t *testing.T) {
+	const n = 10
+	for _, prec := range precList {
+		if 1<<prec < n {
+			continue // prec must be large enough to hold all numbers from 0 to n
+		}
+		var x, one Float
+		x.SetPrec(prec)
+		one.SetInt64(1)
+		for i := 0; i < n; i++ {
+			x.Add(&x, &one)
+		}
+		if x.Cmp(new(Float).SetInt64(n)) != 0 {
+			t.Errorf("prec = %d: got %s; want %d", prec, &x, n)
+		}
+	}
+}
+
+// Selected precisions with which to run various tests.
+var precList = [...]uint{1, 2, 5, 8, 10, 16, 23, 24, 32, 50, 53, 64, 100, 128, 500, 511, 512, 513, 1000, 10000}
+
+// Selected bits with which to run various tests.
+// Each entry is a list of bits representing a floating-point number (see fromBits).
+var bitsList = [...]Bits{
+	{},           // = 0
+	{0},          // = 1
+	{1},          // = 2
+	{-1},         // = 1/2
+	{10},         // = 2**10 == 1024
+	{-10},        // = 2**-10 == 1/1024
+	{100, 10, 1}, // = 2**100 + 2**10 + 2**1
+	{0, -1, -2, -10},
+	// TODO(gri) add more test cases
+}
+
+// TestFloatAdd tests Float.Add/Sub by comparing the result of a "manual"
+// addition/subtraction of arguments represented by Bits values with the
+// respective Float addition/subtraction for a variety of precisions
+// and rounding modes.
+func TestFloatAdd(t *testing.T) {
+	for _, xbits := range bitsList {
+		for _, ybits := range bitsList {
+			// exact values
+			x := xbits.Float()
+			y := ybits.Float()
+			zbits := xbits.add(ybits)
+			z := zbits.Float()
+
+			for i, mode := range [...]RoundingMode{ToZero, ToNearestEven, AwayFromZero} {
+				for _, prec := range precList {
+					got := new(Float).SetPrec(prec).SetMode(mode)
+					got.Add(x, y)
+					want := zbits.round(prec, mode)
+					if got.Cmp(want) != 0 {
+						t.Errorf("i = %d, prec = %d, %s:\n\t     %s %v\n\t+    %s %v\n\t=    %s\n\twant %s",
+							i, prec, mode, x, xbits, y, ybits, got, want)
+					}
+
+					got.Sub(z, x)
+					want = ybits.round(prec, mode)
+					if got.Cmp(want) != 0 {
+						t.Errorf("i = %d, prec = %d, %s:\n\t     %s %v\n\t-    %s %v\n\t=    %s\n\twant %s",
+							i, prec, mode, z, zbits, x, xbits, got, want)
+					}
+				}
+			}
+		}
+	}
+}
+
+// TestFloatAdd32 tests that Float.Add/Sub of numbers with
+// 24bit mantissa behaves like float32 addition/subtraction
+// (excluding denormal numbers).
+func TestFloatAdd32(t *testing.T) {
+	// choose base such that we cross the mantissa precision limit
+	const base = 1<<26 - 0x10 // 11...110000 (26 bits)
+	for d := 0; d <= 0x10; d++ {
+		for i := range [2]int{} {
+			x0, y0 := float64(base), float64(d)
+			if i&1 != 0 {
+				x0, y0 = y0, x0
+			}
+
+			x := NewFloat(x0)
+			y := NewFloat(y0)
+			z := new(Float).SetPrec(24)
+
+			z.Add(x, y)
+			got, acc := z.Float32()
+			want := float32(y0) + float32(x0)
+			if got != want || acc != Exact {
+				t.Errorf("d = %d: %g + %g = %g (%s); want %g (Exact)", d, x0, y0, got, acc, want)
+			}
+
+			z.Sub(z, y)
+			got, acc = z.Float32()
+			want = float32(want) - float32(y0)
+			if got != want || acc != Exact {
+				t.Errorf("d = %d: %g - %g = %g (%s); want %g (Exact)", d, x0+y0, y0, got, acc, want)
+			}
+		}
+	}
+}
+
+// TestFloatAdd64 tests that Float.Add/Sub of numbers with
+// 53bit mantissa behaves like float64 addition/subtraction.
+func TestFloatAdd64(t *testing.T) {
+	// choose base such that we cross the mantissa precision limit
+	const base = 1<<55 - 0x10 // 11...110000 (55 bits)
+	for d := 0; d <= 0x10; d++ {
+		for i := range [2]int{} {
+			x0, y0 := float64(base), float64(d)
+			if i&1 != 0 {
+				x0, y0 = y0, x0
+			}
+
+			x := NewFloat(x0)
+			y := NewFloat(y0)
+			z := new(Float).SetPrec(53)
+
+			z.Add(x, y)
+			got, acc := z.Float64()
+			want := x0 + y0
+			if got != want || acc != Exact {
+				t.Errorf("d = %d: %g + %g = %g (%s); want %g (Exact)", d, x0, y0, got, acc, want)
+			}
+
+			z.Sub(z, y)
+			got, acc = z.Float64()
+			want -= y0
+			if got != want || acc != Exact {
+				t.Errorf("d = %d: %g - %g = %g (%s); want %g (Exact)", d, x0+y0, y0, got, acc, want)
+			}
+		}
+	}
+}
+
+// TestFloatMul tests Float.Mul/Quo by comparing the result of a "manual"
+// multiplication/division of arguments represented by Bits values with the
+// respective Float multiplication/division for a variety of precisions
+// and rounding modes.
+func TestFloatMul(t *testing.T) {
+	for _, xbits := range bitsList {
+		for _, ybits := range bitsList {
+			// exact values
+			x := xbits.Float()
+			y := ybits.Float()
+			zbits := xbits.mul(ybits)
+			z := zbits.Float()
+
+			for i, mode := range [...]RoundingMode{ToZero, ToNearestEven, AwayFromZero} {
+				for _, prec := range precList {
+					got := new(Float).SetPrec(prec).SetMode(mode)
+					got.Mul(x, y)
+					want := zbits.round(prec, mode)
+					if got.Cmp(want) != 0 {
+						t.Errorf("i = %d, prec = %d, %s:\n\t     %s %v\n\t*    %s %v\n\t=    %s\n\twant %s",
+							i, prec, mode, x, xbits, y, ybits, got, want)
+					}
+
+					if x.Sign() == 0 {
+						continue // ignore div-0 case (not invertible)
+					}
+					got.Quo(z, x)
+					want = ybits.round(prec, mode)
+					if got.Cmp(want) != 0 {
+						t.Errorf("i = %d, prec = %d, %s:\n\t     %s %v\n\t/    %s %v\n\t=    %s\n\twant %s",
+							i, prec, mode, z, zbits, x, xbits, got, want)
+					}
+				}
+			}
+		}
+	}
+}
+
+// TestFloatMul64 tests that Float.Mul/Quo of numbers with
+// 53bit mantissa behaves like float64 multiplication/division.
+func TestFloatMul64(t *testing.T) {
+	for _, test := range []struct {
+		x, y float64
+	}{
+		{0, 0},
+		{0, 1},
+		{1, 1},
+		{1, 1.5},
+		{1.234, 0.5678},
+		{2.718281828, 3.14159265358979},
+		{2.718281828e10, 3.14159265358979e-32},
+		{1.0 / 3, 1e200},
+	} {
+		for i := range [8]int{} {
+			x0, y0 := test.x, test.y
+			if i&1 != 0 {
+				x0 = -x0
+			}
+			if i&2 != 0 {
+				y0 = -y0
+			}
+			if i&4 != 0 {
+				x0, y0 = y0, x0
+			}
+
+			x := NewFloat(x0)
+			y := NewFloat(y0)
+			z := new(Float).SetPrec(53)
+
+			z.Mul(x, y)
+			got, _ := z.Float64()
+			want := x0 * y0
+			if got != want {
+				t.Errorf("%g * %g = %g; want %g", x0, y0, got, want)
+			}
+
+			if y0 == 0 {
+				continue // avoid division-by-zero
+			}
+			z.Quo(z, y)
+			got, _ = z.Float64()
+			want /= y0
+			if got != want {
+				t.Errorf("%g / %g = %g; want %g", x0*y0, y0, got, want)
+			}
+		}
+	}
+}
+
+func TestIssue6866(t *testing.T) {
+	for _, prec := range precList {
+		two := new(Float).SetPrec(prec).SetInt64(2)
+		one := new(Float).SetPrec(prec).SetInt64(1)
+		three := new(Float).SetPrec(prec).SetInt64(3)
+		msix := new(Float).SetPrec(prec).SetInt64(-6)
+		psix := new(Float).SetPrec(prec).SetInt64(+6)
+
+		p := new(Float).SetPrec(prec)
+		z1 := new(Float).SetPrec(prec)
+		z2 := new(Float).SetPrec(prec)
+
+		// z1 = 2 + 1.0/3*-6
+		p.Quo(one, three)
+		p.Mul(p, msix)
+		z1.Add(two, p)
+
+		// z2 = 2 - 1.0/3*+6
+		p.Quo(one, three)
+		p.Mul(p, psix)
+		z2.Sub(two, p)
+
+		if z1.Cmp(z2) != 0 {
+			t.Fatalf("prec %d: got z1 = %s != z2 = %s; want z1 == z2\n", prec, z1, z2)
+		}
+		if z1.Sign() != 0 {
+			t.Errorf("prec %d: got z1 = %s; want 0", prec, z1)
+		}
+		if z2.Sign() != 0 {
+			t.Errorf("prec %d: got z2 = %s; want 0", prec, z2)
+		}
+	}
+}
+
+func TestFloatQuo(t *testing.T) {
+	// TODO(gri) make the test vary these precisions
+	preci := 200 // precision of integer part
+	precf := 20  // precision of fractional part
+
+	for i := 0; i < 8; i++ {
+		// compute accurate (not rounded) result z
+		bits := Bits{preci - 1}
+		if i&3 != 0 {
+			bits = append(bits, 0)
+		}
+		if i&2 != 0 {
+			bits = append(bits, -1)
+		}
+		if i&1 != 0 {
+			bits = append(bits, -precf)
+		}
+		z := bits.Float()
+
+		// compute accurate x as z*y
+		y := NewFloat(3.14159265358979323e123)
+
+		x := new(Float).SetPrec(z.Prec() + y.Prec()).SetMode(ToZero)
+		x.Mul(z, y)
+
+		// leave for debugging
+		// fmt.Printf("x = %s\ny = %s\nz = %s\n", x, y, z)
+
+		if got := x.Acc(); got != Exact {
+			t.Errorf("got acc = %s; want exact", got)
+		}
+
+		// round accurate z for a variety of precisions and
+		// modes and compare against result of x / y.
+		for _, mode := range [...]RoundingMode{ToZero, ToNearestEven, AwayFromZero} {
+			for d := -5; d < 5; d++ {
+				prec := uint(preci + d)
+				got := new(Float).SetPrec(prec).SetMode(mode).Quo(x, y)
+				want := bits.round(prec, mode)
+				if got.Cmp(want) != 0 {
+					t.Errorf("i = %d, prec = %d, %s:\n\t     %s\n\t/    %s\n\t=    %s\n\twant %s",
+						i, prec, mode, x, y, got, want)
+				}
+			}
+		}
+	}
+}
+
+// TestFloatQuoSmoke tests all divisions x/y for values x, y in the range [-n, +n];
+// it serves as a smoke test for basic correctness of division.
+func TestFloatQuoSmoke(t *testing.T) {
+	n := 1000
+	if testing.Short() {
+		n = 10
+	}
+
+	const dprec = 3         // max. precision variation
+	const prec = 10 + dprec // enough bits to hold n precisely
+	for x := -n; x <= n; x++ {
+		for y := -n; y < n; y++ {
+			if y == 0 {
+				continue
+			}
+
+			a := float64(x)
+			b := float64(y)
+			c := a / b
+
+			// vary operand precision (only ok as long as a, b can be represented correctly)
+			for ad := -dprec; ad <= dprec; ad++ {
+				for bd := -dprec; bd <= dprec; bd++ {
+					A := new(Float).SetPrec(uint(prec + ad)).SetFloat64(a)
+					B := new(Float).SetPrec(uint(prec + bd)).SetFloat64(b)
+					C := new(Float).SetPrec(53).Quo(A, B) // C has float64 mantissa width
+
+					cc, acc := C.Float64()
+					if cc != c {
+						t.Errorf("%g/%g = %s; want %.5g\n", a, b, C.Format('g', 5), c)
+						continue
+					}
+					if acc != Exact {
+						t.Errorf("%g/%g got %s result; want exact result", a, b, acc)
+					}
+				}
+			}
+		}
+	}
+}
+
+// TestFloatArithmeticSpecialValues tests that Float operations produce the
+// correct results for combinations of zero (±0), finite (±1 and ±2.71828),
+// and infinite (±Inf) operands.
+func TestFloatArithmeticSpecialValues(t *testing.T) {
+	zero := 0.0
+	args := []float64{math.Inf(-1), -2.71828, -1, -zero, zero, 1, 2.71828, math.Inf(1)}
+	xx := new(Float)
+	yy := new(Float)
+	got := new(Float)
+	want := new(Float)
+	for i := 0; i < 4; i++ {
+		for _, x := range args {
+			xx.SetFloat64(x)
+			// check conversion is correct
+			// (no need to do this for y, since we see exactly the
+			// same values there)
+			if got, acc := xx.Float64(); got != x || acc != Exact {
+				t.Errorf("Float(%g) == %g (%s)", x, got, acc)
+			}
+			for _, y := range args {
+				yy.SetFloat64(y)
+				var (
+					op string
+					z  float64
+					f  func(z, x, y *Float) *Float
+				)
+				switch i {
+				case 0:
+					op = "+"
+					z = x + y
+					f = (*Float).Add
+				case 1:
+					op = "-"
+					z = x - y
+					f = (*Float).Sub
+				case 2:
+					op = "*"
+					z = x * y
+					f = (*Float).Mul
+				case 3:
+					op = "/"
+					z = x / y
+					f = (*Float).Quo
+				default:
+					panic("unreachable")
+				}
+				var errnan bool // set if execution of f panicked with ErrNaN
+				// protect execution of f
+				func() {
+					defer func() {
+						if p := recover(); p != nil {
+							_ = p.(ErrNaN) // re-panic if not ErrNaN
+							errnan = true
+						}
+					}()
+					f(got, xx, yy)
+				}()
+				if math.IsNaN(z) {
+					if !errnan {
+						t.Errorf("%5g %s %5g = %5s; want ErrNaN panic", x, op, y, got)
+					}
+					continue
+				}
+				if errnan {
+					t.Errorf("%5g %s %5g panicked with ErrNaN; want %5s", x, op, y, want)
+					continue
+				}
+				want.SetFloat64(z)
+				if !alike(got, want) {
+					t.Errorf("%5g %s %5g = %5s; want %5s", x, op, y, got, want)
+				}
+			}
+		}
+	}
+}
+
+func TestFloatArithmeticOverflow(t *testing.T) {
+	for _, test := range []struct {
+		prec       uint
+		mode       RoundingMode
+		op         byte
+		x, y, want string
+		acc        Accuracy
+	}{
+		{4, ToNearestEven, '+', "0", "0", "0", Exact},                // smoke test
+		{4, ToNearestEven, '+', "0x.8p0", "0x.8p0", "0x.8p1", Exact}, // smoke test
+
+		{4, ToNearestEven, '+', "0", "0x.8p2147483647", "0x.8p2147483647", Exact},
+		{4, ToNearestEven, '+', "0x.8p2147483500", "0x.8p2147483647", "0x.8p2147483647", Below}, // rounded to zero
+		{4, ToNearestEven, '+', "0x.8p2147483647", "0x.8p2147483647", "+Inf", Above},            // exponent overflow in +
+		{4, ToNearestEven, '+', "-0x.8p2147483647", "-0x.8p2147483647", "-Inf", Below},          // exponent overflow in +
+		{4, ToNearestEven, '-', "-0x.8p2147483647", "0x.8p2147483647", "-Inf", Below},           // exponent overflow in -
+
+		{4, ToZero, '+', "0x.fp2147483647", "0x.8p2147483643", "0x.fp2147483647", Below}, // rounded to zero
+		{4, ToNearestEven, '+', "0x.fp2147483647", "0x.8p2147483643", "+Inf", Above},     // exponent overflow in rounding
+		{4, AwayFromZero, '+', "0x.fp2147483647", "0x.8p2147483643", "+Inf", Above},      // exponent overflow in rounding
+
+		{4, AwayFromZero, '-', "-0x.fp2147483647", "0x.8p2147483644", "-Inf", Below},       // exponent overflow in rounding
+		{4, ToNearestEven, '-', "-0x.fp2147483647", "0x.8p2147483643", "-Inf", Below},      // exponent overflow in rounding
+		{4, ToZero, '-', "-0x.fp2147483647", "0x.8p2147483643", "-0x.fp2147483647", Above}, // rounded to zero
+
+		{4, ToNearestEven, '+', "0", "0x.8p-2147483648", "0x.8p-2147483648", Exact},
+		{4, ToNearestEven, '+', "0x.8p-2147483648", "0x.8p-2147483648", "0x.8p-2147483647", Exact},
+
+		{4, ToNearestEven, '*', "1", "0x.8p2147483647", "0x.8p2147483647", Exact},
+		{4, ToNearestEven, '*', "2", "0x.8p2147483647", "+Inf", Above},  // exponent overflow in *
+		{4, ToNearestEven, '*', "-2", "0x.8p2147483647", "-Inf", Below}, // exponent overflow in *
+
+		{4, ToNearestEven, '/', "0.5", "0x.8p2147483647", "0x.8p-2147483646", Exact},
+		{4, ToNearestEven, '/', "0x.8p0", "0x.8p2147483647", "0x.8p-2147483646", Exact},
+		{4, ToNearestEven, '/', "0x.8p-1", "0x.8p2147483647", "0x.8p-2147483647", Exact},
+		{4, ToNearestEven, '/', "0x.8p-2", "0x.8p2147483647", "0x.8p-2147483648", Exact},
+		{4, ToNearestEven, '/', "0x.8p-3", "0x.8p2147483647", "0", Below}, // exponent underflow in /
+	} {
+		x := makeFloat(test.x)
+		y := makeFloat(test.y)
+		z := new(Float).SetPrec(test.prec).SetMode(test.mode)
+		switch test.op {
+		case '+':
+			z.Add(x, y)
+		case '-':
+			z.Sub(x, y)
+		case '*':
+			z.Mul(x, y)
+		case '/':
+			z.Quo(x, y)
+		default:
+			panic("unreachable")
+		}
+		if got := z.Format('p', 0); got != test.want || z.Acc() != test.acc {
+			t.Errorf(
+				"prec = %d (%s): %s %c %s = %s (%s); want %s (%s)",
+				test.prec, test.mode, x.Format('p', 0), test.op, y.Format('p', 0), got, z.Acc(), test.want, test.acc,
+			)
+		}
+	}
+}
+
+// TODO(gri) Add tests that check correctness in the presence of aliasing.
+
+// For rounding modes ToNegativeInf and ToPositiveInf, rounding is affected
+// by the sign of the value to be rounded. Test that rounding happens after
+// the sign of a result has been set.
+// This test uses specific values that are known to fail if rounding is
+// "factored" out before setting the result sign.
+func TestFloatArithmeticRounding(t *testing.T) {
+	for _, test := range []struct {
+		mode       RoundingMode
+		prec       uint
+		x, y, want int64
+		op         byte
+	}{
+		{ToZero, 3, -0x8, -0x1, -0x8, '+'},
+		{AwayFromZero, 3, -0x8, -0x1, -0xa, '+'},
+		{ToNegativeInf, 3, -0x8, -0x1, -0xa, '+'},
+
+		{ToZero, 3, -0x8, 0x1, -0x8, '-'},
+		{AwayFromZero, 3, -0x8, 0x1, -0xa, '-'},
+		{ToNegativeInf, 3, -0x8, 0x1, -0xa, '-'},
+
+		{ToZero, 3, -0x9, 0x1, -0x8, '*'},
+		{AwayFromZero, 3, -0x9, 0x1, -0xa, '*'},
+		{ToNegativeInf, 3, -0x9, 0x1, -0xa, '*'},
+
+		{ToZero, 3, -0x9, 0x1, -0x8, '/'},
+		{AwayFromZero, 3, -0x9, 0x1, -0xa, '/'},
+		{ToNegativeInf, 3, -0x9, 0x1, -0xa, '/'},
+	} {
+		var x, y, z Float
+		x.SetInt64(test.x)
+		y.SetInt64(test.y)
+		z.SetPrec(test.prec).SetMode(test.mode)
+		switch test.op {
+		case '+':
+			z.Add(&x, &y)
+		case '-':
+			z.Sub(&x, &y)
+		case '*':
+			z.Mul(&x, &y)
+		case '/':
+			z.Quo(&x, &y)
+		default:
+			panic("unreachable")
+		}
+		if got, acc := z.Int64(); got != test.want || acc != Exact {
+			t.Errorf("%s, %d bits: %d %c %d = %d (%s); want %d (Exact)",
+				test.mode, test.prec, test.x, test.op, test.y, got, acc, test.want,
+			)
+		}
+	}
+}
+
+// TestFloatCmpSpecialValues tests that Cmp produces the correct results for
+// combinations of zero (±0), finite (±1 and ±2.71828), and infinite (±Inf)
+// operands.
+func TestFloatCmpSpecialValues(t *testing.T) {
+	zero := 0.0
+	args := []float64{math.Inf(-1), -2.71828, -1, -zero, zero, 1, 2.71828, math.Inf(1)}
+	xx := new(Float)
+	yy := new(Float)
+	for i := 0; i < 4; i++ {
+		for _, x := range args {
+			xx.SetFloat64(x)
+			// check conversion is correct
+			// (no need to do this for y, since we see exactly the
+			// same values there)
+			if got, acc := xx.Float64(); got != x || acc != Exact {
+				t.Errorf("Float(%g) == %g (%s)", x, got, acc)
+			}
+			for _, y := range args {
+				yy.SetFloat64(y)
+				got := xx.Cmp(yy)
+				want := 0
+				switch {
+				case x < y:
+					want = -1
+				case x > y:
+					want = +1
+				}
+				if got != want {
+					t.Errorf("(%g).Cmp(%g) = %v; want %v", x, y, got, want)
+				}
+			}
+		}
+	}
+}
diff --git a/src/cmd/compile/internal/big/floatconv.go b/src/cmd/compile/internal/big/floatconv.go
new file mode 100644
index 0000000..b929d12
--- /dev/null
+++ b/src/cmd/compile/internal/big/floatconv.go
@@ -0,0 +1,373 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements string-to-Float and Float-to-string conversion functions.
+
+package big
+
+import (
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+)
+
+// SetString sets z to the value of s and returns z and a boolean indicating
+// success. s must be a floating-point number of the same format as accepted
+// by Scan, with number prefixes permitted.
+func (z *Float) SetString(s string) (*Float, bool) {
+	r := strings.NewReader(s)
+
+	f, _, err := z.Scan(r, 0)
+	if err != nil {
+		return nil, false
+	}
+
+	// there should be no unread characters left
+	if _, err = r.ReadByte(); err != io.EOF {
+		return nil, false
+	}
+
+	return f, true
+}
+
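+// A minimal sketch of SetString use (hypothetical, unused helper; the
+// 64-bit default precision follows from the Scan documentation below):
+func exampleSetString() {
+	pi, ok := new(Float).SetString("3.14159")
+	if !ok {
+		panic("parse failed")
+	}
+	_ = pi // 3.14159, rounded to 64 bits of precision
+}
+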
+// Scan scans the number corresponding to the longest possible prefix
+// of r representing a floating-point number with a mantissa in the
+// given conversion base (the exponent is always a decimal number).
+// It sets z to the (possibly rounded) value of the corresponding
+// floating-point number, and returns z, the actual base b, and an
+// error err, if any. If z's precision is 0, it is changed to 64
+// before rounding takes effect. The number must be of the form:
+//
+//	number   = [ sign ] [ prefix ] mantissa [ exponent ] .
+//	sign     = "+" | "-" .
+//	prefix   = "0" ( "x" | "X" | "b" | "B" ) .
+//	mantissa = digits | digits "." [ digits ] | "." digits .
+//	exponent = ( "E" | "e" | "p" ) [ sign ] digits .
+//	digits   = digit { digit } .
+//	digit    = "0" ... "9" | "a" ... "z" | "A" ... "Z" .
+//
+// The base argument must be 0, 2, 10, or 16. Providing an invalid base
+// argument will lead to a run-time panic.
+//
+// For base 0, the number prefix determines the actual base: A prefix of
+// "0x" or "0X" selects base 16, and a "0b" or "0B" prefix selects
+// base 2; otherwise, the actual base is 10 and no prefix is accepted.
+// The octal prefix "0" is not supported (a leading "0" is simply
+// considered a "0").
+//
+// A "p" exponent indicates a binary (rather than decimal) exponent;
+// for instance "0x1.fffffffffffffp1023" (using base 0) represents the
+// maximum float64 value. For hexadecimal mantissae, the exponent must
+// be binary, if present (an "e" or "E" exponent indicator cannot be
+// distinguished from a mantissa digit).
+//
+// If an error is reported, the returned *Float f is nil and the value
+// of z is valid but not defined.
+//
+// BUG(gri) The Float.Scan signature conflicts with Scan(s fmt.ScanState, ch rune) error.
+func (z *Float) Scan(r io.ByteScanner, base int) (f *Float, b int, err error) {
+	prec := z.prec
+	if prec == 0 {
+		prec = 64
+	}
+
+	// A reasonable value in case of an error.
+	z.form = zero
+
+	// sign
+	z.neg, err = scanSign(r)
+	if err != nil {
+		return
+	}
+
+	// mantissa
+	var fcount int // fractional digit count; valid if <= 0
+	z.mant, b, fcount, err = z.mant.scan(r, base, true)
+	if err != nil {
+		return
+	}
+
+	// exponent
+	var exp int64
+	var ebase int
+	exp, ebase, err = scanExponent(r, true)
+	if err != nil {
+		return
+	}
+
+	// special-case 0
+	if len(z.mant) == 0 {
+		z.prec = prec
+		z.acc = Exact
+		z.form = zero
+		f = z
+		return
+	}
+	// len(z.mant) > 0
+
+	// The mantissa may have a decimal point (fcount <= 0) and there
+	// may be a nonzero exponent exp. The decimal point amounts to a
+	// division by b**(-fcount). An exponent means multiplication by
+	// ebase**exp. Finally, mantissa normalization (shift left) requires
+	// a correcting multiplication by 2**(-shiftcount). Multiplications
+	// are commutative, so we can apply them in any order as long as there
+	// is no loss of precision. We only have powers of 2 and 10; keep
+	// track via separate exponents exp2 and exp10.
+
+	// normalize mantissa and get initial binary exponent
+	var exp2 = int64(len(z.mant))*_W - fnorm(z.mant)
+
+	// determine binary or decimal exponent contribution of decimal point
+	var exp10 int64
+	if fcount < 0 {
+		// The mantissa has a "decimal" point ddd.dddd, and
+		// -fcount is the number of digits to the right of '.'.
+		// Adjust the relevant exponent accordingly.
+		switch b {
+		case 16:
+			fcount *= 4 // hexadecimal digits are 4 bits each
+			fallthrough
+		case 2:
+			exp2 += int64(fcount)
+		default: // b == 10
+			exp10 = int64(fcount)
+		}
+		// we don't need fcount anymore
+	}
+
+	// take actual exponent into account
+	if ebase == 2 {
+		exp2 += exp
+	} else { // ebase == 10
+		exp10 += exp
+	}
+	// we don't need exp anymore
+
+	// apply 2**exp2
+	if MinExp <= exp2 && exp2 <= MaxExp {
+		z.prec = prec
+		z.form = finite
+		z.exp = int32(exp2)
+		f = z
+	} else {
+		err = fmt.Errorf("exponent overflow")
+		return
+	}
+
+	if exp10 == 0 {
+		// no decimal exponent to consider
+		z.round(0)
+		return
+	}
+	// exp10 != 0
+
+	// apply 10**exp10
+	p := new(Float).SetPrec(z.Prec() + 64) // use more bits for p -- TODO(gri) what is the right number?
+	if exp10 < 0 {
+		z.uquo(z, p.pow10(-exp10))
+	} else {
+		z.umul(z, p.pow10(exp10))
+	}
+
+	return
+}
+
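+// A sketch of base inference (hypothetical, unused helper): with base 0,
+// the mantissa prefix selects the actual base, per the rules above.
+func exampleScanBase() {
+	x, b, err := new(Float).Parse("0x1p-2", 0) // hex mantissa, binary exponent
+	// x == 0.25, b == 16, err == nil
+	_, _, _ = x, b, err
+}
+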
+// These powers of 10 can be represented exactly as a float64.
+var pow10tab = [...]float64{
+	1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
+	1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
+}
+
+// pow10 sets z to 10**n and returns z.
+// n must not be negative.
+func (z *Float) pow10(n int64) *Float {
+	if n < 0 {
+		panic("pow10 called with negative argument")
+	}
+
+	const m = int64(len(pow10tab) - 1)
+	if n <= m {
+		return z.SetFloat64(pow10tab[n])
+	}
+	// n > m
+
+	z.SetFloat64(pow10tab[m])
+	n -= m
+
+	// use more bits for f than for z
+	// TODO(gri) what is the right number?
+	f := new(Float).SetPrec(z.Prec() + 64).SetInt64(10)
+
+	for n > 0 {
+		if n&1 != 0 {
+			z.Mul(z, f)
+		}
+		f.Mul(f, f)
+		n >>= 1
+	}
+
+	return z
+}
+
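+// A worked trace of the square-and-multiply loop above, for a residual
+// exponent n = 13 (binary 1101) with f starting at 10:
+//
+//	n = 1101: z *= 10       then f = 10**2
+//	n =  110: (skip)        then f = 10**4
+//	n =   11: z *= 10**4    then f = 10**8
+//	n =    1: z *= 10**8    (z gains 10**(1+4+8) == 10**13)
+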
+// Parse is like z.Scan(r, base), but instead of reading from an
+// io.ByteScanner, it parses the string s. An error is also returned
+// if the string contains invalid or trailing bytes not belonging to
+// the number.
+func (z *Float) Parse(s string, base int) (f *Float, b int, err error) {
+	r := strings.NewReader(s)
+
+	if f, b, err = z.Scan(r, base); err != nil {
+		return
+	}
+
+	// entire string must have been consumed
+	if ch, err2 := r.ReadByte(); err2 == nil {
+		err = fmt.Errorf("expected end of string, found %q", ch)
+	} else if err2 != io.EOF {
+		err = err2
+	}
+
+	return
+}
+
+// ScanFloat is like f.Scan(r, base) with f set to the given precision
+// and rounding mode.
+func ScanFloat(r io.ByteScanner, base int, prec uint, mode RoundingMode) (f *Float, b int, err error) {
+	return new(Float).SetPrec(prec).SetMode(mode).Scan(r, base)
+}
+
+// ParseFloat is like f.Parse(s, base) with f set to the given precision
+// and rounding mode.
+func ParseFloat(s string, base int, prec uint, mode RoundingMode) (f *Float, b int, err error) {
+	return new(Float).SetPrec(prec).SetMode(mode).Parse(s, base)
+}
+
+// Format converts the floating-point number x to a string according
+// to the given format and precision prec. The format is one of:
+//
+//	'e'	-d.dddde±dd, decimal exponent, at least two exponent digits (e.g. e+00)
+//	'E'	-d.ddddE±dd, decimal exponent, at least two exponent digits (e.g. E+00)
+//	'f'	-ddddd.dddd, no exponent
+//	'g'	like 'e' for large exponents, like 'f' otherwise
+//	'G'	like 'E' for large exponents, like 'f' otherwise
+//	'b'	-ddddddp±dd, binary exponent
+//	'p'	-0x.dddp±dd, binary exponent, hexadecimal mantissa
+//
+// For the binary exponent formats, the mantissa is printed in normalized form:
+//
+//	'b'	decimal integer mantissa using x.Prec() bits, or -0
+//	'p'	hexadecimal fraction with 0.5 <= 0.mantissa < 1.0, or -0
+//
+// The precision prec controls the number of digits (excluding the exponent)
+// printed by the 'e', 'E', 'f', 'g', and 'G' formats. For 'e', 'E', and 'f'
+// it is the number of digits after the decimal point. For 'g' and 'G' it is
+// the total number of digits. A negative precision selects the smallest
+// number of digits necessary such that ParseFloat will return f exactly.
+// The prec value is ignored for the 'b' or 'p' format.
+//
+// BUG(gri) Float.Format does not accept negative precisions.
+func (x *Float) Format(format byte, prec int) string {
+	const extra = 10 // TODO(gri) determine a good/better value here
+	return string(x.Append(make([]byte, 0, prec+extra), format, prec))
+}
+
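+// A short illustration of the verbs above (hypothetical, unused helper;
+// the expected strings appear in the conversion tests):
+func exampleFormatVerbs() {
+	x := new(Float).SetFloat64(1024.0) // prec == 53
+	_ = x.Format('p', 0)               // "0x.8p11" (0.5 * 2**11)
+	_ = x.Format('f', 2)               // "1024.00"
+	_ = x.Format('e', 3)               // "1.024e+03"
+}
+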
+// Append appends the string form of the floating-point number x,
+// as generated by x.Format, to buf and returns the extended buffer.
+func (x *Float) Append(buf []byte, format byte, prec int) []byte {
+	// TODO(gri) factor out handling of sign?
+
+	// Inf
+	if x.IsInf() {
+		var ch byte = '+'
+		if x.neg {
+			ch = '-'
+		}
+		buf = append(buf, ch)
+		return append(buf, "Inf"...)
+	}
+
+	// easy formats
+	switch format {
+	case 'b':
+		return x.bstring(buf)
+	case 'p':
+		return x.pstring(buf)
+	}
+
+	return x.bigFtoa(buf, format, prec)
+}
+
+// BUG(gri): Float.String uses x.Format('g', 10) rather than x.Format('g', -1).
+func (x *Float) String() string {
+	return x.Format('g', 10)
+}
+
+// bstring appends the string of x in the format ["-"] mantissa "p" exponent
+// with a decimal mantissa and a binary exponent, or ["-"] "0" if x is zero,
+// and returns the extended buffer.
+// The mantissa is normalized such that it uses x.Prec() bits in binary
+// representation.
+func (x *Float) bstring(buf []byte) []byte {
+	if x.neg {
+		buf = append(buf, '-')
+	}
+	if x.form == zero {
+		return append(buf, '0')
+	}
+
+	if debugFloat && x.form != finite {
+		panic("non-finite float")
+	}
+	// x != 0
+
+	// adjust mantissa to use exactly x.prec bits
+	m := x.mant
+	switch w := uint32(len(x.mant)) * _W; {
+	case w < x.prec:
+		m = nat(nil).shl(m, uint(x.prec-w))
+	case w > x.prec:
+		m = nat(nil).shr(m, uint(w-x.prec))
+	}
+
+	buf = append(buf, m.decimalString()...)
+	buf = append(buf, 'p')
+	e := int64(x.exp) - int64(x.prec)
+	if e >= 0 {
+		buf = append(buf, '+')
+	}
+	return strconv.AppendInt(buf, e, 10)
+}
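+
+// For example, x = 1.0 at precision 53 is shifted to a 53-bit mantissa,
+// yielding "4503599627370496p-52" (2**52 * 2**-52 == 1), as in the
+// conversion tests.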
+
+// pstring appends the string of x in the format ["-"] "0x." mantissa "p" exponent
+// with a hexadecimal mantissa and a binary exponent, or ["-"] "0" if x is zero,
+// and returns the extended buffer.
+// The mantissa is normalized such that 0.5 <= 0.mantissa < 1.0.
+func (x *Float) pstring(buf []byte) []byte {
+	if x.neg {
+		buf = append(buf, '-')
+	}
+	if x.form == zero {
+		return append(buf, '0')
+	}
+
+	if debugFloat && x.form != finite {
+		panic("non-finite float")
+	}
+	// x != 0
+
+	// remove trailing 0 words early
+	// (no need to convert to hex 0's and trim later)
+	m := x.mant
+	i := 0
+	for i < len(m) && m[i] == 0 {
+		i++
+	}
+	m = m[i:]
+
+	buf = append(buf, "0x."...)
+	buf = append(buf, strings.TrimRight(x.mant.hexString(), "0")...)
+	buf = append(buf, 'p')
+	return strconv.AppendInt(buf, int64(x.exp), 10)
+}
diff --git a/src/cmd/compile/internal/big/floatconv_test.go b/src/cmd/compile/internal/big/floatconv_test.go
new file mode 100644
index 0000000..96c01ee
--- /dev/null
+++ b/src/cmd/compile/internal/big/floatconv_test.go
@@ -0,0 +1,397 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package big
+
+import (
+	"math"
+	"strconv"
+	"testing"
+)
+
+func TestFloatSetFloat64String(t *testing.T) {
+	for _, test := range []struct {
+		s string
+		x float64
+	}{
+		// basics
+		{"0", 0},
+		{"-0", -0},
+		{"+0", 0},
+		{"1", 1},
+		{"-1", -1},
+		{"+1", 1},
+		{"1.234", 1.234},
+		{"-1.234", -1.234},
+		{"+1.234", 1.234},
+		{".1", 0.1},
+		{"1.", 1},
+		{"+1.", 1},
+
+		// various zeros
+		{"0e100", 0},
+		{"-0e+100", 0},
+		{"+0e-100", 0},
+		{"0E100", 0},
+		{"-0E+100", 0},
+		{"+0E-100", 0},
+
+		// various decimal exponent formats
+		{"1.e10", 1e10},
+		{"1e+10", 1e10},
+		{"+1e-10", 1e-10},
+		{"1E10", 1e10},
+		{"1.E+10", 1e10},
+		{"+1E-10", 1e-10},
+
+		// misc decimal values
+		{"3.14159265", 3.14159265},
+		{"-687436.79457e-245", -687436.79457e-245},
+		{"-687436.79457E245", -687436.79457e245},
+		{".0000000000000000000000000000000000000001", 1e-40},
+		{"+10000000000000000000000000000000000000000e-0", 1e40},
+
+		// decimal mantissa, binary exponent
+		{"0p0", 0},
+		{"-0p0", -0},
+		{"1p10", 1 << 10},
+		{"1p+10", 1 << 10},
+		{"+1p-10", 1.0 / (1 << 10)},
+		{"1024p-12", 0.25},
+		{"-1p10", -1024},
+		{"1.5p1", 3},
+
+		// binary mantissa, decimal exponent
+		{"0b0", 0},
+		{"-0b0", -0},
+		{"0b0e+10", 0},
+		{"-0b0e-10", -0},
+		{"0b1010", 10},
+		{"0B1010E2", 1000},
+		{"0b.1", 0.5},
+		{"0b.001", 0.125},
+		{"0b.001e3", 125},
+
+		// binary mantissa, binary exponent
+		{"0b0p+10", 0},
+		{"-0b0p-10", -0},
+		{"0b.1010p4", 10},
+		{"0b1p-1", 0.5},
+		{"0b001p-3", 0.125},
+		{"0b.001p3", 1},
+		{"0b0.01p2", 1},
+
+		// hexadecimal mantissa and exponent
+		{"0x0", 0},
+		{"-0x0", -0},
+		{"0x0p+10", 0},
+		{"-0x0p-10", -0},
+		{"0xff", 255},
+		{"0X.8p1", 1},
+		{"-0X0.00008p16", -0.5},
+		{"0x0.0000000000001p-1022", math.SmallestNonzeroFloat64},
+		{"0x1.fffffffffffffp1023", math.MaxFloat64},
+	} {
+		var x Float
+		x.SetPrec(53)
+		_, ok := x.SetString(test.s)
+		if !ok {
+			t.Errorf("%s: parse error", test.s)
+			continue
+		}
+		f, _ := x.Float64()
+		want := new(Float).SetFloat64(test.x)
+		if x.Cmp(want) != 0 {
+			t.Errorf("%s: got %s (%v); want %v", test.s, &x, f, test.x)
+		}
+	}
+}
+
+const (
+	below1e23 = 99999999999999974834176
+	above1e23 = 100000000000000008388608
+)
+
+func TestFloat64Format(t *testing.T) {
+	for _, test := range []struct {
+		x      float64
+		format byte
+		prec   int
+		want   string
+	}{
+		{0, 'f', 0, "0"},
+		{math.Copysign(0, -1), 'f', 0, "-0"},
+		{1, 'f', 0, "1"},
+		{-1, 'f', 0, "-1"},
+
+		{1.459, 'e', 0, "1e+00"},
+		{2.459, 'e', 1, "2.5e+00"},
+		{3.459, 'e', 2, "3.46e+00"},
+		{4.459, 'e', 3, "4.459e+00"},
+		{5.459, 'e', 4, "5.4590e+00"},
+
+		{1.459, 'f', 0, "1"},
+		{2.459, 'f', 1, "2.5"},
+		{3.459, 'f', 2, "3.46"},
+		{4.459, 'f', 3, "4.459"},
+		{5.459, 'f', 4, "5.4590"},
+
+		{0, 'b', 0, "0"},
+		{math.Copysign(0, -1), 'b', 0, "-0"},
+		{1.0, 'b', 0, "4503599627370496p-52"},
+		{-1.0, 'b', 0, "-4503599627370496p-52"},
+		{4503599627370496, 'b', 0, "4503599627370496p+0"},
+
+		{0, 'p', 0, "0"},
+		{math.Copysign(0, -1), 'p', 0, "-0"},
+		{1024.0, 'p', 0, "0x.8p11"},
+		{-1024.0, 'p', 0, "-0x.8p11"},
+
+		// all test cases below from strconv/ftoa_test.go
+		{1, 'e', 5, "1.00000e+00"},
+		{1, 'f', 5, "1.00000"},
+		{1, 'g', 5, "1"},
+		// {1, 'g', -1, "1"},
+		// {20, 'g', -1, "20"},
+		// {1234567.8, 'g', -1, "1.2345678e+06"},
+		// {200000, 'g', -1, "200000"},
+		// {2000000, 'g', -1, "2e+06"},
+
+		// g conversion and zero suppression
+		{400, 'g', 2, "4e+02"},
+		{40, 'g', 2, "40"},
+		{4, 'g', 2, "4"},
+		{.4, 'g', 2, "0.4"},
+		{.04, 'g', 2, "0.04"},
+		{.004, 'g', 2, "0.004"},
+		{.0004, 'g', 2, "0.0004"},
+		{.00004, 'g', 2, "4e-05"},
+		{.000004, 'g', 2, "4e-06"},
+
+		{0, 'e', 5, "0.00000e+00"},
+		{0, 'f', 5, "0.00000"},
+		{0, 'g', 5, "0"},
+		// {0, 'g', -1, "0"},
+
+		{-1, 'e', 5, "-1.00000e+00"},
+		{-1, 'f', 5, "-1.00000"},
+		{-1, 'g', 5, "-1"},
+		// {-1, 'g', -1, "-1"},
+
+		{12, 'e', 5, "1.20000e+01"},
+		{12, 'f', 5, "12.00000"},
+		{12, 'g', 5, "12"},
+		// {12, 'g', -1, "12"},
+
+		{123456700, 'e', 5, "1.23457e+08"},
+		{123456700, 'f', 5, "123456700.00000"},
+		{123456700, 'g', 5, "1.2346e+08"},
+		// {123456700, 'g', -1, "1.234567e+08"},
+
+		{1.2345e6, 'e', 5, "1.23450e+06"},
+		{1.2345e6, 'f', 5, "1234500.00000"},
+		{1.2345e6, 'g', 5, "1.2345e+06"},
+
+		{1e23, 'e', 17, "9.99999999999999916e+22"},
+		{1e23, 'f', 17, "99999999999999991611392.00000000000000000"},
+		{1e23, 'g', 17, "9.9999999999999992e+22"},
+
+		// {1e23, 'e', -1, "1e+23"},
+		// {1e23, 'f', -1, "100000000000000000000000"},
+		// {1e23, 'g', -1, "1e+23"},
+
+		{below1e23, 'e', 17, "9.99999999999999748e+22"},
+		{below1e23, 'f', 17, "99999999999999974834176.00000000000000000"},
+		{below1e23, 'g', 17, "9.9999999999999975e+22"},
+
+		// {below1e23, 'e', -1, "9.999999999999997e+22"},
+		// {below1e23, 'f', -1, "99999999999999970000000"},
+		// {below1e23, 'g', -1, "9.999999999999997e+22"},
+
+		{above1e23, 'e', 17, "1.00000000000000008e+23"},
+		{above1e23, 'f', 17, "100000000000000008388608.00000000000000000"},
+		// {above1e23, 'g', 17, "1.0000000000000001e+23"},
+
+		// {above1e23, 'e', -1, "1.0000000000000001e+23"},
+		// {above1e23, 'f', -1, "100000000000000010000000"},
+		// {above1e23, 'g', -1, "1.0000000000000001e+23"},
+
+		// {fdiv(5e-304, 1e20), 'g', -1, "5e-324"},
+		// {fdiv(-5e-304, 1e20), 'g', -1, "-5e-324"},
+
+		// {32, 'g', -1, "32"},
+		// {32, 'g', 0, "3e+01"},
+
+		// {100, 'x', -1, "%x"},
+
+		// {math.NaN(), 'g', -1, "NaN"},
+		// {-math.NaN(), 'g', -1, "NaN"},
+		{math.Inf(0), 'g', -1, "+Inf"},
+		{math.Inf(-1), 'g', -1, "-Inf"},
+		{-math.Inf(0), 'g', -1, "-Inf"},
+
+		{-1, 'b', -1, "-4503599627370496p-52"},
+
+		// fixed bugs
+		{0.9, 'f', 1, "0.9"},
+		{0.09, 'f', 1, "0.1"},
+		{0.0999, 'f', 1, "0.1"},
+		{0.05, 'f', 1, "0.1"},
+		{0.05, 'f', 0, "0"},
+		{0.5, 'f', 1, "0.5"},
+		{0.5, 'f', 0, "0"},
+		{1.5, 'f', 0, "2"},
+
+		// http://www.exploringbinary.com/java-hangs-when-converting-2-2250738585072012e-308/
+		// {2.2250738585072012e-308, 'g', -1, "2.2250738585072014e-308"},
+		// http://www.exploringbinary.com/php-hangs-on-numeric-value-2-2250738585072011e-308/
+		// {2.2250738585072011e-308, 'g', -1, "2.225073858507201e-308"},
+
+		// Issue 2625.
+		{383260575764816448, 'f', 0, "383260575764816448"},
+		// {383260575764816448, 'g', -1, "3.8326057576481645e+17"},
+	} {
+		f := new(Float).SetFloat64(test.x)
+		got := f.Format(test.format, test.prec)
+		if got != test.want {
+			t.Errorf("%v: got %s; want %s", test, got, test.want)
+		}
+
+		if test.format == 'b' && test.x == 0 {
+			continue // 'b' format in strconv.FormatFloat requires knowledge of bias for 0.0
+		}
+		if test.format == 'p' {
+			continue // 'p' format not supported by strconv.FormatFloat
+		}
+
+		// verify that Float format matches strconv format
+		want := strconv.FormatFloat(test.x, test.format, test.prec, 64)
+		if got != want {
+			t.Errorf("%v: got %s; want %s (strconv)", test, got, want)
+		}
+	}
+}
+
+func TestFloatFormat(t *testing.T) {
+	for _, test := range []struct {
+		x      string
+		prec   uint
+		format byte
+		digits int
+		want   string
+	}{
+		{"0", 10, 'f', 0, "0"},
+		{"-0", 10, 'f', 0, "-0"},
+		{"1", 10, 'f', 0, "1"},
+		{"-1", 10, 'f', 0, "-1"},
+
+		{"1.459", 100, 'e', 0, "1e+00"},
+		{"2.459", 100, 'e', 1, "2.5e+00"},
+		{"3.459", 100, 'e', 2, "3.46e+00"},
+		{"4.459", 100, 'e', 3, "4.459e+00"},
+		{"5.459", 100, 'e', 4, "5.4590e+00"},
+
+		{"1.459", 100, 'E', 0, "1E+00"},
+		{"2.459", 100, 'E', 1, "2.5E+00"},
+		{"3.459", 100, 'E', 2, "3.46E+00"},
+		{"4.459", 100, 'E', 3, "4.459E+00"},
+		{"5.459", 100, 'E', 4, "5.4590E+00"},
+
+		{"1.459", 100, 'f', 0, "1"},
+		{"2.459", 100, 'f', 1, "2.5"},
+		{"3.459", 100, 'f', 2, "3.46"},
+		{"4.459", 100, 'f', 3, "4.459"},
+		{"5.459", 100, 'f', 4, "5.4590"},
+
+		{"1.459", 100, 'g', 0, "1"},
+		{"2.459", 100, 'g', 1, "2"},
+		{"3.459", 100, 'g', 2, "3.5"},
+		{"4.459", 100, 'g', 3, "4.46"},
+		{"5.459", 100, 'g', 4, "5.459"},
+
+		{"1459", 53, 'g', 0, "1e+03"},
+		{"2459", 53, 'g', 1, "2e+03"},
+		{"3459", 53, 'g', 2, "3.5e+03"},
+		{"4459", 53, 'g', 3, "4.46e+03"},
+		{"5459", 53, 'g', 4, "5459"},
+
+		{"1459", 53, 'G', 0, "1E+03"},
+		{"2459", 53, 'G', 1, "2E+03"},
+		{"3459", 53, 'G', 2, "3.5E+03"},
+		{"4459", 53, 'G', 3, "4.46E+03"},
+		{"5459", 53, 'G', 4, "5459"},
+
+		{"3", 10, 'e', 40, "3.0000000000000000000000000000000000000000e+00"},
+		{"3", 10, 'f', 40, "3.0000000000000000000000000000000000000000"},
+		{"3", 10, 'g', 40, "3"},
+
+		{"3e40", 100, 'e', 40, "3.0000000000000000000000000000000000000000e+40"},
+		{"3e40", 100, 'f', 4, "30000000000000000000000000000000000000000.0000"},
+		{"3e40", 100, 'g', 40, "3e+40"},
+
+		// make sure "stupid" exponents don't stall the machine
+		{"1e1000000", 64, 'p', 0, "0x.88b3a28a05eade3ap3321929"},
+		{"1e1000000000", 64, 'p', 0, "0x.ecc5f45aa573d3p1538481529"},
+		{"1e-1000000", 64, 'p', 0, "0x.efb4542cc8ca418ap-3321928"},
+		{"1e-1000000000", 64, 'p', 0, "0x.8a64dd983a4c7dabp-1538481528"},
+
+		// TODO(gri) need tests for actual large Floats
+
+		{"0", 53, 'b', 0, "0"},
+		{"-0", 53, 'b', 0, "-0"},
+		{"1.0", 53, 'b', 0, "4503599627370496p-52"},
+		{"-1.0", 53, 'b', 0, "-4503599627370496p-52"},
+		{"4503599627370496", 53, 'b', 0, "4503599627370496p+0"},
+
+		// issue 9939
+		{"3", 350, 'b', 0, "1720123961992553633708115671476565205597423741876210842803191629540192157066363606052513914832594264915968p-348"},
+		{"03", 350, 'b', 0, "1720123961992553633708115671476565205597423741876210842803191629540192157066363606052513914832594264915968p-348"},
+		{"3.", 350, 'b', 0, "1720123961992553633708115671476565205597423741876210842803191629540192157066363606052513914832594264915968p-348"},
+		{"3.0", 350, 'b', 0, "1720123961992553633708115671476565205597423741876210842803191629540192157066363606052513914832594264915968p-348"},
+		{"3.00", 350, 'b', 0, "1720123961992553633708115671476565205597423741876210842803191629540192157066363606052513914832594264915968p-348"},
+		{"3.000", 350, 'b', 0, "1720123961992553633708115671476565205597423741876210842803191629540192157066363606052513914832594264915968p-348"},
+
+		{"3", 350, 'p', 0, "0x.cp2"},
+		{"03", 350, 'p', 0, "0x.cp2"},
+		{"3.", 350, 'p', 0, "0x.cp2"},
+		{"3.0", 350, 'p', 0, "0x.cp2"},
+		{"3.00", 350, 'p', 0, "0x.cp2"},
+		{"3.000", 350, 'p', 0, "0x.cp2"},
+
+		{"0", 64, 'p', 0, "0"},
+		{"-0", 64, 'p', 0, "-0"},
+		{"1024.0", 64, 'p', 0, "0x.8p11"},
+		{"-1024.0", 64, 'p', 0, "-0x.8p11"},
+
+		// unsupported format
+		{"3.14", 64, 'x', 0, "%x"},
+	} {
+		f, _, err := ParseFloat(test.x, 0, test.prec, ToNearestEven)
+		if err != nil {
+			t.Errorf("%v: %s", test, err)
+			continue
+		}
+
+		got := f.Format(test.format, test.digits)
+		if got != test.want {
+			t.Errorf("%v: got %s; want %s", test, got, test.want)
+		}
+
+		// compare with strconv.FormatFloat output if possible
+		// ('p' format is not supported by strconv.FormatFloat,
+		// and its output for 0.0 prints a biased exponent value
+		// as in 0p-1074 which makes no sense to emulate here)
+		if test.prec == 53 && test.format != 'p' && f.Sign() != 0 {
+			f64, acc := f.Float64()
+			if acc != Exact {
+				t.Errorf("%v: expected exact conversion to float64", test)
+				continue
+			}
+			got := strconv.FormatFloat(f64, test.format, test.digits, 64)
+			if got != test.want {
+				t.Errorf("%v: got %s; want %s", test, got, test.want)
+			}
+		}
+	}
+}
diff --git a/src/cmd/compile/internal/big/floatexample_test.go b/src/cmd/compile/internal/big/floatexample_test.go
new file mode 100644
index 0000000..7db1023
--- /dev/null
+++ b/src/cmd/compile/internal/big/floatexample_test.go
@@ -0,0 +1,111 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package big_test
+
+import (
+	"fmt"
+	"math"
+	"math/big"
+)
+
+func ExampleFloat_Add() {
+	// Operating on numbers of different precision.
+	var x, y, z big.Float
+	x.SetInt64(1000)          // x is automatically set to 64bit precision
+	y.SetFloat64(2.718281828) // y is automatically set to 53bit precision
+	z.SetPrec(32)
+	z.Add(&x, &y)
+	fmt.Printf("x = %s (%s, prec = %d, acc = %s)\n", &x, x.Format('p', 0), x.Prec(), x.Acc())
+	fmt.Printf("y = %s (%s, prec = %d, acc = %s)\n", &y, y.Format('p', 0), y.Prec(), y.Acc())
+	fmt.Printf("z = %s (%s, prec = %d, acc = %s)\n", &z, z.Format('p', 0), z.Prec(), z.Acc())
+	// Output:
+	// x = 1000 (0x.fap10, prec = 64, acc = Exact)
+	// y = 2.718281828 (0x.adf85458248cd8p2, prec = 53, acc = Exact)
+	// z = 1002.718282 (0x.faadf854p10, prec = 32, acc = Below)
+}
+
+func Example_Shift() {
+	// Implementing Float "shift" by modifying the (binary) exponents directly.
+	for s := -5; s <= 5; s++ {
+		x := big.NewFloat(0.5)
+		x.SetMantExp(x, x.MantExp(nil)+s) // shift x by s
+		fmt.Println(x)
+	}
+	// Output:
+	// 0.015625
+	// 0.03125
+	// 0.0625
+	// 0.125
+	// 0.25
+	// 0.5
+	// 1
+	// 2
+	// 4
+	// 8
+	// 16
+}
+
+func ExampleFloat_Cmp() {
+	inf := math.Inf(1)
+	zero := 0.0
+
+	operands := []float64{-inf, -1.2, -zero, 0, +1.2, +inf}
+
+	fmt.Println("   x     y  cmp")
+	fmt.Println("---------------")
+	for _, x64 := range operands {
+		x := big.NewFloat(x64)
+		for _, y64 := range operands {
+			y := big.NewFloat(y64)
+			fmt.Printf("%4s  %4s  %3d\n", x, y, x.Cmp(y))
+		}
+		fmt.Println()
+	}
+
+	// Output:
+	//    x     y  cmp
+	// ---------------
+	// -Inf  -Inf    0
+	// -Inf  -1.2   -1
+	// -Inf    -0   -1
+	// -Inf     0   -1
+	// -Inf   1.2   -1
+	// -Inf  +Inf   -1
+	//
+	// -1.2  -Inf    1
+	// -1.2  -1.2    0
+	// -1.2    -0   -1
+	// -1.2     0   -1
+	// -1.2   1.2   -1
+	// -1.2  +Inf   -1
+	//
+	//   -0  -Inf    1
+	//   -0  -1.2    1
+	//   -0    -0    0
+	//   -0     0    0
+	//   -0   1.2   -1
+	//   -0  +Inf   -1
+	//
+	//    0  -Inf    1
+	//    0  -1.2    1
+	//    0    -0    0
+	//    0     0    0
+	//    0   1.2   -1
+	//    0  +Inf   -1
+	//
+	//  1.2  -Inf    1
+	//  1.2  -1.2    1
+	//  1.2    -0    1
+	//  1.2     0    1
+	//  1.2   1.2    0
+	//  1.2  +Inf   -1
+	//
+	// +Inf  -Inf    1
+	// +Inf  -1.2    1
+	// +Inf    -0    1
+	// +Inf     0    1
+	// +Inf   1.2    1
+	// +Inf  +Inf    0
+}
diff --git a/src/cmd/compile/internal/big/ftoa.go b/src/cmd/compile/internal/big/ftoa.go
new file mode 100644
index 0000000..0a9edfd
--- /dev/null
+++ b/src/cmd/compile/internal/big/ftoa.go
@@ -0,0 +1,190 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements the 'e', 'f', 'g' floating-point formats.
+// It closely follows the corresponding implementation in
+// strconv/ftoa.go, but is modified and simplified for big.Float.
+
+// Algorithm:
+//   1) convert Float to multiprecision decimal
+//   2) round to desired precision
+//   3) read digits out and format
+
+package big
+
+import "strconv"
+
+// TODO(gri) Consider moving sign into decimal - could make the signatures below cleaner.
+
+// bigFtoa formats a float for the %e, %E, %f, %g, and %G formats.
+func (f *Float) bigFtoa(buf []byte, fmt byte, prec int) []byte {
+	if debugFloat && f.IsInf() {
+		panic("non-finite float")
+	}
+
+	// 1) convert Float to multiprecision decimal
+	var mant nat
+	if f.form == finite {
+		mant = f.mant
+	}
+	var d decimal
+	d.init(mant, int(f.exp)-f.mant.bitLen())
+
+	// 2) round to desired precision
+	shortest := false
+	if prec < 0 {
+		shortest = true
+		panic("unimplemented")
+		// TODO(gri) complete this
+		// roundShortest(&d, f.mant, int(f.exp))
+		// Precision for shortest representation mode.
+		switch fmt {
+		case 'e', 'E':
+			prec = len(d.mant) - 1
+		case 'f':
+			prec = max(len(d.mant)-d.exp, 0)
+		case 'g', 'G':
+			prec = len(d.mant)
+		}
+	} else {
+		// round appropriately
+		switch fmt {
+		case 'e', 'E':
+			// one digit before and number of digits after decimal point
+			d.round(1 + prec)
+		case 'f':
+			// number of digits before and after decimal point
+			d.round(d.exp + prec)
+		case 'g', 'G':
+			if prec == 0 {
+				prec = 1
+			}
+			d.round(prec)
+		}
+	}
+
+	// 3) read digits out and format
+	switch fmt {
+	case 'e', 'E':
+		return fmtE(buf, fmt, prec, f.neg, d)
+	case 'f':
+		return fmtF(buf, prec, f.neg, d)
+	case 'g', 'G':
+		// trim trailing fractional zeros in %e format
+		eprec := prec
+		if eprec > len(d.mant) && len(d.mant) >= d.exp {
+			eprec = len(d.mant)
+		}
+		// %e is used if the exponent from the conversion
+		// is less than -4 or greater than or equal to the precision.
+		// If precision was the shortest possible, use eprec = 6 for
+		// this decision.
+		if shortest {
+			eprec = 6
+		}
+		exp := d.exp - 1
+		if exp < -4 || exp >= eprec {
+			if prec > len(d.mant) {
+				prec = len(d.mant)
+			}
+			return fmtE(buf, fmt+'e'-'g', prec-1, f.neg, d)
+		}
+		if prec > d.exp {
+			prec = len(d.mant)
+		}
+		return fmtF(buf, max(prec-d.exp, 0), f.neg, d)
+	}
+
+	// unknown format
+	return append(buf, '%', fmt)
+}
+
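+// A worked pass through the three steps (sketch; the decimal type is
+// implemented in decimal.go): for x = 12.25 with fmt = 'e', prec = 3:
+//
+//	1) d.mant = "1225", d.exp = 2 (value == 0.1225 * 10**2)
+//	2) d.round(1+3) keeps all 4 digits
+//	3) fmtE emits "1.225e+01" (exponent d.exp-1 == 1)
+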
+// %e: -d.ddddde±dd
+func fmtE(buf []byte, fmt byte, prec int, neg bool, d decimal) []byte {
+	// sign
+	if neg {
+		buf = append(buf, '-')
+	}
+
+	// first digit
+	ch := byte('0')
+	if len(d.mant) > 0 {
+		ch = d.mant[0]
+	}
+	buf = append(buf, ch)
+
+	// .moredigits
+	if prec > 0 {
+		buf = append(buf, '.')
+		i := 1
+		m := min(len(d.mant), prec+1)
+		if i < m {
+			buf = append(buf, d.mant[i:m]...)
+			i = m
+		}
+		for ; i <= prec; i++ {
+			buf = append(buf, '0')
+		}
+	}
+
+	// e±
+	buf = append(buf, fmt)
+	var exp int64
+	if len(d.mant) > 0 {
+		exp = int64(d.exp) - 1 // -1 because first digit was printed before '.'
+	}
+	if exp < 0 {
+		ch = '-'
+		exp = -exp
+	} else {
+		ch = '+'
+	}
+	buf = append(buf, ch)
+
+	// dd...d
+	if exp < 10 {
+		buf = append(buf, '0') // at least 2 exponent digits
+	}
+	return strconv.AppendInt(buf, exp, 10)
+}
+
+// %f: -ddddddd.ddddd
+func fmtF(buf []byte, prec int, neg bool, d decimal) []byte {
+	// sign
+	if neg {
+		buf = append(buf, '-')
+	}
+
+	// integer, padded with zeros as needed
+	if d.exp > 0 {
+		m := min(len(d.mant), d.exp)
+		buf = append(buf, d.mant[:m]...)
+		for ; m < d.exp; m++ {
+			buf = append(buf, '0')
+		}
+	} else {
+		buf = append(buf, '0')
+	}
+
+	// fraction
+	if prec > 0 {
+		buf = append(buf, '.')
+		for i := 0; i < prec; i++ {
+			ch := byte('0')
+			if j := d.exp + i; 0 <= j && j < len(d.mant) {
+				ch = d.mant[j]
+			}
+			buf = append(buf, ch)
+		}
+	}
+
+	return buf
+}
+
+func min(x, y int) int {
+	if x < y {
+		return x
+	}
+	return y
+}
diff --git a/src/cmd/compile/internal/big/gcd_test.go b/src/cmd/compile/internal/big/gcd_test.go
new file mode 100644
index 0000000..c0b9f58
--- /dev/null
+++ b/src/cmd/compile/internal/big/gcd_test.go
@@ -0,0 +1,47 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements a GCD benchmark.
+// Usage: go test math/big -test.bench GCD
+
+package big
+
+import (
+	"math/rand"
+	"testing"
+)
+
+// randInt returns a pseudo-random Int in the range [1<<(size-1), (1<<size) - 1]
+func randInt(r *rand.Rand, size uint) *Int {
+	n := new(Int).Lsh(intOne, size-1)
+	x := new(Int).Rand(r, n)
+	return x.Add(x, n) // make sure result >= 1<<(size-1)
+}
+
+func runGCD(b *testing.B, aSize, bSize uint) {
+	b.StopTimer()
+	var r = rand.New(rand.NewSource(1234))
+	aa := randInt(r, aSize)
+	bb := randInt(r, bSize)
+	b.StartTimer()
+	for i := 0; i < b.N; i++ {
+		new(Int).GCD(nil, nil, aa, bb)
+	}
+}
+
+func BenchmarkGCD10x10(b *testing.B)         { runGCD(b, 10, 10) }
+func BenchmarkGCD10x100(b *testing.B)        { runGCD(b, 10, 100) }
+func BenchmarkGCD10x1000(b *testing.B)       { runGCD(b, 10, 1000) }
+func BenchmarkGCD10x10000(b *testing.B)      { runGCD(b, 10, 10000) }
+func BenchmarkGCD10x100000(b *testing.B)     { runGCD(b, 10, 100000) }
+func BenchmarkGCD100x100(b *testing.B)       { runGCD(b, 100, 100) }
+func BenchmarkGCD100x1000(b *testing.B)      { runGCD(b, 100, 1000) }
+func BenchmarkGCD100x10000(b *testing.B)     { runGCD(b, 100, 10000) }
+func BenchmarkGCD100x100000(b *testing.B)    { runGCD(b, 100, 100000) }
+func BenchmarkGCD1000x1000(b *testing.B)     { runGCD(b, 1000, 1000) }
+func BenchmarkGCD1000x10000(b *testing.B)    { runGCD(b, 1000, 10000) }
+func BenchmarkGCD1000x100000(b *testing.B)   { runGCD(b, 1000, 100000) }
+func BenchmarkGCD10000x10000(b *testing.B)   { runGCD(b, 10000, 10000) }
+func BenchmarkGCD10000x100000(b *testing.B)  { runGCD(b, 10000, 100000) }
+func BenchmarkGCD100000x100000(b *testing.B) { runGCD(b, 100000, 100000) }
diff --git a/src/cmd/compile/internal/big/hilbert_test.go b/src/cmd/compile/internal/big/hilbert_test.go
new file mode 100644
index 0000000..1a84341
--- /dev/null
+++ b/src/cmd/compile/internal/big/hilbert_test.go
@@ -0,0 +1,160 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// A little test program and benchmark for rational arithmetic.
+// Computes a Hilbert matrix, its inverse, multiplies them
+// and verifies that the product is the identity matrix.
+
+package big
+
+import (
+	"fmt"
+	"testing"
+)
+
+type matrix struct {
+	n, m int
+	a    []*Rat
+}
+
+func (a *matrix) at(i, j int) *Rat {
+	if !(0 <= i && i < a.n && 0 <= j && j < a.m) {
+		panic("index out of range")
+	}
+	return a.a[i*a.m+j]
+}
+
+func (a *matrix) set(i, j int, x *Rat) {
+	if !(0 <= i && i < a.n && 0 <= j && j < a.m) {
+		panic("index out of range")
+	}
+	a.a[i*a.m+j] = x
+}
+
+func newMatrix(n, m int) *matrix {
+	if !(0 <= n && 0 <= m) {
+		panic("illegal matrix")
+	}
+	a := new(matrix)
+	a.n = n
+	a.m = m
+	a.a = make([]*Rat, n*m)
+	return a
+}
+
+func newUnit(n int) *matrix {
+	a := newMatrix(n, n)
+	for i := 0; i < n; i++ {
+		for j := 0; j < n; j++ {
+			x := NewRat(0, 1)
+			if i == j {
+				x.SetInt64(1)
+			}
+			a.set(i, j, x)
+		}
+	}
+	return a
+}
+
+func newHilbert(n int) *matrix {
+	a := newMatrix(n, n)
+	for i := 0; i < n; i++ {
+		for j := 0; j < n; j++ {
+			a.set(i, j, NewRat(1, int64(i+j+1)))
+		}
+	}
+	return a
+}
+
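+// newInverseHilbert returns the inverse of the n×n Hilbert matrix,
+// computed with the classical closed form (0-based indices; this is the
+// identity the loop below implements):
+//
+//	b(i,j) = (-1)**(i+j) * (i+j+1) * C(n+i, n-j-1) * C(n+j, n-i-1) * C(i+j, i)**2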
+func newInverseHilbert(n int) *matrix {
+	a := newMatrix(n, n)
+	for i := 0; i < n; i++ {
+		for j := 0; j < n; j++ {
+			x1 := new(Rat).SetInt64(int64(i + j + 1))
+			x2 := new(Rat).SetInt(new(Int).Binomial(int64(n+i), int64(n-j-1)))
+			x3 := new(Rat).SetInt(new(Int).Binomial(int64(n+j), int64(n-i-1)))
+			x4 := new(Rat).SetInt(new(Int).Binomial(int64(i+j), int64(i)))
+
+			x1.Mul(x1, x2)
+			x1.Mul(x1, x3)
+			x1.Mul(x1, x4)
+			x1.Mul(x1, x4)
+
+			if (i+j)&1 != 0 {
+				x1.Neg(x1)
+			}
+
+			a.set(i, j, x1)
+		}
+	}
+	return a
+}
+
+func (a *matrix) mul(b *matrix) *matrix {
+	if a.m != b.n {
+		panic("illegal matrix multiply")
+	}
+	c := newMatrix(a.n, b.m)
+	for i := 0; i < c.n; i++ {
+		for j := 0; j < c.m; j++ {
+			x := NewRat(0, 1)
+			for k := 0; k < a.m; k++ {
+				x.Add(x, new(Rat).Mul(a.at(i, k), b.at(k, j)))
+			}
+			c.set(i, j, x)
+		}
+	}
+	return c
+}
+
+func (a *matrix) eql(b *matrix) bool {
+	if a.n != b.n || a.m != b.m {
+		return false
+	}
+	for i := 0; i < a.n; i++ {
+		for j := 0; j < a.m; j++ {
+			if a.at(i, j).Cmp(b.at(i, j)) != 0 {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+func (a *matrix) String() string {
+	s := ""
+	for i := 0; i < a.n; i++ {
+		for j := 0; j < a.m; j++ {
+			s += fmt.Sprintf("\t%s", a.at(i, j))
+		}
+		s += "\n"
+	}
+	return s
+}
+
+func doHilbert(t *testing.T, n int) {
+	a := newHilbert(n)
+	b := newInverseHilbert(n)
+	I := newUnit(n)
+	ab := a.mul(b)
+	if !ab.eql(I) {
+		if t == nil {
+			panic("Hilbert failed")
+		}
+		t.Errorf("a   = %s\n", a)
+		t.Errorf("b   = %s\n", b)
+		t.Errorf("a*b = %s\n", ab)
+		t.Errorf("I   = %s\n", I)
+	}
+}
+
+func TestHilbert(t *testing.T) {
+	doHilbert(t, 10)
+}
+
+func BenchmarkHilbert(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		doHilbert(nil, 10)
+	}
+}
diff --git a/src/cmd/compile/internal/big/int.go b/src/cmd/compile/internal/big/int.go
new file mode 100644
index 0000000..7b419bf
--- /dev/null
+++ b/src/cmd/compile/internal/big/int.go
@@ -0,0 +1,848 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements signed multi-precision integers.
+
+package big
+
+import (
+	"fmt"
+	"io"
+	"math/rand"
+	"strings"
+)
+
+// An Int represents a signed multi-precision integer.
+// The zero value for an Int represents the value 0.
+type Int struct {
+	neg bool // sign
+	abs nat  // absolute value of the integer
+}
+
+var intOne = &Int{false, natOne}
+
+// Sign returns:
+//
+//	-1 if x <  0
+//	 0 if x == 0
+//	+1 if x >  0
+//
+func (x *Int) Sign() int {
+	if len(x.abs) == 0 {
+		return 0
+	}
+	if x.neg {
+		return -1
+	}
+	return 1
+}
+
+// SetInt64 sets z to x and returns z.
+func (z *Int) SetInt64(x int64) *Int {
+	neg := false
+	if x < 0 {
+		neg = true
+		x = -x
+	}
+	z.abs = z.abs.setUint64(uint64(x))
+	z.neg = neg
+	return z
+}
+
+// SetUint64 sets z to x and returns z.
+func (z *Int) SetUint64(x uint64) *Int {
+	z.abs = z.abs.setUint64(x)
+	z.neg = false
+	return z
+}
+
+// NewInt allocates and returns a new Int set to x.
+func NewInt(x int64) *Int {
+	return new(Int).SetInt64(x)
+}
+
+// Set sets z to x and returns z.
+func (z *Int) Set(x *Int) *Int {
+	if z != x {
+		z.abs = z.abs.set(x.abs)
+		z.neg = x.neg
+	}
+	return z
+}
+
+// Bits provides raw (unchecked but fast) access to x by returning its
+// absolute value as a little-endian Word slice. The result and x share
+// the same underlying array.
+// Bits is intended to support implementation of missing low-level Int
+// functionality outside this package; it should be avoided otherwise.
+func (x *Int) Bits() []Word {
+	return x.abs
+}
+
+// SetBits provides raw (unchecked but fast) access to z by setting its
+// value to abs, interpreted as a little-endian Word slice, and returning
+// z. The result and abs share the same underlying array.
+// SetBits is intended to support implementation of missing low-level Int
+// functionality outside this package; it should be avoided otherwise.
+func (z *Int) SetBits(abs []Word) *Int {
+	z.abs = nat(abs).norm()
+	z.neg = false
+	return z
+}
+
+// Abs sets z to |x| (the absolute value of x) and returns z.
+func (z *Int) Abs(x *Int) *Int {
+	z.Set(x)
+	z.neg = false
+	return z
+}
+
+// Neg sets z to -x and returns z.
+func (z *Int) Neg(x *Int) *Int {
+	z.Set(x)
+	z.neg = len(z.abs) > 0 && !z.neg // 0 has no sign
+	return z
+}
+
+// Add sets z to the sum x+y and returns z.
+func (z *Int) Add(x, y *Int) *Int {
+	neg := x.neg
+	if x.neg == y.neg {
+		// x + y == x + y
+		// (-x) + (-y) == -(x + y)
+		z.abs = z.abs.add(x.abs, y.abs)
+	} else {
+		// x + (-y) == x - y == -(y - x)
+		// (-x) + y == y - x == -(x - y)
+		if x.abs.cmp(y.abs) >= 0 {
+			z.abs = z.abs.sub(x.abs, y.abs)
+		} else {
+			neg = !neg
+			z.abs = z.abs.sub(y.abs, x.abs)
+		}
+	}
+	z.neg = len(z.abs) > 0 && neg // 0 has no sign
+	return z
+}
+
+// Sub sets z to the difference x-y and returns z.
+func (z *Int) Sub(x, y *Int) *Int {
+	neg := x.neg
+	if x.neg != y.neg {
+		// x - (-y) == x + y
+		// (-x) - y == -(x + y)
+		z.abs = z.abs.add(x.abs, y.abs)
+	} else {
+		// x - y == x - y == -(y - x)
+		// (-x) - (-y) == y - x == -(x - y)
+		if x.abs.cmp(y.abs) >= 0 {
+			z.abs = z.abs.sub(x.abs, y.abs)
+		} else {
+			neg = !neg
+			z.abs = z.abs.sub(y.abs, x.abs)
+		}
+	}
+	z.neg = len(z.abs) > 0 && neg // 0 has no sign
+	return z
+}
+
+// Mul sets z to the product x*y and returns z.
+func (z *Int) Mul(x, y *Int) *Int {
+	// x * y == x * y
+	// x * (-y) == -(x * y)
+	// (-x) * y == -(x * y)
+	// (-x) * (-y) == x * y
+	z.abs = z.abs.mul(x.abs, y.abs)
+	z.neg = len(z.abs) > 0 && x.neg != y.neg // 0 has no sign
+	return z
+}
+
+// MulRange sets z to the product of all integers
+// in the range [a, b] inclusively and returns z.
+// If a > b (empty range), the result is 1.
+func (z *Int) MulRange(a, b int64) *Int {
+	switch {
+	case a > b:
+		return z.SetInt64(1) // empty range
+	case a <= 0 && b >= 0:
+		return z.SetInt64(0) // range includes 0
+	}
+	// a <= b && (b < 0 || a > 0)
+
+	neg := false
+	if a < 0 {
+		neg = (b-a)&1 == 0
+		a, b = -b, -a
+	}
+
+	z.abs = z.abs.mulRange(uint64(a), uint64(b))
+	z.neg = neg
+	return z
+}
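+
+// Illustrative example (editor's addition, not in the upstream source):
+//
+//	new(Int).MulRange(-3, -1) // == -6, since (-3)*(-2)*(-1) = -6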
+
+// Binomial sets z to the binomial coefficient of (n, k) and returns z.
+func (z *Int) Binomial(n, k int64) *Int {
+	// reduce the number of multiplications by reducing k
+	if n/2 < k && k <= n {
+		k = n - k // Binomial(n, k) == Binomial(n, n-k)
+	}
+	var a, b Int
+	a.MulRange(n-k+1, n)
+	b.MulRange(1, k)
+	return z.Quo(&a, &b)
+}
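+
+// Illustrative example (editor's addition, not in the upstream source):
+//
+//	new(Int).Binomial(4, 2) // == 6, i.e. 4!/(2!*2!)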
+
+// Quo sets z to the quotient x/y for y != 0 and returns z.
+// If y == 0, a division-by-zero run-time panic occurs.
+// Quo implements truncated division (like Go); see QuoRem for more details.
+func (z *Int) Quo(x, y *Int) *Int {
+	z.abs, _ = z.abs.div(nil, x.abs, y.abs)
+	z.neg = len(z.abs) > 0 && x.neg != y.neg // 0 has no sign
+	return z
+}
+
+// Rem sets z to the remainder x%y for y != 0 and returns z.
+// If y == 0, a division-by-zero run-time panic occurs.
+// Rem implements truncated modulus (like Go); see QuoRem for more details.
+func (z *Int) Rem(x, y *Int) *Int {
+	_, z.abs = nat(nil).div(z.abs, x.abs, y.abs)
+	z.neg = len(z.abs) > 0 && x.neg // 0 has no sign
+	return z
+}
+
+// QuoRem sets z to the quotient x/y and r to the remainder x%y
+// and returns the pair (z, r) for y != 0.
+// If y == 0, a division-by-zero run-time panic occurs.
+//
+// QuoRem implements T-division and modulus (like Go):
+//
+//	q = x/y      with the result truncated to zero
+//	r = x - y*q
+//
+// (See Daan Leijen, ``Division and Modulus for Computer Scientists''.)
+// See DivMod for Euclidean division and modulus (unlike Go).
+//
+func (z *Int) QuoRem(x, y, r *Int) (*Int, *Int) {
+	z.abs, r.abs = z.abs.div(r.abs, x.abs, y.abs)
+	z.neg, r.neg = len(z.abs) > 0 && x.neg != y.neg, len(r.abs) > 0 && x.neg // 0 has no sign
+	return z, r
+}
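+
+// Illustrative example of the truncated-division convention, matching
+// Go's / and % operators (editor's addition, not in the upstream source):
+//
+//	q, r := new(Int).QuoRem(NewInt(-5), NewInt(3), new(Int))
+//	// q == -1, r == -2: the quotient is truncated toward zero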
+
+// Div sets z to the quotient x/y for y != 0 and returns z.
+// If y == 0, a division-by-zero run-time panic occurs.
+// Div implements Euclidean division (unlike Go); see DivMod for more details.
+func (z *Int) Div(x, y *Int) *Int {
+	yNeg := y.neg // z may be an alias for y
+	var r Int
+	z.QuoRem(x, y, &r)
+	if r.neg {
+		if yNeg {
+			z.Add(z, intOne)
+		} else {
+			z.Sub(z, intOne)
+		}
+	}
+	return z
+}
+
+// Mod sets z to the modulus x%y for y != 0 and returns z.
+// If y == 0, a division-by-zero run-time panic occurs.
+// Mod implements Euclidean modulus (unlike Go); see DivMod for more details.
+func (z *Int) Mod(x, y *Int) *Int {
+	y0 := y // save y
+	if z == y || alias(z.abs, y.abs) {
+		y0 = new(Int).Set(y)
+	}
+	var q Int
+	q.QuoRem(x, y, z)
+	if z.neg {
+		if y0.neg {
+			z.Sub(z, y0)
+		} else {
+			z.Add(z, y0)
+		}
+	}
+	return z
+}
+
+// DivMod sets z to the quotient x div y and m to the modulus x mod y
+// and returns the pair (z, m) for y != 0.
+// If y == 0, a division-by-zero run-time panic occurs.
+//
+// DivMod implements Euclidean division and modulus (unlike Go):
+//
+//	q = x div y  such that
+//	m = x - y*q  with 0 <= m < |y|
+//
+// (See Raymond T. Boute, ``The Euclidean definition of the functions
+// div and mod''. ACM Transactions on Programming Languages and
+// Systems (TOPLAS), 14(2):127-144, New York, NY, USA, 4/1992.
+// ACM press.)
+// See QuoRem for T-division and modulus (like Go).
+//
+func (z *Int) DivMod(x, y, m *Int) (*Int, *Int) {
+	y0 := y // save y
+	if z == y || alias(z.abs, y.abs) {
+		y0 = new(Int).Set(y)
+	}
+	z.QuoRem(x, y, m)
+	if m.neg {
+		if y0.neg {
+			z.Add(z, intOne)
+			m.Sub(m, y0)
+		} else {
+			z.Sub(z, intOne)
+			m.Add(m, y0)
+		}
+	}
+	return z, m
+}
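+
+// Illustrative example contrasting DivMod with QuoRem for the same
+// operands (editor's addition, not in the upstream source):
+//
+//	d, m := new(Int).DivMod(NewInt(-5), NewInt(3), new(Int))
+//	// d == -2, m == 1, so that -5 == 3*(-2) + 1 with 0 <= m < |y|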
+
+// Cmp compares x and y and returns:
+//
+//   -1 if x <  y
+//    0 if x == y
+//   +1 if x >  y
+//
+func (x *Int) Cmp(y *Int) (r int) {
+	// x cmp y == x cmp y
+	// x cmp (-y) == x
+	// (-x) cmp y == y
+	// (-x) cmp (-y) == -(x cmp y)
+	switch {
+	case x.neg == y.neg:
+		r = x.abs.cmp(y.abs)
+		if x.neg {
+			r = -r
+		}
+	case x.neg:
+		r = -1
+	default:
+		r = 1
+	}
+	return
+}
+
+// low32 returns the least significant 32 bits of z.
+func low32(z nat) uint32 {
+	if len(z) == 0 {
+		return 0
+	}
+	return uint32(z[0])
+}
+
+// low64 returns the least significant 64 bits of z.
+func low64(z nat) uint64 {
+	if len(z) == 0 {
+		return 0
+	}
+	v := uint64(z[0])
+	if _W == 32 && len(z) > 1 {
+		v |= uint64(z[1]) << 32
+	}
+	return v
+}
+
+// Int64 returns the int64 representation of x.
+// If x cannot be represented in an int64, the result is undefined.
+func (x *Int) Int64() int64 {
+	v := int64(low64(x.abs))
+	if x.neg {
+		v = -v
+	}
+	return v
+}
+
+// Uint64 returns the uint64 representation of x.
+// If x cannot be represented in a uint64, the result is undefined.
+func (x *Int) Uint64() uint64 {
+	return low64(x.abs)
+}
+
+// SetString sets z to the value of s, interpreted in the given base,
+// and returns z and a boolean indicating success. If SetString fails,
+// the value of z is undefined but the returned value is nil.
+//
+// The base argument must be 0 or a value between 2 and MaxBase. If the base
+// is 0, the string prefix determines the actual conversion base. A prefix of
+// ``0x'' or ``0X'' selects base 16; the ``0'' prefix selects base 8, and a
+// ``0b'' or ``0B'' prefix selects base 2. Otherwise the selected base is 10.
+//
+func (z *Int) SetString(s string, base int) (*Int, bool) {
+	r := strings.NewReader(s)
+	_, _, err := z.scan(r, base)
+	if err != nil {
+		return nil, false
+	}
+	_, err = r.ReadByte()
+	if err != io.EOF {
+		return nil, false
+	}
+	return z, true // err == io.EOF => scan consumed all of s
+}
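+
+// Illustrative example of prefix-based base selection (editor's addition,
+// not in the upstream source):
+//
+//	z, ok := new(Int).SetString("0x1f", 0) // z == 31, ok == true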
+
+// SetBytes interprets buf as the bytes of a big-endian unsigned
+// integer, sets z to that value, and returns z.
+func (z *Int) SetBytes(buf []byte) *Int {
+	z.abs = z.abs.setBytes(buf)
+	z.neg = false
+	return z
+}
+
+// Bytes returns the absolute value of x as a big-endian byte slice.
+func (x *Int) Bytes() []byte {
+	buf := make([]byte, len(x.abs)*_S)
+	return buf[x.abs.bytes(buf):]
+}
+
+// BitLen returns the length of the absolute value of x in bits.
+// The bit length of 0 is 0.
+func (x *Int) BitLen() int {
+	return x.abs.bitLen()
+}
+
+// Exp sets z = x**y mod |m| (i.e. the sign of m is ignored), and returns z.
+// If y <= 0, the result is 1 mod |m|; if m == nil or m == 0, z = x**y.
+// See Knuth, volume 2, section 4.6.3.
+func (z *Int) Exp(x, y, m *Int) *Int {
+	var yWords nat
+	if !y.neg {
+		yWords = y.abs
+	}
+	// y >= 0
+
+	var mWords nat
+	if m != nil {
+		mWords = m.abs // m.abs may be nil for m == 0
+	}
+
+	z.abs = z.abs.expNN(x.abs, yWords, mWords)
+	z.neg = len(z.abs) > 0 && x.neg && len(yWords) > 0 && yWords[0]&1 == 1 // 0 has no sign
+	if z.neg && len(mWords) > 0 {
+		// make modulus result positive
+		z.abs = z.abs.sub(mWords, z.abs) // z == x**y mod |m| && 0 <= z < |m|
+		z.neg = false
+	}
+
+	return z
+}
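+
+// Illustrative examples (editor's addition, not in the upstream source):
+//
+//	new(Int).Exp(NewInt(2), NewInt(10), nil)          // == 1024
+//	new(Int).Exp(NewInt(2), NewInt(10), NewInt(1000)) // == 24, i.e. 1024 mod 1000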
+
+// GCD sets z to the greatest common divisor of a and b, which both must
+// be > 0, and returns z.
+// If x and y are not nil, GCD sets x and y such that z = a*x + b*y.
+// If either a or b is <= 0, GCD sets z = x = y = 0.
+func (z *Int) GCD(x, y, a, b *Int) *Int {
+	if a.Sign() <= 0 || b.Sign() <= 0 {
+		z.SetInt64(0)
+		if x != nil {
+			x.SetInt64(0)
+		}
+		if y != nil {
+			y.SetInt64(0)
+		}
+		return z
+	}
+	if x == nil && y == nil {
+		return z.binaryGCD(a, b)
+	}
+
+	A := new(Int).Set(a)
+	B := new(Int).Set(b)
+
+	X := new(Int)
+	Y := new(Int).SetInt64(1)
+
+	lastX := new(Int).SetInt64(1)
+	lastY := new(Int)
+
+	q := new(Int)
+	temp := new(Int)
+
+	for len(B.abs) > 0 {
+		r := new(Int)
+		q, r = q.QuoRem(A, B, r)
+
+		A, B = B, r
+
+		temp.Set(X)
+		X.Mul(X, q)
+		X.neg = !X.neg
+		X.Add(X, lastX)
+		lastX.Set(temp)
+
+		temp.Set(Y)
+		Y.Mul(Y, q)
+		Y.neg = !Y.neg
+		Y.Add(Y, lastY)
+		lastY.Set(temp)
+	}
+
+	if x != nil {
+		*x = *lastX
+	}
+
+	if y != nil {
+		*y = *lastY
+	}
+
+	*z = *A
+	return z
+}
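+
+// Illustrative example of the extended form (editor's addition, not in
+// the upstream source):
+//
+//	x, y := new(Int), new(Int)
+//	d := new(Int).GCD(x, y, NewInt(6), NewInt(4))
+//	// d == 2, x == 1, y == -1, since 6*1 + 4*(-1) == 2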
+
+// binaryGCD sets z to the greatest common divisor of a and b, which both must
+// be > 0, and returns z.
+// See Knuth, The Art of Computer Programming, Vol. 2, Section 4.5.2, Algorithm B.
+func (z *Int) binaryGCD(a, b *Int) *Int {
+	u := z
+	v := new(Int)
+
+	// use one Euclidean iteration to ensure that u and v are approx. the same size
+	switch {
+	case len(a.abs) > len(b.abs):
+		u.Set(b)
+		v.Rem(a, b)
+	case len(a.abs) < len(b.abs):
+		u.Set(a)
+		v.Rem(b, a)
+	default:
+		u.Set(a)
+		v.Set(b)
+	}
+
+	// v might be 0 now
+	if len(v.abs) == 0 {
+		return u
+	}
+	// u > 0 && v > 0
+
+	// determine largest k such that u = u' << k, v = v' << k
+	k := u.abs.trailingZeroBits()
+	if vk := v.abs.trailingZeroBits(); vk < k {
+		k = vk
+	}
+	u.Rsh(u, k)
+	v.Rsh(v, k)
+
+	// determine t (we know that u > 0)
+	t := new(Int)
+	if u.abs[0]&1 != 0 {
+		// u is odd
+		t.Neg(v)
+	} else {
+		t.Set(u)
+	}
+
+	for len(t.abs) > 0 {
+		// reduce t
+		t.Rsh(t, t.abs.trailingZeroBits())
+		if t.neg {
+			v, t = t, v
+			v.neg = len(v.abs) > 0 && !v.neg // 0 has no sign
+		} else {
+			u, t = t, u
+		}
+		t.Sub(u, v)
+	}
+
+	return z.Lsh(u, k)
+}
+
+// ProbablyPrime performs n Miller-Rabin tests to check whether x is prime.
+// If it returns true, x is prime with probability at least 1 - 1/4^n.
+// If it returns false, x is not prime. n must be > 0.
+func (x *Int) ProbablyPrime(n int) bool {
+	if n <= 0 {
+		panic("non-positive n for ProbablyPrime")
+	}
+	return !x.neg && x.abs.probablyPrime(n)
+}
+
+// Rand sets z to a pseudo-random number in [0, n) and returns z.
+func (z *Int) Rand(rnd *rand.Rand, n *Int) *Int {
+	z.neg = false
+	if n.neg || len(n.abs) == 0 {
+		z.abs = nil
+		return z
+	}
+	z.abs = z.abs.random(rnd, n.abs, n.abs.bitLen())
+	return z
+}
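+
+// Illustrative use (editor's addition, not in the upstream source; the
+// value drawn depends entirely on the source's seed):
+//
+//	rnd := rand.New(rand.NewSource(42))
+//	z := new(Int).Rand(rnd, NewInt(100)) // some value in [0, 100)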
+
+// ModInverse sets z to the multiplicative inverse of g in the ring ℤ/nℤ
+// and returns z. If g and n are not relatively prime, the result is undefined.
+func (z *Int) ModInverse(g, n *Int) *Int {
+	var d Int
+	d.GCD(z, nil, g, n)
+	// x and y are such that g*x + n*y = d. Since g and n are
+	// relatively prime, d = 1. Taking that modulo n results in
+	// g*x = 1, therefore x is the inverse element.
+	if z.neg {
+		z.Add(z, n)
+	}
+	return z
+}
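+
+// Illustrative example (editor's addition, not in the upstream source):
+//
+//	new(Int).ModInverse(NewInt(3), NewInt(11)) // == 4, since 3*4 == 12 == 1 (mod 11)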
+
+// Lsh sets z = x << n and returns z.
+func (z *Int) Lsh(x *Int, n uint) *Int {
+	z.abs = z.abs.shl(x.abs, n)
+	z.neg = x.neg
+	return z
+}
+
+// Rsh sets z = x >> n and returns z.
+func (z *Int) Rsh(x *Int, n uint) *Int {
+	if x.neg {
+		// (-x) >> s == ^(x-1) >> s == ^((x-1) >> s) == -(((x-1) >> s) + 1)
+		t := z.abs.sub(x.abs, natOne) // no underflow because |x| > 0
+		t = t.shr(t, n)
+		z.abs = t.add(t, natOne)
+		z.neg = true // z cannot be zero if x is negative
+		return z
+	}
+
+	z.abs = z.abs.shr(x.abs, n)
+	z.neg = false
+	return z
+}
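+
+// Like Go's >> on signed integers, Rsh rounds negative values toward
+// negative infinity (editor's addition, not in the upstream source):
+//
+//	new(Int).Rsh(NewInt(-5), 1) // == -3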
+
+// Bit returns the value of the i'th bit of x. That is, it
+// returns (x>>i)&1. The bit index i must be >= 0.
+func (x *Int) Bit(i int) uint {
+	if i == 0 {
+		// optimization for common case: odd/even test of x
+		if len(x.abs) > 0 {
+			return uint(x.abs[0] & 1) // bit 0 is same for -x
+		}
+		return 0
+	}
+	if i < 0 {
+		panic("negative bit index")
+	}
+	if x.neg {
+		t := nat(nil).sub(x.abs, natOne)
+		return t.bit(uint(i)) ^ 1
+	}
+
+	return x.abs.bit(uint(i))
+}
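+
+// Illustrative examples in the two's complement view (editor's addition,
+// not in the upstream source):
+//
+//	NewInt(10).Bit(1) // == 1, since 10 == 0b1010
+//	NewInt(-6).Bit(0) // == 0: -6 ends in ...1010 in two's complement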
+
+// SetBit sets z to x, with x's i'th bit set to b (0 or 1).
+// That is, if b is 1 SetBit sets z = x | (1 << i);
+// if b is 0 SetBit sets z = x &^ (1 << i). If b is not 0 or 1,
+// SetBit will panic.
+func (z *Int) SetBit(x *Int, i int, b uint) *Int {
+	if i < 0 {
+		panic("negative bit index")
+	}
+	if x.neg {
+		t := z.abs.sub(x.abs, natOne)
+		t = t.setBit(t, uint(i), b^1)
+		z.abs = t.add(t, natOne)
+		z.neg = len(z.abs) > 0
+		return z
+	}
+	z.abs = z.abs.setBit(x.abs, uint(i), b)
+	z.neg = false
+	return z
+}
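+
+// Illustrative example (editor's addition, not in the upstream source):
+//
+//	new(Int).SetBit(NewInt(0), 2, 1) // == 4, i.e. 1 << 2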
+
+// And sets z = x & y and returns z.
+func (z *Int) And(x, y *Int) *Int {
+	if x.neg == y.neg {
+		if x.neg {
+			// (-x) & (-y) == ^(x-1) & ^(y-1) == ^((x-1) | (y-1)) == -(((x-1) | (y-1)) + 1)
+			x1 := nat(nil).sub(x.abs, natOne)
+			y1 := nat(nil).sub(y.abs, natOne)
+			z.abs = z.abs.add(z.abs.or(x1, y1), natOne)
+			z.neg = true // z cannot be zero if x and y are negative
+			return z
+		}
+
+		// x & y == x & y
+		z.abs = z.abs.and(x.abs, y.abs)
+		z.neg = false
+		return z
+	}
+
+	// x.neg != y.neg
+	if x.neg {
+		x, y = y, x // & is symmetric
+	}
+
+	// x & (-y) == x & ^(y-1) == x &^ (y-1)
+	y1 := nat(nil).sub(y.abs, natOne)
+	z.abs = z.abs.andNot(x.abs, y1)
+	z.neg = false
+	return z
+}
+
+// AndNot sets z = x &^ y and returns z.
+func (z *Int) AndNot(x, y *Int) *Int {
+	if x.neg == y.neg {
+		if x.neg {
+			// (-x) &^ (-y) == ^(x-1) &^ ^(y-1) == ^(x-1) & (y-1) == (y-1) &^ (x-1)
+			x1 := nat(nil).sub(x.abs, natOne)
+			y1 := nat(nil).sub(y.abs, natOne)
+			z.abs = z.abs.andNot(y1, x1)
+			z.neg = false
+			return z
+		}
+
+		// x &^ y == x &^ y
+		z.abs = z.abs.andNot(x.abs, y.abs)
+		z.neg = false
+		return z
+	}
+
+	if x.neg {
+		// (-x) &^ y == ^(x-1) &^ y == ^(x-1) & ^y == ^((x-1) | y) == -(((x-1) | y) + 1)
+		x1 := nat(nil).sub(x.abs, natOne)
+		z.abs = z.abs.add(z.abs.or(x1, y.abs), natOne)
+		z.neg = true // z cannot be zero if x is negative and y is positive
+		return z
+	}
+
+	// x &^ (-y) == x &^ ^(y-1) == x & (y-1)
+	y1 := nat(nil).sub(y.abs, natOne)
+	z.abs = z.abs.and(x.abs, y1)
+	z.neg = false
+	return z
+}
+
+// Or sets z = x | y and returns z.
+func (z *Int) Or(x, y *Int) *Int {
+	if x.neg == y.neg {
+		if x.neg {
+			// (-x) | (-y) == ^(x-1) | ^(y-1) == ^((x-1) & (y-1)) == -(((x-1) & (y-1)) + 1)
+			x1 := nat(nil).sub(x.abs, natOne)
+			y1 := nat(nil).sub(y.abs, natOne)
+			z.abs = z.abs.add(z.abs.and(x1, y1), natOne)
+			z.neg = true // z cannot be zero if x and y are negative
+			return z
+		}
+
+		// x | y == x | y
+		z.abs = z.abs.or(x.abs, y.abs)
+		z.neg = false
+		return z
+	}
+
+	// x.neg != y.neg
+	if x.neg {
+		x, y = y, x // | is symmetric
+	}
+
+	// x | (-y) == x | ^(y-1) == ^((y-1) &^ x) == -(^((y-1) &^ x) + 1)
+	y1 := nat(nil).sub(y.abs, natOne)
+	z.abs = z.abs.add(z.abs.andNot(y1, x.abs), natOne)
+	z.neg = true // z cannot be zero if one of x or y is negative
+	return z
+}
+
+// Xor sets z = x ^ y and returns z.
+func (z *Int) Xor(x, y *Int) *Int {
+	if x.neg == y.neg {
+		if x.neg {
+			// (-x) ^ (-y) == ^(x-1) ^ ^(y-1) == (x-1) ^ (y-1)
+			x1 := nat(nil).sub(x.abs, natOne)
+			y1 := nat(nil).sub(y.abs, natOne)
+			z.abs = z.abs.xor(x1, y1)
+			z.neg = false
+			return z
+		}
+
+		// x ^ y == x ^ y
+		z.abs = z.abs.xor(x.abs, y.abs)
+		z.neg = false
+		return z
+	}
+
+	// x.neg != y.neg
+	if x.neg {
+		x, y = y, x // ^ is symmetric
+	}
+
+	// x ^ (-y) == x ^ ^(y-1) == ^(x ^ (y-1)) == -((x ^ (y-1)) + 1)
+	y1 := nat(nil).sub(y.abs, natOne)
+	z.abs = z.abs.add(z.abs.xor(x.abs, y1), natOne)
+	z.neg = true // z cannot be zero if only one of x or y is negative
+	return z
+}
+
+// Not sets z = ^x and returns z.
+func (z *Int) Not(x *Int) *Int {
+	if x.neg {
+		// ^(-x) == ^(^(x-1)) == x-1
+		z.abs = z.abs.sub(x.abs, natOne)
+		z.neg = false
+		return z
+	}
+
+	// ^x == -x-1 == -(x+1)
+	z.abs = z.abs.add(x.abs, natOne)
+	z.neg = true // z cannot be zero if x is positive
+	return z
+}
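+
+// Not matches the two's complement identity ^x == -x-1 (editor's
+// addition, not in the upstream source):
+//
+//	new(Int).Not(NewInt(7)) // == -8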
+
+// Gob codec version. Permits backward-compatible changes to the encoding.
+const intGobVersion byte = 1
+
+// GobEncode implements the gob.GobEncoder interface.
+func (x *Int) GobEncode() ([]byte, error) {
+	if x == nil {
+		return nil, nil
+	}
+	buf := make([]byte, 1+len(x.abs)*_S) // extra byte for version and sign bit
+	i := x.abs.bytes(buf) - 1            // i >= 0
+	b := intGobVersion << 1              // make space for sign bit
+	if x.neg {
+		b |= 1
+	}
+	buf[i] = b
+	return buf[i:], nil
+}
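+
+// Editorial note on the wire format produced above: the first byte holds
+// intGobVersion<<1 | sign (1 for negative), followed by the magnitude in
+// big-endian byte order.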
+
+// GobDecode implements the gob.GobDecoder interface.
+func (z *Int) GobDecode(buf []byte) error {
+	if len(buf) == 0 {
+		// Other side sent a nil or default value.
+		*z = Int{}
+		return nil
+	}
+	b := buf[0]
+	if b>>1 != intGobVersion {
+		return fmt.Errorf("Int.GobDecode: encoding version %d not supported", b>>1)
+	}
+	z.neg = b&1 != 0
+	z.abs = z.abs.setBytes(buf[1:])
+	return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (z *Int) MarshalJSON() ([]byte, error) {
+	// TODO(gri): get rid of the []byte/string conversions
+	return []byte(z.String()), nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (z *Int) UnmarshalJSON(text []byte) error {
+	// TODO(gri): get rid of the []byte/string conversions
+	if _, ok := z.SetString(string(text), 0); !ok {
+		return fmt.Errorf("math/big: cannot unmarshal %q into a *big.Int", text)
+	}
+	return nil
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+func (z *Int) MarshalText() (text []byte, err error) {
+	return []byte(z.String()), nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+func (z *Int) UnmarshalText(text []byte) error {
+	if _, ok := z.SetString(string(text), 0); !ok {
+		return fmt.Errorf("math/big: cannot unmarshal %q into a *big.Int", text)
+	}
+	return nil
+}
diff --git a/src/cmd/compile/internal/big/int_test.go b/src/cmd/compile/internal/big/int_test.go
new file mode 100644
index 0000000..a972a72
--- /dev/null
+++ b/src/cmd/compile/internal/big/int_test.go
@@ -0,0 +1,1387 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package big
+
+import (
+	"bytes"
+	"encoding/gob"
+	"encoding/hex"
+	"encoding/json"
+	"encoding/xml"
+	"fmt"
+	"math/rand"
+	"testing"
+	"testing/quick"
+)
+
+func isNormalized(x *Int) bool {
+	if len(x.abs) == 0 {
+		return !x.neg
+	}
+	// len(x.abs) > 0
+	return x.abs[len(x.abs)-1] != 0
+}
+
+type funZZ func(z, x, y *Int) *Int
+type argZZ struct {
+	z, x, y *Int
+}
+
+var sumZZ = []argZZ{
+	{NewInt(0), NewInt(0), NewInt(0)},
+	{NewInt(1), NewInt(1), NewInt(0)},
+	{NewInt(1111111110), NewInt(123456789), NewInt(987654321)},
+	{NewInt(-1), NewInt(-1), NewInt(0)},
+	{NewInt(864197532), NewInt(-123456789), NewInt(987654321)},
+	{NewInt(-1111111110), NewInt(-123456789), NewInt(-987654321)},
+}
+
+var prodZZ = []argZZ{
+	{NewInt(0), NewInt(0), NewInt(0)},
+	{NewInt(0), NewInt(1), NewInt(0)},
+	{NewInt(1), NewInt(1), NewInt(1)},
+	{NewInt(-991 * 991), NewInt(991), NewInt(-991)},
+	// TODO(gri) add larger products
+}
+
+func TestSignZ(t *testing.T) {
+	var zero Int
+	for _, a := range sumZZ {
+		s := a.z.Sign()
+		e := a.z.Cmp(&zero)
+		if s != e {
+			t.Errorf("got %d; want %d for z = %v", s, e, a.z)
+		}
+	}
+}
+
+func TestSetZ(t *testing.T) {
+	for _, a := range sumZZ {
+		var z Int
+		z.Set(a.z)
+		if !isNormalized(&z) {
+			t.Errorf("%v is not normalized", z)
+		}
+		if (&z).Cmp(a.z) != 0 {
+			t.Errorf("got z = %v; want %v", z, a.z)
+		}
+	}
+}
+
+func TestAbsZ(t *testing.T) {
+	var zero Int
+	for _, a := range sumZZ {
+		var z Int
+		z.Abs(a.z)
+		var e Int
+		e.Set(a.z)
+		if e.Cmp(&zero) < 0 {
+			e.Sub(&zero, &e)
+		}
+		if z.Cmp(&e) != 0 {
+			t.Errorf("got z = %v; want %v", z, e)
+		}
+	}
+}
+
+func testFunZZ(t *testing.T, msg string, f funZZ, a argZZ) {
+	var z Int
+	f(&z, a.x, a.y)
+	if !isNormalized(&z) {
+		t.Errorf("%s%v is not normalized", msg, z)
+	}
+	if (&z).Cmp(a.z) != 0 {
+		t.Errorf("%s%+v\n\tgot z = %v; want %v", msg, a, &z, a.z)
+	}
+}
+
+func TestSumZZ(t *testing.T) {
+	AddZZ := func(z, x, y *Int) *Int { return z.Add(x, y) }
+	SubZZ := func(z, x, y *Int) *Int { return z.Sub(x, y) }
+	for _, a := range sumZZ {
+		arg := a
+		testFunZZ(t, "AddZZ", AddZZ, arg)
+
+		arg = argZZ{a.z, a.y, a.x}
+		testFunZZ(t, "AddZZ symmetric", AddZZ, arg)
+
+		arg = argZZ{a.x, a.z, a.y}
+		testFunZZ(t, "SubZZ", SubZZ, arg)
+
+		arg = argZZ{a.y, a.z, a.x}
+		testFunZZ(t, "SubZZ symmetric", SubZZ, arg)
+	}
+}
+
+func TestProdZZ(t *testing.T) {
+	MulZZ := func(z, x, y *Int) *Int { return z.Mul(x, y) }
+	for _, a := range prodZZ {
+		arg := a
+		testFunZZ(t, "MulZZ", MulZZ, arg)
+
+		arg = argZZ{a.z, a.y, a.x}
+		testFunZZ(t, "MulZZ symmetric", MulZZ, arg)
+	}
+}
+
+// mulBytes returns x*y via grade school multiplication. Both inputs
+// and the result are assumed to be in big-endian representation (to
+// match the semantics of Int.Bytes and Int.SetBytes).
+func mulBytes(x, y []byte) []byte {
+	z := make([]byte, len(x)+len(y))
+
+	// multiply
+	k0 := len(z) - 1
+	for j := len(y) - 1; j >= 0; j-- {
+		d := int(y[j])
+		if d != 0 {
+			k := k0
+			carry := 0
+			for i := len(x) - 1; i >= 0; i-- {
+				t := int(z[k]) + int(x[i])*d + carry
+				z[k], carry = byte(t), t>>8
+				k--
+			}
+			z[k] = byte(carry)
+		}
+		k0--
+	}
+
+	// normalize (remove leading 0's)
+	i := 0
+	for i < len(z) && z[i] == 0 {
+		i++
+	}
+
+	return z[i:]
+}
+
+func checkMul(a, b []byte) bool {
+	var x, y, z1 Int
+	x.SetBytes(a)
+	y.SetBytes(b)
+	z1.Mul(&x, &y)
+
+	var z2 Int
+	z2.SetBytes(mulBytes(a, b))
+
+	return z1.Cmp(&z2) == 0
+}
+
+func TestMul(t *testing.T) {
+	if err := quick.Check(checkMul, nil); err != nil {
+		t.Error(err)
+	}
+}
+
+var mulRangesZ = []struct {
+	a, b int64
+	prod string
+}{
+	// entirely positive ranges are covered by mulRangesN
+	{-1, 1, "0"},
+	{-2, -1, "2"},
+	{-3, -2, "6"},
+	{-3, -1, "-6"},
+	{1, 3, "6"},
+	{-10, -10, "-10"},
+	{0, -1, "1"},                      // empty range
+	{-1, -100, "1"},                   // empty range
+	{-1, 1, "0"},                      // range includes 0
+	{-1e9, 0, "0"},                    // range includes 0
+	{-1e9, 1e9, "0"},                  // range includes 0
+	{-10, -1, "3628800"},              // 10!
+	{-20, -2, "-2432902008176640000"}, // -20!
+	{-99, -1,
+		"-933262154439441526816992388562667004907159682643816214685929" +
+			"638952175999932299156089414639761565182862536979208272237582" +
+			"511852109168640000000000000000000000", // -99!
+	},
+}
+
+func TestMulRangeZ(t *testing.T) {
+	var tmp Int
+	// test entirely positive ranges
+	for i, r := range mulRangesN {
+		prod := tmp.MulRange(int64(r.a), int64(r.b)).String()
+		if prod != r.prod {
+			t.Errorf("#%da: got %s; want %s", i, prod, r.prod)
+		}
+	}
+	// test other ranges
+	for i, r := range mulRangesZ {
+		prod := tmp.MulRange(r.a, r.b).String()
+		if prod != r.prod {
+			t.Errorf("#%db: got %s; want %s", i, prod, r.prod)
+		}
+	}
+}
+
+func TestBinomial(t *testing.T) {
+	var z Int
+	for _, test := range []struct {
+		n, k int64
+		want string
+	}{
+		{0, 0, "1"},
+		{0, 1, "0"},
+		{1, 0, "1"},
+		{1, 1, "1"},
+		{1, 10, "0"},
+		{4, 0, "1"},
+		{4, 1, "4"},
+		{4, 2, "6"},
+		{4, 3, "4"},
+		{4, 4, "1"},
+		{10, 1, "10"},
+		{10, 9, "10"},
+		{10, 5, "252"},
+		{11, 5, "462"},
+		{11, 6, "462"},
+		{100, 10, "17310309456440"},
+		{100, 90, "17310309456440"},
+		{1000, 10, "263409560461970212832400"},
+		{1000, 990, "263409560461970212832400"},
+	} {
+		if got := z.Binomial(test.n, test.k).String(); got != test.want {
+			t.Errorf("Binomial(%d, %d) = %s; want %s", test.n, test.k, got, test.want)
+		}
+	}
+}
+
+func BenchmarkBinomial(b *testing.B) {
+	var z Int
+	for i := b.N - 1; i >= 0; i-- {
+		z.Binomial(1000, 990)
+	}
+}
+
+// Examples from the Go Language Spec, section "Arithmetic operators"
+var divisionSignsTests = []struct {
+	x, y int64
+	q, r int64 // T-division
+	d, m int64 // Euclidean division
+}{
+	{5, 3, 1, 2, 1, 2},
+	{-5, 3, -1, -2, -2, 1},
+	{5, -3, -1, 2, -1, 2},
+	{-5, -3, 1, -2, 2, 1},
+	{1, 2, 0, 1, 0, 1},
+	{8, 4, 2, 0, 2, 0},
+}
+
+func TestDivisionSigns(t *testing.T) {
+	for i, test := range divisionSignsTests {
+		x := NewInt(test.x)
+		y := NewInt(test.y)
+		q := NewInt(test.q)
+		r := NewInt(test.r)
+		d := NewInt(test.d)
+		m := NewInt(test.m)
+
+		q1 := new(Int).Quo(x, y)
+		r1 := new(Int).Rem(x, y)
+		if !isNormalized(q1) {
+			t.Errorf("#%d Quo: %v is not normalized", i, *q1)
+		}
+		if !isNormalized(r1) {
+			t.Errorf("#%d Rem: %v is not normalized", i, *r1)
+		}
+		if q1.Cmp(q) != 0 || r1.Cmp(r) != 0 {
+			t.Errorf("#%d QuoRem: got (%s, %s), want (%s, %s)", i, q1, r1, q, r)
+		}
+
+		q2, r2 := new(Int).QuoRem(x, y, new(Int))
+		if !isNormalized(q2) {
+			t.Errorf("#%d Quo: %v is not normalized", i, *q2)
+		}
+		if !isNormalized(r2) {
+			t.Errorf("#%d Rem: %v is not normalized", i, *r2)
+		}
+		if q2.Cmp(q) != 0 || r2.Cmp(r) != 0 {
+			t.Errorf("#%d QuoRem: got (%s, %s), want (%s, %s)", i, q2, r2, q, r)
+		}
+
+		d1 := new(Int).Div(x, y)
+		m1 := new(Int).Mod(x, y)
+		if !isNormalized(d1) {
+			t.Errorf("#%d Div: %v is not normalized", i, *d1)
+		}
+		if !isNormalized(m1) {
+			t.Errorf("#%d Mod: %v is not normalized", i, *m1)
+		}
+		if d1.Cmp(d) != 0 || m1.Cmp(m) != 0 {
+			t.Errorf("#%d DivMod: got (%s, %s), want (%s, %s)", i, d1, m1, d, m)
+		}
+
+		d2, m2 := new(Int).DivMod(x, y, new(Int))
+		if !isNormalized(d2) {
+			t.Errorf("#%d Div: %v is not normalized", i, *d2)
+		}
+		if !isNormalized(m2) {
+			t.Errorf("#%d Mod: %v is not normalized", i, *m2)
+		}
+		if d2.Cmp(d) != 0 || m2.Cmp(m) != 0 {
+			t.Errorf("#%d DivMod: got (%s, %s), want (%s, %s)", i, d2, m2, d, m)
+		}
+	}
+}
+
+func norm(x nat) nat {
+	i := len(x)
+	for i > 0 && x[i-1] == 0 {
+		i--
+	}
+	return x[:i]
+}
+
+func TestBits(t *testing.T) {
+	for _, test := range []nat{
+		nil,
+		{0},
+		{1},
+		{0, 1, 2, 3, 4},
+		{4, 3, 2, 1, 0},
+		{4, 3, 2, 1, 0, 0, 0, 0},
+	} {
+		var z Int
+		z.neg = true
+		got := z.SetBits(test)
+		want := norm(test)
+		if got.abs.cmp(want) != 0 {
+			t.Errorf("SetBits(%v) = %v; want %v", test, got.abs, want)
+		}
+
+		if got.neg {
+			t.Errorf("SetBits(%v): got negative result", test)
+		}
+
+		bits := nat(z.Bits())
+		if bits.cmp(want) != 0 {
+			t.Errorf("%v.Bits() = %v; want %v", z.abs, bits, want)
+		}
+	}
+}
+
+func checkSetBytes(b []byte) bool {
+	hex1 := hex.EncodeToString(new(Int).SetBytes(b).Bytes())
+	hex2 := hex.EncodeToString(b)
+
+	for len(hex1) < len(hex2) {
+		hex1 = "0" + hex1
+	}
+
+	for len(hex1) > len(hex2) {
+		hex2 = "0" + hex2
+	}
+
+	return hex1 == hex2
+}
+
+func TestSetBytes(t *testing.T) {
+	if err := quick.Check(checkSetBytes, nil); err != nil {
+		t.Error(err)
+	}
+}
+
+func checkBytes(b []byte) bool {
+	b2 := new(Int).SetBytes(b).Bytes()
+	return bytes.Equal(b, b2)
+}
+
+func TestBytes(t *testing.T) {
+	if err := quick.Check(checkBytes, nil); err != nil {
+		t.Error(err)
+	}
+}
+
+func checkQuo(x, y []byte) bool {
+	u := new(Int).SetBytes(x)
+	v := new(Int).SetBytes(y)
+
+	if len(v.abs) == 0 {
+		return true
+	}
+
+	r := new(Int)
+	q, r := new(Int).QuoRem(u, v, r)
+
+	if r.Cmp(v) >= 0 {
+		return false
+	}
+
+	uprime := new(Int).Set(q)
+	uprime.Mul(uprime, v)
+	uprime.Add(uprime, r)
+
+	return uprime.Cmp(u) == 0
+}
+
+var quoTests = []struct {
+	x, y string
+	q, r string
+}{
+	{
+		"476217953993950760840509444250624797097991362735329973741718102894495832294430498335824897858659711275234906400899559094370964723884706254265559534144986498357",
+		"9353930466774385905609975137998169297361893554149986716853295022578535724979483772383667534691121982974895531435241089241440253066816724367338287092081996",
+		"50911",
+		"1",
+	},
+	{
+		"11510768301994997771168",
+		"1328165573307167369775",
+		"8",
+		"885443715537658812968",
+	},
+}
+
+func TestQuo(t *testing.T) {
+	if err := quick.Check(checkQuo, nil); err != nil {
+		t.Error(err)
+	}
+
+	for i, test := range quoTests {
+		x, _ := new(Int).SetString(test.x, 10)
+		y, _ := new(Int).SetString(test.y, 10)
+		expectedQ, _ := new(Int).SetString(test.q, 10)
+		expectedR, _ := new(Int).SetString(test.r, 10)
+
+		r := new(Int)
+		q, r := new(Int).QuoRem(x, y, r)
+
+		if q.Cmp(expectedQ) != 0 || r.Cmp(expectedR) != 0 {
+			t.Errorf("#%d got (%s, %s) want (%s, %s)", i, q, r, expectedQ, expectedR)
+		}
+	}
+}
+
+func TestQuoStepD6(t *testing.T) {
+	// See Knuth, Volume 2, section 4.3.1, exercise 21. This code exercises
+	// a code path that triggers only about once in 10^19 cases.
+
+	u := &Int{false, nat{0, 0, 1 + 1<<(_W-1), _M ^ (1 << (_W - 1))}}
+	v := &Int{false, nat{5, 2 + 1<<(_W-1), 1 << (_W - 1)}}
+
+	r := new(Int)
+	q, r := new(Int).QuoRem(u, v, r)
+	const expectedQ64 = "18446744073709551613"
+	const expectedR64 = "3138550867693340382088035895064302439801311770021610913807"
+	const expectedQ32 = "4294967293"
+	const expectedR32 = "39614081266355540837921718287"
+	if q.String() != expectedQ64 && q.String() != expectedQ32 ||
+		r.String() != expectedR64 && r.String() != expectedR32 {
+		t.Errorf("got (%s, %s) want (%s, %s) or (%s, %s)", q, r, expectedQ64, expectedR64, expectedQ32, expectedR32)
+	}
+}
+
+var bitLenTests = []struct {
+	in  string
+	out int
+}{
+	{"-1", 1},
+	{"0", 0},
+	{"1", 1},
+	{"2", 2},
+	{"4", 3},
+	{"0xabc", 12},
+	{"0x8000", 16},
+	{"0x80000000", 32},
+	{"0x800000000000", 48},
+	{"0x8000000000000000", 64},
+	{"0x80000000000000000000", 80},
+	{"-0x4000000000000000000000", 87},
+}
+
+func TestBitLen(t *testing.T) {
+	for i, test := range bitLenTests {
+		x, ok := new(Int).SetString(test.in, 0)
+		if !ok {
+			t.Errorf("#%d test input invalid: %s", i, test.in)
+			continue
+		}
+
+		if n := x.BitLen(); n != test.out {
+			t.Errorf("#%d got %d want %d", i, n, test.out)
+		}
+	}
+}
+
+var expTests = []struct {
+	x, y, m string
+	out     string
+}{
+	// y <= 0
+	{"0", "0", "", "1"},
+	{"1", "0", "", "1"},
+	{"-10", "0", "", "1"},
+	{"1234", "-1", "", "1"},
+
+	// m == 1
+	{"0", "0", "1", "0"},
+	{"1", "0", "1", "0"},
+	{"-10", "0", "1", "0"},
+	{"1234", "-1", "1", "0"},
+
+	// misc
+	{"5", "-7", "", "1"},
+	{"-5", "-7", "", "1"},
+	{"5", "0", "", "1"},
+	{"-5", "0", "", "1"},
+	{"5", "1", "", "5"},
+	{"-5", "1", "", "-5"},
+	{"-5", "1", "7", "2"},
+	{"-2", "3", "2", "0"},
+	{"5", "2", "", "25"},
+	{"1", "65537", "2", "1"},
+	{"0x8000000000000000", "2", "", "0x40000000000000000000000000000000"},
+	{"0x8000000000000000", "2", "6719", "4944"},
+	{"0x8000000000000000", "3", "6719", "5447"},
+	{"0x8000000000000000", "1000", "6719", "1603"},
+	{"0x8000000000000000", "1000000", "6719", "3199"},
+	{"0x8000000000000000", "-1000000", "6719", "1"},
+	{
+		"2938462938472983472983659726349017249287491026512746239764525612965293865296239471239874193284792387498274256129746192347",
+		"298472983472983471903246121093472394872319615612417471234712061",
+		"29834729834729834729347290846729561262544958723956495615629569234729836259263598127342374289365912465901365498236492183464",
+		"23537740700184054162508175125554701713153216681790245129157191391322321508055833908509185839069455749219131480588829346291",
+	},
+	// test case for issue 8822
+	{
+		"-0x1BCE04427D8032319A89E5C4136456671AC620883F2C4139E57F91307C485AD2D6204F4F87A58262652DB5DBBAC72B0613E51B835E7153BEC6068F5C8D696B74DBD18FEC316AEF73985CF0475663208EB46B4F17DD9DA55367B03323E5491A70997B90C059FB34809E6EE55BCFBD5F2F52233BFE62E6AA9E4E26A1D4C2439883D14F2633D55D8AA66A1ACD5595E778AC3A280517F1157989E70C1A437B849F1877B779CC3CDDEDE2DAA6594A6C66D181A00A5F777EE60596D8773998F6E988DEAE4CCA60E4DDCF9590543C89F74F603259FCAD71660D30294FBBE6490300F78A9D63FA660DC9417B8B9DDA28BEB3977B621B988E23D4D954F322C3540541BC649ABD504C50FADFD9F0987D58A2BF689313A285E773FF02899A6EF887D1D4A0D2",
+		"0xB08FFB20760FFED58FADA86DFEF71AD72AA0FA763219618FE022C197E54708BB1191C66470250FCE8879487507CEE41381CA4D932F81C2B3F1AB20B539D50DCD",
+		"0xAC6BDB41324A9A9BF166DE5E1389582FAF72B6651987EE07FC3192943DB56050A37329CBB4A099ED8193E0757767A13DD52312AB4B03310DCD7F48A9DA04FD50E8083969EDB767B0CF6095179A163AB3661A05FBD5FAAAE82918A9962F0B93B855F97993EC975EEAA80D740ADBF4FF747359D041D5C33EA71D281E446B14773BCA97B43A23FB801676BD207A436C6481F1D2B9078717461A5B9D32E688F87748544523B524B0D57D5EA77A2775D2ECFA032CFBDBF52FB3786160279004E57AE6AF874E7303CE53299CCC041C7BC308D82A5698F3A8D0C38271AE35F8E9DBFBB694B5C803D89F7AE435DE236D525F54759B65E372FCD68EF20FA7111F9E4AFF73",
+		"21484252197776302499639938883777710321993113097987201050501182909581359357618579566746556372589385361683610524730509041328855066514963385522570894839035884713051640171474186548713546686476761306436434146475140156284389181808675016576845833340494848283681088886584219750554408060556769486628029028720727393293111678826356480455433909233520504112074401376133077150471237549474149190242010469539006449596611576612573955754349042329130631128234637924786466585703488460540228477440853493392086251021228087076124706778899179648655221663765993962724699135217212118535057766739392069738618682722216712319320435674779146070442",
+	},
+}
+
+func TestExp(t *testing.T) {
+	for i, test := range expTests {
+		x, ok1 := new(Int).SetString(test.x, 0)
+		y, ok2 := new(Int).SetString(test.y, 0)
+		out, ok3 := new(Int).SetString(test.out, 0)
+
+		var ok4 bool
+		var m *Int
+
+		if len(test.m) == 0 {
+			m, ok4 = nil, true
+		} else {
+			m, ok4 = new(Int).SetString(test.m, 0)
+		}
+
+		if !ok1 || !ok2 || !ok3 || !ok4 {
+			t.Errorf("#%d: error in input", i)
+			continue
+		}
+
+		z1 := new(Int).Exp(x, y, m)
+		if !isNormalized(z1) {
+			t.Errorf("#%d: %v is not normalized", i, *z1)
+		}
+		if z1.Cmp(out) != 0 {
+			t.Errorf("#%d: got %s want %s", i, z1, out)
+		}
+
+		if m == nil {
+			// The result should be the same as for m == 0;
+			// specifically, there should be no div-zero panic.
+			m = &Int{abs: nat{}} // m != nil && len(m.abs) == 0
+			z2 := new(Int).Exp(x, y, m)
+			if z2.Cmp(z1) != 0 {
+				t.Errorf("#%d: got %s want %s", i, z2, z1)
+			}
+		}
+	}
+}
+
+func checkGcd(aBytes, bBytes []byte) bool {
+	x := new(Int)
+	y := new(Int)
+	a := new(Int).SetBytes(aBytes)
+	b := new(Int).SetBytes(bBytes)
+
+	d := new(Int).GCD(x, y, a, b)
+	x.Mul(x, a)
+	y.Mul(y, b)
+	x.Add(x, y)
+
+	return x.Cmp(d) == 0
+}
+
+var gcdTests = []struct {
+	d, x, y, a, b string
+}{
+	// a <= 0 || b <= 0
+	{"0", "0", "0", "0", "0"},
+	{"0", "0", "0", "0", "7"},
+	{"0", "0", "0", "11", "0"},
+	{"0", "0", "0", "-77", "35"},
+	{"0", "0", "0", "64515", "-24310"},
+	{"0", "0", "0", "-64515", "-24310"},
+
+	{"1", "-9", "47", "120", "23"},
+	{"7", "1", "-2", "77", "35"},
+	{"935", "-3", "8", "64515", "24310"},
+	{"935000000000000000", "-3", "8", "64515000000000000000", "24310000000000000000"},
+	{"1", "-221", "22059940471369027483332068679400581064239780177629666810348940098015901108344", "98920366548084643601728869055592650835572950932266967461790948584315647051443", "991"},
+
+	// test early exit (after one Euclidean iteration) in binaryGCD
+	{"1", "", "", "1", "98920366548084643601728869055592650835572950932266967461790948584315647051443"},
+}
+
+func testGcd(t *testing.T, d, x, y, a, b *Int) {
+	var X *Int
+	if x != nil {
+		X = new(Int)
+	}
+	var Y *Int
+	if y != nil {
+		Y = new(Int)
+	}
+
+	D := new(Int).GCD(X, Y, a, b)
+	if D.Cmp(d) != 0 {
+		t.Errorf("GCD(%s, %s): got d = %s, want %s", a, b, D, d)
+	}
+	if x != nil && X.Cmp(x) != 0 {
+		t.Errorf("GCD(%s, %s): got x = %s, want %s", a, b, X, x)
+	}
+	if y != nil && Y.Cmp(y) != 0 {
+		t.Errorf("GCD(%s, %s): got y = %s, want %s", a, b, Y, y)
+	}
+
+	// binaryGCD requires a > 0 && b > 0
+	if a.Sign() <= 0 || b.Sign() <= 0 {
+		return
+	}
+
+	D.binaryGCD(a, b)
+	if D.Cmp(d) != 0 {
+		t.Errorf("binaryGcd(%s, %s): got d = %s, want %s", a, b, D, d)
+	}
+}
+
+func TestGcd(t *testing.T) {
+	for _, test := range gcdTests {
+		d, _ := new(Int).SetString(test.d, 0)
+		x, _ := new(Int).SetString(test.x, 0)
+		y, _ := new(Int).SetString(test.y, 0)
+		a, _ := new(Int).SetString(test.a, 0)
+		b, _ := new(Int).SetString(test.b, 0)
+
+		testGcd(t, d, nil, nil, a, b)
+		testGcd(t, d, x, nil, a, b)
+		testGcd(t, d, nil, y, a, b)
+		testGcd(t, d, x, y, a, b)
+	}
+
+	if err := quick.Check(checkGcd, nil); err != nil {
+		t.Error(err)
+	}
+}
+
+var primes = []string{
+	"2",
+	"3",
+	"5",
+	"7",
+	"11",
+
+	"13756265695458089029",
+	"13496181268022124907",
+	"10953742525620032441",
+	"17908251027575790097",
+
+	// http://golang.org/issue/638
+	"18699199384836356663",
+
+	"98920366548084643601728869055592650835572950932266967461790948584315647051443",
+	"94560208308847015747498523884063394671606671904944666360068158221458669711639",
+
+	// http://primes.utm.edu/lists/small/small3.html
+	"449417999055441493994709297093108513015373787049558499205492347871729927573118262811508386655998299074566974373711472560655026288668094291699357843464363003144674940345912431129144354948751003607115263071543163",
+	"230975859993204150666423538988557839555560243929065415434980904258310530753006723857139742334640122533598517597674807096648905501653461687601339782814316124971547968912893214002992086353183070342498989426570593",
+	"5521712099665906221540423207019333379125265462121169655563495403888449493493629943498064604536961775110765377745550377067893607246020694972959780839151452457728855382113555867743022746090187341871655890805971735385789993",
+	"203956878356401977405765866929034577280193993314348263094772646453283062722701277632936616063144088173312372882677123879538709400158306567338328279154499698366071906766440037074217117805690872792848149112022286332144876183376326512083574821647933992961249917319836219304274280243803104015000563790123",
+}
+
+var composites = []string{
+	"0",
+	"1",
+	"21284175091214687912771199898307297748211672914763848041968395774954376176754",
+	"6084766654921918907427900243509372380954290099172559290432744450051395395951",
+	"84594350493221918389213352992032324280367711247940675652888030554255915464401",
+	"82793403787388584738507275144194252681",
+}
+
+func TestProbablyPrime(t *testing.T) {
+	nreps := 20
+	if testing.Short() {
+		nreps = 1
+	}
+	for i, s := range primes {
+		p, _ := new(Int).SetString(s, 10)
+		if !p.ProbablyPrime(nreps) {
+			t.Errorf("#%d prime found to be non-prime (%s)", i, s)
+		}
+	}
+
+	for i, s := range composites {
+		c, _ := new(Int).SetString(s, 10)
+		if c.ProbablyPrime(nreps) {
+			t.Errorf("#%d composite found to be prime (%s)", i, s)
+		}
+		if testing.Short() {
+			break
+		}
+	}
+
+	// check that ProbablyPrime panics if n <= 0
+	c := NewInt(11) // a prime
+	for _, n := range []int{-1, 0, 1} {
+		func() {
+			defer func() {
+				if n <= 0 && recover() == nil {
+					t.Fatalf("expected panic from ProbablyPrime(%d)", n)
+				}
+			}()
+			if !c.ProbablyPrime(n) {
+				t.Fatalf("%v should be a prime", c)
+			}
+		}()
+	}
+}
+
+type intShiftTest struct {
+	in    string
+	shift uint
+	out   string
+}
+
+var rshTests = []intShiftTest{
+	{"0", 0, "0"},
+	{"-0", 0, "0"},
+	{"0", 1, "0"},
+	{"0", 2, "0"},
+	{"1", 0, "1"},
+	{"1", 1, "0"},
+	{"1", 2, "0"},
+	{"2", 0, "2"},
+	{"2", 1, "1"},
+	{"-1", 0, "-1"},
+	{"-1", 1, "-1"},
+	{"-1", 10, "-1"},
+	{"-100", 2, "-25"},
+	{"-100", 3, "-13"},
+	{"-100", 100, "-1"},
+	{"4294967296", 0, "4294967296"},
+	{"4294967296", 1, "2147483648"},
+	{"4294967296", 2, "1073741824"},
+	{"18446744073709551616", 0, "18446744073709551616"},
+	{"18446744073709551616", 1, "9223372036854775808"},
+	{"18446744073709551616", 2, "4611686018427387904"},
+	{"18446744073709551616", 64, "1"},
+	{"340282366920938463463374607431768211456", 64, "18446744073709551616"},
+	{"340282366920938463463374607431768211456", 128, "1"},
+}
+
+func TestRsh(t *testing.T) {
+	for i, test := range rshTests {
+		in, _ := new(Int).SetString(test.in, 10)
+		expected, _ := new(Int).SetString(test.out, 10)
+		out := new(Int).Rsh(in, test.shift)
+
+		if !isNormalized(out) {
+			t.Errorf("#%d: %v is not normalized", i, *out)
+		}
+		if out.Cmp(expected) != 0 {
+			t.Errorf("#%d: got %s want %s", i, out, expected)
+		}
+	}
+}
+
+func TestRshSelf(t *testing.T) {
+	for i, test := range rshTests {
+		z, _ := new(Int).SetString(test.in, 10)
+		expected, _ := new(Int).SetString(test.out, 10)
+		z.Rsh(z, test.shift)
+
+		if !isNormalized(z) {
+			t.Errorf("#%d: %v is not normalized", i, *z)
+		}
+		if z.Cmp(expected) != 0 {
+			t.Errorf("#%d: got %s want %s", i, z, expected)
+		}
+	}
+}
+
+var lshTests = []intShiftTest{
+	{"0", 0, "0"},
+	{"0", 1, "0"},
+	{"0", 2, "0"},
+	{"1", 0, "1"},
+	{"1", 1, "2"},
+	{"1", 2, "4"},
+	{"2", 0, "2"},
+	{"2", 1, "4"},
+	{"2", 2, "8"},
+	{"-87", 1, "-174"},
+	{"4294967296", 0, "4294967296"},
+	{"4294967296", 1, "8589934592"},
+	{"4294967296", 2, "17179869184"},
+	{"18446744073709551616", 0, "18446744073709551616"},
+	{"9223372036854775808", 1, "18446744073709551616"},
+	{"4611686018427387904", 2, "18446744073709551616"},
+	{"1", 64, "18446744073709551616"},
+	{"18446744073709551616", 64, "340282366920938463463374607431768211456"},
+	{"1", 128, "340282366920938463463374607431768211456"},
+}
+
+func TestLsh(t *testing.T) {
+	for i, test := range lshTests {
+		in, _ := new(Int).SetString(test.in, 10)
+		expected, _ := new(Int).SetString(test.out, 10)
+		out := new(Int).Lsh(in, test.shift)
+
+		if !isNormalized(out) {
+			t.Errorf("#%d: %v is not normalized", i, *out)
+		}
+		if out.Cmp(expected) != 0 {
+			t.Errorf("#%d: got %s want %s", i, out, expected)
+		}
+	}
+}
+
+func TestLshSelf(t *testing.T) {
+	for i, test := range lshTests {
+		z, _ := new(Int).SetString(test.in, 10)
+		expected, _ := new(Int).SetString(test.out, 10)
+		z.Lsh(z, test.shift)
+
+		if !isNormalized(z) {
+			t.Errorf("#%d: %v is not normalized", i, *z)
+		}
+		if z.Cmp(expected) != 0 {
+			t.Errorf("#%d: got %s want %s", i, z, expected)
+		}
+	}
+}
+
+func TestLshRsh(t *testing.T) {
+	for i, test := range rshTests {
+		in, _ := new(Int).SetString(test.in, 10)
+		out := new(Int).Lsh(in, test.shift)
+		out = out.Rsh(out, test.shift)
+
+		if !isNormalized(out) {
+			t.Errorf("#%d: %v is not normalized", i, *out)
+		}
+		if in.Cmp(out) != 0 {
+			t.Errorf("#%d: got %s want %s", i, out, in)
+		}
+	}
+	for i, test := range lshTests {
+		in, _ := new(Int).SetString(test.in, 10)
+		out := new(Int).Lsh(in, test.shift)
+		out.Rsh(out, test.shift)
+
+		if !isNormalized(out) {
+			t.Errorf("#%d: %v is not normalized", i, *out)
+		}
+		if in.Cmp(out) != 0 {
+			t.Errorf("#%d: got %s want %s", i, out, in)
+		}
+	}
+}
+
+var int64Tests = []int64{
+	0,
+	1,
+	-1,
+	4294967295,
+	-4294967295,
+	4294967296,
+	-4294967296,
+	9223372036854775807,
+	-9223372036854775807,
+	-9223372036854775808,
+}
+
+func TestInt64(t *testing.T) {
+	for i, testVal := range int64Tests {
+		in := NewInt(testVal)
+		out := in.Int64()
+
+		if out != testVal {
+			t.Errorf("#%d got %d want %d", i, out, testVal)
+		}
+	}
+}
+
+var uint64Tests = []uint64{
+	0,
+	1,
+	4294967295,
+	4294967296,
+	8589934591,
+	8589934592,
+	9223372036854775807,
+	9223372036854775808,
+	18446744073709551615, // 1<<64 - 1
+}
+
+func TestUint64(t *testing.T) {
+	in := new(Int)
+	for i, testVal := range uint64Tests {
+		in.SetUint64(testVal)
+		out := in.Uint64()
+
+		if out != testVal {
+			t.Errorf("#%d got %d want %d", i, out, testVal)
+		}
+
+		str := fmt.Sprint(testVal)
+		strOut := in.String()
+		if strOut != str {
+			t.Errorf("#%d.String got %s want %s", i, strOut, str)
+		}
+	}
+}
+
+var bitwiseTests = []struct {
+	x, y                 string
+	and, or, xor, andNot string
+}{
+	{"0x00", "0x00", "0x00", "0x00", "0x00", "0x00"},
+	{"0x00", "0x01", "0x00", "0x01", "0x01", "0x00"},
+	{"0x01", "0x00", "0x00", "0x01", "0x01", "0x01"},
+	{"-0x01", "0x00", "0x00", "-0x01", "-0x01", "-0x01"},
+	{"-0xaf", "-0x50", "-0xf0", "-0x0f", "0xe1", "0x41"},
+	{"0x00", "-0x01", "0x00", "-0x01", "-0x01", "0x00"},
+	{"0x01", "0x01", "0x01", "0x01", "0x00", "0x00"},
+	{"-0x01", "-0x01", "-0x01", "-0x01", "0x00", "0x00"},
+	{"0x07", "0x08", "0x00", "0x0f", "0x0f", "0x07"},
+	{"0x05", "0x0f", "0x05", "0x0f", "0x0a", "0x00"},
+	{"0xff", "-0x0a", "0xf6", "-0x01", "-0xf7", "0x09"},
+	{"0x013ff6", "0x9a4e", "0x1a46", "0x01bffe", "0x01a5b8", "0x0125b0"},
+	{"-0x013ff6", "0x9a4e", "0x800a", "-0x0125b2", "-0x01a5bc", "-0x01c000"},
+	{"-0x013ff6", "-0x9a4e", "-0x01bffe", "-0x1a46", "0x01a5b8", "0x8008"},
+	{
+		"0x1000009dc6e3d9822cba04129bcbe3401",
+		"0xb9bd7d543685789d57cb918e833af352559021483cdb05cc21fd",
+		"0x1000001186210100001000009048c2001",
+		"0xb9bd7d543685789d57cb918e8bfeff7fddb2ebe87dfbbdfe35fd",
+		"0xb9bd7d543685789d57ca918e8ae69d6fcdb2eae87df2b97215fc",
+		"0x8c40c2d8822caa04120b8321400",
+	},
+	{
+		"0x1000009dc6e3d9822cba04129bcbe3401",
+		"-0xb9bd7d543685789d57cb918e833af352559021483cdb05cc21fd",
+		"0x8c40c2d8822caa04120b8321401",
+		"-0xb9bd7d543685789d57ca918e82229142459020483cd2014001fd",
+		"-0xb9bd7d543685789d57ca918e8ae69d6fcdb2eae87df2b97215fe",
+		"0x1000001186210100001000009048c2000",
+	},
+	{
+		"-0x1000009dc6e3d9822cba04129bcbe3401",
+		"-0xb9bd7d543685789d57cb918e833af352559021483cdb05cc21fd",
+		"-0xb9bd7d543685789d57cb918e8bfeff7fddb2ebe87dfbbdfe35fd",
+		"-0x1000001186210100001000009048c2001",
+		"0xb9bd7d543685789d57ca918e8ae69d6fcdb2eae87df2b97215fc",
+		"0xb9bd7d543685789d57ca918e82229142459020483cd2014001fc",
+	},
+}
+
+type bitFun func(z, x, y *Int) *Int
+
+func testBitFun(t *testing.T, msg string, f bitFun, x, y *Int, exp string) {
+	expected := new(Int)
+	expected.SetString(exp, 0)
+
+	out := f(new(Int), x, y)
+	if out.Cmp(expected) != 0 {
+		t.Errorf("%s: got %s want %s", msg, out, expected)
+	}
+}
+
+func testBitFunSelf(t *testing.T, msg string, f bitFun, x, y *Int, exp string) {
+	self := new(Int)
+	self.Set(x)
+	expected := new(Int)
+	expected.SetString(exp, 0)
+
+	self = f(self, self, y)
+	if self.Cmp(expected) != 0 {
+		t.Errorf("%s: got %s want %s", msg, self, expected)
+	}
+}
+
+func altBit(x *Int, i int) uint {
+	z := new(Int).Rsh(x, uint(i))
+	z = z.And(z, NewInt(1))
+	if z.Cmp(new(Int)) != 0 {
+		return 1
+	}
+	return 0
+}
+
+func altSetBit(z *Int, x *Int, i int, b uint) *Int {
+	one := NewInt(1)
+	m := one.Lsh(one, uint(i))
+	switch b {
+	case 1:
+		return z.Or(x, m)
+	case 0:
+		return z.AndNot(x, m)
+	}
+	panic("set bit is not 0 or 1")
+}
+
+func testBitset(t *testing.T, x *Int) {
+	n := x.BitLen()
+	z := new(Int).Set(x)
+	z1 := new(Int).Set(x)
+	for i := 0; i < n+10; i++ {
+		old := z.Bit(i)
+		old1 := altBit(z1, i)
+		if old != old1 {
+			t.Errorf("bitset: inconsistent value for Bit(%s, %d), got %v want %v", z1, i, old, old1)
+		}
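+		// Editorial note: the := below shadows the outer z and z1 for the rest
+		// of this iteration; the outer values stay unchanged for the final
+		// z.Cmp(x) check.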
+		z := new(Int).SetBit(z, i, 1)
+		z1 := altSetBit(new(Int), z1, i, 1)
+		if z.Bit(i) == 0 {
+			t.Errorf("bitset: bit %d of %s got 0 want 1", i, x)
+		}
+		if z.Cmp(z1) != 0 {
+			t.Errorf("bitset: inconsistent value after SetBit 1, got %s want %s", z, z1)
+		}
+		z.SetBit(z, i, 0)
+		altSetBit(z1, z1, i, 0)
+		if z.Bit(i) != 0 {
+			t.Errorf("bitset: bit %d of %s got 1 want 0", i, x)
+		}
+		if z.Cmp(z1) != 0 {
+			t.Errorf("bitset: inconsistent value after SetBit 0, got %s want %s", z, z1)
+		}
+		altSetBit(z1, z1, i, old)
+		z.SetBit(z, i, old)
+		if z.Cmp(z1) != 0 {
+			t.Errorf("bitset: inconsistent value after SetBit old, got %s want %s", z, z1)
+		}
+	}
+	if z.Cmp(x) != 0 {
+		t.Errorf("bitset: got %s want %s", z, x)
+	}
+}
+
+var bitsetTests = []struct {
+	x string
+	i int
+	b uint
+}{
+	{"0", 0, 0},
+	{"0", 200, 0},
+	{"1", 0, 1},
+	{"1", 1, 0},
+	{"-1", 0, 1},
+	{"-1", 200, 1},
+	{"0x2000000000000000000000000000", 108, 0},
+	{"0x2000000000000000000000000000", 109, 1},
+	{"0x2000000000000000000000000000", 110, 0},
+	{"-0x2000000000000000000000000001", 108, 1},
+	{"-0x2000000000000000000000000001", 109, 0},
+	{"-0x2000000000000000000000000001", 110, 1},
+}
+
+func TestBitSet(t *testing.T) {
+	for _, test := range bitwiseTests {
+		x := new(Int)
+		x.SetString(test.x, 0)
+		testBitset(t, x)
+		x = new(Int)
+		x.SetString(test.y, 0)
+		testBitset(t, x)
+	}
+	for i, test := range bitsetTests {
+		x := new(Int)
+		x.SetString(test.x, 0)
+		b := x.Bit(test.i)
+		if b != test.b {
+			t.Errorf("#%d got %v want %v", i, b, test.b)
+		}
+	}
+	z := NewInt(1)
+	z.SetBit(NewInt(0), 2, 1)
+	if z.Cmp(NewInt(4)) != 0 {
+		t.Errorf("destination leaked into result; got %s want 4", z)
+	}
+}
+
+func BenchmarkBitset(b *testing.B) {
+	z := new(Int)
+	z.SetBit(z, 512, 1)
+	b.ResetTimer()
+	b.StartTimer()
+	for i := b.N - 1; i >= 0; i-- {
+		z.SetBit(z, i&512, 1)
+	}
+}
+
+func BenchmarkBitsetNeg(b *testing.B) {
+	z := NewInt(-1)
+	z.SetBit(z, 512, 0)
+	b.ResetTimer()
+	b.StartTimer()
+	for i := b.N - 1; i >= 0; i-- {
+		z.SetBit(z, i&512, 0)
+	}
+}
+
+func BenchmarkBitsetOrig(b *testing.B) {
+	z := new(Int)
+	altSetBit(z, z, 512, 1)
+	b.ResetTimer()
+	b.StartTimer()
+	for i := b.N - 1; i >= 0; i-- {
+		altSetBit(z, z, i&512, 1)
+	}
+}
+
+func BenchmarkBitsetNegOrig(b *testing.B) {
+	z := NewInt(-1)
+	altSetBit(z, z, 512, 0)
+	b.ResetTimer()
+	b.StartTimer()
+	for i := b.N - 1; i >= 0; i-- {
+		altSetBit(z, z, i&512, 0)
+	}
+}
+
+func TestBitwise(t *testing.T) {
+	x := new(Int)
+	y := new(Int)
+	for _, test := range bitwiseTests {
+		x.SetString(test.x, 0)
+		y.SetString(test.y, 0)
+
+		testBitFun(t, "and", (*Int).And, x, y, test.and)
+		testBitFunSelf(t, "and", (*Int).And, x, y, test.and)
+		testBitFun(t, "andNot", (*Int).AndNot, x, y, test.andNot)
+		testBitFunSelf(t, "andNot", (*Int).AndNot, x, y, test.andNot)
+		testBitFun(t, "or", (*Int).Or, x, y, test.or)
+		testBitFunSelf(t, "or", (*Int).Or, x, y, test.or)
+		testBitFun(t, "xor", (*Int).Xor, x, y, test.xor)
+		testBitFunSelf(t, "xor", (*Int).Xor, x, y, test.xor)
+	}
+}
+
+var notTests = []struct {
+	in  string
+	out string
+}{
+	{"0", "-1"},
+	{"1", "-2"},
+	{"7", "-8"},
+	{"0", "-1"},
+	{"-81910", "81909"},
+	{
+		"298472983472983471903246121093472394872319615612417471234712061",
+		"-298472983472983471903246121093472394872319615612417471234712062",
+	},
+}
+
+func TestNot(t *testing.T) {
+	in := new(Int)
+	out := new(Int)
+	expected := new(Int)
+	for i, test := range notTests {
+		in.SetString(test.in, 10)
+		expected.SetString(test.out, 10)
+		out = out.Not(in)
+		if out.Cmp(expected) != 0 {
+			t.Errorf("#%d: got %s want %s", i, out, expected)
+		}
+		out = out.Not(out)
+		if out.Cmp(in) != 0 {
+			t.Errorf("#%d: got %s want %s", i, out, in)
+		}
+	}
+}
+
+var modInverseTests = []struct {
+	element string
+	modulus string
+}{
+	{"1234567", "458948883992"},
+	{"239487239847", "2410312426921032588552076022197566074856950548502459942654116941958108831682612228890093858261341614673227141477904012196503648957050582631942730706805009223062734745341073406696246014589361659774041027169249453200378729434170325843778659198143763193776859869524088940195577346119843545301547043747207749969763750084308926339295559968882457872412993810129130294592999947926365264059284647209730384947211681434464714438488520940127459844288859336526896320919633919"},
+}
+
+func TestModInverse(t *testing.T) {
+	var element, modulus, gcd, inverse Int
+	one := NewInt(1)
+	for i, test := range modInverseTests {
+		(&element).SetString(test.element, 10)
+		(&modulus).SetString(test.modulus, 10)
+		(&inverse).ModInverse(&element, &modulus)
+		(&inverse).Mul(&inverse, &element)
+		(&inverse).Mod(&inverse, &modulus)
+		if (&inverse).Cmp(one) != 0 {
+			t.Errorf("#%d: failed (e·e^(-1)=%s)", i, &inverse)
+		}
+	}
+	// exhaustive test for small values
+	for n := 2; n < 100; n++ {
+		(&modulus).SetInt64(int64(n))
+		for x := 1; x < n; x++ {
+			(&element).SetInt64(int64(x))
+			(&gcd).GCD(nil, nil, &element, &modulus)
+			if (&gcd).Cmp(one) != 0 {
+				continue
+			}
+			(&inverse).ModInverse(&element, &modulus)
+			(&inverse).Mul(&inverse, &element)
+			(&inverse).Mod(&inverse, &modulus)
+			if (&inverse).Cmp(one) != 0 {
+				t.Errorf("ModInverse(%d,%d)*%d%%%d=%d, not 1", &element, &modulus, &element, &modulus, &inverse)
+			}
+		}
+	}
+}
+
+var encodingTests = []string{
+	"-539345864568634858364538753846587364875430589374589",
+	"-678645873",
+	"-100",
+	"-2",
+	"-1",
+	"0",
+	"1",
+	"2",
+	"10",
+	"42",
+	"1234567890",
+	"298472983472983471903246121093472394872319615612417471234712061",
+}
+
+func TestIntGobEncoding(t *testing.T) {
+	var medium bytes.Buffer
+	enc := gob.NewEncoder(&medium)
+	dec := gob.NewDecoder(&medium)
+	for _, test := range encodingTests {
+		medium.Reset() // empty buffer for each test case (in case of failures)
+		var tx Int
+		tx.SetString(test, 10)
+		if err := enc.Encode(&tx); err != nil {
+			t.Errorf("encoding of %s failed: %s", &tx, err)
+		}
+		var rx Int
+		if err := dec.Decode(&rx); err != nil {
+			t.Errorf("decoding of %s failed: %s", &tx, err)
+		}
+		if rx.Cmp(&tx) != 0 {
+			t.Errorf("transmission of %s failed: got %s want %s", &tx, &rx, &tx)
+		}
+	}
+}
+
+// Sending a nil Int pointer (inside a slice) on a round trip through gob should yield a zero.
+// TODO: top-level nils.
+func TestGobEncodingNilIntInSlice(t *testing.T) {
+	buf := new(bytes.Buffer)
+	enc := gob.NewEncoder(buf)
+	dec := gob.NewDecoder(buf)
+
+	var in = make([]*Int, 1)
+	err := enc.Encode(&in)
+	if err != nil {
+		t.Errorf("gob encode failed: %q", err)
+	}
+	var out []*Int
+	err = dec.Decode(&out)
+	if err != nil {
+		t.Fatalf("gob decode failed: %q", err)
+	}
+	if len(out) != 1 {
+		t.Fatalf("wrong len; want 1 got %d", len(out))
+	}
+	var zero Int
+	if out[0].Cmp(&zero) != 0 {
+		t.Errorf("transmission of (*Int)(nill) failed: got %s want 0", out)
+	}
+}
+
+func TestIntJSONEncoding(t *testing.T) {
+	for _, test := range encodingTests {
+		var tx Int
+		tx.SetString(test, 10)
+		b, err := json.Marshal(&tx)
+		if err != nil {
+			t.Errorf("marshaling of %s failed: %s", &tx, err)
+		}
+		var rx Int
+		if err := json.Unmarshal(b, &rx); err != nil {
+			t.Errorf("unmarshaling of %s failed: %s", &tx, err)
+		}
+		if rx.Cmp(&tx) != 0 {
+			t.Errorf("JSON encoding of %s failed: got %s want %s", &tx, &rx, &tx)
+		}
+	}
+}
+
+var intVals = []string{
+	"-141592653589793238462643383279502884197169399375105820974944592307816406286",
+	"-1415926535897932384626433832795028841971",
+	"-141592653589793",
+	"-1",
+	"0",
+	"1",
+	"141592653589793",
+	"1415926535897932384626433832795028841971",
+	"141592653589793238462643383279502884197169399375105820974944592307816406286",
+}
+
+func TestIntJSONEncodingTextMarshaller(t *testing.T) {
+	for _, num := range intVals {
+		var tx Int
+		tx.SetString(num, 0)
+		b, err := json.Marshal(&tx)
+		if err != nil {
+			t.Errorf("marshaling of %s failed: %s", &tx, err)
+			continue
+		}
+		var rx Int
+		if err := json.Unmarshal(b, &rx); err != nil {
+			t.Errorf("unmarshaling of %s failed: %s", &tx, err)
+			continue
+		}
+		if rx.Cmp(&tx) != 0 {
+			t.Errorf("JSON encoding of %s failed: got %s want %s", &tx, &rx, &tx)
+		}
+	}
+}
+
+func TestIntXMLEncodingTextMarshaller(t *testing.T) {
+	for _, num := range intVals {
+		var tx Int
+		tx.SetString(num, 0)
+		b, err := xml.Marshal(&tx)
+		if err != nil {
+			t.Errorf("marshaling of %s failed: %s", &tx, err)
+			continue
+		}
+		var rx Int
+		if err := xml.Unmarshal(b, &rx); err != nil {
+			t.Errorf("unmarshaling of %s failed: %s", &tx, err)
+			continue
+		}
+		if rx.Cmp(&tx) != 0 {
+			t.Errorf("XML encoding of %s failed: got %s want %s", &tx, &rx, &tx)
+		}
+	}
+}
+
+func TestIssue2607(t *testing.T) {
+	// This code sequence used to hang.
+	n := NewInt(10)
+	n.Rand(rand.New(rand.NewSource(9)), n)
+}
diff --git a/src/cmd/compile/internal/big/intconv.go b/src/cmd/compile/internal/big/intconv.go
new file mode 100644
index 0000000..9c68a22
--- /dev/null
+++ b/src/cmd/compile/internal/big/intconv.go
@@ -0,0 +1,228 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements int-to-string conversion functions.
+
+package big
+
+import (
+	"errors"
+	"fmt"
+	"io"
+)
+
+func (x *Int) String() string {
+	switch {
+	case x == nil:
+		return "<nil>"
+	case x.neg:
+		return "-" + x.abs.decimalString()
+	}
+	return x.abs.decimalString()
+}
+
+func charset(ch rune) string {
+	switch ch {
+	case 'b':
+		return lowercaseDigits[0:2]
+	case 'o':
+		return lowercaseDigits[0:8]
+	case 'd', 's', 'v':
+		return lowercaseDigits[0:10]
+	case 'x':
+		return lowercaseDigits[0:16]
+	case 'X':
+		return uppercaseDigits[0:16]
+	}
+	return "" // unknown format
+}
+
+// write count copies of text to s
+func writeMultiple(s fmt.State, text string, count int) {
+	if len(text) > 0 {
+		b := []byte(text)
+		for ; count > 0; count-- {
+			s.Write(b)
+		}
+	}
+}
+
+// Format is a support routine for fmt.Formatter. It accepts
+// the formats 'b' (binary), 'o' (octal), 'd' (decimal), 'x'
+// (lowercase hexadecimal), and 'X' (uppercase hexadecimal).
+// Also supported are the full suite of package fmt's format
+// verbs for integral types, including '+', '-', and ' '
+// for sign control, '#' for leading zero in octal and for
+// hexadecimal, a leading "0x" or "0X" for "%#x" and "%#X"
+// respectively, specification of minimum digits precision,
+// output field width, space or zero padding, and left or
+// right justification.
+//
+func (x *Int) Format(s fmt.State, ch rune) {
+	cs := charset(ch)
+
+	// special cases
+	switch {
+	case cs == "":
+		// unknown format
+		fmt.Fprintf(s, "%%!%c(big.Int=%s)", ch, x.String())
+		return
+	case x == nil:
+		fmt.Fprint(s, "<nil>")
+		return
+	}
+
+	// determine sign character
+	sign := ""
+	switch {
+	case x.neg:
+		sign = "-"
+	case s.Flag('+'): // supersedes ' ' when both specified
+		sign = "+"
+	case s.Flag(' '):
+		sign = " "
+	}
+
+	// determine prefix characters for indicating output base
+	prefix := ""
+	if s.Flag('#') {
+		switch ch {
+		case 'o': // octal
+			prefix = "0"
+		case 'x': // hexadecimal
+			prefix = "0x"
+		case 'X':
+			prefix = "0X"
+		}
+	}
+
+	// determine digits with base set by len(cs) and digit characters from cs
+	digits := x.abs.string(cs)
+
+	// number of characters for the three classes of number padding
+	var left int   // space characters to left of digits for right justification ("%8d")
+	var zeroes int // zero characters (actually cs[0]) as left-most digits ("%.8d")
+	var right int  // space characters to right of digits for left justification ("%-8d")
+
+	// determine number padding from precision: the least number of digits to output
+	precision, precisionSet := s.Precision()
+	if precisionSet {
+		switch {
+		case len(digits) < precision:
+			zeroes = precision - len(digits) // count of zero padding
+		case digits == "0" && precision == 0:
+			return // print nothing if zero value (x == 0) and zero precision ("." or ".0")
+		}
+	}
+
+	// determine field pad from width: the least number of characters to output
+	length := len(sign) + len(prefix) + zeroes + len(digits)
+	if width, widthSet := s.Width(); widthSet && length < width { // pad as specified
+		switch d := width - length; {
+		case s.Flag('-'):
+			// pad on the right with spaces; supersedes '0' when both specified
+			right = d
+		case s.Flag('0') && !precisionSet:
+			// pad with zeroes unless precision also specified
+			zeroes = d
+		default:
+			// pad on the left with spaces
+			left = d
+		}
+	}
+
+	// print number as [left pad][sign][prefix][zero pad][digits][right pad]
+	writeMultiple(s, " ", left)
+	writeMultiple(s, sign, 1)
+	writeMultiple(s, prefix, 1)
+	writeMultiple(s, "0", zeroes)
+	writeMultiple(s, digits, 1)
+	writeMultiple(s, " ", right)
+}
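+
+// A brief usage sketch (editor's illustration, not part of the original
+// CL; it assumes the same exported API as math/big):
+//
+//	x, _ := new(Int).SetString("1234", 10)
+//	fmt.Printf("%d\n", x)    // 1234
+//	fmt.Printf("%#x\n", x)   // 0x4d2
+//	fmt.Printf("%8.6d\n", x) //   001234
+//	fmt.Printf("%-8d|\n", x) // 1234    |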
+
+// scan sets z to the integer value corresponding to the longest possible prefix
+// read from r representing a signed integer number in a given conversion base.
+// It returns z, the actual conversion base used, and an error, if any. In the
+// error case, the value of z is undefined but the returned value is nil. The
+// syntax follows the syntax of integer literals in Go.
+//
+// The base argument must be 0 or a value from 2 through MaxBase. If the base
+// is 0, the string prefix determines the actual conversion base. A prefix of
+// ``0x'' or ``0X'' selects base 16; the ``0'' prefix selects base 8, and a
+// ``0b'' or ``0B'' prefix selects base 2. Otherwise the selected base is 10.
+//
+func (z *Int) scan(r io.ByteScanner, base int) (*Int, int, error) {
+	// determine sign
+	neg, err := scanSign(r)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	// determine mantissa
+	z.abs, base, _, err = z.abs.scan(r, base, false)
+	if err != nil {
+		return nil, base, err
+	}
+	z.neg = len(z.abs) > 0 && neg // 0 has no sign
+
+	return z, base, nil
+}
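+
+// Sketch of the base-prefix rules above (editor's illustration; the
+// exported SetString exercises these rules through this scanner):
+//
+//	i := new(Int)
+//	i.SetString("0x1f", 0)  // "0x" prefix with base 0 -> 31
+//	i.SetString("017", 0)   // "0" prefix with base 0  -> 15 (octal)
+//	i.SetString("0b101", 0) // "0b" prefix with base 0 -> 5
+//	i.SetString("17", 10)   // explicit base 10        -> 17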
+
+func scanSign(r io.ByteScanner) (neg bool, err error) {
+	var ch byte
+	if ch, err = r.ReadByte(); err != nil {
+		return false, err
+	}
+	switch ch {
+	case '-':
+		neg = true
+	case '+':
+		// nothing to do
+	default:
+		r.UnreadByte()
+	}
+	return
+}
+
+// byteReader is a local wrapper around fmt.ScanState;
+// it implements the ByteReader interface.
+type byteReader struct {
+	fmt.ScanState
+}
+
+func (r byteReader) ReadByte() (byte, error) {
+	ch, size, err := r.ReadRune()
+	if size != 1 && err == nil {
+		err = fmt.Errorf("invalid rune %#U", ch)
+	}
+	return byte(ch), err
+}
+
+func (r byteReader) UnreadByte() error {
+	return r.UnreadRune()
+}
+
+// Scan is a support routine for fmt.Scanner; it sets z to the value of
+// the scanned number. It accepts the formats 'b' (binary), 'o' (octal),
+// 'd' (decimal), 'x' (lowercase hexadecimal), and 'X' (uppercase hexadecimal).
+func (z *Int) Scan(s fmt.ScanState, ch rune) error {
+	s.SkipSpace() // skip leading space characters
+	base := 0
+	switch ch {
+	case 'b':
+		base = 2
+	case 'o':
+		base = 8
+	case 'd':
+		base = 10
+	case 'x', 'X':
+		base = 16
+	case 's', 'v':
+		// let scan determine the base
+	default:
+		return errors.New("Int.Scan: invalid verb")
+	}
+	_, _, err := z.scan(byteReader{s}, base)
+	return err
+}
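+
+// Example of scanning via package fmt (editor's illustration, assuming
+// the same behavior as the exported math/big API):
+//
+//	z := new(Int)
+//	fmt.Sscan("18446744073709551617", z) // 'v' verb, base chosen by scan
+//	fmt.Sscanf("cafe", "%x", z)          // hexadecimal -> 51966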
diff --git a/src/cmd/compile/internal/big/intconv_test.go b/src/cmd/compile/internal/big/intconv_test.go
new file mode 100644
index 0000000..2deb84b
--- /dev/null
+++ b/src/cmd/compile/internal/big/intconv_test.go
@@ -0,0 +1,342 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package big
+
+import (
+	"bytes"
+	"fmt"
+	"testing"
+)
+
+var stringTests = []struct {
+	in   string
+	out  string
+	base int
+	val  int64
+	ok   bool
+}{
+	{in: "", ok: false},
+	{in: "a", ok: false},
+	{in: "z", ok: false},
+	{in: "+", ok: false},
+	{in: "-", ok: false},
+	{in: "0b", ok: false},
+	{in: "0x", ok: false},
+	{in: "2", base: 2, ok: false},
+	{in: "0b2", base: 0, ok: false},
+	{in: "08", ok: false},
+	{in: "8", base: 8, ok: false},
+	{in: "0xg", base: 0, ok: false},
+	{in: "g", base: 16, ok: false},
+	{"0", "0", 0, 0, true},
+	{"0", "0", 10, 0, true},
+	{"0", "0", 16, 0, true},
+	{"+0", "0", 0, 0, true},
+	{"-0", "0", 0, 0, true},
+	{"10", "10", 0, 10, true},
+	{"10", "10", 10, 10, true},
+	{"10", "10", 16, 16, true},
+	{"-10", "-10", 16, -16, true},
+	{"+10", "10", 16, 16, true},
+	{"0x10", "16", 0, 16, true},
+	{in: "0x10", base: 16, ok: false},
+	{"-0x10", "-16", 0, -16, true},
+	{"+0x10", "16", 0, 16, true},
+	{"00", "0", 0, 0, true},
+	{"0", "0", 8, 0, true},
+	{"07", "7", 0, 7, true},
+	{"7", "7", 8, 7, true},
+	{"023", "19", 0, 19, true},
+	{"23", "23", 8, 19, true},
+	{"cafebabe", "cafebabe", 16, 0xcafebabe, true},
+	{"0b0", "0", 0, 0, true},
+	{"-111", "-111", 2, -7, true},
+	{"-0b111", "-7", 0, -7, true},
+	{"0b1001010111", "599", 0, 0x257, true},
+	{"1001010111", "1001010111", 2, 0x257, true},
+}
+
+func format(base int) string {
+	switch base {
+	case 2:
+		return "%b"
+	case 8:
+		return "%o"
+	case 16:
+		return "%x"
+	}
+	return "%d"
+}
+
+func TestGetString(t *testing.T) {
+	z := new(Int)
+	for i, test := range stringTests {
+		if !test.ok {
+			continue
+		}
+		z.SetInt64(test.val)
+
+		if test.base == 10 {
+			s := z.String()
+			if s != test.out {
+				t.Errorf("#%da got %s; want %s", i, s, test.out)
+			}
+		}
+
+		s := fmt.Sprintf(format(test.base), z)
+		if s != test.out {
+			t.Errorf("#%db got %s; want %s", i, s, test.out)
+		}
+	}
+}
+
+func TestSetString(t *testing.T) {
+	tmp := new(Int)
+	for i, test := range stringTests {
+		// initialize to a non-zero value so that issues with parsing
+		// 0 are detected
+		tmp.SetInt64(1234567890)
+		n1, ok1 := new(Int).SetString(test.in, test.base)
+		n2, ok2 := tmp.SetString(test.in, test.base)
+		expected := NewInt(test.val)
+		if ok1 != test.ok || ok2 != test.ok {
+			t.Errorf("#%d (input '%s') ok incorrect (should be %t)", i, test.in, test.ok)
+			continue
+		}
+		if !ok1 {
+			if n1 != nil {
+				t.Errorf("#%d (input '%s') n1 != nil", i, test.in)
+			}
+			continue
+		}
+		if !ok2 {
+			if n2 != nil {
+				t.Errorf("#%d (input '%s') n2 != nil", i, test.in)
+			}
+			continue
+		}
+
+		if ok1 && !isNormalized(n1) {
+			t.Errorf("#%d (input '%s'): %v is not normalized", i, test.in, *n1)
+		}
+		if ok2 && !isNormalized(n2) {
+			t.Errorf("#%d (input '%s'): %v is not normalized", i, test.in, *n2)
+		}
+
+		if n1.Cmp(expected) != 0 {
+			t.Errorf("#%d (input '%s') got: %s want: %d", i, test.in, n1, test.val)
+		}
+		if n2.Cmp(expected) != 0 {
+			t.Errorf("#%d (input '%s') got: %s want: %d", i, test.in, n2, test.val)
+		}
+	}
+}
+
+var formatTests = []struct {
+	input  string
+	format string
+	output string
+}{
+	{"<nil>", "%x", "<nil>"},
+	{"<nil>", "%#x", "<nil>"},
+	{"<nil>", "%#y", "%!y(big.Int=<nil>)"},
+
+	{"10", "%b", "1010"},
+	{"10", "%o", "12"},
+	{"10", "%d", "10"},
+	{"10", "%v", "10"},
+	{"10", "%x", "a"},
+	{"10", "%X", "A"},
+	{"-10", "%X", "-A"},
+	{"10", "%y", "%!y(big.Int=10)"},
+	{"-10", "%y", "%!y(big.Int=-10)"},
+
+	{"10", "%#b", "1010"},
+	{"10", "%#o", "012"},
+	{"10", "%#d", "10"},
+	{"10", "%#v", "10"},
+	{"10", "%#x", "0xa"},
+	{"10", "%#X", "0XA"},
+	{"-10", "%#X", "-0XA"},
+	{"10", "%#y", "%!y(big.Int=10)"},
+	{"-10", "%#y", "%!y(big.Int=-10)"},
+
+	{"1234", "%d", "1234"},
+	{"1234", "%3d", "1234"},
+	{"1234", "%4d", "1234"},
+	{"-1234", "%d", "-1234"},
+	{"1234", "% 5d", " 1234"},
+	{"1234", "%+5d", "+1234"},
+	{"1234", "%-5d", "1234 "},
+	{"1234", "%x", "4d2"},
+	{"1234", "%X", "4D2"},
+	{"-1234", "%3x", "-4d2"},
+	{"-1234", "%4x", "-4d2"},
+	{"-1234", "%5x", " -4d2"},
+	{"-1234", "%-5x", "-4d2 "},
+	{"1234", "%03d", "1234"},
+	{"1234", "%04d", "1234"},
+	{"1234", "%05d", "01234"},
+	{"1234", "%06d", "001234"},
+	{"-1234", "%06d", "-01234"},
+	{"1234", "%+06d", "+01234"},
+	{"1234", "% 06d", " 01234"},
+	{"1234", "%-6d", "1234  "},
+	{"1234", "%-06d", "1234  "},
+	{"-1234", "%-06d", "-1234 "},
+
+	{"1234", "%.3d", "1234"},
+	{"1234", "%.4d", "1234"},
+	{"1234", "%.5d", "01234"},
+	{"1234", "%.6d", "001234"},
+	{"-1234", "%.3d", "-1234"},
+	{"-1234", "%.4d", "-1234"},
+	{"-1234", "%.5d", "-01234"},
+	{"-1234", "%.6d", "-001234"},
+
+	{"1234", "%8.3d", "    1234"},
+	{"1234", "%8.4d", "    1234"},
+	{"1234", "%8.5d", "   01234"},
+	{"1234", "%8.6d", "  001234"},
+	{"-1234", "%8.3d", "   -1234"},
+	{"-1234", "%8.4d", "   -1234"},
+	{"-1234", "%8.5d", "  -01234"},
+	{"-1234", "%8.6d", " -001234"},
+
+	{"1234", "%+8.3d", "   +1234"},
+	{"1234", "%+8.4d", "   +1234"},
+	{"1234", "%+8.5d", "  +01234"},
+	{"1234", "%+8.6d", " +001234"},
+	{"-1234", "%+8.3d", "   -1234"},
+	{"-1234", "%+8.4d", "   -1234"},
+	{"-1234", "%+8.5d", "  -01234"},
+	{"-1234", "%+8.6d", " -001234"},
+
+	{"1234", "% 8.3d", "    1234"},
+	{"1234", "% 8.4d", "    1234"},
+	{"1234", "% 8.5d", "   01234"},
+	{"1234", "% 8.6d", "  001234"},
+	{"-1234", "% 8.3d", "   -1234"},
+	{"-1234", "% 8.4d", "   -1234"},
+	{"-1234", "% 8.5d", "  -01234"},
+	{"-1234", "% 8.6d", " -001234"},
+
+	{"1234", "%.3x", "4d2"},
+	{"1234", "%.4x", "04d2"},
+	{"1234", "%.5x", "004d2"},
+	{"1234", "%.6x", "0004d2"},
+	{"-1234", "%.3x", "-4d2"},
+	{"-1234", "%.4x", "-04d2"},
+	{"-1234", "%.5x", "-004d2"},
+	{"-1234", "%.6x", "-0004d2"},
+
+	{"1234", "%8.3x", "     4d2"},
+	{"1234", "%8.4x", "    04d2"},
+	{"1234", "%8.5x", "   004d2"},
+	{"1234", "%8.6x", "  0004d2"},
+	{"-1234", "%8.3x", "    -4d2"},
+	{"-1234", "%8.4x", "   -04d2"},
+	{"-1234", "%8.5x", "  -004d2"},
+	{"-1234", "%8.6x", " -0004d2"},
+
+	{"1234", "%+8.3x", "    +4d2"},
+	{"1234", "%+8.4x", "   +04d2"},
+	{"1234", "%+8.5x", "  +004d2"},
+	{"1234", "%+8.6x", " +0004d2"},
+	{"-1234", "%+8.3x", "    -4d2"},
+	{"-1234", "%+8.4x", "   -04d2"},
+	{"-1234", "%+8.5x", "  -004d2"},
+	{"-1234", "%+8.6x", " -0004d2"},
+
+	{"1234", "% 8.3x", "     4d2"},
+	{"1234", "% 8.4x", "    04d2"},
+	{"1234", "% 8.5x", "   004d2"},
+	{"1234", "% 8.6x", "  0004d2"},
+	{"1234", "% 8.7x", " 00004d2"},
+	{"1234", "% 8.8x", " 000004d2"},
+	{"-1234", "% 8.3x", "    -4d2"},
+	{"-1234", "% 8.4x", "   -04d2"},
+	{"-1234", "% 8.5x", "  -004d2"},
+	{"-1234", "% 8.6x", " -0004d2"},
+	{"-1234", "% 8.7x", "-00004d2"},
+	{"-1234", "% 8.8x", "-000004d2"},
+
+	{"1234", "%-8.3d", "1234    "},
+	{"1234", "%-8.4d", "1234    "},
+	{"1234", "%-8.5d", "01234   "},
+	{"1234", "%-8.6d", "001234  "},
+	{"1234", "%-8.7d", "0001234 "},
+	{"1234", "%-8.8d", "00001234"},
+	{"-1234", "%-8.3d", "-1234   "},
+	{"-1234", "%-8.4d", "-1234   "},
+	{"-1234", "%-8.5d", "-01234  "},
+	{"-1234", "%-8.6d", "-001234 "},
+	{"-1234", "%-8.7d", "-0001234"},
+	{"-1234", "%-8.8d", "-00001234"},
+
+	{"16777215", "%b", "111111111111111111111111"}, // 2**24 - 1
+
+	{"0", "%.d", ""},
+	{"0", "%.0d", ""},
+	{"0", "%3.d", ""},
+}
+
+func TestFormat(t *testing.T) {
+	for i, test := range formatTests {
+		var x *Int
+		if test.input != "<nil>" {
+			var ok bool
+			x, ok = new(Int).SetString(test.input, 0)
+			if !ok {
+				t.Errorf("#%d failed reading input %s", i, test.input)
+			}
+		}
+		output := fmt.Sprintf(test.format, x)
+		if output != test.output {
+			t.Errorf("#%d got %q; want %q, {%q, %q, %q}", i, output, test.output, test.input, test.format, test.output)
+		}
+	}
+}
+
+var scanTests = []struct {
+	input     string
+	format    string
+	output    string
+	remaining int
+}{
+	{"1010", "%b", "10", 0},
+	{"0b1010", "%v", "10", 0},
+	{"12", "%o", "10", 0},
+	{"012", "%v", "10", 0},
+	{"10", "%d", "10", 0},
+	{"10", "%v", "10", 0},
+	{"a", "%x", "10", 0},
+	{"0xa", "%v", "10", 0},
+	{"A", "%X", "10", 0},
+	{"-A", "%X", "-10", 0},
+	{"+0b1011001", "%v", "89", 0},
+	{"0xA", "%v", "10", 0},
+	{"0 ", "%v", "0", 1},
+	{"2+3", "%v", "2", 2},
+	{"0XABC 12", "%v", "2748", 3},
+}
+
+func TestScan(t *testing.T) {
+	var buf bytes.Buffer
+	for i, test := range scanTests {
+		x := new(Int)
+		buf.Reset()
+		buf.WriteString(test.input)
+		if _, err := fmt.Fscanf(&buf, test.format, x); err != nil {
+			t.Errorf("#%d error: %s", i, err)
+		}
+		if x.String() != test.output {
+			t.Errorf("#%d got %s; want %s", i, x.String(), test.output)
+		}
+		if buf.Len() != test.remaining {
+			t.Errorf("#%d got %d bytes remaining; want %d", i, buf.Len(), test.remaining)
+		}
+	}
+}
diff --git a/src/cmd/compile/internal/big/nat.go b/src/cmd/compile/internal/big/nat.go
new file mode 100644
index 0000000..2a279d1
--- /dev/null
+++ b/src/cmd/compile/internal/big/nat.go
@@ -0,0 +1,1155 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package big implements multi-precision arithmetic (big numbers).
+// The following numeric types are supported:
+//
+//   Int    signed integers
+//   Rat    rational numbers
+//   Float  floating-point numbers
+//
+// Methods are typically of the form:
+//
+//   func (z *T) Unary(x *T) *T        // z = op x
+//   func (z *T) Binary(x, y *T) *T    // z = x op y
+//   func (x *T) M() T1                // v = x.M()
+//
+// with T one of Int, Rat, or Float. For unary and binary operations, the
+// result is the receiver (usually named z in that case); if it is one of
+// the operands x or y it may be overwritten (and its memory reused).
+// To enable chaining of operations, the result is also returned. Methods
+// returning a result other than *Int, *Rat, or *Float take an operand as
+// the receiver (usually named x in that case).
+//
+package big
+
+// This file contains operations on unsigned multi-precision integers.
+// These are the building blocks for the operations on signed integers
+// and rationals.
+
+import "math/rand"
+
+// An unsigned integer x of the form
+//
+//   x = x[n-1]*_B^(n-1) + x[n-2]*_B^(n-2) + ... + x[1]*_B + x[0]
+//
+// with 0 <= x[i] < _B and 0 <= i < n is stored in a slice of length n,
+// with the digits x[i] as the slice elements.
+//
+// A number is normalized if the slice contains no leading 0 digits.
+// During arithmetic operations, denormalized values may occur but are
+// always normalized before returning the final result. The normalized
+// representation of 0 is the empty or nil slice (length = 0).
+//
+type nat []Word
+
+var (
+	natOne = nat{1}
+	natTwo = nat{2}
+	natTen = nat{10}
+)
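+
+// Illustration of the representation (editor's note): with _W == 64,
+//
+//	nat(nil)  // 0
+//	nat{5}    // 5
+//	nat{0, 1} // 1<<64, since x = x[0] + x[1]*_B with _B == 1<<_W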
+
+func (z nat) clear() {
+	for i := range z {
+		z[i] = 0
+	}
+}
+
+func (z nat) norm() nat {
+	i := len(z)
+	for i > 0 && z[i-1] == 0 {
+		i--
+	}
+	return z[0:i]
+}
+
+func (z nat) make(n int) nat {
+	if n <= cap(z) {
+		return z[:n] // reuse z
+	}
+	// Choosing a good value for e has significant performance impact
+	// because it increases the chance that a value can be reused.
+	const e = 4 // extra capacity
+	return make(nat, n, n+e)
+}
+
+func (z nat) setWord(x Word) nat {
+	if x == 0 {
+		return z[:0]
+	}
+	z = z.make(1)
+	z[0] = x
+	return z
+}
+
+func (z nat) setUint64(x uint64) nat {
+	// single-digit values
+	if w := Word(x); uint64(w) == x {
+		return z.setWord(w)
+	}
+
+	// compute number of words n required to represent x
+	n := 0
+	for t := x; t > 0; t >>= _W {
+		n++
+	}
+
+	// split x into n words
+	z = z.make(n)
+	for i := range z {
+		z[i] = Word(x & _M)
+		x >>= _W
+	}
+
+	return z
+}
+
+func (z nat) set(x nat) nat {
+	z = z.make(len(x))
+	copy(z, x)
+	return z
+}
+
+func (z nat) add(x, y nat) nat {
+	m := len(x)
+	n := len(y)
+
+	switch {
+	case m < n:
+		return z.add(y, x)
+	case m == 0:
+		// n == 0 because m >= n; result is 0
+		return z[:0]
+	case n == 0:
+		// result is x
+		return z.set(x)
+	}
+	// m > 0
+
+	z = z.make(m + 1)
+	c := addVV(z[0:n], x, y)
+	if m > n {
+		c = addVW(z[n:m], x[n:], c)
+	}
+	z[m] = c
+
+	return z.norm()
+}
+
+func (z nat) sub(x, y nat) nat {
+	m := len(x)
+	n := len(y)
+
+	switch {
+	case m < n:
+		panic("underflow")
+	case m == 0:
+		// n == 0 because m >= n; result is 0
+		return z[:0]
+	case n == 0:
+		// result is x
+		return z.set(x)
+	}
+	// m > 0
+
+	z = z.make(m)
+	c := subVV(z[0:n], x, y)
+	if m > n {
+		c = subVW(z[n:], x[n:], c)
+	}
+	if c != 0 {
+		panic("underflow")
+	}
+
+	return z.norm()
+}
+
+func (x nat) cmp(y nat) (r int) {
+	m := len(x)
+	n := len(y)
+	if m != n || m == 0 {
+		switch {
+		case m < n:
+			r = -1
+		case m > n:
+			r = 1
+		}
+		return
+	}
+
+	i := m - 1
+	for i > 0 && x[i] == y[i] {
+		i--
+	}
+
+	switch {
+	case x[i] < y[i]:
+		r = -1
+	case x[i] > y[i]:
+		r = 1
+	}
+	return
+}
+
+func (z nat) mulAddWW(x nat, y, r Word) nat {
+	m := len(x)
+	if m == 0 || y == 0 {
+		return z.setWord(r) // result is r
+	}
+	// m > 0
+
+	z = z.make(m + 1)
+	z[m] = mulAddVWW(z[0:m], x, y, r)
+
+	return z.norm()
+}
+
+// basicMul multiplies x and y and leaves the result in z.
+// The (non-normalized) result is placed in z[0 : len(x) + len(y)].
+func basicMul(z, x, y nat) {
+	z[0 : len(x)+len(y)].clear() // initialize z
+	for i, d := range y {
+		if d != 0 {
+			z[len(x)+i] = addMulVVW(z[i:i+len(x)], x, d)
+		}
+	}
+}
+
+// Fast version of z[0:n+n>>1].add(z[0:n+n>>1], x[0:n]) w/o bounds checks.
+// Factored out for readability - do not use outside karatsuba.
+func karatsubaAdd(z, x nat, n int) {
+	if c := addVV(z[0:n], z, x); c != 0 {
+		addVW(z[n:n+n>>1], z[n:], c)
+	}
+}
+
+// Like karatsubaAdd, but does subtract.
+func karatsubaSub(z, x nat, n int) {
+	if c := subVV(z[0:n], z, x); c != 0 {
+		subVW(z[n:n+n>>1], z[n:], c)
+	}
+}
+
+// Operands that are shorter than karatsubaThreshold are multiplied using
+// "grade school" multiplication; for longer operands the Karatsuba algorithm
+// is used.
+var karatsubaThreshold int = 40 // computed by calibrate.go
+
+// karatsuba multiplies x and y and leaves the result in z.
+// Both x and y must have the same length n and n must be a
+// power of 2. The result vector z must have len(z) >= 6*n.
+// The (non-normalized) result is placed in z[0 : 2*n].
+func karatsuba(z, x, y nat) {
+	n := len(y)
+
+	// Switch to basic multiplication if numbers are odd or small.
+	// (n is always even if karatsubaThreshold is even, but be
+	// conservative)
+	if n&1 != 0 || n < karatsubaThreshold || n < 2 {
+		basicMul(z, x, y)
+		return
+	}
+	// n&1 == 0 && n >= karatsubaThreshold && n >= 2
+
+	// Karatsuba multiplication is based on the observation that
+	// for two numbers x and y with:
+	//
+	//   x = x1*b + x0
+	//   y = y1*b + y0
+	//
+	// the product x*y can be obtained with 3 products z2, z1, z0
+	// instead of 4:
+	//
+	//   x*y = x1*y1*b*b + (x1*y0 + x0*y1)*b + x0*y0
+	//       =    z2*b*b +              z1*b +    z0
+	//
+	// with:
+	//
+	//   xd = x1 - x0
+	//   yd = y0 - y1
+	//
+	//   z1 =      xd*yd                    + z2 + z0
+	//      = (x1-x0)*(y0 - y1)             + z2 + z0
+	//      = x1*y0 - x1*y1 - x0*y0 + x0*y1 + z2 + z0
+	//      = x1*y0 -    z2 -    z0 + x0*y1 + z2 + z0
+	//      = x1*y0                 + x0*y1
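+	//
+	// A worked example (editor's note) with b = 10, x = 23, y = 31:
+	// x1 = 2, x0 = 3, y1 = 3, y0 = 1 give z2 = 6, z0 = 3,
+	// xd = -1, yd = -2, and xd*yd = 2, so
+	// z1 = 2 + z2 + z0 = 11 = x1*y0 + x0*y1 and
+	// x*y = 6*100 + 11*10 + 3 = 713 = 23*31.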
+
+	// split x, y into "digits"
+	n2 := n >> 1              // n2 >= 1
+	x1, x0 := x[n2:], x[0:n2] // x = x1*b + x0
+	y1, y0 := y[n2:], y[0:n2] // y = y1*b + y0
+
+	// z is used for the result and temporary storage:
+	//
+	//   6*n     5*n     4*n     3*n     2*n     1*n     0*n
+	// z = [z2 copy|z0 copy| xd*yd | yd:xd | x1*y1 | x0*y0 ]
+	//
+	// For each recursive call of karatsuba, an unused slice of
+	// z is passed in that has (at least) half the length of the
+	// caller's z.
+
+	// compute z0 and z2 with the result "in place" in z
+	karatsuba(z, x0, y0)     // z0 = x0*y0
+	karatsuba(z[n:], x1, y1) // z2 = x1*y1
+
+	// compute xd (or the negative value if underflow occurs)
+	s := 1 // sign of product xd*yd
+	xd := z[2*n : 2*n+n2]
+	if subVV(xd, x1, x0) != 0 { // x1-x0
+		s = -s
+		subVV(xd, x0, x1) // x0-x1
+	}
+
+	// compute yd (or the negative value if underflow occurs)
+	yd := z[2*n+n2 : 3*n]
+	if subVV(yd, y0, y1) != 0 { // y0-y1
+		s = -s
+		subVV(yd, y1, y0) // y1-y0
+	}
+
+	// p = (x1-x0)*(y0-y1) == x1*y0 - x1*y1 - x0*y0 + x0*y1 for s > 0
+	// p = (x0-x1)*(y0-y1) == x0*y0 - x0*y1 - x1*y0 + x1*y1 for s < 0
+	p := z[n*3:]
+	karatsuba(p, xd, yd)
+
+	// save original z2:z0
+	// (ok to use upper half of z since we're done recursing)
+	r := z[n*4:]
+	copy(r, z[:n*2])
+
+	// add up all partial products
+	//
+	//   2*n     n     0
+	// z = [ z2  | z0  ]
+	//   +    [ z0  ]
+	//   +    [ z2  ]
+	//   +    [  p  ]
+	//
+	karatsubaAdd(z[n2:], r, n)
+	karatsubaAdd(z[n2:], r[n:], n)
+	if s > 0 {
+		karatsubaAdd(z[n2:], p, n)
+	} else {
+		karatsubaSub(z[n2:], p, n)
+	}
+}
+
+// alias reports whether x and y share the same base array.
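+// (Editor's note: it compares the addresses of the final elements of the
+// full-capacity slices x[0:cap(x)] and y[0:cap(y)]; slices of the same
+// base array always share that final element, distinct arrays never do.)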
+func alias(x, y nat) bool {
+	return cap(x) > 0 && cap(y) > 0 && &x[0:cap(x)][cap(x)-1] == &y[0:cap(y)][cap(y)-1]
+}
+
+// addAt implements z += x<<(_W*i); z must be long enough.
+// (we don't use nat.add because we need z to stay the same
+// slice, and we don't need to normalize z after each addition)
+func addAt(z, x nat, i int) {
+	if n := len(x); n > 0 {
+		if c := addVV(z[i:i+n], z[i:], x); c != 0 {
+			j := i + n
+			if j < len(z) {
+				addVW(z[j:], z[j:], c)
+			}
+		}
+	}
+}
+
+func max(x, y int) int {
+	if x > y {
+		return x
+	}
+	return y
+}
+
+// karatsubaLen computes an approximation to the maximum k <= n such that
+// k = p<<i for a number p <= karatsubaThreshold and an i >= 0. Thus, the
+// result is the largest number that can be divided repeatedly by 2 before
+// becoming about the value of karatsubaThreshold.
+func karatsubaLen(n int) int {
+	i := uint(0)
+	for n > karatsubaThreshold {
+		n >>= 1
+		i++
+	}
+	return n << i
+}
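+
+// For example (editor's note), with karatsubaThreshold == 40,
+// karatsubaLen(82) halves 82 -> 41 -> 20 and returns 20<<2 == 80.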
+
+func (z nat) mul(x, y nat) nat {
+	m := len(x)
+	n := len(y)
+
+	switch {
+	case m < n:
+		return z.mul(y, x)
+	case m == 0 || n == 0:
+		return z[:0]
+	case n == 1:
+		return z.mulAddWW(x, y[0], 0)
+	}
+	// m >= n > 1
+
+	// determine if z can be reused
+	if alias(z, x) || alias(z, y) {
+		z = nil // z is an alias for x or y - cannot reuse
+	}
+
+	// use basic multiplication if the numbers are small
+	if n < karatsubaThreshold {
+		z = z.make(m + n)
+		basicMul(z, x, y)
+		return z.norm()
+	}
+	// m >= n && n >= karatsubaThreshold && n >= 2
+
+	// determine Karatsuba length k such that
+	//
+	//   x = xh*b + x0  (0 <= x0 < b)
+	//   y = yh*b + y0  (0 <= y0 < b)
+	//   b = 1<<(_W*k)  ("base" of digits xi, yi)
+	//
+	k := karatsubaLen(n)
+	// k <= n
+
+	// multiply x0 and y0 via Karatsuba
+	x0 := x[0:k]              // x0 is not normalized
+	y0 := y[0:k]              // y0 is not normalized
+	z = z.make(max(6*k, m+n)) // enough space for karatsuba of x0*y0 and full result of x*y
+	karatsuba(z, x0, y0)
+	z = z[0 : m+n]  // z has final length but may be incomplete
+	z[2*k:].clear() // upper portion of z is garbage (and 2*k <= m+n since k <= n <= m)
+
+	// If xh != 0 or yh != 0, add the missing terms to z. For
+	//
+	//   xh = xi*b^i + ... + x2*b^2 + x1*b (0 <= xi < b)
+	//   yh =                         y1*b (0 <= y1 < b)
+	//
+	// the missing terms are
+	//
+	//   x0*y1*b and xi*y0*b^i, xi*y1*b^(i+1) for i > 0
+	//
+	// since all the yi for i > 1 are 0 by choice of k: If any of them
+	// were > 0, then yh >= b^2 and thus y >= b^2. Then k' = k*2 would
+	// be a larger valid threshold contradicting the assumption about k.
+	//
+	if k < n || m != n {
+		var t nat
+
+		// add x0*y1*b
+		x0 := x0.norm()
+		y1 := y[k:]       // y1 is normalized because y is
+		t = t.mul(x0, y1) // update t so we don't lose t's underlying array
+		addAt(z, t, k)
+
+		// add xi*y0<<i, xi*y1*b<<(i+k)
+		y0 := y0.norm()
+		for i := k; i < len(x); i += k {
+			xi := x[i:]
+			if len(xi) > k {
+				xi = xi[:k]
+			}
+			xi = xi.norm()
+			t = t.mul(xi, y0)
+			addAt(z, t, i)
+			t = t.mul(xi, y1)
+			addAt(z, t, i+k)
+		}
+	}
+
+	return z.norm()
+}
+
+// mulRange computes the product of all the unsigned integers in the
+// range [a, b] inclusively. If a > b (empty range), the result is 1.
+func (z nat) mulRange(a, b uint64) nat {
+	switch {
+	case a == 0:
+		// cut long ranges short (optimization)
+		return z.setUint64(0)
+	case a > b:
+		return z.setUint64(1)
+	case a == b:
+		return z.setUint64(a)
+	case a+1 == b:
+		return z.mul(nat(nil).setUint64(a), nat(nil).setUint64(b))
+	}
+	m := (a + b) / 2
+	return z.mul(nat(nil).mulRange(a, m), nat(nil).mulRange(m+1, b))
+}
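+
+// For example (editor's note), mulRange(1, 5) splits at m = 3 into
+// mulRange(1, 3)*mulRange(4, 5) == 6*20 == 120; mulRange(1, n) is n!.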
+
+// q = (x-r)/y, with 0 <= r < y
+func (z nat) divW(x nat, y Word) (q nat, r Word) {
+	m := len(x)
+	switch {
+	case y == 0:
+		panic("division by zero")
+	case y == 1:
+		q = z.set(x) // result is x
+		return
+	case m == 0:
+		q = z[:0] // result is 0
+		return
+	}
+	// m > 0
+	z = z.make(m)
+	r = divWVW(z, 0, x, y)
+	q = z.norm()
+	return
+}
+
+func (z nat) div(z2, u, v nat) (q, r nat) {
+	if len(v) == 0 {
+		panic("division by zero")
+	}
+
+	if u.cmp(v) < 0 {
+		q = z[:0]
+		r = z2.set(u)
+		return
+	}
+
+	if len(v) == 1 {
+		var r2 Word
+		q, r2 = z.divW(u, v[0])
+		r = z2.setWord(r2)
+		return
+	}
+
+	q, r = z.divLarge(z2, u, v)
+	return
+}
+
+// q = (uIn-r)/v, with 0 <= r < y
+// Uses z as storage for q, and u as storage for r if possible.
+// See Knuth, Volume 2, section 4.3.1, Algorithm D.
+// Preconditions:
+//    len(v) >= 2
+//    len(uIn) >= len(v)
+func (z nat) divLarge(u, uIn, v nat) (q, r nat) {
+	n := len(v)
+	m := len(uIn) - n
+
+	// determine if z can be reused
+	// TODO(gri) should find a better solution - this if statement
+	//           is very costly (see e.g. time pidigits -s -n 10000)
+	if alias(z, uIn) || alias(z, v) {
+		z = nil // z is an alias for uIn or v - cannot reuse
+	}
+	q = z.make(m + 1)
+
+	qhatv := make(nat, n+1)
+	if alias(u, uIn) || alias(u, v) {
+		u = nil // u is an alias for uIn or v - cannot reuse
+	}
+	u = u.make(len(uIn) + 1)
+	u.clear() // TODO(gri) no need to clear if we allocated a new u
+
+	// D1.
+	shift := leadingZeros(v[n-1])
+	if shift > 0 {
+		// do not modify v, it may be used by another goroutine simultaneously
+		v1 := make(nat, n)
+		shlVU(v1, v, shift)
+		v = v1
+	}
+	u[len(uIn)] = shlVU(u[0:len(uIn)], uIn, shift)
+
+	// D2.
+	for j := m; j >= 0; j-- {
+		// D3.
+		qhat := Word(_M)
+		if u[j+n] != v[n-1] {
+			var rhat Word
+			qhat, rhat = divWW(u[j+n], u[j+n-1], v[n-1])
+
+			// x1 | x2 = q̂v_{n-2}
+			x1, x2 := mulWW(qhat, v[n-2])
+			// test if q̂v_{n-2} > br̂ + u_{j+n-2}
+			for greaterThan(x1, x2, rhat, u[j+n-2]) {
+				qhat--
+				prevRhat := rhat
+				rhat += v[n-1]
+				// v[n-1] >= 0, so this tests for overflow.
+				if rhat < prevRhat {
+					break
+				}
+				x1, x2 = mulWW(qhat, v[n-2])
+			}
+		}
+
+		// D4.
+		qhatv[n] = mulAddVWW(qhatv[0:n], v, qhat, 0)
+
+		c := subVV(u[j:j+len(qhatv)], u[j:], qhatv)
+		if c != 0 {
+			c := addVV(u[j:j+n], u[j:], v)
+			u[j+n] += c
+			qhat--
+		}
+
+		q[j] = qhat
+	}
+
+	q = q.norm()
+	shrVU(u, u, shift)
+	r = u.norm()
+
+	return q, r
+}
+
+// Length of x in bits. x must be normalized.
+func (x nat) bitLen() int {
+	if i := len(x) - 1; i >= 0 {
+		return i*_W + bitLen(x[i])
+	}
+	return 0
+}
+
+const deBruijn32 = 0x077CB531
+
+var deBruijn32Lookup = []byte{
+	0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
+	31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9,
+}
+
+const deBruijn64 = 0x03f79d71b4ca8b09
+
+var deBruijn64Lookup = []byte{
+	0, 1, 56, 2, 57, 49, 28, 3, 61, 58, 42, 50, 38, 29, 17, 4,
+	62, 47, 59, 36, 45, 43, 51, 22, 53, 39, 33, 30, 24, 18, 12, 5,
+	63, 55, 48, 27, 60, 41, 37, 16, 46, 35, 44, 21, 52, 32, 23, 11,
+	54, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6,
+}
+
+// trailingZeroBits returns the number of consecutive least significant zero
+// bits of x.
+func trailingZeroBits(x Word) uint {
+	// x & -x leaves only the right-most bit set in the word. Let k be the
+	// index of that bit. Since only a single bit is set, the value is two
+	// to the power of k. Multiplying by a power of two is equivalent to
+	// left shifting, in this case by k bits.  The de Bruijn constant is
+	// such that all six-bit consecutive substrings are distinct.
+	// Therefore, if we have a left shifted version of this constant we can
+	// find by how many bits it was shifted by looking at which six bit
+	// substring ended up at the top of the word.
+	// (Knuth, volume 4, section 7.3.1)
+	switch _W {
+	case 32:
+		return uint(deBruijn32Lookup[((x&-x)*deBruijn32)>>27])
+	case 64:
+		return uint(deBruijn64Lookup[((x&-x)*(deBruijn64&_M))>>58])
+	default:
+		panic("unknown word size")
+	}
+}
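+
+// For example (editor's note), on a 64-bit word with x = 0b1000:
+// x & -x == 1<<3, the multiplication shifts deBruijn64 left by 3,
+// and the top six bits (>>58) are 7, with deBruijn64Lookup[7] == 3.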
+
+// trailingZeroBits returns the number of consecutive least significant zero
+// bits of x.
+func (x nat) trailingZeroBits() uint {
+	if len(x) == 0 {
+		return 0
+	}
+	var i uint
+	for x[i] == 0 {
+		i++
+	}
+	// x[i] != 0
+	return i*_W + trailingZeroBits(x[i])
+}
+
+// z = x << s
+func (z nat) shl(x nat, s uint) nat {
+	m := len(x)
+	if m == 0 {
+		return z[:0]
+	}
+	// m > 0
+
+	n := m + int(s/_W)
+	z = z.make(n + 1)
+	z[n] = shlVU(z[n-m:n], x, s%_W)
+	z[0 : n-m].clear()
+
+	return z.norm()
+}
+
+// z = x >> s
+func (z nat) shr(x nat, s uint) nat {
+	m := len(x)
+	n := m - int(s/_W)
+	if n <= 0 {
+		return z[:0]
+	}
+	// n > 0
+
+	z = z.make(n)
+	shrVU(z, x[m-n:], s%_W)
+
+	return z.norm()
+}
+
+func (z nat) setBit(x nat, i uint, b uint) nat {
+	j := int(i / _W)
+	m := Word(1) << (i % _W)
+	n := len(x)
+	switch b {
+	case 0:
+		z = z.make(n)
+		copy(z, x)
+		if j >= n {
+			// no need to grow
+			return z
+		}
+		z[j] &^= m
+		return z.norm()
+	case 1:
+		if j >= n {
+			z = z.make(j + 1)
+			z[n:].clear()
+		} else {
+			z = z.make(n)
+		}
+		copy(z, x)
+		z[j] |= m
+		// no need to normalize
+		return z
+	}
+	panic("set bit is not 0 or 1")
+}
+
+// bit returns the value of the i'th bit, with lsb == bit 0.
+func (x nat) bit(i uint) uint {
+	j := i / _W
+	if j >= uint(len(x)) {
+		return 0
+	}
+	// 0 <= j < len(x)
+	return uint(x[j] >> (i % _W) & 1)
+}
+
+// sticky returns 1 if there's a 1 bit within the
+// i least significant bits, otherwise it returns 0.
+func (x nat) sticky(i uint) uint {
+	j := i / _W
+	if j >= uint(len(x)) {
+		if len(x) == 0 {
+			return 0
+		}
+		return 1
+	}
+	// 0 <= j < len(x)
+	for _, x := range x[:j] {
+		if x != 0 {
+			return 1
+		}
+	}
+	if x[j]<<(_W-i%_W) != 0 {
+		return 1
+	}
+	return 0
+}
+
+func (z nat) and(x, y nat) nat {
+	m := len(x)
+	n := len(y)
+	if m > n {
+		m = n
+	}
+	// m <= n
+
+	z = z.make(m)
+	for i := 0; i < m; i++ {
+		z[i] = x[i] & y[i]
+	}
+
+	return z.norm()
+}
+
+func (z nat) andNot(x, y nat) nat {
+	m := len(x)
+	n := len(y)
+	if n > m {
+		n = m
+	}
+	// m >= n
+
+	z = z.make(m)
+	for i := 0; i < n; i++ {
+		z[i] = x[i] &^ y[i]
+	}
+	copy(z[n:m], x[n:m])
+
+	return z.norm()
+}
+
+func (z nat) or(x, y nat) nat {
+	m := len(x)
+	n := len(y)
+	s := x
+	if m < n {
+		n, m = m, n
+		s = y
+	}
+	// m >= n
+
+	z = z.make(m)
+	for i := 0; i < n; i++ {
+		z[i] = x[i] | y[i]
+	}
+	copy(z[n:m], s[n:m])
+
+	return z.norm()
+}
+
+func (z nat) xor(x, y nat) nat {
+	m := len(x)
+	n := len(y)
+	s := x
+	if m < n {
+		n, m = m, n
+		s = y
+	}
+	// m >= n
+
+	z = z.make(m)
+	for i := 0; i < n; i++ {
+		z[i] = x[i] ^ y[i]
+	}
+	copy(z[n:m], s[n:m])
+
+	return z.norm()
+}
+
+// greaterThan reports whether (x1<<_W + x2) > (y1<<_W + y2)
+func greaterThan(x1, x2, y1, y2 Word) bool {
+	return x1 > y1 || x1 == y1 && x2 > y2
+}
+
+// modW returns x % d.
+func (x nat) modW(d Word) (r Word) {
+	// TODO(agl): we don't actually need to store the q value.
+	var q nat
+	q = q.make(len(x))
+	return divWVW(q, 0, x, d)
+}
+
+// random creates a random integer in [0..limit), using the space in z if
+// possible. n is the bit length of limit.
+func (z nat) random(rand *rand.Rand, limit nat, n int) nat {
+	if alias(z, limit) {
+		z = nil // z is an alias for limit - cannot reuse
+	}
+	z = z.make(len(limit))
+
+	bitLengthOfMSW := uint(n % _W)
+	if bitLengthOfMSW == 0 {
+		bitLengthOfMSW = _W
+	}
+	mask := Word((1 << bitLengthOfMSW) - 1)
+
+	for {
+		switch _W {
+		case 32:
+			for i := range z {
+				z[i] = Word(rand.Uint32())
+			}
+		case 64:
+			for i := range z {
+				z[i] = Word(rand.Uint32()) | Word(rand.Uint32())<<32
+			}
+		default:
+			panic("unknown word size")
+		}
+		z[len(limit)-1] &= mask
+		if z.cmp(limit) < 0 {
+			break
+		}
+	}
+
+	return z.norm()
+}
+
+// If m != 0 (i.e., len(m) != 0), expNN sets z to x**y mod m;
+// otherwise it sets z to x**y. The result is the value of z.
+func (z nat) expNN(x, y, m nat) nat {
+	if alias(z, x) || alias(z, y) {
+		// We cannot allow in-place modification of x or y.
+		z = nil
+	}
+
+	// x**y mod 1 == 0
+	if len(m) == 1 && m[0] == 1 {
+		return z.setWord(0)
+	}
+	// m == 0 || m > 1
+
+	// x**0 == 1
+	if len(y) == 0 {
+		return z.setWord(1)
+	}
+	// y > 0
+
+	if len(m) != 0 {
+		// We likely end up being as long as the modulus.
+		z = z.make(len(m))
+	}
+	z = z.set(x)
+
+	// If the base is non-trivial and the exponent is large, we use
+	// 4-bit, windowed exponentiation. This involves precomputing 14 values
+	// (x^2...x^15) but then reduces the number of multiply-reduces by a
+	// third. Even for a 32-bit exponent, this reduces the number of
+	// operations.
+	if len(x) > 1 && len(y) > 1 && len(m) > 0 {
+		return z.expNNWindowed(x, y, m)
+	}
+
+	v := y[len(y)-1] // v > 0 because y is normalized and y > 0
+	shift := leadingZeros(v) + 1
+	v <<= shift
+	var q nat
+
+	const mask = 1 << (_W - 1)
+
+	// We walk through the bits of the exponent one by one. Each time we
+	// see a bit, we square, thus doubling the power. If the bit is a one,
+	// we also multiply by x, thus adding one to the power.
+
+	w := _W - int(shift)
+	// zz and r are used to avoid allocating in mul and div as
+	// otherwise the arguments would alias.
+	var zz, r nat
+	for j := 0; j < w; j++ {
+		zz = zz.mul(z, z)
+		zz, z = z, zz
+
+		if v&mask != 0 {
+			zz = zz.mul(z, x)
+			zz, z = z, zz
+		}
+
+		if len(m) != 0 {
+			zz, r = zz.div(r, z, m)
+			zz, r, q, z = q, z, zz, r
+		}
+
+		v <<= 1
+	}
+
+	for i := len(y) - 2; i >= 0; i-- {
+		v = y[i]
+
+		for j := 0; j < _W; j++ {
+			zz = zz.mul(z, z)
+			zz, z = z, zz
+
+			if v&mask != 0 {
+				zz = zz.mul(z, x)
+				zz, z = z, zz
+			}
+
+			if len(m) != 0 {
+				zz, r = zz.div(r, z, m)
+				zz, r, q, z = q, z, zz, r
+			}
+
+			v <<= 1
+		}
+	}
+
+	return z.norm()
+}
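+
+// A worked trace of the bit walk above (editor's note): for y = 5
+// (binary 101), z starts as x for the leading 1 bit; the next bit (0)
+// squares z to x^2; the last bit (1) squares and multiplies, giving
+// x^5. With x = 3 and no modulus the result is 243.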
+
+// expNNWindowed calculates x**y mod m using a fixed, 4-bit window.
+func (z nat) expNNWindowed(x, y, m nat) nat {
+	// zz and r are used to avoid allocating in mul and div as otherwise
+	// the arguments would alias.
+	var zz, r nat
+
+	const n = 4
+	// powers[i] contains x^i.
+	var powers [1 << n]nat
+	powers[0] = natOne
+	powers[1] = x
+	for i := 2; i < 1<<n; i += 2 {
+		p2, p, p1 := &powers[i/2], &powers[i], &powers[i+1]
+		*p = p.mul(*p2, *p2)
+		zz, r = zz.div(r, *p, m)
+		*p, r = r, *p
+		*p1 = p1.mul(*p, x)
+		zz, r = zz.div(r, *p1, m)
+		*p1, r = r, *p1
+	}
+
+	z = z.setWord(1)
+
+	for i := len(y) - 1; i >= 0; i-- {
+		yi := y[i]
+		for j := 0; j < _W; j += n {
+			if i != len(y)-1 || j != 0 {
+				// Unrolled loop for significant performance
+				// gain.  Use go test -bench=".*" in crypto/rsa
+				// to check performance before making changes.
+				zz = zz.mul(z, z)
+				zz, z = z, zz
+				zz, r = zz.div(r, z, m)
+				z, r = r, z
+
+				zz = zz.mul(z, z)
+				zz, z = z, zz
+				zz, r = zz.div(r, z, m)
+				z, r = r, z
+
+				zz = zz.mul(z, z)
+				zz, z = z, zz
+				zz, r = zz.div(r, z, m)
+				z, r = r, z
+
+				zz = zz.mul(z, z)
+				zz, z = z, zz
+				zz, r = zz.div(r, z, m)
+				z, r = r, z
+			}
+
+			zz = zz.mul(z, powers[yi>>(_W-n)])
+			zz, z = z, zz
+			zz, r = zz.div(r, z, m)
+			z, r = r, z
+
+			yi <<= n
+		}
+	}
+
+	return z.norm()
+}
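+
+// Sketch of the window arithmetic (editor's note): the exponent is
+// consumed 4 bits at a time, so for an exponent word 0xAB the loop
+// computes ((x^0xA)^16) * x^0xB mod m, taking x^0xA and x^0xB from
+// the powers table; the four square/reduce steps implement the ^16.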
+
+// probablyPrime performs reps Miller-Rabin tests to check whether n is prime.
+// If it returns true, n is prime with probability 1 - 1/4^reps.
+// If it returns false, n is not prime.
+func (n nat) probablyPrime(reps int) bool {
+	if len(n) == 0 {
+		return false
+	}
+
+	if len(n) == 1 {
+		if n[0] < 2 {
+			return false
+		}
+
+		if n[0]%2 == 0 {
+			return n[0] == 2
+		}
+
+		// We have to exclude these cases because we reject all
+		// multiples of these numbers below.
+		switch n[0] {
+		case 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53:
+			return true
+		}
+	}
+
+	if n[0]&1 == 0 {
+		return false // n is even
+	}
+
+	const primesProduct32 = 0xC0CFD797         // Π {p ∈ primes, 2 < p <= 29}
+	const primesProduct64 = 0xE221F97C30E94E1D // Π {p ∈ primes, 2 < p <= 53}
+
+	var r Word
+	switch _W {
+	case 32:
+		r = n.modW(primesProduct32)
+	case 64:
+		r = n.modW(primesProduct64 & _M)
+	default:
+		panic("Unknown word size")
+	}
+
+	if r%3 == 0 || r%5 == 0 || r%7 == 0 || r%11 == 0 ||
+		r%13 == 0 || r%17 == 0 || r%19 == 0 || r%23 == 0 || r%29 == 0 {
+		return false
+	}
+
+	if _W == 64 && (r%31 == 0 || r%37 == 0 || r%41 == 0 ||
+		r%43 == 0 || r%47 == 0 || r%53 == 0) {
+		return false
+	}
+
+	nm1 := nat(nil).sub(n, natOne)
+	// determine q, k such that nm1 = q << k
+	k := nm1.trailingZeroBits()
+	q := nat(nil).shr(nm1, k)
+
+	nm3 := nat(nil).sub(nm1, natTwo)
+	rand := rand.New(rand.NewSource(int64(n[0])))
+
+	var x, y, quotient nat
+	nm3Len := nm3.bitLen()
+
+NextRandom:
+	for i := 0; i < reps; i++ {
+		x = x.random(rand, nm3, nm3Len)
+		x = x.add(x, natTwo)
+		y = y.expNN(x, q, n)
+		if y.cmp(natOne) == 0 || y.cmp(nm1) == 0 {
+			continue
+		}
+		for j := uint(1); j < k; j++ {
+			y = y.mul(y, y)
+			quotient, y = quotient.div(y, y, n)
+			if y.cmp(nm1) == 0 {
+				continue NextRandom
+			}
+			if y.cmp(natOne) == 0 {
+				return false
+			}
+		}
+		return false
+	}
+
+	return true
+}
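+
+// For example (editor's note), for n = 221 the decomposition above is
+// nm1 = 220 = 55 << 2, i.e. q = 55 and k = 2; each random base
+// x in [2, n-2] is then checked via y = x^q mod n and k-1 squarings.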
+
+// bytes writes the value of z into buf using big-endian encoding.
+// len(buf) must be >= len(z)*_S. The value of z is encoded in the
+// slice buf[i:]. The number i of unused bytes at the beginning of
+// buf is returned as result.
+func (z nat) bytes(buf []byte) (i int) {
+	i = len(buf)
+	for _, d := range z {
+		for j := 0; j < _S; j++ {
+			i--
+			buf[i] = byte(d)
+			d >>= 8
+		}
+	}
+
+	for i < len(buf) && buf[i] == 0 {
+		i++
+	}
+
+	return
+}
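+
+// For example (editor's note), with _S == 8, nat{0x0102}.bytes(buf)
+// stores 0x01 0x02 in the last two bytes of buf and returns
+// i == len(buf)-2; setBytes([]byte{0x01, 0x02}) reverses this: 258.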
+
+// setBytes interprets buf as the bytes of a big-endian unsigned
+// integer, sets z to that value, and returns z.
+func (z nat) setBytes(buf []byte) nat {
+	z = z.make((len(buf) + _S - 1) / _S)
+
+	k := 0
+	s := uint(0)
+	var d Word
+	for i := len(buf); i > 0; i-- {
+		d |= Word(buf[i-1]) << s
+		if s += 8; s == _S*8 {
+			z[k] = d
+			k++
+			s = 0
+			d = 0
+		}
+	}
+	if k < len(z) {
+		z[k] = d
+	}
+
+	return z.norm()
+}
diff --git a/src/cmd/compile/internal/big/nat_test.go b/src/cmd/compile/internal/big/nat_test.go
new file mode 100644
index 0000000..b25a89f
--- /dev/null
+++ b/src/cmd/compile/internal/big/nat_test.go
@@ -0,0 +1,518 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package big
+
+import (
+	"runtime"
+	"strings"
+	"testing"
+)
+
+var cmpTests = []struct {
+	x, y nat
+	r    int
+}{
+	{nil, nil, 0},
+	{nil, nat(nil), 0},
+	{nat(nil), nil, 0},
+	{nat(nil), nat(nil), 0},
+	{nat{0}, nat{0}, 0},
+	{nat{0}, nat{1}, -1},
+	{nat{1}, nat{0}, 1},
+	{nat{1}, nat{1}, 0},
+	{nat{0, _M}, nat{1}, 1},
+	{nat{1}, nat{0, _M}, -1},
+	{nat{1, _M}, nat{0, _M}, 1},
+	{nat{0, _M}, nat{1, _M}, -1},
+	{nat{16, 571956, 8794, 68}, nat{837, 9146, 1, 754489}, -1},
+	{nat{34986, 41, 105, 1957}, nat{56, 7458, 104, 1957}, 1},
+}
+
+func TestCmp(t *testing.T) {
+	for i, a := range cmpTests {
+		r := a.x.cmp(a.y)
+		if r != a.r {
+			t.Errorf("#%d got r = %v; want %v", i, r, a.r)
+		}
+	}
+}
+
+type funNN func(z, x, y nat) nat
+type argNN struct {
+	z, x, y nat
+}
+
+var sumNN = []argNN{
+	{},
+	{nat{1}, nil, nat{1}},
+	{nat{1111111110}, nat{123456789}, nat{987654321}},
+	{nat{0, 0, 0, 1}, nil, nat{0, 0, 0, 1}},
+	{nat{0, 0, 0, 1111111110}, nat{0, 0, 0, 123456789}, nat{0, 0, 0, 987654321}},
+	{nat{0, 0, 0, 1}, nat{0, 0, _M}, nat{0, 0, 1}},
+}
+
+var prodNN = []argNN{
+	{},
+	{nil, nil, nil},
+	{nil, nat{991}, nil},
+	{nat{991}, nat{991}, nat{1}},
+	{nat{991 * 991}, nat{991}, nat{991}},
+	{nat{0, 0, 991 * 991}, nat{0, 991}, nat{0, 991}},
+	{nat{1 * 991, 2 * 991, 3 * 991, 4 * 991}, nat{1, 2, 3, 4}, nat{991}},
+	{nat{4, 11, 20, 30, 20, 11, 4}, nat{1, 2, 3, 4}, nat{4, 3, 2, 1}},
+	// 3^100 * 3^28 = 3^128
+	{
+		natFromString("11790184577738583171520872861412518665678211592275841109096961"),
+		natFromString("515377520732011331036461129765621272702107522001"),
+		natFromString("22876792454961"),
+	},
+	// z = 111....1 (70000 digits)
+	// x = 10^(99*700) + ... + 10^1400 + 10^700 + 1
+	// y = 111....1 (700 digits, larger than Karatsuba threshold on 32-bit and 64-bit)
+	{
+		natFromString(strings.Repeat("1", 70000)),
+		natFromString("1" + strings.Repeat(strings.Repeat("0", 699)+"1", 99)),
+		natFromString(strings.Repeat("1", 700)),
+	},
+	// z = 111....1 (20000 digits)
+	// x = 10^10000 + 1
+	// y = 111....1 (10000 digits)
+	{
+		natFromString(strings.Repeat("1", 20000)),
+		natFromString("1" + strings.Repeat("0", 9999) + "1"),
+		natFromString(strings.Repeat("1", 10000)),
+	},
+}
+
+func natFromString(s string) nat {
+	x, _, _, err := nat(nil).scan(strings.NewReader(s), 0, false)
+	if err != nil {
+		panic(err)
+	}
+	return x
+}
+
+func TestSet(t *testing.T) {
+	for _, a := range sumNN {
+		z := nat(nil).set(a.z)
+		if z.cmp(a.z) != 0 {
+			t.Errorf("got z = %v; want %v", z, a.z)
+		}
+	}
+}
+
+func testFunNN(t *testing.T, msg string, f funNN, a argNN) {
+	z := f(nil, a.x, a.y)
+	if z.cmp(a.z) != 0 {
+		t.Errorf("%s%+v\n\tgot z = %v; want %v", msg, a, z, a.z)
+	}
+}
+
+func TestFunNN(t *testing.T) {
+	for _, a := range sumNN {
+		arg := a
+		testFunNN(t, "add", nat.add, arg)
+
+		arg = argNN{a.z, a.y, a.x}
+		testFunNN(t, "add symmetric", nat.add, arg)
+
+		arg = argNN{a.x, a.z, a.y}
+		testFunNN(t, "sub", nat.sub, arg)
+
+		arg = argNN{a.y, a.z, a.x}
+		testFunNN(t, "sub symmetric", nat.sub, arg)
+	}
+
+	for _, a := range prodNN {
+		arg := a
+		testFunNN(t, "mul", nat.mul, arg)
+
+		arg = argNN{a.z, a.y, a.x}
+		testFunNN(t, "mul symmetric", nat.mul, arg)
+	}
+}
+
+var mulRangesN = []struct {
+	a, b uint64
+	prod string
+}{
+	{0, 0, "0"},
+	{1, 1, "1"},
+	{1, 2, "2"},
+	{1, 3, "6"},
+	{10, 10, "10"},
+	{0, 100, "0"},
+	{0, 1e9, "0"},
+	{1, 0, "1"},                    // empty range
+	{100, 1, "1"},                  // empty range
+	{1, 10, "3628800"},             // 10!
+	{1, 20, "2432902008176640000"}, // 20!
+	{1, 100,
+		"933262154439441526816992388562667004907159682643816214685929" +
+			"638952175999932299156089414639761565182862536979208272237582" +
+			"51185210916864000000000000000000000000", // 100!
+	},
+}
+
+func TestMulRangeN(t *testing.T) {
+	for i, r := range mulRangesN {
+		prod := nat(nil).mulRange(r.a, r.b).decimalString()
+		if prod != r.prod {
+			t.Errorf("#%d: got %s; want %s", i, prod, r.prod)
+		}
+	}
+}
+
+// allocBytes returns the number of bytes allocated by invoking f.
+func allocBytes(f func()) uint64 {
+	var stats runtime.MemStats
+	runtime.ReadMemStats(&stats)
+	t := stats.TotalAlloc
+	f()
+	runtime.ReadMemStats(&stats)
+	return stats.TotalAlloc - t
+}
+
+// TestMulUnbalanced tests that multiplying numbers of different lengths
+// does not cause deep recursion and in turn allocate too much memory.
+// Test case for issue 3807.
+func TestMulUnbalanced(t *testing.T) {
+	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
+	x := rndNat(50000)
+	y := rndNat(40)
+	allocSize := allocBytes(func() {
+		nat(nil).mul(x, y)
+	})
+	inputSize := uint64(len(x)+len(y)) * _S
+	if ratio := allocSize / uint64(inputSize); ratio > 10 {
+		t.Errorf("multiplication uses too much memory (%d > %d times the size of inputs)", allocSize, ratio)
+	}
+}
+
+func rndNat(n int) nat {
+	return nat(rndV(n)).norm()
+}
+
+func BenchmarkMul(b *testing.B) {
+	mulx := rndNat(1e4)
+	muly := rndNat(1e4)
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		var z nat
+		z.mul(mulx, muly)
+	}
+}
+
+func TestLeadingZeros(t *testing.T) {
+	var x Word = _B >> 1
+	for i := 0; i <= _W; i++ {
+		if int(leadingZeros(x)) != i {
+			t.Errorf("failed at %x: got %d want %d", x, leadingZeros(x), i)
+		}
+		x >>= 1
+	}
+}
+
+type shiftTest struct {
+	in    nat
+	shift uint
+	out   nat
+}
+
+var leftShiftTests = []shiftTest{
+	{nil, 0, nil},
+	{nil, 1, nil},
+	{natOne, 0, natOne},
+	{natOne, 1, natTwo},
+	{nat{1 << (_W - 1)}, 1, nat{0}},
+	{nat{1 << (_W - 1), 0}, 1, nat{0, 1}},
+}
+
+func TestShiftLeft(t *testing.T) {
+	for i, test := range leftShiftTests {
+		var z nat
+		z = z.shl(test.in, test.shift)
+		for j, d := range test.out {
+			if j >= len(z) || z[j] != d {
+				t.Errorf("#%d: got: %v want: %v", i, z, test.out)
+				break
+			}
+		}
+	}
+}
+
+var rightShiftTests = []shiftTest{
+	{nil, 0, nil},
+	{nil, 1, nil},
+	{natOne, 0, natOne},
+	{natOne, 1, nil},
+	{natTwo, 1, natOne},
+	{nat{0, 1}, 1, nat{1 << (_W - 1)}},
+	{nat{2, 1, 1}, 1, nat{1<<(_W-1) + 1, 1 << (_W - 1)}},
+}
+
+func TestShiftRight(t *testing.T) {
+	for i, test := range rightShiftTests {
+		var z nat
+		z = z.shr(test.in, test.shift)
+		for j, d := range test.out {
+			if j >= len(z) || z[j] != d {
+				t.Errorf("#%d: got: %v want: %v", i, z, test.out)
+				break
+			}
+		}
+	}
+}
+
+type modWTest struct {
+	in       string
+	dividend string
+	out      string
+}
+
+var modWTests32 = []modWTest{
+	{"23492635982634928349238759823742", "252341", "220170"},
+}
+
+var modWTests64 = []modWTest{
+	{"6527895462947293856291561095690465243862946", "524326975699234", "375066989628668"},
+}
+
+func runModWTests(t *testing.T, tests []modWTest) {
+	for i, test := range tests {
+		in, _ := new(Int).SetString(test.in, 10)
+		d, _ := new(Int).SetString(test.dividend, 10)
+		out, _ := new(Int).SetString(test.out, 10)
+
+		r := in.abs.modW(d.abs[0])
+		if r != out.abs[0] {
+			t.Errorf("#%d failed: got %d want %s", i, r, out)
+		}
+	}
+}
+
+func TestModW(t *testing.T) {
+	if _W >= 32 {
+		runModWTests(t, modWTests32)
+	}
+	if _W >= 64 {
+		runModWTests(t, modWTests64)
+	}
+}
+
+func TestTrailingZeroBits(t *testing.T) {
+	// test 0 case explicitly
+	if n := trailingZeroBits(0); n != 0 {
+		t.Errorf("got trailingZeroBits(0) = %d; want 0", n)
+	}
+
+	x := Word(1)
+	for i := uint(0); i < _W; i++ {
+		n := trailingZeroBits(x)
+		if n != i {
+			t.Errorf("got trailingZeroBits(%#x) = %d; want %d", x, n, i%_W)
+		}
+		x <<= 1
+	}
+
+	// test 0 case explicitly
+	if n := nat(nil).trailingZeroBits(); n != 0 {
+		t.Errorf("got nat(nil).trailingZeroBits() = %d; want 0", n)
+	}
+
+	y := nat(nil).set(natOne)
+	for i := uint(0); i <= 3*_W; i++ {
+		n := y.trailingZeroBits()
+		if n != i {
+			t.Errorf("got 0x%s.trailingZeroBits() = %d; want %d", y.hexString(), n, i)
+		}
+		y = y.shl(y, 1)
+	}
+}
+
+var expNNTests = []struct {
+	x, y, m string
+	out     string
+}{
+	{"0", "0", "0", "1"},
+	{"0", "0", "1", "0"},
+	{"1", "1", "1", "0"},
+	{"2", "1", "1", "0"},
+	{"2", "2", "1", "0"},
+	{"10", "100000000000", "1", "0"},
+	{"0x8000000000000000", "2", "", "0x40000000000000000000000000000000"},
+	{"0x8000000000000000", "2", "6719", "4944"},
+	{"0x8000000000000000", "3", "6719", "5447"},
+	{"0x8000000000000000", "1000", "6719", "1603"},
+	{"0x8000000000000000", "1000000", "6719", "3199"},
+	{
+		"2938462938472983472983659726349017249287491026512746239764525612965293865296239471239874193284792387498274256129746192347",
+		"298472983472983471903246121093472394872319615612417471234712061",
+		"29834729834729834729347290846729561262544958723956495615629569234729836259263598127342374289365912465901365498236492183464",
+		"23537740700184054162508175125554701713153216681790245129157191391322321508055833908509185839069455749219131480588829346291",
+	},
+}
+
+func TestExpNN(t *testing.T) {
+	for i, test := range expNNTests {
+		x := natFromString(test.x)
+		y := natFromString(test.y)
+		out := natFromString(test.out)
+
+		var m nat
+		if len(test.m) > 0 {
+			m = natFromString(test.m)
+		}
+
+		z := nat(nil).expNN(x, y, m)
+		if z.cmp(out) != 0 {
+			t.Errorf("#%d got %s want %s", i, z.decimalString(), out.decimalString())
+		}
+	}
+}
+
+func ExpHelper(b *testing.B, x, y Word) {
+	var z nat
+	for i := 0; i < b.N; i++ {
+		z.expWW(x, y)
+	}
+}
+
+func BenchmarkExp3Power0x10(b *testing.B)     { ExpHelper(b, 3, 0x10) }
+func BenchmarkExp3Power0x40(b *testing.B)     { ExpHelper(b, 3, 0x40) }
+func BenchmarkExp3Power0x100(b *testing.B)    { ExpHelper(b, 3, 0x100) }
+func BenchmarkExp3Power0x400(b *testing.B)    { ExpHelper(b, 3, 0x400) }
+func BenchmarkExp3Power0x1000(b *testing.B)   { ExpHelper(b, 3, 0x1000) }
+func BenchmarkExp3Power0x4000(b *testing.B)   { ExpHelper(b, 3, 0x4000) }
+func BenchmarkExp3Power0x10000(b *testing.B)  { ExpHelper(b, 3, 0x10000) }
+func BenchmarkExp3Power0x40000(b *testing.B)  { ExpHelper(b, 3, 0x40000) }
+func BenchmarkExp3Power0x100000(b *testing.B) { ExpHelper(b, 3, 0x100000) }
+func BenchmarkExp3Power0x400000(b *testing.B) { ExpHelper(b, 3, 0x400000) }
+
+func fibo(n int) nat {
+	switch n {
+	case 0:
+		return nil
+	case 1:
+		return nat{1}
+	}
+	f0 := fibo(0)
+	f1 := fibo(1)
+	var f2 nat
+	for i := 1; i < n; i++ {
+		f2 = f2.add(f0, f1)
+		f0, f1, f2 = f1, f2, f0
+	}
+	return f1
+}
+
+var fiboNums = []string{
+	"0",
+	"55",
+	"6765",
+	"832040",
+	"102334155",
+	"12586269025",
+	"1548008755920",
+	"190392490709135",
+	"23416728348467685",
+	"2880067194370816120",
+	"354224848179261915075",
+}
+
+func TestFibo(t *testing.T) {
+	for i, want := range fiboNums {
+		n := i * 10
+		got := fibo(n).decimalString()
+		if got != want {
+			t.Errorf("fibo(%d) failed: got %s want %s", n, got, want)
+		}
+	}
+}
+
+func BenchmarkFibo(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		fibo(1e0)
+		fibo(1e1)
+		fibo(1e2)
+		fibo(1e3)
+		fibo(1e4)
+		fibo(1e5)
+	}
+}
+
+var bitTests = []struct {
+	x    string
+	i    uint
+	want uint
+}{
+	{"0", 0, 0},
+	{"0", 1, 0},
+	{"0", 1000, 0},
+
+	{"0x1", 0, 1},
+	{"0x10", 0, 0},
+	{"0x10", 3, 0},
+	{"0x10", 4, 1},
+	{"0x10", 5, 0},
+
+	{"0x8000000000000000", 62, 0},
+	{"0x8000000000000000", 63, 1},
+	{"0x8000000000000000", 64, 0},
+
+	{"0x3" + strings.Repeat("0", 32), 127, 0},
+	{"0x3" + strings.Repeat("0", 32), 128, 1},
+	{"0x3" + strings.Repeat("0", 32), 129, 1},
+	{"0x3" + strings.Repeat("0", 32), 130, 0},
+}
+
+func TestBit(t *testing.T) {
+	for i, test := range bitTests {
+		x := natFromString(test.x)
+		if got := x.bit(test.i); got != test.want {
+			t.Errorf("#%d: %s.bit(%d) = %v; want %v", i, test.x, test.i, got, test.want)
+		}
+	}
+}
+
+var stickyTests = []struct {
+	x    string
+	i    uint
+	want uint
+}{
+	{"0", 0, 0},
+	{"0", 1, 0},
+	{"0", 1000, 0},
+
+	{"0x1", 0, 0},
+	{"0x1", 1, 1},
+
+	{"0x1350", 0, 0},
+	{"0x1350", 4, 0},
+	{"0x1350", 5, 1},
+
+	{"0x8000000000000000", 63, 0},
+	{"0x8000000000000000", 64, 1},
+
+	{"0x1" + strings.Repeat("0", 100), 400, 0},
+	{"0x1" + strings.Repeat("0", 100), 401, 1},
+}
+
+func TestSticky(t *testing.T) {
+	for i, test := range stickyTests {
+		x := natFromString(test.x)
+		if got := x.sticky(test.i); got != test.want {
+			t.Errorf("#%d: %s.sticky(%d) = %v; want %v", i, test.x, test.i, got, test.want)
+		}
+		if test.want == 1 {
+			// all subsequent i's should also return 1
+			for d := uint(1); d <= 3; d++ {
+				if got := x.sticky(test.i + d); got != 1 {
+					t.Errorf("#%d: %s.sticky(%d) = %v; want %v", i, test.x, test.i+d, got, 1)
+				}
+			}
+		}
+	}
+}
diff --git a/src/cmd/compile/internal/big/natconv.go b/src/cmd/compile/internal/big/natconv.go
new file mode 100644
index 0000000..022dcfe
--- /dev/null
+++ b/src/cmd/compile/internal/big/natconv.go
@@ -0,0 +1,495 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements nat-to-string conversion functions.
+
+package big
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"sync"
+)
+
+// MaxBase is the largest number base accepted for string conversions.
+const MaxBase = 'z' - 'a' + 10 + 1
+
+// maxPow returns (b**n, n) such that b**n is the largest power b**n <= _M.
+// For instance maxPow(10) == (1e19, 19) for 19 decimal digits in a 64-bit Word.
+// In other words, at most n digits in base b fit into a Word.
+// TODO(gri) replace this with a table, generated at build time.
+func maxPow(b Word) (p Word, n int) {
+	p, n = b, 1 // assuming b <= _M
+	for max := _M / b; p <= max; {
+		// p == b**n && p <= max
+		p *= b
+		n++
+	}
+	// p == b**n && p <= _M
+	return
+}
+
+// pow returns x**n for n > 0, and 1 otherwise.
+func pow(x Word, n int) (p Word) {
+	// n == sum of bi * 2**i, for 0 <= i < imax, and bi is 0 or 1
+	// thus x**n == product of x**(2**i) for all i where bi == 1
+	// (Russian Peasant Method for exponentiation)
+	p = 1
+	for n > 0 {
+		if n&1 != 0 {
+			p *= x
+		}
+		x *= x
+		n >>= 1
+	}
+	return
+}
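+
+// For example (editor's note), pow(3, 5) with n = 101 (binary):
+// the low 1 bit sets p = 3, x squares to 9 and then 81, and the
+// high 1 bit sets p = 3*81 = 243 = 3^5.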
+
+// scan scans the number corresponding to the longest possible prefix
+// from r representing an unsigned number in a given conversion base.
+// It returns the corresponding natural number res, the actual base b,
+// a digit count, and a read or syntax error err, if any.
+//
+//	number   = [ prefix ] mantissa .
+//	prefix   = "0" [ "x" | "X" | "b" | "B" ] .
+//	mantissa = digits | digits "." [ digits ] | "." digits .
+//	digits   = digit { digit } .
+//	digit    = "0" ... "9" | "a" ... "z" | "A" ... "Z" .
+//
+// Unless fracOk is set, the base argument must be 0 or a value between
+// 2 and MaxBase. If fracOk is set, the base argument must be one of
+// 0, 2, 10, or 16. Providing an invalid base argument leads to a run-
+// time panic.
+//
+// For base 0, the number prefix determines the actual base: A prefix of
+// ``0x'' or ``0X'' selects base 16; if fracOk is not set, the ``0'' prefix
+// selects base 8, and a ``0b'' or ``0B'' prefix selects base 2. Otherwise
+// the selected base is 10 and no prefix is accepted.
+//
+// If fracOk is set, an octal prefix is ignored (a leading ``0'' simply
+// stands for a zero digit), and a period followed by a fractional part
+// is permitted. The result value is computed as if there were no period
+// present; and the count value is used to determine the fractional part.
+//
+// A result digit count > 0 corresponds to the number of (non-prefix) digits
+// parsed. A digit count <= 0 indicates the presence of a period (possible
+// only if fracOk is set), and -count is the number of fractional digits found.
+// In this case, the actual value of the scanned number is res * b**count.
+//
+func (z nat) scan(r io.ByteScanner, base int, fracOk bool) (res nat, b, count int, err error) {
+	// reject illegal bases
+	baseOk := base == 0 ||
+		!fracOk && 2 <= base && base <= MaxBase ||
+		fracOk && (base == 2 || base == 10 || base == 16)
+	if !baseOk {
+		panic(fmt.Sprintf("illegal number base %d", base))
+	}
+
+	// one char look-ahead
+	ch, err := r.ReadByte()
+	if err != nil {
+		return
+	}
+
+	// determine actual base
+	b = base
+	if base == 0 {
+		// actual base is 10 unless there's a base prefix
+		b = 10
+		if ch == '0' {
+			count = 1
+			switch ch, err = r.ReadByte(); err {
+			case nil:
+				// possibly one of 0x, 0X, 0b, 0B
+				if !fracOk {
+					b = 8
+				}
+				switch ch {
+				case 'x', 'X':
+					b = 16
+				case 'b', 'B':
+					b = 2
+				}
+				switch b {
+				case 16, 2:
+					count = 0 // prefix is not counted
+					if ch, err = r.ReadByte(); err != nil {
+						// io.EOF is also an error in this case
+						return
+					}
+				case 8:
+					count = 0 // prefix is not counted
+				}
+			case io.EOF:
+				// input is "0"
+				res = z[:0]
+				err = nil
+				return
+			default:
+				// read error
+				return
+			}
+		}
+	}
+
+	// convert string
+	// Algorithm: Collect digits in groups of at most n digits in di
+	// and then use mulAddWW for every such group to add them to the
+	// result.
+	z = z[:0]
+	b1 := Word(b)
+	bn, n := maxPow(b1) // at most n digits in base b1 fit into Word
+	di := Word(0)       // 0 <= di < b1**i < bn
+	i := 0              // 0 <= i < n
+	dp := -1            // position of decimal point
+	for {
+		if fracOk && ch == '.' {
+			fracOk = false
+			dp = count
+			// advance
+			if ch, err = r.ReadByte(); err != nil {
+				if err == io.EOF {
+					err = nil
+					break
+				}
+				return
+			}
+		}
+
+		// convert rune into digit value d1
+		var d1 Word
+		switch {
+		case '0' <= ch && ch <= '9':
+			d1 = Word(ch - '0')
+		case 'a' <= ch && ch <= 'z':
+			d1 = Word(ch - 'a' + 10)
+		case 'A' <= ch && ch <= 'Z':
+			d1 = Word(ch - 'A' + 10)
+		default:
+			d1 = MaxBase + 1
+		}
+		if d1 >= b1 {
+			r.UnreadByte() // ch does not belong to number anymore
+			break
+		}
+		count++
+
+		// collect d1 in di
+		di = di*b1 + d1
+		i++
+
+		// if di is "full", add it to the result
+		if i == n {
+			z = z.mulAddWW(z, bn, di)
+			di = 0
+			i = 0
+		}
+
+		// advance
+		if ch, err = r.ReadByte(); err != nil {
+			if err == io.EOF {
+				err = nil
+				break
+			}
+			return
+		}
+	}
+
+	if count == 0 {
+		// no digits found
+		switch {
+		case base == 0 && b == 8:
+			// there was only the octal prefix 0 (possibly followed by digits > 7);
+			// count as one digit and return base 10, not 8
+			count = 1
+			b = 10
+		case base != 0 || b != 8:
+			// there was neither a mantissa digit nor the octal prefix 0
+			err = errors.New("syntax error scanning number")
+		}
+		return
+	}
+	// count > 0
+
+	// add remaining digits to result
+	if i > 0 {
+		z = z.mulAddWW(z, pow(b1, i), di)
+	}
+	res = z.norm()
+
+	// adjust for fraction, if any
+	if dp >= 0 {
+		// 0 <= dp <= count, and count > 0
+		count = dp - count
+	}
+
+	return
+}
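+
+// As an illustration of the count semantics above (a sketch; any
+// io.ByteScanner works, a strings.Reader is used here for concreteness):
+//
+//	var z nat
+//	res, b, count, _ := z.scan(strings.NewReader("12.3"), 10, true)
+//	// res == nat{123}, b == 10, count == -1,
+//	// i.e. the value scanned is 123 * 10**-1 == 12.3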
+
+// Character sets for string conversion.
+const (
+	lowercaseDigits = "0123456789abcdefghijklmnopqrstuvwxyz"
+	uppercaseDigits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+)
+
+// decimalString returns a decimal representation of x.
+// It calls x.string with the charset "0123456789".
+func (x nat) decimalString() string {
+	return x.string(lowercaseDigits[:10])
+}
+
+// hexString returns a hexadecimal representation of x.
+// It calls x.string with the charset "0123456789abcdef".
+func (x nat) hexString() string {
+	return x.string(lowercaseDigits[:16])
+}
+
+// string converts x to a string using digits from a charset; a digit with
+// value d is represented by charset[d]. The conversion base is determined
+// by len(charset), which must be >= 2 and <= 256.
+func (x nat) string(charset string) string {
+	b := Word(len(charset))
+
+	// special cases
+	switch {
+	case b < 2 || b > 256:
+		panic("invalid character set length")
+	case len(x) == 0:
+		return string(charset[0])
+	}
+
+	// allocate buffer for conversion
+	i := int(float64(x.bitLen())/math.Log2(float64(b))) + 1 // off by one at most
+	s := make([]byte, i)
+
+	// convert power of two and non power of two bases separately
+	if b == b&-b {
+		// shift is base-b digit size in bits
+		shift := trailingZeroBits(b) // shift > 0 because b >= 2
+		mask := Word(1)<<shift - 1
+		w := x[0]
+		nbits := uint(_W) // number of unprocessed bits in w
+
+		// convert less-significant words
+		for k := 1; k < len(x); k++ {
+			// convert full digits
+			for nbits >= shift {
+				i--
+				s[i] = charset[w&mask]
+				w >>= shift
+				nbits -= shift
+			}
+
+			// convert any partial leading digit and advance to next word
+			if nbits == 0 {
+				// no partial digit remaining, just advance
+				w = x[k]
+				nbits = _W
+			} else {
+				// partial digit in current (k-1) and next (k) word
+				w |= x[k] << nbits
+				i--
+				s[i] = charset[w&mask]
+
+				// advance
+				w = x[k] >> (shift - nbits)
+				nbits = _W - (shift - nbits)
+			}
+		}
+
+		// convert digits of most-significant word (omit leading zeros)
+		for w != 0 {
+			i--
+			s[i] = charset[w&mask]
+			w >>= shift
+		}
+
+	} else {
+		bb, ndigits := maxPow(Word(b))
+
+		// construct table of successive squares of bb**leafSize to use in subdivisions
+		// result (table != nil) <=> (len(x) > leafSize > 0)
+		table := divisors(len(x), b, ndigits, bb)
+
+		// preserve x, create local copy for use by convertWords
+		q := nat(nil).set(x)
+
+		// convert q to string s in base b
+		q.convertWords(s, charset, b, ndigits, bb, table)
+
+		// strip leading zeros
+		// (x != 0; thus s must contain at least one non-zero digit
+		// and the loop will terminate)
+		i = 0
+		for zero := charset[0]; s[i] == zero; {
+			i++
+		}
+	}
+
+	return string(s[i:])
+}
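+
+// For illustration, string(x) for x == nat{0xdeadbeef} with the 16-character
+// charset takes the power-of-two path above: shift == 4 and mask == 0xf, so
+// each hex digit is peeled off by s[i] = charset[w&mask]; w >>= shift, giving
+// "deadbeef" without any division.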
+
+// Convert words of q to base b digits in s. If q is large, it is recursively "split in half"
+// by nat/nat division using tabulated divisors. Otherwise, it is converted iteratively using
+// repeated nat/Word division.
+//
+// The iterative method processes n Words by n divW() calls, each of which visits every Word in the
+// incrementally shortened q for a total of n + (n-1) + (n-2) ... + 2 + 1, or n(n+1)/2 divW()'s.
+// Recursive conversion divides q by its approximate square root, yielding two parts, each half
+// the size of q. Using the iterative method on both halves means 2 * (n/2)(n/2 + 1)/2 divW()'s
+// plus the expensive long div(). Asymptotically, the ratio is favorable at 1/2 the divW()'s, and
+// is made better by splitting the subblocks recursively. Best is to split blocks until one more
+// split would take longer (because of the nat/nat div()) than the twice as many divW()'s of the
+// iterative approach. This threshold is represented by leafSize. Benchmarking of leafSize in the
+// range 2..64 shows that values of 8 and 16 work well, with a 4x speedup at medium lengths and
+// ~30x for 20000 digits. Use nat_test.go's BenchmarkLeafSize tests to optimize leafSize for
+// specific hardware.
+//
+func (q nat) convertWords(s []byte, charset string, b Word, ndigits int, bb Word, table []divisor) {
+	// split larger blocks recursively
+	if table != nil {
+		// len(q) > leafSize > 0
+		var r nat
+		index := len(table) - 1
+		for len(q) > leafSize {
+			// find divisor close to sqrt(q) if possible, but in any case < q
+			maxLength := q.bitLen()     // ~= log2 q, or at least that of the largest possible q of this bit length
+			minLength := maxLength >> 1 // ~= log2 sqrt(q)
+			for index > 0 && table[index-1].nbits > minLength {
+				index-- // desired
+			}
+			if table[index].nbits >= maxLength && table[index].bbb.cmp(q) >= 0 {
+				index--
+				if index < 0 {
+					panic("internal inconsistency")
+				}
+			}
+
+			// split q into the two-digit number (q'*bbb + r) to form independent subblocks
+			q, r = q.div(r, q, table[index].bbb)
+
+			// convert subblocks and collect results in s[:h] and s[h:]
+			h := len(s) - table[index].ndigits
+			r.convertWords(s[h:], charset, b, ndigits, bb, table[0:index])
+			s = s[:h] // == q.convertWords(s, charset, b, ndigits, bb, table[0:index+1])
+		}
+	}
+
+	// having split any large blocks, now process the remaining (small) block iteratively
+	i := len(s)
+	var r Word
+	if b == 10 {
+		// hard-coding for 10 here speeds this up by 1.25x (allows for / and % by constants)
+		for len(q) > 0 {
+			// extract least significant, base bb "digit"
+			q, r = q.divW(q, bb)
+			for j := 0; j < ndigits && i > 0; j++ {
+				i--
+				// avoid % computation since r%10 == r - int(r/10)*10;
+				// this appears to be faster for BenchmarkString10000Base10
+				// and smaller strings (but a bit slower for larger ones)
+				t := r / 10
+				s[i] = charset[r-t<<3-t-t] // TODO(gri) replace w/ t*10 once compiler produces better code
+				r = t
+			}
+		}
+	} else {
+		for len(q) > 0 {
+			// extract least significant, base bb "digit"
+			q, r = q.divW(q, bb)
+			for j := 0; j < ndigits && i > 0; j++ {
+				i--
+				s[i] = charset[r%b]
+				r /= b
+			}
+		}
+	}
+
+	// prepend high-order zeroes
+	zero := charset[0]
+	for i > 0 { // while need more leading zeroes
+		i--
+		s[i] = zero
+	}
+}
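+
+// To make the trade-off above concrete: converting n == 64 Words iteratively
+// costs n(n+1)/2 == 2080 divW()'s, while one split into two 32-Word halves
+// costs 2 * (32*33)/2 == 1056 divW()'s plus a single nat/nat div() -- roughly
+// half, and the halves are then split recursively in turn down to leafSize.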
+
+// Split blocks greater than leafSize Words (or set to 0 to disable recursive conversion)
+// Benchmark and configure leafSize using: go test -bench="Leaf"
+//   8 and 16 effective on 3.0 GHz Xeon "Clovertown" CPU (128 byte cache lines)
+//   8 and 16 effective on 2.66 GHz Core 2 Duo "Penryn" CPU
+var leafSize int = 8 // number of Word-size binary values treated as a monolithic block
+
+type divisor struct {
+	bbb     nat // divisor
+	nbits   int // bit length of divisor (discounting leading zeroes) ~= log2(bbb)
+	ndigits int // digit length of divisor in terms of output base digits
+}
+
+var cacheBase10 struct {
+	sync.Mutex
+	table [64]divisor // cached divisors for base 10
+}
+
+// expWW computes x**y
+func (z nat) expWW(x, y Word) nat {
+	return z.expNN(nat(nil).setWord(x), nat(nil).setWord(y), nil)
+}
+
+// construct table of successive squares of bb**leafSize to use in subdivisions
+func divisors(m int, b Word, ndigits int, bb Word) []divisor {
+	// only compute table when recursive conversion is enabled and x is large
+	if leafSize == 0 || m <= leafSize {
+		return nil
+	}
+
+	// determine k where (bb**leafSize)**(2**k) >= sqrt(x)
+	k := 1
+	for words := leafSize; words < m>>1 && k < len(cacheBase10.table); words <<= 1 {
+		k++
+	}
+
+	// reuse and extend existing table of divisors or create new table as appropriate
+	var table []divisor // for b == 10, table overlaps with cacheBase10.table
+	if b == 10 {
+		cacheBase10.Lock()
+		table = cacheBase10.table[0:k] // reuse old table for this conversion
+	} else {
+		table = make([]divisor, k) // create new table for this conversion
+	}
+
+	// extend table
+	if table[k-1].ndigits == 0 {
+		// add new entries as needed
+		var larger nat
+		for i := 0; i < k; i++ {
+			if table[i].ndigits == 0 {
+				if i == 0 {
+					table[0].bbb = nat(nil).expWW(bb, Word(leafSize))
+					table[0].ndigits = ndigits * leafSize
+				} else {
+					table[i].bbb = nat(nil).mul(table[i-1].bbb, table[i-1].bbb)
+					table[i].ndigits = 2 * table[i-1].ndigits
+				}
+
+				// optimization: exploit aggregated extra bits in macro blocks
+				larger = nat(nil).set(table[i].bbb)
+				for mulAddVWW(larger, larger, b, 0) == 0 {
+					table[i].bbb = table[i].bbb.set(larger)
+					table[i].ndigits++
+				}
+
+				table[i].nbits = table[i].bbb.bitLen()
+			}
+		}
+	}
+
+	if b == 10 {
+		cacheBase10.Unlock()
+	}
+
+	return table
+}
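+
+// For illustration, with b == 10 on a 64-bit Word (bb == 1e19, ndigits == 19)
+// and leafSize == 8, table[0].bbb starts as bb**leafSize == 10**152 with
+// ndigits == 152, and each subsequent entry is the square of its predecessor,
+// before the extra-bits loop above possibly multiplies in further factors of b.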
diff --git a/src/cmd/compile/internal/big/natconv_test.go b/src/cmd/compile/internal/big/natconv_test.go
new file mode 100644
index 0000000..f321fbc
--- /dev/null
+++ b/src/cmd/compile/internal/big/natconv_test.go
@@ -0,0 +1,425 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package big
+
+import (
+	"io"
+	"strings"
+	"testing"
+)
+
+func toString(x nat, charset string) string {
+	base := len(charset)
+
+	// special cases
+	switch {
+	case base < 2:
+		panic("illegal base")
+	case len(x) == 0:
+		return string(charset[0])
+	}
+
+	// allocate buffer for conversion
+	i := x.bitLen()/log2(Word(base)) + 1 // +1: round up
+	s := make([]byte, i)
+
+	// don't destroy x
+	q := nat(nil).set(x)
+
+	// convert
+	for len(q) > 0 {
+		i--
+		var r Word
+		q, r = q.divW(q, Word(base))
+		s[i] = charset[r]
+	}
+
+	return string(s[i:])
+}
+
+var strTests = []struct {
+	x nat    // nat value to be converted
+	c string // conversion charset
+	s string // expected result
+}{
+	{nil, "01", "0"},
+	{nat{1}, "01", "1"},
+	{nat{0xc5}, "01", "11000101"},
+	{nat{03271}, lowercaseDigits[:8], "3271"},
+	{nat{10}, lowercaseDigits[:10], "10"},
+	{nat{1234567890}, uppercaseDigits[:10], "1234567890"},
+	{nat{0xdeadbeef}, lowercaseDigits[:16], "deadbeef"},
+	{nat{0xdeadbeef}, uppercaseDigits[:16], "DEADBEEF"},
+	{nat{0x229be7}, lowercaseDigits[:17], "1a2b3c"},
+	{nat{0x309663e6}, uppercaseDigits[:32], "O9COV6"},
+}
+
+func TestString(t *testing.T) {
+	// test invalid character set explicitly
+	var panicStr string
+	func() {
+		defer func() {
+			panicStr = recover().(string)
+		}()
+		natOne.string("0")
+	}()
+	if panicStr != "invalid character set length" {
+		t.Errorf("expected panic for invalid character set")
+	}
+
+	for _, a := range strTests {
+		s := a.x.string(a.c)
+		if s != a.s {
+			t.Errorf("string%+v\n\tgot s = %s; want %s", a, s, a.s)
+		}
+
+		x, b, _, err := nat(nil).scan(strings.NewReader(a.s), len(a.c), false)
+		if x.cmp(a.x) != 0 {
+			t.Errorf("scan%+v\n\tgot z = %v; want %v", a, x, a.x)
+		}
+		if b != len(a.c) {
+			t.Errorf("scan%+v\n\tgot b = %d; want %d", a, b, len(a.c))
+		}
+		if err != nil {
+			t.Errorf("scan%+v\n\tgot error = %s", a, err)
+		}
+	}
+}
+
+var natScanTests = []struct {
+	s     string // string to be scanned
+	base  int    // input base
+	frac  bool   // fraction ok
+	x     nat    // expected nat
+	b     int    // expected base
+	count int    // expected digit count
+	ok    bool   // expected success
+	next  rune   // next character (or 0, if at EOF)
+}{
+	// error: no mantissa
+	{},
+	{s: "?"},
+	{base: 10},
+	{base: 36},
+	{s: "?", base: 10},
+	{s: "0x"},
+	{s: "345", base: 2},
+
+	// error: incorrect use of decimal point
+	{s: ".0"},
+	{s: ".0", base: 10},
+	{s: ".", base: 0},
+	{s: "0x.0"},
+
+	// no errors
+	{"0", 0, false, nil, 10, 1, true, 0},
+	{"0", 10, false, nil, 10, 1, true, 0},
+	{"0", 36, false, nil, 36, 1, true, 0},
+	{"1", 0, false, nat{1}, 10, 1, true, 0},
+	{"1", 10, false, nat{1}, 10, 1, true, 0},
+	{"0 ", 0, false, nil, 10, 1, true, ' '},
+	{"08", 0, false, nil, 10, 1, true, '8'},
+	{"08", 10, false, nat{8}, 10, 2, true, 0},
+	{"018", 0, false, nat{1}, 8, 1, true, '8'},
+	{"0b1", 0, false, nat{1}, 2, 1, true, 0},
+	{"0b11000101", 0, false, nat{0xc5}, 2, 8, true, 0},
+	{"03271", 0, false, nat{03271}, 8, 4, true, 0},
+	{"10ab", 0, false, nat{10}, 10, 2, true, 'a'},
+	{"1234567890", 0, false, nat{1234567890}, 10, 10, true, 0},
+	{"xyz", 36, false, nat{(33*36+34)*36 + 35}, 36, 3, true, 0},
+	{"xyz?", 36, false, nat{(33*36+34)*36 + 35}, 36, 3, true, '?'},
+	{"0x", 16, false, nil, 16, 1, true, 'x'},
+	{"0xdeadbeef", 0, false, nat{0xdeadbeef}, 16, 8, true, 0},
+	{"0XDEADBEEF", 0, false, nat{0xdeadbeef}, 16, 8, true, 0},
+
+	// no errors, decimal point
+	{"0.", 0, false, nil, 10, 1, true, '.'},
+	{"0.", 10, true, nil, 10, 0, true, 0},
+	{"0.1.2", 10, true, nat{1}, 10, -1, true, '.'},
+	{".000", 10, true, nil, 10, -3, true, 0},
+	{"12.3", 10, true, nat{123}, 10, -1, true, 0},
+	{"012.345", 10, true, nat{12345}, 10, -3, true, 0},
+}
+
+func TestScanBase(t *testing.T) {
+	for _, a := range natScanTests {
+		r := strings.NewReader(a.s)
+		x, b, count, err := nat(nil).scan(r, a.base, a.frac)
+		if err == nil && !a.ok {
+			t.Errorf("scan%+v\n\texpected error", a)
+		}
+		if err != nil {
+			if a.ok {
+				t.Errorf("scan%+v\n\tgot error = %s", a, err)
+			}
+			continue
+		}
+		if x.cmp(a.x) != 0 {
+			t.Errorf("scan%+v\n\tgot z = %v; want %v", a, x, a.x)
+		}
+		if b != a.b {
+			t.Errorf("scan%+v\n\tgot b = %d; want %d", a, b, a.base)
+		}
+		if count != a.count {
+			t.Errorf("scan%+v\n\tgot count = %d; want %d", a, count, a.count)
+		}
+		next, _, err := r.ReadRune()
+		if err == io.EOF {
+			next = 0
+			err = nil
+		}
+		if err == nil && next != a.next {
+			t.Errorf("scan%+v\n\tgot next = %q; want %q", a, next, a.next)
+		}
+	}
+}
+
+var pi = "3" +
+	"14159265358979323846264338327950288419716939937510582097494459230781640628620899862803482534211706798214808651" +
+	"32823066470938446095505822317253594081284811174502841027019385211055596446229489549303819644288109756659334461" +
+	"28475648233786783165271201909145648566923460348610454326648213393607260249141273724587006606315588174881520920" +
+	"96282925409171536436789259036001133053054882046652138414695194151160943305727036575959195309218611738193261179" +
+	"31051185480744623799627495673518857527248912279381830119491298336733624406566430860213949463952247371907021798" +
+	"60943702770539217176293176752384674818467669405132000568127145263560827785771342757789609173637178721468440901" +
+	"22495343014654958537105079227968925892354201995611212902196086403441815981362977477130996051870721134999999837" +
+	"29780499510597317328160963185950244594553469083026425223082533446850352619311881710100031378387528865875332083" +
+	"81420617177669147303598253490428755468731159562863882353787593751957781857780532171226806613001927876611195909" +
+	"21642019893809525720106548586327886593615338182796823030195203530185296899577362259941389124972177528347913151" +
+	"55748572424541506959508295331168617278558890750983817546374649393192550604009277016711390098488240128583616035" +
+	"63707660104710181942955596198946767837449448255379774726847104047534646208046684259069491293313677028989152104" +
+	"75216205696602405803815019351125338243003558764024749647326391419927260426992279678235478163600934172164121992" +
+	"45863150302861829745557067498385054945885869269956909272107975093029553211653449872027559602364806654991198818" +
+	"34797753566369807426542527862551818417574672890977772793800081647060016145249192173217214772350141441973568548" +
+	"16136115735255213347574184946843852332390739414333454776241686251898356948556209921922218427255025425688767179" +
+	"04946016534668049886272327917860857843838279679766814541009538837863609506800642251252051173929848960841284886" +
+	"26945604241965285022210661186306744278622039194945047123713786960956364371917287467764657573962413890865832645" +
+	"99581339047802759009946576407895126946839835259570982582262052248940772671947826848260147699090264013639443745" +
+	"53050682034962524517493996514314298091906592509372216964615157098583874105978859597729754989301617539284681382" +
+	"68683868942774155991855925245953959431049972524680845987273644695848653836736222626099124608051243884390451244" +
+	"13654976278079771569143599770012961608944169486855584840635342207222582848864815845602850601684273945226746767" +
+	"88952521385225499546667278239864565961163548862305774564980355936345681743241125150760694794510965960940252288" +
+	"79710893145669136867228748940560101503308617928680920874760917824938589009714909675985261365549781893129784821" +
+	"68299894872265880485756401427047755513237964145152374623436454285844479526586782105114135473573952311342716610" +
+	"21359695362314429524849371871101457654035902799344037420073105785390621983874478084784896833214457138687519435" +
+	"06430218453191048481005370614680674919278191197939952061419663428754440643745123718192179998391015919561814675" +
+	"14269123974894090718649423196156794520809514655022523160388193014209376213785595663893778708303906979207734672" +
+	"21825625996615014215030680384477345492026054146659252014974428507325186660021324340881907104863317346496514539" +
+	"05796268561005508106658796998163574736384052571459102897064140110971206280439039759515677157700420337869936007" +
+	"23055876317635942187312514712053292819182618612586732157919841484882916447060957527069572209175671167229109816" +
+	"90915280173506712748583222871835209353965725121083579151369882091444210067510334671103141267111369908658516398" +
+	"31501970165151168517143765761835155650884909989859982387345528331635507647918535893226185489632132933089857064" +
+	"20467525907091548141654985946163718027098199430992448895757128289059232332609729971208443357326548938239119325" +
+	"97463667305836041428138830320382490375898524374417029132765618093773444030707469211201913020330380197621101100" +
+	"44929321516084244485963766983895228684783123552658213144957685726243344189303968642624341077322697802807318915" +
+	"44110104468232527162010526522721116603966655730925471105578537634668206531098965269186205647693125705863566201" +
+	"85581007293606598764861179104533488503461136576867532494416680396265797877185560845529654126654085306143444318" +
+	"58676975145661406800700237877659134401712749470420562230538994561314071127000407854733269939081454664645880797" +
+	"27082668306343285878569830523580893306575740679545716377525420211495576158140025012622859413021647155097925923" +
+	"09907965473761255176567513575178296664547791745011299614890304639947132962107340437518957359614589019389713111" +
+	"79042978285647503203198691514028708085990480109412147221317947647772622414254854540332157185306142288137585043" +
+	"06332175182979866223717215916077166925474873898665494945011465406284336639379003976926567214638530673609657120" +
+	"91807638327166416274888800786925602902284721040317211860820419000422966171196377921337575114959501566049631862" +
+	"94726547364252308177036751590673502350728354056704038674351362222477158915049530984448933309634087807693259939" +
+	"78054193414473774418426312986080998886874132604721569516239658645730216315981931951673538129741677294786724229" +
+	"24654366800980676928238280689964004824354037014163149658979409243237896907069779422362508221688957383798623001" +
+	"59377647165122893578601588161755782973523344604281512627203734314653197777416031990665541876397929334419521541" +
+	"34189948544473456738316249934191318148092777710386387734317720754565453220777092120190516609628049092636019759" +
+	"88281613323166636528619326686336062735676303544776280350450777235547105859548702790814356240145171806246436267" +
+	"94561275318134078330336254232783944975382437205835311477119926063813346776879695970309833913077109870408591337"
+
+// Test case for BenchmarkScanPi.
+func TestScanPi(t *testing.T) {
+	var x nat
+	z, _, _, err := x.scan(strings.NewReader(pi), 10, false)
+	if err != nil {
+		t.Errorf("scanning pi: %s", err)
+	}
+	if s := z.decimalString(); s != pi {
+		t.Errorf("scanning pi: got %s", s)
+	}
+}
+
+func TestScanPiParallel(t *testing.T) {
+	const n = 2
+	c := make(chan int)
+	for i := 0; i < n; i++ {
+		go func() {
+			TestScanPi(t)
+			c <- 0
+		}()
+	}
+	for i := 0; i < n; i++ {
+		<-c
+	}
+}
+
+func BenchmarkScanPi(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		var x nat
+		x.scan(strings.NewReader(pi), 10, false)
+	}
+}
+
+func BenchmarkStringPiParallel(b *testing.B) {
+	var x nat
+	x, _, _, _ = x.scan(strings.NewReader(pi), 0, false)
+	if x.decimalString() != pi {
+		panic("benchmark incorrect: conversion failed")
+	}
+	b.RunParallel(func(pb *testing.PB) {
+		for pb.Next() {
+			x.decimalString()
+		}
+	})
+}
+
+func BenchmarkScan10Base2(b *testing.B)     { ScanHelper(b, 2, 10, 10) }
+func BenchmarkScan100Base2(b *testing.B)    { ScanHelper(b, 2, 10, 100) }
+func BenchmarkScan1000Base2(b *testing.B)   { ScanHelper(b, 2, 10, 1000) }
+func BenchmarkScan10000Base2(b *testing.B)  { ScanHelper(b, 2, 10, 10000) }
+func BenchmarkScan100000Base2(b *testing.B) { ScanHelper(b, 2, 10, 100000) }
+
+func BenchmarkScan10Base8(b *testing.B)     { ScanHelper(b, 8, 10, 10) }
+func BenchmarkScan100Base8(b *testing.B)    { ScanHelper(b, 8, 10, 100) }
+func BenchmarkScan1000Base8(b *testing.B)   { ScanHelper(b, 8, 10, 1000) }
+func BenchmarkScan10000Base8(b *testing.B)  { ScanHelper(b, 8, 10, 10000) }
+func BenchmarkScan100000Base8(b *testing.B) { ScanHelper(b, 8, 10, 100000) }
+
+func BenchmarkScan10Base10(b *testing.B)     { ScanHelper(b, 10, 10, 10) }
+func BenchmarkScan100Base10(b *testing.B)    { ScanHelper(b, 10, 10, 100) }
+func BenchmarkScan1000Base10(b *testing.B)   { ScanHelper(b, 10, 10, 1000) }
+func BenchmarkScan10000Base10(b *testing.B)  { ScanHelper(b, 10, 10, 10000) }
+func BenchmarkScan100000Base10(b *testing.B) { ScanHelper(b, 10, 10, 100000) }
+
+func BenchmarkScan10Base16(b *testing.B)     { ScanHelper(b, 16, 10, 10) }
+func BenchmarkScan100Base16(b *testing.B)    { ScanHelper(b, 16, 10, 100) }
+func BenchmarkScan1000Base16(b *testing.B)   { ScanHelper(b, 16, 10, 1000) }
+func BenchmarkScan10000Base16(b *testing.B)  { ScanHelper(b, 16, 10, 10000) }
+func BenchmarkScan100000Base16(b *testing.B) { ScanHelper(b, 16, 10, 100000) }
+
+func ScanHelper(b *testing.B, base int, x, y Word) {
+	b.StopTimer()
+	var z nat
+	z = z.expWW(x, y)
+
+	var s string
+	s = z.string(lowercaseDigits[:base])
+	if t := toString(z, lowercaseDigits[:base]); t != s {
+		b.Fatalf("scanning: got %s; want %s", s, t)
+	}
+	b.StartTimer()
+
+	for i := 0; i < b.N; i++ {
+		z.scan(strings.NewReader(s), base, false)
+	}
+}
+
+func BenchmarkString10Base2(b *testing.B)     { StringHelper(b, 2, 10, 10) }
+func BenchmarkString100Base2(b *testing.B)    { StringHelper(b, 2, 10, 100) }
+func BenchmarkString1000Base2(b *testing.B)   { StringHelper(b, 2, 10, 1000) }
+func BenchmarkString10000Base2(b *testing.B)  { StringHelper(b, 2, 10, 10000) }
+func BenchmarkString100000Base2(b *testing.B) { StringHelper(b, 2, 10, 100000) }
+
+func BenchmarkString10Base8(b *testing.B)     { StringHelper(b, 8, 10, 10) }
+func BenchmarkString100Base8(b *testing.B)    { StringHelper(b, 8, 10, 100) }
+func BenchmarkString1000Base8(b *testing.B)   { StringHelper(b, 8, 10, 1000) }
+func BenchmarkString10000Base8(b *testing.B)  { StringHelper(b, 8, 10, 10000) }
+func BenchmarkString100000Base8(b *testing.B) { StringHelper(b, 8, 10, 100000) }
+
+func BenchmarkString10Base10(b *testing.B)     { StringHelper(b, 10, 10, 10) }
+func BenchmarkString100Base10(b *testing.B)    { StringHelper(b, 10, 10, 100) }
+func BenchmarkString1000Base10(b *testing.B)   { StringHelper(b, 10, 10, 1000) }
+func BenchmarkString10000Base10(b *testing.B)  { StringHelper(b, 10, 10, 10000) }
+func BenchmarkString100000Base10(b *testing.B) { StringHelper(b, 10, 10, 100000) }
+
+func BenchmarkString10Base16(b *testing.B)     { StringHelper(b, 16, 10, 10) }
+func BenchmarkString100Base16(b *testing.B)    { StringHelper(b, 16, 10, 100) }
+func BenchmarkString1000Base16(b *testing.B)   { StringHelper(b, 16, 10, 1000) }
+func BenchmarkString10000Base16(b *testing.B)  { StringHelper(b, 16, 10, 10000) }
+func BenchmarkString100000Base16(b *testing.B) { StringHelper(b, 16, 10, 100000) }
+
+func StringHelper(b *testing.B, base int, x, y Word) {
+	b.StopTimer()
+	var z nat
+	z = z.expWW(x, y)
+	z.string(lowercaseDigits[:base]) // warm divisor cache
+	b.StartTimer()
+
+	for i := 0; i < b.N; i++ {
+		_ = z.string(lowercaseDigits[:base])
+	}
+}
+
+func BenchmarkLeafSize0(b *testing.B)  { LeafSizeHelper(b, 10, 0) } // test without splitting
+func BenchmarkLeafSize1(b *testing.B)  { LeafSizeHelper(b, 10, 1) }
+func BenchmarkLeafSize2(b *testing.B)  { LeafSizeHelper(b, 10, 2) }
+func BenchmarkLeafSize3(b *testing.B)  { LeafSizeHelper(b, 10, 3) }
+func BenchmarkLeafSize4(b *testing.B)  { LeafSizeHelper(b, 10, 4) }
+func BenchmarkLeafSize5(b *testing.B)  { LeafSizeHelper(b, 10, 5) }
+func BenchmarkLeafSize6(b *testing.B)  { LeafSizeHelper(b, 10, 6) }
+func BenchmarkLeafSize7(b *testing.B)  { LeafSizeHelper(b, 10, 7) }
+func BenchmarkLeafSize8(b *testing.B)  { LeafSizeHelper(b, 10, 8) }
+func BenchmarkLeafSize9(b *testing.B)  { LeafSizeHelper(b, 10, 9) }
+func BenchmarkLeafSize10(b *testing.B) { LeafSizeHelper(b, 10, 10) }
+func BenchmarkLeafSize11(b *testing.B) { LeafSizeHelper(b, 10, 11) }
+func BenchmarkLeafSize12(b *testing.B) { LeafSizeHelper(b, 10, 12) }
+func BenchmarkLeafSize13(b *testing.B) { LeafSizeHelper(b, 10, 13) }
+func BenchmarkLeafSize14(b *testing.B) { LeafSizeHelper(b, 10, 14) }
+func BenchmarkLeafSize15(b *testing.B) { LeafSizeHelper(b, 10, 15) }
+func BenchmarkLeafSize16(b *testing.B) { LeafSizeHelper(b, 10, 16) }
+func BenchmarkLeafSize32(b *testing.B) { LeafSizeHelper(b, 10, 32) } // try some large lengths
+func BenchmarkLeafSize64(b *testing.B) { LeafSizeHelper(b, 10, 64) }
+
+func LeafSizeHelper(b *testing.B, base Word, size int) {
+	b.StopTimer()
+	originalLeafSize := leafSize
+	resetTable(cacheBase10.table[:])
+	leafSize = size
+	b.StartTimer()
+
+	for d := 1; d <= 10000; d *= 10 {
+		b.StopTimer()
+		var z nat
+		z = z.expWW(base, Word(d))           // build target number
+		_ = z.string(lowercaseDigits[:base]) // warm divisor cache
+		b.StartTimer()
+
+		for i := 0; i < b.N; i++ {
+			_ = z.string(lowercaseDigits[:base])
+		}
+	}
+
+	b.StopTimer()
+	resetTable(cacheBase10.table[:])
+	leafSize = originalLeafSize
+	b.StartTimer()
+}
+
+func resetTable(table []divisor) {
+	if table != nil && table[0].bbb != nil {
+		for i := 0; i < len(table); i++ {
+			table[i].bbb = nil
+			table[i].nbits = 0
+			table[i].ndigits = 0
+		}
+	}
+}
+
+func TestStringPowers(t *testing.T) {
+	var b, p Word
+	for b = 2; b <= 16; b++ {
+		for p = 0; p <= 512; p++ {
+			x := nat(nil).expWW(b, p)
+			xs := x.string(lowercaseDigits[:b])
+			xs2 := toString(x, lowercaseDigits[:b])
+			if xs != xs2 {
+				t.Errorf("failed at %d ** %d in base %d: %s != %s", b, p, b, xs, xs2)
+			}
+		}
+		if b >= 3 && testing.Short() {
+			break
+		}
+	}
+}
diff --git a/src/cmd/compile/internal/big/rat.go b/src/cmd/compile/internal/big/rat.go
new file mode 100644
index 0000000..fb16f18
--- /dev/null
+++ b/src/cmd/compile/internal/big/rat.go
@@ -0,0 +1,570 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements multi-precision rational numbers.
+
+package big
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"math"
+)
+
+// A Rat represents a quotient a/b of arbitrary precision.
+// The zero value for a Rat represents the value 0.
+type Rat struct {
+	// To make zero values for Rat work w/o initialization,
+	// a zero value of b (len(b) == 0) acts like b == 1.
+	// a.neg determines the sign of the Rat, b.neg is ignored.
+	a, b Int
+}
+
+// NewRat creates a new Rat with numerator a and denominator b.
+func NewRat(a, b int64) *Rat {
+	return new(Rat).SetFrac64(a, b)
+}
+
+// SetFloat64 sets z to exactly f and returns z.
+// If f is not finite, SetFloat64 returns nil.
+func (z *Rat) SetFloat64(f float64) *Rat {
+	const expMask = 1<<11 - 1
+	bits := math.Float64bits(f)
+	mantissa := bits & (1<<52 - 1)
+	exp := int((bits >> 52) & expMask)
+	switch exp {
+	case expMask: // non-finite
+		return nil
+	case 0: // denormal
+		exp -= 1022
+	default: // normal
+		mantissa |= 1 << 52
+		exp -= 1023
+	}
+
+	shift := 52 - exp
+
+	// Optimization (?): partially pre-normalize.
+	for mantissa&1 == 0 && shift > 0 {
+		mantissa >>= 1
+		shift--
+	}
+
+	z.a.SetUint64(mantissa)
+	z.a.neg = f < 0
+	z.b.Set(intOne)
+	if shift > 0 {
+		z.b.Lsh(&z.b, uint(shift))
+	} else {
+		z.a.Lsh(&z.a, uint(-shift))
+	}
+	return z.norm()
+}
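+
+// For illustration, SetFloat64(0.375) extracts mantissa == 3<<51 and
+// exp == -2, so shift == 54; the pre-normalization loop reduces this to
+// mantissa == 3, shift == 3, and the result is the exact fraction 3/8.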
+
+// quotToFloat32 returns the non-negative float32 value
+// nearest to the quotient a/b, using round-to-even in
+// halfway cases.  It does not mutate its arguments.
+// Preconditions: b is non-zero; a and b have no common factors.
+func quotToFloat32(a, b nat) (f float32, exact bool) {
+	const (
+		// float size in bits
+		Fsize = 32
+
+		// mantissa
+		Msize  = 23
+		Msize1 = Msize + 1 // incl. implicit 1
+		Msize2 = Msize1 + 1
+
+		// exponent
+		Esize = Fsize - Msize1
+		Ebias = 1<<(Esize-1) - 1
+		Emin  = 1 - Ebias
+		Emax  = Ebias
+	)
+
+	// TODO(adonovan): specialize common degenerate cases: 1.0, integers.
+	alen := a.bitLen()
+	if alen == 0 {
+		return 0, true
+	}
+	blen := b.bitLen()
+	if blen == 0 {
+		panic("division by zero")
+	}
+
+	// 1. Left-shift A or B such that quotient A/B is in [1<<Msize1, 1<<(Msize2+1))
+	// (Msize2 bits if A < B when they are left-aligned, Msize2+1 bits if A >= B).
+	// This is 2 or 3 more than the float32 mantissa field width of Msize:
+	// - the optional extra bit is shifted away in step 3 below.
+	// - the high-order 1 is omitted in "normal" representation;
+	// - the low-order 1 will be used during rounding then discarded.
+	exp := alen - blen
+	var a2, b2 nat
+	a2 = a2.set(a)
+	b2 = b2.set(b)
+	if shift := Msize2 - exp; shift > 0 {
+		a2 = a2.shl(a2, uint(shift))
+	} else if shift < 0 {
+		b2 = b2.shl(b2, uint(-shift))
+	}
+
+	// 2. Compute quotient and remainder (q, r).  NB: due to the
+	// extra shift, the low-order bit of q is logically the
+	// high-order bit of r.
+	var q nat
+	q, r := q.div(a2, a2, b2) // (recycle a2)
+	mantissa := low32(q)
+	haveRem := len(r) > 0 // mantissa&1 && !haveRem => remainder is exactly half
+
+	// 3. If quotient didn't fit in Msize2 bits, redo division by b2<<1
+	// (in effect---we accomplish this incrementally).
+	if mantissa>>Msize2 == 1 {
+		if mantissa&1 == 1 {
+			haveRem = true
+		}
+		mantissa >>= 1
+		exp++
+	}
+	if mantissa>>Msize1 != 1 {
+		panic(fmt.Sprintf("expected exactly %d bits of result", Msize2))
+	}
+
+	// 4. Rounding.
+	if Emin-Msize <= exp && exp <= Emin {
+		// Denormal case; lose 'shift' bits of precision.
+		shift := uint(Emin - (exp - 1)) // [1..Esize1)
+		lostbits := mantissa & (1<<shift - 1)
+		haveRem = haveRem || lostbits != 0
+		mantissa >>= shift
+		exp = 2 - Ebias // == exp + shift
+	}
+	// Round q using round-half-to-even.
+	exact = !haveRem
+	if mantissa&1 != 0 {
+		exact = false
+		if haveRem || mantissa&2 != 0 {
+			if mantissa++; mantissa >= 1<<Msize2 {
+				// Complete rollover 11...1 => 100...0, so shift is safe
+				mantissa >>= 1
+				exp++
+			}
+		}
+	}
+	mantissa >>= 1 // discard rounding bit.  Mantissa now scaled by 1<<Msize1.
+
+	f = float32(math.Ldexp(float64(mantissa), exp-Msize1))
+	if math.IsInf(float64(f), 0) {
+		exact = false
+	}
+	return
+}
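+
+// For illustration, quotToFloat32(1, 3) computes exp == -1, shifts a left by
+// 26 bits, and divides: q == 0x1555555 with a nonzero remainder, so
+// round-half-to-even bumps the mantissa; after the rounding bit is discarded
+// the result is 0xaaaaab * 2**-25, the float32 nearest 1/3, with exact == false.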
+
+// quotToFloat64 returns the non-negative float64 value
+// nearest to the quotient a/b, using round-to-even in
+// halfway cases.  It does not mutate its arguments.
+// Preconditions: b is non-zero; a and b have no common factors.
+func quotToFloat64(a, b nat) (f float64, exact bool) {
+	const (
+		// float size in bits
+		Fsize = 64
+
+		// mantissa
+		Msize  = 52
+		Msize1 = Msize + 1 // incl. implicit 1
+		Msize2 = Msize1 + 1
+
+		// exponent
+		Esize = Fsize - Msize1
+		Ebias = 1<<(Esize-1) - 1
+		Emin  = 1 - Ebias
+		Emax  = Ebias
+	)
+
+	// TODO(adonovan): specialize common degenerate cases: 1.0, integers.
+	alen := a.bitLen()
+	if alen == 0 {
+		return 0, true
+	}
+	blen := b.bitLen()
+	if blen == 0 {
+		panic("division by zero")
+	}
+
+	// 1. Left-shift A or B such that quotient A/B is in [1<<Msize1, 1<<(Msize2+1))
+	// (Msize2 bits if A < B when they are left-aligned, Msize2+1 bits if A >= B).
+	// This is 2 or 3 more than the float64 mantissa field width of Msize:
+	// - the optional extra bit is shifted away in step 3 below.
+	// - the high-order 1 is omitted in "normal" representation;
+	// - the low-order 1 will be used during rounding then discarded.
+	exp := alen - blen
+	var a2, b2 nat
+	a2 = a2.set(a)
+	b2 = b2.set(b)
+	if shift := Msize2 - exp; shift > 0 {
+		a2 = a2.shl(a2, uint(shift))
+	} else if shift < 0 {
+		b2 = b2.shl(b2, uint(-shift))
+	}
+
+	// 2. Compute quotient and remainder (q, r).  NB: due to the
+	// extra shift, the low-order bit of q is logically the
+	// high-order bit of r.
+	var q nat
+	q, r := q.div(a2, a2, b2) // (recycle a2)
+	mantissa := low64(q)
+	haveRem := len(r) > 0 // mantissa&1 && !haveRem => remainder is exactly half
+
+	// 3. If quotient didn't fit in Msize2 bits, redo division by b2<<1
+	// (in effect---we accomplish this incrementally).
+	if mantissa>>Msize2 == 1 {
+		if mantissa&1 == 1 {
+			haveRem = true
+		}
+		mantissa >>= 1
+		exp++
+	}
+	if mantissa>>Msize1 != 1 {
+		panic(fmt.Sprintf("expected exactly %d bits of result", Msize2))
+	}
+
+	// 4. Rounding.
+	if Emin-Msize <= exp && exp <= Emin {
+		// Denormal case; lose 'shift' bits of precision.
+		shift := uint(Emin - (exp - 1)) // [1..Esize1)
+		lostbits := mantissa & (1<<shift - 1)
+		haveRem = haveRem || lostbits != 0
+		mantissa >>= shift
+		exp = 2 - Ebias // == exp + shift
+	}
+	// Round q using round-half-to-even.
+	exact = !haveRem
+	if mantissa&1 != 0 {
+		exact = false
+		if haveRem || mantissa&2 != 0 {
+			if mantissa++; mantissa >= 1<<Msize2 {
+				// Complete rollover 11...1 => 100...0, so shift is safe
+				mantissa >>= 1
+				exp++
+			}
+		}
+	}
+	mantissa >>= 1 // discard rounding bit.  Mantissa now scaled by 1<<Msize1.
+
+	f = math.Ldexp(float64(mantissa), exp-Msize1)
+	if math.IsInf(f, 0) {
+		exact = false
+	}
+	return
+}
+
+// Float32 returns the nearest float32 value for x and a bool indicating
+// whether f represents x exactly. If the magnitude of x is too large to
+// be represented by a float32, f is an infinity and exact is false.
+// The sign of f always matches the sign of x, even if f == 0.
+func (x *Rat) Float32() (f float32, exact bool) {
+	b := x.b.abs
+	if len(b) == 0 {
+		b = b.set(natOne) // materialize denominator
+	}
+	f, exact = quotToFloat32(x.a.abs, b)
+	if x.a.neg {
+		f = -f
+	}
+	return
+}
+
+// Float64 returns the nearest float64 value for x and a bool indicating
+// whether f represents x exactly. If the magnitude of x is too large to
+// be represented by a float64, f is an infinity and exact is false.
+// The sign of f always matches the sign of x, even if f == 0.
+func (x *Rat) Float64() (f float64, exact bool) {
+	b := x.b.abs
+	if len(b) == 0 {
+		b = b.set(natOne) // materialize denominator
+	}
+	f, exact = quotToFloat64(x.a.abs, b)
+	if x.a.neg {
+		f = -f
+	}
+	return
+}
+
+// SetFrac sets z to a/b and returns z.
+func (z *Rat) SetFrac(a, b *Int) *Rat {
+	z.a.neg = a.neg != b.neg
+	babs := b.abs
+	if len(babs) == 0 {
+		panic("division by zero")
+	}
+	if &z.a == b || alias(z.a.abs, babs) {
+		babs = nat(nil).set(babs) // make a copy
+	}
+	z.a.abs = z.a.abs.set(a.abs)
+	z.b.abs = z.b.abs.set(babs)
+	return z.norm()
+}
+
+// SetFrac64 sets z to a/b and returns z.
+func (z *Rat) SetFrac64(a, b int64) *Rat {
+	z.a.SetInt64(a)
+	if b == 0 {
+		panic("division by zero")
+	}
+	if b < 0 {
+		b = -b
+		z.a.neg = !z.a.neg
+	}
+	z.b.abs = z.b.abs.setUint64(uint64(b))
+	return z.norm()
+}
+
+// SetInt sets z to x (by making a copy of x) and returns z.
+func (z *Rat) SetInt(x *Int) *Rat {
+	z.a.Set(x)
+	z.b.abs = z.b.abs[:0]
+	return z
+}
+
+// SetInt64 sets z to x and returns z.
+func (z *Rat) SetInt64(x int64) *Rat {
+	z.a.SetInt64(x)
+	z.b.abs = z.b.abs[:0]
+	return z
+}
+
+// Set sets z to x (by making a copy of x) and returns z.
+func (z *Rat) Set(x *Rat) *Rat {
+	if z != x {
+		z.a.Set(&x.a)
+		z.b.Set(&x.b)
+	}
+	return z
+}
+
+// Abs sets z to |x| (the absolute value of x) and returns z.
+func (z *Rat) Abs(x *Rat) *Rat {
+	z.Set(x)
+	z.a.neg = false
+	return z
+}
+
+// Neg sets z to -x and returns z.
+func (z *Rat) Neg(x *Rat) *Rat {
+	z.Set(x)
+	z.a.neg = len(z.a.abs) > 0 && !z.a.neg // 0 has no sign
+	return z
+}
+
+// Inv sets z to 1/x and returns z.
+func (z *Rat) Inv(x *Rat) *Rat {
+	if len(x.a.abs) == 0 {
+		panic("division by zero")
+	}
+	z.Set(x)
+	a := z.b.abs
+	if len(a) == 0 {
+		a = a.set(natOne) // materialize numerator
+	}
+	b := z.a.abs
+	if b.cmp(natOne) == 0 {
+		b = b[:0] // normalize denominator
+	}
+	z.a.abs, z.b.abs = a, b // sign doesn't change
+	return z
+}
+
+// Sign returns:
+//
+//	-1 if x <  0
+//	 0 if x == 0
+//	+1 if x >  0
+//
+func (x *Rat) Sign() int {
+	return x.a.Sign()
+}
+
+// IsInt reports whether the denominator of x is 1.
+func (x *Rat) IsInt() bool {
+	return len(x.b.abs) == 0 || x.b.abs.cmp(natOne) == 0
+}
+
+// Num returns the numerator of x; it may be <= 0.
+// The result is a reference to x's numerator; it
+// may change if a new value is assigned to x, and vice versa.
+// The sign of the numerator corresponds to the sign of x.
+func (x *Rat) Num() *Int {
+	return &x.a
+}
+
+// Denom returns the denominator of x; it is always > 0.
+// The result is a reference to x's denominator; it
+// may change if a new value is assigned to x, and vice versa.
+func (x *Rat) Denom() *Int {
+	x.b.neg = false // the result is always >= 0
+	if len(x.b.abs) == 0 {
+		x.b.abs = x.b.abs.set(natOne) // materialize denominator
+	}
+	return &x.b
+}
+
+func (z *Rat) norm() *Rat {
+	switch {
+	case len(z.a.abs) == 0:
+		// z == 0 - normalize sign and denominator
+		z.a.neg = false
+		z.b.abs = z.b.abs[:0]
+	case len(z.b.abs) == 0:
+		// z is normalized int - nothing to do
+	case z.b.abs.cmp(natOne) == 0:
+		// z is int - normalize denominator
+		z.b.abs = z.b.abs[:0]
+	default:
+		neg := z.a.neg
+		z.a.neg = false
+		z.b.neg = false
+		if f := NewInt(0).binaryGCD(&z.a, &z.b); f.Cmp(intOne) != 0 {
+			z.a.abs, _ = z.a.abs.div(nil, z.a.abs, f.abs)
+			z.b.abs, _ = z.b.abs.div(nil, z.b.abs, f.abs)
+			if z.b.abs.cmp(natOne) == 0 {
+				// z is int - normalize denominator
+				z.b.abs = z.b.abs[:0]
+			}
+		}
+		z.a.neg = neg
+	}
+	return z
+}
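+
+// For illustration, a Rat holding 6/4 is reduced via binaryGCD(6, 4) == 2 to
+// 3/2, while 4/2 reduces to 2/1 and the denominator is then dropped
+// (z.b.abs set to length 0), so the result is recognized by IsInt.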
+
+// mulDenom sets z to the denominator product x*y (by taking into
+// account that 0 values for x or y must be interpreted as 1) and
+// returns z.
+func mulDenom(z, x, y nat) nat {
+	switch {
+	case len(x) == 0:
+		return z.set(y)
+	case len(y) == 0:
+		return z.set(x)
+	}
+	return z.mul(x, y)
+}
+
+// scaleDenom computes x*f.
+// If f == 0 (zero value of denominator), the result is (a copy of) x.
+func scaleDenom(x *Int, f nat) *Int {
+	var z Int
+	if len(f) == 0 {
+		return z.Set(x)
+	}
+	z.abs = z.abs.mul(x.abs, f)
+	z.neg = x.neg
+	return &z
+}
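+
+// For illustration of the zero-value convention: adding 1/2 and the integer 3
+// (whose b.abs is empty) computes a1 == scaleDenom(1, empty) == 1 and
+// a2 == scaleDenom(3, [2]) == 6, then numerator 7 over
+// mulDenom([2], empty) == [2], i.e. 7/2.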
+
+// Cmp compares x and y and returns:
+//
+//   -1 if x <  y
+//    0 if x == y
+//   +1 if x >  y
+//
+func (x *Rat) Cmp(y *Rat) int {
+	return scaleDenom(&x.a, y.b.abs).Cmp(scaleDenom(&y.a, x.b.abs))
+}
+
+// Add sets z to the sum x+y and returns z.
+func (z *Rat) Add(x, y *Rat) *Rat {
+	a1 := scaleDenom(&x.a, y.b.abs)
+	a2 := scaleDenom(&y.a, x.b.abs)
+	z.a.Add(a1, a2)
+	z.b.abs = mulDenom(z.b.abs, x.b.abs, y.b.abs)
+	return z.norm()
+}
+
+// Sub sets z to the difference x-y and returns z.
+func (z *Rat) Sub(x, y *Rat) *Rat {
+	a1 := scaleDenom(&x.a, y.b.abs)
+	a2 := scaleDenom(&y.a, x.b.abs)
+	z.a.Sub(a1, a2)
+	z.b.abs = mulDenom(z.b.abs, x.b.abs, y.b.abs)
+	return z.norm()
+}
+
+// Mul sets z to the product x*y and returns z.
+func (z *Rat) Mul(x, y *Rat) *Rat {
+	z.a.Mul(&x.a, &y.a)
+	z.b.abs = mulDenom(z.b.abs, x.b.abs, y.b.abs)
+	return z.norm()
+}
+
+// Quo sets z to the quotient x/y and returns z.
+// If y == 0, a division-by-zero run-time panic occurs.
+func (z *Rat) Quo(x, y *Rat) *Rat {
+	if len(y.a.abs) == 0 {
+		panic("division by zero")
+	}
+	a := scaleDenom(&x.a, y.b.abs)
+	b := scaleDenom(&y.a, x.b.abs)
+	z.a.abs = a.abs
+	z.b.abs = b.abs
+	z.a.neg = a.neg != b.neg
+	return z.norm()
+}
+
+// Gob codec version. Permits backward-compatible changes to the encoding.
+const ratGobVersion byte = 1
+
+// GobEncode implements the gob.GobEncoder interface.
+func (x *Rat) GobEncode() ([]byte, error) {
+	if x == nil {
+		return nil, nil
+	}
+	buf := make([]byte, 1+4+(len(x.a.abs)+len(x.b.abs))*_S) // extra bytes for version and sign bit (1), and numerator length (4)
+	i := x.b.abs.bytes(buf)
+	j := x.a.abs.bytes(buf[:i])
+	n := i - j
+	if int(uint32(n)) != n {
+		// this should never happen
+		return nil, errors.New("Rat.GobEncode: numerator too large")
+	}
+	binary.BigEndian.PutUint32(buf[j-4:j], uint32(n))
+	j -= 1 + 4
+	b := ratGobVersion << 1 // make space for sign bit
+	if x.a.neg {
+		b |= 1
+	}
+	buf[j] = b
+	return buf[j:], nil
+}
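+
+// For illustration, GobEncode of 3/2 produces 7 bytes:
+//
+//	buf[0]   == ratGobVersion<<1 | 0   // version 1, positive sign
+//	buf[1:5] == 0 0 0 1                // big-endian numerator byte length
+//	buf[5]   == 3                      // numerator bytes
+//	buf[6]   == 2                      // denominator bytes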
+
+// GobDecode implements the gob.GobDecoder interface.
+func (z *Rat) GobDecode(buf []byte) error {
+	if len(buf) == 0 {
+		// Other side sent a nil or default value.
+		*z = Rat{}
+		return nil
+	}
+	b := buf[0]
+	if b>>1 != ratGobVersion {
+		return fmt.Errorf("Rat.GobDecode: encoding version %d not supported", b>>1)
+	}
+	const j = 1 + 4
+	i := j + binary.BigEndian.Uint32(buf[j-4:j])
+	z.a.neg = b&1 != 0
+	z.a.abs = z.a.abs.setBytes(buf[j:i])
+	z.b.abs = z.b.abs.setBytes(buf[i:])
+	return nil
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+func (r *Rat) MarshalText() (text []byte, err error) {
+	return []byte(r.RatString()), nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+func (r *Rat) UnmarshalText(text []byte) error {
+	if _, ok := r.SetString(string(text)); !ok {
+		return fmt.Errorf("math/big: cannot unmarshal %q into a *big.Rat", text)
+	}
+	return nil
+}
diff --git a/src/cmd/compile/internal/big/rat_test.go b/src/cmd/compile/internal/big/rat_test.go
new file mode 100644
index 0000000..012d0c4
--- /dev/null
+++ b/src/cmd/compile/internal/big/rat_test.go
@@ -0,0 +1,736 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package big
+
+import (
+	"bytes"
+	"encoding/gob"
+	"encoding/json"
+	"encoding/xml"
+	"math"
+	"testing"
+)
+
+func TestZeroRat(t *testing.T) {
+	var x, y, z Rat
+	y.SetFrac64(0, 42)
+
+	if x.Cmp(&y) != 0 {
+		t.Errorf("x and y should be both equal and zero")
+	}
+
+	if s := x.String(); s != "0/1" {
+		t.Errorf("got x = %s, want 0/1", s)
+	}
+
+	if s := x.RatString(); s != "0" {
+		t.Errorf("got x = %s, want 0", s)
+	}
+
+	z.Add(&x, &y)
+	if s := z.RatString(); s != "0" {
+		t.Errorf("got x+y = %s, want 0", s)
+	}
+
+	z.Sub(&x, &y)
+	if s := z.RatString(); s != "0" {
+		t.Errorf("got x-y = %s, want 0", s)
+	}
+
+	z.Mul(&x, &y)
+	if s := z.RatString(); s != "0" {
+		t.Errorf("got x*y = %s, want 0", s)
+	}
+
+	// check for division by zero
+	defer func() {
+		if s := recover(); s == nil || s.(string) != "division by zero" {
+			panic(s)
+		}
+	}()
+	z.Quo(&x, &y)
+}
+
+func TestRatSign(t *testing.T) {
+	zero := NewRat(0, 1)
+	for _, a := range setStringTests {
+		x, ok := new(Rat).SetString(a.in)
+		if !ok {
+			continue
+		}
+		s := x.Sign()
+		e := x.Cmp(zero)
+		if s != e {
+			t.Errorf("got %d; want %d for z = %v", s, e, &x)
+		}
+	}
+}
+
+var ratCmpTests = []struct {
+	rat1, rat2 string
+	out        int
+}{
+	{"0", "0/1", 0},
+	{"1/1", "1", 0},
+	{"-1", "-2/2", 0},
+	{"1", "0", 1},
+	{"0/1", "1/1", -1},
+	{"-5/1434770811533343057144", "-5/1434770811533343057145", -1},
+	{"49832350382626108453/8964749413", "49832350382626108454/8964749413", -1},
+	{"-37414950961700930/7204075375675961", "37414950961700930/7204075375675961", -1},
+	{"37414950961700930/7204075375675961", "74829901923401860/14408150751351922", 0},
+}
+
+func TestRatCmp(t *testing.T) {
+	for i, test := range ratCmpTests {
+		x, _ := new(Rat).SetString(test.rat1)
+		y, _ := new(Rat).SetString(test.rat2)
+
+		out := x.Cmp(y)
+		if out != test.out {
+			t.Errorf("#%d got out = %v; want %v", i, out, test.out)
+		}
+	}
+}
+
+func TestIsInt(t *testing.T) {
+	one := NewInt(1)
+	for _, a := range setStringTests {
+		x, ok := new(Rat).SetString(a.in)
+		if !ok {
+			continue
+		}
+		i := x.IsInt()
+		e := x.Denom().Cmp(one) == 0
+		if i != e {
+			t.Errorf("got IsInt(%v) == %v; want %v", x, i, e)
+		}
+	}
+}
+
+func TestRatAbs(t *testing.T) {
+	zero := new(Rat)
+	for _, a := range setStringTests {
+		x, ok := new(Rat).SetString(a.in)
+		if !ok {
+			continue
+		}
+		e := new(Rat).Set(x)
+		if e.Cmp(zero) < 0 {
+			e.Sub(zero, e)
+		}
+		z := new(Rat).Abs(x)
+		if z.Cmp(e) != 0 {
+			t.Errorf("got Abs(%v) = %v; want %v", x, z, e)
+		}
+	}
+}
+
+func TestRatNeg(t *testing.T) {
+	zero := new(Rat)
+	for _, a := range setStringTests {
+		x, ok := new(Rat).SetString(a.in)
+		if !ok {
+			continue
+		}
+		e := new(Rat).Sub(zero, x)
+		z := new(Rat).Neg(x)
+		if z.Cmp(e) != 0 {
+			t.Errorf("got Neg(%v) = %v; want %v", x, z, e)
+		}
+	}
+}
+
+func TestRatInv(t *testing.T) {
+	zero := new(Rat)
+	for _, a := range setStringTests {
+		x, ok := new(Rat).SetString(a.in)
+		if !ok {
+			continue
+		}
+		if x.Cmp(zero) == 0 {
+			continue // avoid division by zero
+		}
+		e := new(Rat).SetFrac(x.Denom(), x.Num())
+		z := new(Rat).Inv(x)
+		if z.Cmp(e) != 0 {
+			t.Errorf("got Inv(%v) = %v; want %v", x, z, e)
+		}
+	}
+}
+
+type ratBinFun func(z, x, y *Rat) *Rat
+type ratBinArg struct {
+	x, y, z string
+}
+
+func testRatBin(t *testing.T, i int, name string, f ratBinFun, a ratBinArg) {
+	x, _ := new(Rat).SetString(a.x)
+	y, _ := new(Rat).SetString(a.y)
+	z, _ := new(Rat).SetString(a.z)
+	out := f(new(Rat), x, y)
+
+	if out.Cmp(z) != 0 {
+		t.Errorf("%s #%d got %s want %s", name, i, out, z)
+	}
+}
+
+var ratBinTests = []struct {
+	x, y      string
+	sum, prod string
+}{
+	{"0", "0", "0", "0"},
+	{"0", "1", "1", "0"},
+	{"-1", "0", "-1", "0"},
+	{"-1", "1", "0", "-1"},
+	{"1", "1", "2", "1"},
+	{"1/2", "1/2", "1", "1/4"},
+	{"1/4", "1/3", "7/12", "1/12"},
+	{"2/5", "-14/3", "-64/15", "-28/15"},
+	{"4707/49292519774798173060", "-3367/70976135186689855734", "84058377121001851123459/1749296273614329067191168098769082663020", "-1760941/388732505247628681598037355282018369560"},
+	{"-61204110018146728334/3", "-31052192278051565633/2", "-215564796870448153567/6", "950260896245257153059642991192710872711/3"},
+	{"-854857841473707320655/4237645934602118692642972629634714039", "-18/31750379913563777419", "-27/133467566250814981", "15387441146526731771790/134546868362786310073779084329032722548987800600710485341"},
+	{"618575745270541348005638912139/19198433543745179392300736", "-19948846211000086/637313996471", "27674141753240653/30123979153216", "-6169936206128396568797607742807090270137721977/6117715203873571641674006593837351328"},
+	{"-3/26206484091896184128", "5/2848423294177090248", "15310893822118706237/9330894968229805033368778458685147968", "-5/24882386581946146755650075889827061248"},
+	{"26946729/330400702820", "41563965/225583428284", "1238218672302860271/4658307703098666660055", "224002580204097/14906584649915733312176"},
+	{"-8259900599013409474/7", "-84829337473700364773/56707961321161574960", "-468402123685491748914621885145127724451/396955729248131024720", "350340947706464153265156004876107029701/198477864624065512360"},
+	{"575775209696864/1320203974639986246357", "29/712593081308", "410331716733912717985762465/940768218243776489278275419794956", "808/45524274987585732633"},
+	{"1786597389946320496771/2066653520653241", "6269770/1992362624741777", "3559549865190272133656109052308126637/4117523232840525481453983149257", "8967230/3296219033"},
+	{"-36459180403360509753/32150500941194292113930", "9381566963714/9633539", "301622077145533298008420642898530153/309723104686531919656937098270", "-3784609207827/3426986245"},
+}
+
+func TestRatBin(t *testing.T) {
+	for i, test := range ratBinTests {
+		arg := ratBinArg{test.x, test.y, test.sum}
+		testRatBin(t, i, "Add", (*Rat).Add, arg)
+
+		arg = ratBinArg{test.y, test.x, test.sum}
+		testRatBin(t, i, "Add symmetric", (*Rat).Add, arg)
+
+		arg = ratBinArg{test.sum, test.x, test.y}
+		testRatBin(t, i, "Sub", (*Rat).Sub, arg)
+
+		arg = ratBinArg{test.sum, test.y, test.x}
+		testRatBin(t, i, "Sub symmetric", (*Rat).Sub, arg)
+
+		arg = ratBinArg{test.x, test.y, test.prod}
+		testRatBin(t, i, "Mul", (*Rat).Mul, arg)
+
+		arg = ratBinArg{test.y, test.x, test.prod}
+		testRatBin(t, i, "Mul symmetric", (*Rat).Mul, arg)
+
+		if test.x != "0" {
+			arg = ratBinArg{test.prod, test.x, test.y}
+			testRatBin(t, i, "Quo", (*Rat).Quo, arg)
+		}
+
+		if test.y != "0" {
+			arg = ratBinArg{test.prod, test.y, test.x}
+			testRatBin(t, i, "Quo symmetric", (*Rat).Quo, arg)
+		}
+	}
+}
+
+func TestIssue820(t *testing.T) {
+	x := NewRat(3, 1)
+	y := NewRat(2, 1)
+	z := y.Quo(x, y)
+	q := NewRat(3, 2)
+	if z.Cmp(q) != 0 {
+		t.Errorf("got %s want %s", z, q)
+	}
+
+	y = NewRat(3, 1)
+	x = NewRat(2, 1)
+	z = y.Quo(x, y)
+	q = NewRat(2, 3)
+	if z.Cmp(q) != 0 {
+		t.Errorf("got %s want %s", z, q)
+	}
+
+	x = NewRat(3, 1)
+	z = x.Quo(x, x)
+	q = NewRat(3, 3)
+	if z.Cmp(q) != 0 {
+		t.Errorf("got %s want %s", z, q)
+	}
+}
+
+var setFrac64Tests = []struct {
+	a, b int64
+	out  string
+}{
+	{0, 1, "0"},
+	{0, -1, "0"},
+	{1, 1, "1"},
+	{-1, 1, "-1"},
+	{1, -1, "-1"},
+	{-1, -1, "1"},
+	{-9223372036854775808, -9223372036854775808, "1"},
+}
+
+func TestRatSetFrac64Rat(t *testing.T) {
+	for i, test := range setFrac64Tests {
+		x := new(Rat).SetFrac64(test.a, test.b)
+		if x.RatString() != test.out {
+			t.Errorf("#%d got %s want %s", i, x.RatString(), test.out)
+		}
+	}
+}
+
+func TestRatGobEncoding(t *testing.T) {
+	var medium bytes.Buffer
+	enc := gob.NewEncoder(&medium)
+	dec := gob.NewDecoder(&medium)
+	for _, test := range encodingTests {
+		medium.Reset() // empty buffer for each test case (in case of failures)
+		var tx Rat
+		tx.SetString(test + ".14159265")
+		if err := enc.Encode(&tx); err != nil {
+			t.Errorf("encoding of %s failed: %s", &tx, err)
+		}
+		var rx Rat
+		if err := dec.Decode(&rx); err != nil {
+			t.Errorf("decoding of %s failed: %s", &tx, err)
+		}
+		if rx.Cmp(&tx) != 0 {
+			t.Errorf("transmission of %s failed: got %s want %s", &tx, &rx, &tx)
+		}
+	}
+}
+
+// Sending a nil Rat pointer (inside a slice) on a round trip through gob should yield a zero.
+// TODO: top-level nils.
+func TestGobEncodingNilRatInSlice(t *testing.T) {
+	buf := new(bytes.Buffer)
+	enc := gob.NewEncoder(buf)
+	dec := gob.NewDecoder(buf)
+
+	var in = make([]*Rat, 1)
+	err := enc.Encode(&in)
+	if err != nil {
+		t.Errorf("gob encode failed: %q", err)
+	}
+	var out []*Rat
+	err = dec.Decode(&out)
+	if err != nil {
+		t.Fatalf("gob decode failed: %q", err)
+	}
+	if len(out) != 1 {
+		t.Fatalf("wrong len; want 1 got %d", len(out))
+	}
+	var zero Rat
+	if out[0].Cmp(&zero) != 0 {
+		t.Errorf("transmission of (*Int)(nill) failed: got %s want 0", out)
+	}
+}
+
+var ratNums = []string{
+	"-141592653589793238462643383279502884197169399375105820974944592307816406286",
+	"-1415926535897932384626433832795028841971",
+	"-141592653589793",
+	"-1",
+	"0",
+	"1",
+	"141592653589793",
+	"1415926535897932384626433832795028841971",
+	"141592653589793238462643383279502884197169399375105820974944592307816406286",
+}
+
+var ratDenoms = []string{
+	"1",
+	"718281828459045",
+	"7182818284590452353602874713526624977572",
+	"718281828459045235360287471352662497757247093699959574966967627724076630353",
+}
+
+func TestRatJSONEncoding(t *testing.T) {
+	for _, num := range ratNums {
+		for _, denom := range ratDenoms {
+			var tx Rat
+			tx.SetString(num + "/" + denom)
+			b, err := json.Marshal(&tx)
+			if err != nil {
+				t.Errorf("marshaling of %s failed: %s", &tx, err)
+				continue
+			}
+			var rx Rat
+			if err := json.Unmarshal(b, &rx); err != nil {
+				t.Errorf("unmarshaling of %s failed: %s", &tx, err)
+				continue
+			}
+			if rx.Cmp(&tx) != 0 {
+				t.Errorf("JSON encoding of %s failed: got %s want %s", &tx, &rx, &tx)
+			}
+		}
+	}
+}
+
+func TestRatXMLEncoding(t *testing.T) {
+	for _, num := range ratNums {
+		for _, denom := range ratDenoms {
+			var tx Rat
+			tx.SetString(num + "/" + denom)
+			b, err := xml.Marshal(&tx)
+			if err != nil {
+				t.Errorf("marshaling of %s failed: %s", &tx, err)
+				continue
+			}
+			var rx Rat
+			if err := xml.Unmarshal(b, &rx); err != nil {
+				t.Errorf("unmarshaling of %s failed: %s", &tx, err)
+				continue
+			}
+			if rx.Cmp(&tx) != 0 {
+				t.Errorf("XML encoding of %s failed: got %s want %s", &tx, &rx, &tx)
+			}
+		}
+	}
+}
+
+func TestIssue2379(t *testing.T) {
+	// 1) no aliasing
+	q := NewRat(3, 2)
+	x := new(Rat)
+	x.SetFrac(NewInt(3), NewInt(2))
+	if x.Cmp(q) != 0 {
+		t.Errorf("1) got %s want %s", x, q)
+	}
+
+	// 2) aliasing of numerator
+	x = NewRat(2, 3)
+	x.SetFrac(NewInt(3), x.Num())
+	if x.Cmp(q) != 0 {
+		t.Errorf("2) got %s want %s", x, q)
+	}
+
+	// 3) aliasing of denominator
+	x = NewRat(2, 3)
+	x.SetFrac(x.Denom(), NewInt(2))
+	if x.Cmp(q) != 0 {
+		t.Errorf("3) got %s want %s", x, q)
+	}
+
+	// 4) aliasing of numerator and denominator
+	x = NewRat(2, 3)
+	x.SetFrac(x.Denom(), x.Num())
+	if x.Cmp(q) != 0 {
+		t.Errorf("4) got %s want %s", x, q)
+	}
+
+	// 5) numerator and denominator are the same
+	q = NewRat(1, 1)
+	x = new(Rat)
+	n := NewInt(7)
+	x.SetFrac(n, n)
+	if x.Cmp(q) != 0 {
+		t.Errorf("5) got %s want %s", x, q)
+	}
+}
+
+func TestIssue3521(t *testing.T) {
+	a := new(Int)
+	b := new(Int)
+	a.SetString("64375784358435883458348587", 0)
+	b.SetString("4789759874531", 0)
+
+	// 0) a raw zero value has 1 as denominator
+	zero := new(Rat)
+	one := NewInt(1)
+	if zero.Denom().Cmp(one) != 0 {
+		t.Errorf("0) got %s want %s", zero.Denom(), one)
+	}
+
+	// 1a) a zero value remains zero independent of denominator
+	x := new(Rat)
+	x.Denom().Set(new(Int).Neg(b))
+	if x.Cmp(zero) != 0 {
+		t.Errorf("1a) got %s want %s", x, zero)
+	}
+
+	// 1b) a zero value may have a denominator != 0 and != 1
+	x.Num().Set(a)
+	qab := new(Rat).SetFrac(a, b)
+	if x.Cmp(qab) != 0 {
+		t.Errorf("1b) got %s want %s", x, qab)
+	}
+
+	// 2a) an integral value becomes a fraction depending on denominator
+	x.SetFrac64(10, 2)
+	x.Denom().SetInt64(3)
+	q53 := NewRat(5, 3)
+	if x.Cmp(q53) != 0 {
+		t.Errorf("2a) got %s want %s", x, q53)
+	}
+
+	// 2b) an integral value becomes a fraction depending on denominator
+	x = NewRat(10, 2)
+	x.Denom().SetInt64(3)
+	if x.Cmp(q53) != 0 {
+		t.Errorf("2b) got %s want %s", x, q53)
+	}
+
+	// 3) changing the numerator/denominator of a Rat changes the Rat
+	x.SetFrac(a, b)
+	a = x.Num()
+	b = x.Denom()
+	a.SetInt64(5)
+	b.SetInt64(3)
+	if x.Cmp(q53) != 0 {
+		t.Errorf("3) got %s want %s", x, q53)
+	}
+}
+
+func TestFloat32Distribution(t *testing.T) {
+	// Generate a distribution of (sign, mantissa, exp) values
+	// broader than the float32 range, and check Rat.Float32()
+	// always picks the closest float32 approximation.
+	var add = []int64{
+		0,
+		1,
+		3,
+		5,
+		7,
+		9,
+		11,
+	}
+	var winc, einc = uint64(1), 1 // soak test (~1.5s on x86-64)
+	if testing.Short() {
+		winc, einc = 5, 15 // quick test (~60ms on x86-64)
+	}
+
+	for _, sign := range "+-" {
+		for _, a := range add {
+			for wid := uint64(0); wid < 30; wid += winc {
+				b := 1<<wid + a
+				if sign == '-' {
+					b = -b
+				}
+				for exp := -150; exp < 150; exp += einc {
+					num, den := NewInt(b), NewInt(1)
+					if exp > 0 {
+						num.Lsh(num, uint(exp))
+					} else {
+						den.Lsh(den, uint(-exp))
+					}
+					r := new(Rat).SetFrac(num, den)
+					f, _ := r.Float32()
+
+					if !checkIsBestApprox32(t, f, r) {
+						// Append context information.
+						t.Errorf("(input was mantissa %#x, exp %d; f = %g (%b); f ~ %g; r = %v)",
+							b, exp, f, f, math.Ldexp(float64(b), exp), r)
+					}
+
+					checkNonLossyRoundtrip32(t, f)
+				}
+			}
+		}
+	}
+}
+
+func TestFloat64Distribution(t *testing.T) {
+	// Generate a distribution of (sign, mantissa, exp) values
+	// broader than the float64 range, and check Rat.Float64()
+	// always picks the closest float64 approximation.
+	var add = []int64{
+		0,
+		1,
+		3,
+		5,
+		7,
+		9,
+		11,
+	}
+	var winc, einc = uint64(1), 1 // soak test (~75s on x86-64)
+	if testing.Short() {
+		winc, einc = 10, 500 // quick test (~12ms on x86-64)
+	}
+
+	for _, sign := range "+-" {
+		for _, a := range add {
+			for wid := uint64(0); wid < 60; wid += winc {
+				b := 1<<wid + a
+				if sign == '-' {
+					b = -b
+				}
+				for exp := -1100; exp < 1100; exp += einc {
+					num, den := NewInt(b), NewInt(1)
+					if exp > 0 {
+						num.Lsh(num, uint(exp))
+					} else {
+						den.Lsh(den, uint(-exp))
+					}
+					r := new(Rat).SetFrac(num, den)
+					f, _ := r.Float64()
+
+					if !checkIsBestApprox64(t, f, r) {
+						// Append context information.
+						t.Errorf("(input was mantissa %#x, exp %d; f = %g (%b); f ~ %g; r = %v)",
+							b, exp, f, f, math.Ldexp(float64(b), exp), r)
+					}
+
+					checkNonLossyRoundtrip64(t, f)
+				}
+			}
+		}
+	}
+}
+
+// TestSetFloat64NonFinite checks that SetFloat64 of a non-finite value
+// returns nil.
+func TestSetFloat64NonFinite(t *testing.T) {
+	for _, f := range []float64{math.NaN(), math.Inf(+1), math.Inf(-1)} {
+		var r Rat
+		if r2 := r.SetFloat64(f); r2 != nil {
+			t.Errorf("SetFloat64(%g) was %v, want nil", f, r2)
+		}
+	}
+}
+
+// checkNonLossyRoundtrip32 checks that a float->Rat->float roundtrip is
+// non-lossy for finite f.
+func checkNonLossyRoundtrip32(t *testing.T, f float32) {
+	if !isFinite(float64(f)) {
+		return
+	}
+	r := new(Rat).SetFloat64(float64(f))
+	if r == nil {
+		t.Errorf("Rat.SetFloat64(float64(%g) (%b)) == nil", f, f)
+		return
+	}
+	f2, exact := r.Float32()
+	if f != f2 || !exact {
+		t.Errorf("Rat.SetFloat64(float64(%g)).Float32() = %g (%b), %v, want %g (%b), %v; delta = %b",
+			f, f2, f2, exact, f, f, true, f2-f)
+	}
+}
+
+// checkNonLossyRoundtrip64 checks that a float->Rat->float roundtrip is
+// non-lossy for finite f.
+func checkNonLossyRoundtrip64(t *testing.T, f float64) {
+	if !isFinite(f) {
+		return
+	}
+	r := new(Rat).SetFloat64(f)
+	if r == nil {
+		t.Errorf("Rat.SetFloat64(%g (%b)) == nil", f, f)
+		return
+	}
+	f2, exact := r.Float64()
+	if f != f2 || !exact {
+		t.Errorf("Rat.SetFloat64(%g).Float64() = %g (%b), %v, want %g (%b), %v; delta = %b",
+			f, f2, f2, exact, f, f, true, f2-f)
+	}
+}
+
+// delta returns the absolute difference between r and f.
+func delta(r *Rat, f float64) *Rat {
+	d := new(Rat).Sub(r, new(Rat).SetFloat64(f))
+	return d.Abs(d)
+}
+
+// checkIsBestApprox32 checks that f is the best possible float32
+// approximation of r.
+// Returns true on success.
+func checkIsBestApprox32(t *testing.T, f float32, r *Rat) bool {
+	if math.Abs(float64(f)) >= math.MaxFloat32 {
+		// Cannot check +Inf, -Inf, nor the float next to them (MaxFloat32).
+		// But we have tests for these special cases.
+		return true
+	}
+
+	// r must be strictly between f0 and f1, the floats bracketing f.
+	f0 := math.Nextafter32(f, float32(math.Inf(-1)))
+	f1 := math.Nextafter32(f, float32(math.Inf(+1)))
+
+	// For f to be correct, r must be closer to f than to f0 or f1.
+	df := delta(r, float64(f))
+	df0 := delta(r, float64(f0))
+	df1 := delta(r, float64(f1))
+	if df.Cmp(df0) > 0 {
+		t.Errorf("Rat(%v).Float32() = %g (%b), but previous float32 %g (%b) is closer", r, f, f, f0, f0)
+		return false
+	}
+	if df.Cmp(df1) > 0 {
+		t.Errorf("Rat(%v).Float32() = %g (%b), but next float32 %g (%b) is closer", r, f, f, f1, f1)
+		return false
+	}
+	if df.Cmp(df0) == 0 && !isEven32(f) {
+		t.Errorf("Rat(%v).Float32() = %g (%b); halfway should have rounded to %g (%b) instead", r, f, f, f0, f0)
+		return false
+	}
+	if df.Cmp(df1) == 0 && !isEven32(f) {
+		t.Errorf("Rat(%v).Float32() = %g (%b); halfway should have rounded to %g (%b) instead", r, f, f, f1, f1)
+		return false
+	}
+	return true
+}
+
+// checkIsBestApprox64 checks that f is the best possible float64
+// approximation of r.
+// Returns true on success.
+func checkIsBestApprox64(t *testing.T, f float64, r *Rat) bool {
+	if math.Abs(f) >= math.MaxFloat64 {
+		// Cannot check +Inf, -Inf, nor the float next to them (MaxFloat64).
+		// But we have tests for these special cases.
+		return true
+	}
+
+	// r must be strictly between f0 and f1, the floats bracketing f.
+	f0 := math.Nextafter(f, math.Inf(-1))
+	f1 := math.Nextafter(f, math.Inf(+1))
+
+	// For f to be correct, r must be closer to f than to f0 or f1.
+	df := delta(r, f)
+	df0 := delta(r, f0)
+	df1 := delta(r, f1)
+	if df.Cmp(df0) > 0 {
+		t.Errorf("Rat(%v).Float64() = %g (%b), but previous float64 %g (%b) is closer", r, f, f, f0, f0)
+		return false
+	}
+	if df.Cmp(df1) > 0 {
+		t.Errorf("Rat(%v).Float64() = %g (%b), but next float64 %g (%b) is closer", r, f, f, f1, f1)
+		return false
+	}
+	if df.Cmp(df0) == 0 && !isEven64(f) {
+		t.Errorf("Rat(%v).Float64() = %g (%b); halfway should have rounded to %g (%b) instead", r, f, f, f0, f0)
+		return false
+	}
+	if df.Cmp(df1) == 0 && !isEven64(f) {
+		t.Errorf("Rat(%v).Float64() = %g (%b); halfway should have rounded to %g (%b) instead", r, f, f, f1, f1)
+		return false
+	}
+	return true
+}
+
+func isEven32(f float32) bool { return math.Float32bits(f)&1 == 0 }
+func isEven64(f float64) bool { return math.Float64bits(f)&1 == 0 }
+
+func TestIsFinite(t *testing.T) {
+	finites := []float64{
+		1.0 / 3,
+		4891559871276714924261e+222,
+		math.MaxFloat64,
+		math.SmallestNonzeroFloat64,
+		-math.MaxFloat64,
+		-math.SmallestNonzeroFloat64,
+	}
+	for _, f := range finites {
+		if !isFinite(f) {
+			t.Errorf("!IsFinite(%g (%b))", f, f)
+		}
+	}
+	nonfinites := []float64{
+		math.NaN(),
+		math.Inf(-1),
+		math.Inf(+1),
+	}
+	for _, f := range nonfinites {
+		if isFinite(f) {
+			t.Errorf("IsFinite(%g, (%b))", f, f)
+		}
+	}
+}
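
The Float32/Float64 distribution tests above hinge on the exactness flag
those methods return. A minimal standalone sketch of that reporting, using
the public math/big API rather than this vendored copy:

	package main

	import (
		"fmt"
		"math/big"
	)

	func main() {
		// 1/2 is exactly representable as a binary float; 1/3 is not.
		f, exact := big.NewRat(1, 2).Float64()
		fmt.Println(f, exact) // 0.5 true

		f, exact = big.NewRat(1, 3).Float64()
		fmt.Println(f, exact) // 0.3333333333333333 false
	}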
diff --git a/src/cmd/compile/internal/big/ratconv.go b/src/cmd/compile/internal/big/ratconv.go
new file mode 100644
index 0000000..778077b
--- /dev/null
+++ b/src/cmd/compile/internal/big/ratconv.go
@@ -0,0 +1,251 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements rat-to-string conversion functions.
+
+package big
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+)
+
+func ratTok(ch rune) bool {
+	return strings.IndexRune("+-/0123456789.eE", ch) >= 0
+}
+
+// Scan is a support routine for fmt.Scanner. It accepts the formats
+// 'e', 'E', 'f', 'F', 'g', 'G', and 'v'. All formats are equivalent.
+func (z *Rat) Scan(s fmt.ScanState, ch rune) error {
+	tok, err := s.Token(true, ratTok)
+	if err != nil {
+		return err
+	}
+	if strings.IndexRune("efgEFGv", ch) < 0 {
+		return errors.New("Rat.Scan: invalid verb")
+	}
+	if _, ok := z.SetString(string(tok)); !ok {
+		return errors.New("Rat.Scan: invalid syntax")
+	}
+	return nil
+}
+
+// SetString sets z to the value of s and returns z and a boolean indicating
+// success. s can be given as a fraction "a/b" or as a floating-point number
+// optionally followed by an exponent. If the operation failed, the value of
+// z is undefined but the returned value is nil.
+func (z *Rat) SetString(s string) (*Rat, bool) {
+	if len(s) == 0 {
+		return nil, false
+	}
+	// len(s) > 0
+
+	// parse fraction a/b, if any
+	if sep := strings.Index(s, "/"); sep >= 0 {
+		if _, ok := z.a.SetString(s[:sep], 0); !ok {
+			return nil, false
+		}
+		s = s[sep+1:]
+		var err error
+		if z.b.abs, _, _, err = z.b.abs.scan(strings.NewReader(s), 0, false); err != nil {
+			return nil, false
+		}
+		if len(z.b.abs) == 0 {
+			return nil, false
+		}
+		return z.norm(), true
+	}
+
+	// parse floating-point number
+	r := strings.NewReader(s)
+
+	// sign
+	neg, err := scanSign(r)
+	if err != nil {
+		return nil, false
+	}
+
+	// mantissa
+	var ecorr int
+	z.a.abs, _, ecorr, err = z.a.abs.scan(r, 10, true)
+	if err != nil {
+		return nil, false
+	}
+
+	// exponent
+	var exp int64
+	exp, _, err = scanExponent(r, false)
+	if err != nil {
+		return nil, false
+	}
+
+	// there should be no unread characters left
+	if _, err = r.ReadByte(); err != io.EOF {
+		return nil, false
+	}
+
+	// correct exponent
+	if ecorr < 0 {
+		exp += int64(ecorr)
+	}
+
+	// compute exponent power
+	expabs := exp
+	if expabs < 0 {
+		expabs = -expabs
+	}
+	powTen := nat(nil).expNN(natTen, nat(nil).setWord(Word(expabs)), nil)
+
+	// complete fraction
+	if exp < 0 {
+		z.b.abs = powTen
+		z.norm()
+	} else {
+		z.a.abs = z.a.abs.mul(z.a.abs, powTen)
+		z.b.abs = z.b.abs[:0]
+	}
+
+	z.a.neg = neg && len(z.a.abs) > 0 // 0 has no sign
+
+	return z, true
+}
+
+// scanExponent scans the longest possible prefix of r representing a decimal
+// ('e', 'E') or binary ('p') exponent, if any. It returns the exponent, the
+// exponent base (10 or 2), and a read or syntax error, if any.
+//
+//	exponent = ( "E" | "e" | "p" ) [ sign ] digits .
+//	sign     = "+" | "-" .
+//	digits   = digit { digit } .
+//	digit    = "0" ... "9" .
+//
+// A binary exponent is only permitted if binExpOk is set.
+func scanExponent(r io.ByteScanner, binExpOk bool) (exp int64, base int, err error) {
+	base = 10
+
+	var ch byte
+	if ch, err = r.ReadByte(); err != nil {
+		if err == io.EOF {
+			err = nil // no exponent; same as e0
+		}
+		return
+	}
+
+	switch ch {
+	case 'e', 'E':
+		// ok
+	case 'p':
+		if binExpOk {
+			base = 2
+			break // ok
+		}
+		fallthrough // binary exponent not permitted
+	default:
+		r.UnreadByte()
+		return // no exponent; same as e0
+	}
+
+	var neg bool
+	if neg, err = scanSign(r); err != nil {
+		return
+	}
+
+	var digits []byte
+	if neg {
+		digits = append(digits, '-')
+	}
+
+	// no need to use nat.scan for exponent digits
+	// since we only care about int64 values - the
+	// from-scratch scan is easy enough and faster
+	for i := 0; ; i++ {
+		if ch, err = r.ReadByte(); err != nil {
+			if err != io.EOF || i == 0 {
+				return
+			}
+			err = nil
+			break // i > 0
+		}
+		if ch < '0' || '9' < ch {
+			if i == 0 {
+				r.UnreadByte()
+				err = fmt.Errorf("invalid exponent (missing digits)")
+				return
+			}
+			break // i > 0
+		}
+		digits = append(digits, byte(ch))
+	}
+	// i > 0 => we have at least one digit
+
+	exp, err = strconv.ParseInt(string(digits), 10, 64)
+	return
+}
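+
+// For example: scanExponent(strings.NewReader("e10"), false) returns
+// (10, 10, nil); with binExpOk set, "p-5" returns (-5, 2, nil); a bare
+// "e" returns a non-nil error because its digits are missing.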
+
+// String returns a string representation of x in the form "a/b" (even if b == 1).
+func (x *Rat) String() string {
+	s := "/1"
+	if len(x.b.abs) != 0 {
+		s = "/" + x.b.abs.decimalString()
+	}
+	return x.a.String() + s
+}
+
+// RatString returns a string representation of x in the form "a/b" if b != 1,
+// and in the form "a" if b == 1.
+func (x *Rat) RatString() string {
+	if x.IsInt() {
+		return x.a.String()
+	}
+	return x.String()
+}
+
+// FloatString returns a string representation of x in decimal form with prec
+// digits of precision after the decimal point and the last digit rounded.
+func (x *Rat) FloatString(prec int) string {
+	if x.IsInt() {
+		s := x.a.String()
+		if prec > 0 {
+			s += "." + strings.Repeat("0", prec)
+		}
+		return s
+	}
+	// x.b.abs != 0
+
+	q, r := nat(nil).div(nat(nil), x.a.abs, x.b.abs)
+
+	p := natOne
+	if prec > 0 {
+		p = nat(nil).expNN(natTen, nat(nil).setUint64(uint64(prec)), nil)
+	}
+
+	r = r.mul(r, p)
+	r, r2 := r.div(nat(nil), r, x.b.abs)
+
+	// see if we need to round up
+	r2 = r2.add(r2, r2)
+	if x.b.abs.cmp(r2) <= 0 {
+		r = r.add(r, natOne)
+		if r.cmp(p) >= 0 {
+			q = nat(nil).add(q, natOne)
+			r = nat(nil).sub(r, p)
+		}
+	}
+
+	s := q.decimalString()
+	if x.a.neg {
+		s = "-" + s
+	}
+
+	if prec > 0 {
+		rs := r.decimalString()
+		leadingZeros := prec - len(rs)
+		s += "." + strings.Repeat("0", leadingZeros) + rs
+	}
+
+	return s
+}
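
Taken together, the three formatters above behave like this (a sketch against
the public math/big API; the expected outputs follow from the FloatString
rounding rule and the test table in ratconv_test.go):

	package main

	import (
		"fmt"
		"math/big"
	)

	func main() {
		r, ok := new(big.Rat).SetString("-2/3")
		if !ok {
			return
		}
		fmt.Println(r.String())       // -2/3
		fmt.Println(r.RatString())    // -2/3
		fmt.Println(r.FloatString(4)) // -0.6667 (last digit rounded)
	}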
diff --git a/src/cmd/compile/internal/big/ratconv_test.go b/src/cmd/compile/internal/big/ratconv_test.go
new file mode 100644
index 0000000..16b3a19
--- /dev/null
+++ b/src/cmd/compile/internal/big/ratconv_test.go
@@ -0,0 +1,451 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package big
+
+import (
+	"bytes"
+	"fmt"
+	"math"
+	"strconv"
+	"strings"
+	"testing"
+)
+
+type StringTest struct {
+	in, out string
+	ok      bool
+}
+
+var setStringTests = []StringTest{
+	{"0", "0", true},
+	{"-0", "0", true},
+	{"1", "1", true},
+	{"-1", "-1", true},
+	{"1.", "1", true},
+	{"1e0", "1", true},
+	{"1.e1", "10", true},
+	{in: "1e"},
+	{in: "1.e"},
+	{in: "1e+14e-5"},
+	{in: "1e4.5"},
+	{in: "r"},
+	{in: "a/b"},
+	{in: "a.b"},
+	{"-0.1", "-1/10", true},
+	{"-.1", "-1/10", true},
+	{"2/4", "1/2", true},
+	{".25", "1/4", true},
+	{"-1/5", "-1/5", true},
+	{"8129567.7690E14", "812956776900000000000", true},
+	{"78189e+4", "781890000", true},
+	{"553019.8935e+8", "55301989350000", true},
+	{"98765432109876543210987654321e-10", "98765432109876543210987654321/10000000000", true},
+	{"9877861857500000E-7", "3951144743/4", true},
+	{"2169378.417e-3", "2169378417/1000000", true},
+	{"884243222337379604041632732738665534", "884243222337379604041632732738665534", true},
+	{"53/70893980658822810696", "53/70893980658822810696", true},
+	{"106/141787961317645621392", "53/70893980658822810696", true},
+	{"204211327800791583.81095", "4084226556015831676219/20000", true},
+	{in: "1/0"},
+}
+
+// These are not supported by fmt.Fscanf.
+var setStringTests2 = []StringTest{
+	{"0x10", "16", true},
+	{"-010/1", "-8", true}, // TODO(gri) should we even permit octal here?
+	{"-010.", "-10", true},
+	{"0x10/0x20", "1/2", true},
+	{"0b1000/3", "8/3", true},
+	// TODO(gri) add more tests
+}
+
+func TestRatSetString(t *testing.T) {
+	var tests []StringTest
+	tests = append(tests, setStringTests...)
+	tests = append(tests, setStringTests2...)
+
+	for i, test := range tests {
+		x, ok := new(Rat).SetString(test.in)
+
+		if ok {
+			if !test.ok {
+				t.Errorf("#%d SetString(%q) expected failure", i, test.in)
+			} else if x.RatString() != test.out {
+				t.Errorf("#%d SetString(%q) got %s want %s", i, test.in, x.RatString(), test.out)
+			}
+		} else if x != nil {
+			t.Errorf("#%d SetString(%q) got %p want nil", i, test.in, x)
+		}
+	}
+}
+
+func TestRatScan(t *testing.T) {
+	var buf bytes.Buffer
+	for i, test := range setStringTests {
+		x := new(Rat)
+		buf.Reset()
+		buf.WriteString(test.in)
+
+		_, err := fmt.Fscanf(&buf, "%v", x)
+		if err == nil != test.ok {
+			if test.ok {
+				t.Errorf("#%d (%s) error: %s", i, test.in, err)
+			} else {
+				t.Errorf("#%d (%s) expected error", i, test.in)
+			}
+			continue
+		}
+		if err == nil && x.RatString() != test.out {
+			t.Errorf("#%d got %s want %s", i, x.RatString(), test.out)
+		}
+	}
+}
+
+var floatStringTests = []struct {
+	in   string
+	prec int
+	out  string
+}{
+	{"0", 0, "0"},
+	{"0", 4, "0.0000"},
+	{"1", 0, "1"},
+	{"1", 2, "1.00"},
+	{"-1", 0, "-1"},
+	{".25", 2, "0.25"},
+	{".25", 1, "0.3"},
+	{".25", 3, "0.250"},
+	{"-1/3", 3, "-0.333"},
+	{"-2/3", 4, "-0.6667"},
+	{"0.96", 1, "1.0"},
+	{"0.999", 2, "1.00"},
+	{"0.9", 0, "1"},
+	{".25", -1, "0"},
+	{".55", -1, "1"},
+}
+
+func TestFloatString(t *testing.T) {
+	for i, test := range floatStringTests {
+		x, _ := new(Rat).SetString(test.in)
+
+		if x.FloatString(test.prec) != test.out {
+			t.Errorf("#%d got %s want %s", i, x.FloatString(test.prec), test.out)
+		}
+	}
+}
+
+// Test inputs to Rat.SetString.  The prefix "long:" causes the test
+// to be skipped in --test.short mode.  (The threshold is about 500us.)
+var float64inputs = []string{
+	// Constants plundered from strconv/testfp.txt.
+
+	// Table 1: Stress Inputs for Conversion to 53-bit Binary, < 1/2 ULP
+	"5e+125",
+	"69e+267",
+	"999e-026",
+	"7861e-034",
+	"75569e-254",
+	"928609e-261",
+	"9210917e+080",
+	"84863171e+114",
+	"653777767e+273",
+	"5232604057e-298",
+	"27235667517e-109",
+	"653532977297e-123",
+	"3142213164987e-294",
+	"46202199371337e-072",
+	"231010996856685e-073",
+	"9324754620109615e+212",
+	"78459735791271921e+049",
+	"272104041512242479e+200",
+	"6802601037806061975e+198",
+	"20505426358836677347e-221",
+	"836168422905420598437e-234",
+	"4891559871276714924261e+222",
+
+	// Table 2: Stress Inputs for Conversion to 53-bit Binary, > 1/2 ULP
+	"9e-265",
+	"85e-037",
+	"623e+100",
+	"3571e+263",
+	"81661e+153",
+	"920657e-023",
+	"4603285e-024",
+	"87575437e-309",
+	"245540327e+122",
+	"6138508175e+120",
+	"83356057653e+193",
+	"619534293513e+124",
+	"2335141086879e+218",
+	"36167929443327e-159",
+	"609610927149051e-255",
+	"3743626360493413e-165",
+	"94080055902682397e-242",
+	"899810892172646163e+283",
+	"7120190517612959703e+120",
+	"25188282901709339043e-252",
+	"308984926168550152811e-052",
+	"6372891218502368041059e+064",
+
+	// Table 14: Stress Inputs for Conversion to 24-bit Binary, <1/2 ULP
+	"5e-20",
+	"67e+14",
+	"985e+15",
+	"7693e-42",
+	"55895e-16",
+	"996622e-44",
+	"7038531e-32",
+	"60419369e-46",
+	"702990899e-20",
+	"6930161142e-48",
+	"25933168707e+13",
+	"596428896559e+20",
+
+	// Table 15: Stress Inputs for Conversion to 24-bit Binary, >1/2 ULP
+	"3e-23",
+	"57e+18",
+	"789e-35",
+	"2539e-18",
+	"76173e+28",
+	"887745e-11",
+	"5382571e-37",
+	"82381273e-35",
+	"750486563e-38",
+	"3752432815e-39",
+	"75224575729e-45",
+	"459926601011e+15",
+
+	// Constants plundered from strconv/atof_test.go.
+
+	"0",
+	"1",
+	"+1",
+	"1e23",
+	"1E23",
+	"100000000000000000000000",
+	"1e-100",
+	"123456700",
+	"99999999999999974834176",
+	"100000000000000000000001",
+	"100000000000000008388608",
+	"100000000000000016777215",
+	"100000000000000016777216",
+	"-1",
+	"-0.1",
+	"-0", // NB: exception made for this input
+	"1e-20",
+	"625e-3",
+
+	// largest float64
+	"1.7976931348623157e308",
+	"-1.7976931348623157e308",
+	// next float64 - too large
+	"1.7976931348623159e308",
+	"-1.7976931348623159e308",
+	// the border is ...158079
+	// borderline - okay
+	"1.7976931348623158e308",
+	"-1.7976931348623158e308",
+	// borderline - too large
+	"1.797693134862315808e308",
+	"-1.797693134862315808e308",
+
+	// a little too large
+	"1e308",
+	"2e308",
+	"1e309",
+
+	// way too large
+	"1e310",
+	"-1e310",
+	"1e400",
+	"-1e400",
+	"long:1e400000",
+	"long:-1e400000",
+
+	// denormalized
+	"1e-305",
+	"1e-306",
+	"1e-307",
+	"1e-308",
+	"1e-309",
+	"1e-310",
+	"1e-322",
+	// smallest denormal
+	"5e-324",
+	"4e-324",
+	"3e-324",
+	// too small
+	"2e-324",
+	// way too small
+	"1e-350",
+	"long:1e-400000",
+	// way too small, negative
+	"-1e-350",
+	"long:-1e-400000",
+
+	// try to overflow exponent
+	// [Disabled: too slow and memory-hungry with rationals.]
+	// "1e-4294967296",
+	// "1e+4294967296",
+	// "1e-18446744073709551616",
+	// "1e+18446744073709551616",
+
+	// http://www.exploringbinary.com/java-hangs-when-converting-2-2250738585072012e-308/
+	"2.2250738585072012e-308",
+	// http://www.exploringbinary.com/php-hangs-on-numeric-value-2-2250738585072011e-308/
+	"2.2250738585072011e-308",
+
+	// A very large number (initially wrongly parsed by the fast algorithm).
+	"4.630813248087435e+307",
+
+	// A different kind of very large number.
+	"22.222222222222222",
+	"long:2." + strings.Repeat("2", 4000) + "e+1",
+
+	// Exactly halfway between 1 and math.Nextafter(1, 2).
+	// Round to even (down).
+	"1.00000000000000011102230246251565404236316680908203125",
+	// Slightly lower; still round down.
+	"1.00000000000000011102230246251565404236316680908203124",
+	// Slightly higher; round up.
+	"1.00000000000000011102230246251565404236316680908203126",
+	// Slightly higher, but you have to read all the way to the end.
+	"long:1.00000000000000011102230246251565404236316680908203125" + strings.Repeat("0", 10000) + "1",
+
+	// Smallest denormal, 2^(-1022-52)
+	"4.940656458412465441765687928682213723651e-324",
+	// Half of smallest denormal, 2^(-1022-53)
+	"2.470328229206232720882843964341106861825e-324",
+	// A little more than the exact half of smallest denormal
+	// 2^-1075 + 2^-1100.  (Rounds to 1p-1074.)
+	"2.470328302827751011111470718709768633275e-324",
+	// The exact halfway between smallest normal and largest denormal:
+	// 2^-1022 - 2^-1075.  (Rounds to 2^-1022.)
+	"2.225073858507201136057409796709131975935e-308",
+
+	"1152921504606846975",  //   1<<60 - 1
+	"-1152921504606846975", // -(1<<60 - 1)
+	"1152921504606846977",  //   1<<60 + 1
+	"-1152921504606846977", // -(1<<60 + 1)
+
+	"1/3",
+}
+
+// isFinite reports whether f represents a finite rational value.
+// It is equivalent to !math.IsNaN(f) && !math.IsInf(f, 0).
+func isFinite(f float64) bool {
+	return math.Abs(f) <= math.MaxFloat64
+}
+
+func TestFloat32SpecialCases(t *testing.T) {
+	for _, input := range float64inputs {
+		if strings.HasPrefix(input, "long:") {
+			if testing.Short() {
+				continue
+			}
+			input = input[len("long:"):]
+		}
+
+		r, ok := new(Rat).SetString(input)
+		if !ok {
+			t.Errorf("Rat.SetString(%q) failed", input)
+			continue
+		}
+		f, exact := r.Float32()
+
+		// 1. Check string -> Rat -> float32 conversions are
+		// consistent with strconv.ParseFloat.
+		// Skip this check if the input uses "a/b" rational syntax.
+		if !strings.Contains(input, "/") {
+			e64, _ := strconv.ParseFloat(input, 32)
+			e := float32(e64)
+
+			// Careful: negative Rats too small for
+			// float64 become -0, but Rat obviously cannot
+			// preserve the sign from SetString("-0").
+			switch {
+			case math.Float32bits(e) == math.Float32bits(f):
+				// Ok: bitwise equal.
+			case f == 0 && r.Num().BitLen() == 0:
+				// Ok: Rat(0) is equivalent to both +/- float64(0).
+			default:
+				t.Errorf("strconv.ParseFloat(%q) = %g (%b), want %g (%b); delta = %g", input, e, e, f, f, f-e)
+			}
+		}
+
+		if !isFinite(float64(f)) {
+			continue
+		}
+
+		// 2. Check f is best approximation to r.
+		if !checkIsBestApprox32(t, f, r) {
+			// Append context information.
+			t.Errorf("(input was %q)", input)
+		}
+
+		// 3. Check f->R->f roundtrip is non-lossy.
+		checkNonLossyRoundtrip32(t, f)
+
+		// 4. Check exactness using slow algorithm.
+		if wasExact := new(Rat).SetFloat64(float64(f)).Cmp(r) == 0; wasExact != exact {
+			t.Errorf("Rat.SetString(%q).Float32().exact = %t, want %t", input, exact, wasExact)
+		}
+	}
+}
+
+func TestFloat64SpecialCases(t *testing.T) {
+	for _, input := range float64inputs {
+		if strings.HasPrefix(input, "long:") {
+			if testing.Short() {
+				continue
+			}
+			input = input[len("long:"):]
+		}
+
+		r, ok := new(Rat).SetString(input)
+		if !ok {
+			t.Errorf("Rat.SetString(%q) failed", input)
+			continue
+		}
+		f, exact := r.Float64()
+
+		// 1. Check string -> Rat -> float64 conversions are
+		// consistent with strconv.ParseFloat.
+		// Skip this check if the input uses "a/b" rational syntax.
+		if !strings.Contains(input, "/") {
+			e, _ := strconv.ParseFloat(input, 64)
+
+			// Careful: negative Rats too small for
+			// float64 become -0, but Rat obviously cannot
+			// preserve the sign from SetString("-0").
+			switch {
+			case math.Float64bits(e) == math.Float64bits(f):
+				// Ok: bitwise equal.
+			case f == 0 && r.Num().BitLen() == 0:
+				// Ok: Rat(0) is equivalent to both +/- float64(0).
+			default:
+				t.Errorf("strconv.ParseFloat(%q) = %g (%b), want %g (%b); delta = %g", input, e, e, f, f, f-e)
+			}
+		}
+
+		if !isFinite(f) {
+			continue
+		}
+
+		// 2. Check f is best approximation to r.
+		if !checkIsBestApprox64(t, f, r) {
+			// Append context information.
+			t.Errorf("(input was %q)", input)
+		}
+
+		// 3. Check f->R->f roundtrip is non-lossy.
+		checkNonLossyRoundtrip64(t, f)
+
+		// 4. Check exactness using slow algorithm.
+		if wasExact := new(Rat).SetFloat64(f).Cmp(r) == 0; wasExact != exact {
+			t.Errorf("Rat.SetString(%q).Float64().exact = %t, want %t", input, exact, wasExact)
+		}
+	}
+}
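
Because Rat implements fmt.Scanner (see Scan in ratconv.go above), the same
inputs these tests feed through fmt.Fscanf can be read directly; a minimal
sketch:

	package main

	import (
		"fmt"
		"math/big"
	)

	func main() {
		var r big.Rat
		if _, err := fmt.Sscanf("553019.8935e+8", "%v", &r); err == nil {
			fmt.Println(r.RatString()) // 55301989350000
		}
	}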
diff --git a/src/cmd/compile/internal/big/roundingmode_string.go b/src/cmd/compile/internal/big/roundingmode_string.go
new file mode 100644
index 0000000..05024b8
--- /dev/null
+++ b/src/cmd/compile/internal/big/roundingmode_string.go
@@ -0,0 +1,16 @@
+// generated by stringer -type=RoundingMode; DO NOT EDIT
+
+package big
+
+import "fmt"
+
+const _RoundingMode_name = "ToNearestEvenToNearestAwayToZeroAwayFromZeroToNegativeInfToPositiveInf"
+
+var _RoundingMode_index = [...]uint8{0, 13, 26, 32, 44, 57, 70}
+
+func (i RoundingMode) String() string {
+	if i+1 >= RoundingMode(len(_RoundingMode_index)) {
+		return fmt.Sprintf("RoundingMode(%d)", i)
+	}
+	return _RoundingMode_name[_RoundingMode_index[i]:_RoundingMode_index[i+1]]
+}
diff --git a/src/cmd/compile/internal/big/vendor.bash b/src/cmd/compile/internal/big/vendor.bash
new file mode 100755
index 0000000..84aa750
--- /dev/null
+++ b/src/cmd/compile/internal/big/vendor.bash
@@ -0,0 +1,25 @@
+#!/usr/bin/env bash
+
+# Copyright 2015 The Go Authors.  All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# Run this script to obtain an up-to-date vendored version of math/big.
+
+BIGDIR=../../../../math/big
+
+# Start from scratch.
+rm *.go
+
+# We don't want any assembly files.
+cp $BIGDIR/*.go .
+
+# Use pure Go arith ops w/o build tag.
+sed 's/^\/\/ \+build math_big_pure_go$//' arith_decl_pure.go > arith_decl.go
+rm arith_decl_pure.go
+
+# gofmt to clean up after sed
+gofmt -w .
+
+# Test that it works
+go test -short
diff --git a/src/cmd/compile/internal/gc/align.go b/src/cmd/compile/internal/gc/align.go
new file mode 100644
index 0000000..789e59b
--- /dev/null
+++ b/src/cmd/compile/internal/gc/align.go
@@ -0,0 +1,706 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import "cmd/internal/obj"
+
+/*
+ * machine size and rounding;
+ * alignment is dictated by
+ * the size of a pointer, set in betypeinit
+ * (see ../6g/galign.c).
+ */
+var defercalc int
+
+func Rnd(o int64, r int64) int64 {
+	if r < 1 || r > 8 || r&(r-1) != 0 {
+		Fatal("rnd %d", r)
+	}
+	return (o + r - 1) &^ (r - 1)
+}
+
+func offmod(t *Type) {
+	o := int32(0)
+	for f := t.Type; f != nil; f = f.Down {
+		if f.Etype != TFIELD {
+			Fatal("offmod: not TFIELD: %v", Tconv(f, obj.FmtLong))
+		}
+		f.Width = int64(o)
+		o += int32(Widthptr)
+		if int64(o) >= Thearch.MAXWIDTH {
+			Yyerror("interface too large")
+			o = int32(Widthptr)
+		}
+	}
+}
+
+func widstruct(errtype *Type, t *Type, o int64, flag int) int64 {
+	starto := o
+	maxalign := int32(flag)
+	if maxalign < 1 {
+		maxalign = 1
+	}
+	lastzero := int64(0)
+	var w int64
+	for f := t.Type; f != nil; f = f.Down {
+		if f.Etype != TFIELD {
+			Fatal("widstruct: not TFIELD: %v", Tconv(f, obj.FmtLong))
+		}
+		if f.Type == nil {
+			// broken field, just skip it so that other valid fields
+			// get a width.
+			continue
+		}
+
+		dowidth(f.Type)
+		if int32(f.Type.Align) > maxalign {
+			maxalign = int32(f.Type.Align)
+		}
+		if f.Type.Width < 0 {
+			Fatal("invalid width %d", f.Type.Width)
+		}
+		w = f.Type.Width
+		if f.Type.Align > 0 {
+			o = Rnd(o, int64(f.Type.Align))
+		}
+		f.Width = o // really offset for TFIELD
+		if f.Nname != nil {
+			// this same stackparam logic is in addrescapes
+			// in typecheck.c.  usually addrescapes runs after
+			// widstruct, in which case we could drop this,
+			// but function closure functions are the exception.
+			if f.Nname.Stackparam != nil {
+				f.Nname.Stackparam.Xoffset = o
+				f.Nname.Xoffset = 0
+			} else {
+				f.Nname.Xoffset = o
+			}
+		}
+
+		if w == 0 {
+			lastzero = o
+		}
+		o += w
+		if o >= Thearch.MAXWIDTH {
+			Yyerror("type %v too large", Tconv(errtype, obj.FmtLong))
+			o = 8 // small but nonzero
+		}
+	}
+
+	// For nonzero-sized structs which end in a zero-sized thing, we add
+	// an extra byte of padding to the type.  This padding ensures that
+	// taking the address of the zero-sized thing can't manufacture a
+	// pointer to the next object in the heap.  See issue 9401.
+	if flag == 1 && o > starto && o == lastzero {
+		o++
+	}
+
+	// final width is rounded
+	if flag != 0 {
+		o = Rnd(o, int64(maxalign))
+	}
+	t.Align = uint8(maxalign)
+
+	// type width only includes back to first field's offset
+	t.Width = o - starto
+
+	return o
+}
+
+func dowidth(t *Type) {
+	if Widthptr == 0 {
+		Fatal("dowidth without betypeinit")
+	}
+
+	if t == nil {
+		return
+	}
+
+	if t.Width > 0 {
+		return
+	}
+
+	if t.Width == -2 {
+		lno := int(lineno)
+		lineno = int32(t.Lineno)
+		if t.Broke == 0 {
+			t.Broke = 1
+			Yyerror("invalid recursive type %v", t)
+		}
+
+		t.Width = 0
+		lineno = int32(lno)
+		return
+	}
+
+	// break infinite recursion if the broken recursive type
+	// is referenced again
+	if t.Broke != 0 && t.Width == 0 {
+		return
+	}
+
+	// defer checkwidth calls until after we're done
+	defercalc++
+
+	lno := int(lineno)
+	lineno = int32(t.Lineno)
+	t.Width = -2
+	t.Align = 0
+
+	et := int32(t.Etype)
+	switch et {
+	case TFUNC, TCHAN, TMAP, TSTRING:
+		break
+
+		/* simtype == 0 during bootstrap */
+	default:
+		if Simtype[t.Etype] != 0 {
+			et = int32(Simtype[t.Etype])
+		}
+	}
+
+	w := int64(0)
+	switch et {
+	default:
+		Fatal("dowidth: unknown type: %v", t)
+
+		/* compiler-specific stuff */
+	case TINT8, TUINT8, TBOOL:
+		// bool is int8
+		w = 1
+
+	case TINT16, TUINT16:
+		w = 2
+
+	case TINT32, TUINT32, TFLOAT32:
+		w = 4
+
+	case TINT64, TUINT64, TFLOAT64, TCOMPLEX64:
+		w = 8
+		t.Align = uint8(Widthreg)
+
+	case TCOMPLEX128:
+		w = 16
+		t.Align = uint8(Widthreg)
+
+	case TPTR32:
+		w = 4
+		checkwidth(t.Type)
+
+	case TPTR64:
+		w = 8
+		checkwidth(t.Type)
+
+	case TUNSAFEPTR:
+		w = int64(Widthptr)
+
+	case TINTER: // implemented as 2 pointers
+		w = 2 * int64(Widthptr)
+
+		t.Align = uint8(Widthptr)
+		offmod(t)
+
+	case TCHAN: // implemented as pointer
+		w = int64(Widthptr)
+
+		checkwidth(t.Type)
+
+		// make fake type to check later to
+		// trigger channel argument check.
+		t1 := typ(TCHANARGS)
+
+		t1.Type = t
+		checkwidth(t1)
+
+	case TCHANARGS:
+		t1 := t.Type
+		dowidth(t.Type) // just in case
+		if t1.Type.Width >= 1<<16 {
+			Yyerror("channel element type too large (>64kB)")
+		}
+		t.Width = 1
+
+	case TMAP: // implemented as pointer
+		w = int64(Widthptr)
+
+		checkwidth(t.Type)
+		checkwidth(t.Down)
+
+	case TFORW: // should have been filled in
+		if t.Broke == 0 {
+			Yyerror("invalid recursive type %v", t)
+		}
+		w = 1 // anything will do
+
+		// dummy type; should be replaced before use.
+	case TANY:
+		if Debug['A'] == 0 {
+			Fatal("dowidth any")
+		}
+		w = 1 // anything will do
+
+	case TSTRING:
+		if sizeof_String == 0 {
+			Fatal("early dowidth string")
+		}
+		w = int64(sizeof_String)
+		t.Align = uint8(Widthptr)
+
+	case TARRAY:
+		if t.Type == nil {
+			break
+		}
+		if t.Bound >= 0 {
+			dowidth(t.Type)
+			if t.Type.Width != 0 {
+				cap := (uint64(Thearch.MAXWIDTH) - 1) / uint64(t.Type.Width)
+				if uint64(t.Bound) > cap {
+					Yyerror("type %v larger than address space", Tconv(t, obj.FmtLong))
+				}
+			}
+
+			w = t.Bound * t.Type.Width
+			t.Align = t.Type.Align
+		} else if t.Bound == -1 {
+			w = int64(sizeof_Array)
+			checkwidth(t.Type)
+			t.Align = uint8(Widthptr)
+		} else if t.Bound == -100 {
+			if t.Broke == 0 {
+				Yyerror("use of [...] array outside of array literal")
+				t.Broke = 1
+			}
+		} else {
+			Fatal("dowidth %v", t) // probably [...]T
+		}
+
+	case TSTRUCT:
+		if t.Funarg != 0 {
+			Fatal("dowidth fn struct %v", t)
+		}
+		w = widstruct(t, t, 0, 1)
+
+		// make fake type to check later to
+	// trigger function argument computation.
+	case TFUNC:
+		t1 := typ(TFUNCARGS)
+
+		t1.Type = t
+		checkwidth(t1)
+
+		// width of func type is pointer
+		w = int64(Widthptr)
+
+		// function is 3 concatenated structures;
+	// compute their widths as side-effect.
+	case TFUNCARGS:
+		t1 := t.Type
+
+		w = widstruct(t.Type, *getthis(t1), 0, 0)
+		w = widstruct(t.Type, *getinarg(t1), w, Widthreg)
+		w = widstruct(t.Type, *Getoutarg(t1), w, Widthreg)
+		t1.Argwid = w
+		if w%int64(Widthreg) != 0 {
+			Warn("bad type %v %d\n", t1, w)
+		}
+		t.Align = 1
+	}
+
+	if Widthptr == 4 && w != int64(int32(w)) {
+		Yyerror("type %v too large", t)
+	}
+
+	t.Width = w
+	if t.Align == 0 {
+		if w > 8 || w&(w-1) != 0 {
+			Fatal("invalid alignment for %v", t)
+		}
+		t.Align = uint8(w)
+	}
+
+	lineno = int32(lno)
+
+	if defercalc == 1 {
+		resumecheckwidth()
+	} else {
+		defercalc--
+	}
+}
+
+/*
+ * when a type's width should be known, we call checkwidth
+ * to compute it.  during a declaration like
+ *
+ *	type T *struct { next T }
+ *
+ * it is necessary to defer the calculation of the struct width
+ * until after T has been initialized to be a pointer to that struct.
+ * similarly, during import processing structs may be used
+ * before their definition.  in those situations, calling
+ * defercheckwidth() stops width calculations until
+ * resumecheckwidth() is called, at which point all the
+ * checkwidths that were deferred are executed.
+ * dowidth should only be called when the type's size
+ * is needed immediately.  checkwidth makes sure the
+ * size is evaluated eventually.
+ */
+type TypeList struct {
+	t    *Type
+	next *TypeList
+}
+
+var tlfree *TypeList
+
+var tlq *TypeList
+
+func checkwidth(t *Type) {
+	if t == nil {
+		return
+	}
+
+	// function arg structs should not be checked
+	// outside of the enclosing function.
+	if t.Funarg != 0 {
+		Fatal("checkwidth %v", t)
+	}
+
+	if defercalc == 0 {
+		dowidth(t)
+		return
+	}
+
+	if t.Deferwidth != 0 {
+		return
+	}
+	t.Deferwidth = 1
+
+	l := tlfree
+	if l != nil {
+		tlfree = l.next
+	} else {
+		l = new(TypeList)
+	}
+
+	l.t = t
+	l.next = tlq
+	tlq = l
+}
+
+func defercheckwidth() {
+	// we get out of sync on syntax errors, so don't be pedantic.
+	if defercalc != 0 && nerrors == 0 {
+		Fatal("defercheckwidth")
+	}
+	defercalc = 1
+}
+
+func resumecheckwidth() {
+	if defercalc == 0 {
+		Fatal("resumecheckwidth")
+	}
+	for l := tlq; l != nil; l = tlq {
+		l.t.Deferwidth = 0
+		tlq = l.next
+		dowidth(l.t)
+		l.next = tlfree
+		tlfree = l
+	}
+
+	defercalc = 0
+}
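+
+// For example, while parsing "type T *struct{ next T }" the parser brackets
+// the declaration with defercheckwidth/resumecheckwidth: checkwidth on the
+// struct merely queues it, and dowidth runs at resume time, once T is known
+// to be a pointer to that struct.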
+
+var itable *Type // distinguished *byte
+
+func typeinit() {
+	if Widthptr == 0 {
+		Fatal("typeinit before betypeinit")
+	}
+
+	for i := 0; i < NTYPE; i++ {
+		Simtype[i] = uint8(i)
+	}
+
+	Types[TPTR32] = typ(TPTR32)
+	dowidth(Types[TPTR32])
+
+	Types[TPTR64] = typ(TPTR64)
+	dowidth(Types[TPTR64])
+
+	t := typ(TUNSAFEPTR)
+	Types[TUNSAFEPTR] = t
+	t.Sym = Pkglookup("Pointer", unsafepkg)
+	t.Sym.Def = typenod(t)
+
+	dowidth(Types[TUNSAFEPTR])
+
+	Tptr = TPTR32
+	if Widthptr == 8 {
+		Tptr = TPTR64
+	}
+
+	for i := TINT8; i <= TUINT64; i++ {
+		Isint[i] = true
+	}
+	Isint[TINT] = true
+	Isint[TUINT] = true
+	Isint[TUINTPTR] = true
+
+	Isfloat[TFLOAT32] = true
+	Isfloat[TFLOAT64] = true
+
+	Iscomplex[TCOMPLEX64] = true
+	Iscomplex[TCOMPLEX128] = true
+
+	Isptr[TPTR32] = true
+	Isptr[TPTR64] = true
+
+	isforw[TFORW] = true
+
+	Issigned[TINT] = true
+	Issigned[TINT8] = true
+	Issigned[TINT16] = true
+	Issigned[TINT32] = true
+	Issigned[TINT64] = true
+
+	/*
+	 * initialize okfor
+	 */
+	for i := 0; i < NTYPE; i++ {
+		if Isint[i] || i == TIDEAL {
+			okforeq[i] = true
+			okforcmp[i] = true
+			okforarith[i] = true
+			okforadd[i] = true
+			okforand[i] = true
+			okforconst[i] = true
+			issimple[i] = true
+			Minintval[i] = new(Mpint)
+			Maxintval[i] = new(Mpint)
+		}
+
+		if Isfloat[i] {
+			okforeq[i] = true
+			okforcmp[i] = true
+			okforadd[i] = true
+			okforarith[i] = true
+			okforconst[i] = true
+			issimple[i] = true
+			minfltval[i] = newMpflt()
+			maxfltval[i] = newMpflt()
+		}
+
+		if Iscomplex[i] {
+			okforeq[i] = true
+			okforadd[i] = true
+			okforarith[i] = true
+			okforconst[i] = true
+			issimple[i] = true
+		}
+	}
+
+	issimple[TBOOL] = true
+
+	okforadd[TSTRING] = true
+
+	okforbool[TBOOL] = true
+
+	okforcap[TARRAY] = true
+	okforcap[TCHAN] = true
+
+	okforconst[TBOOL] = true
+	okforconst[TSTRING] = true
+
+	okforlen[TARRAY] = true
+	okforlen[TCHAN] = true
+	okforlen[TMAP] = true
+	okforlen[TSTRING] = true
+
+	okforeq[TPTR32] = true
+	okforeq[TPTR64] = true
+	okforeq[TUNSAFEPTR] = true
+	okforeq[TINTER] = true
+	okforeq[TCHAN] = true
+	okforeq[TSTRING] = true
+	okforeq[TBOOL] = true
+	okforeq[TMAP] = true    // nil only; refined in typecheck
+	okforeq[TFUNC] = true   // nil only; refined in typecheck
+	okforeq[TARRAY] = true  // nil slice only; refined in typecheck
+	okforeq[TSTRUCT] = true // it's complicated; refined in typecheck
+
+	okforcmp[TSTRING] = true
+
+	var i int
+	for i = 0; i < len(okfor); i++ {
+		okfor[i] = okfornone[:]
+	}
+
+	// binary
+	okfor[OADD] = okforadd[:]
+
+	okfor[OAND] = okforand[:]
+	okfor[OANDAND] = okforbool[:]
+	okfor[OANDNOT] = okforand[:]
+	okfor[ODIV] = okforarith[:]
+	okfor[OEQ] = okforeq[:]
+	okfor[OGE] = okforcmp[:]
+	okfor[OGT] = okforcmp[:]
+	okfor[OLE] = okforcmp[:]
+	okfor[OLT] = okforcmp[:]
+	okfor[OMOD] = okforand[:]
+	okfor[OMUL] = okforarith[:]
+	okfor[ONE] = okforeq[:]
+	okfor[OOR] = okforand[:]
+	okfor[OOROR] = okforbool[:]
+	okfor[OSUB] = okforarith[:]
+	okfor[OXOR] = okforand[:]
+	okfor[OLSH] = okforand[:]
+	okfor[ORSH] = okforand[:]
+
+	// unary
+	okfor[OCOM] = okforand[:]
+
+	okfor[OMINUS] = okforarith[:]
+	okfor[ONOT] = okforbool[:]
+	okfor[OPLUS] = okforarith[:]
+
+	// special
+	okfor[OCAP] = okforcap[:]
+
+	okfor[OLEN] = okforlen[:]
+
+	// comparison
+	iscmp[OLT] = true
+
+	iscmp[OGT] = true
+	iscmp[OGE] = true
+	iscmp[OLE] = true
+	iscmp[OEQ] = true
+	iscmp[ONE] = true
+
+	mpatofix(Maxintval[TINT8], "0x7f")
+	mpatofix(Minintval[TINT8], "-0x80")
+	mpatofix(Maxintval[TINT16], "0x7fff")
+	mpatofix(Minintval[TINT16], "-0x8000")
+	mpatofix(Maxintval[TINT32], "0x7fffffff")
+	mpatofix(Minintval[TINT32], "-0x80000000")
+	mpatofix(Maxintval[TINT64], "0x7fffffffffffffff")
+	mpatofix(Minintval[TINT64], "-0x8000000000000000")
+
+	mpatofix(Maxintval[TUINT8], "0xff")
+	mpatofix(Maxintval[TUINT16], "0xffff")
+	mpatofix(Maxintval[TUINT32], "0xffffffff")
+	mpatofix(Maxintval[TUINT64], "0xffffffffffffffff")
+
+	/* f is valid float if min < f < max.  (min and max are not themselves valid.) */
+	mpatoflt(maxfltval[TFLOAT32], "33554431p103") /* 2^24-1 p (127-23) + 1/2 ulp*/
+	mpatoflt(minfltval[TFLOAT32], "-33554431p103")
+	mpatoflt(maxfltval[TFLOAT64], "18014398509481983p970") /* 2^53-1 p (1023-52) + 1/2 ulp */
+	mpatoflt(minfltval[TFLOAT64], "-18014398509481983p970")
+
+	maxfltval[TCOMPLEX64] = maxfltval[TFLOAT32]
+	minfltval[TCOMPLEX64] = minfltval[TFLOAT32]
+	maxfltval[TCOMPLEX128] = maxfltval[TFLOAT64]
+	minfltval[TCOMPLEX128] = minfltval[TFLOAT64]
+
+	/* for walk to use in error messages */
+	Types[TFUNC] = functype(nil, nil, nil)
+
+	/* types used in front end */
+	// types[TNIL] got set early in lexinit
+	Types[TIDEAL] = typ(TIDEAL)
+
+	Types[TINTER] = typ(TINTER)
+
+	/* simple aliases */
+	Simtype[TMAP] = uint8(Tptr)
+
+	Simtype[TCHAN] = uint8(Tptr)
+	Simtype[TFUNC] = uint8(Tptr)
+	Simtype[TUNSAFEPTR] = uint8(Tptr)
+
+	/* pick up the backend thearch.typedefs */
+	var s1 *Sym
+	var etype int
+	var sameas int
+	var s *Sym
+	for i = range Thearch.Typedefs {
+		s = Lookup(Thearch.Typedefs[i].Name)
+		s1 = Pkglookup(Thearch.Typedefs[i].Name, builtinpkg)
+
+		etype = Thearch.Typedefs[i].Etype
+		if etype < 0 || etype >= len(Types) {
+			Fatal("typeinit: %s bad etype", s.Name)
+		}
+		sameas = Thearch.Typedefs[i].Sameas
+		if sameas < 0 || sameas >= len(Types) {
+			Fatal("typeinit: %s bad sameas", s.Name)
+		}
+		Simtype[etype] = uint8(sameas)
+		minfltval[etype] = minfltval[sameas]
+		maxfltval[etype] = maxfltval[sameas]
+		Minintval[etype] = Minintval[sameas]
+		Maxintval[etype] = Maxintval[sameas]
+
+		t = Types[etype]
+		if t != nil {
+			Fatal("typeinit: %s already defined", s.Name)
+		}
+
+		t = typ(etype)
+		t.Sym = s1
+
+		dowidth(t)
+		Types[etype] = t
+		s1.Def = typenod(t)
+	}
+
+	Array_array = int(Rnd(0, int64(Widthptr)))
+	Array_nel = int(Rnd(int64(Array_array)+int64(Widthptr), int64(Widthint)))
+	Array_cap = int(Rnd(int64(Array_nel)+int64(Widthint), int64(Widthint)))
+	sizeof_Array = int(Rnd(int64(Array_cap)+int64(Widthint), int64(Widthptr)))
+
+	// string is same as slice w/o the cap
+	sizeof_String = int(Rnd(int64(Array_nel)+int64(Widthint), int64(Widthptr)))
+
+	dowidth(Types[TSTRING])
+	dowidth(idealstring)
+
+	itable = typ(Tptr)
+	itable.Type = Types[TUINT8]
+}
+
+/*
+ * compute total size of f's in/out arguments.
+ */
+func Argsize(t *Type) int {
+	var save Iter
+	var x int64
+
+	w := int64(0)
+
+	fp := Structfirst(&save, Getoutarg(t))
+	for fp != nil {
+		x = fp.Width + fp.Type.Width
+		if x > w {
+			w = x
+		}
+		fp = structnext(&save)
+	}
+
+	fp = funcfirst(&save, t)
+	for fp != nil {
+		x = fp.Width + fp.Type.Width
+		if x > w {
+			w = x
+		}
+		fp = funcnext(&save)
+	}
+
+	w = (w + int64(Widthptr) - 1) &^ (int64(Widthptr) - 1)
+	if int64(int(w)) != w {
+		Fatal("argsize too big")
+	}
+	return int(w)
+}
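
Everything in this file reduces to Rnd-style rounding; a tiny standalone
sketch of the same bit trick (mirroring gc.Rnd, which is internal):

	package main

	import "fmt"

	// rnd rounds o up to a multiple of r, where r is a power of two.
	func rnd(o, r int64) int64 { return (o + r - 1) &^ (r - 1) }

	func main() {
		fmt.Println(rnd(5, 4), rnd(8, 4), rnd(0, 8)) // 8 8 0
	}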
diff --git a/src/cmd/compile/internal/gc/builtin.go b/src/cmd/compile/internal/gc/builtin.go
new file mode 100644
index 0000000..6bdf78c
--- /dev/null
+++ b/src/cmd/compile/internal/gc/builtin.go
@@ -0,0 +1,168 @@
+// AUTO-GENERATED by mkbuiltin.go; DO NOT EDIT
+
+package gc
+
+const runtimeimport = "" +
+	"package runtime\n" +
+	"import runtime \"runtime\"\n" +
+	"func @\"\".newobject (@\"\".typ·2 *byte) (? *any)\n" +
+	"func @\"\".panicindex ()\n" +
+	"func @\"\".panicslice ()\n" +
+	"func @\"\".panicdivide ()\n" +
+	"func @\"\".throwreturn ()\n" +
+	"func @\"\".throwinit ()\n" +
+	"func @\"\".panicwrap (? string, ? string, ? string)\n" +
+	"func @\"\".gopanic (? interface {})\n" +
+	"func @\"\".gorecover (? *int32) (? interface {})\n" +
+	"func @\"\".printbool (? bool)\n" +
+	"func @\"\".printfloat (? float64)\n" +
+	"func @\"\".printint (? int64)\n" +
+	"func @\"\".printhex (? uint64)\n" +
+	"func @\"\".printuint (? uint64)\n" +
+	"func @\"\".printcomplex (? complex128)\n" +
+	"func @\"\".printstring (? string)\n" +
+	"func @\"\".printpointer (? any)\n" +
+	"func @\"\".printiface (? any)\n" +
+	"func @\"\".printeface (? any)\n" +
+	"func @\"\".printslice (? any)\n" +
+	"func @\"\".printnl ()\n" +
+	"func @\"\".printsp ()\n" +
+	"func @\"\".printlock ()\n" +
+	"func @\"\".printunlock ()\n" +
+	"func @\"\".concatstring2 (? *[32]byte, ? string, ? string) (? string)\n" +
+	"func @\"\".concatstring3 (? *[32]byte, ? string, ? string, ? string) (? string)\n" +
+	"func @\"\".concatstring4 (? *[32]byte, ? string, ? string, ? string, ? string) (? string)\n" +
+	"func @\"\".concatstring5 (? *[32]byte, ? string, ? string, ? string, ? string, ? string) (? string)\n" +
+	"func @\"\".concatstrings (? *[32]byte, ? []string) (? string)\n" +
+	"func @\"\".cmpstring (? string, ? string) (? int)\n" +
+	"func @\"\".eqstring (? string, ? string) (? bool)\n" +
+	"func @\"\".intstring (? *[4]byte, ? int64) (? string)\n" +
+	"func @\"\".slicebytetostring (? *[32]byte, ? []byte) (? string)\n" +
+	"func @\"\".slicebytetostringtmp (? []byte) (? string)\n" +
+	"func @\"\".slicerunetostring (? *[32]byte, ? []rune) (? string)\n" +
+	"func @\"\".stringtoslicebyte (? *[32]byte, ? string) (? []byte)\n" +
+	"func @\"\".stringtoslicebytetmp (? string) (? []byte)\n" +
+	"func @\"\".stringtoslicerune (? *[32]rune, ? string) (? []rune)\n" +
+	"func @\"\".stringiter (? string, ? int) (? int)\n" +
+	"func @\"\".stringiter2 (? string, ? int) (@\"\".retk·1 int, @\"\".retv·2 rune)\n" +
+	"func @\"\".slicecopy (@\"\".to·2 any, @\"\".fr·3 any, @\"\".wid·4 uintptr) (? int)\n" +
+	"func @\"\".slicestringcopy (@\"\".to·2 any, @\"\".fr·3 any) (? int)\n" +
+	"func @\"\".typ2Itab (@\"\".typ·2 *byte, @\"\".typ2·3 *byte, @\"\".cache·4 **byte) (@\"\".ret·1 *byte)\n" +
+	"func @\"\".convI2E (@\"\".elem·2 any) (@\"\".ret·1 any)\n" +
+	"func @\"\".convI2I (@\"\".typ·2 *byte, @\"\".elem·3 any) (@\"\".ret·1 any)\n" +
+	"func @\"\".convT2E (@\"\".typ·2 *byte, @\"\".elem·3 *any, @\"\".buf·4 *any) (@\"\".ret·1 any)\n" +
+	"func @\"\".convT2I (@\"\".typ·2 *byte, @\"\".typ2·3 *byte, @\"\".cache·4 **byte, @\"\".elem·5 *any, @\"\".buf·6 *any) (@\"\".ret·1 any)\n" +
+	"func @\"\".assertE2E (@\"\".typ·1 *byte, @\"\".iface·2 any, @\"\".ret·3 *any)\n" +
+	"func @\"\".assertE2E2 (@\"\".typ·2 *byte, @\"\".iface·3 any, @\"\".ret·4 *any) (? bool)\n" +
+	"func @\"\".assertE2I (@\"\".typ·1 *byte, @\"\".iface·2 any, @\"\".ret·3 *any)\n" +
+	"func @\"\".assertE2I2 (@\"\".typ·2 *byte, @\"\".iface·3 any, @\"\".ret·4 *any) (? bool)\n" +
+	"func @\"\".assertE2T (@\"\".typ·1 *byte, @\"\".iface·2 any, @\"\".ret·3 *any)\n" +
+	"func @\"\".assertE2T2 (@\"\".typ·2 *byte, @\"\".iface·3 any, @\"\".ret·4 *any) (? bool)\n" +
+	"func @\"\".assertI2E (@\"\".typ·1 *byte, @\"\".iface·2 any, @\"\".ret·3 *any)\n" +
+	"func @\"\".assertI2E2 (@\"\".typ·2 *byte, @\"\".iface·3 any, @\"\".ret·4 *any) (? bool)\n" +
+	"func @\"\".assertI2I (@\"\".typ·1 *byte, @\"\".iface·2 any, @\"\".ret·3 *any)\n" +
+	"func @\"\".assertI2I2 (@\"\".typ·2 *byte, @\"\".iface·3 any, @\"\".ret·4 *any) (? bool)\n" +
+	"func @\"\".assertI2T (@\"\".typ·1 *byte, @\"\".iface·2 any, @\"\".ret·3 *any)\n" +
+	"func @\"\".assertI2T2 (@\"\".typ·2 *byte, @\"\".iface·3 any, @\"\".ret·4 *any) (? bool)\n" +
+	"func @\"\".panicdottype (@\"\".have·1 *byte, @\"\".want·2 *byte, @\"\".iface·3 *byte)\n" +
+	"func @\"\".ifaceeq (@\"\".i1·2 any, @\"\".i2·3 any) (@\"\".ret·1 bool)\n" +
+	"func @\"\".efaceeq (@\"\".i1·2 any, @\"\".i2·3 any) (@\"\".ret·1 bool)\n" +
+	"func @\"\".ifacethash (@\"\".i1·2 any) (@\"\".ret·1 uint32)\n" +
+	"func @\"\".efacethash (@\"\".i1·2 any) (@\"\".ret·1 uint32)\n" +
+	"func @\"\".makemap (@\"\".mapType·2 *byte, @\"\".hint·3 int64, @\"\".mapbuf·4 *any, @\"\".bucketbuf·5 *any) (@\"\".hmap·1 map[any]any)\n" +
+	"func @\"\".mapaccess1 (@\"\".mapType·2 *byte, @\"\".hmap·3 map[any]any, @\"\".key·4 *any) (@\"\".val·1 *any)\n" +
+	"func @\"\".mapaccess1_fast32 (@\"\".mapType·2 *byte, @\"\".hmap·3 map[any]any, @\"\".key·4 any) (@\"\".val·1 *any)\n" +
+	"func @\"\".mapaccess1_fast64 (@\"\".mapType·2 *byte, @\"\".hmap·3 map[any]any, @\"\".key·4 any) (@\"\".val·1 *any)\n" +
+	"func @\"\".mapaccess1_faststr (@\"\".mapType·2 *byte, @\"\".hmap·3 map[any]any, @\"\".key·4 any) (@\"\".val·1 *any)\n" +
+	"func @\"\".mapaccess2 (@\"\".mapType·3 *byte, @\"\".hmap·4 map[any]any, @\"\".key·5 *any) (@\"\".val·1 *any, @\"\".pres·2 bool)\n" +
+	"func @\"\".mapaccess2_fast32 (@\"\".mapType·3 *byte, @\"\".hmap·4 map[any]any, @\"\".key·5 any) (@\"\".val·1 *any, @\"\".pres·2 bool)\n" +
+	"func @\"\".mapaccess2_fast64 (@\"\".mapType·3 *byte, @\"\".hmap·4 map[any]any, @\"\".key·5 any) (@\"\".val·1 *any, @\"\".pres·2 bool)\n" +
+	"func @\"\".mapaccess2_faststr (@\"\".mapType·3 *byte, @\"\".hmap·4 map[any]any, @\"\".key·5 any) (@\"\".val·1 *any, @\"\".pres·2 bool)\n" +
+	"func @\"\".mapassign1 (@\"\".mapType·1 *byte, @\"\".hmap·2 map[any]any, @\"\".key·3 *any, @\"\".val·4 *any)\n" +
+	"func @\"\".mapiterinit (@\"\".mapType·1 *byte, @\"\".hmap·2 map[any]any, @\"\".hiter·3 *any)\n" +
+	"func @\"\".mapdelete (@\"\".mapType·1 *byte, @\"\".hmap·2 map[any]any, @\"\".key·3 *any)\n" +
+	"func @\"\".mapiternext (@\"\".hiter·1 *any)\n" +
+	"func @\"\".makechan (@\"\".chanType·2 *byte, @\"\".hint·3 int64) (@\"\".hchan·1 chan any)\n" +
+	"func @\"\".chanrecv1 (@\"\".chanType·1 *byte, @\"\".hchan·2 <-chan any, @\"\".elem·3 *any)\n" +
+	"func @\"\".chanrecv2 (@\"\".chanType·2 *byte, @\"\".hchan·3 <-chan any, @\"\".elem·4 *any) (? bool)\n" +
+	"func @\"\".chansend1 (@\"\".chanType·1 *byte, @\"\".hchan·2 chan<- any, @\"\".elem·3 *any)\n" +
+	"func @\"\".closechan (@\"\".hchan·1 any)\n" +
+	"var @\"\".writeBarrierEnabled bool\n" +
+	"func @\"\".writebarrierptr (@\"\".dst·1 *any, @\"\".src·2 any)\n" +
+	"func @\"\".writebarrierstring (@\"\".dst·1 *any, @\"\".src·2 any)\n" +
+	"func @\"\".writebarrierslice (@\"\".dst·1 *any, @\"\".src·2 any)\n" +
+	"func @\"\".writebarrieriface (@\"\".dst·1 *any, @\"\".src·2 any)\n" +
+	"func @\"\".writebarrierfat01 (@\"\".dst·1 *any, _ uintptr, @\"\".src·3 any)\n" +
+	"func @\"\".writebarrierfat10 (@\"\".dst·1 *any, _ uintptr, @\"\".src·3 any)\n" +
+	"func @\"\".writebarrierfat11 (@\"\".dst·1 *any, _ uintptr, @\"\".src·3 any)\n" +
+	"func @\"\".writebarrierfat001 (@\"\".dst·1 *any, _ uintptr, @\"\".src·3 any)\n" +
+	"func @\"\".writebarrierfat010 (@\"\".dst·1 *any, _ uintptr, @\"\".src·3 any)\n" +
+	"func @\"\".writebarrierfat011 (@\"\".dst·1 *any, _ uintptr, @\"\".src·3 any)\n" +
+	"func @\"\".writebarrierfat100 (@\"\".dst·1 *any, _ uintptr, @\"\".src·3 any)\n" +
+	"func @\"\".writebarrierfat101 (@\"\".dst·1 *any, _ uintptr, @\"\".src·3 any)\n" +
+	"func @\"\".writebarrierfat110 (@\"\".dst·1 *any, _ uintptr, @\"\".src·3 any)\n" +
+	"func @\"\".writebarrierfat111 (@\"\".dst·1 *any, _ uintptr, @\"\".src·3 any)\n" +
+	"func @\"\".writebarrierfat0001 (@\"\".dst·1 *any, _ uintptr, @\"\".src·3 any)\n" +
+	"func @\"\".writebarrierfat0010 (@\"\".dst·1 *any, _ uintptr, @\"\".src·3 any)\n" +
+	"func @\"\".writebarrierfat0011 (@\"\".dst·1 *any, _ uintptr, @\"\".src·3 any)\n" +
+	"func @\"\".writebarrierfat0100 (@\"\".dst·1 *any, _ uintptr, @\"\".src·3 any)\n" +
+	"func @\"\".writebarrierfat0101 (@\"\".dst·1 *any, _ uintptr, @\"\".src·3 any)\n" +
+	"func @\"\".writebarrierfat0110 (@\"\".dst·1 *any, _ uintptr, @\"\".src·3 any)\n" +
+	"func @\"\".writebarrierfat0111 (@\"\".dst·1 *any, _ uintptr, @\"\".src·3 any)\n" +
+	"func @\"\".writebarrierfat1000 (@\"\".dst·1 *any, _ uintptr, @\"\".src·3 any)\n" +
+	"func @\"\".writebarrierfat1001 (@\"\".dst·1 *any, _ uintptr, @\"\".src·3 any)\n" +
+	"func @\"\".writebarrierfat1010 (@\"\".dst·1 *any, _ uintptr, @\"\".src·3 any)\n" +
+	"func @\"\".writebarrierfat1011 (@\"\".dst·1 *any, _ uintptr, @\"\".src·3 any)\n" +
+	"func @\"\".writebarrierfat1100 (@\"\".dst·1 *any, _ uintptr, @\"\".src·3 any)\n" +
+	"func @\"\".writebarrierfat1101 (@\"\".dst·1 *any, _ uintptr, @\"\".src·3 any)\n" +
+	"func @\"\".writebarrierfat1110 (@\"\".dst·1 *any, _ uintptr, @\"\".src·3 any)\n" +
+	"func @\"\".writebarrierfat1111 (@\"\".dst·1 *any, _ uintptr, @\"\".src·3 any)\n" +
+	"func @\"\".typedmemmove (@\"\".typ·1 *byte, @\"\".dst·2 *any, @\"\".src·3 *any)\n" +
+	"func @\"\".typedslicecopy (@\"\".typ·2 *byte, @\"\".dst·3 any, @\"\".src·4 any) (? int)\n" +
+	"func @\"\".selectnbsend (@\"\".chanType·2 *byte, @\"\".hchan·3 chan<- any, @\"\".elem·4 *any) (? bool)\n" +
+	"func @\"\".selectnbrecv (@\"\".chanType·2 *byte, @\"\".elem·3 *any, @\"\".hchan·4 <-chan any) (? bool)\n" +
+	"func @\"\".selectnbrecv2 (@\"\".chanType·2 *byte, @\"\".elem·3 *any, @\"\".received·4 *bool, @\"\".hchan·5 <-chan any) (? bool)\n" +
+	"func @\"\".newselect (@\"\".sel·1 *byte, @\"\".selsize·2 int64, @\"\".size·3 int32)\n" +
+	"func @\"\".selectsend (@\"\".sel·2 *byte, @\"\".hchan·3 chan<- any, @\"\".elem·4 *any) (@\"\".selected·1 bool)\n" +
+	"func @\"\".selectrecv (@\"\".sel·2 *byte, @\"\".hchan·3 <-chan any, @\"\".elem·4 *any) (@\"\".selected·1 bool)\n" +
+	"func @\"\".selectrecv2 (@\"\".sel·2 *byte, @\"\".hchan·3 <-chan any, @\"\".elem·4 *any, @\"\".received·5 *bool) (@\"\".selected·1 bool)\n" +
+	"func @\"\".selectdefault (@\"\".sel·2 *byte) (@\"\".selected·1 bool)\n" +
+	"func @\"\".selectgo (@\"\".sel·1 *byte)\n" +
+	"func @\"\".block ()\n" +
+	"func @\"\".makeslice (@\"\".typ·2 *byte, @\"\".nel·3 int64, @\"\".cap·4 int64) (@\"\".ary·1 []any)\n" +
+	"func @\"\".growslice (@\"\".typ·2 *byte, @\"\".old·3 []any, @\"\".n·4 int) (@\"\".ary·1 []any)\n" +
+	"func @\"\".memmove (@\"\".to·1 *any, @\"\".frm·2 *any, @\"\".length·3 uintptr)\n" +
+	"func @\"\".memclr (@\"\".ptr·1 *byte, @\"\".length·2 uintptr)\n" +
+	"func @\"\".memequal (@\"\".x·2 *any, @\"\".y·3 *any, @\"\".size·4 uintptr) (? bool)\n" +
+	"func @\"\".memequal8 (@\"\".x·2 *any, @\"\".y·3 *any) (? bool)\n" +
+	"func @\"\".memequal16 (@\"\".x·2 *any, @\"\".y·3 *any) (? bool)\n" +
+	"func @\"\".memequal32 (@\"\".x·2 *any, @\"\".y·3 *any) (? bool)\n" +
+	"func @\"\".memequal64 (@\"\".x·2 *any, @\"\".y·3 *any) (? bool)\n" +
+	"func @\"\".memequal128 (@\"\".x·2 *any, @\"\".y·3 *any) (? bool)\n" +
+	"func @\"\".int64div (? int64, ? int64) (? int64)\n" +
+	"func @\"\".uint64div (? uint64, ? uint64) (? uint64)\n" +
+	"func @\"\".int64mod (? int64, ? int64) (? int64)\n" +
+	"func @\"\".uint64mod (? uint64, ? uint64) (? uint64)\n" +
+	"func @\"\".float64toint64 (? float64) (? int64)\n" +
+	"func @\"\".float64touint64 (? float64) (? uint64)\n" +
+	"func @\"\".int64tofloat64 (? int64) (? float64)\n" +
+	"func @\"\".uint64tofloat64 (? uint64) (? float64)\n" +
+	"func @\"\".complex128div (@\"\".num·2 complex128, @\"\".den·3 complex128) (@\"\".quo·1 complex128)\n" +
+	"func @\"\".racefuncenter (? uintptr)\n" +
+	"func @\"\".racefuncexit ()\n" +
+	"func @\"\".raceread (? uintptr)\n" +
+	"func @\"\".racewrite (? uintptr)\n" +
+	"func @\"\".racereadrange (@\"\".addr·1 uintptr, @\"\".size·2 uintptr)\n" +
+	"func @\"\".racewriterange (@\"\".addr·1 uintptr, @\"\".size·2 uintptr)\n" +
+	"\n" +
+	"$$\n"
+
+const unsafeimport = "" +
+	"package unsafe\n" +
+	"import runtime \"runtime\"\n" +
+	"type @\"\".Pointer uintptr\n" +
+	"func @\"\".Offsetof (? any) (? uintptr)\n" +
+	"func @\"\".Sizeof (? any) (? uintptr)\n" +
+	"func @\"\".Alignof (? any) (? uintptr)\n" +
+	"\n" +
+	"$$\n"
diff --git a/src/cmd/compile/internal/gc/builtin/runtime.go b/src/cmd/compile/internal/gc/builtin/runtime.go
new file mode 100644
index 0000000..179a4dd
--- /dev/null
+++ b/src/cmd/compile/internal/gc/builtin/runtime.go
@@ -0,0 +1,194 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// NOTE: If you change this file you must run "go generate"
+// to update builtin.go.  This is not done automatically
+// to avoid depending on having a working compiler binary.
+
+// +build ignore
+
+package PACKAGE
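+
+// (The builtin.go generator substitutes the real package name,
+// runtime or unsafe, for PACKAGE.)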
+
+// emitted by compiler, not referred to by go programs
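+//
+// In the declarations below, the pseudo-type any is a placeholder:
+// the compiler substitutes the concrete type in use at each call site.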
+
+func newobject(typ *byte) *any
+func panicindex()
+func panicslice()
+func panicdivide()
+func throwreturn()
+func throwinit()
+func panicwrap(string, string, string)
+
+func gopanic(interface{})
+func gorecover(*int32) interface{}
+
+func printbool(bool)
+func printfloat(float64)
+func printint(int64)
+func printhex(uint64)
+func printuint(uint64)
+func printcomplex(complex128)
+func printstring(string)
+func printpointer(any)
+func printiface(any)
+func printeface(any)
+func printslice(any)
+func printnl()
+func printsp()
+func printlock()
+func printunlock()
+
+func concatstring2(*[32]byte, string, string) string
+func concatstring3(*[32]byte, string, string, string) string
+func concatstring4(*[32]byte, string, string, string, string) string
+func concatstring5(*[32]byte, string, string, string, string, string) string
+func concatstrings(*[32]byte, []string) string
+
+func cmpstring(string, string) int
+func eqstring(string, string) bool
+func intstring(*[4]byte, int64) string
+func slicebytetostring(*[32]byte, []byte) string
+func slicebytetostringtmp([]byte) string
+func slicerunetostring(*[32]byte, []rune) string
+func stringtoslicebyte(*[32]byte, string) []byte
+func stringtoslicebytetmp(string) []byte
+func stringtoslicerune(*[32]rune, string) []rune
+func stringiter(string, int) int
+func stringiter2(string, int) (retk int, retv rune)
+func slicecopy(to any, fr any, wid uintptr) int
+func slicestringcopy(to any, fr any) int
+
+// interface conversions
+func typ2Itab(typ *byte, typ2 *byte, cache **byte) (ret *byte)
+func convI2E(elem any) (ret any)
+func convI2I(typ *byte, elem any) (ret any)
+func convT2E(typ *byte, elem, buf *any) (ret any)
+func convT2I(typ *byte, typ2 *byte, cache **byte, elem, buf *any) (ret any)
+
+// interface type assertions  x.(T)
+func assertE2E(typ *byte, iface any, ret *any)
+func assertE2E2(typ *byte, iface any, ret *any) bool
+func assertE2I(typ *byte, iface any, ret *any)
+func assertE2I2(typ *byte, iface any, ret *any) bool
+func assertE2T(typ *byte, iface any, ret *any)
+func assertE2T2(typ *byte, iface any, ret *any) bool
+func assertI2E(typ *byte, iface any, ret *any)
+func assertI2E2(typ *byte, iface any, ret *any) bool
+func assertI2I(typ *byte, iface any, ret *any)
+func assertI2I2(typ *byte, iface any, ret *any) bool
+func assertI2T(typ *byte, iface any, ret *any)
+func assertI2T2(typ *byte, iface any, ret *any) bool
+func panicdottype(have, want, iface *byte)
+
+func ifaceeq(i1 any, i2 any) (ret bool)
+func efaceeq(i1 any, i2 any) (ret bool)
+func ifacethash(i1 any) (ret uint32)
+func efacethash(i1 any) (ret uint32)
+
+// *byte is really *runtime.Type
+func makemap(mapType *byte, hint int64, mapbuf *any, bucketbuf *any) (hmap map[any]any)
+func mapaccess1(mapType *byte, hmap map[any]any, key *any) (val *any)
+func mapaccess1_fast32(mapType *byte, hmap map[any]any, key any) (val *any)
+func mapaccess1_fast64(mapType *byte, hmap map[any]any, key any) (val *any)
+func mapaccess1_faststr(mapType *byte, hmap map[any]any, key any) (val *any)
+func mapaccess2(mapType *byte, hmap map[any]any, key *any) (val *any, pres bool)
+func mapaccess2_fast32(mapType *byte, hmap map[any]any, key any) (val *any, pres bool)
+func mapaccess2_fast64(mapType *byte, hmap map[any]any, key any) (val *any, pres bool)
+func mapaccess2_faststr(mapType *byte, hmap map[any]any, key any) (val *any, pres bool)
+func mapassign1(mapType *byte, hmap map[any]any, key *any, val *any)
+func mapiterinit(mapType *byte, hmap map[any]any, hiter *any)
+func mapdelete(mapType *byte, hmap map[any]any, key *any)
+func mapiternext(hiter *any)
+
+// *byte is really *runtime.Type
+func makechan(chanType *byte, hint int64) (hchan chan any)
+func chanrecv1(chanType *byte, hchan <-chan any, elem *any)
+func chanrecv2(chanType *byte, hchan <-chan any, elem *any) bool
+func chansend1(chanType *byte, hchan chan<- any, elem *any)
+func closechan(hchan any)
+
+var writeBarrierEnabled bool
+
+func writebarrierptr(dst *any, src any)
+func writebarrierstring(dst *any, src any)
+func writebarrierslice(dst *any, src any)
+func writebarrieriface(dst *any, src any)
+
+// The unused *byte argument makes sure that src is 2-pointer-aligned,
+// which is the maximum alignment on NaCl amd64p32
+// (and possibly on 32-bit systems if we start 64-bit aligning uint64s).
+// The bitmap in the name tells which words being copied are pointers.
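+// For example, writebarrierfat11 copies a two-word value in which
+// both words are pointers.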
+func writebarrierfat01(dst *any, _ uintptr, src any)
+func writebarrierfat10(dst *any, _ uintptr, src any)
+func writebarrierfat11(dst *any, _ uintptr, src any)
+func writebarrierfat001(dst *any, _ uintptr, src any)
+func writebarrierfat010(dst *any, _ uintptr, src any)
+func writebarrierfat011(dst *any, _ uintptr, src any)
+func writebarrierfat100(dst *any, _ uintptr, src any)
+func writebarrierfat101(dst *any, _ uintptr, src any)
+func writebarrierfat110(dst *any, _ uintptr, src any)
+func writebarrierfat111(dst *any, _ uintptr, src any)
+func writebarrierfat0001(dst *any, _ uintptr, src any)
+func writebarrierfat0010(dst *any, _ uintptr, src any)
+func writebarrierfat0011(dst *any, _ uintptr, src any)
+func writebarrierfat0100(dst *any, _ uintptr, src any)
+func writebarrierfat0101(dst *any, _ uintptr, src any)
+func writebarrierfat0110(dst *any, _ uintptr, src any)
+func writebarrierfat0111(dst *any, _ uintptr, src any)
+func writebarrierfat1000(dst *any, _ uintptr, src any)
+func writebarrierfat1001(dst *any, _ uintptr, src any)
+func writebarrierfat1010(dst *any, _ uintptr, src any)
+func writebarrierfat1011(dst *any, _ uintptr, src any)
+func writebarrierfat1100(dst *any, _ uintptr, src any)
+func writebarrierfat1101(dst *any, _ uintptr, src any)
+func writebarrierfat1110(dst *any, _ uintptr, src any)
+func writebarrierfat1111(dst *any, _ uintptr, src any)
+
+// *byte is really *runtime.Type
+func typedmemmove(typ *byte, dst *any, src *any)
+func typedslicecopy(typ *byte, dst any, src any) int
+
+func selectnbsend(chanType *byte, hchan chan<- any, elem *any) bool
+func selectnbrecv(chanType *byte, elem *any, hchan <-chan any) bool
+func selectnbrecv2(chanType *byte, elem *any, received *bool, hchan <-chan any) bool
+
+func newselect(sel *byte, selsize int64, size int32)
+func selectsend(sel *byte, hchan chan<- any, elem *any) (selected bool)
+func selectrecv(sel *byte, hchan <-chan any, elem *any) (selected bool)
+func selectrecv2(sel *byte, hchan <-chan any, elem *any, received *bool) (selected bool)
+func selectdefault(sel *byte) (selected bool)
+func selectgo(sel *byte)
+func block()
+
+func makeslice(typ *byte, nel int64, cap int64) (ary []any)
+func growslice(typ *byte, old []any, n int) (ary []any)
+func memmove(to *any, frm *any, length uintptr)
+func memclr(ptr *byte, length uintptr)
+
+func memequal(x, y *any, size uintptr) bool
+func memequal8(x, y *any) bool
+func memequal16(x, y *any) bool
+func memequal32(x, y *any) bool
+func memequal64(x, y *any) bool
+func memequal128(x, y *any) bool
+
+// only used on 32-bit
+func int64div(int64, int64) int64
+func uint64div(uint64, uint64) uint64
+func int64mod(int64, int64) int64
+func uint64mod(uint64, uint64) uint64
+func float64toint64(float64) int64
+func float64touint64(float64) uint64
+func int64tofloat64(int64) float64
+func uint64tofloat64(uint64) float64
+
+func complex128div(num complex128, den complex128) (quo complex128)
+
+// race detection
+func racefuncenter(uintptr)
+func racefuncexit()
+func raceread(uintptr)
+func racewrite(uintptr)
+func racereadrange(addr, size uintptr)
+func racewriterange(addr, size uintptr)
diff --git a/src/cmd/compile/internal/gc/builtin/unsafe.go b/src/cmd/compile/internal/gc/builtin/unsafe.go
new file mode 100644
index 0000000..ce50869
--- /dev/null
+++ b/src/cmd/compile/internal/gc/builtin/unsafe.go
@@ -0,0 +1,18 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// NOTE: If you change this file you must run "go generate"
+// to update builtin.go.  This is not done automatically
+// to avoid depending on having a working compiler binary.
+
+// +build ignore
+
+package PACKAGE
+
+type Pointer uintptr // not really; filled in by compiler
+
+// return types here are ignored; see unsafe.go
+func Offsetof(any) uintptr
+func Sizeof(any) uintptr
+func Alignof(any) uintptr
diff --git a/src/cmd/compile/internal/gc/bv.go b/src/cmd/compile/internal/gc/bv.go
new file mode 100644
index 0000000..2b988e6
--- /dev/null
+++ b/src/cmd/compile/internal/gc/bv.go
@@ -0,0 +1,195 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import "fmt"
+
+const (
+	WORDSIZE  = 4
+	WORDBITS  = 32
+	WORDMASK  = WORDBITS - 1
+	WORDSHIFT = 5
+)
+
+// A Bvec is a bit vector.
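+// Bit i is stored in bit i&WORDMASK of word i>>WORDSHIFT,
+// with WORDBITS (= 8*WORDSIZE) bits per uint32 word.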
+type Bvec struct {
+	n int32    // number of bits in vector
+	b []uint32 // words holding bits
+}
+
+func bvsize(n uint32) uint32 {
+	return ((n + WORDBITS - 1) / WORDBITS) * WORDSIZE
+}
+
+func bvbits(bv Bvec) int32 {
+	return bv.n
+}
+
+func bvwords(bv Bvec) int32 {
+	return (bv.n + WORDBITS - 1) / WORDBITS
+}
+
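+// bvalloc returns a zeroed bit vector of n bits. For example,
+// bvalloc(70) allocates bvsize(70)/4 = 3 uint32 words.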
+func bvalloc(n int32) Bvec {
+	return Bvec{n, make([]uint32, bvsize(uint32(n))/4)}
+}
+
+type bulkBvec struct {
+	words []uint32
+	nbit  int32
+	nword int32
+}
+
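+// bvbulkalloc allocates the backing store for count bit vectors of
+// nbit bits each in a single slice; each call to next carves off the
+// next vector, avoiding a separate allocation per vector.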
+func bvbulkalloc(nbit int32, count int32) bulkBvec {
+	nword := (nbit + WORDBITS - 1) / WORDBITS
+	return bulkBvec{
+		words: make([]uint32, nword*count),
+		nbit:  nbit,
+		nword: nword,
+	}
+}
+
+func (b *bulkBvec) next() Bvec {
+	out := Bvec{b.nbit, b.words[:b.nword]}
+	b.words = b.words[b.nword:]
+	return out
+}
+
+/* difference */
+func bvandnot(dst Bvec, src1 Bvec, src2 Bvec) {
+	for i, x := range src1.b {
+		dst.b[i] = x &^ src2.b[i]
+	}
+}
+
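+// bvcmp reports whether bv1 and bv2 differ: it returns 0 if they are
+// equal and 1 otherwise. The lengths must match.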
+func bvcmp(bv1 Bvec, bv2 Bvec) int {
+	if bv1.n != bv2.n {
+		Fatal("bvequal: lengths %d and %d are not equal", bv1.n, bv2.n)
+	}
+	for i, x := range bv1.b {
+		if x != bv2.b[i] {
+			return 1
+		}
+	}
+	return 0
+}
+
+func bvcopy(dst Bvec, src Bvec) {
+	for i, x := range src.b {
+		dst.b[i] = x
+	}
+}
+
+func bvconcat(src1 Bvec, src2 Bvec) Bvec {
+	dst := bvalloc(src1.n + src2.n)
+	for i := int32(0); i < src1.n; i++ {
+		if bvget(src1, i) != 0 {
+			bvset(dst, i)
+		}
+	}
+	for i := int32(0); i < src2.n; i++ {
+		if bvget(src2, i) != 0 {
+			bvset(dst, i+src1.n)
+		}
+	}
+	return dst
+}
+
+func bvget(bv Bvec, i int32) int {
+	if i < 0 || i >= bv.n {
+		Fatal("bvget: index %d is out of bounds with length %d\n", i, bv.n)
+	}
+	return int((bv.b[i>>WORDSHIFT] >> uint(i&WORDMASK)) & 1)
+}
+
+// bvnext returns the smallest index >= i for which bvget(bv, i) == 1.
+// If there is no such index, bvnext returns -1.
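+// For example, with only bits 5 and 40 set, bvnext(bv, 6) skips the
+// rest of the empty first word and returns 40.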
+func bvnext(bv Bvec, i int32) int {
+	if i >= bv.n {
+		return -1
+	}
+
+	// Jump i ahead to next word with bits.
+	if bv.b[i>>WORDSHIFT]>>uint(i&WORDMASK) == 0 {
+		i &^= WORDMASK
+		i += WORDBITS
+		for i < bv.n && bv.b[i>>WORDSHIFT] == 0 {
+			i += WORDBITS
+		}
+	}
+
+	if i >= bv.n {
+		return -1
+	}
+
+	// Find 1 bit.
+	w := bv.b[i>>WORDSHIFT] >> uint(i&WORDMASK)
+
+	for w&1 == 0 {
+		w >>= 1
+		i++
+	}
+
+	return int(i)
+}
+
+func bvisempty(bv Bvec) bool {
+	for i := int32(0); i < bv.n; i += WORDBITS {
+		if bv.b[i>>WORDSHIFT] != 0 {
+			return false
+		}
+	}
+	return true
+}
+
+func bvnot(bv Bvec) {
+	i := int32(0)
+	w := int32(0)
+	for ; i < bv.n; i, w = i+WORDBITS, w+1 {
+		bv.b[w] = ^bv.b[w]
+	}
+}
+
+/* union */
+func bvor(dst Bvec, src1 Bvec, src2 Bvec) {
+	for i, x := range src1.b {
+		dst.b[i] = x | src2.b[i]
+	}
+}
+
+/* intersection */
+func bvand(dst Bvec, src1 Bvec, src2 Bvec) {
+	for i, x := range src1.b {
+		dst.b[i] = x & src2.b[i]
+	}
+}
+
+func bvprint(bv Bvec) {
+	fmt.Printf("#*")
+	for i := int32(0); i < bv.n; i++ {
+		fmt.Printf("%d", bvget(bv, i))
+	}
+}
+
+func bvreset(bv Bvec, i int32) {
+	if i < 0 || i >= bv.n {
+		Fatal("bvreset: index %d is out of bounds with length %d\n", i, bv.n)
+	}
+	mask := uint32(^(1 << uint(i%WORDBITS)))
+	bv.b[i/WORDBITS] &= mask
+}
+
+func bvresetall(bv Bvec) {
+	for i := range bv.b {
+		bv.b[i] = 0
+	}
+}
+
+func bvset(bv Bvec, i int32) {
+	if i < 0 || i >= bv.n {
+		Fatal("bvset: index %d is out of bounds with length %d\n", i, bv.n)
+	}
+	mask := uint32(1 << uint(i%WORDBITS))
+	bv.b[i/WORDBITS] |= mask
+}
diff --git a/src/cmd/compile/internal/gc/cgen.go b/src/cmd/compile/internal/gc/cgen.go
new file mode 100644
index 0000000..ca58b1c
--- /dev/null
+++ b/src/cmd/compile/internal/gc/cgen.go
@@ -0,0 +1,3564 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+	"cmd/internal/obj"
+	"fmt"
+)
+
+/*
+ * generate:
+ *	res = n;
+ * simplifies and calls Thearch.Gmove.
+ * if wb is true, need to emit write barriers.
+ */
+func Cgen(n, res *Node) {
+	cgen_wb(n, res, false)
+}
+
+func cgen_wb(n, res *Node, wb bool) {
+	if Debug['g'] != 0 {
+		op := "cgen"
+		if wb {
+			op = "cgen_wb"
+		}
+		Dump("\n"+op+"-n", n)
+		Dump(op+"-res", res)
+	}
+
+	if n == nil || n.Type == nil {
+		return
+	}
+
+	if res == nil || res.Type == nil {
+		Fatal("cgen: res nil")
+	}
+
+	for n.Op == OCONVNOP {
+		n = n.Left
+	}
+
+	switch n.Op {
+	case OSLICE, OSLICEARR, OSLICESTR, OSLICE3, OSLICE3ARR:
+		cgen_slice(n, res, wb)
+		return
+
+	case OEFACE:
+		if res.Op != ONAME || !res.Addable || wb {
+			var n1 Node
+			Tempname(&n1, n.Type)
+			Cgen_eface(n, &n1)
+			cgen_wb(&n1, res, wb)
+		} else {
+			Cgen_eface(n, res)
+		}
+		return
+
+	case ODOTTYPE:
+		cgen_dottype(n, res, nil, wb)
+		return
+
+	case OAPPEND:
+		cgen_append(n, res)
+		return
+	}
+
+	if n.Ullman >= UINF {
+		if n.Op == OINDREG {
+			Fatal("cgen: this is going to miscompile")
+		}
+		if res.Ullman >= UINF {
+			var n1 Node
+			Tempname(&n1, n.Type)
+			Cgen(n, &n1)
+			cgen_wb(&n1, res, wb)
+			return
+		}
+	}
+
+	if Isfat(n.Type) {
+		if n.Type.Width < 0 {
+			Fatal("forgot to compute width for %v", n.Type)
+		}
+		sgen_wb(n, res, n.Type.Width, wb)
+		return
+	}
+
+	if !res.Addable {
+		if n.Ullman > res.Ullman {
+			if Ctxt.Arch.Regsize == 4 && Is64(n.Type) {
+				var n1 Node
+				Tempname(&n1, n.Type)
+				Cgen(n, &n1)
+				cgen_wb(&n1, res, wb)
+				return
+			}
+
+			var n1 Node
+			Regalloc(&n1, n.Type, res)
+			Cgen(n, &n1)
+			if n1.Ullman > res.Ullman {
+				Dump("n1", &n1)
+				Dump("res", res)
+				Fatal("loop in cgen")
+			}
+
+			cgen_wb(&n1, res, wb)
+			Regfree(&n1)
+			return
+		}
+
+		var f int
+		if res.Ullman < UINF {
+			if Complexop(n, res) {
+				Complexgen(n, res)
+				return
+			}
+
+			f = 1 // gen thru register
+			switch n.Op {
+			case OLITERAL:
+				if Smallintconst(n) {
+					f = 0
+				}
+
+			case OREGISTER:
+				f = 0
+			}
+
+			if !Iscomplex[n.Type.Etype] && Ctxt.Arch.Regsize == 8 && !wb {
+				a := Thearch.Optoas(OAS, res.Type)
+				var addr obj.Addr
+				if Thearch.Sudoaddable(a, res, &addr) {
+					var p1 *obj.Prog
+					if f != 0 {
+						var n2 Node
+						Regalloc(&n2, res.Type, nil)
+						Cgen(n, &n2)
+						p1 = Thearch.Gins(a, &n2, nil)
+						Regfree(&n2)
+					} else {
+						p1 = Thearch.Gins(a, n, nil)
+					}
+					p1.To = addr
+					if Debug['g'] != 0 {
+						fmt.Printf("%v [ignore previous line]\n", p1)
+					}
+					Thearch.Sudoclean()
+					return
+				}
+			}
+		}
+
+		if Ctxt.Arch.Thechar == '8' {
+			// no registers to speak of
+			var n1, n2 Node
+			Tempname(&n1, n.Type)
+			Cgen(n, &n1)
+			Igen(res, &n2, nil)
+			cgen_wb(&n1, &n2, wb)
+			Regfree(&n2)
+			return
+		}
+
+		var n1 Node
+		Igen(res, &n1, nil)
+		cgen_wb(n, &n1, wb)
+		Regfree(&n1)
+		return
+	}
+
+	// update addressability for string, slice
+	// can't do in walk because n->left->addable
+	// changes if n->left is an escaping local variable.
+	switch n.Op {
+	case OSPTR, OLEN:
+		if Isslice(n.Left.Type) || Istype(n.Left.Type, TSTRING) {
+			n.Addable = n.Left.Addable
+		}
+
+	case OCAP:
+		if Isslice(n.Left.Type) {
+			n.Addable = n.Left.Addable
+		}
+
+	case OITAB:
+		n.Addable = n.Left.Addable
+	}
+
+	if wb {
+		if int(Simtype[res.Type.Etype]) != Tptr {
+			Fatal("cgen_wb of type %v", res.Type)
+		}
+		if n.Ullman >= UINF {
+			var n1 Node
+			Tempname(&n1, n.Type)
+			Cgen(n, &n1)
+			n = &n1
+		}
+		cgen_wbptr(n, res)
+		return
+	}
+
+	// Write barrier now handled. Code below this line can ignore wb.
+
+	if Ctxt.Arch.Thechar == '5' { // TODO(rsc): Maybe more often?
+		// if both are addressable, move
+		if n.Addable && res.Addable {
+			if Is64(n.Type) || Is64(res.Type) || n.Op == OREGISTER || res.Op == OREGISTER || Iscomplex[n.Type.Etype] || Iscomplex[res.Type.Etype] {
+				Thearch.Gmove(n, res)
+			} else {
+				var n1 Node
+				Regalloc(&n1, n.Type, nil)
+				Thearch.Gmove(n, &n1)
+				Cgen(&n1, res)
+				Regfree(&n1)
+			}
+
+			return
+		}
+
+		// if both are not addressable, use a temporary.
+		if !n.Addable && !res.Addable {
+			// could use regalloc here sometimes,
+			// but have to check for ullman >= UINF.
+			var n1 Node
+			Tempname(&n1, n.Type)
+			Cgen(n, &n1)
+			Cgen(&n1, res)
+			return
+		}
+
+		// if result is not addressable directly but n is,
+		// compute its address and then store via the address.
+		if !res.Addable {
+			var n1 Node
+			Igen(res, &n1, nil)
+			Cgen(n, &n1)
+			Regfree(&n1)
+			return
+		}
+	}
+
+	if Complexop(n, res) {
+		Complexgen(n, res)
+		return
+	}
+
+	if (Ctxt.Arch.Thechar == '6' || Ctxt.Arch.Thechar == '8') && n.Addable {
+		Thearch.Gmove(n, res)
+		return
+	}
+
+	if Ctxt.Arch.Thechar == '7' || Ctxt.Arch.Thechar == '9' {
+		// if both are addressable, move
+		if n.Addable {
+			if n.Op == OREGISTER || res.Op == OREGISTER {
+				Thearch.Gmove(n, res)
+			} else {
+				var n1 Node
+				Regalloc(&n1, n.Type, nil)
+				Thearch.Gmove(n, &n1)
+				Cgen(&n1, res)
+				Regfree(&n1)
+			}
+			return
+		}
+	}
+
+	// if n is sudoaddable generate addr and move
+	if Ctxt.Arch.Thechar == '5' && !Is64(n.Type) && !Is64(res.Type) && !Iscomplex[n.Type.Etype] && !Iscomplex[res.Type.Etype] {
+		a := Thearch.Optoas(OAS, n.Type)
+		var addr obj.Addr
+		if Thearch.Sudoaddable(a, n, &addr) {
+			if res.Op != OREGISTER {
+				var n2 Node
+				Regalloc(&n2, res.Type, nil)
+				p1 := Thearch.Gins(a, nil, &n2)
+				p1.From = addr
+				if Debug['g'] != 0 {
+					fmt.Printf("%v [ignore previous line]\n", p1)
+				}
+				Thearch.Gmove(&n2, res)
+				Regfree(&n2)
+			} else {
+				p1 := Thearch.Gins(a, nil, res)
+				p1.From = addr
+				if Debug['g'] != 0 {
+					fmt.Printf("%v [ignore previous line]\n", p1)
+				}
+			}
+			Thearch.Sudoclean()
+			return
+		}
+	}
+
+	nl := n.Left
+	nr := n.Right
+
+	if nl != nil && nl.Ullman >= UINF {
+		if nr != nil && nr.Ullman >= UINF {
+			var n1 Node
+			Tempname(&n1, nl.Type)
+			Cgen(nl, &n1)
+			n2 := *n
+			n2.Left = &n1
+			Cgen(&n2, res)
+			return
+		}
+	}
+
+	// 64-bit ops are hard on 32-bit machine.
+	if Ctxt.Arch.Regsize == 4 && (Is64(n.Type) || Is64(res.Type) || n.Left != nil && Is64(n.Left.Type)) {
+		switch n.Op {
+		// math goes to cgen64.
+		case OMINUS,
+			OCOM,
+			OADD,
+			OSUB,
+			OMUL,
+			OLROT,
+			OLSH,
+			ORSH,
+			OAND,
+			OOR,
+			OXOR:
+			Thearch.Cgen64(n, res)
+			return
+		}
+	}
+
+	if Thearch.Cgen_float != nil && nl != nil && Isfloat[n.Type.Etype] && Isfloat[nl.Type.Etype] {
+		Thearch.Cgen_float(n, res)
+		return
+	}
+
+	if !Iscomplex[n.Type.Etype] && Ctxt.Arch.Regsize == 8 {
+		a := Thearch.Optoas(OAS, n.Type)
+		var addr obj.Addr
+		if Thearch.Sudoaddable(a, n, &addr) {
+			if res.Op == OREGISTER {
+				p1 := Thearch.Gins(a, nil, res)
+				p1.From = addr
+			} else {
+				var n2 Node
+				Regalloc(&n2, n.Type, nil)
+				p1 := Thearch.Gins(a, nil, &n2)
+				p1.From = addr
+				Thearch.Gins(a, &n2, res)
+				Regfree(&n2)
+			}
+
+			Thearch.Sudoclean()
+			return
+		}
+	}
+
+	var a int
+	switch n.Op {
+	default:
+		Dump("cgen", n)
+		Dump("cgen-res", res)
+		Fatal("cgen: unknown op %v", Nconv(n, obj.FmtShort|obj.FmtSign))
+
+	case OOROR, OANDAND,
+		OEQ, ONE,
+		OLT, OLE,
+		OGE, OGT,
+		ONOT:
+		Bvgen(n, res, true)
+		return
+
+	case OPLUS:
+		Cgen(nl, res)
+		return
+
+		// unary
+	case OCOM:
+		a := Thearch.Optoas(OXOR, nl.Type)
+
+		var n1 Node
+		Regalloc(&n1, nl.Type, nil)
+		Cgen(nl, &n1)
+		var n2 Node
+		Nodconst(&n2, nl.Type, -1)
+		Thearch.Gins(a, &n2, &n1)
+		cgen_norm(n, &n1, res)
+		return
+
+	case OMINUS:
+		if Isfloat[nl.Type.Etype] {
+			nr = Nodintconst(-1)
+			Convlit(&nr, n.Type)
+			a = Thearch.Optoas(OMUL, nl.Type)
+			goto sbop
+		}
+
+		a := Thearch.Optoas(int(n.Op), nl.Type)
+		// unary
+		var n1 Node
+		Regalloc(&n1, nl.Type, res)
+
+		Cgen(nl, &n1)
+		if Ctxt.Arch.Thechar == '5' {
+			var n2 Node
+			Nodconst(&n2, nl.Type, 0)
+			Thearch.Gins(a, &n2, &n1)
+		} else if Ctxt.Arch.Thechar == '7' {
+			Thearch.Gins(a, &n1, &n1)
+		} else {
+			Thearch.Gins(a, nil, &n1)
+		}
+		cgen_norm(n, &n1, res)
+		return
+
+	case OSQRT:
+		var n1 Node
+		Regalloc(&n1, nl.Type, res)
+		Cgen(n.Left, &n1)
+		Thearch.Gins(Thearch.Optoas(OSQRT, nl.Type), &n1, &n1)
+		Thearch.Gmove(&n1, res)
+		Regfree(&n1)
+		return
+
+	case OGETG:
+		Thearch.Getg(res)
+		return
+
+		// symmetric binary
+	case OAND,
+		OOR,
+		OXOR,
+		OADD,
+		OMUL:
+		if n.Op == OMUL && Thearch.Cgen_bmul != nil && Thearch.Cgen_bmul(int(n.Op), nl, nr, res) {
+			break
+		}
+		a = Thearch.Optoas(int(n.Op), nl.Type)
+		goto sbop
+
+		// asymmetric binary
+	case OSUB:
+		a = Thearch.Optoas(int(n.Op), nl.Type)
+		goto abop
+
+	case OHMUL:
+		Thearch.Cgen_hmul(nl, nr, res)
+
+	case OCONV:
+		if Eqtype(n.Type, nl.Type) || Noconv(n.Type, nl.Type) {
+			Cgen(nl, res)
+			return
+		}
+
+		if Ctxt.Arch.Thechar == '8' {
+			var n1 Node
+			var n2 Node
+			Tempname(&n2, n.Type)
+			Mgen(nl, &n1, res)
+			Thearch.Gmove(&n1, &n2)
+			Thearch.Gmove(&n2, res)
+			Mfree(&n1)
+			break
+		}
+
+		var n1 Node
+		var n2 Node
+		if Ctxt.Arch.Thechar == '5' {
+			if nl.Addable && !Is64(nl.Type) {
+				Regalloc(&n1, nl.Type, res)
+				Thearch.Gmove(nl, &n1)
+			} else {
+				if n.Type.Width > int64(Widthptr) || Is64(nl.Type) || Isfloat[nl.Type.Etype] {
+					Tempname(&n1, nl.Type)
+				} else {
+					Regalloc(&n1, nl.Type, res)
+				}
+				Cgen(nl, &n1)
+			}
+			if n.Type.Width > int64(Widthptr) || Is64(n.Type) || Isfloat[n.Type.Etype] {
+				Tempname(&n2, n.Type)
+			} else {
+				Regalloc(&n2, n.Type, nil)
+			}
+		} else {
+			if n.Type.Width > nl.Type.Width {
+				// If loading from memory, do conversion during load,
+				// so as to avoid use of 8-bit register in, say, int(*byteptr).
+				switch nl.Op {
+				case ODOT, ODOTPTR, OINDEX, OIND, ONAME:
+					Igen(nl, &n1, res)
+					Regalloc(&n2, n.Type, res)
+					Thearch.Gmove(&n1, &n2)
+					Thearch.Gmove(&n2, res)
+					Regfree(&n2)
+					Regfree(&n1)
+					return
+				}
+			}
+			Regalloc(&n1, nl.Type, res)
+			Regalloc(&n2, n.Type, &n1)
+			Cgen(nl, &n1)
+		}
+
+		// if we do the conversion n1 -> n2 here
+		// reusing the register, then gmove won't
+		// have to allocate its own register.
+		Thearch.Gmove(&n1, &n2)
+		Thearch.Gmove(&n2, res)
+		if n2.Op == OREGISTER {
+			Regfree(&n2)
+		}
+		if n1.Op == OREGISTER {
+			Regfree(&n1)
+		}
+
+	case ODOT,
+		ODOTPTR,
+		OINDEX,
+		OIND,
+		ONAME: // PHEAP or PPARAMREF var
+		var n1 Node
+		Igen(n, &n1, res)
+
+		Thearch.Gmove(&n1, res)
+		Regfree(&n1)
+
+		// interface table is first word of interface value
+	case OITAB:
+		var n1 Node
+		Igen(nl, &n1, res)
+
+		n1.Type = n.Type
+		Thearch.Gmove(&n1, res)
+		Regfree(&n1)
+
+	case OSPTR:
+		// pointer is the first word of string or slice.
+		if Isconst(nl, CTSTR) {
+			var n1 Node
+			Regalloc(&n1, Types[Tptr], res)
+			p1 := Thearch.Gins(Thearch.Optoas(OAS, n1.Type), nil, &n1)
+			Datastring(nl.Val.U.(string), &p1.From)
+			p1.From.Type = obj.TYPE_ADDR
+			Thearch.Gmove(&n1, res)
+			Regfree(&n1)
+			break
+		}
+
+		var n1 Node
+		Igen(nl, &n1, res)
+		n1.Type = n.Type
+		Thearch.Gmove(&n1, res)
+		Regfree(&n1)
+
+	case OLEN:
+		if Istype(nl.Type, TMAP) || Istype(nl.Type, TCHAN) {
+			// map and chan have len in the first int-sized word.
+			// a zero pointer means zero length
+			var n1 Node
+			Regalloc(&n1, Types[Tptr], res)
+
+			Cgen(nl, &n1)
+
+			var n2 Node
+			Nodconst(&n2, Types[Tptr], 0)
+			p1 := Thearch.Ginscmp(OEQ, Types[Tptr], &n1, &n2, 0)
+
+			n2 = n1
+			n2.Op = OINDREG
+			n2.Type = Types[Simtype[TINT]]
+			Thearch.Gmove(&n2, &n1)
+
+			Patch(p1, Pc)
+
+			Thearch.Gmove(&n1, res)
+			Regfree(&n1)
+			break
+		}
+
+		if Istype(nl.Type, TSTRING) || Isslice(nl.Type) {
+			// both slice and string have len one pointer into the struct.
+			// a zero pointer means zero length
+			var n1 Node
+			Igen(nl, &n1, res)
+
+			n1.Type = Types[Simtype[TUINT]]
+			n1.Xoffset += int64(Array_nel)
+			Thearch.Gmove(&n1, res)
+			Regfree(&n1)
+			break
+		}
+
+		Fatal("cgen: OLEN: unknown type %v", Tconv(nl.Type, obj.FmtLong))
+
+	case OCAP:
+		if Istype(nl.Type, TCHAN) {
+			// chan has cap in the second int-sized word.
+			// a zero pointer means zero length
+			var n1 Node
+			Regalloc(&n1, Types[Tptr], res)
+
+			Cgen(nl, &n1)
+
+			var n2 Node
+			Nodconst(&n2, Types[Tptr], 0)
+			p1 := Thearch.Ginscmp(OEQ, Types[Tptr], &n1, &n2, 0)
+
+			n2 = n1
+			n2.Op = OINDREG
+			n2.Xoffset = int64(Widthint)
+			n2.Type = Types[Simtype[TINT]]
+			Thearch.Gmove(&n2, &n1)
+
+			Patch(p1, Pc)
+
+			Thearch.Gmove(&n1, res)
+			Regfree(&n1)
+			break
+		}
+
+		if Isslice(nl.Type) {
+			var n1 Node
+			Igen(nl, &n1, res)
+			n1.Type = Types[Simtype[TUINT]]
+			n1.Xoffset += int64(Array_cap)
+			Thearch.Gmove(&n1, res)
+			Regfree(&n1)
+			break
+		}
+
+		Fatal("cgen: OCAP: unknown type %v", Tconv(nl.Type, obj.FmtLong))
+
+	case OADDR:
+		if n.Bounded { // let race detector avoid nil checks
+			Disable_checknil++
+		}
+		Agen(nl, res)
+		if n.Bounded {
+			Disable_checknil--
+		}
+
+	case OCALLMETH:
+		cgen_callmeth(n, 0)
+		cgen_callret(n, res)
+
+	case OCALLINTER:
+		cgen_callinter(n, res, 0)
+		cgen_callret(n, res)
+
+	case OCALLFUNC:
+		cgen_call(n, 0)
+		cgen_callret(n, res)
+
+	case OMOD, ODIV:
+		if Isfloat[n.Type.Etype] || Thearch.Dodiv == nil {
+			a = Thearch.Optoas(int(n.Op), nl.Type)
+			goto abop
+		}
+
+		if nl.Ullman >= nr.Ullman {
+			var n1 Node
+			Regalloc(&n1, nl.Type, res)
+			Cgen(nl, &n1)
+			cgen_div(int(n.Op), &n1, nr, res)
+			Regfree(&n1)
+		} else {
+			var n2 Node
+			if !Smallintconst(nr) {
+				Regalloc(&n2, nr.Type, res)
+				Cgen(nr, &n2)
+			} else {
+				n2 = *nr
+			}
+
+			cgen_div(int(n.Op), nl, &n2, res)
+			if n2.Op != OLITERAL {
+				Regfree(&n2)
+			}
+		}
+
+	case OLSH, ORSH, OLROT:
+		Thearch.Cgen_shift(int(n.Op), n.Bounded, nl, nr, res)
+	}
+
+	return
+
+	/*
+	 * put simplest on right - we'll generate into left
+	 * and then adjust it using the computation of right.
+	 * constants and variables have the same ullman
+	 * count, so look for constants specially.
+	 *
+	 * an integer constant we can use as an immediate
+	 * is simpler than a variable - we can use the immediate
+	 * in the adjustment instruction directly - so it goes
+	 * on the right.
+	 *
+	 * other constants, like big integers or floating point
+	 * constants, require a mov into a register, so those
+	 * might as well go on the left, so we can reuse that
+	 * register for the computation.
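+ *
+ * for example, in evaluating x+3, x is generated into the left
+ * register and the small constant 3 stays on the right, to be used
+ * directly as the immediate operand of the add.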
+	 */
+sbop: // symmetric binary
+	if nl.Ullman < nr.Ullman || (nl.Ullman == nr.Ullman && (Smallintconst(nl) || (nr.Op == OLITERAL && !Smallintconst(nr)))) {
+		r := nl
+		nl = nr
+		nr = r
+	}
+
+abop: // asymmetric binary
+	var n1 Node
+	var n2 Node
+	if Ctxt.Arch.Thechar == '8' {
+		// no registers, sigh
+		if Smallintconst(nr) {
+			var n1 Node
+			Mgen(nl, &n1, res)
+			var n2 Node
+			Regalloc(&n2, nl.Type, &n1)
+			Thearch.Gmove(&n1, &n2)
+			Thearch.Gins(a, nr, &n2)
+			Thearch.Gmove(&n2, res)
+			Regfree(&n2)
+			Mfree(&n1)
+		} else if nl.Ullman >= nr.Ullman {
+			var nt Node
+			Tempname(&nt, nl.Type)
+			Cgen(nl, &nt)
+			var n2 Node
+			Mgen(nr, &n2, nil)
+			var n1 Node
+			Regalloc(&n1, nl.Type, res)
+			Thearch.Gmove(&nt, &n1)
+			Thearch.Gins(a, &n2, &n1)
+			Thearch.Gmove(&n1, res)
+			Regfree(&n1)
+			Mfree(&n2)
+		} else {
+			var n2 Node
+			Regalloc(&n2, nr.Type, res)
+			Cgen(nr, &n2)
+			var n1 Node
+			Regalloc(&n1, nl.Type, nil)
+			Cgen(nl, &n1)
+			Thearch.Gins(a, &n2, &n1)
+			Regfree(&n2)
+			Thearch.Gmove(&n1, res)
+			Regfree(&n1)
+		}
+		return
+	}
+
+	if nl.Ullman >= nr.Ullman {
+		Regalloc(&n1, nl.Type, res)
+		Cgen(nl, &n1)
+
+		if Smallintconst(nr) && Ctxt.Arch.Thechar != '5' && Ctxt.Arch.Thechar != '7' && Ctxt.Arch.Thechar != '9' { // TODO(rsc): Check opcode for arm
+			n2 = *nr
+		} else {
+			Regalloc(&n2, nr.Type, nil)
+			Cgen(nr, &n2)
+		}
+	} else {
+		if Smallintconst(nr) && Ctxt.Arch.Thechar != '5' && Ctxt.Arch.Thechar != '7' && Ctxt.Arch.Thechar != '9' { // TODO(rsc): Check opcode for arm
+			n2 = *nr
+		} else {
+			Regalloc(&n2, nr.Type, res)
+			Cgen(nr, &n2)
+		}
+
+		Regalloc(&n1, nl.Type, nil)
+		Cgen(nl, &n1)
+	}
+
+	Thearch.Gins(a, &n2, &n1)
+	if n2.Op != OLITERAL {
+		Regfree(&n2)
+	}
+	cgen_norm(n, &n1, res)
+}
+
+var sys_wbptr *Node
+
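+// cgen_wbptr emits res = n for a pointer store that needs a write
+// barrier. If writeBarrierEnabled is false it performs a plain store;
+// otherwise it pushes &res and n as arguments and calls
+// runtime.writebarrierptr.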
+func cgen_wbptr(n, res *Node) {
+	if Curfn != nil && Curfn.Func.Nowritebarrier {
+		Yyerror("write barrier prohibited")
+	}
+	if Debug_wb > 0 {
+		Warn("write barrier")
+	}
+
+	var dst, src Node
+	Igen(res, &dst, nil)
+	if n.Op == OREGISTER {
+		src = *n
+		Regrealloc(&src)
+	} else {
+		Cgenr(n, &src, nil)
+	}
+
+	wbEnabled := syslook("writeBarrierEnabled", 0)
+	pbr := Thearch.Ginscmp(ONE, Types[TUINT8], wbEnabled, Nodintconst(0), -1)
+	Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), &src, &dst)
+	pjmp := Gbranch(obj.AJMP, nil, 0)
+	Patch(pbr, Pc)
+	var adst Node
+	Agenr(&dst, &adst, &dst)
+	p := Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), &adst, nil)
+	a := &p.To
+	a.Type = obj.TYPE_MEM
+	a.Reg = int16(Thearch.REGSP)
+	a.Offset = 0
+	if HasLinkRegister() {
+		a.Offset += int64(Widthptr)
+	}
+	p2 := Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), &src, nil)
+	p2.To = p.To
+	p2.To.Offset += int64(Widthptr)
+	Regfree(&adst)
+	if sys_wbptr == nil {
+		sys_wbptr = writebarrierfn("writebarrierptr", Types[Tptr], Types[Tptr])
+	}
+	Ginscall(sys_wbptr, 0)
+	Patch(pjmp, Pc)
+
+	Regfree(&dst)
+	Regfree(&src)
+}
+
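+// cgen_wbfat emits res = n for a multiword value that contains
+// pointers. It pushes the type descriptor and the destination and
+// source addresses and calls runtime.typedmemmove, which performs
+// the write barriers.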
+func cgen_wbfat(n, res *Node) {
+	if Curfn != nil && Curfn.Func.Nowritebarrier {
+		Yyerror("write barrier prohibited")
+	}
+	if Debug_wb > 0 {
+		Warn("write barrier")
+	}
+	needType := true
+	funcName := "typedmemmove"
+	var dst, src Node
+	if n.Ullman >= res.Ullman {
+		Agenr(n, &src, nil)
+		Agenr(res, &dst, nil)
+	} else {
+		Agenr(res, &dst, nil)
+		Agenr(n, &src, nil)
+	}
+	p := Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), &dst, nil)
+	a := &p.To
+	a.Type = obj.TYPE_MEM
+	a.Reg = int16(Thearch.REGSP)
+	a.Offset = 0
+	if HasLinkRegister() {
+		a.Offset += int64(Widthptr)
+	}
+	if needType {
+		a.Offset += int64(Widthptr)
+	}
+	p2 := Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), &src, nil)
+	p2.To = p.To
+	p2.To.Offset += int64(Widthptr)
+	Regfree(&dst)
+	if needType {
+		src.Type = Types[Tptr]
+		Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), typename(n.Type), &src)
+		p3 := Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), &src, nil)
+		p3.To = p2.To
+		p3.To.Offset -= 2 * int64(Widthptr)
+	}
+	Regfree(&src)
+	Ginscall(writebarrierfn(funcName, Types[Tptr], Types[Tptr]), 0)
+}
+
+// cgen_norm moves n1 to res, truncating to expected type if necessary.
+// n1 is a register, and cgen_norm frees it.
+func cgen_norm(n, n1, res *Node) {
+	switch Ctxt.Arch.Thechar {
+	case '6', '8':
+		// We use sized math, so the result is already truncated.
+	default:
+		switch n.Op {
+		case OADD, OSUB, OMUL, ODIV, OCOM, OMINUS:
+			// TODO(rsc): What about left shift?
+			Thearch.Gins(Thearch.Optoas(OAS, n.Type), n1, n1)
+		}
+	}
+
+	Thearch.Gmove(n1, res)
+	Regfree(n1)
+}
+
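+// Mgen is like Cgen, but for use as an instruction operand: if n is
+// addressable it is used in place; otherwise n is generated into a
+// register when it fits in a machine word (or is a float) and into a
+// stack temporary when it does not. The caller must call Mfree(n1).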
+func Mgen(n *Node, n1 *Node, rg *Node) {
+	n1.Op = OEMPTY
+
+	if n.Addable {
+		*n1 = *n
+		if n1.Op == OREGISTER || n1.Op == OINDREG {
+			reg[n.Reg-int16(Thearch.REGMIN)]++
+		}
+		return
+	}
+
+	Tempname(n1, n.Type)
+	Cgen(n, n1)
+	if n.Type.Width <= int64(Widthptr) || Isfloat[n.Type.Etype] {
+		n2 := *n1
+		Regalloc(n1, n.Type, rg)
+		Thearch.Gmove(&n2, n1)
+	}
+}
+
+func Mfree(n *Node) {
+	if n.Op == OREGISTER {
+		Regfree(n)
+	}
+}
+
+/*
+ * allocate a register (reusing res if possible) and generate
+ *  a = n
+ * The caller must call Regfree(a).
+ */
+func Cgenr(n *Node, a *Node, res *Node) {
+	if Debug['g'] != 0 {
+		Dump("cgenr-n", n)
+	}
+
+	if Isfat(n.Type) {
+		Fatal("cgenr on fat node")
+	}
+
+	if n.Addable {
+		Regalloc(a, n.Type, res)
+		Thearch.Gmove(n, a)
+		return
+	}
+
+	switch n.Op {
+	case ONAME,
+		ODOT,
+		ODOTPTR,
+		OINDEX,
+		OCALLFUNC,
+		OCALLMETH,
+		OCALLINTER:
+		var n1 Node
+		Igen(n, &n1, res)
+		Regalloc(a, Types[Tptr], &n1)
+		Thearch.Gmove(&n1, a)
+		Regfree(&n1)
+
+	default:
+		Regalloc(a, n.Type, res)
+		Cgen(n, a)
+	}
+}
+
+/*
+ * allocate a register (reusing res if possible) and generate
+ * a = &n
+ * The caller must call Regfree(a).
+ * The generated code checks that the result is not nil.
+ */
+func Agenr(n *Node, a *Node, res *Node) {
+	if Debug['g'] != 0 {
+		Dump("\nagenr-n", n)
+	}
+
+	nl := n.Left
+	nr := n.Right
+
+	switch n.Op {
+	case ODOT, ODOTPTR, OCALLFUNC, OCALLMETH, OCALLINTER:
+		var n1 Node
+		Igen(n, &n1, res)
+		Regalloc(a, Types[Tptr], &n1)
+		Agen(&n1, a)
+		Regfree(&n1)
+
+	case OIND:
+		Cgenr(n.Left, a, res)
+		Cgen_checknil(a)
+
+	case OINDEX:
+		if Ctxt.Arch.Thechar == '5' {
+			var p2 *obj.Prog // to be patched to panicindex.
+			w := uint32(n.Type.Width)
+			bounded := Debug['B'] != 0 || n.Bounded
+			var n1 Node
+			var n3 Node
+			if nr.Addable {
+				var tmp Node
+				if !Isconst(nr, CTINT) {
+					Tempname(&tmp, Types[TINT32])
+				}
+				if !Isconst(nl, CTSTR) {
+					Agenr(nl, &n3, res)
+				}
+				if !Isconst(nr, CTINT) {
+					p2 = Thearch.Cgenindex(nr, &tmp, bounded)
+					Regalloc(&n1, tmp.Type, nil)
+					Thearch.Gmove(&tmp, &n1)
+				}
+			} else if nl.Addable {
+				if !Isconst(nr, CTINT) {
+					var tmp Node
+					Tempname(&tmp, Types[TINT32])
+					p2 = Thearch.Cgenindex(nr, &tmp, bounded)
+					Regalloc(&n1, tmp.Type, nil)
+					Thearch.Gmove(&tmp, &n1)
+				}
+
+				if !Isconst(nl, CTSTR) {
+					Agenr(nl, &n3, res)
+				}
+			} else {
+				var tmp Node
+				Tempname(&tmp, Types[TINT32])
+				p2 = Thearch.Cgenindex(nr, &tmp, bounded)
+				nr = &tmp
+				if !Isconst(nl, CTSTR) {
+					Agenr(nl, &n3, res)
+				}
+				Regalloc(&n1, tmp.Type, nil)
+				Thearch.Gins(Thearch.Optoas(OAS, tmp.Type), &tmp, &n1)
+			}
+
+			// &a is in &n3 (allocated in res)
+			// i is in &n1 (if not constant)
+			// w is width
+
+			// constant index
+			if Isconst(nr, CTINT) {
+				if Isconst(nl, CTSTR) {
+					Fatal("constant string constant index")
+				}
+				v := uint64(Mpgetfix(nr.Val.U.(*Mpint)))
+				var n2 Node
+				if Isslice(nl.Type) || nl.Type.Etype == TSTRING {
+					if Debug['B'] == 0 && !n.Bounded {
+						n1 = n3
+						n1.Op = OINDREG
+						n1.Type = Types[Tptr]
+						n1.Xoffset = int64(Array_nel)
+						Nodconst(&n2, Types[TUINT32], int64(v))
+						p1 := Thearch.Ginscmp(OGT, Types[TUINT32], &n1, &n2, +1)
+						Ginscall(Panicindex, -1)
+						Patch(p1, Pc)
+					}
+
+					n1 = n3
+					n1.Op = OINDREG
+					n1.Type = Types[Tptr]
+					n1.Xoffset = int64(Array_array)
+					Thearch.Gmove(&n1, &n3)
+				}
+
+				Nodconst(&n2, Types[Tptr], int64(v*uint64(w)))
+				Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), &n2, &n3)
+				*a = n3
+				break
+			}
+
+			var n2 Node
+			Regalloc(&n2, Types[TINT32], &n1) // i
+			Thearch.Gmove(&n1, &n2)
+			Regfree(&n1)
+
+			var n4 Node
+			if Debug['B'] == 0 && !n.Bounded {
+				// check bounds
+				if Isconst(nl, CTSTR) {
+					Nodconst(&n4, Types[TUINT32], int64(len(nl.Val.U.(string))))
+				} else if Isslice(nl.Type) || nl.Type.Etype == TSTRING {
+					n1 = n3
+					n1.Op = OINDREG
+					n1.Type = Types[Tptr]
+					n1.Xoffset = int64(Array_nel)
+					Regalloc(&n4, Types[TUINT32], nil)
+					Thearch.Gmove(&n1, &n4)
+				} else {
+					Nodconst(&n4, Types[TUINT32], nl.Type.Bound)
+				}
+				p1 := Thearch.Ginscmp(OLT, Types[TUINT32], &n2, &n4, +1)
+				if n4.Op == OREGISTER {
+					Regfree(&n4)
+				}
+				if p2 != nil {
+					Patch(p2, Pc)
+				}
+				Ginscall(Panicindex, -1)
+				Patch(p1, Pc)
+			}
+
+			if Isconst(nl, CTSTR) {
+				Regalloc(&n3, Types[Tptr], res)
+				p1 := Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), nil, &n3)
+				Datastring(nl.Val.U.(string), &p1.From)
+				p1.From.Type = obj.TYPE_ADDR
+			} else if Isslice(nl.Type) || nl.Type.Etype == TSTRING {
+				n1 = n3
+				n1.Op = OINDREG
+				n1.Type = Types[Tptr]
+				n1.Xoffset = int64(Array_array)
+				Thearch.Gmove(&n1, &n3)
+			}
+
+			if w == 0 {
+				// nothing to do
+			} else if Thearch.AddIndex != nil && Thearch.AddIndex(&n2, int64(w), &n3) {
+				// done by back end
+			} else if w == 1 {
+				Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), &n2, &n3)
+			} else {
+				if w&(w-1) == 0 {
+					// Power of 2.  Use shift.
+					Thearch.Ginscon(Thearch.Optoas(OLSH, Types[TUINT32]), int64(log2(uint64(w))), &n2)
+				} else {
+					// Not a power of 2.  Use multiply.
+					Regalloc(&n4, Types[TUINT32], nil)
+					Nodconst(&n1, Types[TUINT32], int64(w))
+					Thearch.Gmove(&n1, &n4)
+					Thearch.Gins(Thearch.Optoas(OMUL, Types[TUINT32]), &n4, &n2)
+					Regfree(&n4)
+				}
+				Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), &n2, &n3)
+			}
+			*a = n3
+			Regfree(&n2)
+			break
+		}
+		if Ctxt.Arch.Thechar == '8' {
+			var p2 *obj.Prog // to be patched to panicindex.
+			w := uint32(n.Type.Width)
+			bounded := Debug['B'] != 0 || n.Bounded
+			var n3 Node
+			var tmp Node
+			var n1 Node
+			if nr.Addable {
+				// Generate &nl first, and move nr into register.
+				if !Isconst(nl, CTSTR) {
+					Igen(nl, &n3, res)
+				}
+				if !Isconst(nr, CTINT) {
+					p2 = Thearch.Igenindex(nr, &tmp, bounded)
+					Regalloc(&n1, tmp.Type, nil)
+					Thearch.Gmove(&tmp, &n1)
+				}
+			} else if nl.Addable {
+				// Generate nr first, and move &nl into register.
+				if !Isconst(nr, CTINT) {
+					p2 = Thearch.Igenindex(nr, &tmp, bounded)
+					Regalloc(&n1, tmp.Type, nil)
+					Thearch.Gmove(&tmp, &n1)
+				}
+
+				if !Isconst(nl, CTSTR) {
+					Igen(nl, &n3, res)
+				}
+			} else {
+				p2 = Thearch.Igenindex(nr, &tmp, bounded)
+				nr = &tmp
+				if !Isconst(nl, CTSTR) {
+					Igen(nl, &n3, res)
+				}
+				Regalloc(&n1, tmp.Type, nil)
+				Thearch.Gins(Thearch.Optoas(OAS, tmp.Type), &tmp, &n1)
+			}
+
+			// For fixed array we really want the pointer in n3.
+			var n2 Node
+			if Isfixedarray(nl.Type) {
+				Regalloc(&n2, Types[Tptr], &n3)
+				Agen(&n3, &n2)
+				Regfree(&n3)
+				n3 = n2
+			}
+
+			// &a[0] is in n3 (allocated in res)
+			// i is in n1 (if not constant)
+			// len(a) is in nlen (if needed)
+			// w is width
+
+			// constant index
+			if Isconst(nr, CTINT) {
+				if Isconst(nl, CTSTR) {
+					Fatal("constant string constant index") // front end should handle
+				}
+				v := uint64(Mpgetfix(nr.Val.U.(*Mpint)))
+				if Isslice(nl.Type) || nl.Type.Etype == TSTRING {
+					if Debug['B'] == 0 && !n.Bounded {
+						nlen := n3
+						nlen.Type = Types[TUINT32]
+						nlen.Xoffset += int64(Array_nel)
+						Nodconst(&n2, Types[TUINT32], int64(v))
+						p1 := Thearch.Ginscmp(OGT, Types[TUINT32], &nlen, &n2, +1)
+						Ginscall(Panicindex, -1)
+						Patch(p1, Pc)
+					}
+				}
+
+				// Load base pointer in n2 = n3.
+				Regalloc(&n2, Types[Tptr], &n3)
+
+				n3.Type = Types[Tptr]
+				n3.Xoffset += int64(Array_array)
+				Thearch.Gmove(&n3, &n2)
+				Regfree(&n3)
+				if v*uint64(w) != 0 {
+					Nodconst(&n1, Types[Tptr], int64(v*uint64(w)))
+					Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), &n1, &n2)
+				}
+				*a = n2
+				break
+			}
+
+			// i is in register n1, extend to 32 bits.
+			t := Types[TUINT32]
+
+			if Issigned[n1.Type.Etype] {
+				t = Types[TINT32]
+			}
+
+			Regalloc(&n2, t, &n1) // i
+			Thearch.Gmove(&n1, &n2)
+			Regfree(&n1)
+
+			if Debug['B'] == 0 && !n.Bounded {
+				// check bounds
+				t := Types[TUINT32]
+
+				var nlen Node
+				if Isconst(nl, CTSTR) {
+					Nodconst(&nlen, t, int64(len(nl.Val.U.(string))))
+				} else if Isslice(nl.Type) || nl.Type.Etype == TSTRING {
+					nlen = n3
+					nlen.Type = t
+					nlen.Xoffset += int64(Array_nel)
+				} else {
+					Nodconst(&nlen, t, nl.Type.Bound)
+				}
+
+				p1 := Thearch.Ginscmp(OLT, t, &n2, &nlen, +1)
+				if p2 != nil {
+					Patch(p2, Pc)
+				}
+				Ginscall(Panicindex, -1)
+				Patch(p1, Pc)
+			}
+
+			if Isconst(nl, CTSTR) {
+				Regalloc(&n3, Types[Tptr], res)
+				p1 := Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), nil, &n3)
+				Datastring(nl.Val.U.(string), &p1.From)
+				p1.From.Type = obj.TYPE_ADDR
+				Thearch.Gins(Thearch.Optoas(OADD, n3.Type), &n2, &n3)
+				goto indexdone1
+			}
+
+			// Load base pointer in n3.
+			Regalloc(&tmp, Types[Tptr], &n3)
+
+			if Isslice(nl.Type) || nl.Type.Etype == TSTRING {
+				n3.Type = Types[Tptr]
+				n3.Xoffset += int64(Array_array)
+				Thearch.Gmove(&n3, &tmp)
+			}
+
+			Regfree(&n3)
+			n3 = tmp
+
+			if w == 0 {
+				// nothing to do
+			} else if Thearch.AddIndex != nil && Thearch.AddIndex(&n2, int64(w), &n3) {
+				// done by back end
+			} else if w == 1 {
+				Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), &n2, &n3)
+			} else {
+				if w&(w-1) == 0 {
+					// Power of 2.  Use shift.
+					Thearch.Ginscon(Thearch.Optoas(OLSH, Types[TUINT32]), int64(log2(uint64(w))), &n2)
+				} else {
+					// Not a power of 2.  Use multiply.
+					Thearch.Ginscon(Thearch.Optoas(OMUL, Types[TUINT32]), int64(w), &n2)
+				}
+				Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), &n2, &n3)
+			}
+
+		indexdone1:
+			*a = n3
+			Regfree(&n2)
+			break
+		}
+
+		freelen := 0
+		w := uint64(n.Type.Width)
+
+		// Generate the non-addressable child first.
+		var n3 Node
+		var nlen Node
+		var tmp Node
+		var n1 Node
+		if nr.Addable {
+			goto irad
+		}
+		if nl.Addable {
+			Cgenr(nr, &n1, nil)
+			if !Isconst(nl, CTSTR) {
+				if Isfixedarray(nl.Type) {
+					Agenr(nl, &n3, res)
+				} else {
+					Igen(nl, &nlen, res)
+					freelen = 1
+					nlen.Type = Types[Tptr]
+					nlen.Xoffset += int64(Array_array)
+					Regalloc(&n3, Types[Tptr], res)
+					Thearch.Gmove(&nlen, &n3)
+					nlen.Type = Types[Simtype[TUINT]]
+					nlen.Xoffset += int64(Array_nel) - int64(Array_array)
+				}
+			}
+
+			goto index
+		}
+
+		Tempname(&tmp, nr.Type)
+		Cgen(nr, &tmp)
+		nr = &tmp
+
+	irad:
+		if !Isconst(nl, CTSTR) {
+			if Isfixedarray(nl.Type) {
+				Agenr(nl, &n3, res)
+			} else {
+				if !nl.Addable {
+					if res != nil && res.Op == OREGISTER { // give up res, which we don't need yet.
+						Regfree(res)
+					}
+
+					// igen will need an addressable node.
+					var tmp2 Node
+					Tempname(&tmp2, nl.Type)
+					Cgen(nl, &tmp2)
+					nl = &tmp2
+
+					if res != nil && res.Op == OREGISTER { // reacquire res
+						Regrealloc(res)
+					}
+				}
+
+				Igen(nl, &nlen, res)
+				freelen = 1
+				nlen.Type = Types[Tptr]
+				nlen.Xoffset += int64(Array_array)
+				Regalloc(&n3, Types[Tptr], res)
+				Thearch.Gmove(&nlen, &n3)
+				nlen.Type = Types[Simtype[TUINT]]
+				nlen.Xoffset += int64(Array_nel) - int64(Array_array)
+			}
+		}
+
+		if !Isconst(nr, CTINT) {
+			Cgenr(nr, &n1, nil)
+		}
+
+		goto index
+
+		// &a is in &n3 (allocated in res)
+		// i is in &n1 (if not constant)
+		// len(a) is in nlen (if needed)
+		// w is width
+
+		// constant index
+	index:
+		if Isconst(nr, CTINT) {
+			if Isconst(nl, CTSTR) {
+				Fatal("constant string constant index") // front end should handle
+			}
+			v := uint64(Mpgetfix(nr.Val.U.(*Mpint)))
+			if Isslice(nl.Type) || nl.Type.Etype == TSTRING {
+				if Debug['B'] == 0 && !n.Bounded {
+					p1 := Thearch.Ginscmp(OGT, Types[Simtype[TUINT]], &nlen, Nodintconst(int64(v)), +1)
+					Ginscall(Panicindex, -1)
+					Patch(p1, Pc)
+				}
+
+				Regfree(&nlen)
+			}
+
+			if v*w != 0 {
+				Thearch.Ginscon(Thearch.Optoas(OADD, Types[Tptr]), int64(v*w), &n3)
+			}
+			*a = n3
+			break
+		}
+
+		// type of the index
+		t := Types[TUINT64]
+
+		if Issigned[n1.Type.Etype] {
+			t = Types[TINT64]
+		}
+
+		var n2 Node
+		Regalloc(&n2, t, &n1) // i
+		Thearch.Gmove(&n1, &n2)
+		Regfree(&n1)
+
+		if Debug['B'] == 0 && !n.Bounded {
+			// check bounds
+			t = Types[Simtype[TUINT]]
+
+			if Is64(nr.Type) {
+				t = Types[TUINT64]
+			}
+			if Isconst(nl, CTSTR) {
+				Nodconst(&nlen, t, int64(len(nl.Val.U.(string))))
+			} else if Isslice(nl.Type) || nl.Type.Etype == TSTRING {
+				// nlen already initialized
+			} else {
+				Nodconst(&nlen, t, nl.Type.Bound)
+			}
+
+			p1 := Thearch.Ginscmp(OLT, t, &n2, &nlen, +1)
+			Ginscall(Panicindex, -1)
+			Patch(p1, Pc)
+		}
+
+		if Isconst(nl, CTSTR) {
+			Regalloc(&n3, Types[Tptr], res)
+			p1 := Thearch.Gins(Thearch.Optoas(OAS, n3.Type), nil, &n3) // XXX was LEAQ!
+			Datastring(nl.Val.U.(string), &p1.From)
+			p1.From.Type = obj.TYPE_ADDR
+			Thearch.Gins(Thearch.Optoas(OADD, n3.Type), &n2, &n3)
+			goto indexdone
+		}
+
+		if w == 0 {
+			// nothing to do
+		} else if Thearch.AddIndex != nil && Thearch.AddIndex(&n2, int64(w), &n3) {
+			// done by back end
+		} else if w == 1 {
+			Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), &n2, &n3)
+		} else {
+			if w&(w-1) == 0 {
+				// Power of 2.  Use shift.
+				Thearch.Ginscon(Thearch.Optoas(OLSH, t), int64(log2(w)), &n2)
+			} else {
+				// Not a power of 2.  Use multiply.
+				Thearch.Ginscon(Thearch.Optoas(OMUL, t), int64(w), &n2)
+			}
+			Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), &n2, &n3)
+		}
+
+	indexdone:
+		*a = n3
+		Regfree(&n2)
+		if freelen != 0 {
+			Regfree(&nlen)
+		}
+
+	default:
+		Regalloc(a, Types[Tptr], res)
+		Agen(n, a)
+	}
+}
+
+// log2 returns the logarithm base 2 of n.  n must be a power of 2.
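+// For example, log2(8) == 3.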
+func log2(n uint64) int {
+	x := 0
+	for n>>uint(x) != 1 {
+		x++
+	}
+	return x
+}
+
+/*
+ * generate:
+ *	res = &n;
+ * The generated code checks that the result is not nil.
+ */
+func Agen(n *Node, res *Node) {
+	if Debug['g'] != 0 {
+		Dump("\nagen-res", res)
+		Dump("agen-r", n)
+	}
+
+	if n == nil || n.Type == nil {
+		return
+	}
+
+	for n.Op == OCONVNOP {
+		n = n.Left
+	}
+
+	if Isconst(n, CTNIL) && n.Type.Width > int64(Widthptr) {
+		// Use of a nil interface or nil slice.
+		// Create a temporary we can take the address of and read.
+		// The generated code is just going to panic, so it need not
+		// be terribly efficient. See issue 3670.
+		var n1 Node
+		Tempname(&n1, n.Type)
+
+		Gvardef(&n1)
+		Thearch.Clearfat(&n1)
+		var n2 Node
+		Regalloc(&n2, Types[Tptr], res)
+		var n3 Node
+		n3.Op = OADDR
+		n3.Left = &n1
+		Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), &n3, &n2)
+		Thearch.Gmove(&n2, res)
+		Regfree(&n2)
+		return
+	}
+
+	if n.Op == OINDREG && n.Xoffset == 0 {
+		// Generate MOVW R0, R1 instead of MOVW $0(R0), R1.
+		// This allows better move propagation in the back ends
+		// (and maybe it helps the processor).
+		n1 := *n
+		n1.Op = OREGISTER
+		n1.Type = res.Type
+		Thearch.Gmove(&n1, res)
+		return
+	}
+
+	if n.Addable {
+		if n.Op == OREGISTER {
+			Fatal("agen OREGISTER")
+		}
+		var n1 Node
+		n1.Op = OADDR
+		n1.Left = n
+		var n2 Node
+		Regalloc(&n2, Types[Tptr], res)
+		Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), &n1, &n2)
+		Thearch.Gmove(&n2, res)
+		Regfree(&n2)
+		return
+	}
+
+	nl := n.Left
+
+	switch n.Op {
+	default:
+		Fatal("agen: unknown op %v", Nconv(n, obj.FmtShort|obj.FmtSign))
+
+	case OCALLMETH:
+		cgen_callmeth(n, 0)
+		cgen_aret(n, res)
+
+	case OCALLINTER:
+		cgen_callinter(n, res, 0)
+		cgen_aret(n, res)
+
+	case OCALLFUNC:
+		cgen_call(n, 0)
+		cgen_aret(n, res)
+
+	case OEFACE, ODOTTYPE, OSLICE, OSLICEARR, OSLICESTR, OSLICE3, OSLICE3ARR:
+		var n1 Node
+		Tempname(&n1, n.Type)
+		Cgen(n, &n1)
+		Agen(&n1, res)
+
+	case OINDEX:
+		var n1 Node
+		Agenr(n, &n1, res)
+		Thearch.Gmove(&n1, res)
+		Regfree(&n1)
+
+	case ONAME:
+		// should only get here with names in this func.
+		if n.Funcdepth > 0 && n.Funcdepth != Funcdepth {
+			Dump("bad agen", n)
+			Fatal("agen: bad ONAME funcdepth %d != %d", n.Funcdepth, Funcdepth)
+		}
+
+		// should only get here for heap vars or paramref
+		if n.Class&PHEAP == 0 && n.Class != PPARAMREF {
+			Dump("bad agen", n)
+			Fatal("agen: bad ONAME class %#x", n.Class)
+		}
+
+		Cgen(n.Name.Heapaddr, res)
+		if n.Xoffset != 0 {
+			addOffset(res, n.Xoffset)
+		}
+
+	case OIND:
+		Cgen(nl, res)
+		Cgen_checknil(res)
+
+	case ODOT:
+		Agen(nl, res)
+		if n.Xoffset != 0 {
+			addOffset(res, n.Xoffset)
+		}
+
+	case ODOTPTR:
+		Cgen(nl, res)
+		Cgen_checknil(res)
+		if n.Xoffset != 0 {
+			addOffset(res, n.Xoffset)
+		}
+	}
+}
+
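+// addOffset adds the constant offset to the pointer held in res.
+// On 386 and amd64 the constant can be an immediate operand of the
+// add; other architectures materialize it in a register first.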
+func addOffset(res *Node, offset int64) {
+	if Ctxt.Arch.Thechar == '6' || Ctxt.Arch.Thechar == '8' {
+		Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), Nodintconst(offset), res)
+		return
+	}
+
+	var n1, n2 Node
+	Regalloc(&n1, Types[Tptr], nil)
+	Thearch.Gmove(res, &n1)
+	Regalloc(&n2, Types[Tptr], nil)
+	Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), Nodintconst(offset), &n2)
+	Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), &n2, &n1)
+	Thearch.Gmove(&n1, res)
+	Regfree(&n1)
+	Regfree(&n2)
+}
+
+// Igen computes the address &n, stores it in a register r,
+// and rewrites a to refer to *r. The chosen r may be the
+// stack pointer, it may be borrowed from res, or it may
+// be a newly allocated register. The caller must call Regfree(a)
+// to free r when the address is no longer needed.
+// The generated code ensures that &n is not nil.
+func Igen(n *Node, a *Node, res *Node) {
+	if Debug['g'] != 0 {
+		Dump("\nigen-n", n)
+	}
+
+	switch n.Op {
+	case ONAME:
+		if (n.Class&PHEAP != 0) || n.Class == PPARAMREF {
+			break
+		}
+		*a = *n
+		return
+
+	case OINDREG:
+		// Increase the refcount of the register so that igen's caller
+		// has to call Regfree.
+		if n.Reg != int16(Thearch.REGSP) {
+			reg[n.Reg-int16(Thearch.REGMIN)]++
+		}
+		*a = *n
+		return
+
+	case ODOT:
+		Igen(n.Left, a, res)
+		a.Xoffset += n.Xoffset
+		a.Type = n.Type
+		Fixlargeoffset(a)
+		return
+
+	case ODOTPTR:
+		Cgenr(n.Left, a, res)
+		Cgen_checknil(a)
+		a.Op = OINDREG
+		a.Xoffset += n.Xoffset
+		a.Type = n.Type
+		Fixlargeoffset(a)
+		return
+
+	case OCALLFUNC, OCALLMETH, OCALLINTER:
+		switch n.Op {
+		case OCALLFUNC:
+			cgen_call(n, 0)
+
+		case OCALLMETH:
+			cgen_callmeth(n, 0)
+
+		case OCALLINTER:
+			cgen_callinter(n, nil, 0)
+		}
+
+		var flist Iter
+		fp := Structfirst(&flist, Getoutarg(n.Left.Type))
+		*a = Node{}
+		a.Op = OINDREG
+		a.Reg = int16(Thearch.REGSP)
+		a.Addable = true
+		a.Xoffset = fp.Width
+		if HasLinkRegister() {
+			a.Xoffset += int64(Ctxt.Arch.Ptrsize)
+		}
+		a.Type = n.Type
+		return
+
+	// Index of fixed-size array by constant can
+	// put the offset in the addressing.
+	// Could do the same for slice except that we need
+	// to use the real index for the bounds checking.
+	case OINDEX:
+		if Isfixedarray(n.Left.Type) || (Isptr[n.Left.Type.Etype] && Isfixedarray(n.Left.Left.Type)) {
+			if Isconst(n.Right, CTINT) {
+				// Compute &a.
+				if !Isptr[n.Left.Type.Etype] {
+					Igen(n.Left, a, res)
+				} else {
+					var n1 Node
+					Igen(n.Left, &n1, res)
+					Cgen_checknil(&n1)
+					Regalloc(a, Types[Tptr], res)
+					Thearch.Gmove(&n1, a)
+					Regfree(&n1)
+					a.Op = OINDREG
+				}
+
+				// Compute &a[i] as &a + i*width.
+				a.Type = n.Type
+
+				a.Xoffset += Mpgetfix(n.Right.Val.U.(*Mpint)) * n.Type.Width
+				Fixlargeoffset(a)
+				return
+			}
+		}
+	}
+
+	Agenr(n, a, res)
+	a.Op = OINDREG
+	a.Type = n.Type
+}
+
+// Bgen generates code for branches:
+//
+// 	if n == wantTrue {
+// 		goto to
+// 	}
+func Bgen(n *Node, wantTrue bool, likely int, to *obj.Prog) {
+	bgenx(n, nil, wantTrue, likely, to)
+}
+
+// Bvgen generates code for calculating boolean values:
+// 	res = n == wantTrue
+func Bvgen(n, res *Node, wantTrue bool) {
+	if Thearch.Ginsboolval == nil {
+		// Direct value generation not implemented for this architecture.
+		// Implement using jumps.
+		bvgenjump(n, res, wantTrue, true)
+		return
+	}
+	bgenx(n, res, wantTrue, 0, nil)
+}
+
+// bvgenjump implements boolean value generation using jumps:
+// 	if n == wantTrue {
+// 		res = 1
+// 	} else {
+// 		res = 0
+// 	}
+// geninit controls whether n's Ninit is generated.
+func bvgenjump(n, res *Node, wantTrue, geninit bool) {
+	init := n.Ninit
+	if !geninit {
+		n.Ninit = nil
+	}
+	p1 := Gbranch(obj.AJMP, nil, 0)
+	p2 := Pc
+	Thearch.Gmove(Nodbool(true), res)
+	p3 := Gbranch(obj.AJMP, nil, 0)
+	Patch(p1, Pc)
+	Bgen(n, wantTrue, 0, p2)
+	Thearch.Gmove(Nodbool(false), res)
+	Patch(p3, Pc)
+	n.Ninit = init
+}
+
+// bgenx is the backend for Bgen and Bvgen.
+// If res is nil, it generates a branch.
+// Otherwise, it generates a boolean value.
+func bgenx(n, res *Node, wantTrue bool, likely int, to *obj.Prog) {
+	if Debug['g'] != 0 {
+		fmt.Printf("\nbgenx wantTrue=%t likely=%d to=%v\n", wantTrue, likely, to)
+		Dump("n", n)
+		Dump("res", res)
+	}
+
+	genval := res != nil
+
+	if n == nil {
+		n = Nodbool(true)
+	}
+
+	Genlist(n.Ninit)
+
+	if n.Type == nil {
+		Convlit(&n, Types[TBOOL])
+		if n.Type == nil {
+			return
+		}
+	}
+
+	if n.Type.Etype != TBOOL {
+		Fatal("bgen: bad type %v for %v", n.Type, Oconv(int(n.Op), 0))
+	}
+
+	for n.Op == OCONVNOP {
+		n = n.Left
+		Genlist(n.Ninit)
+	}
+
+	if Thearch.Bgen_float != nil && n.Left != nil && Isfloat[n.Left.Type.Etype] {
+		if genval {
+			bvgenjump(n, res, wantTrue, false)
+			return
+		}
+		Thearch.Bgen_float(n, wantTrue, likely, to)
+		return
+	}
+
+	switch n.Op {
+	default:
+		if genval {
+			Cgen(n, res)
+			if !wantTrue {
+				Thearch.Gins(Thearch.Optoas(OXOR, Types[TUINT8]), Nodintconst(1), res)
+			}
+			return
+		}
+
+		var tmp Node
+		Regalloc(&tmp, n.Type, nil)
+		Cgen(n, &tmp)
+		bgenNonZero(&tmp, nil, wantTrue, likely, to)
+		Regfree(&tmp)
+		return
+
+	case ONAME:
+		if genval {
+			// 5g, 7g, and 9g might need a temporary or other help here,
+			// but they don't support direct generation of a bool value yet.
+			// We can fix that as we go.
+			switch Ctxt.Arch.Thechar {
+			case '5', '7', '9':
+				Fatal("genval 5g, 7g, 9g ONAMES not fully implemented")
+			}
+			Cgen(n, res)
+			if !wantTrue {
+				Thearch.Gins(Thearch.Optoas(OXOR, Types[TUINT8]), Nodintconst(1), res)
+			}
+			return
+		}
+
+		if n.Addable && Ctxt.Arch.Thechar != '5' && Ctxt.Arch.Thechar != '7' && Ctxt.Arch.Thechar != '9' {
+			// no need for a temporary
+			bgenNonZero(n, nil, wantTrue, likely, to)
+			return
+		}
+		var tmp Node
+		Regalloc(&tmp, n.Type, nil)
+		Cgen(n, &tmp)
+		bgenNonZero(&tmp, nil, wantTrue, likely, to)
+		Regfree(&tmp)
+		return
+
+	case OLITERAL:
+		// n is a constant.
+		if !Isconst(n, CTBOOL) {
+			Fatal("bgen: non-bool const %v\n", Nconv(n, obj.FmtLong))
+		}
+		if genval {
+			Cgen(Nodbool(wantTrue == n.Val.U.(bool)), res)
+			return
+		}
+		// If n == wantTrue, jump; otherwise do nothing.
+		if wantTrue == n.Val.U.(bool) {
+			Patch(Gbranch(obj.AJMP, nil, likely), to)
+		}
+		return
+
+	case OANDAND, OOROR:
+		and := (n.Op == OANDAND) == wantTrue
+		if genval {
+			p1 := Gbranch(obj.AJMP, nil, 0)
+			p2 := Gbranch(obj.AJMP, nil, 0)
+			Patch(p2, Pc)
+			Cgen(Nodbool(!and), res)
+			p3 := Gbranch(obj.AJMP, nil, 0)
+			Patch(p1, Pc)
+			Bgen(n.Left, wantTrue != and, 0, p2)
+			Bvgen(n.Right, res, wantTrue)
+			Patch(p3, Pc)
+			return
+		}
+
+		if and {
+			p1 := Gbranch(obj.AJMP, nil, 0)
+			p2 := Gbranch(obj.AJMP, nil, 0)
+			Patch(p1, Pc)
+			Bgen(n.Left, !wantTrue, -likely, p2)
+			Bgen(n.Right, !wantTrue, -likely, p2)
+			p1 = Gbranch(obj.AJMP, nil, 0)
+			Patch(p1, to)
+			Patch(p2, Pc)
+		} else {
+			Bgen(n.Left, wantTrue, likely, to)
+			Bgen(n.Right, wantTrue, likely, to)
+		}
+		return
+
+	case ONOT: // unary
+		if n.Left == nil || n.Left.Type == nil {
+			return
+		}
+		bgenx(n.Left, res, !wantTrue, likely, to)
+		return
+
+	case OEQ, ONE, OLT, OGT, OLE, OGE:
+		if n.Left == nil || n.Left.Type == nil || n.Right == nil || n.Right.Type == nil {
+			return
+		}
+	}
+
+	// n.Op is one of OEQ, ONE, OLT, OGT, OLE, OGE
+	nl := n.Left
+	nr := n.Right
+	a := int(n.Op)
+
+	if !wantTrue {
+		if Isfloat[nr.Type.Etype] {
+			// Brcom is not valid on floats when NaN is involved.
+			ll := n.Ninit // avoid re-genning Ninit
+			n.Ninit = nil
+			if genval {
+				bgenx(n, res, true, likely, to)
+				Thearch.Gins(Thearch.Optoas(OXOR, Types[TUINT8]), Nodintconst(1), res) // res = !res
+				n.Ninit = ll
+				return
+			}
+			p1 := Gbranch(obj.AJMP, nil, 0)
+			p2 := Gbranch(obj.AJMP, nil, 0)
+			Patch(p1, Pc)
+			bgenx(n, res, true, -likely, p2)
+			Patch(Gbranch(obj.AJMP, nil, 0), to)
+			Patch(p2, Pc)
+			n.Ninit = ll
+			return
+		}
+
+		a = Brcom(a)
+	}
+	wantTrue = true
+
+	// make simplest on right
+	if nl.Op == OLITERAL || (nl.Ullman < nr.Ullman && nl.Ullman < UINF) {
+		a = Brrev(a)
+		nl, nr = nr, nl
+	}
+
+	if Isslice(nl.Type) || Isinter(nl.Type) {
+		// the front end should only leave comparisons to literal nil
+		if (a != OEQ && a != ONE) || nr.Op != OLITERAL {
+			if Isslice(nl.Type) {
+				Yyerror("illegal slice comparison")
+			} else {
+				Yyerror("illegal interface comparison")
+			}
+			return
+		}
+
+		var ptr Node
+		Igen(nl, &ptr, nil)
+		if Isslice(nl.Type) {
+			ptr.Xoffset += int64(Array_array)
+		}
+		ptr.Type = Types[Tptr]
+		var tmp Node
+		Regalloc(&tmp, ptr.Type, &ptr)
+		Cgen(&ptr, &tmp)
+		Regfree(&ptr)
+		bgenNonZero(&tmp, res, a == OEQ != wantTrue, likely, to)
+		Regfree(&tmp)
+		return
+	}
+
+	if Iscomplex[nl.Type.Etype] {
+		complexbool(a, nl, nr, res, wantTrue, likely, to)
+		return
+	}
+
+	if Ctxt.Arch.Regsize == 4 && Is64(nr.Type) {
+		if genval {
+			// TODO: Teach Cmp64 to generate boolean values and remove this.
+			bvgenjump(n, res, wantTrue, false)
+			return
+		}
+		if !nl.Addable || Isconst(nl, CTINT) {
+			nl = CgenTemp(nl)
+		}
+		if !nr.Addable {
+			nr = CgenTemp(nr)
+		}
+		Thearch.Cmp64(nl, nr, a, likely, to)
+		return
+	}
+
+	if nr.Ullman >= UINF {
+		var n1 Node
+		Regalloc(&n1, nl.Type, nil)
+		Cgen(nl, &n1)
+
+		var tmp Node
+		Tempname(&tmp, nl.Type)
+		Thearch.Gmove(&n1, &tmp)
+		Regfree(&n1)
+
+		var n2 Node
+		Regalloc(&n2, nr.Type, nil)
+		Cgen(nr, &n2)
+		Regfree(&n2)
+
+		Regalloc(&n1, nl.Type, nil)
+		Cgen(&tmp, &n1)
+		Regfree(&n1)
+	} else {
+		var n1 Node
+		if !nl.Addable && Ctxt.Arch.Thechar == '8' {
+			Tempname(&n1, nl.Type)
+		} else {
+			Regalloc(&n1, nl.Type, nil)
+			defer Regfree(&n1)
+		}
+		Cgen(nl, &n1)
+		nl = &n1
+
+		if Smallintconst(nr) && Ctxt.Arch.Thechar != '9' {
+			Thearch.Gins(Thearch.Optoas(OCMP, nr.Type), nl, nr)
+			bins(nr.Type, res, a, likely, to)
+			return
+		}
+
+		if !nr.Addable && Ctxt.Arch.Thechar == '8' {
+			nr = CgenTemp(nr)
+		}
+
+		var n2 Node
+		Regalloc(&n2, nr.Type, nil)
+		Cgen(nr, &n2)
+		nr = &n2
+		Regfree(&n2)
+	}
+
+	l, r := nl, nr
+
+	// On x86, only < and <= work right with NaN; reverse if needed
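+	// (For example, a > b is compiled as b < a, and OGE becomes OLE,
+	// so only the NaN-safe conditions are ever emitted for floats.)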
+	if Ctxt.Arch.Thechar == '6' && Isfloat[nl.Type.Etype] && (a == OGT || a == OGE) {
+		l, r = r, l
+		a = Brrev(a)
+	}
+
+	// Do the comparison.
+	Thearch.Gins(Thearch.Optoas(OCMP, nr.Type), l, r)
+
+	// Handle floating point special cases.
+	// Note that 8g has Bgen_float and is handled above.
+	if Isfloat[nl.Type.Etype] {
+		switch Ctxt.Arch.Thechar {
+		case '5':
+			if genval {
+				Fatal("genval 5g Isfloat special cases not implemented")
+			}
+			switch n.Op {
+			case ONE:
+				Patch(Gbranch(Thearch.Optoas(OPS, nr.Type), nr.Type, likely), to)
+				Patch(Gbranch(Thearch.Optoas(a, nr.Type), nr.Type, likely), to)
+			default:
+				p := Gbranch(Thearch.Optoas(OPS, nr.Type), nr.Type, -likely)
+				Patch(Gbranch(Thearch.Optoas(a, nr.Type), nr.Type, likely), to)
+				Patch(p, Pc)
+			}
+			return
+		case '6':
+			switch n.Op {
+			case OEQ:
+				// neither NE nor P
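+				// An unordered (NaN) compare sets the parity flag,
+				// so x == y holds only if EQ is set and parity is
+				// clear; the genval path ANDs those two conditions.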
+				if genval {
+					var reg Node
+					Regalloc(&reg, Types[TBOOL], nil)
+					Thearch.Ginsboolval(Thearch.Optoas(OEQ, nr.Type), &reg)
+					Thearch.Ginsboolval(Thearch.Optoas(OPC, nr.Type), res)
+					Thearch.Gins(Thearch.Optoas(OAND, Types[TBOOL]), &reg, res)
+					Regfree(&reg)
+				} else {
+					p1 := Gbranch(Thearch.Optoas(ONE, nr.Type), nil, -likely)
+					p2 := Gbranch(Thearch.Optoas(OPS, nr.Type), nil, -likely)
+					Patch(Gbranch(obj.AJMP, nil, 0), to)
+					Patch(p1, Pc)
+					Patch(p2, Pc)
+				}
+				return
+			case ONE:
+				// either NE or P
+				if genval {
+					var reg Node
+					Regalloc(&reg, Types[TBOOL], nil)
+					Thearch.Ginsboolval(Thearch.Optoas(ONE, nr.Type), &reg)
+					Thearch.Ginsboolval(Thearch.Optoas(OPS, nr.Type), res)
+					Thearch.Gins(Thearch.Optoas(OOR, Types[TBOOL]), &reg, res)
+					Regfree(&reg)
+				} else {
+					Patch(Gbranch(Thearch.Optoas(ONE, nr.Type), nil, likely), to)
+					Patch(Gbranch(Thearch.Optoas(OPS, nr.Type), nil, likely), to)
+				}
+				return
+			}
+		case '7', '9':
+			if genval {
+				Fatal("genval 7g, 9g Isfloat special cases not implemented")
+			}
+			switch n.Op {
+			// On arm64 and ppc64, <= and >= mishandle NaN. Must decompose into < or > and =.
+			// TODO(josh): Convert a <= b to b > a instead?
+			case OLE, OGE:
+				if a == OLE {
+					a = OLT
+				} else {
+					a = OGT
+				}
+				Patch(Gbranch(Thearch.Optoas(a, nr.Type), nr.Type, likely), to)
+				Patch(Gbranch(Thearch.Optoas(OEQ, nr.Type), nr.Type, likely), to)
+				return
+			}
+		}
+	}
+
+	// Not a special case. Insert the conditional jump or value gen.
+	bins(nr.Type, res, a, likely, to)
+}
+
+func bgenNonZero(n, res *Node, wantTrue bool, likely int, to *obj.Prog) {
+	// TODO: Optimize on systems that can compare to zero easily.
+	a := ONE
+	if !wantTrue {
+		a = OEQ
+	}
+	var zero Node
+	Nodconst(&zero, n.Type, 0)
+	Thearch.Gins(Thearch.Optoas(OCMP, n.Type), n, &zero)
+	bins(n.Type, res, a, likely, to)
+}
+
+// bins inserts an instruction to handle the result of a compare.
+// If res is non-nil, it inserts appropriate value generation instructions.
+// If res is nil, it inserts a branch to to.
+func bins(typ *Type, res *Node, a, likely int, to *obj.Prog) {
+	a = Thearch.Optoas(a, typ)
+	if res != nil {
+		// value gen
+		Thearch.Ginsboolval(a, res)
+	} else {
+		// jump
+		Patch(Gbranch(a, typ, likely), to)
+	}
+}
+
+// stkof returns n's offset from SP if n is on the stack
+// (either a local variable or the return value from a function call
+// or the arguments to a function call).
+// If n is not on the stack, stkof returns -1000.
+// If n is on the stack but in an unknown location
+// (due to array index arithmetic), stkof returns +1000.
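+// For example, a local at 8(SP) yields 8, a global yields -1000, and
+// x[i] on the stack with non-constant i yields +1000.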
+//
+// NOTE(rsc): It is possible that the ODOT and OINDEX cases
+// are not relevant here, since it shouldn't be possible for them
+// to be involved in an overlapping copy. Only function results
+// from one call and the arguments to the next can overlap in
+// any non-trivial way. If they can be dropped, then this function
+// becomes much simpler and also more trustworthy.
+// The fact that it works at all today is probably due to the fact
+// that ODOT and OINDEX are irrelevant.
+func stkof(n *Node) int64 {
+	switch n.Op {
+	case OINDREG:
+		if n.Reg != int16(Thearch.REGSP) {
+			return -1000 // not on stack
+		}
+		return n.Xoffset
+
+	case ODOT:
+		t := n.Left.Type
+		if Isptr[t.Etype] {
+			break
+		}
+		off := stkof(n.Left)
+		if off == -1000 || off == +1000 {
+			return off
+		}
+		return off + n.Xoffset
+
+	case OINDEX:
+		t := n.Left.Type
+		if !Isfixedarray(t) {
+			break
+		}
+		off := stkof(n.Left)
+		if off == -1000 || off == +1000 {
+			return off
+		}
+		if Isconst(n.Right, CTINT) {
+			return off + t.Type.Width*Mpgetfix(n.Right.Val.U.(*Mpint))
+		}
+		return +1000 // on stack but not sure exactly where
+
+	case OCALLMETH, OCALLINTER, OCALLFUNC:
+		t := n.Left.Type
+		if Isptr[t.Etype] {
+			t = t.Type
+		}
+
+		var flist Iter
+		t = Structfirst(&flist, Getoutarg(t))
+		if t != nil {
+			w := t.Width
+			if HasLinkRegister() {
+				w += int64(Ctxt.Arch.Ptrsize)
+			}
+			return w
+		}
+	}
+
+	// botch - probably failing to recognize address
+	// arithmetic on the above. e.g. INDEX and DOT
+	return -1000 // not on stack
+}
+
+/*
+ * block copy:
+ *	memmove(&ns, &n, w);
+ * if wb is true, needs write barrier.
+ */
+func sgen_wb(n *Node, ns *Node, w int64, wb bool) {
+	if Debug['g'] != 0 {
+		op := "sgen"
+		if wb {
+			op = "sgen-wb"
+		}
+		fmt.Printf("\n%s w=%d\n", op, w)
+		Dump("r", n)
+		Dump("res", ns)
+	}
+
+	if n.Ullman >= UINF && ns.Ullman >= UINF {
+		Fatal("sgen UINF")
+	}
+
+	if w < 0 {
+		Fatal("sgen copy %d", w)
+	}
+
+	// If copying .args, that's all the results, so record definition sites
+	// for them for the liveness analysis.
+	if ns.Op == ONAME && ns.Sym.Name == ".args" {
+		for l := Curfn.Func.Dcl; l != nil; l = l.Next {
+			if l.N.Class == PPARAMOUT {
+				Gvardef(l.N)
+			}
+		}
+	}
+
+	// Avoid taking the address for simple enough types.
+	if componentgen_wb(n, ns, wb) {
+		return
+	}
+
+	if w == 0 {
+		// evaluate side effects only
+		var nodr Node
+		Regalloc(&nodr, Types[Tptr], nil)
+		Agen(ns, &nodr)
+		Agen(n, &nodr)
+		Regfree(&nodr)
+		return
+	}
+
+	// offset on the stack
+	osrc := stkof(n)
+	odst := stkof(ns)
+
+	if odst != -1000 {
+		// on stack, write barrier not needed after all
+		wb = false
+	}
+
+	if osrc != -1000 && odst != -1000 && (osrc == 1000 || odst == 1000) || wb && osrc != -1000 {
+		// osrc and odst both on stack, and at least one is in
+		// an unknown position.  Could generate code to test
+		// for forward/backward copy, but instead just copy
+		// to a temporary location first.
+		//
+		// OR: write barrier needed and source is on stack.
+		// Invoking the write barrier will use the stack to prepare its call.
+		// Copy to temporary.
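+		// (Note that && binds tighter than ||, so the condition reads:
+		// (both on stack, one at unknown offset) or (wb && src on stack).)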
+		var tmp Node
+		Tempname(&tmp, n.Type)
+		sgen_wb(n, &tmp, w, false)
+		sgen_wb(&tmp, ns, w, wb)
+		return
+	}
+
+	if wb {
+		cgen_wbfat(n, ns)
+		return
+	}
+
+	Thearch.Blockcopy(n, ns, osrc, odst, w)
+}
+
+/*
+ * generate:
+ *	call f
+ *	proc=-1	normal call but no return
+ *	proc=0	normal call
+ *	proc=1	goroutine run in new proc
+ *	proc=2	defer call, save away stack
+ *	proc=3	normal call to C pointer (not Go func value)
+ */
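+// For proc=1 and proc=2 the callee is not called directly: the argument
+// size and the function value are stored at 0(SP) and Widthptr(SP)
+// (shifted by one word on link-register machines), and then Newproc or
+// Deferproc is called instead.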
+func Ginscall(f *Node, proc int) {
+	if f.Type != nil {
+		extra := int32(0)
+		if proc == 1 || proc == 2 {
+			extra = 2 * int32(Widthptr)
+		}
+		Setmaxarg(f.Type, extra)
+	}
+
+	switch proc {
+	default:
+		Fatal("Ginscall: bad proc %d", proc)
+
+	case 0, // normal call
+		-1: // normal call but no return
+		if f.Op == ONAME && f.Class == PFUNC {
+			if f == Deferreturn {
+				// Deferred calls will appear to be returning to
+				// the CALL deferreturn(SB) that we are about to emit.
+				// However, the stack trace code will show the line
+				// of the instruction byte before the return PC.
+				// To avoid that being an unrelated instruction,
+				// insert an actual hardware NOP that will have the right line number.
+				// This is different from obj.ANOP, which is a virtual no-op
+				// that doesn't make it into the instruction stream.
+				Thearch.Ginsnop()
+			}
+
+			p := Thearch.Gins(obj.ACALL, nil, f)
+			Afunclit(&p.To, f)
+			if proc == -1 || Noreturn(p) {
+				Thearch.Gins(obj.AUNDEF, nil, nil)
+			}
+			break
+		}
+
+		var reg Node
+		Nodreg(&reg, Types[Tptr], Thearch.REGCTXT)
+		var r1 Node
+		Nodreg(&r1, Types[Tptr], Thearch.REGCALLX)
+		Thearch.Gmove(f, &reg)
+		reg.Op = OINDREG
+		Thearch.Gmove(&reg, &r1)
+		reg.Op = OREGISTER
+		Thearch.Gins(obj.ACALL, &reg, &r1)
+
+	case 3: // normal call of c function pointer
+		Thearch.Gins(obj.ACALL, nil, f)
+
+	case 1, // call in new proc (go)
+		2: // deferred call (defer)
+		var stk Node
+
+		// size of arguments at 0(SP)
+		stk.Op = OINDREG
+		stk.Reg = int16(Thearch.REGSP)
+		stk.Xoffset = 0
+		if HasLinkRegister() {
+			stk.Xoffset += int64(Ctxt.Arch.Ptrsize)
+		}
+		Thearch.Ginscon(Thearch.Optoas(OAS, Types[TINT32]), int64(Argsize(f.Type)), &stk)
+
+		// FuncVal* at 8(SP)
+		stk.Xoffset = int64(Widthptr)
+		if HasLinkRegister() {
+			stk.Xoffset += int64(Ctxt.Arch.Ptrsize)
+		}
+
+		var reg Node
+		Nodreg(&reg, Types[Tptr], Thearch.REGCALLX2)
+		Thearch.Gmove(f, &reg)
+		Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), &reg, &stk)
+
+		if proc == 1 {
+			Ginscall(Newproc, 0)
+		} else {
+			if Hasdefer == 0 {
+				Fatal("hasdefer=0 but has defer")
+			}
+			Ginscall(Deferproc, 0)
+		}
+
+		if proc == 2 {
+			Nodreg(&reg, Types[TINT32], Thearch.REGRETURN)
+			p := Thearch.Ginscmp(OEQ, Types[TINT32], &reg, Nodintconst(0), +1)
+			cgen_ret(nil)
+			Patch(p, Pc)
+		}
+	}
+}
+
+/*
+ * n is call to interface method.
+ * generate res = n.
+ */
+func cgen_callinter(n *Node, res *Node, proc int) {
+	i := n.Left
+	if i.Op != ODOTINTER {
+		Fatal("cgen_callinter: not ODOTINTER %v", Oconv(int(i.Op), 0))
+	}
+
+	f := i.Right // field
+	if f.Op != ONAME {
+		Fatal("cgen_callinter: not ONAME %v", Oconv(int(f.Op), 0))
+	}
+
+	i = i.Left // interface
+
+	if !i.Addable {
+		var tmpi Node
+		Tempname(&tmpi, i.Type)
+		Cgen(i, &tmpi)
+		i = &tmpi
+	}
+
+	Genlist(n.List) // assign the args
+
+	// i is now addable, prepare an indirected
+	// register to hold its address.
+	var nodi Node
+	Igen(i, &nodi, res) // REG = &inter
+
+	var nodsp Node
+	Nodindreg(&nodsp, Types[Tptr], Thearch.REGSP)
+	nodsp.Xoffset = 0
+	if HasLinkRegister() {
+		nodsp.Xoffset += int64(Ctxt.Arch.Ptrsize)
+	}
+	if proc != 0 {
+		nodsp.Xoffset += 2 * int64(Widthptr) // leave room for size & fn
+	}
+	nodi.Type = Types[Tptr]
+	nodi.Xoffset += int64(Widthptr)
+	Cgen(&nodi, &nodsp) // {0, 8(nacl), or 16}(SP) = 8(REG) -- i.data
+
+	var nodo Node
+	Regalloc(&nodo, Types[Tptr], res)
+
+	nodi.Type = Types[Tptr]
+	nodi.Xoffset -= int64(Widthptr)
+	Cgen(&nodi, &nodo) // REG = 0(REG) -- i.tab
+	Regfree(&nodi)
+
+	var nodr Node
+	Regalloc(&nodr, Types[Tptr], &nodo)
+	if n.Left.Xoffset == BADWIDTH {
+		Fatal("cgen_callinter: badwidth")
+	}
+	Cgen_checknil(&nodo) // in case offset is huge
+	nodo.Op = OINDREG
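+	// The fun array lives after the three pointer fields and two int32
+	// fields at the head of the runtime itab, hence 3*Widthptr+8 below.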
+	nodo.Xoffset = n.Left.Xoffset + 3*int64(Widthptr) + 8
+	if proc == 0 {
+		// plain call: use direct c function pointer - more efficient
+		Cgen(&nodo, &nodr) // REG = 32+offset(REG) -- i.tab->fun[f]
+		proc = 3
+	} else {
+		// go/defer. generate go func value.
+		Agen(&nodo, &nodr) // REG = &(32+offset(REG)) -- i.tab->fun[f]
+	}
+
+	nodr.Type = n.Left.Type
+	Ginscall(&nodr, proc)
+
+	Regfree(&nodr)
+	Regfree(&nodo)
+}
+
+/*
+ * generate function call;
+ *	proc=0	normal call
+ *	proc=1	goroutine run in new proc
+ *	proc=2	defer call save away stack
+ */
+func cgen_call(n *Node, proc int) {
+	if n == nil {
+		return
+	}
+
+	var afun Node
+	if n.Left.Ullman >= UINF {
+		// if name involves a fn call
+		// precompute the address of the fn
+		Tempname(&afun, Types[Tptr])
+
+		Cgen(n.Left, &afun)
+	}
+
+	Genlist(n.List) // assign the args
+	t := n.Left.Type
+
+	// call tempname pointer
+	if n.Left.Ullman >= UINF {
+		var nod Node
+		Regalloc(&nod, Types[Tptr], nil)
+		Cgen_as(&nod, &afun)
+		nod.Type = t
+		Ginscall(&nod, proc)
+		Regfree(&nod)
+		return
+	}
+
+	// call pointer
+	if n.Left.Op != ONAME || n.Left.Class != PFUNC {
+		var nod Node
+		Regalloc(&nod, Types[Tptr], nil)
+		Cgen_as(&nod, n.Left)
+		nod.Type = t
+		Ginscall(&nod, proc)
+		Regfree(&nod)
+		return
+	}
+
+	// call direct
+	n.Left.Name.Method = true
+
+	Ginscall(n.Left, proc)
+}
+
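+// HasLinkRegister reports whether the target saves return addresses in a
+// link register rather than on the stack ('6' is amd64, including
+// amd64p32, and '8' is 386; arm, arm64, and ppc64 use a link register).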
+func HasLinkRegister() bool {
+	c := Ctxt.Arch.Thechar
+	return c != '6' && c != '8'
+}
+
+/*
+ * call to n has already been generated.
+ * generate:
+ *	res = return value from call.
+ */
+func cgen_callret(n *Node, res *Node) {
+	t := n.Left.Type
+	if t.Etype == TPTR32 || t.Etype == TPTR64 {
+		t = t.Type
+	}
+
+	var flist Iter
+	fp := Structfirst(&flist, Getoutarg(t))
+	if fp == nil {
+		Fatal("cgen_callret: nil")
+	}
+
+	var nod Node
+	nod.Op = OINDREG
+	nod.Reg = int16(Thearch.REGSP)
+	nod.Addable = true
+
+	nod.Xoffset = fp.Width
+	if HasLinkRegister() {
+		nod.Xoffset += int64(Ctxt.Arch.Ptrsize)
+	}
+	nod.Type = fp.Type
+	Cgen_as(res, &nod)
+}
+
+/*
+ * call to n has already been generated.
+ * generate:
+ *	res = &return value from call.
+ */
+func cgen_aret(n *Node, res *Node) {
+	t := n.Left.Type
+	if Isptr[t.Etype] {
+		t = t.Type
+	}
+
+	var flist Iter
+	fp := Structfirst(&flist, Getoutarg(t))
+	if fp == nil {
+		Fatal("cgen_aret: nil")
+	}
+
+	var nod1 Node
+	nod1.Op = OINDREG
+	nod1.Reg = int16(Thearch.REGSP)
+	nod1.Addable = true
+	nod1.Xoffset = fp.Width
+	if HasLinkRegister() {
+		nod1.Xoffset += int64(Ctxt.Arch.Ptrsize)
+	}
+	nod1.Type = fp.Type
+
+	if res.Op != OREGISTER {
+		var nod2 Node
+		Regalloc(&nod2, Types[Tptr], res)
+		Agen(&nod1, &nod2)
+		Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), &nod2, res)
+		Regfree(&nod2)
+	} else {
+		Agen(&nod1, res)
+	}
+}
+
+/*
+ * generate return.
+ * n->left is assignments to return values.
+ */
+func cgen_ret(n *Node) {
+	if n != nil {
+		Genlist(n.List) // copy out args
+	}
+	if Hasdefer != 0 {
+		Ginscall(Deferreturn, 0)
+	}
+	Genlist(Curfn.Func.Exit)
+	p := Thearch.Gins(obj.ARET, nil, nil)
+	if n != nil && n.Op == ORETJMP {
+		p.To.Type = obj.TYPE_MEM
+		p.To.Name = obj.NAME_EXTERN
+		p.To.Sym = Linksym(n.Left.Sym)
+	}
+}
+
+/*
+ * generate division according to op, one of:
+ *	res = nl / nr
+ *	res = nl % nr
+ */
+func cgen_div(op int, nl *Node, nr *Node, res *Node) {
+	var w int
+
+	// TODO(rsc): arm64 needs to support the relevant instructions
+	// in peep and optoas in order to enable this.
+	// TODO(rsc): ppc64 needs to support the relevant instructions
+	// in peep and optoas in order to enable this.
+	if nr.Op != OLITERAL || Ctxt.Arch.Thechar == '7' || Ctxt.Arch.Thechar == '9' {
+		goto longdiv
+	}
+	w = int(nl.Type.Width * 8)
+
+	// Front end handled 32-bit division. We only need to handle 64-bit.
+	// try to do division by multiply by (2^w)/d
+	// see hacker's delight chapter 10
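+	// The identity is n/d == (n*m)>>s for a magic multiplier m roughly
+	// equal to 2^(w+s)/d; Umagic/Smagic compute m and s, setting m.Bad
+	// when no such pair exists, and Cgen_hmul yields the high w bits of n*m.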
+	switch Simtype[nl.Type.Etype] {
+	default:
+		goto longdiv
+
+	case TUINT64:
+		var m Magic
+		m.W = w
+		m.Ud = uint64(Mpgetfix(nr.Val.U.(*Mpint)))
+		Umagic(&m)
+		if m.Bad != 0 {
+			break
+		}
+		if op == OMOD {
+			goto longmod
+		}
+
+		var n1 Node
+		Cgenr(nl, &n1, nil)
+		var n2 Node
+		Nodconst(&n2, nl.Type, int64(m.Um))
+		var n3 Node
+		Regalloc(&n3, nl.Type, res)
+		Thearch.Cgen_hmul(&n1, &n2, &n3)
+
+		if m.Ua != 0 {
+			// need to add numerator accounting for overflow
+			Thearch.Gins(Thearch.Optoas(OADD, nl.Type), &n1, &n3)
+
+			Nodconst(&n2, nl.Type, 1)
+			Thearch.Gins(Thearch.Optoas(ORROTC, nl.Type), &n2, &n3)
+			Nodconst(&n2, nl.Type, int64(m.S)-1)
+			Thearch.Gins(Thearch.Optoas(ORSH, nl.Type), &n2, &n3)
+		} else {
+			Nodconst(&n2, nl.Type, int64(m.S))
+			Thearch.Gins(Thearch.Optoas(ORSH, nl.Type), &n2, &n3) // shift dx
+		}
+
+		Thearch.Gmove(&n3, res)
+		Regfree(&n1)
+		Regfree(&n3)
+		return
+
+	case TINT64:
+		var m Magic
+		m.W = w
+		m.Sd = Mpgetfix(nr.Val.U.(*Mpint))
+		Smagic(&m)
+		if m.Bad != 0 {
+			break
+		}
+		if op == OMOD {
+			goto longmod
+		}
+
+		var n1 Node
+		Cgenr(nl, &n1, res)
+		var n2 Node
+		Nodconst(&n2, nl.Type, m.Sm)
+		var n3 Node
+		Regalloc(&n3, nl.Type, nil)
+		Thearch.Cgen_hmul(&n1, &n2, &n3)
+
+		if m.Sm < 0 {
+			// need to add numerator
+			Thearch.Gins(Thearch.Optoas(OADD, nl.Type), &n1, &n3)
+		}
+
+		Nodconst(&n2, nl.Type, int64(m.S))
+		Thearch.Gins(Thearch.Optoas(ORSH, nl.Type), &n2, &n3) // shift n3
+
+		Nodconst(&n2, nl.Type, int64(w)-1)
+
+		Thearch.Gins(Thearch.Optoas(ORSH, nl.Type), &n2, &n1) // -1 iff num is neg
+		Thearch.Gins(Thearch.Optoas(OSUB, nl.Type), &n1, &n3) // added
+
+		if m.Sd < 0 {
+			// this could probably be removed
+			// by factoring it into the multiplier
+			Thearch.Gins(Thearch.Optoas(OMINUS, nl.Type), nil, &n3)
+		}
+
+		Thearch.Gmove(&n3, res)
+		Regfree(&n1)
+		Regfree(&n3)
+		return
+	}
+
+	goto longdiv
+
+	// division and mod using (slow) hardware instruction
+longdiv:
+	Thearch.Dodiv(op, nl, nr, res)
+
+	return
+
+	// mod using formula A%B = A-(A/B*B) but
+	// we know that there is a fast algorithm for A/B
+longmod:
+	var n1 Node
+	Regalloc(&n1, nl.Type, res)
+
+	Cgen(nl, &n1)
+	var n2 Node
+	Regalloc(&n2, nl.Type, nil)
+	cgen_div(ODIV, &n1, nr, &n2)
+	a := Thearch.Optoas(OMUL, nl.Type)
+	if w == 8 {
+		// use 2-operand 16-bit multiply
+		// because there is no 2-operand 8-bit multiply
+		a = Thearch.Optoas(OMUL, Types[TINT16]) // XXX was IMULW
+	}
+
+	if !Smallintconst(nr) {
+		var n3 Node
+		Regalloc(&n3, nl.Type, nil)
+		Cgen(nr, &n3)
+		Thearch.Gins(a, &n3, &n2)
+		Regfree(&n3)
+	} else {
+		Thearch.Gins(a, nr, &n2)
+	}
+	Thearch.Gins(Thearch.Optoas(OSUB, nl.Type), &n2, &n1)
+	Thearch.Gmove(&n1, res)
+	Regfree(&n1)
+	Regfree(&n2)
+}
+
+func Fixlargeoffset(n *Node) {
+	if n == nil {
+		return
+	}
+	if n.Op != OINDREG {
+		return
+	}
+	if n.Reg == int16(Thearch.REGSP) { // stack offset cannot be large
+		return
+	}
+	if n.Xoffset != int64(int32(n.Xoffset)) {
+		// offset too large, add to register instead.
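+		// (Instruction encodings hold at most a 32-bit displacement,
+		// so fold the offset into the register with an explicit ADD
+		// and leave a zero displacement behind.)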
+		a := *n
+
+		a.Op = OREGISTER
+		a.Type = Types[Tptr]
+		a.Xoffset = 0
+		Cgen_checknil(&a)
+		Thearch.Ginscon(Thearch.Optoas(OADD, Types[Tptr]), n.Xoffset, &a)
+		n.Xoffset = 0
+	}
+}
+
+func cgen_append(n, res *Node) {
+	if Debug['g'] != 0 {
+		Dump("cgen_append-n", n)
+		Dump("cgen_append-res", res)
+	}
+	if res.Op != ONAME && !samesafeexpr(res, n.List.N) {
+		Dump("cgen_append-n", n)
+		Dump("cgen_append-res", res)
+		Fatal("append not lowered")
+	}
+	for l := n.List; l != nil; l = l.Next {
+		if l.N.Ullman >= UINF {
+			Fatal("append with function call arguments")
+		}
+	}
+
+	// res = append(src, x, y, z)
+	//
+	// If res and src are the same, we can avoid writing to base and cap
+	// unless we grow the underlying array.
+	needFullUpdate := !samesafeexpr(res, n.List.N)
+
+	// Copy src triple into base, len, cap.
+	base := temp(Types[Tptr])
+	len := temp(Types[TUINT])
+	cap := temp(Types[TUINT])
+
+	var src Node
+	Igen(n.List.N, &src, nil)
+	src.Type = Types[Tptr]
+	Thearch.Gmove(&src, base)
+	src.Type = Types[TUINT]
+	src.Xoffset += int64(Widthptr)
+	Thearch.Gmove(&src, len)
+	src.Xoffset += int64(Widthptr)
+	Thearch.Gmove(&src, cap)
+
+	// if len+argc <= cap goto L1
+	var rlen Node
+	Regalloc(&rlen, Types[TUINT], nil)
+	Thearch.Gmove(len, &rlen)
+	Thearch.Ginscon(Thearch.Optoas(OADD, Types[TUINT]), int64(count(n.List)-1), &rlen)
+	p := Thearch.Ginscmp(OLE, Types[TUINT], &rlen, cap, +1)
+	// Note: rlen and src are Regrealloc'ed below at the target of the
+	// branch we just emitted; do not reuse these Go variables for
+	// other purposes. They need to still describe the same things
+	// below that they describe right here.
+	Regfree(&src)
+
+	// base, len, cap = growslice(type, base, len, cap, newlen)
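+	// Arguments are stored in call order in the out-args area, advancing
+	// arg.Xoffset one word at a time; on link-register machines they
+	// begin one pointer-size word in.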
+	var arg Node
+	arg.Op = OINDREG
+	arg.Reg = int16(Thearch.REGSP)
+	arg.Addable = true
+	arg.Xoffset = 0
+	if HasLinkRegister() {
+		arg.Xoffset = int64(Ctxt.Arch.Ptrsize)
+	}
+	arg.Type = Ptrto(Types[TUINT8])
+	Cgen(typename(res.Type), &arg)
+	arg.Xoffset += int64(Widthptr)
+
+	arg.Type = Types[Tptr]
+	Cgen(base, &arg)
+	arg.Xoffset += int64(Widthptr)
+
+	arg.Type = Types[TUINT]
+	Cgen(len, &arg)
+	arg.Xoffset += int64(Widthptr)
+
+	arg.Type = Types[TUINT]
+	Cgen(cap, &arg)
+	arg.Xoffset += int64(Widthptr)
+
+	arg.Type = Types[TUINT]
+	Cgen(&rlen, &arg)
+	arg.Xoffset += int64(Widthptr)
+	Regfree(&rlen)
+
+	fn := syslook("growslice", 1)
+	substArgTypes(fn, res.Type.Type, res.Type.Type)
+	Ginscall(fn, 0)
+
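+	// On amd64p32 (Widthptr 4, Widthreg 8) the results begin at a
+	// register-width boundary, so skip four bytes of padding.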
+	if Widthptr == 4 && Widthreg == 8 {
+		arg.Xoffset += 4
+	}
+
+	arg.Type = Types[Tptr]
+	Cgen(&arg, base)
+	arg.Xoffset += int64(Widthptr)
+
+	arg.Type = Types[TUINT]
+	Cgen(&arg, len)
+	arg.Xoffset += int64(Widthptr)
+
+	arg.Type = Types[TUINT]
+	Cgen(&arg, cap)
+
+	// Update res with base, len+argc, cap.
+	if needFullUpdate {
+		if Debug_append > 0 {
+			Warn("append: full update")
+		}
+		Patch(p, Pc)
+	}
+	if res.Op == ONAME {
+		Gvardef(res)
+	}
+	var dst, r1 Node
+	Igen(res, &dst, nil)
+	dst.Type = Types[TUINT]
+	dst.Xoffset += int64(Widthptr)
+	Regalloc(&r1, Types[TUINT], nil)
+	Thearch.Gmove(len, &r1)
+	Thearch.Ginscon(Thearch.Optoas(OADD, Types[TUINT]), int64(count(n.List)-1), &r1)
+	Thearch.Gmove(&r1, &dst)
+	Regfree(&r1)
+	dst.Xoffset += int64(Widthptr)
+	Thearch.Gmove(cap, &dst)
+	dst.Type = Types[Tptr]
+	dst.Xoffset -= 2 * int64(Widthptr)
+	cgen_wb(base, &dst, needwritebarrier(&dst, base))
+	Regfree(&dst)
+
+	if !needFullUpdate {
+		if Debug_append > 0 {
+			Warn("append: len-only update")
+		}
+		// goto L2;
+		// L1:
+		//	update len only
+		// L2:
+		q := Gbranch(obj.AJMP, nil, 0)
+		Patch(p, Pc)
+		// At the goto above, src refers to cap and rlen holds the new len
+		if src.Op == OREGISTER || src.Op == OINDREG {
+			Regrealloc(&src)
+		}
+		Regrealloc(&rlen)
+		src.Xoffset -= int64(Widthptr)
+		Thearch.Gmove(&rlen, &src)
+		Regfree(&src)
+		Regfree(&rlen)
+		Patch(q, Pc)
+	}
+
+	// Copy data into place.
+	// Could do write barrier check around entire copy instead of each element.
+	// Could avoid reloading registers on each iteration if we know the cgen_wb
+	// is not going to use a write barrier.
+	i := 0
+	var r2 Node
+	for l := n.List.Next; l != nil; l = l.Next {
+		Regalloc(&r1, Types[Tptr], nil)
+		Thearch.Gmove(base, &r1)
+		Regalloc(&r2, Types[TUINT], nil)
+		Thearch.Gmove(len, &r2)
+		if i > 0 {
+			Thearch.Gins(Thearch.Optoas(OADD, Types[TUINT]), Nodintconst(int64(i)), &r2)
+		}
+		w := res.Type.Type.Width
+		if Thearch.AddIndex != nil && Thearch.AddIndex(&r2, w, &r1) {
+			// r1 updated by back end
+		} else if w == 1 {
+			Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), &r2, &r1)
+		} else {
+			Thearch.Ginscon(Thearch.Optoas(OMUL, Types[TUINT]), int64(w), &r2)
+			Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), &r2, &r1)
+		}
+		Regfree(&r2)
+
+		r1.Op = OINDREG
+		r1.Type = res.Type.Type
+		cgen_wb(l.N, &r1, needwritebarrier(&r1, l.N))
+		Regfree(&r1)
+		i++
+	}
+}
+
+// Generate res = n, where n is x[i:j] or x[i:j:k].
+// If wb is true, need write barrier updating res's base pointer.
+// On systems with 32-bit ints, i, j, k are guaranteed to be 32-bit values.
+func cgen_slice(n, res *Node, wb bool) {
+	if Debug['g'] != 0 {
+		Dump("cgen_slice-n", n)
+		Dump("cgen_slice-res", res)
+	}
+
+	needFullUpdate := !samesafeexpr(n.Left, res)
+
+	// orderexpr has made sure that x is safe (but possibly expensive)
+	// and i, j, k are cheap. On a system with registers (anything but 386)
+	// we can evaluate x first and then know we have enough registers
+	// for i, j, k as well.
+	var x, xbase, xlen, xcap, i, j, k Node
+	if n.Op != OSLICEARR && n.Op != OSLICE3ARR {
+		Igen(n.Left, &x, nil)
+	}
+
+	indexRegType := Types[TUINT]
+	if Widthreg > Widthptr { // amd64p32
+		indexRegType = Types[TUINT64]
+	}
+
+	// On most systems, we use registers.
+	// The 386 has basically no registers, so substitute functions
+	// that can work with temporaries instead.
+	regalloc := Regalloc
+	ginscon := Thearch.Ginscon
+	gins := Thearch.Gins
+	if Thearch.Thechar == '8' {
+		regalloc = func(n *Node, t *Type, reuse *Node) {
+			Tempname(n, t)
+		}
+		ginscon = func(as int, c int64, n *Node) {
+			var n1 Node
+			Regalloc(&n1, n.Type, n)
+			Thearch.Gmove(n, &n1)
+			Thearch.Ginscon(as, c, &n1)
+			Thearch.Gmove(&n1, n)
+			Regfree(&n1)
+		}
+		gins = func(as int, f, t *Node) *obj.Prog {
+			var n1 Node
+			Regalloc(&n1, t.Type, t)
+			Thearch.Gmove(t, &n1)
+			Thearch.Gins(as, f, &n1)
+			Thearch.Gmove(&n1, t)
+			Regfree(&n1)
+			return nil
+		}
+	}
+
+	panics := make([]*obj.Prog, 0, 6) // 3 loads + 3 checks
+
+	loadlen := func() {
+		if xlen.Op != 0 {
+			return
+		}
+		if n.Op == OSLICEARR || n.Op == OSLICE3ARR {
+			Nodconst(&xlen, indexRegType, n.Left.Type.Type.Bound)
+			return
+		}
+		if n.Op == OSLICESTR && Isconst(n.Left, CTSTR) {
+			Nodconst(&xlen, indexRegType, int64(len(n.Left.Val.U.(string))))
+			return
+		}
+		regalloc(&xlen, indexRegType, nil)
+		x.Xoffset += int64(Widthptr)
+		x.Type = Types[TUINT]
+		Thearch.Gmove(&x, &xlen)
+		x.Xoffset -= int64(Widthptr)
+	}
+
+	loadcap := func() {
+		if xcap.Op != 0 {
+			return
+		}
+		if n.Op == OSLICEARR || n.Op == OSLICE3ARR || n.Op == OSLICESTR {
+			loadlen()
+			xcap = xlen
+			if xcap.Op == OREGISTER {
+				Regrealloc(&xcap)
+			}
+			return
+		}
+		regalloc(&xcap, indexRegType, nil)
+		x.Xoffset += 2 * int64(Widthptr)
+		x.Type = Types[TUINT]
+		Thearch.Gmove(&x, &xcap)
+		x.Xoffset -= 2 * int64(Widthptr)
+	}
+
+	var x1, x2, x3 *Node // unevaluated index arguments
+	x1 = n.Right.Left
+	switch n.Op {
+	default:
+		x2 = n.Right.Right
+	case OSLICE3, OSLICE3ARR:
+		x2 = n.Right.Right.Left
+		x3 = n.Right.Right.Right
+	}
+
+	// load computes src into targ, but if src refers to the len or cap of n.Left,
+	// load copies those from xlen, xcap, loading xlen if needed.
+	// If targ.Op == OREGISTER on return, it must be Regfreed,
+	// but it should not be modified without first checking whether it is
+	// xlen or xcap's register.
+	load := func(src, targ *Node) {
+		if src == nil {
+			return
+		}
+		switch src.Op {
+		case OLITERAL:
+			*targ = *src
+			return
+		case OLEN:
+			// NOTE(rsc): This doesn't actually trigger, because order.go
+			// has pulled all the len and cap calls into separate assignments
+			// to temporaries. There are tests in test/sliceopt.go that could
+			// be enabled if this is fixed.
+			if samesafeexpr(n.Left, src.Left) {
+				if Debug_slice > 0 {
+					Warn("slice: reuse len")
+				}
+				loadlen()
+				*targ = xlen
+				if targ.Op == OREGISTER {
+					Regrealloc(targ)
+				}
+				return
+			}
+		case OCAP:
+			// NOTE(rsc): This doesn't actually trigger; see note in case OLEN above.
+			if samesafeexpr(n.Left, src.Left) {
+				if Debug_slice > 0 {
+					Warn("slice: reuse cap")
+				}
+				loadcap()
+				*targ = xcap
+				if targ.Op == OREGISTER {
+					Regrealloc(targ)
+				}
+				return
+			}
+		}
+		if i.Op != 0 && samesafeexpr(x1, src) {
+			if Debug_slice > 0 {
+				Warn("slice: reuse 1st index")
+			}
+			*targ = i
+			if targ.Op == OREGISTER {
+				Regrealloc(targ)
+			}
+			return
+		}
+		if j.Op != 0 && samesafeexpr(x2, src) {
+			if Debug_slice > 0 {
+				Warn("slice: reuse 2nd index")
+			}
+			*targ = j
+			if targ.Op == OREGISTER {
+				Regrealloc(targ)
+			}
+			return
+		}
+		if Thearch.Cgenindex != nil {
+			regalloc(targ, indexRegType, nil)
+			p := Thearch.Cgenindex(src, targ, false)
+			if p != nil {
+				panics = append(panics, p)
+			}
+		} else if Thearch.Igenindex != nil {
+			p := Thearch.Igenindex(src, targ, false)
+			if p != nil {
+				panics = append(panics, p)
+			}
+		} else {
+			regalloc(targ, indexRegType, nil)
+			var tmp Node
+			Cgenr(src, &tmp, targ)
+			Thearch.Gmove(&tmp, targ)
+			Regfree(&tmp)
+		}
+	}
+
+	load(x1, &i)
+	load(x2, &j)
+	load(x3, &k)
+
+	// i defaults to 0.
+	if i.Op == 0 {
+		Nodconst(&i, indexRegType, 0)
+	}
+
+	// j defaults to len(x)
+	if j.Op == 0 {
+		loadlen()
+		j = xlen
+		if j.Op == OREGISTER {
+			Regrealloc(&j)
+		}
+	}
+
+	// k defaults to cap(x)
+	// Only need to load it if we're recalculating cap or doing a full update.
+	if k.Op == 0 && n.Op != OSLICESTR && (!iszero(&i) || needFullUpdate) {
+		loadcap()
+		k = xcap
+		if k.Op == OREGISTER {
+			Regrealloc(&k)
+		}
+	}
+
+	// Check constant indexes for negative values, and against constant length if known.
+	// The func obvious below checks for out-of-order constant indexes.
+	var bound int64 = -1
+	if n.Op == OSLICEARR || n.Op == OSLICE3ARR {
+		bound = n.Left.Type.Type.Bound
+	} else if n.Op == OSLICESTR && Isconst(n.Left, CTSTR) {
+		bound = int64(len(n.Left.Val.U.(string)))
+	}
+	if Isconst(&i, CTINT) {
+		if mpcmpfixc(i.Val.U.(*Mpint), 0) < 0 || bound >= 0 && mpcmpfixc(i.Val.U.(*Mpint), bound) > 0 {
+			Yyerror("slice index out of bounds")
+		}
+	}
+	if Isconst(&j, CTINT) {
+		if mpcmpfixc(j.Val.U.(*Mpint), 0) < 0 || bound >= 0 && mpcmpfixc(j.Val.U.(*Mpint), bound) > 0 {
+			Yyerror("slice index out of bounds")
+		}
+	}
+	if Isconst(&k, CTINT) {
+		if mpcmpfixc(k.Val.U.(*Mpint), 0) < 0 || bound >= 0 && mpcmpfixc(k.Val.U.(*Mpint), bound) > 0 {
+			Yyerror("slice index out of bounds")
+		}
+	}
+
+	// same reports whether n1 and n2 are the same register, variable, or constant.
+	same := func(n1, n2 *Node) bool {
+		return n1.Op == OREGISTER && n2.Op == OREGISTER && n1.Reg == n2.Reg ||
+			n1.Op == ONAME && n2.Op == ONAME && n1.Orig == n2.Orig && n1.Type == n2.Type && n1.Xoffset == n2.Xoffset ||
+			n1.Op == OLITERAL && n2.Op == OLITERAL && Mpcmpfixfix(n1.Val.U.(*Mpint), n2.Val.U.(*Mpint)) == 0
+	}
+
+	// obvious reports whether n1 <= n2 is obviously true,
+	// and it calls Yyerror if n1 <= n2 is obviously false.
+	obvious := func(n1, n2 *Node) bool {
+		if Debug['B'] != 0 { // -B disables bounds checks
+			return true
+		}
+		if same(n1, n2) {
+			return true // n1 == n2
+		}
+		if iszero(n1) {
+			return true // using unsigned compare, so 0 <= n2 always true
+		}
+		if xlen.Op != 0 && same(n1, &xlen) && xcap.Op != 0 && same(n2, &xcap) {
+			return true // len(x) <= cap(x) always true
+		}
+		if Isconst(n1, CTINT) && Isconst(n2, CTINT) {
+			if Mpcmpfixfix(n1.Val.U.(*Mpint), n2.Val.U.(*Mpint)) <= 0 {
+				return true // n1, n2 constants such that n1 <= n2
+			}
+			Yyerror("slice index out of bounds")
+			return true
+		}
+		return false
+	}
+
+	compare := func(n1, n2 *Node) {
+		// n1 might be a 64-bit constant, even on 32-bit architectures,
+		// but its value must fit in 32 bits (checked below), so it can
+		// be rematerialized with the 32-bit index type.
+		if Ctxt.Arch.Regsize == 4 && Is64(n1.Type) {
+			if mpcmpfixc(n1.Val.U.(*Mpint), 1<<31) >= 0 {
+				Fatal("missed slice out of bounds check")
+			}
+			var tmp Node
+			Nodconst(&tmp, indexRegType, Mpgetfix(n1.Val.U.(*Mpint)))
+			n1 = &tmp
+		}
+		p := Thearch.Ginscmp(OGT, indexRegType, n1, n2, -1)
+		panics = append(panics, p)
+	}
+
+	loadcap()
+	max := &xcap
+	if k.Op != 0 && (n.Op == OSLICE3 || n.Op == OSLICE3ARR) {
+		if obvious(&k, max) {
+			if Debug_slice > 0 {
+				Warn("slice: omit check for 3rd index")
+			}
+		} else {
+			compare(&k, max)
+		}
+		max = &k
+	}
+	if j.Op != 0 {
+		if obvious(&j, max) {
+			if Debug_slice > 0 {
+				Warn("slice: omit check for 2nd index")
+			}
+		} else {
+			compare(&j, max)
+		}
+		max = &j
+	}
+	if i.Op != 0 {
+		if obvious(&i, max) {
+			if Debug_slice > 0 {
+				Warn("slice: omit check for 1st index")
+			}
+		} else {
+			compare(&i, max)
+		}
+		max = &i
+	}
+	if k.Op != 0 && i.Op != 0 {
+		obvious(&i, &k) // emit compile-time error for x[3:n:2]
+	}
+
+	if len(panics) > 0 {
+		p := Gbranch(obj.AJMP, nil, 0)
+		for _, q := range panics {
+			Patch(q, Pc)
+		}
+		Ginscall(panicslice, -1)
+		Patch(p, Pc)
+	}
+
+	// Checks are done.
+	// Compute new len as j-i, cap as k-i.
+	// If i and j are same register, len is constant 0.
+	// If i and k are same register, cap is constant 0.
+	// If j and k are same register, len and cap are same.
+
+	// Done with xlen and xcap.
+	// Now safe to modify j and k even if they alias xlen, xcap.
+	if xlen.Op == OREGISTER {
+		Regfree(&xlen)
+	}
+	if xcap.Op == OREGISTER {
+		Regfree(&xcap)
+	}
+
+	// are j and k the same value?
+	sameJK := same(&j, &k)
+
+	if i.Op != 0 {
+		// j -= i
+		if same(&i, &j) {
+			if Debug_slice > 0 {
+				Warn("slice: result len == 0")
+			}
+			if j.Op == OREGISTER {
+				Regfree(&j)
+			}
+			Nodconst(&j, indexRegType, 0)
+		} else {
+			switch j.Op {
+			case OLITERAL:
+				if Isconst(&i, CTINT) {
+					Nodconst(&j, indexRegType, Mpgetfix(j.Val.U.(*Mpint))-Mpgetfix(i.Val.U.(*Mpint)))
+					if Debug_slice > 0 {
+						Warn("slice: result len == %d", Mpgetfix(j.Val.U.(*Mpint)))
+					}
+					break
+				}
+				fallthrough
+			case ONAME:
+				if !istemp(&j) {
+					var r Node
+					regalloc(&r, indexRegType, nil)
+					Thearch.Gmove(&j, &r)
+					j = r
+				}
+				fallthrough
+			case OREGISTER:
+				if i.Op == OLITERAL {
+					v := Mpgetfix(i.Val.U.(*Mpint))
+					if v != 0 {
+						ginscon(Thearch.Optoas(OSUB, indexRegType), v, &j)
+					}
+				} else {
+					gins(Thearch.Optoas(OSUB, indexRegType), &i, &j)
+				}
+			}
+		}
+
+		// k -= i if k is different from j and cap is needed.
+		// (The modifications to j above cannot affect i: if j and i were aliased,
+		// we replace j with a constant 0 instead of doing a subtraction,
+		// leaving i unmodified.)
+		if k.Op == 0 {
+			if Debug_slice > 0 && n.Op != OSLICESTR {
+				Warn("slice: result cap not computed")
+			}
+			// no need
+		} else if same(&i, &k) {
+			if k.Op == OREGISTER {
+				Regfree(&k)
+			}
+			Nodconst(&k, indexRegType, 0)
+			if Debug_slice > 0 {
+				Warn("slice: result cap == 0")
+			}
+		} else if sameJK {
+			if Debug_slice > 0 {
+				Warn("slice: result cap == result len")
+			}
+			// k and j were the same value; make k-i the same as j-i.
+			if k.Op == OREGISTER {
+				Regfree(&k)
+			}
+			k = j
+			if k.Op == OREGISTER {
+				Regrealloc(&k)
+			}
+		} else {
+			switch k.Op {
+			case OLITERAL:
+				if Isconst(&i, CTINT) {
+					Nodconst(&k, indexRegType, Mpgetfix(k.Val.U.(*Mpint))-Mpgetfix(i.Val.U.(*Mpint)))
+					if Debug_slice > 0 {
+						Warn("slice: result cap == %d", Mpgetfix(k.Val.U.(*Mpint)))
+					}
+					break
+				}
+				fallthrough
+			case ONAME:
+				if !istemp(&k) {
+					var r Node
+					regalloc(&r, indexRegType, nil)
+					Thearch.Gmove(&k, &r)
+					k = r
+				}
+				fallthrough
+			case OREGISTER:
+				if same(&i, &k) {
+					Regfree(&k)
+					Nodconst(&k, indexRegType, 0)
+					if Debug_slice > 0 {
+						Warn("slice: result cap == 0")
+					}
+				} else if i.Op == OLITERAL {
+					v := Mpgetfix(i.Val.U.(*Mpint))
+					if v != 0 {
+						ginscon(Thearch.Optoas(OSUB, indexRegType), v, &k)
+					}
+				} else {
+					gins(Thearch.Optoas(OSUB, indexRegType), &i, &k)
+				}
+			}
+		}
+	}
+
+	adjustBase := true
+	if i.Op == 0 || iszero(&i) {
+		if Debug_slice > 0 {
+			Warn("slice: skip base adjustment for 1st index 0")
+		}
+		adjustBase = false
+	} else if k.Op != 0 && iszero(&k) || k.Op == 0 && iszero(&j) {
+		if Debug_slice > 0 {
+			if n.Op == OSLICESTR {
+				Warn("slice: skip base adjustment for string len == 0")
+			} else {
+				Warn("slice: skip base adjustment for cap == 0")
+			}
+		}
+		adjustBase = false
+	}
+
+	if !adjustBase && !needFullUpdate {
+		if Debug_slice > 0 {
+			if k.Op != 0 {
+				Warn("slice: len/cap-only update")
+			} else {
+				Warn("slice: len-only update")
+			}
+		}
+		if i.Op == OREGISTER {
+			Regfree(&i)
+		}
+		// Write len (and cap if needed) back to x.
+		x.Xoffset += int64(Widthptr)
+		x.Type = Types[TUINT]
+		Thearch.Gmove(&j, &x)
+		x.Xoffset -= int64(Widthptr)
+		if k.Op != 0 {
+			x.Xoffset += 2 * int64(Widthptr)
+			x.Type = Types[TUINT]
+			Thearch.Gmove(&k, &x)
+			x.Xoffset -= 2 * int64(Widthptr)
+		}
+		Regfree(&x)
+	} else {
+		// Compute new base. May smash i.
+		if n.Op == OSLICEARR || n.Op == OSLICE3ARR {
+			Cgenr(n.Left, &xbase, nil)
+			Cgen_checknil(&xbase)
+		} else {
+			regalloc(&xbase, Ptrto(res.Type.Type), nil)
+			x.Type = xbase.Type
+			Thearch.Gmove(&x, &xbase)
+			Regfree(&x)
+		}
+		if i.Op != 0 && adjustBase {
+			// Branch around the base adjustment if the resulting cap will be 0.
+			var p *obj.Prog
+			size := &k
+			if k.Op == 0 {
+				size = &j
+			}
+			if Isconst(size, CTINT) {
+				// zero was checked above, must be non-zero.
+			} else {
+				var tmp Node
+				Nodconst(&tmp, indexRegType, 0)
+				p = Thearch.Ginscmp(OEQ, indexRegType, size, &tmp, -1)
+			}
+			var w int64
+			if n.Op == OSLICESTR {
+				w = 1 // res is string, elem size is 1 (byte)
+			} else {
+				w = res.Type.Type.Width // res is []T, elem size is T.width
+			}
+			if Isconst(&i, CTINT) {
+				ginscon(Thearch.Optoas(OADD, xbase.Type), Mpgetfix(i.Val.U.(*Mpint))*w, &xbase)
+			} else if Thearch.AddIndex != nil && Thearch.AddIndex(&i, w, &xbase) {
+				// done by back end
+			} else if w == 1 {
+				gins(Thearch.Optoas(OADD, xbase.Type), &i, &xbase)
+			} else {
+				if i.Op == ONAME && !istemp(&i) {
+					var tmp Node
+					Tempname(&tmp, i.Type)
+					Thearch.Gmove(&i, &tmp)
+					i = tmp
+				}
+				ginscon(Thearch.Optoas(OMUL, i.Type), w, &i)
+				gins(Thearch.Optoas(OADD, xbase.Type), &i, &xbase)
+			}
+			if p != nil {
+				Patch(p, Pc)
+			}
+		}
+		if i.Op == OREGISTER {
+			Regfree(&i)
+		}
+
+		// Write len, cap, base to result.
+		if res.Op == ONAME {
+			Gvardef(res)
+		}
+		Igen(res, &x, nil)
+		x.Xoffset += int64(Widthptr)
+		x.Type = Types[TUINT]
+		Thearch.Gmove(&j, &x)
+		x.Xoffset -= int64(Widthptr)
+		if k.Op != 0 {
+			x.Xoffset += 2 * int64(Widthptr)
+			Thearch.Gmove(&k, &x)
+			x.Xoffset -= 2 * int64(Widthptr)
+		}
+		x.Type = xbase.Type
+		cgen_wb(&xbase, &x, wb)
+		Regfree(&xbase)
+		Regfree(&x)
+	}
+
+	if j.Op == OREGISTER {
+		Regfree(&j)
+	}
+	if k.Op == OREGISTER {
+		Regfree(&k)
+	}
+}
diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go
new file mode 100644
index 0000000..b51e74b
--- /dev/null
+++ b/src/cmd/compile/internal/gc/closure.go
@@ -0,0 +1,689 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+	"cmd/internal/obj"
+	"fmt"
+)
+
+/*
+ * function literals aka closures
+ */
+func closurehdr(ntype *Node) {
+	var name *Node
+	var a *Node
+
+	n := Nod(OCLOSURE, nil, nil)
+	n.Ntype = ntype
+	n.Funcdepth = Funcdepth
+	n.Func.Outerfunc = Curfn
+
+	funchdr(n)
+
+	// steal ntype's argument names and
+	// leave a fresh copy in their place.
+	// references to these variables need to
+	// refer to the variables in the external
+	// function declared below; see walkclosure.
+	n.List = ntype.List
+
+	n.Rlist = ntype.Rlist
+	ntype.List = nil
+	ntype.Rlist = nil
+	for l := n.List; l != nil; l = l.Next {
+		name = l.N.Left
+		if name != nil {
+			name = newname(name.Sym)
+		}
+		a = Nod(ODCLFIELD, name, l.N.Right)
+		a.Isddd = l.N.Isddd
+		if name != nil {
+			name.Isddd = a.Isddd
+		}
+		ntype.List = list(ntype.List, a)
+	}
+
+	for l := n.Rlist; l != nil; l = l.Next {
+		name = l.N.Left
+		if name != nil {
+			name = newname(name.Sym)
+		}
+		ntype.Rlist = list(ntype.Rlist, Nod(ODCLFIELD, name, l.N.Right))
+	}
+}
+
+func closurebody(body *NodeList) *Node {
+	if body == nil {
+		body = list1(Nod(OEMPTY, nil, nil))
+	}
+
+	func_ := Curfn
+	func_.Nbody = body
+	func_.Func.Endlineno = lineno
+	funcbody(func_)
+
+	// closure-specific variables are hanging off the
+	// ordinary ones in the symbol table; see oldname.
+	// unhook them.
+	// make the list of pointers for the closure call.
+	var v *Node
+	for l := func_.Func.Cvars; l != nil; l = l.Next {
+		v = l.N
+		v.Closure.Closure = v.Outer
+		v.Outerexpr = oldname(v.Sym)
+	}
+
+	return func_
+}
+
+func typecheckclosure(func_ *Node, top int) {
+	var n *Node
+
+	for l := func_.Func.Cvars; l != nil; l = l.Next {
+		n = l.N.Closure
+		if !n.Name.Captured {
+			n.Name.Captured = true
+			if n.Name.Decldepth == 0 {
+				Fatal("typecheckclosure: var %v does not have decldepth assigned", Nconv(n, obj.FmtShort))
+			}
+
+			// Ignore assignments to the variable in straightline code
+			// preceding the first capturing by a closure.
+			if n.Name.Decldepth == decldepth {
+				n.Assigned = false
+			}
+		}
+	}
+
+	for l := func_.Func.Dcl; l != nil; l = l.Next {
+		if l.N.Op == ONAME && (l.N.Class == PPARAM || l.N.Class == PPARAMOUT) {
+			l.N.Name.Decldepth = 1
+		}
+	}
+
+	oldfn := Curfn
+	typecheck(&func_.Ntype, Etype)
+	func_.Type = func_.Ntype.Type
+	func_.Top = top
+
+	// Type check the body now, but only if we're inside a function.
+	// At top level (in a variable initialization: curfn==nil) we're not
+	// ready to type check code yet; we'll check it later, because the
+	// underlying closure function we create is added to xtop.
+	if Curfn != nil && func_.Type != nil {
+		Curfn = func_
+		olddd := decldepth
+		decldepth = 1
+		typechecklist(func_.Nbody, Etop)
+		decldepth = olddd
+		Curfn = oldfn
+	}
+
+	// Create top-level function
+	xtop = list(xtop, makeclosure(func_))
+}
+
+// closurename returns name for OCLOSURE n.
+// It is not as simple as it ought to be, because we typecheck nested closures
+// starting from the innermost one. So when we check the inner closure,
+// we don't yet have name for the outer closure. This function uses recursion
+// to generate names all the way up if necessary.
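+// For example: a top-level closure is named glob.funcN, one inside a
+// function F is named F.funcN, and a closure nested inside that one is
+// named F.funcN.M.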
+
+var closurename_closgen int
+
+func closurename(n *Node) *Sym {
+	if n.Sym != nil {
+		return n.Sym
+	}
+	gen := 0
+	outer := ""
+	prefix := ""
+	if n.Func.Outerfunc == nil {
+		// Global closure.
+		outer = "glob"
+
+		prefix = "func"
+		closurename_closgen++
+		gen = closurename_closgen
+	} else if n.Func.Outerfunc.Op == ODCLFUNC {
+		// The outermost closure inside of a named function.
+		outer = n.Func.Outerfunc.Nname.Sym.Name
+
+		prefix = "func"
+
+		// Yes, functions can be named _.
+		// Can't use the per-function closgen in that case,
+		// because it would lead to name clashes.
+		if !isblank(n.Func.Outerfunc.Nname) {
+			n.Func.Outerfunc.Func.Closgen++
+			gen = n.Func.Outerfunc.Func.Closgen
+		} else {
+			closurename_closgen++
+			gen = closurename_closgen
+		}
+	} else if n.Func.Outerfunc.Op == OCLOSURE {
+		// Nested closure, recurse.
+		outer = closurename(n.Func.Outerfunc).Name
+
+		prefix = ""
+		n.Func.Outerfunc.Func.Closgen++
+		gen = n.Func.Outerfunc.Func.Closgen
+	} else {
+		Fatal("closurename called for %v", Nconv(n, obj.FmtShort))
+	}
+	n.Sym = Lookupf("%s.%s%d", outer, prefix, gen)
+	return n.Sym
+}
+
+func makeclosure(func_ *Node) *Node {
+	/*
+	 * wrap body in external function
+	 * that begins by reading closure parameters.
+	 */
+	xtype := Nod(OTFUNC, nil, nil)
+
+	xtype.List = func_.List
+	xtype.Rlist = func_.Rlist
+
+	// create the function
+	xfunc := Nod(ODCLFUNC, nil, nil)
+
+	xfunc.Nname = newfuncname(closurename(func_))
+	xfunc.Nname.Sym.Flags |= SymExported // disable export
+	xfunc.Nname.Ntype = xtype
+	xfunc.Nname.Defn = xfunc
+	declare(xfunc.Nname, PFUNC)
+	xfunc.Nname.Funcdepth = func_.Funcdepth
+	xfunc.Funcdepth = func_.Funcdepth
+	xfunc.Func.Endlineno = func_.Func.Endlineno
+
+	xfunc.Nbody = func_.Nbody
+	xfunc.Func.Dcl = concat(func_.Func.Dcl, xfunc.Func.Dcl)
+	if xfunc.Nbody == nil {
+		Fatal("empty body - won't generate any code")
+	}
+	typecheck(&xfunc, Etop)
+
+	xfunc.Closure = func_
+	func_.Closure = xfunc
+
+	func_.Nbody = nil
+	func_.List = nil
+	func_.Rlist = nil
+
+	return xfunc
+}
+
+// capturevars is called in a separate phase after all typechecking is done.
+// It decides whether each variable captured by a closure should be captured
+// by value or by reference.
+// We use value capturing for values <= 128 bytes that are never reassigned
+// after capturing (effectively constant).
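+// For example, a small variable that the closure only reads is captured
+// by value, while one that it assigns to (or whose address is taken)
+// must be captured by reference:
+//
+//	x := 1; _ = func() int { return x } // x captured by value
+//	y := 1; _ = func() { y++ }          // y captured by reference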
+func capturevars(xfunc *Node) {
+	var v *Node
+	var outer *Node
+
+	lno := int(lineno)
+	lineno = xfunc.Lineno
+
+	func_ := xfunc.Closure
+	func_.Func.Enter = nil
+	for l := func_.Func.Cvars; l != nil; l = l.Next {
+		v = l.N
+		if v.Type == nil {
+			// if v.Type is nil, it means v looked like it was
+			// going to be used in the closure but wasn't.
+			// this happens because when parsing a, b, c := f()
+			// the a, b, c get parsed as references to older
+			// a, b, c before the parser figures out this is a
+			// declaration.
+			v.Op = OXXX
+
+			continue
+		}
+
+		// type check the & of closed variables outside the closure,
+		// so that the outer frame also grabs them and knows they escape.
+		dowidth(v.Type)
+
+		outer = v.Outerexpr
+		v.Outerexpr = nil
+
+		// out parameters will be assigned to implicitly upon return.
+		if outer.Class != PPARAMOUT && !v.Closure.Addrtaken && !v.Closure.Assigned && v.Type.Width <= 128 {
+			v.Name.Byval = true
+		} else {
+			v.Closure.Addrtaken = true
+			outer = Nod(OADDR, outer, nil)
+		}
+
+		if Debug['m'] > 1 {
+			var name *Sym
+			if v.Curfn != nil && v.Curfn.Nname != nil {
+				name = v.Curfn.Nname.Sym
+			}
+			how := "ref"
+			if v.Name.Byval {
+				how = "value"
+			}
+			Warnl(int(v.Lineno), "%v capturing by %s: %v (addr=%v assign=%v width=%d)", name, how, v.Sym, v.Closure.Addrtaken, v.Closure.Assigned, int32(v.Type.Width))
+		}
+
+		typecheck(&outer, Erv)
+		func_.Func.Enter = list(func_.Func.Enter, outer)
+	}
+
+	lineno = int32(lno)
+}
+
+// transformclosure is called in a separate phase after escape analysis.
+// It transforms closure bodies to properly reference captured variables.
+func transformclosure(xfunc *Node) {
+	lno := int(lineno)
+	lineno = xfunc.Lineno
+	func_ := xfunc.Closure
+
+	if func_.Top&Ecall != 0 {
+		// If the closure is directly called, we transform it to a plain function call
+		// with variables passed as args. This avoids allocation of a closure object.
+		// Here we do only a part of the transformation. Walk of OCALLFUNC(OCLOSURE)
+		// will complete the transformation later.
+		// For illustration, the following closure:
+		//	func(a int) {
+		//		println(byval)
+		//		byref++
+		//	}(42)
+		// becomes:
+		//	func(a int, byval int, &byref *int) {
+		//		println(byval)
+		//		(*&byref)++
+		//	}(42, byval, &byref)
+
+		// f is ONAME of the actual function.
+		f := xfunc.Nname
+
+		// Get pointer to input arguments and rewind to the end.
+		// We are going to append captured variables to input args.
+		param := &getinargx(f.Type).Type
+
+		for ; *param != nil; param = &(*param).Down {
+		}
+		var v *Node
+		var addr *Node
+		var fld *Type
+		for l := func_.Func.Cvars; l != nil; l = l.Next {
+			v = l.N
+			if v.Op == OXXX {
+				continue
+			}
+			fld = typ(TFIELD)
+			fld.Funarg = 1
+			if v.Name.Byval {
+				// If v is captured by value, we merely downgrade it to PPARAM.
+				v.Class = PPARAM
+
+				v.Ullman = 1
+				fld.Nname = v
+			} else {
+				// If v of type T is captured by reference,
+				// we introduce function param &v *T
+				// and v remains PPARAMREF with &v heapaddr
+				// (accesses will implicitly deref &v).
+				addr = newname(Lookupf("&%s", v.Sym.Name))
+				addr.Type = Ptrto(v.Type)
+				addr.Class = PPARAM
+				v.Name.Heapaddr = addr
+				fld.Nname = addr
+			}
+
+			fld.Type = fld.Nname.Type
+			fld.Sym = fld.Nname.Sym
+
+			// Declare the new param and append it to input arguments.
+			xfunc.Func.Dcl = list(xfunc.Func.Dcl, fld.Nname)
+
+			*param = fld
+			param = &fld.Down
+		}
+
+		// Recalculate param offsets.
+		if f.Type.Width > 0 {
+			Fatal("transformclosure: width is already calculated")
+		}
+		dowidth(f.Type)
+		xfunc.Type = f.Type // update type of ODCLFUNC
+	} else {
+		// The closure is not called, so it is going to stay as closure.
+		nvar := 0
+
+		var body *NodeList
+		offset := int64(Widthptr)
+		var addr *Node
+		var v *Node
+		var cv *Node
+		for l := func_.Func.Cvars; l != nil; l = l.Next {
+			v = l.N
+			if v.Op == OXXX {
+				continue
+			}
+			nvar++
+
+			// cv refers to the field inside of closure OSTRUCTLIT.
+			cv = Nod(OCLOSUREVAR, nil, nil)
+
+			cv.Type = v.Type
+			if !v.Name.Byval {
+				cv.Type = Ptrto(v.Type)
+			}
+			offset = Rnd(offset, int64(cv.Type.Align))
+			cv.Xoffset = offset
+			offset += cv.Type.Width
+
+			if v.Name.Byval && v.Type.Width <= int64(2*Widthptr) && Thearch.Thechar == '6' {
+				// If it is a small variable captured by value, downgrade it to PAUTO.
+				// This optimization is currently enabled only for amd64, see:
+				// https://github.com/golang/go/issues/9865
+				v.Class = PAUTO
+
+				v.Ullman = 1
+				xfunc.Func.Dcl = list(xfunc.Func.Dcl, v)
+				body = list(body, Nod(OAS, v, cv))
+			} else {
+				// Declare variable holding addresses taken from closure
+				// and initialize in entry prologue.
+				addr = newname(Lookupf("&%s", v.Sym.Name))
+				addr.Ntype = Nod(OIND, typenod(v.Type), nil)
+				addr.Class = PAUTO
+				addr.Used = true
+				addr.Curfn = xfunc
+				xfunc.Func.Dcl = list(xfunc.Func.Dcl, addr)
+				v.Name.Heapaddr = addr
+				if v.Name.Byval {
+					cv = Nod(OADDR, cv, nil)
+				}
+				body = list(body, Nod(OAS, addr, cv))
+			}
+		}
+
+		typechecklist(body, Etop)
+		walkstmtlist(body)
+		xfunc.Func.Enter = body
+		xfunc.Func.Needctxt = nvar > 0
+	}
+
+	lineno = int32(lno)
+}
+
+func walkclosure(func_ *Node, init **NodeList) *Node {
+	// If no closure vars, don't bother wrapping.
+	if func_.Func.Cvars == nil {
+		return func_.Closure.Nname
+	}
+
+	// Create closure in the form of a composite literal.
+	// supposing the closure captures an int i and a string s
+	// and has one float64 argument and no results,
+	// the generated code looks like:
+	//
+	//	clos = &struct{.F uintptr; i *int; s *string}{func.1, &i, &s}
+	//
+	// The use of the struct provides type information to the garbage
+	// collector so that it can walk the closure. We could use (in this case)
+	// [3]unsafe.Pointer instead, but that would leave the gc in the dark.
+	// The information appears in the binary in the form of type descriptors;
+	// the struct is unnamed so that closures in multiple packages with the
+	// same struct type can share the descriptor.
+
+	typ := Nod(OTSTRUCT, nil, nil)
+
+	typ.List = list1(Nod(ODCLFIELD, newname(Lookup(".F")), typenod(Types[TUINTPTR])))
+	var typ1 *Node
+	var v *Node
+	for l := func_.Func.Cvars; l != nil; l = l.Next {
+		v = l.N
+		if v.Op == OXXX {
+			continue
+		}
+		typ1 = typenod(v.Type)
+		if !v.Name.Byval {
+			typ1 = Nod(OIND, typ1, nil)
+		}
+		typ.List = list(typ.List, Nod(ODCLFIELD, newname(v.Sym), typ1))
+	}
+
+	clos := Nod(OCOMPLIT, nil, Nod(OIND, typ, nil))
+	clos.Esc = func_.Esc
+	clos.Right.Implicit = true
+	clos.List = concat(list1(Nod(OCFUNC, func_.Closure.Nname, nil)), func_.Func.Enter)
+
+	// Force type conversion from *struct to the func type.
+	clos = Nod(OCONVNOP, clos, nil)
+
+	clos.Type = func_.Type
+
+	typecheck(&clos, Erv)
+
+	// typecheck will insert a PTRLIT node under CONVNOP,
+	// tag it with escape analysis result.
+	clos.Left.Esc = func_.Esc
+
+	// non-escaping temp to use, if any.
+	// orderexpr did not compute the type; fill it in now.
+	if func_.Alloc != nil {
+		func_.Alloc.Type = clos.Left.Left.Type
+		func_.Alloc.Orig.Type = func_.Alloc.Type
+		clos.Left.Right = func_.Alloc
+		func_.Alloc = nil
+	}
+
+	walkexpr(&clos, init)
+
+	return clos
+}
+
+func typecheckpartialcall(fn *Node, sym *Node) {
+	switch fn.Op {
+	case ODOTINTER, ODOTMETH:
+		break
+
+	default:
+		Fatal("invalid typecheckpartialcall")
+	}
+
+	// Create top-level function.
+	fn.Nname = makepartialcall(fn, fn.Type, sym)
+
+	fn.Right = sym
+	fn.Op = OCALLPART
+	fn.Type = fn.Nname.Type
+}
+
+var makepartialcall_gopkg *Pkg
+
+func makepartialcall(fn *Node, t0 *Type, meth *Node) *Node {
+	var p string
+
+	rcvrtype := fn.Left.Type
+	if exportname(meth.Sym.Name) {
+		p = fmt.Sprintf("(%v).%s-fm", Tconv(rcvrtype, obj.FmtLeft|obj.FmtShort), meth.Sym.Name)
+	} else {
+		p = fmt.Sprintf("(%v).(%v)-fm", Tconv(rcvrtype, obj.FmtLeft|obj.FmtShort), Sconv(meth.Sym, obj.FmtLeft))
+	}
+	basetype := rcvrtype
+	if Isptr[rcvrtype.Etype] {
+		basetype = basetype.Type
+	}
+	if basetype.Etype != TINTER && basetype.Sym == nil {
+		Fatal("missing base type for %v", rcvrtype)
+	}
+
+	var spkg *Pkg
+	if basetype.Sym != nil {
+		spkg = basetype.Sym.Pkg
+	}
+	if spkg == nil {
+		if makepartialcall_gopkg == nil {
+			makepartialcall_gopkg = mkpkg("go")
+		}
+		spkg = makepartialcall_gopkg
+	}
+
+	sym := Pkglookup(p, spkg)
+
+	if sym.Flags&SymUniq != 0 {
+		return sym.Def
+	}
+	sym.Flags |= SymUniq
+
+	savecurfn := Curfn
+	Curfn = nil
+
+	xtype := Nod(OTFUNC, nil, nil)
+	i := 0
+	var l *NodeList
+	var callargs *NodeList
+	ddd := false
+	xfunc := Nod(ODCLFUNC, nil, nil)
+	Curfn = xfunc
+	var fld *Node
+	var n *Node
+	for t := getinargx(t0).Type; t != nil; t = t.Down {
+		n = newname(Lookupf("a%d", i))
+		i++
+		n.Class = PPARAM
+		xfunc.Func.Dcl = list(xfunc.Func.Dcl, n)
+		callargs = list(callargs, n)
+		fld = Nod(ODCLFIELD, n, typenod(t.Type))
+		if t.Isddd {
+			fld.Isddd = true
+			ddd = true
+		}
+
+		l = list(l, fld)
+	}
+
+	xtype.List = l
+	i = 0
+	l = nil
+	var retargs *NodeList
+	for t := getoutargx(t0).Type; t != nil; t = t.Down {
+		n = newname(Lookupf("r%d", i))
+		i++
+		n.Class = PPARAMOUT
+		xfunc.Func.Dcl = list(xfunc.Func.Dcl, n)
+		retargs = list(retargs, n)
+		l = list(l, Nod(ODCLFIELD, n, typenod(t.Type)))
+	}
+
+	xtype.Rlist = l
+
+	xfunc.Func.Dupok = true
+	xfunc.Nname = newfuncname(sym)
+	xfunc.Nname.Sym.Flags |= SymExported // disable export
+	xfunc.Nname.Ntype = xtype
+	xfunc.Nname.Defn = xfunc
+	declare(xfunc.Nname, PFUNC)
+
+	// Declare and initialize variable holding receiver.
+
+	xfunc.Func.Needctxt = true
+	cv := Nod(OCLOSUREVAR, nil, nil)
+	cv.Xoffset = int64(Widthptr)
+	cv.Type = rcvrtype
+	if int(cv.Type.Align) > Widthptr {
+		cv.Xoffset = int64(cv.Type.Align)
+	}
+	ptr := Nod(ONAME, nil, nil)
+	ptr.Sym = Lookup("rcvr")
+	ptr.Class = PAUTO
+	ptr.Addable = true
+	ptr.Ullman = 1
+	ptr.Used = true
+	ptr.Curfn = xfunc
+	xfunc.Func.Dcl = list(xfunc.Func.Dcl, ptr)
+	var body *NodeList
+	if Isptr[rcvrtype.Etype] || Isinter(rcvrtype) {
+		ptr.Ntype = typenod(rcvrtype)
+		body = list(body, Nod(OAS, ptr, cv))
+	} else {
+		ptr.Ntype = typenod(Ptrto(rcvrtype))
+		body = list(body, Nod(OAS, ptr, Nod(OADDR, cv, nil)))
+	}
+
+	call := Nod(OCALL, Nod(OXDOT, ptr, meth), nil)
+	call.List = callargs
+	call.Isddd = ddd
+	if t0.Outtuple == 0 {
+		body = list(body, call)
+	} else {
+		n := Nod(OAS2, nil, nil)
+		n.List = retargs
+		n.Rlist = list1(call)
+		body = list(body, n)
+		n = Nod(ORETURN, nil, nil)
+		body = list(body, n)
+	}
+
+	xfunc.Nbody = body
+
+	typecheck(&xfunc, Etop)
+	sym.Def = xfunc
+	xtop = list(xtop, xfunc)
+	Curfn = savecurfn
+
+	return xfunc
+}
+
+func walkpartialcall(n *Node, init **NodeList) *Node {
+	// Create closure in the form of a composite literal.
+	// For x.M with receiver (x) type T, the generated code looks like:
+	//
+	//	clos = &struct{F uintptr; R T}{M.T·f, x}
+	//
+	// Like walkclosure above.
+
+	if Isinter(n.Left.Type) {
+		// Trigger panic for method on nil interface now.
+		// Otherwise it happens in the wrapper and is confusing.
+		n.Left = cheapexpr(n.Left, init)
+
+		checknil(n.Left, init)
+	}
+
+	typ := Nod(OTSTRUCT, nil, nil)
+	typ.List = list1(Nod(ODCLFIELD, newname(Lookup("F")), typenod(Types[TUINTPTR])))
+	typ.List = list(typ.List, Nod(ODCLFIELD, newname(Lookup("R")), typenod(n.Left.Type)))
+
+	clos := Nod(OCOMPLIT, nil, Nod(OIND, typ, nil))
+	clos.Esc = n.Esc
+	clos.Right.Implicit = true
+	clos.List = list1(Nod(OCFUNC, n.Nname.Nname, nil))
+	clos.List = list(clos.List, n.Left)
+
+	// Force type conversion from *struct to the func type.
+	clos = Nod(OCONVNOP, clos, nil)
+
+	clos.Type = n.Type
+
+	typecheck(&clos, Erv)
+
+	// typecheck will insert a PTRLIT node under CONVNOP,
+	// tag it with escape analysis result.
+	clos.Left.Esc = n.Esc
+
+	// non-escaping temp to use, if any.
+	// orderexpr did not compute the type; fill it in now.
+	if n.Alloc != nil {
+		n.Alloc.Type = clos.Left.Left.Type
+		n.Alloc.Orig.Type = n.Alloc.Type
+		clos.Left.Right = n.Alloc
+		n.Alloc = nil
+	}
+
+	walkexpr(&clos, init)
+
+	return clos
+}
diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go
new file mode 100644
index 0000000..b3605ab
--- /dev/null
+++ b/src/cmd/compile/internal/gc/const.go
@@ -0,0 +1,1717 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+	"cmd/compile/internal/big"
+	"cmd/internal/obj"
+	"strings"
+)
+
+// Int returns n as an int64.
+// n must be an integer constant.
+func (n *Node) Int() int64 {
+	if !Isconst(n, CTINT) {
+		Fatal("Int(%v)", n)
+	}
+	return Mpgetfix(n.Val.U.(*Mpint))
+}
+
+// SetInt sets n's value to i.
+// n must be an integer constant.
+func (n *Node) SetInt(i int64) {
+	if !Isconst(n, CTINT) {
+		Fatal("SetInt(%v)", n)
+	}
+	Mpmovecfix(n.Val.U.(*Mpint), i)
+}
+
+// SetBigInt sets n's value to x.
+// n must be an integer constant.
+func (n *Node) SetBigInt(x *big.Int) {
+	if !Isconst(n, CTINT) {
+		Fatal("SetBigInt(%v)", n)
+	}
+	n.Val.U.(*Mpint).Val.Set(x)
+}
+
+// Bool returns n as a bool.
+// n must be a boolean constant.
+func (n *Node) Bool() bool {
+	if !Isconst(n, CTBOOL) {
+		Fatal("Int(%v)", n)
+	}
+	return n.Val.U.(bool)
+}
+
+/*
+ * truncate float literal fv to 32-bit or 64-bit precision
+ * according to type; return truncated value.
+ */
+func truncfltlit(oldv *Mpflt, t *Type) *Mpflt {
+	if t == nil {
+		return oldv
+	}
+
+	var v Val
+	v.Ctype = CTFLT
+	v.U = oldv
+	overflow(v, t)
+
+	fv := newMpflt()
+	mpmovefltflt(fv, oldv)
+
+	// convert large precision literal floating
+	// into limited precision (float64 or float32)
+	switch t.Etype {
+	case TFLOAT64:
+		d := mpgetflt(fv)
+		Mpmovecflt(fv, d)
+
+	case TFLOAT32:
+		d := mpgetflt32(fv)
+		Mpmovecflt(fv, d)
+	}
+
+	return fv
+}
+
+/*
+ * convert n, if literal, to type t.
+ * implicit conversion.
+ */
+func Convlit(np **Node, t *Type) {
+	convlit1(np, t, false)
+}
+
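+// For example, Convlit handles the implicit conversion in
+//	var f float64 = 1
+// turning the ideal constant 1 into a float64, while an explicit
+// conversion such as string(65) reaches convlit1 with explicit=true,
+// which additionally permits int-to-string conversion.
+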
+/*
+ * convert n, if literal, to type t.
+ * return a new node if necessary
+ * (if n is a named constant, can't edit n->type directly).
+ */
+func convlit1(np **Node, t *Type, explicit bool) {
+	n := *np
+	if n == nil || t == nil || n.Type == nil || isideal(t) || n.Type == t {
+		return
+	}
+	if !explicit && !isideal(n.Type) {
+		return
+	}
+
+	if n.Op == OLITERAL {
+		nn := Nod(OXXX, nil, nil)
+		*nn = *n
+		n = nn
+		*np = n
+	}
+
+	switch n.Op {
+	default:
+		if n.Type == idealbool {
+			if t.Etype == TBOOL {
+				n.Type = t
+			} else {
+				n.Type = Types[TBOOL]
+			}
+		}
+
+		if n.Type.Etype == TIDEAL {
+			Convlit(&n.Left, t)
+			Convlit(&n.Right, t)
+			n.Type = t
+		}
+
+		return
+
+		// target is invalid type for a constant?  leave alone.
+	case OLITERAL:
+		if !okforconst[t.Etype] && n.Type.Etype != TNIL {
+			defaultlit(&n, nil)
+			*np = n
+			return
+		}
+
+	case OLSH, ORSH:
+		convlit1(&n.Left, t, explicit && isideal(n.Left.Type))
+		t = n.Left.Type
+		if t != nil && t.Etype == TIDEAL && n.Val.Ctype != CTINT {
+			n.Val = toint(n.Val)
+		}
+		if t != nil && !Isint[t.Etype] {
+			Yyerror("invalid operation: %v (shift of type %v)", n, t)
+			t = nil
+		}
+
+		n.Type = t
+		return
+
+	case OCOMPLEX:
+		if n.Type.Etype == TIDEAL {
+			switch t.Etype {
+			// If trying to convert to non-complex type,
+			// leave as complex128 and let typechecker complain.
+			default:
+				t = Types[TCOMPLEX128]
+				fallthrough
+			case TCOMPLEX128:
+				n.Type = t
+
+				Convlit(&n.Left, Types[TFLOAT64])
+				Convlit(&n.Right, Types[TFLOAT64])
+
+			case TCOMPLEX64:
+				n.Type = t
+				Convlit(&n.Left, Types[TFLOAT32])
+				Convlit(&n.Right, Types[TFLOAT32])
+			}
+		}
+
+		return
+	}
+
+	// avoid repeated calculations and errors
+	if Eqtype(n.Type, t) {
+		return
+	}
+
+	ct := consttype(n)
+	var et int
+	if ct < 0 {
+		goto bad
+	}
+
+	et = int(t.Etype)
+	if et == TINTER {
+		if ct == CTNIL && n.Type == Types[TNIL] {
+			n.Type = t
+			return
+		}
+
+		defaultlit(np, nil)
+		return
+	}
+
+	switch ct {
+	default:
+		goto bad
+
+	case CTNIL:
+		switch et {
+		default:
+			n.Type = nil
+			goto bad
+
+			// let normal conversion code handle it
+		case TSTRING:
+			return
+
+		case TARRAY:
+			if !Isslice(t) {
+				goto bad
+			}
+
+		case TPTR32,
+			TPTR64,
+			TINTER,
+			TMAP,
+			TCHAN,
+			TFUNC,
+			TUNSAFEPTR:
+			break
+
+			// A nil literal may be converted to uintptr
+		// if it is an unsafe.Pointer
+		case TUINTPTR:
+			if n.Type.Etype == TUNSAFEPTR {
+				n.Val.U = new(Mpint)
+				Mpmovecfix(n.Val.U.(*Mpint), 0)
+				n.Val.Ctype = CTINT
+			} else {
+				goto bad
+			}
+		}
+
+	case CTSTR, CTBOOL:
+		if et != int(n.Type.Etype) {
+			goto bad
+		}
+
+	case CTINT, CTRUNE, CTFLT, CTCPLX:
+		if n.Type.Etype == TUNSAFEPTR && t.Etype != TUINTPTR {
+			goto bad
+		}
+		ct := int(n.Val.Ctype)
+		if Isint[et] {
+			switch ct {
+			default:
+				goto bad
+
+			case CTCPLX, CTFLT, CTRUNE:
+				n.Val = toint(n.Val)
+				fallthrough
+			case CTINT:
+				overflow(n.Val, t)
+			}
+		} else if Isfloat[et] {
+			switch ct {
+			default:
+				goto bad
+
+			case CTCPLX, CTINT, CTRUNE:
+				n.Val = toflt(n.Val)
+				fallthrough
+			case CTFLT:
+				n.Val.U = truncfltlit(n.Val.U.(*Mpflt), t)
+			}
+		} else if Iscomplex[et] {
+			switch ct {
+			default:
+				goto bad
+
+			case CTFLT, CTINT, CTRUNE:
+				n.Val = tocplx(n.Val)
+
+			case CTCPLX:
+				overflow(n.Val, t)
+			}
+		} else if et == TSTRING && (ct == CTINT || ct == CTRUNE) && explicit {
+			n.Val = tostr(n.Val)
+		} else {
+			goto bad
+		}
+	}
+
+	n.Type = t
+	return
+
+bad:
+	if n.Diag == 0 {
+		if t.Broke == 0 {
+			Yyerror("cannot convert %v to type %v", n, t)
+		}
+		n.Diag = 1
+	}
+
+	if isideal(n.Type) {
+		defaultlit(&n, nil)
+		*np = n
+	}
+}
+
+func copyval(v Val) Val {
+	switch v.Ctype {
+	case CTINT, CTRUNE:
+		i := new(Mpint)
+		mpmovefixfix(i, v.U.(*Mpint))
+		v.U = i
+
+	case CTFLT:
+		f := newMpflt()
+		mpmovefltflt(f, v.U.(*Mpflt))
+		v.U = f
+
+	case CTCPLX:
+		c := new(Mpcplx)
+		mpmovefltflt(&c.Real, &v.U.(*Mpcplx).Real)
+		mpmovefltflt(&c.Imag, &v.U.(*Mpcplx).Imag)
+		v.U = c
+	}
+
+	return v
+}
+
+func tocplx(v Val) Val {
+	switch v.Ctype {
+	case CTINT, CTRUNE:
+		c := new(Mpcplx)
+		Mpmovefixflt(&c.Real, v.U.(*Mpint))
+		Mpmovecflt(&c.Imag, 0.0)
+		v.Ctype = CTCPLX
+		v.U = c
+
+	case CTFLT:
+		c := new(Mpcplx)
+		mpmovefltflt(&c.Real, v.U.(*Mpflt))
+		Mpmovecflt(&c.Imag, 0.0)
+		v.Ctype = CTCPLX
+		v.U = c
+	}
+
+	return v
+}
+
+func toflt(v Val) Val {
+	switch v.Ctype {
+	case CTINT, CTRUNE:
+		f := newMpflt()
+		Mpmovefixflt(f, v.U.(*Mpint))
+		v.Ctype = CTFLT
+		v.U = f
+
+	case CTCPLX:
+		f := newMpflt()
+		mpmovefltflt(f, &v.U.(*Mpcplx).Real)
+		if mpcmpfltc(&v.U.(*Mpcplx).Imag, 0) != 0 {
+			Yyerror("constant %v%vi truncated to real", Fconv(&v.U.(*Mpcplx).Real, obj.FmtSharp), Fconv(&v.U.(*Mpcplx).Imag, obj.FmtSharp|obj.FmtSign))
+		}
+		v.Ctype = CTFLT
+		v.U = f
+	}
+
+	return v
+}
+
+func toint(v Val) Val {
+	switch v.Ctype {
+	case CTRUNE:
+		v.Ctype = CTINT
+
+	case CTFLT:
+		i := new(Mpint)
+		if mpmovefltfix(i, v.U.(*Mpflt)) < 0 {
+			Yyerror("constant %v truncated to integer", Fconv(v.U.(*Mpflt), obj.FmtSharp))
+		}
+		v.Ctype = CTINT
+		v.U = i
+
+	case CTCPLX:
+		i := new(Mpint)
+		if mpmovefltfix(i, &v.U.(*Mpcplx).Real) < 0 {
+			Yyerror("constant %v%vi truncated to integer", Fconv(&v.U.(*Mpcplx).Real, obj.FmtSharp), Fconv(&v.U.(*Mpcplx).Imag, obj.FmtSharp|obj.FmtSign))
+		}
+		if mpcmpfltc(&v.U.(*Mpcplx).Imag, 0) != 0 {
+			Yyerror("constant %v%vi truncated to real", Fconv(&v.U.(*Mpcplx).Real, obj.FmtSharp), Fconv(&v.U.(*Mpcplx).Imag, obj.FmtSharp|obj.FmtSign))
+		}
+		v.Ctype = CTINT
+		v.U = i
+	}
+
+	return v
+}
+
+func doesoverflow(v Val, t *Type) bool {
+	switch v.Ctype {
+	case CTINT, CTRUNE:
+		if !Isint[t.Etype] {
+			Fatal("overflow: %v integer constant", t)
+		}
+		if Mpcmpfixfix(v.U.(*Mpint), Minintval[t.Etype]) < 0 || Mpcmpfixfix(v.U.(*Mpint), Maxintval[t.Etype]) > 0 {
+			return true
+		}
+
+	case CTFLT:
+		if !Isfloat[t.Etype] {
+			Fatal("overflow: %v floating-point constant", t)
+		}
+		if mpcmpfltflt(v.U.(*Mpflt), minfltval[t.Etype]) <= 0 || mpcmpfltflt(v.U.(*Mpflt), maxfltval[t.Etype]) >= 0 {
+			return true
+		}
+
+	case CTCPLX:
+		if !Iscomplex[t.Etype] {
+			Fatal("overflow: %v complex constant", t)
+		}
+		if mpcmpfltflt(&v.U.(*Mpcplx).Real, minfltval[t.Etype]) <= 0 || mpcmpfltflt(&v.U.(*Mpcplx).Real, maxfltval[t.Etype]) >= 0 || mpcmpfltflt(&v.U.(*Mpcplx).Imag, minfltval[t.Etype]) <= 0 || mpcmpfltflt(&v.U.(*Mpcplx).Imag, maxfltval[t.Etype]) >= 0 {
+			return true
+		}
+	}
+
+	return false
+}
+
+func overflow(v Val, t *Type) {
+	// v has already been converted
+	// to appropriate form for t.
+	if t == nil || t.Etype == TIDEAL {
+		return
+	}
+
+	// Only uintptrs may be converted to unsafe.Pointer, which cannot overflow.
+	if t.Etype == TUNSAFEPTR {
+		return
+	}
+
+	if !doesoverflow(v, t) {
+		return
+	}
+
+	switch v.Ctype {
+	case CTINT, CTRUNE:
+		Yyerror("constant %v overflows %v", v.U.(*Mpint), t)
+
+	case CTFLT:
+		Yyerror("constant %v overflows %v", Fconv(v.U.(*Mpflt), obj.FmtSharp), t)
+
+	case CTCPLX:
+		Yyerror("constant %v%vi overflows %v", Fconv(&v.U.(*Mpcplx).Real, obj.FmtSharp), Fconv(&v.U.(*Mpcplx).Imag, obj.FmtSharp|obj.FmtSign), t)
+	}
+}
+
+func tostr(v Val) Val {
+	switch v.Ctype {
+	case CTINT, CTRUNE:
+		if Mpcmpfixfix(v.U.(*Mpint), Minintval[TINT]) < 0 || Mpcmpfixfix(v.U.(*Mpint), Maxintval[TINT]) > 0 {
+			Yyerror("overflow in int -> string")
+		}
+		r := uint(Mpgetfix(v.U.(*Mpint)))
+		v = Val{}
+		v.Ctype = CTSTR
+		v.U = string(r)
+
+	case CTFLT:
+		Yyerror("no float -> string")
+		fallthrough
+
+	case CTNIL:
+		v = Val{}
+		v.Ctype = CTSTR
+		v.U = ""
+	}
+
+	return v
+}
+
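+// For example, tostr(65) yields "A" and tostr(0x4e2d) yields "中":
+// the integer is interpreted as a rune and converted to a one-rune
+// string, after a range check against the int type.
+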
+func consttype(n *Node) int {
+	if n == nil || n.Op != OLITERAL {
+		return -1
+	}
+	return int(n.Val.Ctype)
+}
+
+func Isconst(n *Node, ct int) bool {
+	t := consttype(n)
+
+	// If the caller is asking for CTINT, allow CTRUNE too.
+	// Makes life easier for back ends.
+	return t == ct || (ct == CTINT && t == CTRUNE)
+}
+
+func saveorig(n *Node) *Node {
+	if n == n.Orig {
+		// duplicate node for n->orig.
+		n1 := Nod(OLITERAL, nil, nil)
+
+		n.Orig = n1
+		*n1 = *n
+	}
+
+	return n.Orig
+}
+
+/*
+ * if n is constant, rewrite as OLITERAL node.
+ */
+func evconst(n *Node) {
+	// pick off just the opcodes that can be
+	// constant evaluated.
+	switch n.Op {
+	default:
+		return
+
+	case OADD,
+		OAND,
+		OANDAND,
+		OANDNOT,
+		OARRAYBYTESTR,
+		OCOM,
+		ODIV,
+		OEQ,
+		OGE,
+		OGT,
+		OLE,
+		OLSH,
+		OLT,
+		OMINUS,
+		OMOD,
+		OMUL,
+		ONE,
+		ONOT,
+		OOR,
+		OOROR,
+		OPLUS,
+		ORSH,
+		OSUB,
+		OXOR:
+		break
+
+	case OCONV:
+		if n.Type == nil {
+			return
+		}
+		if !okforconst[n.Type.Etype] && n.Type.Etype != TNIL {
+			return
+		}
+
+		// merge adjacent constants in the argument list.
+	case OADDSTR:
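+		// For example, in "a" + "b" + v + "c" + "d" the list
+		// ["a", "b", v, "c", "d"] becomes ["ab", v, "cd"]:
+		// only runs of adjacent constants can be joined, since
+		// the value of v is unknown at compile time.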
+		var nr *Node
+		var nl *Node
+		var l2 *NodeList
+		for l1 := n.List; l1 != nil; l1 = l1.Next {
+			if Isconst(l1.N, CTSTR) && l1.Next != nil && Isconst(l1.Next.N, CTSTR) {
+				// merge from l1 up to but not including l2
+				var strs []string
+				l2 = l1
+				for l2 != nil && Isconst(l2.N, CTSTR) {
+					nr = l2.N
+					strs = append(strs, nr.Val.U.(string))
+					l2 = l2.Next
+				}
+
+				nl = Nod(OXXX, nil, nil)
+				*nl = *l1.N
+				nl.Orig = nl
+				nl.Val.Ctype = CTSTR
+				nl.Val.U = strings.Join(strs, "")
+				l1.N = nl
+				l1.Next = l2
+			}
+		}
+
+		// fix list end pointer.
+		for l2 := n.List; l2 != nil; l2 = l2.Next {
+			n.List.End = l2
+		}
+
+		// collapse single-constant list to single constant.
+		if count(n.List) == 1 && Isconst(n.List.N, CTSTR) {
+			n.Op = OLITERAL
+			n.Val = n.List.N.Val
+		}
+
+		return
+	}
+
+	nl := n.Left
+	if nl == nil || nl.Type == nil {
+		return
+	}
+	if consttype(nl) < 0 {
+		return
+	}
+	wl := int(nl.Type.Etype)
+	if Isint[wl] || Isfloat[wl] || Iscomplex[wl] {
+		wl = TIDEAL
+	}
+
+	nr := n.Right
+	var rv Val
+	var lno int
+	var wr int
+	var v Val
+	var norig *Node
+	if nr == nil {
+		// copy numeric value to avoid modifying
+		// nl, in case someone still refers to it (e.g. iota).
+		v = nl.Val
+
+		if wl == TIDEAL {
+			v = copyval(v)
+		}
+
+		switch uint32(n.Op)<<16 | uint32(v.Ctype) {
+		default:
+			if n.Diag == 0 {
+				Yyerror("illegal constant expression %v %v", Oconv(int(n.Op), 0), nl.Type)
+				n.Diag = 1
+			}
+
+			return
+
+		case OCONV<<16 | CTNIL,
+			OARRAYBYTESTR<<16 | CTNIL:
+			if n.Type.Etype == TSTRING {
+				v = tostr(v)
+				nl.Type = n.Type
+				break
+			}
+			fallthrough
+		case OCONV<<16 | CTINT,
+			OCONV<<16 | CTRUNE,
+			OCONV<<16 | CTFLT,
+			OCONV<<16 | CTSTR:
+			convlit1(&nl, n.Type, true)
+
+			v = nl.Val
+
+		case OPLUS<<16 | CTINT,
+			OPLUS<<16 | CTRUNE:
+			break
+
+		case OMINUS<<16 | CTINT,
+			OMINUS<<16 | CTRUNE:
+			mpnegfix(v.U.(*Mpint))
+
+		case OCOM<<16 | CTINT,
+			OCOM<<16 | CTRUNE:
+			et := Txxx
+			if nl.Type != nil {
+				et = int(nl.Type.Etype)
+			}
+
+			// calculate the mask in b
+			// result will be (a ^ mask)
+			var b Mpint
+			switch et {
+			// signed guys change sign
+			default:
+				Mpmovecfix(&b, -1)
+
+				// unsigned guys invert their bits
+			case TUINT8,
+				TUINT16,
+				TUINT32,
+				TUINT64,
+				TUINT,
+				TUINTPTR:
+				mpmovefixfix(&b, Maxintval[et])
+			}
+
+			mpxorfixfix(v.U.(*Mpint), &b)
+
+		case OPLUS<<16 | CTFLT:
+			break
+
+		case OMINUS<<16 | CTFLT:
+			mpnegflt(v.U.(*Mpflt))
+
+		case OPLUS<<16 | CTCPLX:
+			break
+
+		case OMINUS<<16 | CTCPLX:
+			mpnegflt(&v.U.(*Mpcplx).Real)
+			mpnegflt(&v.U.(*Mpcplx).Imag)
+
+		case ONOT<<16 | CTBOOL:
+			if !v.U.(bool) {
+				goto settrue
+			}
+			goto setfalse
+		}
+		goto ret
+	}
+	if nr.Type == nil {
+		return
+	}
+	if consttype(nr) < 0 {
+		return
+	}
+	wr = int(nr.Type.Etype)
+	if Isint[wr] || Isfloat[wr] || Iscomplex[wr] {
+		wr = TIDEAL
+	}
+
+	// check for compatible general types (numeric, string, etc)
+	if wl != wr {
+		goto illegal
+	}
+
+	// check for compatible types.
+	switch n.Op {
+	// ideal const mixes with anything but otherwise must match.
+	default:
+		if nl.Type.Etype != TIDEAL {
+			defaultlit(&nr, nl.Type)
+			n.Right = nr
+		}
+
+		if nr.Type.Etype != TIDEAL {
+			defaultlit(&nl, nr.Type)
+			n.Left = nl
+		}
+
+		if nl.Type.Etype != nr.Type.Etype {
+			goto illegal
+		}
+
+		// right must be unsigned.
+	// left can be ideal.
+	case OLSH, ORSH:
+		defaultlit(&nr, Types[TUINT])
+
+		n.Right = nr
+		if nr.Type != nil && (Issigned[nr.Type.Etype] || !Isint[nr.Type.Etype]) {
+			goto illegal
+		}
+		if nl.Val.Ctype != CTRUNE {
+			nl.Val = toint(nl.Val)
+		}
+		nr.Val = toint(nr.Val)
+	}
+
+	// copy numeric value to avoid modifying
+	// n->left, in case someone still refers to it (e.g. iota).
+	v = nl.Val
+
+	if wl == TIDEAL {
+		v = copyval(v)
+	}
+
+	rv = nr.Val
+
+	// convert to common ideal
+	if v.Ctype == CTCPLX || rv.Ctype == CTCPLX {
+		v = tocplx(v)
+		rv = tocplx(rv)
+	}
+
+	if v.Ctype == CTFLT || rv.Ctype == CTFLT {
+		v = toflt(v)
+		rv = toflt(rv)
+	}
+
+	// Rune and int turn into rune.
+	if v.Ctype == CTRUNE && rv.Ctype == CTINT {
+		rv.Ctype = CTRUNE
+	}
+	if v.Ctype == CTINT && rv.Ctype == CTRUNE {
+		if n.Op == OLSH || n.Op == ORSH {
+			rv.Ctype = CTINT
+		} else {
+			v.Ctype = CTRUNE
+		}
+	}
+
+	if v.Ctype != rv.Ctype {
+		// Use of undefined name as constant?
+		if (v.Ctype == 0 || rv.Ctype == 0) && nerrors > 0 {
+			return
+		}
+		Fatal("constant type mismatch %v(%d) %v(%d)", nl.Type, v.Ctype, nr.Type, rv.Ctype)
+	}
+
+	// run op
+	switch uint32(n.Op)<<16 | uint32(v.Ctype) {
+	default:
+		goto illegal
+
+	case OADD<<16 | CTINT,
+		OADD<<16 | CTRUNE:
+		mpaddfixfix(v.U.(*Mpint), rv.U.(*Mpint), 0)
+
+	case OSUB<<16 | CTINT,
+		OSUB<<16 | CTRUNE:
+		mpsubfixfix(v.U.(*Mpint), rv.U.(*Mpint))
+
+	case OMUL<<16 | CTINT,
+		OMUL<<16 | CTRUNE:
+		mpmulfixfix(v.U.(*Mpint), rv.U.(*Mpint))
+
+	case ODIV<<16 | CTINT,
+		ODIV<<16 | CTRUNE:
+		if mpcmpfixc(rv.U.(*Mpint), 0) == 0 {
+			Yyerror("division by zero")
+			mpsetovf(v.U.(*Mpint))
+			break
+		}
+
+		mpdivfixfix(v.U.(*Mpint), rv.U.(*Mpint))
+
+	case OMOD<<16 | CTINT,
+		OMOD<<16 | CTRUNE:
+		if mpcmpfixc(rv.U.(*Mpint), 0) == 0 {
+			Yyerror("division by zero")
+			mpsetovf(v.U.(*Mpint))
+			break
+		}
+
+		mpmodfixfix(v.U.(*Mpint), rv.U.(*Mpint))
+
+	case OLSH<<16 | CTINT,
+		OLSH<<16 | CTRUNE:
+		mplshfixfix(v.U.(*Mpint), rv.U.(*Mpint))
+
+	case ORSH<<16 | CTINT,
+		ORSH<<16 | CTRUNE:
+		mprshfixfix(v.U.(*Mpint), rv.U.(*Mpint))
+
+	case OOR<<16 | CTINT,
+		OOR<<16 | CTRUNE:
+		mporfixfix(v.U.(*Mpint), rv.U.(*Mpint))
+
+	case OAND<<16 | CTINT,
+		OAND<<16 | CTRUNE:
+		mpandfixfix(v.U.(*Mpint), rv.U.(*Mpint))
+
+	case OANDNOT<<16 | CTINT,
+		OANDNOT<<16 | CTRUNE:
+		mpandnotfixfix(v.U.(*Mpint), rv.U.(*Mpint))
+
+	case OXOR<<16 | CTINT,
+		OXOR<<16 | CTRUNE:
+		mpxorfixfix(v.U.(*Mpint), rv.U.(*Mpint))
+
+	case OADD<<16 | CTFLT:
+		mpaddfltflt(v.U.(*Mpflt), rv.U.(*Mpflt))
+
+	case OSUB<<16 | CTFLT:
+		mpsubfltflt(v.U.(*Mpflt), rv.U.(*Mpflt))
+
+	case OMUL<<16 | CTFLT:
+		mpmulfltflt(v.U.(*Mpflt), rv.U.(*Mpflt))
+
+	case ODIV<<16 | CTFLT:
+		if mpcmpfltc(rv.U.(*Mpflt), 0) == 0 {
+			Yyerror("division by zero")
+			Mpmovecflt(v.U.(*Mpflt), 1.0)
+			break
+		}
+
+		mpdivfltflt(v.U.(*Mpflt), rv.U.(*Mpflt))
+
+		// The default case above would print 'ideal % ideal',
+	// which is not quite an ideal error.
+	case OMOD<<16 | CTFLT:
+		if n.Diag == 0 {
+			Yyerror("illegal constant expression: floating-point %% operation")
+			n.Diag = 1
+		}
+
+		return
+
+	case OADD<<16 | CTCPLX:
+		mpaddfltflt(&v.U.(*Mpcplx).Real, &rv.U.(*Mpcplx).Real)
+		mpaddfltflt(&v.U.(*Mpcplx).Imag, &rv.U.(*Mpcplx).Imag)
+
+	case OSUB<<16 | CTCPLX:
+		mpsubfltflt(&v.U.(*Mpcplx).Real, &rv.U.(*Mpcplx).Real)
+		mpsubfltflt(&v.U.(*Mpcplx).Imag, &rv.U.(*Mpcplx).Imag)
+
+	case OMUL<<16 | CTCPLX:
+		cmplxmpy(v.U.(*Mpcplx), rv.U.(*Mpcplx))
+
+	case ODIV<<16 | CTCPLX:
+		if mpcmpfltc(&rv.U.(*Mpcplx).Real, 0) == 0 && mpcmpfltc(&rv.U.(*Mpcplx).Imag, 0) == 0 {
+			Yyerror("complex division by zero")
+			Mpmovecflt(&rv.U.(*Mpcplx).Real, 1.0)
+			Mpmovecflt(&rv.U.(*Mpcplx).Imag, 0.0)
+			break
+		}
+
+		cmplxdiv(v.U.(*Mpcplx), rv.U.(*Mpcplx))
+
+	case OEQ<<16 | CTNIL:
+		goto settrue
+
+	case ONE<<16 | CTNIL:
+		goto setfalse
+
+	case OEQ<<16 | CTINT,
+		OEQ<<16 | CTRUNE:
+		if Mpcmpfixfix(v.U.(*Mpint), rv.U.(*Mpint)) == 0 {
+			goto settrue
+		}
+		goto setfalse
+
+	case ONE<<16 | CTINT,
+		ONE<<16 | CTRUNE:
+		if Mpcmpfixfix(v.U.(*Mpint), rv.U.(*Mpint)) != 0 {
+			goto settrue
+		}
+		goto setfalse
+
+	case OLT<<16 | CTINT,
+		OLT<<16 | CTRUNE:
+		if Mpcmpfixfix(v.U.(*Mpint), rv.U.(*Mpint)) < 0 {
+			goto settrue
+		}
+		goto setfalse
+
+	case OLE<<16 | CTINT,
+		OLE<<16 | CTRUNE:
+		if Mpcmpfixfix(v.U.(*Mpint), rv.U.(*Mpint)) <= 0 {
+			goto settrue
+		}
+		goto setfalse
+
+	case OGE<<16 | CTINT,
+		OGE<<16 | CTRUNE:
+		if Mpcmpfixfix(v.U.(*Mpint), rv.U.(*Mpint)) >= 0 {
+			goto settrue
+		}
+		goto setfalse
+
+	case OGT<<16 | CTINT,
+		OGT<<16 | CTRUNE:
+		if Mpcmpfixfix(v.U.(*Mpint), rv.U.(*Mpint)) > 0 {
+			goto settrue
+		}
+		goto setfalse
+
+	case OEQ<<16 | CTFLT:
+		if mpcmpfltflt(v.U.(*Mpflt), rv.U.(*Mpflt)) == 0 {
+			goto settrue
+		}
+		goto setfalse
+
+	case ONE<<16 | CTFLT:
+		if mpcmpfltflt(v.U.(*Mpflt), rv.U.(*Mpflt)) != 0 {
+			goto settrue
+		}
+		goto setfalse
+
+	case OLT<<16 | CTFLT:
+		if mpcmpfltflt(v.U.(*Mpflt), rv.U.(*Mpflt)) < 0 {
+			goto settrue
+		}
+		goto setfalse
+
+	case OLE<<16 | CTFLT:
+		if mpcmpfltflt(v.U.(*Mpflt), rv.U.(*Mpflt)) <= 0 {
+			goto settrue
+		}
+		goto setfalse
+
+	case OGE<<16 | CTFLT:
+		if mpcmpfltflt(v.U.(*Mpflt), rv.U.(*Mpflt)) >= 0 {
+			goto settrue
+		}
+		goto setfalse
+
+	case OGT<<16 | CTFLT:
+		if mpcmpfltflt(v.U.(*Mpflt), rv.U.(*Mpflt)) > 0 {
+			goto settrue
+		}
+		goto setfalse
+
+	case OEQ<<16 | CTCPLX:
+		if mpcmpfltflt(&v.U.(*Mpcplx).Real, &rv.U.(*Mpcplx).Real) == 0 && mpcmpfltflt(&v.U.(*Mpcplx).Imag, &rv.U.(*Mpcplx).Imag) == 0 {
+			goto settrue
+		}
+		goto setfalse
+
+	case ONE<<16 | CTCPLX:
+		if mpcmpfltflt(&v.U.(*Mpcplx).Real, &rv.U.(*Mpcplx).Real) != 0 || mpcmpfltflt(&v.U.(*Mpcplx).Imag, &rv.U.(*Mpcplx).Imag) != 0 {
+			goto settrue
+		}
+		goto setfalse
+
+	case OEQ<<16 | CTSTR:
+		if cmpslit(nl, nr) == 0 {
+			goto settrue
+		}
+		goto setfalse
+
+	case ONE<<16 | CTSTR:
+		if cmpslit(nl, nr) != 0 {
+			goto settrue
+		}
+		goto setfalse
+
+	case OLT<<16 | CTSTR:
+		if cmpslit(nl, nr) < 0 {
+			goto settrue
+		}
+		goto setfalse
+
+	case OLE<<16 | CTSTR:
+		if cmpslit(nl, nr) <= 0 {
+			goto settrue
+		}
+		goto setfalse
+
+	case OGE<<16 | CTSTR:
+		if cmpslit(nl, nr) >= 0 {
+			goto settrue
+		}
+		goto setfalse
+
+	case OGT<<16 | CTSTR:
+		if cmpslit(nl, nr) > 0 {
+			goto settrue
+		}
+		goto setfalse
+
+	case OOROR<<16 | CTBOOL:
+		if v.U.(bool) || rv.U.(bool) {
+			goto settrue
+		}
+		goto setfalse
+
+	case OANDAND<<16 | CTBOOL:
+		if v.U.(bool) && rv.U.(bool) {
+			goto settrue
+		}
+		goto setfalse
+
+	case OEQ<<16 | CTBOOL:
+		if v.U.(bool) == rv.U.(bool) {
+			goto settrue
+		}
+		goto setfalse
+
+	case ONE<<16 | CTBOOL:
+		if v.U.(bool) != rv.U.(bool) {
+			goto settrue
+		}
+		goto setfalse
+	}
+
+	goto ret
+
+ret:
+	norig = saveorig(n)
+	*n = *nl
+
+	// restore value of n->orig.
+	n.Orig = norig
+
+	n.Val = v
+
+	// check range.
+	lno = int(setlineno(n))
+
+	overflow(v, n.Type)
+	lineno = int32(lno)
+
+	// truncate precision for non-ideal float.
+	if v.Ctype == CTFLT && n.Type.Etype != TIDEAL {
+		n.Val.U = truncfltlit(v.U.(*Mpflt), n.Type)
+	}
+	return
+
+settrue:
+	norig = saveorig(n)
+	*n = *Nodbool(true)
+	n.Orig = norig
+	return
+
+setfalse:
+	norig = saveorig(n)
+	*n = *Nodbool(false)
+	n.Orig = norig
+	return
+
+illegal:
+	if n.Diag == 0 {
+		Yyerror("illegal constant expression: %v %v %v", nl.Type, Oconv(int(n.Op), 0), nr.Type)
+		n.Diag = 1
+	}
+
+	return
+}
+
+func nodlit(v Val) *Node {
+	n := Nod(OLITERAL, nil, nil)
+	n.Val = v
+	switch v.Ctype {
+	default:
+		Fatal("nodlit ctype %d", v.Ctype)
+
+	case CTSTR:
+		n.Type = idealstring
+
+	case CTBOOL:
+		n.Type = idealbool
+
+	case CTINT, CTRUNE, CTFLT, CTCPLX:
+		n.Type = Types[TIDEAL]
+
+	case CTNIL:
+		n.Type = Types[TNIL]
+	}
+
+	return n
+}
+
+func nodcplxlit(r Val, i Val) *Node {
+	r = toflt(r)
+	i = toflt(i)
+
+	c := new(Mpcplx)
+	n := Nod(OLITERAL, nil, nil)
+	n.Type = Types[TIDEAL]
+	n.Val.U = c
+	n.Val.Ctype = CTCPLX
+
+	if r.Ctype != CTFLT || i.Ctype != CTFLT {
+		Fatal("nodcplxlit ctype %d/%d", r.Ctype, i.Ctype)
+	}
+
+	mpmovefltflt(&c.Real, r.U.(*Mpflt))
+	mpmovefltflt(&c.Imag, i.U.(*Mpflt))
+	return n
+}
+
+// idealkind returns a constant kind like consttype
+// but for an arbitrary "ideal" (untyped constant) expression.
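+//
+// For example, idealkind reports CTRUNE for 'a'+1 and CTFLT for
+// 1+2.5, following the ordering CTINT < CTRUNE < CTFLT < CTCPLX:
+// the "larger" of the two operand kinds wins.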
+func idealkind(n *Node) int {
+	if n == nil || !isideal(n.Type) {
+		return CTxxx
+	}
+
+	switch n.Op {
+	default:
+		return CTxxx
+
+	case OLITERAL:
+		return int(n.Val.Ctype)
+
+		// numeric kinds.
+	case OADD,
+		OAND,
+		OANDNOT,
+		OCOM,
+		ODIV,
+		OMINUS,
+		OMOD,
+		OMUL,
+		OSUB,
+		OXOR,
+		OOR,
+		OPLUS:
+		k1 := idealkind(n.Left)
+
+		k2 := idealkind(n.Right)
+		if k1 > k2 {
+			return k1
+		}
+		return k2
+
+	case OREAL, OIMAG:
+		return CTFLT
+
+	case OCOMPLEX:
+		return CTCPLX
+
+	case OADDSTR:
+		return CTSTR
+
+	case OANDAND,
+		OEQ,
+		OGE,
+		OGT,
+		OLE,
+		OLT,
+		ONE,
+		ONOT,
+		OOROR,
+		OCMPSTR,
+		OCMPIFACE:
+		return CTBOOL
+
+		// shifts (beware!).
+	case OLSH, ORSH:
+		return idealkind(n.Left)
+	}
+}
+
+func defaultlit(np **Node, t *Type) {
+	n := *np
+	if n == nil || !isideal(n.Type) {
+		return
+	}
+
+	if n.Op == OLITERAL {
+		nn := Nod(OXXX, nil, nil)
+		*nn = *n
+		n = nn
+		*np = n
+	}
+
+	lno := int(setlineno(n))
+	ctype := idealkind(n)
+	var t1 *Type
+	switch ctype {
+	default:
+		if t != nil {
+			Convlit(np, t)
+			return
+		}
+
+		if n.Val.Ctype == CTNIL {
+			lineno = int32(lno)
+			if n.Diag == 0 {
+				Yyerror("use of untyped nil")
+				n.Diag = 1
+			}
+
+			n.Type = nil
+			break
+		}
+
+		if n.Val.Ctype == CTSTR {
+			t1 := Types[TSTRING]
+			Convlit(np, t1)
+			break
+		}
+
+		Yyerror("defaultlit: unknown literal: %v", n)
+
+	case CTxxx:
+		Fatal("defaultlit: idealkind is CTxxx: %v", Nconv(n, obj.FmtSign))
+
+	case CTBOOL:
+		t1 := Types[TBOOL]
+		if t != nil && t.Etype == TBOOL {
+			t1 = t
+		}
+		Convlit(np, t1)
+
+	case CTINT:
+		t1 = Types[TINT]
+		goto num
+
+	case CTRUNE:
+		t1 = runetype
+		goto num
+
+	case CTFLT:
+		t1 = Types[TFLOAT64]
+		goto num
+
+	case CTCPLX:
+		t1 = Types[TCOMPLEX128]
+		goto num
+	}
+
+	lineno = int32(lno)
+	return
+
+num:
+	if t != nil {
+		if Isint[t.Etype] {
+			t1 = t
+			n.Val = toint(n.Val)
+		} else if Isfloat[t.Etype] {
+			t1 = t
+			n.Val = toflt(n.Val)
+		} else if Iscomplex[t.Etype] {
+			t1 = t
+			n.Val = tocplx(n.Val)
+		}
+	}
+
+	overflow(n.Val, t1)
+	Convlit(np, t1)
+	lineno = int32(lno)
+	return
+}
+
+/*
+ * defaultlit on both nodes simultaneously;
+ * if they're both ideal going in they better
+ * get the same type going out.
+ * force means must assign concrete (non-ideal) type.
+ */
+func defaultlit2(lp **Node, rp **Node, force int) {
+	l := *lp
+	r := *rp
+	if l.Type == nil || r.Type == nil {
+		return
+	}
+	if !isideal(l.Type) {
+		Convlit(rp, l.Type)
+		return
+	}
+
+	if !isideal(r.Type) {
+		Convlit(lp, r.Type)
+		return
+	}
+
+	if force == 0 {
+		return
+	}
+	if l.Type.Etype == TBOOL {
+		Convlit(lp, Types[TBOOL])
+		Convlit(rp, Types[TBOOL])
+	}
+
+	lkind := idealkind(l)
+	rkind := idealkind(r)
+	if lkind == CTCPLX || rkind == CTCPLX {
+		Convlit(lp, Types[TCOMPLEX128])
+		Convlit(rp, Types[TCOMPLEX128])
+		return
+	}
+
+	if lkind == CTFLT || rkind == CTFLT {
+		Convlit(lp, Types[TFLOAT64])
+		Convlit(rp, Types[TFLOAT64])
+		return
+	}
+
+	if lkind == CTRUNE || rkind == CTRUNE {
+		Convlit(lp, runetype)
+		Convlit(rp, runetype)
+		return
+	}
+
+	Convlit(lp, Types[TINT])
+	Convlit(rp, Types[TINT])
+}
+
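+// For example, in 'a' + 1.5 both operands are ideal, so defaultlit2
+// with force != 0 picks the widest kind and both sides become
+// float64. In x + 1 with x already a float32, the typed side wins
+// and the 1 is converted to float32 instead.
+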
+func cmpslit(l, r *Node) int {
+	return stringsCompare(l.Val.U.(string), r.Val.U.(string))
+}
+
+func Smallintconst(n *Node) bool {
+	if n.Op == OLITERAL && Isconst(n, CTINT) && n.Type != nil {
+		switch Simtype[n.Type.Etype] {
+		case TINT8,
+			TUINT8,
+			TINT16,
+			TUINT16,
+			TINT32,
+			TUINT32,
+			TBOOL,
+			TPTR32:
+			return true
+
+		case TIDEAL, TINT64, TUINT64, TPTR64:
+			if Mpcmpfixfix(n.Val.U.(*Mpint), Minintval[TINT32]) < 0 || Mpcmpfixfix(n.Val.U.(*Mpint), Maxintval[TINT32]) > 0 {
+				break
+			}
+			return true
+		}
+	}
+
+	return false
+}
+
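+// For example, Smallintconst reports true for 1<<20 and false for
+// 1<<40: values of 64-bit or ideal type must fit in the int32 range
+// to qualify, so back ends can safely use them as immediate operands.
+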
+func nonnegconst(n *Node) int {
+	if n.Op == OLITERAL && n.Type != nil {
+		switch Simtype[n.Type.Etype] {
+		// check negative and 2^31
+		case TINT8,
+			TUINT8,
+			TINT16,
+			TUINT16,
+			TINT32,
+			TUINT32,
+			TINT64,
+			TUINT64,
+			TIDEAL:
+			if Mpcmpfixfix(n.Val.U.(*Mpint), Minintval[TUINT32]) < 0 || Mpcmpfixfix(n.Val.U.(*Mpint), Maxintval[TINT32]) > 0 {
+				break
+			}
+			return int(Mpgetfix(n.Val.U.(*Mpint)))
+		}
+	}
+
+	return -1
+}
+
+/*
+ * convert x to type et and back to int64
+ * for sign extension and truncation.
+ */
+func iconv(x int64, et int) int64 {
+	switch et {
+	case TINT8:
+		x = int64(int8(x))
+
+	case TUINT8:
+		x = int64(uint8(x))
+
+	case TINT16:
+		x = int64(int16(x))
+
+	case TUINT16:
+		x = int64(uint16(x))
+
+	case TINT32:
+		x = int64(int32(x))
+
+	case TUINT32:
+		x = int64(uint32(x))
+
+	case TINT64, TUINT64:
+		break
+	}
+
+	return x
+}
+
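+// For example, iconv(300, TINT8) is 44 (300&0xff = 0x2c) and
+// iconv(-1, TUINT8) is 255: the value is truncated to the target
+// width and then sign- or zero-extended back to int64.
+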
+// Convconst converts constant node n to type t and
+// places the result in con.
+func (n *Node) Convconst(con *Node, t *Type) {
+	tt := Simsimtype(t)
+
+	// copy the constant for conversion
+	Nodconst(con, Types[TINT8], 0)
+
+	con.Type = t
+	con.Val = n.Val
+
+	if Isint[tt] {
+		con.Val.Ctype = CTINT
+		con.Val.U = new(Mpint)
+		var i int64
+		switch n.Val.Ctype {
+		default:
+			Fatal("convconst ctype=%d %v", n.Val.Ctype, Tconv(t, obj.FmtLong))
+
+		case CTINT, CTRUNE:
+			i = Mpgetfix(n.Val.U.(*Mpint))
+
+		case CTBOOL:
+			i = int64(obj.Bool2int(n.Val.U.(bool)))
+
+		case CTNIL:
+			i = 0
+		}
+
+		i = iconv(i, tt)
+		Mpmovecfix(con.Val.U.(*Mpint), i)
+		return
+	}
+
+	if Isfloat[tt] {
+		con.Val = toflt(con.Val)
+		if con.Val.Ctype != CTFLT {
+			Fatal("convconst ctype=%d %v", con.Val.Ctype, t)
+		}
+		if tt == TFLOAT32 {
+			con.Val.U = truncfltlit(con.Val.U.(*Mpflt), t)
+		}
+		return
+	}
+
+	if Iscomplex[tt] {
+		con.Val = tocplx(con.Val)
+		if tt == TCOMPLEX64 {
+			con.Val.U.(*Mpcplx).Real = *truncfltlit(&con.Val.U.(*Mpcplx).Real, Types[TFLOAT32])
+			con.Val.U.(*Mpcplx).Imag = *truncfltlit(&con.Val.U.(*Mpcplx).Imag, Types[TFLOAT32])
+		}
+		return
+	}
+
+	Fatal("convconst %v constant", Tconv(t, obj.FmtLong))
+}
+
+// complex multiply v *= rv
+//	(a, b) * (c, d) = (a*c - b*d, b*c + a*d)
+func cmplxmpy(v *Mpcplx, rv *Mpcplx) {
+	var ac Mpflt
+	var bd Mpflt
+	var bc Mpflt
+	var ad Mpflt
+
+	mpmovefltflt(&ac, &v.Real)
+	mpmulfltflt(&ac, &rv.Real) // ac
+
+	mpmovefltflt(&bd, &v.Imag)
+
+	mpmulfltflt(&bd, &rv.Imag) // bd
+
+	mpmovefltflt(&bc, &v.Imag)
+
+	mpmulfltflt(&bc, &rv.Real) // bc
+
+	mpmovefltflt(&ad, &v.Real)
+
+	mpmulfltflt(&ad, &rv.Imag) // ad
+
+	mpmovefltflt(&v.Real, &ac)
+
+	mpsubfltflt(&v.Real, &bd) // ac-bd
+
+	mpmovefltflt(&v.Imag, &bc)
+
+	mpaddfltflt(&v.Imag, &ad) // bc+ad
+}
+
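+// For example, with v = 1+2i and rv = 3+4i:
+//	ac-bd = 1*3 - 2*4 = -5
+//	bc+ad = 2*3 + 1*4 = 10
+// so v becomes -5+10i.
+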
+// complex divide v /= rv
+//	(a, b) / (c, d) = ((a*c + b*d), (b*c - a*d))/(c*c + d*d)
+func cmplxdiv(v *Mpcplx, rv *Mpcplx) {
+	var ac Mpflt
+	var bd Mpflt
+	var bc Mpflt
+	var ad Mpflt
+	var cc_plus_dd Mpflt
+
+	mpmovefltflt(&cc_plus_dd, &rv.Real)
+	mpmulfltflt(&cc_plus_dd, &rv.Real) // cc
+
+	mpmovefltflt(&ac, &rv.Imag)
+
+	mpmulfltflt(&ac, &rv.Imag) // dd
+
+	mpaddfltflt(&cc_plus_dd, &ac) // cc+dd
+
+	mpmovefltflt(&ac, &v.Real)
+
+	mpmulfltflt(&ac, &rv.Real) // ac
+
+	mpmovefltflt(&bd, &v.Imag)
+
+	mpmulfltflt(&bd, &rv.Imag) // bd
+
+	mpmovefltflt(&bc, &v.Imag)
+
+	mpmulfltflt(&bc, &rv.Real) // bc
+
+	mpmovefltflt(&ad, &v.Real)
+
+	mpmulfltflt(&ad, &rv.Imag) // ad
+
+	mpmovefltflt(&v.Real, &ac)
+
+	mpaddfltflt(&v.Real, &bd)         // ac+bd
+	mpdivfltflt(&v.Real, &cc_plus_dd) // (ac+bd)/(cc+dd)
+
+	mpmovefltflt(&v.Imag, &bc)
+
+	mpsubfltflt(&v.Imag, &ad)         // bc-ad
+	mpdivfltflt(&v.Imag, &cc_plus_dd) // (bc-ad)/(cc+dd)
+}
+
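+// For example, with v = 1+2i and rv = 3+4i:
+//	cc+dd = 9+16 = 25
+//	(ac+bd)/(cc+dd) = (3+8)/25 = 0.44
+//	(bc-ad)/(cc+dd) = (6-4)/25 = 0.08
+// so v becomes 0.44+0.08i.
+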
+// Is n a Go language constant (as opposed to a compile-time constant)?
+// Expressions derived from nil, like string([]byte(nil)), while they
+// may be known at compile time, are not Go language constants.
+// Only called for expressions known to evaluate to compile-time
+// constants.
+func isgoconst(n *Node) bool {
+	if n.Orig != nil {
+		n = n.Orig
+	}
+
+	switch n.Op {
+	case OADD,
+		OADDSTR,
+		OAND,
+		OANDAND,
+		OANDNOT,
+		OCOM,
+		ODIV,
+		OEQ,
+		OGE,
+		OGT,
+		OLE,
+		OLSH,
+		OLT,
+		OMINUS,
+		OMOD,
+		OMUL,
+		ONE,
+		ONOT,
+		OOR,
+		OOROR,
+		OPLUS,
+		ORSH,
+		OSUB,
+		OXOR,
+		OIOTA,
+		OCOMPLEX,
+		OREAL,
+		OIMAG:
+		if isgoconst(n.Left) && (n.Right == nil || isgoconst(n.Right)) {
+			return true
+		}
+
+	case OCONV:
+		if okforconst[n.Type.Etype] && isgoconst(n.Left) {
+			return true
+		}
+
+	case OLEN, OCAP:
+		l := n.Left
+		if isgoconst(l) {
+			return true
+		}
+
+		// Special case: len/cap is constant when applied to array or
+		// pointer to array when the expression does not contain
+		// function calls or channel receive operations.
+		t := l.Type
+
+		if t != nil && Isptr[t.Etype] {
+			t = t.Type
+		}
+		if Isfixedarray(t) && !hascallchan(l) {
+			return true
+		}
+
+	case OLITERAL:
+		if n.Val.Ctype != CTNIL {
+			return true
+		}
+
+	case ONAME:
+		l := n.Sym.Def
+		if l != nil && l.Op == OLITERAL && n.Val.Ctype != CTNIL {
+			return true
+		}
+
+	case ONONAME:
+		if n.Sym.Def != nil && n.Sym.Def.Op == OIOTA {
+			return true
+		}
+
+		// The only constant calls are unsafe.Alignof, unsafe.Offsetof, and unsafe.Sizeof.
+	case OCALL:
+		l := n.Left
+
+		for l.Op == OPAREN {
+			l = l.Left
+		}
+		if l.Op != ONAME || l.Sym.Pkg != unsafepkg {
+			break
+		}
+		if l.Sym.Name == "Alignof" || l.Sym.Name == "Offsetof" || l.Sym.Name == "Sizeof" {
+			return true
+		}
+	}
+
+	//dump("nonconst", n);
+	return false
+}
+
+func hascallchan(n *Node) bool {
+	if n == nil {
+		return false
+	}
+	switch n.Op {
+	case OAPPEND,
+		OCALL,
+		OCALLFUNC,
+		OCALLINTER,
+		OCALLMETH,
+		OCAP,
+		OCLOSE,
+		OCOMPLEX,
+		OCOPY,
+		ODELETE,
+		OIMAG,
+		OLEN,
+		OMAKE,
+		ONEW,
+		OPANIC,
+		OPRINT,
+		OPRINTN,
+		OREAL,
+		ORECOVER,
+		ORECV:
+		return true
+	}
+
+	if hascallchan(n.Left) || hascallchan(n.Right) {
+		return true
+	}
+
+	for l := n.List; l != nil; l = l.Next {
+		if hascallchan(l.N) {
+			return true
+		}
+	}
+	for l := n.Rlist; l != nil; l = l.Next {
+		if hascallchan(l.N) {
+			return true
+		}
+	}
+
+	return false
+}
diff --git a/src/cmd/compile/internal/gc/cplx.go b/src/cmd/compile/internal/gc/cplx.go
new file mode 100644
index 0000000..56a4892
--- /dev/null
+++ b/src/cmd/compile/internal/gc/cplx.go
@@ -0,0 +1,480 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import "cmd/internal/obj"
+
+func overlap_cplx(f *Node, t *Node) bool {
+	// check whether f and t could be overlapping stack references.
+	// not exact, because it's hard to check for the stack register
+	// in portable code.  close enough: worst case we will allocate
+	// an extra temporary and the registerizer will clean it up.
+	return f.Op == OINDREG && t.Op == OINDREG && f.Xoffset+f.Type.Width >= t.Xoffset && t.Xoffset+t.Type.Width >= f.Xoffset
+}
+
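+// For example, two OINDREG references with Xoffset 8 and 16 and
+// width 16 each cover [8,24) and [16,32); the intervals intersect,
+// so overlap_cplx reports true and a temporary will be used.
+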
+func complexbool(op int, nl, nr, res *Node, wantTrue bool, likely int, to *obj.Prog) {
+	// make both sides addable in ullman order
+	if nr != nil {
+		if nl.Ullman > nr.Ullman && !nl.Addable {
+			nl = CgenTemp(nl)
+		}
+
+		if !nr.Addable {
+			nr = CgenTemp(nr)
+		}
+	}
+	if !nl.Addable {
+		nl = CgenTemp(nl)
+	}
+
+	// Break nl and nr into real and imaginary components.
+	var lreal, limag, rreal, rimag Node
+	subnode(&lreal, &limag, nl)
+	subnode(&rreal, &rimag, nr)
+
+	// build tree
+	// if branching:
+	// 	real(l) == real(r) && imag(l) == imag(r)
+	// if generating a value, use a branch-free version:
+	// 	real(l) == real(r) & imag(l) == imag(r)
+	realeq := Node{
+		Op:    OEQ,
+		Left:  &lreal,
+		Right: &rreal,
+		Type:  Types[TBOOL],
+	}
+	imageq := Node{
+		Op:    OEQ,
+		Left:  &limag,
+		Right: &rimag,
+		Type:  Types[TBOOL],
+	}
+	and := Node{
+		Op:    OANDAND,
+		Left:  &realeq,
+		Right: &imageq,
+		Type:  Types[TBOOL],
+	}
+
+	if res != nil {
+		// generating a value
+		and.Op = OAND
+		if op == ONE {
+			and.Op = OOR
+			realeq.Op = ONE
+			imageq.Op = ONE
+		}
+		Bvgen(&and, res, true)
+		return
+	}
+
+	// generating a branch
+	if op == ONE {
+		wantTrue = !wantTrue
+	}
+
+	Bgen(&and, wantTrue, likely, to)
+}
+
+// break addable nc-complex into nr-real and ni-imaginary
+func subnode(nr *Node, ni *Node, nc *Node) {
+	if !nc.Addable {
+		Fatal("subnode not addable")
+	}
+
+	tc := Simsimtype(nc.Type)
+	tc = cplxsubtype(tc)
+	t := Types[tc]
+
+	if nc.Op == OLITERAL {
+		nodfconst(nr, t, &nc.Val.U.(*Mpcplx).Real)
+		nodfconst(ni, t, &nc.Val.U.(*Mpcplx).Imag)
+		return
+	}
+
+	*nr = *nc
+	nr.Type = t
+
+	*ni = *nc
+	ni.Type = t
+	ni.Xoffset += t.Width
+}
+
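+// For example, a complex128 at offset 0 splits into a float64 real
+// part at offset 0 and a float64 imaginary part at offset 8
+// (t.Width); a complex64 splits into float32 halves 4 bytes apart.
+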
+// generate code res = -nl
+func minus(nl *Node, res *Node) {
+	var ra Node
+	ra.Op = OMINUS
+	ra.Left = nl
+	ra.Type = nl.Type
+	Cgen(&ra, res)
+}
+
+// build and execute tree
+//	real(res) = -real(nl)
+//	imag(res) = -imag(nl)
+func complexminus(nl *Node, res *Node) {
+	var n1 Node
+	var n2 Node
+	var n5 Node
+	var n6 Node
+
+	subnode(&n1, &n2, nl)
+	subnode(&n5, &n6, res)
+
+	minus(&n1, &n5)
+	minus(&n2, &n6)
+}
+
+// build and execute tree
+//	real(res) = real(nl) op real(nr)
+//	imag(res) = imag(nl) op imag(nr)
+func complexadd(op int, nl *Node, nr *Node, res *Node) {
+	var n1 Node
+	var n2 Node
+	var n3 Node
+	var n4 Node
+	var n5 Node
+	var n6 Node
+
+	subnode(&n1, &n2, nl)
+	subnode(&n3, &n4, nr)
+	subnode(&n5, &n6, res)
+
+	var ra Node
+	ra.Op = uint8(op)
+	ra.Left = &n1
+	ra.Right = &n3
+	ra.Type = n1.Type
+	Cgen(&ra, &n5)
+
+	ra = Node{}
+	ra.Op = uint8(op)
+	ra.Left = &n2
+	ra.Right = &n4
+	ra.Type = n2.Type
+	Cgen(&ra, &n6)
+}
+
+// build and execute tree
+//	tmp       = real(nl)*real(nr) - imag(nl)*imag(nr)
+//	imag(res) = real(nl)*imag(nr) + imag(nl)*real(nr)
+//	real(res) = tmp
+func complexmul(nl *Node, nr *Node, res *Node) {
+	var n1 Node
+	var n2 Node
+	var n3 Node
+	var n4 Node
+	var n5 Node
+	var n6 Node
+	var tmp Node
+
+	subnode(&n1, &n2, nl)
+	subnode(&n3, &n4, nr)
+	subnode(&n5, &n6, res)
+	Tempname(&tmp, n5.Type)
+
+	// real part -> tmp
+	var rm1 Node
+
+	rm1.Op = OMUL
+	rm1.Left = &n1
+	rm1.Right = &n3
+	rm1.Type = n1.Type
+
+	var rm2 Node
+	rm2.Op = OMUL
+	rm2.Left = &n2
+	rm2.Right = &n4
+	rm2.Type = n2.Type
+
+	var ra Node
+	ra.Op = OSUB
+	ra.Left = &rm1
+	ra.Right = &rm2
+	ra.Type = rm1.Type
+	Cgen(&ra, &tmp)
+
+	// imag part
+	rm1 = Node{}
+
+	rm1.Op = OMUL
+	rm1.Left = &n1
+	rm1.Right = &n4
+	rm1.Type = n1.Type
+
+	rm2 = Node{}
+	rm2.Op = OMUL
+	rm2.Left = &n2
+	rm2.Right = &n3
+	rm2.Type = n2.Type
+
+	ra = Node{}
+	ra.Op = OADD
+	ra.Left = &rm1
+	ra.Right = &rm2
+	ra.Type = rm1.Type
+	Cgen(&ra, &n6)
+
+	// tmp -> real part
+	Cgen(&tmp, &n5)
+}
+
+func nodfconst(n *Node, t *Type, fval *Mpflt) {
+	*n = Node{}
+	n.Op = OLITERAL
+	n.Addable = true
+	ullmancalc(n)
+	n.Val.U = fval
+	n.Val.Ctype = CTFLT
+	n.Type = t
+
+	if !Isfloat[t.Etype] {
+		Fatal("nodfconst: bad type %v", t)
+	}
+}
+
+func Complexop(n *Node, res *Node) bool {
+	if n != nil && n.Type != nil {
+		if Iscomplex[n.Type.Etype] {
+			goto maybe
+		}
+	}
+
+	if res != nil && res.Type != nil {
+		if Iscomplex[res.Type.Etype] {
+			goto maybe
+		}
+	}
+
+	if n.Op == OREAL || n.Op == OIMAG {
+		//dump("\ncomplex-yes", n);
+		return true
+	}
+
+	//dump("\ncomplex-no", n);
+	return false
+
+maybe:
+	switch n.Op {
+	case OCONV, // implemented ops
+		OADD,
+		OSUB,
+		OMUL,
+		OMINUS,
+		OCOMPLEX,
+		OREAL,
+		OIMAG:
+		//dump("\ncomplex-yes", n);
+		return true
+
+	case ODOT,
+		ODOTPTR,
+		OINDEX,
+		OIND,
+		ONAME:
+		//dump("\ncomplex-yes", n);
+		return true
+	}
+
+	//dump("\ncomplex-no", n);
+	return false
+}
+
+func Complexmove(f *Node, t *Node) {
+	if Debug['g'] != 0 {
+		Dump("\ncomplexmove-f", f)
+		Dump("complexmove-t", t)
+	}
+
+	if !t.Addable {
+		Fatal("complexmove: to not addable")
+	}
+
+	ft := Simsimtype(f.Type)
+	tt := Simsimtype(t.Type)
+	switch uint32(ft)<<16 | uint32(tt) {
+	default:
+		Fatal("complexmove: unknown conversion: %v -> %v\n", f.Type, t.Type)
+
+		// complex to complex move/convert.
+	// make f addable.
+	// also use temporary if possible stack overlap.
+	case TCOMPLEX64<<16 | TCOMPLEX64,
+		TCOMPLEX64<<16 | TCOMPLEX128,
+		TCOMPLEX128<<16 | TCOMPLEX64,
+		TCOMPLEX128<<16 | TCOMPLEX128:
+		if !f.Addable || overlap_cplx(f, t) {
+			var tmp Node
+			Tempname(&tmp, f.Type)
+			Complexmove(f, &tmp)
+			f = &tmp
+		}
+
+		var n1 Node
+		var n2 Node
+		subnode(&n1, &n2, f)
+		var n4 Node
+		var n3 Node
+		subnode(&n3, &n4, t)
+
+		Cgen(&n1, &n3)
+		Cgen(&n2, &n4)
+	}
+}
+
+func Complexgen(n *Node, res *Node) {
+	if Debug['g'] != 0 {
+		Dump("\ncomplexgen-n", n)
+		Dump("complexgen-res", res)
+	}
+
+	for n.Op == OCONVNOP {
+		n = n.Left
+	}
+
+	// pick off float/complex opcodes
+	switch n.Op {
+	case OCOMPLEX:
+		if res.Addable {
+			var n1 Node
+			var n2 Node
+			subnode(&n1, &n2, res)
+			var tmp Node
+			Tempname(&tmp, n1.Type)
+			Cgen(n.Left, &tmp)
+			Cgen(n.Right, &n2)
+			Cgen(&tmp, &n1)
+			return
+		}
+
+	case OREAL, OIMAG:
+		nl := n.Left
+		if !nl.Addable {
+			var tmp Node
+			Tempname(&tmp, nl.Type)
+			Complexgen(nl, &tmp)
+			nl = &tmp
+		}
+
+		var n1 Node
+		var n2 Node
+		subnode(&n1, &n2, nl)
+		if n.Op == OREAL {
+			Cgen(&n1, res)
+			return
+		}
+
+		Cgen(&n2, res)
+		return
+	}
+
+	// perform conversion from n to res
+	tl := Simsimtype(res.Type)
+
+	tl = cplxsubtype(tl)
+	tr := Simsimtype(n.Type)
+	tr = cplxsubtype(tr)
+	if tl != tr {
+		if !n.Addable {
+			var n1 Node
+			Tempname(&n1, n.Type)
+			Complexmove(n, &n1)
+			n = &n1
+		}
+
+		Complexmove(n, res)
+		return
+	}
+
+	if !res.Addable {
+		var n1 Node
+		Igen(res, &n1, nil)
+		Cgen(n, &n1)
+		Regfree(&n1)
+		return
+	}
+
+	if n.Addable {
+		Complexmove(n, res)
+		return
+	}
+
+	switch n.Op {
+	default:
+		Dump("complexgen: unknown op", n)
+		Fatal("complexgen: unknown op %v", Oconv(int(n.Op), 0))
+
+	case ODOT,
+		ODOTPTR,
+		OINDEX,
+		OIND,
+		ONAME, // PHEAP or PPARAMREF var
+		OCALLFUNC,
+		OCALLMETH,
+		OCALLINTER:
+		var n1 Node
+		Igen(n, &n1, res)
+
+		Complexmove(&n1, res)
+		Regfree(&n1)
+		return
+
+	case OCONV,
+		OADD,
+		OSUB,
+		OMUL,
+		OMINUS,
+		OCOMPLEX,
+		OREAL,
+		OIMAG:
+		break
+	}
+
+	nl := n.Left
+	if nl == nil {
+		return
+	}
+	nr := n.Right
+
+	// make both sides addable in ullman order
+	var tnl Node
+	if nr != nil {
+		if nl.Ullman > nr.Ullman && !nl.Addable {
+			Tempname(&tnl, nl.Type)
+			Cgen(nl, &tnl)
+			nl = &tnl
+		}
+
+		if !nr.Addable {
+			var tnr Node
+			Tempname(&tnr, nr.Type)
+			Cgen(nr, &tnr)
+			nr = &tnr
+		}
+	}
+
+	if !nl.Addable {
+		Tempname(&tnl, nl.Type)
+		Cgen(nl, &tnl)
+		nl = &tnl
+	}
+
+	switch n.Op {
+	default:
+		Fatal("complexgen: unknown op %v", Oconv(int(n.Op), 0))
+
+	case OCONV:
+		Complexmove(nl, res)
+
+	case OMINUS:
+		complexminus(nl, res)
+
+	case OADD, OSUB:
+		complexadd(int(n.Op), nl, nr, res)
+
+	case OMUL:
+		complexmul(nl, nr, res)
+	}
+}
diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go
new file mode 100644
index 0000000..85a33be
--- /dev/null
+++ b/src/cmd/compile/internal/gc/dcl.go
@@ -0,0 +1,1494 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+	"cmd/internal/obj"
+	"fmt"
+	"strings"
+)
+
+func dflag() bool {
+	if Debug['d'] == 0 {
+		return false
+	}
+	if Debug['y'] != 0 {
+		return true
+	}
+	if incannedimport != 0 {
+		return false
+	}
+	return true
+}
+
+/*
+ * declaration stack & operations
+ */
+func dcopy(a *Sym, b *Sym) {
+	a.Pkg = b.Pkg
+	a.Name = b.Name
+	a.Def = b.Def
+	a.Block = b.Block
+	a.Lastlineno = b.Lastlineno
+}
+
+func push() *Sym {
+	d := new(Sym)
+	d.Lastlineno = lineno
+	d.Link = dclstack
+	dclstack = d
+	return d
+}
+
+func pushdcl(s *Sym) *Sym {
+	d := push()
+	dcopy(d, s)
+	if dflag() {
+		fmt.Printf("\t%v push %v %p\n", Ctxt.Line(int(lineno)), s, s.Def)
+	}
+	return d
+}
+
+func popdcl() {
+	var d *Sym
+	var s *Sym
+	var lno int
+
+	for d = dclstack; d != nil; d = d.Link {
+		if d.Name == "" {
+			break
+		}
+		s = Pkglookup(d.Name, d.Pkg)
+		lno = int(s.Lastlineno)
+		dcopy(s, d)
+		d.Lastlineno = int32(lno)
+		if dflag() {
+			fmt.Printf("\t%v pop %v %p\n", Ctxt.Line(int(lineno)), s, s.Def)
+		}
+	}
+
+	if d == nil {
+		Fatal("popdcl: no mark")
+	}
+	dclstack = d.Link
+	block = d.Block
+}
+
+func poptodcl() {
+	// pop the old marker and push a new one
+	// (cannot reuse the existing one)
+	// because we use the markers to identify blocks
+	// for the goto restriction checks.
+	popdcl()
+
+	markdcl()
+}
+
+func markdcl() {
+	d := push()
+	d.Name = "" // used as a mark in fifo
+	d.Block = block
+
+	blockgen++
+	block = blockgen
+}
+
+func dumpdcl(st string) {
+	var s *Sym
+
+	i := 0
+	for d := dclstack; d != nil; d = d.Link {
+		i++
+		fmt.Printf("    %.2d %p", i, d)
+		if d.Name == "" {
+			fmt.Printf("\n")
+			continue
+		}
+
+		fmt.Printf(" '%s'", d.Name)
+		s = Pkglookup(d.Name, d.Pkg)
+		fmt.Printf(" %v\n", s)
+	}
+}
+
+func testdclstack() {
+	for d := dclstack; d != nil; d = d.Link {
+		if d.Name == "" {
+			if nerrors != 0 {
+				errorexit()
+			}
+			Yyerror("mark left on the stack")
+			continue
+		}
+	}
+}
+
+func redeclare(s *Sym, where string) {
+	if s.Lastlineno == 0 {
+		var tmp string
+		if s.Origpkg != nil {
+			tmp = s.Origpkg.Path
+		} else {
+			tmp = s.Pkg.Path
+		}
+		pkgstr := tmp
+		Yyerror("%v redeclared %s\n"+"\tprevious declaration during import %q", s, where, pkgstr)
+	} else {
+		line1 := parserline()
+		line2 := int(s.Lastlineno)
+
+		// When an import and a declaration collide in separate files,
+		// present the import as the "redeclared", because the declaration
+		// is visible where the import is, but not vice versa.
+		// See issue 4510.
+		if s.Def == nil {
+			line2 = line1
+			line1 = int(s.Lastlineno)
+		}
+
+		yyerrorl(int(line1), "%v redeclared %s\n"+"\tprevious declaration at %v", s, where, Ctxt.Line(line2))
+	}
+}
+
+var vargen int
+
+/*
+ * declare individual names - var, typ, const
+ */
+
+var declare_typegen int
+
+func declare(n *Node, ctxt uint8) {
+	if ctxt == PDISCARD {
+		return
+	}
+
+	if isblank(n) {
+		return
+	}
+
+	n.Lineno = int32(parserline())
+	s := n.Sym
+
+	// kludgy: typecheckok means we're past parsing. E.g. genwrapper may declare out-of-package names later.
+	if importpkg == nil && typecheckok == 0 && s.Pkg != localpkg {
+		Yyerror("cannot declare name %v", s)
+	}
+
+	if ctxt == PEXTERN && s.Name == "init" {
+		Yyerror("cannot declare init - must be func")
+	}
+
+	gen := 0
+	if ctxt == PEXTERN {
+		externdcl = list(externdcl, n)
+		if dflag() {
+			fmt.Printf("\t%v global decl %v %p\n", Ctxt.Line(int(lineno)), s, n)
+		}
+	} else {
+		if Curfn == nil && ctxt == PAUTO {
+			Fatal("automatic outside function")
+		}
+		if Curfn != nil {
+			Curfn.Func.Dcl = list(Curfn.Func.Dcl, n)
+		}
+		if n.Op == OTYPE {
+			declare_typegen++
+			gen = declare_typegen
+		} else if n.Op == ONAME && ctxt == PAUTO && !strings.Contains(s.Name, "·") {
+			vargen++
+			gen = vargen
+		}
+		pushdcl(s)
+		n.Curfn = Curfn
+	}
+
+	if ctxt == PAUTO {
+		n.Xoffset = 0
+	}
+
+	if s.Block == block {
+		// functype will print errors about duplicate function arguments.
+		// Don't repeat the error here.
+		if ctxt != PPARAM && ctxt != PPARAMOUT {
+			redeclare(s, "in this block")
+		}
+	}
+
+	s.Block = block
+	s.Lastlineno = int32(parserline())
+	s.Def = n
+	n.Vargen = int32(gen)
+	n.Funcdepth = Funcdepth
+	n.Class = uint8(ctxt)
+
+	autoexport(n, ctxt)
+}
+
+func addvar(n *Node, t *Type, ctxt uint8) {
+	if n == nil || n.Sym == nil || (n.Op != ONAME && n.Op != ONONAME) || t == nil {
+		Fatal("addvar: n=%v t=%v nil", n, t)
+	}
+
+	n.Op = ONAME
+	declare(n, ctxt)
+	n.Type = t
+}
+
+/*
+ * declare variables from grammar
+ * new_name_list (type | [type] = expr_list)
+ */
+func variter(vl *NodeList, t *Node, el *NodeList) *NodeList {
+	var init *NodeList
+	doexpr := el != nil
+
+	if count(el) == 1 && count(vl) > 1 {
+		e := el.N
+		as2 := Nod(OAS2, nil, nil)
+		as2.List = vl
+		as2.Rlist = list1(e)
+		var v *Node
+		for ; vl != nil; vl = vl.Next {
+			v = vl.N
+			v.Op = ONAME
+			declare(v, dclcontext)
+			v.Ntype = t
+			v.Defn = as2
+			if Funcdepth > 0 {
+				init = list(init, Nod(ODCL, v, nil))
+			}
+		}
+
+		return list(init, as2)
+	}
+
+	var v *Node
+	var e *Node
+	for ; vl != nil; vl = vl.Next {
+		if doexpr {
+			if el == nil {
+				Yyerror("missing expression in var declaration")
+				break
+			}
+
+			e = el.N
+			el = el.Next
+		} else {
+			e = nil
+		}
+
+		v = vl.N
+		v.Op = ONAME
+		declare(v, dclcontext)
+		v.Ntype = t
+
+		if e != nil || Funcdepth > 0 || isblank(v) {
+			if Funcdepth > 0 {
+				init = list(init, Nod(ODCL, v, nil))
+			}
+			e = Nod(OAS, v, e)
+			init = list(init, e)
+			if e.Right != nil {
+				v.Defn = e
+			}
+		}
+	}
+
+	if el != nil {
+		Yyerror("extra expression in var declaration")
+	}
+	return init
+}
+
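+// For example, var a, b = f() takes the count(el) == 1 && count(vl) > 1
+// path above and builds a single OAS2 assignment, while
+// var a, b = 1, 2 pairs names with expressions and emits one OAS each.
+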
+/*
+ * declare constants from grammar
+ * new_name_list [[type] = expr_list]
+ */
+func constiter(vl *NodeList, t *Node, cl *NodeList) *NodeList {
+	if cl == nil {
+		if t != nil {
+			Yyerror("const declaration cannot have type without expression")
+		}
+		cl = lastconst
+		t = lasttype
+	} else {
+		lastconst = cl
+		lasttype = t
+	}
+
+	cl = listtreecopy(cl)
+
+	var v *Node
+	var c *Node
+	var vv *NodeList
+	for ; vl != nil; vl = vl.Next {
+		if cl == nil {
+			Yyerror("missing value in const declaration")
+			break
+		}
+
+		c = cl.N
+		cl = cl.Next
+
+		v = vl.N
+		v.Op = OLITERAL
+		declare(v, dclcontext)
+
+		v.Ntype = t
+		v.Defn = c
+
+		vv = list(vv, Nod(ODCLCONST, v, nil))
+	}
+
+	if cl != nil {
+		Yyerror("extra expression in const declaration")
+	}
+	iota_++
+	return vv
+}
+
+/*
+ * this generates a new name node,
+ * typically for labels or other one-off names.
+ */
+func newname(s *Sym) *Node {
+	if s == nil {
+		Fatal("newname nil")
+	}
+
+	n := Nod(ONAME, nil, nil)
+	n.Sym = s
+	n.Type = nil
+	n.Addable = true
+	n.Ullman = 1
+	n.Xoffset = 0
+	return n
+}
+
+// newfuncname generates a new name node for a function or method.
+// TODO(rsc): Use an ODCLFUNC node instead. See comment in CL 7360.
+func newfuncname(s *Sym) *Node {
+	n := newname(s)
+	n.Func = new(Func)
+	return n
+}
+
+/*
+ * this generates a new name node for a name
+ * being declared.
+ */
+func dclname(s *Sym) *Node {
+	n := newname(s)
+	n.Op = ONONAME // caller will correct it
+	return n
+}
+
+func typenod(t *Type) *Node {
+	// if we copied another type with *t = *u
+	// then t->nod might be out of date, so
+	// check t->nod->type too
+	if t.Nod == nil || t.Nod.Type != t {
+		t.Nod = Nod(OTYPE, nil, nil)
+		t.Nod.Type = t
+		t.Nod.Sym = t.Sym
+	}
+
+	return t.Nod
+}
+
+/*
+ * this will return an old name
+ * that has already been pushed on the
+ * declaration list. a diagnostic is
+ * generated if no name has been defined.
+ */
+func oldname(s *Sym) *Node {
+	n := s.Def
+	if n == nil {
+		// maybe a top-level name will come along
+		// to give this a definition later.
+		// walkdef will check s->def again once
+		// all the input source has been processed.
+		n = newname(s)
+
+		n.Op = ONONAME
+		n.Iota = iota_ // save current iota value in const declarations
+	}
+
+	if Curfn != nil && n.Funcdepth > 0 && n.Funcdepth != Funcdepth && n.Op == ONAME {
+		// inner func is referring to var in outer func.
+		//
+		// TODO(rsc): If there is an outer variable x and we
+		// are parsing x := 5 inside the closure, until we get to
+		// the := it looks like a reference to the outer x so we'll
+		// make x a closure variable unnecessarily.
+		if n.Closure == nil || n.Closure.Funcdepth != Funcdepth {
+			// create new closure var.
+			c := Nod(ONAME, nil, nil)
+
+			c.Sym = s
+			c.Class = PPARAMREF
+			c.Isddd = n.Isddd
+			c.Defn = n
+			c.Addable = false
+			c.Ullman = 2
+			c.Funcdepth = Funcdepth
+			c.Outer = n.Closure
+			n.Closure = c
+			c.Closure = n
+			c.Xoffset = 0
+			Curfn.Func.Cvars = list(Curfn.Func.Cvars, c)
+		}
+
+		// return ref to closure var, not original
+		return n.Closure
+	}
+
+	return n
+}
+
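+// For example, in
+//	x := 5
+//	f := func() int { return x }
+// the reference to x inside the function literal reaches oldname with
+// n.Funcdepth different from the current Funcdepth, so a PPARAMREF
+// closure variable is created and recorded in Curfn.Func.Cvars.
+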
+/*
+ * := declarations
+ */
+func colasname(n *Node) bool {
+	switch n.Op {
+	case ONAME,
+		ONONAME,
+		OPACK,
+		OTYPE,
+		OLITERAL:
+		return n.Sym != nil
+	}
+
+	return false
+}
+
+func colasdefn(left *NodeList, defn *Node) {
+	for l := left; l != nil; l = l.Next {
+		if l.N.Sym != nil {
+			l.N.Sym.Flags |= SymUniq
+		}
+	}
+
+	nnew := 0
+	nerr := 0
+	var n *Node
+	for l := left; l != nil; l = l.Next {
+		n = l.N
+		if isblank(n) {
+			continue
+		}
+		if !colasname(n) {
+			yyerrorl(int(defn.Lineno), "non-name %v on left side of :=", n)
+			nerr++
+			continue
+		}
+
+		if n.Sym.Flags&SymUniq == 0 {
+			yyerrorl(int(defn.Lineno), "%v repeated on left side of :=", n.Sym)
+			n.Diag++
+			nerr++
+			continue
+		}
+
+		n.Sym.Flags &^= SymUniq
+		if n.Sym.Block == block {
+			continue
+		}
+
+		nnew++
+		n = newname(n.Sym)
+		declare(n, dclcontext)
+		n.Defn = defn
+		defn.Ninit = list(defn.Ninit, Nod(ODCL, n, nil))
+		l.N = n
+	}
+
+	if nnew == 0 && nerr == 0 {
+		yyerrorl(int(defn.Lineno), "no new variables on left side of :=")
+	}
+}
+
+func colas(left *NodeList, right *NodeList, lno int32) *Node {
+	as := Nod(OAS2, nil, nil)
+	as.List = left
+	as.Rlist = right
+	as.Colas = true
+	as.Lineno = lno
+	colasdefn(left, as)
+
+	// make the tree prettier; not necessary
+	if count(left) == 1 && count(right) == 1 {
+		as.Left = as.List.N
+		as.Right = as.Rlist.N
+		as.List = nil
+		as.Rlist = nil
+		as.Op = OAS
+	}
+
+	return as
+}
+
+/*
+ * declare the arguments in an
+ * interface field declaration.
+ */
+func ifacedcl(n *Node) {
+	if n.Op != ODCLFIELD || n.Right == nil {
+		Fatal("ifacedcl")
+	}
+
+	if isblank(n.Left) {
+		Yyerror("methods must have a unique non-blank name")
+	}
+
+	n.Func = new(Func)
+	dclcontext = PPARAM
+	markdcl()
+	Funcdepth++
+	n.Outer = Curfn
+	Curfn = n
+	funcargs(n.Right)
+
+	// funcbody is normally called after the parser has
+	// seen the body of a function but since an interface
+	// field declaration does not have a body, we must
+	// call it now to pop the current declaration context.
+	dclcontext = PAUTO
+
+	funcbody(n)
+}
+
+/*
+ * declare the function proper
+ * and declare the arguments.
+ * called in extern-declaration context
+ * returns in auto-declaration context.
+ */
+func funchdr(n *Node) {
+	// change the declaration context from extern to auto
+	if Funcdepth == 0 && dclcontext != PEXTERN {
+		Fatal("funchdr: dclcontext")
+	}
+
+	dclcontext = PAUTO
+	markdcl()
+	Funcdepth++
+
+	n.Outer = Curfn
+	Curfn = n
+
+	if n.Nname != nil {
+		funcargs(n.Nname.Ntype)
+	} else if n.Ntype != nil {
+		funcargs(n.Ntype)
+	} else {
+		funcargs2(n.Type)
+	}
+}
+
+func funcargs(nt *Node) {
+	if nt.Op != OTFUNC {
+		Fatal("funcargs %v", Oconv(int(nt.Op), 0))
+	}
+
+	// re-start the variable generation number
+	// we want to use small numbers for the return variables,
+	// so let them have the chunk starting at 1.
+	vargen = count(nt.Rlist)
+
+	// declare the receiver and in arguments.
+	// no n->defn because type checking of func header
+	// will not fill in the types until later
+	if nt.Left != nil {
+		n := nt.Left
+		if n.Op != ODCLFIELD {
+			Fatal("funcargs receiver %v", Oconv(int(n.Op), 0))
+		}
+		if n.Left != nil {
+			n.Left.Op = ONAME
+			n.Left.Ntype = n.Right
+			declare(n.Left, PPARAM)
+			if dclcontext == PAUTO {
+				vargen++
+				n.Left.Vargen = int32(vargen)
+			}
+		}
+	}
+
+	var n *Node
+	for l := nt.List; l != nil; l = l.Next {
+		n = l.N
+		if n.Op != ODCLFIELD {
+			Fatal("funcargs in %v", Oconv(int(n.Op), 0))
+		}
+		if n.Left != nil {
+			n.Left.Op = ONAME
+			n.Left.Ntype = n.Right
+			declare(n.Left, PPARAM)
+			if dclcontext == PAUTO {
+				vargen++
+				n.Left.Vargen = int32(vargen)
+			}
+		}
+	}
+
+	// declare the out arguments.
+	gen := count(nt.List)
+	i := 0
+	var nn *Node
+	for l := nt.Rlist; l != nil; l = l.Next {
+		n = l.N
+
+		if n.Op != ODCLFIELD {
+			Fatal("funcargs out %v", Oconv(int(n.Op), 0))
+		}
+
+		if n.Left == nil {
+			// Name so that escape analysis can track it. ~r stands for 'result'.
+			n.Left = newname(Lookupf("~r%d", gen))
+			gen++
+		}
+
+		// TODO: n->left->missing = 1;
+		n.Left.Op = ONAME
+
+		if isblank(n.Left) {
+			// Give it a name so we can assign to it during return. ~b stands for 'blank'.
+			// The name must be different from ~r above because if you have
+			//	func f() (_ int)
+			//	func g() int
+			// f is allowed to use a plain 'return' with no arguments, while g is not.
+			// So the two cases must be distinguished.
+			// We do not record a pointer to the original node (n->orig).
+			// Having multiple names causes too much confusion in later passes.
+			nn = Nod(OXXX, nil, nil)
+
+			*nn = *n.Left
+			nn.Orig = nn
+			nn.Sym = Lookupf("~b%d", gen)
+			gen++
+			n.Left = nn
+		}
+
+		n.Left.Ntype = n.Right
+		declare(n.Left, PPARAMOUT)
+		if dclcontext == PAUTO {
+			i++
+			n.Left.Vargen = int32(i)
+		}
+	}
+}
+
+/*
+ * Same as funcargs, except run over an already constructed TFUNC.
+ * This happens during import, where the hidden_fndcl rule has
+ * used functype directly to parse the function's type.
+ */
+func funcargs2(t *Type) {
+	if t.Etype != TFUNC {
+		Fatal("funcargs2 %v", t)
+	}
+
+	if t.Thistuple != 0 {
+		var n *Node
+		for ft := getthisx(t).Type; ft != nil; ft = ft.Down {
+			if ft.Nname == nil || ft.Nname.Sym == nil {
+				continue
+			}
+			n = ft.Nname // no need for newname(ft->nname->sym)
+			n.Type = ft.Type
+			declare(n, PPARAM)
+		}
+	}
+
+	if t.Intuple != 0 {
+		var n *Node
+		for ft := getinargx(t).Type; ft != nil; ft = ft.Down {
+			if ft.Nname == nil || ft.Nname.Sym == nil {
+				continue
+			}
+			n = ft.Nname
+			n.Type = ft.Type
+			declare(n, PPARAM)
+		}
+	}
+
+	if t.Outtuple != 0 {
+		var n *Node
+		for ft := getoutargx(t).Type; ft != nil; ft = ft.Down {
+			if ft.Nname == nil || ft.Nname.Sym == nil {
+				continue
+			}
+			n = ft.Nname
+			n.Type = ft.Type
+			declare(n, PPARAMOUT)
+		}
+	}
+}
+
+/*
+ * finish the body.
+ * called in auto-declaration context.
+ * returns in extern-declaration context.
+ */
+func funcbody(n *Node) {
+	// change the declaration context from auto to extern
+	if dclcontext != PAUTO {
+		Fatal("funcbody: dclcontext")
+	}
+	popdcl()
+	Funcdepth--
+	Curfn = n.Outer
+	n.Outer = nil
+	if Funcdepth == 0 {
+		dclcontext = PEXTERN
+	}
+}
+
+/*
+ * new type being defined with name s.
+ */
+func typedcl0(s *Sym) *Node {
+	n := newname(s)
+	n.Op = OTYPE
+	declare(n, dclcontext)
+	return n
+}
+
+/*
+ * node n, which was returned by typedcl0
+ * is being declared to have uncompiled type t.
+ * return the ODCLTYPE node to use.
+ */
+func typedcl1(n *Node, t *Node, local bool) *Node {
+	n.Ntype = t
+	n.Local = local
+	return Nod(ODCLTYPE, n, nil)
+}
+
+/*
+ * structs, functions, and methods.
+ * they don't belong here, but where do they belong?
+ */
+func checkembeddedtype(t *Type) {
+	if t == nil {
+		return
+	}
+
+	if t.Sym == nil && Isptr[t.Etype] {
+		t = t.Type
+		if t.Etype == TINTER {
+			Yyerror("embedded type cannot be a pointer to interface")
+		}
+	}
+
+	if Isptr[t.Etype] {
+		Yyerror("embedded type cannot be a pointer")
+	} else if t.Etype == TFORW && t.Embedlineno == 0 {
+		t.Embedlineno = lineno
+	}
+}
+
+func structfield(n *Node) *Type {
+	lno := int(lineno)
+	lineno = n.Lineno
+
+	if n.Op != ODCLFIELD {
+		Fatal("structfield: oops %v\n", n)
+	}
+
+	f := typ(TFIELD)
+	f.Isddd = n.Isddd
+
+	if n.Right != nil {
+		typecheck(&n.Right, Etype)
+		n.Type = n.Right.Type
+		if n.Left != nil {
+			n.Left.Type = n.Type
+		}
+		if n.Embedded != 0 {
+			checkembeddedtype(n.Type)
+		}
+	}
+
+	n.Right = nil
+
+	f.Type = n.Type
+	if f.Type == nil {
+		f.Broke = 1
+	}
+
+	switch n.Val.Ctype {
+	case CTSTR:
+		f.Note = new(string)
+		*f.Note = n.Val.U.(string)
+
+	default:
+		Yyerror("field annotation must be string")
+		fallthrough
+
+	case CTxxx:
+		f.Note = nil
+	}
+
+	if n.Left != nil && n.Left.Op == ONAME {
+		f.Nname = n.Left
+		f.Embedded = n.Embedded
+		f.Sym = f.Nname.Sym
+	}
+
+	lineno = int32(lno)
+	return f
+}
+
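+// uniqgen is a generation counter for checkdupfields: each check bumps it,
+// and Sym.Uniqgen records the generation in which a name was last seen,
+// so encountering the same generation twice within one walk means a duplicate.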
+var uniqgen uint32
+
+func checkdupfields(t *Type, what string) {
+	lno := int(lineno)
+
+	for ; t != nil; t = t.Down {
+		if t.Sym != nil && t.Nname != nil && !isblank(t.Nname) {
+			if t.Sym.Uniqgen == uniqgen {
+				lineno = t.Nname.Lineno
+				Yyerror("duplicate %s %s", what, t.Sym.Name)
+			} else {
+				t.Sym.Uniqgen = uniqgen
+			}
+		}
+	}
+
+	lineno = int32(lno)
+}
+
+/*
+ * convert a parsed id/type list into
+ * a type for struct/interface/arglist
+ */
+func tostruct(l *NodeList) *Type {
+	var f *Type
+	t := typ(TSTRUCT)
+
+	for tp := &t.Type; l != nil; l = l.Next {
+		f = structfield(l.N)
+
+		*tp = f
+		tp = &f.Down
+	}
+
+	for f := t.Type; f != nil && t.Broke == 0; f = f.Down {
+		if f.Broke != 0 {
+			t.Broke = 1
+		}
+	}
+
+	uniqgen++
+	checkdupfields(t.Type, "field")
+
+	if t.Broke == 0 {
+		checkwidth(t)
+	}
+
+	return t
+}
+
+func tofunargs(l *NodeList) *Type {
+	var f *Type
+
+	t := typ(TSTRUCT)
+	t.Funarg = 1
+
+	for tp := &t.Type; l != nil; l = l.Next {
+		f = structfield(l.N)
+		f.Funarg = 1
+
+		// esc.c needs to find f given a PPARAM to add the tag.
+		if l.N.Left != nil && l.N.Left.Class == PPARAM {
+			l.N.Left.Paramfld = f
+		}
+
+		*tp = f
+		tp = &f.Down
+	}
+
+	for f := t.Type; f != nil && t.Broke == 0; f = f.Down {
+		if f.Broke != 0 {
+			t.Broke = 1
+		}
+	}
+
+	return t
+}
+
+func interfacefield(n *Node) *Type {
+	lno := int(lineno)
+	lineno = n.Lineno
+
+	if n.Op != ODCLFIELD {
+		Fatal("interfacefield: oops %v\n", n)
+	}
+
+	if n.Val.Ctype != CTxxx {
+		Yyerror("interface method cannot have annotation")
+	}
+
+	f := typ(TFIELD)
+	f.Isddd = n.Isddd
+
+	if n.Right != nil {
+		if n.Left != nil {
+			// queue resolution of method type for later.
+			// right now all we need is the name list.
+			// avoids cycles for recursive interface types.
+			n.Type = typ(TINTERMETH)
+
+			n.Type.Nname = n.Right
+			n.Left.Type = n.Type
+			queuemethod(n)
+
+			if n.Left.Op == ONAME {
+				f.Nname = n.Left
+				f.Embedded = n.Embedded
+				f.Sym = f.Nname.Sym
+			}
+		} else {
+			typecheck(&n.Right, Etype)
+			n.Type = n.Right.Type
+
+			if n.Embedded != 0 {
+				checkembeddedtype(n.Type)
+			}
+
+			if n.Type != nil {
+				switch n.Type.Etype {
+				case TINTER:
+					break
+
+				case TFORW:
+					Yyerror("interface type loop involving %v", n.Type)
+					f.Broke = 1
+
+				default:
+					Yyerror("interface contains embedded non-interface %v", n.Type)
+					f.Broke = 1
+				}
+			}
+		}
+	}
+
+	n.Right = nil
+
+	f.Type = n.Type
+	if f.Type == nil {
+		f.Broke = 1
+	}
+
+	lineno = int32(lno)
+	return f
+}
+
+func tointerface(l *NodeList) *Type {
+	var f *Type
+	var t1 *Type
+
+	t := typ(TINTER)
+
+	tp := &t.Type
+	for ; l != nil; l = l.Next {
+		f = interfacefield(l.N)
+
+		if l.N.Left == nil && f.Type.Etype == TINTER {
+			// embedded interface, inline methods
+			for t1 = f.Type.Type; t1 != nil; t1 = t1.Down {
+				f = typ(TFIELD)
+				f.Type = t1.Type
+				f.Broke = t1.Broke
+				f.Sym = t1.Sym
+				if f.Sym != nil {
+					f.Nname = newname(f.Sym)
+				}
+				*tp = f
+				tp = &f.Down
+			}
+		} else {
+			*tp = f
+			tp = &f.Down
+		}
+	}
+
+	for f := t.Type; f != nil && t.Broke == 0; f = f.Down {
+		if f.Broke != 0 {
+			t.Broke = 1
+		}
+	}
+
+	uniqgen++
+	checkdupfields(t.Type, "method")
+	t = sortinter(t)
+	checkwidth(t)
+
+	return t
+}
+
+func embedded(s *Sym, pkg *Pkg) *Node {
+	const (
+		CenterDot = 0xB7
+	)
+	// Names sometimes have disambiguation junk
+	// appended after a center dot.  Discard it when
+	// making the name for the embedded struct field.
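+	// For example, a symbol name of the form "T·1" yields the field name "T".
+	// (Illustrative; the exact suffix comes from the compiler's disambiguation.)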
+	name := s.Name
+
+	if i := strings.Index(s.Name, string(CenterDot)); i >= 0 {
+		name = s.Name[:i]
+	}
+
+	var n *Node
+	if exportname(name) {
+		n = newname(Lookup(name))
+	} else if s.Pkg == builtinpkg {
+		// The name of embedded builtins belongs to pkg.
+		n = newname(Pkglookup(name, pkg))
+	} else {
+		n = newname(Pkglookup(name, s.Pkg))
+	}
+	n = Nod(ODCLFIELD, n, oldname(s))
+	n.Embedded = 1
+	return n
+}
+
+/*
+ * check that the list of declarations is either all anonymous or all named
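+ * For example (illustrative), func(a, b int) is fine, while
+ * func(a int, string) mixes named and unnamed parameters and
+ * func(a int, b) leaves the final parameter without a type;
+ * checkarglist reports errors for the latter two.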
+ */
+func findtype(l *NodeList) *Node {
+	for ; l != nil; l = l.Next {
+		if l.N.Op == OKEY {
+			return l.N.Right
+		}
+	}
+	return nil
+}
+
+func checkarglist(all *NodeList, input int) *NodeList {
+	named := 0
+	for l := all; l != nil; l = l.Next {
+		if l.N.Op == OKEY {
+			named = 1
+			break
+		}
+	}
+
+	if named != 0 {
+		var n *Node
+		var l *NodeList
+		for l = all; l != nil; l = l.Next {
+			n = l.N
+			if n.Op != OKEY && n.Sym == nil {
+				Yyerror("mixed named and unnamed function parameters")
+				break
+			}
+		}
+
+		if l == nil && n != nil && n.Op != OKEY {
+			Yyerror("final function parameter must have type")
+		}
+	}
+
+	var nextt *Node
+	var t *Node
+	var n *Node
+	for l := all; l != nil; l = l.Next {
+		// can cache result from findtype to avoid
+		// quadratic behavior here, but unlikely to matter.
+		n = l.N
+
+		if named != 0 {
+			if n.Op == OKEY {
+				t = n.Right
+				n = n.Left
+				nextt = nil
+			} else {
+				if nextt == nil {
+					nextt = findtype(l)
+				}
+				t = nextt
+			}
+		} else {
+			t = n
+			n = nil
+		}
+
+		// During import, l->n->op is OKEY, but l->n->left->sym == S
+		// means it was a '?', not that it was a lone type.
+		// This doesn't matter for the exported declarations,
+		// which are parsed by rules that don't use checkargs,
+		// but it can happen for func literals in the inline bodies.
+		// TODO(rsc) this can go when typefmt case TFIELD in exportmode fmt.c prints _ instead of ?
+		if importpkg != nil && n.Sym == nil {
+			n = nil
+		}
+
+		if n != nil && n.Sym == nil {
+			t = n
+			n = nil
+		}
+
+		if n != nil {
+			n = newname(n.Sym)
+		}
+		n = Nod(ODCLFIELD, n, t)
+		if n.Right != nil && n.Right.Op == ODDD {
+			if input == 0 {
+				Yyerror("cannot use ... in output argument list")
+			} else if l.Next != nil {
+				Yyerror("can only use ... as final argument in list")
+			}
+			n.Right.Op = OTARRAY
+			n.Right.Right = n.Right.Left
+			n.Right.Left = nil
+			n.Isddd = true
+			if n.Left != nil {
+				n.Left.Isddd = true
+			}
+		}
+
+		l.N = n
+	}
+
+	return all
+}
+
+func fakethis() *Node {
+	n := Nod(ODCLFIELD, nil, typenod(Ptrto(typ(TSTRUCT))))
+	return n
+}
+
+/*
+ * Is this field a method on an interface?
+ * Those methods have an anonymous
+ * *struct{} as the receiver.
+ * (See fakethis above.)
+ */
+func isifacemethod(f *Type) bool {
+	rcvr := getthisx(f).Type
+	if rcvr.Sym != nil {
+		return false
+	}
+	t := rcvr.Type
+	if !Isptr[t.Etype] {
+		return false
+	}
+	t = t.Type
+	if t.Sym != nil || t.Etype != TSTRUCT || t.Type != nil {
+		return false
+	}
+	return true
+}
+
+/*
+ * turn a parsed function declaration
+ * into a type
+ */
+func functype(this *Node, in *NodeList, out *NodeList) *Type {
+	t := typ(TFUNC)
+
+	var rcvr *NodeList
+	if this != nil {
+		rcvr = list1(this)
+	}
+	t.Type = tofunargs(rcvr)
+	t.Type.Down = tofunargs(out)
+	t.Type.Down.Down = tofunargs(in)
+
+	uniqgen++
+	checkdupfields(t.Type.Type, "argument")
+	checkdupfields(t.Type.Down.Type, "argument")
+	checkdupfields(t.Type.Down.Down.Type, "argument")
+
+	if t.Type.Broke != 0 || t.Type.Down.Broke != 0 || t.Type.Down.Down.Broke != 0 {
+		t.Broke = 1
+	}
+
+	if this != nil {
+		t.Thistuple = 1
+	}
+	t.Outtuple = count(out)
+	t.Intuple = count(in)
+	t.Outnamed = 0
+	if t.Outtuple > 0 && out.N.Left != nil && out.N.Left.Orig != nil {
+		s := out.N.Left.Orig.Sym
+		if s != nil && (s.Name[0] != '~' || s.Name[1] != 'r') { // ~r%d is the name invented for an unnamed result
+			t.Outnamed = 1
+		}
+	}
+
+	return t
+}
+
+var methodsym_toppkg *Pkg
+
+func methodsym(nsym *Sym, t0 *Type, iface int) *Sym {
+	var s *Sym
+	var p string
+	var suffix string
+	var spkg *Pkg
+
+	t := t0
+	if t == nil {
+		goto bad
+	}
+	s = t.Sym
+	if s == nil && Isptr[t.Etype] {
+		t = t.Type
+		if t == nil {
+			goto bad
+		}
+		s = t.Sym
+	}
+
+	spkg = nil
+	if s != nil {
+		spkg = s.Pkg
+	}
+
+	// if t0 == *t and t0 has a sym,
+	// we want to see *t, not t0, in the method name.
+	if t != t0 && t0.Sym != nil {
+		t0 = Ptrto(t)
+	}
+
+	suffix = ""
+	if iface != 0 {
+		dowidth(t0)
+		if t0.Width < Types[Tptr].Width {
+			suffix = "·i"
+		}
+	}
+
+	if (spkg == nil || nsym.Pkg != spkg) && !exportname(nsym.Name) {
+		if t0.Sym == nil && Isptr[t0.Etype] {
+			p = fmt.Sprintf("(%v).%s.%s%s", Tconv(t0, obj.FmtLeft|obj.FmtShort), nsym.Pkg.Prefix, nsym.Name, suffix)
+		} else {
+			p = fmt.Sprintf("%v.%s.%s%s", Tconv(t0, obj.FmtLeft|obj.FmtShort), nsym.Pkg.Prefix, nsym.Name, suffix)
+		}
+	} else {
+		if t0.Sym == nil && Isptr[t0.Etype] {
+			p = fmt.Sprintf("(%v).%s%s", Tconv(t0, obj.FmtLeft|obj.FmtShort), nsym.Name, suffix)
+		} else {
+			p = fmt.Sprintf("%v.%s%s", Tconv(t0, obj.FmtLeft|obj.FmtShort), nsym.Name, suffix)
+		}
+	}
+
+	if spkg == nil {
+		if methodsym_toppkg == nil {
+			methodsym_toppkg = mkpkg("go")
+		}
+		spkg = methodsym_toppkg
+	}
+
+	s = Pkglookup(p, spkg)
+
+	return s
+
+bad:
+	Yyerror("illegal receiver type: %v", t0)
+	return nil
+}
+
+func methodname(n *Node, t *Type) *Node {
+	s := methodsym(n.Sym, t, 0)
+	if s == nil {
+		return n
+	}
+	return newname(s)
+}
+
+func methodname1(n *Node, t *Node) *Node {
+	star := ""
+	if t.Op == OIND {
+		star = "*"
+		t = t.Left
+	}
+
+	if t.Sym == nil || isblank(n) {
+		return newfuncname(n.Sym)
+	}
+
+	var p string
+	if star != "" {
+		p = fmt.Sprintf("(%s%v).%v", star, t.Sym, n.Sym)
+	} else {
+		p = fmt.Sprintf("%v.%v", t.Sym, n.Sym)
+	}
+
+	if exportname(t.Sym.Name) {
+		n = newfuncname(Lookup(p))
+	} else {
+		n = newfuncname(Pkglookup(p, t.Sym.Pkg))
+	}
+
+	return n
+}
+
+/*
+ * add a method, declared as a function,
+ * n is fieldname, pa is base type, t is function type
+ */
+func addmethod(sf *Sym, t *Type, local bool, nointerface bool) {
+	// get field sym
+	if sf == nil {
+		Fatal("no method symbol")
+	}
+
+	// get parent type sym
+	pa := getthisx(t).Type // ptr to this structure
+	if pa == nil {
+		Yyerror("missing receiver")
+		return
+	}
+
+	pa = pa.Type
+	f := methtype(pa, 1)
+	if f == nil {
+		t = pa
+		if t == nil { // rely on typecheck having complained before
+			return
+		}
+		if t != nil {
+			if Isptr[t.Etype] {
+				if t.Sym != nil {
+					Yyerror("invalid receiver type %v (%v is a pointer type)", pa, t)
+					return
+				}
+
+				t = t.Type
+			}
+
+			if t.Broke != 0 { // rely on typecheck having complained before
+				return
+			}
+			if t.Sym == nil {
+				Yyerror("invalid receiver type %v (%v is an unnamed type)", pa, t)
+				return
+			}
+
+			if Isptr[t.Etype] {
+				Yyerror("invalid receiver type %v (%v is a pointer type)", pa, t)
+				return
+			}
+
+			if t.Etype == TINTER {
+				Yyerror("invalid receiver type %v (%v is an interface type)", pa, t)
+				return
+			}
+		}
+
+		// Should have picked off all the reasons above,
+		// but just in case, fall back to generic error.
+		Yyerror("invalid receiver type %v (%v / %v)", pa, Tconv(pa, obj.FmtLong), Tconv(t, obj.FmtLong))
+
+		return
+	}
+
+	pa = f
+	if pa.Etype == TSTRUCT {
+		for f := pa.Type; f != nil; f = f.Down {
+			if f.Sym == sf {
+				Yyerror("type %v has both field and method named %v", pa, sf)
+				return
+			}
+		}
+	}
+
+	if local && !pa.Local {
+		// defining method on non-local type.
+		Yyerror("cannot define new methods on non-local type %v", pa)
+
+		return
+	}
+
+	n := Nod(ODCLFIELD, newname(sf), nil)
+	n.Type = t
+
+	var d *Type // last found
+	for f := pa.Method; f != nil; f = f.Down {
+		d = f
+		if f.Etype != TFIELD {
+			Fatal("addmethod: not TFIELD: %v", Tconv(f, obj.FmtLong))
+		}
+		if sf.Name != f.Sym.Name {
+			continue
+		}
+		if !Eqtype(t, f.Type) {
+			Yyerror("method redeclared: %v.%v\n\t%v\n\t%v", pa, sf, f.Type, t)
+		}
+		return
+	}
+
+	f = structfield(n)
+	f.Nointerface = nointerface
+
+	// during import unexported method names should be in the type's package
+	if importpkg != nil && f.Sym != nil && !exportname(f.Sym.Name) && f.Sym.Pkg != structpkg {
+		Fatal("imported method name %v in wrong package %s\n", Sconv(f.Sym, obj.FmtSign), structpkg.Name)
+	}
+
+	if d == nil {
+		pa.Method = f
+	} else {
+		d.Down = f
+	}
+	return
+}
+
+func funccompile(n *Node) {
+	Stksize = BADWIDTH
+	Maxarg = 0
+
+	if n.Type == nil {
+		if nerrors == 0 {
+			Fatal("funccompile missing type")
+		}
+		return
+	}
+
+	// assign parameter offsets
+	checkwidth(n.Type)
+
+	if Curfn != nil {
+		Fatal("funccompile %v inside %v", n.Nname.Sym, Curfn.Nname.Sym)
+	}
+
+	Stksize = 0
+	dclcontext = PAUTO
+	Funcdepth = n.Funcdepth + 1
+	compile(n)
+	Curfn = nil
+	Funcdepth = 0
+	dclcontext = PEXTERN
+}
+
+func funcsym(s *Sym) *Sym {
+	if s.Fsym != nil {
+		return s.Fsym
+	}
+
+	s1 := Pkglookup(s.Name+"·f", s.Pkg)
+	if s1.Def == nil {
+		s1.Def = newfuncname(s1)
+		s1.Def.Func.Shortname = newname(s)
+		funcsyms = list(funcsyms, s1.Def)
+	}
+	s.Fsym = s1
+
+	return s1
+}
diff --git a/src/cmd/compile/internal/gc/esc.go b/src/cmd/compile/internal/gc/esc.go
new file mode 100644
index 0000000..a9a1748
--- /dev/null
+++ b/src/cmd/compile/internal/gc/esc.go
@@ -0,0 +1,1758 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+	"cmd/internal/obj"
+	"fmt"
+	"strings"
+)
+
+// Run analysis on minimal sets of mutually recursive functions
+// or single non-recursive functions, bottom up.
+//
+// Finding these sets is finding strongly connected components
+// in the static call graph.  The algorithm for doing that is taken
+// from Sedgewick, Algorithms, Second Edition, p. 482, with two
+// adaptations.
+//
+// First, a hidden closure function (n->curfn != N) cannot be the
+// root of a connected component. Refusing to use it as a root
+// forces it into the component of the function in which it appears.
+// This is more convenient for escape analysis.
+//
+// Second, each function becomes two virtual nodes in the graph,
+// with numbers n and n+1. We record the function's node number as n
+// but search from node n+1. If the search tells us that the component
+// number (min) is n+1, we know that this is a trivial component: one function
+// plus its closures. If the search tells us that the component number is
+// n, then there was a path from node n+1 back to node n, meaning that
+// the function set is mutually recursive. The escape analysis can be
+// more precise when analyzing a single non-recursive function than
+// when analyzing a set of mutually recursive functions.
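+//
+// A small worked example (illustrative): visiting a function f assigns it
+// walkgen n and searches its body starting from min = n+1. If nothing f
+// calls leads back to f, min stays at n+1 and the component {f} (plus its
+// closures) is analyzed with recursive=false. If f calls g and g calls f,
+// the inner visit of g reaches f and returns n, min drops to n, and the
+// component {f, g} is analyzed with recursive=true.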
+
+// TODO(rsc): Look into using a map[*Node]bool instead of walkgen,
+// to allow analysis passes to use walkgen themselves.
+
+type bottomUpVisitor struct {
+	analyze  func(*NodeList, bool)
+	visitgen uint32
+	stack    *NodeList
+}
+
+// visitBottomUp invokes analyze on the ODCLFUNC nodes listed in list.
+// It calls analyze with successive groups of functions, working from
+// the bottom of the call graph upward. Each time analyze is called with
+// a list of functions, every function on that list only calls other functions
+// on the list or functions that have been passed in previous invocations of
+// analyze. Closures appear in the same list as their outer functions.
+// The lists are as short as possible while preserving those requirements.
+// (In a typical program, many invocations of analyze will be passed just
+// a single function.) The boolean argument 'recursive' passed to analyze
+// specifies whether the functions on the list are mutually recursive.
+// If recursive is false, the list consists of only a single function and its closures.
+// If recursive is true, the list may still contain only a single function,
+// if that function is itself recursive.
+func visitBottomUp(list *NodeList, analyze func(list *NodeList, recursive bool)) {
+	for l := list; l != nil; l = l.Next {
+		l.N.Walkgen = 0
+	}
+
+	var v bottomUpVisitor
+	v.analyze = analyze
+	for l := list; l != nil; l = l.Next {
+		if l.N.Op == ODCLFUNC && l.N.Curfn == nil {
+			v.visit(l.N)
+		}
+	}
+
+	for l := list; l != nil; l = l.Next {
+		l.N.Walkgen = 0
+	}
+}
+
+func (v *bottomUpVisitor) visit(n *Node) uint32 {
+	if n.Walkgen > 0 {
+		// already visited
+		return n.Walkgen
+	}
+
+	v.visitgen++
+	n.Walkgen = v.visitgen
+	v.visitgen++
+	min := v.visitgen
+
+	l := new(NodeList)
+	l.Next = v.stack
+	l.N = n
+	v.stack = l
+	min = v.visitcodelist(n.Nbody, min)
+	if (min == n.Walkgen || min == n.Walkgen+1) && n.Curfn == nil {
+		// This node is the root of a strongly connected component.
+
+		// The original min passed to visitcodelist was n->walkgen+1.
+		// If visitcodelist found its way back to n->walkgen, then this
+		// block is a set of mutually recursive functions.
+		// Otherwise it's just a lone function that does not recurse.
+		recursive := min == n.Walkgen
+
+		// Remove connected component from stack.
+		// Mark walkgen so that future visits return a large number
+		// so as not to affect the caller's min.
+		block := v.stack
+
+		var l *NodeList
+		for l = v.stack; l.N != n; l = l.Next {
+			l.N.Walkgen = ^uint32(0)
+		}
+		n.Walkgen = ^uint32(0)
+		v.stack = l.Next
+		l.Next = nil
+
+		// Run escape analysis on this set of functions.
+		v.analyze(block, recursive)
+	}
+
+	return min
+}
+
+func (v *bottomUpVisitor) visitcodelist(l *NodeList, min uint32) uint32 {
+	for ; l != nil; l = l.Next {
+		min = v.visitcode(l.N, min)
+	}
+	return min
+}
+
+func (v *bottomUpVisitor) visitcode(n *Node, min uint32) uint32 {
+	if n == nil {
+		return min
+	}
+
+	min = v.visitcodelist(n.Ninit, min)
+	min = v.visitcode(n.Left, min)
+	min = v.visitcode(n.Right, min)
+	min = v.visitcodelist(n.List, min)
+	min = v.visitcode(n.Ntest, min)
+	min = v.visitcode(n.Nincr, min)
+	min = v.visitcodelist(n.Nbody, min)
+	min = v.visitcodelist(n.Nelse, min)
+	min = v.visitcodelist(n.Rlist, min)
+
+	if n.Op == OCALLFUNC || n.Op == OCALLMETH {
+		fn := n.Left
+		if n.Op == OCALLMETH {
+			fn = n.Left.Right.Sym.Def
+		}
+		if fn != nil && fn.Op == ONAME && fn.Class == PFUNC && fn.Defn != nil {
+			m := v.visit(fn.Defn)
+			if m < min {
+				min = m
+			}
+		}
+	}
+
+	if n.Op == OCLOSURE {
+		m := v.visit(n.Closure)
+		if m < min {
+			min = m
+		}
+	}
+
+	return min
+}
+
+// Escape analysis.
+
+// An escape analysis pass for a set of functions.
+// The analysis assumes that closures and the functions in which they
+// appear are analyzed together, so that the aliasing between their
+// variables can be modeled more precisely.
+//
+// First escfunc, esc and escassign recurse over the AST of each
+// function to dig out flow(dst,src) edges between any
+// pointer-containing nodes and store them in dst->escflowsrc.  For
+// variables assigned to a variable in an outer scope or used as a
+// return value, a flow(theSink, src) edge to a fake node
+// 'the Sink' is stored.  For variables referenced in closures, an edge
+// flow(closure, &var) is recorded, and the flow of a closure itself to
+// an outer scope is tracked the same way as other variables.
+//
+// Then escflood walks the graph starting at theSink and tags all
+// variables whose address (& node) it can reach as escaping and all
+// function parameters it can reach as leaking.
+//
+// If a value's address is taken but the address does not escape,
+// then the value can stay on the stack.  If the value new(T) does
+// not escape, then new(T) can be rewritten into a stack allocation.
+// The same is true of slice literals.
+//
+// If optimizations are disabled (-N), this code is not used.
+// Instead, the compiler assumes that any value whose address
+// is taken without being immediately dereferenced
+// needs to be moved to the heap, and new(T) and slice
+// literals are always real allocations.
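+//
+// For example (an illustrative sketch, not from the original comment):
+//
+//	func f() *int { x := 0; return &x }          // &x reaches a sink: x moves to the heap
+//	func g() int { x := 0; p := &x; return *p }  // &x stays local: x can stay on the stack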
+
+func escapes(all *NodeList) {
+	visitBottomUp(all, escAnalyze)
+}
+
+const (
+	EscFuncUnknown = 0 + iota
+	EscFuncPlanned
+	EscFuncStarted
+	EscFuncTagged
+)
+
+// There appear to be some loops in the escape graph, causing
+// arbitrary recursion into deeper and deeper levels.
+// Cut this off safely by making minLevel sticky: once you
+// get that deep, you cannot go down any further but you also
+// cannot go up any further. This is a conservative fix.
+// Making minLevel smaller (more negative) would handle more
+// complex chains of indirections followed by address-of operations,
+// at the cost of repeating the traversal once for each additional
+// allowed level when a loop is encountered. Using -2 suffices to
+// pass all the tests we have written so far, which we assume matches
+// the level of complexity we want the escape analysis code to handle.
+const (
+	MinLevel = -2
+)
+
+// A Level encodes the reference state and context applied to
+// (stack, heap) allocated memory.
+//
+// value is the overall sum of *(1) and &(-1) operations encountered
+// along a path from a destination (sink, return value) to a source
+// (allocation, parameter).
+//
+// suffixValue is the maximum-copy-started-suffix-level applied to a sink.
+// For example:
+// sink = x.left.left --> level=2, x is dereferenced twice and does not escape to sink.
+// sink = &Node{x} --> level=-1, x is accessible from sink via one "address of"
+// sink = &Node{&Node{x}} --> level=-2, x is accessible from sink via two "address of"
+// sink = &Node{&Node{x.left}} --> level=-1, but x is NOT accessible from sink because it was indirected and then copied.
+// (The copy operations are sometimes implicit in the source code; in this case,
+// the value of x.left was copied into a field of a newly allocated Node.)
+//
+// There's one of these for each Node, and the integer values
+// rarely exceed even what can be stored in 4 bits, never mind 8.
+type Level struct {
+	value, suffixValue int8
+}
+
+func (l Level) int() int {
+	return int(l.value)
+}
+
+func levelFrom(i int) Level {
+	if i <= MinLevel {
+		return Level{value: MinLevel}
+	}
+	return Level{value: int8(i)}
+}
+
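+// satInc8 returns x+1, saturating at the int8 maximum instead of overflowing.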
+func satInc8(x int8) int8 {
+	if x == 127 {
+		return 127
+	}
+	return x + 1
+}
+
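+// satAdd8 returns x+y, saturating at the int8 bounds: overflow is impossible
+// when x and y have opposite signs (x^y < 0), and did not happen when the
+// sum keeps x's sign (x^z >= 0); otherwise the result saturates toward x's sign.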
+func satAdd8(x, y int8) int8 {
+	z := x + y
+	if x^y < 0 || x^z >= 0 {
+		return z
+	}
+	if x < 0 {
+		return -128
+	}
+	return 127
+}
+
+func min8(a, b int8) int8 {
+	if a < b {
+		return a
+	}
+	return b
+}
+
+func max8(a, b int8) int8 {
+	if a > b {
+		return a
+	}
+	return b
+}
+
+// inc returns the level l + 1, representing the effect of an indirect (*) operation.
+func (l Level) inc() Level {
+	if l.value <= MinLevel {
+		return Level{value: MinLevel}
+	}
+	return Level{value: satInc8(l.value), suffixValue: satInc8(l.suffixValue)}
+}
+
+// dec returns the level l - 1, representing the effect of an address-of (&) operation.
+func (l Level) dec() Level {
+	if l.value <= MinLevel {
+		return Level{value: MinLevel}
+	}
+	return Level{value: l.value - 1, suffixValue: l.suffixValue - 1}
+}
+
+// copy returns the level for a copy of a value with level l.
+func (l Level) copy() Level {
+	return Level{value: l.value, suffixValue: max8(l.suffixValue, 0)}
+}
+
+func (l1 Level) min(l2 Level) Level {
+	return Level{
+		value:       min8(l1.value, l2.value),
+		suffixValue: min8(l1.suffixValue, l2.suffixValue)}
+}
+
+// guaranteedDereference returns the number of dereferences
+// applied to a pointer before addresses are taken/generated.
+// This is the maximum level computed from path suffixes starting
+// with copies where paths flow from destination to source.
+func (l Level) guaranteedDereference() int {
+	return int(l.suffixValue)
+}
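+
+// A minimal sketch of the level arithmetic above (illustrative only):
+//
+//	l := levelFrom(0) // Level{value: 0, suffixValue: 0}
+//	l = l.dec()       // address-of: Level{value: -1, suffixValue: -1}
+//	l = l.inc()       // dereference: back to Level{value: 0, suffixValue: 0}
+//	l = l.copy()      // a copy clamps suffixValue to at least 0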
+
+// Escape constants are numbered in order of increasing "escapiness"
+// to help make inferences monotonic.  With the exception of
+// EscNever, which is sticky, eX < eY means that eY is more exposed
+// than eX, and hence replaces it in a conservative analysis.
+const (
+	EscUnknown = iota
+	EscNone    // Does not escape to heap, result, or parameters.
+	EscReturn  // Is returned or reachable from returned.
+	EscScope   // Allocated in an inner loop scope, assigned to an outer loop scope,
+	// which allows the construction of non-escaping but arbitrarily large linked
+	// data structures (i.e., not eligible for allocation in a fixed-size stack frame).
+	EscHeap           // Reachable from the heap
+	EscNever          // By construction will not escape.
+	EscBits           = 3
+	EscMask           = (1 << EscBits) - 1
+	EscContentEscapes = 1 << EscBits // value obtained by indirect of parameter escapes to heap
+	EscReturnBits     = EscBits + 1
+	// Node.esc encoding = | escapeReturnEncoding:(width-4) | contentEscapes:1 | escEnum:3
+)
+
+// escMax returns the maximum of an existing escape value
+// (and its additional parameter flow flags) and a new escape type.
+func escMax(e, etype uint16) uint16 {
+	if e&EscMask >= EscScope {
+		// normalize
+		if e&^EscMask != 0 {
+			Fatal("Escape information had unexpected return encoding bits (w/ EscScope, EscHeap, EscNever), e&EscMask=%v", e&EscMask)
+		}
+	}
+	if e&EscMask > etype {
+		return e
+	}
+	if etype == EscNone || etype == EscReturn {
+		return (e &^ EscMask) | etype
+	}
+	return etype
+}
+
+// For each input parameter to a function, the escapeReturnEncoding describes
+// how the parameter may leak to the function's outputs.  This is currently the
+// "level" of the leak, where level is 0 or larger (a negative level means stored into
+// something whose address is returned -- but that implies stored into the heap,
+// hence EscHeap, which means that the details are not currently relevant).
+const (
+	bitsPerOutputInTag = 3                                         // For each output, the number of bits for a tag
+	bitsMaskForTag     = uint16(1<<bitsPerOutputInTag) - 1         // The bit mask to extract a single tag.
+	outputsPerTag      = (16 - EscReturnBits) / bitsPerOutputInTag // The number of outputs that can be tagged.
+	maxEncodedLevel    = int(bitsMaskForTag - 1)                   // The largest level that can be stored in a tag.
+)
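+
+// A worked example of the encoding (illustrative, using the constants above):
+// the tag value 0x12 = 0b10010 decodes as escEnum = 0b010 (EscReturn) with the
+// contentEscapes bit clear, and (0x12 >> EscReturnBits) & bitsMaskForTag = 1,
+// i.e. direct (0-level) flow from this parameter to the first result.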
+
+type EscState struct {
+	// Fake node that all
+	//   - return values and output variables
+	//   - parameters on imported functions not marked 'safe'
+	//   - assignments to global variables
+	// flow to.
+	theSink Node
+
+	dsts      *NodeList // all dst nodes
+	loopdepth int32     // for detecting nested loop scopes
+	pdepth    int       // for debug printing in recursions.
+	dstcount  int       // diagnostic
+	edgecount int       // diagnostic
+	noesc     *NodeList // list of possible non-escaping nodes, for printing
+	recursive bool      // recursive function or group of mutually recursive functions.
+}
+
+// funcSym returns n.Nname.Sym if no nils are encountered along the way.
+func funcSym(n *Node) *Sym {
+	if n == nil || n.Nname == nil {
+		return nil
+	}
+	return n.Nname.Sym
+}
+
+// curfnSym returns n.Curfn.Nname.Sym if no nils are encountered along the way.
+func curfnSym(n *Node) *Sym {
+	return funcSym(n.Curfn)
+}
+
+func escAnalyze(all *NodeList, recursive bool) {
+	var es EscState
+	e := &es
+	e.theSink.Op = ONAME
+	e.theSink.Orig = &e.theSink
+	e.theSink.Class = PEXTERN
+	e.theSink.Sym = Lookup(".sink")
+	e.theSink.Escloopdepth = -1
+	e.recursive = recursive
+
+	for l := all; l != nil; l = l.Next {
+		if l.N.Op == ODCLFUNC {
+			l.N.Esc = EscFuncPlanned
+		}
+	}
+
+	// flow-analyze functions
+	for l := all; l != nil; l = l.Next {
+		if l.N.Op == ODCLFUNC {
+			escfunc(e, l.N)
+		}
+	}
+
+	// print("escapes: %d e->dsts, %d edges\n", e->dstcount, e->edgecount);
+
+	// visit the upstream of each dst, mark address nodes with
+	// addrescapes, mark parameters unsafe
+	for l := e.dsts; l != nil; l = l.Next {
+		escflood(e, l.N)
+	}
+
+	// for all top level functions, tag the typenodes corresponding to the param nodes
+	for l := all; l != nil; l = l.Next {
+		if l.N.Op == ODCLFUNC {
+			esctag(e, l.N)
+		}
+	}
+
+	if Debug['m'] != 0 {
+		for l := e.noesc; l != nil; l = l.Next {
+			if l.N.Esc == EscNone {
+				Warnl(int(l.N.Lineno), "%v %v does not escape", curfnSym(l.N), Nconv(l.N, obj.FmtShort))
+			}
+		}
+	}
+}
+
+func escfunc(e *EscState, func_ *Node) {
+	//	print("escfunc %N %s\n", func->nname, e->recursive?"(recursive)":"");
+
+	if func_.Esc != EscFuncPlanned {
+		Fatal("repeat escfunc %v", func_.Nname)
+	}
+	func_.Esc = EscFuncStarted
+
+	saveld := e.loopdepth
+	e.loopdepth = 1
+	savefn := Curfn
+	Curfn = func_
+
+	for ll := Curfn.Func.Dcl; ll != nil; ll = ll.Next {
+		if ll.N.Op != ONAME {
+			continue
+		}
+		switch ll.N.Class {
+		// out params are in a loopdepth between the sink and all local variables
+		case PPARAMOUT:
+			ll.N.Escloopdepth = 0
+
+		case PPARAM:
+			ll.N.Escloopdepth = 1
+			if ll.N.Type != nil && !haspointers(ll.N.Type) {
+				break
+			}
+			if Curfn.Nbody == nil && !Curfn.Noescape {
+				ll.N.Esc = EscHeap
+			} else {
+				ll.N.Esc = EscNone // prime for escflood later
+			}
+			e.noesc = list(e.noesc, ll.N)
+		}
+	}
+
+	// in a mutually recursive group we lose track of the return values
+	if e.recursive {
+		for ll := Curfn.Func.Dcl; ll != nil; ll = ll.Next {
+			if ll.N.Op == ONAME && ll.N.Class == PPARAMOUT {
+				escflows(e, &e.theSink, ll.N)
+			}
+		}
+	}
+
+	escloopdepthlist(e, Curfn.Nbody)
+	esclist(e, Curfn.Nbody, Curfn)
+	Curfn = savefn
+	e.loopdepth = saveld
+}
+
+// Mark labels that have no backjumps to them as not increasing e->loopdepth.
+// Walk hasn't generated (goto|label)->left->sym->label yet, so we'll cheat
+// and set it to one of the following two.  Then in esc we'll clear it again.
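+//
+// For example (illustrative):
+//
+//	L1:             // no later goto L1: stays nonlooping, loopdepth unchanged
+//		...
+//	L2:
+//		...
+//		goto L2 // backjump to an already-seen label: L2 becomes looping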
+var looping Label
+
+var nonlooping Label
+
+func escloopdepthlist(e *EscState, l *NodeList) {
+	for ; l != nil; l = l.Next {
+		escloopdepth(e, l.N)
+	}
+}
+
+func escloopdepth(e *EscState, n *Node) {
+	if n == nil {
+		return
+	}
+
+	escloopdepthlist(e, n.Ninit)
+
+	switch n.Op {
+	case OLABEL:
+		if n.Left == nil || n.Left.Sym == nil {
+			Fatal("esc:label without label: %v", Nconv(n, obj.FmtSign))
+		}
+
+		// Walk will complain about this label being already defined, but that's not until
+		// after escape analysis. In the future, maybe pull label & goto analysis out of walk and put it before esc.
+		// if(n->left->sym->label != nil)
+		//	fatal("escape analysis messed up analyzing label: %+N", n);
+		n.Left.Sym.Label = &nonlooping
+
+	case OGOTO:
+		if n.Left == nil || n.Left.Sym == nil {
+			Fatal("esc:goto without label: %v", Nconv(n, obj.FmtSign))
+		}
+
+		// If we come past one that's uninitialized, this must be a (harmless) forward jump
+		// but if it's set to nonlooping the label must have preceded this goto.
+		if n.Left.Sym.Label == &nonlooping {
+			n.Left.Sym.Label = &looping
+		}
+	}
+
+	escloopdepth(e, n.Left)
+	escloopdepth(e, n.Right)
+	escloopdepthlist(e, n.List)
+	escloopdepth(e, n.Ntest)
+	escloopdepth(e, n.Nincr)
+	escloopdepthlist(e, n.Nbody)
+	escloopdepthlist(e, n.Nelse)
+	escloopdepthlist(e, n.Rlist)
+}
+
+func esclist(e *EscState, l *NodeList, up *Node) {
+	for ; l != nil; l = l.Next {
+		esc(e, l.N, up)
+	}
+}
+
+func esc(e *EscState, n *Node, up *Node) {
+	if n == nil {
+		return
+	}
+
+	lno := int(setlineno(n))
+
+	// ninit logically runs at a different loopdepth than the rest of the for loop.
+	esclist(e, n.Ninit, n)
+
+	if n.Op == OFOR || n.Op == ORANGE {
+		e.loopdepth++
+	}
+
+	// type switch variables have no ODCL.
+	// process type switch as declaration.
+	// must happen before processing of switch body,
+	// so before recursion.
+	if n.Op == OSWITCH && n.Ntest != nil && n.Ntest.Op == OTYPESW {
+		for ll := n.List; ll != nil; ll = ll.Next { // cases
+
+			// ll->n->nname is the variable per case
+			if ll.N.Nname != nil {
+				ll.N.Nname.Escloopdepth = e.loopdepth
+			}
+		}
+	}
+
+	esc(e, n.Left, n)
+	esc(e, n.Right, n)
+	esc(e, n.Ntest, n)
+	esc(e, n.Nincr, n)
+	esclist(e, n.Nbody, n)
+	esclist(e, n.Nelse, n)
+	esclist(e, n.List, n)
+	esclist(e, n.Rlist, n)
+
+	if n.Op == OFOR || n.Op == ORANGE {
+		e.loopdepth--
+	}
+
+	if Debug['m'] > 1 {
+		fmt.Printf("%v:[%d] %v esc: %v\n", Ctxt.Line(int(lineno)), e.loopdepth, funcSym(Curfn), n)
+	}
+
+	switch n.Op {
+	// Record loop depth at declaration.
+	case ODCL:
+		if n.Left != nil {
+			n.Left.Escloopdepth = e.loopdepth
+		}
+
+	case OLABEL:
+		if n.Left.Sym.Label == &nonlooping {
+			if Debug['m'] > 1 {
+				fmt.Printf("%v:%v non-looping label\n", Ctxt.Line(int(lineno)), n)
+			}
+		} else if n.Left.Sym.Label == &looping {
+			if Debug['m'] > 1 {
+				fmt.Printf("%v: %v looping label\n", Ctxt.Line(int(lineno)), n)
+			}
+			e.loopdepth++
+		}
+
+		// See case OLABEL in escloopdepth above
+		// else if(n->left->sym->label == nil)
+		//	fatal("escape analysis missed or messed up a label: %+N", n);
+
+		n.Left.Sym.Label = nil
+
+		// Everything but fixed array is a dereference.
+	case ORANGE:
+		if n.List != nil && n.List.Next != nil {
+			if Isfixedarray(n.Type) {
+				escassign(e, n.List.Next.N, n.Right)
+			} else {
+				escassign(e, n.List.Next.N, addDereference(n.Right))
+			}
+		}
+
+	case OSWITCH:
+		if n.Ntest != nil && n.Ntest.Op == OTYPESW {
+			for ll := n.List; ll != nil; ll = ll.Next { // cases
+
+				// ntest->right is the argument of the .(type),
+				// ll->n->nname is the variable per case
+				escassign(e, ll.N.Nname, n.Ntest.Right)
+			}
+		}
+
+		// Filter out the following special case.
+	//
+	//	func (b *Buffer) Foo() {
+	//		n, m := ...
+	//		b.buf = b.buf[n:m]
+	//	}
+	//
+	// This assignment is a no-op for escape analysis,
+	// it does not store any new pointers into b that were not already there.
+	// However, without this special case b will escape, because we assign to OIND/ODOTPTR.
+	case OAS, OASOP, OASWB:
+		if (n.Left.Op == OIND || n.Left.Op == ODOTPTR) && n.Left.Left.Op == ONAME && // dst is ONAME dereference
+			(n.Right.Op == OSLICE || n.Right.Op == OSLICE3 || n.Right.Op == OSLICESTR) && // src is slice operation
+			(n.Right.Left.Op == OIND || n.Right.Left.Op == ODOTPTR) && n.Right.Left.Left.Op == ONAME && // slice is applied to ONAME dereference
+			n.Left.Left == n.Right.Left.Left { // dst and src reference the same base ONAME
+
+			// Here we also assume that the statement will not contain calls,
+			// that is, that order will move any calls to init.
+			// Otherwise base ONAME value could change between the moments
+			// when we evaluate it for dst and for src.
+			//
+			// Note, this optimization does not apply to OSLICEARR,
+			// because it does introduce a new pointer into b that was not already there
+			// (pointer to b itself). After such assignment, if b contents escape,
+			// b escapes as well. If we ignore such OSLICEARR, we will conclude
+			// that b does not escape when b contents do.
+			if Debug['m'] != 0 {
+				Warnl(int(n.Lineno), "%v ignoring self-assignment to %v", curfnSym(n), Nconv(n.Left, obj.FmtShort))
+			}
+
+			break
+		}
+
+		escassign(e, n.Left, n.Right)
+
+	case OAS2: // x,y = a,b
+		if count(n.List) == count(n.Rlist) {
+			ll := n.List
+			lr := n.Rlist
+			for ; ll != nil; ll, lr = ll.Next, lr.Next {
+				escassign(e, ll.N, lr.N)
+			}
+		}
+
+	case OAS2RECV, // v, ok = <-ch
+		OAS2MAPR,    // v, ok = m[k]
+		OAS2DOTTYPE: // v, ok = x.(type)
+		escassign(e, n.List.N, n.Rlist.N)
+
+	case OSEND: // ch <- x
+		escassign(e, &e.theSink, n.Right)
+
+	case ODEFER:
+		if e.loopdepth == 1 { // top level
+			break
+		}
+		// arguments leak out of scope
+		// TODO: leak to a dummy node instead
+		fallthrough
+
+	case OPROC:
+		// go f(x) - f and x escape
+		escassign(e, &e.theSink, n.Left.Left)
+
+		escassign(e, &e.theSink, n.Left.Right) // ODDDARG for call
+		for ll := n.Left.List; ll != nil; ll = ll.Next {
+			escassign(e, &e.theSink, ll.N)
+		}
+
+	case OCALLMETH, OCALLFUNC, OCALLINTER:
+		esccall(e, n, up)
+
+		// esccall already done on n->rlist->n; tie its escretval to n->list.
+	case OAS2FUNC: // x,y = f()
+		lr := n.Rlist.N.Escretval
+
+		var ll *NodeList
+		for ll = n.List; lr != nil && ll != nil; lr, ll = lr.Next, ll.Next {
+			escassign(e, ll.N, lr.N)
+		}
+		if lr != nil || ll != nil {
+			Fatal("esc oas2func")
+		}
+
+	case ORETURN:
+		ll := n.List
+		if count(n.List) == 1 && Curfn.Type.Outtuple > 1 {
+			// OAS2FUNC in disguise
+			// esccall already done on n->list->n
+			// tie n->list->n->escretval to curfn->dcl PPARAMOUT's
+			ll = n.List.N.Escretval
+		}
+
+		for lr := Curfn.Func.Dcl; lr != nil && ll != nil; lr = lr.Next {
+			if lr.N.Op != ONAME || lr.N.Class != PPARAMOUT {
+				continue
+			}
+			escassign(e, lr.N, ll.N)
+			ll = ll.Next
+		}
+
+		if ll != nil {
+			Fatal("esc return list")
+		}
+
+		// Argument could leak through recover.
+	case OPANIC:
+		escassign(e, &e.theSink, n.Left)
+
+	case OAPPEND:
+		if !n.Isddd {
+			for ll := n.List.Next; ll != nil; ll = ll.Next {
+				escassign(e, &e.theSink, ll.N) // lose track of assign to dereference
+			}
+		} else {
+			// append(slice1, slice2...) -- slice2 itself does not escape, but contents do.
+			slice2 := n.List.Next.N
+			escassign(e, &e.theSink, addDereference(slice2)) // lose track of assign of dereference
+			if Debug['m'] > 2 {
+				Warnl(int(n.Lineno), "%v special treatment of append(slice1, slice2...) %v", curfnSym(n), Nconv(n, obj.FmtShort))
+			}
+		}
+		escassign(e, &e.theSink, addDereference(n.List.N)) // The original elements are now leaked, too
+
+	case OCONV, OCONVNOP:
+		escassign(e, n, n.Left)
+
+	case OCONVIFACE:
+		n.Esc = EscNone // until proven otherwise
+		e.noesc = list(e.noesc, n)
+		n.Escloopdepth = e.loopdepth
+		escassign(e, n, n.Left)
+
+	case OARRAYLIT:
+		if Isslice(n.Type) {
+			// Slice itself is not leaked until proven otherwise
+			n.Esc = EscNone
+			e.noesc = list(e.noesc, n)
+			n.Escloopdepth = e.loopdepth
+		}
+
+		// Link values to array/slice
+		for ll := n.List; ll != nil; ll = ll.Next {
+			escassign(e, n, ll.N.Right)
+		}
+
+		// Link values to struct.
+	case OSTRUCTLIT:
+		for ll := n.List; ll != nil; ll = ll.Next {
+			escassign(e, n, ll.N.Right)
+		}
+
+	case OPTRLIT:
+		n.Esc = EscNone // until proven otherwise
+		e.noesc = list(e.noesc, n)
+		n.Escloopdepth = e.loopdepth
+
+		// Link OSTRUCTLIT to OPTRLIT; if OPTRLIT escapes, OSTRUCTLIT elements do too.
+		escassign(e, n, n.Left)
+
+	case OCALLPART:
+		n.Esc = EscNone // until proven otherwise
+		e.noesc = list(e.noesc, n)
+		n.Escloopdepth = e.loopdepth
+
+		// Contents make it to memory, lose track.
+		escassign(e, &e.theSink, n.Left)
+
+	case OMAPLIT:
+		n.Esc = EscNone // until proven otherwise
+		e.noesc = list(e.noesc, n)
+		n.Escloopdepth = e.loopdepth
+
+		// Keys and values make it to memory, lose track.
+		for ll := n.List; ll != nil; ll = ll.Next {
+			escassign(e, &e.theSink, ll.N.Left)
+			escassign(e, &e.theSink, ll.N.Right)
+		}
+
+		// Link addresses of captured variables to closure.
+	case OCLOSURE:
+		var a *Node
+		var v *Node
+		for ll := n.Func.Cvars; ll != nil; ll = ll.Next {
+			v = ll.N
+			if v.Op == OXXX { // unnamed out argument; see dcl.c:/^funcargs
+				continue
+			}
+			a = v.Closure
+			if !v.Name.Byval {
+				a = Nod(OADDR, a, nil)
+				a.Lineno = v.Lineno
+				a.Escloopdepth = e.loopdepth
+				typecheck(&a, Erv)
+			}
+
+			escassign(e, n, a)
+		}
+		fallthrough
+
+	case OMAKECHAN,
+		OMAKEMAP,
+		OMAKESLICE,
+		ONEW,
+		OARRAYRUNESTR,
+		OARRAYBYTESTR,
+		OSTRARRAYRUNE,
+		OSTRARRAYBYTE,
+		ORUNESTR:
+		n.Escloopdepth = e.loopdepth
+
+		n.Esc = EscNone // until proven otherwise
+		e.noesc = list(e.noesc, n)
+
+	case OADDSTR:
+		n.Escloopdepth = e.loopdepth
+		n.Esc = EscNone // until proven otherwise
+		e.noesc = list(e.noesc, n)
+
+	// Arguments of OADDSTR do not escape.
+
+	case OADDR:
+		n.Esc = EscNone // until proven otherwise
+		e.noesc = list(e.noesc, n)
+
+		// current loop depth is an upper bound on actual loop depth
+		// of addressed value.
+		n.Escloopdepth = e.loopdepth
+
+		// for &x, use loop depth of x if known.
+		// it should always be known, but if not, be conservative
+		// and keep the current loop depth.
+		if n.Left.Op == ONAME {
+			switch n.Left.Class {
+			case PAUTO:
+				if n.Left.Escloopdepth != 0 {
+					n.Escloopdepth = n.Left.Escloopdepth
+				}
+
+				// PPARAM is loop depth 1 always.
+			// PPARAMOUT is loop depth 0 for writes
+			// but considered loop depth 1 for address-of,
+			// so that writing the address of one result
+			// to another (or the same) result makes the
+			// first result move to the heap.
+			case PPARAM, PPARAMOUT:
+				n.Escloopdepth = 1
+			}
+		}
+	}
+
+	lineno = int32(lno)
+}
+
+// Assert that expr somehow gets assigned to dst, if non-nil.  For
+// dst==nil, any name node expr still must be marked as being
+// evaluated in curfn.  For expr==nil, dst must still be examined for
+// evaluations inside it (e.g., *f(x) = y).
+func escassign(e *EscState, dst *Node, src *Node) {
+	if isblank(dst) || dst == nil || src == nil || src.Op == ONONAME || src.Op == OXXX {
+		return
+	}
+
+	if Debug['m'] > 1 {
+		fmt.Printf("%v:[%d] %v escassign: %v(%v)[%v] = %v(%v)[%v]\n",
+			Ctxt.Line(int(lineno)), e.loopdepth, funcSym(Curfn),
+			Nconv(dst, obj.FmtShort), Jconv(dst, obj.FmtShort), Oconv(int(dst.Op), 0),
+			Nconv(src, obj.FmtShort), Jconv(src, obj.FmtShort), Oconv(int(src.Op), 0))
+	}
+
+	setlineno(dst)
+
+	// Analyze lhs of assignment.
+	// Replace dst with e->theSink if we can't track it.
+	switch dst.Op {
+	default:
+		Dump("dst", dst)
+		Fatal("escassign: unexpected dst")
+
+	case OARRAYLIT,
+		OCLOSURE,
+		OCONV,
+		OCONVIFACE,
+		OCONVNOP,
+		OMAPLIT,
+		OSTRUCTLIT,
+		OPTRLIT,
+		OCALLPART:
+		break
+
+	case ONAME:
+		if dst.Class == PEXTERN {
+			dst = &e.theSink
+		}
+
+	case ODOT: // treat "dst.x  = src" as "dst = src"
+		escassign(e, dst.Left, src)
+
+		return
+
+	case OINDEX:
+		if Isfixedarray(dst.Left.Type) {
+			escassign(e, dst.Left, src)
+			return
+		}
+
+		dst = &e.theSink // lose track of dereference
+
+	case OIND, ODOTPTR:
+		dst = &e.theSink // lose track of dereference
+
+		// lose track of key and value
+	case OINDEXMAP:
+		escassign(e, &e.theSink, dst.Right)
+
+		dst = &e.theSink
+	}
+
+	lno := int(setlineno(src))
+	e.pdepth++
+
+	switch src.Op {
+	case OADDR, // dst = &x
+		OIND,    // dst = *x
+		ODOTPTR, // dst = (*x).f
+		ONAME,
+		OPARAM,
+		ODDDARG,
+		OPTRLIT,
+		OARRAYLIT,
+		OMAPLIT,
+		OSTRUCTLIT,
+		OMAKECHAN,
+		OMAKEMAP,
+		OMAKESLICE,
+		OARRAYRUNESTR,
+		OARRAYBYTESTR,
+		OSTRARRAYRUNE,
+		OSTRARRAYBYTE,
+		OADDSTR,
+		ONEW,
+		OCALLPART,
+		ORUNESTR,
+		OCONVIFACE:
+		escflows(e, dst, src)
+
+	case OCLOSURE:
+		// OCLOSURE is lowered to OPTRLIT,
+		// insert OADDR to account for the additional indirection.
+		a := Nod(OADDR, src, nil)
+		a.Lineno = src.Lineno
+		a.Escloopdepth = src.Escloopdepth
+		a.Type = Ptrto(src.Type)
+		escflows(e, dst, a)
+
+	// Flowing multiple returns to a single dst happens when
+	// analyzing "go f(g())": here g() flows to sink (issue 4529).
+	case OCALLMETH, OCALLFUNC, OCALLINTER:
+		for ll := src.Escretval; ll != nil; ll = ll.Next {
+			escflows(e, dst, ll.N)
+		}
+
+		// A non-pointer escaping from a struct does not concern us.
+	case ODOT:
+		if src.Type != nil && !haspointers(src.Type) {
+			break
+		}
+		fallthrough
+
+		// Conversions, field access, slice all preserve the input value.
+	case OCONV,
+		OCONVNOP,
+		ODOTMETH,
+		// treat recv.meth as a value with recv in it, only happens in ODEFER and OPROC
+		// iface.method already leaks iface in esccall, no need to put in extra ODOTINTER edge here
+		ODOTTYPE,
+		ODOTTYPE2,
+		OSLICE,
+		OSLICE3,
+		OSLICEARR,
+		OSLICE3ARR,
+		OSLICESTR:
+		// Conversions, field access, slice all preserve the input value.
+		escassign(e, dst, src.Left)
+
+	case OAPPEND:
+		// Append returns first argument.
+		// Subsequent arguments are already leaked because they are operands to append.
+		escassign(e, dst, src.List.N)
+
+	case OINDEX:
+		// Index of array preserves input value.
+		if Isfixedarray(src.Left.Type) {
+			escassign(e, dst, src.Left)
+		} else {
+			escflows(e, dst, src)
+		}
+
+		// Might be pointer arithmetic, in which case
+	// the operands flow into the result.
+	// TODO(rsc): Decide what the story is here.  This is unsettling.
+	case OADD,
+		OSUB,
+		OOR,
+		OXOR,
+		OMUL,
+		ODIV,
+		OMOD,
+		OLSH,
+		ORSH,
+		OAND,
+		OANDNOT,
+		OPLUS,
+		OMINUS,
+		OCOM:
+		escassign(e, dst, src.Left)
+
+		escassign(e, dst, src.Right)
+	}
+
+	e.pdepth--
+	lineno = int32(lno)
+}
+
+// The common case for escapes is 16 bits 000000000xxxEEEE,
+// where the commonest cases for xxx, encoding in-to-out pointer flow,
+// are 000, 001, 010, 011, and EEEE is the computed Esc bits.
+// Note width of xxx depends on value of constant
+// bitsPerOutputInTag -- expect 2 or 3, so in practice the
+// tag cache array is 64 or 128 long.  Some entries will
+// never be populated.
+var tags [1 << (bitsPerOutputInTag + EscReturnBits)]string
+
+// mktag returns the string representation for an escape analysis tag.
+func mktag(mask int) *string {
+	switch mask & EscMask {
+	case EscNone, EscReturn:
+		break
+
+	default:
+		Fatal("escape mktag")
+	}
+
+	if mask < len(tags) && tags[mask] != "" {
+		return &tags[mask]
+	}
+
+	s := fmt.Sprintf("esc:0x%x", mask)
+	if mask < len(tags) {
+		tags[mask] = s
+	}
+	return &s
+}
+
+// parsetag decodes an escape analysis tag and returns the esc value.
+func parsetag(note *string) uint16 {
+	if note == nil || !strings.HasPrefix(*note, "esc:") {
+		return EscUnknown
+	}
+	em := uint16(atoi((*note)[4:]))
+	if em == 0 {
+		return EscNone
+	}
+	return em
+}
+
+// describeEscape returns a string describing the escape tag.
+// The result is either one of {EscUnknown, EscNone, EscHeap} which all have no further annotation
+// or a description of parameter flow, which takes the form of an optional "contentToHeap"
+// indicating that the content of this parameter is leaked to the heap, followed by a sequence
+// of level encodings separated by spaces, one for each parameter, where _ means no flow,
+// = means direct flow, and N asterisks (*) encodes content (obtained by indirection) flow.
+// e.g., "contentToHeap _ =" means that a parameter's content (one or more dereferences)
+// escapes to the heap, the parameter does not leak to the first output, but does leak directly
+// to the second output (and if there are more than two outputs, there is no flow to those.)
+func describeEscape(em uint16) string {
+	var s string
+	if em&EscMask == EscUnknown {
+		s = "EscUnknown"
+	}
+	if em&EscMask == EscNone {
+		s = "EscNone"
+	}
+	if em&EscMask == EscHeap {
+		s = "EscHeap"
+	}
+	if em&EscMask == EscReturn {
+		s = "EscReturn"
+	}
+	if em&EscMask == EscScope {
+		s = "EscScope"
+	}
+	if em&EscContentEscapes != 0 {
+		if s != "" {
+			s += " "
+		}
+		s += "contentToHeap"
+	}
+	for em >>= EscReturnBits; em != 0; em = em >> bitsPerOutputInTag {
+		// See encoding description above
+		if s != "" {
+			s += " "
+		}
+		switch embits := em & bitsMaskForTag; embits {
+		case 0:
+			s += "_"
+		case 1:
+			s += "="
+		default:
+			for i := uint16(0); i < embits-1; i++ {
+				s += "*"
+			}
+		}
+
+	}
+	return s
+}
+
+// escassignfromtag models the input-to-output assignment flow of one of a function
+// call's arguments, where the flow is encoded in "note".
+func escassignfromtag(e *EscState, note *string, dsts *NodeList, src *Node) uint16 {
+	em := parsetag(note)
+
+	if Debug['m'] > 2 {
+		fmt.Printf("%v::assignfromtag:: src=%v, em=%s\n",
+			Ctxt.Line(int(lineno)), Nconv(src, obj.FmtShort), describeEscape(em))
+	}
+
+	if em == EscUnknown {
+		escassign(e, &e.theSink, src)
+		return em
+	}
+
+	if em == EscNone {
+		return em
+	}
+
+	// If content inside parameter (reached via indirection)
+	// escapes to heap, mark as such.
+	if em&EscContentEscapes != 0 {
+		escassign(e, &e.theSink, addDereference(src))
+	}
+
+	em0 := em
+	for em >>= EscReturnBits; em != 0 && dsts != nil; em, dsts = em>>bitsPerOutputInTag, dsts.Next {
+		// Prefer the lowest-level path to the reference (for escape purposes).
+		// Two-bit encoding, for example (1, 3, and 4 bits are other options):
+		//  01 = 0-level
+		//  10 = 1-level, (content escapes),
+		//  11 = 2-level, (content of content escapes),
+		embits := em & bitsMaskForTag
+		if embits > 0 {
+			n := src
+			for i := uint16(0); i < embits-1; i++ {
+				n = addDereference(n) // encode level>0 as indirections
+			}
+			escassign(e, dsts.N, n)
+		}
+	}
+	// If there are too many outputs to fit in the tag,
+	// that is handled at the encoding end as EscHeap,
+	// so there is no need to check here.
+
+	if em != 0 && dsts == nil {
+		Fatal("corrupt esc tag %q or messed up escretval list\n", note)
+	}
+	return em0
+}
+
+// addDereference constructs a suitable OIND note applied to src.
+// Because this is for purposes of escape accounting, not execution,
+// some semantically dubious node combinations are (currently) possible.
+func addDereference(n *Node) *Node {
+	ind := Nod(OIND, n, nil)
+	ind.Escloopdepth = n.Escloopdepth
+	ind.Lineno = n.Lineno
+	t := n.Type
+	if Istype(t, Tptr) {
+		// This should model our own sloppy use of OIND to encode
+		// decreasing levels of indirection; i.e., "indirecting" an array
+		// might yield the type of an element.  To be enhanced...
+		t = t.Type
+	}
+	ind.Type = t
+	return ind
+}
+
+// escNoteOutputParamFlow encodes maxEncodedLevel/.../1/0-level flow to the vargen'th output parameter.
+// Levels greater than maxEncodedLevel are replaced with maxEncodedLevel.
+// If the encoding cannot describe the modified input level and output number, then EscHeap is returned.
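+//
+// For example (illustrative): with bitsPerOutputInTag = 3, direct (0-level)
+// flow to the first output (vargen = 1) stores encoded = 1 at shift
+// EscReturnBits, so an input tagged EscReturn that also flows directly to
+// the first result carries e = EscReturn | 1<<EscReturnBits = 0x12.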
+func escNoteOutputParamFlow(e uint16, vargen int32, level Level) uint16 {
+	// Flow+level is encoded in two bits.
+	// 00 = not flow, xx = level+1 for 0 <= level <= maxEncodedLevel
+	// 16 bits for Esc allows 6x2bits or 4x3bits or 3x4bits if additional information would be useful.
+	if level.int() <= 0 && level.guaranteedDereference() > 0 {
+		return escMax(e|EscContentEscapes, EscNone) // At least one deref, thus only content.
+	}
+	if level.int() < 0 {
+		return EscHeap
+	}
+	if level.int() > maxEncodedLevel {
+		// Cannot encode larger values than maxEncodedLevel.
+		level = levelFrom(maxEncodedLevel)
+	}
+	encoded := uint16(level.int() + 1)
+
+	shift := uint(bitsPerOutputInTag*(vargen-1) + EscReturnBits)
+	old := (e >> shift) & bitsMaskForTag
+	if old == 0 || encoded != 0 && encoded < old {
+		old = encoded
+	}
+
+	encodedFlow := old << shift
+	if (encodedFlow>>shift)&bitsMaskForTag != old {
+		// Encoding failure defaults to heap.
+		return EscHeap
+	}
+
+	return (e &^ (bitsMaskForTag << shift)) | encodedFlow
+}
+
+// This is a bit messier than fortunate, pulled out of esc's big
+// switch for clarity.  We either have the paramnodes, which may be
+// connected to other things through flows, or we have the parameter type
+// nodes, which may be marked "noescape". Navigating the AST is slightly
+// different for methods vs. plain functions and for imported vs.
+// this-package functions.
+func esccall(e *EscState, n *Node, up *Node) {
+	var fntype *Type
+
+	var fn *Node
+	switch n.Op {
+	default:
+		Fatal("esccall")
+
+	case OCALLFUNC:
+		fn = n.Left
+		fntype = fn.Type
+
+	case OCALLMETH:
+		fn = n.Left.Right.Sym.Def
+		if fn != nil {
+			fntype = fn.Type
+		} else {
+			fntype = n.Left.Type
+		}
+
+	case OCALLINTER:
+		fntype = n.Left.Type
+	}
+
+	ll := n.List
+	if n.List != nil && n.List.Next == nil {
+		a := n.List.N
+		if a.Type.Etype == TSTRUCT && a.Type.Funarg != 0 { // f(g()).
+			ll = a.Escretval
+		}
+	}
+
+	if fn != nil && fn.Op == ONAME && fn.Class == PFUNC &&
+		fn.Defn != nil && fn.Defn.Nbody != nil && fn.Ntype != nil && fn.Defn.Esc < EscFuncTagged {
+		if Debug['m'] > 2 {
+			fmt.Printf("%v::esccall:: %v in recursive group\n", Ctxt.Line(int(lineno)), Nconv(n, obj.FmtShort))
+		}
+
+		// function in same mutually recursive group.  Incorporate into flow graph.
+		//		print("esc local fn: %N\n", fn->ntype);
+		if fn.Defn.Esc == EscFuncUnknown || n.Escretval != nil {
+			Fatal("graph inconsistency")
+		}
+
+		// set up out list on this call node
+		for lr := fn.Ntype.Rlist; lr != nil; lr = lr.Next {
+			n.Escretval = list(n.Escretval, lr.N.Left) // type.rlist ->  dclfield -> ONAME (PPARAMOUT)
+		}
+
+		// Receiver.
+		if n.Op != OCALLFUNC {
+			escassign(e, fn.Ntype.Left.Left, n.Left.Left)
+		}
+
+		var src *Node
+		for lr := fn.Ntype.List; ll != nil && lr != nil; ll, lr = ll.Next, lr.Next {
+			src = ll.N
+			if lr.N.Isddd && !n.Isddd {
+				// Introduce ODDDARG node to represent ... allocation.
+				src = Nod(ODDDARG, nil, nil)
+
+				src.Type = typ(TARRAY)
+				src.Type.Type = lr.N.Type.Type
+				src.Type.Bound = int64(count(ll))
+				src.Type = Ptrto(src.Type) // make pointer so it will be tracked
+				src.Escloopdepth = e.loopdepth
+				src.Lineno = n.Lineno
+				src.Esc = EscNone // until we find otherwise
+				e.noesc = list(e.noesc, src)
+				n.Right = src
+			}
+
+			if lr.N.Left != nil {
+				escassign(e, lr.N.Left, src)
+			}
+			if src != ll.N {
+				break
+			}
+		}
+
+		// "..." arguments are untracked
+		for ; ll != nil; ll = ll.Next {
+			if Debug['m'] > 2 {
+				fmt.Printf("%v::esccall:: ... <- %v, untracked\n", Ctxt.Line(int(lineno)), Nconv(ll.N, obj.FmtShort))
+			}
+			escassign(e, &e.theSink, ll.N)
+		}
+
+		return
+	}
+
+	// Imported or completely analyzed function.  Use the escape tags.
+	if n.Escretval != nil {
+		Fatal("esc already decorated call %v\n", Nconv(n, obj.FmtSign))
+	}
+
+	if Debug['m'] > 2 {
+		fmt.Printf("%v::esccall:: %v not recursive\n", Ctxt.Line(int(lineno)), Nconv(n, obj.FmtShort))
+	}
+
+	// set up out list on this call node with dummy auto ONAMES in the current (calling) function.
+	i := 0
+
+	var src *Node
+	var buf string
+	for t := getoutargx(fntype).Type; t != nil; t = t.Down {
+		src = Nod(ONAME, nil, nil)
+		buf = fmt.Sprintf(".out%d", i)
+		i++
+		src.Sym = Lookup(buf)
+		src.Type = t.Type
+		src.Class = PAUTO
+		src.Curfn = Curfn
+		src.Escloopdepth = e.loopdepth
+		src.Used = true
+		src.Lineno = n.Lineno
+		n.Escretval = list(n.Escretval, src)
+	}
+
+	//	print("esc analyzed fn: %#N (%+T) returning (%+H)\n", fn, fntype, n->escretval);
+
+	// Receiver.
+	if n.Op != OCALLFUNC {
+		t := getthisx(fntype).Type
+		src := n.Left.Left
+		if haspointers(t.Type) {
+			escassignfromtag(e, t.Note, n.Escretval, src)
+		}
+	}
+
+	var a *Node
+	for t := getinargx(fntype).Type; ll != nil; ll = ll.Next {
+		src = ll.N
+		if t.Isddd && !n.Isddd {
+			// Introduce ODDDARG node to represent ... allocation.
+			src = Nod(ODDDARG, nil, nil)
+
+			src.Escloopdepth = e.loopdepth
+			src.Lineno = n.Lineno
+			src.Type = typ(TARRAY)
+			src.Type.Type = t.Type.Type
+			src.Type.Bound = int64(count(ll))
+			src.Type = Ptrto(src.Type) // make pointer so it will be tracked
+			src.Esc = EscNone          // until we find otherwise
+			e.noesc = list(e.noesc, src)
+			n.Right = src
+		}
+
+		if haspointers(t.Type) {
+			if escassignfromtag(e, t.Note, n.Escretval, src) == EscNone && up.Op != ODEFER && up.Op != OPROC {
+				a = src
+				for a.Op == OCONVNOP {
+					a = a.Left
+				}
+				switch a.Op {
+				// The callee has already been analyzed, so its arguments have esc tags.
+				// The argument is marked as not escaping at all.
+				// Record that fact so that any temporary used for
+				// synthesizing this expression can be reclaimed when
+				// the function returns.
+				// This 'noescape' is even stronger than the usual esc == EscNone.
+				// src->esc == EscNone means that src does not escape the current function.
+				// src->noescape = 1 here means that src does not escape this statement
+				// in the current function.
+				case OCALLPART,
+					OCLOSURE,
+					ODDDARG,
+					OARRAYLIT,
+					OPTRLIT,
+					OSTRUCTLIT:
+					a.Noescape = true
+				}
+			}
+		}
+
+		if src != ll.N {
+			break
+		}
+		t = t.Down
+	}
+
+	// "..." arguments are untracked
+	for ; ll != nil; ll = ll.Next {
+		escassign(e, &e.theSink, ll.N)
+		if Debug['m'] > 2 {
+			fmt.Printf("%v::esccall:: ... <- %v, untracked\n", Ctxt.Line(int(lineno)), Nconv(ll.N, obj.FmtShort))
+		}
+	}
+}
+
+// escflows records the link src->dst in dst, throwing out some quick wins,
+// and also ensuring that dst is noted as a flow destination.
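+// The recorded edges are consumed later by escflood/escwalk, which start
+// at each dst in e.dsts and walk its Escflowsrc list transitively.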
+func escflows(e *EscState, dst *Node, src *Node) {
+	if dst == nil || src == nil || dst == src {
+		return
+	}
+
+	// Don't bother building a graph for scalars.
+	if src.Type != nil && !haspointers(src.Type) {
+		return
+	}
+
+	if Debug['m'] > 2 {
+		fmt.Printf("%v::flows:: %v <- %v\n", Ctxt.Line(int(lineno)), Nconv(dst, obj.FmtShort), Nconv(src, obj.FmtShort))
+	}
+
+	if dst.Escflowsrc == nil {
+		e.dsts = list(e.dsts, dst)
+		e.dstcount++
+	}
+
+	e.edgecount++
+
+	dst.Escflowsrc = list(dst.Escflowsrc, src)
+}
+
+// Whenever we hit a reference node, the level goes up by one, and whenever
+// we hit an OADDR, the level goes down by one. As long as we're on a level > 0,
+// finding an OADDR just means we're following the upstream of a dereference,
+// so this address doesn't leak (yet).
+// If level == 0, it means the /value/ of this node can reach the root of this flood,
+// so if this node is an OADDR, its argument should be marked as escaping iff
+// its curfn/e->loopdepth differ from the flood's root's.
+// Once an object has been moved to the heap, all of its upstream should be considered
+// escaping to the global scope.
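+//
+// For example (sketch): flooding from dst in "dst = &x" reaches the OADDR
+// at level 0, so x must move to the heap if dst outlives it; flooding
+// through "dst = *p" visits p at level 1, so only what p points at, not p
+// itself, is implicated.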
+func escflood(e *EscState, dst *Node) {
+	switch dst.Op {
+	case ONAME, OCLOSURE:
+		break
+
+	default:
+		return
+	}
+
+	if Debug['m'] > 1 {
+		fmt.Printf("\nescflood:%d: dst %v scope:%v[%d]\n", walkgen, Nconv(dst, obj.FmtShort), curfnSym(dst), dst.Escloopdepth)
+	}
+
+	for l := dst.Escflowsrc; l != nil; l = l.Next {
+		walkgen++
+		escwalk(e, levelFrom(0), dst, l.N)
+	}
+}
+
+// funcOutputAndInput reports whether dst and src correspond to output and input parameters of the same function.
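+// For example (illustrative): in func f(in *T) (out *T) { return in },
+// dst is out (PPARAMOUT) and src is in (PPARAM) of the same function.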
+func funcOutputAndInput(dst, src *Node) bool {
+	// Note if dst is marked as escaping, then "returned" is too weak.
+	return dst.Op == ONAME && dst.Class == PPARAMOUT &&
+		src.Op == ONAME && src.Class == PPARAM && src.Curfn == dst.Curfn
+}
+
+func escwalk(e *EscState, level Level, dst *Node, src *Node) {
+	if src.Walkgen == walkgen {
+		// Esclevels are vectors, do not compare as integers,
+		// and must use "min" of old and new to guarantee
+		// convergence.
+		level = level.min(src.Esclevel)
+		if level == src.Esclevel {
+			return
+		}
+	}
+
+	src.Walkgen = walkgen
+	src.Esclevel = level
+
+	if Debug['m'] > 1 {
+		fmt.Printf("escwalk: level:%d depth:%d %.*s op=%v %v(%v) scope:%v[%d]\n",
+			level, e.pdepth, e.pdepth, "\t\t\t\t\t\t\t\t\t\t", Oconv(int(src.Op), 0), Nconv(src, obj.FmtShort), Jconv(src, obj.FmtShort), curfnSym(src), src.Escloopdepth)
+	}
+
+	e.pdepth++
+
+	// Input parameter flowing to output parameter?
+	var leaks bool
+	if funcOutputAndInput(dst, src) && src.Esc&EscMask < EscScope && dst.Esc != EscHeap {
+		// This case handles:
+		// 1. return in
+		// 2. return &in
+		// 3. tmp := in; return &tmp
+		// 4. return *in
+		if Debug['m'] != 0 {
+			if Debug['m'] == 1 {
+				Warnl(int(src.Lineno), "leaking param: %v to result %v level=%v", Nconv(src, obj.FmtShort), dst.Sym, level.int())
+			} else {
+				Warnl(int(src.Lineno), "leaking param: %v to result %v level=%v", Nconv(src, obj.FmtShort), dst.Sym, level)
+			}
+		}
+		if src.Esc&EscMask != EscReturn {
+			src.Esc = EscReturn | src.Esc&EscContentEscapes
+		}
+		src.Esc = escNoteOutputParamFlow(src.Esc, dst.Vargen, level)
+		goto recurse
+	}
+
+	// If parameter content escapes to heap, set EscContentEscapes
+	// Note minor confusion around escape from pointer-to-struct vs escape from struct
+	if dst.Esc == EscHeap &&
+		src.Op == ONAME && src.Class == PPARAM && src.Esc&EscMask < EscScope &&
+		level.int() > 0 {
+		src.Esc = escMax(EscContentEscapes|src.Esc, EscNone)
+		if Debug['m'] != 0 {
+			Warnl(int(src.Lineno), "mark escaped content: %v", Nconv(src, obj.FmtShort))
+		}
+	}
+
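+	// The value leaks if it is reached at level <= 0 (the value itself,
+	// not merely something it points to, with no dereference guaranteed
+	// on the path) and flows to a dst at a shallower loop depth, where it
+	// would outlive its block.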
+	leaks = level.int() <= 0 && level.guaranteedDereference() <= 0 && dst.Escloopdepth < src.Escloopdepth
+
+	switch src.Op {
+	case ONAME:
+		if src.Class == PPARAM && (leaks || dst.Escloopdepth < 0) && src.Esc&EscMask < EscScope {
+			if level.guaranteedDereference() > 0 {
+				src.Esc = escMax(EscContentEscapes|src.Esc, EscNone)
+				if Debug['m'] != 0 {
+					if Debug['m'] == 1 {
+						Warnl(int(src.Lineno), "leaking param content: %v", Nconv(src, obj.FmtShort))
+					} else {
+						Warnl(int(src.Lineno), "leaking param content: %v level=%v dst.eld=%v src.eld=%v dst=%v",
+							Nconv(src, obj.FmtShort), level, dst.Escloopdepth, src.Escloopdepth, Nconv(dst, obj.FmtShort))
+					}
+				}
+			} else {
+				src.Esc = EscScope
+				if Debug['m'] != 0 {
+					if Debug['m'] == 1 {
+						Warnl(int(src.Lineno), "leaking param: %v", Nconv(src, obj.FmtShort))
+					} else {
+						Warnl(int(src.Lineno), "leaking param: %v level=%v dst.eld=%v src.eld=%v dst=%v",
+							Nconv(src, obj.FmtShort), level, dst.Escloopdepth, src.Escloopdepth, Nconv(dst, obj.FmtShort))
+					}
+				}
+			}
+		}
+
+		// Treat a PPARAMREF closure variable as equivalent to the
+		// original variable.
+		if src.Class == PPARAMREF {
+			if leaks && Debug['m'] != 0 {
+				Warnl(int(src.Lineno), "leaking closure reference %v", Nconv(src, obj.FmtShort))
+			}
+			escwalk(e, level, dst, src.Closure)
+		}
+
+	case OPTRLIT, OADDR:
+		if leaks {
+			src.Esc = EscHeap
+			addrescapes(src.Left)
+			if Debug['m'] != 0 {
+				p := src
+				if p.Left.Op == OCLOSURE {
+					p = p.Left // merely to satisfy error messages in tests
+				}
+				if Debug['m'] > 1 {
+					Warnl(int(src.Lineno), "%v escapes to heap, level=%v, dst.eld=%v, src.eld=%v",
+						Nconv(p, obj.FmtShort), level, dst.Escloopdepth, src.Escloopdepth)
+				} else {
+					Warnl(int(src.Lineno), "%v escapes to heap", Nconv(p, obj.FmtShort))
+				}
+			}
+		}
+
+		escwalk(e, level.dec(), dst, src.Left)
+
+	case OAPPEND:
+		escwalk(e, level, dst, src.List.N)
+
+	case OARRAYLIT:
+		if Isfixedarray(src.Type) {
+			break
+		}
+		for ll := src.List; ll != nil; ll = ll.Next {
+			escwalk(e, level.dec(), dst, ll.N.Right)
+		}
+
+		fallthrough
+
+	case ODDDARG,
+		OMAKECHAN,
+		OMAKEMAP,
+		OMAKESLICE,
+		OARRAYRUNESTR,
+		OARRAYBYTESTR,
+		OSTRARRAYRUNE,
+		OSTRARRAYBYTE,
+		OADDSTR,
+		OMAPLIT,
+		ONEW,
+		OCLOSURE,
+		OCALLPART,
+		ORUNESTR,
+		OCONVIFACE:
+		if leaks {
+			src.Esc = EscHeap
+			if Debug['m'] != 0 {
+				Warnl(int(src.Lineno), "%v escapes to heap", Nconv(src, obj.FmtShort))
+			}
+		}
+
+	case ODOT,
+		OSLICE,
+		OSLICEARR,
+		OSLICE3,
+		OSLICE3ARR,
+		OSLICESTR:
+		escwalk(e, level, dst, src.Left)
+
+	case OINDEX:
+		if Isfixedarray(src.Left.Type) {
+			escwalk(e, level, dst, src.Left)
+			break
+		}
+		fallthrough
+
+	case ODOTPTR, OINDEXMAP, OIND:
+		escwalk(e, level.inc(), dst, src.Left)
+
+	// In this case a link went directly to a call, but should really go
+	// to the dummy .outN outputs that were created for the call that
+	// themselves link to the inputs with levels adjusted.
+	// See e.g. #10466
+	// This can only happen with functions returning a single result.
+	case OCALLMETH, OCALLFUNC, OCALLINTER:
+		if src.Escretval != nil {
+			if Debug['m'] > 1 {
+				fmt.Printf("%v:[%d] dst %v escwalk replace src: %v with %v\n",
+					Ctxt.Line(int(lineno)), e.loopdepth,
+					Nconv(dst, obj.FmtShort), Nconv(src, obj.FmtShort), Nconv(src.Escretval.N, obj.FmtShort))
+			}
+			src = src.Escretval.N
+		}
+	}
+
+recurse:
+	level = level.copy()
+	for ll := src.Escflowsrc; ll != nil; ll = ll.Next {
+		escwalk(e, level, dst, ll.N)
+	}
+
+	e.pdepth--
+}
+
+func esctag(e *EscState, func_ *Node) {
+	func_.Esc = EscFuncTagged
+
+	// External functions are assumed unsafe,
+	// unless //go:noescape is given before the declaration.
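+	//
+	// For example (illustrative):
+	//	//go:noescape
+	//	func memmove(to, from unsafe.Pointer, n uintptr)
+	// tags both pointer parameters with an EscNone note.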
+	if func_.Nbody == nil {
+		if func_.Noescape {
+			for t := getinargx(func_.Type).Type; t != nil; t = t.Down {
+				if haspointers(t.Type) {
+					t.Note = mktag(EscNone)
+				}
+			}
+		}
+
+		return
+	}
+
+	savefn := Curfn
+	Curfn = func_
+
+	for ll := Curfn.Func.Dcl; ll != nil; ll = ll.Next {
+		if ll.N.Op != ONAME {
+			continue
+		}
+
+		switch ll.N.Esc & EscMask {
+		case EscNone, // not touched by escflood
+			EscReturn:
+			if haspointers(ll.N.Type) { // don't bother tagging for scalars
+				ll.N.Paramfld.Note = mktag(int(ll.N.Esc))
+			}
+
+		case EscHeap, // touched by escflood, moved to heap
+			EscScope: // touched by escflood, value leaves scope
+			break
+		}
+	}
+
+	Curfn = savefn
+}
diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go
new file mode 100644
index 0000000..614de4e
--- /dev/null
+++ b/src/cmd/compile/internal/gc/export.go
@@ -0,0 +1,560 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+	"cmd/internal/obj"
+	"fmt"
+	"sort"
+	"unicode"
+	"unicode/utf8"
+)
+
+var asmlist *NodeList
+
+// Mark n's symbol as exported
+func exportsym(n *Node) {
+	if n == nil || n.Sym == nil {
+		return
+	}
+	if n.Sym.Flags&(SymExport|SymPackage) != 0 {
+		if n.Sym.Flags&SymPackage != 0 {
+			Yyerror("export/package mismatch: %v", n.Sym)
+		}
+		return
+	}
+
+	n.Sym.Flags |= SymExport
+
+	if Debug['E'] != 0 {
+		fmt.Printf("export symbol %v\n", n.Sym)
+	}
+	exportlist = list(exportlist, n)
+}
+
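+// exportname reports whether the name starts with an upper-case letter,
+// i.e. whether it is exported. For example, "Foo" and "Σ" are exported;
+// "foo" and "_Foo" are not.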
+func exportname(s string) bool {
+	if s[0] < utf8.RuneSelf {
+		return 'A' <= s[0] && s[0] <= 'Z'
+	}
+	r, _ := utf8.DecodeRuneInString(s)
+	return unicode.IsUpper(r)
+}
+
+func initname(s string) bool {
+	return s == "init"
+}
+
+// exportedsym reports whether a symbol will be visible
+// to files that import our package.
+func exportedsym(sym *Sym) bool {
+	// Builtins are visible everywhere.
+	if sym.Pkg == builtinpkg || sym.Origpkg == builtinpkg {
+		return true
+	}
+
+	return sym.Pkg == localpkg && exportname(sym.Name)
+}
+
+func autoexport(n *Node, ctxt uint8) {
+	if n == nil || n.Sym == nil {
+		return
+	}
+	if (ctxt != PEXTERN && ctxt != PFUNC) || dclcontext != PEXTERN {
+		return
+	}
+	if n.Param != nil && n.Ntype != nil && n.Ntype.Op == OTFUNC && n.Ntype.Left != nil { // method
+		return
+	}
+
+	// -A is for cmd/gc/mkbuiltin script, so export everything
+	if Debug['A'] != 0 || exportname(n.Sym.Name) || initname(n.Sym.Name) {
+		exportsym(n)
+	}
+	if asmhdr != "" && n.Sym.Pkg == localpkg && n.Sym.Flags&SymAsm == 0 {
+		n.Sym.Flags |= SymAsm
+		asmlist = list(asmlist, n)
+	}
+}
+
+func dumppkg(p *Pkg) {
+	if p == nil || p == localpkg || p.Exported != 0 || p == builtinpkg {
+		return
+	}
+	p.Exported = 1
+	suffix := ""
+	if p.Direct == 0 {
+		suffix = " // indirect"
+	}
+	fmt.Fprintf(bout, "\timport %s %q%s\n", p.Name, p.Path, suffix)
+}
+
+// Look for anything we need for the inline body
+func reexportdeplist(ll *NodeList) {
+	for ; ll != nil; ll = ll.Next {
+		reexportdep(ll.N)
+	}
+}
+
+func reexportdep(n *Node) {
+	if n == nil {
+		return
+	}
+
+	//print("reexportdep %+hN\n", n);
+	switch n.Op {
+	case ONAME:
+		switch n.Class &^ PHEAP {
+		// methods will be printed along with their type
+		// nodes for T.Method expressions
+		case PFUNC:
+			if n.Left != nil && n.Left.Op == OTYPE {
+				break
+			}
+
+			// nodes for method calls.
+			if n.Type == nil || n.Type.Thistuple > 0 {
+				break
+			}
+			fallthrough
+
+		case PEXTERN:
+			if n.Sym != nil && !exportedsym(n.Sym) {
+				if Debug['E'] != 0 {
+					fmt.Printf("reexport name %v\n", n.Sym)
+				}
+				exportlist = list(exportlist, n)
+			}
+		}
+
+	// Local variables in the bodies need their type.
+	case ODCL:
+		t := n.Left.Type
+
+		if t != Types[t.Etype] && t != idealbool && t != idealstring {
+			if Isptr[t.Etype] {
+				t = t.Type
+			}
+			if t != nil && t.Sym != nil && t.Sym.Def != nil && !exportedsym(t.Sym) {
+				if Debug['E'] != 0 {
+					fmt.Printf("reexport type %v from declaration\n", t.Sym)
+				}
+				exportlist = list(exportlist, t.Sym.Def)
+			}
+		}
+
+	case OLITERAL:
+		t := n.Type
+		if t != Types[n.Type.Etype] && t != idealbool && t != idealstring {
+			if Isptr[t.Etype] {
+				t = t.Type
+			}
+			if t != nil && t.Sym != nil && t.Sym.Def != nil && !exportedsym(t.Sym) {
+				if Debug['E'] != 0 {
+					fmt.Printf("reexport literal type %v\n", t.Sym)
+				}
+				exportlist = list(exportlist, t.Sym.Def)
+			}
+		}
+		fallthrough
+
+	case OTYPE:
+		if n.Sym != nil && !exportedsym(n.Sym) {
+			if Debug['E'] != 0 {
+				fmt.Printf("reexport literal/type %v\n", n.Sym)
+			}
+			exportlist = list(exportlist, n)
+		}
+
+	// For operations that need a type when rendered, put the type on the export list.
+	case OCONV,
+		OCONVIFACE,
+		OCONVNOP,
+		ORUNESTR,
+		OARRAYBYTESTR,
+		OARRAYRUNESTR,
+		OSTRARRAYBYTE,
+		OSTRARRAYRUNE,
+		ODOTTYPE,
+		ODOTTYPE2,
+		OSTRUCTLIT,
+		OARRAYLIT,
+		OPTRLIT,
+		OMAKEMAP,
+		OMAKESLICE,
+		OMAKECHAN:
+		t := n.Type
+
+		if t.Sym == nil && t.Type != nil {
+			t = t.Type
+		}
+		if t != nil && t.Sym != nil && t.Sym.Def != nil && !exportedsym(t.Sym) {
+			if Debug['E'] != 0 {
+				fmt.Printf("reexport type for expression %v\n", t.Sym)
+			}
+			exportlist = list(exportlist, t.Sym.Def)
+		}
+	}
+
+	reexportdep(n.Left)
+	reexportdep(n.Right)
+	reexportdeplist(n.List)
+	reexportdeplist(n.Rlist)
+	reexportdeplist(n.Ninit)
+	reexportdep(n.Ntest)
+	reexportdep(n.Nincr)
+	reexportdeplist(n.Nbody)
+	reexportdeplist(n.Nelse)
+}
+
+func dumpexportconst(s *Sym) {
+	n := s.Def
+	typecheck(&n, Erv)
+	if n == nil || n.Op != OLITERAL {
+		Fatal("dumpexportconst: oconst nil: %v", s)
+	}
+
+	t := n.Type // may or may not be specified
+	dumpexporttype(t)
+
+	if t != nil && !isideal(t) {
+		fmt.Fprintf(bout, "\tconst %v %v = %v\n", Sconv(s, obj.FmtSharp), Tconv(t, obj.FmtSharp), Vconv(&n.Val, obj.FmtSharp))
+	} else {
+		fmt.Fprintf(bout, "\tconst %v = %v\n", Sconv(s, obj.FmtSharp), Vconv(&n.Val, obj.FmtSharp))
+	}
+}
+
+func dumpexportvar(s *Sym) {
+	n := s.Def
+	typecheck(&n, Erv|Ecall)
+	if n == nil || n.Type == nil {
+		Yyerror("variable exported but not defined: %v", s)
+		return
+	}
+
+	t := n.Type
+	dumpexporttype(t)
+
+	if t.Etype == TFUNC && n.Class == PFUNC {
+		if n.Func != nil && n.Func.Inl != nil {
+			// when lazily typechecking inlined bodies, some re-exported ones may not have been typechecked yet.
+			// currently that can leave unresolved ONONAMEs in import-dot-ed packages in the wrong package
+			if Debug['l'] < 2 {
+				typecheckinl(n)
+			}
+
+			// NOTE: The space after %#S here is necessary for ld's export data parser.
+			fmt.Fprintf(bout, "\tfunc %v %v { %v }\n", Sconv(s, obj.FmtSharp), Tconv(t, obj.FmtShort|obj.FmtSharp), Hconv(n.Func.Inl, obj.FmtSharp))
+
+			reexportdeplist(n.Func.Inl)
+		} else {
+			fmt.Fprintf(bout, "\tfunc %v %v\n", Sconv(s, obj.FmtSharp), Tconv(t, obj.FmtShort|obj.FmtSharp))
+		}
+	} else {
+		fmt.Fprintf(bout, "\tvar %v %v\n", Sconv(s, obj.FmtSharp), Tconv(t, obj.FmtSharp))
+	}
+}
+
+type methodbyname []*Type
+
+func (x methodbyname) Len() int {
+	return len(x)
+}
+
+func (x methodbyname) Swap(i, j int) {
+	x[i], x[j] = x[j], x[i]
+}
+
+func (x methodbyname) Less(i, j int) bool {
+	a := x[i]
+	b := x[j]
+	return stringsCompare(a.Sym.Name, b.Sym.Name) < 0
+}
+
+func dumpexporttype(t *Type) {
+	if t == nil {
+		return
+	}
+	if t.Printed != 0 || t == Types[t.Etype] || t == bytetype || t == runetype || t == errortype {
+		return
+	}
+	t.Printed = 1
+
+	if t.Sym != nil && t.Etype != TFIELD {
+		dumppkg(t.Sym.Pkg)
+	}
+
+	dumpexporttype(t.Type)
+	dumpexporttype(t.Down)
+
+	if t.Sym == nil || t.Etype == TFIELD {
+		return
+	}
+
+	n := 0
+	for f := t.Method; f != nil; f = f.Down {
+		dumpexporttype(f)
+		n++
+	}
+
+	m := make([]*Type, n)
+	i := 0
+	for f := t.Method; f != nil; f = f.Down {
+		m[i] = f
+		i++
+	}
+	sort.Sort(methodbyname(m[:n]))
+
+	fmt.Fprintf(bout, "\ttype %v %v\n", Sconv(t.Sym, obj.FmtSharp), Tconv(t, obj.FmtSharp|obj.FmtLong))
+	var f *Type
+	for i := 0; i < n; i++ {
+		f = m[i]
+		if f.Nointerface {
+			fmt.Fprintf(bout, "\t//go:nointerface\n")
+		}
+		if f.Type.Nname != nil && f.Type.Nname.Func.Inl != nil { // nname was set by caninl
+
+			// when lazily typechecking inlined bodies, some re-exported ones may not have been typechecked yet.
+			// currently that can leave unresolved ONONAMEs in import-dot-ed packages in the wrong package
+			if Debug['l'] < 2 {
+				typecheckinl(f.Type.Nname)
+			}
+			fmt.Fprintf(bout, "\tfunc (%v) %v %v { %v }\n", Tconv(getthisx(f.Type).Type, obj.FmtSharp), Sconv(f.Sym, obj.FmtShort|obj.FmtByte|obj.FmtSharp), Tconv(f.Type, obj.FmtShort|obj.FmtSharp), Hconv(f.Type.Nname.Func.Inl, obj.FmtSharp))
+			reexportdeplist(f.Type.Nname.Func.Inl)
+		} else {
+			fmt.Fprintf(bout, "\tfunc (%v) %v %v\n", Tconv(getthisx(f.Type).Type, obj.FmtSharp), Sconv(f.Sym, obj.FmtShort|obj.FmtByte|obj.FmtSharp), Tconv(f.Type, obj.FmtShort|obj.FmtSharp))
+		}
+	}
+}
+
+func dumpsym(s *Sym) {
+	if s.Flags&SymExported != 0 {
+		return
+	}
+	s.Flags |= SymExported
+
+	if s.Def == nil {
+		Yyerror("unknown export symbol: %v", s)
+		return
+	}
+
+	//	print("dumpsym %O %+S\n", s->def->op, s);
+	dumppkg(s.Pkg)
+
+	switch s.Def.Op {
+	default:
+		Yyerror("unexpected export symbol: %v %v", Oconv(int(s.Def.Op), 0), s)
+
+	case OLITERAL:
+		dumpexportconst(s)
+
+	case OTYPE:
+		if s.Def.Type.Etype == TFORW {
+			Yyerror("export of incomplete type %v", s)
+		} else {
+			dumpexporttype(s.Def.Type)
+		}
+
+	case ONAME:
+		dumpexportvar(s)
+	}
+}
+
+func dumpexport() {
+	lno := lineno
+
+	fmt.Fprintf(bout, "\n$$\npackage %s", localpkg.Name)
+	if safemode != 0 {
+		fmt.Fprintf(bout, " safe")
+	}
+	fmt.Fprintf(bout, "\n")
+
+	for _, p := range pkgs {
+		if p.Direct != 0 {
+			dumppkg(p)
+		}
+	}
+
+	for l := exportlist; l != nil; l = l.Next {
+		lineno = l.N.Lineno
+		dumpsym(l.N.Sym)
+	}
+
+	fmt.Fprintf(bout, "\n$$\n")
+	lineno = lno
+}
+
+/*
+ * import
+ */
+
+/*
+ * return the sym for s, whose existing definition, if any, must match op
+ */
+func importsym(s *Sym, op int) *Sym {
+	if s.Def != nil && int(s.Def.Op) != op {
+		pkgstr := fmt.Sprintf("during import %q", importpkg.Path)
+		redeclare(s, pkgstr)
+	}
+
+	// mark the symbol so it is not reexported
+	if s.Def == nil {
+		if exportname(s.Name) || initname(s.Name) {
+			s.Flags |= SymExport
+		} else {
+			s.Flags |= SymPackage // package scope
+		}
+	}
+
+	return s
+}
+
+/*
+ * return the type pkg.name, forward declaring if needed
+ */
+func pkgtype(s *Sym) *Type {
+	importsym(s, OTYPE)
+	if s.Def == nil || s.Def.Op != OTYPE {
+		t := typ(TFORW)
+		t.Sym = s
+		s.Def = typenod(t)
+	}
+
+	if s.Def.Type == nil {
+		Yyerror("pkgtype %v", s)
+	}
+	return s.Def.Type
+}
+
+var numImport = make(map[string]int)
+
+func importimport(s *Sym, path string) {
+	// Informational: record package name
+	// associated with import path, for use in
+	// human-readable messages.
+
+	if isbadimport(path) {
+		errorexit()
+	}
+	p := mkpkg(path)
+	if p.Name == "" {
+		p.Name = s.Name
+		numImport[s.Name]++
+	} else if p.Name != s.Name {
+		Yyerror("conflicting names %s and %s for package %q", p.Name, s.Name, p.Path)
+	}
+
+	if incannedimport == 0 && myimportpath != "" && path == myimportpath {
+		Yyerror("import %q: package depends on %q (import cycle)", importpkg.Path, path)
+		errorexit()
+	}
+}
+
+func importconst(s *Sym, t *Type, n *Node) {
+	importsym(s, OLITERAL)
+	Convlit(&n, t)
+
+	if s.Def != nil { // TODO: check if already the same.
+		return
+	}
+
+	if n.Op != OLITERAL {
+		Yyerror("expression must be a constant")
+		return
+	}
+
+	if n.Sym != nil {
+		n1 := Nod(OXXX, nil, nil)
+		*n1 = *n
+		n = n1
+	}
+
+	n.Orig = newname(s)
+	n.Sym = s
+	declare(n, PEXTERN)
+
+	if Debug['E'] != 0 {
+		fmt.Printf("import const %v\n", s)
+	}
+}
+
+func importvar(s *Sym, t *Type) {
+	importsym(s, ONAME)
+	if s.Def != nil && s.Def.Op == ONAME {
+		if Eqtype(t, s.Def.Type) {
+			return
+		}
+		Yyerror("inconsistent definition for var %v during import\n\t%v (in %q)\n\t%v (in %q)", s, s.Def.Type, s.Importdef.Path, t, importpkg.Path)
+	}
+
+	n := newname(s)
+	s.Importdef = importpkg
+	n.Type = t
+	declare(n, PEXTERN)
+
+	if Debug['E'] != 0 {
+		fmt.Printf("import var %v %v\n", s, Tconv(t, obj.FmtLong))
+	}
+}
+
+func importtype(pt *Type, t *Type) {
+	// override declaration in unsafe.go for Pointer.
+	// there is no way in Go code to define unsafe.Pointer
+	// so we have to supply it.
+	if incannedimport != 0 && importpkg.Name == "unsafe" && pt.Nod.Sym.Name == "Pointer" {
+		t = Types[TUNSAFEPTR]
+	}
+
+	if pt.Etype == TFORW {
+		n := pt.Nod
+		copytype(pt.Nod, t)
+		pt.Nod = n // unzero nod
+		pt.Sym.Importdef = importpkg
+		pt.Sym.Lastlineno = int32(parserline())
+		declare(n, PEXTERN)
+		checkwidth(pt)
+	} else if !Eqtype(pt.Orig, t) {
+		Yyerror("inconsistent definition for type %v during import\n\t%v (in %q)\n\t%v (in %q)", pt.Sym, Tconv(pt, obj.FmtLong), pt.Sym.Importdef.Path, Tconv(t, obj.FmtLong), importpkg.Path)
+	}
+
+	if Debug['E'] != 0 {
+		fmt.Printf("import type %v %v\n", pt, Tconv(t, obj.FmtLong))
+	}
+}
+
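+// dumpasmhdr writes the assembly header file named by -asmhdr. For example
+// (illustrative), a constant N = 64 becomes "#define const_N 64", and a
+// plain struct type T gets "#define T__size <size>" plus one
+// "#define T_<field> <offset>" line per non-blank field.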
+func dumpasmhdr() {
+	var b *obj.Biobuf
+
+	b, err := obj.Bopenw(asmhdr)
+	if err != nil {
+		Fatal("%v", err)
+	}
+	fmt.Fprintf(b, "// generated by %cg -asmhdr from package %s\n\n", Thearch.Thechar, localpkg.Name)
+	var n *Node
+	var t *Type
+	for l := asmlist; l != nil; l = l.Next {
+		n = l.N
+		if isblanksym(n.Sym) {
+			continue
+		}
+		switch n.Op {
+		case OLITERAL:
+			fmt.Fprintf(b, "#define const_%s %v\n", n.Sym.Name, Vconv(&n.Val, obj.FmtSharp))
+
+		case OTYPE:
+			t = n.Type
+			if t.Etype != TSTRUCT || t.Map != nil || t.Funarg != 0 {
+				break
+			}
+			fmt.Fprintf(b, "#define %s__size %d\n", t.Sym.Name, int(t.Width))
+			for t = t.Type; t != nil; t = t.Down {
+				if !isblanksym(t.Sym) {
+					fmt.Fprintf(b, "#define %s_%s %d\n", n.Sym.Name, t.Sym.Name, int(t.Width))
+				}
+			}
+		}
+	}
+
+	obj.Bterm(b)
+}
diff --git a/src/cmd/compile/internal/gc/fmt.go b/src/cmd/compile/internal/gc/fmt.go
new file mode 100644
index 0000000..9d8482b
--- /dev/null
+++ b/src/cmd/compile/internal/gc/fmt.go
@@ -0,0 +1,1736 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+	"bytes"
+	"cmd/internal/obj"
+	"fmt"
+	"strconv"
+	"strings"
+	"unicode/utf8"
+)
+
+//
+// Format conversions
+//	%L int		Line numbers
+//
+//	%E int		etype values (aka 'Kind')
+//
+//	%O int		Node Opcodes
+//		Flags: "%#O": print go syntax. (automatic unless fmtmode == FDbg)
+//
+//	%J Node*	Node details
+//		Flags: "%hJ" suppresses things not relevant until walk.
+//
+//	%V Val*		Constant values
+//
+//	%S Sym*		Symbols
+//		Flags: +,- #: mode (see below)
+//			"%hS"	unqualified identifier in any mode
+//			"%hhS"  in export mode: unqualified identifier if exported, qualified if not
+//
+//	%T Type*	Types
+//		Flags: +,- #: mode (see below)
+//			'l' definition instead of name.
+//			'h' omit "func" and receiver in function types
+//			'u' (only in -/Sym mode) print type identifiers with package name instead of prefix.
+//
+//	%N Node*	Nodes
+//		Flags: +,- #: mode (see below)
+//			'h' (only in +/debug mode) suppress recursion
+//			'l' (only in Error mode) print "foo (type Bar)"
+//
+//	%H NodeList*	NodeLists
+//		Flags: those of %N
+//			','  separate items with ',' instead of ';'
+//
+//   In mparith1.c:
+//      %B Mpint*	Big integers
+//	%F Mpflt*	Big floats
+//
+//   %S, %T and %N use the following flags to set the format mode:
+const (
+	FErr = iota
+	FDbg
+	FExp
+	FTypeId
+)
+
+var fmtmode int = FErr
+
+var fmtpkgpfx int // %uT stickiness
+
+//
+// E.g. for %S:	%+S %#S %-S	print an identifier properly qualified for debug/export/internal mode.
+//
+// The mode flags  +, - and # are sticky, meaning they persist through
+// recursions of %N, %T and %S, but not the h and l flags.  The u flag is
+// sticky only on %T recursions and only used in %-/Sym mode.
+
+//
+// Useful format combinations:
+//
+//	%+N   %+H	multiline recursive debug dump of node/nodelist
+//	%+hN  %+hH	non recursive debug dump
+//
+//	%#N   %#T	export format
+//	%#lT		type definition instead of name
+//	%#hT		omit "func" and receiver in function signature
+//
+//	%lN		"foo (type Bar)" for error messages
+//
+//	%-T		type identifiers
+//	%-hT		type identifiers without "func" and arg names in type signatures (methodsym)
+//	%-uT		type identifiers with package name instead of prefix (typesym, dcommontype, typehash)
+//
+
+func setfmode(flags *int) int {
+	fm := fmtmode
+	if *flags&obj.FmtSign != 0 {
+		fmtmode = FDbg
+	} else if *flags&obj.FmtSharp != 0 {
+		fmtmode = FExp
+	} else if *flags&obj.FmtLeft != 0 {
+		fmtmode = FTypeId
+	}
+
+	*flags &^= (obj.FmtSharp | obj.FmtLeft | obj.FmtSign)
+	return fm
+}
+
+// Fmt "%L": Linenumbers
+
+var goopnames = []string{
+	OADDR:     "&",
+	OADD:      "+",
+	OADDSTR:   "+",
+	OANDAND:   "&&",
+	OANDNOT:   "&^",
+	OAND:      "&",
+	OAPPEND:   "append",
+	OAS:       "=",
+	OAS2:      "=",
+	OBREAK:    "break",
+	OCALL:     "function call", // not actual syntax
+	OCAP:      "cap",
+	OCASE:     "case",
+	OCLOSE:    "close",
+	OCOMPLEX:  "complex",
+	OCOM:      "^",
+	OCONTINUE: "continue",
+	OCOPY:     "copy",
+	ODEC:      "--",
+	ODELETE:   "delete",
+	ODEFER:    "defer",
+	ODIV:      "/",
+	OEQ:       "==",
+	OFALL:     "fallthrough",
+	OFOR:      "for",
+	OGE:       ">=",
+	OGOTO:     "goto",
+	OGT:       ">",
+	OIF:       "if",
+	OIMAG:     "imag",
+	OINC:      "++",
+	OIND:      "*",
+	OLEN:      "len",
+	OLE:       "<=",
+	OLSH:      "<<",
+	OLT:       "<",
+	OMAKE:     "make",
+	OMINUS:    "-",
+	OMOD:      "%",
+	OMUL:      "*",
+	ONEW:      "new",
+	ONE:       "!=",
+	ONOT:      "!",
+	OOROR:     "||",
+	OOR:       "|",
+	OPANIC:    "panic",
+	OPLUS:     "+",
+	OPRINTN:   "println",
+	OPRINT:    "print",
+	ORANGE:    "range",
+	OREAL:     "real",
+	ORECV:     "<-",
+	ORECOVER:  "recover",
+	ORETURN:   "return",
+	ORSH:      ">>",
+	OSELECT:   "select",
+	OSEND:     "<-",
+	OSUB:      "-",
+	OSWITCH:   "switch",
+	OXOR:      "^",
+}
+
+// Fmt "%O":  Node opcodes
+func Oconv(o int, flag int) string {
+	if (flag&obj.FmtSharp != 0) || fmtmode != FDbg {
+		if o >= 0 && o < len(goopnames) && goopnames[o] != "" {
+			return goopnames[o]
+		}
+	}
+
+	if o >= 0 && o < len(opnames) && opnames[o] != "" {
+		return opnames[o]
+	}
+
+	return fmt.Sprintf("O-%d", o)
+}
+
+var classnames = []string{
+	"Pxxx",
+	"PEXTERN",
+	"PAUTO",
+	"PPARAM",
+	"PPARAMOUT",
+	"PPARAMREF",
+	"PFUNC",
+}
+
+// Fmt "%J": Node details.
+func Jconv(n *Node, flag int) string {
+	var buf bytes.Buffer
+
+	c := flag & obj.FmtShort
+
+	if c == 0 && n.Ullman != 0 {
+		fmt.Fprintf(&buf, " u(%d)", n.Ullman)
+	}
+
+	if c == 0 && n.Addable {
+		fmt.Fprintf(&buf, " a(%v)", n.Addable)
+	}
+
+	if c == 0 && n.Vargen != 0 {
+		fmt.Fprintf(&buf, " g(%d)", n.Vargen)
+	}
+
+	if n.Lineno != 0 {
+		fmt.Fprintf(&buf, " l(%d)", n.Lineno)
+	}
+
+	if c == 0 && n.Xoffset != BADWIDTH {
+		fmt.Fprintf(&buf, " x(%d%+d)", n.Xoffset, n.Stkdelta)
+	}
+
+	if n.Class != 0 {
+		s := ""
+		if n.Class&PHEAP != 0 {
+			s = ",heap"
+		}
+		if int(n.Class&^PHEAP) < len(classnames) {
+			fmt.Fprintf(&buf, " class(%s%s)", classnames[n.Class&^PHEAP], s)
+		} else {
+			fmt.Fprintf(&buf, " class(%d?%s)", n.Class&^PHEAP, s)
+		}
+	}
+
+	if n.Colas {
+		fmt.Fprintf(&buf, " colas(%v)", n.Colas)
+	}
+
+	if n.Funcdepth != 0 {
+		fmt.Fprintf(&buf, " f(%d)", n.Funcdepth)
+	}
+
+	switch n.Esc {
+	case EscUnknown:
+		break
+
+	case EscHeap:
+		buf.WriteString(" esc(h)")
+
+	case EscScope:
+		buf.WriteString(" esc(s)")
+
+	case EscNone:
+		buf.WriteString(" esc(no)")
+
+	case EscNever:
+		if c == 0 {
+			buf.WriteString(" esc(N)")
+		}
+
+	default:
+		fmt.Fprintf(&buf, " esc(%d)", n.Esc)
+	}
+
+	if n.Escloopdepth != 0 {
+		fmt.Fprintf(&buf, " ld(%d)", n.Escloopdepth)
+	}
+
+	if c == 0 && n.Typecheck != 0 {
+		fmt.Fprintf(&buf, " tc(%d)", n.Typecheck)
+	}
+
+	if c == 0 && n.Dodata != 0 {
+		fmt.Fprintf(&buf, " dd(%d)", n.Dodata)
+	}
+
+	if n.Isddd {
+		fmt.Fprintf(&buf, " isddd(%v)", n.Isddd)
+	}
+
+	if n.Implicit {
+		fmt.Fprintf(&buf, " implicit(%v)", n.Implicit)
+	}
+
+	if n.Embedded != 0 {
+		fmt.Fprintf(&buf, " embedded(%d)", n.Embedded)
+	}
+
+	if n.Addrtaken {
+		buf.WriteString(" addrtaken")
+	}
+
+	if n.Assigned {
+		buf.WriteString(" assigned")
+	}
+
+	if c == 0 && n.Used {
+		fmt.Fprintf(&buf, " used(%v)", n.Used)
+	}
+	return buf.String()
+}
+
+// Fmt "%V": Values
+func Vconv(v *Val, flag int) string {
+	switch v.Ctype {
+	case CTINT:
+		if (flag&obj.FmtSharp != 0) || fmtmode == FExp {
+			return Bconv(v.U.(*Mpint), obj.FmtSharp)
+		}
+		return Bconv(v.U.(*Mpint), 0)
+
+	case CTRUNE:
+		x := Mpgetfix(v.U.(*Mpint))
+		if ' ' <= x && x < 0x80 && x != '\\' && x != '\'' {
+			return fmt.Sprintf("'%c'", int(x))
+		}
+		if 0 <= x && x < 1<<16 {
+			return fmt.Sprintf("'\\u%04x'", uint(int(x)))
+		}
+		if 0 <= x && x <= utf8.MaxRune {
+			return fmt.Sprintf("'\\U%08x'", uint64(x))
+		}
+		return fmt.Sprintf("('\\x00' + %v)", v.U.(*Mpint))
+
+	case CTFLT:
+		if (flag&obj.FmtSharp != 0) || fmtmode == FExp {
+			return Fconv(v.U.(*Mpflt), 0)
+		}
+		return Fconv(v.U.(*Mpflt), obj.FmtSharp)
+
+	case CTCPLX:
+		if (flag&obj.FmtSharp != 0) || fmtmode == FExp {
+			return fmt.Sprintf("(%v+%vi)", &v.U.(*Mpcplx).Real, &v.U.(*Mpcplx).Imag)
+		}
+		if mpcmpfltc(&v.U.(*Mpcplx).Real, 0) == 0 {
+			return fmt.Sprintf("%vi", Fconv(&v.U.(*Mpcplx).Imag, obj.FmtSharp))
+		}
+		if mpcmpfltc(&v.U.(*Mpcplx).Imag, 0) == 0 {
+			return Fconv(&v.U.(*Mpcplx).Real, obj.FmtSharp)
+		}
+		if mpcmpfltc(&v.U.(*Mpcplx).Imag, 0) < 0 {
+			return fmt.Sprintf("(%v%vi)", Fconv(&v.U.(*Mpcplx).Real, obj.FmtSharp), Fconv(&v.U.(*Mpcplx).Imag, obj.FmtSharp))
+		}
+		return fmt.Sprintf("(%v+%vi)", Fconv(&v.U.(*Mpcplx).Real, obj.FmtSharp), Fconv(&v.U.(*Mpcplx).Imag, obj.FmtSharp))
+
+	case CTSTR:
+		return strconv.Quote(v.U.(string))
+
+	case CTBOOL:
+		if v.U.(bool) {
+			return "true"
+		}
+		return "false"
+
+	case CTNIL:
+		return "nil"
+	}
+
+	return fmt.Sprintf("<ctype=%d>", v.Ctype)
+}
+
+/*
+s%,%,\n%g
+s%\n+%\n%g
+s%^[	]*T%%g
+s%,.*%%g
+s%.+%	[T&]		= "&",%g
+s%^	........*\]%&~%g
+s%~	%%g
+*/
+var etnames = []string{
+	TINT:        "INT",
+	TUINT:       "UINT",
+	TINT8:       "INT8",
+	TUINT8:      "UINT8",
+	TINT16:      "INT16",
+	TUINT16:     "UINT16",
+	TINT32:      "INT32",
+	TUINT32:     "UINT32",
+	TINT64:      "INT64",
+	TUINT64:     "UINT64",
+	TUINTPTR:    "UINTPTR",
+	TFLOAT32:    "FLOAT32",
+	TFLOAT64:    "FLOAT64",
+	TCOMPLEX64:  "COMPLEX64",
+	TCOMPLEX128: "COMPLEX128",
+	TBOOL:       "BOOL",
+	TPTR32:      "PTR32",
+	TPTR64:      "PTR64",
+	TFUNC:       "FUNC",
+	TARRAY:      "ARRAY",
+	TSTRUCT:     "STRUCT",
+	TCHAN:       "CHAN",
+	TMAP:        "MAP",
+	TINTER:      "INTER",
+	TFORW:       "FORW",
+	TFIELD:      "FIELD",
+	TSTRING:     "STRING",
+	TANY:        "ANY",
+}
+
+// Fmt "%E": etype
+func Econv(et int, flag int) string {
+	if et >= 0 && et < len(etnames) && etnames[et] != "" {
+		return etnames[et]
+	}
+	return fmt.Sprintf("E-%d", et)
+}
+
+// Fmt "%S": syms
+func symfmt(s *Sym, flag int) string {
+	if s.Pkg != nil && flag&obj.FmtShort == 0 {
+		switch fmtmode {
+		case FErr: // This is for the user
+			if s.Pkg == localpkg {
+				return s.Name
+			}
+
+			// If the name was used by multiple packages, display the full path.
+			if s.Pkg.Name != "" && numImport[s.Pkg.Name] > 1 {
+				return fmt.Sprintf("%q.%s", s.Pkg.Path, s.Name)
+			}
+			return fmt.Sprintf("%s.%s", s.Pkg.Name, s.Name)
+
+		case FDbg:
+			return fmt.Sprintf("%s.%s", s.Pkg.Name, s.Name)
+
+		case FTypeId:
+			if flag&obj.FmtUnsigned != 0 {
+				return fmt.Sprintf("%s.%s", s.Pkg.Name, s.Name) // dcommontype, typehash
+			}
+			return fmt.Sprintf("%s.%s", s.Pkg.Prefix, s.Name) // (methodsym), typesym, weaksym
+
+		case FExp:
+			if s.Name != "" && s.Name[0] == '.' {
+				Fatal("exporting synthetic symbol %s", s.Name)
+			}
+			if s.Pkg != builtinpkg {
+				return fmt.Sprintf("@%q.%s", s.Pkg.Path, s.Name)
+			}
+		}
+	}
+
+	if flag&obj.FmtByte != 0 {
+		// FmtByte (hh) implies FmtShort (h)
+		// skip leading "type." in method name
+		p := s.Name
+		if i := strings.LastIndex(s.Name, "."); i >= 0 {
+			p = s.Name[i+1:]
+		}
+
+		// exportname needs to see the name without the prefix too.
+		if (fmtmode == FExp && !exportname(p)) || fmtmode == FDbg {
+			return fmt.Sprintf("@%q.%s", s.Pkg.Path, p)
+		}
+
+		return p
+	}
+
+	return s.Name
+}
+
+var basicnames = []string{
+	TINT:        "int",
+	TUINT:       "uint",
+	TINT8:       "int8",
+	TUINT8:      "uint8",
+	TINT16:      "int16",
+	TUINT16:     "uint16",
+	TINT32:      "int32",
+	TUINT32:     "uint32",
+	TINT64:      "int64",
+	TUINT64:     "uint64",
+	TUINTPTR:    "uintptr",
+	TFLOAT32:    "float32",
+	TFLOAT64:    "float64",
+	TCOMPLEX64:  "complex64",
+	TCOMPLEX128: "complex128",
+	TBOOL:       "bool",
+	TANY:        "any",
+	TSTRING:     "string",
+	TNIL:        "nil",
+	TIDEAL:      "untyped number",
+	TBLANK:      "blank",
+}
+
+func typefmt(t *Type, flag int) string {
+	if t == nil {
+		return "<T>"
+	}
+
+	if t == bytetype || t == runetype {
+		// in %-T mode collapse rune and byte with their originals.
+		if fmtmode != FTypeId {
+			return Sconv(t.Sym, obj.FmtShort)
+		}
+		t = Types[t.Etype]
+	}
+
+	if t == errortype {
+		return "error"
+	}
+
+	// Unless the 'l' flag was specified, if the type has a name, just print that name.
+	if flag&obj.FmtLong == 0 && t.Sym != nil && t.Etype != TFIELD && t != Types[t.Etype] {
+		switch fmtmode {
+		case FTypeId:
+			if flag&obj.FmtShort != 0 {
+				if t.Vargen != 0 {
+					return fmt.Sprintf("%v·%d", Sconv(t.Sym, obj.FmtShort), t.Vargen)
+				}
+				return Sconv(t.Sym, obj.FmtShort)
+			}
+
+			if flag&obj.FmtUnsigned != 0 {
+				return Sconv(t.Sym, obj.FmtUnsigned)
+			}
+			fallthrough
+
+		case FExp:
+			if t.Sym.Pkg == localpkg && t.Vargen != 0 {
+				return fmt.Sprintf("%v·%d", t.Sym, t.Vargen)
+			}
+		}
+
+		return Sconv(t.Sym, 0)
+	}
+
+	if int(t.Etype) < len(basicnames) && basicnames[t.Etype] != "" {
+		prefix := ""
+		if fmtmode == FErr && (t == idealbool || t == idealstring) {
+			prefix = "untyped "
+		}
+		return prefix + basicnames[t.Etype]
+	}
+
+	if fmtmode == FDbg {
+		fmtmode = 0
+		str := Econv(int(t.Etype), 0) + "-" + typefmt(t, flag)
+		fmtmode = FDbg
+		return str
+	}
+
+	switch t.Etype {
+	case TPTR32, TPTR64:
+		if fmtmode == FTypeId && (flag&obj.FmtShort != 0) {
+			return fmt.Sprintf("*%v", Tconv(t.Type, obj.FmtShort))
+		}
+		return fmt.Sprintf("*%v", t.Type)
+
+	case TARRAY:
+		if t.Bound >= 0 {
+			return fmt.Sprintf("[%d]%v", t.Bound, t.Type)
+		}
+		if t.Bound == -100 {
+			return fmt.Sprintf("[...]%v", t.Type)
+		}
+		return fmt.Sprintf("[]%v", t.Type)
+
+	case TCHAN:
+		switch t.Chan {
+		case Crecv:
+			return fmt.Sprintf("<-chan %v", t.Type)
+
+		case Csend:
+			return fmt.Sprintf("chan<- %v", t.Type)
+		}
+
+		if t.Type != nil && t.Type.Etype == TCHAN && t.Type.Sym == nil && t.Type.Chan == Crecv {
+			return fmt.Sprintf("chan (%v)", t.Type)
+		}
+		return fmt.Sprintf("chan %v", t.Type)
+
+	case TMAP:
+		return fmt.Sprintf("map[%v]%v", t.Down, t.Type)
+
+	case TINTER:
+		var buf bytes.Buffer
+		buf.WriteString("interface {")
+		for t1 := t.Type; t1 != nil; t1 = t1.Down {
+			buf.WriteString(" ")
+			if exportname(t1.Sym.Name) {
+				buf.WriteString(Sconv(t1.Sym, obj.FmtShort))
+			} else {
+				buf.WriteString(Sconv(t1.Sym, obj.FmtUnsigned))
+			}
+			buf.WriteString(Tconv(t1.Type, obj.FmtShort))
+			if t1.Down != nil {
+				buf.WriteString(";")
+			}
+		}
+		if t.Type != nil {
+			buf.WriteString(" ")
+		}
+		buf.WriteString("}")
+		return buf.String()
+
+	case TFUNC:
+		var buf bytes.Buffer
+		if flag&obj.FmtShort != 0 {
+			// no leading func
+		} else {
+			if t.Thistuple != 0 {
+				buf.WriteString("method")
+				buf.WriteString(Tconv(getthisx(t), 0))
+				buf.WriteString(" ")
+			}
+			buf.WriteString("func")
+		}
+		buf.WriteString(Tconv(getinargx(t), 0))
+
+		switch t.Outtuple {
+		case 0:
+			break
+
+		case 1:
+			if fmtmode != FExp {
+				buf.WriteString(" ")
+				buf.WriteString(Tconv(getoutargx(t).Type.Type, 0)) // struct->field->field's type
+				break
+			}
+			fallthrough
+
+		default:
+			buf.WriteString(" ")
+			buf.WriteString(Tconv(getoutargx(t), 0))
+		}
+		return buf.String()
+
+	case TSTRUCT:
+		if t.Map != nil {
+			// Format the bucket struct for map[x]y as map.bucket[x]y.
+			// This avoids a recursive print that generates very long names.
+			if t.Map.Bucket == t {
+				return fmt.Sprintf("map.bucket[%v]%v", t.Map.Down, t.Map.Type)
+			}
+
+			if t.Map.Hmap == t {
+				return fmt.Sprintf("map.hdr[%v]%v", t.Map.Down, t.Map.Type)
+			}
+
+			if t.Map.Hiter == t {
+				return fmt.Sprintf("map.iter[%v]%v", t.Map.Down, t.Map.Type)
+			}
+
+			Yyerror("unknown internal map type")
+		}
+
+		var buf bytes.Buffer
+		if t.Funarg != 0 {
+			buf.WriteString("(")
+			if fmtmode == FTypeId || fmtmode == FErr { // no argument names on function signature, and no "noescape"/"nosplit" tags
+				for t1 := t.Type; t1 != nil; t1 = t1.Down {
+					buf.WriteString(Tconv(t1, obj.FmtShort))
+					if t1.Down != nil {
+						buf.WriteString(", ")
+					}
+				}
+			} else {
+				for t1 := t.Type; t1 != nil; t1 = t1.Down {
+					buf.WriteString(Tconv(t1, 0))
+					if t1.Down != nil {
+						buf.WriteString(", ")
+					}
+				}
+			}
+			buf.WriteString(")")
+		} else {
+			buf.WriteString("struct {")
+			for t1 := t.Type; t1 != nil; t1 = t1.Down {
+				buf.WriteString(" ")
+				buf.WriteString(Tconv(t1, obj.FmtLong))
+				if t1.Down != nil {
+					buf.WriteString(";")
+				}
+			}
+			if t.Type != nil {
+				buf.WriteString(" ")
+			}
+			buf.WriteString("}")
+		}
+		return buf.String()
+
+	case TFIELD:
+		var name string
+		if flag&obj.FmtShort == 0 {
+			s := t.Sym
+
+			// Take the name from the original, lest we substitute it with ~r%d or ~b%d.
+			// ~r%d is a (formerly) unnamed result.
+			if (fmtmode == FErr || fmtmode == FExp) && t.Nname != nil {
+				if t.Nname.Orig != nil {
+					s = t.Nname.Orig.Sym
+					if s != nil && s.Name[0] == '~' {
+						if s.Name[1] == 'r' { // originally an unnamed result
+							s = nil
+						} else if s.Name[1] == 'b' { // originally the blank identifier _
+							s = Lookup("_")
+						}
+					}
+				} else {
+					s = nil
+				}
+			}
+
+			if s != nil && t.Embedded == 0 {
+				if t.Funarg != 0 {
+					name = Nconv(t.Nname, 0)
+				} else if flag&obj.FmtLong != 0 {
+					name = Sconv(s, obj.FmtShort|obj.FmtByte) // qualify non-exported names (used on structs, not on funarg)
+				} else {
+					name = Sconv(s, 0)
+				}
+			} else if fmtmode == FExp {
+				// TODO(rsc) this breaks on the eliding of unused arguments in the backend
+				// when this is fixed, the special case in dcl.c checkarglist can go.
+				//if(t->funarg)
+				//	fmtstrcpy(fp, "_ ");
+				//else
+				if t.Embedded != 0 && s.Pkg != nil && len(s.Pkg.Path) > 0 {
+					name = fmt.Sprintf("@%q.?", s.Pkg.Path)
+				} else {
+					name = "?"
+				}
+			}
+		}
+
+		var typ string
+		if t.Isddd {
+			typ = "..." + Tconv(t.Type.Type, 0)
+		} else {
+			typ = Tconv(t.Type, 0)
+		}
+
+		str := typ
+		if name != "" {
+			str = name + " " + typ
+		}
+		if flag&obj.FmtShort == 0 && t.Note != nil {
+			str += " " + strconv.Quote(*t.Note)
+		}
+		return str
+
+	case TFORW:
+		if t.Sym != nil {
+			return fmt.Sprintf("undefined %v", t.Sym)
+		}
+		return "undefined"
+
+	case TUNSAFEPTR:
+		if fmtmode == FExp {
+			return "@\"unsafe\".Pointer"
+		}
+		return "unsafe.Pointer"
+	}
+
+	if fmtmode == FExp {
+		Fatal("missing %v case during export", Econv(int(t.Etype), 0))
+	}
+
+	// Don't know how to handle - fall back to detailed prints.
+	return fmt.Sprintf("%v <%v> %v", Econv(int(t.Etype), 0), t.Sym, t.Type)
+}
+
+// Statements which may be rendered with a simplestmt as init.
+func stmtwithinit(op int) bool {
+	switch op {
+	case OIF, OFOR, OSWITCH:
+		return true
+	}
+
+	return false
+}
+
+func stmtfmt(n *Node) string {
+	var f string
+
+	// some statements allow for an init, but at most one,
+	// but we may have an arbitrary number added, eg by typecheck
+	// and inlining.  If it doesn't fit the syntax, emit an enclosing
+	// block starting with the init statements.
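+	//
+	// For example (illustrative): "if x := f(); x > 0 { body }" keeps its
+	// single init inside the statement, while several inits print as an
+	// enclosing block "{ x := f(); y := g(); if x > y { body } }".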
+
+	// if we can just say "for" n->ninit; ... then do so
+	simpleinit := n.Ninit != nil && n.Ninit.Next == nil && n.Ninit.N.Ninit == nil && stmtwithinit(int(n.Op))
+
+	// otherwise, print the inits as separate statements
+	complexinit := n.Ninit != nil && !simpleinit && (fmtmode != FErr)
+
+	// but if it was for if/for/switch, put in an extra surrounding block to limit the scope
+	extrablock := complexinit && stmtwithinit(int(n.Op))
+
+	if extrablock {
+		f += "{"
+	}
+
+	if complexinit {
+		f += fmt.Sprintf(" %v; ", n.Ninit)
+	}
+
+	switch n.Op {
+	case ODCL:
+		if fmtmode == FExp {
+			switch n.Left.Class &^ PHEAP {
+			case PPARAM, PPARAMOUT, PAUTO:
+				f += fmt.Sprintf("var %v %v", n.Left, n.Left.Type)
+				goto ret
+			}
+		}
+
+		f += fmt.Sprintf("var %v %v", n.Left.Sym, n.Left.Type)
+
+	case ODCLFIELD:
+		if n.Left != nil {
+			f += fmt.Sprintf("%v %v", n.Left, n.Right)
+		} else {
+			f += Nconv(n.Right, 0)
+		}
+
+	// Don't export "v = <N>" initializing statements, hope they're always
+	// preceded by the DCL which will be re-parsed and typechecked to reproduce
+	// the "v = <N>" again.
+	case OAS, OASWB:
+		if fmtmode == FExp && n.Right == nil {
+			break
+		}
+
+		if n.Colas && !complexinit {
+			f += fmt.Sprintf("%v := %v", n.Left, n.Right)
+		} else {
+			f += fmt.Sprintf("%v = %v", n.Left, n.Right)
+		}
+
+	case OASOP:
+		if n.Implicit {
+			if n.Etype == OADD {
+				f += fmt.Sprintf("%v++", n.Left)
+			} else {
+				f += fmt.Sprintf("%v--", n.Left)
+			}
+			break
+		}
+
+		f += fmt.Sprintf("%v %v= %v", n.Left, Oconv(int(n.Etype), obj.FmtSharp), n.Right)
+
+	case OAS2:
+		if n.Colas && !complexinit {
+			f += fmt.Sprintf("%v := %v", Hconv(n.List, obj.FmtComma), Hconv(n.Rlist, obj.FmtComma))
+			break
+		}
+		fallthrough
+
+	case OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
+		f += fmt.Sprintf("%v = %v", Hconv(n.List, obj.FmtComma), Hconv(n.Rlist, obj.FmtComma))
+
+	case ORETURN:
+		f += fmt.Sprintf("return %v", Hconv(n.List, obj.FmtComma))
+
+	case ORETJMP:
+		f += fmt.Sprintf("retjmp %v", n.Sym)
+
+	case OPROC:
+		f += fmt.Sprintf("go %v", n.Left)
+
+	case ODEFER:
+		f += fmt.Sprintf("defer %v", n.Left)
+
+	case OIF:
+		if simpleinit {
+			f += fmt.Sprintf("if %v; %v { %v }", n.Ninit.N, n.Ntest, n.Nbody)
+		} else {
+			f += fmt.Sprintf("if %v { %v }", n.Ntest, n.Nbody)
+		}
+		if n.Nelse != nil {
+			f += fmt.Sprintf(" else { %v }", n.Nelse)
+		}
+
+	case OFOR:
+		if fmtmode == FErr { // TODO maybe only if FmtShort, same below
+			f += "for loop"
+			break
+		}
+
+		f += "for"
+		if simpleinit {
+			f += fmt.Sprintf(" %v;", n.Ninit.N)
+		} else if n.Nincr != nil {
+			f += " ;"
+		}
+
+		if n.Ntest != nil {
+			f += fmt.Sprintf(" %v", n.Ntest)
+		}
+
+		if n.Nincr != nil {
+			f += fmt.Sprintf("; %v", n.Nincr)
+		} else if simpleinit {
+			f += ";"
+		}
+
+		f += fmt.Sprintf(" { %v }", n.Nbody)
+
+	case ORANGE:
+		if fmtmode == FErr {
+			f += "for loop"
+			break
+		}
+
+		if n.List == nil {
+			f += fmt.Sprintf("for range %v { %v }", n.Right, n.Nbody)
+			break
+		}
+
+		f += fmt.Sprintf("for %v = range %v { %v }", Hconv(n.List, obj.FmtComma), n.Right, n.Nbody)
+
+	case OSELECT, OSWITCH:
+		if fmtmode == FErr {
+			f += fmt.Sprintf("%v statement", Oconv(int(n.Op), 0))
+			break
+		}
+
+		f += Oconv(int(n.Op), obj.FmtSharp)
+		if simpleinit {
+			f += fmt.Sprintf(" %v;", n.Ninit.N)
+		}
+		if n.Ntest != nil {
+			f += Nconv(n.Ntest, 0)
+		}
+
+		f += fmt.Sprintf(" { %v }", n.List)
+
+	case OCASE, OXCASE:
+		if n.List != nil {
+			f += fmt.Sprintf("case %v: %v", Hconv(n.List, obj.FmtComma), n.Nbody)
+		} else {
+			f += fmt.Sprintf("default: %v", n.Nbody)
+		}
+
+	case OBREAK,
+		OCONTINUE,
+		OGOTO,
+		OFALL,
+		OXFALL:
+		if n.Left != nil {
+			f += fmt.Sprintf("%v %v", Oconv(int(n.Op), obj.FmtSharp), n.Left)
+		} else {
+			f += Oconv(int(n.Op), obj.FmtSharp)
+		}
+
+	case OEMPTY:
+		break
+
+	case OLABEL:
+		f += fmt.Sprintf("%v: ", n.Left)
+	}
+
+ret:
+	if extrablock {
+		f += "}"
+	}
+
+	return f
+}
+
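+// opprec gives each Op's operator precedence for exprfmt; larger values
+// bind tighter. For example (illustrative), OMUL (6) binds tighter than
+// OADD (5), so a+b*c prints without parentheses while (a+b)*c keeps them.
+// Statements are marked -1 and handled by stmtfmt.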
+var opprec = []int{
+	OAPPEND:       8,
+	OARRAYBYTESTR: 8,
+	OARRAYLIT:     8,
+	OARRAYRUNESTR: 8,
+	OCALLFUNC:     8,
+	OCALLINTER:    8,
+	OCALLMETH:     8,
+	OCALL:         8,
+	OCAP:          8,
+	OCLOSE:        8,
+	OCONVIFACE:    8,
+	OCONVNOP:      8,
+	OCONV:         8,
+	OCOPY:         8,
+	ODELETE:       8,
+	OGETG:         8,
+	OLEN:          8,
+	OLITERAL:      8,
+	OMAKESLICE:    8,
+	OMAKE:         8,
+	OMAPLIT:       8,
+	ONAME:         8,
+	ONEW:          8,
+	ONONAME:       8,
+	OPACK:         8,
+	OPANIC:        8,
+	OPAREN:        8,
+	OPRINTN:       8,
+	OPRINT:        8,
+	ORUNESTR:      8,
+	OSTRARRAYBYTE: 8,
+	OSTRARRAYRUNE: 8,
+	OSTRUCTLIT:    8,
+	OTARRAY:       8,
+	OTCHAN:        8,
+	OTFUNC:        8,
+	OTINTER:       8,
+	OTMAP:         8,
+	OTSTRUCT:      8,
+	OINDEXMAP:     8,
+	OINDEX:        8,
+	OSLICE:        8,
+	OSLICESTR:     8,
+	OSLICEARR:     8,
+	OSLICE3:       8,
+	OSLICE3ARR:    8,
+	ODOTINTER:     8,
+	ODOTMETH:      8,
+	ODOTPTR:       8,
+	ODOTTYPE2:     8,
+	ODOTTYPE:      8,
+	ODOT:          8,
+	OXDOT:         8,
+	OCALLPART:     8,
+	OPLUS:         7,
+	ONOT:          7,
+	OCOM:          7,
+	OMINUS:        7,
+	OADDR:         7,
+	OIND:          7,
+	ORECV:         7,
+	OMUL:          6,
+	ODIV:          6,
+	OMOD:          6,
+	OLSH:          6,
+	ORSH:          6,
+	OAND:          6,
+	OANDNOT:       6,
+	OADD:          5,
+	OSUB:          5,
+	OOR:           5,
+	OXOR:          5,
+	OEQ:           4,
+	OLT:           4,
+	OLE:           4,
+	OGE:           4,
+	OGT:           4,
+	ONE:           4,
+	OCMPSTR:       4,
+	OCMPIFACE:     4,
+	OSEND:         3,
+	OANDAND:       2,
+	OOROR:         1,
+	// Statements handled by stmtfmt
+	OAS:         -1,
+	OAS2:        -1,
+	OAS2DOTTYPE: -1,
+	OAS2FUNC:    -1,
+	OAS2MAPR:    -1,
+	OAS2RECV:    -1,
+	OASOP:       -1,
+	OBREAK:      -1,
+	OCASE:       -1,
+	OCONTINUE:   -1,
+	ODCL:        -1,
+	ODCLFIELD:   -1,
+	ODEFER:      -1,
+	OEMPTY:      -1,
+	OFALL:       -1,
+	OFOR:        -1,
+	OGOTO:       -1,
+	OIF:         -1,
+	OLABEL:      -1,
+	OPROC:       -1,
+	ORANGE:      -1,
+	ORETURN:     -1,
+	OSELECT:     -1,
+	OSWITCH:     -1,
+	OXCASE:      -1,
+	OXFALL:      -1,
+	OEND:        0,
+}
+
+func exprfmt(n *Node, prec int) string {
+	for n != nil && n.Implicit && (n.Op == OIND || n.Op == OADDR) {
+		n = n.Left
+	}
+
+	if n == nil {
+		return "<N>"
+	}
+
+	nprec := opprec[n.Op]
+	if n.Op == OTYPE && n.Sym != nil {
+		nprec = 8
+	}
+
+	if prec > nprec {
+		return fmt.Sprintf("(%v)", n)
+	}
+
+	switch n.Op {
+	case OPAREN:
+		return fmt.Sprintf("(%v)", n.Left)
+
+	case ODDDARG:
+		return "... argument"
+
+	case OREGISTER:
+		return obj.Rconv(int(n.Reg))
+
+	case OLITERAL: // this is a bit of a mess
+		if fmtmode == FErr {
+			if n.Orig != nil && n.Orig != n {
+				return exprfmt(n.Orig, prec)
+			}
+			if n.Sym != nil {
+				return Sconv(n.Sym, 0)
+			}
+		}
+		if n.Val.Ctype == CTNIL && n.Orig != nil && n.Orig != n {
+			return exprfmt(n.Orig, prec)
+		}
+		if n.Type != nil && n.Type != Types[n.Type.Etype] && n.Type != idealbool && n.Type != idealstring {
+			// Need parens when type begins with what might
+			// be misinterpreted as a unary operator: * or <-.
+			if Isptr[n.Type.Etype] || (n.Type.Etype == TCHAN && n.Type.Chan == Crecv) {
+				return fmt.Sprintf("(%v)(%v)", n.Type, Vconv(&n.Val, 0))
+			} else {
+				return fmt.Sprintf("%v(%v)", n.Type, Vconv(&n.Val, 0))
+			}
+		}
+
+		return Vconv(&n.Val, 0)
+
+	// Special case: name used as local variable in export.
+	// _ becomes ~b%d internally; print as _ for export
+	case ONAME:
+		if (fmtmode == FExp || fmtmode == FErr) && n.Sym != nil && n.Sym.Name[0] == '~' && n.Sym.Name[1] == 'b' {
+			return "_"
+		}
+		if fmtmode == FExp && n.Sym != nil && !isblank(n) && n.Vargen > 0 {
+			return fmt.Sprintf("%v·%d", n.Sym, n.Vargen)
+		}
+
+		// Special case: explicit name of func (*T) method(...) is turned into pkg.(*T).method,
+		// but for export, this should be rendered as (*pkg.T).meth.
+		// These nodes have the special property that they are names with a left OTYPE and a right ONAME.
+		if fmtmode == FExp && n.Left != nil && n.Left.Op == OTYPE && n.Right != nil && n.Right.Op == ONAME {
+			if Isptr[n.Left.Type.Etype] {
+				return fmt.Sprintf("(%v).%v", n.Left.Type, Sconv(n.Right.Sym, obj.FmtShort|obj.FmtByte))
+			} else {
+				return fmt.Sprintf("%v.%v", n.Left.Type, Sconv(n.Right.Sym, obj.FmtShort|obj.FmtByte))
+			}
+		}
+		fallthrough
+	case OPACK, ONONAME:
+		return Sconv(n.Sym, 0)
+
+	case OTYPE:
+		if n.Type == nil && n.Sym != nil {
+			return Sconv(n.Sym, 0)
+		}
+		return Tconv(n.Type, 0)
+
+	case OTARRAY:
+		if n.Left != nil {
+			return fmt.Sprintf("[]%v", n.Left)
+		}
+		var f string
+		f += fmt.Sprintf("[]%v", n.Right)
+		return f // happens before typecheck
+
+	case OTMAP:
+		return fmt.Sprintf("map[%v]%v", n.Left, n.Right)
+
+	case OTCHAN:
+		switch n.Etype {
+		case Crecv:
+			return fmt.Sprintf("<-chan %v", n.Left)
+
+		case Csend:
+			return fmt.Sprintf("chan<- %v", n.Left)
+
+		default:
+			if n.Left != nil && n.Left.Op == OTCHAN && n.Left.Sym == nil && n.Left.Etype == Crecv {
+				return fmt.Sprintf("chan (%v)", n.Left)
+			} else {
+				return fmt.Sprintf("chan %v", n.Left)
+			}
+		}
+
+	case OTSTRUCT:
+		return "<struct>"
+
+	case OTINTER:
+		return "<inter>"
+
+	case OTFUNC:
+		return "<func>"
+
+	case OCLOSURE:
+		if fmtmode == FErr {
+			return "func literal"
+		}
+		if n.Nbody != nil {
+			return fmt.Sprintf("%v { %v }", n.Type, n.Nbody)
+		}
+		return fmt.Sprintf("%v { %v }", n.Type, n.Closure.Nbody)
+
+	case OCOMPLIT:
+		ptrlit := n.Right != nil && n.Right.Implicit && n.Right.Type != nil && Isptr[n.Right.Type.Etype]
+		if fmtmode == FErr {
+			if n.Right != nil && n.Right.Type != nil && !n.Implicit {
+				if ptrlit {
+					return fmt.Sprintf("&%v literal", n.Right.Type.Type)
+				} else {
+					return fmt.Sprintf("%v literal", n.Right.Type)
+				}
+			}
+
+			return "composite literal"
+		}
+
+		if fmtmode == FExp && ptrlit {
+			// typecheck has overwritten OIND by OTYPE with pointer type.
+			return fmt.Sprintf("(&%v{ %v })", n.Right.Type.Type, Hconv(n.List, obj.FmtComma))
+		}
+
+		return fmt.Sprintf("(%v{ %v })", n.Right, Hconv(n.List, obj.FmtComma))
+
+	case OPTRLIT:
+		if fmtmode == FExp && n.Left.Implicit {
+			return Nconv(n.Left, 0)
+		}
+		return fmt.Sprintf("&%v", n.Left)
+
+	case OSTRUCTLIT:
+		if fmtmode == FExp { // requires special handling of field names
+			var f string
+			if n.Implicit {
+				f += "{"
+			} else {
+				f += fmt.Sprintf("(%v{", n.Type)
+			}
+			for l := n.List; l != nil; l = l.Next {
+				f += fmt.Sprintf(" %v:%v", Sconv(l.N.Left.Sym, obj.FmtShort|obj.FmtByte), l.N.Right)
+
+				if l.Next != nil {
+					f += ","
+				} else {
+					f += " "
+				}
+			}
+
+			if !n.Implicit {
+				f += "})"
+				return f
+			}
+			f += "}"
+			return f
+		}
+		fallthrough
+
+	case OARRAYLIT, OMAPLIT:
+		if fmtmode == FErr {
+			return fmt.Sprintf("%v literal", n.Type)
+		}
+		if fmtmode == FExp && n.Implicit {
+			return fmt.Sprintf("{ %v }", Hconv(n.List, obj.FmtComma))
+		}
+		return fmt.Sprintf("(%v{ %v })", n.Type, Hconv(n.List, obj.FmtComma))
+
+	case OKEY:
+		if n.Left != nil && n.Right != nil {
+			if fmtmode == FExp && n.Left.Type != nil && n.Left.Type.Etype == TFIELD {
+				// requires special handling of field names
+				return fmt.Sprintf("%v:%v", Sconv(n.Left.Sym, obj.FmtShort|obj.FmtByte), n.Right)
+			} else {
+				return fmt.Sprintf("%v:%v", n.Left, n.Right)
+			}
+		}
+
+		if n.Left == nil && n.Right != nil {
+			return fmt.Sprintf(":%v", n.Right)
+		}
+		if n.Left != nil && n.Right == nil {
+			return fmt.Sprintf("%v:", n.Left)
+		}
+		return ":"
+
+	case OXDOT,
+		ODOT,
+		ODOTPTR,
+		ODOTINTER,
+		ODOTMETH,
+		OCALLPART:
+		var f string
+		f += exprfmt(n.Left, nprec)
+		if n.Right == nil || n.Right.Sym == nil {
+			f += ".<nil>"
+			return f
+		}
+		f += fmt.Sprintf(".%v", Sconv(n.Right.Sym, obj.FmtShort|obj.FmtByte))
+		return f
+
+	case ODOTTYPE, ODOTTYPE2:
+		var f string
+		f += exprfmt(n.Left, nprec)
+		if n.Right != nil {
+			f += fmt.Sprintf(".(%v)", n.Right)
+			return f
+		}
+		f += fmt.Sprintf(".(%v)", n.Type)
+		return f
+
+	case OINDEX,
+		OINDEXMAP,
+		OSLICE,
+		OSLICESTR,
+		OSLICEARR,
+		OSLICE3,
+		OSLICE3ARR:
+		var f string
+		f += exprfmt(n.Left, nprec)
+		f += fmt.Sprintf("[%v]", n.Right)
+		return f
+
+	case OCOPY, OCOMPLEX:
+		return fmt.Sprintf("%v(%v, %v)", Oconv(int(n.Op), obj.FmtSharp), n.Left, n.Right)
+
+	case OCONV,
+		OCONVIFACE,
+		OCONVNOP,
+		OARRAYBYTESTR,
+		OARRAYRUNESTR,
+		OSTRARRAYBYTE,
+		OSTRARRAYRUNE,
+		ORUNESTR:
+		if n.Type == nil || n.Type.Sym == nil {
+			return fmt.Sprintf("(%v)(%v)", n.Type, n.Left)
+		}
+		if n.Left != nil {
+			return fmt.Sprintf("%v(%v)", n.Type, n.Left)
+		}
+		return fmt.Sprintf("%v(%v)", n.Type, Hconv(n.List, obj.FmtComma))
+
+	case OREAL,
+		OIMAG,
+		OAPPEND,
+		OCAP,
+		OCLOSE,
+		ODELETE,
+		OLEN,
+		OMAKE,
+		ONEW,
+		OPANIC,
+		ORECOVER,
+		OPRINT,
+		OPRINTN:
+		if n.Left != nil {
+			return fmt.Sprintf("%v(%v)", Oconv(int(n.Op), obj.FmtSharp), n.Left)
+		}
+		if n.Isddd {
+			return fmt.Sprintf("%v(%v...)", Oconv(int(n.Op), obj.FmtSharp), Hconv(n.List, obj.FmtComma))
+		}
+		return fmt.Sprintf("%v(%v)", Oconv(int(n.Op), obj.FmtSharp), Hconv(n.List, obj.FmtComma))
+
+	case OCALL, OCALLFUNC, OCALLINTER, OCALLMETH, OGETG:
+		var f string
+		f += exprfmt(n.Left, nprec)
+		if n.Isddd {
+			f += fmt.Sprintf("(%v...)", Hconv(n.List, obj.FmtComma))
+			return f
+		}
+		f += fmt.Sprintf("(%v)", Hconv(n.List, obj.FmtComma))
+		return f
+
+	case OMAKEMAP, OMAKECHAN, OMAKESLICE:
+		if n.List != nil { // pre-typecheck
+			return fmt.Sprintf("make(%v, %v)", n.Type, Hconv(n.List, obj.FmtComma))
+		}
+		if n.Right != nil {
+			return fmt.Sprintf("make(%v, %v, %v)", n.Type, n.Left, n.Right)
+		}
+		if n.Left != nil && (n.Op == OMAKESLICE || !isideal(n.Left.Type)) {
+			return fmt.Sprintf("make(%v, %v)", n.Type, n.Left)
+		}
+		return fmt.Sprintf("make(%v)", n.Type)
+
+	// Unary
+	case OPLUS,
+		OMINUS,
+		OADDR,
+		OCOM,
+		OIND,
+		ONOT,
+		ORECV:
+		var f string
+		if n.Left.Op == n.Op {
+			f += fmt.Sprintf("%v ", Oconv(int(n.Op), obj.FmtSharp))
+		} else {
+			f += Oconv(int(n.Op), obj.FmtSharp)
+		}
+		f += exprfmt(n.Left, nprec+1)
+		return f
+
+	// Binary
+	case OADD,
+		OAND,
+		OANDAND,
+		OANDNOT,
+		ODIV,
+		OEQ,
+		OGE,
+		OGT,
+		OLE,
+		OLT,
+		OLSH,
+		OMOD,
+		OMUL,
+		ONE,
+		OOR,
+		OOROR,
+		ORSH,
+		OSEND,
+		OSUB,
+		OXOR:
+		var f string
+		f += exprfmt(n.Left, nprec)
+
+		f += fmt.Sprintf(" %v ", Oconv(int(n.Op), obj.FmtSharp))
+		f += exprfmt(n.Right, nprec+1)
+		return f
+
+	case OADDSTR:
+		var f string
+		for l := n.List; l != nil; l = l.Next {
+			if l != n.List {
+				f += " + "
+			}
+			f += exprfmt(l.N, nprec)
+		}
+
+		return f
+
+	case OCMPSTR, OCMPIFACE:
+		var f string
+		f += exprfmt(n.Left, nprec)
+		f += fmt.Sprintf(" %v ", Oconv(int(n.Etype), obj.FmtSharp))
+		f += exprfmt(n.Right, nprec+1)
+		return f
+	}
+
+	return fmt.Sprintf("<node %v>", Oconv(int(n.Op), 0))
+}
+
+func nodefmt(n *Node, flag int) string {
+	t := n.Type
+
+	// We almost always want the original, except in export mode for literals;
+	// this saves the importer some work and avoids having to redo some
+	// special casing for package unsafe.
+	if (fmtmode != FExp || n.Op != OLITERAL) && n.Orig != nil {
+		n = n.Orig
+	}
+
+	if flag&obj.FmtLong != 0 && t != nil {
+		if t.Etype == TNIL {
+			return "nil"
+		} else {
+			return fmt.Sprintf("%v (type %v)", n, t)
+		}
+	}
+
+	// TODO inlining produces expressions with ninits. we can't print these yet.
+
+	if opprec[n.Op] < 0 {
+		return stmtfmt(n)
+	}
+
+	return exprfmt(n, 0)
+}
+
+var dumpdepth int
+
+func indent(buf *bytes.Buffer) {
+	buf.WriteString("\n")
+	for i := 0; i < dumpdepth; i++ {
+		buf.WriteString(".   ")
+	}
+}
+
+func nodedump(n *Node, flag int) string {
+	if n == nil {
+		return ""
+	}
+
+	recur := flag&obj.FmtShort == 0
+
+	var buf bytes.Buffer
+	if recur {
+		indent(&buf)
+		if dumpdepth > 10 {
+			buf.WriteString("...")
+			return buf.String()
+		}
+
+		if n.Ninit != nil {
+			fmt.Fprintf(&buf, "%v-init%v", Oconv(int(n.Op), 0), n.Ninit)
+			indent(&buf)
+		}
+	}
+
+	switch n.Op {
+	default:
+		fmt.Fprintf(&buf, "%v%v", Oconv(int(n.Op), 0), Jconv(n, 0))
+
+	case OREGISTER, OINDREG:
+		fmt.Fprintf(&buf, "%v-%v%v", Oconv(int(n.Op), 0), obj.Rconv(int(n.Reg)), Jconv(n, 0))
+
+	case OLITERAL:
+		fmt.Fprintf(&buf, "%v-%v%v", Oconv(int(n.Op), 0), Vconv(&n.Val, 0), Jconv(n, 0))
+
+	case ONAME, ONONAME:
+		if n.Sym != nil {
+			fmt.Fprintf(&buf, "%v-%v%v", Oconv(int(n.Op), 0), n.Sym, Jconv(n, 0))
+		} else {
+			fmt.Fprintf(&buf, "%v%v", Oconv(int(n.Op), 0), Jconv(n, 0))
+		}
+		if recur && n.Type == nil && n.Ntype != nil {
+			indent(&buf)
+			fmt.Fprintf(&buf, "%v-ntype%v", Oconv(int(n.Op), 0), n.Ntype)
+		}
+
+	case OASOP:
+		fmt.Fprintf(&buf, "%v-%v%v", Oconv(int(n.Op), 0), Oconv(int(n.Etype), 0), Jconv(n, 0))
+
+	case OTYPE:
+		fmt.Fprintf(&buf, "%v %v%v type=%v", Oconv(int(n.Op), 0), n.Sym, Jconv(n, 0), n.Type)
+		if recur && n.Type == nil && n.Ntype != nil {
+			indent(&buf)
+			fmt.Fprintf(&buf, "%v-ntype%v", Oconv(int(n.Op), 0), n.Ntype)
+		}
+	}
+
+	if n.Sym != nil && n.Op != ONAME {
+		fmt.Fprintf(&buf, " %v G%d", n.Sym, n.Vargen)
+	}
+
+	if n.Type != nil {
+		fmt.Fprintf(&buf, " %v", n.Type)
+	}
+
+	if recur {
+		if n.Left != nil {
+			buf.WriteString(Nconv(n.Left, 0))
+		}
+		if n.Right != nil {
+			buf.WriteString(Nconv(n.Right, 0))
+		}
+		if n.List != nil {
+			indent(&buf)
+			fmt.Fprintf(&buf, "%v-list%v", Oconv(int(n.Op), 0), n.List)
+		}
+
+		if n.Rlist != nil {
+			indent(&buf)
+			fmt.Fprintf(&buf, "%v-rlist%v", Oconv(int(n.Op), 0), n.Rlist)
+		}
+
+		if n.Ntest != nil {
+			indent(&buf)
+			fmt.Fprintf(&buf, "%v-test%v", Oconv(int(n.Op), 0), n.Ntest)
+		}
+
+		if n.Nbody != nil {
+			indent(&buf)
+			fmt.Fprintf(&buf, "%v-body%v", Oconv(int(n.Op), 0), n.Nbody)
+		}
+
+		if n.Nelse != nil {
+			indent(&buf)
+			fmt.Fprintf(&buf, "%v-else%v", Oconv(int(n.Op), 0), n.Nelse)
+		}
+
+		if n.Nincr != nil {
+			indent(&buf)
+			fmt.Fprintf(&buf, "%v-incr%v", Oconv(int(n.Op), 0), n.Nincr)
+		}
+	}
+
+	return buf.String()
+}
+
+func (s *Sym) String() string {
+	return Sconv(s, 0)
+}
+
+// Fmt "%S": syms
+// Flags:  "%hS" suppresses qualifying with package
+func Sconv(s *Sym, flag int) string {
+	if flag&obj.FmtLong != 0 {
+		panic("linksymfmt")
+	}
+
+	if s == nil {
+		return "<S>"
+	}
+
+	if s.Name == "_" {
+		return "_"
+	}
+
+	sf := flag
+	sm := setfmode(&flag)
+	str := symfmt(s, flag)
+	flag = sf
+	fmtmode = sm
+	return str
+}
+
+func (t *Type) String() string {
+	return Tconv(t, 0)
+}
+
+// Fmt "%T": types.
+// Flags: 'l' print definition, not name
+//	  'h' omit 'func' and receiver from function types, short type names
+//	  'u' package name, not prefix (FTypeId mode, sticky)
+func Tconv(t *Type, flag int) string {
+	if t == nil {
+		return "<T>"
+	}
+
+	if t.Trecur > 4 {
+		return "<...>"
+	}
+
+	t.Trecur++
+	sf := flag
+	sm := setfmode(&flag)
+
+	if fmtmode == FTypeId && (sf&obj.FmtUnsigned != 0) {
+		fmtpkgpfx++
+	}
+	if fmtpkgpfx != 0 {
+		flag |= obj.FmtUnsigned
+	}
+
+	str := typefmt(t, flag)
+
+	if fmtmode == FTypeId && (sf&obj.FmtUnsigned != 0) {
+		fmtpkgpfx--
+	}
+
+	flag = sf
+	fmtmode = sm
+	t.Trecur--
+	return str
+}
+
+func (n *Node) String() string {
+	return Nconv(n, 0)
+}
+
+// Fmt '%N': Nodes.
+// Flags: 'l' suffix with "(type %T)" where possible
+//	  '+h' in debug mode, don't recurse, no multiline output
+func Nconv(n *Node, flag int) string {
+	if n == nil {
+		return "<N>"
+	}
+	sf := flag
+	sm := setfmode(&flag)
+
+	var str string
+	switch fmtmode {
+	case FErr, FExp:
+		str = nodefmt(n, flag)
+
+	case FDbg:
+		dumpdepth++
+		str = nodedump(n, flag)
+		dumpdepth--
+
+	default:
+		Fatal("unhandled %%N mode")
+	}
+
+	flag = sf
+	fmtmode = sm
+	return str
+}
+
+func (l *NodeList) String() string {
+	return Hconv(l, 0)
+}
+
+// Fmt '%H': NodeList.
+// Flags: all those of %N plus ',': separate with commas instead of semicolons.
+func Hconv(l *NodeList, flag int) string {
+	if l == nil && fmtmode == FDbg {
+		return "<nil>"
+	}
+
+	sf := flag
+	sm := setfmode(&flag)
+	sep := "; "
+	if fmtmode == FDbg {
+		sep = "\n"
+	} else if flag&obj.FmtComma != 0 {
+		sep = ", "
+	}
+
+	var buf bytes.Buffer
+	for ; l != nil; l = l.Next {
+		buf.WriteString(Nconv(l.N, 0))
+		if l.Next != nil {
+			buf.WriteString(sep)
+		}
+	}
+
+	flag = sf
+	fmtmode = sm
+	return buf.String()
+}
+
+func dumplist(s string, l *NodeList) {
+	fmt.Printf("%s%v\n", s, Hconv(l, obj.FmtSign))
+}
+
+func Dump(s string, n *Node) {
+	fmt.Printf("%s [%p]%v\n", s, n, Nconv(n, obj.FmtSign))
+}
diff --git a/src/cmd/compile/internal/gc/gen.go b/src/cmd/compile/internal/gc/gen.go
new file mode 100644
index 0000000..d3c6387
--- /dev/null
+++ b/src/cmd/compile/internal/gc/gen.go
@@ -0,0 +1,1279 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+	"cmd/internal/obj"
+	"fmt"
+)
+
+/*
+ * portable half of code generator.
+ * mainly statements and control flow.
+ */
+var labellist *Label
+
+var lastlabel *Label
+
+func Sysfunc(name string) *Node {
+	n := newname(Pkglookup(name, Runtimepkg))
+	n.Class = PFUNC
+	return n
+}
+
+// addrescapes tags node n as having had its address taken
+// by "increasing" the "value" of n.Esc to EscHeap.
+// Storage is allocated as necessary to allow the address
+// to be taken.
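+//
+// For a local variable or parameter whose address escapes, the node is
+// marked PHEAP and given a stack word (Name.Heapaddr, named "&x" for a
+// variable x) that holds the pointer to its heap copy.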
+func addrescapes(n *Node) {
+	switch n.Op {
+	// probably a type error already.
+	// dump("addrescapes", n);
+	default:
+		break
+
+	case ONAME:
+		if n == nodfp {
+			break
+		}
+
+		// if this is a tmpname (PAUTO), it was tagged by tmpname as not escaping.
+		// on PPARAM it means something different.
+		if n.Class == PAUTO && n.Esc == EscNever {
+			break
+		}
+
+		switch n.Class {
+		case PPARAMREF:
+			addrescapes(n.Defn)
+
+		// if func param, need separate temporary
+		// to hold heap pointer.
+		// the function type has already been checked
+		// (we're in the function body)
+		// so the param already has a valid xoffset.
+
+		// expression to refer to stack copy
+		case PPARAM, PPARAMOUT:
+			n.Stackparam = Nod(OPARAM, n, nil)
+
+			n.Stackparam.Type = n.Type
+			n.Stackparam.Addable = true
+			if n.Xoffset == BADWIDTH {
+				Fatal("addrescapes before param assignment")
+			}
+			n.Stackparam.Xoffset = n.Xoffset
+			fallthrough
+
+		case PAUTO:
+			n.Class |= PHEAP
+
+			n.Addable = false
+			n.Ullman = 2
+			n.Xoffset = 0
+
+			// create stack variable to hold pointer to heap
+			oldfn := Curfn
+
+			Curfn = n.Curfn
+			n.Name.Heapaddr = temp(Ptrto(n.Type))
+			buf := fmt.Sprintf("&%v", n.Sym)
+			n.Name.Heapaddr.Sym = Lookup(buf)
+			n.Name.Heapaddr.Orig.Sym = n.Name.Heapaddr.Sym
+			n.Esc = EscHeap
+			if Debug['m'] != 0 {
+				fmt.Printf("%v: moved to heap: %v\n", n.Line(), n)
+			}
+			Curfn = oldfn
+		}
+
+	case OIND, ODOTPTR:
+		break
+
+	// ODOTPTR has already been introduced,
+	// so these are the non-pointer ODOT and OINDEX.
+	// In &x[0], if x is a slice, then x does not
+	// escape--the pointer inside x does, but that
+	// is always a heap pointer anyway.
+	case ODOT, OINDEX, OPAREN, OCONVNOP:
+		if !Isslice(n.Left.Type) {
+			addrescapes(n.Left)
+		}
+	}
+}
+
+func clearlabels() {
+	for l := labellist; l != nil; l = l.Link {
+		l.Sym.Label = nil
+	}
+
+	labellist = nil
+	lastlabel = nil
+}
+
+func newlab(n *Node) *Label {
+	s := n.Left.Sym
+	lab := s.Label
+	if lab == nil {
+		lab = new(Label)
+		if lastlabel == nil {
+			labellist = lab
+		} else {
+			lastlabel.Link = lab
+		}
+		lastlabel = lab
+		lab.Sym = s
+		s.Label = lab
+	}
+
+	if n.Op == OLABEL {
+		if lab.Def != nil {
+			Yyerror("label %v already defined at %v", s, lab.Def.Line())
+		} else {
+			lab.Def = n
+		}
+	} else {
+		lab.Use = list(lab.Use, n)
+	}
+
+	return lab
+}
+
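+// checkgoto reports an error when a goto jumps into a block or over a
+// variable declaration. It trims the longer of the two scope stacks
+// (recorded in from.Sym and to.Sym) and walks both back to their common
+// suffix; any block entries (nil Pkg) or declaration entries remaining
+// on the target's side are scopes the goto would enter or declarations
+// it would jump over.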
+func checkgoto(from *Node, to *Node) {
+	if from.Sym == to.Sym {
+		return
+	}
+
+	nf := 0
+	for fs := from.Sym; fs != nil; fs = fs.Link {
+		nf++
+	}
+	nt := 0
+	for fs := to.Sym; fs != nil; fs = fs.Link {
+		nt++
+	}
+	fs := from.Sym
+	for ; nf > nt; nf-- {
+		fs = fs.Link
+	}
+	if fs != to.Sym {
+		lno := int(lineno)
+		setlineno(from)
+
+		// decide what to complain about.
+		// prefer to complain about 'into block' over declarations,
+		// so scan backward to find most recent block or else dcl.
+		var block *Sym
+
+		var dcl *Sym
+		ts := to.Sym
+		for ; nt > nf; nt-- {
+			if ts.Pkg == nil {
+				block = ts
+			} else {
+				dcl = ts
+			}
+			ts = ts.Link
+		}
+
+		for ts != fs {
+			if ts.Pkg == nil {
+				block = ts
+			} else {
+				dcl = ts
+			}
+			ts = ts.Link
+			fs = fs.Link
+		}
+
+		if block != nil {
+			Yyerror("goto %v jumps into block starting at %v", from.Left.Sym, Ctxt.Line(int(block.Lastlineno)))
+		} else {
+			Yyerror("goto %v jumps over declaration of %v at %v", from.Left.Sym, dcl, Ctxt.Line(int(dcl.Lastlineno)))
+		}
+		lineno = int32(lno)
+	}
+}
+
+func stmtlabel(n *Node) *Label {
+	if n.Sym != nil {
+		lab := n.Sym.Label
+		if lab != nil {
+			if lab.Def != nil {
+				if lab.Def.Defn == n {
+					return lab
+				}
+			}
+		}
+	}
+	return nil
+}
+
+/*
+ * compile statements
+ */
+func Genlist(l *NodeList) {
+	for ; l != nil; l = l.Next {
+		gen(l.N)
+	}
+}
+
+/*
+ * generate code to start new proc running call n.
+ */
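+// proc selects the mechanism: 1 for a go statement, 2 for a defer
+// (see the OPROC and ODEFER cases in gen below).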
+func cgen_proc(n *Node, proc int) {
+	switch n.Left.Op {
+	default:
+		Fatal("cgen_proc: unknown call %v", Oconv(int(n.Left.Op), 0))
+
+	case OCALLMETH:
+		cgen_callmeth(n.Left, proc)
+
+	case OCALLINTER:
+		cgen_callinter(n.Left, nil, proc)
+
+	case OCALLFUNC:
+		cgen_call(n.Left, proc)
+	}
+}
+
+/*
+ * generate declaration.
+ * have to allocate heap copy
+ * for escaped variables.
+ */
+func cgen_dcl(n *Node) {
+	if Debug['g'] != 0 {
+		Dump("\ncgen-dcl", n)
+	}
+	if n.Op != ONAME {
+		Dump("cgen_dcl", n)
+		Fatal("cgen_dcl")
+	}
+
+	if n.Class&PHEAP == 0 {
+		return
+	}
+	if compiling_runtime != 0 {
+		Yyerror("%v escapes to heap, not allowed in runtime.", n)
+	}
+	if n.Alloc == nil {
+		n.Alloc = callnew(n.Type)
+	}
+	Cgen_as(n.Name.Heapaddr, n.Alloc)
+}
+
+/*
+ * generate discard of value
+ */
+func cgen_discard(nr *Node) {
+	if nr == nil {
+		return
+	}
+
+	switch nr.Op {
+	case ONAME:
+		if nr.Class&PHEAP == 0 && nr.Class != PEXTERN && nr.Class != PFUNC && nr.Class != PPARAMREF {
+			gused(nr)
+		}
+
+		// binary
+	case OADD,
+		OAND,
+		ODIV,
+		OEQ,
+		OGE,
+		OGT,
+		OLE,
+		OLSH,
+		OLT,
+		OMOD,
+		OMUL,
+		ONE,
+		OOR,
+		ORSH,
+		OSUB,
+		OXOR:
+		cgen_discard(nr.Left)
+
+		cgen_discard(nr.Right)
+
+		// unary
+	case OCAP,
+		OCOM,
+		OLEN,
+		OMINUS,
+		ONOT,
+		OPLUS:
+		cgen_discard(nr.Left)
+
+	case OIND:
+		Cgen_checknil(nr.Left)
+
+		// special enough to just evaluate
+	default:
+		var tmp Node
+		Tempname(&tmp, nr.Type)
+
+		Cgen_as(&tmp, nr)
+		gused(&tmp)
+	}
+}
+
+/*
+ * clearslim generates code to zero a slim node.
+ */
+func Clearslim(n *Node) {
+	var z Node
+	z.Op = OLITERAL
+	z.Type = n.Type
+	z.Addable = true
+
+	switch Simtype[n.Type.Etype] {
+	case TCOMPLEX64, TCOMPLEX128:
+		z.Val.U = new(Mpcplx)
+		Mpmovecflt(&z.Val.U.(*Mpcplx).Real, 0.0)
+		Mpmovecflt(&z.Val.U.(*Mpcplx).Imag, 0.0)
+
+	case TFLOAT32, TFLOAT64:
+		var zero Mpflt
+		Mpmovecflt(&zero, 0.0)
+		z.Val.Ctype = CTFLT
+		z.Val.U = &zero
+
+	case TPTR32, TPTR64, TCHAN, TMAP:
+		z.Val.Ctype = CTNIL
+
+	case TBOOL:
+		z.Val.Ctype = CTBOOL
+		z.Val.U = false
+
+	case TINT8,
+		TINT16,
+		TINT32,
+		TINT64,
+		TUINT8,
+		TUINT16,
+		TUINT32,
+		TUINT64:
+		z.Val.Ctype = CTINT
+		z.Val.U = new(Mpint)
+		Mpmovecfix(z.Val.U.(*Mpint), 0)
+
+	default:
+		Fatal("clearslim called on type %v", n.Type)
+	}
+
+	ullmancalc(&z)
+	Cgen(&z, n)
+}
+
+/*
+ * generate:
+ *	res = iface{typ, data}
+ * n.Left is typ
+ * n.Right is data
+ */
+func Cgen_eface(n *Node, res *Node) {
+	/*
+	 * the right node of an eface may contain function calls that use res as an argument,
+	 * so it is important that the right node be evaluated first
+	 */
+
+	tmp := temp(Types[Tptr])
+	Cgen(n.Right, tmp)
+
+	Gvardef(res)
+
+	dst := *res
+	dst.Type = Types[Tptr]
+	dst.Xoffset += int64(Widthptr)
+	Cgen(tmp, &dst)
+
+	dst.Xoffset -= int64(Widthptr)
+	Cgen(n.Left, &dst)
+}
+
+/*
+ * generate one of:
+ *	res, resok = x.(T)
+ *	res = x.(T) (when resok == nil)
+ * n.Left is x
+ * n.Type is T
+ */
+func cgen_dottype(n *Node, res, resok *Node, wb bool) {
+	if Debug_typeassert > 0 {
+		Warn("type assertion inlined")
+	}
+	//	iface := n.Left
+	//	r1 := iword(iface)
+	//	if n.Left is non-empty interface {
+	//		r1 = *r1
+	//	}
+	//	if r1 == T {
+	//		res = idata(iface)
+	//		resok = true
+	//	} else {
+	//		assert[EI]2T(x, T, nil) // (when resok == nil; does not return)
+	//		resok = false // (when resok != nil)
+	//	}
+	//
+	var iface Node
+	Igen(n.Left, &iface, res)
+	var r1, r2 Node
+	byteptr := Ptrto(Types[TUINT8]) // type used in runtime prototypes for runtime type (*byte)
+	Regalloc(&r1, byteptr, nil)
+	iface.Type = byteptr
+	Cgen(&iface, &r1)
+	if !isnilinter(n.Left.Type) {
+		// Holding itab, want concrete type in second word.
+		p := Thearch.Ginscmp(OEQ, byteptr, &r1, Nodintconst(0), -1)
+		r2 = r1
+		r2.Op = OINDREG
+		r2.Xoffset = int64(Widthptr)
+		Cgen(&r2, &r1)
+		Patch(p, Pc)
+	}
+	Regalloc(&r2, byteptr, nil)
+	Cgen(typename(n.Type), &r2)
+	p := Thearch.Ginscmp(ONE, byteptr, &r1, &r2, -1)
+	Regfree(&r2) // not needed for success path; reclaimed on one failure path
+	iface.Xoffset += int64(Widthptr)
+	Cgen(&iface, &r1)
+	Regfree(&iface)
+
+	if resok == nil {
+		r1.Type = res.Type
+		cgen_wb(&r1, res, wb)
+		q := Gbranch(obj.AJMP, nil, 0)
+		Patch(p, Pc)
+		Regrealloc(&r2) // reclaim from above, for this failure path
+		fn := syslook("panicdottype", 0)
+		dowidth(fn.Type)
+		call := Nod(OCALLFUNC, fn, nil)
+		r1.Type = byteptr
+		r2.Type = byteptr
+		call.List = list(list(list1(&r1), &r2), typename(n.Left.Type))
+		call.List = ascompatte(OCALLFUNC, call, false, getinarg(fn.Type), call.List, 0, nil)
+		gen(call)
+		Regfree(&r1)
+		Regfree(&r2)
+		Thearch.Gins(obj.AUNDEF, nil, nil)
+		Patch(q, Pc)
+	} else {
+		// This half is handling the res, resok = x.(T) case,
+		// which is called from gen, not cgen, and is consequently fussier
+		// about blank assignments. We have to avoid calling cgen for those.
+		r1.Type = res.Type
+		if !isblank(res) {
+			cgen_wb(&r1, res, wb)
+		}
+		Regfree(&r1)
+		if !isblank(resok) {
+			Cgen(Nodbool(true), resok)
+		}
+		q := Gbranch(obj.AJMP, nil, 0)
+		Patch(p, Pc)
+		if !isblank(res) {
+			n := nodnil()
+			n.Type = res.Type
+			Cgen(n, res)
+		}
+		if !isblank(resok) {
+			Cgen(Nodbool(false), resok)
+		}
+		Patch(q, Pc)
+	}
+}
+
+/*
+ * generate:
+ *	res, resok = x.(T)
+ * n.Left is x
+ * n.Type is T
+ */
+func Cgen_As2dottype(n, res, resok *Node) {
+	if Debug_typeassert > 0 {
+		Warn("type assertion inlined")
+	}
+	//	iface := n.Left
+	//	r1 := iword(iface)
+	//	if n.Left is non-empty interface {
+	//		r1 = *r1
+	//	}
+	//	if r1 == T {
+	//		res = idata(iface)
+	//		resok = true
+	//	} else {
+	//		res = nil
+	//		resok = false
+	//	}
+	//
+	var iface Node
+	Igen(n.Left, &iface, nil)
+	var r1, r2 Node
+	byteptr := Ptrto(Types[TUINT8]) // type used in runtime prototypes for runtime type (*byte)
+	Regalloc(&r1, byteptr, res)
+	iface.Type = byteptr
+	Cgen(&iface, &r1)
+	if !isnilinter(n.Left.Type) {
+		// Holding itab, want concrete type in second word.
+		p := Thearch.Ginscmp(OEQ, byteptr, &r1, Nodintconst(0), -1)
+		r2 = r1
+		r2.Op = OINDREG
+		r2.Xoffset = int64(Widthptr)
+		Cgen(&r2, &r1)
+		Patch(p, Pc)
+	}
+	Regalloc(&r2, byteptr, nil)
+	Cgen(typename(n.Type), &r2)
+	p := Thearch.Ginscmp(ONE, byteptr, &r1, &r2, -1)
+	iface.Type = n.Type
+	iface.Xoffset += int64(Widthptr)
+	Cgen(&iface, &r1)
+	if iface.Op != 0 {
+		Regfree(&iface)
+	}
+	Cgen(&r1, res)
+	q := Gbranch(obj.AJMP, nil, 0)
+	Patch(p, Pc)
+
+	fn := syslook("panicdottype", 0)
+	dowidth(fn.Type)
+	call := Nod(OCALLFUNC, fn, nil)
+	call.List = list(list(list1(&r1), &r2), typename(n.Left.Type))
+	call.List = ascompatte(OCALLFUNC, call, false, getinarg(fn.Type), call.List, 0, nil)
+	gen(call)
+	Regfree(&r1)
+	Regfree(&r2)
+	Thearch.Gins(obj.AUNDEF, nil, nil)
+	Patch(q, Pc)
+}
+
+/*
+ * gather series of offsets
+ * >=0 is direct addressed field
+ * <0 is pointer to next field (+1)
+ */
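+//
+// For example (illustrative): for p.f.g, where p is a pointer to struct,
+// the result is the single entry oary[0] = -(off(f)+off(g)+1), meaning
+// "load p, then address *p + off(f)+off(g)"; for a non-pointer s.f.g it
+// is the direct entry oary[0] = off(f)+off(g).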
+func Dotoffset(n *Node, oary []int64, nn **Node) int {
+	var i int
+
+	switch n.Op {
+	case ODOT:
+		if n.Xoffset == BADWIDTH {
+			Dump("bad width in dotoffset", n)
+			Fatal("bad width in dotoffset")
+		}
+
+		i = Dotoffset(n.Left, oary, nn)
+		if i > 0 {
+			if oary[i-1] >= 0 {
+				oary[i-1] += n.Xoffset
+			} else {
+				oary[i-1] -= n.Xoffset
+			}
+			break
+		}
+
+		if i < 10 {
+			oary[i] = n.Xoffset
+			i++
+		}
+
+	case ODOTPTR:
+		if n.Xoffset == BADWIDTH {
+			Dump("bad width in dotoffset", n)
+			Fatal("bad width in dotoffset")
+		}
+
+		i = Dotoffset(n.Left, oary, nn)
+		if i < 10 {
+			oary[i] = -(n.Xoffset + 1)
+			i++
+		}
+
+	default:
+		*nn = n
+		return 0
+	}
+
+	if i >= 10 {
+		*nn = nil
+	}
+	return i
+}
+
+/*
+ * make a new temporary, off the books (no user-visible declaration)
+ */
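+// Generated temporaries are named autotmp_0000, autotmp_0001, and so on,
+// one fresh name per call (see the Lookupf format string below).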
+func Tempname(nn *Node, t *Type) {
+	if Curfn == nil {
+		Fatal("no curfn for tempname")
+	}
+
+	if t == nil {
+		Yyerror("tempname called with nil type")
+		t = Types[TINT32]
+	}
+
+	// give each tmp a different name so that there is
+	// a chance to registerize them
+	s := Lookupf("autotmp_%.4d", statuniqgen)
+	statuniqgen++
+	n := Nod(ONAME, nil, nil)
+	n.Sym = s
+	s.Def = n
+	n.Type = t
+	n.Class = PAUTO
+	n.Addable = true
+	n.Ullman = 1
+	n.Esc = EscNever
+	n.Curfn = Curfn
+	Curfn.Func.Dcl = list(Curfn.Func.Dcl, n)
+
+	dowidth(t)
+	n.Xoffset = 0
+	*nn = *n
+}
+
+func temp(t *Type) *Node {
+	n := Nod(OXXX, nil, nil)
+	Tempname(n, t)
+	n.Sym.Def.Used = true
+	return n.Orig
+}
+
+func gen(n *Node) {
+	//dump("gen", n);
+
+	lno := setlineno(n)
+
+	wasregalloc := Anyregalloc()
+
+	if n == nil {
+		goto ret
+	}
+
+	if n.Ninit != nil {
+		Genlist(n.Ninit)
+	}
+
+	setlineno(n)
+
+	switch n.Op {
+	default:
+		Fatal("gen: unknown op %v", Nconv(n, obj.FmtShort|obj.FmtSign))
+
+	case OCASE,
+		OFALL,
+		OXCASE,
+		OXFALL,
+		ODCLCONST,
+		ODCLFUNC,
+		ODCLTYPE:
+		break
+
+	case OEMPTY:
+		break
+
+	case OBLOCK:
+		Genlist(n.List)
+
+	case OLABEL:
+		if isblanksym(n.Left.Sym) {
+			break
+		}
+
+		lab := newlab(n)
+
+		// if there are pending gotos, resolve them all to the current pc.
+		var p2 *obj.Prog
+		for p1 := lab.Gotopc; p1 != nil; p1 = p2 {
+			p2 = unpatch(p1)
+			Patch(p1, Pc)
+		}
+
+		lab.Gotopc = nil
+		if lab.Labelpc == nil {
+			lab.Labelpc = Pc
+		}
+
+		if n.Defn != nil {
+			switch n.Defn.Op {
+			// so stmtlabel can find the label
+			case OFOR, OSWITCH, OSELECT:
+				n.Defn.Sym = lab.Sym
+			}
+		}
+
+		// if label is defined, emit jump to it.
+	// otherwise save list of pending gotos in lab.Gotopc.
+	// the list is linked through the normal jump target field
+	// to avoid a second list.  (the jumps are actually still
+	// valid code, since they're just going to another goto
+	// to the same label.  we'll unwind it when we learn the pc
+	// of the label in the OLABEL case above.)
+	case OGOTO:
+		lab := newlab(n)
+
+		if lab.Labelpc != nil {
+			gjmp(lab.Labelpc)
+		} else {
+			lab.Gotopc = gjmp(lab.Gotopc)
+		}
+
+	case OBREAK:
+		if n.Left != nil {
+			lab := n.Left.Sym.Label
+			if lab == nil {
+				Yyerror("break label not defined: %v", n.Left.Sym)
+				break
+			}
+
+			lab.Used = 1
+			if lab.Breakpc == nil {
+				Yyerror("invalid break label %v", n.Left.Sym)
+				break
+			}
+
+			gjmp(lab.Breakpc)
+			break
+		}
+
+		if breakpc == nil {
+			Yyerror("break is not in a loop")
+			break
+		}
+
+		gjmp(breakpc)
+
+	case OCONTINUE:
+		if n.Left != nil {
+			lab := n.Left.Sym.Label
+			if lab == nil {
+				Yyerror("continue label not defined: %v", n.Left.Sym)
+				break
+			}
+
+			lab.Used = 1
+			if lab.Continpc == nil {
+				Yyerror("invalid continue label %v", n.Left.Sym)
+				break
+			}
+
+			gjmp(lab.Continpc)
+			break
+		}
+
+		if continpc == nil {
+			Yyerror("continue is not in a loop")
+			break
+		}
+
+		gjmp(continpc)
+
+	case OFOR:
+		sbreak := breakpc
+		p1 := gjmp(nil)     //		goto test
+		breakpc = gjmp(nil) // break:	goto done
+		scontin := continpc
+		continpc = Pc
+
+		// define break and continue labels
+		lab := stmtlabel(n)
+		if lab != nil {
+			lab.Breakpc = breakpc
+			lab.Continpc = continpc
+		}
+
+		gen(n.Nincr)                      // contin:	incr
+		Patch(p1, Pc)                     // test:
+		Bgen(n.Ntest, false, -1, breakpc) //		if(!test) goto break
+		Genlist(n.Nbody)                  //		body
+		gjmp(continpc)
+		Patch(breakpc, Pc) // done:
+		continpc = scontin
+		breakpc = sbreak
+		if lab != nil {
+			lab.Breakpc = nil
+			lab.Continpc = nil
+		}
+
+	case OIF:
+		p1 := gjmp(nil)                          //		goto test
+		p2 := gjmp(nil)                          // p2:		goto else
+		Patch(p1, Pc)                            // test:
+		Bgen(n.Ntest, false, int(-n.Likely), p2) //		if(!test) goto p2
+		Genlist(n.Nbody)                         //		then
+		p3 := gjmp(nil)                          //		goto done
+		Patch(p2, Pc)                            // else:
+		Genlist(n.Nelse)                         //		else
+		Patch(p3, Pc)                            // done:
+
+	case OSWITCH:
+		sbreak := breakpc
+		p1 := gjmp(nil)     //		goto test
+		breakpc = gjmp(nil) // break:	goto done
+
+		// define break label
+		lab := stmtlabel(n)
+		if lab != nil {
+			lab.Breakpc = breakpc
+		}
+
+		Patch(p1, Pc)      // test:
+		Genlist(n.Nbody)   //		switch(test) body
+		Patch(breakpc, Pc) // done:
+		breakpc = sbreak
+		if lab != nil {
+			lab.Breakpc = nil
+		}
+
+	case OSELECT:
+		sbreak := breakpc
+		p1 := gjmp(nil)     //		goto test
+		breakpc = gjmp(nil) // break:	goto done
+
+		// define break label
+		lab := stmtlabel(n)
+		if lab != nil {
+			lab.Breakpc = breakpc
+		}
+
+		Patch(p1, Pc)      // test:
+		Genlist(n.Nbody)   //		select() body
+		Patch(breakpc, Pc) // done:
+		breakpc = sbreak
+		if lab != nil {
+			lab.Breakpc = nil
+		}
+
+	case ODCL:
+		cgen_dcl(n.Left)
+
+	case OAS:
+		if gen_as_init(n) {
+			break
+		}
+		Cgen_as(n.Left, n.Right)
+
+	case OASWB:
+		Cgen_as_wb(n.Left, n.Right, true)
+
+	case OAS2DOTTYPE:
+		cgen_dottype(n.Rlist.N, n.List.N, n.List.Next.N, false)
+
+	case OCALLMETH:
+		cgen_callmeth(n, 0)
+
+	case OCALLINTER:
+		cgen_callinter(n, nil, 0)
+
+	case OCALLFUNC:
+		cgen_call(n, 0)
+
+	case OPROC:
+		cgen_proc(n, 1)
+
+	case ODEFER:
+		cgen_proc(n, 2)
+
+	case ORETURN, ORETJMP:
+		cgen_ret(n)
+
+	// Function calls turned into compiler intrinsics.
+	// At top level, can just ignore the call and make sure to preserve side effects in the argument, if any.
+	case OGETG:
+		// nothing
+	case OSQRT:
+		cgen_discard(n.Left)
+
+	case OCHECKNIL:
+		Cgen_checknil(n.Left)
+
+	case OVARKILL:
+		gvarkill(n.Left)
+	}
+
+ret:
+	if Anyregalloc() != wasregalloc {
+		Dump("node", n)
+		Fatal("registers left allocated")
+	}
+
+	lineno = lno
+}
+
+func Cgen_as(nl, nr *Node) {
+	Cgen_as_wb(nl, nr, false)
+}
+
+func Cgen_as_wb(nl, nr *Node, wb bool) {
+	if Debug['g'] != 0 {
+		op := "cgen_as"
+		if wb {
+			op = "cgen_as_wb"
+		}
+		Dump(op, nl)
+		Dump(op+" = ", nr)
+	}
+
+	for nr != nil && nr.Op == OCONVNOP {
+		nr = nr.Left
+	}
+
+	if nl == nil || isblank(nl) {
+		cgen_discard(nr)
+		return
+	}
+
+	if nr == nil || iszero(nr) {
+		// heaps should already be clear
+		if nr == nil && (nl.Class&PHEAP != 0) {
+			return
+		}
+
+		tl := nl.Type
+		if tl == nil {
+			return
+		}
+		if Isfat(tl) {
+			if nl.Op == ONAME {
+				Gvardef(nl)
+			}
+			Thearch.Clearfat(nl)
+			return
+		}
+
+		Clearslim(nl)
+		return
+	}
+
+	tl := nl.Type
+	if tl == nil {
+		return
+	}
+
+	cgen_wb(nr, nl, wb)
+}
+
+func cgen_callmeth(n *Node, proc int) {
+	// generate a rewrite in n2 for the method call
+	// (p.f)(...) goes to (f)(p,...)
+
+	l := n.Left
+
+	if l.Op != ODOTMETH {
+		Fatal("cgen_callmeth: not dotmethod: %v", l)
+	}
+
+	n2 := *n
+	n2.Op = OCALLFUNC
+	n2.Left = l.Right
+	n2.Left.Type = l.Type
+
+	if n2.Left.Op == ONAME {
+		n2.Left.Class = PFUNC
+	}
+	cgen_call(&n2, proc)
+}
+
+// CgenTemp creates a temporary node, assigns n to it, and returns it.
+func CgenTemp(n *Node) *Node {
+	var tmp Node
+	Tempname(&tmp, n.Type)
+	Cgen(n, &tmp)
+	return &tmp
+}
+
+func checklabels() {
+	var l *NodeList
+
+	for lab := labellist; lab != nil; lab = lab.Link {
+		if lab.Def == nil {
+			for l = lab.Use; l != nil; l = l.Next {
+				yyerrorl(int(l.N.Lineno), "label %v not defined", lab.Sym)
+			}
+			continue
+		}
+
+		if lab.Use == nil && lab.Used == 0 {
+			yyerrorl(int(lab.Def.Lineno), "label %v defined and not used", lab.Sym)
+			continue
+		}
+
+		if lab.Gotopc != nil {
+			Fatal("label %v never resolved", lab.Sym)
+		}
+		for l = lab.Use; l != nil; l = l.Next {
+			checkgoto(l.N, lab.Def)
+		}
+	}
+}
+
+// Componentgen copies a composite value by moving its individual components.
+// Slices, strings and interfaces are supported. Small structs or arrays with
+// elements of basic type are also supported.
+// nr is nil when assigning a zero value.
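+//
+// For example, assigning one string variable to another moves the data
+// pointer and the length word individually rather than block-copying.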
+func Componentgen(nr, nl *Node) bool {
+	return componentgen_wb(nr, nl, false)
+}
+
+// componentgen_wb is like componentgen but if wb==true emits write barriers for pointer updates.
+func componentgen_wb(nr, nl *Node, wb bool) bool {
+	// Don't generate any code for complete copy of a variable into itself.
+	// It's useless, and the VARDEF will incorrectly mark the old value as dead.
+	// (This check assumes that the arguments passed to componentgen did not
+	// themselves come from Igen, or else we could have Op==ONAME but
+	// with a Type and Xoffset describing an individual field, not the entire
+	// variable.)
+	if nl.Op == ONAME && nl == nr {
+		return true
+	}
+
+	// Count number of moves required to move components.
+	// If using write barrier, can only emit one pointer.
+	// TODO(rsc): Allow more pointers, for reflect.Value.
+	const maxMoves = 8
+	n := 0
+	numPtr := 0
+	visitComponents(nl.Type, 0, func(t *Type, offset int64) bool {
+		n++
+		if int(Simtype[t.Etype]) == Tptr && t != itable {
+			numPtr++
+		}
+		return n <= maxMoves && (!wb || numPtr <= 1)
+	})
+	if n > maxMoves || wb && numPtr > 1 {
+		return false
+	}
+
+	// Must call emitVardef after evaluating rhs but before writing to lhs.
+	emitVardef := func() {
+		// Emit vardef if needed.
+		if nl.Op == ONAME {
+			switch nl.Type.Etype {
+			case TARRAY, TSTRING, TINTER, TSTRUCT:
+				Gvardef(nl)
+			}
+		}
+	}
+
+	isConstString := Isconst(nr, CTSTR)
+
+	if !cadable(nl) && nr != nil && !cadable(nr) && !isConstString {
+		return false
+	}
+
+	var nodl Node
+	if cadable(nl) {
+		nodl = *nl
+	} else {
+		if nr != nil && !cadable(nr) && !isConstString {
+			return false
+		}
+		if nr == nil || isConstString || nl.Ullman >= nr.Ullman {
+			Igen(nl, &nodl, nil)
+			defer Regfree(&nodl)
+		}
+	}
+	lbase := nodl.Xoffset
+
+	// Special case: zeroing.
+	var nodr Node
+	if nr == nil {
+		// When zeroing, prepare a register containing zero.
+		// TODO(rsc): Check that this is actually generating the best code.
+		if Thearch.REGZERO != 0 {
+			// cpu has a dedicated zero register
+			Nodreg(&nodr, Types[TUINT], Thearch.REGZERO)
+		} else {
+			// no dedicated zero register
+			var zero Node
+			Nodconst(&zero, nl.Type, 0)
+			Regalloc(&nodr, Types[TUINT], nil)
+			Thearch.Gmove(&zero, &nodr)
+			defer Regfree(&nodr)
+		}
+
+		emitVardef()
+		visitComponents(nl.Type, 0, func(t *Type, offset int64) bool {
+			nodl.Type = t
+			nodl.Xoffset = lbase + offset
+			nodr.Type = t
+			if Isfloat[t.Etype] {
+				// TODO(rsc): Cache zero register like we do for integers?
+				Clearslim(&nodl)
+			} else {
+				Thearch.Gmove(&nodr, &nodl)
+			}
+			return true
+		})
+		return true
+	}
+
+	// Special case: assignment of string constant.
+	if isConstString {
+		emitVardef()
+
+		// base
+		nodl.Type = Ptrto(Types[TUINT8])
+		Regalloc(&nodr, Types[Tptr], nil)
+		p := Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), nil, &nodr)
+		Datastring(nr.Val.U.(string), &p.From)
+		p.From.Type = obj.TYPE_ADDR
+		Thearch.Gmove(&nodr, &nodl)
+		Regfree(&nodr)
+
+		// length
+		nodl.Type = Types[Simtype[TUINT]]
+		nodl.Xoffset += int64(Array_nel) - int64(Array_array)
+		Nodconst(&nodr, nodl.Type, int64(len(nr.Val.U.(string))))
+		Thearch.Gmove(&nodr, &nodl)
+		return true
+	}
+
+	// General case: copy nl = nr.
+	nodr = *nr
+	if !cadable(nr) {
+		if nr.Ullman >= UINF && nodl.Op == OINDREG {
+			Fatal("miscompile")
+		}
+		Igen(nr, &nodr, nil)
+		defer Regfree(&nodr)
+	}
+	rbase := nodr.Xoffset
+
+	if nodl.Op == 0 {
+		Igen(nl, &nodl, nil)
+		defer Regfree(&nodl)
+		lbase = nodl.Xoffset
+	}
+
+	emitVardef()
+	var (
+		ptrType   *Type
+		ptrOffset int64
+	)
+	visitComponents(nl.Type, 0, func(t *Type, offset int64) bool {
+		if wb && int(Simtype[t.Etype]) == Tptr && t != itable {
+			if ptrType != nil {
+				Fatal("componentgen_wb %v", Tconv(nl.Type, 0))
+			}
+			ptrType = t
+			ptrOffset = offset
+			return true
+		}
+		nodl.Type = t
+		nodl.Xoffset = lbase + offset
+		nodr.Type = t
+		nodr.Xoffset = rbase + offset
+		Thearch.Gmove(&nodr, &nodl)
+		return true
+	})
+	if ptrType != nil {
+		nodl.Type = ptrType
+		nodl.Xoffset = lbase + ptrOffset
+		nodr.Type = ptrType
+		nodr.Xoffset = rbase + ptrOffset
+		cgen_wbptr(&nodr, &nodl)
+	}
+	return true
+}
+
+// visitComponents walks the individual components of the type t,
+// walking into array elements, struct fields, the real and imaginary
+// parts of complex numbers, and on 32-bit systems the high and
+// low halves of 64-bit integers.
+// It calls f for each such component, passing the component (aka element)
+// type and memory offset, assuming t starts at startOffset.
+// If f ever returns false, visitComponents returns false without any more
+// calls to f. Otherwise visitComponents returns true.
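+//
+// For example (illustrative), for a string it calls
+//	f(Ptrto(Types[TUINT8]), off)                  // data pointer
+//	f(Types[Simtype[TUINT]], off+int64(Widthptr)) // length
+// and for a slice it visits the data pointer, length, and capacity words.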
+func visitComponents(t *Type, startOffset int64, f func(elem *Type, elemOffset int64) bool) bool {
+	switch t.Etype {
+	case TINT64:
+		if Widthreg == 8 {
+			break
+		}
+		// NOTE: Assuming little endian (signed top half at offset 4).
+		// We don't have any 32-bit big-endian systems.
+		if Thearch.Thechar != '5' && Thearch.Thechar != '8' {
+			Fatal("unknown 32-bit architecture")
+		}
+		return f(Types[TUINT32], startOffset) &&
+			f(Types[TINT32], startOffset+4)
+
+	case TUINT64:
+		if Widthreg == 8 {
+			break
+		}
+		return f(Types[TUINT32], startOffset) &&
+			f(Types[TUINT32], startOffset+4)
+
+	case TCOMPLEX64:
+		return f(Types[TFLOAT32], startOffset) &&
+			f(Types[TFLOAT32], startOffset+4)
+
+	case TCOMPLEX128:
+		return f(Types[TFLOAT64], startOffset) &&
+			f(Types[TFLOAT64], startOffset+8)
+
+	case TINTER:
+		return f(itable, startOffset) &&
+			f(Ptrto(Types[TUINT8]), startOffset+int64(Widthptr))
+
+	case TSTRING:
+		return f(Ptrto(Types[TUINT8]), startOffset) &&
+			f(Types[Simtype[TUINT]], startOffset+int64(Widthptr))
+
+	case TARRAY:
+		if Isslice(t) {
+			return f(Ptrto(t.Type), startOffset+int64(Array_array)) &&
+				f(Types[Simtype[TUINT]], startOffset+int64(Array_nel)) &&
+				f(Types[Simtype[TUINT]], startOffset+int64(Array_cap))
+		}
+
+		// Short-circuit [1e6]struct{}.
+		if t.Type.Width == 0 {
+			return true
+		}
+
+		for i := int64(0); i < t.Bound; i++ {
+			if !visitComponents(t.Type, startOffset+i*t.Type.Width, f) {
+				return false
+			}
+		}
+		return true
+
+	case TSTRUCT:
+		if t.Type != nil && t.Type.Width != 0 {
+			// NOTE(rsc): If this happens, the right thing to do is to say
+			//	startOffset -= t.Type.Width
+			// but I want to see whether this case actually comes up.
+			// The old version of componentgen handled this,
+			// in code introduced in CL 6932045 to fix issue #4518.
+			// But the test case in issue 4518 does not trigger this anymore,
+			// so maybe this complication is no longer needed.
+			Fatal("struct not at offset 0")
+		}
+
+		for field := t.Type; field != nil; field = field.Down {
+			if field.Etype != TFIELD {
+				Fatal("bad struct")
+			}
+			if !visitComponents(field.Type, startOffset+field.Width, f) {
+				return false
+			}
+		}
+		return true
+	}
+	return f(t, startOffset)
+}
+
+func cadable(n *Node) bool {
+	// Note: Not sure why you can have n.Op == ONAME without n.Addable, but you can.
+	return n.Addable && n.Op == ONAME
+}
diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go
new file mode 100644
index 0000000..dc33f62
--- /dev/null
+++ b/src/cmd/compile/internal/gc/go.go
@@ -0,0 +1,841 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+	"bytes"
+	"cmd/compile/internal/big"
+	"cmd/internal/obj"
+)
+
+// Assorted buffer sizes and limits, carried over from the C implementation of the compiler.
+const (
+	NHUNK           = 50000
+	BUFSIZ          = 8192
+	NSYMB           = 500
+	NHASH           = 1024
+	MAXALIGN        = 7
+	UINF            = 100
+	PRIME1          = 3
+	BADWIDTH        = -1000000000
+	MaxStackVarSize = 10 * 1024 * 1024
+)
+
+const (
+	// These values are known by runtime.
+	// The MEMx and NOEQx values must run in parallel.  See algtype.
+	AMEM = iota
+	AMEM0
+	AMEM8
+	AMEM16
+	AMEM32
+	AMEM64
+	AMEM128
+	ANOEQ
+	ANOEQ0
+	ANOEQ8
+	ANOEQ16
+	ANOEQ32
+	ANOEQ64
+	ANOEQ128
+	ASTRING
+	AINTER
+	ANILINTER
+	ASLICE
+	AFLOAT32
+	AFLOAT64
+	ACPLX64
+	ACPLX128
+	AUNK = 100
+)
+
+const (
+	// Maximum size in bits for Mpints before signalling
+	// overflow and also mantissa precision for Mpflts.
+	Mpprec = 512
+	// Turn on for constant arithmetic debugging output.
+	Mpdebug = false
+)
+
+// Mpint represents an integer constant.
+type Mpint struct {
+	Val big.Int
+	Ovf bool // set if Val overflowed compiler limit (sticky)
+}
+
+// Mpflt represents a floating-point constant.
+type Mpflt struct {
+	Val big.Float
+}
+
+// Mpcplx represents a complex constant.
+type Mpcplx struct {
+	Real Mpflt
+	Imag Mpflt
+}
+
+type Val struct {
+	Ctype int16
+	// U contains one of:
+	// bool     bool when Ctype == CTBOOL
+	// *Mpint   int when Ctype == CTINT, rune when Ctype == CTRUNE
+	// *Mpflt   float when Ctype == CTFLT
+	// *Mpcplx  pair of floats when Ctype == CTCPLX
+	// string   string when Ctype == CTSTR
+	U interface{}
+}
+
+type Pkg struct {
+	Name     string // package name
+	Path     string // string literal used in import statement
+	Pathsym  *Sym
+	Prefix   string // escaped path for use in symbol table
+	Imported uint8  // export data of this package was parsed
+	Exported int8   // import line written in export data
+	Direct   int8   // imported directly
+	Safe     bool   // whether the package is marked as safe
+	Syms     map[string]*Sym
+}
+
+type Sym struct {
+	Lexical   uint16
+	Flags     uint8
+	Link      *Sym
+	Uniqgen   uint32
+	Importdef *Pkg   // where imported definition was found
+	Linkname  string // link name
+
+	// saved and restored by dcopy
+	Pkg        *Pkg
+	Name       string // variable name
+	Def        *Node  // definition: ONAME OTYPE OPACK or OLITERAL
+	Label      *Label // corresponding label (ephemeral)
+	Block      int32  // blocknumber to catch redeclaration
+	Lastlineno int32  // last declaration for diagnostic
+	Origpkg    *Pkg   // original package for . import
+	Lsym       *obj.LSym
+	Fsym       *Sym // funcsym
+}
+
+type Type struct {
+	Etype       uint8
+	Nointerface bool
+	Noalg       uint8
+	Chan        uint8
+	Trecur      uint8 // to detect loops
+	Printed     uint8
+	Embedded    uint8 // TFIELD embedded type
+	Siggen      uint8
+	Funarg      uint8 // on TSTRUCT and TFIELD
+	Copyany     uint8
+	Local       bool // created in this file
+	Deferwidth  uint8
+	Broke       uint8 // broken type definition.
+	Isddd       bool  // TFIELD is ... argument
+	Align       uint8
+	Haspointers uint8 // 0 unknown, 1 no, 2 yes
+
+	Nod    *Node // canonical OTYPE node
+	Orig   *Type // original type (type literal or predefined type)
+	Lineno int
+
+	// TFUNC
+	Thistuple int
+	Outtuple  int
+	Intuple   int
+	Outnamed  uint8
+
+	Method  *Type
+	Xmethod *Type
+
+	Sym    *Sym
+	Vargen int32 // unique name for OTYPE/ONAME
+
+	Nname  *Node
+	Argwid int64
+
+	// most nodes
+	Type  *Type // actual type for TFIELD, element type for TARRAY, TCHAN, TMAP, TPTRxx
+	Width int64 // offset in TFIELD, width in all others
+
+	// TFIELD
+	Down  *Type   // next struct field, also key type in TMAP
+	Outer *Type   // outer struct
+	Note  *string // literal string annotation
+
+	// TARRAY
+	Bound int64 // negative is dynamic array
+
+	// TMAP
+	Bucket *Type // internal type representing a hash bucket
+	Hmap   *Type // internal type representing a Hmap (map header object)
+	Hiter  *Type // internal type representing hash iterator state
+	Map    *Type // link from the above 3 internal types back to the map type.
+
+	Maplineno   int32 // first use of TFORW as map key
+	Embedlineno int32 // first use of TFORW as embedded type
+
+	// for TFORW, where to copy the eventual value to
+	Copyto *NodeList
+
+	Lastfn *Node // for usefield
+}
+
+type Label struct {
+	Used uint8
+	Sym  *Sym
+	Def  *Node
+	Use  *NodeList
+	Link *Label
+
+	// for use during gen
+	Gotopc   *obj.Prog // pointer to unresolved gotos
+	Labelpc  *obj.Prog // pointer to code
+	Breakpc  *obj.Prog // pointer to code
+	Continpc *obj.Prog // pointer to code
+}
+
+type InitEntry struct {
+	Xoffset int64 // struct, array only
+	Expr    *Node // bytes of run-time computed expressions
+}
+
+type InitPlan struct {
+	Lit  int64
+	Zero int64
+	Expr int64
+	E    []InitEntry
+}
+
+const (
+	SymExport   = 1 << 0 // to be exported
+	SymPackage  = 1 << 1
+	SymExported = 1 << 2 // already written out by export
+	SymUniq     = 1 << 3
+	SymSiggen   = 1 << 4
+	SymAsm      = 1 << 5
+	SymAlgGen   = 1 << 6
+)
+
+var dclstack *Sym
+
+type Iter struct {
+	Done  int
+	Tfunc *Type
+	T     *Type
+}
+
+const (
+	Txxx = iota
+
+	TINT8
+	TUINT8
+	TINT16
+	TUINT16
+	TINT32
+	TUINT32
+	TINT64
+	TUINT64
+	TINT
+	TUINT
+	TUINTPTR
+
+	TCOMPLEX64
+	TCOMPLEX128
+
+	TFLOAT32
+	TFLOAT64
+
+	TBOOL
+
+	TPTR32
+	TPTR64
+
+	TFUNC
+	TARRAY
+	T_old_DARRAY
+	TSTRUCT
+	TCHAN
+	TMAP
+	TINTER
+	TFORW
+	TFIELD
+	TANY
+	TSTRING
+	TUNSAFEPTR
+
+	// pseudo-types for literals
+	TIDEAL
+	TNIL
+	TBLANK
+
+	// pseudo-type for frame layout
+	TFUNCARGS
+	TCHANARGS
+	TINTERMETH
+
+	NTYPE
+)
+
+const (
+	CTxxx = iota
+
+	CTINT
+	CTRUNE
+	CTFLT
+	CTCPLX
+	CTSTR
+	CTBOOL
+	CTNIL
+)
+
+const (
+	/* types of channel */
+	/* must match ../../pkg/nreflect/type.go:/Chandir */
+	Cxxx  = 0
+	Crecv = 1 << 0
+	Csend = 1 << 1
+	Cboth = Crecv | Csend
+)
+
+// declaration context
+const (
+	Pxxx      = uint8(iota)
+	PEXTERN   // global variable
+	PAUTO     // local variables
+	PPARAM    // input arguments
+	PPARAMOUT // output results
+	PPARAMREF // closure variable reference
+	PFUNC     // global function
+
+	PDISCARD // discard during parse of duplicate import
+
+	PHEAP = uint8(1 << 7) // an extra bit to identify an escaped variable
+)
+
+const (
+	Etop      = 1 << 1 // evaluated at statement level
+	Erv       = 1 << 2 // evaluated in value context
+	Etype     = 1 << 3
+	Ecall     = 1 << 4  // call-only expressions are ok
+	Efnstruct = 1 << 5  // multivalue function returns are ok
+	Eiota     = 1 << 6  // iota is ok
+	Easgn     = 1 << 7  // assigning to expression
+	Eindir    = 1 << 8  // indirecting through expression
+	Eaddr     = 1 << 9  // taking address of expression
+	Eproc     = 1 << 10 // inside a go statement
+	Ecomplit  = 1 << 11 // type in composite literal
+)
+
+type Typedef struct {
+	Name   string
+	Etype  int
+	Sameas int
+}
+
+type Sig struct {
+	name   string
+	pkg    *Pkg
+	isym   *Sym
+	tsym   *Sym
+	type_  *Type
+	mtype  *Type
+	offset int32
+	link   *Sig
+}
+
+type Io struct {
+	infile     string
+	bin        *obj.Biobuf
+	nlsemi     int
+	eofnl      int
+	last       int
+	peekc      int
+	peekc1     int    // second peekc for ...
+	cp         string // used for content when bin==nil
+	importsafe bool
+}
+
+type Dlist struct {
+	field *Type
+}
+
+type Idir struct {
+	link *Idir
+	dir  string
+}
+
+/*
+ * argument passing to/from
+ * smagic and umagic
+ */
+type Magic struct {
+	W   int // input for both - width
+	S   int // output for both - shift
+	Bad int // output for both - unexpected failure
+
+	// magic multiplier for signed literal divisors
+	Sd int64 // input - literal divisor
+	Sm int64 // output - multiplier
+
+	// magic multiplier for unsigned literal divisors
+	Ud uint64 // input - literal divisor
+	Um uint64 // output - multiplier
+	Ua int    // output - adder
+}
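+
+// Illustrative example (not derived from this code): unsigned division by
+// the constant 3 on a 32-bit machine can be rewritten as
+//	q = uint32((uint64(x) * 0xAAAAAAAB) >> 33)
+// smagic and umagic compute the multiplier (Sm/Um) and shift (S) that make
+// this multiply-and-shift rewrite valid for a given divisor and width.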
+
+/*
+ * note this is the runtime representation
+ * of the compiler's arrays.
+ *
+ * typedef	struct
+ * {				// must not move anything
+ *	uchar	array[8];	// pointer to data
+ *	uchar	nel[4];		// number of elements
+ *	uchar	cap[4];		// allocated number of elements
+ * } Array;
+ */
+var Array_array int // runtime offsetof(Array,array) - same for String
+
+var Array_nel int // runtime offsetof(Array,nel) - same for String
+
+var Array_cap int // runtime offsetof(Array,cap)
+
+var sizeof_Array int // runtime sizeof(Array)
+
+/*
+ * note this is the runtime representation
+ * of the compiler's strings.
+ *
+ * typedef	struct
+ * {				// must not move anything
+ *	uchar	array[8];	// pointer to data
+ *	uchar	nel[4];		// number of elements
+ * } String;
+ */
+var sizeof_String int // runtime sizeof(String)
+
+var dotlist [10]Dlist // size is max depth of embedded types
+
+var curio Io
+
+var pushedio Io
+
+var lexlineno int32
+
+var lineno int32
+
+var prevlineno int32
+
+var pragcgobuf string
+
+var infile string
+
+var outfile string
+
+var bout *obj.Biobuf
+
+var nerrors int
+
+var nsavederrors int
+
+var nsyntaxerrors int
+
+var decldepth int32
+
+var safemode int
+
+var nolocalimports int
+
+var lexbuf bytes.Buffer
+var strbuf bytes.Buffer
+
+var litbuf string
+
+var Debug [256]int
+
+var debugstr string
+
+var Debug_checknil int
+var Debug_typeassert int
+
+var importmyname *Sym // my name for package
+
+var localpkg *Pkg // package being compiled
+
+var importpkg *Pkg // package being imported
+
+var structpkg *Pkg // package that declared struct, during import
+
+var builtinpkg *Pkg // fake package for builtins
+
+var gostringpkg *Pkg // fake pkg for Go strings
+
+var itabpkg *Pkg // fake pkg for itab cache
+
+var Runtimepkg *Pkg // package runtime
+
+var racepkg *Pkg // package runtime/race
+
+var typepkg *Pkg // fake package for runtime type info (headers)
+
+var typelinkpkg *Pkg // fake package for runtime type info (data)
+
+var weaktypepkg *Pkg // weak references to runtime type info
+
+var unsafepkg *Pkg // package unsafe
+
+var trackpkg *Pkg // fake package for field tracking
+
+var Tptr int // either TPTR32 or TPTR64
+
+var myimportpath string
+
+var idirs *Idir
+
+var localimport string
+
+var asmhdr string
+
+var Types [NTYPE]*Type
+
+var idealstring *Type
+
+var idealbool *Type
+
+var bytetype *Type
+
+var runetype *Type
+
+var errortype *Type
+
+var Simtype [NTYPE]uint8
+
+var (
+	Isptr     [NTYPE]bool
+	isforw    [NTYPE]bool
+	Isint     [NTYPE]bool
+	Isfloat   [NTYPE]bool
+	Iscomplex [NTYPE]bool
+	Issigned  [NTYPE]bool
+	issimple  [NTYPE]bool
+)
+
+var (
+	okforeq    [NTYPE]bool
+	okforadd   [NTYPE]bool
+	okforand   [NTYPE]bool
+	okfornone  [NTYPE]bool
+	okforcmp   [NTYPE]bool
+	okforbool  [NTYPE]bool
+	okforcap   [NTYPE]bool
+	okforlen   [NTYPE]bool
+	okforarith [NTYPE]bool
+	okforconst [NTYPE]bool
+)
+
+var (
+	okfor [OEND][]bool
+	iscmp [OEND]bool
+)
+
+var Minintval [NTYPE]*Mpint
+
+var Maxintval [NTYPE]*Mpint
+
+var minfltval [NTYPE]*Mpflt
+
+var maxfltval [NTYPE]*Mpflt
+
+var xtop *NodeList
+
+var externdcl *NodeList
+
+var exportlist *NodeList
+
+var importlist *NodeList // imported functions and methods with inlinable bodies
+
+var funcsyms *NodeList
+
+var dclcontext uint8 // PEXTERN/PAUTO
+
+var incannedimport int
+
+var statuniqgen int // name generator for static temps
+
+var loophack int
+
+var iota_ int32
+
+var lastconst *NodeList
+
+var lasttype *Node
+
+var Maxarg int64
+
+var Stksize int64 // stack size for current frame
+
+var stkptrsize int64 // prefix of stack containing pointers
+
+var blockgen int32 // max block number
+
+var block int32 // current block number
+
+var Hasdefer int // flag that curfn has defer statement
+
+var Curfn *Node
+
+var Widthptr int
+
+var Widthint int
+
+var Widthreg int
+
+var typesw *Node
+
+var nblank *Node
+
+var hunk string
+
+var nhunk int32
+
+var thunk int32
+
+var Funcdepth int32
+
+var typecheckok int
+
+var compiling_runtime int
+
+var compiling_wrappers int
+
+var inl_nonlocal int
+
+var use_writebarrier int
+
+var pure_go int
+
+var flag_installsuffix string
+
+var flag_race int
+
+var flag_largemodel int
+
+var noescape bool
+
+var nosplit bool
+
+var nowritebarrier bool
+
+var debuglive int
+
+var Ctxt *obj.Link
+
+var nointerface bool
+
+var writearchive int
+
+var bstdout obj.Biobuf
+
+var Nacl bool
+
+var continpc *obj.Prog
+
+var breakpc *obj.Prog
+
+var Pc *obj.Prog
+
+var nodfp *Node
+
+var Disable_checknil int
+
+var zerosize int64
+
+type Flow struct {
+	Prog   *obj.Prog // actual instruction
+	P1     *Flow     // predecessors of this instruction: p1,
+	P2     *Flow     // and then p2 linked though p2link.
+	P2link *Flow
+	S1     *Flow // successors of this instruction (at most two: s1 and s2).
+	S2     *Flow
+	Link   *Flow // next instruction in function code
+
+	Active int32 // usable by client
+
+	Id     int32  // sequence number in flow graph
+	Rpo    int32  // reverse post ordering
+	Loop   uint16 // x5 for every loop
+	Refset uint8  // diagnostic generated
+
+	Data interface{} // for use by client
+}
+
+type Graph struct {
+	Start *Flow
+	Num   int
+
+	// After calling flowrpo, rpo lists the flow nodes in reverse postorder,
+	// and each non-dead Flow node f has g->rpo[f->rpo] == f.
+	Rpo []*Flow
+}
+
+/*
+ *	interface to back end
+ */
+
+const (
+	// Pseudo-op, like TEXT, GLOBL, TYPE, PCDATA, FUNCDATA.
+	Pseudo = 1 << 1
+
+	// There's nothing to say about the instruction,
+	// but it's still okay to see.
+	OK = 1 << 2
+
+	// Size of right-side write, or right-side read if no write.
+	SizeB = 1 << 3
+	SizeW = 1 << 4
+	SizeL = 1 << 5
+	SizeQ = 1 << 6
+	SizeF = 1 << 7
+	SizeD = 1 << 8
+
+	// Left side (Prog.from): address taken, read, write.
+	LeftAddr  = 1 << 9
+	LeftRead  = 1 << 10
+	LeftWrite = 1 << 11
+
+	// Register in middle (Prog.reg); only ever read. (arm, ppc64)
+	RegRead    = 1 << 12
+	CanRegRead = 1 << 13
+
+	// Right side (Prog.to): address taken, read, write.
+	RightAddr  = 1 << 14
+	RightRead  = 1 << 15
+	RightWrite = 1 << 16
+
+	// Instruction kinds
+	Move  = 1 << 17 // straight move
+	Conv  = 1 << 18 // size conversion
+	Cjmp  = 1 << 19 // conditional jump
+	Break = 1 << 20 // breaks control flow (no fallthrough)
+	Call  = 1 << 21 // function call
+	Jump  = 1 << 22 // jump
+	Skip  = 1 << 23 // data instruction
+
+	// Set, use, or kill of carry bit.
+	// Kill means we never look at the carry bit after this kind of instruction.
+	SetCarry  = 1 << 24
+	UseCarry  = 1 << 25
+	KillCarry = 1 << 26
+
+	// Special cases for register use. (amd64, 386)
+	ShiftCX  = 1 << 27 // possible shift by CX
+	ImulAXDX = 1 << 28 // possible multiply into DX:AX
+
+	// Instruction updates whichever of from/to is type D_OREG. (ppc64)
+	PostInc = 1 << 29
+)
+
+type Arch struct {
+	Thechar      int
+	Thestring    string
+	Thelinkarch  *obj.LinkArch
+	Typedefs     []Typedef
+	REGSP        int
+	REGCTXT      int
+	REGCALLX     int // BX
+	REGCALLX2    int // AX
+	REGRETURN    int // AX
+	REGMIN       int
+	REGMAX       int
+	REGZERO      int // architectural zero register, if available
+	FREGMIN      int
+	FREGMAX      int
+	MAXWIDTH     int64
+	ReservedRegs []int
+
+	AddIndex     func(*Node, int64, *Node) bool // optional
+	Betypeinit   func()
+	Bgen_float   func(*Node, bool, int, *obj.Prog) // optional
+	Cgen64       func(*Node, *Node)                // only on 32-bit systems
+	Cgenindex    func(*Node, *Node, bool) *obj.Prog
+	Cgen_bmul    func(int, *Node, *Node, *Node) bool
+	Cgen_float   func(*Node, *Node) // optional
+	Cgen_hmul    func(*Node, *Node, *Node)
+	Cgen_shift   func(int, bool, *Node, *Node, *Node)
+	Clearfat     func(*Node)
+	Cmp64        func(*Node, *Node, int, int, *obj.Prog) // only on 32-bit systems
+	Defframe     func(*obj.Prog)
+	Dodiv        func(int, *Node, *Node, *Node)
+	Excise       func(*Flow)
+	Expandchecks func(*obj.Prog)
+	Getg         func(*Node)
+	Gins         func(int, *Node, *Node) *obj.Prog
+
+	// Ginscmp generates code comparing n1 to n2 and jumping away if op is satisfied.
+	// The returned prog should be Patch'ed with the jump target.
+	// If op is not satisfied, code falls through to the next emitted instruction.
+	// Likely is the branch prediction hint: +1 for likely, -1 for unlikely, 0 for no opinion.
+	//
+	// Ginscmp must be able to handle all kinds of arguments for n1 and n2,
+	// not just simple registers, although it can assume that there are no
+	// function calls needed during the evaluation, and on 32-bit systems
+	// the values are guaranteed not to be 64-bit values, so no in-memory
+	// temporaries are necessary.
+	Ginscmp func(op int, t *Type, n1, n2 *Node, likely int) *obj.Prog
+
+	// Ginsboolval inserts instructions to convert the result
+	// of a just-completed comparison to a boolean value.
+	// The first argument is the conditional jump instruction
+	// corresponding to the desired value.
+	// The second argument is the destination.
+	// If not present, Ginsboolval will be emulated with jumps.
+	Ginsboolval func(int, *Node)
+
+	Ginscon      func(int, int64, *Node)
+	Ginsnop      func()
+	Gmove        func(*Node, *Node)
+	Igenindex    func(*Node, *Node, bool) *obj.Prog
+	Linkarchinit func()
+	Peep         func(*obj.Prog)
+	Proginfo     func(*obj.Prog) // fills in Prog.Info
+	Regtyp       func(*obj.Addr) bool
+	Sameaddr     func(*obj.Addr, *obj.Addr) bool
+	Smallindir   func(*obj.Addr, *obj.Addr) bool
+	Stackaddr    func(*obj.Addr) bool
+	Blockcopy    func(*Node, *Node, int64, int64, int64)
+	Sudoaddable  func(int, *Node, *obj.Addr) bool
+	Sudoclean    func()
+	Excludedregs func() uint64
+	RtoB         func(int) uint64
+	FtoB         func(int) uint64
+	BtoR         func(uint64) int
+	BtoF         func(uint64) int
+	Optoas       func(int, *Type) int
+	Doregbits    func(int) uint64
+	Regnames     func(*int) []string
+	Use387       bool // should 8g use 387 FP instructions instead of sse2.
+}
+
+var pcloc int32
+
+var Thearch Arch
+
+var Newproc *Node
+
+var Deferproc *Node
+
+var Deferreturn *Node
+
+var Panicindex *Node
+
+var panicslice *Node
+
+var throwreturn *Node
diff --git a/src/cmd/compile/internal/gc/go.y b/src/cmd/compile/internal/gc/go.y
new file mode 100644
index 0000000..7d523ae
--- /dev/null
+++ b/src/cmd/compile/internal/gc/go.y
@@ -0,0 +1,2312 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+ * Go language grammar.
+ *
+ * The Go semicolon rules are:
+ *
+ *  1. all statements and declarations are terminated by semicolons.
+ *  2. semicolons can be omitted before a closing ) or }.
+ *  3. semicolons are inserted by the lexer before a newline
+ *      following a specific list of tokens.
+ *
+ * Rules #1 and #2 are accomplished by writing the lists as
+ * semicolon-separated lists with an optional trailing semicolon.
+ * Rule #3 is implemented in yylex.
+ */
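+
+/*
+ * Illustrative example of rule #3: because identifiers and ')' are in
+ * the lexer's insert-semicolon token list, the two source lines
+ *	x := f()
+ *	return x
+ * reach the parser as "x := f(); return x;".
+ */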
+
+%{
+package gc
+
+import (
+	"fmt"
+	"strings"
+)
+%}
+%union	{
+	node *Node
+	list *NodeList
+	typ *Type
+	sym *Sym
+	val Val
+	i int
+}
+
+// |sed 's/.*	//' |9 fmt -l1 |sort |9 fmt -l50 | sed 's/^/%xxx		/'
+
+%token	<val>	LLITERAL
+%token	<i>	LASOP LCOLAS
+%token	<sym>	LBREAK LCASE LCHAN LCONST LCONTINUE LDDD
+%token	<sym>	LDEFAULT LDEFER LELSE LFALL LFOR LFUNC LGO LGOTO
+%token	<sym>	LIF LIMPORT LINTERFACE LMAP LNAME
+%token	<sym>	LPACKAGE LRANGE LRETURN LSELECT LSTRUCT LSWITCH
+%token	<sym>	LTYPE LVAR
+
+%token		LANDAND LANDNOT LBODY LCOMM LDEC LEQ LGE LGT
+%token		LIGNORE LINC LLE LLSH LLT LNE LOROR LRSH
+
+%type	<i>	lbrace import_here
+%type	<sym>	sym packname
+%type	<val>	oliteral
+
+%type	<node>	stmt ntype
+%type	<node>	arg_type
+%type	<node>	case caseblock
+%type	<node>	compound_stmt dotname embed expr complitexpr bare_complitexpr
+%type	<node>	expr_or_type
+%type	<node>	fndcl hidden_fndcl fnliteral
+%type	<node>	for_body for_header for_stmt if_header if_stmt non_dcl_stmt
+%type	<node>	interfacedcl keyval labelname name
+%type	<node>	name_or_type non_expr_type
+%type	<node>	new_name dcl_name oexpr typedclname
+%type	<node>	onew_name
+%type	<node>	osimple_stmt pexpr pexpr_no_paren
+%type	<node>	pseudocall range_stmt select_stmt
+%type	<node>	simple_stmt
+%type	<node>	switch_stmt uexpr
+%type	<node>	xfndcl typedcl start_complit
+
+%type	<list>	xdcl fnbody fnres loop_body dcl_name_list
+%type	<list>	new_name_list expr_list keyval_list braced_keyval_list expr_or_type_list xdcl_list
+%type	<list>	oexpr_list caseblock_list elseif elseif_list else stmt_list oarg_type_list_ocomma arg_type_list
+%type	<list>	interfacedcl_list vardcl vardcl_list structdcl structdcl_list
+%type	<list>	common_dcl constdcl constdcl1 constdcl_list typedcl_list
+
+%type	<node>	convtype comptype dotdotdot
+%type	<node>	indcl interfacetype structtype ptrtype
+%type	<node>	recvchantype non_recvchantype othertype fnret_type fntype
+
+%type	<sym>	hidden_importsym hidden_pkg_importsym
+
+%type	<node>	hidden_constant hidden_literal hidden_funarg
+%type	<node>	hidden_interfacedcl hidden_structdcl
+
+%type	<list>	hidden_funres
+%type	<list>	ohidden_funres
+%type	<list>	hidden_funarg_list ohidden_funarg_list
+%type	<list>	hidden_interfacedcl_list ohidden_interfacedcl_list
+%type	<list>	hidden_structdcl_list ohidden_structdcl_list
+
+%type	<typ>	hidden_type hidden_type_misc hidden_pkgtype
+%type	<typ>	hidden_type_func
+%type	<typ>	hidden_type_recv_chan hidden_type_non_recv_chan
+
+%left		LCOMM	/* outside the usual hierarchy; here for good error messages */
+
+%left		LOROR
+%left		LANDAND
+%left		LEQ LNE LLE LGE LLT LGT
+%left		'+' '-' '|' '^'
+%left		'*' '/' '%' '&' LLSH LRSH LANDNOT
+
+/*
+ * manual override of shift/reduce conflicts.
+ * the general form is that we assign a precedence
+ * to the token being shifted and then introduce
+ * NotToken with lower precedence or PreferToToken with higher
+ * and annotate the reducing rule accordingly.
+ */
+%left		NotPackage
+%left		LPACKAGE
+
+%left		NotParen
+%left		'('
+
+%left		')'
+%left		PreferToRightParen
+
+%error loadsys package LIMPORT '(' LLITERAL import_package import_there ',':
+	"unexpected comma during import block"
+
+%error loadsys package LIMPORT LNAME ';':
+	"missing import path; require quoted string"
+
+%error loadsys package imports LFUNC LNAME '(' ')' '{' LIF if_header ';':
+	"missing { after if clause"
+
+%error loadsys package imports LFUNC LNAME '(' ')' '{' LSWITCH if_header ';':
+	"missing { after switch clause"
+
+%error loadsys package imports LFUNC LNAME '(' ')' '{' LFOR for_header ';':
+	"missing { after for clause"
+
+%error loadsys package imports LFUNC LNAME '(' ')' '{' LFOR ';' LBODY:
+	"missing { after for clause"
+
+%error loadsys package imports LFUNC LNAME '(' ')' ';' '{':
+	"unexpected semicolon or newline before {"
+
+%error loadsys package imports LTYPE LNAME ';':
+	"unexpected semicolon or newline in type declaration"
+
+%error loadsys package imports LCHAN '}':
+	"unexpected } in channel type"
+
+%error loadsys package imports LCHAN ')':
+	"unexpected ) in channel type"
+
+%error loadsys package imports LCHAN ',':
+	"unexpected comma in channel type"
+
+%error loadsys package imports LFUNC LNAME '(' ')' '{' if_stmt ';' LELSE:
+	"unexpected semicolon or newline before else"
+
+%error loadsys package imports LTYPE LNAME LINTERFACE '{' LNAME ',' LNAME:
+	"name list not allowed in interface type"
+
+%error loadsys package imports LFUNC LNAME '(' ')' '{' LFOR LVAR LNAME '=' LNAME:
+	"var declaration not allowed in for initializer"
+
+%error loadsys package imports LVAR LNAME '[' ']' LNAME '{':
+	"unexpected { at end of statement"
+
+%error loadsys package imports LFUNC LNAME '(' ')' '{' LVAR LNAME '[' ']' LNAME '{':
+	"unexpected { at end of statement"
+
+%error loadsys package imports LFUNC LNAME '(' ')' '{' LDEFER LNAME ';':
+	"argument to go/defer must be function call"
+
+%error loadsys package imports LVAR LNAME '=' LNAME '{' LNAME ';':
+	"need trailing comma before newline in composite literal"
+
+%error loadsys package imports LVAR LNAME '=' comptype '{' LNAME ';':
+	"need trailing comma before newline in composite literal"
+
+%error loadsys package imports LFUNC LNAME '(' ')' '{' LFUNC LNAME:
+	"nested func not allowed"
+
+%error loadsys package imports LFUNC LNAME '(' ')' '{' LIF if_header loop_body LELSE ';':
+	"else must be followed by if or statement block"
+
+%%
+file:
+	loadsys
+	package
+	imports
+	xdcl_list
+	{
+		xtop = concat(xtop, $4);
+	}
+
+package:
+	%prec NotPackage
+	{
+		prevlineno = lineno;
+		Yyerror("package statement must be first");
+		errorexit();
+	}
+|	LPACKAGE sym ';'
+	{
+		mkpackage($2.Name);
+	}
+
+/*
+ * this loads the definitions for the low-level runtime functions,
+ * so that the compiler can generate calls to them,
+ * but does not make the name "runtime" visible as a package.
+ */
+loadsys:
+	{
+		importpkg = Runtimepkg;
+
+		if Debug['A'] != 0 {
+			cannedimports("runtime.Builtin", "package runtime\n\n$$\n\n");
+		} else {
+			cannedimports("runtime.Builtin", runtimeimport);
+		}
+		curio.importsafe = true
+	}
+	import_package
+	import_there
+	{
+		importpkg = nil;
+	}
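+
+/*
+ * for example (editorial sketch, assumed behavior): a file containing only
+ *	package p
+ *	func f() *int { return new(int) }
+ * compiles its allocation into a call to a runtime helper made visible
+ * here, while writing runtime.newobject(...) in the same file still
+ * fails, because no package named "runtime" was ever bound.
+ */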
+
+imports:
+|	imports import ';'
+
+import:
+	LIMPORT import_stmt
+|	LIMPORT '(' import_stmt_list osemi ')'
+|	LIMPORT '(' ')'
+
+import_stmt:
+	import_here import_package import_there
+	{
+		ipkg := importpkg;
+		my := importmyname;
+		importpkg = nil;
+		importmyname = nil;
+
+		if my == nil {
+			my = Lookup(ipkg.Name);
+		}
+
+		pack := Nod(OPACK, nil, nil);
+		pack.Sym = my;
+		pack.Pkg = ipkg;
+		pack.Lineno = int32($1);
+
+		if strings.HasPrefix(my.Name, ".") {
+			importdot(ipkg, pack);
+			break;
+		}
+		if my.Name == "init" {
+			Yyerror("cannot import package as init - init must be a func");
+			break;
+		}
+		if my.Name == "_" {
+			break;
+		}
+		if my.Def != nil {
+			lineno = int32($1);
+			redeclare(my, "as imported package name");
+		}
+		my.Def = pack;
+		my.Lastlineno = int32($1);
+		my.Block = 1;	// at top level
+	}
+|	import_here import_there
+	{
+		// When an invalid import path is passed to importfile,
+		// it calls Yyerror and then sets up a fake import with
+		// no package statement. This allows us to test more
+		// than one invalid import statement in a single file.
+		if nerrors == 0 {
+			Fatal("phase error in import");
+		}
+	}
+
+import_stmt_list:
+	import_stmt
+|	import_stmt_list ';' import_stmt
+
+import_here:
+	LLITERAL
+	{
+		// import with original name
+		$$ = parserline();
+		importmyname = nil;
+		importfile(&$1, $$);
+	}
+|	sym LLITERAL
+	{
+		// import with given name
+		$$ = parserline();
+		importmyname = $1;
+		importfile(&$2, $$);
+	}
+|	'.' LLITERAL
+	{
+		// import into my name space
+		$$ = parserline();
+		importmyname = Lookup(".");
+		importfile(&$2, $$);
+	}
+
+import_package:
+	LPACKAGE LNAME import_safety ';'
+	{
+		if importpkg.Name == "" {
+			importpkg.Name = $2.Name;
+			numImport[$2.Name]++
+		} else if importpkg.Name != $2.Name {
+			Yyerror("conflicting names %s and %s for package %q", importpkg.Name, $2.Name, importpkg.Path);
+		}
+		importpkg.Direct = 1;
+		importpkg.Safe = curio.importsafe
+
+		if safemode != 0 && !curio.importsafe {
+			Yyerror("cannot import unsafe package %q", importpkg.Path);
+		}
+	}
+
+import_safety:
+|	LNAME
+	{
+		if $1.Name == "safe" {
+			curio.importsafe = true
+		}
+	}
+
+import_there:
+	{
+		defercheckwidth();
+	}
+	hidden_import_list '$' '$'
+	{
+		resumecheckwidth();
+		unimportfile();
+	}
+
+/*
+ * declarations
+ */
+xdcl:
+	{
+		Yyerror("empty top-level declaration");
+		$$ = nil;
+	}
+|	common_dcl
+|	xfndcl
+	{
+		$$ = list1($1);
+	}
+|	non_dcl_stmt
+	{
+		Yyerror("non-declaration statement outside function body");
+		$$ = nil;
+	}
+|	error
+	{
+		$$ = nil;
+	}
+
+common_dcl:
+	LVAR vardcl
+	{
+		$$ = $2;
+	}
+|	LVAR '(' vardcl_list osemi ')'
+	{
+		$$ = $3;
+	}
+|	LVAR '(' ')'
+	{
+		$$ = nil;
+	}
+|	lconst constdcl
+	{
+		$$ = $2;
+		iota_ = -100000;
+		lastconst = nil;
+	}
+|	lconst '(' constdcl osemi ')'
+	{
+		$$ = $3;
+		iota_ = -100000;
+		lastconst = nil;
+	}
+|	lconst '(' constdcl ';' constdcl_list osemi ')'
+	{
+		$$ = concat($3, $5);
+		iota_ = -100000;
+		lastconst = nil;
+	}
+|	lconst '(' ')'
+	{
+		$$ = nil;
+		iota_ = -100000;
+	}
+|	LTYPE typedcl
+	{
+		$$ = list1($2);
+	}
+|	LTYPE '(' typedcl_list osemi ')'
+	{
+		$$ = $3;
+	}
+|	LTYPE '(' ')'
+	{
+		$$ = nil;
+	}
+
+lconst:
+	LCONST
+	{
+		iota_ = 0;
+	}
+
+vardcl:
+	dcl_name_list ntype
+	{
+		$$ = variter($1, $2, nil);
+	}
+|	dcl_name_list ntype '=' expr_list
+	{
+		$$ = variter($1, $2, $4);
+	}
+|	dcl_name_list '=' expr_list
+	{
+		$$ = variter($1, nil, $3);
+	}
+
+constdcl:
+	dcl_name_list ntype '=' expr_list
+	{
+		$$ = constiter($1, $2, $4);
+	}
+|	dcl_name_list '=' expr_list
+	{
+		$$ = constiter($1, nil, $3);
+	}
+
+constdcl1:
+	constdcl
+|	dcl_name_list ntype
+	{
+		$$ = constiter($1, $2, nil);
+	}
+|	dcl_name_list
+	{
+		$$ = constiter($1, nil, nil);
+	}
+
+typedclname:
+	sym
+	{
+		// different from dclname because the name
+		// becomes visible right here, not at the end
+		// of the declaration.
+		$$ = typedcl0($1);
+	}
+
+typedcl:
+	typedclname ntype
+	{
+		$$ = typedcl1($1, $2, true);
+	}
+
+simple_stmt:
+	expr
+	{
+		$$ = $1;
+
+		// These nodes do not carry line numbers.
+		// Since a bare name used as an expression is an error,
+		// introduce a wrapper node to give the correct line.
+		switch($$.Op) {
+		case ONAME, ONONAME, OTYPE, OPACK, OLITERAL:
+			$$ = Nod(OPAREN, $$, nil);
+			$$.Implicit = true;
+			break;
+		}
+	}
+|	expr LASOP expr
+	{
+		$$ = Nod(OASOP, $1, $3);
+		$$.Etype = uint8($2);			// rathole to pass opcode
+	}
+|	expr_list '=' expr_list
+	{
+		if $1.Next == nil && $3.Next == nil {
+			// simple
+			$$ = Nod(OAS, $1.N, $3.N);
+			break;
+		}
+		// multiple
+		$$ = Nod(OAS2, nil, nil);
+		$$.List = $1;
+		$$.Rlist = $3;
+	}
+|	expr_list LCOLAS expr_list
+	{
+		if $3.N.Op == OTYPESW {
+			$$ = Nod(OTYPESW, nil, $3.N.Right);
+			if $3.Next != nil {
+				Yyerror("expr.(type) must be alone in list");
+			}
+			if $1.Next != nil {
+				Yyerror("argument count mismatch: %d = %d", count($1), 1);
+			} else if ($1.N.Op != ONAME && $1.N.Op != OTYPE && $1.N.Op != ONONAME) || isblank($1.N) {
+				Yyerror("invalid variable name %v in type switch", $1.N);
+			} else {
+				$$.Left = dclname($1.N.Sym);
+			}  // it's a colas, so must not re-use an oldname.
+			break;
+		}
+		$$ = colas($1, $3, int32($2));
+	}
+|	expr LINC
+	{
+		$$ = Nod(OASOP, $1, Nodintconst(1));
+		$$.Implicit = true;
+		$$.Etype = OADD;
+	}
+|	expr LDEC
+	{
+		$$ = Nod(OASOP, $1, Nodintconst(1));
+		$$.Implicit = true;
+		$$.Etype = OSUB;
+	}
+
+case:
+	LCASE expr_or_type_list ':'
+	{
+		var n, nn *Node
+
+		// will be converted to OCASE
+		// right will point to next case
+		// done in casebody()
+		markdcl();
+		$$ = Nod(OXCASE, nil, nil);
+		$$.List = $2;
+		if typesw != nil && typesw.Right != nil {
+			n = typesw.Right.Left
+			if n != nil {
+				// type switch - declare variable
+				nn = newname(n.Sym);
+				declare(nn, dclcontext);
+				$$.Nname = nn;
+	
+				// keep track of the instances for reporting unused
+				nn.Defn = typesw.Right;
+			}
+		}
+	}
+|	LCASE expr_or_type_list '=' expr ':'
+	{
+		var n *Node
+
+		// will be converted to OCASE
+		// right will point to next case
+		// done in casebody()
+		markdcl();
+		$$ = Nod(OXCASE, nil, nil);
+		if $2.Next == nil {
+			n = Nod(OAS, $2.N, $4);
+		} else {
+			n = Nod(OAS2, nil, nil);
+			n.List = $2;
+			n.Rlist = list1($4);
+		}
+		$$.List = list1(n);
+	}
+|	LCASE expr_or_type_list LCOLAS expr ':'
+	{
+		// will be converted to OCASE
+		// right will point to next case
+		// done in casebody()
+		markdcl();
+		$$ = Nod(OXCASE, nil, nil);
+		$$.List = list1(colas($2, list1($4), int32($3)));
+	}
+|	LDEFAULT ':'
+	{
+		var n, nn *Node
+
+		markdcl();
+		$$ = Nod(OXCASE, nil, nil);
+		if typesw != nil && typesw.Right != nil {
+			n = typesw.Right.Left
+			if n != nil {
+				// type switch - declare variable
+				nn = newname(n.Sym);
+				declare(nn, dclcontext);
+				$$.Nname = nn;
+	
+				// keep track of the instances for reporting unused
+				nn.Defn = typesw.Right;
+			}
+		}
+	}
+
+compound_stmt:
+	'{'
+	{
+		markdcl();
+	}
+	stmt_list '}'
+	{
+		if $3 == nil {
+			$$ = Nod(OEMPTY, nil, nil);
+		} else {
+			$$ = liststmt($3);
+		}
+		popdcl();
+	}
+
+caseblock:
+	case
+	{
+		// If the last token read by the lexer was consumed
+		// as part of the case, clear it (parser has cleared yychar).
+		// If the last token read by the lexer was the lookahead
+		// leave it alone (parser has it cached in yychar).
+		// This is so that the stmt_list action doesn't look at
+		// the case tokens if the stmt_list is empty.
+		yylast = yychar;
+		$1.Xoffset = int64(block);
+	}
+	stmt_list
+	{
+		// This is the only place in the language where a statement
+		// list is not allowed to drop the final semicolon, because
+		// it's the only place where a statement list is not followed 
+		// by a closing brace.  Handle the error for pedantry.
+
+		// Find the final token of the statement list.
+		// yylast is lookahead; yyprev is last of stmt_list
+		last := yyprev;
+
+		if last > 0 && last != ';' && yychar != '}' {
+			Yyerror("missing statement after label");
+		}
+		$$ = $1;
+		$$.Nbody = $3;
+		popdcl();
+	}
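+
+/*
+ * illustrative trigger for the check above (an assumption, not from this CL):
+ *	switch x {
+ *	case 0:
+ *		goto done
+ *	done:
+ *	case 1:
+ *	}
+ * the label done: ends the first case's statement list without a
+ * statement or semicolon after it, and the lookahead is another case,
+ * so the parser reports "missing statement after label".
+ */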
+
+caseblock_list:
+	{
+		$$ = nil;
+	}
+|	caseblock_list caseblock
+	{
+		$$ = list($1, $2);
+	}
+
+loop_body:
+	LBODY
+	{
+		markdcl();
+	}
+	stmt_list '}'
+	{
+		$$ = $3;
+		popdcl();
+	}
+
+range_stmt:
+	expr_list '=' LRANGE expr
+	{
+		$$ = Nod(ORANGE, nil, $4);
+		$$.List = $1;
+		$$.Etype = 0;	// := flag
+	}
+|	expr_list LCOLAS LRANGE expr
+	{
+		$$ = Nod(ORANGE, nil, $4);
+		$$.List = $1;
+		$$.Colas = true;
+		colasdefn($1, $$);
+	}
+|	LRANGE expr
+	{
+		$$ = Nod(ORANGE, nil, $2);
+		$$.Etype = 0; // := flag
+	}
+
+for_header:
+	osimple_stmt ';' osimple_stmt ';' osimple_stmt
+	{
+		// init ; test ; incr
+		if $5 != nil && $5.Colas {
+			Yyerror("cannot declare in the for-increment");
+		}
+		$$ = Nod(OFOR, nil, nil);
+		if $1 != nil {
+			$$.Ninit = list1($1);
+		}
+		$$.Ntest = $3;
+		$$.Nincr = $5;
+	}
+|	osimple_stmt
+	{
+		// normal test
+		$$ = Nod(OFOR, nil, nil);
+		$$.Ntest = $1;
+	}
+|	range_stmt
+
+for_body:
+	for_header loop_body
+	{
+		$$ = $1;
+		$$.Nbody = concat($$.Nbody, $2);
+	}
+
+for_stmt:
+	LFOR
+	{
+		markdcl();
+	}
+	for_body
+	{
+		$$ = $3;
+		popdcl();
+	}
+
+if_header:
+	osimple_stmt
+	{
+		// test
+		$$ = Nod(OIF, nil, nil);
+		$$.Ntest = $1;
+	}
+|	osimple_stmt ';' osimple_stmt
+	{
+		// init ; test
+		$$ = Nod(OIF, nil, nil);
+		if $1 != nil {
+			$$.Ninit = list1($1);
+		}
+		$$.Ntest = $3;
+	}
+
+/* IF cond body (ELSE IF cond body)* (ELSE block)? */
+if_stmt:
+	LIF
+	{
+		markdcl();
+	}
+	if_header
+	{
+		if $3.Ntest == nil {
+			Yyerror("missing condition in if statement");
+		}
+	}
+	loop_body
+	{
+		$3.Nbody = $5;
+	}
+	elseif_list else
+	{
+		var n *Node
+		var nn *NodeList
+
+		$$ = $3;
+		n = $3;
+		popdcl();
+		for nn = concat($7, $8); nn != nil; nn = nn.Next {
+			if nn.N.Op == OIF {
+				popdcl();
+			}
+			n.Nelse = list1(nn.N);
+			n = nn.N;
+		}
+	}
+
+elseif:
+	LELSE LIF 
+	{
+		markdcl();
+	}
+	if_header loop_body
+	{
+		if $4.Ntest == nil {
+			Yyerror("missing condition in if statement");
+		}
+		$4.Nbody = $5;
+		$$ = list1($4);
+	}
+
+elseif_list:
+	{
+		$$ = nil;
+	}
+|	elseif_list elseif
+	{
+		$$ = concat($1, $2);
+	}
+
+else:
+	{
+		$$ = nil;
+	}
+|	LELSE compound_stmt
+	{
+		l := &NodeList{N: $2}
+		l.End = l
+		$$ = l;
+	}
+
+switch_stmt:
+	LSWITCH
+	{
+		markdcl();
+	}
+	if_header
+	{
+		var n *Node
+		n = $3.Ntest;
+		if n != nil && n.Op != OTYPESW {
+			n = nil;
+		}
+		typesw = Nod(OXXX, typesw, n);
+	}
+	LBODY caseblock_list '}'
+	{
+		$$ = $3;
+		$$.Op = OSWITCH;
+		$$.List = $6;
+		typesw = typesw.Left;
+		popdcl();
+	}
+
+select_stmt:
+	LSELECT
+	{
+		typesw = Nod(OXXX, typesw, nil);
+	}
+	LBODY caseblock_list '}'
+	{
+		$$ = Nod(OSELECT, nil, nil);
+		$$.Lineno = typesw.Lineno;
+		$$.List = $4;
+		typesw = typesw.Left;
+	}
+
+/*
+ * expressions
+ */
+expr:
+	uexpr
+|	expr LOROR expr
+	{
+		$$ = Nod(OOROR, $1, $3);
+	}
+|	expr LANDAND expr
+	{
+		$$ = Nod(OANDAND, $1, $3);
+	}
+|	expr LEQ expr
+	{
+		$$ = Nod(OEQ, $1, $3);
+	}
+|	expr LNE expr
+	{
+		$$ = Nod(ONE, $1, $3);
+	}
+|	expr LLT expr
+	{
+		$$ = Nod(OLT, $1, $3);
+	}
+|	expr LLE expr
+	{
+		$$ = Nod(OLE, $1, $3);
+	}
+|	expr LGE expr
+	{
+		$$ = Nod(OGE, $1, $3);
+	}
+|	expr LGT expr
+	{
+		$$ = Nod(OGT, $1, $3);
+	}
+|	expr '+' expr
+	{
+		$$ = Nod(OADD, $1, $3);
+	}
+|	expr '-' expr
+	{
+		$$ = Nod(OSUB, $1, $3);
+	}
+|	expr '|' expr
+	{
+		$$ = Nod(OOR, $1, $3);
+	}
+|	expr '^' expr
+	{
+		$$ = Nod(OXOR, $1, $3);
+	}
+|	expr '*' expr
+	{
+		$$ = Nod(OMUL, $1, $3);
+	}
+|	expr '/' expr
+	{
+		$$ = Nod(ODIV, $1, $3);
+	}
+|	expr '%' expr
+	{
+		$$ = Nod(OMOD, $1, $3);
+	}
+|	expr '&' expr
+	{
+		$$ = Nod(OAND, $1, $3);
+	}
+|	expr LANDNOT expr
+	{
+		$$ = Nod(OANDNOT, $1, $3);
+	}
+|	expr LLSH expr
+	{
+		$$ = Nod(OLSH, $1, $3);
+	}
+|	expr LRSH expr
+	{
+		$$ = Nod(ORSH, $1, $3);
+	}
+	/* not an expression anymore, but left in so we can give a good error */
+|	expr LCOMM expr
+	{
+		$$ = Nod(OSEND, $1, $3);
+	}
+
+uexpr:
+	pexpr
+|	'*' uexpr
+	{
+		$$ = Nod(OIND, $2, nil);
+	}
+|	'&' uexpr
+	{
+		if $2.Op == OCOMPLIT {
+			// Special case for &T{...}: turn into (*T){...}.
+			$$ = $2;
+			$$.Right = Nod(OIND, $$.Right, nil);
+			$$.Right.Implicit = true;
+		} else {
+			$$ = Nod(OADDR, $2, nil);
+		}
+	}
+|	'+' uexpr
+	{
+		$$ = Nod(OPLUS, $2, nil);
+	}
+|	'-' uexpr
+	{
+		$$ = Nod(OMINUS, $2, nil);
+	}
+|	'!' uexpr
+	{
+		$$ = Nod(ONOT, $2, nil);
+	}
+|	'~' uexpr
+	{
+		Yyerror("the bitwise complement operator is ^");
+		$$ = Nod(OCOM, $2, nil);
+	}
+|	'^' uexpr
+	{
+		$$ = Nod(OCOM, $2, nil);
+	}
+|	LCOMM uexpr
+	{
+		$$ = Nod(ORECV, $2, nil);
+	}
+
+/*
+ * call-like statements that
+ * can be preceded by 'defer' and 'go'
+ */
+pseudocall:
+	pexpr '(' ')'
+	{
+		$$ = Nod(OCALL, $1, nil);
+	}
+|	pexpr '(' expr_or_type_list ocomma ')'
+	{
+		$$ = Nod(OCALL, $1, nil);
+		$$.List = $3;
+	}
+|	pexpr '(' expr_or_type_list LDDD ocomma ')'
+	{
+		$$ = Nod(OCALL, $1, nil);
+		$$.List = $3;
+		$$.Isddd = true;
+	}
+
+pexpr_no_paren:
+	LLITERAL
+	{
+		$$ = nodlit($1);
+	}
+|	name
+|	pexpr '.' sym
+	{
+		if $1.Op == OPACK {
+			var s *Sym
+			s = restrictlookup($3.Name, $1.Pkg);
+			$1.Used = true;
+			$$ = oldname(s);
+			break;
+		}
+		$$ = Nod(OXDOT, $1, newname($3));
+	}
+|	pexpr '.' '(' expr_or_type ')'
+	{
+		$$ = Nod(ODOTTYPE, $1, $4);
+	}
+|	pexpr '.' '(' LTYPE ')'
+	{
+		$$ = Nod(OTYPESW, nil, $1);
+	}
+|	pexpr '[' expr ']'
+	{
+		$$ = Nod(OINDEX, $1, $3);
+	}
+|	pexpr '[' oexpr ':' oexpr ']'
+	{
+		$$ = Nod(OSLICE, $1, Nod(OKEY, $3, $5));
+	}
+|	pexpr '[' oexpr ':' oexpr ':' oexpr ']'
+	{
+		if $5 == nil {
+			Yyerror("middle index required in 3-index slice");
+		}
+		if $7 == nil {
+			Yyerror("final index required in 3-index slice");
+		}
+		$$ = Nod(OSLICE3, $1, Nod(OKEY, $3, Nod(OKEY, $5, $7)));
+	}
+|	pseudocall
+|	convtype '(' expr ocomma ')'
+	{
+		// conversion
+		$$ = Nod(OCALL, $1, nil);
+		$$.List = list1($3);
+	}
+|	comptype lbrace start_complit braced_keyval_list '}'
+	{
+		$$ = $3;
+		$$.Right = $1;
+		$$.List = $4;
+		fixlbrace($2);
+	}
+|	pexpr_no_paren '{' start_complit braced_keyval_list '}'
+	{
+		$$ = $3;
+		$$.Right = $1;
+		$$.List = $4;
+	}
+|	'(' expr_or_type ')' '{' start_complit braced_keyval_list '}'
+	{
+		Yyerror("cannot parenthesize type in composite literal");
+		$$ = $5;
+		$$.Right = $2;
+		$$.List = $6;
+	}
+|	fnliteral
+
+start_complit:
+	{
+		// composite expression.
+		// make node early so we get the right line number.
+		$$ = Nod(OCOMPLIT, nil, nil);
+	}
+
+keyval:
+	expr ':' complitexpr
+	{
+		$$ = Nod(OKEY, $1, $3);
+	}
+
+bare_complitexpr:
+	expr
+	{
+		// These nodes do not carry line numbers.
+		// Since a composite literal commonly spans several lines,
+		// the line number on errors may be misleading.
+		// Introduce a wrapper node to give the correct line.
+		$$ = $1;
+		switch($$.Op) {
+		case ONAME, ONONAME, OTYPE, OPACK, OLITERAL:
+			$$ = Nod(OPAREN, $$, nil);
+			$$.Implicit = true;
+		}
+	}
+|	'{' start_complit braced_keyval_list '}'
+	{
+		$$ = $2;
+		$$.List = $3;
+	}
+
+complitexpr:
+	expr
+|	'{' start_complit braced_keyval_list '}'
+	{
+		$$ = $2;
+		$$.List = $3;
+	}
+
+pexpr:
+	pexpr_no_paren
+|	'(' expr_or_type ')'
+	{
+		$$ = $2;
+		
+		// Need to know on lhs of := whether there are ( ).
+		// Don't bother with the OPAREN in other cases:
+		// it's just a waste of memory and time.
+		switch($$.Op) {
+		case ONAME, ONONAME, OPACK, OTYPE, OLITERAL, OTYPESW:
+			$$ = Nod(OPAREN, $$, nil);
+		}
+	}
+
+expr_or_type:
+	expr
+|	non_expr_type	%prec PreferToRightParen
+
+name_or_type:
+	ntype
+
+lbrace:
+	LBODY
+	{
+		$$ = LBODY;
+	}
+|	'{'
+	{
+		$$ = '{';
+	}
+
+/*
+ * names and types
+ *	newname is used before declared
+ *	oldname is used after declared
+ */
+new_name:
+	sym
+	{
+		if $1 == nil {
+			$$ = nil;
+		} else {
+			$$ = newname($1);
+		}
+	}
+
+dcl_name:
+	sym
+	{
+		$$ = dclname($1);
+	}
+
+onew_name:
+	{
+		$$ = nil;
+	}
+|	new_name
+
+sym:
+	LNAME
+	{
+		$$ = $1;
+		// during imports, unqualified non-exported identifiers are from builtinpkg
+		if importpkg != nil && !exportname($1.Name) {
+			$$ = Pkglookup($1.Name, builtinpkg);
+		}
+	}
+|	hidden_importsym
+|	'?'
+	{
+		$$ = nil;
+	}
+
+hidden_importsym:
+	'@' LLITERAL '.' LNAME
+	{
+		var p *Pkg
+
+		if $2.U.(string) == "" {
+			p = importpkg;
+		} else {
+			if isbadimport($2.U.(string)) {
+				errorexit();
+			}
+			p = mkpkg($2.U.(string));
+		}
+		$$ = Pkglookup($4.Name, p);
+	}
+|	'@' LLITERAL '.' '?'
+	{
+		var p *Pkg
+
+		if $2.U.(string) == "" {
+			p = importpkg;
+		} else {
+			if isbadimport($2.U.(string)) {
+				errorexit();
+			}
+			p = mkpkg($2.U.(string));
+		}
+		$$ = Pkglookup("?", p);
+	}
+
+name:
+	sym	%prec NotParen
+	{
+		$$ = oldname($1);
+		if $$.Pack != nil {
+			$$.Pack.Used = true;
+		}
+	}
+
+labelname:
+	new_name
+
+/*
+ * to avoid parsing conflicts, type is split into
+ *	channel types
+ *	function types
+ *	parenthesized types
+ *	any other type
+ * the type system makes additional restrictions,
+ * but those are not implemented in the grammar.
+ */
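+/*
+ * for example (editorial note): in
+ *	chan <- chan int
+ * the tokens alone permit both chan<- (chan int) and chan (<-chan int).
+ * because non_recvchantype excludes recvchantype, the rule
+ *	LCHAN non_recvchantype
+ * cannot absorb the <-, so the parse commits to chan<- (chan int),
+ * matching the spec rule that <- binds to the leftmost possible chan.
+ */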
+dotdotdot:
+	LDDD
+	{
+		Yyerror("final argument in variadic function missing type");
+		$$ = Nod(ODDD, typenod(typ(TINTER)), nil);
+	}
+|	LDDD ntype
+	{
+		$$ = Nod(ODDD, $2, nil);
+	}
+
+ntype:
+	recvchantype
+|	fntype
+|	othertype
+|	ptrtype
+|	dotname
+|	'(' ntype ')'
+	{
+		$$ = $2;
+	}
+
+non_expr_type:
+	recvchantype
+|	fntype
+|	othertype
+|	'*' non_expr_type
+	{
+		$$ = Nod(OIND, $2, nil);
+	}
+
+non_recvchantype:
+	fntype
+|	othertype
+|	ptrtype
+|	dotname
+|	'(' ntype ')'
+	{
+		$$ = $2;
+	}
+
+convtype:
+	fntype
+|	othertype
+
+comptype:
+	othertype
+
+fnret_type:
+	recvchantype
+|	fntype
+|	othertype
+|	ptrtype
+|	dotname
+
+dotname:
+	name
+|	name '.' sym
+	{
+		if $1.Op == OPACK {
+			var s *Sym
+			s = restrictlookup($3.Name, $1.Pkg);
+			$1.Used = true;
+			$$ = oldname(s);
+			break;
+		}
+		$$ = Nod(OXDOT, $1, newname($3));
+	}
+
+othertype:
+	'[' oexpr ']' ntype
+	{
+		$$ = Nod(OTARRAY, $2, $4);
+	}
+|	'[' LDDD ']' ntype
+	{
+		// array literal of nelem
+		$$ = Nod(OTARRAY, Nod(ODDD, nil, nil), $4);
+	}
+|	LCHAN non_recvchantype
+	{
+		$$ = Nod(OTCHAN, $2, nil);
+		$$.Etype = Cboth;
+	}
+|	LCHAN LCOMM ntype
+	{
+		$$ = Nod(OTCHAN, $3, nil);
+		$$.Etype = Csend;
+	}
+|	LMAP '[' ntype ']' ntype
+	{
+		$$ = Nod(OTMAP, $3, $5);
+	}
+|	structtype
+|	interfacetype
+
+ptrtype:
+	'*' ntype
+	{
+		$$ = Nod(OIND, $2, nil);
+	}
+
+recvchantype:
+	LCOMM LCHAN ntype
+	{
+		$$ = Nod(OTCHAN, $3, nil);
+		$$.Etype = Crecv;
+	}
+
+structtype:
+	LSTRUCT lbrace structdcl_list osemi '}'
+	{
+		$$ = Nod(OTSTRUCT, nil, nil);
+		$$.List = $3;
+		fixlbrace($2);
+	}
+|	LSTRUCT lbrace '}'
+	{
+		$$ = Nod(OTSTRUCT, nil, nil);
+		fixlbrace($2);
+	}
+
+interfacetype:
+	LINTERFACE lbrace interfacedcl_list osemi '}'
+	{
+		$$ = Nod(OTINTER, nil, nil);
+		$$.List = $3;
+		fixlbrace($2);
+	}
+|	LINTERFACE lbrace '}'
+	{
+		$$ = Nod(OTINTER, nil, nil);
+		fixlbrace($2);
+	}
+
+/*
+ * function stuff
+ * all in one place to show how crappy it all is
+ */
+xfndcl:
+	LFUNC fndcl fnbody
+	{
+		$$ = $2;
+		if $$ == nil {
+			break;
+		}
+		if noescape && $3 != nil {
+			Yyerror("can only use //go:noescape with external func implementations");
+		}
+		$$.Nbody = $3;
+		$$.Func.Endlineno = lineno;
+		$$.Noescape = noescape;
+		$$.Func.Nosplit = nosplit;
+		$$.Func.Nowritebarrier = nowritebarrier;
+		funcbody($$);
+	}
+
+fndcl:
+	sym '(' oarg_type_list_ocomma ')' fnres
+	{
+		var t *Node
+
+		$$ = nil;
+		$3 = checkarglist($3, 1);
+
+		if $1.Name == "init" {
+			$1 = renameinit();
+			if $3 != nil || $5 != nil {
+				Yyerror("func init must have no arguments and no return values");
+			}
+		}
+		if localpkg.Name == "main" && $1.Name == "main" {
+			if $3 != nil || $5 != nil {
+				Yyerror("func main must have no arguments and no return values");
+			}
+		}
+
+		t = Nod(OTFUNC, nil, nil);
+		t.List = $3;
+		t.Rlist = $5;
+
+		$$ = Nod(ODCLFUNC, nil, nil);
+		$$.Nname = newfuncname($1);
+		$$.Nname.Defn = $$;
+		$$.Nname.Ntype = t;		// TODO: check if nname already has an ntype
+		declare($$.Nname, PFUNC);
+
+		funchdr($$);
+	}
+|	'(' oarg_type_list_ocomma ')' sym '(' oarg_type_list_ocomma ')' fnres
+	{
+		var rcvr, t *Node
+
+		$$ = nil;
+		$2 = checkarglist($2, 0);
+		$6 = checkarglist($6, 1);
+
+		if $2 == nil {
+			Yyerror("method has no receiver");
+			break;
+		}
+		if $2.Next != nil {
+			Yyerror("method has multiple receivers");
+			break;
+		}
+		rcvr = $2.N;
+		if rcvr.Op != ODCLFIELD {
+			Yyerror("bad receiver in method");
+			break;
+		}
+
+		t = Nod(OTFUNC, rcvr, nil);
+		t.List = $6;
+		t.Rlist = $8;
+
+		$$ = Nod(ODCLFUNC, nil, nil);
+		$$.Func.Shortname = newfuncname($4);
+		$$.Nname = methodname1($$.Func.Shortname, rcvr.Right);
+		$$.Nname.Defn = $$;
+		$$.Nname.Ntype = t;
+		$$.Nname.Nointerface = nointerface;
+		declare($$.Nname, PFUNC);
+
+		funchdr($$);
+	}
+
+hidden_fndcl:
+	hidden_pkg_importsym '(' ohidden_funarg_list ')' ohidden_funres
+	{
+		var s *Sym
+		var t *Type
+
+		$$ = nil;
+
+		s = $1;
+		t = functype(nil, $3, $5);
+
+		importsym(s, ONAME);
+		if s.Def != nil && s.Def.Op == ONAME {
+			if Eqtype(t, s.Def.Type) {
+				dclcontext = PDISCARD;  // since we skip funchdr below
+				break;
+			}
+			Yyerror("inconsistent definition for func %v during import\n\t%v\n\t%v", s, s.Def.Type, t);
+		}
+
+		$$ = newfuncname(s);
+		$$.Type = t;
+		declare($$, PFUNC);
+
+		funchdr($$);
+	}
+|	'(' hidden_funarg_list ')' sym '(' ohidden_funarg_list ')' ohidden_funres
+	{
+		$$ = methodname1(newname($4), $2.N.Right); 
+		$$.Type = functype($2.N, $6, $8);
+
+		checkwidth($$.Type);
+		addmethod($4, $$.Type, false, nointerface);
+		nointerface = false
+		funchdr($$);
+		
+		// inl.C's inlnode, when called on a dotmeth node, expects to find the inlineable body as
+		// (dotmeth's type).Nname.Inl, and dotmeth's type has been pulled
+		// out by typecheck's lookdot as this $$.ttype.  So by providing
+		// this back link here we avoid special casing there.
+		$$.Type.Nname = $$;
+	}
+
+fntype:
+	LFUNC '(' oarg_type_list_ocomma ')' fnres
+	{
+		$3 = checkarglist($3, 1);
+		$$ = Nod(OTFUNC, nil, nil);
+		$$.List = $3;
+		$$.Rlist = $5;
+	}
+
+fnbody:
+	{
+		$$ = nil;
+	}
+|	'{' stmt_list '}'
+	{
+		$$ = $2;
+		if $$ == nil {
+			$$ = list1(Nod(OEMPTY, nil, nil));
+		}
+	}
+
+fnres:
+	%prec NotParen
+	{
+		$$ = nil;
+	}
+|	fnret_type
+	{
+		$$ = list1(Nod(ODCLFIELD, nil, $1));
+	}
+|	'(' oarg_type_list_ocomma ')'
+	{
+		$2 = checkarglist($2, 0);
+		$$ = $2;
+	}
+
+fnlitdcl:
+	fntype
+	{
+		closurehdr($1);
+	}
+
+fnliteral:
+	fnlitdcl lbrace stmt_list '}'
+	{
+		$$ = closurebody($3);
+		fixlbrace($2);
+	}
+|	fnlitdcl error
+	{
+		$$ = closurebody(nil);
+	}
+
+/*
+ * lists of things
+ * note that they are left recursive
+ * to conserve yacc stack. they need to
+ * be reversed to interpret correctly
+ */
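+/*
+ * sketch of the tradeoff (editorial note): with left recursion,
+ *	expr_list: expr | expr_list ',' expr
+ * each comma triggers a reduction, so parsing n elements keeps only a
+ * few symbols on the stack; the right-recursive form would stack all
+ * n elements before the first reduction.
+ */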
+xdcl_list:
+	{
+		$$ = nil;
+	}
+|	xdcl_list xdcl ';'
+	{
+		$$ = concat($1, $2);
+		if nsyntaxerrors == 0 {
+			testdclstack();
+		}
+		nointerface = false
+		noescape = false
+		nosplit = false
+		nowritebarrier = false
+	}
+
+vardcl_list:
+	vardcl
+|	vardcl_list ';' vardcl
+	{
+		$$ = concat($1, $3);
+	}
+
+constdcl_list:
+	constdcl1
+|	constdcl_list ';' constdcl1
+	{
+		$$ = concat($1, $3);
+	}
+
+typedcl_list:
+	typedcl
+	{
+		$$ = list1($1);
+	}
+|	typedcl_list ';' typedcl
+	{
+		$$ = list($1, $3);
+	}
+
+structdcl_list:
+	structdcl
+|	structdcl_list ';' structdcl
+	{
+		$$ = concat($1, $3);
+	}
+
+interfacedcl_list:
+	interfacedcl
+	{
+		$$ = list1($1);
+	}
+|	interfacedcl_list ';' interfacedcl
+	{
+		$$ = list($1, $3);
+	}
+
+structdcl:
+	new_name_list ntype oliteral
+	{
+		var l *NodeList
+
+		var n *Node
+		l = $1;
+		if l == nil {
+			// ? symbol, during import (list1(nil) == nil)
+			n = $2;
+			if n.Op == OIND {
+				n = n.Left;
+			}
+			n = embedded(n.Sym, importpkg);
+			n.Right = $2;
+			n.Val = $3;
+			$$ = list1(n);
+			break;
+		}
+
+		for l=$1; l != nil; l=l.Next {
+			l.N = Nod(ODCLFIELD, l.N, $2);
+			l.N.Val = $3;
+		}
+	}
+|	embed oliteral
+	{
+		$1.Val = $2;
+		$$ = list1($1);
+	}
+|	'(' embed ')' oliteral
+	{
+		$2.Val = $4;
+		$$ = list1($2);
+		Yyerror("cannot parenthesize embedded type");
+	}
+|	'*' embed oliteral
+	{
+		$2.Right = Nod(OIND, $2.Right, nil);
+		$2.Val = $3;
+		$$ = list1($2);
+	}
+|	'(' '*' embed ')' oliteral
+	{
+		$3.Right = Nod(OIND, $3.Right, nil);
+		$3.Val = $5;
+		$$ = list1($3);
+		Yyerror("cannot parenthesize embedded type");
+	}
+|	'*' '(' embed ')' oliteral
+	{
+		$3.Right = Nod(OIND, $3.Right, nil);
+		$3.Val = $5;
+		$$ = list1($3);
+		Yyerror("cannot parenthesize embedded type");
+	}
+
+packname:
+	LNAME
+	{
+		var n *Node
+
+		$$ = $1;
+		n = oldname($1);
+		if n.Pack != nil {
+			n.Pack.Used = true;
+		}
+	}
+|	LNAME '.' sym
+	{
+		var pkg *Pkg
+
+		if $1.Def == nil || $1.Def.Op != OPACK {
+			Yyerror("%v is not a package", $1);
+			pkg = localpkg;
+		} else {
+			$1.Def.Used = true;
+			pkg = $1.Def.Pkg;
+		}
+		$$ = restrictlookup($3.Name, pkg);
+	}
+
+embed:
+	packname
+	{
+		$$ = embedded($1, localpkg);
+	}
+
+interfacedcl:
+	new_name indcl
+	{
+		$$ = Nod(ODCLFIELD, $1, $2);
+		ifacedcl($$);
+	}
+|	packname
+	{
+		$$ = Nod(ODCLFIELD, nil, oldname($1));
+	}
+|	'(' packname ')'
+	{
+		$$ = Nod(ODCLFIELD, nil, oldname($2));
+		Yyerror("cannot parenthesize embedded type");
+	}
+
+indcl:
+	'(' oarg_type_list_ocomma ')' fnres
+	{
+		// without func keyword
+		$2 = checkarglist($2, 1);
+		$$ = Nod(OTFUNC, fakethis(), nil);
+		$$.List = $2;
+		$$.Rlist = $4;
+	}
+
+/*
+ * function arguments.
+ */
+arg_type:
+	name_or_type
+|	sym name_or_type
+	{
+		$$ = Nod(ONONAME, nil, nil);
+		$$.Sym = $1;
+		$$ = Nod(OKEY, $$, $2);
+	}
+|	sym dotdotdot
+	{
+		$$ = Nod(ONONAME, nil, nil);
+		$$.Sym = $1;
+		$$ = Nod(OKEY, $$, $2);
+	}
+|	dotdotdot
+
+arg_type_list:
+	arg_type
+	{
+		$$ = list1($1);
+	}
+|	arg_type_list ',' arg_type
+	{
+		$$ = list($1, $3);
+	}
+
+oarg_type_list_ocomma:
+	{
+		$$ = nil;
+	}
+|	arg_type_list ocomma
+	{
+		$$ = $1;
+	}
+
+/*
+ * statement
+ */
+stmt:
+	{
+		$$ = nil;
+	}
+|	compound_stmt
+|	common_dcl
+	{
+		$$ = liststmt($1);
+	}
+|	non_dcl_stmt
+|	error
+	{
+		$$ = nil;
+	}
+
+non_dcl_stmt:
+	simple_stmt
+|	for_stmt
+|	switch_stmt
+|	select_stmt
+|	if_stmt
+|	labelname ':'
+	{
+		$1 = Nod(OLABEL, $1, nil);
+		$1.Sym = dclstack;  // context, for goto restrictions
+	}
+	stmt
+	{
+		var l *NodeList
+
+		$1.Defn = $4;
+		l = list1($1);
+		if $4 != nil {
+			l = list(l, $4);
+		}
+		$$ = liststmt(l);
+	}
+|	LFALL
+	{
+		// will be converted to OFALL
+		$$ = Nod(OXFALL, nil, nil);
+		$$.Xoffset = int64(block);
+	}
+|	LBREAK onew_name
+	{
+		$$ = Nod(OBREAK, $2, nil);
+	}
+|	LCONTINUE onew_name
+	{
+		$$ = Nod(OCONTINUE, $2, nil);
+	}
+|	LGO pseudocall
+	{
+		$$ = Nod(OPROC, $2, nil);
+	}
+|	LDEFER pseudocall
+	{
+		$$ = Nod(ODEFER, $2, nil);
+	}
+|	LGOTO new_name
+	{
+		$$ = Nod(OGOTO, $2, nil);
+		$$.Sym = dclstack;  // context, for goto restrictions
+	}
+|	LRETURN oexpr_list
+	{
+		$$ = Nod(ORETURN, nil, nil);
+		$$.List = $2;
+		if $$.List == nil && Curfn != nil {
+			var l *NodeList
+
+			for l=Curfn.Func.Dcl; l != nil; l=l.Next {
+				if l.N.Class == PPARAM {
+					continue;
+				}
+				if l.N.Class != PPARAMOUT {
+					break;
+				}
+				if l.N.Sym.Def != l.N {
+					Yyerror("%s is shadowed during return", l.N.Sym.Name);
+				}
+			}
+		}
+	}
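+
+/*
+ * illustrative trigger for the shadow check above (an assumption):
+ *	func f() (err error) {
+ *		if err := g(); err != nil {
+ *			return	// error: err is shadowed during return
+ *		}
+ *		return nil
+ *	}
+ */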
+
+stmt_list:
+	stmt
+	{
+		$$ = nil;
+		if $1 != nil {
+			$$ = list1($1);
+		}
+	}
+|	stmt_list ';' stmt
+	{
+		$$ = $1;
+		if $3 != nil {
+			$$ = list($$, $3);
+		}
+	}
+
+new_name_list:
+	new_name
+	{
+		$$ = list1($1);
+	}
+|	new_name_list ',' new_name
+	{
+		$$ = list($1, $3);
+	}
+
+dcl_name_list:
+	dcl_name
+	{
+		$$ = list1($1);
+	}
+|	dcl_name_list ',' dcl_name
+	{
+		$$ = list($1, $3);
+	}
+
+expr_list:
+	expr
+	{
+		$$ = list1($1);
+	}
+|	expr_list ',' expr
+	{
+		$$ = list($1, $3);
+	}
+
+expr_or_type_list:
+	expr_or_type
+	{
+		$$ = list1($1);
+	}
+|	expr_or_type_list ',' expr_or_type
+	{
+		$$ = list($1, $3);
+	}
+
+/*
+ * list of combo of keyval and val
+ */
+keyval_list:
+	keyval
+	{
+		$$ = list1($1);
+	}
+|	bare_complitexpr
+	{
+		$$ = list1($1);
+	}
+|	keyval_list ',' keyval
+	{
+		$$ = list($1, $3);
+	}
+|	keyval_list ',' bare_complitexpr
+	{
+		$$ = list($1, $3);
+	}
+
+braced_keyval_list:
+	{
+		$$ = nil;
+	}
+|	keyval_list ocomma
+	{
+		$$ = $1;
+	}
+
+/*
+ * optional things
+ */
+osemi:
+|	';'
+
+ocomma:
+|	','
+
+oexpr:
+	{
+		$$ = nil;
+	}
+|	expr
+
+oexpr_list:
+	{
+		$$ = nil;
+	}
+|	expr_list
+
+osimple_stmt:
+	{
+		$$ = nil;
+	}
+|	simple_stmt
+
+ohidden_funarg_list:
+	{
+		$$ = nil;
+	}
+|	hidden_funarg_list
+
+ohidden_structdcl_list:
+	{
+		$$ = nil;
+	}
+|	hidden_structdcl_list
+
+ohidden_interfacedcl_list:
+	{
+		$$ = nil;
+	}
+|	hidden_interfacedcl_list
+
+oliteral:
+	{
+		$$.Ctype = CTxxx;
+	}
+|	LLITERAL
+
+/*
+ * import syntax from package header
+ */
+hidden_import:
+	LIMPORT LNAME LLITERAL ';'
+	{
+		importimport($2, $3.U.(string));
+	}
+|	LVAR hidden_pkg_importsym hidden_type ';'
+	{
+		importvar($2, $3);
+	}
+|	LCONST hidden_pkg_importsym '=' hidden_constant ';'
+	{
+		importconst($2, Types[TIDEAL], $4);
+	}
+|	LCONST hidden_pkg_importsym hidden_type '=' hidden_constant ';'
+	{
+		importconst($2, $3, $5);
+	}
+|	LTYPE hidden_pkgtype hidden_type ';'
+	{
+		importtype($2, $3);
+	}
+|	LFUNC hidden_fndcl fnbody ';'
+	{
+		if $2 == nil {
+			dclcontext = PEXTERN;  // since we skip the funcbody below
+			break;
+		}
+
+		$2.Func.Inl = $3;
+
+		funcbody($2);
+		importlist = list(importlist, $2);
+
+		if Debug['E'] > 0 {
+			fmt.Printf("import [%q] func %v \n", importpkg.Path, $2)
+			if Debug['m'] > 2 && $2.Func.Inl != nil {
+				fmt.Printf("inl body:%v\n", $2.Func.Inl)
+			}
+		}
+	}
+
+hidden_pkg_importsym:
+	hidden_importsym
+	{
+		$$ = $1;
+		structpkg = $$.Pkg;
+	}
+
+hidden_pkgtype:
+	hidden_pkg_importsym
+	{
+		$$ = pkgtype($1);
+		importsym($1, OTYPE);
+	}
+
+/*
+ *  importing types
+ */
+
+hidden_type:
+	hidden_type_misc
+|	hidden_type_recv_chan
+|	hidden_type_func
+
+hidden_type_non_recv_chan:
+	hidden_type_misc
+|	hidden_type_func
+
+hidden_type_misc:
+	hidden_importsym
+	{
+		$$ = pkgtype($1);
+	}
+|	LNAME
+	{
+		// predefined name like uint8
+		$1 = Pkglookup($1.Name, builtinpkg);
+		if $1.Def == nil || $1.Def.Op != OTYPE {
+			Yyerror("%s is not a type", $1.Name);
+			$$ = nil;
+		} else {
+			$$ = $1.Def.Type;
+		}
+	}
+|	'[' ']' hidden_type
+	{
+		$$ = aindex(nil, $3);
+	}
+|	'[' LLITERAL ']' hidden_type
+	{
+		$$ = aindex(nodlit($2), $4);
+	}
+|	LMAP '[' hidden_type ']' hidden_type
+	{
+		$$ = maptype($3, $5);
+	}
+|	LSTRUCT '{' ohidden_structdcl_list '}'
+	{
+		$$ = tostruct($3);
+	}
+|	LINTERFACE '{' ohidden_interfacedcl_list '}'
+	{
+		$$ = tointerface($3);
+	}
+|	'*' hidden_type
+	{
+		$$ = Ptrto($2);
+	}
+|	LCHAN hidden_type_non_recv_chan
+	{
+		$$ = typ(TCHAN);
+		$$.Type = $2;
+		$$.Chan = Cboth;
+	}
+|	LCHAN '(' hidden_type_recv_chan ')'
+	{
+		$$ = typ(TCHAN);
+		$$.Type = $3;
+		$$.Chan = Cboth;
+	}
+|	LCHAN LCOMM hidden_type
+	{
+		$$ = typ(TCHAN);
+		$$.Type = $3;
+		$$.Chan = Csend;
+	}
+
+hidden_type_recv_chan:
+	LCOMM LCHAN hidden_type
+	{
+		$$ = typ(TCHAN);
+		$$.Type = $3;
+		$$.Chan = Crecv;
+	}
+
+hidden_type_func:
+	LFUNC '(' ohidden_funarg_list ')' ohidden_funres
+	{
+		$$ = functype(nil, $3, $5);
+	}
+
+hidden_funarg:
+	sym hidden_type oliteral
+	{
+		$$ = Nod(ODCLFIELD, nil, typenod($2));
+		if $1 != nil {
+			$$.Left = newname($1);
+		}
+		$$.Val = $3;
+	}
+|	sym LDDD hidden_type oliteral
+	{
+		var t *Type
+	
+		t = typ(TARRAY);
+		t.Bound = -1;
+		t.Type = $3;
+
+		$$ = Nod(ODCLFIELD, nil, typenod(t));
+		if $1 != nil {
+			$$.Left = newname($1);
+		}
+		$$.Isddd = true;
+		$$.Val = $4;
+	}
+
+hidden_structdcl:
+	sym hidden_type oliteral
+	{
+		var s *Sym
+		var p *Pkg
+
+		if $1 != nil && $1.Name != "?" {
+			$$ = Nod(ODCLFIELD, newname($1), typenod($2));
+			$$.Val = $3;
+		} else {
+			s = $2.Sym;
+			if s == nil && Isptr[$2.Etype] {
+				s = $2.Type.Sym;
+			}
+			p = importpkg;
+			if $1 != nil {
+				p = $1.Pkg;
+			}
+			$$ = embedded(s, p);
+			$$.Right = typenod($2);
+			$$.Val = $3;
+		}
+	}
+
+hidden_interfacedcl:
+	sym '(' ohidden_funarg_list ')' ohidden_funres
+	{
+		$$ = Nod(ODCLFIELD, newname($1), typenod(functype(fakethis(), $3, $5)));
+	}
+|	hidden_type
+	{
+		$$ = Nod(ODCLFIELD, nil, typenod($1));
+	}
+
+ohidden_funres:
+	{
+		$$ = nil;
+	}
+|	hidden_funres
+
+hidden_funres:
+	'(' ohidden_funarg_list ')'
+	{
+		$$ = $2;
+	}
+|	hidden_type
+	{
+		$$ = list1(Nod(ODCLFIELD, nil, typenod($1)));
+	}
+
+/*
+ *  importing constants
+ */
+
+hidden_literal:
+	LLITERAL
+	{
+		$$ = nodlit($1);
+	}
+|	'-' LLITERAL
+	{
+		$$ = nodlit($2);
+		switch($$.Val.Ctype){
+		case CTINT, CTRUNE:
+			mpnegfix($$.Val.U.(*Mpint));
+			break;
+		case CTFLT:
+			mpnegflt($$.Val.U.(*Mpflt));
+			break;
+		case CTCPLX:
+			mpnegflt(&$$.Val.U.(*Mpcplx).Real);
+			mpnegflt(&$$.Val.U.(*Mpcplx).Imag);
+			break;
+		default:
+			Yyerror("bad negated constant");
+		}
+	}
+|	sym
+	{
+		$$ = oldname(Pkglookup($1.Name, builtinpkg));
+		if $$.Op != OLITERAL {
+			Yyerror("bad constant %v", $$.Sym);
+		}
+	}
+
+hidden_constant:
+	hidden_literal
+|	'(' hidden_literal '+' hidden_literal ')'
+	{
+		if $2.Val.Ctype == CTRUNE && $4.Val.Ctype == CTINT {
+			$$ = $2;
+			mpaddfixfix($2.Val.U.(*Mpint), $4.Val.U.(*Mpint), 0);
+			break;
+		}
+		$4.Val.U.(*Mpcplx).Real = $4.Val.U.(*Mpcplx).Imag;
+		Mpmovecflt(&$4.Val.U.(*Mpcplx).Imag, 0.0);
+		$$ = nodcplxlit($2.Val, $4.Val);
+	}
+
+hidden_import_list:
+|	hidden_import_list hidden_import
+
+hidden_funarg_list:
+	hidden_funarg
+	{
+		$$ = list1($1);
+	}
+|	hidden_funarg_list ',' hidden_funarg
+	{
+		$$ = list($1, $3);
+	}
+
+hidden_structdcl_list:
+	hidden_structdcl
+	{
+		$$ = list1($1);
+	}
+|	hidden_structdcl_list ';' hidden_structdcl
+	{
+		$$ = list($1, $3);
+	}
+
+hidden_interfacedcl_list:
+	hidden_interfacedcl
+	{
+		$$ = list1($1);
+	}
+|	hidden_interfacedcl_list ';' hidden_interfacedcl
+	{
+		$$ = list($1, $3);
+	}
+
+%%
+func fixlbrace(lbr int) {
+	// If the opening brace was an LBODY,
+	// set up for another one now that we're done.
+	// See comment in lex.C about loophack.
+	if lbr == LBODY {
+		loophack = 1
+	}
+}
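+
+// Editorial note (an assumption based on the lex.C comment above): inside
+// an if/for/switch header the lexer rewrites the next '{' as LBODY so the
+// statement body's brace is not mistaken for one opening a struct,
+// interface, or function literal type. When such a type consumes the
+// LBODY brace anyway, fixlbrace re-arms the rewrite so the brace that
+// really opens the body is again delivered as LBODY.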
diff --git a/src/cmd/compile/internal/gc/gsubr.go b/src/cmd/compile/internal/gc/gsubr.go
new file mode 100644
index 0000000..5ec4587
--- /dev/null
+++ b/src/cmd/compile/internal/gc/gsubr.go
@@ -0,0 +1,827 @@
+// Derived from Inferno utils/6c/txt.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/txt.c
+//
+//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
+//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+//	Portions Copyright © 1997-1999 Vita Nuova Limited
+//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+//	Portions Copyright © 2004,2006 Bruce Ellis
+//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+//	Portions Copyright © 2009 The Go Authors.  All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package gc
+
+import (
+	"cmd/internal/obj"
+	"fmt"
+	"runtime"
+	"strings"
+)
+
+var ddumped int
+
+var dfirst *obj.Prog
+
+var dpc *obj.Prog
+
+/*
+ * Is this node a memory operand?
+ */
+func Ismem(n *Node) bool {
+	switch n.Op {
+	case OITAB,
+		OSPTR,
+		OLEN,
+		OCAP,
+		OINDREG,
+		ONAME,
+		OPARAM,
+		OCLOSUREVAR:
+		return true
+
+	case OADDR:
+		return Thearch.Thechar == '6' || Thearch.Thechar == '9' // because 6g uses PC-relative addressing; TODO(rsc): not sure why 9g too
+	}
+
+	return false
+}
+
+func Samereg(a *Node, b *Node) bool {
+	if a == nil || b == nil {
+		return false
+	}
+	if a.Op != OREGISTER {
+		return false
+	}
+	if b.Op != OREGISTER {
+		return false
+	}
+	if a.Reg != b.Reg {
+		return false
+	}
+	return true
+}
+
+func Gbranch(as int, t *Type, likely int) *obj.Prog {
+	p := Prog(as)
+	p.To.Type = obj.TYPE_BRANCH
+	p.To.Val = nil
+	if as != obj.AJMP && likely != 0 && Thearch.Thechar != '9' && Thearch.Thechar != '7' {
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = int64(obj.Bool2int(likely > 0))
+	}
+
+	if Debug['g'] != 0 {
+		fmt.Printf("%v\n", p)
+	}
+
+	return p
+}
+
+func Prog(as int) *obj.Prog {
+	var p *obj.Prog
+
+	if as == obj.ADATA || as == obj.AGLOBL {
+		if ddumped != 0 {
+			Fatal("already dumped data")
+		}
+		if dpc == nil {
+			dpc = Ctxt.NewProg()
+			dfirst = dpc
+		}
+
+		p = dpc
+		dpc = Ctxt.NewProg()
+		p.Link = dpc
+	} else {
+		p = Pc
+		Pc = Ctxt.NewProg()
+		Clearp(Pc)
+		p.Link = Pc
+	}
+
+	if lineno == 0 {
+		if Debug['K'] != 0 {
+			Warn("prog: line 0")
+		}
+	}
+
+	p.As = int16(as)
+	p.Lineno = lineno
+	return p
+}
+
+func Nodreg(n *Node, t *Type, r int) {
+	if t == nil {
+		Fatal("nodreg: t nil")
+	}
+
+	*n = Node{}
+	n.Op = OREGISTER
+	n.Addable = true
+	ullmancalc(n)
+	n.Reg = int16(r)
+	n.Type = t
+}
+
+func Nodindreg(n *Node, t *Type, r int) {
+	Nodreg(n, t, r)
+	n.Op = OINDREG
+}
+
+func Afunclit(a *obj.Addr, n *Node) {
+	if a.Type == obj.TYPE_ADDR && a.Name == obj.NAME_EXTERN {
+		a.Type = obj.TYPE_MEM
+		a.Sym = Linksym(n.Sym)
+	}
+}
+
+func Clearp(p *obj.Prog) {
+	obj.Nopout(p)
+	p.As = obj.AEND
+	p.Pc = int64(pcloc)
+	pcloc++
+}
+
+func dumpdata() {
+	ddumped = 1
+	if dfirst == nil {
+		return
+	}
+	newplist()
+	*Pc = *dfirst
+	Pc = dpc
+	Clearp(Pc)
+}
+
+// Fixup instructions after allocauto (formerly compactframe) has moved all autos around.
+func fixautoused(p *obj.Prog) {
+	for lp := &p; ; {
+		p = *lp
+		if p == nil {
+			break
+		}
+		if p.As == obj.ATYPE && p.From.Node != nil && p.From.Name == obj.NAME_AUTO && !((p.From.Node).(*Node)).Used {
+			*lp = p.Link
+			continue
+		}
+
+		if (p.As == obj.AVARDEF || p.As == obj.AVARKILL) && p.To.Node != nil && !((p.To.Node).(*Node)).Used {
+			// Cannot remove VARDEF instruction, because - unlike TYPE handled above -
+			// VARDEFs are interspersed with other code, and a jump might be using the
+			// VARDEF as a target. Replace with a no-op instead. A later pass will remove
+			// the no-ops.
+			obj.Nopout(p)
+
+			continue
+		}
+
+		if p.From.Name == obj.NAME_AUTO && p.From.Node != nil {
+			p.From.Offset += ((p.From.Node).(*Node)).Stkdelta
+		}
+
+		if p.To.Name == obj.NAME_AUTO && p.To.Node != nil {
+			p.To.Offset += ((p.To.Node).(*Node)).Stkdelta
+		}
+
+		lp = &p.Link
+	}
+}
+
+func ggloblnod(nam *Node) {
+	p := Thearch.Gins(obj.AGLOBL, nam, nil)
+	p.Lineno = nam.Lineno
+	p.From.Sym.Gotype = Linksym(ngotype(nam))
+	p.To.Sym = nil
+	p.To.Type = obj.TYPE_CONST
+	p.To.Offset = nam.Type.Width
+	if nam.Name.Readonly {
+		p.From3.Offset = obj.RODATA
+	}
+	if nam.Type != nil && !haspointers(nam.Type) {
+		p.From3.Offset |= obj.NOPTR
+	}
+}
+
+func ggloblsym(s *Sym, width int32, flags int16) {
+	p := Thearch.Gins(obj.AGLOBL, nil, nil)
+	p.From.Type = obj.TYPE_MEM
+	p.From.Name = obj.NAME_EXTERN
+	p.From.Sym = Linksym(s)
+	if flags&obj.LOCAL != 0 {
+		p.From.Sym.Local = true
+		flags &= ^obj.LOCAL
+	}
+	p.To.Type = obj.TYPE_CONST
+	p.To.Offset = int64(width)
+	p.From3.Offset = int64(flags)
+}
+
+func gjmp(to *obj.Prog) *obj.Prog {
+	p := Gbranch(obj.AJMP, nil, 0)
+	if to != nil {
+		Patch(p, to)
+	}
+	return p
+}
+
+func gtrack(s *Sym) {
+	p := Thearch.Gins(obj.AUSEFIELD, nil, nil)
+	p.From.Type = obj.TYPE_MEM
+	p.From.Name = obj.NAME_EXTERN
+	p.From.Sym = Linksym(s)
+}
+
+func gused(n *Node) {
+	Thearch.Gins(obj.ANOP, n, nil) // used
+}
+
+func Isfat(t *Type) bool {
+	if t != nil {
+		switch t.Etype {
+		case TSTRUCT, TARRAY, TSTRING,
+			TINTER: // maybe remove later
+			return true
+		}
+	}
+
+	return false
+}
+
+// Sweep the prog list to mark any used nodes.
+func markautoused(p *obj.Prog) {
+	for ; p != nil; p = p.Link {
+		if p.As == obj.ATYPE || p.As == obj.AVARDEF || p.As == obj.AVARKILL {
+			continue
+		}
+
+		if p.From.Node != nil {
+			((p.From.Node).(*Node)).Used = true
+		}
+
+		if p.To.Node != nil {
+			((p.To.Node).(*Node)).Used = true
+		}
+	}
+}
+
+// Naddr rewrites a to refer to n.
+// It assumes that a is zeroed on entry.
+func Naddr(a *obj.Addr, n *Node) {
+	if n == nil {
+		return
+	}
+
+	if n.Type != nil && n.Type.Etype != TIDEAL {
+		// TODO(rsc): This is undone by the selective clearing of width below,
+		// to match architectures that were not as aggressive in setting width
+		// during naddr. Those widths must be cleared to avoid triggering
+		// failures in gins when it detects real but heretofore latent (and one
+		// hopes innocuous) type mismatches.
+		// The type mismatches should be fixed and the clearing below removed.
+		dowidth(n.Type)
+
+		a.Width = n.Type.Width
+	}
+
+	switch n.Op {
+	default:
+		a := a // copy to let escape into Ctxt.Dconv
+		Debug['h'] = 1
+		Dump("naddr", n)
+		Fatal("naddr: bad %v %v", Oconv(int(n.Op), 0), Ctxt.Dconv(a))
+
+	case OREGISTER:
+		a.Type = obj.TYPE_REG
+		a.Reg = n.Reg
+		a.Sym = nil
+		if Thearch.Thechar == '8' { // TODO(rsc): Never clear a->width.
+			a.Width = 0
+		}
+
+	case OINDREG:
+		a.Type = obj.TYPE_MEM
+		a.Reg = n.Reg
+		a.Sym = Linksym(n.Sym)
+		a.Offset = n.Xoffset
+		if a.Offset != int64(int32(a.Offset)) {
+			Yyerror("offset %d too large for OINDREG", a.Offset)
+		}
+		if Thearch.Thechar == '8' { // TODO(rsc): Never clear a->width.
+			a.Width = 0
+		}
+
+		// n->left is PHEAP ONAME for stack parameter.
+	// compute address of actual parameter on stack.
+	case OPARAM:
+		a.Etype = Simtype[n.Left.Type.Etype]
+
+		a.Width = n.Left.Type.Width
+		a.Offset = n.Xoffset
+		a.Sym = Linksym(n.Left.Sym)
+		a.Type = obj.TYPE_MEM
+		a.Name = obj.NAME_PARAM
+		a.Node = n.Left.Orig
+
+	case OCLOSUREVAR:
+		if !Curfn.Func.Needctxt {
+			Fatal("closurevar without needctxt")
+		}
+		a.Type = obj.TYPE_MEM
+		a.Reg = int16(Thearch.REGCTXT)
+		a.Sym = nil
+		a.Offset = n.Xoffset
+
+	case OCFUNC:
+		Naddr(a, n.Left)
+		a.Sym = Linksym(n.Left.Sym)
+
+	case ONAME:
+		a.Etype = 0
+		if n.Type != nil {
+			a.Etype = Simtype[n.Type.Etype]
+		}
+		a.Offset = n.Xoffset
+		s := n.Sym
+		a.Node = n.Orig
+
+		//if(a->node >= (Node*)&n)
+		//	fatal("stack node");
+		if s == nil {
+			s = Lookup(".noname")
+		}
+		if n.Name.Method {
+			if n.Type != nil {
+				if n.Type.Sym != nil {
+					if n.Type.Sym.Pkg != nil {
+						s = Pkglookup(s.Name, n.Type.Sym.Pkg)
+					}
+				}
+			}
+		}
+
+		a.Type = obj.TYPE_MEM
+		switch n.Class {
+		default:
+			Fatal("naddr: ONAME class %v %d\n", n.Sym, n.Class)
+
+		case PEXTERN:
+			a.Name = obj.NAME_EXTERN
+
+		case PAUTO:
+			a.Name = obj.NAME_AUTO
+
+		case PPARAM, PPARAMOUT:
+			a.Name = obj.NAME_PARAM
+
+		case PFUNC:
+			a.Name = obj.NAME_EXTERN
+			a.Type = obj.TYPE_ADDR
+			a.Width = int64(Widthptr)
+			s = funcsym(s)
+		}
+
+		a.Sym = Linksym(s)
+
+	case OLITERAL:
+		if Thearch.Thechar == '8' {
+			a.Width = 0
+		}
+		switch n.Val.Ctype {
+		default:
+			Fatal("naddr: const %v", Tconv(n.Type, obj.FmtLong))
+
+		case CTFLT:
+			a.Type = obj.TYPE_FCONST
+			a.Val = mpgetflt(n.Val.U.(*Mpflt))
+
+		case CTINT, CTRUNE:
+			a.Sym = nil
+			a.Type = obj.TYPE_CONST
+			a.Offset = Mpgetfix(n.Val.U.(*Mpint))
+
+		case CTSTR:
+			datagostring(n.Val.U.(string), a)
+
+		case CTBOOL:
+			a.Sym = nil
+			a.Type = obj.TYPE_CONST
+			a.Offset = int64(obj.Bool2int(n.Val.U.(bool)))
+
+		case CTNIL:
+			a.Sym = nil
+			a.Type = obj.TYPE_CONST
+			a.Offset = 0
+		}
+
+	case OADDR:
+		Naddr(a, n.Left)
+		a.Etype = uint8(Tptr)
+		if Thearch.Thechar != '5' && Thearch.Thechar != '7' && Thearch.Thechar != '9' { // TODO(rsc): Do this even for arm, ppc64.
+			a.Width = int64(Widthptr)
+		}
+		if a.Type != obj.TYPE_MEM {
+			a := a // copy to let escape into Ctxt.Dconv
+			Fatal("naddr: OADDR %v (from %v)", Ctxt.Dconv(a), Oconv(int(n.Left.Op), 0))
+		}
+		a.Type = obj.TYPE_ADDR
+
+		// itable of interface value
+	case OITAB:
+		Naddr(a, n.Left)
+
+		if a.Type == obj.TYPE_CONST && a.Offset == 0 {
+			break // itab(nil)
+		}
+		a.Etype = uint8(Tptr)
+		a.Width = int64(Widthptr)
+
+		// pointer in a string or slice
+	case OSPTR:
+		Naddr(a, n.Left)
+
+		if a.Type == obj.TYPE_CONST && a.Offset == 0 {
+			break // ptr(nil)
+		}
+		a.Etype = Simtype[Tptr]
+		a.Offset += int64(Array_array)
+		a.Width = int64(Widthptr)
+
+		// len of string or slice
+	case OLEN:
+		Naddr(a, n.Left)
+
+		if a.Type == obj.TYPE_CONST && a.Offset == 0 {
+			break // len(nil)
+		}
+		a.Etype = Simtype[TUINT]
+		a.Offset += int64(Array_nel)
+		if Thearch.Thechar != '5' { // TODO(rsc): Do this even on arm.
+			a.Width = int64(Widthint)
+		}
+
+		// cap of string or slice
+	case OCAP:
+		Naddr(a, n.Left)
+
+		if a.Type == obj.TYPE_CONST && a.Offset == 0 {
+			break // cap(nil)
+		}
+		a.Etype = Simtype[TUINT]
+		a.Offset += int64(Array_cap)
+		if Thearch.Thechar != '5' { // TODO(rsc): Do this even on arm.
+			a.Width = int64(Widthint)
+		}
+	}
+	return
+}
+
+func newplist() *obj.Plist {
+	pl := obj.Linknewplist(Ctxt)
+
+	Pc = Ctxt.NewProg()
+	Clearp(Pc)
+	pl.Firstpc = Pc
+
+	return pl
+}
+
+func nodarg(t *Type, fp int) *Node {
+	var n *Node
+
+	// entire argument struct, not just one arg
+	if t.Etype == TSTRUCT && t.Funarg != 0 {
+		n = Nod(ONAME, nil, nil)
+		n.Sym = Lookup(".args")
+		n.Type = t
+		var savet Iter
+		first := Structfirst(&savet, &t)
+		if first == nil {
+			Fatal("nodarg: bad struct")
+		}
+		if first.Width == BADWIDTH {
+			Fatal("nodarg: offset not computed for %v", t)
+		}
+		n.Xoffset = first.Width
+		n.Addable = true
+		goto fp
+	}
+
+	if t.Etype != TFIELD {
+		Fatal("nodarg: not field %v", t)
+	}
+
+	if fp == 1 {
+		var n *Node
+		for l := Curfn.Func.Dcl; l != nil; l = l.Next {
+			n = l.N
+			if (n.Class == PPARAM || n.Class == PPARAMOUT) && !isblanksym(t.Sym) && n.Sym == t.Sym {
+				return n
+			}
+		}
+	}
+
+	n = Nod(ONAME, nil, nil)
+	n.Type = t.Type
+	n.Sym = t.Sym
+
+	if t.Width == BADWIDTH {
+		Fatal("nodarg: offset not computed for %v", t)
+	}
+	n.Xoffset = t.Width
+	n.Addable = true
+	n.Orig = t.Nname
+
+	// Rewrite argument named _ to __,
+	// or else the assignment to _ will be
+	// discarded during code generation.
+fp:
+	if isblank(n) {
+		n.Sym = Lookup("__")
+	}
+
+	switch fp {
+	case 0: // output arg
+		n.Op = OINDREG
+
+		n.Reg = int16(Thearch.REGSP)
+		if HasLinkRegister() {
+			n.Xoffset += int64(Ctxt.Arch.Ptrsize)
+		}
+
+	case 1: // input arg
+		n.Class = PPARAM
+
+	case 2: // offset output arg
+		Fatal("shouldn't be used")
+	}
+
+	n.Typecheck = 1
+	return n
+}
+
+func Patch(p *obj.Prog, to *obj.Prog) {
+	if p.To.Type != obj.TYPE_BRANCH {
+		Fatal("patch: not a branch")
+	}
+	p.To.Val = to
+	p.To.Offset = to.Pc
+}
+
+func unpatch(p *obj.Prog) *obj.Prog {
+	if p.To.Type != obj.TYPE_BRANCH {
+		Fatal("unpatch: not a branch")
+	}
+	q, _ := p.To.Val.(*obj.Prog)
+	p.To.Val = nil
+	p.To.Offset = 0
+	return q
+}
+
+var reg [100]int       // count of references to reg
+var regstk [100][]byte // allocation sites, when -v is given
+
+func ginit() {
+	for r := range reg {
+		reg[r] = 1
+	}
+
+	for r := Thearch.REGMIN; r <= Thearch.REGMAX; r++ {
+		reg[r-Thearch.REGMIN] = 0
+	}
+	for r := Thearch.FREGMIN; r <= Thearch.FREGMAX; r++ {
+		reg[r-Thearch.REGMIN] = 0
+	}
+
+	for _, r := range Thearch.ReservedRegs {
+		reg[r-Thearch.REGMIN] = 1
+	}
+}
+
+func gclean() {
+	for _, r := range Thearch.ReservedRegs {
+		reg[r-Thearch.REGMIN]--
+	}
+
+	for r := Thearch.REGMIN; r <= Thearch.REGMAX; r++ {
+		n := reg[r-Thearch.REGMIN]
+		if n != 0 {
+			if Debug['v'] != 0 {
+				Regdump()
+			}
+			Yyerror("reg %v left allocated", obj.Rconv(r))
+		}
+	}
+
+	for r := Thearch.FREGMIN; r <= Thearch.FREGMAX; r++ {
+		n := reg[r-Thearch.REGMIN]
+		if n != 0 {
+			if Debug['v'] != 0 {
+				Regdump()
+			}
+			Yyerror("reg %v left allocated", obj.Rconv(r))
+		}
+	}
+}
+
+func Anyregalloc() bool {
+	n := 0
+	for r := Thearch.REGMIN; r <= Thearch.REGMAX; r++ {
+		if reg[r-Thearch.REGMIN] == 0 {
+			n++
+		}
+	}
+	return n > len(Thearch.ReservedRegs)
+}
+
+/*
+ * allocate register of type t, leave in n.
+ * if o != N, o may be reusable register.
+ * caller must Regfree(n).
+ */
+func Regalloc(n *Node, t *Type, o *Node) {
+	if t == nil {
+		Fatal("regalloc: t nil")
+	}
+	et := int(Simtype[t.Etype])
+	if Ctxt.Arch.Regsize == 4 && (et == TINT64 || et == TUINT64) {
+		Fatal("regalloc 64bit")
+	}
+
+	var i int
+Switch:
+	switch et {
+	default:
+		Fatal("regalloc: unknown type %v", t)
+
+	case TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, TINT64, TUINT64, TPTR32, TPTR64, TBOOL:
+		if o != nil && o.Op == OREGISTER {
+			i = int(o.Reg)
+			if Thearch.REGMIN <= i && i <= Thearch.REGMAX {
+				break Switch
+			}
+		}
+		for i = Thearch.REGMIN; i <= Thearch.REGMAX; i++ {
+			if reg[i-Thearch.REGMIN] == 0 {
+				break Switch
+			}
+		}
+		Flusherrors()
+		Regdump()
+		Fatal("out of fixed registers")
+
+	case TFLOAT32, TFLOAT64:
+		if Thearch.Use387 {
+			i = Thearch.FREGMIN // x86.REG_F0
+			break Switch
+		}
+		if o != nil && o.Op == OREGISTER {
+			i = int(o.Reg)
+			if Thearch.FREGMIN <= i && i <= Thearch.FREGMAX {
+				break Switch
+			}
+		}
+		for i = Thearch.FREGMIN; i <= Thearch.FREGMAX; i++ {
+			if reg[i-Thearch.REGMIN] == 0 { // note: REGMIN, not FREGMIN
+				break Switch
+			}
+		}
+		Flusherrors()
+		Regdump()
+		Fatal("out of floating registers")
+
+	case TCOMPLEX64, TCOMPLEX128:
+		Tempname(n, t)
+		return
+	}
+
+	ix := i - Thearch.REGMIN
+	if reg[ix] == 0 && Debug['v'] > 0 {
+		if regstk[ix] == nil {
+			regstk[ix] = make([]byte, 4096)
+		}
+		stk := regstk[ix]
+		n := runtime.Stack(stk[:cap(stk)], false)
+		regstk[ix] = stk[:n]
+	}
+	reg[ix]++
+	Nodreg(n, t, i)
+}
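+
+// Typical pairing (illustrative sketch, not from this CL; the Gmove use
+// is a hypothetical call site):
+//	var r Node
+//	Regalloc(&r, Types[Tptr], nil) // pick any free pointer register
+//	Thearch.Gmove(n, &r)           // use r as scratch
+//	Regfree(&r)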
+
+func Regfree(n *Node) {
+	if n.Op == ONAME {
+		return
+	}
+	if n.Op != OREGISTER && n.Op != OINDREG {
+		Fatal("regfree: not a register")
+	}
+	i := int(n.Reg)
+	if i == Thearch.REGSP {
+		return
+	}
+	switch {
+	case Thearch.REGMIN <= i && i <= Thearch.REGMAX,
+		Thearch.FREGMIN <= i && i <= Thearch.FREGMAX:
+		// ok
+	default:
+		Fatal("regfree: reg out of range")
+	}
+
+	i -= Thearch.REGMIN
+	if reg[i] <= 0 {
+		Fatal("regfree: reg not allocated")
+	}
+	reg[i]--
+	if reg[i] == 0 {
+		regstk[i] = regstk[i][:0]
+	}
+}
+
+// Reginuse reports whether r is in use.
+func Reginuse(r int) bool {
+	switch {
+	case Thearch.REGMIN <= r && r <= Thearch.REGMAX,
+		Thearch.FREGMIN <= r && r <= Thearch.FREGMAX:
+		// ok
+	default:
+		Fatal("reginuse: reg out of range")
+	}
+
+	return reg[r-Thearch.REGMIN] > 0
+}
+
+// Regrealloc(n) undoes the effect of Regfree(n),
+// so that a register can be given up but then reclaimed.
+func Regrealloc(n *Node) {
+	if n.Op != OREGISTER && n.Op != OINDREG {
+		Fatal("regrealloc: not a register")
+	}
+	i := int(n.Reg)
+	if i == Thearch.REGSP {
+		return
+	}
+	switch {
+	case Thearch.REGMIN <= i && i <= Thearch.REGMAX,
+		Thearch.FREGMIN <= i && i <= Thearch.FREGMAX:
+		// ok
+	default:
+		Fatal("regrealloc: reg out of range")
+	}
+
+	i -= Thearch.REGMIN
+	if reg[i] == 0 && Debug['v'] > 0 {
+		if regstk[i] == nil {
+			regstk[i] = make([]byte, 4096)
+		}
+		stk := regstk[i]
+		n := runtime.Stack(stk[:cap(stk)], false)
+		regstk[i] = stk[:n]
+	}
+	reg[i]++
+}
+
+func Regdump() {
+	if Debug['v'] == 0 {
+		fmt.Printf("run compiler with -v for register allocation sites\n")
+		return
+	}
+
+	dump := func(r int) {
+		stk := regstk[r-Thearch.REGMIN]
+		if len(stk) == 0 {
+			return
+		}
+		fmt.Printf("reg %v allocated at:\n", obj.Rconv(r))
+		fmt.Printf("\t%s\n", strings.Replace(strings.TrimSpace(string(stk)), "\n", "\n\t", -1))
+	}
+
+	for r := Thearch.REGMIN; r <= Thearch.REGMAX; r++ {
+		if reg[r-Thearch.REGMIN] != 0 {
+			dump(r)
+		}
+	}
+	for r := Thearch.FREGMIN; r <= Thearch.FREGMAX; r++ {
+		if reg[r-Thearch.REGMIN] != 0 {
+			dump(r)
+		}
+	}
+}
diff --git a/src/cmd/compile/internal/gc/init.go b/src/cmd/compile/internal/gc/init.go
new file mode 100644
index 0000000..b5d1e50
--- /dev/null
+++ b/src/cmd/compile/internal/gc/init.go
@@ -0,0 +1,189 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+//	case OADD:
+//		if(n->right->op == OLITERAL) {
+//			v = n->right->vconst;
+//			naddr(n->left, a, canemitcode);
+//		} else
+//		if(n->left->op == OLITERAL) {
+//			v = n->left->vconst;
+//			naddr(n->right, a, canemitcode);
+//		} else
+//			goto bad;
+//		a->offset += v;
+//		break;
+
+/*
+ * a function named init is a special case.
+ * it is called by the initialization before
+ * main is run. to make it unique within a
+ * package and also uncallable, the name,
+ * normally "pkg.init", is altered to "pkg.init.1".
+ */
+
+var renameinit_initgen int
+
+func renameinit() *Sym {
+	renameinit_initgen++
+	return Lookupf("init.%d", renameinit_initgen)
+}
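+
+// For example (illustrative): the first init func seen in a package
+// compiles as pkg.init.1, the second as pkg.init.2, and so on; the
+// dotted names cannot be written in Go source, so user code cannot
+// call them.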
+
+/*
+ * hand-craft the following initialization code
+ *	var initdone· uint8 				(1)
+ *	func init()					(2)
+ *		if initdone· != 0 {			(3)
+ *			if initdone· == 2		(4)
+ *				return
+ *			throw();			(5)
+ *		}
+ *		initdone· = 1;				(6)
+ *		// over all matching imported symbols
+ *			<pkg>.init()			(7)
+ *		{ <init stmts> }			(8)
+ *		init.<n>() // if any			(9)
+ *		initdone· = 2;				(10)
+ *		return					(11)
+ *	}
+ */
+func anyinit(n *NodeList) bool {
+	// are there any interesting init statements
+	for l := n; l != nil; l = l.Next {
+		switch l.N.Op {
+		case ODCLFUNC, ODCLCONST, ODCLTYPE, OEMPTY:
+			break
+
+		case OAS, OASWB:
+			if isblank(l.N.Left) && candiscard(l.N.Right) {
+				break
+			}
+			fallthrough
+
+		default:
+			return true
+		}
+	}
+
+	// is this main
+	if localpkg.Name == "main" {
+		return true
+	}
+
+	// is there an explicit init function
+	s := Lookup("init.1")
+
+	if s.Def != nil {
+		return true
+	}
+
+	// are there any imported init functions
+	for _, s := range initSyms {
+		if s.Def != nil {
+			return true
+		}
+	}
+
+	// then none
+	return false
+}
+
+func fninit(n *NodeList) {
+	if Debug['A'] != 0 {
+		// sys.go or unsafe.go during compiler build
+		return
+	}
+
+	n = initfix(n)
+	if !anyinit(n) {
+		return
+	}
+
+	var r *NodeList
+
+	// (1)
+	gatevar := newname(Lookup("initdone·"))
+	addvar(gatevar, Types[TUINT8], PEXTERN)
+
+	// (2)
+	Maxarg = 0
+
+	fn := Nod(ODCLFUNC, nil, nil)
+	initsym := Lookup("init")
+	fn.Nname = newname(initsym)
+	fn.Nname.Defn = fn
+	fn.Nname.Ntype = Nod(OTFUNC, nil, nil)
+	declare(fn.Nname, PFUNC)
+	funchdr(fn)
+
+	// (3)
+	a := Nod(OIF, nil, nil)
+
+	a.Ntest = Nod(ONE, gatevar, Nodintconst(0))
+	r = list(r, a)
+
+	// (4)
+	b := Nod(OIF, nil, nil)
+
+	b.Ntest = Nod(OEQ, gatevar, Nodintconst(2))
+	b.Nbody = list1(Nod(ORETURN, nil, nil))
+	a.Nbody = list1(b)
+
+	// (5)
+	b = syslook("throwinit", 0)
+
+	b = Nod(OCALL, b, nil)
+	a.Nbody = list(a.Nbody, b)
+
+	// (6)
+	a = Nod(OAS, gatevar, Nodintconst(1))
+
+	r = list(r, a)
+
+	// (7)
+	for _, s := range initSyms {
+		if s.Def != nil && s != initsym {
+			// could check that it is fn of no args/returns
+			a = Nod(OCALL, s.Def, nil)
+			r = list(r, a)
+		}
+	}
+
+	// (8)
+	r = concat(r, n)
+
+	// (9)
+	// could check that it is fn of no args/returns
+	for i := 1; ; i++ {
+		s := Lookupf("init.%d", i)
+		if s.Def == nil {
+			break
+		}
+		a = Nod(OCALL, s.Def, nil)
+		r = list(r, a)
+	}
+
+	// (10)
+	a = Nod(OAS, gatevar, Nodintconst(2))
+
+	r = list(r, a)
+
+	// (11)
+	a = Nod(ORETURN, nil, nil)
+
+	r = list(r, a)
+	exportsym(fn.Nname)
+
+	fn.Nbody = r
+	funcbody(fn)
+
+	Curfn = fn
+	typecheck(&fn, Etop)
+	typechecklist(r, Etop)
+	Curfn = nil
+	funccompile(fn)
+}
diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go
new file mode 100644
index 0000000..22a5d3d
--- /dev/null
+++ b/src/cmd/compile/internal/gc/inl.go
@@ -0,0 +1,1008 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// The inlining facility makes 2 passes: first caninl determines which
+// functions are suitable for inlining, and for those that are it
+// saves a copy of the body. Then inlcalls walks each function body to
+// expand calls to inlinable functions.
+//
+// The debug['l'] flag controls the aggressiveness. Note that main() swaps level 0 and 1,
+// making 1 the default and -l disable it.  -ll and beyond are useful for flushing out bugs.
+// These additional levels (beyond -l) may be buggy and are not supported.
+//      0: disabled
+//      1: 40-node leaf functions, one-liners, lazy typechecking (default)
+//      2: early typechecking of all imported bodies
+//      3: allow variadic functions
+//      4: allow non-leaf functions (breaks runtime.Caller)
+//
+//  At some point this may get another default and become switch-offable with -N.
+//
+//  The debug['m'] flag enables diagnostic output.  A single -m is useful for verifying
+//  which calls get inlined or not; higher values are for debugging, and may go away at any point.
+//
+// TODO:
+//   - inline functions with ... args
+//   - handle T.meth(f()) with func f() (t T, arg, arg, )
+
+package gc
+
+import (
+	"cmd/internal/obj"
+	"fmt"
+)
+
+// Used during inlsubst[list]
+var inlfn *Node // function currently being inlined
+
+var inlretlabel *Node // target of the goto substituted in place of a return
+
+var inlretvars *NodeList // temp out variables
+
+// Get the function's package.  For ordinary functions it's on the ->sym, but for imported methods
+// the ->sym can be re-used in the local package, so peel it off the receiver's type.
+func fnpkg(fn *Node) *Pkg {
+	if fn.Type.Thistuple != 0 {
+		// method
+		rcvr := getthisx(fn.Type).Type.Type
+
+		if Isptr[rcvr.Etype] {
+			rcvr = rcvr.Type
+		}
+		if rcvr.Sym == nil {
+			Fatal("receiver with no sym: [%v] %v  (%v)", fn.Sym, Nconv(fn, obj.FmtLong), rcvr)
+		}
+		return rcvr.Sym.Pkg
+	}
+
+	// non-method
+	return fn.Sym.Pkg
+}
+
+// Lazy typechecking of imported bodies.  For local functions, caninl will set ->typecheck
+// because they're a copy of an already checked body.
+func typecheckinl(fn *Node) {
+	lno := int(setlineno(fn))
+
+	// typecheckinl is only for imported functions;
+	// their bodies may refer to unsafe as long as the package
+	// was marked safe during import (which was checked then).
+	// the ->inl of a local function has been typechecked before caninl copied it.
+	pkg := fnpkg(fn)
+
+	if pkg == localpkg || pkg == nil {
+		return // typecheckinl on local function
+	}
+
+	if Debug['m'] > 2 {
+		fmt.Printf("typecheck import [%v] %v { %v }\n", fn.Sym, Nconv(fn, obj.FmtLong), Hconv(fn.Func.Inl, obj.FmtSharp))
+	}
+
+	save_safemode := safemode
+	safemode = 0
+
+	savefn := Curfn
+	Curfn = fn
+	typechecklist(fn.Func.Inl, Etop)
+	Curfn = savefn
+
+	safemode = save_safemode
+
+	lineno = int32(lno)
+}
+
+// Caninl determines whether fn is inlineable.
+// If so, caninl saves fn->nbody in fn->inl and substitutes it with a copy.
+// fn and ->nbody will already have been typechecked.
+func caninl(fn *Node) {
+	if fn.Op != ODCLFUNC {
+		Fatal("caninl %v", fn)
+	}
+	if fn.Nname == nil {
+		Fatal("caninl no nname %v", Nconv(fn, obj.FmtSign))
+	}
+
+	// If fn has no body (is defined outside of Go), cannot inline it.
+	if fn.Nbody == nil {
+		return
+	}
+
+	if fn.Typecheck == 0 {
+		Fatal("caninl on non-typechecked function %v", fn)
+	}
+
+	// can't handle ... args yet
+	if Debug['l'] < 3 {
+		for t := fn.Type.Type.Down.Down.Type; t != nil; t = t.Down {
+			if t.Isddd {
+				return
+			}
+		}
+	}
+
+	// Runtime package must not be race instrumented.
+	// Racewalk skips runtime package. However, some runtime code can be
+	// inlined into other packages and instrumented there. To avoid this,
+	// we disable inlining of runtime functions in race mode.
+	// The example that we observed is inlining of LockOSThread,
+	// which led to false race reports on m contents.
+	if flag_race != 0 && myimportpath == "runtime" {
+		return
+	}
+
+	const maxBudget = 80
+	budget := maxBudget // allowed hairiness
+	if ishairylist(fn.Nbody, &budget) || budget < 0 {
+		return
+	}
+
+	savefn := Curfn
+	Curfn = fn
+
+	fn.Nname.Func.Inl = fn.Nbody
+	fn.Nbody = inlcopylist(fn.Nname.Func.Inl)
+	fn.Nname.Func.Inldcl = inlcopylist(fn.Nname.Defn.Func.Dcl)
+	fn.Nname.Func.InlCost = int32(maxBudget - budget)
+
+	// hack, TODO, check for better way to link method nodes back to the thing with the ->inl
+	// this is so export can find the body of a method
+	fn.Type.Nname = fn.Nname
+
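+	// With a single -m this reports each inlinable function, e.g.
+	// "main.go:10: can inline f" (file and line shown are illustrative).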
+	if Debug['m'] > 1 {
+		fmt.Printf("%v: can inline %v as: %v { %v }\n", fn.Line(), Nconv(fn.Nname, obj.FmtSharp), Tconv(fn.Type, obj.FmtSharp), Hconv(fn.Nname.Func.Inl, obj.FmtSharp))
+	} else if Debug['m'] != 0 {
+		fmt.Printf("%v: can inline %v\n", fn.Line(), fn.Nname)
+	}
+
+	Curfn = savefn
+}
+
+// Look for anything we want to punt on.
+func ishairylist(ll *NodeList, budget *int) bool {
+	for ; ll != nil; ll = ll.Next {
+		if ishairy(ll.N, budget) {
+			return true
+		}
+	}
+	return false
+}
+
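+// ishairy reports whether n is too hairy to inline. Each visited node
+// consumes one unit of *budget, and an inlinable call consumes the
+// callee's InlCost; caninl starts the budget at maxBudget (80), so
+// only sufficiently small bodies are accepted.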
+func ishairy(n *Node, budget *int) bool {
+	if n == nil {
+		return false
+	}
+
+	switch n.Op {
+	// Call is okay if inlinable and we have the budget for the body.
+	case OCALLFUNC:
+		if n.Left.Func != nil && n.Left.Func.Inl != nil {
+			*budget -= int(n.Left.Func.InlCost)
+			break
+		}
+		if n.Left.Op == ONAME && n.Left.Left != nil && n.Left.Left.Op == OTYPE && n.Left.Right != nil && n.Left.Right.Op == ONAME { // methods called as functions
+			if n.Left.Sym.Def != nil && n.Left.Sym.Def.Func.Inl != nil {
+				*budget -= int(n.Left.Sym.Def.Func.InlCost)
+				break
+			}
+		}
+		if Debug['l'] < 4 {
+			return true
+		}
+
+	// Call is okay if inlinable and we have the budget for the body.
+	case OCALLMETH:
+		if n.Left.Type == nil {
+			Fatal("no function type for [%p] %v\n", n.Left, Nconv(n.Left, obj.FmtSign))
+		}
+		if n.Left.Type.Nname == nil {
+			Fatal("no function definition for [%p] %v\n", n.Left.Type, Tconv(n.Left.Type, obj.FmtSign))
+		}
+		if n.Left.Type.Nname.Func.Inl != nil {
+			*budget -= int(n.Left.Type.Nname.Func.InlCost)
+			break
+		}
+		if Debug['l'] < 4 {
+			return true
+		}
+
+	// Things that are too hairy, irrespective of the budget
+	case OCALL, OCALLINTER, OPANIC, ORECOVER:
+		if Debug['l'] < 4 {
+			return true
+		}
+
+	case OCLOSURE,
+		OCALLPART,
+		ORANGE,
+		OFOR,
+		OSELECT,
+		OSWITCH,
+		OPROC,
+		ODEFER,
+		ODCLTYPE,  // can't print yet
+		ODCLCONST, // can't print yet
+		ORETJMP:
+		return true
+	}
+
+	(*budget)--
+
+	return *budget < 0 || ishairy(n.Left, budget) || ishairy(n.Right, budget) || ishairylist(n.List, budget) || ishairylist(n.Rlist, budget) || ishairylist(n.Ninit, budget) || ishairy(n.Ntest, budget) || ishairy(n.Nincr, budget) || ishairylist(n.Nbody, budget) || ishairylist(n.Nelse, budget)
+}
+
+// Inlcopy and inlcopylist recursively copy the body of a function.
+// Any name-like node of non-local class is marked for re-export by adding it to
+// the exportlist.
+func inlcopylist(ll *NodeList) *NodeList {
+	var l *NodeList
+	for ; ll != nil; ll = ll.Next {
+		l = list(l, inlcopy(ll.N))
+	}
+	return l
+}
+
+func inlcopy(n *Node) *Node {
+	if n == nil {
+		return nil
+	}
+
+	switch n.Op {
+	case ONAME, OTYPE, OLITERAL:
+		return n
+	}
+
+	m := Nod(OXXX, nil, nil)
+	*m = *n
+	if m.Func != nil {
+		m.Func.Inl = nil
+	}
+	m.Left = inlcopy(n.Left)
+	m.Right = inlcopy(n.Right)
+	m.List = inlcopylist(n.List)
+	m.Rlist = inlcopylist(n.Rlist)
+	m.Ninit = inlcopylist(n.Ninit)
+	m.Ntest = inlcopy(n.Ntest)
+	m.Nincr = inlcopy(n.Nincr)
+	m.Nbody = inlcopylist(n.Nbody)
+	m.Nelse = inlcopylist(n.Nelse)
+
+	return m
+}
+
+// Inlcalls/nodelist/node walks fn's statements and expressions and substitutes any
+// calls made to inlineable functions.  This is the external entry point.
+func inlcalls(fn *Node) {
+	savefn := Curfn
+	Curfn = fn
+	inlnode(&fn)
+	if fn != Curfn {
+		Fatal("inlnode replaced curfn")
+	}
+	Curfn = savefn
+}
+
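+// An OINLCALL node (built by mkinlcall1 below) carries the parameter
+// assignments in Ninit, the inlined body in Nbody, and the result
+// temporaries in Rlist. The inlconv2* helpers flatten such a node
+// into its surrounding context.
+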
+// Turn an OINLCALL into a statement.
+func inlconv2stmt(n *Node) {
+	n.Op = OBLOCK
+
+	// n->ninit stays
+	n.List = n.Nbody
+
+	n.Nbody = nil
+	n.Rlist = nil
+}
+
+// Turn an OINLCALL into a single valued expression.
+func inlconv2expr(np **Node) {
+	n := *np
+	r := n.Rlist.N
+	addinit(&r, concat(n.Ninit, n.Nbody))
+	*np = r
+}
+
+// Turn the rlist (with the return values) of the OINLCALL in
+// n into an expression list, lumping the ninit and body
+// containing the inlined statements onto the first list element so
+// that order is preserved. Used in return, oas2func and call
+// statements.
+func inlconv2list(n *Node) *NodeList {
+	if n.Op != OINLCALL || n.Rlist == nil {
+		Fatal("inlconv2list %v\n", Nconv(n, obj.FmtSign))
+	}
+
+	l := n.Rlist
+	addinit(&l.N, concat(n.Ninit, n.Nbody))
+	return l
+}
+
+func inlnodelist(l *NodeList) {
+	for ; l != nil; l = l.Next {
+		inlnode(&l.N)
+	}
+}
+
+// inlnode recurses over the tree to find inlineable calls, which will
+// be turned into OINLCALLs by mkinlcall.  When the recursion comes
+// back up, it will examine left, right, list, rlist, ninit, ntest, nincr,
+// nbody and nelse and use one of the 4 inlconv/glue functions above
+// to turn the OINLCALL into an expression, a statement, or patch it
+// into this node's list or rlist as appropriate.
+// NOTE it makes no sense to pass the glue functions down the
+// recursion to the level where the OINLCALL gets created because they
+// have to edit /this/ n, so you'd have to push that one down as well,
+// but then you may as well do it here.  So this is cleaner and
+// shorter and less complicated.
+func inlnode(np **Node) {
+	if *np == nil {
+		return
+	}
+
+	n := *np
+
+	switch n.Op {
+	// inhibit inlining of their argument
+	case ODEFER, OPROC:
+		switch n.Left.Op {
+		case OCALLFUNC, OCALLMETH:
+			n.Left.Etype = n.Op
+		}
+		fallthrough
+
+		// TODO do them here (or earlier),
+	// so escape analysis can avoid more heapmoves.
+	case OCLOSURE:
+		return
+	}
+
+	lno := int(setlineno(n))
+
+	inlnodelist(n.Ninit)
+	for l := n.Ninit; l != nil; l = l.Next {
+		if l.N.Op == OINLCALL {
+			inlconv2stmt(l.N)
+		}
+	}
+
+	inlnode(&n.Left)
+	if n.Left != nil && n.Left.Op == OINLCALL {
+		inlconv2expr(&n.Left)
+	}
+
+	inlnode(&n.Right)
+	if n.Right != nil && n.Right.Op == OINLCALL {
+		inlconv2expr(&n.Right)
+	}
+
+	inlnodelist(n.List)
+	switch n.Op {
+	case OBLOCK:
+		for l := n.List; l != nil; l = l.Next {
+			if l.N.Op == OINLCALL {
+				inlconv2stmt(l.N)
+			}
+		}
+
+		// if we just replaced arg in f(arg()) or return arg with an inlined call
+	// and arg returns multiple values, glue as list
+	case ORETURN,
+		OCALLFUNC,
+		OCALLMETH,
+		OCALLINTER,
+		OAPPEND,
+		OCOMPLEX:
+		if count(n.List) == 1 && n.List.N.Op == OINLCALL && count(n.List.N.Rlist) > 1 {
+			n.List = inlconv2list(n.List.N)
+			break
+		}
+		fallthrough
+
+	default:
+		for l := n.List; l != nil; l = l.Next {
+			if l.N.Op == OINLCALL {
+				inlconv2expr(&l.N)
+			}
+		}
+	}
+
+	inlnodelist(n.Rlist)
+	switch n.Op {
+	case OAS2FUNC:
+		if n.Rlist.N.Op == OINLCALL {
+			n.Rlist = inlconv2list(n.Rlist.N)
+			n.Op = OAS2
+			n.Typecheck = 0
+			typecheck(np, Etop)
+			break
+		}
+		fallthrough
+
+	default:
+		for l := n.Rlist; l != nil; l = l.Next {
+			if l.N.Op == OINLCALL {
+				inlconv2expr(&l.N)
+			}
+		}
+	}
+
+	inlnode(&n.Ntest)
+	if n.Ntest != nil && n.Ntest.Op == OINLCALL {
+		inlconv2expr(&n.Ntest)
+	}
+
+	inlnode(&n.Nincr)
+	if n.Nincr != nil && n.Nincr.Op == OINLCALL {
+		inlconv2stmt(n.Nincr)
+	}
+
+	inlnodelist(n.Nbody)
+	for l := n.Nbody; l != nil; l = l.Next {
+		if l.N.Op == OINLCALL {
+			inlconv2stmt(l.N)
+		}
+	}
+
+	inlnodelist(n.Nelse)
+	for l := n.Nelse; l != nil; l = l.Next {
+		if l.N.Op == OINLCALL {
+			inlconv2stmt(l.N)
+		}
+	}
+
+	// with all the branches out of the way, it is now time to
+	// transmogrify this node itself unless inhibited by the
+	// switch at the top of this function.
+	switch n.Op {
+	case OCALLFUNC, OCALLMETH:
+		if n.Etype == OPROC || n.Etype == ODEFER {
+			return
+		}
+	}
+
+	switch n.Op {
+	case OCALLFUNC:
+		if Debug['m'] > 3 {
+			fmt.Printf("%v:call to func %v\n", n.Line(), Nconv(n.Left, obj.FmtSign))
+		}
+		if n.Left.Func != nil && n.Left.Func.Inl != nil { // normal case
+			mkinlcall(np, n.Left, n.Isddd)
+		} else if n.Left.Op == ONAME && n.Left.Left != nil && n.Left.Left.Op == OTYPE && n.Left.Right != nil && n.Left.Right.Op == ONAME { // methods called as functions
+			if n.Left.Sym.Def != nil {
+				mkinlcall(np, n.Left.Sym.Def, n.Isddd)
+			}
+		}
+
+	case OCALLMETH:
+		if Debug['m'] > 3 {
+			fmt.Printf("%v:call to meth %v\n", n.Line(), Nconv(n.Left.Right, obj.FmtLong))
+		}
+
+		// typecheck should have resolved ODOTMETH->type, whose nname points to the actual function.
+		if n.Left.Type == nil {
+			Fatal("no function type for [%p] %v\n", n.Left, Nconv(n.Left, obj.FmtSign))
+		}
+
+		if n.Left.Type.Nname == nil {
+			Fatal("no function definition for [%p] %v\n", n.Left.Type, Tconv(n.Left.Type, obj.FmtSign))
+		}
+
+		mkinlcall(np, n.Left.Type.Nname, n.Isddd)
+	}
+
+	lineno = int32(lno)
+}
+
+func mkinlcall(np **Node, fn *Node, isddd bool) {
+	save_safemode := safemode
+
+	// imported functions may refer to unsafe as long as the
+	// package was marked safe during import (already checked).
+	pkg := fnpkg(fn)
+
+	if pkg != localpkg && pkg != nil {
+		safemode = 0
+	}
+	mkinlcall1(np, fn, isddd)
+	safemode = save_safemode
+}
+
+func tinlvar(t *Type) *Node {
+	if t.Nname != nil && !isblank(t.Nname) {
+		if t.Nname.Name.Inlvar == nil {
+			Fatal("missing inlvar for %v\n", t.Nname)
+		}
+		return t.Nname.Name.Inlvar
+	}
+
+	typecheck(&nblank, Erv|Easgn)
+	return nblank
+}
+
+var inlgen int
+
+// if *np is a call, and fn is a function with an inlinable body, substitute *np with an OINLCALL.
+// On return ninit has the parameter assignments, the nbody is the
+// inlined function body and list, rlist contain the input, output
+// parameters.
+func mkinlcall1(np **Node, fn *Node, isddd bool) {
+	if fn.Func.Inl == nil {
+		return
+	}
+
+	if fn == Curfn || fn.Defn == Curfn {
+		return
+	}
+
+	if Debug['l'] < 2 {
+		typecheckinl(fn)
+	}
+
+	n := *np
+
+	// Bingo, we have a function node, and it has an inlineable body
+	if Debug['m'] > 1 {
+		fmt.Printf("%v: inlining call to %v %v { %v }\n", n.Line(), fn.Sym, Tconv(fn.Type, obj.FmtSharp), Hconv(fn.Func.Inl, obj.FmtSharp))
+	} else if Debug['m'] != 0 {
+		fmt.Printf("%v: inlining call to %v\n", n.Line(), fn)
+	}
+
+	if Debug['m'] > 2 {
+		fmt.Printf("%v: Before inlining: %v\n", n.Line(), Nconv(n, obj.FmtSign))
+	}
+
+	saveinlfn := inlfn
+	inlfn = fn
+
+	ninit := n.Ninit
+
+	//dumplist("ninit pre", ninit);
+
+	var dcl *NodeList
+	if fn.Defn != nil { // local function
+		dcl = fn.Func.Inldcl
+	} else { // imported function
+		dcl = fn.Func.Dcl
+	}
+
+	inlretvars = nil
+	i := 0
+
+	// Make temp names to use instead of the originals
+	for ll := dcl; ll != nil; ll = ll.Next {
+		if ll.N.Class == PPARAMOUT { // return values handled below.
+			continue
+		}
+		if ll.N.Op == ONAME {
+			ll.N.Name.Inlvar = inlvar(ll.N)
+
+			// Typecheck because inlvar is not necessarily a function parameter.
+			typecheck(&ll.N.Name.Inlvar, Erv)
+
+			if ll.N.Class&^PHEAP != PAUTO {
+				ninit = list(ninit, Nod(ODCL, ll.N.Name.Inlvar, nil)) // otherwise gen won't emit the allocations for heapallocs
+			}
+		}
+	}
+
+	// temporaries for return values.
+	var m *Node
+	for t := getoutargx(fn.Type).Type; t != nil; t = t.Down {
+		if t != nil && t.Nname != nil && !isblank(t.Nname) {
+			m = inlvar(t.Nname)
+			typecheck(&m, Erv)
+			t.Nname.Name.Inlvar = m
+		} else {
+			// anonymous return values, synthesize names for use in assignment that replaces return
+			m = retvar(t, i)
+			i++
+		}
+
+		ninit = list(ninit, Nod(ODCL, m, nil))
+		inlretvars = list(inlretvars, m)
+	}
+
+	// assign receiver.
+	var as *Node
+	if fn.Type.Thistuple != 0 && n.Left.Op == ODOTMETH {
+		// method call with a receiver.
+		t := getthisx(fn.Type).Type
+
+		if t != nil && t.Nname != nil && !isblank(t.Nname) && t.Nname.Name.Inlvar == nil {
+			Fatal("missing inlvar for %v\n", t.Nname)
+		}
+		if n.Left.Left == nil {
+			Fatal("method call without receiver: %v", Nconv(n, obj.FmtSign))
+		}
+		if t == nil {
+			Fatal("method call unknown receiver type: %v", Nconv(n, obj.FmtSign))
+		}
+		as = Nod(OAS, tinlvar(t), n.Left.Left)
+		typecheck(&as, Etop)
+		ninit = list(ninit, as)
+	}
+
+	// check if inlined function is variadic.
+	variadic := false
+
+	var varargtype *Type
+	varargcount := 0
+	for t := fn.Type.Type.Down.Down.Type; t != nil; t = t.Down {
+		if t.Isddd {
+			variadic = true
+			varargtype = t.Type
+		}
+	}
+
+	// but if the argument is dotted too, forget about variadicity.
+	if variadic && isddd {
+		variadic = false
+	}
+
+	// check if argument is actually a returned tuple from call.
+	multiret := 0
+
+	if n.List != nil && n.List.Next == nil {
+		switch n.List.N.Op {
+		case OCALL, OCALLFUNC, OCALLINTER, OCALLMETH:
+			if n.List.N.Left.Type.Outtuple > 1 {
+				multiret = n.List.N.Left.Type.Outtuple - 1
+			}
+		}
+	}
+
+	if variadic {
+		varargcount = count(n.List) + multiret
+		if n.Left.Op != ODOTMETH {
+			varargcount -= fn.Type.Thistuple
+		}
+		varargcount -= fn.Type.Intuple - 1
+	}
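+	// E.g. for func f(a int, b ...int) called as f(x, y, z):
+	// varargcount = 3 - (2-1) = 2, so y and z will form the slice for b.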
+
+	// assign arguments to the parameters' temp names
+	as = Nod(OAS2, nil, nil)
+
+	as.Rlist = n.List
+	ll := n.List
+
+	// TODO: if len(nlist) == 1 but multiple args, check that n->list->n is a call?
+	if fn.Type.Thistuple != 0 && n.Left.Op != ODOTMETH {
+		// non-method call to method
+		if n.List == nil {
+			Fatal("non-method call to method without first arg: %v", Nconv(n, obj.FmtSign))
+		}
+
+		// append receiver inlvar to LHS.
+		t := getthisx(fn.Type).Type
+
+		if t != nil && t.Nname != nil && !isblank(t.Nname) && t.Nname.Name.Inlvar == nil {
+			Fatal("missing inlvar for %v\n", t.Nname)
+		}
+		if t == nil {
+			Fatal("method call unknown receiver type: %v", Nconv(n, obj.FmtSign))
+		}
+		as.List = list(as.List, tinlvar(t))
+		ll = ll.Next // track argument count.
+	}
+
+	// append ordinary arguments to LHS.
+	chkargcount := n.List != nil && n.List.Next != nil
+
+	var vararg *Node      // the slice argument to a variadic call
+	var varargs *NodeList // the list of LHS names to put in vararg.
+	if !chkargcount {
+		// 0 or 1 expression on RHS.
+		var i int
+		for t := getinargx(fn.Type).Type; t != nil; t = t.Down {
+			if variadic && t.Isddd {
+				vararg = tinlvar(t)
+				for i = 0; i < varargcount && ll != nil; i++ {
+					m = argvar(varargtype, i)
+					varargs = list(varargs, m)
+					as.List = list(as.List, m)
+				}
+
+				break
+			}
+
+			as.List = list(as.List, tinlvar(t))
+		}
+	} else {
+		// match arguments except final variadic (unless the call is dotted itself)
+		var t *Type
+		for t = getinargx(fn.Type).Type; t != nil; {
+			if ll == nil {
+				break
+			}
+			if variadic && t.Isddd {
+				break
+			}
+			as.List = list(as.List, tinlvar(t))
+			t = t.Down
+			ll = ll.Next
+		}
+
+		// match varargcount arguments with variadic parameters.
+		if variadic && t != nil && t.Isddd {
+			vararg = tinlvar(t)
+			var i int
+			for i = 0; i < varargcount && ll != nil; i++ {
+				m = argvar(varargtype, i)
+				varargs = list(varargs, m)
+				as.List = list(as.List, m)
+				ll = ll.Next
+			}
+
+			if i == varargcount {
+				t = t.Down
+			}
+		}
+
+		if ll != nil || t != nil {
+			Fatal("arg count mismatch: %v  vs %v\n", Tconv(getinargx(fn.Type), obj.FmtSharp), Hconv(n.List, obj.FmtComma))
+		}
+	}
+
+	if as.Rlist != nil {
+		typecheck(&as, Etop)
+		ninit = list(ninit, as)
+	}
+
+	// turn the variadic args into a slice.
+	if variadic {
+		as = Nod(OAS, vararg, nil)
+		if varargcount == 0 {
+			as.Right = nodnil()
+			as.Right.Type = varargtype
+		} else {
+			vararrtype := typ(TARRAY)
+			vararrtype.Type = varargtype.Type
+			vararrtype.Bound = int64(varargcount)
+
+			as.Right = Nod(OCOMPLIT, nil, typenod(varargtype))
+			as.Right.List = varargs
+			as.Right = Nod(OSLICE, as.Right, Nod(OKEY, nil, nil))
+		}
+
+		typecheck(&as, Etop)
+		ninit = list(ninit, as)
+	}
+
+	// zero the outparams
+	for ll := inlretvars; ll != nil; ll = ll.Next {
+		as = Nod(OAS, ll.N, nil)
+		typecheck(&as, Etop)
+		ninit = list(ninit, as)
+	}
+
+	inlretlabel = newlabel_inl()
+	inlgen++
+	body := inlsubstlist(fn.Func.Inl)
+
+	body = list(body, Nod(OGOTO, inlretlabel, nil)) // avoid 'not used' when function doesn't have a return
+	body = list(body, Nod(OLABEL, inlretlabel, nil))
+
+	typechecklist(body, Etop)
+
+	//dumplist("ninit post", ninit);
+
+	call := Nod(OINLCALL, nil, nil)
+
+	call.Ninit = ninit
+	call.Nbody = body
+	call.Rlist = inlretvars
+	call.Type = n.Type
+	call.Typecheck = 1
+
+	setlno(call, int(n.Lineno))
+
+	//dumplist("call body", body);
+
+	*np = call
+
+	inlfn = saveinlfn
+
+	// transitive inlining
+	// might be nice to do this before exporting the body,
+	// but can't emit the body with inlining expanded.
+	// instead we emit the things that the body needs
+	// and each use must redo the inlining.
+	// luckily these are small.
+	body = fn.Func.Inl
+	fn.Func.Inl = nil // prevent infinite recursion (shouldn't happen anyway)
+	inlnodelist(call.Nbody)
+	for ll := call.Nbody; ll != nil; ll = ll.Next {
+		if ll.N.Op == OINLCALL {
+			inlconv2stmt(ll.N)
+		}
+	}
+	fn.Func.Inl = body
+
+	if Debug['m'] > 2 {
+		fmt.Printf("%v: After inlining %v\n\n", n.Line(), Nconv(*np, obj.FmtSign))
+	}
+}
+
+// Every time we expand a function we generate a new set of tmpnames,
+// PAUTOs in the calling function, and link them off of the
+// PPARAMs, PAUTOs and PPARAMOUTs of the called function.
+func inlvar(var_ *Node) *Node {
+	if Debug['m'] > 3 {
+		fmt.Printf("inlvar %v\n", Nconv(var_, obj.FmtSign))
+	}
+
+	n := newname(var_.Sym)
+	n.Type = var_.Type
+	n.Class = PAUTO
+	n.Used = true
+	n.Curfn = Curfn // the calling function, not the called one
+	n.Addrtaken = var_.Addrtaken
+
+	// Esc pass won't run if we're inlining into an iface wrapper.
+	// Luckily, we can steal the results from the target func.
+	// If inlining a function defined in another package after
+	// escape analysis is done, treat all local vars as escaping.
+	// See issue 9537.
+	if var_.Esc == EscHeap || (inl_nonlocal != 0 && var_.Op == ONAME) {
+		addrescapes(n)
+	}
+
+	Curfn.Func.Dcl = list(Curfn.Func.Dcl, n)
+	return n
+}
+
+// Synthesize a variable to store the inlined function's results in.
+func retvar(t *Type, i int) *Node {
+	n := newname(Lookupf("~r%d", i))
+	n.Type = t.Type
+	n.Class = PAUTO
+	n.Used = true
+	n.Curfn = Curfn // the calling function, not the called one
+	Curfn.Func.Dcl = list(Curfn.Func.Dcl, n)
+	return n
+}
+
+// Synthesize a variable to store the inlined function's arguments
+// when they come from a multiple return call.
+func argvar(t *Type, i int) *Node {
+	n := newname(Lookupf("~arg%d", i))
+	n.Type = t.Type
+	n.Class = PAUTO
+	n.Used = true
+	n.Curfn = Curfn // the calling function, not the called one
+	Curfn.Func.Dcl = list(Curfn.Func.Dcl, n)
+	return n
+}
+
+var newlabel_inl_label int
+
+func newlabel_inl() *Node {
+	newlabel_inl_label++
+	n := newname(Lookupf(".inlret%.6d", newlabel_inl_label))
+	n.Etype = 1 // flag 'safe' for escape analysis (no backjumps)
+	return n
+}
+
+// inlsubst and inlsubstlist recursively copy the body of the saved
+// pristine ->inl body of the function while substituting references
+// to input/output parameters with ones to the tmpnames, and
+// substituting returns with assignments to the output.
+func inlsubstlist(ll *NodeList) *NodeList {
+	var l *NodeList
+	for ; ll != nil; ll = ll.Next {
+		l = list(l, inlsubst(ll.N))
+	}
+	return l
+}
+
+func inlsubst(n *Node) *Node {
+	if n == nil {
+		return nil
+	}
+
+	switch n.Op {
+	case ONAME:
+		if n.Name.Inlvar != nil { // These will be set during inlnode
+			if Debug['m'] > 2 {
+				fmt.Printf("substituting name %v  ->  %v\n", Nconv(n, obj.FmtSign), Nconv(n.Name.Inlvar, obj.FmtSign))
+			}
+			return n.Name.Inlvar
+		}
+
+		if Debug['m'] > 2 {
+			fmt.Printf("not substituting name %v\n", Nconv(n, obj.FmtSign))
+		}
+		return n
+
+	case OLITERAL, OTYPE:
+		return n
+
+		// Since we don't handle bodies with closures, this return is guaranteed to belong to the current inlined function.
+
+	//		dump("Return before substitution", n);
+	case ORETURN:
+		m := Nod(OGOTO, inlretlabel, nil)
+
+		m.Ninit = inlsubstlist(n.Ninit)
+
+		if inlretvars != nil && n.List != nil {
+			as := Nod(OAS2, nil, nil)
+
+			// shallow copy or OINLCALL->rlist will be the same list, and later walk and typecheck may clobber that.
+			for ll := inlretvars; ll != nil; ll = ll.Next {
+				as.List = list(as.List, ll.N)
+			}
+			as.Rlist = inlsubstlist(n.List)
+			typecheck(&as, Etop)
+			m.Ninit = list(m.Ninit, as)
+		}
+
+		typechecklist(m.Ninit, Etop)
+		typecheck(&m, Etop)
+
+		//		dump("Return after substitution", m);
+		return m
+
+	case OGOTO, OLABEL:
+		m := Nod(OXXX, nil, nil)
+		*m = *n
+		m.Ninit = nil
+		p := fmt.Sprintf("%s·%d", n.Left.Sym.Name, inlgen)
+		m.Left = newname(Lookup(p))
+
+		return m
+	}
+
+	m := Nod(OXXX, nil, nil)
+	*m = *n
+	m.Ninit = nil
+
+	if n.Op == OCLOSURE {
+		Fatal("cannot inline function containing closure: %v", Nconv(n, obj.FmtSign))
+	}
+
+	m.Left = inlsubst(n.Left)
+	m.Right = inlsubst(n.Right)
+	m.List = inlsubstlist(n.List)
+	m.Rlist = inlsubstlist(n.Rlist)
+	m.Ninit = concat(m.Ninit, inlsubstlist(n.Ninit))
+	m.Ntest = inlsubst(n.Ntest)
+	m.Nincr = inlsubst(n.Nincr)
+	m.Nbody = inlsubstlist(n.Nbody)
+	m.Nelse = inlsubstlist(n.Nelse)
+
+	return m
+}
+
+// Plaster over line numbers
+func setlnolist(ll *NodeList, lno int) {
+	for ; ll != nil; ll = ll.Next {
+		setlno(ll.N, lno)
+	}
+}
+
+func setlno(n *Node, lno int) {
+	if n == nil {
+		return
+	}
+
+	// don't clobber names, unless they're freshly synthesized
+	if n.Op != ONAME || n.Lineno == 0 {
+		n.Lineno = int32(lno)
+	}
+
+	setlno(n.Left, lno)
+	setlno(n.Right, lno)
+	setlnolist(n.List, lno)
+	setlnolist(n.Rlist, lno)
+	setlnolist(n.Ninit, lno)
+	setlno(n.Ntest, lno)
+	setlno(n.Nincr, lno)
+	setlnolist(n.Nbody, lno)
+	setlnolist(n.Nelse, lno)
+}
diff --git a/src/cmd/compile/internal/gc/lex.go b/src/cmd/compile/internal/gc/lex.go
new file mode 100644
index 0000000..f921140
--- /dev/null
+++ b/src/cmd/compile/internal/gc/lex.go
@@ -0,0 +1,2601 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go tool yacc go.y
+//go:generate go run mkbuiltin.go runtime unsafe
+
+package gc
+
+import (
+	"bytes"
+	"cmd/internal/obj"
+	"flag"
+	"fmt"
+	"io"
+	"log"
+	"os"
+	"path"
+	"strconv"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+)
+
+var yyprev int
+
+var yylast int
+
+var imported_unsafe int
+
+var goos string
+
+var goarch string
+
+var goroot string
+
+var (
+	Debug_wb     int
+	Debug_append int
+	Debug_slice  int
+)
+
+// Debug arguments.
+// These can be specified with the -d flag, as in "-d nil"
+// to set the debug_checknil variable. In general the list passed
+// to -d can be comma-separated.
+var debugtab = []struct {
+	name string
+	val  *int
+}{
+	{"append", &Debug_append},         // print information about append compilation
+	{"disablenil", &Disable_checknil}, // disable nil checks
+	{"gcprog", &Debug_gcprog},         // print dump of GC programs
+	{"nil", &Debug_checknil},          // print information about nil checks
+	{"slice", &Debug_slice},           // print information about slice compilation
+	{"typeassert", &Debug_typeassert}, // print information about type assertion inlining
+	{"wb", &Debug_wb},                 // print information about write barriers
+}
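+
+// For example, "-d nil,append=2" enables nil-check diagnostics and
+// sets append diagnostics to level 2; parsing happens in Main below.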
+
+// Our own isdigit, isspace, isalpha, isalnum that take care
+// of EOF and other out of range arguments.
+func yy_isdigit(c int) bool {
+	return c >= 0 && c <= 0xFF && isdigit(c)
+}
+
+func yy_isspace(c int) bool {
+	return c == ' ' || c == '\t' || c == '\n' || c == '\r'
+}
+
+func yy_isalpha(c int) bool {
+	return 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z'
+}
+
+func yy_isalnum(c int) bool {
+	return c >= 0 && c <= 0xFF && isalnum(c)
+}
+
+// Code below should use the yy_* helpers above rather than isdigit etc. directly.
+
+const (
+	EOF = -1
+)
+
+func usage() {
+	fmt.Printf("usage: %cg [options] file.go...\n", Thearch.Thechar)
+	obj.Flagprint(1)
+	Exit(2)
+}
+
+func hidePanic() {
+	if nsavederrors+nerrors > 0 {
+		// If we've already complained about things
+		// in the program, don't bother complaining
+		// about a panic too; let the user clean up
+		// the code and try again.
+		if err := recover(); err != nil {
+			errorexit()
+		}
+	}
+}
+
+func doversion() {
+	p := obj.Expstring()
+	if p == "X:none" {
+		p = ""
+	}
+	sep := ""
+	if p != "" {
+		sep = " "
+	}
+	fmt.Printf("%cg version %s%s%s\n", Thearch.Thechar, obj.Getgoversion(), sep, p)
+	os.Exit(0)
+}
+
+func Main() {
+	defer hidePanic()
+
+	// Allow GOARCH=thearch.thestring or GOARCH=thearch.thestringsuffix,
+	// but not other values.
+	p := obj.Getgoarch()
+
+	if !strings.HasPrefix(p, Thearch.Thestring) {
+		log.Fatalf("cannot use %cg with GOARCH=%s", Thearch.Thechar, p)
+	}
+	goarch = p
+
+	Thearch.Linkarchinit()
+	Ctxt = obj.Linknew(Thearch.Thelinkarch)
+	Ctxt.Diag = Yyerror
+	Ctxt.Bso = &bstdout
+	bstdout = *obj.Binitw(os.Stdout)
+
+	localpkg = mkpkg("")
+	localpkg.Prefix = "\"\""
+
+	// pseudo-package, for scoping
+	builtinpkg = mkpkg("go.builtin")
+
+	builtinpkg.Prefix = "go.builtin" // not go%2ebuiltin
+
+	// pseudo-package, accessed by import "unsafe"
+	unsafepkg = mkpkg("unsafe")
+
+	unsafepkg.Name = "unsafe"
+
+	// real package, referred to by generated runtime calls
+	Runtimepkg = mkpkg("runtime")
+
+	Runtimepkg.Name = "runtime"
+
+	// pseudo-packages used in symbol tables
+	gostringpkg = mkpkg("go.string")
+
+	gostringpkg.Name = "go.string"
+	gostringpkg.Prefix = "go.string" // not go%2estring
+
+	itabpkg = mkpkg("go.itab")
+
+	itabpkg.Name = "go.itab"
+	itabpkg.Prefix = "go.itab" // not go%2eitab
+
+	weaktypepkg = mkpkg("go.weak.type")
+
+	weaktypepkg.Name = "go.weak.type"
+	weaktypepkg.Prefix = "go.weak.type" // not go%2eweak%2etype
+
+	typelinkpkg = mkpkg("go.typelink")
+	typelinkpkg.Name = "go.typelink"
+	typelinkpkg.Prefix = "go.typelink" // not go%2etypelink
+
+	trackpkg = mkpkg("go.track")
+
+	trackpkg.Name = "go.track"
+	trackpkg.Prefix = "go.track" // not go%2etrack
+
+	typepkg = mkpkg("type")
+
+	typepkg.Name = "type"
+
+	goroot = obj.Getgoroot()
+	goos = obj.Getgoos()
+
+	Nacl = goos == "nacl"
+	if Nacl {
+		flag_largemodel = 1
+	}
+
+	outfile = ""
+	obj.Flagcount("+", "compiling runtime", &compiling_runtime)
+	obj.Flagcount("%", "debug non-static initializers", &Debug['%'])
+	obj.Flagcount("A", "for bootstrapping, allow 'any' type", &Debug['A'])
+	obj.Flagcount("B", "disable bounds checking", &Debug['B'])
+	obj.Flagstr("D", "set relative `path` for local imports", &localimport)
+	obj.Flagcount("E", "debug symbol export", &Debug['E'])
+	obj.Flagfn1("I", "add `directory` to import search path", addidir)
+	obj.Flagcount("K", "debug missing line numbers", &Debug['K'])
+	obj.Flagcount("L", "use full (long) path in error messages", &Debug['L'])
+	obj.Flagcount("M", "debug move generation", &Debug['M'])
+	obj.Flagcount("N", "disable optimizations", &Debug['N'])
+	obj.Flagcount("P", "debug peephole optimizer", &Debug['P'])
+	obj.Flagcount("R", "debug register optimizer", &Debug['R'])
+	obj.Flagcount("S", "print assembly listing", &Debug['S'])
+	obj.Flagfn0("V", "print compiler version", doversion)
+	obj.Flagcount("W", "debug parse tree after type checking", &Debug['W'])
+	obj.Flagstr("asmhdr", "write assembly header to `file`", &asmhdr)
+	obj.Flagcount("complete", "compiling complete package (no C or assembly)", &pure_go)
+	obj.Flagstr("d", "print debug information about items in `list`", &debugstr)
+	obj.Flagcount("e", "no limit on number of errors reported", &Debug['e'])
+	obj.Flagcount("f", "debug stack frames", &Debug['f'])
+	obj.Flagcount("g", "debug code generation", &Debug['g'])
+	obj.Flagcount("h", "halt on error", &Debug['h'])
+	obj.Flagcount("i", "debug line number stack", &Debug['i'])
+	obj.Flagstr("installsuffix", "set pkg directory `suffix`", &flag_installsuffix)
+	obj.Flagcount("j", "debug runtime-initialized variables", &Debug['j'])
+	obj.Flagcount("l", "disable inlining", &Debug['l'])
+	obj.Flagcount("live", "debug liveness analysis", &debuglive)
+	obj.Flagcount("m", "print optimization decisions", &Debug['m'])
+	obj.Flagcount("nolocalimports", "reject local (relative) imports", &nolocalimports)
+	obj.Flagstr("o", "write output to `file`", &outfile)
+	obj.Flagstr("p", "set expected package import `path`", &myimportpath)
+	obj.Flagcount("pack", "write package file instead of object file", &writearchive)
+	obj.Flagcount("r", "debug generated wrappers", &Debug['r'])
+	obj.Flagcount("race", "enable race detector", &flag_race)
+	obj.Flagcount("s", "warn about composite literals that can be simplified", &Debug['s'])
+	obj.Flagstr("trimpath", "remove `prefix` from recorded source file paths", &Ctxt.LineHist.TrimPathPrefix)
+	obj.Flagcount("u", "reject unsafe code", &safemode)
+	obj.Flagcount("v", "increase debug verbosity", &Debug['v'])
+	obj.Flagcount("w", "debug type checking", &Debug['w'])
+	use_writebarrier = 1
+	obj.Flagcount("wb", "enable write barrier", &use_writebarrier)
+	obj.Flagcount("x", "debug lexer", &Debug['x'])
+	obj.Flagcount("y", "debug declarations in canned imports (with -d)", &Debug['y'])
+	var flag_shared int
+	var flag_dynlink bool
+	if Thearch.Thechar == '6' {
+		obj.Flagcount("largemodel", "generate code that assumes a large memory model", &flag_largemodel)
+		obj.Flagcount("shared", "generate code that can be linked into a shared library", &flag_shared)
+		flag.BoolVar(&flag_dynlink, "dynlink", false, "support references to Go symbols defined in other shared libraries")
+	}
+	obj.Flagstr("cpuprofile", "write cpu profile to `file`", &cpuprofile)
+	obj.Flagstr("memprofile", "write memory profile to `file`", &memprofile)
+	obj.Flagint64("memprofilerate", "set runtime.MemProfileRate to `rate`", &memprofilerate)
+	obj.Flagparse(usage)
+
+	if flag_dynlink {
+		flag_shared = 1
+	}
+	Ctxt.Flag_shared = int32(flag_shared)
+	Ctxt.Flag_dynlink = flag_dynlink
+
+	Ctxt.Debugasm = int32(Debug['S'])
+	Ctxt.Debugvlog = int32(Debug['v'])
+
+	if flag.NArg() < 1 {
+		usage()
+	}
+
+	startProfile()
+
+	if flag_race != 0 {
+		racepkg = mkpkg("runtime/race")
+		racepkg.Name = "race"
+	}
+
+	// parse -d argument
+	if debugstr != "" {
+	Split:
+		for _, name := range strings.Split(debugstr, ",") {
+			if name == "" {
+				continue
+			}
+			val := 1
+			if i := strings.Index(name, "="); i >= 0 {
+				var err error
+				val, err = strconv.Atoi(name[i+1:])
+				if err != nil {
+					log.Fatalf("invalid debug value %v", name)
+				}
+				name = name[:i]
+			}
+			for _, t := range debugtab {
+				if t.name == name {
+					if t.val != nil {
+						*t.val = val
+						continue Split
+					}
+				}
+			}
+			log.Fatalf("unknown debug key -d %s\n", name)
+		}
+	}
+
+	// enable inlining.  for now:
+	//	default: inlining on.  (debug['l'] == 1)
+	//	-l: inlining off  (debug['l'] == 0)
+	//	-ll, -lll: inlining on again, with extra debugging (debug['l'] > 1)
+	if Debug['l'] <= 1 {
+		Debug['l'] = 1 - Debug['l']
+	}
+
+	Thearch.Betypeinit()
+	if Widthptr == 0 {
+		Fatal("betypeinit failed")
+	}
+
+	lexinit()
+	typeinit()
+	lexinit1()
+	// TODO(rsc): Restore yytinit?
+
+	blockgen = 1
+	dclcontext = PEXTERN
+	nerrors = 0
+	lexlineno = 1
+
+	for _, infile = range flag.Args() {
+		linehistpush(infile)
+
+		curio.infile = infile
+		var err error
+		curio.bin, err = obj.Bopenr(infile)
+		if err != nil {
+			fmt.Printf("open %s: %v\n", infile, err)
+			errorexit()
+		}
+
+		curio.peekc = 0
+		curio.peekc1 = 0
+		curio.nlsemi = 0
+		curio.eofnl = 0
+		curio.last = 0
+
+		// Skip initial BOM if present.
+		if obj.Bgetrune(curio.bin) != obj.BOM {
+			obj.Bungetrune(curio.bin)
+		}
+
+		block = 1
+		iota_ = -1000000
+
+		imported_unsafe = 0
+
+		yyparse()
+		if nsyntaxerrors != 0 {
+			errorexit()
+		}
+
+		linehistpop()
+		if curio.bin != nil {
+			obj.Bterm(curio.bin)
+		}
+	}
+
+	testdclstack()
+	mkpackage(localpkg.Name) // final import not used checks
+	lexfini()
+
+	typecheckok = 1
+	if Debug['f'] != 0 {
+		frame(1)
+	}
+
+	// Process top-level declarations in phases.
+
+	// Phase 1: const, type, and names and types of funcs.
+	//   This will gather all the information about types
+	//   and methods but doesn't depend on any of it.
+	defercheckwidth()
+
+	for l := xtop; l != nil; l = l.Next {
+		if l.N.Op != ODCL && l.N.Op != OAS {
+			typecheck(&l.N, Etop)
+		}
+	}
+
+	// Phase 2: Variable assignments.
+	//   To check interface assignments, depends on phase 1.
+	for l := xtop; l != nil; l = l.Next {
+		if l.N.Op == ODCL || l.N.Op == OAS {
+			typecheck(&l.N, Etop)
+		}
+	}
+	resumecheckwidth()
+
+	// Phase 3: Type check function bodies.
+	for l := xtop; l != nil; l = l.Next {
+		if l.N.Op == ODCLFUNC || l.N.Op == OCLOSURE {
+			Curfn = l.N
+			decldepth = 1
+			saveerrors()
+			typechecklist(l.N.Nbody, Etop)
+			checkreturn(l.N)
+			if nerrors != 0 {
+				l.N.Nbody = nil // type errors; do not compile
+			}
+		}
+	}
+
+	// Phase 4: Decide how to capture closed variables.
+	// This needs to run before escape analysis,
+	// because variables captured by value do not escape.
+	for l := xtop; l != nil; l = l.Next {
+		if l.N.Op == ODCLFUNC && l.N.Closure != nil {
+			Curfn = l.N
+			capturevars(l.N)
+		}
+	}
+
+	Curfn = nil
+
+	if nsavederrors+nerrors != 0 {
+		errorexit()
+	}
+
+	// Phase 5: Inlining
+	if Debug['l'] > 1 {
+		// Typecheck imported function bodies if debug['l'] > 1,
+		// otherwise lazily when used or re-exported.
+		for l := importlist; l != nil; l = l.Next {
+			if l.N.Func.Inl != nil {
+				saveerrors()
+				typecheckinl(l.N)
+			}
+		}
+
+		if nsavederrors+nerrors != 0 {
+			errorexit()
+		}
+	}
+
+	if Debug['l'] != 0 {
+		// Find functions that can be inlined and clone them before walk expands them.
+		visitBottomUp(xtop, func(list *NodeList, recursive bool) {
+			for l := list; l != nil; l = l.Next {
+				if l.N.Op == ODCLFUNC {
+					caninl(l.N)
+					inlcalls(l.N)
+				}
+			}
+		})
+	}
+
+	// Phase 6: Escape analysis.
+	// Required for moving heap allocations onto stack,
+	// which in turn is required by the closure implementation,
+	// which stores the addresses of stack variables into the closure.
+	// If the closure does not escape, it needs to be on the stack
+	// or else the stack copier will not update it.
+	escapes(xtop)
+
+	// Escape analysis moved escaped values off stack.
+	// Move large values off stack too.
+	movelarge(xtop)
+
+	// Phase 7: Transform closure bodies to properly reference captured variables.
+	// This needs to happen before walk, because closures must be transformed
+	// before walk reaches a call of a closure.
+	for l := xtop; l != nil; l = l.Next {
+		if l.N.Op == ODCLFUNC && l.N.Closure != nil {
+			Curfn = l.N
+			transformclosure(l.N)
+		}
+	}
+
+	Curfn = nil
+
+	// Phase 8: Compile top level functions.
+	for l := xtop; l != nil; l = l.Next {
+		if l.N.Op == ODCLFUNC {
+			funccompile(l.N)
+		}
+	}
+
+	if nsavederrors+nerrors == 0 {
+		fninit(xtop)
+	}
+
+	// Phase 9: Check external declarations.
+	for l := externdcl; l != nil; l = l.Next {
+		if l.N.Op == ONAME {
+			typecheck(&l.N, Erv)
+		}
+	}
+
+	if nerrors+nsavederrors != 0 {
+		errorexit()
+	}
+
+	dumpobj()
+
+	if asmhdr != "" {
+		dumpasmhdr()
+	}
+
+	if nerrors+nsavederrors != 0 {
+		errorexit()
+	}
+
+	Flusherrors()
+}
+
+func saveerrors() {
+	nsavederrors += nerrors
+	nerrors = 0
+}
+
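+// arsize reads one archive member header and, if the member's name
+// begins with name, returns the member size parsed from the header's
+// fixed-width size field; otherwise it returns -1.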
+func arsize(b *obj.Biobuf, name string) int {
+	var buf [ArhdrSize]byte
+	if _, err := io.ReadFull(b, buf[:]); err != nil {
+		return -1
+	}
+	aname := strings.Trim(string(buf[0:16]), " ")
+	if !strings.HasPrefix(aname, name) {
+		return -1
+	}
+	asize := strings.Trim(string(buf[48:58]), " ")
+	i, _ := strconv.Atoi(asize)
+	return i
+}
+
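+// skiptopkgdef positions b just past the archive magic ("!<arch>\n")
+// and the optional __.GOSYMDEF symbol table, leaving it at the
+// __.PKGDEF member that carries the package's export data.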
+func skiptopkgdef(b *obj.Biobuf) bool {
+	/* archive header */
+	p := obj.Brdline(b, '\n')
+	if p == "" {
+		return false
+	}
+	if obj.Blinelen(b) != 8 {
+		return false
+	}
+	if p != "!<arch>\n" {
+		return false
+	}
+
+	/* symbol table may be first; skip it */
+	sz := arsize(b, "__.GOSYMDEF")
+
+	if sz >= 0 {
+		obj.Bseek(b, int64(sz), 1)
+	} else {
+		obj.Bseek(b, 8, 0)
+	}
+
+	/* package export block is next */
+	sz = arsize(b, "__.PKGDEF")
+
+	if sz <= 0 {
+		return false
+	}
+	return true
+}
+
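+// addidir appends dir to the end of the -I search list, preserving
+// the order in which -I directories appeared on the command line.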
+func addidir(dir string) {
+	if dir == "" {
+		return
+	}
+
+	var pp **Idir
+	for pp = &idirs; *pp != nil; pp = &(*pp).link {
+	}
+	*pp = new(Idir)
+	(*pp).link = nil
+	(*pp).dir = dir
+}
+
+// is this path a local name?  begins with ./ or ../ or /
+func islocalname(name string) bool {
+	return strings.HasPrefix(name, "/") ||
+		Ctxt.Windows != 0 && len(name) >= 3 && yy_isalpha(int(name[0])) && name[1] == ':' && name[2] == '/' ||
+		strings.HasPrefix(name, "./") || name == "." ||
+		strings.HasPrefix(name, "../") || name == ".."
+}
+
+func findpkg(name string) (file string, ok bool) {
+	if islocalname(name) {
+		if safemode != 0 || nolocalimports != 0 {
+			return "", false
+		}
+
+		// try .a before .6.  important for building libraries:
+		// if there is an array.6 in the array.a library,
+		// want to find all of array.a, not just array.6.
+		file = fmt.Sprintf("%s.a", name)
+		if obj.Access(file, 0) >= 0 {
+			return file, true
+		}
+		file = fmt.Sprintf("%s.%c", name, Thearch.Thechar)
+		if obj.Access(file, 0) >= 0 {
+			return file, true
+		}
+		return "", false
+	}
+
+	// local imports should be canonicalized already.
+	// don't want to see "encoding/../encoding/base64"
+	// as different from "encoding/base64".
+	if q := path.Clean(name); q != name {
+		Yyerror("non-canonical import path %q (should be %q)", name, q)
+		return "", false
+	}
+
+	for p := idirs; p != nil; p = p.link {
+		file = fmt.Sprintf("%s/%s.a", p.dir, name)
+		if obj.Access(file, 0) >= 0 {
+			return file, true
+		}
+		file = fmt.Sprintf("%s/%s.%c", p.dir, name, Thearch.Thechar)
+		if obj.Access(file, 0) >= 0 {
+			return file, true
+		}
+	}
+
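+	// Finally fall back to the standard install tree, e.g.
+	// $GOROOT/pkg/linux_amd64/fmt.a (path shown is illustrative).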
+	if goroot != "" {
+		suffix := ""
+		suffixsep := ""
+		if flag_installsuffix != "" {
+			suffixsep = "_"
+			suffix = flag_installsuffix
+		} else if flag_race != 0 {
+			suffixsep = "_"
+			suffix = "race"
+		}
+
+		file = fmt.Sprintf("%s/pkg/%s_%s%s%s/%s.a", goroot, goos, goarch, suffixsep, suffix, name)
+		if obj.Access(file, 0) >= 0 {
+			return file, true
+		}
+		file = fmt.Sprintf("%s/pkg/%s_%s%s%s/%s.%c", goroot, goos, goarch, suffixsep, suffix, name, Thearch.Thechar)
+		if obj.Access(file, 0) >= 0 {
+			return file, true
+		}
+	}
+
+	return "", false
+}
+
+func fakeimport() {
+	importpkg = mkpkg("fake")
+	cannedimports("fake.6", "$$\n")
+}
+
+func importfile(f *Val, line int) {
+	if f.Ctype != CTSTR {
+		Yyerror("import statement not a string")
+		fakeimport()
+		return
+	}
+
+	if len(f.U.(string)) == 0 {
+		Yyerror("import path is empty")
+		fakeimport()
+		return
+	}
+
+	if isbadimport(f.U.(string)) {
+		fakeimport()
+		return
+	}
+
+	// The package name main is no longer reserved,
+	// but we reserve the import path "main" to identify
+	// the main package, just as we reserve the import
+	// path "math" to identify the standard math package.
+	if f.U.(string) == "main" {
+		Yyerror("cannot import \"main\"")
+		errorexit()
+	}
+
+	if myimportpath != "" && f.U.(string) == myimportpath {
+		Yyerror("import %q while compiling that package (import cycle)", f.U.(string))
+		errorexit()
+	}
+
+	if f.U.(string) == "unsafe" {
+		if safemode != 0 {
+			Yyerror("cannot import package unsafe")
+			errorexit()
+		}
+
+		importpkg = mkpkg(f.U.(string))
+		cannedimports("unsafe.6", unsafeimport)
+		imported_unsafe = 1
+		return
+	}
+
+	path_ := f.U.(string)
+	if islocalname(path_) {
+		if path_[0] == '/' {
+			Yyerror("import path cannot be absolute path")
+			fakeimport()
+			return
+		}
+
+		prefix := Ctxt.Pathname
+		if localimport != "" {
+			prefix = localimport
+		}
+		cleanbuf := prefix
+		cleanbuf += "/"
+		cleanbuf += path_
+		cleanbuf = path.Clean(cleanbuf)
+		path_ = cleanbuf
+
+		if isbadimport(path_) {
+			fakeimport()
+			return
+		}
+	}
+
+	file, found := findpkg(path_)
+	if !found {
+		Yyerror("can't find import: %q", f.U.(string))
+		errorexit()
+	}
+
+	importpkg = mkpkg(path_)
+
+	// If we already saw that package, feed a dummy statement
+	// to the lexer to avoid parsing export data twice.
+	if importpkg.Imported != 0 {
+		tag := ""
+		if importpkg.Safe {
+			tag = "safe"
+		}
+
+		p := fmt.Sprintf("package %s %s\n$$\n", importpkg.Name, tag)
+		cannedimports(file, p)
+		return
+	}
+
+	importpkg.Imported = 1
+
+	var err error
+	var imp *obj.Biobuf
+	imp, err = obj.Bopenr(file)
+	if err != nil {
+		Yyerror("can't open import: %q: %v", f.U.(string), err)
+		errorexit()
+	}
+
+	if strings.HasSuffix(file, ".a") {
+		if !skiptopkgdef(imp) {
+			Yyerror("import %s: not a package file", file)
+			errorexit()
+		}
+	}
+
+	// check object header
+	p := obj.Brdstr(imp, '\n', 1)
+
+	if p != "empty archive" {
+		if !strings.HasPrefix(p, "go object ") {
+			Yyerror("import %s: not a go object file", file)
+			errorexit()
+		}
+
+		q := fmt.Sprintf("%s %s %s %s", obj.Getgoos(), obj.Getgoarch(), obj.Getgoversion(), obj.Expstring())
+		if p[10:] != q {
+			Yyerror("import %s: object is [%s] expected [%s]", file, p[10:], q)
+			errorexit()
+		}
+	}
+
+	// assume files move (get installed)
+	// so don't record the full path.
+	linehistpragma(file[len(file)-len(path_)-2:]) // acts as #pragma lib
+
+	/*
+	 * position the input right
+	 * after $$ and return
+	 */
+	pushedio = curio
+
+	curio.bin = imp
+	curio.peekc = 0
+	curio.peekc1 = 0
+	curio.infile = file
+	curio.nlsemi = 0
+	typecheckok = 1
+
+	var c int32
+	for {
+		c = int32(getc())
+		if c == EOF {
+			break
+		}
+		if c != '$' {
+			continue
+		}
+		c = int32(getc())
+		if c == EOF {
+			break
+		}
+		if c != '$' {
+			continue
+		}
+		return
+	}
+
+	Yyerror("no import in %q", f.U.(string))
+	unimportfile()
+}
+
+func unimportfile() {
+	if curio.bin != nil {
+		obj.Bterm(curio.bin)
+		curio.bin = nil
+	} else {
+		lexlineno-- // re-correct the sys.6 line number (see cannedimports)
+	}
+
+	curio = pushedio
+
+	pushedio.bin = nil
+	incannedimport = 0
+	typecheckok = 0
+}
+
+func cannedimports(file string, cp string) {
+	lexlineno++ // keep line numbers right if sys.6 is included on line 1; unimportfile undoes this
+
+	pushedio = curio
+
+	curio.bin = nil
+	curio.peekc = 0
+	curio.peekc1 = 0
+	curio.infile = file
+	curio.cp = cp
+	curio.nlsemi = 0
+	curio.importsafe = false
+
+	typecheckok = 1
+	incannedimport = 1
+}
+
+func isfrog(c int) bool {
+	// complain about possibly invisible control characters
+	if c < ' ' {
+		return !yy_isspace(c) // exclude good white space
+	}
+
+	if 0x7f <= c && c <= 0xa0 { // DEL, unicode block including unbreakable space.
+		return true
+	}
+	return false
+}
+
+type Loophack struct {
+	v    int
+	next *Loophack
+}
+
+var _yylex_lstk *Loophack
+
+func _yylex(yylval *yySymType) int32 {
+	var c1 int
+	var escflag int
+	var v int64
+	var cp *bytes.Buffer
+	var rune_ uint
+	var s *Sym
+	var h *Loophack
+	var str string
+
+	prevlineno = lineno
+
+l0:
+	c := getc()
+	if yy_isspace(c) {
+		if c == '\n' && curio.nlsemi != 0 {
+			ungetc(c)
+			if Debug['x'] != 0 {
+				fmt.Printf("lex: implicit semi\n")
+			}
+			return ';'
+		}
+
+		goto l0
+	}
+
+	lineno = lexlineno /* start of token */
+
+	if c >= utf8.RuneSelf {
+		/* all multibyte runes are alpha */
+		cp = &lexbuf
+		cp.Reset()
+
+		goto talph
+	}
+
+	if yy_isalpha(c) {
+		cp = &lexbuf
+		cp.Reset()
+		goto talph
+	}
+
+	if yy_isdigit(c) {
+		cp = &lexbuf
+		cp.Reset()
+		if c != '0' {
+			for {
+				cp.WriteByte(byte(c))
+				c = getc()
+				if yy_isdigit(c) {
+					continue
+				}
+				if c == '.' {
+					goto casedot
+				}
+				if c == 'e' || c == 'E' || c == 'p' || c == 'P' {
+					goto caseep
+				}
+				if c == 'i' {
+					goto casei
+				}
+				goto ncu
+			}
+		}
+
+		cp.WriteByte(byte(c))
+		c = getc()
+		if c == 'x' || c == 'X' {
+			for {
+				cp.WriteByte(byte(c))
+				c = getc()
+				if yy_isdigit(c) {
+					continue
+				}
+				if c >= 'a' && c <= 'f' {
+					continue
+				}
+				if c >= 'A' && c <= 'F' {
+					continue
+				}
+				if lexbuf.Len() == 2 {
+					Yyerror("malformed hex constant")
+				}
+				if c == 'p' {
+					goto caseep
+				}
+				goto ncu
+			}
+		}
+
+		if c == 'p' { // 0p begins floating point zero
+			goto caseep
+		}
+
+		c1 = 0
+		for {
+			if !yy_isdigit(c) {
+				break
+			}
+			if c < '0' || c > '7' {
+				c1 = 1 // not octal
+			}
+			cp.WriteByte(byte(c))
+			c = getc()
+		}
+
+		if c == '.' {
+			goto casedot
+		}
+		if c == 'e' || c == 'E' {
+			goto caseep
+		}
+		if c == 'i' {
+			goto casei
+		}
+		if c1 != 0 {
+			Yyerror("malformed octal constant")
+		}
+		goto ncu
+	}
+
+	switch c {
+	case EOF:
+		lineno = prevlineno
+		ungetc(EOF)
+		return -1
+
+	case '_':
+		cp = &lexbuf
+		cp.Reset()
+		goto talph
+
+	case '.':
+		c1 = getc()
+		if yy_isdigit(c1) {
+			cp = &lexbuf
+			cp.Reset()
+			cp.WriteByte(byte(c))
+			c = c1
+			goto casedot
+		}
+
+		if c1 == '.' {
+			c1 = getc()
+			if c1 == '.' {
+				c = LDDD
+				goto lx
+			}
+
+			ungetc(c1)
+			c1 = '.'
+		}
+
+		/* "..." */
+	case '"':
+		lexbuf.Reset()
+		lexbuf.WriteString(`"<string>"`)
+
+		cp = &strbuf
+		cp.Reset()
+
+		for {
+			if escchar('"', &escflag, &v) {
+				break
+			}
+			if v < utf8.RuneSelf || escflag != 0 {
+				cp.WriteByte(byte(v))
+			} else {
+				rune_ = uint(v)
+				cp.WriteRune(rune(rune_))
+			}
+		}
+
+		goto strlit
+
+		/* `...` */
+	case '`':
+		lexbuf.Reset()
+		lexbuf.WriteString("`<string>`")
+
+		cp = &strbuf
+		cp.Reset()
+
+		for {
+			c = int(getr())
+			if c == '\r' {
+				continue
+			}
+			if c == EOF {
+				Yyerror("eof in string")
+				break
+			}
+
+			if c == '`' {
+				break
+			}
+			cp.WriteRune(rune(c))
+		}
+
+		goto strlit
+
+		/* '.' */
+	case '\'':
+		if escchar('\'', &escflag, &v) {
+			Yyerror("empty character literal or unescaped ' in character literal")
+			v = '\''
+		}
+
+		if !escchar('\'', &escflag, &v) {
+			Yyerror("missing '")
+			ungetc(int(v))
+		}
+
+		yylval.val.U = new(Mpint)
+		Mpmovecfix(yylval.val.U.(*Mpint), v)
+		yylval.val.Ctype = CTRUNE
+		if Debug['x'] != 0 {
+			fmt.Printf("lex: codepoint literal\n")
+		}
+		litbuf = "string literal"
+		return LLITERAL
+
+	case '/':
+		c1 = getc()
+		if c1 == '*' {
+			nl := 0
+			for {
+				c = int(getr())
+				if c == '\n' {
+					nl = 1
+				}
+				for c == '*' {
+					c = int(getr())
+					if c == '/' {
+						if nl != 0 {
+							ungetc('\n')
+						}
+						goto l0
+					}
+
+					if c == '\n' {
+						nl = 1
+					}
+				}
+
+				if c == EOF {
+					Yyerror("eof in comment")
+					errorexit()
+				}
+			}
+		}
+
+		if c1 == '/' {
+			c = getlinepragma()
+			for {
+				if c == '\n' || c == EOF {
+					ungetc(c)
+					goto l0
+				}
+
+				c = int(getr())
+			}
+		}
+
+		if c1 == '=' {
+			c = ODIV
+			goto asop
+		}
+
+	case ':':
+		c1 = getc()
+		if c1 == '=' {
+			c = LCOLAS
+			yylval.i = int(lexlineno)
+			goto lx
+		}
+
+	case '*':
+		c1 = getc()
+		if c1 == '=' {
+			c = OMUL
+			goto asop
+		}
+
+	case '%':
+		c1 = getc()
+		if c1 == '=' {
+			c = OMOD
+			goto asop
+		}
+
+	case '+':
+		c1 = getc()
+		if c1 == '+' {
+			c = LINC
+			goto lx
+		}
+
+		if c1 == '=' {
+			c = OADD
+			goto asop
+		}
+
+	case '-':
+		c1 = getc()
+		if c1 == '-' {
+			c = LDEC
+			goto lx
+		}
+
+		if c1 == '=' {
+			c = OSUB
+			goto asop
+		}
+
+	case '>':
+		c1 = getc()
+		if c1 == '>' {
+			c = LRSH
+			c1 = getc()
+			if c1 == '=' {
+				c = ORSH
+				goto asop
+			}
+
+			break
+		}
+
+		if c1 == '=' {
+			c = LGE
+			goto lx
+		}
+
+		c = LGT
+
+	case '<':
+		c1 = getc()
+		if c1 == '<' {
+			c = LLSH
+			c1 = getc()
+			if c1 == '=' {
+				c = OLSH
+				goto asop
+			}
+
+			break
+		}
+
+		if c1 == '=' {
+			c = LLE
+			goto lx
+		}
+
+		if c1 == '-' {
+			c = LCOMM
+			goto lx
+		}
+
+		c = LLT
+
+	case '=':
+		c1 = getc()
+		if c1 == '=' {
+			c = LEQ
+			goto lx
+		}
+
+	case '!':
+		c1 = getc()
+		if c1 == '=' {
+			c = LNE
+			goto lx
+		}
+
+	case '&':
+		c1 = getc()
+		if c1 == '&' {
+			c = LANDAND
+			goto lx
+		}
+
+		if c1 == '^' {
+			c = LANDNOT
+			c1 = getc()
+			if c1 == '=' {
+				c = OANDNOT
+				goto asop
+			}
+
+			break
+		}
+
+		if c1 == '=' {
+			c = OAND
+			goto asop
+		}
+
+	case '|':
+		c1 = getc()
+		if c1 == '|' {
+			c = LOROR
+			goto lx
+		}
+
+		if c1 == '=' {
+			c = OOR
+			goto asop
+		}
+
+	case '^':
+		c1 = getc()
+		if c1 == '=' {
+			c = OXOR
+			goto asop
+		}
+
+		/*
+		 * clumsy dance:
+		 * to implement rule that disallows
+		 *	if T{1}[0] { ... }
+		 * but allows
+		 * 	if (T{1}[0]) { ... }
+		 * the block bodies for if/for/switch/select
+		 * begin with an LBODY token, not '{'.
+		 *
+		 * when we see the keyword, the next
+		 * non-parenthesized '{' becomes an LBODY.
+		 * loophack is normally 0.
+		 * a keyword makes it go up to 1.
+		 * parens push loophack onto a stack and go back to 0.
+		 * a '{' with loophack == 1 becomes LBODY and disables loophack.
+		 *
+		 * i said it was clumsy.
+		 */
+	case '(', '[':
+		if loophack != 0 || _yylex_lstk != nil {
+			h = new(Loophack) // new never returns nil, so no out-of-memory check is needed
+			h.v = loophack
+			h.next = _yylex_lstk
+			_yylex_lstk = h
+			loophack = 0
+		}
+
+		goto lx
+
+	case ')', ']':
+		if _yylex_lstk != nil {
+			h = _yylex_lstk
+			loophack = h.v
+			_yylex_lstk = h.next
+		}
+
+		goto lx
+
+	case '{':
+		if loophack == 1 {
+			if Debug['x'] != 0 {
+				fmt.Printf("%v lex: LBODY\n", Ctxt.Line(int(lexlineno)))
+			}
+			loophack = 0
+			return LBODY
+		}
+
+		goto lx
+
+	default:
+		goto lx
+	}
+
+	ungetc(c1)
+
+lx:
+	if c > 0xff {
+		if Debug['x'] != 0 {
+			fmt.Printf("%v lex: TOKEN %s\n", Ctxt.Line(int(lexlineno)), lexname(c))
+		}
+	} else {
+		if Debug['x'] != 0 {
+			fmt.Printf("%v lex: TOKEN '%c'\n", Ctxt.Line(int(lexlineno)), c)
+		}
+	}
+	if isfrog(c) {
+		Yyerror("illegal character 0x%x", uint(c))
+		goto l0
+	}
+
+	if importpkg == nil && (c == '#' || c == '$' || c == '?' || c == '@' || c == '\\') {
+		Yyerror("%s: unexpected %c", "syntax error", c)
+		goto l0
+	}
+
+	return int32(c)
+
+asop:
+	yylval.i = c // rathole to hold which asop
+	if Debug['x'] != 0 {
+		fmt.Printf("lex: TOKEN ASOP %c\n", c)
+	}
+	return LASOP
+
+	/*
+	 * cp is set to lexbuf and some
+	 * prefix has been stored
+	 */
+talph:
+	for {
+		if c >= utf8.RuneSelf {
+			ungetc(c)
+			rune_ = uint(getr())
+
+			// 0xb7 · is used for internal names
+			if !unicode.IsLetter(rune(rune_)) && !unicode.IsDigit(rune(rune_)) && (importpkg == nil || rune_ != 0xb7) {
+				Yyerror("invalid identifier character U+%04x", rune_)
+			}
+			cp.WriteRune(rune(rune_))
+		} else if !yy_isalnum(c) && c != '_' {
+			break
+		} else {
+			cp.WriteByte(byte(c))
+		}
+		c = getc()
+	}
+
+	cp = nil
+	ungetc(c)
+
+	s = LookupBytes(lexbuf.Bytes())
+	switch s.Lexical {
+	case LIGNORE:
+		goto l0
+
+	case LFOR, LIF, LSWITCH, LSELECT:
+		loophack = 1 // see comment about loophack above
+	}
+
+	if Debug['x'] != 0 {
+		fmt.Printf("lex: %s %s\n", s, lexname(int(s.Lexical)))
+	}
+	yylval.sym = s
+	return int32(s.Lexical)
+
+ncu:
+	cp = nil
+	ungetc(c)
+
+	str = lexbuf.String()
+	yylval.val.U = new(Mpint)
+	mpatofix(yylval.val.U.(*Mpint), str)
+	if yylval.val.U.(*Mpint).Ovf {
+		Yyerror("overflow in constant")
+		Mpmovecfix(yylval.val.U.(*Mpint), 0)
+	}
+
+	yylval.val.Ctype = CTINT
+	if Debug['x'] != 0 {
+		fmt.Printf("lex: integer literal\n")
+	}
+	litbuf = "literal " + str
+	return LLITERAL
+
+casedot:
+	for {
+		cp.WriteByte(byte(c))
+		c = getc()
+		if !yy_isdigit(c) {
+			break
+		}
+	}
+
+	if c == 'i' {
+		goto casei
+	}
+	if c != 'e' && c != 'E' {
+		goto caseout
+	}
+
+caseep:
+	cp.WriteByte(byte(c))
+	c = getc()
+	if c == '+' || c == '-' {
+		cp.WriteByte(byte(c))
+		c = getc()
+	}
+
+	if !yy_isdigit(c) {
+		Yyerror("malformed fp constant exponent")
+	}
+	for yy_isdigit(c) {
+		cp.WriteByte(byte(c))
+		c = getc()
+	}
+
+	if c == 'i' {
+		goto casei
+	}
+	goto caseout
+
+	// imaginary constant
+casei:
+	cp = nil
+
+	str = lexbuf.String()
+	yylval.val.U = new(Mpcplx)
+	Mpmovecflt(&yylval.val.U.(*Mpcplx).Real, 0.0)
+	mpatoflt(&yylval.val.U.(*Mpcplx).Imag, str)
+	if yylval.val.U.(*Mpcplx).Imag.Val.IsInf() {
+		Yyerror("overflow in imaginary constant")
+		Mpmovecflt(&yylval.val.U.(*Mpcplx).Real, 0.0)
+	}
+
+	yylval.val.Ctype = CTCPLX
+	if Debug['x'] != 0 {
+		fmt.Printf("lex: imaginary literal\n")
+	}
+	litbuf = "literal " + str
+	return LLITERAL
+
+caseout:
+	cp = nil
+	ungetc(c)
+
+	str = lexbuf.String()
+	yylval.val.U = newMpflt()
+	mpatoflt(yylval.val.U.(*Mpflt), str)
+	if yylval.val.U.(*Mpflt).Val.IsInf() {
+		Yyerror("overflow in float constant")
+		Mpmovecflt(yylval.val.U.(*Mpflt), 0.0)
+	}
+
+	yylval.val.Ctype = CTFLT
+	if Debug['x'] != 0 {
+		fmt.Printf("lex: floating literal\n")
+	}
+	litbuf = "literal " + str
+	return LLITERAL
+
+strlit:
+	yylval.val.U = internString(cp.Bytes())
+	yylval.val.Ctype = CTSTR
+	if Debug['x'] != 0 {
+		fmt.Printf("lex: string literal\n")
+	}
+	litbuf = "string literal"
+	return LLITERAL
+}
+
+var internedStrings = map[string]string{}
+
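+// internString returns a string with the same contents as b,
+// reusing a previously interned copy when one exists so that
+// identical byte sequences share a single string allocation.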
+func internString(b []byte) string {
+	s, ok := internedStrings[string(b)] // string(b) here doesn't allocate
+	if ok {
+		return s
+	}
+	s = string(b)
+	internedStrings[s] = s
+	return s
+}
+
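+// more skips leading whitespace in *pp and reports whether
+// any input remains.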
+func more(pp *string) bool {
+	p := *pp
+	for p != "" && yy_isspace(int(p[0])) {
+		p = p[1:]
+	}
+	*pp = p
+	return p != ""
+}
+
+/*
+ * read and interpret syntax that looks like
+ * //line parse.y:15
+ * as a discontinuity in sequential line numbers.
+ * the next line of input comes from parse.y:15
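+ *
+ * the same function also dispatches the //go: compiler directives
+ * (//go:linkname, //go:noescape, //go:nosplit, //go:nowritebarrier,
+ * //go:nointerface, and the //go:cgo_* pragmas handled by pragcgo).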
+ */
+func getlinepragma() int {
+	var cmd, verb, name string
+
+	c := int(getr())
+	if c == 'g' {
+		cp := &lexbuf
+		cp.Reset()
+		cp.WriteByte('g') // already read
+		for {
+			c = int(getr())
+			if c == EOF || c >= utf8.RuneSelf {
+				return c
+			}
+			if c == '\n' {
+				break
+			}
+			cp.WriteByte(byte(c))
+		}
+		cp = nil
+
+		text := lexbuf.String()
+
+		if strings.HasPrefix(text, "go:cgo_") {
+			pragcgo(text)
+		}
+
+		cmd = text
+		verb = cmd
+		if i := strings.Index(verb, " "); i >= 0 {
+			verb = verb[:i]
+		}
+
+		if verb == "go:linkname" {
+			if imported_unsafe == 0 {
+				Yyerror("//go:linkname only allowed in Go files that import \"unsafe\"")
+			}
+			f := strings.Fields(cmd)
+			if len(f) != 3 {
+				Yyerror("usage: //go:linkname localname linkname")
+				return c
+			}
+
+			Lookup(f[1]).Linkname = f[2]
+			return c
+		}
+
+		if verb == "go:nointerface" && obj.Fieldtrack_enabled != 0 {
+			nointerface = true
+			return c
+		}
+
+		if verb == "go:noescape" {
+			noescape = true
+			return c
+		}
+
+		if verb == "go:nosplit" {
+			nosplit = true
+			return c
+		}
+
+		if verb == "go:nowritebarrier" {
+			if compiling_runtime == 0 {
+				Yyerror("//go:nowritebarrier only allowed in runtime")
+			}
+			nowritebarrier = true
+			return c
+		}
+		return c
+	}
+	if c != 'l' {
+		return c
+	}
+	for i := 1; i < 5; i++ {
+		c = int(getr())
+		if c != int("line "[i]) {
+			return c
+		}
+	}
+
+	cp := &lexbuf
+	cp.Reset()
+	linep := 0
+	for {
+		c = int(getr())
+		if c == EOF {
+			return c
+		}
+		if c == '\n' {
+			break
+		}
+		if c == ' ' {
+			continue
+		}
+		if c == ':' {
+			linep = cp.Len() + 1
+		}
+		cp.WriteByte(byte(c))
+	}
+
+	cp = nil
+
+	if linep == 0 {
+		return c
+	}
+	text := lexbuf.String()
+	n := 0
+	for _, c := range text[linep:] {
+		if c < '0' || c > '9' {
+			goto out
+		}
+		n = n*10 + int(c) - '0'
+		if n > 1e8 {
+			Yyerror("line number out of range")
+			errorexit()
+		}
+	}
+
+	if n <= 0 {
+		return c
+	}
+
+	name = text[:linep-1]
+	linehistupdate(name, n)
+	return c
+
+out:
+	return c
+}
+
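+// getimpsym skips leading spaces in *pp and reads an unquoted
+// symbol name, stopping at a space, a '"', or end of input.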
+func getimpsym(pp *string) string {
+	more(pp) // skip spaces
+	p := *pp
+	if p == "" || p[0] == '"' {
+		return ""
+	}
+	i := 0
+	for i < len(p) && !yy_isspace(int(p[i])) && p[i] != '"' {
+		i++
+	}
+	sym := p[:i]
+	*pp = p[i:]
+	return sym
+}
+
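+// getquoted skips leading spaces in *pp and reads a double-quoted
+// string; it reports false if no quoted string is present.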
+func getquoted(pp *string) (string, bool) {
+	more(pp) // skip spaces
+	p := *pp
+	if p == "" || p[0] != '"' {
+		return "", false
+	}
+	p = p[1:]
+	i := strings.Index(p, `"`)
+	if i < 0 {
+		return "", false
+	}
+	*pp = p[i+1:]
+	return p[:i], true
+}
+
+// Copied nearly verbatim from the C compiler's #pragma parser.
+// TODO: Rewrite more cleanly once the compiler is written in Go.
+func pragcgo(text string) {
+	var q string
+
+	if i := strings.Index(text, " "); i >= 0 {
+		text, q = text[:i], text[i:]
+	}
+
+	verb := text[3:] // skip "go:"
+
+	if verb == "cgo_dynamic_linker" || verb == "dynlinker" {
+		var ok bool
+		var p string
+		p, ok = getquoted(&q)
+		if !ok {
+			Yyerror("usage: //go:cgo_dynamic_linker \"path\"")
+			return
+		}
+		pragcgobuf += fmt.Sprintf("cgo_dynamic_linker %v\n", plan9quote(p))
+		return
+
+	}
+
+	if verb == "dynexport" {
+		verb = "cgo_export_dynamic"
+	}
+	if verb == "cgo_export_static" || verb == "cgo_export_dynamic" {
+		local := getimpsym(&q)
+		var remote string
+		if local == "" {
+			goto err2
+		}
+		if !more(&q) {
+			pragcgobuf += fmt.Sprintf("%s %v\n", verb, plan9quote(local))
+			return
+		}
+
+		remote = getimpsym(&q)
+		if remote == "" {
+			goto err2
+		}
+		pragcgobuf += fmt.Sprintf("%s %v %v\n", verb, plan9quote(local), plan9quote(remote))
+		return
+
+	err2:
+		Yyerror("usage: //go:%s local [remote]", verb)
+		return
+	}
+
+	if verb == "cgo_import_dynamic" || verb == "dynimport" {
+		var ok bool
+		local := getimpsym(&q)
+		var p string
+		var remote string
+		if local == "" {
+			goto err3
+		}
+		if !more(&q) {
+			pragcgobuf += fmt.Sprintf("cgo_import_dynamic %v\n", plan9quote(local))
+			return
+		}
+
+		remote = getimpsym(&q)
+		if remote == "" {
+			goto err3
+		}
+		if !more(&q) {
+			pragcgobuf += fmt.Sprintf("cgo_import_dynamic %v %v\n", plan9quote(local), plan9quote(remote))
+			return
+		}
+
+		p, ok = getquoted(&q)
+		if !ok {
+			goto err3
+		}
+		pragcgobuf += fmt.Sprintf("cgo_import_dynamic %v %v %v\n", plan9quote(local), plan9quote(remote), plan9quote(p))
+		return
+
+	err3:
+		Yyerror("usage: //go:cgo_import_dynamic local [remote [\"library\"]]")
+		return
+	}
+
+	if verb == "cgo_import_static" {
+		local := getimpsym(&q)
+		if local == "" || more(&q) {
+			Yyerror("usage: //go:cgo_import_static local")
+			return
+		}
+		pragcgobuf += fmt.Sprintf("cgo_import_static %v\n", plan9quote(local))
+		return
+
+	}
+
+	if verb == "cgo_ldflag" {
+		var ok bool
+		var p string
+		p, ok = getquoted(&q)
+		if !ok {
+			Yyerror("usage: //go:cgo_ldflag \"arg\"")
+			return
+		}
+		pragcgobuf += fmt.Sprintf("cgo_ldflag %v\n", plan9quote(p))
+		return
+
+	}
+}
+
+type yy struct{}
+
+func (yy) Lex(v *yySymType) int {
+	return int(yylex(v))
+}
+
+func (yy) Error(msg string) {
+	Yyerror("%s", msg)
+}
+
+var theparser yyParser
+var parsing bool
+
+func yyparse() {
+	theparser = yyNewParser()
+	parsing = true
+	theparser.Parse(yy{})
+	parsing = false
+}
+
+func yylex(yylval *yySymType) int32 {
+	lx := int(_yylex(yylval))
+
+	if curio.nlsemi != 0 && lx == EOF {
+		// Treat EOF as "end of line" for the purposes
+		// of inserting a semicolon.
+		lx = ';'
+	}
+
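+	// Remember whether this token can end a statement: per Go's
+	// semicolon rules, a following newline is treated as ';' only
+	// after an identifier, a literal, break/continue/fallthrough/
+	// return, ++/--, or a closing bracket.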
+	switch lx {
+	case LNAME,
+		LLITERAL,
+		LBREAK,
+		LCONTINUE,
+		LFALL,
+		LRETURN,
+		LINC,
+		LDEC,
+		')',
+		'}',
+		']':
+		curio.nlsemi = 1
+
+	default:
+		curio.nlsemi = 0
+	}
+
+	// Track last two tokens returned by yylex.
+	yyprev = yylast
+
+	yylast = lx
+	return int32(lx)
+}
+
+func getc() int {
+	c := curio.peekc
+	if c != 0 {
+		curio.peekc = curio.peekc1
+		curio.peekc1 = 0
+		goto check
+	}
+
+	if curio.bin == nil {
+		if len(curio.cp) == 0 {
+			c = 0
+		} else {
+			c = int(curio.cp[0])
+			curio.cp = curio.cp[1:]
+		}
+	} else {
+	loop:
+		c = obj.Bgetc(curio.bin)
+		if c == 0xef {
+			buf, err := curio.bin.Peek(2)
+			if err != nil {
+				log.Fatalf("getc: peeking: %v", err)
+			}
+			if buf[0] == 0xbb && buf[1] == 0xbf {
+				yyerrorl(int(lexlineno), "Unicode (UTF-8) BOM in middle of file")
+
+				// consume BOM bytes
+				obj.Bgetc(curio.bin)
+				obj.Bgetc(curio.bin)
+				goto loop
+			}
+		}
+	}
+
+check:
+	switch c {
+	case 0:
+		if curio.bin != nil {
+			Yyerror("illegal NUL byte")
+			break
+		}
+		fallthrough
+
+		// insert \n at EOF
+	case EOF:
+		if curio.eofnl != 0 || curio.last == '\n' {
+			return EOF
+		}
+		curio.eofnl = 1
+		c = '\n'
+		fallthrough
+
+	case '\n':
+		if pushedio.bin == nil {
+			lexlineno++
+		}
+	}
+
+	curio.last = c
+	return c
+}
+
+func ungetc(c int) {
+	curio.peekc1 = curio.peekc
+	curio.peekc = c
+	if c == '\n' && pushedio.bin == nil {
+		lexlineno--
+	}
+}
+
+func getr() int32 {
+	var buf [utf8.UTFMax]byte
+
+	for i := 0; ; i++ {
+		c := getc()
+		if i == 0 && c < utf8.RuneSelf {
+			return int32(c)
+		}
+		buf[i] = byte(c)
+		if i+1 == len(buf) || utf8.FullRune(buf[:i+1]) {
+			r, w := utf8.DecodeRune(buf[:i+1])
+			if r == utf8.RuneError && w == 1 {
+				lineno = lexlineno
+				// The string conversion here makes a copy for passing
+				// to fmt.Printf, so that buf itself does not escape and can
+				// be allocated on the stack.
+				Yyerror("illegal UTF-8 sequence % x", string(buf[:i+1]))
+			}
+			return int32(r)
+		}
+	}
+}
+
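+// escchar reads one character or escape sequence from a rune or
+// string literal delimited by e. It returns true at the closing
+// delimiter or on error; otherwise it leaves the character value
+// in *val and sets *escflg when the escape denotes a raw byte
+// (\xNN or octal) rather than a rune.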
+func escchar(e int, escflg *int, val *int64) bool {
+	*escflg = 0
+
+	c := int(getr())
+	switch c {
+	case EOF:
+		Yyerror("eof in string")
+		return true
+
+	case '\n':
+		Yyerror("newline in string")
+		return true
+
+	case '\\':
+		break
+
+	default:
+		if c == e {
+			return true
+		}
+		*val = int64(c)
+		return false
+	}
+
+	u := 0
+	c = int(getr())
+	var i int
+	switch c {
+	case 'x':
+		*escflg = 1 // it's a byte
+		i = 2
+		goto hex
+
+	case 'u':
+		i = 4
+		u = 1
+		goto hex
+
+	case 'U':
+		i = 8
+		u = 1
+		goto hex
+
+	case '0',
+		'1',
+		'2',
+		'3',
+		'4',
+		'5',
+		'6',
+		'7':
+		*escflg = 1 // it's a byte
+		l := int64(c) - '0'
+		for i := 2; i > 0; i-- {
+			c = getc()
+			if c >= '0' && c <= '7' {
+				l = l*8 + int64(c) - '0'
+				continue
+			}
+
+			Yyerror("non-octal character in escape sequence: %c", c)
+			ungetc(c)
+		}
+
+		if l > 255 {
+			Yyerror("octal escape value > 255: %d", l)
+		}
+
+		*val = l
+		return false
+
+	case 'a':
+		c = '\a'
+	case 'b':
+		c = '\b'
+	case 'f':
+		c = '\f'
+	case 'n':
+		c = '\n'
+	case 'r':
+		c = '\r'
+	case 't':
+		c = '\t'
+	case 'v':
+		c = '\v'
+	case '\\':
+		c = '\\'
+
+	default:
+		if c != e {
+			Yyerror("unknown escape sequence: %c", c)
+		}
+	}
+
+	*val = int64(c)
+	return false
+
+hex:
+	l := int64(0)
+	for ; i > 0; i-- {
+		c = getc()
+		if c >= '0' && c <= '9' {
+			l = l*16 + int64(c) - '0'
+			continue
+		}
+
+		if c >= 'a' && c <= 'f' {
+			l = l*16 + int64(c) - 'a' + 10
+			continue
+		}
+
+		if c >= 'A' && c <= 'F' {
+			l = l*16 + int64(c) - 'A' + 10
+			continue
+		}
+
+		Yyerror("non-hex character in escape sequence: %c", c)
+		ungetc(c)
+		break
+	}
+
+	if u != 0 && (l > utf8.MaxRune || (0xd800 <= l && l < 0xe000)) {
+		Yyerror("invalid Unicode code point in escape sequence: %#x", l)
+		l = utf8.RuneError
+	}
+
+	*val = l
+	return false
+}
+
+var syms = []struct {
+	name    string
+	lexical int
+	etype   int
+	op      int
+}{
+	/* basic types */
+	{"int8", LNAME, TINT8, OXXX},
+	{"int16", LNAME, TINT16, OXXX},
+	{"int32", LNAME, TINT32, OXXX},
+	{"int64", LNAME, TINT64, OXXX},
+	{"uint8", LNAME, TUINT8, OXXX},
+	{"uint16", LNAME, TUINT16, OXXX},
+	{"uint32", LNAME, TUINT32, OXXX},
+	{"uint64", LNAME, TUINT64, OXXX},
+	{"float32", LNAME, TFLOAT32, OXXX},
+	{"float64", LNAME, TFLOAT64, OXXX},
+	{"complex64", LNAME, TCOMPLEX64, OXXX},
+	{"complex128", LNAME, TCOMPLEX128, OXXX},
+	{"bool", LNAME, TBOOL, OXXX},
+	{"string", LNAME, TSTRING, OXXX},
+	{"any", LNAME, TANY, OXXX},
+	{"break", LBREAK, Txxx, OXXX},
+	{"case", LCASE, Txxx, OXXX},
+	{"chan", LCHAN, Txxx, OXXX},
+	{"const", LCONST, Txxx, OXXX},
+	{"continue", LCONTINUE, Txxx, OXXX},
+	{"default", LDEFAULT, Txxx, OXXX},
+	{"else", LELSE, Txxx, OXXX},
+	{"defer", LDEFER, Txxx, OXXX},
+	{"fallthrough", LFALL, Txxx, OXXX},
+	{"for", LFOR, Txxx, OXXX},
+	{"func", LFUNC, Txxx, OXXX},
+	{"go", LGO, Txxx, OXXX},
+	{"goto", LGOTO, Txxx, OXXX},
+	{"if", LIF, Txxx, OXXX},
+	{"import", LIMPORT, Txxx, OXXX},
+	{"interface", LINTERFACE, Txxx, OXXX},
+	{"map", LMAP, Txxx, OXXX},
+	{"package", LPACKAGE, Txxx, OXXX},
+	{"range", LRANGE, Txxx, OXXX},
+	{"return", LRETURN, Txxx, OXXX},
+	{"select", LSELECT, Txxx, OXXX},
+	{"struct", LSTRUCT, Txxx, OXXX},
+	{"switch", LSWITCH, Txxx, OXXX},
+	{"type", LTYPE, Txxx, OXXX},
+	{"var", LVAR, Txxx, OXXX},
+	{"append", LNAME, Txxx, OAPPEND},
+	{"cap", LNAME, Txxx, OCAP},
+	{"close", LNAME, Txxx, OCLOSE},
+	{"complex", LNAME, Txxx, OCOMPLEX},
+	{"copy", LNAME, Txxx, OCOPY},
+	{"delete", LNAME, Txxx, ODELETE},
+	{"imag", LNAME, Txxx, OIMAG},
+	{"len", LNAME, Txxx, OLEN},
+	{"make", LNAME, Txxx, OMAKE},
+	{"new", LNAME, Txxx, ONEW},
+	{"panic", LNAME, Txxx, OPANIC},
+	{"print", LNAME, Txxx, OPRINT},
+	{"println", LNAME, Txxx, OPRINTN},
+	{"real", LNAME, Txxx, OREAL},
+	{"recover", LNAME, Txxx, ORECOVER},
+	{"notwithstanding", LIGNORE, Txxx, OXXX},
+	{"thetruthofthematter", LIGNORE, Txxx, OXXX},
+	{"despiteallobjections", LIGNORE, Txxx, OXXX},
+	{"whereas", LIGNORE, Txxx, OXXX},
+	{"insofaras", LIGNORE, Txxx, OXXX},
+}
+
+func lexinit() {
+	var lex int
+	var s *Sym
+	var s1 *Sym
+	var t *Type
+	var etype int
+
+	/*
+	 * initialize basic types array
+	 * initialize known symbols
+	 */
+	for i := 0; i < len(syms); i++ {
+		lex = syms[i].lexical
+		s = Lookup(syms[i].name)
+		s.Lexical = uint16(lex)
+
+		etype = syms[i].etype
+		if etype != Txxx {
+			if etype < 0 || etype >= len(Types) {
+				Fatal("lexinit: %s bad etype", s.Name)
+			}
+			s1 = Pkglookup(syms[i].name, builtinpkg)
+			t = Types[etype]
+			if t == nil {
+				t = typ(etype)
+				t.Sym = s1
+
+				if etype != TANY && etype != TSTRING {
+					dowidth(t)
+				}
+				Types[etype] = t
+			}
+
+			s1.Lexical = LNAME
+			s1.Def = typenod(t)
+			continue
+		}
+
+		etype = syms[i].op
+		if etype != OXXX {
+			s1 = Pkglookup(syms[i].name, builtinpkg)
+			s1.Lexical = LNAME
+			s1.Def = Nod(ONAME, nil, nil)
+			s1.Def.Sym = s1
+			s1.Def.Etype = uint8(etype)
+		}
+	}
+
+	// logically, the type of a string literal.
+	// types[TSTRING] is the named type string
+	// (the type of x in var x string or var x = "hello").
+	// this is the ideal form
+	// (the type of x in const x = "hello").
+	idealstring = typ(TSTRING)
+
+	idealbool = typ(TBOOL)
+
+	s = Pkglookup("true", builtinpkg)
+	s.Def = Nodbool(true)
+	s.Def.Sym = Lookup("true")
+	s.Def.Type = idealbool
+
+	s = Pkglookup("false", builtinpkg)
+	s.Def = Nodbool(false)
+	s.Def.Sym = Lookup("false")
+	s.Def.Type = idealbool
+
+	s = Lookup("_")
+	s.Block = -100
+	s.Def = Nod(ONAME, nil, nil)
+	s.Def.Sym = s
+	Types[TBLANK] = typ(TBLANK)
+	s.Def.Type = Types[TBLANK]
+	nblank = s.Def
+
+	s = Pkglookup("_", builtinpkg)
+	s.Block = -100
+	s.Def = Nod(ONAME, nil, nil)
+	s.Def.Sym = s
+	Types[TBLANK] = typ(TBLANK)
+	s.Def.Type = Types[TBLANK]
+
+	Types[TNIL] = typ(TNIL)
+	s = Pkglookup("nil", builtinpkg)
+	var v Val
+	v.Ctype = CTNIL
+	s.Def = nodlit(v)
+	s.Def.Sym = s
+}
+
+func lexinit1() {
+	// t = interface { Error() string }
+	rcvr := typ(TSTRUCT)
+
+	rcvr.Type = typ(TFIELD)
+	rcvr.Type.Type = Ptrto(typ(TSTRUCT))
+	rcvr.Funarg = 1
+	in := typ(TSTRUCT)
+	in.Funarg = 1
+	out := typ(TSTRUCT)
+	out.Type = typ(TFIELD)
+	out.Type.Type = Types[TSTRING]
+	out.Funarg = 1
+	f := typ(TFUNC)
+	*getthis(f) = rcvr
+	*Getoutarg(f) = out
+	*getinarg(f) = in
+	f.Thistuple = 1
+	f.Intuple = 0
+	f.Outnamed = 0
+	f.Outtuple = 1
+	t := typ(TINTER)
+	t.Type = typ(TFIELD)
+	t.Type.Sym = Lookup("Error")
+	t.Type.Type = f
+
+	// error type
+	s := Lookup("error")
+
+	s.Lexical = LNAME
+	s1 := Pkglookup("error", builtinpkg)
+	errortype = t
+	errortype.Sym = s1
+	s1.Lexical = LNAME
+	s1.Def = typenod(errortype)
+
+	// byte alias
+	s = Lookup("byte")
+
+	s.Lexical = LNAME
+	s1 = Pkglookup("byte", builtinpkg)
+	bytetype = typ(TUINT8)
+	bytetype.Sym = s1
+	s1.Lexical = LNAME
+	s1.Def = typenod(bytetype)
+
+	// rune alias
+	s = Lookup("rune")
+
+	s.Lexical = LNAME
+	s1 = Pkglookup("rune", builtinpkg)
+	runetype = typ(TINT32)
+	runetype.Sym = s1
+	s1.Lexical = LNAME
+	s1.Def = typenod(runetype)
+}
+
+func lexfini() {
+	var s *Sym
+	var lex int
+	var etype int
+	var i int
+
+	for i = 0; i < len(syms); i++ {
+		lex = syms[i].lexical
+		if lex != LNAME {
+			continue
+		}
+		s = Lookup(syms[i].name)
+		s.Lexical = uint16(lex)
+
+		etype = syms[i].etype
+		if etype != Txxx && (etype != TANY || Debug['A'] != 0) && s.Def == nil {
+			s.Def = typenod(Types[etype])
+			s.Origpkg = builtinpkg
+		}
+
+		etype = syms[i].op
+		if etype != OXXX && s.Def == nil {
+			s.Def = Nod(ONAME, nil, nil)
+			s.Def.Sym = s
+			s.Def.Etype = uint8(etype)
+			s.Origpkg = builtinpkg
+		}
+	}
+
+	// backend-specific builtin types (e.g. int).
+	for i = range Thearch.Typedefs {
+		s = Lookup(Thearch.Typedefs[i].Name)
+		if s.Def == nil {
+			s.Def = typenod(Types[Thearch.Typedefs[i].Etype])
+			s.Origpkg = builtinpkg
+		}
+	}
+
+	// there's only so much table-driven we can handle.
+	// these are special cases.
+	s = Lookup("byte")
+
+	if s.Def == nil {
+		s.Def = typenod(bytetype)
+		s.Origpkg = builtinpkg
+	}
+
+	s = Lookup("error")
+	if s.Def == nil {
+		s.Def = typenod(errortype)
+		s.Origpkg = builtinpkg
+	}
+
+	s = Lookup("rune")
+	if s.Def == nil {
+		s.Def = typenod(runetype)
+		s.Origpkg = builtinpkg
+	}
+
+	s = Lookup("nil")
+	if s.Def == nil {
+		var v Val
+		v.Ctype = CTNIL
+		s.Def = nodlit(v)
+		s.Def.Sym = s
+		s.Origpkg = builtinpkg
+	}
+
+	s = Lookup("iota")
+	if s.Def == nil {
+		s.Def = Nod(OIOTA, nil, nil)
+		s.Def.Sym = s
+		s.Origpkg = builtinpkg
+	}
+
+	s = Lookup("true")
+	if s.Def == nil {
+		s.Def = Nodbool(true)
+		s.Def.Sym = s
+		s.Origpkg = builtinpkg
+	}
+
+	s = Lookup("false")
+	if s.Def == nil {
+		s.Def = Nodbool(false)
+		s.Def.Sym = s
+		s.Origpkg = builtinpkg
+	}
+
+	nodfp = Nod(ONAME, nil, nil)
+	nodfp.Type = Types[TINT32]
+	nodfp.Xoffset = 0
+	nodfp.Class = PPARAM
+	nodfp.Sym = Lookup(".fp")
+}
+
+var lexn = []struct {
+	lex  int
+	name string
+}{
+	{LANDAND, "ANDAND"},
+	{LANDNOT, "ANDNOT"},
+	{LASOP, "ASOP"},
+	{LBREAK, "BREAK"},
+	{LCASE, "CASE"},
+	{LCHAN, "CHAN"},
+	{LCOLAS, "COLAS"},
+	{LCOMM, "<-"},
+	{LCONST, "CONST"},
+	{LCONTINUE, "CONTINUE"},
+	{LDDD, "..."},
+	{LDEC, "DEC"},
+	{LDEFAULT, "DEFAULT"},
+	{LDEFER, "DEFER"},
+	{LELSE, "ELSE"},
+	{LEQ, "EQ"},
+	{LFALL, "FALL"},
+	{LFOR, "FOR"},
+	{LFUNC, "FUNC"},
+	{LGE, "GE"},
+	{LGO, "GO"},
+	{LGOTO, "GOTO"},
+	{LGT, "GT"},
+	{LIF, "IF"},
+	{LIMPORT, "IMPORT"},
+	{LINC, "INC"},
+	{LINTERFACE, "INTERFACE"},
+	{LLE, "LE"},
+	{LLITERAL, "LITERAL"},
+	{LLSH, "LSH"},
+	{LLT, "LT"},
+	{LMAP, "MAP"},
+	{LNAME, "NAME"},
+	{LNE, "NE"},
+	{LOROR, "OROR"},
+	{LPACKAGE, "PACKAGE"},
+	{LRANGE, "RANGE"},
+	{LRETURN, "RETURN"},
+	{LRSH, "RSH"},
+	{LSELECT, "SELECT"},
+	{LSTRUCT, "STRUCT"},
+	{LSWITCH, "SWITCH"},
+	{LTYPE, "TYPE"},
+	{LVAR, "VAR"},
+}
+
+func lexname(lex int) string {
+	for i := 0; i < len(lexn); i++ {
+		if lexn[i].lex == lex {
+			return lexn[i].name
+		}
+	}
+	return fmt.Sprintf("LEX-%d", lex)
+}
+
+var yytfix = []struct {
+	have string
+	want string
+}{
+	{"$end", "EOF"},
+	{"LASOP", "op="},
+	{"LBREAK", "break"},
+	{"LCASE", "case"},
+	{"LCHAN", "chan"},
+	{"LCOLAS", ":="},
+	{"LCONST", "const"},
+	{"LCONTINUE", "continue"},
+	{"LDDD", "..."},
+	{"LDEFAULT", "default"},
+	{"LDEFER", "defer"},
+	{"LELSE", "else"},
+	{"LFALL", "fallthrough"},
+	{"LFOR", "for"},
+	{"LFUNC", "func"},
+	{"LGO", "go"},
+	{"LGOTO", "goto"},
+	{"LIF", "if"},
+	{"LIMPORT", "import"},
+	{"LINTERFACE", "interface"},
+	{"LMAP", "map"},
+	{"LNAME", "name"},
+	{"LPACKAGE", "package"},
+	{"LRANGE", "range"},
+	{"LRETURN", "return"},
+	{"LSELECT", "select"},
+	{"LSTRUCT", "struct"},
+	{"LSWITCH", "switch"},
+	{"LTYPE", "type"},
+	{"LVAR", "var"},
+	{"LANDAND", "&&"},
+	{"LANDNOT", "&^"},
+	{"LBODY", "{"},
+	{"LCOMM", "<-"},
+	{"LDEC", "--"},
+	{"LINC", "++"},
+	{"LEQ", "=="},
+	{"LGE", ">="},
+	{"LGT", ">"},
+	{"LLE", "<="},
+	{"LLT", "<"},
+	{"LLSH", "<<"},
+	{"LRSH", ">>"},
+	{"LOROR", "||"},
+	{"LNE", "!="},
+	// spell out to avoid confusion with punctuation in error messages
+	{"';'", "semicolon or newline"},
+	{"','", "comma"},
+}
+
+func init() {
+	yyErrorVerbose = true
+
+Outer:
+	for i, s := range yyToknames {
+		// Apply yytfix if possible.
+		for _, fix := range yytfix {
+			if s == fix.have {
+				yyToknames[i] = fix.want
+				continue Outer
+			}
+		}
+
+		// Turn 'x' into x.
+		if len(s) == 3 && s[0] == '\'' && s[2] == '\'' {
+			yyToknames[i] = s[1:2]
+			continue
+		}
+	}
+}
+
+func pkgnotused(lineno int, path string, name string) {
+	// If the package was imported with a name other than the final
+	// import path element, show it explicitly in the error message.
+	// This handles both renamed imports and imports of packages
+	// whose declared package name differs from the last path element.
+	// The path is split on / always, even on Windows, because Go
+	// import paths always use forward slashes.
+	elem := path
+	if i := strings.LastIndex(elem, "/"); i >= 0 {
+		elem = elem[i+1:]
+	}
+	if name == "" || elem == name {
+		yyerrorl(int(lineno), "imported and not used: %q", path)
+	} else {
+		yyerrorl(int(lineno), "imported and not used: %q as %s", path, name)
+	}
+}
+
+func mkpackage(pkgname string) {
+	if localpkg.Name == "" {
+		if pkgname == "_" {
+			Yyerror("invalid package name _")
+		}
+		localpkg.Name = pkgname
+	} else {
+		if pkgname != localpkg.Name {
+			Yyerror("package %s; expected %s", pkgname, localpkg.Name)
+		}
+		for _, s := range localpkg.Syms {
+			if s.Def == nil {
+				continue
+			}
+			if s.Def.Op == OPACK {
+				// throw away top-level package name leftover
+				// from previous file.
+				// leave s->block set to cause redeclaration
+				// errors if a conflicting top-level name is
+				// introduced by a different file.
+				if !s.Def.Used && nsyntaxerrors == 0 {
+					pkgnotused(int(s.Def.Lineno), s.Def.Pkg.Path, s.Name)
+				}
+				s.Def = nil
+				continue
+			}
+
+			if s.Def.Sym != s {
+				// throw away top-level name left over
+				// from previous import . "x"
+				if s.Def.Pack != nil && !s.Def.Pack.Used && nsyntaxerrors == 0 {
+					pkgnotused(int(s.Def.Pack.Lineno), s.Def.Pack.Pkg.Path, "")
+					s.Def.Pack.Used = true
+				}
+
+				s.Def = nil
+				continue
+			}
+		}
+	}
+
+	if outfile == "" {
+		p := infile
+		if i := strings.LastIndex(p, "/"); i >= 0 {
+			p = p[i+1:]
+		}
+		if Ctxt.Windows != 0 {
+			if i := strings.LastIndex(p, `\`); i >= 0 {
+				p = p[i+1:]
+			}
+		}
+		if i := strings.LastIndex(p, "."); i >= 0 {
+			p = p[:i]
+		}
+		outfile = fmt.Sprintf("%s.%c", p, Thearch.Thechar)
+	}
+}
diff --git a/src/cmd/compile/internal/gc/mkbuiltin.go b/src/cmd/compile/internal/gc/mkbuiltin.go
new file mode 100644
index 0000000..b2362a6
--- /dev/null
+++ b/src/cmd/compile/internal/gc/mkbuiltin.go
@@ -0,0 +1,104 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// Generate builtin.go from the files named on the command line (runtime.go and unsafe.go).
+// Run this after changing runtime.go and unsafe.go
+// or after changing the export metadata format in the compiler.
+// Either way, you need to have a working compiler binary first.
+package main
+
+import (
+	"bufio"
+	"fmt"
+	"go/build"
+	"io"
+	"log"
+	"os"
+	"os/exec"
+	"runtime"
+	"strings"
+)
+
+func main() {
+	gochar, err := build.ArchChar(runtime.GOARCH)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	f, err := os.Create("builtin.go")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer f.Close()
+	w := bufio.NewWriter(f)
+
+	fmt.Fprintln(w, "// AUTO-GENERATED by mkbuiltin.go; DO NOT EDIT")
+	fmt.Fprintln(w, "")
+	fmt.Fprintln(w, "package gc")
+
+	for _, name := range os.Args[1:] {
+		mkbuiltin(w, gochar, name)
+	}
+
+	if err := w.Flush(); err != nil {
+		log.Fatal(err)
+	}
+}
+
+// Compile the .go file, read the import data from the generated object file, and write it out as a Go string.
+func mkbuiltin(w io.Writer, gochar string, name string) {
+	if err := exec.Command("go", "tool", gochar+"g", "-A", "builtin/"+name+".go").Run(); err != nil {
+		log.Fatal(err)
+	}
+	obj := fmt.Sprintf("%s.%s", name, gochar)
+	defer os.Remove(obj)
+
+	r, err := os.Open(obj)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer r.Close()
+	scanner := bufio.NewScanner(r)
+
+	// Look for $$ that introduces imports.
+	for scanner.Scan() {
+		if strings.Contains(scanner.Text(), "$$") {
+			goto Begin
+		}
+	}
+	log.Fatal("did not find beginning of imports")
+
+Begin:
+	initfunc := fmt.Sprintf("init_%s_function", name)
+
+	fmt.Fprintf(w, "\nconst %simport = \"\" +\n", name)
+
+	// sys.go claims to be in package PACKAGE to avoid
+	// conflicts during "6g sys.go". Rename PACKAGE to the
+	// real package name.
+	replacer := strings.NewReplacer("PACKAGE", name)
+
+	// Process imports, stopping at $$ that closes them.
+	for scanner.Scan() {
+		p := scanner.Text()
+		if strings.Contains(p, "$$") {
+			goto End
+		}
+
+		// Chop leading white space.
+		p = strings.TrimLeft(p, " \t")
+
+		// Cut out the declaration of init_<name>_function - it doesn't exist.
+		if strings.Contains(p, initfunc) {
+			continue
+		}
+
+		fmt.Fprintf(w, "\t%q +\n", replacer.Replace(p)+"\n")
+	}
+	log.Fatal("did not find end of imports")
+
+End:
+	fmt.Fprintf(w, "\t\"$$\\n\"\n")
+}
diff --git a/src/cmd/compile/internal/gc/mparith2.go b/src/cmd/compile/internal/gc/mparith2.go
new file mode 100644
index 0000000..2c7e517
--- /dev/null
+++ b/src/cmd/compile/internal/gc/mparith2.go
@@ -0,0 +1,300 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+	"cmd/compile/internal/big"
+	"cmd/internal/obj"
+	"fmt"
+)
+
+/// implements fix (multi-precision integer) arithmetic
+
+func mpsetovf(a *Mpint) {
+	a.Val.SetUint64(1) // avoid spurious div-zero errors
+	a.Ovf = true
+}
+
+func mptestovf(a *Mpint, extra int) bool {
+	// We don't need to be precise here; any reasonable upper limit would do.
+	// For now, use the existing limit so that all tests pass unchanged.
+	if a.Val.BitLen()+extra > Mpprec {
+		mpsetovf(a)
+	}
+	return a.Ovf
+}
+
+func mpmovefixfix(a, b *Mpint) {
+	a.Val.Set(&b.Val)
+}
+
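+// mpmovefltfix sets a to the integer value of the float b, trying
+// an exact conversion first and then rounding toward and away from
+// zero at slightly reduced precision. It returns 0 on success and
+// -1 if no conversion succeeds.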
+func mpmovefltfix(a *Mpint, b *Mpflt) int {
+	if _, acc := b.Val.Int(&a.Val); acc == big.Exact {
+		return 0
+	}
+
+	const delta = 16 // a reasonably small number of bits > 0
+	var t big.Float
+	t.SetPrec(Mpprec - delta)
+
+	// try rounding down a little
+	t.SetMode(big.ToZero)
+	t.Set(&b.Val)
+	if _, acc := t.Int(&a.Val); acc == big.Exact {
+		return 0
+	}
+
+	// try rounding up a little
+	t.SetMode(big.AwayFromZero)
+	t.Set(&b.Val)
+	if _, acc := t.Int(&a.Val); acc == big.Exact {
+		return 0
+	}
+
+	return -1
+}
+
+func mpaddfixfix(a, b *Mpint, quiet int) {
+	if a.Ovf || b.Ovf {
+		if nsavederrors+nerrors == 0 {
+			Yyerror("ovf in mpaddfixfix")
+		}
+		mpsetovf(a)
+		return
+	}
+
+	a.Val.Add(&a.Val, &b.Val)
+
+	if mptestovf(a, 0) && quiet == 0 {
+		Yyerror("constant addition overflow")
+	}
+}
+
+func mpsubfixfix(a, b *Mpint) {
+	if a.Ovf || b.Ovf {
+		if nsavederrors+nerrors == 0 {
+			Yyerror("ovf in mpsubfixfix")
+		}
+		mpsetovf(a)
+		return
+	}
+
+	a.Val.Sub(&a.Val, &b.Val)
+
+	if mptestovf(a, 0) {
+		Yyerror("constant subtraction overflow")
+	}
+}
+
+func mpmulfixfix(a, b *Mpint) {
+	if a.Ovf || b.Ovf {
+		if nsavederrors+nerrors == 0 {
+			Yyerror("ovf in mpmulfixfix")
+		}
+		mpsetovf(a)
+		return
+	}
+
+	a.Val.Mul(&a.Val, &b.Val)
+
+	if mptestovf(a, 0) {
+		Yyerror("constant multiplication overflow")
+	}
+}
+
+func mpdivfixfix(a, b *Mpint) {
+	if a.Ovf || b.Ovf {
+		if nsavederrors+nerrors == 0 {
+			Yyerror("ovf in mpdivfixfix")
+		}
+		mpsetovf(a)
+		return
+	}
+
+	a.Val.Quo(&a.Val, &b.Val)
+
+	if mptestovf(a, 0) {
+		// can only happen for div-0 which should be checked elsewhere
+		Yyerror("constant division overflow")
+	}
+}
+
+func mpmodfixfix(a, b *Mpint) {
+	if a.Ovf || b.Ovf {
+		if nsavederrors+nerrors == 0 {
+			Yyerror("ovf in mpmodfixfix")
+		}
+		mpsetovf(a)
+		return
+	}
+
+	a.Val.Rem(&a.Val, &b.Val)
+
+	if mptestovf(a, 0) {
+		// should never happen
+		Yyerror("constant modulo overflow")
+	}
+}
+
+func mporfixfix(a, b *Mpint) {
+	if a.Ovf || b.Ovf {
+		if nsavederrors+nerrors == 0 {
+			Yyerror("ovf in mporfixfix")
+		}
+		mpsetovf(a)
+		return
+	}
+
+	a.Val.Or(&a.Val, &b.Val)
+}
+
+func mpandfixfix(a, b *Mpint) {
+	if a.Ovf || b.Ovf {
+		if nsavederrors+nerrors == 0 {
+			Yyerror("ovf in mpandfixfix")
+		}
+		mpsetovf(a)
+		return
+	}
+
+	a.Val.And(&a.Val, &b.Val)
+}
+
+func mpandnotfixfix(a, b *Mpint) {
+	if a.Ovf || b.Ovf {
+		if nsavederrors+nerrors == 0 {
+			Yyerror("ovf in mpandnotfixfix")
+		}
+		mpsetovf(a)
+		return
+	}
+
+	a.Val.AndNot(&a.Val, &b.Val)
+}
+
+func mpxorfixfix(a, b *Mpint) {
+	if a.Ovf || b.Ovf {
+		if nsavederrors+nerrors == 0 {
+			Yyerror("ovf in mpxorfixfix")
+		}
+		mpsetovf(a)
+		return
+	}
+
+	a.Val.Xor(&a.Val, &b.Val)
+}
+
+// shift left by s (or right by -s)
+func Mpshiftfix(a *Mpint, s int) {
+	switch {
+	case s > 0:
+		if mptestovf(a, s) {
+			Yyerror("constant shift overflow")
+			return
+		}
+		a.Val.Lsh(&a.Val, uint(s))
+	case s < 0:
+		a.Val.Rsh(&a.Val, uint(-s))
+	}
+}
+
+func mplshfixfix(a, b *Mpint) {
+	if a.Ovf || b.Ovf {
+		if nsavederrors+nerrors == 0 {
+			Yyerror("ovf in mplshfixfix")
+		}
+		mpsetovf(a)
+		return
+	}
+
+	s := Mpgetfix(b)
+	if s < 0 || s >= Mpprec {
+		Yyerror("stupid shift: %d", s)
+		Mpmovecfix(a, 0)
+		return
+	}
+
+	Mpshiftfix(a, int(s))
+}
+
+func mprshfixfix(a, b *Mpint) {
+	if a.Ovf || b.Ovf {
+		if nsavederrors+nerrors == 0 {
+			Yyerror("ovf in mprshfixfix")
+		}
+		mpsetovf(a)
+		return
+	}
+
+	s := Mpgetfix(b)
+	if s < 0 || s >= Mpprec {
+		Yyerror("stupid shift: %d", s)
+		if a.Val.Sign() < 0 {
+			Mpmovecfix(a, -1)
+		} else {
+			Mpmovecfix(a, 0)
+		}
+		return
+	}
+
+	Mpshiftfix(a, int(-s))
+}
+
+func Mpcmpfixfix(a, b *Mpint) int {
+	return a.Val.Cmp(&b.Val)
+}
+
+func mpcmpfixc(b *Mpint, c int64) int {
+	return b.Val.Cmp(big.NewInt(c))
+}
+
+func mpnegfix(a *Mpint) {
+	a.Val.Neg(&a.Val)
+}
+
+func Mpgetfix(a *Mpint) int64 {
+	if a.Ovf {
+		if nsavederrors+nerrors == 0 {
+			Yyerror("constant overflow")
+		}
+		return 0
+	}
+
+	return a.Val.Int64()
+}
+
+func Mpmovecfix(a *Mpint, c int64) {
+	a.Val.SetInt64(c)
+}
+
+func mpatofix(a *Mpint, as string) {
+	_, ok := a.Val.SetString(as, 0)
+	if !ok {
+		// required syntax is [+-][0[x]]d*
+		// At the moment we lose precise error cause;
+		// the old code distinguished between:
+		// - malformed hex constant
+		// - malformed octal constant
+		// - malformed decimal constant
+		// TODO(gri) use different conversion function
+		Yyerror("malformed integer constant: %s", as)
+		a.Val.SetUint64(0)
+		return
+	}
+	if mptestovf(a, 0) {
+		Yyerror("constant too large: %s", as)
+	}
+}
+
+func (x *Mpint) String() string {
+	return Bconv(x, 0)
+}
+
+func Bconv(xval *Mpint, flag int) string {
+	if flag&obj.FmtSharp != 0 {
+		return fmt.Sprintf("%#x", &xval.Val)
+	}
+	return xval.Val.String()
+}
diff --git a/src/cmd/compile/internal/gc/mparith3.go b/src/cmd/compile/internal/gc/mparith3.go
new file mode 100644
index 0000000..181e91c
--- /dev/null
+++ b/src/cmd/compile/internal/gc/mparith3.go
@@ -0,0 +1,235 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+	"cmd/compile/internal/big"
+	"cmd/internal/obj"
+	"fmt"
+	"math"
+)
+
+/// implements float (multi-precision floating-point) arithmetic
+
+func newMpflt() *Mpflt {
+	var a Mpflt
+	a.Val.SetPrec(Mpprec)
+	return &a
+}
+
+func Mpmovefixflt(a *Mpflt, b *Mpint) {
+	if b.Ovf {
+		// sign doesn't really matter but copy anyway
+		a.Val.SetInf(b.Val.Sign() < 0)
+		return
+	}
+	a.Val.SetInt(&b.Val)
+}
+
+func mpmovefltflt(a *Mpflt, b *Mpflt) {
+	a.Val.Set(&b.Val)
+}
+
+func mpaddfltflt(a *Mpflt, b *Mpflt) {
+	if Mpdebug {
+		fmt.Printf("\n%v + %v", a, b)
+	}
+
+	a.Val.Add(&a.Val, &b.Val)
+
+	if Mpdebug {
+		fmt.Printf(" = %v\n\n", a)
+	}
+}
+
+func mpaddcflt(a *Mpflt, c float64) {
+	var b Mpflt
+
+	Mpmovecflt(&b, c)
+	mpaddfltflt(a, &b)
+}
+
+func mpsubfltflt(a *Mpflt, b *Mpflt) {
+	if Mpdebug {
+		fmt.Printf("\n%v - %v", a, b)
+	}
+
+	a.Val.Sub(&a.Val, &b.Val)
+
+	if Mpdebug {
+		fmt.Printf(" = %v\n\n", a)
+	}
+}
+
+func mpmulfltflt(a *Mpflt, b *Mpflt) {
+	if Mpdebug {
+		fmt.Printf("%v\n * %v\n", a, b)
+	}
+
+	a.Val.Mul(&a.Val, &b.Val)
+
+	if Mpdebug {
+		fmt.Printf(" = %v\n\n", a)
+	}
+}
+
+func mpmulcflt(a *Mpflt, c float64) {
+	var b Mpflt
+
+	Mpmovecflt(&b, c)
+	mpmulfltflt(a, &b)
+}
+
+func mpdivfltflt(a *Mpflt, b *Mpflt) {
+	if Mpdebug {
+		fmt.Printf("%v\n / %v\n", a, b)
+	}
+
+	a.Val.Quo(&a.Val, &b.Val)
+
+	if Mpdebug {
+		fmt.Printf(" = %v\n\n", a)
+	}
+}
+
+func mpcmpfltflt(a *Mpflt, b *Mpflt) int {
+	return a.Val.Cmp(&b.Val)
+}
+
+func mpcmpfltc(b *Mpflt, c float64) int {
+	var a Mpflt
+
+	Mpmovecflt(&a, c)
+	return mpcmpfltflt(b, &a)
+}
+
+func mpgetfltN(a *Mpflt, prec int, bias int) float64 {
+	var x float64
+	switch prec {
+	case 53:
+		x, _ = a.Val.Float64()
+	case 24:
+		// We should be using a.Val.Float32() here but that seems incorrect
+		// for certain denormal values (all.bash fails). The current code
+		// appears to work for all existing test cases, though there ought
+		// to be issues with denormal numbers that are incorrectly rounded.
+		// TODO(gri) replace with a.Val.Float32() once correctly working
+		// See also: https://github.com/golang/go/issues/10321
+		var t Mpflt
+		t.Val.SetPrec(24).Set(&a.Val)
+		x, _ = t.Val.Float64()
+	default:
+		panic("unreachable")
+	}
+
+	// check for overflow
+	if math.IsInf(x, 0) && nsavederrors+nerrors == 0 {
+		Yyerror("mpgetflt ovf")
+	}
+
+	return x
+}
+
+func mpgetflt(a *Mpflt) float64 {
+	return mpgetfltN(a, 53, -1023)
+}
+
+func mpgetflt32(a *Mpflt) float64 {
+	return mpgetfltN(a, 24, -127)
+}
+
+func Mpmovecflt(a *Mpflt, c float64) {
+	if Mpdebug {
+		fmt.Printf("\nconst %g", c)
+	}
+
+	a.Val.SetFloat64(c)
+
+	if Mpdebug {
+		fmt.Printf(" = %v\n", a)
+	}
+}
+
+func mpnegflt(a *Mpflt) {
+	a.Val.Neg(&a.Val)
+}
+
+//
+// floating point input
+// required syntax is [+-]d*[.]d*[e[+-]d*] or [+-]0xH*[e[+-]d*]
+//
+func mpatoflt(a *Mpflt, as string) {
+	for len(as) > 0 && (as[0] == ' ' || as[0] == '\t') {
+		as = as[1:]
+	}
+
+	f, ok := a.Val.SetString(as)
+	if !ok {
+		// At the moment we lose precise error cause;
+		// the old code additionally distinguished between:
+		// - malformed hex constant
+		// - decimal point in hex constant
+		// - constant exponent out of range
+		// - decimal point and binary point in constant
+		// TODO(gri) use different conversion function or check separately
+		Yyerror("malformed constant: %s", as)
+		a.Val.SetUint64(0)
+	}
+
+	if f.IsInf() {
+		Yyerror("constant too large: %s", as)
+		a.Val.SetUint64(0)
+	}
+}
+
+func (f *Mpflt) String() string {
+	return Fconv(f, 0)
+}
+
+func Fconv(fvp *Mpflt, flag int) string {
+	if flag&obj.FmtSharp == 0 {
+		return fvp.Val.Format('b', 0)
+	}
+
+	// use decimal format for error messages
+
+	// determine sign
+	f := &fvp.Val
+	var sign string
+	if fvp.Val.Signbit() {
+		sign = "-"
+		f = new(big.Float).Abs(f)
+	} else if flag&obj.FmtSign != 0 {
+		sign = "+"
+	}
+
+	// Use fmt formatting if in float64 range (common case).
+	if x, _ := f.Float64(); !math.IsInf(x, 0) {
+		return fmt.Sprintf("%s%.6g", sign, x)
+	}
+
+	// Out of float64 range. Do approximate manual to decimal
+	// conversion to avoid precise but possibly slow Float
+	// formatting. The exponent is > 0 since a negative out-
+	// of-range exponent would have underflowed and led to 0.
+	// f = mant * 2**exp
+	var mant big.Float
+	exp := float64(f.MantExp(&mant)) // 0.5 <= mant < 1.0, exp > 0
+
+	// approximate float64 mantissa m and decimal exponent d
+	// f ~ m * 10**d
+	m, _ := mant.Float64()            // 0.5 <= m < 1.0
+	d := exp * (math.Ln2 / math.Ln10) // log_10(2)
+
+	// adjust m for truncated (integer) decimal exponent e
+	e := int64(d)
+	m *= math.Pow(10, d-float64(e))
+	for m >= 10 {
+		m /= 10
+		e++
+	}
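+	// for example, f = 2**1000 gives mant = 0.5, exp = 1001, so
+	// d ≈ 301.33, e = 301, m ≈ 0.5 * 10**0.33 ≈ 1.0715, and the
+	// result prints as 1.07151e+301.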
+
+	return fmt.Sprintf("%s%.5fe+%d", sign, m, e)
+}
diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go
new file mode 100644
index 0000000..2afd786
--- /dev/null
+++ b/src/cmd/compile/internal/gc/obj.go
@@ -0,0 +1,434 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+	"cmd/internal/obj"
+	"fmt"
+	"strconv"
+)
+
+/*
+ * architecture-independent object file output
+ */
+const (
+	ArhdrSize = 60
+)
+
+func formathdr(arhdr []byte, name string, size int64) {
+	copy(arhdr[:], fmt.Sprintf("%-16s%-12d%-6d%-6d%-8o%-10d`\n", name, 0, 0, 0, 0644, size))
+}
+
+func dumpobj() {
+	var err error
+	bout, err = obj.Bopenw(outfile)
+	if err != nil {
+		Flusherrors()
+		fmt.Printf("can't create %s: %v\n", outfile, err)
+		errorexit()
+	}
+
+	startobj := int64(0)
+	var arhdr [ArhdrSize]byte
+	if writearchive != 0 {
+		obj.Bwritestring(bout, "!<arch>\n")
+		arhdr = [ArhdrSize]byte{}
+		bout.Write(arhdr[:])
+		startobj = obj.Boffset(bout)
+	}
+
+	fmt.Fprintf(bout, "go object %s %s %s %s\n", obj.Getgoos(), obj.Getgoarch(), obj.Getgoversion(), obj.Expstring())
+	dumpexport()
+
+	if writearchive != 0 {
+		bout.Flush()
+		size := obj.Boffset(bout) - startobj
+		if size&1 != 0 {
+			obj.Bputc(bout, 0)
+		}
+		obj.Bseek(bout, startobj-ArhdrSize, 0)
+		formathdr(arhdr[:], "__.PKGDEF", size)
+		bout.Write(arhdr[:])
+		bout.Flush()
+
+		obj.Bseek(bout, startobj+size+(size&1), 0)
+		arhdr = [ArhdrSize]byte{}
+		bout.Write(arhdr[:])
+		startobj = obj.Boffset(bout)
+		fmt.Fprintf(bout, "go object %s %s %s %s\n", obj.Getgoos(), obj.Getgoarch(), obj.Getgoversion(), obj.Expstring())
+	}
+
+	if pragcgobuf != "" {
+		if writearchive != 0 {
+			// write empty export section; must be before cgo section
+			fmt.Fprintf(bout, "\n$$\n\n$$\n\n")
+		}
+
+		fmt.Fprintf(bout, "\n$$  // cgo\n")
+		fmt.Fprintf(bout, "%s\n$$\n\n", pragcgobuf)
+	}
+
+	fmt.Fprintf(bout, "\n!\n")
+
+	var externs *NodeList
+	if externdcl != nil {
+		externs = externdcl.End
+	}
+
+	dumpglobls()
+	dumptypestructs()
+
+	// Dump extra globals.
+	tmp := externdcl
+
+	if externs != nil {
+		externdcl = externs.Next
+	}
+	dumpglobls()
+	externdcl = tmp
+
+	zero := Pkglookup("zerovalue", Runtimepkg)
+	ggloblsym(zero, int32(zerosize), obj.DUPOK|obj.RODATA)
+
+	dumpdata()
+	obj.Writeobjdirect(Ctxt, bout)
+
+	if writearchive != 0 {
+		bout.Flush()
+		size := obj.Boffset(bout) - startobj
+		if size&1 != 0 {
+			obj.Bputc(bout, 0)
+		}
+		obj.Bseek(bout, startobj-ArhdrSize, 0)
+		name := fmt.Sprintf("_go_.%c", Thearch.Thechar)
+		formathdr(arhdr[:], name, size)
+		bout.Write(arhdr[:])
+	}
+
+	obj.Bterm(bout)
+}
+
+func dumpglobls() {
+	var n *Node
+
+	// add globals
+	for l := externdcl; l != nil; l = l.Next {
+		n = l.N
+		if n.Op != ONAME {
+			continue
+		}
+
+		if n.Type == nil {
+			Fatal("external %v nil type\n", n)
+		}
+		if n.Class == PFUNC {
+			continue
+		}
+		if n.Sym.Pkg != localpkg {
+			continue
+		}
+		dowidth(n.Type)
+
+		ggloblnod(n)
+	}
+
+	for l := funcsyms; l != nil; l = l.Next {
+		n = l.N
+		dsymptr(n.Sym, 0, n.Sym.Def.Func.Shortname.Sym, 0)
+		ggloblsym(n.Sym, int32(Widthptr), obj.DUPOK|obj.RODATA)
+	}
+
+	// Do not reprocess funcsyms on next dumpglobls call.
+	funcsyms = nil
+}
+
+func Bputname(b *obj.Biobuf, s *obj.LSym) {
+	obj.Bwritestring(b, s.Name)
+	obj.Bputc(b, 0)
+}
+
+func Linksym(s *Sym) *obj.LSym {
+	if s == nil {
+		return nil
+	}
+	if s.Lsym != nil {
+		return s.Lsym
+	}
+	var name string
+	if isblanksym(s) {
+		name = "_"
+	} else if s.Linkname != "" {
+		name = s.Linkname
+	} else {
+		name = s.Pkg.Prefix + "." + s.Name
+	}
+
+	ls := obj.Linklookup(Ctxt, name, 0)
+	s.Lsym = ls
+	return ls
+}
+
+func duintxx(s *Sym, off int, v uint64, wid int) int {
+	// Update symbol data directly instead of generating a
+	// DATA instruction that liblink will have to interpret later.
+	// This reduces compilation time and memory usage.
+	off = int(Rnd(int64(off), int64(wid)))
+
+	return int(obj.Setuintxx(Ctxt, Linksym(s), int64(off), v, int64(wid)))
+}
+
+func duint8(s *Sym, off int, v uint8) int {
+	return duintxx(s, off, uint64(v), 1)
+}
+
+func duint16(s *Sym, off int, v uint16) int {
+	return duintxx(s, off, uint64(v), 2)
+}
+
+func duint32(s *Sym, off int, v uint32) int {
+	return duintxx(s, off, uint64(v), 4)
+}
+
+func duint64(s *Sym, off int, v uint64) int {
+	return duintxx(s, off, v, 8)
+}
+
+func duintptr(s *Sym, off int, v uint64) int {
+	return duintxx(s, off, v, Widthptr)
+}
+
+var stringsym_gen int
+
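+// stringsym returns a read-only symbol containing the header and
+// data for the string constant s, generating it on first use.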
+func stringsym(s string) *Sym {
+	var symname string
+	var pkg *Pkg
+	if len(s) > 100 {
+		// huge strings are made static to avoid long names
+		stringsym_gen++
+		symname = fmt.Sprintf(".gostring.%d", stringsym_gen)
+
+		pkg = localpkg
+	} else {
+		// small strings get named by their contents,
+		// so that multiple modules using the same string
+		// can share it.
+		symname = strconv.Quote(s)
+		pkg = gostringpkg
+	}
+
+	sym := Pkglookup(symname, pkg)
+
+	// SymUniq flag indicates that data is generated already
+	if sym.Flags&SymUniq != 0 {
+		return sym
+	}
+	sym.Flags |= SymUniq
+	sym.Def = newname(sym)
+
+	off := 0
+
+	// string header
+	off = dsymptr(sym, off, sym, Widthptr+Widthint)
+	off = duintxx(sym, off, uint64(len(s)), Widthint)
+
+	// string data
+	var m int
+	for n := 0; n < len(s); n += m {
+		m = 8
+		if m > len(s)-n {
+			m = len(s) - n
+		}
+		off = dsname(sym, off, s[n:n+m])
+	}
+
+	off = duint8(sym, off, 0)                    // terminating NUL for runtime
+	off = (off + Widthptr - 1) &^ (Widthptr - 1) // round to pointer alignment
+	ggloblsym(sym, int32(off), obj.DUPOK|obj.RODATA|obj.LOCAL)
+
+	return sym
+}
+
+var slicebytes_gen int
+
+func slicebytes(nam *Node, s string, len int) {
+	var m int
+
+	slicebytes_gen++
+	symname := fmt.Sprintf(".gobytes.%d", slicebytes_gen)
+	sym := Pkglookup(symname, localpkg)
+	sym.Def = newname(sym)
+
+	off := 0
+	for n := 0; n < len; n += m {
+		m = 8
+		if m > len-n {
+			m = len - n
+		}
+		off = dsname(sym, off, s[n:n+m])
+	}
+
+	ggloblsym(sym, int32(off), obj.NOPTR|obj.LOCAL)
+
+	if nam.Op != ONAME {
+		Fatal("slicebytes %v", nam)
+	}
+	off = int(nam.Xoffset)
+	off = dsymptr(nam.Sym, off, sym, 0)
+	off = duintxx(nam.Sym, off, uint64(len), Widthint)
+	duintxx(nam.Sym, off, uint64(len), Widthint)
+}
+
+func dstringptr(s *Sym, off int, str string) int {
+	off = int(Rnd(int64(off), int64(Widthptr)))
+	p := Thearch.Gins(obj.ADATA, nil, nil)
+	p.From.Type = obj.TYPE_MEM
+	p.From.Name = obj.NAME_EXTERN
+	p.From.Sym = Linksym(s)
+	p.From.Offset = int64(off)
+	p.From3.Type = obj.TYPE_CONST
+	p.From3.Offset = int64(Widthptr)
+
+	Datastring(str+"\x00", &p.To) // TODO(rsc): Remove NUL
+	p.To.Type = obj.TYPE_ADDR
+	p.To.Etype = Simtype[TINT]
+	off += Widthptr
+
+	return off
+}
+
+func Datastring(s string, a *obj.Addr) {
+	sym := stringsym(s)
+	a.Type = obj.TYPE_MEM
+	a.Name = obj.NAME_EXTERN
+	a.Sym = Linksym(sym)
+	a.Node = sym.Def
+	a.Offset = int64(Widthptr) + int64(Widthint) // skip header
+	a.Etype = Simtype[TINT]
+}
+
+func datagostring(sval string, a *obj.Addr) {
+	sym := stringsym(sval)
+	a.Type = obj.TYPE_MEM
+	a.Name = obj.NAME_EXTERN
+	a.Sym = Linksym(sym)
+	a.Node = sym.Def
+	a.Offset = 0 // header
+	a.Etype = TSTRING
+}
+
+func dgostringptr(s *Sym, off int, str string) int {
+	if str == "" {
+		return duintptr(s, off, 0)
+	}
+	return dgostrlitptr(s, off, &str)
+}
+
+func dgostrlitptr(s *Sym, off int, lit *string) int {
+	if lit == nil {
+		return duintptr(s, off, 0)
+	}
+	off = int(Rnd(int64(off), int64(Widthptr)))
+	p := Thearch.Gins(obj.ADATA, nil, nil)
+	p.From.Type = obj.TYPE_MEM
+	p.From.Name = obj.NAME_EXTERN
+	p.From.Sym = Linksym(s)
+	p.From.Offset = int64(off)
+	p.From3.Type = obj.TYPE_CONST
+	p.From3.Offset = int64(Widthptr)
+	datagostring(*lit, &p.To)
+	p.To.Type = obj.TYPE_ADDR
+	p.To.Etype = Simtype[TINT]
+	off += Widthptr
+
+	return off
+}
+
+func dsname(s *Sym, off int, t string) int {
+	p := Thearch.Gins(obj.ADATA, nil, nil)
+	p.From.Type = obj.TYPE_MEM
+	p.From.Name = obj.NAME_EXTERN
+	p.From.Offset = int64(off)
+	p.From.Sym = Linksym(s)
+	p.From3.Type = obj.TYPE_CONST
+	p.From3.Offset = int64(len(t))
+
+	p.To.Type = obj.TYPE_SCONST
+	p.To.Val = t
+	return off + len(t)
+}
+
+func dsymptr(s *Sym, off int, x *Sym, xoff int) int {
+	off = int(Rnd(int64(off), int64(Widthptr)))
+
+	p := Thearch.Gins(obj.ADATA, nil, nil)
+	p.From.Type = obj.TYPE_MEM
+	p.From.Name = obj.NAME_EXTERN
+	p.From.Sym = Linksym(s)
+	p.From.Offset = int64(off)
+	p.From3.Type = obj.TYPE_CONST
+	p.From3.Offset = int64(Widthptr)
+	p.To.Type = obj.TYPE_ADDR
+	p.To.Name = obj.NAME_EXTERN
+	p.To.Sym = Linksym(x)
+	p.To.Offset = int64(xoff)
+	off += Widthptr
+
+	return off
+}
+
+func gdata(nam *Node, nr *Node, wid int) {
+	if nr.Op == OLITERAL {
+		switch nr.Val.Ctype {
+		case CTCPLX:
+			gdatacomplex(nam, nr.Val.U.(*Mpcplx))
+			return
+
+		case CTSTR:
+			gdatastring(nam, nr.Val.U.(string))
+			return
+		}
+	}
+
+	p := Thearch.Gins(obj.ADATA, nam, nr)
+	p.From3.Type = obj.TYPE_CONST
+	p.From3.Offset = int64(wid)
+}
+
+func gdatacomplex(nam *Node, cval *Mpcplx) {
+	w := cplxsubtype(int(nam.Type.Etype))
+	w = int(Types[w].Width)
+
+	p := Thearch.Gins(obj.ADATA, nam, nil)
+	p.From3.Type = obj.TYPE_CONST
+	p.From3.Offset = int64(w)
+	p.To.Type = obj.TYPE_FCONST
+	p.To.Val = mpgetflt(&cval.Real)
+
+	p = Thearch.Gins(obj.ADATA, nam, nil)
+	p.From3.Type = obj.TYPE_CONST
+	p.From3.Offset = int64(w)
+	p.From.Offset += int64(w)
+	p.To.Type = obj.TYPE_FCONST
+	p.To.Val = mpgetflt(&cval.Imag)
+}
+
+func gdatastring(nam *Node, sval string) {
+	var nod1 Node
+
+	p := Thearch.Gins(obj.ADATA, nam, nil)
+	Datastring(sval, &p.To)
+	p.From3.Type = obj.TYPE_CONST
+	p.From3.Offset = Types[Tptr].Width
+	p.To.Type = obj.TYPE_ADDR
+
+	//print("%P\n", p);
+
+	Nodconst(&nod1, Types[TINT], int64(len(sval)))
+
+	p = Thearch.Gins(obj.ADATA, nam, &nod1)
+	p.From3.Type = obj.TYPE_CONST
+	p.From3.Offset = int64(Widthint)
+	p.From.Offset += int64(Widthptr)
+}
diff --git a/src/cmd/compile/internal/gc/opnames.go b/src/cmd/compile/internal/gc/opnames.go
new file mode 100644
index 0000000..fc03ec6
--- /dev/null
+++ b/src/cmd/compile/internal/gc/opnames.go
@@ -0,0 +1,162 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+// auto generated by go tool dist
+var opnames = []string{
+	OXXX:             "XXX",
+	ONAME:            "NAME",
+	ONONAME:          "NONAME",
+	OTYPE:            "TYPE",
+	OPACK:            "PACK",
+	OLITERAL:         "LITERAL",
+	OADD:             "ADD",
+	OSUB:             "SUB",
+	OOR:              "OR",
+	OXOR:             "XOR",
+	OADDSTR:          "ADDSTR",
+	OADDR:            "ADDR",
+	OANDAND:          "ANDAND",
+	OAPPEND:          "APPEND",
+	OARRAYBYTESTR:    "ARRAYBYTESTR",
+	OARRAYBYTESTRTMP: "ARRAYBYTESTRTMP",
+	OARRAYRUNESTR:    "ARRAYRUNESTR",
+	OSTRARRAYBYTE:    "STRARRAYBYTE",
+	OSTRARRAYBYTETMP: "STRARRAYBYTETMP",
+	OSTRARRAYRUNE:    "STRARRAYRUNE",
+	OAS:              "AS",
+	OAS2:             "AS2",
+	OAS2FUNC:         "AS2FUNC",
+	OAS2RECV:         "AS2RECV",
+	OAS2MAPR:         "AS2MAPR",
+	OAS2DOTTYPE:      "AS2DOTTYPE",
+	OASOP:            "ASOP",
+	OCALL:            "CALL",
+	OCALLFUNC:        "CALLFUNC",
+	OCALLMETH:        "CALLMETH",
+	OCALLINTER:       "CALLINTER",
+	OCALLPART:        "CALLPART",
+	OCAP:             "CAP",
+	OCLOSE:           "CLOSE",
+	OCLOSURE:         "CLOSURE",
+	OCMPIFACE:        "CMPIFACE",
+	OCMPSTR:          "CMPSTR",
+	OCOMPLIT:         "COMPLIT",
+	OMAPLIT:          "MAPLIT",
+	OSTRUCTLIT:       "STRUCTLIT",
+	OARRAYLIT:        "ARRAYLIT",
+	OPTRLIT:          "PTRLIT",
+	OCONV:            "CONV",
+	OCONVIFACE:       "CONVIFACE",
+	OCONVNOP:         "CONVNOP",
+	OCOPY:            "COPY",
+	ODCL:             "DCL",
+	ODCLFUNC:         "DCLFUNC",
+	ODCLFIELD:        "DCLFIELD",
+	ODCLCONST:        "DCLCONST",
+	ODCLTYPE:         "DCLTYPE",
+	ODELETE:          "DELETE",
+	ODOT:             "DOT",
+	ODOTPTR:          "DOTPTR",
+	ODOTMETH:         "DOTMETH",
+	ODOTINTER:        "DOTINTER",
+	OXDOT:            "XDOT",
+	ODOTTYPE:         "DOTTYPE",
+	ODOTTYPE2:        "DOTTYPE2",
+	OEQ:              "EQ",
+	ONE:              "NE",
+	OLT:              "LT",
+	OLE:              "LE",
+	OGE:              "GE",
+	OGT:              "GT",
+	OIND:             "IND",
+	OINDEX:           "INDEX",
+	OINDEXMAP:        "INDEXMAP",
+	OKEY:             "KEY",
+	OPARAM:           "PARAM",
+	OLEN:             "LEN",
+	OMAKE:            "MAKE",
+	OMAKECHAN:        "MAKECHAN",
+	OMAKEMAP:         "MAKEMAP",
+	OMAKESLICE:       "MAKESLICE",
+	OMUL:             "MUL",
+	ODIV:             "DIV",
+	OMOD:             "MOD",
+	OLSH:             "LSH",
+	ORSH:             "RSH",
+	OAND:             "AND",
+	OANDNOT:          "ANDNOT",
+	ONEW:             "NEW",
+	ONOT:             "NOT",
+	OCOM:             "COM",
+	OPLUS:            "PLUS",
+	OMINUS:           "MINUS",
+	OOROR:            "OROR",
+	OPANIC:           "PANIC",
+	OPRINT:           "PRINT",
+	OPRINTN:          "PRINTN",
+	OPAREN:           "PAREN",
+	OSEND:            "SEND",
+	OSLICE:           "SLICE",
+	OSLICEARR:        "SLICEARR",
+	OSLICESTR:        "SLICESTR",
+	OSLICE3:          "SLICE3",
+	OSLICE3ARR:       "SLICE3ARR",
+	ORECOVER:         "RECOVER",
+	ORECV:            "RECV",
+	ORUNESTR:         "RUNESTR",
+	OSELRECV:         "SELRECV",
+	OSELRECV2:        "SELRECV2",
+	OIOTA:            "IOTA",
+	OREAL:            "REAL",
+	OIMAG:            "IMAG",
+	OCOMPLEX:         "COMPLEX",
+	OBLOCK:           "BLOCK",
+	OBREAK:           "BREAK",
+	OCASE:            "CASE",
+	OXCASE:           "XCASE",
+	OCONTINUE:        "CONTINUE",
+	ODEFER:           "DEFER",
+	OEMPTY:           "EMPTY",
+	OFALL:            "FALL",
+	OXFALL:           "XFALL",
+	OFOR:             "FOR",
+	OGOTO:            "GOTO",
+	OIF:              "IF",
+	OLABEL:           "LABEL",
+	OPROC:            "PROC",
+	ORANGE:           "RANGE",
+	ORETURN:          "RETURN",
+	OSELECT:          "SELECT",
+	OSWITCH:          "SWITCH",
+	OTYPESW:          "TYPESW",
+	OTCHAN:           "TCHAN",
+	OTMAP:            "TMAP",
+	OTSTRUCT:         "TSTRUCT",
+	OTINTER:          "TINTER",
+	OTFUNC:           "TFUNC",
+	OTARRAY:          "TARRAY",
+	ODDD:             "DDD",
+	ODDDARG:          "DDDARG",
+	OINLCALL:         "INLCALL",
+	OEFACE:           "EFACE",
+	OITAB:            "ITAB",
+	OSPTR:            "SPTR",
+	OCLOSUREVAR:      "CLOSUREVAR",
+	OCFUNC:           "CFUNC",
+	OCHECKNIL:        "CHECKNIL",
+	OVARKILL:         "VARKILL",
+	OREGISTER:        "REGISTER",
+	OINDREG:          "INDREG",
+	OCMP:             "CMP",
+	ODEC:             "DEC",
+	OINC:             "INC",
+	OEXTEND:          "EXTEND",
+	OHMUL:            "HMUL",
+	OLROT:            "LROT",
+	ORROTC:           "RROTC",
+	ORETJMP:          "RETJMP",
+	OEND:             "END",
+}
diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go
new file mode 100644
index 0000000..b3fd282
--- /dev/null
+++ b/src/cmd/compile/internal/gc/order.go
@@ -0,0 +1,1170 @@
+// Copyright 2012 The Go Authors.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+	"fmt"
+	"strings"
+)
+
+// Rewrite tree to use separate statements to enforce
+// order of evaluation.  Makes walk easier, because it
+// can (after this runs) reorder at will within an expression.
+//
+// Rewrite x op= y into x = x op y.
+//
+// Introduce temporaries as needed by runtime routines.
+// For example, the map runtime routines take the map key
+// by reference, so make sure all map keys are addressable
+// by copying them to temporaries as needed.
+// The same is true for channel operations.
+//
+// Arrange that map index expressions only appear in direct
+// assignments x = m[k] or m[k] = x, never in larger expressions.
+//
+// Arrange that receive expressions only appear in direct assignments
+// x = <-c or as standalone statements <-c, never in larger expressions.
+
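+// For example (a sketch of the effect, not literal compiler output),
+//
+//	m[k] = <-c
+//
+// becomes
+//
+//	tmp := <-c
+//	m[k] = tmp
+//
+// so that the receive appears only in a direct assignment.
+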
+// TODO(rsc): The temporary introduction during multiple assignments
+// should be moved into this file, so that the temporaries can be cleaned
+// and so that conversions implicit in the OAS2FUNC and OAS2RECV
+// nodes can be made explicit and then have their temporaries cleaned.
+
+// TODO(rsc): Goto and multilevel break/continue can jump over
+// inserted VARKILL annotations. Work out a way to handle these.
+// The current implementation is safe, in that it will execute correctly.
+// But it won't reuse temporaries as aggressively as it might, and
+// it can result in unnecessary zeroing of those variables in the function
+// prologue.
+
+// Order holds state during the ordering process.
+type Order struct {
+	out  *NodeList // list of generated statements
+	temp *NodeList // head of stack of temporary variables
+	free *NodeList // free list of NodeList* structs (for use in temp)
+}
+
+// Order rewrites fn->nbody to apply the ordering constraints
+// described in the comment at the top of the file.
+func order(fn *Node) {
+	if Debug['W'] > 1 {
+		s := fmt.Sprintf("\nbefore order %v", fn.Nname.Sym)
+		dumplist(s, fn.Nbody)
+	}
+
+	orderblock(&fn.Nbody)
+}
+
+// Ordertemp allocates a new temporary with the given type,
+// pushes it onto the temp stack, and returns it.
+// If clear is true, ordertemp emits code to zero the temporary.
+func ordertemp(t *Type, order *Order, clear bool) *Node {
+	var_ := temp(t)
+	if clear {
+		a := Nod(OAS, var_, nil)
+		typecheck(&a, Etop)
+		order.out = list(order.out, a)
+	}
+
+	l := order.free
+	if l == nil {
+		l = new(NodeList)
+	}
+	order.free = l.Next
+	l.Next = order.temp
+	l.N = var_
+	order.temp = l
+	return var_
+}
+
+// Ordercopyexpr behaves like ordertemp but also emits
+// code to initialize the temporary to the value n.
+//
+// The clear argument is provided for use when the evaluation
+// of tmp = n turns into a function call that is passed a pointer
+// to the temporary as the output space. If the call blocks before
+// tmp has been written, the garbage collector will still treat the
+// temporary as live, so we must zero it before entering that call.
+// Today, this only happens for channel receive operations.
+// (The other candidate would be map access, but map access
+// returns a pointer to the result data instead of taking a pointer
+// to be filled in.)
+func ordercopyexpr(n *Node, t *Type, order *Order, clear int) *Node {
+	var_ := ordertemp(t, order, clear != 0)
+	a := Nod(OAS, var_, n)
+	typecheck(&a, Etop)
+	order.out = list(order.out, a)
+	return var_
+}
+
+// Ordercheapexpr returns a cheap version of n.
+// The definition of cheap is that n is a variable or constant.
+// If not, ordercheapexpr allocates a new tmp, emits tmp = n,
+// and then returns tmp.
+func ordercheapexpr(n *Node, order *Order) *Node {
+	if n == nil {
+		return nil
+	}
+	switch n.Op {
+	case ONAME, OLITERAL:
+		return n
+	case OLEN, OCAP:
+		l := ordercheapexpr(n.Left, order)
+		if l == n.Left {
+			return n
+		}
+		a := Nod(OXXX, nil, nil)
+		*a = *n
+		a.Orig = a
+		a.Left = l
+		typecheck(&a, Erv)
+		return a
+	}
+
+	return ordercopyexpr(n, n.Type, order, 0)
+}
+
+// Ordersafeexpr returns a safe version of n.
+// The definition of safe is that n can appear multiple times
+// without violating the semantics of the original program,
+// and that assigning to the safe version has the same effect
+// as assigning to the original n.
+//
+// The intended use is to apply to x when rewriting x += y into x = x + y.
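+//
+// For example, the safe version of m[k] is t1[t2], where
+//	t1 = m
+//	t2 = k
+// (copies emitted only when m and k are not already cheap), so the
+// expression can appear on both sides of the rewritten assignment.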
+func ordersafeexpr(n *Node, order *Order) *Node {
+	switch n.Op {
+	case ONAME, OLITERAL:
+		return n
+
+	case ODOT, OLEN, OCAP:
+		l := ordersafeexpr(n.Left, order)
+		if l == n.Left {
+			return n
+		}
+		a := Nod(OXXX, nil, nil)
+		*a = *n
+		a.Orig = a
+		a.Left = l
+		typecheck(&a, Erv)
+		return a
+
+	case ODOTPTR, OIND:
+		l := ordercheapexpr(n.Left, order)
+		if l == n.Left {
+			return n
+		}
+		a := Nod(OXXX, nil, nil)
+		*a = *n
+		a.Orig = a
+		a.Left = l
+		typecheck(&a, Erv)
+		return a
+
+	case OINDEX, OINDEXMAP:
+		var l *Node
+		if Isfixedarray(n.Left.Type) {
+			l = ordersafeexpr(n.Left, order)
+		} else {
+			l = ordercheapexpr(n.Left, order)
+		}
+		r := ordercheapexpr(n.Right, order)
+		if l == n.Left && r == n.Right {
+			return n
+		}
+		a := Nod(OXXX, nil, nil)
+		*a = *n
+		a.Orig = a
+		a.Left = l
+		a.Right = r
+		typecheck(&a, Erv)
+		return a
+	}
+
+	Fatal("ordersafeexpr %v", Oconv(int(n.Op), 0))
+	return nil // not reached
+}
+
+// Istemp reports whether n is a temporary variable.
+func istemp(n *Node) bool {
+	if n.Op != ONAME {
+		return false
+	}
+	return strings.HasPrefix(n.Sym.Name, "autotmp_")
+}
+
+// Isaddrokay reports whether it is okay to pass n's address to runtime routines.
+// Taking the address of a variable makes the liveness and optimization analyses
+// lose track of where the variable's lifetime ends. To avoid hurting the analyses
+// of ordinary stack variables, those are not 'isaddrokay'. Temporaries are okay,
+// because we emit explicit VARKILL instructions marking the end of those
+// temporaries' lifetimes.
+func isaddrokay(n *Node) bool {
+	return islvalue(n) && (n.Op != ONAME || n.Class == PEXTERN || istemp(n))
+}
+
+// Orderaddrtemp ensures that *np is okay to pass by address to runtime routines.
+// If the original argument *np is not okay, orderaddrtemp creates a tmp, emits
+// tmp = *np, and then sets *np to the tmp variable.
+func orderaddrtemp(np **Node, order *Order) {
+	n := *np
+	if isaddrokay(n) {
+		return
+	}
+	*np = ordercopyexpr(n, n.Type, order, 0)
+}
+
+// Marktemp returns the top of the temporary variable stack.
+func marktemp(order *Order) *NodeList {
+	return order.temp
+}
+
+// Poptemp pops temporaries off the stack until reaching the mark,
+// which must have been returned by marktemp.
+func poptemp(mark *NodeList, order *Order) {
+	var l *NodeList
+
+	for {
+		l = order.temp
+		if l == mark {
+			break
+		}
+		order.temp = l.Next
+		l.Next = order.free
+		order.free = l
+	}
+}
+
+// Cleantempnopop emits to *out VARKILL instructions for each temporary
+// above the mark on the temporary stack, but it does not pop them
+// from the stack.
+func cleantempnopop(mark *NodeList, order *Order, out **NodeList) {
+	var kill *Node
+
+	for l := order.temp; l != mark; l = l.Next {
+		kill = Nod(OVARKILL, l.N, nil)
+		typecheck(&kill, Etop)
+		*out = list(*out, kill)
+	}
+}
+
+// Cleantemp emits VARKILL instructions for each temporary above the
+// mark on the temporary stack and removes them from the stack.
+func cleantemp(top *NodeList, order *Order) {
+	cleantempnopop(top, order, &order.out)
+	poptemp(top, order)
+}
+
+// Orderstmtlist orders each of the statements in the list.
+func orderstmtlist(l *NodeList, order *Order) {
+	for ; l != nil; l = l.Next {
+		orderstmt(l.N, order)
+	}
+}
+
+// Orderblock orders the block of statements *l onto a new list,
+// and then replaces *l with that list.
+func orderblock(l **NodeList) {
+	var order Order
+	mark := marktemp(&order)
+	orderstmtlist(*l, &order)
+	cleantemp(mark, &order)
+	*l = order.out
+}
+
+// Orderexprinplace orders the side effects in *np and
+// leaves them as the init list of the final *np.
+func orderexprinplace(np **Node, outer *Order) {
+	n := *np
+	var order Order
+	orderexpr(&n, &order, nil)
+	addinit(&n, order.out)
+
+	// insert new temporaries from order
+	// at head of outer list.
+	lp := &order.temp
+
+	for *lp != nil {
+		lp = &(*lp).Next
+	}
+	*lp = outer.temp
+	outer.temp = order.temp
+
+	*np = n
+}
+
+// Orderstmtinplace orders the side effects of the single statement *np
+// and replaces it with the resulting statement list.
+func orderstmtinplace(np **Node) {
+	n := *np
+	var order Order
+	mark := marktemp(&order)
+	orderstmt(n, &order)
+	cleantemp(mark, &order)
+	*np = liststmt(order.out)
+}
+
+// Orderinit moves n's init list to order->out.
+func orderinit(n *Node, order *Order) {
+	orderstmtlist(n.Ninit, order)
+	n.Ninit = nil
+}
+
+// Ismulticall reports whether the list l is f() for a multi-value function.
+// Such an f() could appear as the lone argument to a multi-arg function.
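+// For example, in g(f()) where f returns two values, the argument
+// list of g is the single multi-value call f().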
+func ismulticall(l *NodeList) bool {
+	// one arg only
+	if l == nil || l.Next != nil {
+		return false
+	}
+	n := l.N
+
+	// must be call
+	switch n.Op {
+	default:
+		return false
+
+	case OCALLFUNC, OCALLMETH, OCALLINTER:
+		break
+	}
+
+	// call must return multiple values
+	return n.Left.Type.Outtuple > 1
+}
+
+// Copyret emits t1, t2, ... = n, where n is a function call,
+// and then returns the list t1, t2, ....
+func copyret(n *Node, order *Order) *NodeList {
+	if n.Type.Etype != TSTRUCT || n.Type.Funarg == 0 {
+		Fatal("copyret %v %d", n.Type, n.Left.Type.Outtuple)
+	}
+
+	var l1 *NodeList
+	var l2 *NodeList
+	var tl Iter
+	var tmp *Node
+	for t := Structfirst(&tl, &n.Type); t != nil; t = structnext(&tl) {
+		tmp = temp(t.Type)
+		l1 = list(l1, tmp)
+		l2 = list(l2, tmp)
+	}
+
+	as := Nod(OAS2, nil, nil)
+	as.List = l1
+	as.Rlist = list1(n)
+	typecheck(&as, Etop)
+	orderstmt(as, order)
+
+	return l2
+}
+
+// Ordercallargs orders the list of call arguments *l.
+func ordercallargs(l **NodeList, order *Order) {
+	if ismulticall(*l) {
+		// return f() where f() is multiple values.
+		*l = copyret((*l).N, order)
+	} else {
+		orderexprlist(*l, order)
+	}
+}
+
+// Ordercall orders the call expression n.
+// n->op is OCALLMETH/OCALLFUNC/OCALLINTER or a builtin like OCOPY.
+func ordercall(n *Node, order *Order) {
+	orderexpr(&n.Left, order, nil)
+	orderexpr(&n.Right, order, nil) // ODDDARG temp
+	ordercallargs(&n.List, order)
+}
+
+// Ordermapassign appends n to order->out, introducing temporaries
+// to make sure that all map assignments have the form m[k] = x,
+// where x is addressable.
+// (Orderexpr has already been called on n, so we know k is addressable.)
+//
+// If n is m[k] = x where x is not addressable, the rewrite is:
+//	tmp = x
+//	m[k] = tmp
+//
+// If n is the multiple assignment form ..., m[k], ... = ..., the rewrite is
+//	t1 = m
+//	t2 = k
+//	..., t3, ... = x
+//	t1[t2] = t3
+//
+// The temporaries t1, t2 are needed in case the ... being assigned
+// contain m or k. They are usually unnecessary, but in the unnecessary
+// cases they are also typically registerizable, so not much harm done.
+// And this only applies to the multiple-assignment form.
+// We could do a more precise analysis if needed, like in walk.c.
+//
+// Ordermapassign also inserts these temporaries if needed for
+// calling writebarrierfat with a pointer to n->right.
+func ordermapassign(n *Node, order *Order) {
+	switch n.Op {
+	default:
+		Fatal("ordermapassign %v", Oconv(int(n.Op), 0))
+
+	case OAS:
+		order.out = list(order.out, n)
+
+		// We call writebarrierfat only for values > 4 pointers long. See walk.c.
+		if (n.Left.Op == OINDEXMAP || (needwritebarrier(n.Left, n.Right) && n.Left.Type.Width > int64(4*Widthptr))) && !isaddrokay(n.Right) {
+			m := n.Left
+			n.Left = ordertemp(m.Type, order, false)
+			a := Nod(OAS, m, n.Left)
+			typecheck(&a, Etop)
+			order.out = list(order.out, a)
+		}
+
+	case OAS2, OAS2DOTTYPE, OAS2MAPR, OAS2FUNC:
+		var post *NodeList
+		var m *Node
+		var a *Node
+		for l := n.List; l != nil; l = l.Next {
+			if l.N.Op == OINDEXMAP {
+				m = l.N
+				if !istemp(m.Left) {
+					m.Left = ordercopyexpr(m.Left, m.Left.Type, order, 0)
+				}
+				if !istemp(m.Right) {
+					m.Right = ordercopyexpr(m.Right, m.Right.Type, order, 0)
+				}
+				l.N = ordertemp(m.Type, order, false)
+				a = Nod(OAS, m, l.N)
+				typecheck(&a, Etop)
+				post = list(post, a)
+			}
+		}
+
+		order.out = list(order.out, n)
+		order.out = concat(order.out, post)
+	}
+}
+
+// Orderstmt orders the statement n, appending to order->out.
+// Temporaries created during the statement are cleaned
+// up using VARKILL instructions as possible.
+func orderstmt(n *Node, order *Order) {
+	if n == nil {
+		return
+	}
+
+	lno := int(setlineno(n))
+
+	orderinit(n, order)
+
+	switch n.Op {
+	default:
+		Fatal("orderstmt %v", Oconv(int(n.Op), 0))
+
+	case OVARKILL:
+		order.out = list(order.out, n)
+
+	case OAS:
+		t := marktemp(order)
+		orderexpr(&n.Left, order, nil)
+		orderexpr(&n.Right, order, n.Left)
+		ordermapassign(n, order)
+		cleantemp(t, order)
+
+	case OAS2,
+		OCLOSE,
+		OCOPY,
+		OPRINT,
+		OPRINTN,
+		ORECOVER,
+		ORECV:
+		t := marktemp(order)
+		orderexpr(&n.Left, order, nil)
+		orderexpr(&n.Right, order, nil)
+		orderexprlist(n.List, order)
+		orderexprlist(n.Rlist, order)
+		switch n.Op {
+		case OAS2, OAS2DOTTYPE:
+			ordermapassign(n, order)
+		default:
+			order.out = list(order.out, n)
+		}
+		cleantemp(t, order)
+
+	case OASOP:
+		// Special: rewrite l op= r into l = l op r.
+		// This simplifies quite a few operations;
+		// most important is that it lets us separate
+		// out map read from map write when l is
+		// a map index expression.
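+		// For example, m[k] += v becomes, in effect:
+		//	t1 = m[k]     // map read
+		//	m[k] = t1 + v // map write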
+		t := marktemp(order)
+
+		orderexpr(&n.Left, order, nil)
+		n.Left = ordersafeexpr(n.Left, order)
+		tmp1 := treecopy(n.Left)
+		if tmp1.Op == OINDEXMAP {
+			tmp1.Etype = 0 // now an rvalue not an lvalue
+		}
+		tmp1 = ordercopyexpr(tmp1, n.Left.Type, order, 0)
+		n.Right = Nod(int(n.Etype), tmp1, n.Right)
+		typecheck(&n.Right, Erv)
+		orderexpr(&n.Right, order, nil)
+		n.Etype = 0
+		n.Op = OAS
+		ordermapassign(n, order)
+		cleantemp(t, order)
+
+		// Special: make sure key is addressable,
+	// and make sure OINDEXMAP is not copied out.
+	case OAS2MAPR:
+		t := marktemp(order)
+
+		orderexprlist(n.List, order)
+		r := n.Rlist.N
+		orderexpr(&r.Left, order, nil)
+		orderexpr(&r.Right, order, nil)
+
+		// See case OINDEXMAP below.
+		if r.Right.Op == OARRAYBYTESTR {
+			r.Right.Op = OARRAYBYTESTRTMP
+		}
+		orderaddrtemp(&r.Right, order)
+		ordermapassign(n, order)
+		cleantemp(t, order)
+
+		// Special: avoid copy of func call n->rlist->n.
+	case OAS2FUNC:
+		t := marktemp(order)
+
+		orderexprlist(n.List, order)
+		ordercall(n.Rlist.N, order)
+		ordermapassign(n, order)
+		cleantemp(t, order)
+
+		// Special: use temporary variables to hold result,
+	// so that assertI2T etc. can take the address of the temporary.
+	// No temporary for blank assignment.
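+	// For example, x, ok = y.(T) becomes (roughly):
+	//	t1, ok = y.(T) // assertion writes the value into &t1
+	//	x = t1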
+	case OAS2DOTTYPE:
+		t := marktemp(order)
+
+		orderexprlist(n.List, order)
+		orderexpr(&n.Rlist.N.Left, order, nil) // i in i.(T)
+		if isblank(n.List.N) {
+			order.out = list(order.out, n)
+		} else {
+			typ := n.Rlist.N.Type
+			tmp1 := ordertemp(typ, order, haspointers(typ))
+			order.out = list(order.out, n)
+			r := Nod(OAS, n.List.N, tmp1)
+			typecheck(&r, Etop)
+			ordermapassign(r, order)
+			n.List = list(list1(tmp1), n.List.Next.N)
+		}
+
+		cleantemp(t, order)
+
+		// Special: use temporary variables to hold result,
+	// so that chanrecv can take address of temporary.
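+	// For example, x, ok = <-c becomes (roughly):
+	//	t1, t2 = <-c // chanrecv writes into &t1 and &t2
+	//	x = t1
+	//	ok = t2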
+	case OAS2RECV:
+		t := marktemp(order)
+
+		orderexprlist(n.List, order)
+		orderexpr(&n.Rlist.N.Left, order, nil) // arg to recv
+		ch := n.Rlist.N.Left.Type
+		tmp1 := ordertemp(ch.Type, order, haspointers(ch.Type))
+		var tmp2 *Node
+		if !isblank(n.List.Next.N) {
+			tmp2 = ordertemp(n.List.Next.N.Type, order, false)
+		} else {
+			tmp2 = ordertemp(Types[TBOOL], order, false)
+		}
+		order.out = list(order.out, n)
+		r := Nod(OAS, n.List.N, tmp1)
+		typecheck(&r, Etop)
+		ordermapassign(r, order)
+		r = Nod(OAS, n.List.Next.N, tmp2)
+		typecheck(&r, Etop)
+		ordermapassign(r, order)
+		n.List = list(list1(tmp1), tmp2)
+		cleantemp(t, order)
+
+		// Special: does not save n onto out.
+	case OBLOCK, OEMPTY:
+		orderstmtlist(n.List, order)
+
+		// Special: n->left is not an expression; save as is.
+	case OBREAK,
+		OCONTINUE,
+		ODCL,
+		ODCLCONST,
+		ODCLTYPE,
+		OFALL,
+		OXFALL,
+		OGOTO,
+		OLABEL,
+		ORETJMP:
+		order.out = list(order.out, n)
+
+		// Special: handle call arguments.
+	case OCALLFUNC, OCALLINTER, OCALLMETH:
+		t := marktemp(order)
+
+		ordercall(n, order)
+		order.out = list(order.out, n)
+		cleantemp(t, order)
+
+		// Special: order arguments to inner call but not call itself.
+	case ODEFER, OPROC:
+		t := marktemp(order)
+
+		switch n.Left.Op {
+		// Delete will take the address of the key.
+		// Copy key into new temp and do not clean it
+		// (it persists beyond the statement).
+		case ODELETE:
+			orderexprlist(n.Left.List, order)
+
+			t1 := marktemp(order)
+			np := &n.Left.List.Next.N // map key
+			*np = ordercopyexpr(*np, (*np).Type, order, 0)
+			poptemp(t1, order)
+
+		default:
+			ordercall(n.Left, order)
+		}
+
+		order.out = list(order.out, n)
+		cleantemp(t, order)
+
+	case ODELETE:
+		t := marktemp(order)
+		orderexpr(&n.List.N, order, nil)
+		orderexpr(&n.List.Next.N, order, nil)
+		orderaddrtemp(&n.List.Next.N, order) // map key
+		order.out = list(order.out, n)
+		cleantemp(t, order)
+
+		// Clean temporaries from condition evaluation at
+	// beginning of loop body and after for statement.
+	case OFOR:
+		t := marktemp(order)
+
+		orderexprinplace(&n.Ntest, order)
+		var l *NodeList
+		cleantempnopop(t, order, &l)
+		n.Nbody = concat(l, n.Nbody)
+		orderblock(&n.Nbody)
+		orderstmtinplace(&n.Nincr)
+		order.out = list(order.out, n)
+		cleantemp(t, order)
+
+		// Clean temporaries from condition at
+	// beginning of both branches.
+	case OIF:
+		t := marktemp(order)
+
+		orderexprinplace(&n.Ntest, order)
+		var l *NodeList
+		cleantempnopop(t, order, &l)
+		n.Nbody = concat(l, n.Nbody)
+		l = nil
+		cleantempnopop(t, order, &l)
+		n.Nelse = concat(l, n.Nelse)
+		poptemp(t, order)
+		orderblock(&n.Nbody)
+		orderblock(&n.Nelse)
+		order.out = list(order.out, n)
+
+		// Special: argument will be converted to interface using convT2E
+	// so make sure it is an addressable temporary.
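+	// For example, panic(v) for non-interface v becomes (roughly):
+	//	tmp = v
+	//	panic(tmp) // walk later passes &tmp to convT2E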
+	case OPANIC:
+		t := marktemp(order)
+
+		orderexpr(&n.Left, order, nil)
+		if !Isinter(n.Left.Type) {
+			orderaddrtemp(&n.Left, order)
+		}
+		order.out = list(order.out, n)
+		cleantemp(t, order)
+
+		// n->right is the expression being ranged over.
+	// order it, and then make a copy if we need one.
+	// We almost always do, to ensure that we don't
+	// see any value changes made during the loop.
+	// Usually the copy is cheap (e.g., array pointer, chan, slice, string are all tiny).
+	// The exception is ranging over an array value (not a slice, not a pointer to array),
+	// which must make a copy to avoid seeing updates made during
+	// the range body. Ranging over an array value is uncommon though.
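+	// For example, in for i, v := range s the slice s is first copied
+	// into a temporary, so reassignments of s in the loop body do not
+	// affect the iteration.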
+	case ORANGE:
+		t := marktemp(order)
+
+		orderexpr(&n.Right, order, nil)
+		switch n.Type.Etype {
+		default:
+			Fatal("orderstmt range %v", n.Type)
+
+			// Mark []byte(str) range expression to reuse string backing storage.
+		// It is safe because the storage cannot be mutated.
+		case TARRAY:
+			if n.Right.Op == OSTRARRAYBYTE {
+				n.Right.Op = OSTRARRAYBYTETMP
+			}
+			if count(n.List) < 2 || isblank(n.List.Next.N) {
+				// for i := range x will only use x once, to compute len(x).
+				// No need to copy it.
+				break
+			}
+			fallthrough
+
+			// chan, string, slice, array ranges use value multiple times.
+		// make copy.
+		case TCHAN, TSTRING:
+			r := n.Right
+
+			if r.Type.Etype == TSTRING && r.Type != Types[TSTRING] {
+				r = Nod(OCONV, r, nil)
+				r.Type = Types[TSTRING]
+				typecheck(&r, Erv)
+			}
+
+			n.Right = ordercopyexpr(r, r.Type, order, 0)
+
+			// copy the map value in case it is a map literal.
+		// TODO(rsc): Make tmp = literal expressions reuse tmp.
+		// For maps tmp is just one word so it hardly matters.
+		case TMAP:
+			r := n.Right
+
+			n.Right = ordercopyexpr(r, r.Type, order, 0)
+
+			// n->alloc is the temp for the iterator.
+			n.Alloc = ordertemp(Types[TUINT8], order, true)
+		}
+
+		for l := n.List; l != nil; l = l.Next {
+			orderexprinplace(&l.N, order)
+		}
+		orderblock(&n.Nbody)
+		order.out = list(order.out, n)
+		cleantemp(t, order)
+
+	case ORETURN:
+		ordercallargs(&n.List, order)
+		order.out = list(order.out, n)
+
+		// Special: clean case temporaries in each block entry.
+	// Select must enter one of its blocks, so there is no
+	// need for a cleaning at the end.
+	// Doubly special: evaluation order for select is stricter
+	// than ordinary expressions. Even something like p.c
+	// has to be hoisted into a temporary, so that it cannot be
+	// reordered after the channel evaluation for a different
+	// case (if p were nil, then the timing of the fault would
+	// give this away).
+	case OSELECT:
+		t := marktemp(order)
+
+		var tmp1 *Node
+		var tmp2 *Node
+		var r *Node
+		for l := n.List; l != nil; l = l.Next {
+			if l.N.Op != OXCASE {
+				Fatal("order select case %v", Oconv(int(l.N.Op), 0))
+			}
+			r = l.N.Left
+			setlineno(l.N)
+
+			// Append any new body prologue to ninit.
+			// The next loop will insert ninit into nbody.
+			if l.N.Ninit != nil {
+				Fatal("order select ninit")
+			}
+			if r != nil {
+				switch r.Op {
+				default:
+					Yyerror("unknown op in select %v", Oconv(int(r.Op), 0))
+					Dump("select case", r)
+
+					// If this is case x := <-ch or case x, y := <-ch, the case has
+				// the ODCL nodes to declare x and y. We want to delay that
+				// declaration (and possible allocation) until inside the case body.
+				// Delete the ODCL nodes here and recreate them inside the body below.
+				case OSELRECV, OSELRECV2:
+					if r.Colas {
+						// Use a fresh variable rather than t:
+						// t holds the marktemp mark that poptemp
+						// consumes below.
+						init := r.Ninit
+						if init != nil && init.N.Op == ODCL && init.N.Left == r.Left {
+							init = init.Next
+						}
+						if init != nil && init.N.Op == ODCL && init.N.Left == r.Ntest {
+							init = init.Next
+						}
+						if init == nil {
+							r.Ninit = nil
+						}
+					}
+
+					if r.Ninit != nil {
+						Yyerror("ninit on select recv")
+						dumplist("ninit", r.Ninit)
+					}
+
+					// case x = <-c
+					// case x, ok = <-c
+					// r->left is x, r->ntest is ok, r->right is ORECV, r->right->left is c.
+					// r->left == N means 'case <-c'.
+					// c is always evaluated; x and ok are only evaluated when assigned.
+					orderexpr(&r.Right.Left, order, nil)
+
+					if r.Right.Left.Op != ONAME {
+						r.Right.Left = ordercopyexpr(r.Right.Left, r.Right.Left.Type, order, 0)
+					}
+
+					// Introduce temporary for receive and move actual copy into case body.
+					// avoids problems with target being addressed, as usual.
+					// NOTE: If we wanted to be clever, we could arrange for just one
+					// temporary per distinct type, sharing the temp among all receives
+					// with that temp. Similarly one ok bool could be shared among all
+					// the x,ok receives. Not worth doing until there's a clear need.
+					if r.Left != nil && isblank(r.Left) {
+						r.Left = nil
+					}
+					if r.Left != nil {
+						// use channel element type for temporary to avoid conversions,
+						// such as in case interfacevalue = <-intchan.
+						// the conversion happens in the OAS instead.
+						tmp1 = r.Left
+
+						if r.Colas {
+							tmp2 = Nod(ODCL, tmp1, nil)
+							typecheck(&tmp2, Etop)
+							l.N.Ninit = list(l.N.Ninit, tmp2)
+						}
+
+						r.Left = ordertemp(r.Right.Left.Type.Type, order, haspointers(r.Right.Left.Type.Type))
+						tmp2 = Nod(OAS, tmp1, r.Left)
+						typecheck(&tmp2, Etop)
+						l.N.Ninit = list(l.N.Ninit, tmp2)
+					}
+
+					if r.Ntest != nil && isblank(r.Ntest) {
+						r.Ntest = nil
+					}
+					if r.Ntest != nil {
+						tmp1 = r.Ntest
+						if r.Colas {
+							tmp2 = Nod(ODCL, tmp1, nil)
+							typecheck(&tmp2, Etop)
+							l.N.Ninit = list(l.N.Ninit, tmp2)
+						}
+
+						r.Ntest = ordertemp(tmp1.Type, order, false)
+						tmp2 = Nod(OAS, tmp1, r.Ntest)
+						typecheck(&tmp2, Etop)
+						l.N.Ninit = list(l.N.Ninit, tmp2)
+					}
+
+					orderblock(&l.N.Ninit)
+
+				case OSEND:
+					if r.Ninit != nil {
+						Yyerror("ninit on select send")
+						dumplist("ninit", r.Ninit)
+					}
+
+					// case c <- x
+					// r->left is c, r->right is x, both are always evaluated.
+					orderexpr(&r.Left, order, nil)
+
+					if !istemp(r.Left) {
+						r.Left = ordercopyexpr(r.Left, r.Left.Type, order, 0)
+					}
+					orderexpr(&r.Right, order, nil)
+					if !istemp(r.Right) {
+						r.Right = ordercopyexpr(r.Right, r.Right.Type, order, 0)
+					}
+				}
+			}
+
+			orderblock(&l.N.Nbody)
+		}
+
+		// Now that we have accumulated all the temporaries, clean them.
+		// Also insert any ninit queued during the previous loop.
+		// (The temporary cleaning must follow that ninit work.)
+		for l := n.List; l != nil; l = l.Next {
+			cleantempnopop(t, order, &l.N.Ninit)
+			l.N.Nbody = concat(l.N.Ninit, l.N.Nbody)
+			l.N.Ninit = nil
+		}
+
+		order.out = list(order.out, n)
+		poptemp(t, order)
+
+		// Special: value being sent is passed as a pointer; make it addressable.
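+	// For example, c <- x+y becomes:
+	//	tmp = x + y
+	//	c <- tmp // chansend takes &tmp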
+	case OSEND:
+		t := marktemp(order)
+
+		orderexpr(&n.Left, order, nil)
+		orderexpr(&n.Right, order, nil)
+		orderaddrtemp(&n.Right, order)
+		order.out = list(order.out, n)
+		cleantemp(t, order)
+
+		// TODO(rsc): Clean temporaries more aggressively.
+	// Note that because walkswitch will rewrite some of the
+	// switch into a binary search, this is not as easy as it looks.
+	// (If we ran that code here we could invoke orderstmt on
+	// the if-else chain instead.)
+	// For now just clean all the temporaries at the end.
+	// In practice that's fine.
+	case OSWITCH:
+		t := marktemp(order)
+
+		orderexpr(&n.Ntest, order, nil)
+		for l := n.List; l != nil; l = l.Next {
+			if l.N.Op != OXCASE {
+				Fatal("order switch case %v", Oconv(int(l.N.Op), 0))
+			}
+			orderexprlistinplace(l.N.List, order)
+			orderblock(&l.N.Nbody)
+		}
+
+		order.out = list(order.out, n)
+		cleantemp(t, order)
+	}
+
+	lineno = int32(lno)
+}
+
+// Orderexprlist orders the expression list l into order.
+func orderexprlist(l *NodeList, order *Order) {
+	for ; l != nil; l = l.Next {
+		orderexpr(&l.N, order, nil)
+	}
+}
+
+// Orderexprlistinplace orders the expression list l but saves
+// the side effects on the individual expression ninit lists.
+func orderexprlistinplace(l *NodeList, order *Order) {
+	for ; l != nil; l = l.Next {
+		orderexprinplace(&l.N, order)
+	}
+}
+
+// Orderexpr orders a single expression, appending side
+// effects to order->out as needed.
+// If this is part of an assignment lhs = *np, lhs is given.
+// Otherwise lhs == nil. (When lhs != nil it may be possible
+// to avoid copying the result of the expression to a temporary.)
+func orderexpr(np **Node, order *Order, lhs *Node) {
+	n := *np
+	if n == nil {
+		return
+	}
+
+	lno := int(setlineno(n))
+	orderinit(n, order)
+
+	switch n.Op {
+	default:
+		orderexpr(&n.Left, order, nil)
+		orderexpr(&n.Right, order, nil)
+		orderexprlist(n.List, order)
+		orderexprlist(n.Rlist, order)
+
+		// Addition of strings turns into a function call.
+	// Allocate a temporary to hold the strings.
+	// Fewer than 5 strings use direct runtime helpers.
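+	// For example, a+b+c+d+e+f (six operands) gets a [6]string
+	// temporary to hold the operands for the concatenation call.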
+	case OADDSTR:
+		orderexprlist(n.List, order)
+
+		if count(n.List) > 5 {
+			t := typ(TARRAY)
+			t.Bound = int64(count(n.List))
+			t.Type = Types[TSTRING]
+			n.Alloc = ordertemp(t, order, false)
+		}
+
+		// Mark string(byteSlice) arguments to reuse byteSlice backing
+		// buffer during conversion. String concatenation does not
+		// retain the strings for later use, so it is safe.
+		// However, we can do it only if there is at least one non-empty string literal.
+		// Otherwise if all other arguments are empty strings,
+		// concatstrings will return the reference to the temp string
+		// to the caller.
+		hasbyte := false
+
+		haslit := false
+		for l := n.List; l != nil; l = l.Next {
+			hasbyte = hasbyte || l.N.Op == OARRAYBYTESTR
+			haslit = haslit || l.N.Op == OLITERAL && len(l.N.Val.U.(string)) != 0
+		}
+
+		if haslit && hasbyte {
+			for l := n.List; l != nil; l = l.Next {
+				if l.N.Op == OARRAYBYTESTR {
+					l.N.Op = OARRAYBYTESTRTMP
+				}
+			}
+		}
+
+	case OCMPSTR:
+		orderexpr(&n.Left, order, nil)
+		orderexpr(&n.Right, order, nil)
+
+		// Mark string(byteSlice) arguments to reuse byteSlice backing
+		// buffer during conversion. String comparison does not
+		// retain the strings for later use, so it is safe.
+		if n.Left.Op == OARRAYBYTESTR {
+			n.Left.Op = OARRAYBYTESTRTMP
+		}
+		if n.Right.Op == OARRAYBYTESTR {
+			n.Right.Op = OARRAYBYTESTRTMP
+		}
+
+		// key must be addressable
+	case OINDEXMAP:
+		orderexpr(&n.Left, order, nil)
+
+		orderexpr(&n.Right, order, nil)
+
+		// For x = m[string(k)] where k is []byte, the allocation of
+		// backing bytes for the string can be avoided by reusing
+		// the []byte backing array. It would be nice to handle this
+		// more generally, but since there are no []byte-keyed maps,
+		// this specific case is the one that matters in practice.
+		// See issue 3512.
+		// Nothing can mutate the []byte between this conversion and
+		// the map index, because the map access is forced to happen
+		// immediately following the conversion (by the ordercopyexpr
+		// a few lines below).
+		if n.Etype == 0 && n.Right.Op == OARRAYBYTESTR {
+			n.Right.Op = OARRAYBYTESTRTMP
+		}
+
+		orderaddrtemp(&n.Right, order)
+		if n.Etype == 0 {
+			// use of value (not being assigned);
+			// make copy in temporary.
+			n = ordercopyexpr(n, n.Type, order, 0)
+		}
+
+		// concrete type (not interface) argument must be addressable
+	// temporary to pass to runtime.
+	case OCONVIFACE:
+		orderexpr(&n.Left, order, nil)
+
+		if !Isinter(n.Left.Type) {
+			orderaddrtemp(&n.Left, order)
+		}
+
+	case OANDAND, OOROR:
+		mark := marktemp(order)
+		orderexpr(&n.Left, order, nil)
+
+		// Clean temporaries from first branch at beginning of second.
+		// Leave them on the stack so that they can be killed in the outer
+		// context in case the short circuit is taken.
+		var l *NodeList
+
+		cleantempnopop(mark, order, &l)
+		n.Right.Ninit = concat(l, n.Right.Ninit)
+		orderexprinplace(&n.Right, order)
+
+	case OCALLFUNC,
+		OCALLINTER,
+		OCALLMETH,
+		OCAP,
+		OCOMPLEX,
+		OCOPY,
+		OIMAG,
+		OLEN,
+		OMAKECHAN,
+		OMAKEMAP,
+		OMAKESLICE,
+		ONEW,
+		OREAL,
+		ORECOVER:
+		ordercall(n, order)
+		if lhs == nil || lhs.Op != ONAME || flag_race != 0 {
+			n = ordercopyexpr(n, n.Type, order, 0)
+		}
+
+	case OAPPEND:
+		ordercallargs(&n.List, order)
+		if lhs == nil || flag_race != 0 || lhs.Op != ONAME && !samesafeexpr(lhs, n.List.N) {
+			n = ordercopyexpr(n, n.Type, order, 0)
+		}
+
+	case OSLICE, OSLICEARR, OSLICESTR:
+		orderexpr(&n.Left, order, nil)
+		orderexpr(&n.Right.Left, order, nil)
+		n.Right.Left = ordercheapexpr(n.Right.Left, order)
+		orderexpr(&n.Right.Right, order, nil)
+		n.Right.Right = ordercheapexpr(n.Right.Right, order)
+		if lhs == nil || flag_race != 0 || lhs.Op != ONAME && !samesafeexpr(lhs, n.Left) {
+			n = ordercopyexpr(n, n.Type, order, 0)
+		}
+
+	case OSLICE3, OSLICE3ARR:
+		orderexpr(&n.Left, order, nil)
+		orderexpr(&n.Right.Left, order, nil)
+		n.Right.Left = ordercheapexpr(n.Right.Left, order)
+		orderexpr(&n.Right.Right.Left, order, nil)
+		n.Right.Right.Left = ordercheapexpr(n.Right.Right.Left, order)
+		orderexpr(&n.Right.Right.Right, order, nil)
+		n.Right.Right.Right = ordercheapexpr(n.Right.Right.Right, order)
+		if lhs == nil || flag_race != 0 || lhs.Op != ONAME && !samesafeexpr(lhs, n.Left) {
+			n = ordercopyexpr(n, n.Type, order, 0)
+		}
+
+	case OCLOSURE:
+		if n.Noescape && n.Func.Cvars != nil {
+			n.Alloc = ordertemp(Types[TUINT8], order, false) // walk will fill in correct type
+		}
+
+	case OARRAYLIT, OCALLPART:
+		orderexpr(&n.Left, order, nil)
+		orderexpr(&n.Right, order, nil)
+		orderexprlist(n.List, order)
+		orderexprlist(n.Rlist, order)
+		if n.Noescape {
+			n.Alloc = ordertemp(Types[TUINT8], order, false) // walk will fill in correct type
+		}
+
+	case ODDDARG:
+		if n.Noescape {
+			// The ddd argument does not live beyond the call it is created for.
+			// Allocate a temporary that will be cleaned up when this statement
+			// completes. We could be more aggressive and try to arrange for it
+			// to be cleaned up when the call completes.
+			n.Alloc = ordertemp(n.Type.Type, order, false)
+		}
+
+	case ODOTTYPE, ODOTTYPE2:
+		orderexpr(&n.Left, order, nil)
+		// TODO(rsc): The Isfat is for consistency with componentgen and walkexpr.
+		// It needs to be removed in all three places.
+		// That would allow inlining x.(struct{*int}) the same as x.(*int).
+		if !isdirectiface(n.Type) || Isfat(n.Type) || flag_race != 0 {
+			n = ordercopyexpr(n, n.Type, order, 1)
+		}
+
+	case ORECV:
+		orderexpr(&n.Left, order, nil)
+		n = ordercopyexpr(n, n.Type, order, 1)
+
+	case OEQ, ONE:
+		orderexpr(&n.Left, order, nil)
+		orderexpr(&n.Right, order, nil)
+		t := n.Left.Type
+		if t.Etype == TSTRUCT || Isfixedarray(t) {
+			// for complex comparisons, we need both args to be
+			// addressable so we can pass them to the runtime.
+			orderaddrtemp(&n.Left, order)
+			orderaddrtemp(&n.Right, order)
+		}
+	}
+
+	lineno = int32(lno)
+
+	*np = n
+}
diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go
new file mode 100644
index 0000000..1b67cf2
--- /dev/null
+++ b/src/cmd/compile/internal/gc/pgen.go
@@ -0,0 +1,555 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+	"cmd/internal/obj"
+	"crypto/md5"
+	"fmt"
+	"strings"
+)
+
+// "Portable" code generation.
+
+var makefuncdatasym_nsym int32
+
+func makefuncdatasym(namefmt string, funcdatakind int64) *Sym {
+	var nod Node
+
+	sym := Lookupf(namefmt, makefuncdatasym_nsym)
+	makefuncdatasym_nsym++
+	pnod := newname(sym)
+	pnod.Class = PEXTERN
+	Nodconst(&nod, Types[TINT32], funcdatakind)
+	Thearch.Gins(obj.AFUNCDATA, &nod, pnod)
+	return sym
+}
+
+// gvardef inserts a VARDEF for n into the instruction stream.
+// VARDEF is an annotation for the liveness analysis, marking a place
+// where a complete initialization (definition) of a variable begins.
+// Since the liveness analysis can see initialization of single-word
+// variables quite easy, gvardef is usually only called for multi-word
+// or 'fat' variables, those satisfying isfat(n->type).
+// However, gvardef is also called when a non-fat variable is initialized
+// via a block move; the only time this happens is when you have
+//	return f()
+// for a function with multiple return values exactly matching the return
+// types of the current function.
+//
+// A 'VARDEF x' annotation in the instruction stream tells the liveness
+// analysis to behave as though the variable x is being initialized at that
+// point in the instruction stream. The VARDEF must appear before the
+// actual (multi-instruction) initialization, and it must also appear after
+// any uses of the previous value, if any. For example, if compiling:
+//
+//	x = x[1:]
+//
+// it is important to generate code like:
+//
+//	base, len, cap = pieces of x[1:]
+//	VARDEF x
+//	x = {base, len, cap}
+//
+// If instead the generated code looked like:
+//
+//	VARDEF x
+//	base, len, cap = pieces of x[1:]
+//	x = {base, len, cap}
+//
+// then the liveness analysis would decide the previous value of x was
+// unnecessary even though it is about to be used by the x[1:] computation.
+// Similarly, if the generated code looked like:
+//
+//	base, len, cap = pieces of x[1:]
+//	x = {base, len, cap}
+//	VARDEF x
+//
+// then the liveness analysis will not preserve the new value of x, because
+// the VARDEF appears to have "overwritten" it.
+//
+// VARDEF is a bit of a kludge to work around the fact that the instruction
+// stream is working on single-word values but the liveness analysis
+// wants to work on individual variables, which might be multi-word
+// aggregates. It might make sense at some point to look into letting
+// the liveness analysis work on single-word values as well, although
+// there are complications around interface values, slices, and strings,
+// all of which cannot be treated as individual words.
+//
+// VARKILL is the opposite of VARDEF: it marks a value as no longer needed,
+// even if its address has been taken. That is, a VARKILL annotation asserts
+// that its argument is certainly dead, for use when the liveness analysis
+// would not otherwise be able to deduce that fact.
+
+func gvardefx(n *Node, as int) {
+	if n == nil {
+		Fatal("gvardef nil")
+	}
+	if n.Op != ONAME {
+		Yyerror("gvardef %v; %v", Oconv(int(n.Op), obj.FmtSharp), n)
+		return
+	}
+
+	switch n.Class {
+	case PAUTO, PPARAM, PPARAMOUT:
+		Thearch.Gins(as, nil, n)
+	}
+}
+
+func Gvardef(n *Node) {
+	gvardefx(n, obj.AVARDEF)
+}
+
+func gvarkill(n *Node) {
+	gvardefx(n, obj.AVARKILL)
+}
+
+func removevardef(firstp *obj.Prog) {
+	for p := firstp; p != nil; p = p.Link {
+		for p.Link != nil && (p.Link.As == obj.AVARDEF || p.Link.As == obj.AVARKILL) {
+			p.Link = p.Link.Link
+		}
+		if p.To.Type == obj.TYPE_BRANCH {
+			for p.To.Val.(*obj.Prog) != nil && (p.To.Val.(*obj.Prog).As == obj.AVARDEF || p.To.Val.(*obj.Prog).As == obj.AVARKILL) {
+				p.To.Val = p.To.Val.(*obj.Prog).Link
+			}
+		}
+	}
+}
+
+func gcsymdup(s *Sym) {
+	ls := Linksym(s)
+	if len(ls.R) > 0 {
+		Fatal("cannot rosymdup %s with relocations", ls.Name)
+	}
+	ls.Name = fmt.Sprintf("gclocals·%x", md5.Sum(ls.P))
+	ls.Dupok = 1
+}
+
+func emitptrargsmap() {
+	sym := Lookup(fmt.Sprintf("%s.args_stackmap", Curfn.Nname.Sym.Name))
+
+	nptr := int(Curfn.Type.Argwid / int64(Widthptr))
+	bv := bvalloc(int32(nptr) * 2)
+	nbitmap := 1
+	if Curfn.Type.Outtuple > 0 {
+		nbitmap = 2
+	}
+	off := duint32(sym, 0, uint32(nbitmap))
+	off = duint32(sym, off, uint32(bv.n))
+	var xoffset int64
+	if Curfn.Type.Thistuple > 0 {
+		xoffset = 0
+		onebitwalktype1(getthisx(Curfn.Type), &xoffset, bv)
+	}
+
+	if Curfn.Type.Intuple > 0 {
+		xoffset = 0
+		onebitwalktype1(getinargx(Curfn.Type), &xoffset, bv)
+	}
+
+	for j := 0; int32(j) < bv.n; j += 32 {
+		off = duint32(sym, off, bv.b[j/32])
+	}
+	if Curfn.Type.Outtuple > 0 {
+		xoffset = 0
+		onebitwalktype1(getoutargx(Curfn.Type), &xoffset, bv)
+		for j := 0; int32(j) < bv.n; j += 32 {
+			off = duint32(sym, off, bv.b[j/32])
+		}
+	}
+
+	ggloblsym(sym, int32(off), obj.RODATA|obj.LOCAL)
+}
+
+// Sort the list of stack variables: autos after anything else;
+// within autos, unused after used; within used, things with
+// pointers first, zeroed things first, and then decreasing size.
+// Because autos are laid out in decreasing addresses
+// on the stack, pointers first, zeroed things first and decreasing size
+// really means, in memory, things with pointers needing zeroing at
+// the top of the stack and increasing in size.
+// Non-autos sort on offset.
+func cmpstackvar(a *Node, b *Node) int {
+	if a.Class != b.Class {
+		if a.Class == PAUTO {
+			return +1
+		}
+		return -1
+	}
+
+	if a.Class != PAUTO {
+		if a.Xoffset < b.Xoffset {
+			return -1
+		}
+		if a.Xoffset > b.Xoffset {
+			return +1
+		}
+		return 0
+	}
+
+	if a.Used != b.Used {
+		return obj.Bool2int(b.Used) - obj.Bool2int(a.Used)
+	}
+
+	ap := obj.Bool2int(haspointers(a.Type))
+	bp := obj.Bool2int(haspointers(b.Type))
+	if ap != bp {
+		return bp - ap
+	}
+
+	ap = obj.Bool2int(a.Name.Needzero)
+	bp = obj.Bool2int(b.Name.Needzero)
+	if ap != bp {
+		return bp - ap
+	}
+
+	if a.Type.Width < b.Type.Width {
+		return +1
+	}
+	if a.Type.Width > b.Type.Width {
+		return -1
+	}
+
+	return stringsCompare(a.Sym.Name, b.Sym.Name)
+}
+
+// TODO(lvd) find out where the PAUTO/OLITERAL nodes come from.
+func allocauto(ptxt *obj.Prog) {
+	Stksize = 0
+	stkptrsize = 0
+
+	if Curfn.Func.Dcl == nil {
+		return
+	}
+
+	// Mark the PAUTO's unused.
+	for ll := Curfn.Func.Dcl; ll != nil; ll = ll.Next {
+		if ll.N.Class == PAUTO {
+			ll.N.Used = false
+		}
+	}
+
+	markautoused(ptxt)
+
+	listsort(&Curfn.Func.Dcl, cmpstackvar)
+
+	// Unused autos are at the end, chop 'em off.
+	ll := Curfn.Func.Dcl
+
+	n := ll.N
+	if n.Class == PAUTO && n.Op == ONAME && !n.Used {
+		// No locals used at all
+		Curfn.Func.Dcl = nil
+
+		fixautoused(ptxt)
+		return
+	}
+
+	for ll := Curfn.Func.Dcl; ll.Next != nil; ll = ll.Next {
+		n = ll.Next.N
+		if n.Class == PAUTO && n.Op == ONAME && !n.Used {
+			ll.Next = nil
+			Curfn.Func.Dcl.End = ll
+			break
+		}
+	}
+
+	// Reassign stack offsets of the locals that are still there.
+	var w int64
+	for ll := Curfn.Func.Dcl; ll != nil; ll = ll.Next {
+		n = ll.N
+		if n.Class != PAUTO || n.Op != ONAME {
+			continue
+		}
+
+		dowidth(n.Type)
+		w = n.Type.Width
+		if w >= Thearch.MAXWIDTH || w < 0 {
+			Fatal("bad width")
+		}
+		Stksize += w
+		Stksize = Rnd(Stksize, int64(n.Type.Align))
+		if haspointers(n.Type) {
+			stkptrsize = Stksize
+		}
+		if Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9' {
+			Stksize = Rnd(Stksize, int64(Widthptr))
+		}
+		if Stksize >= 1<<31 {
+			setlineno(Curfn)
+			Yyerror("stack frame too large (>2GB)")
+		}
+
+		n.Stkdelta = -Stksize - n.Xoffset
+	}
+
+	Stksize = Rnd(Stksize, int64(Widthreg))
+	stkptrsize = Rnd(stkptrsize, int64(Widthreg))
+
+	fixautoused(ptxt)
+
+	// The debug information needs accurate offsets on the symbols.
+	for ll := Curfn.Func.Dcl; ll != nil; ll = ll.Next {
+		if ll.N.Class != PAUTO || ll.N.Op != ONAME {
+			continue
+		}
+		ll.N.Xoffset += ll.N.Stkdelta
+		ll.N.Stkdelta = 0
+	}
+}
+
+func movelarge(l *NodeList) {
+	for ; l != nil; l = l.Next {
+		if l.N.Op == ODCLFUNC {
+			movelargefn(l.N)
+		}
+	}
+}
+
+func movelargefn(fn *Node) {
+	var n *Node
+
+	for l := fn.Func.Dcl; l != nil; l = l.Next {
+		n = l.N
+		if n.Class == PAUTO && n.Type != nil && n.Type.Width > MaxStackVarSize {
+			addrescapes(n)
+		}
+	}
+}
+
+func Cgen_checknil(n *Node) {
+	if Disable_checknil != 0 {
+		return
+	}
+
+	// Ideally we wouldn't see any integer types here, but we do.
+	if n.Type == nil || (!Isptr[n.Type.Etype] && !Isint[n.Type.Etype] && n.Type.Etype != TUNSAFEPTR) {
+		Dump("checknil", n)
+		Fatal("bad checknil")
+	}
+
+	if ((Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9') && n.Op != OREGISTER) || !n.Addable || n.Op == OLITERAL {
+		var reg Node
+		Regalloc(&reg, Types[Tptr], n)
+		Cgen(n, &reg)
+		Thearch.Gins(obj.ACHECKNIL, &reg, nil)
+		Regfree(&reg)
+		return
+	}
+
+	Thearch.Gins(obj.ACHECKNIL, n, nil)
+}
+
+func compile(fn *Node) {
+	if Newproc == nil {
+		Newproc = Sysfunc("newproc")
+		Deferproc = Sysfunc("deferproc")
+		Deferreturn = Sysfunc("deferreturn")
+		Panicindex = Sysfunc("panicindex")
+		panicslice = Sysfunc("panicslice")
+		throwreturn = Sysfunc("throwreturn")
+	}
+
+	lno := setlineno(fn)
+
+	Curfn = fn
+	dowidth(Curfn.Type)
+
+	var oldstksize int64
+	var nod1 Node
+	var ptxt *obj.Prog
+	var pl *obj.Plist
+	var p *obj.Prog
+	var n *Node
+	var nam *Node
+	var gcargs *Sym
+	var gclocals *Sym
+	if fn.Nbody == nil {
+		if pure_go != 0 || strings.HasPrefix(fn.Nname.Sym.Name, "init.") {
+			Yyerror("missing function body for %q", fn.Nname.Sym.Name)
+			goto ret
+		}
+
+		if Debug['A'] != 0 {
+			goto ret
+		}
+		emitptrargsmap()
+		goto ret
+	}
+
+	saveerrors()
+
+	// set up domain for labels
+	clearlabels()
+
+	if Curfn.Type.Outnamed != 0 {
+		// add clearing of the output parameters
+		var save Iter
+		t := Structfirst(&save, Getoutarg(Curfn.Type))
+
+		for t != nil {
+			if t.Nname != nil {
+				n = Nod(OAS, t.Nname, nil)
+				typecheck(&n, Etop)
+				Curfn.Nbody = concat(list1(n), Curfn.Nbody)
+			}
+
+			t = structnext(&save)
+		}
+	}
+
+	order(Curfn)
+	if nerrors != 0 {
+		goto ret
+	}
+
+	Hasdefer = 0
+	walk(Curfn)
+	if nerrors != 0 {
+		goto ret
+	}
+	if flag_race != 0 {
+		racewalk(Curfn)
+	}
+	if nerrors != 0 {
+		goto ret
+	}
+
+	continpc = nil
+	breakpc = nil
+
+	pl = newplist()
+	pl.Name = Linksym(Curfn.Nname.Sym)
+
+	setlineno(Curfn)
+
+	Nodconst(&nod1, Types[TINT32], 0)
+	nam = Curfn.Nname
+	if isblank(nam) {
+		nam = nil
+	}
+	ptxt = Thearch.Gins(obj.ATEXT, nam, &nod1)
+	if fn.Func.Dupok {
+		ptxt.From3.Offset |= obj.DUPOK
+	}
+	if fn.Func.Wrapper {
+		ptxt.From3.Offset |= obj.WRAPPER
+	}
+	if fn.Func.Needctxt {
+		ptxt.From3.Offset |= obj.NEEDCTXT
+	}
+	if fn.Func.Nosplit {
+		ptxt.From3.Offset |= obj.NOSPLIT
+	}
+
+	// Clumsy but important.
+	// See test/recover.go for test cases and src/reflect/value.go
+	// for the actual functions being considered.
+	if myimportpath != "" && myimportpath == "reflect" {
+		if Curfn.Nname.Sym.Name == "callReflect" || Curfn.Nname.Sym.Name == "callMethod" {
+			ptxt.From3.Offset |= obj.WRAPPER
+		}
+	}
+
+	Afunclit(&ptxt.From, Curfn.Nname)
+
+	ginit()
+
+	gcargs = makefuncdatasym("gcargs·%d", obj.FUNCDATA_ArgsPointerMaps)
+	gclocals = makefuncdatasym("gclocals·%d", obj.FUNCDATA_LocalsPointerMaps)
+
+	for t := Curfn.Paramfld; t != nil; t = t.Down {
+		gtrack(tracksym(t.Type))
+	}
+
+	for l := fn.Func.Dcl; l != nil; l = l.Next {
+		n = l.N
+		if n.Op != ONAME { // might be OTYPE or OLITERAL
+			continue
+		}
+		switch n.Class {
+		case PAUTO, PPARAM, PPARAMOUT:
+			Nodconst(&nod1, Types[TUINTPTR], l.N.Type.Width)
+			p = Thearch.Gins(obj.ATYPE, l.N, &nod1)
+			p.From.Gotype = Linksym(ngotype(l.N))
+		}
+	}
+
+	Genlist(Curfn.Func.Enter)
+	Genlist(Curfn.Nbody)
+	gclean()
+	checklabels()
+	if nerrors != 0 {
+		goto ret
+	}
+	if Curfn.Func.Endlineno != 0 {
+		lineno = Curfn.Func.Endlineno
+	}
+
+	if Curfn.Type.Outtuple != 0 {
+		Ginscall(throwreturn, 0)
+	}
+
+	ginit()
+
+	// TODO: Determine when the final cgen_ret can be omitted. Perhaps always?
+	cgen_ret(nil)
+
+	if Hasdefer != 0 {
+		// deferreturn pretends to have one uintptr argument.
+		// Reserve space for it so stack scanner is happy.
+		if Maxarg < int64(Widthptr) {
+			Maxarg = int64(Widthptr)
+		}
+	}
+
+	gclean()
+	if nerrors != 0 {
+		goto ret
+	}
+
+	Pc.As = obj.ARET // overwrite AEND
+	Pc.Lineno = lineno
+
+	fixjmp(ptxt)
+	if Debug['N'] == 0 || Debug['R'] != 0 || Debug['P'] != 0 {
+		regopt(ptxt)
+		nilopt(ptxt)
+	}
+
+	Thearch.Expandchecks(ptxt)
+
+	oldstksize = Stksize
+	allocauto(ptxt)
+
+	if false {
+		fmt.Printf("allocauto: %d to %d\n", oldstksize, int64(Stksize))
+	}
+
+	setlineno(Curfn)
+	if int64(Stksize)+Maxarg > 1<<31 {
+		Yyerror("stack frame too large (>2GB)")
+		goto ret
+	}
+
+	// Emit garbage collection symbols.
+	liveness(Curfn, ptxt, gcargs, gclocals)
+
+	gcsymdup(gcargs)
+	gcsymdup(gclocals)
+
+	Thearch.Defframe(ptxt)
+
+	if Debug['f'] != 0 {
+		frame(0)
+	}
+
+	// Remove leftover instrumentation from the instruction stream.
+	removevardef(ptxt)
+
+ret:
+	lineno = lno
+}
diff --git a/src/cmd/compile/internal/gc/plive.go b/src/cmd/compile/internal/gc/plive.go
new file mode 100644
index 0000000..b4d0699
--- /dev/null
+++ b/src/cmd/compile/internal/gc/plive.go
@@ -0,0 +1,1830 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Garbage collector liveness bitmap generation.
+
+// The command line flag -live causes this code to print debug information.
+// The levels are:
+//
+//	-live (aka -live=1): print liveness lists as code warnings at safe points
+//	-live=2: print an assembly listing with liveness annotations
+//	-live=3: print information during each computation phase (much chattier)
+//
+// Each level includes the earlier output as well.
+
+package gc
+
+import (
+	"cmd/internal/obj"
+	"fmt"
+	"sort"
+)
+
+const (
+	UNVISITED = 0
+	VISITED   = 1
+)
+
+// An ordinary basic block.
+//
+// Instructions are threaded together in a doubly-linked list.  To iterate in
+// program order, follow the link pointer from the first node and stop after
+// the last node has been visited:
+//
+//	for p := bb.first; ; p = p.Link {
+//		...
+//		if p == bb.last {
+//			break
+//		}
+//	}
+//
+// To iterate in reverse program order, follow the opt pointer from the
+// last node:
+//
+//	for p := bb.last; p != nil; p = p.Opt.(*obj.Prog) {
+//		...
+//	}
+type BasicBlock struct {
+	pred            []*BasicBlock // predecessors; if none, probably start of CFG
+	succ            []*BasicBlock // successors; if none, probably ends in return statement
+	first           *obj.Prog     // first instruction in block
+	last            *obj.Prog     // last instruction in block
+	rpo             int           // reverse post-order number (also index in cfg)
+	mark            int           // mark bit for traversals
+	lastbitmapindex int           // for livenessepilogue
+
+	// Summary sets of block effects.
+
+	// Computed during livenessprologue using only the content of
+	// individual blocks:
+	//
+	//	uevar: upward exposed variables (used before set in block)
+	//	varkill: killed variables (set in block)
+	//	avarinit: addrtaken variables set or used (proof of initialization)
+	uevar    Bvec
+	varkill  Bvec
+	avarinit Bvec
+
+	// Computed during livenesssolve using control flow information:
+	//
+	//	livein: variables live at block entry
+	//	liveout: variables live at block exit
+	//	avarinitany: addrtaken variables possibly initialized at block exit
+	//		(initialized in block or at exit from any predecessor block)
+	//	avarinitall: addrtaken variables certainly initialized at block exit
+	//		(initialized in block or at exit from all predecessor blocks)
+	livein      Bvec
+	liveout     Bvec
+	avarinitany Bvec
+	avarinitall Bvec
+}
+
+// A collection of global state used by liveness analysis.
+type Liveness struct {
+	fn   *Node
+	ptxt *obj.Prog
+	vars []*Node
+	cfg  []*BasicBlock
+
+	// An array with a bit vector for each safe point tracking live pointers
+	// in the arguments and locals area, indexed by bb.rpo.
+	argslivepointers []Bvec
+	livepointers     []Bvec
+}
+
+func xmalloc(size uint32) interface{} {
+	result := (interface{})(make([]byte, size))
+	if result == nil {
+		Fatal("malloc failed")
+	}
+	return result
+}
+
+// Constructs a new basic block containing a single instruction.
+func newblock(prog *obj.Prog) *BasicBlock {
+	if prog == nil {
+		Fatal("newblock: prog cannot be nil")
+	}
+	result := new(BasicBlock)
+	result.rpo = -1
+	result.mark = UNVISITED
+	result.first = prog
+	result.last = prog
+	result.pred = make([]*BasicBlock, 0, 2)
+	result.succ = make([]*BasicBlock, 0, 2)
+	return result
+}
+
+// Frees a basic block and all of its leaf data structures.
+func freeblock(bb *BasicBlock) {
+	if bb == nil {
+		Fatal("freeblock: cannot free nil")
+	}
+}
+
+// Adds an edge between two basic blocks by making from a predecessor of to and
+// to a successor of from.
+func addedge(from *BasicBlock, to *BasicBlock) {
+	if from == nil {
+		Fatal("addedge: from is nil")
+	}
+	if to == nil {
+		Fatal("addedge: to is nil")
+	}
+	from.succ = append(from.succ, to)
+	to.pred = append(to.pred, from)
+}
+
+// Inserts prev before curr in the instruction
+// stream.  Any control flow, such as branches or fall-throughs, that targets
+// the existing instruction is adjusted to target the new instruction.
+func splicebefore(lv *Liveness, bb *BasicBlock, prev *obj.Prog, curr *obj.Prog) {
+	// There may be other instructions pointing at curr,
+	// and we want them to now point at prev. Instead of
+	// trying to find all such instructions, swap the contents
+	// so that the problem becomes inserting next after curr.
+	// The "opt" field is the backward link in the linked list.
+
+	// Overwrite curr's data with prev, but keep the list links.
+	tmp := *curr
+
+	*curr = *prev
+	curr.Opt = tmp.Opt
+	curr.Link = tmp.Link
+
+	// Overwrite prev (now next) with curr's old data.
+	next := prev
+
+	*next = tmp
+	next.Opt = nil
+	next.Link = nil
+
+	// Now insert next after curr.
+	next.Link = curr.Link
+
+	next.Opt = curr
+	curr.Link = next
+	if next.Link != nil && next.Link.Opt == curr {
+		next.Link.Opt = next
+	}
+
+	if bb.last == curr {
+		bb.last = next
+	}
+}
+
+// A pretty printer for basic blocks.
+func printblock(bb *BasicBlock) {
+	fmt.Printf("basic block %d\n", bb.rpo)
+	fmt.Printf("\tpred:")
+	for _, pred := range bb.pred {
+		fmt.Printf(" %d", pred.rpo)
+	}
+	fmt.Printf("\n")
+	fmt.Printf("\tsucc:")
+	for _, succ := range bb.succ {
+		fmt.Printf(" %d", succ.rpo)
+	}
+	fmt.Printf("\n")
+	fmt.Printf("\tprog:\n")
+	for prog := bb.first; ; prog = prog.Link {
+		fmt.Printf("\t\t%v\n", prog)
+		if prog == bb.last {
+			break
+		}
+	}
+}
+
+// Iterates over a basic block in reverse program order, applying a callback to
+// each instruction.  There are two criteria for termination.  If the start of
+// the block is reached, false is returned.  If the callback returns true, the
+// iteration stops and true is returned.
+func blockany(bb *BasicBlock, f func(*obj.Prog) bool) bool {
+	for p := bb.last; p != nil; p = p.Opt.(*obj.Prog) {
+		if f(p) {
+			return true
+		}
+	}
+	return false
+}
+
+// Collects and returns an array of Node*s for function arguments and local
+// variables.
+func getvariables(fn *Node) []*Node {
+	result := make([]*Node, 0, 0)
+	for ll := fn.Func.Dcl; ll != nil; ll = ll.Next {
+		if ll.N.Op == ONAME {
+			// In order for GODEBUG=gcdead=1 to work, each bitmap needs
+			// to contain information about all variables covered by the bitmap.
+			// For local variables, the bitmap only covers the stkptrsize
+			// bytes in the frame where variables containing pointers live.
+			// For arguments and results, the bitmap covers all variables,
+			// so we must include all the variables, even the ones without
+			// pointers.
+			//
+			// The Node.opt field is available for use by optimization passes.
+			// We use it to hold the index of the node in the variables array, plus 1
+			// (so that 0 means the Node is not in the variables array).
+			// Each pass should clear opt when done, but you never know,
+			// so clear them all ourselves too.
+			// The Node.curfn field is supposed to be set to the current function
+			// already, but for some compiler-introduced names it seems not to be,
+			// so fix that here.
+			// Later, when we want to find the index of a node in the variables list,
+			// we will check that n->curfn == curfn and n->opt > 0. Then n->opt - 1
+			// is the index in the variables list.
+			ll.N.Opt = nil
+
+			// The compiler doesn't emit initializations for zero-width parameters or results.
+			if ll.N.Type.Width == 0 {
+				continue
+			}
+
+			ll.N.Curfn = Curfn
+			switch ll.N.Class {
+			case PAUTO:
+				if haspointers(ll.N.Type) {
+					ll.N.Opt = int32(len(result))
+					result = append(result, ll.N)
+				}
+
+			case PPARAM, PPARAMOUT:
+				ll.N.Opt = int32(len(result))
+				result = append(result, ll.N)
+			}
+		}
+	}
+
+	return result
+}
+
+// A pretty printer for control flow graphs.  Takes an array of BasicBlock*s.
+func printcfg(cfg []*BasicBlock) {
+	for _, bb := range cfg {
+		printblock(bb)
+	}
+}
+
+// Assigns a reverse post order number to each connected basic block using the
+// standard algorithm.  Unconnected blocks will not be affected.
+func reversepostorder(root *BasicBlock, rpo *int32) {
+	root.mark = VISITED
+	for _, bb := range root.succ {
+		if bb.mark == UNVISITED {
+			reversepostorder(bb, rpo)
+		}
+	}
+	*rpo -= 1
+	root.rpo = int(*rpo)
+}
+
+// Comparison predicate used for sorting basic blocks by their rpo in ascending
+// order.
+type blockrpocmp []*BasicBlock
+
+func (x blockrpocmp) Len() int           { return len(x) }
+func (x blockrpocmp) Swap(i, j int)      { x[i], x[j] = x[j], x[i] }
+func (x blockrpocmp) Less(i, j int) bool { return x[i].rpo < x[j].rpo }
+
+// A pattern matcher for call instructions.  Returns true when the instruction
+// is a call to a specific package qualified function name.
+func iscall(prog *obj.Prog, name *obj.LSym) bool {
+	if prog == nil {
+		Fatal("iscall: prog is nil")
+	}
+	if name == nil {
+		Fatal("iscall: function name is nil")
+	}
+	if prog.As != obj.ACALL {
+		return false
+	}
+	return name == prog.To.Sym
+}
+
+// Returns true for instructions that call a runtime function implementing a
+// select communication clause.
+
+var selectNames [4]*obj.LSym
+
+func isselectcommcasecall(prog *obj.Prog) bool {
+	if selectNames[0] == nil {
+		selectNames[0] = Linksym(Pkglookup("selectsend", Runtimepkg))
+		selectNames[1] = Linksym(Pkglookup("selectrecv", Runtimepkg))
+		selectNames[2] = Linksym(Pkglookup("selectrecv2", Runtimepkg))
+		selectNames[3] = Linksym(Pkglookup("selectdefault", Runtimepkg))
+	}
+
+	for _, name := range selectNames {
+		if iscall(prog, name) {
+			return true
+		}
+	}
+	return false
+}
+
+// Returns true for call instructions that target runtime·newselect.
+
+var isnewselect_sym *obj.LSym
+
+func isnewselect(prog *obj.Prog) bool {
+	if isnewselect_sym == nil {
+		isnewselect_sym = Linksym(Pkglookup("newselect", Runtimepkg))
+	}
+	return iscall(prog, isnewselect_sym)
+}
+
+// Returns true for call instructions that target runtime·selectgo.
+
+var isselectgocall_sym *obj.LSym
+
+func isselectgocall(prog *obj.Prog) bool {
+	if isselectgocall_sym == nil {
+		isselectgocall_sym = Linksym(Pkglookup("selectgo", Runtimepkg))
+	}
+	return iscall(prog, isselectgocall_sym)
+}
+
+var isdeferreturn_sym *obj.LSym
+
+func isdeferreturn(prog *obj.Prog) bool {
+	if isdeferreturn_sym == nil {
+		isdeferreturn_sym = Linksym(Pkglookup("deferreturn", Runtimepkg))
+	}
+	return iscall(prog, isdeferreturn_sym)
+}
+
+// Walk backwards from a runtime·selectgo call up to its immediately dominating
+// runtime·newselect call.  Any successor nodes of communication clause nodes
+// are implicit successors of the runtime·selectgo call node.  The goal of this
+// analysis is to add these missing edges to complete the control flow graph.
+func addselectgosucc(selectgo *BasicBlock) {
+	var succ *BasicBlock
+
+	pred := selectgo
+	for {
+		if len(pred.pred) == 0 {
+			Fatal("selectgo does not have a newselect")
+		}
+		pred = pred.pred[0]
+		if blockany(pred, isselectcommcasecall) {
+			// A select comm case block should have exactly one
+			// successor.
+			if len(pred.succ) != 1 {
+				Fatal("select comm case has too many successors")
+			}
+			succ = pred.succ[0]
+
+			// Its successor should have exactly two successors.
+			// The drop through should flow to the selectgo block
+			// and the branch should lead to the select case
+			// statements block.
+			if len(succ.succ) != 2 {
+				Fatal("select comm case successor has too many successors")
+			}
+
+			// Add the block as a successor of the selectgo block.
+			addedge(selectgo, succ)
+		}
+
+		if blockany(pred, isnewselect) {
+			// Reached the matching newselect.
+			break
+		}
+	}
+}
+
+// The entry point for the missing selectgo control flow algorithm.  Takes an
+// array of BasicBlock*s containing selectgo calls.
+func fixselectgo(selectgo []*BasicBlock) {
+	for _, bb := range selectgo {
+		addselectgosucc(bb)
+	}
+}
+
+// Constructs a control flow graph from a sequence of instructions.  This
+// procedure is complicated by various sources of implicit control flow that are
+// not accounted for using the standard cfg construction algorithm.  Returns an
+// array of BasicBlock*s in control flow graph form (basic blocks ordered by
+// their RPO number).
+func newcfg(firstp *obj.Prog) []*BasicBlock {
+	// Reset the opt field of each prog to nil.  In the first and second
+	// passes, instructions that are labels temporarily use the opt field to
+	// point to their basic block.  In the third pass, the opt field is reset
+	// to point to the predecessor of an instruction in its basic block.
+	for p := firstp; p != nil; p = p.Link {
+		p.Opt = nil
+	}
+
+	// Allocate an array to remember where we have seen selectgo calls.
+	// These blocks will be revisited to add successor control flow edges.
+	selectgo := make([]*BasicBlock, 0, 0)
+
+	// Loop through all instructions identifying branch targets
+	// and fall-throughs and allocate basic blocks.
+	cfg := make([]*BasicBlock, 0, 0)
+
+	bb := newblock(firstp)
+	cfg = append(cfg, bb)
+	for p := firstp; p != nil; p = p.Link {
+		Thearch.Proginfo(p)
+		if p.To.Type == obj.TYPE_BRANCH {
+			if p.To.Val == nil {
+				Fatal("prog branch to nil")
+			}
+			if p.To.Val.(*obj.Prog).Opt == nil {
+				p.To.Val.(*obj.Prog).Opt = newblock(p.To.Val.(*obj.Prog))
+				cfg = append(cfg, p.To.Val.(*obj.Prog).Opt.(*BasicBlock))
+			}
+
+			if p.As != obj.AJMP && p.Link != nil && p.Link.Opt == nil {
+				p.Link.Opt = newblock(p.Link)
+				cfg = append(cfg, p.Link.Opt.(*BasicBlock))
+			}
+		} else if isselectcommcasecall(p) || isselectgocall(p) {
+			// Accommodate implicit selectgo control flow.
+			if p.Link.Opt == nil {
+				p.Link.Opt = newblock(p.Link)
+				cfg = append(cfg, p.Link.Opt.(*BasicBlock))
+			}
+		}
+	}
+
+	// Loop through all basic blocks maximally growing the list of
+	// contained instructions until a label is reached.  Add edges
+	// for branches and fall-through instructions.
+	for _, bb := range cfg {
+		for p := bb.last; p != nil; p = p.Link {
+			if p.Opt != nil && p != bb.last {
+				break
+			}
+			bb.last = p
+
+			// Stop before an unreachable RET, to avoid creating
+			// unreachable control flow nodes.
+			if p.Link != nil && p.Link.As == obj.ARET && p.Link.Mode == 1 {
+				break
+			}
+
+			// Collect basic blocks with selectgo calls.
+			if isselectgocall(p) {
+				selectgo = append(selectgo, bb)
+			}
+		}
+
+		if bb.last.To.Type == obj.TYPE_BRANCH {
+			addedge(bb, bb.last.To.Val.(*obj.Prog).Opt.(*BasicBlock))
+		}
+		if bb.last.Link != nil {
+			// Add a fall-through when the instruction is
+			// not an unconditional control transfer.
+			if bb.last.As != obj.AJMP && bb.last.As != obj.ARET && bb.last.As != obj.AUNDEF {
+				addedge(bb, bb.last.Link.Opt.(*BasicBlock))
+			}
+		}
+	}
+
+	// Add back links so the instructions in a basic block can be traversed
+	// backward.  This is the final state of the instruction opt field.
+	for _, bb := range cfg {
+		p := bb.first
+		var prev *obj.Prog
+		for {
+			p.Opt = prev
+			if p == bb.last {
+				break
+			}
+			prev = p
+			p = p.Link
+		}
+	}
+
+	// Add missing successor edges to the selectgo blocks.
+	if len(selectgo) != 0 {
+		fixselectgo([]*BasicBlock(selectgo))
+	}
+
+	// Find a depth-first order and assign a depth-first number to
+	// all basic blocks.
+	for _, bb := range cfg {
+		bb.mark = UNVISITED
+	}
+	bb = cfg[0]
+	rpo := int32(len(cfg))
+	reversepostorder(bb, &rpo)
+
+	// Sort the basic blocks by their depth first number.  The
+	// array is now a depth-first spanning tree with the first
+	// node being the root.
+	sort.Sort(blockrpocmp(cfg))
+
+	// Unreachable control flow nodes are indicated by a -1 in the rpo
+	// field.  If we see these nodes something must have gone wrong in an
+	// upstream compilation phase.
+	bb = cfg[0]
+	if bb.rpo == -1 {
+		fmt.Printf("newcfg: unreachable basic block for %v\n", bb.last)
+		printcfg(cfg)
+		Fatal("newcfg: invalid control flow graph")
+	}
+
+	return cfg
+}
+
+// Frees a control flow graph (an array of BasicBlock*s) and all of its leaf
+// data structures.
+func freecfg(cfg []*BasicBlock) {
+	if len(cfg) > 0 {
+		bb0 := cfg[0]
+		for p := bb0.first; p != nil; p = p.Link {
+			p.Opt = nil
+		}
+	}
+}
+
+// Returns true if the node names a variable that is otherwise uninteresting to
+// the liveness computation.
+func isfunny(n *Node) bool {
+	return n.Sym != nil && (n.Sym.Name == ".fp" || n.Sym.Name == ".args")
+}
+
+// Computes the effects of an instruction on a set of
+// variables.  The vars argument is an array of Node*s.
+//
+// The output vectors give bits for variables:
+//	uevar - used by this instruction
+//	varkill - killed by this instruction
+//		for variables without address taken, means variable was set
+//		for variables with address taken, means variable was marked dead
+//	avarinit - initialized or referred to by this instruction,
+//		only for variables with address taken but not escaping to heap
+//
+// The avarinit output serves as a signal that the data has been
+// initialized, because any use of a variable must come after its
+// initialization.
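+//
+// For example (a sketch), for a move instruction that reads its left
+// operand and writes its right operand, a non-addrtaken local read on the
+// left sets that variable's uevar bit, and a full-width store to a
+// non-addrtaken local on the right sets that variable's varkill bit.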
+func progeffects(prog *obj.Prog, vars []*Node, uevar Bvec, varkill Bvec, avarinit Bvec) {
+	bvresetall(uevar)
+	bvresetall(varkill)
+	bvresetall(avarinit)
+
+	if prog.As == obj.ARET {
+		// Return instructions implicitly read all the arguments.  For
+		// the sake of correctness, out arguments must be read.  For the
+		// sake of backtrace quality, we read in arguments as well.
+		//
+		// A return instruction with a p->to is a tail return, which brings
+		// the stack pointer back up (if it ever went down) and then jumps
+		// to a new function entirely. That form of instruction must read
+		// all the parameters for correctness, and similarly it must not
+		// read the out arguments - they won't be set until the new
+		// function runs.
+		for i, node := range vars {
+			switch node.Class &^ PHEAP {
+			case PPARAM:
+				bvset(uevar, int32(i))
+
+			// If the result had its address taken, it is being tracked
+			// by the avarinit code, which does not use uevar.
+			// If we added it to uevar too, we'd not see any kill
+			// and decide that the variable was live on entry, which it is not.
+			// So only use uevar in the non-addrtaken case.
+			// The prog.To.Type == obj.TYPE_NONE check limits the bvset to
+			// non-tail-call return instructions; see the note above
+			// the loop for details.
+			case PPARAMOUT:
+				if !node.Addrtaken && prog.To.Type == obj.TYPE_NONE {
+					bvset(uevar, int32(i))
+				}
+			}
+		}
+
+		return
+	}
+
+	if prog.As == obj.ATEXT {
+		// A text instruction marks the entry point to a function and
+		// the definition point of all in arguments.
+		for i, node := range vars {
+			switch node.Class &^ PHEAP {
+			case PPARAM:
+				if node.Addrtaken {
+					bvset(avarinit, int32(i))
+				}
+				bvset(varkill, int32(i))
+			}
+		}
+
+		return
+	}
+
+	if prog.Info.Flags&(LeftRead|LeftWrite|LeftAddr) != 0 {
+		from := &prog.From
+		if from.Node != nil && from.Sym != nil && ((from.Node).(*Node)).Curfn == Curfn {
+			switch ((from.Node).(*Node)).Class &^ PHEAP {
+			case PAUTO, PPARAM, PPARAMOUT:
+				pos, ok := from.Node.(*Node).Opt.(int32) // index in vars
+				if !ok {
+					goto Next
+				}
+				if pos >= int32(len(vars)) || vars[pos] != from.Node {
+					Fatal("bad bookkeeping in liveness %v %d", Nconv(from.Node.(*Node), 0), pos)
+				}
+				if ((from.Node).(*Node)).Addrtaken {
+					bvset(avarinit, pos)
+				} else {
+					if prog.Info.Flags&(LeftRead|LeftAddr) != 0 {
+						bvset(uevar, pos)
+					}
+					if prog.Info.Flags&LeftWrite != 0 {
+						if from.Node != nil && !Isfat(((from.Node).(*Node)).Type) {
+							bvset(varkill, pos)
+						}
+					}
+				}
+			}
+		}
+	}
+
+Next:
+	if prog.Info.Flags&(RightRead|RightWrite|RightAddr) != 0 {
+		to := &prog.To
+		if to.Node != nil && to.Sym != nil && ((to.Node).(*Node)).Curfn == Curfn {
+			switch ((to.Node).(*Node)).Class &^ PHEAP {
+			case PAUTO, PPARAM, PPARAMOUT:
+				pos, ok := to.Node.(*Node).Opt.(int32) // index in vars
+				if !ok {
+					return
+				}
+				if pos >= int32(len(vars)) || vars[pos] != to.Node {
+					Fatal("bad bookkeeping in liveness %v %d", Nconv(to.Node.(*Node), 0), pos)
+				}
+				if ((to.Node).(*Node)).Addrtaken {
+					if prog.As != obj.AVARKILL {
+						bvset(avarinit, pos)
+					}
+					if prog.As == obj.AVARDEF || prog.As == obj.AVARKILL {
+						bvset(varkill, pos)
+					}
+				} else {
+					// RightRead is a read, obviously.
+					// RightAddr by itself is also implicitly a read.
+					//
+					// RightAddr|RightWrite means that the address is being taken
+					// but only so that the instruction can write to the value.
+					// It is not a read. It is equivalent to RightWrite except that
+					// having the RightAddr bit set keeps the registerizer from
+					// trying to substitute a register for the memory location.
+					if (prog.Info.Flags&RightRead != 0) || prog.Info.Flags&(RightAddr|RightWrite) == RightAddr {
+						bvset(uevar, pos)
+					}
+					if prog.Info.Flags&RightWrite != 0 {
+						if to.Node != nil && (!Isfat(((to.Node).(*Node)).Type) || prog.As == obj.AVARDEF) {
+							bvset(varkill, pos)
+						}
+					}
+				}
+			}
+		}
+	}
+}
+
+// Constructs a new liveness structure used to hold the global state of the
+// liveness computation.  The cfg argument is an array of BasicBlock*s and the
+// vars argument is an array of Node*s.
+func newliveness(fn *Node, ptxt *obj.Prog, cfg []*BasicBlock, vars []*Node) *Liveness {
+	result := new(Liveness)
+	result.fn = fn
+	result.ptxt = ptxt
+	result.cfg = cfg
+	result.vars = vars
+
+	nblocks := int32(len(cfg))
+	nvars := int32(len(vars))
+	bulk := bvbulkalloc(nvars, nblocks*7)
+	for _, bb := range cfg {
+		bb.uevar = bulk.next()
+		bb.varkill = bulk.next()
+		bb.livein = bulk.next()
+		bb.liveout = bulk.next()
+		bb.avarinit = bulk.next()
+		bb.avarinitany = bulk.next()
+		bb.avarinitall = bulk.next()
+	}
+
+	result.livepointers = make([]Bvec, 0, 0)
+	result.argslivepointers = make([]Bvec, 0, 0)
+	return result
+}
+
+// Frees the liveness structure and all of its leaf data structures.
+func freeliveness(lv *Liveness) {
+	if lv == nil {
+		Fatal("freeliveness: cannot free nil")
+	}
+}
+
+func printeffects(p *obj.Prog, uevar Bvec, varkill Bvec, avarinit Bvec) {
+	fmt.Printf("effects of %v", p)
+	fmt.Printf("\nuevar: ")
+	bvprint(uevar)
+	fmt.Printf("\nvarkill: ")
+	bvprint(varkill)
+	fmt.Printf("\navarinit: ")
+	bvprint(avarinit)
+	fmt.Printf("\n")
+}
+
+// Pretty print a variable node.  Uses Pascal-like conventions for pointers
+// and addresses to avoid confusion with the C-like conventions used in the
+// node variable names.
+func printnode(node *Node) {
+	p := ""
+	if haspointers(node.Type) {
+		p = "^"
+	}
+	a := ""
+	if node.Addrtaken {
+		a = "@"
+	}
+	fmt.Printf(" %v%s%s", node, p, a)
+}
+
+// Pretty print a list of variables.  The vars argument is an array of Node*s.
+func printvars(name string, bv Bvec, vars []*Node) {
+	fmt.Printf("%s:", name)
+	for i, node := range vars {
+		if bvget(bv, int32(i)) != 0 {
+			printnode(node)
+		}
+	}
+	fmt.Printf("\n")
+}
+
+// Prints a basic block annotated with the information computed by liveness
+// analysis.
+func livenessprintblock(lv *Liveness, bb *BasicBlock) {
+	fmt.Printf("basic block %d\n", bb.rpo)
+
+	fmt.Printf("\tpred:")
+	for _, pred := range bb.pred {
+		fmt.Printf(" %d", pred.rpo)
+	}
+	fmt.Printf("\n")
+
+	fmt.Printf("\tsucc:")
+	for _, succ := range bb.succ {
+		fmt.Printf(" %d", succ.rpo)
+	}
+	fmt.Printf("\n")
+
+	printvars("\tuevar", bb.uevar, []*Node(lv.vars))
+	printvars("\tvarkill", bb.varkill, []*Node(lv.vars))
+	printvars("\tlivein", bb.livein, []*Node(lv.vars))
+	printvars("\tliveout", bb.liveout, []*Node(lv.vars))
+	printvars("\tavarinit", bb.avarinit, []*Node(lv.vars))
+	printvars("\tavarinitany", bb.avarinitany, []*Node(lv.vars))
+	printvars("\tavarinitall", bb.avarinitall, []*Node(lv.vars))
+
+	fmt.Printf("\tprog:\n")
+	for prog := bb.first; ; prog = prog.Link {
+		fmt.Printf("\t\t%v", prog)
+		if prog.As == obj.APCDATA && prog.From.Offset == obj.PCDATA_StackMapIndex {
+			pos := int32(prog.To.Offset)
+			live := lv.livepointers[pos]
+			fmt.Printf(" ")
+			bvprint(live)
+		}
+
+		fmt.Printf("\n")
+		if prog == bb.last {
+			break
+		}
+	}
+}
+
+// Prints a control flow graph annotated with any information computed by
+// liveness analysis.
+func livenessprintcfg(lv *Liveness) {
+	for _, bb := range lv.cfg {
+		livenessprintblock(lv, bb)
+	}
+}
+
+func checkauto(fn *Node, p *obj.Prog, n *Node) {
+	for l := fn.Func.Dcl; l != nil; l = l.Next {
+		if l.N.Op == ONAME && l.N.Class == PAUTO && l.N == n {
+			return
+		}
+	}
+
+	if n == nil {
+		fmt.Printf("%v: checkauto %v: nil node in %v\n", p.Line(), Curfn, p)
+		return
+	}
+
+	fmt.Printf("checkauto %v: %v (%p; class=%d) not found in %v\n", Curfn, n, n, n.Class, p)
+	for l := fn.Func.Dcl; l != nil; l = l.Next {
+		fmt.Printf("\t%v (%p; class=%d)\n", l.N, l.N, l.N.Class)
+	}
+	Yyerror("checkauto: invariant lost")
+}
+
+func checkparam(fn *Node, p *obj.Prog, n *Node) {
+	if isfunny(n) {
+		return
+	}
+	var a *Node
+	var class uint8
+	for l := fn.Func.Dcl; l != nil; l = l.Next {
+		a = l.N
+		class = a.Class &^ PHEAP
+		if a.Op == ONAME && (class == PPARAM || class == PPARAMOUT) && a == n {
+			return
+		}
+	}
+
+	fmt.Printf("checkparam %v: %v (%p; class=%d) not found in %v\n", Curfn, n, n, n.Class, p)
+	for l := fn.Func.Dcl; l != nil; l = l.Next {
+		fmt.Printf("\t%v (%p; class=%d)\n", l.N, l.N, l.N.Class)
+	}
+	Yyerror("checkparam: invariant lost")
+}
+
+func checkprog(fn *Node, p *obj.Prog) {
+	if p.From.Name == obj.NAME_AUTO {
+		checkauto(fn, p, p.From.Node.(*Node))
+	}
+	if p.From.Name == obj.NAME_PARAM {
+		checkparam(fn, p, p.From.Node.(*Node))
+	}
+	if p.To.Name == obj.NAME_AUTO {
+		checkauto(fn, p, p.To.Node.(*Node))
+	}
+	if p.To.Name == obj.NAME_PARAM {
+		checkparam(fn, p, p.To.Node.(*Node))
+	}
+}
+
+// Check instruction invariants.  We assume that the nodes corresponding to the
+// sources and destinations of memory operations will be declared in the
+// function.  This is not strictly true: the so-called funny nodes are an
+// exception, and there are special cases to skip over them.  The analysis
+// will fail if this invariant is changed blindly.
+func checkptxt(fn *Node, firstp *obj.Prog) {
+	if debuglive == 0 {
+		return
+	}
+
+	for p := firstp; p != nil; p = p.Link {
+		if false {
+			fmt.Printf("analyzing '%v'\n", p)
+		}
+		if p.As != obj.ADATA && p.As != obj.AGLOBL && p.As != obj.ATYPE {
+			checkprog(fn, p)
+		}
+	}
+}
+
+// NOTE: The bitmap for a specific type t should be cached in t after the first run
+// and then simply copied into bv at the correct offset on future calls with
+// the same type t. On https://rsc.googlecode.com/hg/testdata/slow.go, onebitwalktype1
+// accounts for 40% of the 6g execution time.
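+//
+// As an illustrative sketch: with 8-byte words, a value of type
+//	struct { x int; p *int; s string }
+// occupies four words, and the walk sets bits 1 and 2 of bv (for p and for
+// the string's data pointer); x and the string length leave their bits clear.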
+func onebitwalktype1(t *Type, xoffset *int64, bv Bvec) {
+	if t.Align > 0 && *xoffset&int64(t.Align-1) != 0 {
+		Fatal("onebitwalktype1: invalid initial alignment, %v", t)
+	}
+
+	switch t.Etype {
+	case TINT8,
+		TUINT8,
+		TINT16,
+		TUINT16,
+		TINT32,
+		TUINT32,
+		TINT64,
+		TUINT64,
+		TINT,
+		TUINT,
+		TUINTPTR,
+		TBOOL,
+		TFLOAT32,
+		TFLOAT64,
+		TCOMPLEX64,
+		TCOMPLEX128:
+		*xoffset += t.Width
+
+	case TPTR32,
+		TPTR64,
+		TUNSAFEPTR,
+		TFUNC,
+		TCHAN,
+		TMAP:
+		if *xoffset&int64(Widthptr-1) != 0 {
+			Fatal("onebitwalktype1: invalid alignment, %v", t)
+		}
+		bvset(bv, int32(*xoffset/int64(Widthptr))) // pointer
+		*xoffset += t.Width
+
+	case TSTRING:
+		// struct { byte *str; intgo len; }
+		if *xoffset&int64(Widthptr-1) != 0 {
+			Fatal("onebitwalktype1: invalid alignment, %v", t)
+		}
+		bvset(bv, int32(*xoffset/int64(Widthptr))) //pointer in first slot
+		*xoffset += t.Width
+
+	case TINTER:
+		// struct { Itab *tab;	void *data; }
+		// or, when isnilinter(t)==true:
+		// struct { Type *type; void *data; }
+		if *xoffset&int64(Widthptr-1) != 0 {
+			Fatal("onebitwalktype1: invalid alignment, %v", t)
+		}
+		bvset(bv, int32(*xoffset/int64(Widthptr)))   // pointer in first slot
+		bvset(bv, int32(*xoffset/int64(Widthptr)+1)) // pointer in second slot
+		*xoffset += t.Width
+
+	case TARRAY:
+		// The value of t.Bound is -1 for slice types and >=0 for
+		// fixed array types.  All other values are invalid.
+		if t.Bound < -1 {
+			Fatal("onebitwalktype1: invalid bound, %v", t)
+		}
+		if Isslice(t) {
+			// struct { byte *array; uintgo len; uintgo cap; }
+			if *xoffset&int64(Widthptr-1) != 0 {
+				Fatal("onebitwalktype1: invalid TARRAY alignment, %v", t)
+			}
+			bvset(bv, int32(*xoffset/int64(Widthptr))) // pointer in first slot (BitsPointer)
+			*xoffset += t.Width
+		} else {
+			for i := int64(0); i < t.Bound; i++ {
+				onebitwalktype1(t.Type, xoffset, bv)
+			}
+		}
+
+	case TSTRUCT:
+		o := int64(0)
+		var fieldoffset int64
+		for t1 := t.Type; t1 != nil; t1 = t1.Down {
+			fieldoffset = t1.Width
+			*xoffset += fieldoffset - o
+			onebitwalktype1(t1.Type, xoffset, bv)
+			o = fieldoffset + t1.Type.Width
+		}
+
+		*xoffset += t.Width - o
+
+	default:
+		Fatal("onebitwalktype1: unexpected type, %v", t)
+	}
+}
+
+// Returns the number of words of local variables.
+func localswords() int32 {
+	return int32(stkptrsize / int64(Widthptr))
+}
+
+// Returns the number of words of in and out arguments.
+func argswords() int32 {
+	return int32(Curfn.Type.Argwid / int64(Widthptr))
+}
+
+// Generates live pointer value maps for arguments and local variables.  The
+// receiver ("this") argument and the in arguments are always assumed live.  The vars
+// argument is an array of Node*s.
+func onebitlivepointermap(lv *Liveness, liveout Bvec, vars []*Node, args Bvec, locals Bvec) {
+	var node *Node
+	var xoffset int64
+
+	for i := int32(0); ; i++ {
+		i = int32(bvnext(liveout, i))
+		if i < 0 {
+			break
+		}
+		node = vars[i]
+		switch node.Class {
+		case PAUTO:
+			xoffset = node.Xoffset + stkptrsize
+			onebitwalktype1(node.Type, &xoffset, locals)
+
+		case PPARAM, PPARAMOUT:
+			xoffset = node.Xoffset
+			onebitwalktype1(node.Type, &xoffset, args)
+		}
+	}
+
+	// The node list only contains declared names.
+	// If the receiver or arguments are unnamed, they will be omitted
+	// from the list above. Preserve those values - even though they are unused -
+	// in order to keep their addresses live for use in stack traces.
+	thisargtype := getthisx(lv.fn.Type)
+
+	if thisargtype != nil {
+		xoffset = 0
+		onebitwalktype1(thisargtype, &xoffset, args)
+	}
+
+	inargtype := getinargx(lv.fn.Type)
+	if inargtype != nil {
+		xoffset = 0
+		onebitwalktype1(inargtype, &xoffset, args)
+	}
+}
+
+// Construct a disembodied instruction.
+func unlinkedprog(as int) *obj.Prog {
+	p := Ctxt.NewProg()
+	Clearp(p)
+	p.As = int16(as)
+	return p
+}
+
+// Construct a new PCDATA instruction associated with, and for the purpose
+// of covering, an existing instruction.
+func newpcdataprog(prog *obj.Prog, index int32) *obj.Prog {
+	var from Node
+	var to Node
+
+	Nodconst(&from, Types[TINT32], obj.PCDATA_StackMapIndex)
+	Nodconst(&to, Types[TINT32], int64(index))
+	pcdata := unlinkedprog(obj.APCDATA)
+	pcdata.Lineno = prog.Lineno
+	Naddr(&pcdata.From, &from)
+	Naddr(&pcdata.To, &to)
+	return pcdata
+}
+
+// Returns true for instructions that are safe points that must be annotated
+// with liveness information.
+func issafepoint(prog *obj.Prog) bool {
+	return prog.As == obj.ATEXT || prog.As == obj.ACALL
+}
+
+// Initializes the sets for solving the live variables.  Visits all the
+// instructions in each basic block to summarize the information at each
+// basic block.
+func livenessprologue(lv *Liveness) {
+	nvars := int32(len(lv.vars))
+	uevar := bvalloc(nvars)
+	varkill := bvalloc(nvars)
+	avarinit := bvalloc(nvars)
+	for _, bb := range lv.cfg {
+		// Walk the block instructions backward and update the block
+		// effects with each prog's effects.
+		for p := bb.last; p != nil; p = p.Opt.(*obj.Prog) {
+			progeffects(p, []*Node(lv.vars), uevar, varkill, avarinit)
+			if debuglive >= 3 {
+				printeffects(p, uevar, varkill, avarinit)
+			}
+			bvor(bb.varkill, bb.varkill, varkill)
+			bvandnot(bb.uevar, bb.uevar, varkill)
+			bvor(bb.uevar, bb.uevar, uevar)
+		}
+
+		// Walk the block instructions forward to update avarinit bits.
+		// avarinit describes the effect at the end of the block, not the beginning.
+		bvresetall(varkill)
+
+		for p := bb.first; ; p = p.Link {
+			progeffects(p, []*Node(lv.vars), uevar, varkill, avarinit)
+			if debuglive >= 3 {
+				printeffects(p, uevar, varkill, avarinit)
+			}
+			bvandnot(bb.avarinit, bb.avarinit, varkill)
+			bvor(bb.avarinit, bb.avarinit, avarinit)
+			if p == bb.last {
+				break
+			}
+		}
+	}
+}
+
+// Solve the liveness dataflow equations.
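+// The standard backward dataflow equations, iterated to a fixed point, are
+//
+//	out[b] = \bigcup_{s \in succ[b]} in[s]
+//	in[b] = uevar[b] \cup (out[b] \setminus varkill[b])
+//
+// preceded by a forward pass that pushes the avarinitany/avarinitall bits
+// through the graph (see the comments in the body).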
+func livenesssolve(lv *Liveness) {
+	// These temporary bitvectors exist to avoid successive allocations and
+	// frees within the loop.
+	newlivein := bvalloc(int32(len(lv.vars)))
+
+	newliveout := bvalloc(int32(len(lv.vars)))
+	any := bvalloc(int32(len(lv.vars)))
+	all := bvalloc(int32(len(lv.vars)))
+
+	// Push avarinitall, avarinitany forward.
+	// avarinitall says the addressed var is initialized along all paths reaching the block exit.
+	// avarinitany says the addressed var is initialized along some path reaching the block exit.
+	for i, bb := range lv.cfg {
+		if i == 0 {
+			bvcopy(bb.avarinitall, bb.avarinit)
+		} else {
+			bvresetall(bb.avarinitall)
+			bvnot(bb.avarinitall)
+		}
+		bvcopy(bb.avarinitany, bb.avarinit)
+	}
+
+	change := int32(1)
+	for change != 0 {
+		change = 0
+		for _, bb := range lv.cfg {
+			bvresetall(any)
+			bvresetall(all)
+			for j, pred := range bb.pred {
+				if j == 0 {
+					bvcopy(any, pred.avarinitany)
+					bvcopy(all, pred.avarinitall)
+				} else {
+					bvor(any, any, pred.avarinitany)
+					bvand(all, all, pred.avarinitall)
+				}
+			}
+
+			bvandnot(any, any, bb.varkill)
+			bvandnot(all, all, bb.varkill)
+			bvor(any, any, bb.avarinit)
+			bvor(all, all, bb.avarinit)
+			if bvcmp(any, bb.avarinitany) != 0 {
+				change = 1
+				bvcopy(bb.avarinitany, any)
+			}
+
+			if bvcmp(all, bb.avarinitall) != 0 {
+				change = 1
+				bvcopy(bb.avarinitall, all)
+			}
+		}
+	}
+
+	// Iterate through the blocks in reverse round-robin fashion.  A work
+	// queue might be slightly faster.  As is, the number of iterations is
+	// so low that it hardly seems to be worth the complexity.
+	change = 1
+
+	for change != 0 {
+		change = 0
+
+		// Walk blocks in the general direction of propagation.  This
+		// improves convergence.
+		for i := len(lv.cfg) - 1; i >= 0; i-- {
+			bb := lv.cfg[i]
+
+			// A variable is live on output from this block
+			// if it is live on input to some successor.
+			//
+			// out[b] = \bigcup_{s \in succ[b]} in[s]
+			bvresetall(newliveout)
+			for _, succ := range bb.succ {
+				bvor(newliveout, newliveout, succ.livein)
+			}
+
+			if bvcmp(bb.liveout, newliveout) != 0 {
+				change = 1
+				bvcopy(bb.liveout, newliveout)
+			}
+
+			// A variable is live on input to this block
+			// if it is live on output from this block and
+			// not set by the code in this block.
+			//
+			// in[b] = uevar[b] \cup (out[b] \setminus varkill[b])
+			bvandnot(newlivein, bb.liveout, bb.varkill)
+
+			bvor(bb.livein, newlivein, bb.uevar)
+		}
+	}
+}
+
+// This function is slow but it is only used for generating debug prints.
+// Check whether n is marked live in args/locals.
+func islive(n *Node, args Bvec, locals Bvec) bool {
+	switch n.Class {
+	case PPARAM, PPARAMOUT:
+		for i := 0; int64(i) < n.Type.Width/int64(Widthptr); i++ {
+			if bvget(args, int32(n.Xoffset/int64(Widthptr)+int64(i))) != 0 {
+				return true
+			}
+		}
+
+	case PAUTO:
+		for i := 0; int64(i) < n.Type.Width/int64(Widthptr); i++ {
+			if bvget(locals, int32((n.Xoffset+stkptrsize)/int64(Widthptr)+int64(i))) != 0 {
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+// Visits all instructions in each basic block and computes a bit vector of
+// live variables at each safe point.
+func livenessepilogue(lv *Liveness) {
+	var pred *BasicBlock
+	var args Bvec
+	var locals Bvec
+	var n *Node
+	var p *obj.Prog
+	var j int32
+	var pos int32
+	var xoffset int64
+
+	nvars := int32(len(lv.vars))
+	livein := bvalloc(nvars)
+	liveout := bvalloc(nvars)
+	uevar := bvalloc(nvars)
+	varkill := bvalloc(nvars)
+	avarinit := bvalloc(nvars)
+	any := bvalloc(nvars)
+	all := bvalloc(nvars)
+	ambig := bvalloc(localswords())
+	nmsg := int32(0)
+	startmsg := int32(0)
+
+	for _, bb := range lv.cfg {
+		// Compute avarinitany and avarinitall for entry to block.
+		// This duplicates information known during livenesssolve
+		// but avoids storing two more vectors for each block.
+		bvresetall(any)
+
+		bvresetall(all)
+		for j = 0; j < int32(len(bb.pred)); j++ {
+			pred = bb.pred[j]
+			if j == 0 {
+				bvcopy(any, pred.avarinitany)
+				bvcopy(all, pred.avarinitall)
+			} else {
+				bvor(any, any, pred.avarinitany)
+				bvand(all, all, pred.avarinitall)
+			}
+		}
+
+		// Walk forward through the basic block instructions and
+		// allocate liveness maps for those instructions that need them.
+		// Seed the maps with information about the addrtaken variables.
+		for p = bb.first; ; p = p.Link {
+			progeffects(p, []*Node(lv.vars), uevar, varkill, avarinit)
+			bvandnot(any, any, varkill)
+			bvandnot(all, all, varkill)
+			bvor(any, any, avarinit)
+			bvor(all, all, avarinit)
+
+			if issafepoint(p) {
+				// Annotate ambiguously live variables so that they can
+				// be zeroed at function entry.
+				// livein and liveout are dead here and used as temporaries.
+				bvresetall(livein)
+
+				bvandnot(liveout, any, all)
+				if !bvisempty(liveout) {
+					for pos = 0; pos < liveout.n; pos++ {
+						if bvget(liveout, pos) == 0 {
+							continue
+						}
+						bvset(all, pos) // silence future warnings in this block
+						n = lv.vars[pos]
+						if !n.Name.Needzero {
+							n.Name.Needzero = true
+							if debuglive >= 1 {
+								Warnl(int(p.Lineno), "%v: %v is ambiguously live", Curfn.Nname, Nconv(n, obj.FmtLong))
+							}
+
+							// Record in 'ambiguous' bitmap.
+							xoffset = n.Xoffset + stkptrsize
+
+							onebitwalktype1(n.Type, &xoffset, ambig)
+						}
+					}
+				}
+
+				// Allocate a bit vector for each class and facet of
+				// value we are tracking.
+
+				// Live stuff first.
+				args = bvalloc(argswords())
+
+				lv.argslivepointers = append(lv.argslivepointers, args)
+				locals = bvalloc(localswords())
+				lv.livepointers = append(lv.livepointers, locals)
+
+				if debuglive >= 3 {
+					fmt.Printf("%v\n", p)
+					printvars("avarinitany", any, lv.vars)
+				}
+
+				// Record any values with an "address taken" reaching
+				// this code position as live. Must do now instead of below
+				// because the any/all calculation requires walking forward
+				// over the block (as this loop does), while the liveout
+				// requires walking backward (as the next loop does).
+				onebitlivepointermap(lv, any, lv.vars, args, locals)
+			}
+
+			if p == bb.last {
+				break
+			}
+		}
+
+		bb.lastbitmapindex = len(lv.livepointers) - 1
+	}
+
+	var fmt_ string
+	var next *obj.Prog
+	var numlive int32
+	var msg []string
+	for _, bb := range lv.cfg {
+		if debuglive >= 1 && Curfn.Nname.Sym.Name != "init" && Curfn.Nname.Sym.Name[0] != '.' {
+			nmsg = int32(len(lv.livepointers))
+			startmsg = nmsg
+			msg = make([]string, nmsg)
+			for j = 0; j < nmsg; j++ {
+				msg[j] = ""
+			}
+		}
+
+		// walk backward, emit pcdata and populate the maps
+		pos = int32(bb.lastbitmapindex)
+
+		if pos < 0 {
+			// the first block we encounter should have the ATEXT so
+			// at no point should pos ever be less than zero.
+			Fatal("livenessepilogue")
+		}
+
+		bvcopy(livein, bb.liveout)
+		for p = bb.last; p != nil; p = next {
+			next = p.Opt.(*obj.Prog) // splicebefore modifies p->opt
+
+			// Propagate liveness information
+			progeffects(p, lv.vars, uevar, varkill, avarinit)
+
+			bvcopy(liveout, livein)
+			bvandnot(livein, liveout, varkill)
+			bvor(livein, livein, uevar)
+			if debuglive >= 3 && issafepoint(p) {
+				fmt.Printf("%v\n", p)
+				printvars("uevar", uevar, lv.vars)
+				printvars("varkill", varkill, lv.vars)
+				printvars("livein", livein, lv.vars)
+				printvars("liveout", liveout, lv.vars)
+			}
+
+			if issafepoint(p) {
+				// Found an interesting instruction, record the
+				// corresponding liveness information.
+
+				// Useful sanity check: on entry to the function,
+				// the only things that can possibly be live are the
+				// input parameters.
+				if p.As == obj.ATEXT {
+					for j = 0; j < liveout.n; j++ {
+						if bvget(liveout, j) == 0 {
+							continue
+						}
+						n = lv.vars[j]
+						if n.Class != PPARAM {
+							yyerrorl(int(p.Lineno), "internal error: %v %v recorded as live on entry", Curfn.Nname, Nconv(n, obj.FmtLong))
+						}
+					}
+				}
+
+				// Record live pointers.
+				args = lv.argslivepointers[pos]
+
+				locals = lv.livepointers[pos]
+				onebitlivepointermap(lv, liveout, lv.vars, args, locals)
+
+				// Ambiguously live variables are zeroed immediately after
+				// function entry. Mark them live for all the non-entry bitmaps
+				// so that GODEBUG=gcdead=1 mode does not poison them.
+				if p.As == obj.ACALL {
+					bvor(locals, locals, ambig)
+				}
+
+				// Show live pointer bitmaps.
+				// We're interpreting the args and locals bitmap instead of liveout so that we
+				// include the bits added by the avarinit logic in the
+				// previous loop.
+				if msg != nil {
+					fmt_ = ""
+					fmt_ += fmt.Sprintf("%v: live at ", p.Line())
+					if p.As == obj.ACALL && p.To.Node != nil {
+						fmt_ += fmt.Sprintf("call to %s:", ((p.To.Node).(*Node)).Sym.Name)
+					} else if p.As == obj.ACALL {
+						fmt_ += "indirect call:"
+					} else {
+						fmt_ += fmt.Sprintf("entry to %s:", ((p.From.Node).(*Node)).Sym.Name)
+					}
+					numlive = 0
+					for j = 0; j < int32(len(lv.vars)); j++ {
+						n = lv.vars[j]
+						if islive(n, args, locals) {
+							fmt_ += fmt.Sprintf(" %v", n)
+							numlive++
+						}
+					}
+
+					fmt_ += "\n"
+					// Squelch the message when nothing is live.
+					if numlive != 0 {
+						startmsg--
+						msg[startmsg] = fmt_
+					}
+				}
+
+				// Only CALL instructions need a PCDATA annotation.
+				// The TEXT instruction annotation is implicit.
+				if p.As == obj.ACALL {
+					if isdeferreturn(p) {
+						// runtime.deferreturn modifies its return address to return
+						// back to the CALL, not to the subsequent instruction.
+						// Because the return comes back one instruction early,
+						// the PCDATA must begin one instruction early too.
+						// The instruction before a call to deferreturn is always a
+						// no-op, to keep PC-specific data unambiguous.
+						splicebefore(lv, bb, newpcdataprog(p.Opt.(*obj.Prog), pos), p.Opt.(*obj.Prog))
+					} else {
+						splicebefore(lv, bb, newpcdataprog(p, pos), p)
+					}
+				}
+
+				pos--
+			}
+		}
+
+		if msg != nil {
+			for j = startmsg; j < nmsg; j++ {
+				if msg[j] != "" {
+					fmt.Printf("%s", msg[j])
+				}
+			}
+
+			msg = nil
+			nmsg = 0
+			startmsg = 0
+		}
+	}
+
+	Flusherrors()
+}
+
+// FNV-1 hash function constants.
+const (
+	H0 = 2166136261 // offset basis for the 32-bit FNV-1 hash
+	Hp = 16777619   // prime for the 32-bit FNV-1 hash
+)
+
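+// hashbitmap extends the FNV-1 hash h with the bytes of the bitmap bv,
+// processing each 32-bit word a byte at a time (FNV-1: h = h*prime ^ byte).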
+func hashbitmap(h uint32, bv Bvec) uint32 {
+	var w uint32
+
+	n := int((bv.n + 31) / 32)
+	for i := 0; i < n; i++ {
+		w = bv.b[i]
+		h = (h * Hp) ^ (w & 0xff)
+		h = (h * Hp) ^ ((w >> 8) & 0xff)
+		h = (h * Hp) ^ ((w >> 16) & 0xff)
+		h = (h * Hp) ^ ((w >> 24) & 0xff)
+	}
+
+	return h
+}
+
+// Compact liveness information by coalescing identical per-call-site bitmaps.
+// The merging only happens for a single function, not across the entire binary.
+//
+// There are actually two lists of bitmaps, one list for the local variables and one
+// list for the function arguments. Both lists are indexed by the same PCDATA
+// index, so the corresponding pairs must be considered together when
+// merging duplicates. The argument bitmaps change much less often during
+// function execution than the local variable bitmaps, so it is possible that
+// we could introduce a separate PCDATA index for arguments vs locals and
+// then compact the set of argument bitmaps separately from the set of
+// local variable bitmaps. As of 2014-04-02, doing this to the godoc binary
+// is actually a net loss: we save about 50k of argument bitmaps but the new
+// PCDATA tables cost about 100k. So for now we keep using a single index for
+// both bitmap lists.
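+//
+// For example (a sketch), if the pair of bitmaps at index 3 is identical to
+// the pair at index 0, then remap[3] = 0 and any PCDATA value of 3 is
+// rewritten to 0.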
+func livenesscompact(lv *Liveness) {
+	// Linear probing hash table of bitmaps seen so far.
+	// The hash table has 4n entries to keep the linear
+	// scan short. An entry of -1 indicates an empty slot.
+	n := len(lv.livepointers)
+
+	tablesize := 4 * n
+	table := make([]int, tablesize)
+	for i := range table {
+		table[i] = -1
+	}
+
+	// remap[i] = the new index of the old bit vector #i.
+	remap := make([]int, n)
+
+	for i := range remap {
+		remap[i] = -1
+	}
+	uniq := 0 // unique tables found so far
+
+	// Consider bit vectors in turn.
+	// If new, assign next number using uniq,
+	// record in remap, record in lv->livepointers and lv->argslivepointers
+	// under the new index, and add entry to hash table.
+	// If already seen, record earlier index in remap and free bitmaps.
+	var jarg Bvec
+	var j int
+	var h uint32
+	var arg Bvec
+	var jlocal Bvec
+	var local Bvec
+	for i := 0; i < n; i++ {
+		local = lv.livepointers[i]
+		arg = lv.argslivepointers[i]
+		h = hashbitmap(hashbitmap(H0, local), arg) % uint32(tablesize)
+
+		for {
+			j = table[h]
+			if j < 0 {
+				break
+			}
+			jlocal = lv.livepointers[j]
+			jarg = lv.argslivepointers[j]
+			if bvcmp(local, jlocal) == 0 && bvcmp(arg, jarg) == 0 {
+				remap[i] = j
+				goto Next
+			}
+
+			h++
+			if h == uint32(tablesize) {
+				h = 0
+			}
+		}
+
+		table[h] = uniq
+		remap[i] = uniq
+		lv.livepointers[uniq] = local
+		lv.argslivepointers[uniq] = arg
+		uniq++
+	Next:
+	}
+
+	// We've already reordered lv->livepointers[0:uniq]
+	// and lv->argslivepointers[0:uniq] and freed the bitmaps
+	// we don't need anymore. Clear the pointers later in the
+	// array so that we can tell where the coalesced bitmaps stop
+	// and so that we don't double-free when cleaning up.
+	for j := uniq; j < n; j++ {
+		lv.livepointers[j] = Bvec{}
+		lv.argslivepointers[j] = Bvec{}
+	}
+
+	// Rewrite PCDATA instructions to use new numbering.
+	var i int
+	for p := lv.ptxt; p != nil; p = p.Link {
+		if p.As == obj.APCDATA && p.From.Offset == obj.PCDATA_StackMapIndex {
+			i = int(p.To.Offset)
+			if i >= 0 {
+				p.To.Offset = int64(remap[i])
+			}
+		}
+	}
+}
+
+func printbitset(printed int, name string, vars []*Node, bits Bvec) int {
+	started := 0
+	for i, n := range vars {
+		if bvget(bits, int32(i)) == 0 {
+			continue
+		}
+		if started == 0 {
+			if printed == 0 {
+				fmt.Printf("\t")
+			} else {
+				fmt.Printf(" ")
+			}
+			started = 1
+			printed = 1
+			fmt.Printf("%s=", name)
+		} else {
+			fmt.Printf(",")
+		}
+
+		fmt.Printf("%s", n.Sym.Name)
+	}
+
+	return printed
+}
+
+// Prints the computed liveness information and inputs, for debugging.
+// This format synthesizes the information used during the multiple passes
+// into a single presentation.
+func livenessprintdebug(lv *Liveness) {
+	var j int
+	var printed int
+	var p *obj.Prog
+	var args Bvec
+	var locals Bvec
+	var n *Node
+
+	fmt.Printf("liveness: %s\n", Curfn.Nname.Sym.Name)
+
+	uevar := bvalloc(int32(len(lv.vars)))
+	varkill := bvalloc(int32(len(lv.vars)))
+	avarinit := bvalloc(int32(len(lv.vars)))
+
+	pcdata := 0
+	for i, bb := range lv.cfg {
+		if i > 0 {
+			fmt.Printf("\n")
+		}
+
+		// bb#0 pred=1,2 succ=3,4
+		fmt.Printf("bb#%d pred=", i)
+
+		for j = 0; j < len(bb.pred); j++ {
+			if j > 0 {
+				fmt.Printf(",")
+			}
+			fmt.Printf("%d", (bb.pred[j]).rpo)
+		}
+
+		fmt.Printf(" succ=")
+		for j = 0; j < len(bb.succ); j++ {
+			if j > 0 {
+				fmt.Printf(",")
+			}
+			fmt.Printf("%d", (bb.succ[j]).rpo)
+		}
+
+		fmt.Printf("\n")
+
+		// initial settings
+		printed = 0
+
+		printed = printbitset(printed, "uevar", lv.vars, bb.uevar)
+		printed = printbitset(printed, "livein", lv.vars, bb.livein)
+		if printed != 0 {
+			fmt.Printf("\n")
+		}
+
+		// program listing, with individual effects listed
+		for p = bb.first; ; p = p.Link {
+			fmt.Printf("%v\n", p)
+			if p.As == obj.APCDATA && p.From.Offset == obj.PCDATA_StackMapIndex {
+				pcdata = int(p.To.Offset)
+			}
+			progeffects(p, lv.vars, uevar, varkill, avarinit)
+			printed = 0
+			printed = printbitset(printed, "uevar", lv.vars, uevar)
+			printed = printbitset(printed, "varkill", lv.vars, varkill)
+			printed = printbitset(printed, "avarinit", lv.vars, avarinit)
+			if printed != 0 {
+				fmt.Printf("\n")
+			}
+			if issafepoint(p) {
+				args = lv.argslivepointers[pcdata]
+				locals = lv.livepointers[pcdata]
+				fmt.Printf("\tlive=")
+				printed = 0
+				for j = 0; j < len(lv.vars); j++ {
+					n = lv.vars[j]
+					if islive(n, args, locals) {
+						tmp9 := printed
+						printed++
+						if tmp9 != 0 {
+							fmt.Printf(",")
+						}
+						fmt.Printf("%v", n)
+					}
+				}
+
+				fmt.Printf("\n")
+			}
+
+			if p == bb.last {
+				break
+			}
+		}
+
+		// bb bitsets
+		fmt.Printf("end\n")
+
+		printed = printbitset(printed, "varkill", lv.vars, bb.varkill)
+		printed = printbitset(printed, "liveout", lv.vars, bb.liveout)
+		printed = printbitset(printed, "avarinit", lv.vars, bb.avarinit)
+		printed = printbitset(printed, "avarinitany", lv.vars, bb.avarinitany)
+		printed = printbitset(printed, "avarinitall", lv.vars, bb.avarinitall)
+		if printed != 0 {
+			fmt.Printf("\n")
+		}
+	}
+
+	fmt.Printf("\n")
+}
+
+// Dumps an array of bitmaps to a symbol as a sequence of uint32 values.  The
+// first word dumped is the total number of bitmaps.  The second word is the
+// length of the bitmaps.  All bitmaps are assumed to be of equal length.  The
+// words that follow are the raw bitmap words.  The arr argument is an
+// array of Bvecs.
+func onebitwritesymbol(arr []Bvec, sym *Sym) {
+	var i int
+	var j int
+	var word uint32
+
+	n := len(arr)
+	off := 0
+	off += 4 // number of bitmaps, to fill in later
+	bv := arr[0]
+	off = duint32(sym, off, uint32(bv.n)) // number of bits in each bitmap
+	for i = 0; i < n; i++ {
+		// bitmap words
+		bv = arr[i]
+
+		if bv.b == nil {
+			break
+		}
+		for j = 0; int32(j) < bv.n; j += 32 {
+			word = bv.b[j/32]
+
+			// Runtime reads the bitmaps as byte arrays. Oblige.
+			off = duint8(sym, off, uint8(word))
+
+			off = duint8(sym, off, uint8(word>>8))
+			off = duint8(sym, off, uint8(word>>16))
+			off = duint8(sym, off, uint8(word>>24))
+		}
+	}
+
+	duint32(sym, 0, uint32(i)) // number of bitmaps
+	ggloblsym(sym, int32(off), obj.RODATA)
+}
+
+func printprog(p *obj.Prog) {
+	for p != nil {
+		fmt.Printf("%v\n", p)
+		p = p.Link
+	}
+}
+
+// Entry point for liveness analysis.  Constructs a complete CFG, solves for
+// the liveness of pointer variables in the function, and emits a runtime data
+// structure read by the garbage collector.
+func liveness(fn *Node, firstp *obj.Prog, argssym *Sym, livesym *Sym) {
+	// Change name to dump debugging information only for a specific function.
+	debugdelta := 0
+
+	if Curfn.Nname.Sym.Name == "!" {
+		debugdelta = 2
+	}
+
+	debuglive += debugdelta
+	if debuglive >= 3 {
+		fmt.Printf("liveness: %s\n", Curfn.Nname.Sym.Name)
+		printprog(firstp)
+	}
+
+	checkptxt(fn, firstp)
+
+	// Construct the global liveness state.
+	cfg := newcfg(firstp)
+
+	if debuglive >= 3 {
+		printcfg([]*BasicBlock(cfg))
+	}
+	vars := getvariables(fn)
+	lv := newliveness(fn, firstp, cfg, vars)
+
+	// Run the dataflow framework.
+	livenessprologue(lv)
+
+	if debuglive >= 3 {
+		livenessprintcfg(lv)
+	}
+	livenesssolve(lv)
+	if debuglive >= 3 {
+		livenessprintcfg(lv)
+	}
+	livenessepilogue(lv)
+	if debuglive >= 3 {
+		livenessprintcfg(lv)
+	}
+	livenesscompact(lv)
+
+	if debuglive >= 2 {
+		livenessprintdebug(lv)
+	}
+
+	// Emit the live pointer map data structures
+	onebitwritesymbol(lv.livepointers, livesym)
+
+	onebitwritesymbol(lv.argslivepointers, argssym)
+
+	// Free everything.
+	for l := fn.Func.Dcl; l != nil; l = l.Next {
+		if l.N != nil {
+			l.N.Opt = nil
+		}
+	}
+	freeliveness(lv)
+
+	freecfg([]*BasicBlock(cfg))
+
+	debuglive -= debugdelta
+}
diff --git a/src/cmd/compile/internal/gc/popt.go b/src/cmd/compile/internal/gc/popt.go
new file mode 100644
index 0000000..ce904e1
--- /dev/null
+++ b/src/cmd/compile/internal/gc/popt.go
@@ -0,0 +1,1086 @@
+// Derived from Inferno utils/6c/gc.h
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/gc.h
+//
+//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
+//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+//	Portions Copyright © 1997-1999 Vita Nuova Limited
+//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+//	Portions Copyright © 2004,2006 Bruce Ellis
+//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+//	Portions Copyright © 2009 The Go Authors.  All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// "Portable" optimizations.
+
+package gc
+
+import (
+	"cmd/internal/obj"
+	"fmt"
+	"sort"
+	"strings"
+)
+
+type OptStats struct {
+	Ncvtreg int32
+	Nspill  int32
+	Nreload int32
+	Ndelmov int32
+	Nvar    int32
+	Naddr   int32
+}
+
+var Ostats OptStats
+
+var noreturn_symlist [10]*Sym
+
+// p is a call instruction. Does the call fail to return?
+func Noreturn(p *obj.Prog) bool {
+	if noreturn_symlist[0] == nil {
+		noreturn_symlist[0] = Pkglookup("panicindex", Runtimepkg)
+		noreturn_symlist[1] = Pkglookup("panicslice", Runtimepkg)
+		noreturn_symlist[2] = Pkglookup("throwinit", Runtimepkg)
+		noreturn_symlist[3] = Pkglookup("gopanic", Runtimepkg)
+		noreturn_symlist[4] = Pkglookup("panicwrap", Runtimepkg)
+		noreturn_symlist[5] = Pkglookup("throwreturn", Runtimepkg)
+		noreturn_symlist[6] = Pkglookup("selectgo", Runtimepkg)
+		noreturn_symlist[7] = Pkglookup("block", Runtimepkg)
+	}
+
+	if p.To.Node == nil {
+		return false
+	}
+	s := ((p.To.Node).(*Node)).Sym
+	if s == nil {
+		return false
+	}
+	for i := 0; noreturn_symlist[i] != nil; i++ {
+		if s == noreturn_symlist[i] {
+			return true
+		}
+	}
+	return false
+}
+
+// JMP chasing and removal.
+//
+// The code generator depends on being able to write out jump
+// instructions that it can jump to now but fill in later.
+// The linker will resolve them nicely, but they make the code
+// longer and more difficult to follow during debugging.
+// Remove them.
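+//
+// For example (a sketch), in a chain like
+//
+//	JMP L1
+//	...
+//	L1: JMP L2
+//	...
+//	L2: MOVQ ...
+//
+// the first instruction is rewritten to jump directly to L2, and JMPs that
+// become unreachable are deleted.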
+
+/* What instruction does a JMP to p eventually land on? */
+func chasejmp(p *obj.Prog, jmploop *int) *obj.Prog {
+	n := 0
+	for p != nil && p.As == obj.AJMP && p.To.Type == obj.TYPE_BRANCH {
+		n++
+		if n > 10 {
+			*jmploop = 1
+			break
+		}
+
+		p = p.To.Val.(*obj.Prog)
+	}
+
+	return p
+}
+
+/*
+ * Reuse the Prog.Opt field for mark/sweep state.
+ * Leave Opt == nil at the end because alive == nil.
+ */
+var alive interface{} = nil
+var dead interface{} = 1
+
+/* mark all code reachable from firstp as alive */
+func mark(firstp *obj.Prog) {
+	for p := firstp; p != nil; p = p.Link {
+		if p.Opt != dead {
+			break
+		}
+		p.Opt = alive
+		if p.As != obj.ACALL && p.To.Type == obj.TYPE_BRANCH && p.To.Val.(*obj.Prog) != nil {
+			mark(p.To.Val.(*obj.Prog))
+		}
+		if p.As == obj.AJMP || p.As == obj.ARET || p.As == obj.AUNDEF {
+			break
+		}
+	}
+}
+
+func fixjmp(firstp *obj.Prog) {
+	if Debug['R'] != 0 && Debug['v'] != 0 {
+		fmt.Printf("\nfixjmp\n")
+	}
+
+	// pass 1: resolve jump to jump, mark all code as dead.
+	jmploop := 0
+
+	for p := firstp; p != nil; p = p.Link {
+		if Debug['R'] != 0 && Debug['v'] != 0 {
+			fmt.Printf("%v\n", p)
+		}
+		if p.As != obj.ACALL && p.To.Type == obj.TYPE_BRANCH && p.To.Val.(*obj.Prog) != nil && p.To.Val.(*obj.Prog).As == obj.AJMP {
+			p.To.Val = chasejmp(p.To.Val.(*obj.Prog), &jmploop)
+			if Debug['R'] != 0 && Debug['v'] != 0 {
+				fmt.Printf("->%v\n", p)
+			}
+		}
+
+		p.Opt = dead
+	}
+
+	if Debug['R'] != 0 && Debug['v'] != 0 {
+		fmt.Printf("\n")
+	}
+
+	// pass 2: mark all reachable code alive
+	mark(firstp)
+
+	// pass 3: delete dead code (mostly JMPs).
+	var last *obj.Prog
+
+	for p := firstp; p != nil; p = p.Link {
+		if p.Opt == dead {
+			if p.Link == nil && p.As == obj.ARET && last != nil && last.As != obj.ARET {
+				// This is the final ARET, and the code so far doesn't have one.
+				// Let it stay. The register allocator assumes that all live code in
+				// the function can be traversed by starting at all the RET instructions
+				// and following predecessor links. If we remove the final RET,
+				// this assumption will not hold in the case of an infinite loop
+				// at the end of a function.
+				// Keep the RET but mark it dead for the liveness analysis.
+				p.Mode = 1
+			} else {
+				if Debug['R'] != 0 && Debug['v'] != 0 {
+					fmt.Printf("del %v\n", p)
+				}
+				continue
+			}
+		}
+
+		if last != nil {
+			last.Link = p
+		}
+		last = p
+	}
+
+	last.Link = nil
+
+	// pass 4: elide JMP to next instruction.
+	// only safe if there are no jumps to JMPs anymore.
+	if jmploop == 0 {
+		var last *obj.Prog
+		for p := firstp; p != nil; p = p.Link {
+			if p.As == obj.AJMP && p.To.Type == obj.TYPE_BRANCH && p.To.Val == p.Link {
+				if Debug['R'] != 0 && Debug['v'] != 0 {
+					fmt.Printf("del %v\n", p)
+				}
+				continue
+			}
+
+			if last != nil {
+				last.Link = p
+			}
+			last = p
+		}
+
+		last.Link = nil
+	}
+
+	if Debug['R'] != 0 && Debug['v'] != 0 {
+		fmt.Printf("\n")
+		for p := firstp; p != nil; p = p.Link {
+			fmt.Printf("%v\n", p)
+		}
+		fmt.Printf("\n")
+	}
+}
+
+// Control flow analysis. The Flow structures hold predecessor and successor
+// information as well as basic loop analysis.
+//
+//	graph := Flowstart(firstp, nil)
+//	... use flow graph ...
+//	Flowend(graph) // free graph
+//
+// Typical uses of the flow graph are to iterate over all the flow-relevant instructions:
+//
+//	for f := graph.Start; f != nil; f = f.Link {
+//
+// or, given an instruction f, to iterate over all the predecessors, which is
+// f.P1 and this list:
+//
+//	for f2 := f.P2; f2 != nil; f2 = f2.P2link {
+//
+// The newData argument to Flowstart, if non-nil, is called to allocate the
+// initial contents of every f.Data field, for use by the client.
+// If newData is nil, f.Data will be nil.
+
+var flowmark int
+
+// MaxFlowProg is the maximum size program (counted in instructions)
+// for which the flow code will build a graph. Functions larger than this limit
+// will not have flow graphs and consequently will not be optimized.
+const MaxFlowProg = 50000
+
+func Flowstart(firstp *obj.Prog, newData func() interface{}) *Graph {
+	// Count and mark instructions to annotate.
+	nf := 0
+
+	for p := firstp; p != nil; p = p.Link {
+		p.Opt = nil // should be already, but just in case
+		Thearch.Proginfo(p)
+		if p.Info.Flags&Skip != 0 {
+			continue
+		}
+		p.Opt = &flowmark
+		nf++
+	}
+
+	if nf == 0 {
+		return nil
+	}
+
+	if nf >= MaxFlowProg {
+		if Debug['v'] != 0 {
+			Warn("%v is too big (%d instructions)", Curfn.Nname.Sym, nf)
+		}
+		return nil
+	}
+
+	// Allocate annotations and assign to instructions.
+	graph := new(Graph)
+	ff := make([]Flow, nf)
+	start := &ff[0]
+	id := 0
+	var last *Flow
+	for p := firstp; p != nil; p = p.Link {
+		if p.Opt == nil {
+			continue
+		}
+		f := &ff[0]
+		ff = ff[1:]
+		p.Opt = f
+		f.Prog = p
+		if last != nil {
+			last.Link = f
+		}
+		last = f
+		if newData != nil {
+			f.Data = newData()
+		}
+		f.Id = int32(id)
+		id++
+	}
+
+	// Fill in pred/succ information.
+	var f1 *Flow
+	var p *obj.Prog
+	for f := start; f != nil; f = f.Link {
+		p = f.Prog
+		if p.Info.Flags&Break == 0 {
+			f1 = f.Link
+			f.S1 = f1
+			f1.P1 = f
+		}
+
+		if p.To.Type == obj.TYPE_BRANCH {
+			if p.To.Val == nil {
+				Fatal("pnil %v", p)
+			}
+			f1 = p.To.Val.(*obj.Prog).Opt.(*Flow)
+			if f1 == nil {
+				Fatal("fnil %v / %v", p, p.To.Val.(*obj.Prog))
+			}
+			if f1 == f {
+				//fatal("self loop %P", p);
+				continue
+			}
+
+			f.S2 = f1
+			f.P2link = f1.P2
+			f1.P2 = f
+		}
+	}
+
+	graph.Start = start
+	graph.Num = nf
+	return graph
+}
+
+func Flowend(graph *Graph) {
+	for f := graph.Start; f != nil; f = f.Link {
+		f.Prog.Info.Flags = 0 // drop cached proginfo
+		f.Prog.Opt = nil
+	}
+}
+
+/*
+ * find looping structure
+ *
+ * 1) find reverse postordering
+ * 2) find approximate dominators,
+ *	the actual dominators if the flow graph is reducible
+ *	otherwise, dominators plus some other non-dominators.
+ *	See Matthew S. Hecht and Jeffrey D. Ullman,
+ *	"Analysis of a Simple Algorithm for Global Data Flow Problems",
+ *	Conf.  Record of ACM Symp. on Principles of Prog. Langs, Boston, Massachusetts,
+ *	Oct. 1-3, 1973, pp.  207-217.
+ * 3) find all nodes with a predecessor dominated by the current node.
+ *	such a node is a loop head.
+ *	recursively, all preds with a greater rpo number are in the loop
+ */
+func postorder(r *Flow, rpo2r []*Flow, n int32) int32 {
+	r.Rpo = 1
+	r1 := r.S1
+	if r1 != nil && r1.Rpo == 0 {
+		n = postorder(r1, rpo2r, n)
+	}
+	r1 = r.S2
+	if r1 != nil && r1.Rpo == 0 {
+		n = postorder(r1, rpo2r, n)
+	}
+	rpo2r[n] = r
+	n++
+	return n
+}
+
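+// rpolca returns the lowest common ancestor of rpo1 and rpo2 in the
+// (approximate) dominator tree idom; rpo1 == -1 acts as an identity value.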
+func rpolca(idom []int32, rpo1 int32, rpo2 int32) int32 {
+	if rpo1 == -1 {
+		return rpo2
+	}
+	var t int32
+	for rpo1 != rpo2 {
+		if rpo1 > rpo2 {
+			t = rpo2
+			rpo2 = rpo1
+			rpo1 = t
+		}
+
+		for rpo1 < rpo2 {
+			t = idom[rpo2]
+			if t >= rpo2 {
+				Fatal("bad idom")
+			}
+			rpo2 = t
+		}
+	}
+
+	return rpo1
+}
+
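+// doms reports whether the node numbered r dominates the node numbered s,
+// judged by walking up the (approximate) dominator array idom.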
+func doms(idom []int32, r int32, s int32) bool {
+	for s > r {
+		s = idom[s]
+	}
+	return s == r
+}
+
+func loophead(idom []int32, r *Flow) bool {
+	src := r.Rpo
+	if r.P1 != nil && doms(idom, src, r.P1.Rpo) {
+		return true
+	}
+	for r = r.P2; r != nil; r = r.P2link {
+		if doms(idom, src, r.Rpo) {
+			return true
+		}
+	}
+	return false
+}
+
+func loopmark(rpo2r **Flow, head int32, r *Flow) {
+	if r.Rpo < head || r.Active == head {
+		return
+	}
+	r.Active = head
+	r.Loop += LOOP
+	if r.P1 != nil {
+		loopmark(rpo2r, head, r.P1)
+	}
+	for r = r.P2; r != nil; r = r.P2link {
+		loopmark(rpo2r, head, r)
+	}
+}
+
+func flowrpo(g *Graph) {
+	g.Rpo = make([]*Flow, g.Num)
+	idom := make([]int32, g.Num)
+
+	for r1 := g.Start; r1 != nil; r1 = r1.Link {
+		r1.Active = 0
+	}
+
+	rpo2r := g.Rpo
+	d := postorder(g.Start, rpo2r, 0)
+	nr := int32(g.Num)
+	if d > nr {
+		Fatal("too many reg nodes %d %d", d, nr)
+	}
+	nr = d
+	var r1 *Flow
+	for i := int32(0); i < nr/2; i++ {
+		r1 = rpo2r[i]
+		rpo2r[i] = rpo2r[nr-1-i]
+		rpo2r[nr-1-i] = r1
+	}
+
+	for i := int32(0); i < nr; i++ {
+		rpo2r[i].Rpo = i
+	}
+
+	idom[0] = 0
+	var me int32
+	for i := int32(0); i < nr; i++ {
+		r1 = rpo2r[i]
+		me = r1.Rpo
+		d = -1
+
+		// rpo2r[r->rpo] == r protects against considering dead code,
+		// which has r->rpo == 0.
+		if r1.P1 != nil && rpo2r[r1.P1.Rpo] == r1.P1 && r1.P1.Rpo < me {
+			d = r1.P1.Rpo
+		}
+		for r1 = r1.P2; r1 != nil; r1 = r1.P2link {
+			if rpo2r[r1.Rpo] == r1 && r1.Rpo < me {
+				d = rpolca(idom, d, r1.Rpo)
+			}
+		}
+		idom[i] = d
+	}
+
+	for i := int32(0); i < nr; i++ {
+		r1 = rpo2r[i]
+		r1.Loop++
+		if r1.P2 != nil && loophead(idom, r1) {
+			loopmark(&rpo2r[0], i, r1)
+		}
+	}
+
+	for r1 := g.Start; r1 != nil; r1 = r1.Link {
+		r1.Active = 0
+	}
+}
+
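+// Uniqp returns the unique predecessor of r, or nil if r has zero or more
+// than one predecessor.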
+func Uniqp(r *Flow) *Flow {
+	r1 := r.P1
+	if r1 == nil {
+		r1 = r.P2
+		if r1 == nil || r1.P2link != nil {
+			return nil
+		}
+	} else if r.P2 != nil {
+		return nil
+	}
+	return r1
+}
+
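+// Uniqs returns the unique successor of r, or nil if r has zero or more
+// than one successor.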
+func Uniqs(r *Flow) *Flow {
+	r1 := r.S1
+	if r1 == nil {
+		r1 = r.S2
+		if r1 == nil {
+			return nil
+		}
+	} else if r.S2 != nil {
+		return nil
+	}
+	return r1
+}
+
+// The compilers assume they can generate temporary variables
+// as needed to preserve the right semantics or simplify code
+// generation and the back end will still generate good code.
+// This results in a large number of ephemeral temporary variables.
+// Merge temps with non-overlapping lifetimes and equal types using the
+// greedy algorithm in Poletto and Sarkar, "Linear Scan Register Allocation",
+// ACM TOPLAS 1999.
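+//
+// For example (a sketch), in
+//
+//	autotmp_1 := f()
+//	g(autotmp_1)
+//	autotmp_2 := h()
+//	g(autotmp_2)
+//
+// the live ranges of autotmp_1 and autotmp_2 do not overlap, so if the two
+// temporaries have the same type (and matching addrtaken bits) they can
+// share a single stack slot.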
+
+type TempVar struct {
+	node    *Node
+	def     *Flow    // definition of temp var
+	use     *Flow    // use list, chained through Flow.data
+	merge   *TempVar // merge var with this one
+	start   int64    // smallest Prog.pc in live range
+	end     int64    // largest Prog.pc in live range
+	addr    uint8    // address taken - no accurate end
+	removed uint8    // removed from program
+}
+
+type startcmp []*TempVar
+
+func (x startcmp) Len() int {
+	return len(x)
+}
+
+func (x startcmp) Swap(i, j int) {
+	x[i], x[j] = x[j], x[i]
+}
+
+func (x startcmp) Less(i, j int) bool {
+	a := x[i]
+	b := x[j]
+
+	if a.start < b.start {
+		return true
+	}
+	if a.start > b.start {
+		return false
+	}
+
+	// Order what's left by id or symbol name,
+	// just so that sort is forced into a specific ordering,
+	// so that the result of the sort does not depend on
+	// the sort implementation.
+	if a.def != b.def {
+		return int(a.def.Id-b.def.Id) < 0
+	}
+	if a.node != b.node {
+		return stringsCompare(a.node.Sym.Name, b.node.Sym.Name) < 0
+	}
+	return false
+}
+
+// Is n available for merging?
+func canmerge(n *Node) bool {
+	return n.Class == PAUTO && strings.HasPrefix(n.Sym.Name, "autotmp")
+}
+
+func mergetemp(firstp *obj.Prog) {
+	const (
+		debugmerge = 0
+	)
+
+	g := Flowstart(firstp, nil)
+	if g == nil {
+		return
+	}
+
+	// Build list of all mergeable variables.
+	nvar := 0
+	for l := Curfn.Func.Dcl; l != nil; l = l.Next {
+		if canmerge(l.N) {
+			nvar++
+		}
+	}
+
+	var_ := make([]TempVar, nvar)
+	nvar = 0
+	var n *Node
+	var v *TempVar
+	for l := Curfn.Func.Dcl; l != nil; l = l.Next {
+		n = l.N
+		if canmerge(n) {
+			v = &var_[nvar]
+			nvar++
+			n.Opt = v
+			v.node = n
+		}
+	}
+
+	// Build list of uses.
+	// We assume that the earliest reference to a temporary is its definition.
+	// This is not true of variables in general but our temporaries are all
+	// single-use (that's why we have so many!).
+	for f := g.Start; f != nil; f = f.Link {
+		p := f.Prog
+		if p.From.Node != nil && ((p.From.Node).(*Node)).Opt != nil && p.To.Node != nil && ((p.To.Node).(*Node)).Opt != nil {
+			Fatal("double node %v", p)
+		}
+		v = nil
+		n, _ = p.From.Node.(*Node)
+		if n != nil {
+			v, _ = n.Opt.(*TempVar)
+		}
+		if v == nil {
+			n, _ = p.To.Node.(*Node)
+			if n != nil {
+				v, _ = n.Opt.(*TempVar)
+			}
+		}
+		if v != nil {
+			if v.def == nil {
+				v.def = f
+			}
+			f.Data = v.use
+			v.use = f
+			if n == p.From.Node && (p.Info.Flags&LeftAddr != 0) {
+				v.addr = 1
+			}
+		}
+	}
+
+	if debugmerge > 1 && Debug['v'] != 0 {
+		Dumpit("before", g.Start, 0)
+	}
+
+	nkill := 0
+
+	// Special case.
+	for i := 0; i < len(var_); i++ {
+		v = &var_[i]
+		if v.addr != 0 {
+			continue
+		}
+
+		// Used in only one instruction, which had better be a write.
+		f := v.use
+		if f != nil && f.Data.(*Flow) == nil {
+			p := f.Prog
+			if p.To.Node == v.node && (p.Info.Flags&RightWrite != 0) && p.Info.Flags&RightRead == 0 {
+				p.As = obj.ANOP
+				p.To = obj.Addr{}
+				v.removed = 1
+				if debugmerge > 0 && Debug['v'] != 0 {
+					fmt.Printf("drop write-only %v\n", v.node.Sym)
+				}
+			} else {
+				Fatal("temp used and not set: %v", p)
+			}
+			nkill++
+			continue
+		}
+
+		// Written in one instruction, read in the next, otherwise unused,
+		// no jumps to the next instruction. Happens mainly in the 386 compiler.
+		f = v.use
+		if f != nil && f.Link == f.Data.(*Flow) && (f.Data.(*Flow)).Data.(*Flow) == nil && Uniqp(f.Link) == f {
+			p := f.Prog
+			p1 := f.Link.Prog
+			const (
+				SizeAny = SizeB | SizeW | SizeL | SizeQ | SizeF | SizeD
+			)
+			if p.From.Node == v.node && p1.To.Node == v.node && (p.Info.Flags&Move != 0) && (p.Info.Flags|p1.Info.Flags)&(LeftAddr|RightAddr) == 0 && p.Info.Flags&SizeAny == p1.Info.Flags&SizeAny {
+				p1.From = p.From
+				Thearch.Excise(f)
+				v.removed = 1
+				if debugmerge > 0 && Debug['v'] != 0 {
+					fmt.Printf("drop immediate-use %v\n", v.node.Sym)
+				}
+			}
+
+			nkill++
+			continue
+		}
+	}
+
+	// Traverse live range of each variable to set start, end.
+	// Each flood uses a new value of gen so that we don't have
+	// to clear all the r->active words after each variable.
+	gen := int32(0)
+
+	for i := 0; i < len(var_); i++ {
+		v = &var_[i]
+		gen++
+		for f := v.use; f != nil; f = f.Data.(*Flow) {
+			mergewalk(v, f, uint32(gen))
+		}
+		if v.addr != 0 {
+			gen++
+			for f := v.use; f != nil; f = f.Data.(*Flow) {
+				varkillwalk(v, f, uint32(gen))
+			}
+		}
+	}
+
+	// Sort variables by start.
+	bystart := make([]*TempVar, len(var_))
+
+	for i := 0; i < len(var_); i++ {
+		bystart[i] = &var_[i]
+	}
+	sort.Sort(startcmp(bystart[:len(var_)]))
+
+	// List of in-use variables, sorted by end, so that the ones that
+	// will last the longest are the earliest ones in the array.
+	// The tail inuse[nfree:] holds no-longer-used variables.
+	// In theory we should use a sorted tree so that insertions are
+	// guaranteed O(log n) and then the loop is guaranteed O(n log n).
+	// In practice, it doesn't really matter.
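+	// Invariant (sketch): inuse[0:ninuse] is kept sorted by decreasing
+	// end, so inuse[ninuse-1] is always the next variable to expire,
+	// and inuse[nfree:] holds expired variables available for reuse.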
+	inuse := make([]*TempVar, len(var_))
+
+	ninuse := 0
+	nfree := len(var_)
+	var t *Type
+	var v1 *TempVar
+	var j int
+	for i := 0; i < len(var_); i++ {
+		v = bystart[i]
+		if debugmerge > 0 && Debug['v'] != 0 {
+			fmt.Printf("consider %v: removed=%d\n", Nconv(v.node, obj.FmtSharp), v.removed)
+		}
+
+		if v.removed != 0 {
+			continue
+		}
+
+		// Expire no longer in use.
+		for ninuse > 0 && inuse[ninuse-1].end < v.start {
+			ninuse--
+			v1 = inuse[ninuse]
+			nfree--
+			inuse[nfree] = v1
+		}
+
+		if debugmerge > 0 && Debug['v'] != 0 {
+			fmt.Printf("consider %v: removed=%d nfree=%d nvar=%d\n", Nconv(v.node, obj.FmtSharp), v.removed, nfree, len(var_))
+		}
+
+		// Find old temp to reuse if possible.
+		t = v.node.Type
+
+		for j = nfree; j < len(var_); j++ {
+			v1 = inuse[j]
+			if debugmerge > 0 && Debug['v'] != 0 {
+				fmt.Printf("consider %v: maybe %v: type=%v,%v addrtaken=%v,%v\n", Nconv(v.node, obj.FmtSharp), Nconv(v1.node, obj.FmtSharp), t, v1.node.Type, v.node.Addrtaken, v1.node.Addrtaken)
+			}
+
+			// Require the types to match but also require the addrtaken bits to match.
+			// If a variable's address is taken, that disables registerization for the individual
+			// words of the variable (for example, the base,len,cap of a slice).
+			// We don't want to merge a non-addressed var with an addressed one and
+			// inhibit registerization of the former.
+			if Eqtype(t, v1.node.Type) && v.node.Addrtaken == v1.node.Addrtaken {
+				inuse[j] = inuse[nfree]
+				nfree++
+				if v1.merge != nil {
+					v.merge = v1.merge
+				} else {
+					v.merge = v1
+				}
+				nkill++
+				break
+			}
+		}
+
+		// Sort v into inuse.
+		j = ninuse
+		ninuse++
+
+		for j > 0 && inuse[j-1].end < v.end {
+			inuse[j] = inuse[j-1]
+			j--
+		}
+
+		inuse[j] = v
+	}
+
+	if debugmerge > 0 && Debug['v'] != 0 {
+		fmt.Printf("%v [%d - %d]\n", Curfn.Nname.Sym, len(var_), nkill)
+		var v *TempVar
+		for i := 0; i < len(var_); i++ {
+			v = &var_[i]
+			fmt.Printf("var %v %v %d-%d", Nconv(v.node, obj.FmtSharp), v.node.Type, v.start, v.end)
+			if v.addr != 0 {
+				fmt.Printf(" addr=1")
+			}
+			if v.removed != 0 {
+				fmt.Printf(" dead=1")
+			}
+			if v.merge != nil {
+				fmt.Printf(" merge %v", Nconv(v.merge.node, obj.FmtSharp))
+			}
+			if v.start == v.end && v.def != nil {
+				fmt.Printf(" %v", v.def.Prog)
+			}
+			fmt.Printf("\n")
+		}
+
+		if debugmerge > 1 && Debug['v'] != 0 {
+			Dumpit("after", g.Start, 0)
+		}
+	}
+
+	// Update node references to use merged temporaries.
+	for f := g.Start; f != nil; f = f.Link {
+		p := f.Prog
+		n, _ = p.From.Node.(*Node)
+		if n != nil {
+			v, _ = n.Opt.(*TempVar)
+			if v != nil && v.merge != nil {
+				p.From.Node = v.merge.node
+			}
+		}
+		n, _ = p.To.Node.(*Node)
+		if n != nil {
+			v, _ = n.Opt.(*TempVar)
+			if v != nil && v.merge != nil {
+				p.To.Node = v.merge.node
+			}
+		}
+	}
+
+	// Delete merged nodes from declaration list.
+	var l *NodeList
+	for lp := &Curfn.Func.Dcl; ; {
+		l = *lp
+		if l == nil {
+			break
+		}
+
+		Curfn.Func.Dcl.End = l
+		n = l.N
+		v, _ = n.Opt.(*TempVar)
+		if v != nil && (v.merge != nil || v.removed != 0) {
+			*lp = l.Next
+			continue
+		}
+
+		lp = &l.Next
+	}
+
+	// Clear aux structures.
+	for i := 0; i < len(var_); i++ {
+		var_[i].node.Opt = nil
+	}
+
+	Flowend(g)
+}
+
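+// mergewalk floods backward from use f0 along predecessor edges,
+// stamping visited Flow nodes with gen, extending v's live range
+// [v.start, v.end], and stopping once it reaches v's definition.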
+func mergewalk(v *TempVar, f0 *Flow, gen uint32) {
+	var p *obj.Prog
+	var f1 *Flow
+
+	for f1 = f0; f1 != nil; f1 = f1.P1 {
+		if uint32(f1.Active) == gen {
+			break
+		}
+		f1.Active = int32(gen)
+		p = f1.Prog
+		if v.end < p.Pc {
+			v.end = p.Pc
+		}
+		if f1 == v.def {
+			v.start = p.Pc
+			break
+		}
+	}
+
+	var f2 *Flow
+	for f := f0; f != f1; f = f.P1 {
+		for f2 = f.P2; f2 != nil; f2 = f2.P2link {
+			mergewalk(v, f2, gen)
+		}
+	}
+}
+
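+// varkillwalk floods forward along successor edges, widening v's
+// live range until a RET or a VARKILL of v.node ends the lifetime.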
+func varkillwalk(v *TempVar, f0 *Flow, gen uint32) {
+	var p *obj.Prog
+	var f1 *Flow
+
+	for f1 = f0; f1 != nil; f1 = f1.S1 {
+		if uint32(f1.Active) == gen {
+			break
+		}
+		f1.Active = int32(gen)
+		p = f1.Prog
+		if v.end < p.Pc {
+			v.end = p.Pc
+		}
+		if v.start > p.Pc {
+			v.start = p.Pc
+		}
+		if p.As == obj.ARET || (p.As == obj.AVARKILL && p.To.Node == v.node) {
+			break
+		}
+	}
+
+	for f := f0; f != f1; f = f.S1 {
+		varkillwalk(v, f.S2, gen)
+	}
+}
+
+// Eliminate redundant nil pointer checks.
+//
+// The code generation pass emits a CHECKNIL for every possibly nil pointer.
+// This pass removes a CHECKNIL if every predecessor path has already
+// checked this value for nil.
+//
+// Simple backwards flood from check to definition.
+// Run prog loop backward from end of program to beginning to avoid quadratic
+// behavior removing a run of checks.
+//
+// Assume that stack variables with address not taken can be loaded multiple times
+// from memory without being rechecked. Other variables need to be checked on
+// each load.
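+//
+// For example (illustrative):
+//
+//	CHECKNIL R1
+//	MOVQ (R1), AX
+//	CHECKNIL R1    // removable: R1 already checked on every path here
+//	MOVQ 8(R1), BX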
+
+var killed int // f->data is either nil or &killed
+
+func nilopt(firstp *obj.Prog) {
+	g := Flowstart(firstp, nil)
+	if g == nil {
+		return
+	}
+
+	if Debug_checknil > 1 { /* || strcmp(curfn->nname->sym->name, "f1") == 0 */
+		Dumpit("nilopt", g.Start, 0)
+	}
+
+	ncheck := 0
+	nkill := 0
+	var p *obj.Prog
+	for f := g.Start; f != nil; f = f.Link {
+		p = f.Prog
+		if p.As != obj.ACHECKNIL || !Thearch.Regtyp(&p.From) {
+			continue
+		}
+		ncheck++
+		if Thearch.Stackaddr(&p.From) {
+			if Debug_checknil != 0 && p.Lineno > 1 {
+				Warnl(int(p.Lineno), "removed nil check of SP address")
+			}
+			f.Data = &killed
+			continue
+		}
+
+		nilwalkfwd(f)
+		if f.Data != nil {
+			if Debug_checknil != 0 && p.Lineno > 1 {
+				Warnl(int(p.Lineno), "removed nil check before indirect")
+			}
+			continue
+		}
+
+		nilwalkback(f)
+		if f.Data != nil {
+			if Debug_checknil != 0 && p.Lineno > 1 {
+				Warnl(int(p.Lineno), "removed repeated nil check")
+			}
+			continue
+		}
+	}
+
+	for f := g.Start; f != nil; f = f.Link {
+		if f.Data != nil {
+			nkill++
+			Thearch.Excise(f)
+		}
+	}
+
+	Flowend(g)
+
+	if Debug_checknil > 1 {
+		fmt.Printf("%v: removed %d of %d nil checks\n", Curfn.Nname.Sym, nkill, ncheck)
+	}
+}
+
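+// nilwalkback scans backward from fcheck along unique predecessors:
+// finding an earlier identical CHECKNIL first makes this one redundant
+// (marked killed); finding a write to the value first means the check
+// must stay.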
+func nilwalkback(fcheck *Flow) {
+	for f := fcheck; f != nil; f = Uniqp(f) {
+		p := f.Prog
+		if (p.Info.Flags&RightWrite != 0) && Thearch.Sameaddr(&p.To, &fcheck.Prog.From) {
+			// Found initialization of the value we're checking for nil,
+			// without first finding the check, so this one is unchecked.
+			return
+		}
+
+		if f != fcheck && p.As == obj.ACHECKNIL && Thearch.Sameaddr(&p.From, &fcheck.Prog.From) {
+			fcheck.Data = &killed
+			return
+		}
+	}
+}
+
+// Here is a more complex version that scans backward across branches.
+// It assumes fcheck->kill = 1 has been set on entry, and its job is to find a reason
+// to keep the check (setting fcheck->kill = 0).
+// It doesn't handle copying of aggregates as well as I would like,
+// nor variables with their address taken,
+// and it's too subtle to turn on this late in Go 1.2. Perhaps for Go 1.3.
+/*
+for(f1 = f0; f1 != nil; f1 = f1->p1) {
+	if(f1->active == gen)
+		break;
+	f1->active = gen;
+	p = f1->prog;
+
+	// If same check, stop this loop but still check
+	// alternate predecessors up to this point.
+	if(f1 != fcheck && p->as == ACHECKNIL && thearch.sameaddr(&p->from, &fcheck->prog->from))
+		break;
+
+	if((p.Info.flags & RightWrite) && thearch.sameaddr(&p->to, &fcheck->prog->from)) {
+		// Found initialization of value we're checking for nil.
+		// without first finding the check, so this one is unchecked.
+		fcheck->kill = 0;
+		return;
+	}
+
+	if(f1->p1 == nil && f1->p2 == nil) {
+		print("lost pred for %P\n", fcheck->prog);
+		for(f1=f0; f1!=nil; f1=f1->p1) {
+			thearch.proginfo(&info, f1->prog);
+			print("\t%P %d %d %D %D\n", r1->prog, info.flags&RightWrite, thearch.sameaddr(&f1->prog->to, &fcheck->prog->from), &f1->prog->to, &fcheck->prog->from);
+		}
+		fatal("lost pred trail");
+	}
+}
+
+for(f = f0; f != f1; f = f->p1)
+	for(f2 = f->p2; f2 != nil; f2 = f2->p2link)
+		nilwalkback(fcheck, f2, gen);
+*/
+
+func nilwalkfwd(fcheck *Flow) {
+	// If the path down from rcheck dereferences the address
+	// (possibly with a small offset) before writing to memory
+	// and before any subsequent checks, it's okay to wait for
+	// that implicit check. Only consider this basic block to
+	// avoid problems like:
+	//	_ = *x // should panic
+	//	for {} // no writes but infinite loop may be considered visible
+
+	var last *Flow
+	for f := Uniqs(fcheck); f != nil; f = Uniqs(f) {
+		p := f.Prog
+		if (p.Info.Flags&LeftRead != 0) && Thearch.Smallindir(&p.From, &fcheck.Prog.From) {
+			fcheck.Data = &killed
+			return
+		}
+
+		if (p.Info.Flags&(RightRead|RightWrite) != 0) && Thearch.Smallindir(&p.To, &fcheck.Prog.From) {
+			fcheck.Data = &killed
+			return
+		}
+
+		// Stop if another nil check happens.
+		if p.As == obj.ACHECKNIL {
+			return
+		}
+
+		// Stop if value is lost.
+		if (p.Info.Flags&RightWrite != 0) && Thearch.Sameaddr(&p.To, &fcheck.Prog.From) {
+			return
+		}
+
+		// Stop if memory write.
+		if (p.Info.Flags&RightWrite != 0) && !Thearch.Regtyp(&p.To) {
+			return
+		}
+
+		// Stop if we jump backward.
+		if last != nil && f.Id <= last.Id {
+			return
+		}
+		last = f
+	}
+}
diff --git a/src/cmd/compile/internal/gc/racewalk.go b/src/cmd/compile/internal/gc/racewalk.go
new file mode 100644
index 0000000..446ec03
--- /dev/null
+++ b/src/cmd/compile/internal/gc/racewalk.go
@@ -0,0 +1,639 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+	"fmt"
+	"strings"
+)
+
+// The racewalk pass modifies the code tree for the function as follows:
+//
+// 1. It inserts a call to racefuncenter at the beginning of each function.
+// 2. It inserts a call to racefuncexit at the end of each function.
+// 3. It inserts a call to raceread before each memory read.
+// 4. It inserts a call to racewrite before each memory write.
+//
+// The rewriting is not yet complete. Certain nodes are not rewritten
+// but should be.
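+//
+// For example (illustrative only; the rewriting operates on the Node
+// tree, not on source text), a function
+//
+//	func inc(p *int) { *p++ }
+//
+// is instrumented roughly as
+//
+//	func inc(p *int) {
+//		racefuncenter(callerpc) // callerpc: stand-in for the caller PC taken off the stack
+//		raceread(uintptr(unsafe.Pointer(p)))
+//		racewrite(uintptr(unsafe.Pointer(p)))
+//		*p++
+//		racefuncexit()
+//	}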
+
+// TODO(dvyukov): do not instrument initialization as writes:
+// a := make([]int, 10)
+
+// Do not instrument the following packages at all,
+// at best instrumentation would cause infinite recursion.
+var omit_pkgs = []string{"runtime", "runtime/race"}
+
+// Only insert racefuncenter/racefuncexit into the following packages.
+// Memory accesses in the packages are either uninteresting or will cause false positives.
+var noinst_pkgs = []string{"sync", "sync/atomic"}
+
+func ispkgin(pkgs []string) bool {
+	if myimportpath != "" {
+		for i := 0; i < len(pkgs); i++ {
+			if myimportpath == pkgs[i] {
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+func isforkfunc(fn *Node) bool {
+	// Special case for syscall.forkAndExecInChild.
+	// In the child, this function must not acquire any locks, because
+	// they might have been locked at the time of the fork.  This means
+	// no rescheduling, no malloc calls, and no new stack segments.
+	// Race instrumentation does all of the above.
+	return myimportpath != "" && myimportpath == "syscall" && fn.Nname.Sym.Name == "forkAndExecInChild"
+}
+
+func racewalk(fn *Node) {
+	if ispkgin(omit_pkgs) || isforkfunc(fn) {
+		return
+	}
+
+	if !ispkgin(noinst_pkgs) {
+		racewalklist(fn.Nbody, nil)
+
+		// nothing interesting for race detector in fn->enter
+		racewalklist(fn.Func.Exit, nil)
+	}
+
+	// nodpc is the PC of the caller as extracted by
+	// getcallerpc. We use -widthptr(FP) for x86.
+	// BUG: this will not work on arm.
+	nodpc := Nod(OXXX, nil, nil)
+
+	*nodpc = *nodfp
+	nodpc.Type = Types[TUINTPTR]
+	nodpc.Xoffset = int64(-Widthptr)
+	nd := mkcall("racefuncenter", nil, nil, nodpc)
+	fn.Func.Enter = concat(list1(nd), fn.Func.Enter)
+	nd = mkcall("racefuncexit", nil, nil)
+	fn.Func.Exit = list(fn.Func.Exit, nd)
+
+	if Debug['W'] != 0 {
+		s := fmt.Sprintf("after racewalk %v", fn.Nname.Sym)
+		dumplist(s, fn.Nbody)
+		s = fmt.Sprintf("enter %v", fn.Nname.Sym)
+		dumplist(s, fn.Func.Enter)
+		s = fmt.Sprintf("exit %v", fn.Nname.Sym)
+		dumplist(s, fn.Func.Exit)
+	}
+}
+
+func racewalklist(l *NodeList, init **NodeList) {
+	var instr *NodeList
+
+	for ; l != nil; l = l.Next {
+		instr = nil
+		racewalknode(&l.N, &instr, 0, 0)
+		if init == nil {
+			l.N.Ninit = concat(l.N.Ninit, instr)
+		} else {
+			*init = concat(*init, instr)
+		}
+	}
+}
+
+// racewalknode is walkexpr and walkstmt combined: it walks the tree
+// and adds calls to the instrumentation code to the init lists of
+// top-level (statement) nodes.
+func racewalknode(np **Node, init **NodeList, wr int, skip int) {
+	n := *np
+
+	if n == nil {
+		return
+	}
+
+	if Debug['w'] > 1 {
+		Dump("racewalk-before", n)
+	}
+	setlineno(n)
+	if init == nil {
+		Fatal("racewalk: bad init list")
+	}
+	if init == &n.Ninit {
+		// If init == &n->ninit and n->ninit is non-nil,
+		// racewalknode might append it to itself.
+		// nil it out and handle it separately before putting it back.
+		l := n.Ninit
+
+		n.Ninit = nil
+		racewalklist(l, nil)
+		racewalknode(&n, &l, wr, skip) // recurse with nil n->ninit
+		appendinit(&n, l)
+		*np = n
+		return
+	}
+
+	racewalklist(n.Ninit, nil)
+
+	switch n.Op {
+	default:
+		Fatal("racewalk: unknown node type %v", Oconv(int(n.Op), 0))
+
+	case OAS, OASWB, OAS2FUNC:
+		racewalknode(&n.Left, init, 1, 0)
+		racewalknode(&n.Right, init, 0, 0)
+		goto ret
+
+		// can't matter
+	case OCFUNC, OVARKILL:
+		goto ret
+
+	case OBLOCK:
+		if n.List == nil {
+			goto ret
+		}
+
+		switch n.List.N.Op {
+		// Blocks are used for multiple return function calls.
+		// x, y := f() becomes BLOCK{CALL f, AS x [SP+0], AS y [SP+n]}
+		// We don't want to instrument between the statements because it will
+		// smash the results.
+		case OCALLFUNC, OCALLMETH, OCALLINTER:
+			racewalknode(&n.List.N, &n.List.N.Ninit, 0, 0)
+
+			var fini *NodeList
+			racewalklist(n.List.Next, &fini)
+			n.List = concat(n.List, fini)
+
+			// Ordinary block, for loop initialization or inlined bodies.
+		default:
+			racewalklist(n.List, nil)
+		}
+
+		goto ret
+
+	case ODEFER:
+		racewalknode(&n.Left, init, 0, 0)
+		goto ret
+
+	case OPROC:
+		racewalknode(&n.Left, init, 0, 0)
+		goto ret
+
+	case OCALLINTER:
+		racewalknode(&n.Left, init, 0, 0)
+		goto ret
+
+		// Instrument dst argument of runtime.writebarrier* calls
+	// as we do not instrument runtime code.
+	// typedslicecopy is instrumented in runtime.
+	case OCALLFUNC:
+		if n.Left.Sym != nil && n.Left.Sym.Pkg == Runtimepkg && (strings.HasPrefix(n.Left.Sym.Name, "writebarrier") || n.Left.Sym.Name == "typedmemmove") {
+			// Find the dst argument.
+			// The list can be reordered, so it's not necessarily just the first or the second element.
+			var l *NodeList
+			for l = n.List; l != nil; l = l.Next {
+				if n.Left.Sym.Name == "typedmemmove" {
+					if l.N.Left.Xoffset == int64(Widthptr) {
+						break
+					}
+				} else {
+					if l.N.Left.Xoffset == 0 {
+						break
+					}
+				}
+			}
+
+			if l == nil {
+				Fatal("racewalk: writebarrier no arg")
+			}
+			if l.N.Right.Op != OADDR {
+				Fatal("racewalk: writebarrier bad arg")
+			}
+			callinstr(&l.N.Right.Left, init, 1, 0)
+		}
+
+		racewalknode(&n.Left, init, 0, 0)
+		goto ret
+
+	case ONOT,
+		OMINUS,
+		OPLUS,
+		OREAL,
+		OIMAG,
+		OCOM,
+		OSQRT:
+		racewalknode(&n.Left, init, wr, 0)
+		goto ret
+
+	case ODOTINTER:
+		racewalknode(&n.Left, init, 0, 0)
+		goto ret
+
+	case ODOT:
+		racewalknode(&n.Left, init, 0, 1)
+		callinstr(&n, init, wr, skip)
+		goto ret
+
+	case ODOTPTR: // dst = (*x).f with implicit *; otherwise it's ODOT+OIND
+		racewalknode(&n.Left, init, 0, 0)
+
+		callinstr(&n, init, wr, skip)
+		goto ret
+
+	case OIND: // *p
+		racewalknode(&n.Left, init, 0, 0)
+
+		callinstr(&n, init, wr, skip)
+		goto ret
+
+	case OSPTR, OLEN, OCAP:
+		racewalknode(&n.Left, init, 0, 0)
+		if Istype(n.Left.Type, TMAP) {
+			n1 := Nod(OCONVNOP, n.Left, nil)
+			n1.Type = Ptrto(Types[TUINT8])
+			n1 = Nod(OIND, n1, nil)
+			typecheck(&n1, Erv)
+			callinstr(&n1, init, 0, skip)
+		}
+
+		goto ret
+
+	case OLSH,
+		ORSH,
+		OLROT,
+		OAND,
+		OANDNOT,
+		OOR,
+		OXOR,
+		OSUB,
+		OMUL,
+		OHMUL,
+		OEQ,
+		ONE,
+		OLT,
+		OLE,
+		OGE,
+		OGT,
+		OADD,
+		OCOMPLEX:
+		racewalknode(&n.Left, init, wr, 0)
+		racewalknode(&n.Right, init, wr, 0)
+		goto ret
+
+	case OANDAND, OOROR:
+		racewalknode(&n.Left, init, wr, 0)
+
+		// walk has ensured the node has moved to a location where
+		// side effects are safe.
+		// n->right may not be executed,
+		// so instrumentation goes to n->right->ninit, not init.
+		racewalknode(&n.Right, &n.Right.Ninit, wr, 0)
+
+		goto ret
+
+	case ONAME:
+		callinstr(&n, init, wr, skip)
+		goto ret
+
+	case OCONV:
+		racewalknode(&n.Left, init, wr, 0)
+		goto ret
+
+	case OCONVNOP:
+		racewalknode(&n.Left, init, wr, 0)
+		goto ret
+
+	case ODIV, OMOD:
+		racewalknode(&n.Left, init, wr, 0)
+		racewalknode(&n.Right, init, wr, 0)
+		goto ret
+
+	case OINDEX:
+		if !Isfixedarray(n.Left.Type) {
+			racewalknode(&n.Left, init, 0, 0)
+		} else if !islvalue(n.Left) {
+			// index of unaddressable array, like Map[k][i].
+			racewalknode(&n.Left, init, wr, 0)
+
+			racewalknode(&n.Right, init, 0, 0)
+			goto ret
+		}
+
+		racewalknode(&n.Right, init, 0, 0)
+		if n.Left.Type.Etype != TSTRING {
+			callinstr(&n, init, wr, skip)
+		}
+		goto ret
+
+	case OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR:
+		racewalknode(&n.Left, init, 0, 0)
+		goto ret
+
+	case OADDR:
+		racewalknode(&n.Left, init, 0, 1)
+		goto ret
+
+		// n->left is Type* which is not interesting.
+	case OEFACE:
+		racewalknode(&n.Right, init, 0, 0)
+
+		goto ret
+
+	case OITAB:
+		racewalknode(&n.Left, init, 0, 0)
+		goto ret
+
+		// should not appear in AST by now
+	case OSEND,
+		ORECV,
+		OCLOSE,
+		ONEW,
+		OXCASE,
+		OXFALL,
+		OCASE,
+		OPANIC,
+		ORECOVER,
+		OCONVIFACE,
+		OCMPIFACE,
+		OMAKECHAN,
+		OMAKEMAP,
+		OMAKESLICE,
+		OCALL,
+		OCOPY,
+		OAPPEND,
+		ORUNESTR,
+		OARRAYBYTESTR,
+		OARRAYRUNESTR,
+		OSTRARRAYBYTE,
+		OSTRARRAYRUNE,
+		OINDEXMAP,
+		// lowered to call
+		OCMPSTR,
+		OADDSTR,
+		ODOTTYPE,
+		ODOTTYPE2,
+		OAS2DOTTYPE,
+		OCALLPART,
+		// lowered to PTRLIT
+		OCLOSURE,  // lowered to PTRLIT
+		ORANGE,    // lowered to ordinary for loop
+		OARRAYLIT, // lowered to assignments
+		OMAPLIT,
+		OSTRUCTLIT,
+		OAS2,
+		OAS2RECV,
+		OAS2MAPR,
+		OASOP:
+		Yyerror("racewalk: %v must be lowered by now", Oconv(int(n.Op), 0))
+
+		goto ret
+
+		// impossible nodes: only appear in backend.
+	case ORROTC, OEXTEND:
+		Yyerror("racewalk: %v cannot exist now", Oconv(int(n.Op), 0))
+		goto ret
+
+	case OGETG:
+		Yyerror("racewalk: OGETG can happen only in runtime which we don't instrument")
+		goto ret
+
+		// just do generic traversal
+	case OFOR,
+		OIF,
+		OCALLMETH,
+		ORETURN,
+		ORETJMP,
+		OSWITCH,
+		OSELECT,
+		OEMPTY,
+		OBREAK,
+		OCONTINUE,
+		OFALL,
+		OGOTO,
+		OLABEL:
+		goto ret
+
+		// does not require instrumentation
+	case OPRINT, // don't bother instrumenting it
+		OPRINTN,     // don't bother instrumenting it
+		OCHECKNIL,   // always followed by a read.
+		OPARAM,      // it appears only in fn->exit to copy heap params back
+		OCLOSUREVAR, // immutable pointer to captured variable
+		ODOTMETH,    // either part of CALLMETH or CALLPART (lowered to PTRLIT)
+		OINDREG,     // at this stage, only n(SP) nodes from nodarg
+		ODCL,        // declarations (without value) cannot be races
+		ODCLCONST,
+		ODCLTYPE,
+		OTYPE,
+		ONONAME,
+		OLITERAL,
+		OSLICESTR, // always preceded by bounds checking, avoid double instrumentation.
+		OTYPESW:   // ignored by code generation, do not instrument.
+		goto ret
+	}
+
+ret:
+	if n.Op != OBLOCK { // OBLOCK is handled above in a special way.
+		racewalklist(n.List, init)
+	}
+	if n.Ntest != nil {
+		racewalknode(&n.Ntest, &n.Ntest.Ninit, 0, 0)
+	}
+	if n.Nincr != nil {
+		racewalknode(&n.Nincr, &n.Nincr.Ninit, 0, 0)
+	}
+	racewalklist(n.Nbody, nil)
+	racewalklist(n.Nelse, nil)
+	racewalklist(n.Rlist, nil)
+	*np = n
+}
+
+func isartificial(n *Node) bool {
+	// compiler-emitted artificial things that we do not want to instrument;
+	// they can't possibly participate in a data race.
+	if n.Op == ONAME && n.Sym != nil && n.Sym.Name != "" {
+		if n.Sym.Name == "_" {
+			return true
+		}
+
+		// autotmp's are always local
+		if strings.HasPrefix(n.Sym.Name, "autotmp_") {
+			return true
+		}
+
+		// statictmp's are read-only
+		if strings.HasPrefix(n.Sym.Name, "statictmp_") {
+			return true
+		}
+
+		// go.itab is accessed only by the compiler and runtime (assume safe)
+		if n.Sym.Pkg != nil && n.Sym.Pkg.Name != "" && n.Sym.Pkg.Name == "go.itab" {
+			return true
+		}
+	}
+
+	return false
+}
+
+func callinstr(np **Node, init **NodeList, wr int, skip int) bool {
+	n := *np
+
+	//print("callinstr for %+N [ %O ] etype=%E class=%d\n",
+	//	  n, n->op, n->type ? n->type->etype : -1, n->class);
+
+	if skip != 0 || n.Type == nil || n.Type.Etype >= TIDEAL {
+		return false
+	}
+	t := n.Type
+	if isartificial(n) {
+		return false
+	}
+
+	b := outervalue(n)
+
+	// it skips e.g. stores to ... parameter array
+	if isartificial(b) {
+		return false
+	}
+	class := b.Class
+
+	// BUG: we _may_ want to instrument PAUTO sometimes
+	// e.g. if we've got a local variable/method receiver
+	// that has got a pointer inside. Whether it points to
+	// the heap or not is impossible to know at compile time
+	if (class&PHEAP != 0) || class == PPARAMREF || class == PEXTERN || b.Op == OINDEX || b.Op == ODOTPTR || b.Op == OIND {
+		hascalls := 0
+		foreach(n, hascallspred, &hascalls)
+		if hascalls != 0 {
+			n = detachexpr(n, init)
+			*np = n
+		}
+
+		n = treecopy(n)
+		makeaddable(n)
+		var f *Node
+		if t.Etype == TSTRUCT || Isfixedarray(t) {
+			name := "racereadrange"
+			if wr != 0 {
+				name = "racewriterange"
+			}
+			f = mkcall(name, nil, init, uintptraddr(n), Nodintconst(t.Width))
+		} else {
+			name := "raceread"
+			if wr != 0 {
+				name = "racewrite"
+			}
+			f = mkcall(name, nil, init, uintptraddr(n))
+		}
+
+		*init = list(*init, f)
+		return true
+	}
+
+	return false
+}
+
+// makeaddable returns a node whose memory location is the
+// same as n, but which is addressable in the Go language
+// sense.
+// This is different from functions like cheapexpr that may make
+// a copy of their argument.
+func makeaddable(n *Node) {
+	// The arguments to uintptraddr technically have an address but
+	// may not be addressable in the Go sense: for example, in the case
+	// of T(v).Field where T is a struct type and v is
+	// an addressable value.
+	switch n.Op {
+	case OINDEX:
+		if Isfixedarray(n.Left.Type) {
+			makeaddable(n.Left)
+		}
+
+		// Turn T(v).Field into v.Field
+	case ODOT, OXDOT:
+		if n.Left.Op == OCONVNOP {
+			n.Left = n.Left.Left
+		}
+		makeaddable(n.Left)
+
+		// nothing to do
+	case ODOTPTR:
+		fallthrough
+	default:
+		break
+	}
+}
+
+func uintptraddr(n *Node) *Node {
+	r := Nod(OADDR, n, nil)
+	r.Bounded = true
+	r = conv(r, Types[TUNSAFEPTR])
+	r = conv(r, Types[TUINTPTR])
+	return r
+}
+
+func detachexpr(n *Node, init **NodeList) *Node {
+	addr := Nod(OADDR, n, nil)
+	l := temp(Ptrto(n.Type))
+	as := Nod(OAS, l, addr)
+	typecheck(&as, Etop)
+	walkexpr(&as, init)
+	*init = list(*init, as)
+	ind := Nod(OIND, l, nil)
+	typecheck(&ind, Erv)
+	walkexpr(&ind, init)
+	return ind
+}
+
+func foreachnode(n *Node, f func(*Node, interface{}), c interface{}) {
+	if n != nil {
+		f(n, c)
+	}
+}
+
+func foreachlist(l *NodeList, f func(*Node, interface{}), c interface{}) {
+	for ; l != nil; l = l.Next {
+		foreachnode(l.N, f, c)
+	}
+}
+
+func foreach(n *Node, f func(*Node, interface{}), c interface{}) {
+	foreachlist(n.Ninit, f, c)
+	foreachnode(n.Left, f, c)
+	foreachnode(n.Right, f, c)
+	foreachlist(n.List, f, c)
+	foreachnode(n.Ntest, f, c)
+	foreachnode(n.Nincr, f, c)
+	foreachlist(n.Nbody, f, c)
+	foreachlist(n.Nelse, f, c)
+	foreachlist(n.Rlist, f, c)
+}
+
+func hascallspred(n *Node, c interface{}) {
+	switch n.Op {
+	case OCALL, OCALLFUNC, OCALLMETH, OCALLINTER:
+		(*c.(*int))++
+	}
+}
+
+// appendinit is like addinit in subr.go
+// but appends rather than prepends.
+func appendinit(np **Node, init *NodeList) {
+	if init == nil {
+		return
+	}
+
+	n := *np
+	switch n.Op {
+	// There may be multiple refs to this node;
+	// introduce OCONVNOP to hold init list.
+	case ONAME, OLITERAL:
+		n = Nod(OCONVNOP, n, nil)
+
+		n.Type = n.Left.Type
+		n.Typecheck = 1
+		*np = n
+	}
+
+	n.Ninit = concat(n.Ninit, init)
+	n.Ullman = UINF
+}
diff --git a/src/cmd/compile/internal/gc/range.go b/src/cmd/compile/internal/gc/range.go
new file mode 100644
index 0000000..03beb1c
--- /dev/null
+++ b/src/cmd/compile/internal/gc/range.go
@@ -0,0 +1,406 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import "cmd/internal/obj"
+
+/*
+ * range
+ */
+func typecheckrange(n *Node) {
+	var toomany int
+	var why string
+	var t1 *Type
+	var t2 *Type
+	var v1 *Node
+	var v2 *Node
+
+	// Typechecking order is important here:
+	// 0. first typecheck range expression (slice/map/chan),
+	//	it is evaluated only once and so logically it is not part of the loop.
+	// 1. typecheck produced values,
+	//	this part can declare new vars and so it must be typechecked before body,
+	//	because body can contain a closure that captures the vars.
+	// 2. decldepth++ to denote loop body.
+	// 3. typecheck body.
+	// 4. decldepth--.
+
+	typecheck(&n.Right, Erv)
+
+	t := n.Right.Type
+	if t == nil {
+		goto out
+	}
+
+	// delicate little dance.  see typecheckas2
+	for ll := n.List; ll != nil; ll = ll.Next {
+		if ll.N.Defn != n {
+			typecheck(&ll.N, Erv|Easgn)
+		}
+	}
+
+	if Isptr[t.Etype] && Isfixedarray(t.Type) {
+		t = t.Type
+	}
+	n.Type = t
+
+	toomany = 0
+	switch t.Etype {
+	default:
+		Yyerror("cannot range over %v", Nconv(n.Right, obj.FmtLong))
+		goto out
+
+	case TARRAY:
+		t1 = Types[TINT]
+		t2 = t.Type
+
+	case TMAP:
+		t1 = t.Down
+		t2 = t.Type
+
+	case TCHAN:
+		if t.Chan&Crecv == 0 {
+			Yyerror("invalid operation: range %v (receive from send-only type %v)", n.Right, n.Right.Type)
+			goto out
+		}
+
+		t1 = t.Type
+		t2 = nil
+		if count(n.List) == 2 {
+			toomany = 1
+		}
+
+	case TSTRING:
+		t1 = Types[TINT]
+		t2 = runetype
+	}
+
+	if count(n.List) > 2 || toomany != 0 {
+		Yyerror("too many variables in range")
+	}
+
+	v1 = nil
+	if n.List != nil {
+		v1 = n.List.N
+	}
+	v2 = nil
+	if n.List != nil && n.List.Next != nil {
+		v2 = n.List.Next.N
+	}
+
+	// this is not only an optimization but also a requirement in the spec.
+	// "if the second iteration variable is the blank identifier, the range
+	// clause is equivalent to the same clause with only the first variable
+	// present."
+	if isblank(v2) {
+		if v1 != nil {
+			n.List = list1(v1)
+		}
+		v2 = nil
+	}
+
+	if v1 != nil {
+		if v1.Defn == n {
+			v1.Type = t1
+		} else if v1.Type != nil && assignop(t1, v1.Type, &why) == 0 {
+			Yyerror("cannot assign type %v to %v in range%s", t1, Nconv(v1, obj.FmtLong), why)
+		}
+		checkassign(n, v1)
+	}
+
+	if v2 != nil {
+		if v2.Defn == n {
+			v2.Type = t2
+		} else if v2.Type != nil && assignop(t2, v2.Type, &why) == 0 {
+			Yyerror("cannot assign type %v to %v in range%s", t2, Nconv(v2, obj.FmtLong), why)
+		}
+		checkassign(n, v2)
+	}
+
+	// second half of dance
+out:
+	n.Typecheck = 1
+
+	for ll := n.List; ll != nil; ll = ll.Next {
+		if ll.N.Typecheck == 0 {
+			typecheck(&ll.N, Erv|Easgn)
+		}
+	}
+
+	decldepth++
+	typechecklist(n.Nbody, Etop)
+	decldepth--
+}
+
+func walkrange(n *Node) {
+	// variable name conventions:
+	//	ohv1, hv1, hv2: hidden (old) val 1, 2
+	//	ha, hit: hidden aggregate, iterator
+	//	hn, hp: hidden len, pointer
+	//	hb: hidden bool
+	//	a, v1, v2: not hidden aggregate, val 1, 2
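+	//
+	// For example (sketch), `for v1, v2 := range a` over a slice lowers
+	// to roughly:
+	//
+	//	ha := a
+	//	hv1, hn := 0, len(ha)
+	//	hp := &ha[0]
+	//	for ; hv1 < hn; hv1++ {
+	//		v1, v2 = hv1, *hp
+	//		hp = hp + elemsize // really done in the increment's init; see below
+	//	}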
+
+	t := n.Type
+
+	a := n.Right
+	lno := int(setlineno(a))
+
+	var v1 *Node
+	if n.List != nil {
+		v1 = n.List.N
+	}
+	var v2 *Node
+	if n.List != nil && n.List.Next != nil && !isblank(n.List.Next.N) {
+		v2 = n.List.Next.N
+	}
+
+	// n->list has no meaning anymore, clear it
+	// to avoid erroneous processing by racewalk.
+	n.List = nil
+
+	var body *NodeList
+	var init *NodeList
+	switch t.Etype {
+	default:
+		Fatal("walkrange")
+
+		// Lower n into runtime·memclr if possible, for
+	// fast zeroing of slices and arrays (issue 5373).
+	// Look for instances of
+	//
+	// for i := range a {
+	// 	a[i] = zero
+	// }
+	//
+	// in which the evaluation of a is side-effect-free.
+	case TARRAY:
+		if Debug['N'] == 0 && flag_race == 0 && v1 != nil && v2 == nil &&
+			n.Nbody != nil && n.Nbody.N != nil && n.Nbody.Next == nil { // exactly one statement in body
+			tmp := n.Nbody.N // the one statement
+			if tmp.Op == OAS && tmp.Left.Op == OINDEX &&
+				samesafeexpr(tmp.Left.Left, a) && samesafeexpr(tmp.Left.Right, v1) &&
+				t.Type.Width > 0 && iszero(tmp.Right) {
+				// Convert to
+				// if len(a) != 0 {
+				// 	hp = &a[0]
+				// 	hn = len(a)*sizeof(elem(a))
+				// 	memclr(hp, hn)
+				// 	i = len(a) - 1
+				// }
+				n.Op = OIF
+
+				n.Nbody = nil
+				n.Ntest = Nod(ONE, Nod(OLEN, a, nil), Nodintconst(0))
+				n.Nincr = nil
+
+				// hp = &a[0]
+				hp := temp(Ptrto(Types[TUINT8]))
+
+				tmp := Nod(OINDEX, a, Nodintconst(0))
+				tmp.Bounded = true
+				tmp = Nod(OADDR, tmp, nil)
+				tmp = Nod(OCONVNOP, tmp, nil)
+				tmp.Type = Ptrto(Types[TUINT8])
+				n.Nbody = list(n.Nbody, Nod(OAS, hp, tmp))
+
+				// hn = len(a) * sizeof(elem(a))
+				hn := temp(Types[TUINTPTR])
+
+				tmp = Nod(OLEN, a, nil)
+				tmp = Nod(OMUL, tmp, Nodintconst(t.Type.Width))
+				tmp = conv(tmp, Types[TUINTPTR])
+				n.Nbody = list(n.Nbody, Nod(OAS, hn, tmp))
+
+				// memclr(hp, hn)
+				fn := mkcall("memclr", nil, nil, hp, hn)
+
+				n.Nbody = list(n.Nbody, fn)
+
+				// i = len(a) - 1
+				v1 = Nod(OAS, v1, Nod(OSUB, Nod(OLEN, a, nil), Nodintconst(1)))
+
+				n.Nbody = list(n.Nbody, v1)
+
+				typecheck(&n.Ntest, Erv)
+				typechecklist(n.Nbody, Etop)
+				walkstmt(&n)
+				lineno = int32(lno)
+				return
+			}
+		}
+
+		// orderstmt arranged for a copy of the array/slice variable if needed.
+		ha := a
+
+		hv1 := temp(Types[TINT])
+		hn := temp(Types[TINT])
+		var hp *Node
+
+		init = list(init, Nod(OAS, hv1, nil))
+		init = list(init, Nod(OAS, hn, Nod(OLEN, ha, nil)))
+		if v2 != nil {
+			hp = temp(Ptrto(n.Type.Type))
+			tmp := Nod(OINDEX, ha, Nodintconst(0))
+			tmp.Bounded = true
+			init = list(init, Nod(OAS, hp, Nod(OADDR, tmp, nil)))
+		}
+
+		n.Ntest = Nod(OLT, hv1, hn)
+		n.Nincr = Nod(OAS, hv1, Nod(OADD, hv1, Nodintconst(1)))
+		if v1 == nil {
+			body = nil
+		} else if v2 == nil {
+			body = list1(Nod(OAS, v1, hv1))
+		} else {
+			a := Nod(OAS2, nil, nil)
+			a.List = list(list1(v1), v2)
+			a.Rlist = list(list1(hv1), Nod(OIND, hp, nil))
+			body = list1(a)
+
+			// Advance pointer as part of increment.
+			// We used to advance the pointer before executing the loop body,
+			// but doing so would make the pointer point past the end of the
+			// array during the final iteration, possibly causing another unrelated
+			// piece of memory not to be garbage collected until the loop finished.
+			// Advancing during the increment ensures that the pointer p only points
+			// past the end of the array during the final "p++; i++; if(i >= len(x)) break;",
+			// after which p is dead, so it cannot confuse the collector.
+			tmp := Nod(OADD, hp, Nodintconst(t.Type.Width))
+
+			tmp.Type = hp.Type
+			tmp.Typecheck = 1
+			tmp.Right.Type = Types[Tptr]
+			tmp.Right.Typecheck = 1
+			a = Nod(OAS, hp, tmp)
+			typecheck(&a, Etop)
+			n.Nincr.Ninit = list1(a)
+		}
+
+		// orderstmt allocated the iterator for us.
+	// we only use a once, so no copy needed.
+	case TMAP:
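+		// Sketch of the lowering (illustrative):
+		//	mapiterinit(maptype, ha, &hit)
+		//	for ; hit.key != nil; mapiternext(&hit) {
+		//		v1, v2 = *hit.key, *hit.val
+		//	}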
+		ha := a
+
+		th := hiter(t)
+		hit := n.Alloc
+		hit.Type = th
+		n.Left = nil
+		keyname := newname(th.Type.Sym)      // depends on layout of iterator struct.  See reflect.go:hiter
+		valname := newname(th.Type.Down.Sym) // ditto
+
+		fn := syslook("mapiterinit", 1)
+
+		substArgTypes(fn, t.Down, t.Type, th)
+		init = list(init, mkcall1(fn, nil, nil, typename(t), ha, Nod(OADDR, hit, nil)))
+		n.Ntest = Nod(ONE, Nod(ODOT, hit, keyname), nodnil())
+
+		fn = syslook("mapiternext", 1)
+		substArgTypes(fn, th)
+		n.Nincr = mkcall1(fn, nil, nil, Nod(OADDR, hit, nil))
+
+		key := Nod(ODOT, hit, keyname)
+		key = Nod(OIND, key, nil)
+		if v1 == nil {
+			body = nil
+		} else if v2 == nil {
+			body = list1(Nod(OAS, v1, key))
+		} else {
+			val := Nod(ODOT, hit, valname)
+			val = Nod(OIND, val, nil)
+			a := Nod(OAS2, nil, nil)
+			a.List = list(list1(v1), v2)
+			a.Rlist = list(list1(key), val)
+			body = list1(a)
+		}
+
+		// orderstmt arranged for a copy of the channel variable.
+	case TCHAN:
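+		// Sketch of the lowering (illustrative): the receive runs in
+		// the loop condition's init, so each iteration does
+		//	hv1, hb = <-ha
+		//	if hb == false { break }
+		//	v1 = hv1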
+		ha := a
+
+		n.Ntest = nil
+
+		hv1 := temp(t.Type)
+		hv1.Typecheck = 1
+		if haspointers(t.Type) {
+			init = list(init, Nod(OAS, hv1, nil))
+		}
+		hb := temp(Types[TBOOL])
+
+		n.Ntest = Nod(ONE, hb, Nodbool(false))
+		a := Nod(OAS2RECV, nil, nil)
+		a.Typecheck = 1
+		a.List = list(list1(hv1), hb)
+		a.Rlist = list1(Nod(ORECV, ha, nil))
+		n.Ntest.Ninit = list1(a)
+		if v1 == nil {
+			body = nil
+		} else {
+			body = list1(Nod(OAS, v1, hv1))
+		}
+
+		// orderstmt arranged for a copy of the string variable.
+	case TSTRING:
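+		// Sketch of the lowering (illustrative): hv1 holds the byte
+		// offset, advanced by stringiter/stringiter2 (0 means done):
+		//	ohv1 = hv1
+		//	hv1, hv2 = stringiter2(ha, hv1)
+		//	if hv1 == 0 { break }
+		//	v1, v2 = ohv1, hv2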
+		ha := a
+
+		ohv1 := temp(Types[TINT])
+
+		hv1 := temp(Types[TINT])
+		init = list(init, Nod(OAS, hv1, nil))
+
+		var a *Node
+		var hv2 *Node
+		if v2 == nil {
+			a = Nod(OAS, hv1, mkcall("stringiter", Types[TINT], nil, ha, hv1))
+		} else {
+			hv2 = temp(runetype)
+			a = Nod(OAS2, nil, nil)
+			a.List = list(list1(hv1), hv2)
+			fn := syslook("stringiter2", 0)
+			a.Rlist = list1(mkcall1(fn, getoutargx(fn.Type), nil, ha, hv1))
+		}
+
+		n.Ntest = Nod(ONE, hv1, Nodintconst(0))
+		n.Ntest.Ninit = list(list1(Nod(OAS, ohv1, hv1)), a)
+
+		body = nil
+		if v1 != nil {
+			body = list1(Nod(OAS, v1, ohv1))
+		}
+		if v2 != nil {
+			body = list(body, Nod(OAS, v2, hv2))
+		}
+	}
+
+	n.Op = OFOR
+	typechecklist(init, Etop)
+	n.Ninit = concat(n.Ninit, init)
+	typechecklist(n.Ntest.Ninit, Etop)
+	typecheck(&n.Ntest, Erv)
+	typecheck(&n.Nincr, Etop)
+	typechecklist(body, Etop)
+	n.Nbody = concat(body, n.Nbody)
+	walkstmt(&n)
+
+	lineno = int32(lno)
+}
diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go
new file mode 100644
index 0000000..6c0962f
--- /dev/null
+++ b/src/cmd/compile/internal/gc/reflect.go
@@ -0,0 +1,1572 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+	"cmd/internal/gcprog"
+	"cmd/internal/obj"
+	"fmt"
+	"os"
+)
+
+/*
+ * runtime interface and reflection data structures
+ */
+var signatlist *NodeList
+
+func sigcmp(a *Sig, b *Sig) int {
+	i := stringsCompare(a.name, b.name)
+	if i != 0 {
+		return i
+	}
+	if a.pkg == b.pkg {
+		return 0
+	}
+	if a.pkg == nil {
+		return -1
+	}
+	if b.pkg == nil {
+		return +1
+	}
+	return stringsCompare(a.pkg.Path, b.pkg.Path)
+}
+
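+// lsort merge-sorts the linked list l using comparison f: the
+// slow/fast pointer walk below splits the list in half, the halves
+// are sorted recursively, and the final loop merges them in order.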
+func lsort(l *Sig, f func(*Sig, *Sig) int) *Sig {
+	if l == nil || l.link == nil {
+		return l
+	}
+
+	l1 := l
+	l2 := l
+	for {
+		l2 = l2.link
+		if l2 == nil {
+			break
+		}
+		l2 = l2.link
+		if l2 == nil {
+			break
+		}
+		l1 = l1.link
+	}
+
+	l2 = l1.link
+	l1.link = nil
+	l1 = lsort(l, f)
+	l2 = lsort(l2, f)
+
+	/* set up lead element */
+	if f(l1, l2) < 0 {
+		l = l1
+		l1 = l1.link
+	} else {
+		l = l2
+		l2 = l2.link
+	}
+
+	le := l
+
+	for {
+		if l1 == nil {
+			for l2 != nil {
+				le.link = l2
+				le = l2
+				l2 = l2.link
+			}
+
+			le.link = nil
+			break
+		}
+
+		if l2 == nil {
+			for l1 != nil {
+				le.link = l1
+				le = l1
+				l1 = l1.link
+			}
+
+			break
+		}
+
+		if f(l1, l2) < 0 {
+			le.link = l1
+			le = l1
+			l1 = l1.link
+		} else {
+			le.link = l2
+			le = l2
+			l2 = l2.link
+		}
+	}
+
+	le.link = nil
+	return l
+}
+
+// Builds a type representing a Bucket structure for
+// the given map type.  This type is not visible to users -
+// we include only enough information to generate a correct GC
+// program for it.
+// Make sure this stays in sync with ../../runtime/hashmap.go!
+const (
+	BUCKETSIZE = 8
+	MAXKEYSIZE = 128
+	MAXVALSIZE = 128
+)
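+
+// For example (sketch), on a 64-bit system map[int64]string gets a
+// bucket laid out like
+//
+//	struct {
+//		topbits  [8]uint8
+//		keys     [8]int64
+//		values   [8]string
+//		overflow *bucket
+//	}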
+
+func makefield(name string, t *Type) *Type {
+	f := typ(TFIELD)
+	f.Type = t
+	f.Sym = new(Sym)
+	f.Sym.Name = name
+	return f
+}
+
+func mapbucket(t *Type) *Type {
+	if t.Bucket != nil {
+		return t.Bucket
+	}
+
+	bucket := typ(TSTRUCT)
+	keytype := t.Down
+	valtype := t.Type
+	dowidth(keytype)
+	dowidth(valtype)
+	if keytype.Width > MAXKEYSIZE {
+		keytype = Ptrto(keytype)
+	}
+	if valtype.Width > MAXVALSIZE {
+		valtype = Ptrto(valtype)
+	}
+
+	// The first field is: uint8 topbits[BUCKETSIZE].
+	arr := typ(TARRAY)
+
+	arr.Type = Types[TUINT8]
+	arr.Bound = BUCKETSIZE
+	var field [4]*Type
+	field[0] = makefield("topbits", arr)
+	arr = typ(TARRAY)
+	arr.Type = keytype
+	arr.Bound = BUCKETSIZE
+	field[1] = makefield("keys", arr)
+	arr = typ(TARRAY)
+	arr.Type = valtype
+	arr.Bound = BUCKETSIZE
+	field[2] = makefield("values", arr)
+	field[3] = makefield("overflow", Ptrto(bucket))
+
+	// link up fields
+	bucket.Noalg = 1
+
+	bucket.Local = t.Local
+	bucket.Type = field[0]
+	for n := int32(0); n < int32(len(field)-1); n++ {
+		field[n].Down = field[n+1]
+	}
+	field[len(field)-1].Down = nil
+	dowidth(bucket)
+
+	// Pad to the native integer alignment.
+	// This is usually the same as widthptr; the exception (as usual) is amd64p32.
+	if Widthreg > Widthptr {
+		bucket.Width += int64(Widthreg) - int64(Widthptr)
+	}
+
+	// See comment on hmap.overflow in ../../runtime/hashmap.go.
+	if !haspointers(t.Type) && !haspointers(t.Down) && t.Type.Width <= MAXVALSIZE && t.Down.Width <= MAXKEYSIZE {
+		bucket.Haspointers = 1 // no pointers
+	}
+
+	t.Bucket = bucket
+
+	bucket.Map = t
+	return bucket
+}
+
+// Builds a type representing a Hmap structure for the given map type.
+// Make sure this stays in sync with ../../runtime/hashmap.go!
+func hmap(t *Type) *Type {
+	if t.Hmap != nil {
+		return t.Hmap
+	}
+
+	bucket := mapbucket(t)
+	var field [8]*Type
+	field[0] = makefield("count", Types[TINT])
+	field[1] = makefield("flags", Types[TUINT8])
+	field[2] = makefield("B", Types[TUINT8])
+	field[3] = makefield("hash0", Types[TUINT32])
+	field[4] = makefield("buckets", Ptrto(bucket))
+	field[5] = makefield("oldbuckets", Ptrto(bucket))
+	field[6] = makefield("nevacuate", Types[TUINTPTR])
+	field[7] = makefield("overflow", Types[TUNSAFEPTR])
+
+	h := typ(TSTRUCT)
+	h.Noalg = 1
+	h.Local = t.Local
+	h.Type = field[0]
+	for n := int32(0); n < int32(len(field)-1); n++ {
+		field[n].Down = field[n+1]
+	}
+	field[len(field)-1].Down = nil
+	dowidth(h)
+	t.Hmap = h
+	h.Map = t
+	return h
+}
+
+func hiter(t *Type) *Type {
+	if t.Hiter != nil {
+		return t.Hiter
+	}
+
+	// build a struct:
+	// hash_iter {
+	//    key *Key
+	//    val *Value
+	//    t *MapType
+	//    h *Hmap
+	//    buckets *Bucket
+	//    bptr *Bucket
+	//    overflow0 unsafe.Pointer
+	//    overflow1 unsafe.Pointer
+	//    startBucket uintptr
+	//    stuff uintptr
+	//    bucket uintptr
+	//    checkBucket uintptr
+	// }
+	// must match ../../runtime/hashmap.go:hash_iter.
+	var field [12]*Type
+	field[0] = makefield("key", Ptrto(t.Down))
+
+	field[1] = makefield("val", Ptrto(t.Type))
+	field[2] = makefield("t", Ptrto(Types[TUINT8]))
+	field[3] = makefield("h", Ptrto(hmap(t)))
+	field[4] = makefield("buckets", Ptrto(mapbucket(t)))
+	field[5] = makefield("bptr", Ptrto(mapbucket(t)))
+	field[6] = makefield("overflow0", Types[TUNSAFEPTR])
+	field[7] = makefield("overflow1", Types[TUNSAFEPTR])
+	field[8] = makefield("startBucket", Types[TUINTPTR])
+	field[9] = makefield("stuff", Types[TUINTPTR]) // offset+wrapped+B+I
+	field[10] = makefield("bucket", Types[TUINTPTR])
+	field[11] = makefield("checkBucket", Types[TUINTPTR])
+
+	// build iterator struct holding the above fields
+	i := typ(TSTRUCT)
+
+	i.Noalg = 1
+	i.Type = field[0]
+	for n := int32(0); n < int32(len(field)-1); n++ {
+		field[n].Down = field[n+1]
+	}
+	field[len(field)-1].Down = nil
+	dowidth(i)
+	if i.Width != int64(12*Widthptr) {
+		Yyerror("hash_iter size not correct %d %d", i.Width, 12*Widthptr)
+	}
+	t.Hiter = i
+	i.Map = t
+	return i
+}
+
+/*
+ * f is method type, with receiver.
+ * return function type, receiver as first argument (or not).
+ */
+func methodfunc(f *Type, receiver *Type) *Type {
+	var in *NodeList
+	if receiver != nil {
+		d := Nod(ODCLFIELD, nil, nil)
+		d.Type = receiver
+		in = list(in, d)
+	}
+
+	var d *Node
+	for t := getinargx(f).Type; t != nil; t = t.Down {
+		d = Nod(ODCLFIELD, nil, nil)
+		d.Type = t.Type
+		d.Isddd = t.Isddd
+		in = list(in, d)
+	}
+
+	var out *NodeList
+	for t := getoutargx(f).Type; t != nil; t = t.Down {
+		d = Nod(ODCLFIELD, nil, nil)
+		d.Type = t.Type
+		out = list(out, d)
+	}
+
+	t := functype(nil, in, out)
+	if f.Nname != nil {
+		// Link to name of original method function.
+		t.Nname = f.Nname
+	}
+
+	return t
+}
+
+/*
+ * return methods of non-interface type t, sorted by name.
+ * generates stub functions as needed.
+ */
+func methods(t *Type) *Sig {
+	// method type
+	mt := methtype(t, 0)
+
+	if mt == nil {
+		return nil
+	}
+	expandmeth(mt)
+
+	// type stored in interface word
+	it := t
+
+	if !isdirectiface(it) {
+		it = Ptrto(t)
+	}
+
+	// make list of methods for t,
+	// generating code if necessary.
+	var a *Sig
+
+	var this *Type
+	var b *Sig
+	var method *Sym
+	for f := mt.Xmethod; f != nil; f = f.Down {
+		if f.Etype != TFIELD {
+			Fatal("methods: not field %v", f)
+		}
+		if f.Type.Etype != TFUNC || f.Type.Thistuple == 0 {
+			Fatal("non-method on %v method %v %v\n", mt, f.Sym, f)
+		}
+		if getthisx(f.Type).Type == nil {
+			Fatal("receiver with no type on %v method %v %v\n", mt, f.Sym, f)
+		}
+		if f.Nointerface {
+			continue
+		}
+
+		method = f.Sym
+		if method == nil {
+			continue
+		}
+
+		// get receiver type for this particular method.
+		// if pointer receiver but non-pointer t and
+		// this is not an embedded pointer inside a struct,
+		// method does not apply.
+		this = getthisx(f.Type).Type.Type
+
+		if Isptr[this.Etype] && this.Type == t {
+			continue
+		}
+		if Isptr[this.Etype] && !Isptr[t.Etype] && f.Embedded != 2 && !isifacemethod(f.Type) {
+			continue
+		}
+
+		b = new(Sig)
+		b.link = a
+		a = b
+
+		a.name = method.Name
+		if !exportname(method.Name) {
+			if method.Pkg == nil {
+				Fatal("methods: missing package")
+			}
+			a.pkg = method.Pkg
+		}
+
+		a.isym = methodsym(method, it, 1)
+		a.tsym = methodsym(method, t, 0)
+		a.type_ = methodfunc(f.Type, t)
+		a.mtype = methodfunc(f.Type, nil)
+
+		if a.isym.Flags&SymSiggen == 0 {
+			a.isym.Flags |= SymSiggen
+			if !Eqtype(this, it) || this.Width < Types[Tptr].Width {
+				compiling_wrappers = 1
+				genwrapper(it, f, a.isym, 1)
+				compiling_wrappers = 0
+			}
+		}
+
+		if a.tsym.Flags&SymSiggen == 0 {
+			a.tsym.Flags |= SymSiggen
+			if !Eqtype(this, t) {
+				compiling_wrappers = 1
+				genwrapper(t, f, a.tsym, 0)
+				compiling_wrappers = 0
+			}
+		}
+	}
+
+	return lsort(a, sigcmp)
+}
+
+/*
+ * return methods of interface type t, sorted by name.
+ */
+func imethods(t *Type) *Sig {
+	var a *Sig
+	var method *Sym
+	var isym *Sym
+
+	var all *Sig
+	var last *Sig
+	for f := t.Type; f != nil; f = f.Down {
+		if f.Etype != TFIELD {
+			Fatal("imethods: not field")
+		}
+		if f.Type.Etype != TFUNC || f.Sym == nil {
+			continue
+		}
+		method = f.Sym
+		a = new(Sig)
+		a.name = method.Name
+		if !exportname(method.Name) {
+			if method.Pkg == nil {
+				Fatal("imethods: missing package")
+			}
+			a.pkg = method.Pkg
+		}
+
+		a.mtype = f.Type
+		a.offset = 0
+		a.type_ = methodfunc(f.Type, nil)
+
+		if last != nil && sigcmp(last, a) >= 0 {
+			Fatal("sigcmp vs sortinter %s %s", last.name, a.name)
+		}
+		if last == nil {
+			all = a
+		} else {
+			last.link = a
+		}
+		last = a
+
+		// Compiler can only refer to wrappers for non-blank methods.
+		if isblanksym(method) {
+			continue
+		}
+
+		// NOTE(rsc): Perhaps an oversight that
+		// IfaceType.Method is not in the reflect data.
+		// Generate the method body, so that compiled
+		// code can refer to it.
+		isym = methodsym(method, t, 0)
+
+		if isym.Flags&SymSiggen == 0 {
+			isym.Flags |= SymSiggen
+			genwrapper(t, f, isym, 0)
+		}
+	}
+
+	return all
+}
+
+var dimportpath_gopkg *Pkg
+
+func dimportpath(p *Pkg) {
+	if p.Pathsym != nil {
+		return
+	}
+
+	// If we are compiling the runtime package, there are two runtime packages around
+	// -- localpkg and Runtimepkg.  We don't want to produce import path symbols for
+	// both of them, so just produce one for localpkg.
+	if myimportpath == "runtime" && p == Runtimepkg {
+		return
+	}
+
+	if dimportpath_gopkg == nil {
+		dimportpath_gopkg = mkpkg("go")
+		dimportpath_gopkg.Name = "go"
+	}
+
+	nam := "importpath." + p.Prefix + "."
+
+	n := Nod(ONAME, nil, nil)
+	n.Sym = Pkglookup(nam, dimportpath_gopkg)
+
+	n.Class = PEXTERN
+	n.Xoffset = 0
+	p.Pathsym = n.Sym
+
+	if p == localpkg {
+		// Note: myimportpath != "", or else dgopkgpath won't call dimportpath.
+		gdatastring(n, myimportpath)
+	} else {
+		gdatastring(n, p.Path)
+	}
+	ggloblsym(n.Sym, int32(Types[TSTRING].Width), obj.DUPOK|obj.RODATA)
+}
+
+func dgopkgpath(s *Sym, ot int, pkg *Pkg) int {
+	if pkg == nil {
+		return dgostringptr(s, ot, "")
+	}
+
+	if pkg == localpkg && myimportpath == "" {
+		// If we don't know the full path of the package being compiled (i.e. -p
+		// was not passed on the compiler command line), emit reference to
+		// go.importpath.""., which 6l will rewrite using the correct import path.
+		// Every package that imports this one directly defines the symbol.
+		ns := Pkglookup("importpath.\"\".", mkpkg("go"))
+		return dsymptr(s, ot, ns, 0)
+	}
+
+	dimportpath(pkg)
+	return dsymptr(s, ot, pkg.Pathsym, 0)
+}
+
+/*
+ * uncommonType
+ * ../../runtime/type.go:/uncommonType
+ */
+func dextratype(sym *Sym, off int, t *Type, ptroff int) int {
+	m := methods(t)
+	if t.Sym == nil && m == nil {
+		return off
+	}
+
+	// fill in *extraType pointer in header
+	off = int(Rnd(int64(off), int64(Widthptr)))
+
+	dsymptr(sym, ptroff, sym, off)
+
+	n := 0
+	for a := m; a != nil; a = a.link {
+		dtypesym(a.type_)
+		n++
+	}
+
+	ot := off
+	s := sym
+	if t.Sym != nil {
+		ot = dgostringptr(s, ot, t.Sym.Name)
+		if t != Types[t.Etype] && t != errortype {
+			ot = dgopkgpath(s, ot, t.Sym.Pkg)
+		} else {
+			ot = dgostringptr(s, ot, "")
+		}
+	} else {
+		ot = dgostringptr(s, ot, "")
+		ot = dgostringptr(s, ot, "")
+	}
+
+	// slice header
+	ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint)
+
+	ot = duintxx(s, ot, uint64(n), Widthint)
+	ot = duintxx(s, ot, uint64(n), Widthint)
+
+	// methods
+	for a := m; a != nil; a = a.link {
+		// method
+		// ../../runtime/type.go:/method
+		ot = dgostringptr(s, ot, a.name)
+
+		ot = dgopkgpath(s, ot, a.pkg)
+		ot = dsymptr(s, ot, dtypesym(a.mtype), 0)
+		ot = dsymptr(s, ot, dtypesym(a.type_), 0)
+		if a.isym != nil {
+			ot = dsymptr(s, ot, a.isym, 0)
+		} else {
+			ot = duintptr(s, ot, 0)
+		}
+		if a.tsym != nil {
+			ot = dsymptr(s, ot, a.tsym, 0)
+		} else {
+			ot = duintptr(s, ot, 0)
+		}
+	}
+
+	return ot
+}
+
+var kinds = []int{
+	TINT:        obj.KindInt,
+	TUINT:       obj.KindUint,
+	TINT8:       obj.KindInt8,
+	TUINT8:      obj.KindUint8,
+	TINT16:      obj.KindInt16,
+	TUINT16:     obj.KindUint16,
+	TINT32:      obj.KindInt32,
+	TUINT32:     obj.KindUint32,
+	TINT64:      obj.KindInt64,
+	TUINT64:     obj.KindUint64,
+	TUINTPTR:    obj.KindUintptr,
+	TFLOAT32:    obj.KindFloat32,
+	TFLOAT64:    obj.KindFloat64,
+	TBOOL:       obj.KindBool,
+	TSTRING:     obj.KindString,
+	TPTR32:      obj.KindPtr,
+	TPTR64:      obj.KindPtr,
+	TSTRUCT:     obj.KindStruct,
+	TINTER:      obj.KindInterface,
+	TCHAN:       obj.KindChan,
+	TMAP:        obj.KindMap,
+	TARRAY:      obj.KindArray,
+	TFUNC:       obj.KindFunc,
+	TCOMPLEX64:  obj.KindComplex64,
+	TCOMPLEX128: obj.KindComplex128,
+	TUNSAFEPTR:  obj.KindUnsafePointer,
+}
+
+func haspointers(t *Type) bool {
+	if t.Haspointers != 0 {
+		return t.Haspointers-1 != 0
+	}
+
+	var ret bool
+	switch t.Etype {
+	case TINT,
+		TUINT,
+		TINT8,
+		TUINT8,
+		TINT16,
+		TUINT16,
+		TINT32,
+		TUINT32,
+		TINT64,
+		TUINT64,
+		TUINTPTR,
+		TFLOAT32,
+		TFLOAT64,
+		TCOMPLEX64,
+		TCOMPLEX128,
+		TBOOL:
+		ret = false
+
+	case TARRAY:
+		if t.Bound < 0 { // slice
+			ret = true
+			break
+		}
+
+		if t.Bound == 0 { // empty array
+			ret = false
+			break
+		}
+
+		ret = haspointers(t.Type)
+
+	case TSTRUCT:
+		ret = false
+		for t1 := t.Type; t1 != nil; t1 = t1.Down {
+			if haspointers(t1.Type) {
+				ret = true
+				break
+			}
+		}
+
+	case TSTRING,
+		TPTR32,
+		TPTR64,
+		TUNSAFEPTR,
+		TINTER,
+		TCHAN,
+		TMAP,
+		TFUNC:
+		fallthrough
+	default:
+		ret = true
+
+	case TFIELD:
+		Fatal("haspointers: unexpected type, %v", t)
+	}
+
+	t.Haspointers = 1 + uint8(obj.Bool2int(ret))
+	return ret
+}
+
+// typeptrdata returns the length in bytes of the prefix of t
+// containing pointer data. Anything after this offset is scalar data.
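+// For example (sketch, 64-bit): in struct { p *int; x int64; q *int; y int64 },
+// the last pointer field q sits at offset 16 and contributes 8 bytes of
+// pointer data, so typeptrdata is 24; the trailing y is scalar.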
+func typeptrdata(t *Type) int64 {
+	if !haspointers(t) {
+		return 0
+	}
+
+	switch t.Etype {
+	case TPTR32,
+		TPTR64,
+		TUNSAFEPTR,
+		TFUNC,
+		TCHAN,
+		TMAP:
+		return int64(Widthptr)
+
+	case TSTRING:
+		// struct { byte *str; intgo len; }
+		return int64(Widthptr)
+
+	case TINTER:
+		// struct { Itab *tab;	void *data; } or
+		// struct { Type *type; void *data; }
+		return 2 * int64(Widthptr)
+
+	case TARRAY:
+		if Isslice(t) {
+			// struct { byte *array; uintgo len; uintgo cap; }
+			return int64(Widthptr)
+		}
+		// haspointers already eliminated t.Bound == 0.
+		return (t.Bound-1)*t.Type.Width + typeptrdata(t.Type)
+
+	case TSTRUCT:
+		// Find the last field that has pointers.
+		var lastPtrField *Type
+		for t1 := t.Type; t1 != nil; t1 = t1.Down {
+			if haspointers(t1.Type) {
+				lastPtrField = t1
+			}
+		}
+		return lastPtrField.Width + typeptrdata(lastPtrField.Type)
+
+	default:
+		Fatal("typeptrdata: unexpected type, %v", t)
+		return 0
+	}
+}
+
+/*
+ * commonType
+ * ../../runtime/type.go:/commonType
+ */
+
+var dcommontype_algarray *Sym
+
+func dcommontype(s *Sym, ot int, t *Type) int {
+	if ot != 0 {
+		Fatal("dcommontype %d", ot)
+	}
+
+	sizeofAlg := 2 * Widthptr
+	if dcommontype_algarray == nil {
+		dcommontype_algarray = Pkglookup("algarray", Runtimepkg)
+	}
+	dowidth(t)
+	alg := algtype(t)
+	var algsym *Sym
+	if alg < 0 || alg == AMEM {
+		algsym = dalgsym(t)
+	}
+
+	var sptr *Sym
+	if t.Sym != nil && !Isptr[t.Etype] {
+		sptr = dtypesym(Ptrto(t))
+	} else {
+		sptr = weaktypesym(Ptrto(t))
+	}
+
+	// All (non-reflect-allocated) Types share the same zero object.
+	// Each place in the compiler where a pointer to the zero object
+	// might be returned by a runtime call (map access return value,
+	// 2-arg type cast) declares the size of the zerovalue it needs.
+	// The linker magically takes the max of all the sizes.
+	zero := Pkglookup("zerovalue", Runtimepkg)
+
+	gcsym, useGCProg, ptrdata := dgcsym(t)
+
+	// We use size 0 here so we get the pointer to the zero value,
+	// but don't allocate space for the zero value unless we need it.
+	// TODO: how do we get this symbol into bss?  We really want
+	// a read-only bss, but I don't think such a thing exists.
+
+	// ../../pkg/reflect/type.go:/^type.commonType
+	// actual type structure
+	//	type commonType struct {
+	//		size          uintptr
+	//		ptrsize       uintptr
+	//		hash          uint32
+	//		_             uint8
+	//		align         uint8
+	//		fieldAlign    uint8
+	//		kind          uint8
+	//		alg           unsafe.Pointer
+	//		gcdata        unsafe.Pointer
+	//		string        *string
+	//		*extraType
+	//		ptrToThis     *Type
+	//		zero          unsafe.Pointer
+	//	}
+	ot = duintptr(s, ot, uint64(t.Width))
+	ot = duintptr(s, ot, uint64(ptrdata))
+
+	ot = duint32(s, ot, typehash(t))
+	ot = duint8(s, ot, 0) // unused
+
+	// runtime (and common sense) expects alignment to be a power of two.
+	i := int(t.Align)
+
+	if i == 0 {
+		i = 1
+	}
+	if i&(i-1) != 0 {
+		Fatal("invalid alignment %d for %v", t.Align, t)
+	}
+	ot = duint8(s, ot, t.Align) // align
+	ot = duint8(s, ot, t.Align) // fieldAlign
+
+	i = kinds[t.Etype]
+	if t.Etype == TARRAY && t.Bound < 0 {
+		i = obj.KindSlice
+	}
+	if !haspointers(t) {
+		i |= obj.KindNoPointers
+	}
+	if isdirectiface(t) {
+		i |= obj.KindDirectIface
+	}
+	if useGCProg {
+		i |= obj.KindGCProg
+	}
+	ot = duint8(s, ot, uint8(i)) // kind
+	if algsym == nil {
+		ot = dsymptr(s, ot, dcommontype_algarray, alg*sizeofAlg)
+	} else {
+		ot = dsymptr(s, ot, algsym, 0)
+	}
+	ot = dsymptr(s, ot, gcsym, 0)
+
+	p := Tconv(t, obj.FmtLeft|obj.FmtUnsigned)
+
+	//print("dcommontype: %s\n", p);
+	ot = dgostringptr(s, ot, p) // string
+
+	// skip pointer to extraType,
+	// which follows the rest of this type structure.
+	// caller will fill in if needed.
+	// otherwise linker will assume 0.
+	ot += Widthptr
+
+	ot = dsymptr(s, ot, sptr, 0) // ptrto type
+	ot = dsymptr(s, ot, zero, 0) // ptr to zero value
+	return ot
+}
+
+func typesym(t *Type) *Sym {
+	return Pkglookup(Tconv(t, obj.FmtLeft), typepkg)
+}
+
+func tracksym(t *Type) *Sym {
+	return Pkglookup(Tconv(t.Outer, obj.FmtLeft)+"."+t.Sym.Name, trackpkg)
+}
+
+func typelinksym(t *Type) *Sym {
+	// %-uT is what the generated Type's string field says.
+	// It uses (ambiguous) package names instead of import paths.
+	// %-T is the complete, unambiguous type name.
+	// We want the types to end up sorted by string field,
+	// so use that first in the name, and then add :%-T to
+	// disambiguate. We use a tab character as the separator to
+	// ensure the types appear sorted by their string field. The
+	// names are a little long but they are discarded by the linker
+	// and do not end up in the symbol table of the final binary.
+	p := Tconv(t, obj.FmtLeft|obj.FmtUnsigned) + "\t" + Tconv(t, obj.FmtLeft)
+
+	s := Pkglookup(p, typelinkpkg)
+
+	//print("typelinksym: %s -> %+S\n", p, s);
+
+	return s
+}
+
+func typesymprefix(prefix string, t *Type) *Sym {
+	p := prefix + "." + Tconv(t, obj.FmtLeft)
+	s := Pkglookup(p, typepkg)
+
+	//print("algsym: %s -> %+S\n", p, s);
+
+	return s
+}
+
+func typenamesym(t *Type) *Sym {
+	if t == nil || (Isptr[t.Etype] && t.Type == nil) || isideal(t) {
+		Fatal("typename %v", t)
+	}
+	s := typesym(t)
+	if s.Def == nil {
+		n := Nod(ONAME, nil, nil)
+		n.Sym = s
+		n.Type = Types[TUINT8]
+		n.Addable = true
+		n.Ullman = 1
+		n.Class = PEXTERN
+		n.Xoffset = 0
+		n.Typecheck = 1
+		s.Def = n
+
+		signatlist = list(signatlist, typenod(t))
+	}
+
+	return s.Def.Sym
+}
+
+func typename(t *Type) *Node {
+	s := typenamesym(t)
+	n := Nod(OADDR, s.Def, nil)
+	n.Type = Ptrto(s.Def.Type)
+	n.Addable = true
+	n.Ullman = 2
+	n.Typecheck = 1
+	return n
+}
+
+func weaktypesym(t *Type) *Sym {
+	p := Tconv(t, obj.FmtLeft)
+	s := Pkglookup(p, weaktypepkg)
+
+	//print("weaktypesym: %s -> %+S\n", p, s);
+
+	return s
+}
+
+/*
+ * Returns true if t has a reflexive equality operator.
+ * That is, if x==x for all x of type t.
+ * (Floating-point and complex types are not: NaN != NaN.)
+ */
+func isreflexive(t *Type) bool {
+	switch t.Etype {
+	case TBOOL,
+		TINT,
+		TUINT,
+		TINT8,
+		TUINT8,
+		TINT16,
+		TUINT16,
+		TINT32,
+		TUINT32,
+		TINT64,
+		TUINT64,
+		TUINTPTR,
+		TPTR32,
+		TPTR64,
+		TUNSAFEPTR,
+		TSTRING,
+		TCHAN:
+		return true
+
+	case TFLOAT32,
+		TFLOAT64,
+		TCOMPLEX64,
+		TCOMPLEX128,
+		TINTER:
+		return false
+
+	case TARRAY:
+		if Isslice(t) {
+			Fatal("slice can't be a map key: %v", t)
+		}
+		return isreflexive(t.Type)
+
+	case TSTRUCT:
+		for t1 := t.Type; t1 != nil; t1 = t1.Down {
+			if !isreflexive(t1.Type) {
+				return false
+			}
+		}
+
+		return true
+
+	default:
+		Fatal("bad type for map key: %v", t)
+		return false
+	}
+}
+
+func dtypesym(t *Type) *Sym {
+	// Replace byte, rune aliases with real type.
+	// They've been separate internally to make error messages
+	// better, but we have to merge them in the reflect tables.
+	if t == bytetype || t == runetype {
+		t = Types[t.Etype]
+	}
+
+	if isideal(t) {
+		Fatal("dtypesym %v", t)
+	}
+
+	s := typesym(t)
+	if s.Flags&SymSiggen != 0 {
+		return s
+	}
+	s.Flags |= SymSiggen
+
+	// special case (look for runtime below):
+	// when compiling package runtime,
+	// emit the type structures for int, float, etc.
+	tbase := t
+
+	if Isptr[t.Etype] && t.Sym == nil && t.Type.Sym != nil {
+		tbase = t.Type
+	}
+	dupok := 0
+	if tbase.Sym == nil {
+		dupok = obj.DUPOK
+	}
+
+	if compiling_runtime != 0 && (tbase == Types[tbase.Etype] || tbase == bytetype || tbase == runetype || tbase == errortype) { // int, float, etc
+		goto ok
+	}
+
+	// named types from other files are defined only by those files
+	if tbase.Sym != nil && !tbase.Local {
+		return s
+	}
+	if isforw[tbase.Etype] {
+		return s
+	}
+
+ok:
+	ot := 0
+	xt := 0
+	switch t.Etype {
+	default:
+		ot = dcommontype(s, ot, t)
+		xt = ot - 3*Widthptr
+
+	case TARRAY:
+		if t.Bound >= 0 {
+			// ../../runtime/type.go:/ArrayType
+			s1 := dtypesym(t.Type)
+
+			t2 := typ(TARRAY)
+			t2.Type = t.Type
+			t2.Bound = -1 // slice
+			s2 := dtypesym(t2)
+			ot = dcommontype(s, ot, t)
+			xt = ot - 3*Widthptr
+			ot = dsymptr(s, ot, s1, 0)
+			ot = dsymptr(s, ot, s2, 0)
+			ot = duintptr(s, ot, uint64(t.Bound))
+		} else {
+			// ../../runtime/type.go:/SliceType
+			s1 := dtypesym(t.Type)
+
+			ot = dcommontype(s, ot, t)
+			xt = ot - 3*Widthptr
+			ot = dsymptr(s, ot, s1, 0)
+		}
+
+		// ../../runtime/type.go:/ChanType
+	case TCHAN:
+		s1 := dtypesym(t.Type)
+
+		ot = dcommontype(s, ot, t)
+		xt = ot - 3*Widthptr
+		ot = dsymptr(s, ot, s1, 0)
+		ot = duintptr(s, ot, uint64(t.Chan))
+
+	case TFUNC:
+		for t1 := getthisx(t).Type; t1 != nil; t1 = t1.Down {
+			dtypesym(t1.Type)
+		}
+		isddd := false
+		for t1 := getinargx(t).Type; t1 != nil; t1 = t1.Down {
+			isddd = t1.Isddd
+			dtypesym(t1.Type)
+		}
+
+		for t1 := getoutargx(t).Type; t1 != nil; t1 = t1.Down {
+			dtypesym(t1.Type)
+		}
+
+		ot = dcommontype(s, ot, t)
+		xt = ot - 3*Widthptr
+		ot = duint8(s, ot, uint8(obj.Bool2int(isddd)))
+
+		// two slice headers: in and out.
+		ot = int(Rnd(int64(ot), int64(Widthptr)))
+
+		ot = dsymptr(s, ot, s, ot+2*(Widthptr+2*Widthint))
+		n := t.Thistuple + t.Intuple
+		ot = duintxx(s, ot, uint64(n), Widthint)
+		ot = duintxx(s, ot, uint64(n), Widthint)
+		ot = dsymptr(s, ot, s, ot+1*(Widthptr+2*Widthint)+n*Widthptr)
+		ot = duintxx(s, ot, uint64(t.Outtuple), Widthint)
+		ot = duintxx(s, ot, uint64(t.Outtuple), Widthint)
+
+		// slice data
+		for t1 := getthisx(t).Type; t1 != nil; t1 = t1.Down {
+			ot = dsymptr(s, ot, dtypesym(t1.Type), 0)
+			n++
+		}
+		for t1 := getinargx(t).Type; t1 != nil; t1 = t1.Down {
+			ot = dsymptr(s, ot, dtypesym(t1.Type), 0)
+			n++
+		}
+		for t1 := getoutargx(t).Type; t1 != nil; t1 = t1.Down {
+			ot = dsymptr(s, ot, dtypesym(t1.Type), 0)
+			n++
+		}
+
+	case TINTER:
+		m := imethods(t)
+		n := 0
+		for a := m; a != nil; a = a.link {
+			dtypesym(a.type_)
+			n++
+		}
+
+		// ../../runtime/type.go:/InterfaceType
+		ot = dcommontype(s, ot, t)
+
+		xt = ot - 3*Widthptr
+		ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint)
+		ot = duintxx(s, ot, uint64(n), Widthint)
+		ot = duintxx(s, ot, uint64(n), Widthint)
+		for a := m; a != nil; a = a.link {
+			// ../../runtime/type.go:/imethod
+			ot = dgostringptr(s, ot, a.name)
+
+			ot = dgopkgpath(s, ot, a.pkg)
+			ot = dsymptr(s, ot, dtypesym(a.type_), 0)
+		}
+
+		// ../../runtime/type.go:/MapType
+	case TMAP:
+		s1 := dtypesym(t.Down)
+
+		s2 := dtypesym(t.Type)
+		s3 := dtypesym(mapbucket(t))
+		s4 := dtypesym(hmap(t))
+		ot = dcommontype(s, ot, t)
+		xt = ot - 3*Widthptr
+		ot = dsymptr(s, ot, s1, 0)
+		ot = dsymptr(s, ot, s2, 0)
+		ot = dsymptr(s, ot, s3, 0)
+		ot = dsymptr(s, ot, s4, 0)
+		if t.Down.Width > MAXKEYSIZE {
+			ot = duint8(s, ot, uint8(Widthptr))
+			ot = duint8(s, ot, 1) // indirect
+		} else {
+			ot = duint8(s, ot, uint8(t.Down.Width))
+			ot = duint8(s, ot, 0) // not indirect
+		}
+
+		if t.Type.Width > MAXVALSIZE {
+			ot = duint8(s, ot, uint8(Widthptr))
+			ot = duint8(s, ot, 1) // indirect
+		} else {
+			ot = duint8(s, ot, uint8(t.Type.Width))
+			ot = duint8(s, ot, 0) // not indirect
+		}
+
+		ot = duint16(s, ot, uint16(mapbucket(t).Width))
+		ot = duint8(s, ot, uint8(obj.Bool2int(isreflexive(t.Down))))
+
+	case TPTR32, TPTR64:
+		if t.Type.Etype == TANY {
+			// ../../runtime/type.go:/UnsafePointerType
+			ot = dcommontype(s, ot, t)
+
+			break
+		}
+
+		// ../../runtime/type.go:/PtrType
+		s1 := dtypesym(t.Type)
+
+		ot = dcommontype(s, ot, t)
+		xt = ot - 3*Widthptr
+		ot = dsymptr(s, ot, s1, 0)
+
+		// ../../runtime/type.go:/StructType
+	// for security, only the exported fields.
+	case TSTRUCT:
+		n := 0
+
+		for t1 := t.Type; t1 != nil; t1 = t1.Down {
+			dtypesym(t1.Type)
+			n++
+		}
+
+		ot = dcommontype(s, ot, t)
+		xt = ot - 3*Widthptr
+		ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint)
+		ot = duintxx(s, ot, uint64(n), Widthint)
+		ot = duintxx(s, ot, uint64(n), Widthint)
+		for t1 := t.Type; t1 != nil; t1 = t1.Down {
+			// ../../runtime/type.go:/structField
+			if t1.Sym != nil && t1.Embedded == 0 {
+				ot = dgostringptr(s, ot, t1.Sym.Name)
+				if exportname(t1.Sym.Name) {
+					ot = dgostringptr(s, ot, "")
+				} else {
+					ot = dgopkgpath(s, ot, t1.Sym.Pkg)
+				}
+			} else {
+				ot = dgostringptr(s, ot, "")
+				if t1.Type.Sym != nil && t1.Type.Sym.Pkg == builtinpkg {
+					ot = dgopkgpath(s, ot, localpkg)
+				} else {
+					ot = dgostringptr(s, ot, "")
+				}
+			}
+
+			ot = dsymptr(s, ot, dtypesym(t1.Type), 0)
+			ot = dgostrlitptr(s, ot, t1.Note)
+			ot = duintptr(s, ot, uint64(t1.Width)) // field offset
+		}
+	}
+
+	ot = dextratype(s, ot, t, xt)
+	ggloblsym(s, int32(ot), int16(dupok|obj.RODATA))
+
+	// generate typelink.foo pointing at s = type.foo.
+	// The linker will leave a table of all the typelinks for
+	// types in the binary, so reflect can find them.
+	// We only need the link for unnamed composites that
+	// we want to be able to find.
+	if t.Sym == nil {
+		switch t.Etype {
+		case TPTR32, TPTR64:
+			// The ptrto field of the type data cannot be relied on when
+			// dynamic linking: a type T may be defined in a module that makes
+			// no use of pointers to that type, but another module can contain
+			// a package that imports the first one and does use *T pointers.
+			// The second module will end up defining type data for *T and a
+			// type.*T symbol pointing at it. It's important that calling
+			// .PtrTo() on the reflect.Type for T returns this type data and
+			// not some synthesized object, so we need reflect to be able to
+			// find it!
+			if !Ctxt.Flag_dynlink {
+				break
+			}
+			fallthrough
+		case TARRAY, TCHAN, TFUNC, TMAP:
+			slink := typelinksym(t)
+			dsymptr(slink, 0, s, 0)
+			ggloblsym(slink, int32(Widthptr), int16(dupok|obj.RODATA))
+		}
+	}
+
+	return s
+}
+
+func dumptypestructs() {
+	var n *Node
+
+	// copy types from externdcl list to signatlist
+	for l := externdcl; l != nil; l = l.Next {
+		n = l.N
+		if n.Op != OTYPE {
+			continue
+		}
+		signatlist = list(signatlist, n)
+	}
+
+	// process signatlist
+	var t *Type
+	for l := signatlist; l != nil; l = l.Next {
+		n = l.N
+		if n.Op != OTYPE {
+			continue
+		}
+		t = n.Type
+		dtypesym(t)
+		if t.Sym != nil {
+			dtypesym(Ptrto(t))
+		}
+	}
+
+	// generate import strings for imported packages
+	for _, p := range pkgs {
+		if p.Direct != 0 {
+			dimportpath(p)
+		}
+	}
+
+	// do basic types if compiling package runtime.
+	// they have to be in at least one package,
+	// and runtime is always loaded implicitly,
+	// so this is as good as any.
+	// another possible choice would be package main,
+	// but using runtime means fewer copies in .6 files.
+	if compiling_runtime != 0 {
+		for i := 1; i <= TBOOL; i++ {
+			dtypesym(Ptrto(Types[i]))
+		}
+		dtypesym(Ptrto(Types[TSTRING]))
+		dtypesym(Ptrto(Types[TUNSAFEPTR]))
+
+		// emit type structs for error and func(error) string.
+		// The latter is the type of an auto-generated wrapper.
+		dtypesym(Ptrto(errortype))
+
+		dtypesym(functype(nil, list1(Nod(ODCLFIELD, nil, typenod(errortype))), list1(Nod(ODCLFIELD, nil, typenod(Types[TSTRING])))))
+
+		// add paths for runtime and main, which 6l imports implicitly.
+		dimportpath(Runtimepkg)
+
+		if flag_race != 0 {
+			dimportpath(racepkg)
+		}
+		dimportpath(mkpkg("main"))
+	}
+}
+
+func dalgsym(t *Type) *Sym {
+	var s *Sym
+	var hashfunc *Sym
+	var eqfunc *Sym
+
+	// dalgsym is only called for a type that needs an algorithm table,
+	// which implies that the type is comparable (or else it would use ANOEQ).
+
+	if algtype(t) == AMEM {
+		// we use one algorithm table for all AMEM types of a given size
+		p := fmt.Sprintf(".alg%d", t.Width)
+
+		s = Pkglookup(p, typepkg)
+
+		if s.Flags&SymAlgGen != 0 {
+			return s
+		}
+		s.Flags |= SymAlgGen
+
+		// make hash closure
+		p = fmt.Sprintf(".hashfunc%d", t.Width)
+
+		hashfunc = Pkglookup(p, typepkg)
+
+		ot := 0
+		ot = dsymptr(hashfunc, ot, Pkglookup("memhash_varlen", Runtimepkg), 0)
+		ot = duintxx(hashfunc, ot, uint64(t.Width), Widthptr) // size encoded in closure
+		ggloblsym(hashfunc, int32(ot), obj.DUPOK|obj.RODATA)
+
+		// make equality closure
+		p = fmt.Sprintf(".eqfunc%d", t.Width)
+
+		eqfunc = Pkglookup(p, typepkg)
+
+		ot = 0
+		ot = dsymptr(eqfunc, ot, Pkglookup("memequal_varlen", Runtimepkg), 0)
+		ot = duintxx(eqfunc, ot, uint64(t.Width), Widthptr)
+		ggloblsym(eqfunc, int32(ot), obj.DUPOK|obj.RODATA)
+	} else {
+		// generate an alg table specific to this type
+		s = typesymprefix(".alg", t)
+
+		hash := typesymprefix(".hash", t)
+		eq := typesymprefix(".eq", t)
+		hashfunc = typesymprefix(".hashfunc", t)
+		eqfunc = typesymprefix(".eqfunc", t)
+
+		genhash(hash, t)
+		geneq(eq, t)
+
+		// make Go funcs (closures) for calling hash and equal from Go
+		dsymptr(hashfunc, 0, hash, 0)
+
+		ggloblsym(hashfunc, int32(Widthptr), obj.DUPOK|obj.RODATA)
+		dsymptr(eqfunc, 0, eq, 0)
+		ggloblsym(eqfunc, int32(Widthptr), obj.DUPOK|obj.RODATA)
+	}
+
+	// ../../runtime/alg.go:/typeAlg
+	ot := 0
+
+	ot = dsymptr(s, ot, hashfunc, 0)
+	ot = dsymptr(s, ot, eqfunc, 0)
+	ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA)
+	return s
+}
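+
+// For example (illustrative): a struct with two int64 fields compares
+// as 16 raw bytes, so algtype(t) is AMEM and dalgsym returns the shared
+// ".alg16" table built around memhash_varlen and memequal_varlen.
+// A struct containing a string field instead gets its own generated
+// .hash and .eq functions via genhash and geneq.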
+
+// maxPtrmaskBytes is the maximum length of a GC ptrmask bitmap,
+// which holds 1-bit entries describing where pointers are in a given type.
+// 16 bytes is enough to describe 128 pointer-sized words, that is,
+// 512 or 1024 bytes of memory depending on the system. Above this
+// length, the GC information is recorded as a GC program, which can
+// express repetition compactly. In either form, the information is
+// used by the runtime to initialize the heap bitmap, and for large
+// types (128 words or more) the two forms are roughly the same
+// speed. GC programs are never much larger and often
+// more compact. (If large arrays are involved, they can be arbitrarily more
+// compact.)
+//
+// The cutoff must be large enough that any allocation large enough to
+// use a GC program is large enough that it does not share heap bitmap
+// bytes with any other objects, allowing the GC program execution to
+// assume an aligned start and not use atomic operations. In the current
+// runtime, this means all malloc size classes larger than the cutoff must
+// be multiples of four words. On 32-bit systems that's 16 bytes, and
+// all size classes >= 16 bytes are 16-byte aligned, so no real constraint.
+// On 64-bit systems, that's 32 bytes, and 32-byte alignment is guaranteed
+// for size classes >= 256 bytes. On a 64-bit system, 256 bytes allocated
+// is 32 pointers, the bits for which fit in 4 bytes. So maxPtrmaskBytes
+// must be >= 4.
+//
+// We use 16 because the GC programs do have some constant overhead
+// to get started, and processing 128 pointers seems to be enough to
+// amortize that overhead well.
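+//
+// With maxPtrmaskBytes = 16 and 8-byte words, any type whose
+// pointer-bearing prefix (typeptrdata) exceeds 16*8 = 128 pointer-sized
+// words, that is, 1024 bytes, is described by a GC program instead of
+// a mask; see dgcsym below.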
+const maxPtrmaskBytes = 16
+
+// dgcsym emits and returns a data symbol containing GC information for type t,
+// along with a boolean reporting whether the UseGCProg bit should be set in
+// the type kind, and the ptrdata field to record in the reflect type information.
+func dgcsym(t *Type) (sym *Sym, useGCProg bool, ptrdata int64) {
+	ptrdata = typeptrdata(t)
+	if ptrdata/int64(Widthptr) <= maxPtrmaskBytes*8 {
+		sym = dgcptrmask(t)
+		return
+	}
+
+	useGCProg = true
+	sym, ptrdata = dgcprog(t)
+	return
+}
+
+// dgcptrmask emits and returns the symbol containing a pointer mask for type t.
+func dgcptrmask(t *Type) *Sym {
+	ptrmask := make([]byte, (typeptrdata(t)/int64(Widthptr)+7)/8)
+	fillptrmask(t, ptrmask)
+	p := fmt.Sprintf("gcbits.%x", ptrmask)
+
+	sym := Pkglookup(p, Runtimepkg)
+	if sym.Flags&SymUniq == 0 {
+		sym.Flags |= SymUniq
+		for i, x := range ptrmask {
+			duint8(sym, i, x)
+		}
+		ggloblsym(sym, int32(len(ptrmask)), obj.DUPOK|obj.RODATA|obj.LOCAL)
+	}
+	return sym
+}
+
+// fillptrmask fills in ptrmask with 1s corresponding to the
+// word offsets in t that hold pointers.
+// ptrmask is assumed to fit at least typeptrdata(t)/Widthptr bits.
+func fillptrmask(t *Type, ptrmask []byte) {
+	for i := range ptrmask {
+		ptrmask[i] = 0
+	}
+	if !haspointers(t) {
+		return
+	}
+
+	vec := bvalloc(8 * int32(len(ptrmask)))
+	xoffset := int64(0)
+	onebitwalktype1(t, &xoffset, vec)
+
+	nptr := typeptrdata(t) / int64(Widthptr)
+	for i := int64(0); i < nptr; i++ {
+		if bvget(vec, int32(i)) == 1 {
+			ptrmask[i/8] |= 1 << (uint(i) % 8)
+		}
+	}
+}
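+
+// For example (illustrative), for a struct { p *int; n int; q *int }
+// on a 64-bit system, words 0 and 2 hold pointers, so fillptrmask
+// sets bits 0 and 2 and the single mask byte is 0x5.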
+
+// dgcprog emits and returns the symbol containing a GC program for type t
+// along with the size of the data described by the program (in the range [typeptrdata(t), t.Width]).
+// In practice, the size is typeptrdata(t) except for non-trivial arrays.
+// For non-trivial arrays, the program describes the full t.Width size.
+func dgcprog(t *Type) (*Sym, int64) {
+	dowidth(t)
+	if t.Width == BADWIDTH {
+		Fatal("dgcprog: %v badwidth", t)
+	}
+	sym := typesymprefix(".gcprog", t)
+	var p GCProg
+	p.init(sym)
+	p.emit(t, 0)
+	offset := p.w.BitIndex() * int64(Widthptr)
+	p.end()
+	if ptrdata := typeptrdata(t); offset < ptrdata || offset > t.Width {
+		Fatal("dgcprog: %v: offset=%d but ptrdata=%d size=%d", t, offset, ptrdata, t.Width)
+	}
+	return sym, offset
+}
+
+type GCProg struct {
+	sym    *Sym
+	symoff int
+	w      gcprog.Writer
+}
+
+var Debug_gcprog int // set by -d gcprog
+
+func (p *GCProg) init(sym *Sym) {
+	p.sym = sym
+	p.symoff = 4 // first 4 bytes hold program length
+	p.w.Init(p.writeByte)
+	if Debug_gcprog > 0 {
+		fmt.Fprintf(os.Stderr, "compile: start GCProg for %v\n", sym)
+		p.w.Debug(os.Stderr)
+	}
+}
+
+func (p *GCProg) writeByte(x byte) {
+	p.symoff = duint8(p.sym, p.symoff, x)
+}
+
+func (p *GCProg) end() {
+	p.w.End()
+	duint32(p.sym, 0, uint32(p.symoff-4))
+	ggloblsym(p.sym, int32(p.symoff), obj.DUPOK|obj.RODATA|obj.LOCAL)
+	if Debug_gcprog > 0 {
+		fmt.Fprintf(os.Stderr, "compile: end GCProg for %v\n", p.sym)
+	}
+}
+
+func (p *GCProg) emit(t *Type, offset int64) {
+	dowidth(t)
+	if !haspointers(t) {
+		return
+	}
+	if t.Width == int64(Widthptr) {
+		p.w.Ptr(offset / int64(Widthptr))
+		return
+	}
+	switch t.Etype {
+	default:
+		Fatal("GCProg.emit: unexpected type %v", t)
+
+	case TSTRING:
+		p.w.Ptr(offset / int64(Widthptr))
+
+	case TINTER:
+		p.w.Ptr(offset / int64(Widthptr))
+		p.w.Ptr(offset/int64(Widthptr) + 1)
+
+	case TARRAY:
+		if Isslice(t) {
+			p.w.Ptr(offset / int64(Widthptr))
+			return
+		}
+		if t.Bound == 0 {
+			// should have been handled by haspointers check above
+			Fatal("GCProg.emit: empty array")
+		}
+
+		// Flatten array-of-array-of-array to just a big array by multiplying counts.
+		count := t.Bound
+		elem := t.Type
+		for Isfixedarray(elem) {
+			count *= elem.Bound
+			elem = elem.Type
+		}
+
+		if !p.w.ShouldRepeat(elem.Width/int64(Widthptr), count) {
+			// Cheaper to just emit the bits.
+			for i := int64(0); i < count; i++ {
+				p.emit(elem, offset+i*elem.Width)
+			}
+			return
+		}
+		p.emit(elem, offset)
+		p.w.ZeroUntil((offset + elem.Width) / int64(Widthptr))
+		p.w.Repeat(elem.Width/int64(Widthptr), count-1)
+
+	case TSTRUCT:
+		for t1 := t.Type; t1 != nil; t1 = t1.Down {
+			p.emit(t1.Type, offset+t1.Width)
+		}
+	}
+}
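+
+// For example, for an array such as [1000]*byte the program emits the
+// pointer bit for a single element and then a repeat instruction
+// covering the remaining 999 elements, rather than 1000 separate bits.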
diff --git a/src/cmd/compile/internal/gc/reg.go b/src/cmd/compile/internal/gc/reg.go
new file mode 100644
index 0000000..afe9523
--- /dev/null
+++ b/src/cmd/compile/internal/gc/reg.go
@@ -0,0 +1,1559 @@
+// Derived from Inferno utils/6c/reg.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/reg.c
+//
+//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
+//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+//	Portions Copyright © 1997-1999 Vita Nuova Limited
+//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+//	Portions Copyright © 2004,2006 Bruce Ellis
+//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+//	Portions Copyright © 2009 The Go Authors.  All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package gc
+
+import (
+	"bytes"
+	"cmd/internal/obj"
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// A Var represents a single variable that may be stored in a register.
+// That variable may itself correspond to a hardware register,
+// to represent the use of registers in the unoptimized instruction stream.
+type Var struct {
+	offset     int64
+	node       *Node
+	nextinnode *Var
+	width      int
+	id         int // index in vars
+	name       int8
+	etype      int8
+	addr       int8
+}
+
+// Bits represents a set of Vars, stored as a bit set of var numbers
+// (the index in vars, or equivalently v.id).
+type Bits struct {
+	b [BITS]uint64
+}
+
+const (
+	BITS = 3
+	NVAR = BITS * 64
+)
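+
+// With BITS = 3, a Bits can track up to NVAR = 192 variables;
+// variable id i lives at bit i%64 of word b[i/64]
+// (see blsh, btest, biset, and biclr below).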
+
+var (
+	vars [NVAR]Var // variables under consideration
+	nvar int       // number of vars
+
+	regbits uint64 // bits for hardware registers
+
+	zbits   Bits // zero
+	externs Bits // global variables
+	params  Bits // function parameters and results
+	ivar    Bits // function parameters (inputs)
+	ovar    Bits // function results (outputs)
+	consts  Bits // constant values
+	addrs   Bits // variables with address taken
+)
+
+// A Reg is a wrapper around a single Prog (one instruction) that holds
+// register optimization information while the optimizer runs.
+// r->prog is the instruction.
+type Reg struct {
+	set  Bits // regopt variables written by this instruction.
+	use1 Bits // regopt variables read by prog->from.
+	use2 Bits // regopt variables read by prog->to.
+
+	// refahead/refbehind are the regopt variables whose current
+	// value may be used in the following/preceding instructions
+	// up to a CALL (or the value is clobbered).
+	refbehind Bits
+	refahead  Bits
+
+	// calahead/calbehind are similar, but for variables in
+	// instructions that are reachable after hitting at least one
+	// CALL.
+	calbehind Bits
+	calahead  Bits
+
+	regdiff Bits
+	act     Bits
+	regu    uint64 // register used bitmap
+}
+
+// A Rgn represents a single regopt variable over a region of code
+// where a register could potentially be dedicated to that variable.
+// The code encompassed by a Rgn is defined by the flow graph,
+// starting at enter, flood-filling forward while varno is refahead
+// and backward while varno is refbehind, and following branches.
+// A single variable may be represented by multiple disjoint Rgns and
+// each Rgn may choose a different register for that variable.
+// Registers are allocated to regions greedily in order of descending
+// cost.
+type Rgn struct {
+	enter *Flow
+	cost  int16
+	varno int16
+	regno int16
+}
+
+// The Plan 9 C compilers used a limit of 600 regions,
+// but the yacc-generated parser in y.go has 3100 regions.
+// We set MaxRgn large enough to handle that.
+// There's not a huge cost to having too many regions:
+// the main processing traces the live area for each variable,
+// which is limited by the number of variables times the area,
+// not the raw region count. If there are many regions, they
+// are almost certainly small and easy to trace.
+// The only operation that scales with region count is the
+// sorting by cost, which uses sort.Sort and is therefore
+// guaranteed n log n.
+const MaxRgn = 6000
+
+var (
+	region  []Rgn
+	nregion int
+)
+
+type rcmp []Rgn
+
+func (x rcmp) Len() int {
+	return len(x)
+}
+
+func (x rcmp) Swap(i, j int) {
+	x[i], x[j] = x[j], x[i]
+}
+
+func (x rcmp) Less(i, j int) bool {
+	p1 := &x[i]
+	p2 := &x[j]
+	if p1.cost != p2.cost {
+		return int(p2.cost)-int(p1.cost) < 0
+	}
+	if p1.varno != p2.varno {
+		return int(p2.varno)-int(p1.varno) < 0
+	}
+	if p1.enter != p2.enter {
+		return int(p2.enter.Id-p1.enter.Id) < 0
+	}
+	return false
+}
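+
+// Sorting with rcmp orders regions by descending cost, so the greedy
+// allocation in regopt considers the most profitable regions first;
+// ties fall back to varno and then to the entry Flow's Id, keeping
+// the order deterministic.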
+
+func setaddrs(bit Bits) {
+	var i int
+	var n int
+	var v *Var
+	var node *Node
+
+	for bany(&bit) {
+		// convert each bit to a variable
+		i = bnum(bit)
+
+		node = vars[i].node
+		n = int(vars[i].name)
+		biclr(&bit, uint(i))
+
+		// disable all pieces of that variable
+		for i = 0; i < nvar; i++ {
+			v = &vars[i]
+			if v.node == node && int(v.name) == n {
+				v.addr = 2
+			}
+		}
+	}
+}
+
+var regnodes [64]*Node
+
+func walkvardef(n *Node, f *Flow, active int) {
+	var f1 *Flow
+	var bn int
+	var v *Var
+
+	for f1 = f; f1 != nil; f1 = f1.S1 {
+		if f1.Active == int32(active) {
+			break
+		}
+		f1.Active = int32(active)
+		if f1.Prog.As == obj.AVARKILL && f1.Prog.To.Node == n {
+			break
+		}
+		for v, _ = n.Opt.(*Var); v != nil; v = v.nextinnode {
+			bn = v.id
+			biset(&(f1.Data.(*Reg)).act, uint(bn))
+		}
+
+		if f1.Prog.As == obj.ACALL {
+			break
+		}
+	}
+
+	for f2 := f; f2 != f1; f2 = f2.S1 {
+		if f2.S2 != nil {
+			walkvardef(n, f2.S2, active)
+		}
+	}
+}
+
+// addmove inserts a move involving variable bn and register rn
+// just after r: a load of the variable into the register when
+// f == 0, and a store of the register back to the variable otherwise.
+func addmove(r *Flow, bn int, rn int, f int) {
+	p1 := Ctxt.NewProg()
+	Clearp(p1)
+	p1.Pc = 9999
+
+	p := r.Prog
+	p1.Link = p.Link
+	p.Link = p1
+	p1.Lineno = p.Lineno
+
+	v := &vars[bn]
+
+	a := &p1.To
+	a.Offset = v.offset
+	a.Etype = uint8(v.etype)
+	a.Type = obj.TYPE_MEM
+	a.Name = v.name
+	a.Node = v.node
+	a.Sym = Linksym(v.node.Sym)
+
+	/* NOTE(rsc): 9g did
+	if(a->etype == TARRAY)
+		a->type = TYPE_ADDR;
+	else if(a->sym == nil)
+		a->type = TYPE_CONST;
+	*/
+	p1.As = int16(Thearch.Optoas(OAS, Types[uint8(v.etype)]))
+
+	// TODO(rsc): Remove special case here.
+	if (Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9') && v.etype == TBOOL {
+		p1.As = int16(Thearch.Optoas(OAS, Types[TUINT8]))
+	}
+	p1.From.Type = obj.TYPE_REG
+	p1.From.Reg = int16(rn)
+	p1.From.Name = obj.NAME_NONE
+	if f == 0 {
+		p1.From = *a
+		*a = obj.Addr{}
+		a.Type = obj.TYPE_REG
+		a.Reg = int16(rn)
+	}
+
+	if Debug['R'] != 0 && Debug['v'] != 0 {
+		fmt.Printf("%v ===add=== %v\n", p, p1)
+	}
+	Ostats.Nspill++
+}
+
+func overlap_reg(o1 int64, w1 int, o2 int64, w2 int) bool {
+	t1 := o1 + int64(w1)
+	t2 := o2 + int64(w2)
+
+	if t1 <= o2 || t2 <= o1 {
+		return false
+	}
+
+	return true
+}
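+
+// For example, overlap_reg(0, 8, 4, 8) reports true: the byte ranges
+// [0,8) and [4,12) intersect, so the two variables share storage.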
+
+func mkvar(f *Flow, a *obj.Addr) Bits {
+	/*
+	 * mark registers used
+	 */
+	if a.Type == obj.TYPE_NONE {
+		return zbits
+	}
+
+	r := f.Data.(*Reg)
+	r.use1.b[0] |= Thearch.Doregbits(int(a.Index)) // TODO: Use RtoB
+
+	var n int
+	switch a.Type {
+	default:
+		regu := Thearch.Doregbits(int(a.Reg)) | Thearch.RtoB(int(a.Reg)) // TODO: Use RtoB
+		if regu == 0 {
+			return zbits
+		}
+		bit := zbits
+		bit.b[0] = regu
+		return bit
+
+		// TODO(rsc): Remove special case here.
+	case obj.TYPE_ADDR:
+		var bit Bits
+		if Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9' {
+			goto memcase
+		}
+		a.Type = obj.TYPE_MEM
+		bit = mkvar(f, a)
+		setaddrs(bit)
+		a.Type = obj.TYPE_ADDR
+		Ostats.Naddr++
+		return zbits
+
+	memcase:
+		fallthrough
+
+	case obj.TYPE_MEM:
+		if r != nil {
+			r.use1.b[0] |= Thearch.RtoB(int(a.Reg))
+		}
+
+		/* NOTE: 5g did
+		if(r->f.prog->scond & (C_PBIT|C_WBIT))
+			r->set.b[0] |= RtoB(a->reg);
+		*/
+		switch a.Name {
+		default:
+			// Note: This case handles NAME_EXTERN and NAME_STATIC.
+			// We treat these as requiring eager writes to memory, due to
+			// the possibility of a fault handler looking at them, so there is
+			// not much point in registerizing the loads.
+			// If we later choose the set of candidate variables from a
+			// larger list, these cases could be deprioritized instead of
+			// removed entirely.
+			return zbits
+
+		case obj.NAME_PARAM,
+			obj.NAME_AUTO:
+			n = int(a.Name)
+		}
+	}
+
+	node, _ := a.Node.(*Node)
+	if node == nil || node.Op != ONAME || node.Orig == nil {
+		return zbits
+	}
+	node = node.Orig
+	if node.Orig != node {
+		Fatal("%v: bad node", Ctxt.Dconv(a))
+	}
+	if node.Sym == nil || node.Sym.Name[0] == '.' {
+		return zbits
+	}
+	et := int(a.Etype)
+	o := a.Offset
+	w := a.Width
+	if w < 0 {
+		Fatal("bad width %d for %v", w, Ctxt.Dconv(a))
+	}
+
+	flag := 0
+	var v *Var
+	for i := 0; i < nvar; i++ {
+		v = &vars[i]
+		if v.node == node && int(v.name) == n {
+			if v.offset == o {
+				if int(v.etype) == et {
+					if int64(v.width) == w {
+						// TODO(rsc): Remove special case for arm here.
+						if flag == 0 || Thearch.Thechar != '5' {
+							return blsh(uint(i))
+						}
+					}
+				}
+			}
+
+			// if they overlap, disable both
+			if overlap_reg(v.offset, v.width, o, int(w)) {
+				//				print("disable overlap %s %d %d %d %d, %E != %E\n", s->name, v->offset, v->width, o, w, v->etype, et);
+				v.addr = 1
+
+				flag = 1
+			}
+		}
+	}
+
+	switch et {
+	case 0, TFUNC:
+		return zbits
+	}
+
+	if nvar >= NVAR {
+		if Debug['w'] > 1 && node != nil {
+			Fatal("variable not optimized: %v", Nconv(node, obj.FmtSharp))
+		}
+		if Debug['v'] > 0 {
+			Warn("variable not optimized: %v", Nconv(node, obj.FmtSharp))
+		}
+
+		// If we're not tracking a word in a variable, mark the rest as
+		// having its address taken, so that we keep the whole thing
+		// live at all calls. Otherwise we might optimize away part of
+		// a variable but not all of it.
+		var v *Var
+		for i := 0; i < nvar; i++ {
+			v = &vars[i]
+			if v.node == node {
+				v.addr = 1
+			}
+		}
+
+		return zbits
+	}
+
+	i := nvar
+	nvar++
+	v = &vars[i]
+	v.id = i
+	v.offset = o
+	v.name = int8(n)
+	v.etype = int8(et)
+	v.width = int(w)
+	v.addr = int8(flag) // funny punning
+	v.node = node
+
+	// node->opt is the head of a linked list
+	// of Vars within the given Node, so that
+	// we can start at a Var and find all the other
+	// Vars in the same Go variable.
+	v.nextinnode, _ = node.Opt.(*Var)
+
+	node.Opt = v
+
+	bit := blsh(uint(i))
+	if n == obj.NAME_EXTERN || n == obj.NAME_STATIC {
+		for z := 0; z < BITS; z++ {
+			externs.b[z] |= bit.b[z]
+		}
+	}
+	if n == obj.NAME_PARAM {
+		for z := 0; z < BITS; z++ {
+			params.b[z] |= bit.b[z]
+		}
+	}
+
+	if node.Class == PPARAM {
+		for z := 0; z < BITS; z++ {
+			ivar.b[z] |= bit.b[z]
+		}
+	}
+	if node.Class == PPARAMOUT {
+		for z := 0; z < BITS; z++ {
+			ovar.b[z] |= bit.b[z]
+		}
+	}
+
+	// Treat values with their address taken as live at calls,
+	// because the garbage collector's liveness analysis in ../gc/plive.c does.
+	// These must be consistent or else we will elide stores and the garbage
+	// collector will see uninitialized data.
+	// The typical case where our own analysis is out of sync is when the
+	// node appears to have its address taken but that code doesn't actually
+	// get generated and therefore doesn't show up as an address being
+	// taken when we analyze the instruction stream.
+	// One instance of this case is when a closure uses the same name as
+	// an outer variable for one of its own variables declared with :=.
+	// The parser flags the outer variable as possibly shared, and therefore
+	// sets addrtaken, even though it ends up not being actually shared.
+	// If we were better about _ elision, _ = &x would suffice too.
+	// The broader := in a closure problem is mentioned in a comment in
+	// closure.c:/^typecheckclosure and dcl.c:/^oldname.
+	if node.Addrtaken {
+		v.addr = 1
+	}
+
+	// Disable registerization for globals, because:
+	// (1) we might panic at any time and we want the recovery code
+	// to see the latest values (issue 1304).
+	// (2) we don't know what pointers might point at them and we want
+	// loads via those pointers to see updated values and vice versa (issue 7995).
+	//
+	// Disable registerization for results if using defer, because the deferred func
+	// might recover and return, causing the current values to be used.
+	if node.Class == PEXTERN || (Hasdefer != 0 && node.Class == PPARAMOUT) {
+		v.addr = 1
+	}
+
+	if Debug['R'] != 0 {
+		fmt.Printf("bit=%2d et=%v w=%d+%d %v %v flag=%d\n", i, Econv(int(et), 0), o, w, Nconv(node, obj.FmtSharp), Ctxt.Dconv(a), v.addr)
+	}
+	Ostats.Nvar++
+
+	return bit
+}
+
+var change int
+
+func prop(f *Flow, ref Bits, cal Bits) {
+	var f1 *Flow
+	var r1 *Reg
+	var z int
+	var i int
+	var v *Var
+	var v1 *Var
+
+	for f1 = f; f1 != nil; f1 = f1.P1 {
+		r1 = f1.Data.(*Reg)
+		for z = 0; z < BITS; z++ {
+			ref.b[z] |= r1.refahead.b[z]
+			if ref.b[z] != r1.refahead.b[z] {
+				r1.refahead.b[z] = ref.b[z]
+				change = 1
+			}
+
+			cal.b[z] |= r1.calahead.b[z]
+			if cal.b[z] != r1.calahead.b[z] {
+				r1.calahead.b[z] = cal.b[z]
+				change = 1
+			}
+		}
+
+		switch f1.Prog.As {
+		case obj.ACALL:
+			if Noreturn(f1.Prog) {
+				break
+			}
+
+			// Mark all input variables (ivar) as used, because that's what the
+			// liveness bitmaps say. The liveness bitmaps say that so that a
+			// panic will not show stale values in the parameter dump.
+			// Mark variables with a recent VARDEF (r1->act) as used,
+			// so that the optimizer flushes initializations to memory,
+			// so that if a garbage collection happens during this CALL,
+			// the collector will see initialized memory. Again this is to
+			// match what the liveness bitmaps say.
+			for z = 0; z < BITS; z++ {
+				cal.b[z] |= ref.b[z] | externs.b[z] | ivar.b[z] | r1.act.b[z]
+				ref.b[z] = 0
+			}
+
+			// cal.b is the current approximation of what's live across the call.
+			// Every bit in cal.b is a single stack word. For each such word,
+			// find all the other tracked stack words in the same Go variable
+			// (struct/slice/string/interface) and mark them live too.
+			// This is necessary because the liveness analysis for the garbage
+			// collector works at variable granularity, not at word granularity.
+			// It is fundamental for slice/string/interface: the garbage collector
+			// needs the whole value, not just some of the words, in order to
+			// interpret the other bits correctly. Specifically, slice needs a consistent
+			// ptr and cap, string needs a consistent ptr and len, and interface
+			// needs a consistent type word and data word.
+			for z = 0; z < BITS; z++ {
+				if cal.b[z] == 0 {
+					continue
+				}
+				for i = 0; i < 64; i++ {
+					if z*64+i >= nvar || (cal.b[z]>>uint(i))&1 == 0 {
+						continue
+					}
+					v = &vars[z*64+i]
+					if v.node.Opt == nil { // v represents a fixed register, not a Go variable
+						continue
+					}
+
+					// v->node->opt is the head of a linked list of Vars
+					// corresponding to tracked words from the Go variable v->node.
+					// Walk the list and set all the bits.
+					// For a large struct this could end up being quadratic:
+					// after the first setting, the outer loop (for z, i) would see a 1 bit
+					// for all of the remaining words in the struct, and for each such
+					// word would go through and turn on all the bits again.
+					// To avoid the quadratic behavior, we only turn on the bits if
+					// v is the head of the list or if the head's bit is not yet turned on.
+					// This will set the bits at most twice, keeping the overall loop linear.
+					v1, _ = v.node.Opt.(*Var)
+
+					if v == v1 || !btest(&cal, uint(v1.id)) {
+						for ; v1 != nil; v1 = v1.nextinnode {
+							biset(&cal, uint(v1.id))
+						}
+					}
+				}
+			}
+
+		case obj.ATEXT:
+			for z = 0; z < BITS; z++ {
+				cal.b[z] = 0
+				ref.b[z] = 0
+			}
+
+		case obj.ARET:
+			for z = 0; z < BITS; z++ {
+				cal.b[z] = externs.b[z] | ovar.b[z]
+				ref.b[z] = 0
+			}
+		}
+
+		for z = 0; z < BITS; z++ {
+			ref.b[z] = ref.b[z]&^r1.set.b[z] | r1.use1.b[z] | r1.use2.b[z]
+			cal.b[z] &^= (r1.set.b[z] | r1.use1.b[z] | r1.use2.b[z])
+			r1.refbehind.b[z] = ref.b[z]
+			r1.calbehind.b[z] = cal.b[z]
+		}
+
+		if f1.Active != 0 {
+			break
+		}
+		f1.Active = 1
+	}
+
+	var r *Reg
+	var f2 *Flow
+	for ; f != f1; f = f.P1 {
+		r = f.Data.(*Reg)
+		for f2 = f.P2; f2 != nil; f2 = f2.P2link {
+			prop(f2, r.refbehind, r.calbehind)
+		}
+	}
+}
+
+func synch(f *Flow, dif Bits) {
+	var r1 *Reg
+	var z int
+
+	for f1 := f; f1 != nil; f1 = f1.S1 {
+		r1 = f1.Data.(*Reg)
+		for z = 0; z < BITS; z++ {
+			dif.b[z] = dif.b[z]&^(^r1.refbehind.b[z]&r1.refahead.b[z]) | r1.set.b[z] | r1.regdiff.b[z]
+			if dif.b[z] != r1.regdiff.b[z] {
+				r1.regdiff.b[z] = dif.b[z]
+				change = 1
+			}
+		}
+
+		if f1.Active != 0 {
+			break
+		}
+		f1.Active = 1
+		for z = 0; z < BITS; z++ {
+			dif.b[z] &^= (^r1.calbehind.b[z] & r1.calahead.b[z])
+		}
+		if f1.S2 != nil {
+			synch(f1.S2, dif)
+		}
+	}
+}
+
+func allreg(b uint64, r *Rgn) uint64 {
+	v := &vars[r.varno]
+	r.regno = 0
+	switch v.etype {
+	default:
+		Fatal("unknown etype %d/%v", Bitno(b), Econv(int(v.etype), 0))
+
+	case TINT8,
+		TUINT8,
+		TINT16,
+		TUINT16,
+		TINT32,
+		TUINT32,
+		TINT64,
+		TUINT64,
+		TINT,
+		TUINT,
+		TUINTPTR,
+		TBOOL,
+		TPTR32,
+		TPTR64:
+		i := Thearch.BtoR(^b)
+		if i != 0 && r.cost > 0 {
+			r.regno = int16(i)
+			return Thearch.RtoB(i)
+		}
+
+	case TFLOAT32, TFLOAT64:
+		i := Thearch.BtoF(^b)
+		if i != 0 && r.cost > 0 {
+			r.regno = int16(i)
+			return Thearch.FtoB(i)
+		}
+	}
+
+	return 0
+}
+
+func LOAD(r *Reg, z int) uint64 {
+	return ^r.refbehind.b[z] & r.refahead.b[z]
+}
+
+func STORE(r *Reg, z int) uint64 {
+	return ^r.calbehind.b[z] & r.calahead.b[z]
+}
+
+// Cost parameters
+const (
+	CLOAD = 5 // cost of load
+	CREF  = 5 // cost of reference if not registerized
+	LOOP  = 3 // loop execution count (applied in popt.go)
+)
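+
+// paint1 applies these when costing a region: each reference that
+// would become a register access credits CREF per loop-scaled
+// execution (f.Loop), while each load or store still needed at a
+// region boundary charges CLOAD. regopt keeps a region only if the
+// net change is positive.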
+
+func paint1(f *Flow, bn int) {
+	z := bn / 64
+	bb := uint64(1 << uint(bn%64))
+	r := f.Data.(*Reg)
+	if r.act.b[z]&bb != 0 {
+		return
+	}
+	var f1 *Flow
+	var r1 *Reg
+	for {
+		if r.refbehind.b[z]&bb == 0 {
+			break
+		}
+		f1 = f.P1
+		if f1 == nil {
+			break
+		}
+		r1 = f1.Data.(*Reg)
+		if r1.refahead.b[z]&bb == 0 {
+			break
+		}
+		if r1.act.b[z]&bb != 0 {
+			break
+		}
+		f = f1
+		r = r1
+	}
+
+	if LOAD(r, z)&^(r.set.b[z]&^(r.use1.b[z]|r.use2.b[z]))&bb != 0 {
+		change -= CLOAD * int(f.Loop)
+	}
+
+	for {
+		r.act.b[z] |= bb
+
+		if f.Prog.As != obj.ANOP { // don't give credit for NOPs
+			if r.use1.b[z]&bb != 0 {
+				change += CREF * int(f.Loop)
+			}
+			if (r.use2.b[z]|r.set.b[z])&bb != 0 {
+				change += CREF * int(f.Loop)
+			}
+		}
+
+		if STORE(r, z)&r.regdiff.b[z]&bb != 0 {
+			change -= CLOAD * int(f.Loop)
+		}
+
+		if r.refbehind.b[z]&bb != 0 {
+			for f1 = f.P2; f1 != nil; f1 = f1.P2link {
+				if (f1.Data.(*Reg)).refahead.b[z]&bb != 0 {
+					paint1(f1, bn)
+				}
+			}
+		}
+
+		if r.refahead.b[z]&bb == 0 {
+			break
+		}
+		f1 = f.S2
+		if f1 != nil {
+			if (f1.Data.(*Reg)).refbehind.b[z]&bb != 0 {
+				paint1(f1, bn)
+			}
+		}
+		f = f.S1
+		if f == nil {
+			break
+		}
+		r = f.Data.(*Reg)
+		if r.act.b[z]&bb != 0 {
+			break
+		}
+		if r.refbehind.b[z]&bb == 0 {
+			break
+		}
+	}
+}
+
+func paint2(f *Flow, bn int, depth int) uint64 {
+	z := bn / 64
+	bb := uint64(1 << uint(bn%64))
+	vreg := regbits
+	r := f.Data.(*Reg)
+	if r.act.b[z]&bb == 0 {
+		return vreg
+	}
+	var r1 *Reg
+	var f1 *Flow
+	for {
+		if r.refbehind.b[z]&bb == 0 {
+			break
+		}
+		f1 = f.P1
+		if f1 == nil {
+			break
+		}
+		r1 = f1.Data.(*Reg)
+		if r1.refahead.b[z]&bb == 0 {
+			break
+		}
+		if r1.act.b[z]&bb == 0 {
+			break
+		}
+		f = f1
+		r = r1
+	}
+
+	for {
+		if Debug['R'] != 0 && Debug['v'] != 0 {
+			fmt.Printf("  paint2 %d %v\n", depth, f.Prog)
+		}
+
+		r.act.b[z] &^= bb
+
+		vreg |= r.regu
+
+		if r.refbehind.b[z]&bb != 0 {
+			for f1 = f.P2; f1 != nil; f1 = f1.P2link {
+				if (f1.Data.(*Reg)).refahead.b[z]&bb != 0 {
+					vreg |= paint2(f1, bn, depth+1)
+				}
+			}
+		}
+
+		if r.refahead.b[z]&bb == 0 {
+			break
+		}
+		f1 = f.S2
+		if f1 != nil {
+			if (f1.Data.(*Reg)).refbehind.b[z]&bb != 0 {
+				vreg |= paint2(f1, bn, depth+1)
+			}
+		}
+		f = f.S1
+		if f == nil {
+			break
+		}
+		r = f.Data.(*Reg)
+		if r.act.b[z]&bb == 0 {
+			break
+		}
+		if r.refbehind.b[z]&bb == 0 {
+			break
+		}
+	}
+
+	return vreg
+}
+
+func paint3(f *Flow, bn int, rb uint64, rn int) {
+	z := bn / 64
+	bb := uint64(1 << uint(bn%64))
+	r := f.Data.(*Reg)
+	if r.act.b[z]&bb != 0 {
+		return
+	}
+	var r1 *Reg
+	var f1 *Flow
+	for {
+		if r.refbehind.b[z]&bb == 0 {
+			break
+		}
+		f1 = f.P1
+		if f1 == nil {
+			break
+		}
+		r1 = f1.Data.(*Reg)
+		if r1.refahead.b[z]&bb == 0 {
+			break
+		}
+		if r1.act.b[z]&bb != 0 {
+			break
+		}
+		f = f1
+		r = r1
+	}
+
+	if LOAD(r, z)&^(r.set.b[z]&^(r.use1.b[z]|r.use2.b[z]))&bb != 0 {
+		addmove(f, bn, rn, 0)
+	}
+	var p *obj.Prog
+	for {
+		r.act.b[z] |= bb
+		p = f.Prog
+
+		if r.use1.b[z]&bb != 0 {
+			if Debug['R'] != 0 && Debug['v'] != 0 {
+				fmt.Printf("%v", p)
+			}
+			addreg(&p.From, rn)
+			if Debug['R'] != 0 && Debug['v'] != 0 {
+				fmt.Printf(" ===change== %v\n", p)
+			}
+		}
+
+		if (r.use2.b[z]|r.set.b[z])&bb != 0 {
+			if Debug['R'] != 0 && Debug['v'] != 0 {
+				fmt.Printf("%v", p)
+			}
+			addreg(&p.To, rn)
+			if Debug['R'] != 0 && Debug['v'] != 0 {
+				fmt.Printf(" ===change== %v\n", p)
+			}
+		}
+
+		if STORE(r, z)&r.regdiff.b[z]&bb != 0 {
+			addmove(f, bn, rn, 1)
+		}
+		r.regu |= rb
+
+		if r.refbehind.b[z]&bb != 0 {
+			for f1 = f.P2; f1 != nil; f1 = f1.P2link {
+				if (f1.Data.(*Reg)).refahead.b[z]&bb != 0 {
+					paint3(f1, bn, rb, rn)
+				}
+			}
+		}
+
+		if r.refahead.b[z]&bb == 0 {
+			break
+		}
+		f1 = f.S2
+		if f1 != nil {
+			if (f1.Data.(*Reg)).refbehind.b[z]&bb != 0 {
+				paint3(f1, bn, rb, rn)
+			}
+		}
+		f = f.S1
+		if f == nil {
+			break
+		}
+		r = f.Data.(*Reg)
+		if r.act.b[z]&bb != 0 {
+			break
+		}
+		if r.refbehind.b[z]&bb == 0 {
+			break
+		}
+	}
+}
+
+func addreg(a *obj.Addr, rn int) {
+	a.Sym = nil
+	a.Node = nil
+	a.Offset = 0
+	a.Type = obj.TYPE_REG
+	a.Reg = int16(rn)
+	a.Name = 0
+
+	Ostats.Ncvtreg++
+}
+
+func dumpone(f *Flow, isreg int) {
+	fmt.Printf("%d:%v", f.Loop, f.Prog)
+	if isreg != 0 {
+		r := f.Data.(*Reg)
+		var bit Bits
+		for z := 0; z < BITS; z++ {
+			bit.b[z] = r.set.b[z] | r.use1.b[z] | r.use2.b[z] | r.refbehind.b[z] | r.refahead.b[z] | r.calbehind.b[z] | r.calahead.b[z] | r.regdiff.b[z] | r.act.b[z] | 0
+		}
+		if bany(&bit) {
+			fmt.Printf("\t")
+			if bany(&r.set) {
+				fmt.Printf(" s:%v", &r.set)
+			}
+			if bany(&r.use1) {
+				fmt.Printf(" u1:%v", &r.use1)
+			}
+			if bany(&r.use2) {
+				fmt.Printf(" u2:%v", &r.use2)
+			}
+			if bany(&r.refbehind) {
+				fmt.Printf(" rb:%v ", &r.refbehind)
+			}
+			if bany(&r.refahead) {
+				fmt.Printf(" ra:%v ", &r.refahead)
+			}
+			if bany(&r.calbehind) {
+				fmt.Printf(" cb:%v ", &r.calbehind)
+			}
+			if bany(&r.calahead) {
+				fmt.Printf(" ca:%v ", &r.calahead)
+			}
+			if bany(&r.regdiff) {
+				fmt.Printf(" d:%v ", &r.regdiff)
+			}
+			if bany(&r.act) {
+				fmt.Printf(" a:%v ", &r.act)
+			}
+		}
+	}
+
+	fmt.Printf("\n")
+}
+
+func Dumpit(str string, r0 *Flow, isreg int) {
+	var r1 *Flow
+
+	fmt.Printf("\n%s\n", str)
+	for r := r0; r != nil; r = r.Link {
+		dumpone(r, isreg)
+		r1 = r.P2
+		if r1 != nil {
+			fmt.Printf("\tpred:")
+			for ; r1 != nil; r1 = r1.P2link {
+				fmt.Printf(" %.4d", uint(int(r1.Prog.Pc)))
+			}
+			if r.P1 != nil {
+				fmt.Printf(" (and %.4d)", uint(int(r.P1.Prog.Pc)))
+			} else {
+				fmt.Printf(" (only)")
+			}
+			fmt.Printf("\n")
+		}
+
+		// Print successors if it's not just the next one
+		if r.S1 != r.Link || r.S2 != nil {
+			fmt.Printf("\tsucc:")
+			if r.S1 != nil {
+				fmt.Printf(" %.4d", uint(int(r.S1.Prog.Pc)))
+			}
+			if r.S2 != nil {
+				fmt.Printf(" %.4d", uint(int(r.S2.Prog.Pc)))
+			}
+			fmt.Printf("\n")
+		}
+	}
+}
+
+func regopt(firstp *obj.Prog) {
+	mergetemp(firstp)
+
+	/*
+	 * control flow is more complicated in generated Go code
+	 * than in generated C code. Define pseudo-variables for
+	 * registers, so we have complete register usage information.
+	 */
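+	// The passes below, in order:
+	//   1    build the flow graph and per-instruction use/set bits
+	//   2    find looping structure (flowrpo)
+	//   2.5  propagate fat VARDEF coverage forward
+	//   3    propagate usage backward until the graph is complete
+	//   4    propagate register/variable synchrony forward
+	//   4.5  fold register pseudo-variables into regu
+	//   5    isolate regions and compute costs (paint1)
+	//   6    choose registers (paint2) and rewrite the code (paint3)
+	//   7    peephole optimization on basic blocks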
+	var nreg int
+	regnames := Thearch.Regnames(&nreg)
+
+	nvar = nreg
+	for i := 0; i < nreg; i++ {
+		vars[i] = Var{}
+	}
+	for i := 0; i < nreg; i++ {
+		if regnodes[i] == nil {
+			regnodes[i] = newname(Lookup(regnames[i]))
+		}
+		vars[i].node = regnodes[i]
+	}
+
+	regbits = Thearch.Excludedregs()
+	externs = zbits
+	params = zbits
+	consts = zbits
+	addrs = zbits
+	ivar = zbits
+	ovar = zbits
+
+	/*
+	 * pass 1
+	 * build aux data structure
+	 * allocate pcs
+	 * find use and set of variables
+	 */
+	g := Flowstart(firstp, func() interface{} { return new(Reg) })
+	if g == nil {
+		for i := 0; i < nvar; i++ {
+			vars[i].node.Opt = nil
+		}
+		return
+	}
+
+	firstf := g.Start
+
+	for f := firstf; f != nil; f = f.Link {
+		p := f.Prog
+		if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
+			continue
+		}
+
+		// Avoid making variables for direct-called functions.
+		if p.As == obj.ACALL && p.To.Type == obj.TYPE_MEM && p.To.Name == obj.NAME_EXTERN {
+			continue
+		}
+
+		// from vs to doesn't matter for registers.
+		r := f.Data.(*Reg)
+		r.use1.b[0] |= p.Info.Reguse | p.Info.Regindex
+		r.set.b[0] |= p.Info.Regset
+
+		bit := mkvar(f, &p.From)
+		if bany(&bit) {
+			if p.Info.Flags&LeftAddr != 0 {
+				setaddrs(bit)
+			}
+			if p.Info.Flags&LeftRead != 0 {
+				for z := 0; z < BITS; z++ {
+					r.use1.b[z] |= bit.b[z]
+				}
+			}
+			if p.Info.Flags&LeftWrite != 0 {
+				for z := 0; z < BITS; z++ {
+					r.set.b[z] |= bit.b[z]
+				}
+			}
+		}
+
+		// Compute used register for reg
+		if p.Info.Flags&RegRead != 0 {
+			r.use1.b[0] |= Thearch.RtoB(int(p.Reg))
+		}
+
+		// Currently we never generate three register forms.
+		// If we do, this will need to change.
+		if p.From3.Type != obj.TYPE_NONE {
+			Fatal("regopt not implemented for from3")
+		}
+
+		bit = mkvar(f, &p.To)
+		if bany(&bit) {
+			if p.Info.Flags&RightAddr != 0 {
+				setaddrs(bit)
+			}
+			if p.Info.Flags&RightRead != 0 {
+				for z := 0; z < BITS; z++ {
+					r.use2.b[z] |= bit.b[z]
+				}
+			}
+			if p.Info.Flags&RightWrite != 0 {
+				for z := 0; z < BITS; z++ {
+					r.set.b[z] |= bit.b[z]
+				}
+			}
+		}
+	}
+
+	for i := 0; i < nvar; i++ {
+		v := &vars[i]
+		if v.addr != 0 {
+			bit := blsh(uint(i))
+			for z := 0; z < BITS; z++ {
+				addrs.b[z] |= bit.b[z]
+			}
+		}
+
+		if Debug['R'] != 0 && Debug['v'] != 0 {
+			fmt.Printf("bit=%2d addr=%d et=%v w=%-2d s=%v + %d\n", i, v.addr, Econv(int(v.etype), 0), v.width, v.node, v.offset)
+		}
+	}
+
+	if Debug['R'] != 0 && Debug['v'] != 0 {
+		Dumpit("pass1", firstf, 1)
+	}
+
+	/*
+	 * pass 2
+	 * find looping structure
+	 */
+	flowrpo(g)
+
+	if Debug['R'] != 0 && Debug['v'] != 0 {
+		Dumpit("pass2", firstf, 1)
+	}
+
+	/*
+	 * pass 2.5
+	 * iterate propagating fat vardef covering forward
+	 * r->act records vars with a VARDEF since the last CALL.
+	 * (r->act will be reused in pass 5 for something else,
+	 * but we'll be done with it by then.)
+	 */
+	active := 0
+
+	for f := firstf; f != nil; f = f.Link {
+		f.Active = 0
+		r := f.Data.(*Reg)
+		r.act = zbits
+	}
+
+	for f := firstf; f != nil; f = f.Link {
+		p := f.Prog
+		if p.As == obj.AVARDEF && Isfat(((p.To.Node).(*Node)).Type) && ((p.To.Node).(*Node)).Opt != nil {
+			active++
+			walkvardef(p.To.Node.(*Node), f, active)
+		}
+	}
+
+	/*
+	 * pass 3
+	 * iterate propagating usage
+	 * 	back until flow graph is complete
+	 */
+	var f1 *Flow
+	var i int
+	var f *Flow
+loop1:
+	change = 0
+
+	for f = firstf; f != nil; f = f.Link {
+		f.Active = 0
+	}
+	for f = firstf; f != nil; f = f.Link {
+		if f.Prog.As == obj.ARET {
+			prop(f, zbits, zbits)
+		}
+	}
+
+	/* pick up unreachable code */
+loop11:
+	i = 0
+
+	for f = firstf; f != nil; f = f1 {
+		f1 = f.Link
+		if f1 != nil && f1.Active != 0 && f.Active == 0 {
+			prop(f, zbits, zbits)
+			i = 1
+		}
+	}
+
+	if i != 0 {
+		goto loop11
+	}
+	if change != 0 {
+		goto loop1
+	}
+
+	if Debug['R'] != 0 && Debug['v'] != 0 {
+		Dumpit("pass3", firstf, 1)
+	}
+
+	/*
+	 * pass 4
+	 * iterate propagating register/variable synchrony
+	 * 	forward until graph is complete
+	 */
+loop2:
+	change = 0
+
+	for f = firstf; f != nil; f = f.Link {
+		f.Active = 0
+	}
+	synch(firstf, zbits)
+	if change != 0 {
+		goto loop2
+	}
+
+	if Debug['R'] != 0 && Debug['v'] != 0 {
+		Dumpit("pass4", firstf, 1)
+	}
+
+	/*
+	 * pass 4.5
+	 * move register pseudo-variables into regu.
+	 */
+	mask := uint64((1 << uint(nreg)) - 1)
+	for f := firstf; f != nil; f = f.Link {
+		r := f.Data.(*Reg)
+		r.regu = (r.refbehind.b[0] | r.set.b[0]) & mask
+		r.set.b[0] &^= mask
+		r.use1.b[0] &^= mask
+		r.use2.b[0] &^= mask
+		r.refbehind.b[0] &^= mask
+		r.refahead.b[0] &^= mask
+		r.calbehind.b[0] &^= mask
+		r.calahead.b[0] &^= mask
+		r.regdiff.b[0] &^= mask
+		r.act.b[0] &^= mask
+	}
+
+	if Debug['R'] != 0 && Debug['v'] != 0 {
+		Dumpit("pass4.5", firstf, 1)
+	}
+
+	/*
+	 * pass 5
+	 * isolate regions
+	 * calculate costs (paint1)
+	 */
+	var bit Bits
+	if f := firstf; f != nil {
+		r := f.Data.(*Reg)
+		for z := 0; z < BITS; z++ {
+			bit.b[z] = (r.refahead.b[z] | r.calahead.b[z]) &^ (externs.b[z] | params.b[z] | addrs.b[z] | consts.b[z])
+		}
+		if bany(&bit) && f.Refset == 0 {
+			// should never happen - all variables are preset
+			if Debug['w'] != 0 {
+				fmt.Printf("%v: used and not set: %v\n", f.Prog.Line(), &bit)
+			}
+			f.Refset = 1
+		}
+	}
+
+	for f := firstf; f != nil; f = f.Link {
+		(f.Data.(*Reg)).act = zbits
+	}
+	nregion = 0
+	region = region[:0]
+	var rgp *Rgn
+	for f := firstf; f != nil; f = f.Link {
+		r := f.Data.(*Reg)
+		for z := 0; z < BITS; z++ {
+			bit.b[z] = r.set.b[z] &^ (r.refahead.b[z] | r.calahead.b[z] | addrs.b[z])
+		}
+		if bany(&bit) && f.Refset == 0 {
+			if Debug['w'] != 0 {
+				fmt.Printf("%v: set and not used: %v\n", f.Prog.Line(), &bit)
+			}
+			f.Refset = 1
+			Thearch.Excise(f)
+		}
+
+		for z := 0; z < BITS; z++ {
+			bit.b[z] = LOAD(r, z) &^ (r.act.b[z] | addrs.b[z])
+		}
+		for bany(&bit) {
+			i = bnum(bit)
+			change = 0
+			paint1(f, i)
+			biclr(&bit, uint(i))
+			if change <= 0 {
+				continue
+			}
+			if nregion >= MaxRgn {
+				nregion++
+				continue
+			}
+
+			region = append(region, Rgn{
+				enter: f,
+				cost:  int16(change),
+				varno: int16(i),
+			})
+			nregion++
+		}
+	}
+
+	if false && Debug['v'] != 0 && strings.Contains(Curfn.Nname.Sym.Name, "Parse") {
+		Warn("regions: %d\n", nregion)
+	}
+	if nregion >= MaxRgn {
+		if Debug['v'] != 0 {
+			Warn("too many regions: %d\n", nregion)
+		}
+		nregion = MaxRgn
+	}
+
+	sort.Sort(rcmp(region[:nregion]))
+
+	if Debug['R'] != 0 && Debug['v'] != 0 {
+		Dumpit("pass5", firstf, 1)
+	}
+
+	/*
+	 * pass 6
+	 * determine used registers (paint2)
+	 * replace code (paint3)
+	 */
+	if Debug['R'] != 0 && Debug['v'] != 0 {
+		fmt.Printf("\nregisterizing\n")
+	}
+	var usedreg uint64
+	var vreg uint64
+	for i := 0; i < nregion; i++ {
+		rgp = &region[i]
+		if Debug['R'] != 0 && Debug['v'] != 0 {
+			fmt.Printf("region %d: cost %d varno %d enter %d\n", i, rgp.cost, rgp.varno, rgp.enter.Prog.Pc)
+		}
+		bit = blsh(uint(rgp.varno))
+		usedreg = paint2(rgp.enter, int(rgp.varno), 0)
+		vreg = allreg(usedreg, rgp)
+		if rgp.regno != 0 {
+			if Debug['R'] != 0 && Debug['v'] != 0 {
+				v := &vars[rgp.varno]
+				fmt.Printf("registerize %v+%d (bit=%2d et=%v) in %v usedreg=%#x vreg=%#x\n", v.node, v.offset, rgp.varno, Econv(int(v.etype), 0), obj.Rconv(int(rgp.regno)), usedreg, vreg)
+			}
+
+			paint3(rgp.enter, int(rgp.varno), vreg, int(rgp.regno))
+		}
+	}
+
+	/*
+	 * free aux structures. peep allocates new ones.
+	 */
+	for i := 0; i < nvar; i++ {
+		vars[i].node.Opt = nil
+	}
+	Flowend(g)
+	firstf = nil
+
+	if Debug['R'] != 0 && Debug['v'] != 0 {
+		// Rebuild flow graph, since we inserted instructions
+		g := Flowstart(firstp, nil)
+		firstf = g.Start
+		Dumpit("pass6", firstf, 0)
+		Flowend(g)
+		firstf = nil
+	}
+
+	/*
+	 * pass 7
+	 * peep-hole on basic block
+	 */
+	if Debug['R'] == 0 || Debug['P'] != 0 {
+		Thearch.Peep(firstp)
+	}
+
+	/*
+	 * eliminate nops
+	 */
+	for p := firstp; p != nil; p = p.Link {
+		for p.Link != nil && p.Link.As == obj.ANOP {
+			p.Link = p.Link.Link
+		}
+		if p.To.Type == obj.TYPE_BRANCH {
+			for p.To.Val.(*obj.Prog) != nil && p.To.Val.(*obj.Prog).As == obj.ANOP {
+				p.To.Val = p.To.Val.(*obj.Prog).Link
+			}
+		}
+	}
+
+	if Debug['R'] != 0 {
+		if Ostats.Ncvtreg != 0 || Ostats.Nspill != 0 || Ostats.Nreload != 0 || Ostats.Ndelmov != 0 || Ostats.Nvar != 0 || Ostats.Naddr != 0 || false {
+			fmt.Printf("\nstats\n")
+		}
+
+		if Ostats.Ncvtreg != 0 {
+			fmt.Printf("\t%4d cvtreg\n", Ostats.Ncvtreg)
+		}
+		if Ostats.Nspill != 0 {
+			fmt.Printf("\t%4d spill\n", Ostats.Nspill)
+		}
+		if Ostats.Nreload != 0 {
+			fmt.Printf("\t%4d reload\n", Ostats.Nreload)
+		}
+		if Ostats.Ndelmov != 0 {
+			fmt.Printf("\t%4d delmov\n", Ostats.Ndelmov)
+		}
+		if Ostats.Nvar != 0 {
+			fmt.Printf("\t%4d var\n", Ostats.Nvar)
+		}
+		if Ostats.Naddr != 0 {
+			fmt.Printf("\t%4d addr\n", Ostats.Naddr)
+		}
+
+		Ostats = OptStats{}
+	}
+}
+
+// bany reports whether any bits in a are set.
+func bany(a *Bits) bool {
+	for _, x := range &a.b { // & to avoid making a copy of a.b
+		if x != 0 {
+			return true
+		}
+	}
+	return false
+}
+
+// bnum reports the lowest index of a 1 bit in a.
+func bnum(a Bits) int {
+	for i, x := range &a.b { // & to avoid making a copy of a.b
+		if x != 0 {
+			return 64*i + Bitno(x)
+		}
+	}
+
+	Fatal("bad in bnum")
+	return 0
+}
+
+// blsh returns a Bits with 1 at index n, 0 elsewhere (1<<n).
+func blsh(n uint) Bits {
+	c := zbits
+	c.b[n/64] = 1 << (n % 64)
+	return c
+}
+
+// btest reports whether bit n is 1.
+func btest(a *Bits, n uint) bool {
+	return a.b[n/64]&(1<<(n%64)) != 0
+}
+
+// biset sets bit n to 1.
+func biset(a *Bits, n uint) {
+	a.b[n/64] |= 1 << (n % 64)
+}
+
+// biclr sets bit n to 0.
+func biclr(a *Bits, n uint) {
+	a.b[n/64] &^= (1 << (n % 64))
+}
+
+// Bitno reports the lowest index of a 1 bit in b.
+// It calls Fatal if there is no 1 bit.
+func Bitno(b uint64) int {
+	if b == 0 {
+		Fatal("bad in bitno")
+	}
+	n := 0
+	if b&(1<<32-1) == 0 {
+		n += 32
+		b >>= 32
+	}
+	if b&(1<<16-1) == 0 {
+		n += 16
+		b >>= 16
+	}
+	if b&(1<<8-1) == 0 {
+		n += 8
+		b >>= 8
+	}
+	if b&(1<<4-1) == 0 {
+		n += 4
+		b >>= 4
+	}
+	if b&(1<<2-1) == 0 {
+		n += 2
+		b >>= 2
+	}
+	if b&1 == 0 {
+		n++
+	}
+	return n
+}
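+
+// For example, Bitno(0x10) is 4: each step above halves the remaining
+// width, so the lowest set bit is located in at most six tests.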
+
+// String returns a space-separated list of the variables represented by bits.
+func (bits Bits) String() string {
+	// Note: This method takes a value receiver, both for convenience
+	// and to make it safe to modify the bits as we process them.
+	// Even so, most prints above use &bits, because then the value
+	// being stored in the interface{} is a pointer and does not require
+	// an allocation and copy to create the interface{}.
+	var buf bytes.Buffer
+	sep := ""
+	for bany(&bits) {
+		i := bnum(bits)
+		buf.WriteString(sep)
+		sep = " "
+		v := &vars[i]
+		if v.node == nil || v.node.Sym == nil {
+			fmt.Fprintf(&buf, "$%d", i)
+		} else {
+			fmt.Fprintf(&buf, "%s(%d)", v.node.Sym.Name, i)
+			if v.offset != 0 {
+				fmt.Fprintf(&buf, "%+d", int64(v.offset))
+			}
+		}
+		biclr(&bits, uint(i))
+	}
+	return buf.String()
+}
diff --git a/src/cmd/compile/internal/gc/select.go b/src/cmd/compile/internal/gc/select.go
new file mode 100644
index 0000000..3a28ea3
--- /dev/null
+++ b/src/cmd/compile/internal/gc/select.go
@@ -0,0 +1,371 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+/*
+ * select
+ */
+func typecheckselect(sel *Node) {
+	var ncase *Node
+	var n *Node
+
+	var def *Node
+	lno := int(setlineno(sel))
+	count := 0
+	typechecklist(sel.Ninit, Etop)
+	for l := sel.List; l != nil; l = l.Next {
+		count++
+		ncase = l.N
+		setlineno(ncase)
+		if ncase.Op != OXCASE {
+			Fatal("typecheckselect %v", Oconv(int(ncase.Op), 0))
+		}
+
+		if ncase.List == nil {
+			// default
+			if def != nil {
+				Yyerror("multiple defaults in select (first at %v)", def.Line())
+			} else {
+				def = ncase
+			}
+		} else if ncase.List.Next != nil {
+			Yyerror("select cases cannot be lists")
+		} else {
+			n = typecheck(&ncase.List.N, Etop)
+			ncase.Left = n
+			ncase.List = nil
+			setlineno(n)
+			switch n.Op {
+			default:
+				Yyerror("select case must be receive, send or assign recv")
+
+				// convert x = <-c into OSELRECV(x, <-c).
+			// remove implicit conversions; the eventual assignment
+			// will reintroduce them.
+			case OAS:
+				if (n.Right.Op == OCONVNOP || n.Right.Op == OCONVIFACE) && n.Right.Implicit {
+					n.Right = n.Right.Left
+				}
+
+				if n.Right.Op != ORECV {
+					Yyerror("select assignment must have receive on right hand side")
+					break
+				}
+
+				n.Op = OSELRECV
+
+				// convert x, ok = <-c into OSELRECV2(x, <-c) with ntest=ok
+			case OAS2RECV:
+				if n.Rlist.N.Op != ORECV {
+					Yyerror("select assignment must have receive on right hand side")
+					break
+				}
+
+				n.Op = OSELRECV2
+				n.Left = n.List.N
+				n.Ntest = n.List.Next.N
+				n.List = nil
+				n.Right = n.Rlist.N
+				n.Rlist = nil
+
+				// convert <-c into OSELRECV(N, <-c)
+			case ORECV:
+				n = Nod(OSELRECV, nil, n)
+
+				n.Typecheck = 1
+				ncase.Left = n
+
+			case OSEND:
+				break
+			}
+		}
+
+		typechecklist(ncase.Nbody, Etop)
+	}
+
+	sel.Xoffset = int64(count)
+	lineno = int32(lno)
+}
+
+func walkselect(sel *Node) {
+	if sel.List == nil && sel.Xoffset != 0 {
+		Fatal("double walkselect") // already rewrote
+	}
+
+	lno := int(setlineno(sel))
+	i := count(sel.List)
+
+	// optimization: zero-case select
+	var init *NodeList
+	var r *Node
+	var n *Node
+	var var_ *Node
+	var selv *Node
+	var cas *Node
+	if i == 0 {
+		sel.Nbody = list1(mkcall("block", nil, nil))
+		goto out
+	}
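+
+	// An empty select,
+	//	select {}
+	// thus compiles to a single call of the runtime's block, which
+	// parks the goroutine forever.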
+
+	// optimization: one-case select: single op.
+	// TODO(rsc): Reenable optimization once order.c can handle it.
+	// golang.org/issue/7672.
+	if i == 1 {
+		cas := sel.List.N
+		setlineno(cas)
+		l := cas.Ninit
+		if cas.Left != nil { // not default:
+			n := cas.Left
+			l = concat(l, n.Ninit)
+			n.Ninit = nil
+			var ch *Node
+			switch n.Op {
+			default:
+				Fatal("select %v", Oconv(int(n.Op), 0))
+
+				// ok already
+			case OSEND:
+				ch = n.Left
+
+			case OSELRECV, OSELRECV2:
+				ch = n.Right.Left
+				if n.Op == OSELRECV || n.Ntest == nil {
+					if n.Left == nil {
+						n = n.Right
+					} else {
+						n.Op = OAS
+					}
+					break
+				}
+
+				if n.Left == nil {
+					typecheck(&nblank, Erv|Easgn)
+					n.Left = nblank
+				}
+
+				n.Op = OAS2
+				n.List = list(list1(n.Left), n.Ntest)
+				n.Rlist = list1(n.Right)
+				n.Right = nil
+				n.Left = nil
+				n.Ntest = nil
+				n.Typecheck = 0
+				typecheck(&n, Etop)
+			}
+
+			// if ch == nil { block() }; n;
+			a := Nod(OIF, nil, nil)
+
+			a.Ntest = Nod(OEQ, ch, nodnil())
+			a.Nbody = list1(mkcall("block", nil, &l))
+			typecheck(&a, Etop)
+			l = list(l, a)
+			l = list(l, n)
+		}
+
+		l = concat(l, cas.Nbody)
+		sel.Nbody = l
+		goto out
+	}
+
+	// convert case value arguments to addresses.
+	// this rewrite is used by both the general code and the next optimization.
+	for l := sel.List; l != nil; l = l.Next {
+		cas = l.N
+		setlineno(cas)
+		n = cas.Left
+		if n == nil {
+			continue
+		}
+		switch n.Op {
+		case OSEND:
+			n.Right = Nod(OADDR, n.Right, nil)
+			typecheck(&n.Right, Erv)
+
+		case OSELRECV, OSELRECV2:
+			if n.Op == OSELRECV2 && n.Ntest == nil {
+				n.Op = OSELRECV
+			}
+			if n.Op == OSELRECV2 {
+				n.Ntest = Nod(OADDR, n.Ntest, nil)
+				typecheck(&n.Ntest, Erv)
+			}
+
+			if n.Left == nil {
+				n.Left = nodnil()
+			} else {
+				n.Left = Nod(OADDR, n.Left, nil)
+				typecheck(&n.Left, Erv)
+			}
+		}
+	}
+
+	// optimization: two-case select but one is default: single non-blocking op.
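+	// For example,
+	//	select {
+	//	case v = <-c:
+	//		body
+	//	default:
+	//		defaultBody
+	//	}
+	// becomes, roughly,
+	//	if selectnbrecv(&v, c) { body } else { defaultBody }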
+	if i == 2 && (sel.List.N.Left == nil || sel.List.Next.N.Left == nil) {
+		var cas *Node
+		var dflt *Node
+		if sel.List.N.Left == nil {
+			cas = sel.List.Next.N
+			dflt = sel.List.N
+		} else {
+			dflt = sel.List.Next.N
+			cas = sel.List.N
+		}
+
+		n := cas.Left
+		setlineno(n)
+		r := Nod(OIF, nil, nil)
+		r.Ninit = cas.Ninit
+		switch n.Op {
+		default:
+			Fatal("select %v", Oconv(int(n.Op), 0))
+
+			// if selectnbsend(c, v) { body } else { default body }
+		case OSEND:
+			ch := n.Left
+
+			r.Ntest = mkcall1(chanfn("selectnbsend", 2, ch.Type), Types[TBOOL], &r.Ninit, typename(ch.Type), ch, n.Right)
+
+			// if c != nil && selectnbrecv(&v, c) { body } else { default body }
+		case OSELRECV:
+			r = Nod(OIF, nil, nil)
+
+			r.Ninit = cas.Ninit
+			ch := n.Right.Left
+			r.Ntest = mkcall1(chanfn("selectnbrecv", 2, ch.Type), Types[TBOOL], &r.Ninit, typename(ch.Type), n.Left, ch)
+
+			// if c != nil && selectnbrecv2(&v, c) { body } else { default body }
+		case OSELRECV2:
+			r = Nod(OIF, nil, nil)
+
+			r.Ninit = cas.Ninit
+			ch := n.Right.Left
+			r.Ntest = mkcall1(chanfn("selectnbrecv2", 2, ch.Type), Types[TBOOL], &r.Ninit, typename(ch.Type), n.Left, n.Ntest, ch)
+		}
+
+		typecheck(&r.Ntest, Erv)
+		r.Nbody = cas.Nbody
+		r.Nelse = concat(dflt.Ninit, dflt.Nbody)
+		sel.Nbody = list1(r)
+		goto out
+	}
+
+	init = sel.Ninit
+	sel.Ninit = nil
+
+	// generate sel-struct
+	setlineno(sel)
+
+	selv = temp(selecttype(int32(sel.Xoffset)))
+	r = Nod(OAS, selv, nil)
+	typecheck(&r, Etop)
+	init = list(init, r)
+	var_ = conv(conv(Nod(OADDR, selv, nil), Types[TUNSAFEPTR]), Ptrto(Types[TUINT8]))
+	r = mkcall("newselect", nil, nil, var_, Nodintconst(selv.Type.Width), Nodintconst(sel.Xoffset))
+	typecheck(&r, Etop)
+	init = list(init, r)
+
+	// register cases
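+	// Each case becomes
+	//	if select<op>(sel, ...) { body; break }
+	// and the selectgo call emitted below resumes at the chosen
+	// case's registration call, in effect making it return true.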
+	for l := sel.List; l != nil; l = l.Next {
+		cas = l.N
+		setlineno(cas)
+		n = cas.Left
+		r = Nod(OIF, nil, nil)
+		r.Ninit = cas.Ninit
+		cas.Ninit = nil
+		if n != nil {
+			r.Ninit = concat(r.Ninit, n.Ninit)
+			n.Ninit = nil
+		}
+
+		if n == nil {
+			// selectdefault(sel *byte);
+			r.Ntest = mkcall("selectdefault", Types[TBOOL], &r.Ninit, var_)
+		} else {
+			switch n.Op {
+			default:
+				Fatal("select %v", Oconv(int(n.Op), 0))
+
+				// selectsend(sel *byte, hchan *chan any, elem *any) (selected bool);
+			case OSEND:
+				r.Ntest = mkcall1(chanfn("selectsend", 2, n.Left.Type), Types[TBOOL], &r.Ninit, var_, n.Left, n.Right)
+
+				// selectrecv(sel *byte, hchan *chan any, elem *any) (selected bool);
+			case OSELRECV:
+				r.Ntest = mkcall1(chanfn("selectrecv", 2, n.Right.Left.Type), Types[TBOOL], &r.Ninit, var_, n.Right.Left, n.Left)
+
+				// selectrecv2(sel *byte, hchan *chan any, elem *any, received *bool) (selected bool);
+			case OSELRECV2:
+				r.Ntest = mkcall1(chanfn("selectrecv2", 2, n.Right.Left.Type), Types[TBOOL], &r.Ninit, var_, n.Right.Left, n.Left, n.Ntest)
+			}
+		}
+
+		// selv is no longer alive after use.
+		r.Nbody = list(r.Nbody, Nod(OVARKILL, selv, nil))
+
+		r.Nbody = concat(r.Nbody, cas.Nbody)
+		r.Nbody = list(r.Nbody, Nod(OBREAK, nil, nil))
+		init = list(init, r)
+	}
+
+	// run the select
+	setlineno(sel)
+
+	init = list(init, mkcall("selectgo", nil, nil, var_))
+	sel.Nbody = init
+
+out:
+	sel.List = nil
+	walkstmtlist(sel.Nbody)
+	lineno = int32(lno)
+}
+
+// Keep in sync with src/runtime/runtime2.go and src/runtime/select.go.
+func selecttype(size int32) *Type {
+	// TODO(dvyukov): it's possible to generate SudoG and Scase only once
+	// and then cache; and also cache Select per size.
+	sudog := Nod(OTSTRUCT, nil, nil)
+
+	sudog.List = list(sudog.List, Nod(ODCLFIELD, newname(Lookup("g")), typenod(Ptrto(Types[TUINT8]))))
+	sudog.List = list(sudog.List, Nod(ODCLFIELD, newname(Lookup("selectdone")), typenod(Ptrto(Types[TUINT8]))))
+	sudog.List = list(sudog.List, Nod(ODCLFIELD, newname(Lookup("next")), typenod(Ptrto(Types[TUINT8]))))
+	sudog.List = list(sudog.List, Nod(ODCLFIELD, newname(Lookup("prev")), typenod(Ptrto(Types[TUINT8]))))
+	sudog.List = list(sudog.List, Nod(ODCLFIELD, newname(Lookup("elem")), typenod(Ptrto(Types[TUINT8]))))
+	sudog.List = list(sudog.List, Nod(ODCLFIELD, newname(Lookup("releasetime")), typenod(Types[TUINT64])))
+	sudog.List = list(sudog.List, Nod(ODCLFIELD, newname(Lookup("nrelease")), typenod(Types[TINT32])))
+	sudog.List = list(sudog.List, Nod(ODCLFIELD, newname(Lookup("waitlink")), typenod(Ptrto(Types[TUINT8]))))
+	typecheck(&sudog, Etype)
+	sudog.Type.Noalg = 1
+	sudog.Type.Local = true
+
+	scase := Nod(OTSTRUCT, nil, nil)
+	scase.List = list(scase.List, Nod(ODCLFIELD, newname(Lookup("elem")), typenod(Ptrto(Types[TUINT8]))))
+	scase.List = list(scase.List, Nod(ODCLFIELD, newname(Lookup("chan")), typenod(Ptrto(Types[TUINT8]))))
+	scase.List = list(scase.List, Nod(ODCLFIELD, newname(Lookup("pc")), typenod(Types[TUINTPTR])))
+	scase.List = list(scase.List, Nod(ODCLFIELD, newname(Lookup("kind")), typenod(Types[TUINT16])))
+	scase.List = list(scase.List, Nod(ODCLFIELD, newname(Lookup("so")), typenod(Types[TUINT16])))
+	scase.List = list(scase.List, Nod(ODCLFIELD, newname(Lookup("receivedp")), typenod(Ptrto(Types[TUINT8]))))
+	scase.List = list(scase.List, Nod(ODCLFIELD, newname(Lookup("releasetime")), typenod(Types[TUINT64])))
+	typecheck(&scase, Etype)
+	scase.Type.Noalg = 1
+	scase.Type.Local = true
+
+	sel := Nod(OTSTRUCT, nil, nil)
+	sel.List = list(sel.List, Nod(ODCLFIELD, newname(Lookup("tcase")), typenod(Types[TUINT16])))
+	sel.List = list(sel.List, Nod(ODCLFIELD, newname(Lookup("ncase")), typenod(Types[TUINT16])))
+	sel.List = list(sel.List, Nod(ODCLFIELD, newname(Lookup("pollorder")), typenod(Ptrto(Types[TUINT8]))))
+	sel.List = list(sel.List, Nod(ODCLFIELD, newname(Lookup("lockorder")), typenod(Ptrto(Types[TUINT8]))))
+	arr := Nod(OTARRAY, Nodintconst(int64(size)), scase)
+	sel.List = list(sel.List, Nod(ODCLFIELD, newname(Lookup("scase")), arr))
+	arr = Nod(OTARRAY, Nodintconst(int64(size)), typenod(Ptrto(Types[TUINT8])))
+	sel.List = list(sel.List, Nod(ODCLFIELD, newname(Lookup("lockorderarr")), arr))
+	arr = Nod(OTARRAY, Nodintconst(int64(size)), typenod(Types[TUINT16]))
+	sel.List = list(sel.List, Nod(ODCLFIELD, newname(Lookup("pollorderarr")), arr))
+	typecheck(&sel, Etype)
+	sel.Type.Noalg = 1
+	sel.Type.Local = true
+
+	return sel.Type
+}
diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go
new file mode 100644
index 0000000..dfaec74
--- /dev/null
+++ b/src/cmd/compile/internal/gc/sinit.go
@@ -0,0 +1,1528 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+	"cmd/internal/obj"
+	"fmt"
+)
+
+/*
+ * static initialization
+ */
+const (
+	InitNotStarted = 0
+	InitDone       = 1
+	InitPending    = 2
+)
+
+var initlist *NodeList
+
+// init1 walks the AST starting at n, and accumulates in out
+// the list of definitions needing init code in dependency order.
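+//
+// For example, given
+//	var a = b + 1
+//	var b = f()
+// the call to f (initializing b) is emitted before the
+// assignment to a.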
+func init1(n *Node, out **NodeList) {
+	if n == nil {
+		return
+	}
+	init1(n.Left, out)
+	init1(n.Right, out)
+	for l := n.List; l != nil; l = l.Next {
+		init1(l.N, out)
+	}
+
+	if n.Left != nil && n.Type != nil && n.Left.Op == OTYPE && n.Class == PFUNC {
+		// Methods called as Type.Method(receiver, ...).
+		// Definitions for method expressions are stored in type->nname.
+		init1(n.Type.Nname, out)
+	}
+
+	if n.Op != ONAME {
+		return
+	}
+	switch n.Class {
+	case PEXTERN, PFUNC:
+		break
+
+	default:
+		if isblank(n) && n.Curfn == nil && n.Defn != nil && n.Defn.Initorder == InitNotStarted {
+			// Initialization of blank names is part of init()
+			// except when they occur inside a function.
+			break
+		}
+
+		return
+	}
+
+	if n.Initorder == InitDone {
+		return
+	}
+	if n.Initorder == InitPending {
+		// Since mutually recursive sets of functions are allowed,
+		// we don't necessarily raise an error if n depends on a node
+		// which is already waiting for its dependencies to be visited.
+		//
+		// initlist contains a cycle of identifiers referring to each other.
+		// If this cycle contains a variable, then this variable refers to itself.
+		// Conversely, if there exists an initialization cycle involving
+		// a variable in the program, the tree walk will reach a cycle
+		// involving that variable.
+		var nv *Node
+		if n.Class != PFUNC {
+			nv = n
+			goto foundinitloop
+		}
+
+		for l := initlist; l.N != n; l = l.Next {
+			if l.N.Class != PFUNC {
+				nv = l.N
+				goto foundinitloop
+			}
+		}
+
+		// The loop involves only functions, ok.
+		return
+
+		// If errors have already been printed, they probably
+		// confused us and there might not be a loop; let the
+		// user fix those first.
+	foundinitloop:
+		Flusherrors()
+
+		if nerrors > 0 {
+			errorexit()
+		}
+
+		// There is a loop involving nv. We know about
+		// n and initlist = n1 <- ... <- nv <- ... <- n <- ...
+		fmt.Printf("%v: initialization loop:\n", nv.Line())
+
+		// Build back pointers in initlist.
+		for l := initlist; l != nil; l = l.Next {
+			if l.Next != nil {
+				l.Next.End = l
+			}
+		}
+
+		// Print nv -> ... -> n1 -> n.
+		var l *NodeList
+		for l = initlist; l.N != nv; l = l.Next {
+		}
+		for ; l != nil; l = l.End {
+			fmt.Printf("\t%v %v refers to\n", l.N.Line(), l.N.Sym)
+		}
+
+		// Print n -> ... -> nv.
+		for l = initlist; l.N != n; l = l.Next {
+		}
+		for ; l.N != nv; l = l.End {
+			fmt.Printf("\t%v %v refers to\n", l.N.Line(), l.N.Sym)
+		}
+		fmt.Printf("\t%v %v\n", nv.Line(), nv.Sym)
+		errorexit()
+	}
+
+	// reached a new unvisited node.
+	n.Initorder = InitPending
+
+	l := new(NodeList)
+	l.Next = initlist
+	l.N = n
+	l.End = nil
+	initlist = l
+
+	// make sure that everything n depends on is initialized.
+	// n->defn is an assignment to n
+	if n.Defn != nil {
+		switch n.Defn.Op {
+		default:
+			goto bad
+
+		case ODCLFUNC:
+			init2list(n.Defn.Nbody, out)
+
+		case OAS:
+			if n.Defn.Left != n {
+				goto bad
+			}
+			if isblank(n.Defn.Left) && candiscard(n.Defn.Right) {
+				n.Defn.Op = OEMPTY
+				n.Defn.Left = nil
+				n.Defn.Right = nil
+				break
+			}
+
+			init2(n.Defn.Right, out)
+			if Debug['j'] != 0 {
+				fmt.Printf("%v\n", n.Sym)
+			}
+			if isblank(n) || !staticinit(n, out) {
+				if Debug['%'] != 0 {
+					Dump("nonstatic", n.Defn)
+				}
+				*out = list(*out, n.Defn)
+			}
+
+		case OAS2FUNC, OAS2MAPR, OAS2DOTTYPE, OAS2RECV:
+			if n.Defn.Initorder != InitNotStarted {
+				break
+			}
+			n.Defn.Initorder = InitDone
+			for l := n.Defn.Rlist; l != nil; l = l.Next {
+				init1(l.N, out)
+			}
+			if Debug['%'] != 0 {
+				Dump("nonstatic", n.Defn)
+			}
+			*out = list(*out, n.Defn)
+		}
+	}
+
+	l = initlist
+	initlist = l.Next
+	if l.N != n {
+		Fatal("bad initlist")
+	}
+
+	n.Initorder = InitDone
+	return
+
+bad:
+	Dump("defn", n.Defn)
+	Fatal("init1: bad defn")
+}
+
+// recurse over n, doing init1 everywhere.
+func init2(n *Node, out **NodeList) {
+	if n == nil || n.Initorder == InitDone {
+		return
+	}
+
+	if n.Op == ONAME && n.Ninit != nil {
+		Fatal("name %v with ninit: %v", n.Sym, Nconv(n, obj.FmtSign))
+	}
+
+	init1(n, out)
+	init2(n.Left, out)
+	init2(n.Right, out)
+	init2(n.Ntest, out)
+	init2list(n.Ninit, out)
+	init2list(n.List, out)
+	init2list(n.Rlist, out)
+	init2list(n.Nbody, out)
+	init2list(n.Nelse, out)
+
+	if n.Op == OCLOSURE {
+		init2list(n.Closure.Nbody, out)
+	}
+	if n.Op == ODOTMETH || n.Op == OCALLPART {
+		init2(n.Type.Nname, out)
+	}
+}
+
+func init2list(l *NodeList, out **NodeList) {
+	for ; l != nil; l = l.Next {
+		init2(l.N, out)
+	}
+}
+
+func initreorder(l *NodeList, out **NodeList) {
+	var n *Node
+
+	for ; l != nil; l = l.Next {
+		n = l.N
+		switch n.Op {
+		case ODCLFUNC, ODCLCONST, ODCLTYPE:
+			continue
+		}
+
+		initreorder(n.Ninit, out)
+		n.Ninit = nil
+		init1(n, out)
+	}
+}
+
+// initfix computes initialization order for a list l of top-level
+// declarations and outputs the corresponding list of statements
+// to include in the init() function body.
+func initfix(l *NodeList) *NodeList {
+	var lout *NodeList
+	lno := int(lineno)
+	initreorder(l, &lout)
+	lineno = int32(lno)
+	return lout
+}
+
+/*
+ * compilation of top-level (static) assignments
+ * into DATA statements if at all possible.
+ */
+func staticinit(n *Node, out **NodeList) bool {
+	if n.Op != ONAME || n.Class != PEXTERN || n.Defn == nil || n.Defn.Op != OAS {
+		Fatal("staticinit")
+	}
+
+	lineno = n.Lineno
+	l := n.Defn.Left
+	r := n.Defn.Right
+	return staticassign(l, r, out)
+}
+
+// like staticassign but we are copying an already
+// initialized value r.
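+//
+// For example, given
+//	var x = [2]int{1, 2}
+//	var y = x
+// y can be initialized by replaying x's static initializer
+// rather than emitting a runtime copy.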
+func staticcopy(l *Node, r *Node, out **NodeList) bool {
+	if r.Op != ONAME {
+		return false
+	}
+	if r.Class == PFUNC {
+		gdata(l, r, Widthptr)
+		return true
+	}
+	if r.Class != PEXTERN || r.Sym.Pkg != localpkg {
+		return false
+	}
+	if r.Defn == nil { // probably zeroed but perhaps supplied externally and of unknown value
+		return false
+	}
+	if r.Defn.Op != OAS {
+		return false
+	}
+	orig := r
+	r = r.Defn.Right
+
+	switch r.Op {
+	case ONAME:
+		if staticcopy(l, r, out) {
+			return true
+		}
+		*out = list(*out, Nod(OAS, l, r))
+		return true
+
+	case OLITERAL:
+		if iszero(r) {
+			return true
+		}
+		gdata(l, r, int(l.Type.Width))
+		return true
+
+	case OADDR:
+		switch r.Left.Op {
+		case ONAME:
+			gdata(l, r, int(l.Type.Width))
+			return true
+		}
+
+	case OPTRLIT:
+		switch r.Left.Op {
+		//dump("not static addr", r);
+		default:
+			break
+
+			// copy pointer
+		case OARRAYLIT, OSTRUCTLIT, OMAPLIT:
+			gdata(l, Nod(OADDR, r.Nname, nil), int(l.Type.Width))
+
+			return true
+		}
+
+	case OARRAYLIT:
+		if Isslice(r.Type) {
+			// copy slice
+			a := r.Nname
+
+			n1 := *l
+			n1.Xoffset = l.Xoffset + int64(Array_array)
+			gdata(&n1, Nod(OADDR, a, nil), Widthptr)
+			n1.Xoffset = l.Xoffset + int64(Array_nel)
+			gdata(&n1, r.Right, Widthint)
+			n1.Xoffset = l.Xoffset + int64(Array_cap)
+			gdata(&n1, r.Right, Widthint)
+			return true
+		}
+		fallthrough
+	case OSTRUCTLIT:
+		p := r.Initplan
+
+		n1 := *l
+		var e *InitEntry
+		var ll *Node
+		var rr *Node
+		for i := 0; i < len(p.E); i++ {
+			e = &p.E[i]
+			n1.Xoffset = l.Xoffset + e.Xoffset
+			n1.Type = e.Expr.Type
+			if e.Expr.Op == OLITERAL {
+				gdata(&n1, e.Expr, int(n1.Type.Width))
+			} else {
+				ll = Nod(OXXX, nil, nil)
+				*ll = n1
+				ll.Orig = ll // completely separate copy
+				if !staticassign(ll, e.Expr, out) {
+					// Requires computation, but we're
+					// copying someone else's computation.
+					rr = Nod(OXXX, nil, nil)
+
+					*rr = *orig
+					rr.Orig = rr // completely separate copy
+					rr.Type = ll.Type
+					rr.Xoffset += e.Xoffset
+					*out = list(*out, Nod(OAS, ll, rr))
+				}
+			}
+		}
+
+		return true
+	}
+
+	return false
+}
+
+func staticassign(l *Node, r *Node, out **NodeList) bool {
+	var n1 Node
+
+	switch r.Op {
+	//dump("not static", r);
+	default:
+		break
+
+	case ONAME:
+		return staticcopy(l, r, out)
+
+	case OLITERAL:
+		if iszero(r) {
+			return true
+		}
+		gdata(l, r, int(l.Type.Width))
+		return true
+
+	case OADDR:
+		var nam Node
+		if stataddr(&nam, r.Left) {
+			n1 := *r
+			n1.Left = &nam
+			gdata(l, &n1, int(l.Type.Width))
+			return true
+		}
+		fallthrough
+
+	case OPTRLIT:
+		switch r.Left.Op {
+		//dump("not static ptrlit", r);
+		default:
+			break
+
+			// Init pointer.
+		case OARRAYLIT, OMAPLIT, OSTRUCTLIT:
+			a := staticname(r.Left.Type, 1)
+
+			r.Nname = a
+			gdata(l, Nod(OADDR, a, nil), int(l.Type.Width))
+
+			// Init underlying literal.
+			if !staticassign(a, r.Left, out) {
+				*out = list(*out, Nod(OAS, a, r.Left))
+			}
+			return true
+		}
+
+	case OSTRARRAYBYTE:
+		if l.Class == PEXTERN && r.Left.Op == OLITERAL {
+			sval := r.Left.Val.U.(string)
+			slicebytes(l, sval, len(sval))
+			return true
+		}
+
+	case OARRAYLIT:
+		initplan(r)
+		if Isslice(r.Type) {
+			// Init slice.
+			ta := typ(TARRAY)
+
+			ta.Type = r.Type.Type
+			ta.Bound = Mpgetfix(r.Right.Val.U.(*Mpint))
+			a := staticname(ta, 1)
+			r.Nname = a
+			n1 = *l
+			n1.Xoffset = l.Xoffset + int64(Array_array)
+			gdata(&n1, Nod(OADDR, a, nil), Widthptr)
+			n1.Xoffset = l.Xoffset + int64(Array_nel)
+			gdata(&n1, r.Right, Widthint)
+			n1.Xoffset = l.Xoffset + int64(Array_cap)
+			gdata(&n1, r.Right, Widthint)
+
+			// Fall through to init underlying array.
+			l = a
+		}
+		fallthrough
+	case OSTRUCTLIT:
+		initplan(r)
+
+		p := r.Initplan
+		n1 = *l
+		var e *InitEntry
+		var a *Node
+		for i := 0; i < len(p.E); i++ {
+			e = &p.E[i]
+			n1.Xoffset = l.Xoffset + e.Xoffset
+			n1.Type = e.Expr.Type
+			if e.Expr.Op == OLITERAL {
+				gdata(&n1, e.Expr, int(n1.Type.Width))
+			} else {
+				a = Nod(OXXX, nil, nil)
+				*a = n1
+				a.Orig = a // completely separate copy
+				if !staticassign(a, e.Expr, out) {
+					*out = list(*out, Nod(OAS, a, e.Expr))
+				}
+			}
+		}
+
+		return true
+
+		// TODO: Table-driven map insert.
+	case OMAPLIT:
+		break
+	}
+
+	return false
+}
+
+/*
+ * from here down is the walk analysis
+ * of composite literals.
+ * most of the work is to generate
+ * data statements for the constant
+ * part of the composite literal.
+ */
+func staticname(t *Type, ctxt int) *Node {
+	n := newname(Lookupf("statictmp_%.4d", statuniqgen))
+	statuniqgen++
+	if ctxt == 0 {
+		n.Name.Readonly = true
+	}
+	addvar(n, t, PEXTERN)
+	return n
+}
+
+func isliteral(n *Node) bool {
+	if n.Op == OLITERAL {
+		if n.Val.Ctype != CTNIL {
+			return true
+		}
+	}
+	return false
+}
+
+func simplename(n *Node) bool {
+	if n.Op != ONAME {
+		return false
+	}
+	if !n.Addable {
+		return false
+	}
+	if n.Class&PHEAP != 0 {
+		return false
+	}
+	if n.Class == PPARAMREF {
+		return false
+	}
+	return true
+}
+
+func litas(l *Node, r *Node, init **NodeList) {
+	a := Nod(OAS, l, r)
+	typecheck(&a, Etop)
+	walkexpr(&a, init)
+	*init = list(*init, a)
+}
+
+const (
+	MODEDYNAM = 1
+	MODECONST = 2
+)
+
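+// getdyn reports whether the composite literal n has constant parts
+// (MODECONST), parts that must be computed at run time (MODEDYNAM),
+// or both, as a bit mask. top is non-zero for the outermost literal;
+// a nested slice literal always counts as dynamic.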
+func getdyn(n *Node, top int) int {
+	mode := 0
+	switch n.Op {
+	default:
+		if isliteral(n) {
+			return MODECONST
+		}
+		return MODEDYNAM
+
+	case OARRAYLIT:
+		if top == 0 && n.Type.Bound < 0 {
+			return MODEDYNAM
+		}
+		fallthrough
+
+	case OSTRUCTLIT:
+		break
+	}
+
+	var value *Node
+	for nl := n.List; nl != nil; nl = nl.Next {
+		value = nl.N.Right
+		mode |= getdyn(value, 0)
+		if mode == MODEDYNAM|MODECONST {
+			break
+		}
+	}
+
+	return mode
+}
+
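+// structlit emits code to initialize var_ from the struct literal n.
+// pass 1 emits only the constant parts (marked Dodata = 2 as
+// candidate data statements), pass 2 only the parts that need
+// run-time computation, and pass 3 emits everything. ctxt != 0
+// indicates a static (global) context.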
+func structlit(ctxt int, pass int, n *Node, var_ *Node, init **NodeList) {
+	var r *Node
+	var a *Node
+	var index *Node
+	var value *Node
+
+	for nl := n.List; nl != nil; nl = nl.Next {
+		r = nl.N
+		if r.Op != OKEY {
+			Fatal("structlit: rhs not OKEY: %v", r)
+		}
+		index = r.Left
+		value = r.Right
+
+		switch value.Op {
+		case OARRAYLIT:
+			if value.Type.Bound < 0 {
+				if pass == 1 && ctxt != 0 {
+					a = Nod(ODOT, var_, newname(index.Sym))
+					slicelit(ctxt, value, a, init)
+				} else if pass == 2 && ctxt == 0 {
+					a = Nod(ODOT, var_, newname(index.Sym))
+					slicelit(ctxt, value, a, init)
+				} else if pass == 3 {
+					break
+				}
+				continue
+			}
+
+			a = Nod(ODOT, var_, newname(index.Sym))
+			arraylit(ctxt, pass, value, a, init)
+			continue
+
+		case OSTRUCTLIT:
+			a = Nod(ODOT, var_, newname(index.Sym))
+			structlit(ctxt, pass, value, a, init)
+			continue
+		}
+
+		if isliteral(value) {
+			if pass == 2 {
+				continue
+			}
+		} else if pass == 1 {
+			continue
+		}
+
+		// build list of var.field = expr
+		a = Nod(ODOT, var_, newname(index.Sym))
+
+		a = Nod(OAS, a, value)
+		typecheck(&a, Etop)
+		if pass == 1 {
+			walkexpr(&a, init) // add any assignments in r to top
+			if a.Op != OAS {
+				Fatal("structlit: not as")
+			}
+			a.Dodata = 2
+		} else {
+			orderstmtinplace(&a)
+			walkstmt(&a)
+		}
+
+		*init = list(*init, a)
+	}
+}
+
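+// arraylit is the array analogue of structlit; pass and ctxt have
+// the same meaning.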
+func arraylit(ctxt int, pass int, n *Node, var_ *Node, init **NodeList) {
+	var r *Node
+	var a *Node
+	var index *Node
+	var value *Node
+
+	for l := n.List; l != nil; l = l.Next {
+		r = l.N
+		if r.Op != OKEY {
+			Fatal("arraylit: rhs not OKEY: %v", r)
+		}
+		index = r.Left
+		value = r.Right
+
+		switch value.Op {
+		case OARRAYLIT:
+			if value.Type.Bound < 0 {
+				if pass == 1 && ctxt != 0 {
+					a = Nod(OINDEX, var_, index)
+					slicelit(ctxt, value, a, init)
+				} else if pass == 2 && ctxt == 0 {
+					a = Nod(OINDEX, var_, index)
+					slicelit(ctxt, value, a, init)
+				} else if pass == 3 {
+					break
+				}
+				continue
+			}
+
+			a = Nod(OINDEX, var_, index)
+			arraylit(ctxt, pass, value, a, init)
+			continue
+
+		case OSTRUCTLIT:
+			a = Nod(OINDEX, var_, index)
+			structlit(ctxt, pass, value, a, init)
+			continue
+		}
+
+		if isliteral(index) && isliteral(value) {
+			if pass == 2 {
+				continue
+			}
+		} else if pass == 1 {
+			continue
+		}
+
+		// build list of var[index] = value
+		a = Nod(OINDEX, var_, index)
+
+		a = Nod(OAS, a, value)
+		typecheck(&a, Etop)
+		if pass == 1 {
+			walkexpr(&a, init)
+			if a.Op != OAS {
+				Fatal("arraylit: not as")
+			}
+			a.Dodata = 2
+		} else {
+			orderstmtinplace(&a)
+			walkstmt(&a)
+		}
+
+		*init = list(*init, a)
+	}
+}
+
+func slicelit(ctxt int, n *Node, var_ *Node, init **NodeList) {
+	// make an array type
+	t := shallow(n.Type)
+
+	t.Bound = Mpgetfix(n.Right.Val.U.(*Mpint))
+	t.Width = 0
+	t.Sym = nil
+	t.Haspointers = 0
+	dowidth(t)
+
+	if ctxt != 0 {
+		// put everything into static array
+		vstat := staticname(t, ctxt)
+
+		arraylit(ctxt, 1, n, vstat, init)
+		arraylit(ctxt, 2, n, vstat, init)
+
+		// copy static to slice
+		a := Nod(OSLICE, vstat, Nod(OKEY, nil, nil))
+
+		a = Nod(OAS, var_, a)
+		typecheck(&a, Etop)
+		a.Dodata = 2
+		*init = list(*init, a)
+		return
+	}
+
+	// recipe for var = []t{...}
+	// 1. make a static array
+	//	var vstat [...]t
+	// 2. assign (data statements) the constant part
+	//	vstat = constpart{}
+	// 3. make an auto pointer to array and allocate heap to it
+	//	var vauto *[...]t = new([...]t)
+	// 4. copy the static array to the auto array
+	//	*vauto = vstat
+	// 5. assign slice of allocated heap to var
+	//	var = [0:]*auto
+	// 6. for each dynamic part assign to the slice
+	//	var[i] = dynamic part
+	//
+	// an optimization is done if there is no constant part
+	//	3. var vauto *[...]t = new([...]t)
+	//	5. var = [0:]*auto
+	//	6. var[i] = dynamic part
+
+	// if the literal contains constants,
+	// make static initialized array (1),(2)
+	var vstat *Node
+
+	mode := getdyn(n, 1)
+	if mode&MODECONST != 0 {
+		vstat = staticname(t, ctxt)
+		arraylit(ctxt, 1, n, vstat, init)
+	}
+
+	// make new auto *array (3 declare)
+	vauto := temp(Ptrto(t))
+
+	// set auto to point at new temp or heap (3 assign)
+	var a *Node
+	if n.Alloc != nil {
+		// temp allocated during order.c for dddarg
+		n.Alloc.Type = t
+
+		if vstat == nil {
+			a = Nod(OAS, n.Alloc, nil)
+			typecheck(&a, Etop)
+			*init = list(*init, a) // zero new temp
+		}
+
+		a = Nod(OADDR, n.Alloc, nil)
+	} else if n.Esc == EscNone {
+		a = temp(t)
+		if vstat == nil {
+			a = Nod(OAS, temp(t), nil)
+			typecheck(&a, Etop)
+			*init = list(*init, a) // zero new temp
+			a = a.Left
+		}
+
+		a = Nod(OADDR, a, nil)
+	} else {
+		a = Nod(ONEW, nil, nil)
+		a.List = list1(typenod(t))
+	}
+
+	a = Nod(OAS, vauto, a)
+	typecheck(&a, Etop)
+	walkexpr(&a, init)
+	*init = list(*init, a)
+
+	if vstat != nil {
+		// copy static to heap (4)
+		a = Nod(OIND, vauto, nil)
+
+		a = Nod(OAS, a, vstat)
+		typecheck(&a, Etop)
+		walkexpr(&a, init)
+		*init = list(*init, a)
+	}
+
+	// make slice out of heap (5)
+	a = Nod(OAS, var_, Nod(OSLICE, vauto, Nod(OKEY, nil, nil)))
+
+	typecheck(&a, Etop)
+	orderstmtinplace(&a)
+	walkstmt(&a)
+	*init = list(*init, a)
+
+	// put dynamics into slice (6)
+	var value *Node
+	var r *Node
+	var index *Node
+	for l := n.List; l != nil; l = l.Next {
+		r = l.N
+		if r.Op != OKEY {
+			Fatal("slicelit: rhs not OKEY: %v", r)
+		}
+		index = r.Left
+		value = r.Right
+		a = Nod(OINDEX, var_, index)
+		a.Bounded = true
+
+		// TODO need to check bounds?
+
+		switch value.Op {
+		case OARRAYLIT:
+			if value.Type.Bound < 0 {
+				break
+			}
+			arraylit(ctxt, 2, value, a, init)
+			continue
+
+		case OSTRUCTLIT:
+			structlit(ctxt, 2, value, a, init)
+			continue
+		}
+
+		if isliteral(index) && isliteral(value) {
+			continue
+		}
+
+		// build list of var[c] = expr
+		a = Nod(OAS, a, value)
+
+		typecheck(&a, Etop)
+		orderstmtinplace(&a)
+		walkstmt(&a)
+		*init = list(*init, a)
+	}
+}
+
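+// maplit emits code to initialize the map var_ from the map literal
+// n. Entries whose key and value are both constants are gathered
+// into a static array of pairs and inserted with a single loop;
+// the remaining entries are inserted one at a time through the
+// addressable temporaries key and val.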
+func maplit(ctxt int, n *Node, var_ *Node, init **NodeList) {
+	var r *Node
+	var index *Node
+	var value *Node
+
+	ctxt = 0
+
+	// make the map var
+	nerr := nerrors
+
+	a := Nod(OMAKE, nil, nil)
+	a.List = list1(typenod(n.Type))
+	litas(var_, a, init)
+
+	// count the initializers
+	b := int64(0)
+
+	for l := n.List; l != nil; l = l.Next {
+		r = l.N
+
+		if r.Op != OKEY {
+			Fatal("maplit: rhs not OKEY: %v", r)
+		}
+		index = r.Left
+		value = r.Right
+
+		if isliteral(index) && isliteral(value) {
+			b++
+		}
+	}
+
+	if b != 0 {
+		// build type [count]struct { a Tindex, b Tvalue }
+		t := n.Type
+
+		tk := t.Down
+		tv := t.Type
+
+		symb := Lookup("b")
+		t = typ(TFIELD)
+		t.Type = tv
+		t.Sym = symb
+
+		syma := Lookup("a")
+		t1 := t
+		t = typ(TFIELD)
+		t.Type = tk
+		t.Sym = syma
+		t.Down = t1
+
+		t1 = t
+		t = typ(TSTRUCT)
+		t.Type = t1
+
+		t1 = t
+		t = typ(TARRAY)
+		t.Bound = b
+		t.Type = t1
+
+		dowidth(t)
+
+		// make and initialize static array
+		vstat := staticname(t, ctxt)
+
+		b := int64(0)
+		var index *Node
+		var r *Node
+		var value *Node
+		for l := n.List; l != nil; l = l.Next {
+			r = l.N
+
+			if r.Op != OKEY {
+				Fatal("maplit: rhs not OKEY: %v", r)
+			}
+			index = r.Left
+			value = r.Right
+
+			if isliteral(index) && isliteral(value) {
+				// build vstat[b].a = key;
+				a = Nodintconst(b)
+
+				a = Nod(OINDEX, vstat, a)
+				a = Nod(ODOT, a, newname(syma))
+				a = Nod(OAS, a, index)
+				typecheck(&a, Etop)
+				walkexpr(&a, init)
+				a.Dodata = 2
+				*init = list(*init, a)
+
+				// build vstat[b].b = value;
+				a = Nodintconst(b)
+
+				a = Nod(OINDEX, vstat, a)
+				a = Nod(ODOT, a, newname(symb))
+				a = Nod(OAS, a, value)
+				typecheck(&a, Etop)
+				walkexpr(&a, init)
+				a.Dodata = 2
+				*init = list(*init, a)
+
+				b++
+			}
+		}
+
+		// loop adding structure elements to map
+		// for i = 0; i < len(vstat); i++ {
+		//	map[vstat[i].a] = vstat[i].b
+		// }
+		index = temp(Types[TINT])
+
+		a = Nod(OINDEX, vstat, index)
+		a.Bounded = true
+		a = Nod(ODOT, a, newname(symb))
+
+		r = Nod(OINDEX, vstat, index)
+		r.Bounded = true
+		r = Nod(ODOT, r, newname(syma))
+		r = Nod(OINDEX, var_, r)
+
+		r = Nod(OAS, r, a)
+
+		a = Nod(OFOR, nil, nil)
+		a.Nbody = list1(r)
+
+		a.Ninit = list1(Nod(OAS, index, Nodintconst(0)))
+		a.Ntest = Nod(OLT, index, Nodintconst(t.Bound))
+		a.Nincr = Nod(OAS, index, Nod(OADD, index, Nodintconst(1)))
+
+		typecheck(&a, Etop)
+		walkstmt(&a)
+		*init = list(*init, a)
+	}
+
+	// put in dynamic entries one-at-a-time
+	var key *Node
+
+	var val *Node
+	for l := n.List; l != nil; l = l.Next {
+		r = l.N
+
+		if r.Op != OKEY {
+			Fatal("maplit: rhs not OKEY: %v", r)
+		}
+		index = r.Left
+		value = r.Right
+
+		if isliteral(index) && isliteral(value) {
+			continue
+		}
+
+		// build list of var[c] = expr.
+		// use temporary so that mapassign1 can have addressable key, val.
+		if key == nil {
+			key = temp(var_.Type.Down)
+			val = temp(var_.Type.Type)
+		}
+
+		a = Nod(OAS, key, r.Left)
+		typecheck(&a, Etop)
+		walkstmt(&a)
+		*init = list(*init, a)
+		a = Nod(OAS, val, r.Right)
+		typecheck(&a, Etop)
+		walkstmt(&a)
+		*init = list(*init, a)
+
+		a = Nod(OAS, Nod(OINDEX, var_, key), val)
+		typecheck(&a, Etop)
+		walkstmt(&a)
+		*init = list(*init, a)
+
+		if nerr != nerrors {
+			break
+		}
+	}
+
+	if key != nil {
+		a = Nod(OVARKILL, key, nil)
+		typecheck(&a, Etop)
+		*init = list(*init, a)
+		a = Nod(OVARKILL, val, nil)
+		typecheck(&a, Etop)
+		*init = list(*init, a)
+	}
+}
+
+func anylit(ctxt int, n *Node, var_ *Node, init **NodeList) {
+	t := n.Type
+	switch n.Op {
+	default:
+		Fatal("anylit: not lit")
+
+	case OPTRLIT:
+		if !Isptr[t.Etype] {
+			Fatal("anylit: not ptr")
+		}
+
+		var r *Node
+		if n.Right != nil {
+			r = Nod(OADDR, n.Right, nil)
+			typecheck(&r, Erv)
+		} else {
+			r = Nod(ONEW, nil, nil)
+			r.Typecheck = 1
+			r.Type = t
+			r.Esc = n.Esc
+		}
+
+		walkexpr(&r, init)
+		a := Nod(OAS, var_, r)
+
+		typecheck(&a, Etop)
+		*init = list(*init, a)
+
+		var_ = Nod(OIND, var_, nil)
+		typecheck(&var_, Erv|Easgn)
+		anylit(ctxt, n.Left, var_, init)
+
+	case OSTRUCTLIT:
+		if t.Etype != TSTRUCT {
+			Fatal("anylit: not struct")
+		}
+
+		if simplename(var_) && count(n.List) > 4 {
+			if ctxt == 0 {
+				// lay out static data
+				vstat := staticname(t, ctxt)
+
+				structlit(ctxt, 1, n, vstat, init)
+
+				// copy static to var
+				a := Nod(OAS, var_, vstat)
+
+				typecheck(&a, Etop)
+				walkexpr(&a, init)
+				*init = list(*init, a)
+
+				// add expressions to automatic
+				structlit(ctxt, 2, n, var_, init)
+
+				break
+			}
+
+			structlit(ctxt, 1, n, var_, init)
+			structlit(ctxt, 2, n, var_, init)
+			break
+		}
+
+		// zero the variable first if the literal does not completely specify it
+		if simplename(var_) || count(n.List) < structcount(t) {
+			a := Nod(OAS, var_, nil)
+			typecheck(&a, Etop)
+			walkexpr(&a, init)
+			*init = list(*init, a)
+		}
+
+		structlit(ctxt, 3, n, var_, init)
+
+	case OARRAYLIT:
+		if t.Etype != TARRAY {
+			Fatal("anylit: not array")
+		}
+		if t.Bound < 0 {
+			slicelit(ctxt, n, var_, init)
+			break
+		}
+
+		if simplename(var_) && count(n.List) > 4 {
+			if ctxt == 0 {
+				// lay out static data
+				vstat := staticname(t, ctxt)
+
+				arraylit(1, 1, n, vstat, init)
+
+				// copy static to automatic
+				a := Nod(OAS, var_, vstat)
+
+				typecheck(&a, Etop)
+				walkexpr(&a, init)
+				*init = list(*init, a)
+
+				// add expressions to automatic
+				arraylit(ctxt, 2, n, var_, init)
+
+				break
+			}
+
+			arraylit(ctxt, 1, n, var_, init)
+			arraylit(ctxt, 2, n, var_, init)
+			break
+		}
+
+		// zero the variable first if the literal does not completely specify it
+		if simplename(var_) || int64(count(n.List)) < t.Bound {
+			a := Nod(OAS, var_, nil)
+			typecheck(&a, Etop)
+			walkexpr(&a, init)
+			*init = list(*init, a)
+		}
+
+		arraylit(ctxt, 3, n, var_, init)
+
+	case OMAPLIT:
+		if t.Etype != TMAP {
+			Fatal("anylit: not map")
+		}
+		maplit(ctxt, n, var_, init)
+	}
+}
+
+func oaslit(n *Node, init **NodeList) bool {
+	if n.Left == nil || n.Right == nil {
+		// not a special composite literal assignment
+		return false
+	}
+	if n.Left.Type == nil || n.Right.Type == nil {
+		// not a special composite literal assignment
+		return false
+	}
+	if !simplename(n.Left) {
+		// not a special composite literal assignment
+		return false
+	}
+	if !Eqtype(n.Left.Type, n.Right.Type) {
+		// not a special composite literal assignment
+		return false
+	}
+
+	// context is init() function.
+	// implies generated data executed
+	// exactly once and not subject to races.
+	ctxt := 0
+
+	//	if(n->dodata == 1)
+	//		ctxt = 1;
+
+	switch n.Right.Op {
+	default:
+		// not a special composite literal assignment
+		return false
+
+	case OSTRUCTLIT, OARRAYLIT, OMAPLIT:
+		if vmatch1(n.Left, n.Right) {
+			// not a special composite literal assignment
+			return false
+		}
+		anylit(ctxt, n.Right, n.Left, init)
+	}
+
+	n.Op = OEMPTY
+	return true
+}
+
+func getlit(lit *Node) int {
+	if Smallintconst(lit) {
+		return int(Mpgetfix(lit.Val.U.(*Mpint)))
+	}
+	return -1
+}
+
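+// stataddr reports whether n denotes a fixed address: a name,
+// possibly qualified by field selections and constant indexes.
+// On success it fills in nam with that address.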
+func stataddr(nam *Node, n *Node) bool {
+	if n == nil {
+		return false
+	}
+
+	switch n.Op {
+	case ONAME:
+		*nam = *n
+		return n.Addable
+
+	case ODOT:
+		if !stataddr(nam, n.Left) {
+			break
+		}
+		nam.Xoffset += n.Xoffset
+		nam.Type = n.Type
+		return true
+
+	case OINDEX:
+		if n.Left.Type.Bound < 0 {
+			break
+		}
+		if !stataddr(nam, n.Left) {
+			break
+		}
+		l := getlit(n.Right)
+		if l < 0 {
+			break
+		}
+
+		// Check for overflow.
+		if n.Type.Width != 0 && Thearch.MAXWIDTH/n.Type.Width <= int64(l) {
+			break
+		}
+		nam.Xoffset += int64(l) * n.Type.Width
+		nam.Type = n.Type
+		return true
+	}
+
+	return false
+}
+
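+// initplan records in n.Initplan the (offset, expression) pairs
+// that make up the composite literal n, so that static data can be
+// emitted one entry at a time.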
+func initplan(n *Node) {
+	if n.Initplan != nil {
+		return
+	}
+	p := new(InitPlan)
+	n.Initplan = p
+	switch n.Op {
+	default:
+		Fatal("initplan")
+
+	case OARRAYLIT:
+		var a *Node
+		for l := n.List; l != nil; l = l.Next {
+			a = l.N
+			if a.Op != OKEY || !Smallintconst(a.Left) {
+				Fatal("initplan arraylit")
+			}
+			addvalue(p, n.Type.Type.Width*Mpgetfix(a.Left.Val.U.(*Mpint)), nil, a.Right)
+		}
+
+	case OSTRUCTLIT:
+		var a *Node
+		for l := n.List; l != nil; l = l.Next {
+			a = l.N
+			if a.Op != OKEY || a.Left.Type == nil {
+				Fatal("initplan structlit")
+			}
+			addvalue(p, a.Left.Type.Width, nil, a.Right)
+		}
+
+	case OMAPLIT:
+		var a *Node
+		for l := n.List; l != nil; l = l.Next {
+			a = l.N
+			if a.Op != OKEY {
+				Fatal("initplan maplit")
+			}
+			addvalue(p, -1, a.Left, a.Right)
+		}
+	}
+}
+
+func addvalue(p *InitPlan, xoffset int64, key *Node, n *Node) {
+	// special case: zero can be dropped entirely
+	if iszero(n) {
+		p.Zero += n.Type.Width
+		return
+	}
+
+	// special case: inline struct and array (not slice) literals
+	if isvaluelit(n) {
+		initplan(n)
+		q := n.Initplan
+		var e *InitEntry
+		for i := 0; i < len(q.E); i++ {
+			e = entry(p)
+			*e = q.E[i]
+			e.Xoffset += xoffset
+		}
+
+		return
+	}
+
+	// add to plan
+	if n.Op == OLITERAL {
+		p.Lit += n.Type.Width
+	} else {
+		p.Expr += n.Type.Width
+	}
+
+	e := entry(p)
+	e.Xoffset = xoffset
+	e.Expr = n
+}
+
+func iszero(n *Node) bool {
+	switch n.Op {
+	case OLITERAL:
+		switch n.Val.Ctype {
+		default:
+			Dump("unexpected literal", n)
+			Fatal("iszero")
+
+		case CTNIL:
+			return true
+
+		case CTSTR:
+			return n.Val.U.(string) == ""
+
+		case CTBOOL:
+			return !n.Val.U.(bool)
+
+		case CTINT, CTRUNE:
+			return mpcmpfixc(n.Val.U.(*Mpint), 0) == 0
+
+		case CTFLT:
+			return mpcmpfltc(n.Val.U.(*Mpflt), 0) == 0
+
+		case CTCPLX:
+			return mpcmpfltc(&n.Val.U.(*Mpcplx).Real, 0) == 0 && mpcmpfltc(&n.Val.U.(*Mpcplx).Imag, 0) == 0
+		}
+
+	case OARRAYLIT:
+		if Isslice(n.Type) {
+			break
+		}
+		fallthrough
+	case OSTRUCTLIT:
+		for l := n.List; l != nil; l = l.Next {
+			if !iszero(l.N.Right) {
+				return false
+			}
+		}
+		return true
+	}
+
+	return false
+}
+
+func isvaluelit(n *Node) bool {
+	return (n.Op == OARRAYLIT && Isfixedarray(n.Type)) || n.Op == OSTRUCTLIT
+}
+
+func entry(p *InitPlan) *InitEntry {
+	p.E = append(p.E, InitEntry{})
+	return &p.E[len(p.E)-1]
+}
+
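+// gen_as_init reports whether the assignment n, which walk marked
+// with Dodata, could be emitted as static data instead of code.
+// It handles zeroing of globals, constants of basic type, and
+// slices of global arrays.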
+func gen_as_init(n *Node) bool {
+	var nr *Node
+	var nl *Node
+	var nam Node
+
+	if n.Dodata == 0 {
+		goto no
+	}
+
+	nr = n.Right
+	nl = n.Left
+	if nr == nil {
+		var nam Node
+		if !stataddr(&nam, nl) {
+			goto no
+		}
+		if nam.Class != PEXTERN {
+			goto no
+		}
+		return true
+	}
+
+	if nr.Type == nil || !Eqtype(nl.Type, nr.Type) {
+		goto no
+	}
+
+	if !stataddr(&nam, nl) {
+		goto no
+	}
+
+	if nam.Class != PEXTERN {
+		goto no
+	}
+
+	switch nr.Op {
+	default:
+		goto no
+
+	case OCONVNOP:
+		nr = nr.Left
+		if nr == nil || nr.Op != OSLICEARR {
+			goto no
+		}
+		fallthrough
+	case OSLICEARR:
+		if nr.Right.Op == OKEY && nr.Right.Left == nil && nr.Right.Right == nil {
+			nr = nr.Left
+			gused(nil) // in case the data is the dest of a goto
+			nl := nr
+			if nr == nil || nr.Op != OADDR {
+				goto no
+			}
+			nr = nr.Left
+			if nr == nil || nr.Op != ONAME {
+				goto no
+			}
+
+			// nr is the array being converted to a slice
+			if nr.Type == nil || nr.Type.Etype != TARRAY || nr.Type.Bound < 0 {
+				goto no
+			}
+
+			nam.Xoffset += int64(Array_array)
+			gdata(&nam, nl, int(Types[Tptr].Width))
+
+			nam.Xoffset += int64(Array_nel) - int64(Array_array)
+			var nod1 Node
+			Nodconst(&nod1, Types[TINT], nr.Type.Bound)
+			gdata(&nam, &nod1, Widthint)
+
+			nam.Xoffset += int64(Array_cap) - int64(Array_nel)
+			gdata(&nam, &nod1, Widthint)
+
+			return true
+		}
+
+		goto no
+
+	case OLITERAL:
+		break
+	}
+
+	switch nr.Type.Etype {
+	default:
+		goto no
+
+	case TBOOL,
+		TINT8,
+		TUINT8,
+		TINT16,
+		TUINT16,
+		TINT32,
+		TUINT32,
+		TINT64,
+		TUINT64,
+		TINT,
+		TUINT,
+		TUINTPTR,
+		TPTR32,
+		TPTR64,
+		TFLOAT32,
+		TFLOAT64:
+		gdata(&nam, nr, int(nr.Type.Width))
+
+	case TCOMPLEX64, TCOMPLEX128:
+		gdatacomplex(&nam, nr.Val.U.(*Mpcplx))
+
+	case TSTRING:
+		gdatastring(&nam, nr.Val.U.(string))
+	}
+
+	return true
+
+no:
+	if n.Dodata == 2 {
+		Dump("\ngen_as_init", n)
+		Fatal("gen_as_init couldn't make data statement")
+	}
+
+	return false
+}
diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go
new file mode 100644
index 0000000..b10a6b3
--- /dev/null
+++ b/src/cmd/compile/internal/gc/subr.go
@@ -0,0 +1,3571 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+	"bytes"
+	"cmd/internal/obj"
+	"crypto/md5"
+	"encoding/binary"
+	"fmt"
+	"os"
+	"sort"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+)
+
+type Error struct {
+	lineno int
+	seq    int
+	msg    string
+}
+
+var errors []Error
+
+func errorexit() {
+	Flusherrors()
+	if outfile != "" {
+		os.Remove(outfile)
+	}
+	os.Exit(2)
+}
+
+func parserline() int {
+	if parsing && theparser.Lookahead() > 0 {
+		// parser has one symbol lookahead
+		return int(prevlineno)
+	}
+	return int(lineno)
+}
+
+func adderrorname(n *Node) {
+	if n.Op != ODOT {
+		return
+	}
+	old := fmt.Sprintf("%v: undefined: %v\n", n.Line(), n.Left)
+	if len(errors) > 0 && int32(errors[len(errors)-1].lineno) == n.Lineno && errors[len(errors)-1].msg == old {
+		errors[len(errors)-1].msg = fmt.Sprintf("%v: undefined: %v in %v\n", n.Line(), n.Left, n)
+	}
+}
+
+func adderr(line int, format string, args ...interface{}) {
+	errors = append(errors, Error{
+		seq:    len(errors),
+		lineno: line,
+		msg:    fmt.Sprintf("%v: %s\n", Ctxt.Line(line), fmt.Sprintf(format, args...)),
+	})
+}
+
+type errcmp []Error
+
+func (x errcmp) Len() int {
+	return len(x)
+}
+
+func (x errcmp) Swap(i, j int) {
+	x[i], x[j] = x[j], x[i]
+}
+
+func (x errcmp) Less(i, j int) bool {
+	a := &x[i]
+	b := &x[j]
+	if a.lineno != b.lineno {
+		return a.lineno < b.lineno
+	}
+	if a.seq != b.seq {
+		return a.seq < b.seq
+	}
+	return stringsCompare(a.msg, b.msg) < 0
+}
+
+func Flusherrors() {
+	bstdout.Flush()
+	if len(errors) == 0 {
+		return
+	}
+	sort.Sort(errcmp(errors))
+	for i := 0; i < len(errors); i++ {
+		if i == 0 || errors[i].msg != errors[i-1].msg {
+			fmt.Printf("%s", errors[i].msg)
+		}
+	}
+	errors = errors[:0]
+}
+
+func hcrash() {
+	if Debug['h'] != 0 {
+		Flusherrors()
+		if outfile != "" {
+			os.Remove(outfile)
+		}
+		var x *int
+		*x = 0
+	}
+}
+
+func yyerrorl(line int, format string, args ...interface{}) {
+	adderr(line, format, args...)
+
+	hcrash()
+	nerrors++
+	if nsavederrors+nerrors >= 10 && Debug['e'] == 0 {
+		Flusherrors()
+		fmt.Printf("%v: too many errors\n", Ctxt.Line(line))
+		errorexit()
+	}
+}
+
+var yyerror_lastsyntax int
+
+func Yyerror(format string, args ...interface{}) {
+	msg := fmt.Sprintf(format, args...)
+	if strings.HasPrefix(msg, "syntax error") {
+		nsyntaxerrors++
+
+		// An unexpected EOF caused a syntax error. Use the previous
+		// line number since getc generated a fake newline character.
+		if curio.eofnl != 0 {
+			lexlineno = prevlineno
+		}
+
+		// only one syntax error per line
+		if int32(yyerror_lastsyntax) == lexlineno {
+			return
+		}
+		yyerror_lastsyntax = int(lexlineno)
+
+		// plain "syntax error" gets "near foo" added
+		if msg == "syntax error" {
+			yyerrorl(int(lexlineno), "syntax error near %s", lexbuf.String())
+			return
+		}
+
+		// The grammar has { and LBRACE but both show up as {.
+		// Rewrite syntax error referring to "{ or {" to say just "{".
+		// The grammar has ? and @ but only for reading imports.
+		// Silence them in ordinary errors.
+		msg = strings.Replace(msg, "{ or {", "{", -1)
+		msg = strings.Replace(msg, " or ?", "", -1)
+		msg = strings.Replace(msg, " or @", "", -1)
+
+		msg = strings.Replace(msg, "LLITERAL", litbuf, -1)
+
+		yyerrorl(int(lexlineno), "%s", msg)
+		return
+	}
+
+	adderr(parserline(), "%s", msg)
+
+	hcrash()
+	nerrors++
+	if nsavederrors+nerrors >= 10 && Debug['e'] == 0 {
+		Flusherrors()
+		fmt.Printf("%v: too many errors\n", Ctxt.Line(parserline()))
+		errorexit()
+	}
+}
+
+func Warn(fmt_ string, args ...interface{}) {
+	adderr(parserline(), fmt_, args...)
+
+	hcrash()
+}
+
+func Warnl(line int, fmt_ string, args ...interface{}) {
+	adderr(line, fmt_, args...)
+	if Debug['m'] != 0 {
+		Flusherrors()
+	}
+}
+
+func Fatal(fmt_ string, args ...interface{}) {
+	Flusherrors()
+
+	fmt.Printf("%v: internal compiler error: ", Ctxt.Line(int(lineno)))
+	fmt.Printf(fmt_, args...)
+	fmt.Printf("\n")
+
+	// If this is a released compiler version, ask for a bug report.
+	if strings.HasPrefix(obj.Getgoversion(), "release") {
+		fmt.Printf("\n")
+		fmt.Printf("Please file a bug report including a short program that triggers the error.\n")
+		fmt.Printf("https://golang.org/issue/new\n")
+	}
+
+	hcrash()
+	errorexit()
+}
+
+func linehistpragma(file string) {
+	if Debug['i'] != 0 {
+		fmt.Printf("pragma %s at line %v\n", file, Ctxt.Line(int(lexlineno)))
+	}
+	Ctxt.AddImport(file)
+}
+
+func linehistpush(file string) {
+	if Debug['i'] != 0 {
+		fmt.Printf("import %s at line %v\n", file, Ctxt.Line(int(lexlineno)))
+	}
+	Ctxt.LineHist.Push(int(lexlineno), file)
+}
+
+func linehistpop() {
+	if Debug['i'] != 0 {
+		fmt.Printf("end of import at line %v\n", Ctxt.Line(int(lexlineno)))
+	}
+	Ctxt.LineHist.Pop(int(lexlineno))
+}
+
+func linehistupdate(file string, off int) {
+	if Debug['i'] != 0 {
+		fmt.Printf("line %s at line %v\n", file, Ctxt.Line(int(lexlineno)))
+	}
+	Ctxt.LineHist.Update(int(lexlineno), file, off)
+}
+
+func setlineno(n *Node) int32 {
+	lno := lineno
+	if n != nil {
+		switch n.Op {
+		case ONAME, OTYPE, OPACK, OLITERAL:
+			break
+
+		default:
+			lineno = n.Lineno
+			if lineno == 0 {
+				if Debug['K'] != 0 {
+					Warn("setlineno: line 0")
+				}
+				lineno = lno
+			}
+		}
+	}
+
+	return lno
+}
+
+func Lookup(name string) *Sym {
+	return localpkg.Lookup(name)
+}
+
+func Lookupf(format string, a ...interface{}) *Sym {
+	return Lookup(fmt.Sprintf(format, a...))
+}
+
+func LookupBytes(name []byte) *Sym {
+	return localpkg.LookupBytes(name)
+}
+
+var initSyms []*Sym
+
+var nopkg = &Pkg{
+	Syms: make(map[string]*Sym),
+}
+
+func (pkg *Pkg) Lookup(name string) *Sym {
+	if pkg == nil {
+		pkg = nopkg
+	}
+	if s := pkg.Syms[name]; s != nil {
+		return s
+	}
+
+	s := &Sym{
+		Name:    name,
+		Pkg:     pkg,
+		Lexical: LNAME,
+	}
+	if name == "init" {
+		initSyms = append(initSyms, s)
+	}
+	pkg.Syms[name] = s
+	return s
+}
+
+func (pkg *Pkg) LookupBytes(name []byte) *Sym {
+	if pkg == nil {
+		pkg = nopkg
+	}
+	if s := pkg.Syms[string(name)]; s != nil {
+		return s
+	}
+	str := internString(name)
+	return pkg.Lookup(str)
+}
+
+func Pkglookup(name string, pkg *Pkg) *Sym {
+	return pkg.Lookup(name)
+}
+
+func restrictlookup(name string, pkg *Pkg) *Sym {
+	if !exportname(name) && pkg != localpkg {
+		Yyerror("cannot refer to unexported name %s.%s", pkg.Name, name)
+	}
+	return Pkglookup(name, pkg)
+}
+
+// find all the exported symbols in package opkg
+// and make them available in the current package
+func importdot(opkg *Pkg, pack *Node) {
+	var s1 *Sym
+	var pkgerror string
+
+	n := 0
+	for _, s := range opkg.Syms {
+		if s.Def == nil {
+			continue
+		}
+		if !exportname(s.Name) || strings.ContainsRune(s.Name, 0xb7) { // 0xb7 = center dot
+			continue
+		}
+		s1 = Lookup(s.Name)
+		if s1.Def != nil {
+			pkgerror = fmt.Sprintf("during import %q", opkg.Path)
+			redeclare(s1, pkgerror)
+			continue
+		}
+
+		s1.Def = s.Def
+		s1.Block = s.Block
+		s1.Def.Pack = pack
+		s1.Origpkg = opkg
+		n++
+	}
+
+	if n == 0 {
+		// can't possibly be used - there were no symbols
+		yyerrorl(int(pack.Lineno), "imported and not used: %q", opkg.Path)
+	}
+}
+
+func gethunk() {
+	nh := int32(NHUNK)
+	if thunk >= 10*NHUNK {
+		nh = 10 * NHUNK
+	}
+	hunk = string(make([]byte, nh))
+	nhunk = nh
+	thunk += nh
+}
+
+func Nod(op int, nleft *Node, nright *Node) *Node {
+	n := new(Node)
+	n.Op = uint8(op)
+	n.Left = nleft
+	n.Right = nright
+	n.Lineno = int32(parserline())
+	n.Xoffset = BADWIDTH
+	n.Orig = n
+	n.Curfn = Curfn
+	switch op {
+	case OCLOSURE, ODCLFUNC:
+		n.Func = new(Func)
+		n.Param = new(Param)
+	case ONAME:
+		n.Name = new(Name)
+		n.Param = new(Param)
+	case ODCLFIELD:
+		n.Param = new(Param)
+	}
+	return n
+}
+
+func saveorignode(n *Node) {
+	if n.Orig != nil {
+		return
+	}
+	norig := Nod(int(n.Op), nil, nil)
+	*norig = *n
+	n.Orig = norig
+}
+
+// ispaddedfield reports whether the given field
+// is followed by padding. For the case where t is
+// the last field, total gives the size of the enclosing struct.
+func ispaddedfield(t *Type, total int64) bool {
+	if t.Etype != TFIELD {
+		Fatal("ispaddedfield called non-field %v", t)
+	}
+	if t.Down == nil {
+		return t.Width+t.Type.Width != total
+	}
+	return t.Width+t.Type.Width != t.Down.Width
+}
+
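+// algtype1 returns the hash/equality algorithm kind for t: AMEM for
+// plain memory comparison, ANOEQ for incomparable types (recording
+// an offending component in *bad), specialized kinds for floats,
+// complex numbers, strings, and interfaces, and -1 when t needs a
+// generated comparison routine.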
+func algtype1(t *Type, bad **Type) int {
+	if bad != nil {
+		*bad = nil
+	}
+	if t.Broke != 0 {
+		return AMEM
+	}
+	if t.Noalg != 0 {
+		return ANOEQ
+	}
+
+	switch t.Etype {
+	// will be defined later.
+	case TANY, TFORW:
+		*bad = t
+
+		return -1
+
+	case TINT8,
+		TUINT8,
+		TINT16,
+		TUINT16,
+		TINT32,
+		TUINT32,
+		TINT64,
+		TUINT64,
+		TINT,
+		TUINT,
+		TUINTPTR,
+		TBOOL,
+		TPTR32,
+		TPTR64,
+		TCHAN,
+		TUNSAFEPTR:
+		return AMEM
+
+	case TFUNC, TMAP:
+		if bad != nil {
+			*bad = t
+		}
+		return ANOEQ
+
+	case TFLOAT32:
+		return AFLOAT32
+
+	case TFLOAT64:
+		return AFLOAT64
+
+	case TCOMPLEX64:
+		return ACPLX64
+
+	case TCOMPLEX128:
+		return ACPLX128
+
+	case TSTRING:
+		return ASTRING
+
+	case TINTER:
+		if isnilinter(t) {
+			return ANILINTER
+		}
+		return AINTER
+
+	case TARRAY:
+		if Isslice(t) {
+			if bad != nil {
+				*bad = t
+			}
+			return ANOEQ
+		}
+
+		a := algtype1(t.Type, bad)
+		if a == ANOEQ || a == AMEM {
+			if a == ANOEQ && bad != nil {
+				*bad = t
+			}
+			return a
+		}
+
+		return -1 // needs special compare
+
+	case TSTRUCT:
+		if t.Type != nil && t.Type.Down == nil && !isblanksym(t.Type.Sym) {
+			// One-field struct is same as that one field alone.
+			return algtype1(t.Type.Type, bad)
+		}
+
+		ret := AMEM
+		var a int
+		for t1 := t.Type; t1 != nil; t1 = t1.Down {
+			// All fields must be comparable.
+			a = algtype1(t1.Type, bad)
+
+			if a == ANOEQ {
+				return ANOEQ
+			}
+
+			// Blank fields, padded fields, fields with non-memory
+			// equality need special compare.
+			if a != AMEM || isblanksym(t1.Sym) || ispaddedfield(t1, t.Width) {
+				ret = -1
+				continue
+			}
+		}
+
+		return ret
+	}
+
+	Fatal("algtype1: unexpected type %v", t)
+	return 0
+}
+
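+// algtype refines the result of algtype1 by size: slices become
+// ASLICE, and AMEM/ANOEQ values of width 0, 1, 2, 4, 8, or 16
+// bytes become their sized variants (AMEM0, AMEM8, and so on).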
+func algtype(t *Type) int {
+	a := algtype1(t, nil)
+	if a == AMEM || a == ANOEQ {
+		if Isslice(t) {
+			return ASLICE
+		}
+		switch t.Width {
+		case 0:
+			return a + AMEM0 - AMEM
+
+		case 1:
+			return a + AMEM8 - AMEM
+
+		case 2:
+			return a + AMEM16 - AMEM
+
+		case 4:
+			return a + AMEM32 - AMEM
+
+		case 8:
+			return a + AMEM64 - AMEM
+
+		case 16:
+			return a + AMEM128 - AMEM
+		}
+	}
+
+	return a
+}
+
+func maptype(key *Type, val *Type) *Type {
+	if key != nil {
+		var bad *Type
+		atype := algtype1(key, &bad)
+		var mtype int
+		if bad == nil {
+			mtype = int(key.Etype)
+		} else {
+			mtype = int(bad.Etype)
+		}
+		switch mtype {
+		default:
+			if atype == ANOEQ {
+				Yyerror("invalid map key type %v", key)
+			}
+
+			// will be resolved later.
+		case TANY:
+			break
+
+		// map[key] used during definition of key.
+		// postpone check until key is fully defined.
+		// if there are multiple uses of map[key]
+		// before key is fully defined, the error
+		// will only be printed for the first one.
+		// good enough.
+		case TFORW:
+			if key.Maplineno == 0 {
+				key.Maplineno = lineno
+			}
+		}
+	}
+
+	t := typ(TMAP)
+	t.Down = key
+	t.Type = val
+	return t
+}
+
+func typ(et int) *Type {
+	t := new(Type)
+	t.Etype = uint8(et)
+	t.Width = BADWIDTH
+	t.Lineno = int(lineno)
+	t.Orig = t
+	return t
+}
+
+type methcmp []*Type
+
+func (x methcmp) Len() int {
+	return len(x)
+}
+
+func (x methcmp) Swap(i, j int) {
+	x[i], x[j] = x[j], x[i]
+}
+
+func (x methcmp) Less(i, j int) bool {
+	a := x[i]
+	b := x[j]
+	if a.Sym == nil && b.Sym == nil {
+		return false
+	}
+	if a.Sym == nil {
+		return true
+	}
+	if b.Sym == nil {
+		return false
+	}
+	k := stringsCompare(a.Sym.Name, b.Sym.Name)
+	if k != 0 {
+		return k < 0
+	}
+	if !exportname(a.Sym.Name) {
+		k := stringsCompare(a.Sym.Pkg.Path, b.Sym.Pkg.Path)
+		if k != 0 {
+			return k < 0
+		}
+	}
+
+	return false
+}
+
+func sortinter(t *Type) *Type {
+	if t.Type == nil || t.Type.Down == nil {
+		return t
+	}
+
+	i := 0
+	for f := t.Type; f != nil; f = f.Down {
+		i++
+	}
+	a := make([]*Type, i)
+	i = 0
+	var f *Type
+	for f = t.Type; f != nil; f = f.Down {
+		a[i] = f
+		i++
+	}
+	sort.Sort(methcmp(a[:i]))
+	for i = len(a) - 1; i >= 0; i-- {
+		a[i].Down = f
+		f = a[i]
+	}
+
+	t.Type = f
+	return t
+}
+
+func Nodintconst(v int64) *Node {
+	c := Nod(OLITERAL, nil, nil)
+	c.Addable = true
+	c.Val.U = new(Mpint)
+	Mpmovecfix(c.Val.U.(*Mpint), v)
+	c.Val.Ctype = CTINT
+	c.Type = Types[TIDEAL]
+	ullmancalc(c)
+	return c
+}
+
+func nodfltconst(v *Mpflt) *Node {
+	c := Nod(OLITERAL, nil, nil)
+	c.Addable = true
+	c.Val.U = newMpflt()
+	mpmovefltflt(c.Val.U.(*Mpflt), v)
+	c.Val.Ctype = CTFLT
+	c.Type = Types[TIDEAL]
+	ullmancalc(c)
+	return c
+}
+
+func Nodconst(n *Node, t *Type, v int64) {
+	*n = Node{}
+	n.Op = OLITERAL
+	n.Addable = true
+	ullmancalc(n)
+	n.Val.U = new(Mpint)
+	Mpmovecfix(n.Val.U.(*Mpint), v)
+	n.Val.Ctype = CTINT
+	n.Type = t
+
+	if Isfloat[t.Etype] {
+		Fatal("nodconst: bad type %v", t)
+	}
+}
+
+func nodnil() *Node {
+	c := Nodintconst(0)
+	c.Val.Ctype = CTNIL
+	c.Type = Types[TNIL]
+	return c
+}
+
+func Nodbool(b bool) *Node {
+	c := Nodintconst(0)
+	c.Val.Ctype = CTBOOL
+	c.Val.U = b
+	c.Type = idealbool
+	return c
+}
+
+func aindex(b *Node, t *Type) *Type {
+	bound := int64(-1) // open bound
+	typecheck(&b, Erv)
+	if b != nil {
+		switch consttype(b) {
+		default:
+			Yyerror("array bound must be an integer expression")
+
+		case CTINT, CTRUNE:
+			bound = Mpgetfix(b.Val.U.(*Mpint))
+			if bound < 0 {
+				Yyerror("array bound must be non-negative")
+			}
+		}
+	}
+
+	// fixed array
+	r := typ(TARRAY)
+
+	r.Type = t
+	r.Bound = bound
+	return r
+}
+
+func treecopy(n *Node) *Node {
+	if n == nil {
+		return nil
+	}
+
+	var m *Node
+	switch n.Op {
+	default:
+		m = Nod(OXXX, nil, nil)
+		*m = *n
+		m.Orig = m
+		m.Left = treecopy(n.Left)
+		m.Right = treecopy(n.Right)
+		m.List = listtreecopy(n.List)
+		if m.Defn != nil {
+			panic("abort")
+		}
+
+	case ONONAME:
+		if n.Sym == Lookup("iota") {
+			// Not sure yet whether this is the real iota,
+			// but make a copy of the Node just in case,
+			// so that all the copies of this const definition
+			// don't have the same iota value.
+			m = Nod(OXXX, nil, nil)
+
+			*m = *n
+			m.Iota = iota_
+			break
+		}
+		fallthrough
+
+	case ONAME, OLITERAL, OTYPE:
+		m = n
+	}
+
+	return m
+}
+
+func isnil(n *Node) bool {
+	if n == nil {
+		return false
+	}
+	if n.Op != OLITERAL {
+		return false
+	}
+	if n.Val.Ctype != CTNIL {
+		return false
+	}
+	return true
+}
+
+func isptrto(t *Type, et int) bool {
+	if t == nil {
+		return false
+	}
+	if !Isptr[t.Etype] {
+		return false
+	}
+	t = t.Type
+	if t == nil {
+		return false
+	}
+	if int(t.Etype) != et {
+		return false
+	}
+	return true
+}
+
+func Istype(t *Type, et int) bool {
+	return t != nil && int(t.Etype) == et
+}
+
+func Isfixedarray(t *Type) bool {
+	return t != nil && t.Etype == TARRAY && t.Bound >= 0
+}
+
+func Isslice(t *Type) bool {
+	return t != nil && t.Etype == TARRAY && t.Bound < 0
+}
+
+func isblank(n *Node) bool {
+	if n == nil {
+		return false
+	}
+	return isblanksym(n.Sym)
+}
+
+func isblanksym(s *Sym) bool {
+	return s != nil && s.Name == "_"
+}
+
+func Isinter(t *Type) bool {
+	return t != nil && t.Etype == TINTER
+}
+
+func isnilinter(t *Type) bool {
+	if !Isinter(t) {
+		return false
+	}
+	if t.Type != nil {
+		return false
+	}
+	return true
+}
+
+func isideal(t *Type) bool {
+	if t == nil {
+		return false
+	}
+	if t == idealstring || t == idealbool {
+		return true
+	}
+	switch t.Etype {
+	case TNIL, TIDEAL:
+		return true
+	}
+
+	return false
+}
+
+/*
+ * given receiver of type t (t == r or t == *r)
+ * return type to hang methods off (r).
+ */
+func methtype(t *Type, mustname int) *Type {
+	if t == nil {
+		return nil
+	}
+
+	// strip away pointer if it's there
+	if Isptr[t.Etype] {
+		if t.Sym != nil {
+			return nil
+		}
+		t = t.Type
+		if t == nil {
+			return nil
+		}
+	}
+
+	// need a type name
+	if t.Sym == nil && (mustname != 0 || t.Etype != TSTRUCT) {
+		return nil
+	}
+
+	// check types
+	if !issimple[t.Etype] {
+		switch t.Etype {
+		default:
+			return nil
+
+		case TSTRUCT,
+			TARRAY,
+			TMAP,
+			TCHAN,
+			TSTRING,
+			TFUNC:
+			break
+		}
+	}
+
+	return t
+}
+
+func cplxsubtype(et int) int {
+	switch et {
+	case TCOMPLEX64:
+		return TFLOAT32
+
+	case TCOMPLEX128:
+		return TFLOAT64
+	}
+
+	Fatal("cplxsubtype: %v", Econv(et, 0))
+	return 0
+}
+
+func eqnote(a, b *string) bool {
+	return a == b || a != nil && b != nil && *a == *b
+}
+
+type TypePairList struct {
+	t1   *Type
+	t2   *Type
+	next *TypePairList
+}
+
+func onlist(l *TypePairList, t1 *Type, t2 *Type) bool {
+	for ; l != nil; l = l.next {
+		if (l.t1 == t1 && l.t2 == t2) || (l.t1 == t2 && l.t2 == t1) {
+			return true
+		}
+	}
+	return false
+}
+
+// Eqtype reports whether t1 and t2 are identical, following the spec rules.
+//
+// Any cyclic type must go through a named type, and if one is
+// named, it is only identical to the other if they are the same
+// pointer (t1 == t2), so there's no chance of chasing cycles
+// ad infinitum, so no need for a depth counter.
+func Eqtype(t1 *Type, t2 *Type) bool {
+	return eqtype1(t1, t2, nil)
+}
+
+func eqtype1(t1 *Type, t2 *Type, assumed_equal *TypePairList) bool {
+	if t1 == t2 {
+		return true
+	}
+	if t1 == nil || t2 == nil || t1.Etype != t2.Etype {
+		return false
+	}
+	if t1.Sym != nil || t2.Sym != nil {
+		// Special case: we keep byte and uint8 separate
+		// for error messages.  Treat them as equal.
+		switch t1.Etype {
+		case TUINT8:
+			if (t1 == Types[TUINT8] || t1 == bytetype) && (t2 == Types[TUINT8] || t2 == bytetype) {
+				return true
+			}
+
+		case TINT, TINT32:
+			if (t1 == Types[runetype.Etype] || t1 == runetype) && (t2 == Types[runetype.Etype] || t2 == runetype) {
+				return true
+			}
+		}
+
+		return false
+	}
+
+	if onlist(assumed_equal, t1, t2) {
+		return true
+	}
+	var l TypePairList
+	l.next = assumed_equal
+	l.t1 = t1
+	l.t2 = t2
+
+	switch t1.Etype {
+	case TINTER, TSTRUCT:
+		t1 = t1.Type
+		t2 = t2.Type
+		for ; t1 != nil && t2 != nil; t1, t2 = t1.Down, t2.Down {
+			if t1.Etype != TFIELD || t2.Etype != TFIELD {
+				Fatal("struct/interface missing field: %v %v", t1, t2)
+			}
+			if t1.Sym != t2.Sym || t1.Embedded != t2.Embedded || !eqtype1(t1.Type, t2.Type, &l) || !eqnote(t1.Note, t2.Note) {
+				return false
+			}
+		}
+
+		if t1 == nil && t2 == nil {
+			return true
+		}
+		return false
+
+		// Loop over structs: receiver, in, out.
+	case TFUNC:
+		t1 = t1.Type
+		t2 = t2.Type
+		for ; t1 != nil && t2 != nil; t1, t2 = t1.Down, t2.Down {
+			if t1.Etype != TSTRUCT || t2.Etype != TSTRUCT {
+				Fatal("func missing struct: %v %v", t1, t2)
+			}
+
+			// Loop over fields in structs, ignoring argument names.
+			ta := t1.Type
+			tb := t2.Type
+			for ; ta != nil && tb != nil; ta, tb = ta.Down, tb.Down {
+				if ta.Etype != TFIELD || tb.Etype != TFIELD {
+					Fatal("func struct missing field: %v %v", ta, tb)
+				}
+				if ta.Isddd != tb.Isddd || !eqtype1(ta.Type, tb.Type, &l) {
+					return false
+				}
+			}
+
+			if ta != nil || tb != nil {
+				return false
+			}
+		}
+
+		if t1 == nil && t2 == nil {
+			return true
+		}
+		return false
+
+	case TARRAY:
+		if t1.Bound != t2.Bound {
+			return false
+		}
+
+	case TCHAN:
+		if t1.Chan != t2.Chan {
+			return false
+		}
+	}
+
+	if eqtype1(t1.Down, t2.Down, &l) && eqtype1(t1.Type, t2.Type, &l) {
+		return true
+	}
+	return false
+}
+
+// Are t1 and t2 equal struct types when field names are ignored?
+// For deciding whether the result struct from g can be copied
+// directly when compiling f(g()).
+func eqtypenoname(t1 *Type, t2 *Type) bool {
+	if t1 == nil || t2 == nil || t1.Etype != TSTRUCT || t2.Etype != TSTRUCT {
+		return false
+	}
+
+	t1 = t1.Type
+	t2 = t2.Type
+	for {
+		if !Eqtype(t1, t2) {
+			return false
+		}
+		if t1 == nil {
+			return true
+		}
+		t1 = t1.Down
+		t2 = t2.Down
+	}
+}
+
+// Is type src assignment compatible with type dst?
+// If so, return op code to use in conversion.
+// If not, return 0.
+func assignop(src *Type, dst *Type, why *string) int {
+	if why != nil {
+		*why = ""
+	}
+
+	// TODO(rsc,lvd): This behaves poorly in the presence of inlining.
+	// https://golang.org/issue/2795
+	if safemode != 0 && importpkg == nil && src != nil && src.Etype == TUNSAFEPTR {
+		Yyerror("cannot use unsafe.Pointer")
+		errorexit()
+	}
+
+	if src == dst {
+		return OCONVNOP
+	}
+	if src == nil || dst == nil || src.Etype == TFORW || dst.Etype == TFORW || src.Orig == nil || dst.Orig == nil {
+		return 0
+	}
+
+	// 1. src type is identical to dst.
+	if Eqtype(src, dst) {
+		return OCONVNOP
+	}
+
+	// 2. src and dst have identical underlying types
+	// and either src or dst is not a named type or
+	// both are empty interface types.
+	// For assignable but different non-empty interface types,
+	// we want to recompute the itab.
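+	// For example, a value of the named slice type "type MyInts []int"
+	// is assignable to a variable of type []int (and vice versa),
+	// since at most one side is named.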
+	if Eqtype(src.Orig, dst.Orig) && (src.Sym == nil || dst.Sym == nil || isnilinter(src)) {
+		return OCONVNOP
+	}
+
+	// 3. dst is an interface type and src implements dst.
+	if dst.Etype == TINTER && src.Etype != TNIL {
+		var missing *Type
+		var ptr int
+		var have *Type
+		if implements(src, dst, &missing, &have, &ptr) {
+			return OCONVIFACE
+		}
+
+		// we'll have complained about this method anyway; suppress spurious messages.
+		if have != nil && have.Sym == missing.Sym && (have.Type.Broke != 0 || missing.Type.Broke != 0) {
+			return OCONVIFACE
+		}
+
+		if why != nil {
+			if isptrto(src, TINTER) {
+				*why = fmt.Sprintf(":\n\t%v is pointer to interface, not interface", src)
+			} else if have != nil && have.Sym == missing.Sym && have.Nointerface {
+				*why = fmt.Sprintf(":\n\t%v does not implement %v (%v method is marked 'nointerface')", src, dst, missing.Sym)
+			} else if have != nil && have.Sym == missing.Sym {
+				*why = fmt.Sprintf(":\n\t%v does not implement %v (wrong type for %v method)\n"+"\t\thave %v%v\n\t\twant %v%v", src, dst, missing.Sym, have.Sym, Tconv(have.Type, obj.FmtShort|obj.FmtByte), missing.Sym, Tconv(missing.Type, obj.FmtShort|obj.FmtByte))
+			} else if ptr != 0 {
+				*why = fmt.Sprintf(":\n\t%v does not implement %v (%v method has pointer receiver)", src, dst, missing.Sym)
+			} else if have != nil {
+				*why = fmt.Sprintf(":\n\t%v does not implement %v (missing %v method)\n"+"\t\thave %v%v\n\t\twant %v%v", src, dst, missing.Sym, have.Sym, Tconv(have.Type, obj.FmtShort|obj.FmtByte), missing.Sym, Tconv(missing.Type, obj.FmtShort|obj.FmtByte))
+			} else {
+				*why = fmt.Sprintf(":\n\t%v does not implement %v (missing %v method)", src, dst, missing.Sym)
+			}
+		}
+
+		return 0
+	}
+
+	if isptrto(dst, TINTER) {
+		if why != nil {
+			*why = fmt.Sprintf(":\n\t%v is pointer to interface, not interface", dst)
+		}
+		return 0
+	}
+
+	if src.Etype == TINTER && dst.Etype != TBLANK {
+		var have *Type
+		var ptr int
+		var missing *Type
+		if why != nil && implements(dst, src, &missing, &have, &ptr) {
+			*why = ": need type assertion"
+		}
+		return 0
+	}
+
+	// 4. src is a bidirectional channel value, dst is a channel type,
+	// src and dst have identical element types, and
+	// either src or dst is not a named type.
+	if src.Etype == TCHAN && src.Chan == Cboth && dst.Etype == TCHAN {
+		if Eqtype(src.Type, dst.Type) && (src.Sym == nil || dst.Sym == nil) {
+			return OCONVNOP
+		}
+	}
+
+	// 5. src is the predeclared identifier nil and dst is a nillable type.
+	if src.Etype == TNIL {
+		switch dst.Etype {
+		case TARRAY:
+			if dst.Bound != -100 { // not slice
+				break
+			}
+			fallthrough
+
+		case TPTR32,
+			TPTR64,
+			TFUNC,
+			TMAP,
+			TCHAN,
+			TINTER:
+			return OCONVNOP
+		}
+	}
+
+	// 6. rule about untyped constants - already converted by defaultlit.
+
+	// 7. Any typed value can be assigned to the blank identifier.
+	if dst.Etype == TBLANK {
+		return OCONVNOP
+	}
+
+	return 0
+}
+
+// Can we convert a value of type src to a value of type dst?
+// If so, return op code to use in conversion (maybe OCONVNOP).
+// If not, return 0.
+func convertop(src *Type, dst *Type, why *string) int {
+	if why != nil {
+		*why = ""
+	}
+
+	if src == dst {
+		return OCONVNOP
+	}
+	if src == nil || dst == nil {
+		return 0
+	}
+
+	// 1. src can be assigned to dst.
+	op := assignop(src, dst, why)
+	if op != 0 {
+		return op
+	}
+
+	// The rules for interfaces are no different in conversions
+	// than assignments.  If interfaces are involved, stop now
+	// with the good message from assignop.
+	// Otherwise clear the error.
+	if src.Etype == TINTER || dst.Etype == TINTER {
+		return 0
+	}
+	if why != nil {
+		*why = ""
+	}
+
+	// 2. src and dst have identical underlying types.
+	if Eqtype(src.Orig, dst.Orig) {
+		return OCONVNOP
+	}
+
+	// 3. src and dst are unnamed pointer types
+	// and their base types have identical underlying types.
+	if Isptr[src.Etype] && Isptr[dst.Etype] && src.Sym == nil && dst.Sym == nil {
+		if Eqtype(src.Type.Orig, dst.Type.Orig) {
+			return OCONVNOP
+		}
+	}
+
+	// 4. src and dst are both integer or floating point types.
+	if (Isint[src.Etype] || Isfloat[src.Etype]) && (Isint[dst.Etype] || Isfloat[dst.Etype]) {
+		if Simtype[src.Etype] == Simtype[dst.Etype] {
+			return OCONVNOP
+		}
+		return OCONV
+	}
+
+	// 5. src and dst are both complex types.
+	if Iscomplex[src.Etype] && Iscomplex[dst.Etype] {
+		if Simtype[src.Etype] == Simtype[dst.Etype] {
+			return OCONVNOP
+		}
+		return OCONV
+	}
+
+	// 6. src is an integer or has type []byte or []rune
+	// and dst is a string type.
+	if Isint[src.Etype] && dst.Etype == TSTRING {
+		return ORUNESTR
+	}
+
+	if Isslice(src) && dst.Etype == TSTRING {
+		if src.Type.Etype == bytetype.Etype {
+			return OARRAYBYTESTR
+		}
+		if src.Type.Etype == runetype.Etype {
+			return OARRAYRUNESTR
+		}
+	}
+
+	// 7. src is a string and dst is []byte or []rune.
+	// String to slice.
+	if src.Etype == TSTRING && Isslice(dst) {
+		if dst.Type.Etype == bytetype.Etype {
+			return OSTRARRAYBYTE
+		}
+		if dst.Type.Etype == runetype.Etype {
+			return OSTRARRAYRUNE
+		}
+	}
+
+	// 8. src is a pointer or uintptr and dst is unsafe.Pointer.
+	if (Isptr[src.Etype] || src.Etype == TUINTPTR) && dst.Etype == TUNSAFEPTR {
+		return OCONVNOP
+	}
+
+	// 9. src is unsafe.Pointer and dst is a pointer or uintptr.
+	if src.Etype == TUNSAFEPTR && (Isptr[dst.Etype] || dst.Etype == TUINTPTR) {
+		return OCONVNOP
+	}
+
+	return 0
+}
+
+func assignconv(n *Node, t *Type, context string) *Node {
+	return assignconvfn(n, t, func() string { return context })
+}
+
+// Convert node n for assignment to type t.
+func assignconvfn(n *Node, t *Type, context func() string) *Node {
+	if n == nil || n.Type == nil || n.Type.Broke != 0 {
+		return n
+	}
+
+	if t.Etype == TBLANK && n.Type.Etype == TNIL {
+		Yyerror("use of untyped nil")
+	}
+
+	old := n
+	old.Diag++ // silence errors about n; we'll issue one below
+	defaultlit(&n, t)
+	old.Diag--
+	if t.Etype == TBLANK {
+		return n
+	}
+
+	// Convert ideal bool from comparison to plain bool
+	// if the next step is non-bool (like interface{}).
+	if n.Type == idealbool && t.Etype != TBOOL {
+		if n.Op == ONAME || n.Op == OLITERAL {
+			r := Nod(OCONVNOP, n, nil)
+			r.Type = Types[TBOOL]
+			r.Typecheck = 1
+			r.Implicit = true
+			n = r
+		}
+	}
+
+	if Eqtype(n.Type, t) {
+		return n
+	}
+
+	var why string
+	op := assignop(n.Type, t, &why)
+	if op == 0 {
+		Yyerror("cannot use %v as type %v in %s%s", Nconv(n, obj.FmtLong), t, context(), why)
+		op = OCONV
+	}
+
+	r := Nod(op, n, nil)
+	r.Type = t
+	r.Typecheck = 1
+	r.Implicit = true
+	r.Orig = n.Orig
+	return r
+}
+
+// substArgTypes substitutes the given list of types for
+// successive occurrences of the "any" placeholder in the
+// type syntax expression n.Type.
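+//
+// The compiler's builtin runtime declarations spell some parameter
+// types as "any" (the slice element type in growslice, for example);
+// substArgTypes is how call sites substitute the concrete types.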
+func substArgTypes(n *Node, types ...*Type) {
+	for _, t := range types {
+		dowidth(t)
+	}
+	substAny(&n.Type, &types)
+	if len(types) > 0 {
+		Fatal("substArgTypes: too many argument types")
+	}
+}
+
+// substAny walks *tp, replacing instances of "any" with successive
+// elements removed from types.
+func substAny(tp **Type, types *[]*Type) {
+	for {
+		t := *tp
+		if t == nil {
+			return
+		}
+		if t.Etype == TANY && t.Copyany != 0 {
+			if len(*types) == 0 {
+				Fatal("substArgTypes: not enough argument types")
+			}
+			*tp = (*types)[0]
+			*types = (*types)[1:]
+		}
+
+		switch t.Etype {
+		case TPTR32, TPTR64, TCHAN, TARRAY:
+			tp = &t.Type
+			continue
+
+		case TMAP:
+			substAny(&t.Down, types)
+			tp = &t.Type
+			continue
+
+		case TFUNC:
+			substAny(&t.Type, types)
+			substAny(&t.Type.Down.Down, types)
+			substAny(&t.Type.Down, types)
+
+		case TSTRUCT:
+			for t = t.Type; t != nil; t = t.Down {
+				substAny(&t.Type, types)
+			}
+		}
+		return
+	}
+}
+
+/*
+ * Is this a 64-bit type?
+ */
+func Is64(t *Type) bool {
+	if t == nil {
+		return false
+	}
+	switch Simtype[t.Etype] {
+	case TINT64, TUINT64, TPTR64:
+		return true
+	}
+
+	return false
+}
+
+/*
+ * Is a conversion between t1 and t2 a no-op?
+ */
+func Noconv(t1 *Type, t2 *Type) bool {
+	e1 := int(Simtype[t1.Etype])
+	e2 := int(Simtype[t2.Etype])
+
+	switch e1 {
+	case TINT8, TUINT8:
+		return e2 == TINT8 || e2 == TUINT8
+
+	case TINT16, TUINT16:
+		return e2 == TINT16 || e2 == TUINT16
+
+	case TINT32, TUINT32, TPTR32:
+		return e2 == TINT32 || e2 == TUINT32 || e2 == TPTR32
+
+	case TINT64, TUINT64, TPTR64:
+		return e2 == TINT64 || e2 == TUINT64 || e2 == TPTR64
+
+	case TFLOAT32:
+		return e2 == TFLOAT32
+
+	case TFLOAT64:
+		return e2 == TFLOAT64
+	}
+
+	return false
+}
+
+func shallow(t *Type) *Type {
+	if t == nil {
+		return nil
+	}
+	nt := typ(0)
+	*nt = *t
+	if t.Orig == t {
+		nt.Orig = nt
+	}
+	return nt
+}
+
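+// deep copies t deeply enough that every reachable "any" placeholder
+// (marked Copyany) sits in freshly allocated memory, sharing the rest;
+// substAny can then rewrite the copy without touching the original.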
+func deep(t *Type) *Type {
+	if t == nil {
+		return nil
+	}
+
+	var nt *Type
+	switch t.Etype {
+	default:
+		nt = t // share from here down
+
+	case TANY:
+		nt = shallow(t)
+		nt.Copyany = 1
+
+	case TPTR32, TPTR64, TCHAN, TARRAY:
+		nt = shallow(t)
+		nt.Type = deep(t.Type)
+
+	case TMAP:
+		nt = shallow(t)
+		nt.Down = deep(t.Down)
+		nt.Type = deep(t.Type)
+
+	case TFUNC:
+		nt = shallow(t)
+		nt.Type = deep(t.Type)
+		nt.Type.Down = deep(t.Type.Down)
+		nt.Type.Down.Down = deep(t.Type.Down.Down)
+
+	case TSTRUCT:
+		nt = shallow(t)
+		nt.Type = shallow(t.Type)
+		xt := nt.Type
+
+		for t = t.Type; t != nil; t = t.Down {
+			xt.Type = deep(t.Type)
+			xt.Down = shallow(t.Down)
+			xt = xt.Down
+		}
+	}
+
+	return nt
+}
+
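+// syslook returns the declaration node for the runtime function name.
+// If copy is nonzero, the result is a fresh node with a deep-copied
+// type, which the caller may then specialize via substArgTypes.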
+func syslook(name string, copy int) *Node {
+	s := Pkglookup(name, Runtimepkg)
+	if s == nil || s.Def == nil {
+		Fatal("syslook: can't find runtime.%s", name)
+	}
+
+	if copy == 0 {
+		return s.Def
+	}
+
+	n := Nod(0, nil, nil)
+	*n = *s.Def
+	n.Type = deep(s.Def.Type)
+
+	return n
+}
+
+/*
+ * compute a hash value for type t.
+ * if t is a method type, ignore the receiver
+ * so that the hash can be used in interface checks.
+ * %T already contains
+ * all the necessary logic to generate a representation
+ * of the type that completely describes it.
+ * using Tconv here avoids duplicating that code.
+ * using md5 here is overkill, but I got tired of
+ * accidental collisions making the runtime think
+ * two types are equal when they really aren't.
+ */
+func typehash(t *Type) uint32 {
+	var p string
+
+	if t.Thistuple != 0 {
+		// hide method receiver from Tpretty
+		t.Thistuple = 0
+
+		p = Tconv(t, obj.FmtLeft|obj.FmtUnsigned)
+		t.Thistuple = 1
+	} else {
+		p = Tconv(t, obj.FmtLeft|obj.FmtUnsigned)
+	}
+
+	//print("typehash: %s\n", p);
+	h := md5.Sum([]byte(p))
+	return binary.LittleEndian.Uint32(h[:4])
+}
+
+var initPtrtoDone bool
+
+var (
+	ptrToUint8  *Type
+	ptrToAny    *Type
+	ptrToString *Type
+	ptrToBool   *Type
+	ptrToInt32  *Type
+)
+
+func initPtrto() {
+	ptrToUint8 = ptrto1(Types[TUINT8])
+	ptrToAny = ptrto1(Types[TANY])
+	ptrToString = ptrto1(Types[TSTRING])
+	ptrToBool = ptrto1(Types[TBOOL])
+	ptrToInt32 = ptrto1(Types[TINT32])
+}
+
+func ptrto1(t *Type) *Type {
+	t1 := typ(Tptr)
+	t1.Type = t
+	t1.Width = int64(Widthptr)
+	t1.Align = uint8(Widthptr)
+	return t1
+}
+
+// Ptrto returns the Type *t.
+// The returned struct must not be modified.
+func Ptrto(t *Type) *Type {
+	if Tptr == 0 {
+		Fatal("ptrto: no tptr")
+	}
+	// Reduce allocations by pre-creating common cases.
+	if !initPtrtoDone {
+		initPtrto()
+		initPtrtoDone = true
+	}
+	switch t {
+	case Types[TUINT8]:
+		return ptrToUint8
+	case Types[TINT32]:
+		return ptrToInt32
+	case Types[TANY]:
+		return ptrToAny
+	case Types[TSTRING]:
+		return ptrToString
+	case Types[TBOOL]:
+		return ptrToBool
+	}
+	return ptrto1(t)
+}
+
+func frame(context int) {
+	var l *NodeList
+
+	if context != 0 {
+		fmt.Printf("--- external frame ---\n")
+		l = externdcl
+	} else if Curfn != nil {
+		fmt.Printf("--- %v frame ---\n", Curfn.Nname.Sym)
+		l = Curfn.Func.Dcl
+	} else {
+		return
+	}
+
+	var n *Node
+	var w int64
+	for ; l != nil; l = l.Next {
+		n = l.N
+		w = -1
+		if n.Type != nil {
+			w = n.Type.Width
+		}
+		switch n.Op {
+		case ONAME:
+			fmt.Printf("%v %v G%d %v width=%d\n", Oconv(int(n.Op), 0), n.Sym, n.Vargen, n.Type, w)
+
+		case OTYPE:
+			fmt.Printf("%v %v width=%d\n", Oconv(int(n.Op), 0), n.Type, w)
+		}
+	}
+}
+
+/*
+ * calculate the sethi-ullman number:
+ * roughly how many registers are needed to
+ * compile a node. used to compile the
+ * hardest side first to minimize registers.
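+ * for example, a+b where each side alone needs one
+ * register takes two registers overall: compute one
+ * side, hold it, then compute the other.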
+ */
+func ullmancalc(n *Node) {
+	if n == nil {
+		return
+	}
+
+	var ul int
+	var ur int
+	if n.Ninit != nil {
+		ul = UINF
+		goto out
+	}
+
+	switch n.Op {
+	case OREGISTER, OLITERAL, ONAME:
+		ul = 1
+		if n.Class == PPARAMREF || (n.Class&PHEAP != 0) {
+			ul++
+		}
+		goto out
+
+	case OCALL, OCALLFUNC, OCALLMETH, OCALLINTER, OASWB:
+		ul = UINF
+		goto out
+
+		// hard with race detector
+	case OANDAND, OOROR:
+		if flag_race != 0 {
+			ul = UINF
+			goto out
+		}
+	}
+
+	ul = 1
+	if n.Left != nil {
+		ul = int(n.Left.Ullman)
+	}
+	ur = 1
+	if n.Right != nil {
+		ur = int(n.Right.Ullman)
+	}
+	if ul == ur {
+		ul++
+	}
+	if ur > ul {
+		ul = ur
+	}
+
+out:
+	if ul > 200 {
+		ul = 200 // clamp to uchar with room to grow
+	}
+	n.Ullman = uint8(ul)
+}
+
+func badtype(o int, tl *Type, tr *Type) {
+	fmt_ := ""
+	if tl != nil {
+		fmt_ += fmt.Sprintf("\n\t%v", tl)
+	}
+	if tr != nil {
+		fmt_ += fmt.Sprintf("\n\t%v", tr)
+	}
+
+	// common mistake: *struct and *interface.
+	if tl != nil && tr != nil && Isptr[tl.Etype] && Isptr[tr.Etype] {
+		if tl.Type.Etype == TSTRUCT && tr.Type.Etype == TINTER {
+			fmt_ += "\n\t(*struct vs *interface)"
+		} else if tl.Type.Etype == TINTER && tr.Type.Etype == TSTRUCT {
+			fmt_ += "\n\t(*interface vs *struct)"
+		}
+	}
+
+	Yyerror("illegal types for operand: %v%s", Oconv(int(o), 0), fmt_)
+}
+
+/*
+ * iterator to walk a structure declaration
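+ *
+ * typical use (see structargs below):
+ *	var s Iter
+ *	for t := Structfirst(&s, tl); t != nil; t = structnext(&s) { ... }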
+ */
+func Structfirst(s *Iter, nn **Type) *Type {
+	var t *Type
+
+	n := *nn
+	if n == nil {
+		goto bad
+	}
+
+	switch n.Etype {
+	default:
+		goto bad
+
+	case TSTRUCT, TINTER, TFUNC:
+		break
+	}
+
+	t = n.Type
+	if t == nil {
+		return nil
+	}
+
+	if t.Etype != TFIELD {
+		Fatal("structfirst: not field %v", t)
+	}
+
+	s.T = t
+	return t
+
+bad:
+	Fatal("structfirst: not struct %v", n)
+
+	return nil
+}
+
+func structnext(s *Iter) *Type {
+	n := s.T
+	t := n.Down
+	if t == nil {
+		return nil
+	}
+
+	if t.Etype != TFIELD {
+		Fatal("structnext: not struct %v", n)
+
+		return nil
+	}
+
+	s.T = t
+	return t
+}
+
+/*
+ * iterator over the receiver ("this") and input arguments of a function type
+ */
+func funcfirst(s *Iter, t *Type) *Type {
+	var fp *Type
+
+	if t == nil {
+		goto bad
+	}
+
+	if t.Etype != TFUNC {
+		goto bad
+	}
+
+	s.Tfunc = t
+	s.Done = 0
+	fp = Structfirst(s, getthis(t))
+	if fp == nil {
+		s.Done = 1
+		fp = Structfirst(s, getinarg(t))
+	}
+
+	return fp
+
+bad:
+	Fatal("funcfirst: not func %v", t)
+	return nil
+}
+
+func funcnext(s *Iter) *Type {
+	fp := structnext(s)
+	if fp == nil && s.Done == 0 {
+		s.Done = 1
+		fp = Structfirst(s, getinarg(s.Tfunc))
+	}
+
+	return fp
+}
+
+func getthis(t *Type) **Type {
+	if t.Etype != TFUNC {
+		Fatal("getthis: not a func %v", t)
+	}
+	return &t.Type
+}
+
+func Getoutarg(t *Type) **Type {
+	if t.Etype != TFUNC {
+		Fatal("getoutarg: not a func %v", t)
+	}
+	return &t.Type.Down
+}
+
+func getinarg(t *Type) **Type {
+	if t.Etype != TFUNC {
+		Fatal("getinarg: not a func %v", t)
+	}
+	return &t.Type.Down.Down
+}
+
+func getthisx(t *Type) *Type {
+	return *getthis(t)
+}
+
+func getoutargx(t *Type) *Type {
+	return *Getoutarg(t)
+}
+
+func getinargx(t *Type) *Type {
+	return *getinarg(t)
+}
+
+// Brcom returns !(op).
+// For example, Brcom(==) is !=.
+func Brcom(a int) int {
+	switch a {
+	case OEQ:
+		return ONE
+	case ONE:
+		return OEQ
+	case OLT:
+		return OGE
+	case OGT:
+		return OLE
+	case OLE:
+		return OGT
+	case OGE:
+		return OLT
+	}
+	Fatal("brcom: no com for %v\n", Oconv(a, 0))
+	return a
+}
+
+// Brrev returns reverse(op).
+// For example, Brrev(<) is >.
+func Brrev(a int) int {
+	switch a {
+	case OEQ:
+		return OEQ
+	case ONE:
+		return ONE
+	case OLT:
+		return OGT
+	case OGT:
+		return OLT
+	case OLE:
+		return OGE
+	case OGE:
+		return OLE
+	}
+	Fatal("brrev: no rev for %v\n", Oconv(a, 0))
+	return a
+}
+
+/*
+ * return side effect-free n, appending side effects to init.
+ * result is assignable if n is.
+ */
+func safeexpr(n *Node, init **NodeList) *Node {
+	if n == nil {
+		return nil
+	}
+
+	if n.Ninit != nil {
+		walkstmtlist(n.Ninit)
+		*init = concat(*init, n.Ninit)
+		n.Ninit = nil
+	}
+
+	switch n.Op {
+	case ONAME, OLITERAL:
+		return n
+
+	case ODOT, OLEN, OCAP:
+		l := safeexpr(n.Left, init)
+		if l == n.Left {
+			return n
+		}
+		r := Nod(OXXX, nil, nil)
+		*r = *n
+		r.Left = l
+		typecheck(&r, Erv)
+		walkexpr(&r, init)
+		return r
+
+	case ODOTPTR, OIND:
+		l := safeexpr(n.Left, init)
+		if l == n.Left {
+			return n
+		}
+		a := Nod(OXXX, nil, nil)
+		*a = *n
+		a.Left = l
+		walkexpr(&a, init)
+		return a
+
+	case OINDEX, OINDEXMAP:
+		l := safeexpr(n.Left, init)
+		r := safeexpr(n.Right, init)
+		if l == n.Left && r == n.Right {
+			return n
+		}
+		a := Nod(OXXX, nil, nil)
+		*a = *n
+		a.Left = l
+		a.Right = r
+		walkexpr(&a, init)
+		return a
+	}
+
+	// make a copy; must not be used as an lvalue
+	if islvalue(n) {
+		Fatal("missing lvalue case in safeexpr: %v", n)
+	}
+	return cheapexpr(n, init)
+}
+
+func copyexpr(n *Node, t *Type, init **NodeList) *Node {
+	l := temp(t)
+	a := Nod(OAS, l, n)
+	typecheck(&a, Etop)
+	walkexpr(&a, init)
+	*init = list(*init, a)
+	return l
+}
+
+/*
+ * return side-effect free and cheap n, appending side effects to init.
+ * result may not be assignable.
+ */
+func cheapexpr(n *Node, init **NodeList) *Node {
+	switch n.Op {
+	case ONAME, OLITERAL:
+		return n
+	}
+
+	return copyexpr(n, n.Type, init)
+}
+
+/*
+ * return n in a local variable of type t if it is not already.
+ * the value is guaranteed not to change except by direct
+ * assignment to it.
+ */
+func localexpr(n *Node, t *Type, init **NodeList) *Node {
+	if n.Op == ONAME && (!n.Addrtaken || strings.HasPrefix(n.Sym.Name, "autotmp_")) && (n.Class == PAUTO || n.Class == PPARAM || n.Class == PPARAMOUT) && convertop(n.Type, t, nil) == OCONVNOP {
+		return n
+	}
+
+	return copyexpr(n, t, init)
+}
+
+func Setmaxarg(t *Type, extra int32) {
+	dowidth(t)
+	w := t.Argwid
+	if w >= Thearch.MAXWIDTH {
+		Fatal("bad argwid %v", t)
+	}
+	w += int64(extra)
+	if w >= Thearch.MAXWIDTH {
+		Fatal("bad argwid %d + %v", extra, t)
+	}
+	if w > Maxarg {
+		Maxarg = w
+	}
+}
+
+/*
+ * code to resolve elided DOTs
+ * in embedded types
+ */
+
+// search depth 0 --
+// return count of fields+methods
+// found with a given name
+func lookdot0(s *Sym, t *Type, save **Type, ignorecase int) int {
+	u := t
+	if Isptr[u.Etype] {
+		u = u.Type
+	}
+
+	c := 0
+	if u.Etype == TSTRUCT || u.Etype == TINTER {
+		for f := u.Type; f != nil; f = f.Down {
+			if f.Sym == s || (ignorecase != 0 && f.Type.Etype == TFUNC && f.Type.Thistuple > 0 && strings.EqualFold(f.Sym.Name, s.Name)) {
+				if save != nil {
+					*save = f
+				}
+				c++
+			}
+		}
+	}
+
+	u = methtype(t, 0)
+	if u != nil {
+		for f := u.Method; f != nil; f = f.Down {
+			if f.Embedded == 0 && (f.Sym == s || (ignorecase != 0 && strings.EqualFold(f.Sym.Name, s.Name))) {
+				if save != nil {
+					*save = f
+				}
+				c++
+			}
+		}
+	}
+
+	return c
+}
+
+// search depth d for field/method s --
+// return count of fields+methods
+// found at search depth.
+// the path found is recorded in the dotlist array,
+// and the count of distinct paths is the return value.
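+//
+// for example, given
+//	type Inner struct{}
+//	func (Inner) M() {}
+//	type Outer struct{ Inner }
+// a search for M in Outer succeeds at depth 1, recording
+// the Inner field in dotlist[0].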
+func adddot1(s *Sym, t *Type, d int, save **Type, ignorecase int) int {
+	if t.Trecur != 0 {
+		return 0
+	}
+	t.Trecur = 1
+
+	var c int
+	var u *Type
+	var a int
+	if d == 0 {
+		c = lookdot0(s, t, save, ignorecase)
+		goto out
+	}
+
+	c = 0
+	u = t
+	if Isptr[u.Etype] {
+		u = u.Type
+	}
+	if u.Etype != TSTRUCT && u.Etype != TINTER {
+		goto out
+	}
+
+	d--
+	for f := u.Type; f != nil; f = f.Down {
+		if f.Embedded == 0 {
+			continue
+		}
+		if f.Sym == nil {
+			continue
+		}
+		a = adddot1(s, f.Type, d, save, ignorecase)
+		if a != 0 && c == 0 {
+			dotlist[d].field = f
+		}
+		c += a
+	}
+
+out:
+	t.Trecur = 0
+	return c
+}
+
+// in T.field,
+// find the elided embedded fields that
+// give the shortest unique addressing
+// and modify the tree to spell out the missing names.
+func adddot(n *Node) *Node {
+	typecheck(&n.Left, Etype|Erv)
+	n.Diag |= n.Left.Diag
+	t := n.Left.Type
+	if t == nil {
+		return n
+	}
+
+	if n.Left.Op == OTYPE {
+		return n
+	}
+
+	if n.Right.Op != ONAME {
+		return n
+	}
+	s := n.Right.Sym
+	if s == nil {
+		return n
+	}
+
+	var c int
+	for d := 0; d < len(dotlist); d++ {
+		c = adddot1(s, t, d, nil, 0)
+		if c > 0 {
+			if c > 1 {
+				Yyerror("ambiguous selector %v", n)
+				n.Left = nil
+				return n
+			}
+
+			// rebuild elided dots
+			for c := d - 1; c >= 0; c-- {
+				if n.Left.Type != nil && Isptr[n.Left.Type.Etype] {
+					n.Left.Implicit = true
+				}
+				n.Left = Nod(ODOT, n.Left, newname(dotlist[c].field.Sym))
+			}
+
+			return n
+		}
+	}
+
+	return n
+}
+
+/*
+ * code to help generate trampoline
+ * functions for methods on embedded
+ * subtypes.
+ * these are approximately the same as
+ * the corresponding adddot routines
+ * except that they expect to be called
+ * with unique tasks and they return
+ * the actual methods.
+ */
+type Symlink struct {
+	field     *Type
+	good      uint8
+	followptr uint8
+	link      *Symlink
+}
+
+var slist *Symlink
+
+func expand0(t *Type, followptr int) {
+	u := t
+	if Isptr[u.Etype] {
+		followptr = 1
+		u = u.Type
+	}
+
+	if u.Etype == TINTER {
+		var sl *Symlink
+		for f := u.Type; f != nil; f = f.Down {
+			if f.Sym.Flags&SymUniq != 0 {
+				continue
+			}
+			f.Sym.Flags |= SymUniq
+			sl = new(Symlink)
+			sl.field = f
+			sl.link = slist
+			sl.followptr = uint8(followptr)
+			slist = sl
+		}
+
+		return
+	}
+
+	u = methtype(t, 0)
+	if u != nil {
+		var sl *Symlink
+		for f := u.Method; f != nil; f = f.Down {
+			if f.Sym.Flags&SymUniq != 0 {
+				continue
+			}
+			f.Sym.Flags |= SymUniq
+			sl = new(Symlink)
+			sl.field = f
+			sl.link = slist
+			sl.followptr = uint8(followptr)
+			slist = sl
+		}
+	}
+}
+
+func expand1(t *Type, d int, followptr int) {
+	if t.Trecur != 0 {
+		return
+	}
+	if d == 0 {
+		return
+	}
+	t.Trecur = 1
+
+	if d != len(dotlist)-1 {
+		expand0(t, followptr)
+	}
+
+	u := t
+	if Isptr[u.Etype] {
+		followptr = 1
+		u = u.Type
+	}
+
+	if u.Etype != TSTRUCT && u.Etype != TINTER {
+		goto out
+	}
+
+	for f := u.Type; f != nil; f = f.Down {
+		if f.Embedded == 0 {
+			continue
+		}
+		if f.Sym == nil {
+			continue
+		}
+		expand1(f.Type, d-1, followptr)
+	}
+
+out:
+	t.Trecur = 0
+}
+
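+// expandmeth computes t.Xmethod, the expanded method set of t:
+// t's own methods plus any methods promoted from embedded fields
+// that remain uniquely reachable.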
+func expandmeth(t *Type) {
+	if t == nil || t.Xmethod != nil {
+		return
+	}
+
+	// mark top-level method symbols
+	// so that expand1 doesn't consider them.
+	var f *Type
+	for f = t.Method; f != nil; f = f.Down {
+		f.Sym.Flags |= SymUniq
+	}
+
+	// generate all reachable methods
+	slist = nil
+
+	expand1(t, len(dotlist)-1, 0)
+
+	// check each method to be uniquely reachable
+	var c int
+	var d int
+	for sl := slist; sl != nil; sl = sl.link {
+		sl.field.Sym.Flags &^= SymUniq
+		for d = 0; d < len(dotlist); d++ {
+			c = adddot1(sl.field.Sym, t, d, &f, 0)
+			if c == 0 {
+				continue
+			}
+			if c == 1 {
+				// adddot1 may have dug out arbitrary fields; we only want methods.
+				if f.Type.Etype == TFUNC && f.Type.Thistuple > 0 {
+					sl.good = 1
+					sl.field = f
+				}
+			}
+
+			break
+		}
+	}
+
+	for f = t.Method; f != nil; f = f.Down {
+		f.Sym.Flags &^= SymUniq
+	}
+
+	t.Xmethod = t.Method
+	for sl := slist; sl != nil; sl = sl.link {
+		if sl.good != 0 {
+			// add it to the base type method list
+			f = typ(TFIELD)
+
+			*f = *sl.field
+			f.Embedded = 1 // needs a trampoline
+			if sl.followptr != 0 {
+				f.Embedded = 2
+			}
+			f.Down = t.Xmethod
+			t.Xmethod = f
+		}
+	}
+}
+
+/*
+ * Given funarg struct list, return list of ODCLFIELD Node fn args.
+ */
+func structargs(tl **Type, mustname int) *NodeList {
+	var savet Iter
+	var a *Node
+	var n *Node
+	var buf string
+
+	var args *NodeList
+	gen := 0
+	for t := Structfirst(&savet, tl); t != nil; t = structnext(&savet) {
+		n = nil
+		if mustname != 0 && (t.Sym == nil || t.Sym.Name == "_") {
+			// invent a name so that we can refer to it in the trampoline
+			buf = fmt.Sprintf(".anon%d", gen)
+			gen++
+
+			n = newname(Lookup(buf))
+		} else if t.Sym != nil {
+			n = newname(t.Sym)
+		}
+		a = Nod(ODCLFIELD, n, typenod(t.Type))
+		a.Isddd = t.Isddd
+		if n != nil {
+			n.Isddd = t.Isddd
+		}
+		args = list(args, a)
+	}
+
+	return args
+}
+
+/*
+ * Generate a wrapper function to convert from
+ * a receiver of type T to a receiver of type U.
+ * That is,
+ *
+ *	func (t T) M() {
+ *		...
+ *	}
+ *
+ * already exists; this function generates
+ *
+ *	func (u U) M() {
+ *		u.M()
+ *	}
+ *
+ * where the types T and U are such that u.M() is valid
+ * and calls the T.M method.
+ * The resulting function is for use in method tables.
+ *
+ *	rcvr - U
+ *	method - M func (t T)(), a TFIELD type struct
+ *	newnam - the eventual mangled name of this function
+ */
+
+var genwrapper_linehistdone int = 0
+
+func genwrapper(rcvr *Type, method *Type, newnam *Sym, iface int) {
+	if false && Debug['r'] != 0 {
+		fmt.Printf("genwrapper rcvrtype=%v method=%v newnam=%v\n", rcvr, method, newnam)
+	}
+
+	lexlineno++
+	lineno = lexlineno
+	if genwrapper_linehistdone == 0 {
+		// All the wrappers can share the same linehist entry.
+		linehistpush("<autogenerated>")
+
+		genwrapper_linehistdone = 1
+	}
+
+	dclcontext = PEXTERN
+	markdcl()
+
+	this := Nod(ODCLFIELD, newname(Lookup(".this")), typenod(rcvr))
+	this.Left.Ntype = this.Right
+	in := structargs(getinarg(method.Type), 1)
+	out := structargs(Getoutarg(method.Type), 0)
+
+	t := Nod(OTFUNC, nil, nil)
+	l := list1(this)
+	if iface != 0 && rcvr.Width < Types[Tptr].Width {
+		// We are building a method for an interface table and
+		// the receiver is smaller than the single pointer-sized
+		// word that the interface call will pass in.
+		// Add a dummy padding argument after the
+		// receiver to make up the difference.
+		tpad := typ(TARRAY)
+
+		tpad.Type = Types[TUINT8]
+		tpad.Bound = Types[Tptr].Width - rcvr.Width
+		pad := Nod(ODCLFIELD, newname(Lookup(".pad")), typenod(tpad))
+		l = list(l, pad)
+	}
+
+	t.List = concat(l, in)
+	t.Rlist = out
+
+	fn := Nod(ODCLFUNC, nil, nil)
+	fn.Nname = newname(newnam)
+	fn.Nname.Defn = fn
+	fn.Nname.Ntype = t
+	declare(fn.Nname, PFUNC)
+	funchdr(fn)
+
+	// arg list
+	var args *NodeList
+
+	isddd := false
+	for l := in; l != nil; l = l.Next {
+		args = list(args, l.N.Left)
+		isddd = l.N.Left.Isddd
+	}
+
+	methodrcvr := getthisx(method.Type).Type.Type
+
+	// generate nil pointer check for better error
+	if Isptr[rcvr.Etype] && rcvr.Type == methodrcvr {
+		// generating wrapper from *T to T.
+		n := Nod(OIF, nil, nil)
+
+		n.Ntest = Nod(OEQ, this.Left, nodnil())
+
+		// these strings are already in the reflect tables,
+		// so no space cost to use them here.
+		var l *NodeList
+
+		var v Val
+		v.Ctype = CTSTR
+		v.U = rcvr.Type.Sym.Pkg.Name // package name
+		l = list(l, nodlit(v))
+		v.U = rcvr.Type.Sym.Name // type name
+		l = list(l, nodlit(v))
+		v.U = method.Sym.Name
+		l = list(l, nodlit(v)) // method name
+		call := Nod(OCALL, syslook("panicwrap", 0), nil)
+		call.List = l
+		n.Nbody = list1(call)
+		fn.Nbody = list(fn.Nbody, n)
+	}
+
+	dot := adddot(Nod(OXDOT, this.Left, newname(method.Sym)))
+
+	// generate call
+	if flag_race == 0 && Isptr[rcvr.Etype] && Isptr[methodrcvr.Etype] && method.Embedded != 0 && !isifacemethod(method.Type) {
+		// generate tail call: adjust pointer receiver and jump to embedded method.
+		dot = dot.Left // skip final .M
+		if !Isptr[dotlist[0].field.Type.Etype] {
+			dot = Nod(OADDR, dot, nil)
+		}
+		as := Nod(OAS, this.Left, Nod(OCONVNOP, dot, nil))
+		as.Right.Type = rcvr
+		fn.Nbody = list(fn.Nbody, as)
+		n := Nod(ORETJMP, nil, nil)
+		n.Left = newname(methodsym(method.Sym, methodrcvr, 0))
+		fn.Nbody = list(fn.Nbody, n)
+	} else {
+		fn.Func.Wrapper = true // ignore frame for panic+recover matching
+		call := Nod(OCALL, dot, nil)
+		call.List = args
+		call.Isddd = isddd
+		if method.Type.Outtuple > 0 {
+			n := Nod(ORETURN, nil, nil)
+			n.List = list1(call)
+			call = n
+		}
+
+		fn.Nbody = list(fn.Nbody, call)
+	}
+
+	if false && Debug['r'] != 0 {
+		dumplist("genwrapper body", fn.Nbody)
+	}
+
+	funcbody(fn)
+	Curfn = fn
+
+	// wrappers where T is anonymous (struct or interface) can be duplicated.
+	if rcvr.Etype == TSTRUCT || rcvr.Etype == TINTER || Isptr[rcvr.Etype] && rcvr.Type.Etype == TSTRUCT {
+		fn.Func.Dupok = true
+	}
+	typecheck(&fn, Etop)
+	typechecklist(fn.Nbody, Etop)
+
+	// Set inl_nonlocal to whether we are calling a method on a
+	// type defined in a different package.  Checked in inlvar.
+	if !methodrcvr.Local {
+		inl_nonlocal = 1
+	}
+
+	inlcalls(fn)
+
+	inl_nonlocal = 0
+
+	Curfn = nil
+	funccompile(fn)
+}
+
+func hashmem(t *Type) *Node {
+	sym := Pkglookup("memhash", Runtimepkg)
+
+	n := newname(sym)
+	n.Class = PFUNC
+	tfn := Nod(OTFUNC, nil, nil)
+	tfn.List = list(tfn.List, Nod(ODCLFIELD, nil, typenod(Ptrto(t))))
+	tfn.List = list(tfn.List, Nod(ODCLFIELD, nil, typenod(Types[TUINTPTR])))
+	tfn.List = list(tfn.List, Nod(ODCLFIELD, nil, typenod(Types[TUINTPTR])))
+	tfn.Rlist = list(tfn.Rlist, Nod(ODCLFIELD, nil, typenod(Types[TUINTPTR])))
+	typecheck(&tfn, Etype)
+	n.Type = tfn.Type
+	return n
+}
+
+func hashfor(t *Type) *Node {
+	var sym *Sym
+
+	a := algtype1(t, nil)
+	switch a {
+	case AMEM:
+		Fatal("hashfor with AMEM type")
+
+	case AINTER:
+		sym = Pkglookup("interhash", Runtimepkg)
+
+	case ANILINTER:
+		sym = Pkglookup("nilinterhash", Runtimepkg)
+
+	case ASTRING:
+		sym = Pkglookup("strhash", Runtimepkg)
+
+	case AFLOAT32:
+		sym = Pkglookup("f32hash", Runtimepkg)
+
+	case AFLOAT64:
+		sym = Pkglookup("f64hash", Runtimepkg)
+
+	case ACPLX64:
+		sym = Pkglookup("c64hash", Runtimepkg)
+
+	case ACPLX128:
+		sym = Pkglookup("c128hash", Runtimepkg)
+
+	default:
+		sym = typesymprefix(".hash", t)
+	}
+
+	n := newname(sym)
+	n.Class = PFUNC
+	tfn := Nod(OTFUNC, nil, nil)
+	tfn.List = list(tfn.List, Nod(ODCLFIELD, nil, typenod(Ptrto(t))))
+	tfn.List = list(tfn.List, Nod(ODCLFIELD, nil, typenod(Types[TUINTPTR])))
+	tfn.Rlist = list(tfn.Rlist, Nod(ODCLFIELD, nil, typenod(Types[TUINTPTR])))
+	typecheck(&tfn, Etype)
+	n.Type = tfn.Type
+	return n
+}
+
+/*
+ * Generate a helper function to compute the hash of a value of type t.
+ */
+func genhash(sym *Sym, t *Type) {
+	if Debug['r'] != 0 {
+		fmt.Printf("genhash %v %v\n", sym, t)
+	}
+
+	lineno = 1 // less confusing than end of input
+	dclcontext = PEXTERN
+	markdcl()
+
+	// func sym(p *T, h uintptr) uintptr
+	fn := Nod(ODCLFUNC, nil, nil)
+
+	fn.Nname = newname(sym)
+	fn.Nname.Class = PFUNC
+	tfn := Nod(OTFUNC, nil, nil)
+	fn.Nname.Ntype = tfn
+
+	n := Nod(ODCLFIELD, newname(Lookup("p")), typenod(Ptrto(t)))
+	tfn.List = list(tfn.List, n)
+	np := n.Left
+	n = Nod(ODCLFIELD, newname(Lookup("h")), typenod(Types[TUINTPTR]))
+	tfn.List = list(tfn.List, n)
+	nh := n.Left
+	n = Nod(ODCLFIELD, nil, typenod(Types[TUINTPTR])) // return value
+	tfn.Rlist = list(tfn.Rlist, n)
+
+	funchdr(fn)
+	typecheck(&fn.Nname.Ntype, Etype)
+
+	// genhash is only called for types that have equality but
+	// cannot be handled by the standard algorithms,
+	// so t must be either an array or a struct.
+	switch t.Etype {
+	default:
+		Fatal("genhash %v", t)
+
+	case TARRAY:
+		if Isslice(t) {
+			Fatal("genhash %v", t)
+		}
+
+		// An array of pure memory would be handled by the
+		// standard algorithm, so the element type must not be
+		// pure memory.
+		hashel := hashfor(t.Type)
+
+		n := Nod(ORANGE, nil, Nod(OIND, np, nil))
+		ni := newname(Lookup("i"))
+		ni.Type = Types[TINT]
+		n.List = list1(ni)
+		n.Colas = true
+		colasdefn(n.List, n)
+		ni = n.List.N
+
+		// TODO: with aeshash we don't need these shift/mul parts
+
+		// h = h<<3 | h>>61
+		n.Nbody = list(n.Nbody, Nod(OAS, nh, Nod(OOR, Nod(OLSH, nh, Nodintconst(3)), Nod(ORSH, nh, Nodintconst(int64(Widthptr)*8-3)))))
+
+		// h *= mul
+		// Same multipliers as in runtime.memhash.
+		var mul int64
+		if Widthptr == 4 {
+			mul = 3267000013
+		} else {
+			mul = 23344194077549503
+		}
+		n.Nbody = list(n.Nbody, Nod(OAS, nh, Nod(OMUL, nh, Nodintconst(mul))))
+
+		// h = hashel(&p[i], h)
+		call := Nod(OCALL, hashel, nil)
+
+		nx := Nod(OINDEX, np, ni)
+		nx.Bounded = true
+		na := Nod(OADDR, nx, nil)
+		na.Etype = 1 // no escape to heap
+		call.List = list(call.List, na)
+		call.List = list(call.List, nh)
+		n.Nbody = list(n.Nbody, Nod(OAS, nh, call))
+
+		fn.Nbody = list(fn.Nbody, n)
+
+		// Walk the struct, using memhash for runs of AMEM
+	// and specific hash functions for the other fields.
+	case TSTRUCT:
+		var first *Type
+
+		offend := int64(0)
+		var size int64
+		var call *Node
+		var nx *Node
+		var na *Node
+		var hashel *Node
+		for t1 := t.Type; ; t1 = t1.Down {
+			if t1 != nil && algtype1(t1.Type, nil) == AMEM && !isblanksym(t1.Sym) {
+				offend = t1.Width + t1.Type.Width
+				if first == nil {
+					first = t1
+				}
+
+				// If it's a memory field but it's padded, stop here.
+				if ispaddedfield(t1, t.Width) {
+					t1 = t1.Down
+				} else {
+					continue
+				}
+			}
+
+			// Run memhash for fields up to this one.
+			if first != nil {
+				size = offend - first.Width // first.Width is the field's offset
+				hashel = hashmem(first.Type)
+
+				// h = hashel(&p.first, size, h)
+				call = Nod(OCALL, hashel, nil)
+
+				nx = Nod(OXDOT, np, newname(first.Sym)) // TODO: fields from other packages?
+				na = Nod(OADDR, nx, nil)
+				na.Etype = 1 // no escape to heap
+				call.List = list(call.List, na)
+				call.List = list(call.List, nh)
+				call.List = list(call.List, Nodintconst(size))
+				fn.Nbody = list(fn.Nbody, Nod(OAS, nh, call))
+
+				first = nil
+			}
+
+			if t1 == nil {
+				break
+			}
+			if isblanksym(t1.Sym) {
+				continue
+			}
+
+			// Run hash for this field.
+			if algtype1(t1.Type, nil) == AMEM {
+				hashel = hashmem(t1.Type)
+
+				// h = memhash(&p.t1, h, size)
+				call = Nod(OCALL, hashel, nil)
+
+				nx = Nod(OXDOT, np, newname(t1.Sym)) // TODO: fields from other packages?
+				na = Nod(OADDR, nx, nil)
+				na.Etype = 1 // no escape to heap
+				call.List = list(call.List, na)
+				call.List = list(call.List, nh)
+				call.List = list(call.List, Nodintconst(t1.Type.Width))
+				fn.Nbody = list(fn.Nbody, Nod(OAS, nh, call))
+			} else {
+				hashel = hashfor(t1.Type)
+
+				// h = hashel(&p.t1, h)
+				call = Nod(OCALL, hashel, nil)
+
+				nx = Nod(OXDOT, np, newname(t1.Sym)) // TODO: fields from other packages?
+				na = Nod(OADDR, nx, nil)
+				na.Etype = 1 // no escape to heap
+				call.List = list(call.List, na)
+				call.List = list(call.List, nh)
+				fn.Nbody = list(fn.Nbody, Nod(OAS, nh, call))
+			}
+		}
+	}
+
+	r := Nod(ORETURN, nil, nil)
+	r.List = list(r.List, nh)
+	fn.Nbody = list(fn.Nbody, r)
+
+	if Debug['r'] != 0 {
+		dumplist("genhash body", fn.Nbody)
+	}
+
+	funcbody(fn)
+	Curfn = fn
+	fn.Func.Dupok = true
+	typecheck(&fn, Etop)
+	typechecklist(fn.Nbody, Etop)
+	Curfn = nil
+
+	// Disable safemode while compiling this code: the code we
+	// generate internally can refer to unsafe.Pointer.
+	// In this case it can happen if we need to generate an ==
+	// for a struct containing a reflect.Value, which itself has
+	// an unexported field of type unsafe.Pointer.
+	old_safemode := safemode
+
+	safemode = 0
+	funccompile(fn)
+	safemode = old_safemode
+}
+
+// Return node for
+//	if p.field != q.field { return false }
+func eqfield(p *Node, q *Node, field *Node) *Node {
+	nx := Nod(OXDOT, p, field)
+	ny := Nod(OXDOT, q, field)
+	nif := Nod(OIF, nil, nil)
+	nif.Ntest = Nod(ONE, nx, ny)
+	r := Nod(ORETURN, nil, nil)
+	r.List = list(r.List, Nodbool(false))
+	nif.Nbody = list(nif.Nbody, r)
+	return nif
+}
+
+func eqmemfunc(size int64, type_ *Type, needsize *int) *Node {
+	var fn *Node
+
+	switch size {
+	default:
+		fn = syslook("memequal", 1)
+		*needsize = 1
+
+	case 1, 2, 4, 8, 16:
+		buf := fmt.Sprintf("memequal%d", int(size)*8)
+		fn = syslook(buf, 1)
+		*needsize = 0
+	}
+
+	substArgTypes(fn, type_, type_)
+	return fn
+}
+
+// Return node for
+//	if !memequal(&p.field, &q.field [, size]) { return false }
+func eqmem(p *Node, q *Node, field *Node, size int64) *Node {
+	var needsize int
+
+	nx := Nod(OADDR, Nod(OXDOT, p, field), nil)
+	nx.Etype = 1 // does not escape
+	ny := Nod(OADDR, Nod(OXDOT, q, field), nil)
+	ny.Etype = 1 // does not escape
+	typecheck(&nx, Erv)
+	typecheck(&ny, Erv)
+
+	call := Nod(OCALL, eqmemfunc(size, nx.Type.Type, &needsize), nil)
+	call.List = list(call.List, nx)
+	call.List = list(call.List, ny)
+	if needsize != 0 {
+		call.List = list(call.List, Nodintconst(size))
+	}
+
+	nif := Nod(OIF, nil, nil)
+	nif.Ntest = Nod(ONOT, call, nil)
+	r := Nod(ORETURN, nil, nil)
+	r.List = list(r.List, Nodbool(false))
+	nif.Nbody = list(nif.Nbody, r)
+	return nif
+}
+
+/*
+ * Generate a helper function to check equality of two values of type t.
+ */
+func geneq(sym *Sym, t *Type) {
+	if Debug['r'] != 0 {
+		fmt.Printf("geneq %v %v\n", sym, t)
+	}
+
+	lineno = 1 // less confusing than end of input
+	dclcontext = PEXTERN
+	markdcl()
+
+	// func sym(p, q *T) bool
+	fn := Nod(ODCLFUNC, nil, nil)
+
+	fn.Nname = newname(sym)
+	fn.Nname.Class = PFUNC
+	tfn := Nod(OTFUNC, nil, nil)
+	fn.Nname.Ntype = tfn
+
+	n := Nod(ODCLFIELD, newname(Lookup("p")), typenod(Ptrto(t)))
+	tfn.List = list(tfn.List, n)
+	np := n.Left
+	n = Nod(ODCLFIELD, newname(Lookup("q")), typenod(Ptrto(t)))
+	tfn.List = list(tfn.List, n)
+	nq := n.Left
+	n = Nod(ODCLFIELD, nil, typenod(Types[TBOOL]))
+	tfn.Rlist = list(tfn.Rlist, n)
+
+	funchdr(fn)
+
+	// geneq is only called for types that have equality but
+	// cannot be handled by the standard algorithms,
+	// so t must be either an array or a struct.
+	switch t.Etype {
+	default:
+		Fatal("geneq %v", t)
+
+	case TARRAY:
+		if Isslice(t) {
+			Fatal("geneq %v", t)
+		}
+
+		// An array of pure memory would be handled by the
+		// standard memequal, so the element type must not be
+		// pure memory.  Even if we unrolled the range loop,
+		// each iteration would be a function call, so don't bother
+		// unrolling.
+		nrange := Nod(ORANGE, nil, Nod(OIND, np, nil))
+
+		ni := newname(Lookup("i"))
+		ni.Type = Types[TINT]
+		nrange.List = list1(ni)
+		nrange.Colas = true
+		colasdefn(nrange.List, nrange)
+		ni = nrange.List.N
+
+		// if p[i] != q[i] { return false }
+		nx := Nod(OINDEX, np, ni)
+
+		nx.Bounded = true
+		ny := Nod(OINDEX, nq, ni)
+		ny.Bounded = true
+
+		nif := Nod(OIF, nil, nil)
+		nif.Ntest = Nod(ONE, nx, ny)
+		r := Nod(ORETURN, nil, nil)
+		r.List = list(r.List, Nodbool(false))
+		nif.Nbody = list(nif.Nbody, r)
+		nrange.Nbody = list(nrange.Nbody, nif)
+		fn.Nbody = list(fn.Nbody, nrange)
+
+		// Walk the struct, using memequal for runs of AMEM
+	// and specific equality tests for the other fields.
+	// Skip blank-named fields.
+	case TSTRUCT:
+		var first *Type
+
+		offend := int64(0)
+		var size int64
+		for t1 := t.Type; ; t1 = t1.Down {
+			if t1 != nil && algtype1(t1.Type, nil) == AMEM && !isblanksym(t1.Sym) {
+				offend = t1.Width + t1.Type.Width
+				if first == nil {
+					first = t1
+				}
+
+				// If it's a memory field but it's padded, stop here.
+				if ispaddedfield(t1, t.Width) {
+					t1 = t1.Down
+				} else {
+					continue
+				}
+			}
+
+			// Run memequal for fields up to this one.
+			// TODO(rsc): All the calls to newname are wrong for
+			// cross-package unexported fields.
+			if first != nil {
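+				// A run of one or two fields is compared field by
+				// field; longer runs collapse into a single memequal.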
+				if first.Down == t1 {
+					fn.Nbody = list(fn.Nbody, eqfield(np, nq, newname(first.Sym)))
+				} else if first.Down.Down == t1 {
+					fn.Nbody = list(fn.Nbody, eqfield(np, nq, newname(first.Sym)))
+					first = first.Down
+					if !isblanksym(first.Sym) {
+						fn.Nbody = list(fn.Nbody, eqfield(np, nq, newname(first.Sym)))
+					}
+				} else {
+					// More than two fields: use memequal.
+					size = offend - first.Width // first.Width is the field's offset
+					fn.Nbody = list(fn.Nbody, eqmem(np, nq, newname(first.Sym), size))
+				}
+
+				first = nil
+			}
+
+			if t1 == nil {
+				break
+			}
+			if isblanksym(t1.Sym) {
+				continue
+			}
+
+			// Check this field, which is not just memory.
+			fn.Nbody = list(fn.Nbody, eqfield(np, nq, newname(t1.Sym)))
+		}
+	}
+
+	// return true
+	r := Nod(ORETURN, nil, nil)
+
+	r.List = list(r.List, Nodbool(true))
+	fn.Nbody = list(fn.Nbody, r)
+
+	if Debug['r'] != 0 {
+		dumplist("geneq body", fn.Nbody)
+	}
+
+	funcbody(fn)
+	Curfn = fn
+	fn.Func.Dupok = true
+	typecheck(&fn, Etop)
+	typechecklist(fn.Nbody, Etop)
+	Curfn = nil
+
+	// Disable safemode while compiling this code: the code we
+	// generate internally can refer to unsafe.Pointer.
+	// In this case it can happen if we need to generate an ==
+	// for a struct containing a reflect.Value, which itself has
+	// an unexported field of type unsafe.Pointer.
+	old_safemode := safemode
+
+	safemode = 0
+	funccompile(fn)
+	safemode = old_safemode
+}
+
+func ifacelookdot(s *Sym, t *Type, followptr *int, ignorecase int) *Type {
+	*followptr = 0
+
+	if t == nil {
+		return nil
+	}
+
+	var m *Type
+	var i int
+	var c int
+	for d := 0; d < len(dotlist); d++ {
+		c = adddot1(s, t, d, &m, ignorecase)
+		if c > 1 {
+			Yyerror("%v.%v is ambiguous", t, s)
+			return nil
+		}
+
+		if c == 1 {
+			for i = 0; i < d; i++ {
+				if Isptr[dotlist[i].field.Type.Etype] {
+					*followptr = 1
+					break
+				}
+			}
+
+			if m.Type.Etype != TFUNC || m.Type.Thistuple == 0 {
+				Yyerror("%v.%v is a field, not a method", t, s)
+				return nil
+			}
+
+			return m
+		}
+	}
+
+	return nil
+}
+
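+// implements reports whether t implements the interface type iface.
+// On failure, it sets *m to the interface method that is missing or
+// mismatched, *samename to a method of t with the same name but the
+// wrong type (if any), and *ptr to 1 when the method exists only for
+// pointer receivers.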
+func implements(t *Type, iface *Type, m **Type, samename **Type, ptr *int) bool {
+	t0 := t
+	if t == nil {
+		return false
+	}
+
+	// if this is too slow,
+	// could sort these first
+	// and then do one loop.
+
+	if t.Etype == TINTER {
+		var tm *Type
+		for im := iface.Type; im != nil; im = im.Down {
+			for tm = t.Type; tm != nil; tm = tm.Down {
+				if tm.Sym == im.Sym {
+					if Eqtype(tm.Type, im.Type) {
+						goto found
+					}
+					*m = im
+					*samename = tm
+					*ptr = 0
+					return false
+				}
+			}
+
+			*m = im
+			*samename = nil
+			*ptr = 0
+			return false
+		found:
+		}
+
+		return true
+	}
+
+	t = methtype(t, 0)
+	if t != nil {
+		expandmeth(t)
+	}
+	var tm *Type
+	var imtype *Type
+	var followptr int
+	var rcvr *Type
+	for im := iface.Type; im != nil; im = im.Down {
+		imtype = methodfunc(im.Type, nil)
+		tm = ifacelookdot(im.Sym, t, &followptr, 0)
+		if tm == nil || tm.Nointerface || !Eqtype(methodfunc(tm.Type, nil), imtype) {
+			if tm == nil {
+				tm = ifacelookdot(im.Sym, t, &followptr, 1)
+			}
+			*m = im
+			*samename = tm
+			*ptr = 0
+			return false
+		}
+
+		// if pointer receiver in method,
+		// the method does not exist for value types.
+		rcvr = getthisx(tm.Type).Type.Type
+
+		if Isptr[rcvr.Etype] && !Isptr[t0.Etype] && followptr == 0 && !isifacemethod(tm.Type) {
+			if false && Debug['r'] != 0 {
+				Yyerror("interface pointer mismatch")
+			}
+
+			*m = im
+			*samename = nil
+			*ptr = 1
+			return false
+		}
+	}
+
+	return true
+}
+
+/*
+ * even simpler simtype: get rid of ptr and bool,
+ * assuming that the front end has rejected
+ * all the invalid conversions (like ptr -> bool).
+ */
+func Simsimtype(t *Type) int {
+	if t == nil {
+		return 0
+	}
+
+	et := int(Simtype[t.Etype])
+	switch et {
+	case TPTR32:
+		et = TUINT32
+
+	case TPTR64:
+		et = TUINT64
+
+	case TBOOL:
+		et = TUINT8
+	}
+
+	return et
+}
+
+func listtreecopy(l *NodeList) *NodeList {
+	var out *NodeList
+	for ; l != nil; l = l.Next {
+		out = list(out, treecopy(l.N))
+	}
+	return out
+}
+
+func liststmt(l *NodeList) *Node {
+	n := Nod(OBLOCK, nil, nil)
+	n.List = l
+	if l != nil {
+		n.Lineno = l.N.Lineno
+	}
+	return n
+}
+
+/*
+ * return the number of fields in the struct type t
+ */
+func structcount(t *Type) int {
+	var s Iter
+
+	v := 0
+	for t = Structfirst(&s, &t); t != nil; t = structnext(&s) {
+		v++
+	}
+	return v
+}
+
+/*
+ * return the base-2 log of the constant
+ * operand; -1 if it is not a power of 2;
+ * i+1000 if it is -(1<<i), the negation of a power of 2.
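+ * for example, a constant 8 yields 3 and -8 yields 1003.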
+ */
+func powtwo(n *Node) int {
+	if n == nil || n.Op != OLITERAL || n.Type == nil {
+		return -1
+	}
+	if !Isint[n.Type.Etype] {
+		return -1
+	}
+
+	v := uint64(Mpgetfix(n.Val.U.(*Mpint)))
+	b := uint64(1)
+	for i := 0; i < 64; i++ {
+		if b == v {
+			return i
+		}
+		b = b << 1
+	}
+
+	if !Issigned[n.Type.Etype] {
+		return -1
+	}
+
+	v = -v
+	b = 1
+	for i := 0; i < 64; i++ {
+		if b == v {
+			return i + 1000
+		}
+		b = b << 1
+	}
+
+	return -1
+}
+
+/*
+ * return the unsigned type for
+ * a signed integer type.
+ * returns nil if the input is not a
+ * signed integer type.
+ */
+func tounsigned(t *Type) *Type {
+	// this is types[et+1], but not sure
+	// that this relation is immutable
+	switch t.Etype {
+	default:
+		fmt.Printf("tounsigned: unknown type %v\n", t)
+		t = nil
+
+	case TINT:
+		t = Types[TUINT]
+
+	case TINT8:
+		t = Types[TUINT8]
+
+	case TINT16:
+		t = Types[TUINT16]
+
+	case TINT32:
+		t = Types[TUINT32]
+
+	case TINT64:
+		t = Types[TUINT64]
+	}
+
+	return t
+}
+
+/*
+ * magic number for signed division
+ * see Hacker's Delight, chapter 10.
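+ *
+ * the back end uses the result to replace division by the constant
+ * m.Sd with, roughly, a multiply by the magic constant m.Sm, an
+ * arithmetic shift by m.S, and a couple of sign-correction steps
+ * (a sketch of the standard technique, not the exact code sequence).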
+ */
+func Smagic(m *Magic) {
+	var mask uint64
+
+	m.Bad = 0
+	switch m.W {
+	default:
+		m.Bad = 1
+		return
+
+	case 8:
+		mask = 0xff
+
+	case 16:
+		mask = 0xffff
+
+	case 32:
+		mask = 0xffffffff
+
+	case 64:
+		mask = 0xffffffffffffffff
+	}
+
+	two31 := mask ^ (mask >> 1)
+
+	p := m.W - 1
+	ad := uint64(m.Sd)
+	if m.Sd < 0 {
+		ad = -uint64(m.Sd)
+	}
+
+	// bad denominators
+	if ad == 0 || ad == 1 || ad == two31 {
+		m.Bad = 1
+		return
+	}
+
+	t := two31
+	ad &= mask
+
+	anc := t - 1 - t%ad
+	anc &= mask
+
+	q1 := two31 / anc
+	r1 := two31 - q1*anc
+	q1 &= mask
+	r1 &= mask
+
+	q2 := two31 / ad
+	r2 := two31 - q2*ad
+	q2 &= mask
+	r2 &= mask
+
+	var delta uint64
+	for {
+		p++
+		q1 <<= 1
+		r1 <<= 1
+		q1 &= mask
+		r1 &= mask
+		if r1 >= anc {
+			q1++
+			r1 -= anc
+			q1 &= mask
+			r1 &= mask
+		}
+
+		q2 <<= 1
+		r2 <<= 1
+		q2 &= mask
+		r2 &= mask
+		if r2 >= ad {
+			q2++
+			r2 -= ad
+			q2 &= mask
+			r2 &= mask
+		}
+
+		delta = ad - r2
+		delta &= mask
+		if q1 < delta || (q1 == delta && r1 == 0) {
+			continue
+		}
+
+		break
+	}
+
+	m.Sm = int64(q2 + 1)
+	if uint64(m.Sm)&two31 != 0 {
+		m.Sm |= ^int64(mask)
+	}
+	m.S = p - m.W
+}
+
+/*
+ * magic number for unsigned division
+ * see Hacker's Delight, chapter 10.
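+ *
+ * as with Smagic, the result lets the back end turn division by
+ * m.Ud into, roughly, a multiply by m.Um and a shift by m.S, with
+ * an extra add-and-shift fixup when m.Ua is set.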
+ */
+func Umagic(m *Magic) {
+	var mask uint64
+
+	m.Bad = 0
+	m.Ua = 0
+
+	switch m.W {
+	default:
+		m.Bad = 1
+		return
+
+	case 8:
+		mask = 0xff
+
+	case 16:
+		mask = 0xffff
+
+	case 32:
+		mask = 0xffffffff
+
+	case 64:
+		mask = 0xffffffffffffffff
+	}
+
+	two31 := mask ^ (mask >> 1)
+
+	m.Ud &= mask
+	if m.Ud == 0 || m.Ud == two31 {
+		m.Bad = 1
+		return
+	}
+
+	nc := mask - (-m.Ud&mask)%m.Ud
+	p := m.W - 1
+
+	q1 := two31 / nc
+	r1 := two31 - q1*nc
+	q1 &= mask
+	r1 &= mask
+
+	q2 := (two31 - 1) / m.Ud
+	r2 := (two31 - 1) - q2*m.Ud
+	q2 &= mask
+	r2 &= mask
+
+	var delta uint64
+	for {
+		p++
+		if r1 >= nc-r1 {
+			q1 <<= 1
+			q1++
+			r1 <<= 1
+			r1 -= nc
+		} else {
+			q1 <<= 1
+			r1 <<= 1
+		}
+
+		q1 &= mask
+		r1 &= mask
+		if r2+1 >= m.Ud-r2 {
+			if q2 >= two31-1 {
+				m.Ua = 1
+			}
+
+			q2 <<= 1
+			q2++
+			r2 <<= 1
+			r2++
+			r2 -= m.Ud
+		} else {
+			if q2 >= two31 {
+				m.Ua = 1
+			}
+
+			q2 <<= 1
+			r2 <<= 1
+			r2++
+		}
+
+		q2 &= mask
+		r2 &= mask
+
+		delta = m.Ud - 1 - r2
+		delta &= mask
+
+		if p < m.W+m.W {
+			if q1 < delta || (q1 == delta && r1 == 0) {
+				continue
+			}
+		}
+
+		break
+	}
+
+	m.Um = q2 + 1
+	m.S = p - m.W
+}
+
+func ngotype(n *Node) *Sym {
+	if n.Type != nil {
+		return typenamesym(n.Type)
+	}
+	return nil
+}
+
+/*
+ * Convert raw string to the prefix that will be used in the symbol
+ * table.  All control characters, space, '%' and '"', as well as
+ * non-7-bit clean bytes turn into %xx.  The period needs escaping
+ * only in the last segment of the path, and it makes for happier
+ * users if we escape that as little as possible.
+ *
+ * If you edit this, edit ../ld/lib.c:/^pathtoprefix too.
+ * If you edit this, edit ../../debug/goobj/read.go:/importPathToPrefix too.
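+ *
+ * For example, "net/http.test" becomes "net/http%2etest":
+ * only the dot in the final path element is escaped.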
+ */
+func pathtoprefix(s string) string {
+	slash := strings.LastIndex(s, "/")
+	for i := 0; i < len(s); i++ {
+		c := s[i]
+		if c <= ' ' || i >= slash && c == '.' || c == '%' || c == '"' || c >= 0x7F {
+			var buf bytes.Buffer
+			for i := 0; i < len(s); i++ {
+				c := s[i]
+				if c <= ' ' || i >= slash && c == '.' || c == '%' || c == '"' || c >= 0x7F {
+					fmt.Fprintf(&buf, "%%%02x", c)
+					continue
+				}
+				buf.WriteByte(c)
+			}
+			return buf.String()
+		}
+	}
+	return s
+}
+
+var pkgMap = make(map[string]*Pkg)
+var pkgs []*Pkg
+
+func mkpkg(path string) *Pkg {
+	if p := pkgMap[path]; p != nil {
+		return p
+	}
+
+	p := new(Pkg)
+	p.Path = path
+	p.Prefix = pathtoprefix(path)
+	p.Syms = make(map[string]*Sym)
+	pkgMap[path] = p
+	pkgs = append(pkgs, p)
+	return p
+}
+
+func addinit(np **Node, init *NodeList) {
+	if init == nil {
+		return
+	}
+
+	n := *np
+	switch n.Op {
+	// There may be multiple refs to this node;
+	// introduce OCONVNOP to hold init list.
+	case ONAME, OLITERAL:
+		n = Nod(OCONVNOP, n, nil)
+
+		n.Type = n.Left.Type
+		n.Typecheck = 1
+		*np = n
+	}
+
+	n.Ninit = concat(init, n.Ninit)
+	n.Ullman = UINF
+}
+
+var reservedimports = []string{
+	"go",
+	"type",
+}
+
+func isbadimport(path string) bool {
+	if strings.Contains(path, "\x00") {
+		Yyerror("import path contains NUL")
+		return true
+	}
+
+	for i := 0; i < len(reservedimports); i++ {
+		if path == reservedimports[i] {
+			Yyerror("import path %q is reserved and cannot be used", path)
+			return true
+		}
+	}
+
+	for _, r := range path {
+		if r == utf8.RuneError {
+			Yyerror("import path contains invalid UTF-8 sequence: %q", path)
+			return true
+		}
+
+		if r < 0x20 || r == 0x7f {
+			Yyerror("import path contains control character: %q", path)
+			return true
+		}
+
+		if r == '\\' {
+			Yyerror("import path contains backslash; use slash: %q", path)
+			return true
+		}
+
+		if unicode.IsSpace(rune(r)) {
+			Yyerror("import path contains space character: %q", path)
+			return true
+		}
+
+		if strings.ContainsRune("!\"#$%&'()*,:;<=>?[]^`{|}", r) {
+			Yyerror("import path contains invalid character '%c': %q", r, path)
+			return true
+		}
+	}
+
+	return false
+}
+
+func checknil(x *Node, init **NodeList) {
+	if Isinter(x.Type) {
+		x = Nod(OITAB, x, nil)
+		typecheck(&x, Erv)
+	}
+
+	n := Nod(OCHECKNIL, x, nil)
+	n.Typecheck = 1
+	*init = list(*init, n)
+}
+
+/*
+ * Can this type be stored directly in an interface word?
+ * Yes, if the representation is a single pointer.
+ */
+func isdirectiface(t *Type) bool {
+	switch t.Etype {
+	case TPTR32,
+		TPTR64,
+		TCHAN,
+		TMAP,
+		TFUNC,
+		TUNSAFEPTR:
+		return true
+
+		// Array of 1 direct iface type can be direct.
+	case TARRAY:
+		return t.Bound == 1 && isdirectiface(t.Type)
+
+		// Struct with 1 field of direct iface type can be direct.
+	case TSTRUCT:
+		return t.Type != nil && t.Type.Down == nil && isdirectiface(t.Type.Type)
+	}
+
+	return false
+}
+
+// type2IET returns "T" if t is a concrete type,
+// "I" if t is an interface type, and "E" if t is an empty interface type.
+// It is used to build calls to the conv* and assert* runtime routines.
+func type2IET(t *Type) string {
+	if isnilinter(t) {
+		return "E"
+	}
+	if Isinter(t) {
+		return "I"
+	}
+	return "T"
+}
diff --git a/src/cmd/compile/internal/gc/swt.go b/src/cmd/compile/internal/gc/swt.go
new file mode 100644
index 0000000..e8f15a5
--- /dev/null
+++ b/src/cmd/compile/internal/gc/swt.go
@@ -0,0 +1,840 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+	"cmd/internal/obj"
+	"fmt"
+	"sort"
+	"strconv"
+)
+
+const (
+	// expression switch
+	switchKindExpr  = iota // switch a {...} or switch 5 {...}
+	switchKindTrue         // switch true {...} or switch {...}
+	switchKindFalse        // switch false {...}
+
+	// type switch
+	switchKindType // switch a.(type) {...}
+)
+
+const (
+	caseKindDefault = iota // default:
+
+	// expression switch
+	caseKindExprConst // case 5:
+	caseKindExprVar   // case x:
+
+	// type switch
+	caseKindTypeNil   // case nil:
+	caseKindTypeConst // case time.Time: (concrete type, has type hash)
+	caseKindTypeVar   // case io.Reader: (interface type)
+)
+
+const binarySearchMin = 4 // minimum number of cases for binary search
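+
+// When a sorted run of constant cases is at least binarySearchMin
+// long, the generated code does a binary search over the run
+// instead of a linear chain of comparisons.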
+
+// An exprSwitch walks an expression switch.
+type exprSwitch struct {
+	exprname *Node // node for the expression being switched on
+	kind     int   // kind of switch statement (switchKind*)
+}
+
+// A typeSwitch walks a type switch.
+type typeSwitch struct {
+	hashname *Node // node for the hash of the type of the variable being switched on
+	facename *Node // node for the concrete type of the variable being switched on
+	okname   *Node // boolean node used for comma-ok type assertions
+}
+
+// A caseClause is a single case clause in a switch statement.
+type caseClause struct {
+	node    *Node  // points at case statement
+	ordinal int    // position in switch
+	hash    uint32 // hash of a type switch
+	typ     uint8  // type of case
+}
+
+// typecheckswitch typechecks a switch statement.
+func typecheckswitch(n *Node) {
+	lno := int(lineno)
+	typechecklist(n.Ninit, Etop)
+
+	var nilonly string
+	var top int
+	var t *Type
+
+	if n.Ntest != nil && n.Ntest.Op == OTYPESW {
+		// type switch
+		top = Etype
+		typecheck(&n.Ntest.Right, Erv)
+		t = n.Ntest.Right.Type
+		if t != nil && t.Etype != TINTER {
+			Yyerror("cannot type switch on non-interface value %v", Nconv(n.Ntest.Right, obj.FmtLong))
+		}
+	} else {
+		// expression switch
+		top = Erv
+		if n.Ntest != nil {
+			typecheck(&n.Ntest, Erv)
+			defaultlit(&n.Ntest, nil)
+			t = n.Ntest.Type
+		} else {
+			t = Types[TBOOL]
+		}
+		if t != nil {
+			var badtype *Type
+			switch {
+			case !okforeq[t.Etype]:
+				Yyerror("cannot switch on %v", Nconv(n.Ntest, obj.FmtLong))
+			case t.Etype == TARRAY && !Isfixedarray(t):
+				nilonly = "slice"
+			case t.Etype == TARRAY && Isfixedarray(t) && algtype1(t, nil) == ANOEQ:
+				Yyerror("cannot switch on %v", Nconv(n.Ntest, obj.FmtLong))
+			case t.Etype == TSTRUCT && algtype1(t, &badtype) == ANOEQ:
+				Yyerror("cannot switch on %v (struct containing %v cannot be compared)", Nconv(n.Ntest, obj.FmtLong), badtype)
+			case t.Etype == TFUNC:
+				nilonly = "func"
+			case t.Etype == TMAP:
+				nilonly = "map"
+			}
+		}
+	}
+
+	n.Type = t
+
+	var def *Node
+	var ll *NodeList
+	for l := n.List; l != nil; l = l.Next {
+		ncase := l.N
+		setlineno(n)
+		if ncase.List == nil {
+			// default
+			if def != nil {
+				Yyerror("multiple defaults in switch (first at %v)", def.Line())
+			} else {
+				def = ncase
+			}
+		} else {
+			for ll = ncase.List; ll != nil; ll = ll.Next {
+				setlineno(ll.N)
+				typecheck(&ll.N, Erv|Etype)
+				if ll.N.Type == nil || t == nil {
+					continue
+				}
+				setlineno(ncase)
+				switch top {
+				// expression switch
+				case Erv:
+					defaultlit(&ll.N, t)
+					switch {
+					case ll.N.Op == OTYPE:
+						Yyerror("type %v is not an expression", ll.N.Type)
+					case ll.N.Type != nil && assignop(ll.N.Type, t, nil) == 0 && assignop(t, ll.N.Type, nil) == 0:
+						if n.Ntest != nil {
+							Yyerror("invalid case %v in switch on %v (mismatched types %v and %v)", ll.N, n.Ntest, ll.N.Type, t)
+						} else {
+							Yyerror("invalid case %v in switch (mismatched types %v and bool)", ll.N, ll.N.Type)
+						}
+					case nilonly != "" && !Isconst(ll.N, CTNIL):
+						Yyerror("invalid case %v in switch (can only compare %s %v to nil)", ll.N, nilonly, n.Ntest)
+					}
+
+				// type switch
+				case Etype:
+					var missing, have *Type
+					var ptr int
+					switch {
+					case ll.N.Op == OLITERAL && Istype(ll.N.Type, TNIL):
+					case ll.N.Op != OTYPE && ll.N.Type != nil: // should this be ||?
+						Yyerror("%v is not a type", Nconv(ll.N, obj.FmtLong))
+						// reset to original type
+						ll.N = n.Ntest.Right
+					case ll.N.Type.Etype != TINTER && t.Etype == TINTER && !implements(ll.N.Type, t, &missing, &have, &ptr):
+						if have != nil && missing.Broke == 0 && have.Broke == 0 {
+							Yyerror("impossible type switch case: %v cannot have dynamic type %v"+" (wrong type for %v method)\n\thave %v%v\n\twant %v%v", Nconv(n.Ntest.Right, obj.FmtLong), ll.N.Type, missing.Sym, have.Sym, Tconv(have.Type, obj.FmtShort), missing.Sym, Tconv(missing.Type, obj.FmtShort))
+						} else if missing.Broke == 0 {
+							Yyerror("impossible type switch case: %v cannot have dynamic type %v"+" (missing %v method)", Nconv(n.Ntest.Right, obj.FmtLong), ll.N.Type, missing.Sym)
+						}
+					}
+				}
+			}
+		}
+
+		if top == Etype && n.Type != nil {
+			ll = ncase.List
+			nvar := ncase.Nname
+			if nvar != nil {
+				if ll != nil && ll.Next == nil && ll.N.Type != nil && !Istype(ll.N.Type, TNIL) {
+					// single entry type switch
+					nvar.Ntype = typenod(ll.N.Type)
+				} else {
+					// multiple entry type switch or default
+					nvar.Ntype = typenod(n.Type)
+				}
+
+				typecheck(&nvar, Erv|Easgn)
+				ncase.Nname = nvar
+			}
+		}
+
+		typechecklist(ncase.Nbody, Etop)
+	}
+
+	lineno = int32(lno)
+}
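+
+// For example, for s of type []int, "switch s {...}" typechecks, but
+// every case must be nil; "case t:" is diagnosed as
+//
+//	invalid case t in switch (can only compare slice s to nil)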
+
+// walkswitch walks a switch statement.
+func walkswitch(sw *Node) {
+	// convert switch {...} to switch true {...}
+	if sw.Ntest == nil {
+		sw.Ntest = Nodbool(true)
+		typecheck(&sw.Ntest, Erv)
+	}
+
+	if sw.Ntest.Op == OTYPESW {
+		var s typeSwitch
+		s.walk(sw)
+	} else {
+		var s exprSwitch
+		s.walk(sw)
+	}
+
+	// Discard old AST elements. They can confuse racewalk.
+	sw.Ntest = nil
+	sw.List = nil
+}
+
+// walk generates an AST implementing sw.
+// sw is an expression switch.
+// The AST is generally of the form of a linear
+// search using if..goto, although binary search
+// is used with long runs of constants.
+func (s *exprSwitch) walk(sw *Node) {
+	casebody(sw, nil)
+
+	s.kind = switchKindExpr
+	if Isconst(sw.Ntest, CTBOOL) {
+		s.kind = switchKindTrue
+		if !sw.Ntest.Val.U.(bool) {
+			s.kind = switchKindFalse
+		}
+	}
+
+	walkexpr(&sw.Ntest, &sw.Ninit)
+	t := sw.Type
+	if t == nil {
+		return
+	}
+
+	// convert the switch into OIF statements
+	var cas *NodeList
+	if s.kind == switchKindTrue || s.kind == switchKindFalse {
+		s.exprname = Nodbool(s.kind == switchKindTrue)
+	} else if consttype(sw.Ntest) >= 0 {
+		// leave constants to enable dead code elimination (issue 9608)
+		s.exprname = sw.Ntest
+	} else {
+		s.exprname = temp(sw.Ntest.Type)
+		cas = list1(Nod(OAS, s.exprname, sw.Ntest))
+		typechecklist(cas, Etop)
+	}
+
+	// enumerate the cases, and lop off the default case
+	cc := caseClauses(sw, s.kind)
+	var def *Node
+	if len(cc) > 0 && cc[0].typ == caseKindDefault {
+		def = cc[0].node.Right
+		cc = cc[1:]
+	} else {
+		def = Nod(OBREAK, nil, nil)
+	}
+
+	// handle the cases in order
+	for len(cc) > 0 {
+		// deal with expressions one at a time
+		if !okforcmp[t.Etype] || cc[0].typ != caseKindExprConst {
+			a := s.walkCases(cc[:1])
+			cas = list(cas, a)
+			cc = cc[1:]
+			continue
+		}
+
+		// do binary search on runs of constants
+		var run int
+		for run = 1; run < len(cc) && cc[run].typ == caseKindExprConst; run++ {
+		}
+
+		// sort and compile constants
+		sort.Sort(caseClauseByExpr(cc[:run]))
+		a := s.walkCases(cc[:run])
+		cas = list(cas, a)
+		cc = cc[run:]
+	}
+
+	// handle default case
+	if nerrors == 0 {
+		cas = list(cas, def)
+		sw.Nbody = concat(cas, sw.Nbody)
+		sw.List = nil
+		walkstmtlist(sw.Nbody)
+	}
+}
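+
+// Sketch (illustrative): for
+//
+//	switch x {
+//	case 1:
+//		f()
+//	}
+//
+// the generated code is roughly
+//
+//	tmp := x
+//	if tmp == 1 {
+//		goto l1
+//	}
+//	break // implicit default
+//	l1:
+//		f()
+//		break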
+
+// walkCases generates an AST implementing the cases in cc.
+func (s *exprSwitch) walkCases(cc []*caseClause) *Node {
+	if len(cc) < binarySearchMin {
+		// linear search
+		var cas *NodeList
+		for _, c := range cc {
+			n := c.node
+			lno := int(setlineno(n))
+
+			a := Nod(OIF, nil, nil)
+			if (s.kind != switchKindTrue && s.kind != switchKindFalse) || assignop(n.Left.Type, s.exprname.Type, nil) == OCONVIFACE || assignop(s.exprname.Type, n.Left.Type, nil) == OCONVIFACE {
+				a.Ntest = Nod(OEQ, s.exprname, n.Left) // if name == val
+				typecheck(&a.Ntest, Erv)
+			} else if s.kind == switchKindTrue {
+				a.Ntest = n.Left // if val
+			} else {
+				// s.kind == switchKindFalse
+				a.Ntest = Nod(ONOT, n.Left, nil) // if !val
+				typecheck(&a.Ntest, Erv)
+			}
+			a.Nbody = list1(n.Right) // goto l
+
+			cas = list(cas, a)
+			lineno = int32(lno)
+		}
+		return liststmt(cas)
+	}
+
+	// find the middle and recur
+	half := len(cc) / 2
+	a := Nod(OIF, nil, nil)
+	mid := cc[half-1].node.Left
+	le := Nod(OLE, s.exprname, mid)
+	if Isconst(mid, CTSTR) {
+		// Search by length and then by value; see exprcmp.
+		lenlt := Nod(OLT, Nod(OLEN, s.exprname, nil), Nod(OLEN, mid, nil))
+		leneq := Nod(OEQ, Nod(OLEN, s.exprname, nil), Nod(OLEN, mid, nil))
+		a.Ntest = Nod(OOROR, lenlt, Nod(OANDAND, leneq, le))
+	} else {
+		a.Ntest = le
+	}
+	typecheck(&a.Ntest, Erv)
+	a.Nbody = list1(s.walkCases(cc[:half]))
+	a.Nelse = list1(s.walkCases(cc[half:]))
+	return a
+}
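+
+// For string constants, the test against the pivot mid built above has
+// the shape
+//
+//	if len(s) < len(mid) || (len(s) == len(mid) && s <= mid) { ... }
+//
+// matching the length-then-value ordering used by exprcmp.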
+
+// casebody builds separate lists of statements and cases.
+// It makes labels between cases and statements
+// and deals with fallthrough, break, and unreachable statements.
+func casebody(sw *Node, typeswvar *Node) {
+	if sw.List == nil {
+		return
+	}
+
+	lno := setlineno(sw)
+
+	var cas *NodeList  // cases
+	var stat *NodeList // statements
+	var def *Node      // defaults
+	br := Nod(OBREAK, nil, nil)
+
+	for l := sw.List; l != nil; l = l.Next {
+		n := l.N
+		setlineno(n)
+		if n.Op != OXCASE {
+			Fatal("casebody %v", Oconv(int(n.Op), 0))
+		}
+		n.Op = OCASE
+		needvar := count(n.List) != 1 || n.List.N.Op == OLITERAL
+
+		jmp := Nod(OGOTO, newCaseLabel(), nil)
+		if n.List == nil {
+			if def != nil {
+				Yyerror("more than one default case")
+			}
+			// reuse original default case
+			n.Right = jmp
+			def = n
+		}
+
+		if n.List != nil && n.List.Next == nil {
+			// one case -- reuse OCASE node
+			n.Left = n.List.N
+			n.Right = jmp
+			n.List = nil
+			cas = list(cas, n)
+		} else {
+			// expand multi-valued cases
+			for lc := n.List; lc != nil; lc = lc.Next {
+				cas = list(cas, Nod(OCASE, lc.N, jmp))
+			}
+		}
+
+		stat = list(stat, Nod(OLABEL, jmp.Left, nil))
+		if typeswvar != nil && needvar && n.Nname != nil {
+			l := list1(Nod(ODCL, n.Nname, nil))
+			l = list(l, Nod(OAS, n.Nname, typeswvar))
+			typechecklist(l, Etop)
+			stat = concat(stat, l)
+		}
+		stat = concat(stat, n.Nbody)
+
+		// botch - shouldn't fall through a declaration
+		last := stat.End.N
+		if last.Xoffset == n.Xoffset && last.Op == OXFALL {
+			if typeswvar != nil {
+				setlineno(last)
+				Yyerror("cannot fallthrough in type switch")
+			}
+
+			if l.Next == nil {
+				setlineno(last)
+				Yyerror("cannot fallthrough final case in switch")
+			}
+
+			last.Op = OFALL
+		} else {
+			stat = list(stat, br)
+		}
+	}
+
+	stat = list(stat, br)
+	if def != nil {
+		cas = list(cas, def)
+	}
+
+	sw.List = cas
+	sw.Nbody = stat
+	lineno = lno
+}
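+
+// Sketch: casebody rewrites
+//
+//	case 1, 2:
+//		f()
+//
+// into the case list (OCASE 1 goto l), (OCASE 2 goto l) and the
+// statement list
+//
+//	l:
+//		f()
+//		break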
+
+// nSwitchLabel is the number of switch labels generated.
+// This should be per-function, but it is a global counter for now.
+var nSwitchLabel int
+
+func newCaseLabel() *Node {
+	label := strconv.Itoa(nSwitchLabel)
+	nSwitchLabel++
+	return newname(Lookup(label))
+}
+
+// caseClauses generates a slice of caseClauses
+// corresponding to the clauses in the switch statement sw.
+// kind is the kind of switch statement (switchKind*).
+func caseClauses(sw *Node, kind int) []*caseClause {
+	var cc []*caseClause
+	for l := sw.List; l != nil; l = l.Next {
+		n := l.N
+		c := new(caseClause)
+		cc = append(cc, c)
+		c.ordinal = len(cc)
+		c.node = n
+
+		if n.Left == nil {
+			c.typ = caseKindDefault
+			continue
+		}
+
+		if kind == switchKindType {
+			// type switch
+			switch {
+			case n.Left.Op == OLITERAL:
+				c.typ = caseKindTypeNil
+			case Istype(n.Left.Type, TINTER):
+				c.typ = caseKindTypeVar
+			default:
+				c.typ = caseKindTypeConst
+				c.hash = typehash(n.Left.Type)
+			}
+		} else {
+			// expression switch
+			switch consttype(n.Left) {
+			case CTFLT, CTINT, CTRUNE, CTSTR:
+				c.typ = caseKindExprConst
+			default:
+				c.typ = caseKindExprVar
+			}
+		}
+	}
+
+	if cc == nil {
+		return nil
+	}
+
+	// sort by value and diagnose duplicate cases
+	if kind == switchKindType {
+		// type switch
+		sort.Sort(caseClauseByType(cc))
+		for i, c1 := range cc {
+			if c1.typ == caseKindTypeNil || c1.typ == caseKindDefault {
+				break
+			}
+			for _, c2 := range cc[i+1:] {
+				if c2.typ == caseKindTypeNil || c2.typ == caseKindDefault || c1.hash != c2.hash {
+					break
+				}
+				if Eqtype(c1.node.Left.Type, c2.node.Left.Type) {
+					yyerrorl(int(c2.node.Lineno), "duplicate case %v in type switch\n\tprevious case at %v", c2.node.Left.Type, c1.node.Line())
+				}
+			}
+		}
+	} else {
+		// expression switch
+		sort.Sort(caseClauseByExpr(cc))
+		for i, c1 := range cc {
+			if i+1 == len(cc) {
+				break
+			}
+			c2 := cc[i+1]
+			if exprcmp(c1, c2) != 0 {
+				continue
+			}
+			setlineno(c2.node)
+			Yyerror("duplicate case %v in switch\n\tprevious case at %v", c1.node.Left, c1.node.Line())
+		}
+	}
+
+	// put list back in processing order
+	sort.Sort(caseClauseByOrd(cc))
+	return cc
+}
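+
+// Note that after the final sort the clauses are back in source order,
+// except that a default clause sorts first and, in a type switch, a
+// nil case second, so callers can peel them off the front of the slice.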
+
+// walk generates an AST that implements sw,
+// where sw is a type switch.
+// The AST is generally of the form of a linear
+// search using if..goto, although binary search
+// is used with long runs of concrete types.
+func (s *typeSwitch) walk(sw *Node) {
+	if sw.Ntest == nil {
+		return
+	}
+	if sw.Ntest.Right == nil {
+		setlineno(sw)
+		Yyerror("type switch must have an assignment")
+		return
+	}
+
+	walkexpr(&sw.Ntest.Right, &sw.Ninit)
+	if !Istype(sw.Ntest.Right.Type, TINTER) {
+		Yyerror("type switch must be on an interface")
+		return
+	}
+
+	var cas *NodeList
+
+	// predeclare temporary variables and the boolean var
+	s.facename = temp(sw.Ntest.Right.Type)
+
+	a := Nod(OAS, s.facename, sw.Ntest.Right)
+	typecheck(&a, Etop)
+	cas = list(cas, a)
+
+	s.okname = temp(Types[TBOOL])
+	typecheck(&s.okname, Erv)
+
+	s.hashname = temp(Types[TUINT32])
+	typecheck(&s.hashname, Erv)
+
+	// set up labels and jumps
+	casebody(sw, s.facename)
+
+	// calculate type hash
+	t := sw.Ntest.Right.Type
+	if isnilinter(t) {
+		a = syslook("efacethash", 1)
+	} else {
+		a = syslook("ifacethash", 1)
+	}
+	substArgTypes(a, t)
+	a = Nod(OCALL, a, nil)
+	a.List = list1(s.facename)
+	a = Nod(OAS, s.hashname, a)
+	typecheck(&a, Etop)
+	cas = list(cas, a)
+
+	cc := caseClauses(sw, switchKindType)
+	var def *Node
+	if len(cc) > 0 && cc[0].typ == caseKindDefault {
+		def = cc[0].node.Right
+		cc = cc[1:]
+	} else {
+		def = Nod(OBREAK, nil, nil)
+	}
+
+	// insert type equality check into each case block
+	for _, c := range cc {
+		n := c.node
+		switch c.typ {
+		case caseKindTypeNil:
+			var v Val
+			v.Ctype = CTNIL
+			a = Nod(OIF, nil, nil)
+			a.Ntest = Nod(OEQ, s.facename, nodlit(v))
+			typecheck(&a.Ntest, Erv)
+			a.Nbody = list1(n.Right) // if i==nil { goto l }
+			n.Right = a
+
+		case caseKindTypeVar, caseKindTypeConst:
+			n.Right = s.typeone(n)
+		}
+	}
+
+	// generate list of if statements, binary search for constant sequences
+	for len(cc) > 0 {
+		if cc[0].typ != caseKindTypeConst {
+			n := cc[0].node
+			cas = list(cas, n.Right)
+			cc = cc[1:]
+			continue
+		}
+
+		// identify run of constants
+		var run int
+		for run = 1; run < len(cc) && cc[run].typ == caseKindTypeConst; run++ {
+		}
+
+		// sort by hash
+		sort.Sort(caseClauseByType(cc[:run]))
+
+		// for debugging: linear search
+		if false {
+			for i := 0; i < run; i++ {
+				n := cc[i].node
+				cas = list(cas, n.Right)
+			}
+			continue
+		}
+
+		// combine adjacent cases with the same hash
+		ncase := 0
+		for i := 0; i < run; i++ {
+			ncase++
+			hash := list1(cc[i].node.Right)
+			for j := i + 1; j < run && cc[i].hash == cc[j].hash; j++ {
+				hash = list(hash, cc[j].node.Right)
+			}
+			cc[i].node.Right = liststmt(hash)
+		}
+
+		// binary search among cases to narrow by hash
+		cas = list(cas, s.walkCases(cc[:ncase]))
+		cc = cc[ncase:]
+	}
+
+	// handle default case
+	if nerrors == 0 {
+		cas = list(cas, def)
+		sw.Nbody = concat(cas, sw.Nbody)
+		sw.List = nil
+		walkstmtlist(sw.Nbody)
+	}
+}
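+
+// Sketch (illustrative): for
+//
+//	switch v := i.(type) {
+//	case *T:
+//		f(v)
+//	}
+//
+// the generated code is roughly
+//
+//	face := i
+//	hash := ifacethash(face)
+//	if hash == typehash(*T) {
+//		v, ok := face.(*T)
+//		if ok {
+//			goto l1
+//		}
+//	}
+//	break // implicit default
+//	l1:
+//		f(v)
+//		break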
+
+// typeone generates an AST that jumps to the body of
+// case clause t if the switched-on value has t's type.
+func (s *typeSwitch) typeone(t *Node) *Node {
+	name := t.Nname
+	var init *NodeList
+	if name == nil {
+		typecheck(&nblank, Erv|Easgn)
+		name = nblank
+	} else {
+		init = list1(Nod(ODCL, name, nil))
+	}
+
+	a := Nod(OAS2, nil, nil)
+	a.List = list(list1(name), s.okname) // name, ok =
+	b := Nod(ODOTTYPE, s.facename, nil)
+	b.Type = t.Left.Type // interface.(type)
+	a.Rlist = list1(b)
+	typecheck(&a, Etop)
+	init = list(init, a)
+
+	c := Nod(OIF, nil, nil)
+	c.Ntest = s.okname
+	c.Nbody = list1(t.Right) // if ok { goto l }
+
+	return liststmt(list(init, c))
+}
+
+// walkCases generates an AST implementing the cases in cc.
+func (s *typeSwitch) walkCases(cc []*caseClause) *Node {
+	if len(cc) < binarySearchMin {
+		var cas *NodeList
+		for _, c := range cc {
+			n := c.node
+			if c.typ != caseKindTypeConst {
+				Fatal("typeSwitch walkCases")
+			}
+			a := Nod(OIF, nil, nil)
+			a.Ntest = Nod(OEQ, s.hashname, Nodintconst(int64(c.hash)))
+			typecheck(&a.Ntest, Erv)
+			a.Nbody = list1(n.Right)
+			cas = list(cas, a)
+		}
+		return liststmt(cas)
+	}
+
+	// find the middle and recur
+	half := len(cc) / 2
+	a := Nod(OIF, nil, nil)
+	a.Ntest = Nod(OLE, s.hashname, Nodintconst(int64(cc[half-1].hash)))
+	typecheck(&a.Ntest, Erv)
+	a.Nbody = list1(s.walkCases(cc[:half]))
+	a.Nelse = list1(s.walkCases(cc[half:]))
+	return a
+}
+
+type caseClauseByOrd []*caseClause
+
+func (x caseClauseByOrd) Len() int      { return len(x) }
+func (x caseClauseByOrd) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x caseClauseByOrd) Less(i, j int) bool {
+	c1, c2 := x[i], x[j]
+	switch {
+	// sort default first
+	case c1.typ == caseKindDefault:
+		return true
+	case c2.typ == caseKindDefault:
+		return false
+
+	// sort nil second
+	case c1.typ == caseKindTypeNil:
+		return true
+	case c2.typ == caseKindTypeNil:
+		return false
+	}
+
+	// sort by ordinal
+	return c1.ordinal < c2.ordinal
+}
+
+type caseClauseByExpr []*caseClause
+
+func (x caseClauseByExpr) Len() int      { return len(x) }
+func (x caseClauseByExpr) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x caseClauseByExpr) Less(i, j int) bool {
+	return exprcmp(x[i], x[j]) < 0
+}
+
+func exprcmp(c1, c2 *caseClause) int {
+	// sort non-constants last
+	if c1.typ != caseKindExprConst {
+		return +1
+	}
+	if c2.typ != caseKindExprConst {
+		return -1
+	}
+
+	n1 := c1.node.Left
+	n2 := c2.node.Left
+
+	// sort by type (for switches on interface)
+	ct := int(n1.Val.Ctype)
+	if ct > int(n2.Val.Ctype) {
+		return +1
+	}
+	if ct < int(n2.Val.Ctype) {
+		return -1
+	}
+	if !Eqtype(n1.Type, n2.Type) {
+		if n1.Type.Vargen > n2.Type.Vargen {
+			return +1
+		} else {
+			return -1
+		}
+	}
+
+	// sort by constant value to enable binary search
+	switch ct {
+	case CTFLT:
+		return mpcmpfltflt(n1.Val.U.(*Mpflt), n2.Val.U.(*Mpflt))
+	case CTINT, CTRUNE:
+		return Mpcmpfixfix(n1.Val.U.(*Mpint), n2.Val.U.(*Mpint))
+	case CTSTR:
+		// Sort strings by length and then by value.
+		// It is much cheaper to compare lengths than values,
+		// and all we need here is consistency.
+		// We respect this sorting in exprSwitch.walkCases.
+		a := n1.Val.U.(string)
+		b := n2.Val.U.(string)
+		if len(a) < len(b) {
+			return -1
+		}
+		if len(a) > len(b) {
+			return +1
+		}
+		return stringsCompare(a, b)
+	}
+
+	return 0
+}
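+
+// For example, under this ordering the string cases "z", "ab", "abc"
+// sort as "z" < "ab" < "abc": lengths are compared before values.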
+
+type caseClauseByType []*caseClause
+
+func (x caseClauseByType) Len() int      { return len(x) }
+func (x caseClauseByType) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x caseClauseByType) Less(i, j int) bool {
+	c1, c2 := x[i], x[j]
+	switch {
+	// sort non-constants last
+	case c1.typ != caseKindTypeConst:
+		return false
+	case c2.typ != caseKindTypeConst:
+		return true
+
+	// sort by hash code
+	case c1.hash != c2.hash:
+		return c1.hash < c2.hash
+	}
+
+	// sort by ordinal
+	return c1.ordinal < c2.ordinal
+}
+
+func dumpcase(cc []*caseClause) {
+	for _, c := range cc {
+		switch c.typ {
+		case caseKindDefault:
+			fmt.Printf("case-default\n")
+			fmt.Printf("\tord=%d\n", c.ordinal)
+
+		case caseKindExprConst:
+			fmt.Printf("case-exprconst\n")
+			fmt.Printf("\tord=%d\n", c.ordinal)
+
+		case caseKindExprVar:
+			fmt.Printf("case-exprvar\n")
+			fmt.Printf("\tord=%d\n", c.ordinal)
+			fmt.Printf("\top=%v\n", Oconv(int(c.node.Left.Op), 0))
+
+		case caseKindTypeNil:
+			fmt.Printf("case-typenil\n")
+			fmt.Printf("\tord=%d\n", c.ordinal)
+
+		case caseKindTypeConst:
+			fmt.Printf("case-typeconst\n")
+			fmt.Printf("\tord=%d\n", c.ordinal)
+			fmt.Printf("\thash=%x\n", c.hash)
+
+		case caseKindTypeVar:
+			fmt.Printf("case-typevar\n")
+			fmt.Printf("\tord=%d\n", c.ordinal)
+
+		default:
+			fmt.Printf("case-???\n")
+			fmt.Printf("\tord=%d\n", c.ordinal)
+			fmt.Printf("\top=%v\n", Oconv(int(c.node.Left.Op), 0))
+			fmt.Printf("\thash=%x\n", c.hash)
+		}
+	}
+
+	fmt.Printf("\n")
+}
diff --git a/src/cmd/compile/internal/gc/syntax.go b/src/cmd/compile/internal/gc/syntax.go
new file mode 100644
index 0000000..69348d1
--- /dev/null
+++ b/src/cmd/compile/internal/gc/syntax.go
@@ -0,0 +1,469 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// “Abstract” syntax representation.
+
+package gc
+
+// A Node is a single node in the syntax tree.
+// Actually the syntax tree is a syntax DAG, because there is only one
+// node with Op=ONAME for a given instance of a variable x.
+// The same is true for Op=OTYPE and Op=OLITERAL.
+type Node struct {
+	// Tree structure.
+	// Generic recursive walks should follow these fields.
+	Left  *Node
+	Right *Node
+	Ntest *Node
+	Nincr *Node
+	Ninit *NodeList
+	Nbody *NodeList
+	Nelse *NodeList
+	List  *NodeList
+	Rlist *NodeList
+
+	// most nodes
+	Type  *Type
+	Orig  *Node // original form, for printing, and tracking copies of ONAMEs
+	Nname *Node
+
+	// func
+	Func *Func
+
+	// ONAME
+	Name     *Name
+	Defn     *Node // ONAME: initializing assignment; OLABEL: labeled statement
+	Pack     *Node // real package for import . names
+	Curfn    *Node // function for local variables
+	Paramfld *Type // TFIELD for this PPARAM; also for ODOT, curfn
+	Alloc    *Node // allocation call
+	*Param
+
+	// OPACK
+	Pkg *Pkg
+
+	// OARRAYLIT, OMAPLIT, OSTRUCTLIT.
+	Initplan *InitPlan
+
+	// Escape analysis.
+	Escflowsrc *NodeList // flow(this, src)
+	Escretval  *NodeList // on OCALLxxx, list of dummy return values
+
+	Sym *Sym // various
+
+	Opt interface{} // for optimization passes
+
+	// OLITERAL
+	Val Val
+
+	Xoffset  int64
+	Stkdelta int64 // offset added by stack frame compaction phase.
+
+	// Escape analysis.
+	Escloopdepth int32 // -1: global, 0: return variables, 1: function top level; increased inside function for every loop or label to mark scopes
+
+	Vargen  int32 // unique name for OTYPE/ONAME within a function.  Function outputs are numbered starting at one.
+	Lineno  int32
+	Iota    int32
+	Walkgen uint32
+
+	Funcdepth int32
+
+	// OREGISTER, OINDREG
+	Reg int16
+
+	// most nodes - smaller fields
+	Esclevel Level
+	Esc      uint16 // EscXXX
+
+	Op          uint8
+	Nointerface bool
+	Ullman      uint8 // sethi/ullman number
+	Addable     bool  // addressable
+	Etype       uint8 // op for OASOP, etype for OTYPE, exclam for export, 6g saved reg
+	Bounded     bool  // bounds check unnecessary
+	Class       uint8 // PPARAM, PAUTO, PEXTERN, etc
+	Embedded    uint8 // ODCLFIELD embedded type
+	Colas       bool  // OAS resulting from :=
+	Diag        uint8 // already printed error about this
+	Noescape    bool  // func arguments do not escape; TODO(rsc): move Noescape to Func struct (see CL 7360)
+	Walkdef     uint8
+	Typecheck   uint8
+	Local       bool
+	Dodata      uint8
+	Initorder   uint8
+	Used        bool
+	Isddd       bool // is the argument variadic
+	Implicit    bool
+	Addrtaken   bool // address taken, even if not moved to heap
+	Assigned    bool // is the variable ever assigned to
+	Likely      int8 // likeliness of if statement
+	Hasbreak    bool // has break statement
+}
+
+// Name holds Node fields used only by ONAME nodes.
+type Name struct {
+	Heapaddr  *Node // temp holding heap address of param
+	Inlvar    *Node // ONAME substitute while inlining
+	Decldepth int32 // declaration loop depth, increased for every loop or label
+	Method    bool  // OCALLMETH name
+	Readonly  bool
+	Captured  bool // is the variable captured by a closure
+	Byval     bool // is the variable captured by value or by reference
+	Needzero  bool // if it contains pointers, needs to be zeroed on function entry
+}
+
+type Param struct {
+	Ntype *Node
+
+	// ONAME func param with PHEAP
+	Outerexpr  *Node // expression copied into closure for variable
+	Stackparam *Node // OPARAM node referring to stack copy of param
+
+	// ONAME closure param with PPARAMREF
+	Outer   *Node // outer PPARAMREF in nested closure
+	Closure *Node // ONAME/PHEAP <-> ONAME/PPARAMREF
+	Top     int   // top context (Ecall, Eproc, etc)
+}
+
+// Func holds Node fields used only with function-like nodes.
+type Func struct {
+	Shortname *Node
+	Enter     *NodeList
+	Exit      *NodeList
+	Cvars     *NodeList // closure params
+	Dcl       *NodeList // autodcl for this func/closure
+	Inldcl    *NodeList // copy of dcl for use in inlining
+	Closgen   int
+	Outerfunc *Node
+
+	Inl     *NodeList // copy of the body for use in inlining
+	InlCost int32
+
+	Endlineno int32
+
+	Nosplit        bool // func should not execute on separate stack
+	Nowritebarrier bool // emit compiler error instead of write barrier
+	Dupok          bool // duplicate definitions ok
+	Wrapper        bool // is method wrapper
+	Needctxt       bool // function uses context register (has closure variables)
+}
+
+// Node ops.
+const (
+	OXXX = iota
+
+	// names
+	ONAME    // var, const or func name
+	ONONAME  // unnamed arg or return value: f(int, string) (int, error) { etc }
+	OTYPE    // type name
+	OPACK    // import
+	OLITERAL // literal
+
+	// expressions
+	OADD             // x + y
+	OSUB             // x - y
+	OOR              // x | y
+	OXOR             // x ^ y
+	OADDSTR          // s + "foo"
+	OADDR            // &x
+	OANDAND          // b0 && b1
+	OAPPEND          // append
+	OARRAYBYTESTR    // string(bytes)
+	OARRAYBYTESTRTMP // string(bytes) ephemeral
+	OARRAYRUNESTR    // string(runes)
+	OSTRARRAYBYTE    // []byte(s)
+	OSTRARRAYBYTETMP // []byte(s) ephemeral
+	OSTRARRAYRUNE    // []rune(s)
+	OAS              // x = y or x := y
+	OAS2             // x, y, z = xx, yy, zz
+	OAS2FUNC         // x, y = f()
+	OAS2RECV         // x, ok = <-c
+	OAS2MAPR         // x, ok = m["foo"]
+	OAS2DOTTYPE      // x, ok = I.(int)
+	OASOP            // x += y
+	OASWB            // OAS but with write barrier
+	OCALL            // function call, method call or type conversion, possibly preceded by defer or go.
+	OCALLFUNC        // f()
+	OCALLMETH        // t.Method()
+	OCALLINTER       // err.Error()
+	OCALLPART        // t.Method (without ())
+	OCAP             // cap
+	OCLOSE           // close
+	OCLOSURE         // f = func() { etc }
+	OCMPIFACE        // err1 == err2
+	OCMPSTR          // s1 == s2
+	OCOMPLIT         // composite literal, typechecking may convert to a more specific OXXXLIT.
+	OMAPLIT          // M{"foo":3, "bar":4}
+	OSTRUCTLIT       // T{x:3, y:4}
+	OARRAYLIT        // [2]int{3, 4}
+	OPTRLIT          // &T{x:3, y:4}
+	OCONV            // var i int; var u uint; i = int(u)
+	OCONVIFACE       // I(t)
+	OCONVNOP         // type Int int; var i int; var j Int; i = int(j)
+	OCOPY            // copy
+	ODCL             // var x int
+	ODCLFUNC         // func f() or func (r) f()
+	ODCLFIELD        // struct field, interface field, or func/method argument/return value.
+	ODCLCONST        // const pi = 3.14
+	ODCLTYPE         // type Int int
+	ODELETE          // delete
+	ODOT             // t.x
+	ODOTPTR          // p.x that is implicitly (*p).x
+	ODOTMETH         // t.Method
+	ODOTINTER        // err.Error
+	OXDOT            // t.x, typechecking may convert to a more specific ODOTXXX.
+	ODOTTYPE         // e = err.(MyErr)
+	ODOTTYPE2        // e, ok = err.(MyErr)
+	OEQ              // x == y
+	ONE              // x != y
+	OLT              // x < y
+	OLE              // x <= y
+	OGE              // x >= y
+	OGT              // x > y
+	OIND             // *p
+	OINDEX           // a[i]
+	OINDEXMAP        // m[s]
+	OKEY             // The x:3 in t{x:3, y:4}, the 1:2 in a[1:2], the 2:20 in [3]int{2:20}, etc.
+	OPARAM           // The on-stack copy of a parameter or return value that escapes.
+	OLEN             // len
+	OMAKE            // make, typechecking may convert to a more specific OMAKEXXX.
+	OMAKECHAN        // make(chan int)
+	OMAKEMAP         // make(map[string]int)
+	OMAKESLICE       // make([]int, 0)
+	OMUL             // *
+	ODIV             // x / y
+	OMOD             // x % y
+	OLSH             // x << u
+	ORSH             // x >> u
+	OAND             // x & y
+	OANDNOT          // x &^ y
+	ONEW             // new
+	ONOT             // !b
+	OCOM             // ^x
+	OPLUS            // +x
+	OMINUS           // -y
+	OOROR            // b1 || b2
+	OPANIC           // panic
+	OPRINT           // print
+	OPRINTN          // println
+	OPAREN           // (x)
+	OSEND            // c <- x
+	OSLICE           // v[1:2], typechecking may convert to a more specific OSLICEXXX.
+	OSLICEARR        // a[1:2]
+	OSLICESTR        // s[1:2]
+	OSLICE3          // v[1:2:3], typechecking may convert to OSLICE3ARR.
+	OSLICE3ARR       // a[1:2:3]
+	ORECOVER         // recover
+	ORECV            // <-c
+	ORUNESTR         // string(i)
+	OSELRECV         // case x = <-c:
+	OSELRECV2        // case x, ok = <-c:
+	OIOTA            // iota
+	OREAL            // real
+	OIMAG            // imag
+	OCOMPLEX         // complex
+
+	// statements
+	OBLOCK    // block of code
+	OBREAK    // break
+	OCASE     // case, after being verified by swt.go's casebody.
+	OXCASE    // case, before verification.
+	OCONTINUE // continue
+	ODEFER    // defer
+	OEMPTY    // no-op
+	OFALL     // fallthrough, after being verified by swt.go's casebody.
+	OXFALL    // fallthrough, before verification.
+	OFOR      // for
+	OGOTO     // goto
+	OIF       // if
+	OLABEL    // label:
+	OPROC     // go
+	ORANGE    // range
+	ORETURN   // return
+	OSELECT   // select
+	OSWITCH   // switch x
+	OTYPESW   // switch err.(type)
+
+	// types
+	OTCHAN   // chan int
+	OTMAP    // map[string]int
+	OTSTRUCT // struct{}
+	OTINTER  // interface{}
+	OTFUNC   // func()
+	OTARRAY  // []int, [8]int, [N]int or [...]int
+
+	// misc
+	ODDD        // func f(args ...int) or f(l...) or var a = [...]int{0, 1, 2}.
+	ODDDARG     // func f(args ...int), introduced by escape analysis.
+	OINLCALL    // intermediary representation of an inlined call.
+	OEFACE      // itable and data words of an empty-interface value.
+	OITAB       // itable word of an interface value.
+	OSPTR       // base pointer of a slice or string.
+	OCLOSUREVAR // variable reference at beginning of closure function
+	OCFUNC      // reference to c function pointer (not go func value)
+	OCHECKNIL   // emit code to ensure pointer/interface not nil
+	OVARKILL    // variable is dead
+
+	// thearch-specific registers
+	OREGISTER // a register, such as AX.
+	OINDREG   // offset plus indirect of a register, such as 8(SP).
+
+	// arch-specific opcodes
+	OCMP    // compare: ACMP.
+	ODEC    // decrement: ADEC.
+	OINC    // increment: AINC.
+	OEXTEND // extend: ACWD/ACDQ/ACQO.
+	OHMUL   // high mul: AMUL/AIMUL for unsigned/signed (OMUL uses AIMUL for both).
+	OLROT   // left rotate: AROL.
+	ORROTC  // right rotate-carry: ARCR.
+	ORETJMP // return to other function
+	OPS     // compare parity set (for x86 NaN check)
+	OPC     // compare parity clear (for x86 NaN check)
+	OSQRT   // sqrt(float64), on systems that have hw support
+	OGETG   // runtime.getg() (read g pointer)
+
+	OEND
+)
+
+/*
+ * Every node has a walkgen field.
+ * If you want to do a traversal of a node graph that
+ * might contain duplicates and want to avoid
+ * visiting the same nodes twice, increment walkgen
+ * before starting.  Then before processing a node, do
+ *
+ *	if n.Walkgen == walkgen {
+ *		return
+ *	}
+ *	n.Walkgen = walkgen
+ *
+ * Such a walk cannot call another such walk recursively,
+ * because of the use of the global walkgen.
+ */
+var walkgen uint32
+
+// A NodeList is a linked list of nodes.
+// TODO(rsc): Some uses of NodeList should be made into slices.
+// The remaining ones probably just need a simple linked list,
+// not one with concatenation support.
+type NodeList struct {
+	N    *Node
+	Next *NodeList
+	End  *NodeList
+}
+
+// concat returns the concatenation of the lists a and b.
+// The storage taken by both is reused for the result.
+func concat(a *NodeList, b *NodeList) *NodeList {
+	if a == nil {
+		return b
+	}
+	if b == nil {
+		return a
+	}
+
+	a.End.Next = b
+	a.End = b.End
+	b.End = nil
+	return a
+}
+
+// list1 returns a one-element list containing n.
+func list1(n *Node) *NodeList {
+	if n == nil {
+		return nil
+	}
+	if n.Op == OBLOCK && n.Ninit == nil {
+		// Flatten list and steal storage.
+		// Poison pointer to catch errant uses.
+		l := n.List
+
+		n.List = nil
+		return l
+	}
+
+	l := new(NodeList)
+	l.N = n
+	l.End = l
+	return l
+}
+
+// list returns the result of appending n to l.
+func list(l *NodeList, n *Node) *NodeList {
+	return concat(l, list1(n))
+}
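+
+// Typical use (sketch):
+//
+//	var l *NodeList
+//	l = list(l, n1) // l is [n1]
+//	l = list(l, n2) // l is [n1 n2]; l.End.N == n2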
+
+// listsort sorts *l in place according to the 3-way comparison function f.
+// The algorithm is mergesort, so it is guaranteed to be O(n log n).
+func listsort(l **NodeList, f func(*Node, *Node) int) {
+	if *l == nil || (*l).Next == nil {
+		return
+	}
+
+	l1 := *l
+	l2 := *l
+	for {
+		l2 = l2.Next
+		if l2 == nil {
+			break
+		}
+		l2 = l2.Next
+		if l2 == nil {
+			break
+		}
+		l1 = l1.Next
+	}
+
+	l2 = l1.Next
+	l1.Next = nil
+	l2.End = (*l).End
+	(*l).End = l1
+
+	l1 = *l
+	listsort(&l1, f)
+	listsort(&l2, f)
+
+	if f(l1.N, l2.N) < 0 {
+		*l = l1
+	} else {
+		*l = l2
+		l2 = l1
+		l1 = *l
+	}
+
+	// now l1 == *l; and l1 < l2
+
+	var le *NodeList
+	for (l1 != nil) && (l2 != nil) {
+		for (l1.Next != nil) && f(l1.Next.N, l2.N) < 0 {
+			l1 = l1.Next
+		}
+
+		// l1 is last one from l1 that is < l2
+		le = l1.Next // le is the rest of l1, first one that is >= l2
+		if le != nil {
+			le.End = (*l).End
+		}
+
+		(*l).End = l1       // cut *l at l1
+		*l = concat(*l, l2) // glue l2 to *l's tail
+
+		l1 = l2 // l1 is the first element of *l that is < the new l2
+		l2 = le // ... because l2 now is the old tail of l1
+	}
+
+	*l = concat(*l, l2) // any remainder
+}
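+
+// Usage sketch (dcl and the comparator are hypothetical):
+//
+//	listsort(&dcl, func(a, b *Node) int {
+//		return int(a.Lineno) - int(b.Lineno)
+//	})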
+
+// count returns the length of the list l.
+func count(l *NodeList) int {
+	n := int64(0)
+	for ; l != nil; l = l.Next {
+		n++
+	}
+	if int64(int(n)) != n { // Overflow.
+		Yyerror("too many elements in list")
+	}
+	return int(n)
+}
diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go
new file mode 100644
index 0000000..0395ec5
--- /dev/null
+++ b/src/cmd/compile/internal/gc/typecheck.go
@@ -0,0 +1,4069 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+	"cmd/internal/obj"
+	"fmt"
+	"math"
+	"strings"
+)
+
+/*
+ * type check the whole tree of an expression.
+ * calculates expression types.
+ * evaluates compile time constants.
+ * marks variables that escape the local frame.
+ * rewrites n.Op to be more specific in some cases.
+ */
+var typecheckdefstack *NodeList
+
+/*
+ * resolve ONONAME to definition, if any.
+ */
+func resolve(n *Node) *Node {
+	if n != nil && n.Op == ONONAME && n.Sym != nil {
+		r := n.Sym.Def
+		if r != nil {
+			if r.Op != OIOTA {
+				n = r
+			} else if n.Iota >= 0 {
+				n = Nodintconst(int64(n.Iota))
+			}
+		}
+	}
+
+	return n
+}
+
+func typechecklist(l *NodeList, top int) {
+	for ; l != nil; l = l.Next {
+		typecheck(&l.N, top)
+	}
+}
+
+var _typekind = []string{
+	TINT:        "int",
+	TUINT:       "uint",
+	TINT8:       "int8",
+	TUINT8:      "uint8",
+	TINT16:      "int16",
+	TUINT16:     "uint16",
+	TINT32:      "int32",
+	TUINT32:     "uint32",
+	TINT64:      "int64",
+	TUINT64:     "uint64",
+	TUINTPTR:    "uintptr",
+	TCOMPLEX64:  "complex64",
+	TCOMPLEX128: "complex128",
+	TFLOAT32:    "float32",
+	TFLOAT64:    "float64",
+	TBOOL:       "bool",
+	TSTRING:     "string",
+	TPTR32:      "pointer",
+	TPTR64:      "pointer",
+	TUNSAFEPTR:  "unsafe.Pointer",
+	TSTRUCT:     "struct",
+	TINTER:      "interface",
+	TCHAN:       "chan",
+	TMAP:        "map",
+	TARRAY:      "array",
+	TFUNC:       "func",
+	TNIL:        "nil",
+	TIDEAL:      "untyped number",
+}
+
+func typekind(t *Type) string {
+	if Isslice(t) {
+		return "slice"
+	}
+	et := int(t.Etype)
+	if 0 <= et && et < len(_typekind) {
+		s := _typekind[et]
+		if s != "" {
+			return s
+		}
+	}
+	return fmt.Sprintf("etype=%d", et)
+}
+
+/*
+ * sprint_depchain prints a dependency chain
+ * of nodes into fmt_.
+ * It is used by typecheck in the case of OLITERAL nodes
+ * to print constant definition loops.
+ */
+func sprint_depchain(fmt_ *string, stack *NodeList, cur *Node, first *Node) {
+	for l := stack; l != nil; l = l.Next {
+		if l.N.Op == cur.Op {
+			if l.N != first {
+				sprint_depchain(fmt_, l.Next, l.N, first)
+			}
+			*fmt_ += fmt.Sprintf("\n\t%v: %v uses %v", l.N.Line(), l.N, cur)
+			return
+		}
+	}
+}
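+
+// The chain printed looks like (illustrative)
+//
+//	x.go:3: a uses b
+//	x.go:4: b uses a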
+
+/*
+ * type check node *np.
+ * replaces *np with a new pointer in some cases.
+ * returns the final value of *np as a convenience.
+ */
+
+var typecheck_tcstack *NodeList
+var typecheck_tcfree *NodeList
+
+func typecheck(np **Node, top int) *Node {
+	// cannot type check until all the source has been parsed
+	if typecheckok == 0 {
+		Fatal("early typecheck")
+	}
+
+	n := *np
+	if n == nil {
+		return nil
+	}
+
+	lno := int(setlineno(n))
+
+	// Skip over parens.
+	for n.Op == OPAREN {
+		n = n.Left
+	}
+
+	// Resolve definition of name and value of iota lazily.
+	n = resolve(n)
+
+	*np = n
+
+	// Skip typecheck if already done.
+	// But re-typecheck ONAME/OTYPE/OLITERAL/OPACK node in case context has changed.
+	if n.Typecheck == 1 {
+		switch n.Op {
+		case ONAME, OTYPE, OLITERAL, OPACK:
+			break
+
+		default:
+			lineno = int32(lno)
+			return n
+		}
+	}
+
+	if n.Typecheck == 2 {
+		// Typechecking loop. Try printing a meaningful message;
+		// otherwise, fall back to dumping the typechecking stack.
+		var fmt_ string
+		switch n.Op {
+		// We can already diagnose variables used as types.
+		case ONAME:
+			if top&(Erv|Etype) == Etype {
+				Yyerror("%v is not a type", n)
+			}
+
+		case OLITERAL:
+			if top&(Erv|Etype) == Etype {
+				Yyerror("%v is not a type", n)
+				break
+			}
+
+			fmt_ = ""
+			sprint_depchain(&fmt_, typecheck_tcstack, n, n)
+			yyerrorl(int(n.Lineno), "constant definition loop%s", fmt_)
+		}
+
+		if nsavederrors+nerrors == 0 {
+			fmt_ = ""
+			for l := typecheck_tcstack; l != nil; l = l.Next {
+				fmt_ += fmt.Sprintf("\n\t%v %v", l.N.Line(), l.N)
+			}
+			Yyerror("typechecking loop involving %v%s", n, fmt_)
+		}
+
+		lineno = int32(lno)
+		return n
+	}
+
+	n.Typecheck = 2
+
+	var l *NodeList
+	if typecheck_tcfree != nil {
+		l = typecheck_tcfree
+		typecheck_tcfree = l.Next
+	} else {
+		l = new(NodeList)
+	}
+	l.Next = typecheck_tcstack
+	l.N = n
+	typecheck_tcstack = l
+
+	typecheck1(&n, top)
+	*np = n
+	n.Typecheck = 1
+
+	if typecheck_tcstack != l {
+		Fatal("typecheck stack out of sync")
+	}
+	typecheck_tcstack = l.Next
+	l.Next = typecheck_tcfree
+	typecheck_tcfree = l
+
+	lineno = int32(lno)
+	return n
+}
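+
+// n.Typecheck acts as a three-state marker here:
+// 0 = not yet typechecked, 1 = done, 2 = in progress,
+// with state 2 used above to detect typechecking loops.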
+
+/*
+ * does n contain a call or receive operation?
+ */
+func callrecv(n *Node) bool {
+	if n == nil {
+		return false
+	}
+
+	switch n.Op {
+	case OCALL,
+		OCALLMETH,
+		OCALLINTER,
+		OCALLFUNC,
+		ORECV,
+		OCAP,
+		OLEN,
+		OCOPY,
+		ONEW,
+		OAPPEND,
+		ODELETE:
+		return true
+	}
+
+	return callrecv(n.Left) || callrecv(n.Right) || callrecv(n.Ntest) || callrecv(n.Nincr) || callrecvlist(n.Ninit) || callrecvlist(n.Nbody) || callrecvlist(n.Nelse) || callrecvlist(n.List) || callrecvlist(n.Rlist)
+}
+
+func callrecvlist(l *NodeList) bool {
+	for ; l != nil; l = l.Next {
+		if callrecv(l.N) {
+			return true
+		}
+	}
+	return false
+}
+
+// indexlit implements typechecking of untyped values as
+// array/slice indexes. It is equivalent to defaultlit
+// except for constants of numerical kind, which are acceptable
+// whenever they can be represented by a value of type int.
+func indexlit(np **Node) {
+	n := *np
+	if n == nil || !isideal(n.Type) {
+		return
+	}
+	switch consttype(n) {
+	case CTINT, CTRUNE, CTFLT, CTCPLX:
+		defaultlit(np, Types[TINT])
+	}
+
+	defaultlit(np, nil)
+}
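+
+// For example, a[1.0] is accepted: the untyped constant 1.0 is
+// representable as an int, so it is converted to Types[TINT] here.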
+
+func typecheck1(np **Node, top int) {
+	n := *np
+	defer func() {
+		*np = n
+	}()
+
+	if n.Sym != nil {
+		if n.Op == ONAME && n.Etype != 0 && top&Ecall == 0 {
+			Yyerror("use of builtin %v not in function call", n.Sym)
+			n.Type = nil
+			return
+		}
+
+		typecheckdef(n)
+		if n.Op == ONONAME {
+			n.Type = nil
+			return
+		}
+	}
+
+	ok := 0
+OpSwitch:
+	switch n.Op {
+	// until typecheck is complete, do nothing.
+	default:
+		Dump("typecheck", n)
+
+		Fatal("typecheck %v", Oconv(int(n.Op), 0))
+
+		/*
+		 * names
+		 */
+	case OLITERAL:
+		ok |= Erv
+
+		if n.Type == nil && n.Val.Ctype == CTSTR {
+			n.Type = idealstring
+		}
+		break OpSwitch
+
+	case ONONAME:
+		ok |= Erv
+		break OpSwitch
+
+	case ONAME:
+		if n.Name.Decldepth == 0 {
+			n.Name.Decldepth = decldepth
+		}
+		if n.Etype != 0 {
+			ok |= Ecall
+			break OpSwitch
+		}
+
+		if top&Easgn == 0 {
+			// not a write to the variable
+			if isblank(n) {
+				Yyerror("cannot use _ as value")
+				n.Type = nil
+				return
+			}
+
+			n.Used = true
+		}
+
+		if top&Ecall == 0 && isunsafebuiltin(n) {
+			Yyerror("%v is not an expression, must be called", n)
+			n.Type = nil
+			return
+		}
+
+		ok |= Erv
+		break OpSwitch
+
+	case OPACK:
+		Yyerror("use of package %v without selector", n.Sym)
+		n.Type = nil
+		return
+
+	case ODDD:
+		break
+
+		/*
+		 * types (OIND is with exprs)
+		 */
+	case OTYPE:
+		ok |= Etype
+
+		if n.Type == nil {
+			n.Type = nil
+			return
+		}
+
+	case OTARRAY:
+		ok |= Etype
+		t := typ(TARRAY)
+		l := n.Left
+		r := n.Right
+		if l == nil {
+			t.Bound = -1 // slice
+		} else if l.Op == ODDD {
+			t.Bound = -100 // to be filled in
+			if top&Ecomplit == 0 && n.Diag == 0 {
+				t.Broke = 1
+				n.Diag = 1
+				Yyerror("use of [...] array outside of array literal")
+			}
+		} else {
+			l := typecheck(&n.Left, Erv)
+			var v Val
+			switch consttype(l) {
+			case CTINT, CTRUNE:
+				v = l.Val
+
+			case CTFLT:
+				v = toint(l.Val)
+
+			default:
+				if l.Type != nil && Isint[l.Type.Etype] && l.Op != OLITERAL {
+					Yyerror("non-constant array bound %v", l)
+				} else {
+					Yyerror("invalid array bound %v", l)
+				}
+				n.Type = nil
+				return
+			}
+
+			t.Bound = Mpgetfix(v.U.(*Mpint))
+			if doesoverflow(v, Types[TINT]) {
+				Yyerror("array bound is too large")
+				n.Type = nil
+				return
+			} else if t.Bound < 0 {
+				Yyerror("array bound must be non-negative")
+				n.Type = nil
+				return
+			}
+		}
+
+		typecheck(&r, Etype)
+		if r.Type == nil {
+			n.Type = nil
+			return
+		}
+		t.Type = r.Type
+		n.Op = OTYPE
+		n.Type = t
+		n.Left = nil
+		n.Right = nil
+		if t.Bound != -100 {
+			checkwidth(t)
+		}
+
+	case OTMAP:
+		ok |= Etype
+		l := typecheck(&n.Left, Etype)
+		r := typecheck(&n.Right, Etype)
+		if l.Type == nil || r.Type == nil {
+			n.Type = nil
+			return
+		}
+		n.Op = OTYPE
+		n.Type = maptype(l.Type, r.Type)
+		n.Left = nil
+		n.Right = nil
+
+	case OTCHAN:
+		ok |= Etype
+		l := typecheck(&n.Left, Etype)
+		if l.Type == nil {
+			n.Type = nil
+			return
+		}
+		t := typ(TCHAN)
+		t.Type = l.Type
+		t.Chan = n.Etype
+		n.Op = OTYPE
+		n.Type = t
+		n.Left = nil
+		n.Etype = 0
+
+	case OTSTRUCT:
+		ok |= Etype
+		n.Op = OTYPE
+		n.Type = tostruct(n.List)
+		if n.Type == nil || n.Type.Broke != 0 {
+			n.Type = nil
+			return
+		}
+		n.List = nil
+
+	case OTINTER:
+		ok |= Etype
+		n.Op = OTYPE
+		n.Type = tointerface(n.List)
+		if n.Type == nil {
+			n.Type = nil
+			return
+		}
+
+	case OTFUNC:
+		ok |= Etype
+		n.Op = OTYPE
+		n.Type = functype(n.Left, n.List, n.Rlist)
+		if n.Type == nil {
+			n.Type = nil
+			return
+		}
+
+		/*
+		 * type or expr
+		 */
+	case OIND:
+		ntop := Erv | Etype
+
+		if top&Eaddr == 0 { // The *x in &*x is not an indirect.
+			ntop |= Eindir
+		}
+		ntop |= top & Ecomplit
+		l := typecheck(&n.Left, ntop)
+		t := l.Type
+		if t == nil {
+			n.Type = nil
+			return
+		}
+		if l.Op == OTYPE {
+			ok |= Etype
+			n.Op = OTYPE
+			n.Type = Ptrto(l.Type)
+			n.Left = nil
+			break OpSwitch
+		}
+
+		if !Isptr[t.Etype] {
+			if top&(Erv|Etop) != 0 {
+				Yyerror("invalid indirect of %v", Nconv(n.Left, obj.FmtLong))
+				n.Type = nil
+				return
+			}
+
+			break OpSwitch
+		}
+
+		ok |= Erv
+		n.Type = t.Type
+		break OpSwitch
+
+		/*
+		 * arithmetic exprs
+		 */
+	case OASOP,
+		OADD,
+		OAND,
+		OANDAND,
+		OANDNOT,
+		ODIV,
+		OEQ,
+		OGE,
+		OGT,
+		OLE,
+		OLT,
+		OLSH,
+		ORSH,
+		OMOD,
+		OMUL,
+		ONE,
+		OOR,
+		OOROR,
+		OSUB,
+		OXOR:
+		var l *Node
+		var op int
+		var r *Node
+		if n.Op == OASOP {
+			ok |= Etop
+			l = typecheck(&n.Left, Erv)
+			r = typecheck(&n.Right, Erv)
+			checkassign(n, n.Left)
+			if l.Type == nil || r.Type == nil {
+				n.Type = nil
+				return
+			}
+			op = int(n.Etype)
+		} else {
+			ok |= Erv
+			l = typecheck(&n.Left, Erv|top&Eiota)
+			r = typecheck(&n.Right, Erv|top&Eiota)
+			if l.Type == nil || r.Type == nil {
+				n.Type = nil
+				return
+			}
+			op = int(n.Op)
+		}
+		if op == OLSH || op == ORSH {
+			defaultlit(&r, Types[TUINT])
+			n.Right = r
+			t := r.Type
+			if !Isint[t.Etype] || Issigned[t.Etype] {
+				Yyerror("invalid operation: %v (shift count type %v, must be unsigned integer)", n, r.Type)
+				n.Type = nil
+				return
+			}
+
+			t = l.Type
+			if t != nil && t.Etype != TIDEAL && !Isint[t.Etype] {
+				Yyerror("invalid operation: %v (shift of type %v)", n, t)
+				n.Type = nil
+				return
+			}
+
+			// no defaultlit for left
+			// the outer context gives the type
+			n.Type = l.Type
+
+			break OpSwitch
+		}
+
+		// ideal mixed with non-ideal
+		defaultlit2(&l, &r, 0)
+
+		n.Left = l
+		n.Right = r
+		if l.Type == nil || r.Type == nil {
+			n.Type = nil
+			return
+		}
+		t := l.Type
+		if t.Etype == TIDEAL {
+			t = r.Type
+		}
+		et := int(t.Etype)
+		if et == TIDEAL {
+			et = TINT
+		}
+		aop := 0
+		if iscmp[n.Op] && t.Etype != TIDEAL && !Eqtype(l.Type, r.Type) {
+			// comparison is okay as long as one side is
+			// assignable to the other.  convert so they have
+			// the same type.
+			//
+			// the only conversion that isn't a no-op is concrete == interface.
+			// in that case, check comparability of the concrete type.
+			// The conversion allocates, so only do it if the concrete type is huge.
+			if r.Type.Etype != TBLANK {
+				aop = assignop(l.Type, r.Type, nil)
+				if aop != 0 {
+					if Isinter(r.Type) && !Isinter(l.Type) && algtype1(l.Type, nil) == ANOEQ {
+						Yyerror("invalid operation: %v (operator %v not defined on %s)", n, Oconv(int(op), 0), typekind(l.Type))
+						n.Type = nil
+						return
+					}
+
+					dowidth(l.Type)
+					if Isinter(r.Type) == Isinter(l.Type) || l.Type.Width >= 1<<16 {
+						l = Nod(aop, l, nil)
+						l.Type = r.Type
+						l.Typecheck = 1
+						n.Left = l
+					}
+
+					t = r.Type
+					goto converted
+				}
+			}
+
+			if l.Type.Etype != TBLANK {
+				aop = assignop(r.Type, l.Type, nil)
+				if aop != 0 {
+					if Isinter(l.Type) && !Isinter(r.Type) && algtype1(r.Type, nil) == ANOEQ {
+						Yyerror("invalid operation: %v (operator %v not defined on %s)", n, Oconv(int(op), 0), typekind(r.Type))
+						n.Type = nil
+						return
+					}
+
+					dowidth(r.Type)
+					if Isinter(r.Type) == Isinter(l.Type) || r.Type.Width >= 1<<16 {
+						r = Nod(aop, r, nil)
+						r.Type = l.Type
+						r.Typecheck = 1
+						n.Right = r
+					}
+
+					t = l.Type
+				}
+			}
+
+		converted:
+			et = int(t.Etype)
+		}
+
+		if t.Etype != TIDEAL && !Eqtype(l.Type, r.Type) {
+			defaultlit2(&l, &r, 1)
+			if n.Op == OASOP && n.Implicit {
+				Yyerror("invalid operation: %v (non-numeric type %v)", n, l.Type)
+				n.Type = nil
+				return
+			}
+
+			if Isinter(r.Type) == Isinter(l.Type) || aop == 0 {
+				Yyerror("invalid operation: %v (mismatched types %v and %v)", n, l.Type, r.Type)
+				n.Type = nil
+				return
+			}
+		}
+
+		if !okfor[op][et] {
+			Yyerror("invalid operation: %v (operator %v not defined on %s)", n, Oconv(int(op), 0), typekind(t))
+			n.Type = nil
+			return
+		}
+
+		// okfor allows any array == array, map == map, func == func.
+		// restrict to slice/map/func == nil and nil == slice/map/func.
+		if Isfixedarray(l.Type) && algtype1(l.Type, nil) == ANOEQ {
+			Yyerror("invalid operation: %v (%v cannot be compared)", n, l.Type)
+			n.Type = nil
+			return
+		}
+
+		if Isslice(l.Type) && !isnil(l) && !isnil(r) {
+			Yyerror("invalid operation: %v (slice can only be compared to nil)", n)
+			n.Type = nil
+			return
+		}
+
+		if l.Type.Etype == TMAP && !isnil(l) && !isnil(r) {
+			Yyerror("invalid operation: %v (map can only be compared to nil)", n)
+			n.Type = nil
+			return
+		}
+
+		if l.Type.Etype == TFUNC && !isnil(l) && !isnil(r) {
+			Yyerror("invalid operation: %v (func can only be compared to nil)", n)
+			n.Type = nil
+			return
+		}
+
+		var badtype *Type
+		if l.Type.Etype == TSTRUCT && algtype1(l.Type, &badtype) == ANOEQ {
+			Yyerror("invalid operation: %v (struct containing %v cannot be compared)", n, badtype)
+			n.Type = nil
+			return
+		}
+
+		t = l.Type
+		if iscmp[n.Op] {
+			evconst(n)
+			t = idealbool
+			if n.Op != OLITERAL {
+				defaultlit2(&l, &r, 1)
+				n.Left = l
+				n.Right = r
+			}
+		} else if n.Op == OANDAND || n.Op == OOROR {
+			if l.Type == r.Type {
+				t = l.Type
+			} else if l.Type == idealbool {
+				t = r.Type
+			} else if r.Type == idealbool {
+				t = l.Type
+			}
+		} else if t == idealbool {
+			// non-comparison operators on ideal bools should
+			// make them lose their ideal-ness.
+			t = Types[TBOOL]
+		}
+
+		if et == TSTRING {
+			if iscmp[n.Op] {
+				n.Etype = n.Op
+				n.Op = OCMPSTR
+			} else if n.Op == OADD {
+				// create OADDSTR node with list of strings in x + y + z + (w + v) + ...
+				n.Op = OADDSTR
+
+				if l.Op == OADDSTR {
+					n.List = l.List
+				} else {
+					n.List = list1(l)
+				}
+				if r.Op == OADDSTR {
+					n.List = concat(n.List, r.List)
+				} else {
+					n.List = list(n.List, r)
+				}
+				n.Left = nil
+				n.Right = nil
+			}
+		}
+
+		if et == TINTER {
+			if l.Op == OLITERAL && l.Val.Ctype == CTNIL {
+				// swap for back end
+				n.Left = r
+				n.Right = l
+			} else if r.Op == OLITERAL && r.Val.Ctype == CTNIL {
+				// leave alone for back end
+			} else if Isinter(r.Type) == Isinter(l.Type) {
+				n.Etype = n.Op
+				n.Op = OCMPIFACE
+			}
+		}
+
+		if (op == ODIV || op == OMOD) && Isconst(r, CTINT) {
+			if mpcmpfixc(r.Val.U.(*Mpint), 0) == 0 {
+				Yyerror("division by zero")
+				n.Type = nil
+				return
+			}
+		}
+
+		n.Type = t
+		break OpSwitch
+
+	case OCOM, OMINUS, ONOT, OPLUS:
+		ok |= Erv
+		l := typecheck(&n.Left, Erv|top&Eiota)
+		t := l.Type
+		if t == nil {
+			n.Type = nil
+			return
+		}
+		if !okfor[n.Op][t.Etype] {
+			Yyerror("invalid operation: %v %v", Oconv(int(n.Op), 0), t)
+			n.Type = nil
+			return
+		}
+
+		n.Type = t
+		break OpSwitch
+
+		/*
+		 * exprs
+		 */
+	case OADDR:
+		ok |= Erv
+
+		typecheck(&n.Left, Erv|Eaddr)
+		if n.Left.Type == nil {
+			n.Type = nil
+			return
+		}
+		checklvalue(n.Left, "take the address of")
+		r := outervalue(n.Left)
+		var l *Node
+		for l = n.Left; l != r; l = l.Left {
+			l.Addrtaken = true
+			if l.Param != nil && l.Closure != nil {
+				l.Closure.Addrtaken = true
+			}
+		}
+
+		if l.Orig != l && l.Op == ONAME {
+			Fatal("found non-orig name node %v", l)
+		}
+		l.Addrtaken = true
+		if l.Param != nil && l.Closure != nil {
+			l.Closure.Addrtaken = true
+		}
+		defaultlit(&n.Left, nil)
+		l = n.Left
+		t := l.Type
+		if t == nil {
+			n.Type = nil
+			return
+		}
+		n.Type = Ptrto(t)
+		break OpSwitch
+
+	case OCOMPLIT:
+		ok |= Erv
+		typecheckcomplit(&n)
+		if n.Type == nil {
+			n.Type = nil
+			return
+		}
+		break OpSwitch
+
+	case OXDOT, ODOT:
+		if n.Op == OXDOT {
+			n = adddot(n)
+			n.Op = ODOT
+			if n.Left == nil {
+				n.Type = nil
+				return
+			}
+		}
+
+		typecheck(&n.Left, Erv|Etype)
+
+		defaultlit(&n.Left, nil)
+		if n.Right.Op != ONAME {
+			Yyerror("rhs of . must be a name") // impossible
+			n.Type = nil
+			return
+		}
+
+		t := n.Left.Type
+		if t == nil {
+			adderrorname(n)
+			n.Type = nil
+			return
+		}
+
+		r := n.Right
+
+		if n.Left.Op == OTYPE {
+			if !looktypedot(n, t, 0) {
+				if looktypedot(n, t, 1) {
+					Yyerror("%v undefined (cannot refer to unexported method %v)", n, n.Right.Sym)
+				} else {
+					Yyerror("%v undefined (type %v has no method %v)", n, t, n.Right.Sym)
+				}
+				n.Type = nil
+				return
+			}
+
+			if n.Type.Etype != TFUNC || n.Type.Thistuple != 1 {
+				Yyerror("type %v has no method %v", n.Left.Type, Sconv(n.Right.Sym, obj.FmtShort))
+				n.Type = nil
+				return
+			}
+
+			n.Op = ONAME
+			if n.Name == nil {
+				n.Name = new(Name)
+			}
+			n.Sym = n.Right.Sym
+			n.Type = methodfunc(n.Type, n.Left.Type)
+			n.Xoffset = 0
+			n.Class = PFUNC
+			ok = Erv
+			break OpSwitch
+		}
+
+		if Isptr[t.Etype] && t.Type.Etype != TINTER {
+			t = t.Type
+			if t == nil {
+				n.Type = nil
+				return
+			}
+			n.Op = ODOTPTR
+			checkwidth(t)
+		}
+
+		if isblank(n.Right) {
+			Yyerror("cannot refer to blank field or method")
+			n.Type = nil
+			return
+		}
+
+		if lookdot(n, t, 0) == nil {
+			// Legitimate field or method lookup failed, try to explain the error
+			switch {
+			case isnilinter(t):
+				Yyerror("%v undefined (type %v is interface with no methods)", n, n.Left.Type)
+
+			case Isptr[t.Etype] && Isinter(t.Type):
+				// Pointer to interface is almost always a mistake.
+				Yyerror("%v undefined (type %v is pointer to interface, not interface)", n, n.Left.Type)
+
+			case lookdot(n, t, 1) != nil:
+				// Field or method matches by name, but it is not exported.
+				Yyerror("%v undefined (cannot refer to unexported field or method %v)", n, n.Right.Sym)
+
+			default:
+				if mt := lookdot(n, t, 2); mt != nil { // Case-insensitive lookup.
+					Yyerror("%v undefined (type %v has no field or method %v, but does have %v)", n, n.Left.Type, n.Right.Sym, mt.Sym)
+				} else {
+					Yyerror("%v undefined (type %v has no field or method %v)", n, n.Left.Type, n.Right.Sym)
+				}
+			}
+			n.Type = nil
+			return
+		}
+
+		switch n.Op {
+		case ODOTINTER, ODOTMETH:
+			if top&Ecall != 0 {
+				ok |= Ecall
+			} else {
+				typecheckpartialcall(n, r)
+				ok |= Erv
+			}
+
+		default:
+			ok |= Erv
+		}
+
+		break OpSwitch
+
+	case ODOTTYPE:
+		ok |= Erv
+		typecheck(&n.Left, Erv)
+		defaultlit(&n.Left, nil)
+		l := n.Left
+		t := l.Type
+		if t == nil {
+			n.Type = nil
+			return
+		}
+		if !Isinter(t) {
+			Yyerror("invalid type assertion: %v (non-interface type %v on left)", n, t)
+			n.Type = nil
+			return
+		}
+
+		if n.Right != nil {
+			typecheck(&n.Right, Etype)
+			n.Type = n.Right.Type
+			n.Right = nil
+			if n.Type == nil {
+				n.Type = nil
+				return
+			}
+		}
+
+		if n.Type != nil && n.Type.Etype != TINTER {
+			var have *Type
+			var missing *Type
+			var ptr int
+			if !implements(n.Type, t, &missing, &have, &ptr) {
+				if have != nil && have.Sym == missing.Sym {
+					Yyerror("impossible type assertion:\n\t%v does not implement %v (wrong type for %v method)\n"+"\t\thave %v%v\n\t\twant %v%v", n.Type, t, missing.Sym, have.Sym, Tconv(have.Type, obj.FmtShort|obj.FmtByte), missing.Sym, Tconv(missing.Type, obj.FmtShort|obj.FmtByte))
+				} else if ptr != 0 {
+					Yyerror("impossible type assertion:\n\t%v does not implement %v (%v method has pointer receiver)", n.Type, t, missing.Sym)
+				} else if have != nil {
+					Yyerror("impossible type assertion:\n\t%v does not implement %v (missing %v method)\n"+"\t\thave %v%v\n\t\twant %v%v", n.Type, t, missing.Sym, have.Sym, Tconv(have.Type, obj.FmtShort|obj.FmtByte), missing.Sym, Tconv(missing.Type, obj.FmtShort|obj.FmtByte))
+				} else {
+					Yyerror("impossible type assertion:\n\t%v does not implement %v (missing %v method)", n.Type, t, missing.Sym)
+				}
+				n.Type = nil
+				return
+			}
+		}
+
+		break OpSwitch
+
+	case OINDEX:
+		ok |= Erv
+		typecheck(&n.Left, Erv)
+		defaultlit(&n.Left, nil)
+		implicitstar(&n.Left)
+		l := n.Left
+		typecheck(&n.Right, Erv)
+		r := n.Right
+		t := l.Type
+		if t == nil || r.Type == nil {
+			n.Type = nil
+			return
+		}
+		switch t.Etype {
+		default:
+			Yyerror("invalid operation: %v (type %v does not support indexing)", n, t)
+			n.Type = nil
+			return
+
+		case TSTRING, TARRAY:
+			indexlit(&n.Right)
+			if t.Etype == TSTRING {
+				n.Type = bytetype
+			} else {
+				n.Type = t.Type
+			}
+			why := "string"
+			if t.Etype == TARRAY {
+				if Isfixedarray(t) {
+					why = "array"
+				} else {
+					why = "slice"
+				}
+			}
+
+			if n.Right.Type != nil && !Isint[n.Right.Type.Etype] {
+				Yyerror("non-integer %s index %v", why, n.Right)
+				break
+			}
+
+			if Isconst(n.Right, CTINT) {
+				x := Mpgetfix(n.Right.Val.U.(*Mpint))
+				if x < 0 {
+					Yyerror("invalid %s index %v (index must be non-negative)", why, n.Right)
+				} else if Isfixedarray(t) && t.Bound > 0 && x >= t.Bound {
+					Yyerror("invalid array index %v (out of bounds for %d-element array)", n.Right, t.Bound)
+				} else if Isconst(n.Left, CTSTR) && x >= int64(len(n.Left.Val.U.(string))) {
+					Yyerror("invalid string index %v (out of bounds for %d-byte string)", n.Right, len(n.Left.Val.U.(string)))
+				} else if Mpcmpfixfix(n.Right.Val.U.(*Mpint), Maxintval[TINT]) > 0 {
+					Yyerror("invalid %s index %v (index too large)", why, n.Right)
+				}
+			}
+
+		case TMAP:
+			n.Etype = 0
+			defaultlit(&n.Right, t.Down)
+			if n.Right.Type != nil {
+				n.Right = assignconv(n.Right, t.Down, "map index")
+			}
+			n.Type = t.Type
+			n.Op = OINDEXMAP
+		}
+
+		break OpSwitch
+
+	case ORECV:
+		ok |= Etop | Erv
+		typecheck(&n.Left, Erv)
+		defaultlit(&n.Left, nil)
+		l := n.Left
+		t := l.Type
+		if t == nil {
+			n.Type = nil
+			return
+		}
+		if t.Etype != TCHAN {
+			Yyerror("invalid operation: %v (receive from non-chan type %v)", n, t)
+			n.Type = nil
+			return
+		}
+
+		if t.Chan&Crecv == 0 {
+			Yyerror("invalid operation: %v (receive from send-only type %v)", n, t)
+			n.Type = nil
+			return
+		}
+
+		n.Type = t.Type
+		break OpSwitch
+
+	case OSEND:
+		ok |= Etop
+		l := typecheck(&n.Left, Erv)
+		typecheck(&n.Right, Erv)
+		defaultlit(&n.Left, nil)
+		l = n.Left
+		t := l.Type
+		if t == nil {
+			n.Type = nil
+			return
+		}
+		if t.Etype != TCHAN {
+			Yyerror("invalid operation: %v (send to non-chan type %v)", n, t)
+			n.Type = nil
+			return
+		}
+
+		if t.Chan&Csend == 0 {
+			Yyerror("invalid operation: %v (send to receive-only type %v)", n, t)
+			n.Type = nil
+			return
+		}
+
+		defaultlit(&n.Right, t.Type)
+		r := n.Right
+		if r.Type == nil {
+			n.Type = nil
+			return
+		}
+		n.Right = assignconv(r, l.Type.Type, "send")
+
+		// TODO: more aggressive
+		n.Etype = 0
+
+		n.Type = nil
+		break OpSwitch
+
+	case OSLICE:
+		ok |= Erv
+		typecheck(&n.Left, top)
+		typecheck(&n.Right.Left, Erv)
+		typecheck(&n.Right.Right, Erv)
+		defaultlit(&n.Left, nil)
+		indexlit(&n.Right.Left)
+		indexlit(&n.Right.Right)
+		l := n.Left
+		if Isfixedarray(l.Type) {
+			if !islvalue(n.Left) {
+				Yyerror("invalid operation %v (slice of unaddressable value)", n)
+				n.Type = nil
+				return
+			}
+
+			n.Left = Nod(OADDR, n.Left, nil)
+			n.Left.Implicit = true
+			typecheck(&n.Left, Erv)
+			l = n.Left
+		}
+
+		t := l.Type
+		if t == nil {
+			n.Type = nil
+			return
+		}
+		var tp *Type
+		if Istype(t, TSTRING) {
+			n.Type = t
+			n.Op = OSLICESTR
+		} else if Isptr[t.Etype] && Isfixedarray(t.Type) {
+			tp = t.Type
+			n.Type = typ(TARRAY)
+			n.Type.Type = tp.Type
+			n.Type.Bound = -1
+			dowidth(n.Type)
+			n.Op = OSLICEARR
+		} else if Isslice(t) {
+			n.Type = t
+		} else {
+			Yyerror("cannot slice %v (type %v)", l, t)
+			n.Type = nil
+			return
+		}
+
+		lo := n.Right.Left
+		if lo != nil && checksliceindex(l, lo, tp) < 0 {
+			n.Type = nil
+			return
+		}
+		hi := n.Right.Right
+		if hi != nil && checksliceindex(l, hi, tp) < 0 {
+			n.Type = nil
+			return
+		}
+		if checksliceconst(lo, hi) < 0 {
+			n.Type = nil
+			return
+		}
+		break OpSwitch
+
+	case OSLICE3:
+		ok |= Erv
+		typecheck(&n.Left, top)
+		typecheck(&n.Right.Left, Erv)
+		typecheck(&n.Right.Right.Left, Erv)
+		typecheck(&n.Right.Right.Right, Erv)
+		defaultlit(&n.Left, nil)
+		indexlit(&n.Right.Left)
+		indexlit(&n.Right.Right.Left)
+		indexlit(&n.Right.Right.Right)
+		l := n.Left
+		if Isfixedarray(l.Type) {
+			if !islvalue(n.Left) {
+				Yyerror("invalid operation %v (slice of unaddressable value)", n)
+				n.Type = nil
+				return
+			}
+
+			n.Left = Nod(OADDR, n.Left, nil)
+			n.Left.Implicit = true
+			typecheck(&n.Left, Erv)
+			l = n.Left
+		}
+
+		t := l.Type
+		if t == nil {
+			n.Type = nil
+			return
+		}
+		if Istype(t, TSTRING) {
+			Yyerror("invalid operation %v (3-index slice of string)", n)
+			n.Type = nil
+			return
+		}
+
+		var tp *Type
+		if Isptr[t.Etype] && Isfixedarray(t.Type) {
+			tp = t.Type
+			n.Type = typ(TARRAY)
+			n.Type.Type = tp.Type
+			n.Type.Bound = -1
+			dowidth(n.Type)
+			n.Op = OSLICE3ARR
+		} else if Isslice(t) {
+			n.Type = t
+		} else {
+			Yyerror("cannot slice %v (type %v)", l, t)
+			n.Type = nil
+			return
+		}
+
+		lo := n.Right.Left
+		if lo != nil && checksliceindex(l, lo, tp) < 0 {
+			n.Type = nil
+			return
+		}
+		mid := n.Right.Right.Left
+		if mid != nil && checksliceindex(l, mid, tp) < 0 {
+			n.Type = nil
+			return
+		}
+		hi := n.Right.Right.Right
+		if hi != nil && checksliceindex(l, hi, tp) < 0 {
+			n.Type = nil
+			return
+		}
+		if checksliceconst(lo, hi) < 0 || checksliceconst(lo, mid) < 0 || checksliceconst(mid, hi) < 0 {
+			n.Type = nil
+			return
+		}
+		break OpSwitch
+
+		/*
+		 * call and call like
+		 */
+	case OCALL:
+		l := n.Left
+
+		if l.Op == ONAME {
+			r := unsafenmagic(n)
+			if r != nil {
+				if n.Isddd {
+					Yyerror("invalid use of ... with builtin %v", l)
+				}
+				n = r
+				typecheck1(&n, top)
+				return
+			}
+		}
+
+		typecheck(&n.Left, Erv|Etype|Ecall|top&Eproc)
+		n.Diag |= n.Left.Diag
+		l = n.Left
+		if l.Op == ONAME && l.Etype != 0 {
+			if n.Isddd && l.Etype != OAPPEND {
+				Yyerror("invalid use of ... with builtin %v", l)
+			}
+
+			// builtin: OLEN, OCAP, etc.
+			n.Op = l.Etype
+
+			n.Left = n.Right
+			n.Right = nil
+			typecheck1(&n, top)
+			return
+		}
+
+		defaultlit(&n.Left, nil)
+		l = n.Left
+		if l.Op == OTYPE {
+			if n.Isddd || l.Type.Bound == -100 {
+				if l.Type.Broke == 0 {
+					Yyerror("invalid use of ... in type conversion to %v", l.Type)
+				}
+				n.Diag = 1
+			}
+
+			// pick off before type-checking arguments
+			ok |= Erv
+
+			// turn CALL(type, arg) into CONV(arg) w/ type
+			n.Left = nil
+
+			n.Op = OCONV
+			n.Type = l.Type
+			if onearg(n, "conversion to %v", l.Type) < 0 {
+				n.Type = nil
+				return
+			}
+			typecheck1(&n, top)
+			return
+		}
+
+		if count(n.List) == 1 && !n.Isddd {
+			typecheck(&n.List.N, Erv|Efnstruct)
+		} else {
+			typechecklist(n.List, Erv)
+		}
+		t := l.Type
+		if t == nil {
+			n.Type = nil
+			return
+		}
+		checkwidth(t)
+
+		switch l.Op {
+		case ODOTINTER:
+			n.Op = OCALLINTER
+
+		case ODOTMETH:
+			n.Op = OCALLMETH
+
+			// typecheckaste was used here but there wasn't enough
+			// information further down the call chain to know if we
+			// were testing a method receiver for unexported fields.
+			// It isn't necessary, so just do a sanity check.
+			tp := getthisx(t).Type.Type
+
+			if l.Left == nil || !Eqtype(l.Left.Type, tp) {
+				Fatal("method receiver")
+			}
+
+		default:
+			n.Op = OCALLFUNC
+			if t.Etype != TFUNC {
+				Yyerror("cannot call non-function %v (type %v)", l, t)
+				n.Type = nil
+				return
+			}
+		}
+
+		typecheckaste(OCALL, n.Left, n.Isddd, getinargx(t), n.List, func() string { return fmt.Sprintf("argument to %v", n.Left) })
+		ok |= Etop
+		if t.Outtuple == 0 {
+			break OpSwitch
+		}
+		ok |= Erv
+		if t.Outtuple == 1 {
+			t := getoutargx(l.Type).Type
+			if t == nil {
+				n.Type = nil
+				return
+			}
+			if t.Etype == TFIELD {
+				t = t.Type
+			}
+			n.Type = t
+
+			if n.Op == OCALLFUNC && n.Left.Op == ONAME && (compiling_runtime != 0 || n.Left.Sym.Pkg == Runtimepkg) && n.Left.Sym.Name == "getg" {
+				// Emit code for runtime.getg() directly instead of calling function.
+				// Most such rewrites (for example the similar one for math.Sqrt) should be done in walk,
+				// so that the ordering pass can make sure to preserve the semantics of the original code
+				// (in particular, the exact time of the function call) by introducing temporaries.
+				// In this case, we know getg() always returns the same result within a given function
+				// and we want to avoid the temporaries, so we do the rewrite earlier than is typical.
+				n.Op = OGETG
+			}
+
+			break OpSwitch
+		}
+
+		// multiple return
+		if top&(Efnstruct|Etop) == 0 {
+			Yyerror("multiple-value %v() in single-value context", l)
+			break OpSwitch
+		}
+
+		n.Type = getoutargx(l.Type)
+
+		break OpSwitch
+
+	case OCAP, OLEN, OREAL, OIMAG:
+		ok |= Erv
+		if onearg(n, "%v", Oconv(int(n.Op), 0)) < 0 {
+			n.Type = nil
+			return
+		}
+		typecheck(&n.Left, Erv)
+		defaultlit(&n.Left, nil)
+		implicitstar(&n.Left)
+		l := n.Left
+		t := l.Type
+		if t == nil {
+			n.Type = nil
+			return
+		}
+		switch n.Op {
+		case OCAP:
+			if !okforcap[t.Etype] {
+				goto badcall1
+			}
+
+		case OLEN:
+			if !okforlen[t.Etype] {
+				goto badcall1
+			}
+
+		case OREAL, OIMAG:
+			if !Iscomplex[t.Etype] {
+				goto badcall1
+			}
+			if Isconst(l, CTCPLX) {
+				r := n
+				if n.Op == OREAL {
+					n = nodfltconst(&l.Val.U.(*Mpcplx).Real)
+				} else {
+					n = nodfltconst(&l.Val.U.(*Mpcplx).Imag)
+				}
+				n.Orig = r
+			}
+
+			n.Type = Types[cplxsubtype(int(t.Etype))]
+			break OpSwitch
+		}
+
+		// might be constant
+		switch t.Etype {
+		case TSTRING:
+			if Isconst(l, CTSTR) {
+				r := Nod(OXXX, nil, nil)
+				Nodconst(r, Types[TINT], int64(len(l.Val.U.(string))))
+				r.Orig = n
+				n = r
+			}
+
+		case TARRAY:
+			if t.Bound < 0 { // slice
+				break
+			}
+			if callrecv(l) { // has call or receive
+				break
+			}
+			r := Nod(OXXX, nil, nil)
+			Nodconst(r, Types[TINT], t.Bound)
+			r.Orig = n
+			n = r
+		}
+
+		n.Type = Types[TINT]
+		break OpSwitch
+
+	badcall1:
+		Yyerror("invalid argument %v for %v", Nconv(n.Left, obj.FmtLong), Oconv(int(n.Op), 0))
+		n.Type = nil
+		return
+
+	case OCOMPLEX:
+		ok |= Erv
+		var r *Node
+		var l *Node
+		if count(n.List) == 1 {
+			typechecklist(n.List, Efnstruct)
+			if n.List.N.Op != OCALLFUNC && n.List.N.Op != OCALLMETH {
+				Yyerror("invalid operation: complex expects two arguments")
+				n.Type = nil
+				return
+			}
+
+			t := n.List.N.Left.Type
+			if t.Outtuple != 2 {
+				Yyerror("invalid operation: complex expects two arguments, %v returns %d results", n.List.N, t.Outtuple)
+				n.Type = nil
+				return
+			}
+
+			t = n.List.N.Type.Type
+			l = t.Nname
+			r = t.Down.Nname
+		} else {
+			if twoarg(n) < 0 {
+				n.Type = nil
+				return
+			}
+			l = typecheck(&n.Left, Erv|top&Eiota)
+			r = typecheck(&n.Right, Erv|top&Eiota)
+			if l.Type == nil || r.Type == nil {
+				n.Type = nil
+				return
+			}
+			defaultlit2(&l, &r, 0)
+			if l.Type == nil || r.Type == nil {
+				n.Type = nil
+				return
+			}
+			n.Left = l
+			n.Right = r
+		}
+
+		if !Eqtype(l.Type, r.Type) {
+			Yyerror("invalid operation: %v (mismatched types %v and %v)", n, l.Type, r.Type)
+			n.Type = nil
+			return
+		}
+
+		var t *Type
+		switch l.Type.Etype {
+		default:
+			Yyerror("invalid operation: %v (arguments have type %v, expected floating-point)", n, l.Type)
+			n.Type = nil
+			return
+
+		case TIDEAL:
+			t = Types[TIDEAL]
+
+		case TFLOAT32:
+			t = Types[TCOMPLEX64]
+
+		case TFLOAT64:
+			t = Types[TCOMPLEX128]
+		}
+
+		if l.Op == OLITERAL && r.Op == OLITERAL {
+			// make it a complex literal
+			r = nodcplxlit(l.Val, r.Val)
+
+			r.Orig = n
+			n = r
+		}
+
+		n.Type = t
+		break OpSwitch
+
+	case OCLOSE:
+		if onearg(n, "%v", Oconv(int(n.Op), 0)) < 0 {
+			n.Type = nil
+			return
+		}
+		typecheck(&n.Left, Erv)
+		defaultlit(&n.Left, nil)
+		l := n.Left
+		t := l.Type
+		if t == nil {
+			n.Type = nil
+			return
+		}
+		if t.Etype != TCHAN {
+			Yyerror("invalid operation: %v (non-chan type %v)", n, t)
+			n.Type = nil
+			return
+		}
+
+		if t.Chan&Csend == 0 {
+			Yyerror("invalid operation: %v (cannot close receive-only channel)", n)
+			n.Type = nil
+			return
+		}
+
+		ok |= Etop
+		break OpSwitch
+
+	case ODELETE:
+		args := n.List
+		if args == nil {
+			Yyerror("missing arguments to delete")
+			n.Type = nil
+			return
+		}
+
+		if args.Next == nil {
+			Yyerror("missing second (key) argument to delete")
+			n.Type = nil
+			return
+		}
+
+		if args.Next.Next != nil {
+			Yyerror("too many arguments to delete")
+			n.Type = nil
+			return
+		}
+
+		ok |= Etop
+		typechecklist(args, Erv)
+		l := args.N
+		r := args.Next.N
+		if l.Type != nil && l.Type.Etype != TMAP {
+			Yyerror("first argument to delete must be map; have %v", Tconv(l.Type, obj.FmtLong))
+			n.Type = nil
+			return
+		}
+
+		args.Next.N = assignconv(r, l.Type.Down, "delete")
+		break OpSwitch
+
+	case OAPPEND:
+		ok |= Erv
+		args := n.List
+		if args == nil {
+			Yyerror("missing arguments to append")
+			n.Type = nil
+			return
+		}
+
+		if count(args) == 1 && !n.Isddd {
+			typecheck(&args.N, Erv|Efnstruct)
+		} else {
+			typechecklist(args, Erv)
+		}
+
+		t := args.N.Type
+		if t == nil {
+			n.Type = nil
+			return
+		}
+
+		// Unpack multiple-return result before type-checking.
+		var funarg *Type
+		if Istype(t, TSTRUCT) && t.Funarg != 0 {
+			funarg = t
+			t = t.Type.Type
+		}
+
+		n.Type = t
+		if !Isslice(t) {
+			if Isconst(args.N, CTNIL) {
+				Yyerror("first argument to append must be typed slice; have untyped nil")
+				n.Type = nil
+				return
+			}
+
+			Yyerror("first argument to append must be slice; have %v", Tconv(t, obj.FmtLong))
+			n.Type = nil
+			return
+		}
+
+		if n.Isddd {
+			if args.Next == nil {
+				Yyerror("cannot use ... on first argument to append")
+				n.Type = nil
+				return
+			}
+
+			if args.Next.Next != nil {
+				Yyerror("too many arguments to append")
+				n.Type = nil
+				return
+			}
+
+			if Istype(t.Type, TUINT8) && Istype(args.Next.N.Type, TSTRING) {
+				defaultlit(&args.Next.N, Types[TSTRING])
+				break OpSwitch
+			}
+
+			args.Next.N = assignconv(args.Next.N, t.Orig, "append")
+			break OpSwitch
+		}
+
+		if funarg != nil {
+			for t := funarg.Type.Down; t != nil; t = t.Down {
+				if assignop(t.Type, n.Type.Type, nil) == 0 {
+					Yyerror("cannot append %v value to []%v", t.Type, n.Type.Type)
+				}
+			}
+		} else {
+			for args = args.Next; args != nil; args = args.Next {
+				if args.N.Type == nil {
+					continue
+				}
+				args.N = assignconv(args.N, t.Type, "append")
+			}
+		}
+
+		break OpSwitch
+
+	case OCOPY:
+		ok |= Etop | Erv
+		args := n.List
+		if args == nil || args.Next == nil {
+			Yyerror("missing arguments to copy")
+			n.Type = nil
+			return
+		}
+
+		if args.Next.Next != nil {
+			Yyerror("too many arguments to copy")
+			n.Type = nil
+			return
+		}
+
+		n.Left = args.N
+		n.Right = args.Next.N
+		n.List = nil
+		n.Type = Types[TINT]
+		typecheck(&n.Left, Erv)
+		typecheck(&n.Right, Erv)
+		if n.Left.Type == nil || n.Right.Type == nil {
+			n.Type = nil
+			return
+		}
+		defaultlit(&n.Left, nil)
+		defaultlit(&n.Right, nil)
+		if n.Left.Type == nil || n.Right.Type == nil {
+			n.Type = nil
+			return
+		}
+
+		// copy([]byte, string)
+		if Isslice(n.Left.Type) && n.Right.Type.Etype == TSTRING {
+			if Eqtype(n.Left.Type.Type, bytetype) {
+				break OpSwitch
+			}
+			Yyerror("arguments to copy have different element types: %v and string", Tconv(n.Left.Type, obj.FmtLong))
+			n.Type = nil
+			return
+		}
+
+		if !Isslice(n.Left.Type) || !Isslice(n.Right.Type) {
+			if !Isslice(n.Left.Type) && !Isslice(n.Right.Type) {
+				Yyerror("arguments to copy must be slices; have %v, %v", Tconv(n.Left.Type, obj.FmtLong), Tconv(n.Right.Type, obj.FmtLong))
+			} else if !Isslice(n.Left.Type) {
+				Yyerror("first argument to copy should be slice; have %v", Tconv(n.Left.Type, obj.FmtLong))
+			} else {
+				Yyerror("second argument to copy should be slice or string; have %v", Tconv(n.Right.Type, obj.FmtLong))
+			}
+			n.Type = nil
+			return
+		}
+
+		if !Eqtype(n.Left.Type.Type, n.Right.Type.Type) {
+			Yyerror("arguments to copy have different element types: %v and %v", Tconv(n.Left.Type, obj.FmtLong), Tconv(n.Right.Type, obj.FmtLong))
+			n.Type = nil
+			return
+		}
+
+		break OpSwitch
+
+	case OCONV:
+		ok |= Erv
+		saveorignode(n)
+		typecheck(&n.Left, Erv|top&(Eindir|Eiota))
+		convlit1(&n.Left, n.Type, true)
+		t := n.Left.Type
+		if t == nil || n.Type == nil {
+			n.Type = nil
+			return
+		}
+		var why string
+		n.Op = uint8(convertop(t, n.Type, &why))
+		if n.Op == 0 {
+			if n.Diag == 0 && n.Type.Broke == 0 {
+				Yyerror("cannot convert %v to type %v%s", Nconv(n.Left, obj.FmtLong), n.Type, why)
+				n.Diag = 1
+			}
+
+			n.Op = OCONV
+		}
+
+		switch n.Op {
+		case OCONVNOP:
+			if n.Left.Op == OLITERAL && n.Type != Types[TBOOL] {
+				r := Nod(OXXX, nil, nil)
+				n.Op = OCONV
+				n.Orig = r
+				*r = *n
+				n.Op = OLITERAL
+				n.Val = n.Left.Val
+			}
+
+		// Do not use stringtoarraylit for the plain string-to-[]byte conversion:
+		// generated code and compiler memory footprint are better without it.
+		case OSTRARRAYBYTE:
+			break
+
+		case OSTRARRAYRUNE:
+			if n.Left.Op == OLITERAL {
+				stringtoarraylit(&n)
+			}
+		}
+
+		break OpSwitch
+
+	case OMAKE:
+		ok |= Erv
+		args := n.List
+		if args == nil {
+			Yyerror("missing argument to make")
+			n.Type = nil
+			return
+		}
+
+		n.List = nil
+		l := args.N
+		args = args.Next
+		typecheck(&l, Etype)
+		t := l.Type
+		if t == nil {
+			n.Type = nil
+			return
+		}
+
+		switch t.Etype {
+		default:
+			Yyerror("cannot make type %v", t)
+			n.Type = nil
+			return
+
+		case TARRAY:
+			if !Isslice(t) {
+				Yyerror("cannot make type %v", t)
+				n.Type = nil
+				return
+			}
+
+			if args == nil {
+				Yyerror("missing len argument to make(%v)", t)
+				n.Type = nil
+				return
+			}
+
+			l = args.N
+			args = args.Next
+			typecheck(&l, Erv)
+			var r *Node
+			if args != nil {
+				r = args.N
+				args = args.Next
+				typecheck(&r, Erv)
+			}
+
+			if l.Type == nil || (r != nil && r.Type == nil) {
+				n.Type = nil
+				return
+			}
+			et := obj.Bool2int(checkmake(t, "len", l) < 0)
+			et |= obj.Bool2int(r != nil && checkmake(t, "cap", r) < 0)
+			if et != 0 {
+				n.Type = nil
+				return
+			}
+			if Isconst(l, CTINT) && r != nil && Isconst(r, CTINT) && Mpcmpfixfix(l.Val.U.(*Mpint), r.Val.U.(*Mpint)) > 0 {
+				Yyerror("len larger than cap in make(%v)", t)
+				n.Type = nil
+				return
+			}
+
+			n.Left = l
+			n.Right = r
+			n.Op = OMAKESLICE
+
+		case TMAP:
+			if args != nil {
+				l = args.N
+				args = args.Next
+				typecheck(&l, Erv)
+				defaultlit(&l, Types[TINT])
+				if l.Type == nil {
+					n.Type = nil
+					return
+				}
+				if checkmake(t, "size", l) < 0 {
+					n.Type = nil
+					return
+				}
+				n.Left = l
+			} else {
+				n.Left = Nodintconst(0)
+			}
+			n.Op = OMAKEMAP
+
+		case TCHAN:
+			l = nil
+			if args != nil {
+				l = args.N
+				args = args.Next
+				typecheck(&l, Erv)
+				defaultlit(&l, Types[TINT])
+				if l.Type == nil {
+					n.Type = nil
+					return
+				}
+				if checkmake(t, "buffer", l) < 0 {
+					n.Type = nil
+					return
+				}
+				n.Left = l
+			} else {
+				n.Left = Nodintconst(0)
+			}
+			n.Op = OMAKECHAN
+		}
+
+		if args != nil {
+			Yyerror("too many arguments to make(%v)", t)
+			n.Op = OMAKE
+			n.Type = nil
+			return
+		}
+
+		n.Type = t
+		break OpSwitch
+
+	case ONEW:
+		ok |= Erv
+		args := n.List
+		if args == nil {
+			Yyerror("missing argument to new")
+			n.Type = nil
+			return
+		}
+
+		l := args.N
+		typecheck(&l, Etype)
+		t := l.Type
+		if t == nil {
+			n.Type = nil
+			return
+		}
+		if args.Next != nil {
+			Yyerror("too many arguments to new(%v)", t)
+			n.Type = nil
+			return
+		}
+
+		n.Left = l
+		n.Type = Ptrto(t)
+		break OpSwitch
+
+	case OPRINT, OPRINTN:
+		ok |= Etop
+		typechecklist(n.List, Erv|Eindir) // Eindir: address does not escape
+		for args := n.List; args != nil; args = args.Next {
+			// Special case for print: int constant is int64, not int.
+			if Isconst(args.N, CTINT) {
+				defaultlit(&args.N, Types[TINT64])
+			} else {
+				defaultlit(&args.N, nil)
+			}
+		}
+
+		break OpSwitch
+
+	case OPANIC:
+		ok |= Etop
+		if onearg(n, "panic") < 0 {
+			n.Type = nil
+			return
+		}
+		typecheck(&n.Left, Erv)
+		defaultlit(&n.Left, Types[TINTER])
+		if n.Left.Type == nil {
+			n.Type = nil
+			return
+		}
+		break OpSwitch
+
+	case ORECOVER:
+		ok |= Erv | Etop
+		if n.List != nil {
+			Yyerror("too many arguments to recover")
+			n.Type = nil
+			return
+		}
+
+		n.Type = Types[TINTER]
+		break OpSwitch
+
+	case OCLOSURE:
+		ok |= Erv
+		typecheckclosure(n, top)
+		if n.Type == nil {
+			n.Type = nil
+			return
+		}
+		break OpSwitch
+
+	case OITAB:
+		ok |= Erv
+		typecheck(&n.Left, Erv)
+		t := n.Left.Type
+		if t == nil {
+			n.Type = nil
+			return
+		}
+		if t.Etype != TINTER {
+			Fatal("OITAB of %v", t)
+		}
+		n.Type = Ptrto(Types[TUINTPTR])
+		break OpSwitch
+
+	case OSPTR:
+		ok |= Erv
+		typecheck(&n.Left, Erv)
+		t := n.Left.Type
+		if t == nil {
+			n.Type = nil
+			return
+		}
+		if !Isslice(t) && t.Etype != TSTRING {
+			Fatal("OSPTR of %v", t)
+		}
+		if t.Etype == TSTRING {
+			n.Type = Ptrto(Types[TUINT8])
+		} else {
+			n.Type = Ptrto(t.Type)
+		}
+		break OpSwitch
+
+	case OCLOSUREVAR:
+		ok |= Erv
+		break OpSwitch
+
+	case OCFUNC:
+		ok |= Erv
+		typecheck(&n.Left, Erv)
+		n.Type = Types[TUINTPTR]
+		break OpSwitch
+
+	case OCONVNOP:
+		ok |= Erv
+		typecheck(&n.Left, Erv)
+		break OpSwitch
+
+		/*
+		 * statements
+		 */
+	case OAS:
+		ok |= Etop
+
+		typecheckas(n)
+
+		// Code that creates temps does not bother to set defn, so do it here.
+		if n.Left.Op == ONAME && strings.HasPrefix(n.Left.Sym.Name, "autotmp_") {
+			n.Left.Defn = n
+		}
+		break OpSwitch
+
+	case OAS2:
+		ok |= Etop
+		typecheckas2(n)
+		break OpSwitch
+
+	case OBREAK,
+		OCONTINUE,
+		ODCL,
+		OEMPTY,
+		OGOTO,
+		OXFALL,
+		OVARKILL:
+		ok |= Etop
+		break OpSwitch
+
+	case OLABEL:
+		ok |= Etop
+		decldepth++
+		break OpSwitch
+
+	case ODEFER:
+		ok |= Etop
+		typecheck(&n.Left, Etop|Erv)
+		if n.Left.Diag == 0 {
+			checkdefergo(n)
+		}
+		break OpSwitch
+
+	case OPROC:
+		ok |= Etop
+		typecheck(&n.Left, Etop|Eproc|Erv)
+		checkdefergo(n)
+		break OpSwitch
+
+	case OFOR:
+		ok |= Etop
+		typechecklist(n.Ninit, Etop)
+		decldepth++
+		typecheck(&n.Ntest, Erv)
+		if n.Ntest != nil {
+			t := n.Ntest.Type
+			if t != nil && t.Etype != TBOOL {
+				Yyerror("non-bool %v used as for condition", Nconv(n.Ntest, obj.FmtLong))
+			}
+		}
+		typecheck(&n.Nincr, Etop)
+		typechecklist(n.Nbody, Etop)
+		decldepth--
+		break OpSwitch
+
+	case OIF:
+		ok |= Etop
+		typechecklist(n.Ninit, Etop)
+		typecheck(&n.Ntest, Erv)
+		if n.Ntest != nil {
+			t := n.Ntest.Type
+			if t != nil && t.Etype != TBOOL {
+				Yyerror("non-bool %v used as if condition", Nconv(n.Ntest, obj.FmtLong))
+			}
+		}
+		typechecklist(n.Nbody, Etop)
+		typechecklist(n.Nelse, Etop)
+		break OpSwitch
+
+	case ORETURN:
+		ok |= Etop
+		if count(n.List) == 1 {
+			typechecklist(n.List, Erv|Efnstruct)
+		} else {
+			typechecklist(n.List, Erv)
+		}
+		if Curfn == nil {
+			Yyerror("return outside function")
+			n.Type = nil
+			return
+		}
+
+		if Curfn.Type.Outnamed != 0 && n.List == nil {
+			break OpSwitch
+		}
+		typecheckaste(ORETURN, nil, false, getoutargx(Curfn.Type), n.List, func() string { return "return argument" })
+		break OpSwitch
+
+	case ORETJMP:
+		ok |= Etop
+		break OpSwitch
+
+	case OSELECT:
+		ok |= Etop
+		typecheckselect(n)
+		break OpSwitch
+
+	case OSWITCH:
+		ok |= Etop
+		typecheckswitch(n)
+		break OpSwitch
+
+	case ORANGE:
+		ok |= Etop
+		typecheckrange(n)
+		break OpSwitch
+
+	case OTYPESW:
+		Yyerror("use of .(type) outside type switch")
+		n.Type = nil
+		return
+
+	case OXCASE:
+		ok |= Etop
+		typechecklist(n.List, Erv)
+		typechecklist(n.Nbody, Etop)
+		break OpSwitch
+
+	case ODCLFUNC:
+		ok |= Etop
+		typecheckfunc(n)
+		break OpSwitch
+
+	case ODCLCONST:
+		ok |= Etop
+		typecheck(&n.Left, Erv)
+		break OpSwitch
+
+	case ODCLTYPE:
+		ok |= Etop
+		typecheck(&n.Left, Etype)
+		if incannedimport == 0 {
+			checkwidth(n.Left.Type)
+		}
+		break OpSwitch
+	}
+
+	t := n.Type
+	if t != nil && t.Funarg == 0 && n.Op != OTYPE {
+		switch t.Etype {
+		case TFUNC, // might have TANY; wait until it's called
+			TANY,
+			TFORW,
+			TIDEAL,
+			TNIL,
+			TBLANK:
+			break
+
+		default:
+			checkwidth(t)
+		}
+	}
+
+	if safemode != 0 && incannedimport == 0 && importpkg == nil && compiling_wrappers == 0 && t != nil && t.Etype == TUNSAFEPTR {
+		Yyerror("cannot use unsafe.Pointer")
+	}
+
+	evconst(n)
+	if n.Op == OTYPE && top&Etype == 0 {
+		Yyerror("type %v is not an expression", n.Type)
+		n.Type = nil
+		return
+	}
+
+	if top&(Erv|Etype) == Etype && n.Op != OTYPE {
+		Yyerror("%v is not a type", n)
+		n.Type = nil
+		return
+	}
+
+	// TODO(rsc): simplify
+	if (top&(Ecall|Erv|Etype) != 0) && top&Etop == 0 && ok&(Erv|Etype|Ecall) == 0 {
+		Yyerror("%v used as value", n)
+		n.Type = nil
+		return
+	}
+
+	if (top&Etop != 0) && top&(Ecall|Erv|Etype) == 0 && ok&Etop == 0 {
+		if n.Diag == 0 {
+			Yyerror("%v evaluated but not used", n)
+			n.Diag = 1
+		}
+
+		n.Type = nil
+		return
+	}
+
+	/* TODO
+	if(n->type == T)
+		fatal("typecheck nil type");
+	*/
+}
+
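+// checksliceindex typechecks the slice index r applied to l; tp, if non-nil, is the fixed-array
+// type being sliced. It returns -1 if an error was reported and 0 otherwise.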
+func checksliceindex(l *Node, r *Node, tp *Type) int {
+	t := r.Type
+	if t == nil {
+		return -1
+	}
+	if !Isint[t.Etype] {
+		Yyerror("invalid slice index %v (type %v)", r, t)
+		return -1
+	}
+
+	if r.Op == OLITERAL {
+		if Mpgetfix(r.Val.U.(*Mpint)) < 0 {
+			Yyerror("invalid slice index %v (index must be non-negative)", r)
+			return -1
+		} else if tp != nil && tp.Bound > 0 && Mpgetfix(r.Val.U.(*Mpint)) > tp.Bound {
+			Yyerror("invalid slice index %v (out of bounds for %d-element array)", r, tp.Bound)
+			return -1
+		} else if Isconst(l, CTSTR) && Mpgetfix(r.Val.U.(*Mpint)) > int64(len(l.Val.U.(string))) {
+			Yyerror("invalid slice index %v (out of bounds for %d-byte string)", r, len(l.Val.U.(string)))
+			return -1
+		} else if Mpcmpfixfix(r.Val.U.(*Mpint), Maxintval[TINT]) > 0 {
+			Yyerror("invalid slice index %v (index too large)", r)
+			return -1
+		}
+	}
+
+	return 0
+}
+
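+// checksliceconst reports an error and returns -1 when the constant slice bounds are inverted (lo > hi).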
+func checksliceconst(lo *Node, hi *Node) int {
+	if lo != nil && hi != nil && lo.Op == OLITERAL && hi.Op == OLITERAL && Mpcmpfixfix(lo.Val.U.(*Mpint), hi.Val.U.(*Mpint)) > 0 {
+		Yyerror("invalid slice index: %v > %v", lo, hi)
+		return -1
+	}
+
+	return 0
+}
+
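+// checkdefergo checks that the argument of a defer or go statement is an ordinary function or method call.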
+func checkdefergo(n *Node) {
+	what := "defer"
+	if n.Op == OPROC {
+		what = "go"
+	}
+
+	switch n.Left.Op {
+	// ok
+	case OCALLINTER,
+		OCALLMETH,
+		OCALLFUNC,
+		OCLOSE,
+		OCOPY,
+		ODELETE,
+		OPANIC,
+		OPRINT,
+		OPRINTN,
+		ORECOVER:
+		return
+
+	case OAPPEND,
+		OCAP,
+		OCOMPLEX,
+		OIMAG,
+		OLEN,
+		OMAKE,
+		OMAKESLICE,
+		OMAKECHAN,
+		OMAKEMAP,
+		ONEW,
+		OREAL,
+		OLITERAL: // conversion or unsafe.Alignof, Offsetof, Sizeof
+		if n.Left.Orig != nil && n.Left.Orig.Op == OCONV {
+			break
+		}
+		Yyerror("%s discards result of %v", what, n.Left)
+		return
+	}
+
+	// The type is broken or missing, most likely from a method call on a broken type.
+	// We will warn about the broken type elsewhere; no need to emit a potentially confusing error here.
+	if n.Left.Type == nil || n.Left.Type.Broke != 0 {
+		return
+	}
+
+	if n.Diag == 0 {
+		// The syntax made sure it was a call, so this must be
+		// a conversion.
+		n.Diag = 1
+
+		Yyerror("%s requires function call, not conversion", what)
+	}
+}
+
+func implicitstar(nn **Node) {
+	// insert implicit * if needed for fixed array
+	n := *nn
+
+	t := n.Type
+	if t == nil || !Isptr[t.Etype] {
+		return
+	}
+	t = t.Type
+	if t == nil {
+		return
+	}
+	if !Isfixedarray(t) {
+		return
+	}
+	n = Nod(OIND, n, nil)
+	n.Implicit = true
+	typecheck(&n, Erv)
+	*nn = n
+}
+
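+// onearg checks that call n has exactly one argument, moving it from n.List to n.Left.
+// It returns -1 if an error was reported.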
+func onearg(n *Node, f string, args ...interface{}) int {
+	if n.Left != nil {
+		return 0
+	}
+	if n.List == nil {
+		p := fmt.Sprintf(f, args...)
+		Yyerror("missing argument to %s: %v", p, n)
+		return -1
+	}
+
+	if n.List.Next != nil {
+		p := fmt.Sprintf(f, args...)
+		Yyerror("too many arguments to %s: %v", p, n)
+		n.Left = n.List.N
+		n.List = nil
+		return -1
+	}
+
+	n.Left = n.List.N
+	n.List = nil
+	return 0
+}
+
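+// twoarg checks that call n has exactly two arguments, moving them to n.Left and n.Right.
+// It returns -1 if an error was reported.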
+func twoarg(n *Node) int {
+	if n.Left != nil {
+		return 0
+	}
+	if n.List == nil {
+		Yyerror("missing argument to %v - %v", Oconv(int(n.Op), 0), n)
+		return -1
+	}
+
+	n.Left = n.List.N
+	if n.List.Next == nil {
+		Yyerror("missing argument to %v - %v", Oconv(int(n.Op), 0), n)
+		n.List = nil
+		return -1
+	}
+
+	if n.List.Next.Next != nil {
+		Yyerror("too many arguments to %v - %v", Oconv(int(n.Op), 0), n)
+		n.List = nil
+		return -1
+	}
+
+	n.Right = n.List.Next.N
+	n.List = nil
+	return 0
+}
+
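+// lookdot1 searches the field or method list f of type t for the symbol s.
+// dostrcmp != 0 allows a match on name alone; dostrcmp == 2 also allows a case-insensitive match.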
+func lookdot1(errnode *Node, s *Sym, t *Type, f *Type, dostrcmp int) *Type {
+	var r *Type
+	for ; f != nil; f = f.Down {
+		if dostrcmp != 0 && f.Sym.Name == s.Name {
+			return f
+		}
+		if dostrcmp == 2 && strings.EqualFold(f.Sym.Name, s.Name) {
+			return f
+		}
+		if f.Sym != s {
+			continue
+		}
+		if r != nil {
+			if errnode != nil {
+				Yyerror("ambiguous selector %v", errnode)
+			} else if Isptr[t.Etype] {
+				Yyerror("ambiguous selector (%v).%v", t, s)
+			} else {
+				Yyerror("ambiguous selector %v.%v", t, s)
+			}
+			break
+		}
+
+		r = f
+	}
+
+	return r
+}
+
+func looktypedot(n *Node, t *Type, dostrcmp int) bool {
+	s := n.Right.Sym
+
+	if t.Etype == TINTER {
+		f1 := lookdot1(n, s, t, t.Type, dostrcmp)
+		if f1 == nil {
+			return false
+		}
+
+		n.Right = methodname(n.Right, t)
+		n.Xoffset = f1.Width
+		n.Type = f1.Type
+		n.Op = ODOTINTER
+		return true
+	}
+
+	// Find the base type: methtype will fail if t
+	// is not of the form T or *T.
+	f2 := methtype(t, 0)
+
+	if f2 == nil {
+		return false
+	}
+
+	expandmeth(f2)
+	f2 = lookdot1(n, s, f2, f2.Xmethod, dostrcmp)
+	if f2 == nil {
+		return false
+	}
+
+	// disallow T.m if m requires *T receiver
+	if Isptr[getthisx(f2.Type).Type.Type.Etype] && !Isptr[t.Etype] && f2.Embedded != 2 && !isifacemethod(f2.Type) {
+		Yyerror("invalid method expression %v (needs pointer receiver: (*%v).%v)", n, t, Sconv(f2.Sym, obj.FmtShort))
+		return false
+	}
+
+	n.Right = methodname(n.Right, t)
+	n.Xoffset = f2.Width
+	n.Type = f2.Type
+	n.Op = ODOTMETH
+	return true
+}
+
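+// derefall returns the type at the bottom of a chain of pointer types.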
+func derefall(t *Type) *Type {
+	for t != nil && int(t.Etype) == Tptr {
+		t = t.Type
+	}
+	return t
+}
+
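+// lookdot resolves the selector n.Right.Sym against the type t of n.Left, rewriting n as a field
+// access or method call and inserting any implied addresses or dereferences of the receiver.
+// It returns the field or method found, or nil.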
+func lookdot(n *Node, t *Type, dostrcmp int) *Type {
+	s := n.Right.Sym
+
+	dowidth(t)
+	var f1 *Type
+	if t.Etype == TSTRUCT || t.Etype == TINTER {
+		f1 = lookdot1(n, s, t, t.Type, dostrcmp)
+	}
+
+	var f2 *Type
+	if n.Left.Type == t || n.Left.Type.Sym == nil {
+		f2 = methtype(t, 0)
+		if f2 != nil {
+			// Use f2->method, not f2->xmethod: adddot has
+			// already inserted all the necessary embedded dots.
+			f2 = lookdot1(n, s, f2, f2.Method, dostrcmp)
+		}
+	}
+
+	if f1 != nil {
+		if dostrcmp > 1 {
+			// Already in the process of diagnosing an error.
+			return f1
+		}
+		if f2 != nil {
+			Yyerror("%v is both field and method", n.Right.Sym)
+		}
+		if f1.Width == BADWIDTH {
+			Fatal("lookdot badwidth %v %p", f1, f1)
+		}
+		n.Xoffset = f1.Width
+		n.Type = f1.Type
+		n.Paramfld = f1
+		if t.Etype == TINTER {
+			if Isptr[n.Left.Type.Etype] {
+				n.Left = Nod(OIND, n.Left, nil) // implicitstar
+				n.Left.Implicit = true
+				typecheck(&n.Left, Erv)
+			}
+
+			n.Op = ODOTINTER
+		}
+
+		return f1
+	}
+
+	if f2 != nil {
+		if dostrcmp > 1 {
+			// Already in the process of diagnosing an error.
+			return f2
+		}
+		tt := n.Left.Type
+		dowidth(tt)
+		rcvr := getthisx(f2.Type).Type.Type
+		if !Eqtype(rcvr, tt) {
+			if int(rcvr.Etype) == Tptr && Eqtype(rcvr.Type, tt) {
+				checklvalue(n.Left, "call pointer method on")
+				n.Left = Nod(OADDR, n.Left, nil)
+				n.Left.Implicit = true
+				typecheck(&n.Left, Etype|Erv)
+			} else if int(tt.Etype) == Tptr && int(rcvr.Etype) != Tptr && Eqtype(tt.Type, rcvr) {
+				n.Left = Nod(OIND, n.Left, nil)
+				n.Left.Implicit = true
+				typecheck(&n.Left, Etype|Erv)
+			} else if int(tt.Etype) == Tptr && int(tt.Type.Etype) == Tptr && Eqtype(derefall(tt), derefall(rcvr)) {
+				Yyerror("calling method %v with receiver %v requires explicit dereference", n.Right, Nconv(n.Left, obj.FmtLong))
+				for int(tt.Etype) == Tptr {
+					// Stop one level early for method with pointer receiver.
+					if int(rcvr.Etype) == Tptr && int(tt.Type.Etype) != Tptr {
+						break
+					}
+					n.Left = Nod(OIND, n.Left, nil)
+					n.Left.Implicit = true
+					typecheck(&n.Left, Etype|Erv)
+					tt = tt.Type
+				}
+			} else {
+				Fatal("method mismatch: %v for %v", rcvr, tt)
+			}
+		}
+
+		ll := n.Left
+		for ll.Left != nil {
+			ll = ll.Left
+		}
+		if ll.Implicit {
+			if Isptr[ll.Type.Etype] && ll.Type.Sym != nil && ll.Type.Sym.Def != nil && ll.Type.Sym.Def.Op == OTYPE {
+				// It is invalid to automatically dereference a named pointer type when selecting a method.
+				// Make n.Left == ll to clarify the error message.
+				n.Left = ll
+				return nil
+			}
+		}
+
+		n.Right = methodname(n.Right, n.Left.Type)
+		n.Xoffset = f2.Width
+		n.Type = f2.Type
+
+		//		print("lookdot found [%p] %T\n", f2->type, f2->type);
+		n.Op = ODOTMETH
+
+		return f2
+	}
+
+	return nil
+}
+
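+// nokeys reports whether the list l contains no key:value (OKEY) entries.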
+func nokeys(l *NodeList) bool {
+	for ; l != nil; l = l.Next {
+		if l.N.Op == OKEY {
+			return false
+		}
+	}
+	return true
+}
+
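+// hasddd reports whether the parameter list t contains a ... parameter.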
+func hasddd(t *Type) bool {
+	for tl := t.Type; tl != nil; tl = tl.Down {
+		if tl.Isddd {
+			return true
+		}
+	}
+
+	return false
+}
+
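+// downcount returns the number of entries in the parameter or field list t.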
+func downcount(t *Type) int {
+	n := 0
+	for tl := t.Type; tl != nil; tl = tl.Down {
+		n++
+	}
+
+	return n
+}
+
+/*
+ * typecheck assignment: type list = expression list
+ */
+func typecheckaste(op int, call *Node, isddd bool, tstruct *Type, nl *NodeList, desc func() string) {
+	var t *Type
+	var n *Node
+	var n1 int
+	var n2 int
+
+	lno := int(lineno)
+
+	if tstruct.Broke != 0 {
+		goto out
+	}
+
+	n = nil
+	if nl != nil && nl.Next == nil {
+		n = nl.N
+		if n.Type != nil {
+			if n.Type.Etype == TSTRUCT && n.Type.Funarg != 0 {
+				if !hasddd(tstruct) {
+					n1 := downcount(tstruct)
+					n2 := downcount(n.Type)
+					if n2 > n1 {
+						goto toomany
+					}
+					if n2 < n1 {
+						goto notenough
+					}
+				}
+
+				tn := n.Type.Type
+				var why string
+				for tl := tstruct.Type; tl != nil; tl = tl.Down {
+					if tl.Isddd {
+						for ; tn != nil; tn = tn.Down {
+							if assignop(tn.Type, tl.Type.Type, &why) == 0 {
+								if call != nil {
+									Yyerror("cannot use %v as type %v in argument to %v%s", tn.Type, tl.Type.Type, call, why)
+								} else {
+									Yyerror("cannot use %v as type %v in %s%s", tn.Type, tl.Type.Type, desc(), why)
+								}
+							}
+						}
+
+						goto out
+					}
+
+					if tn == nil {
+						goto notenough
+					}
+					if assignop(tn.Type, tl.Type, &why) == 0 {
+						if call != nil {
+							Yyerror("cannot use %v as type %v in argument to %v%s", tn.Type, tl.Type, call, why)
+						} else {
+							Yyerror("cannot use %v as type %v in %s%s", tn.Type, tl.Type, desc(), why)
+						}
+					}
+
+					tn = tn.Down
+				}
+
+				if tn != nil {
+					goto toomany
+				}
+				goto out
+			}
+		}
+	}
+
+	n1 = downcount(tstruct)
+	n2 = count(nl)
+	if !hasddd(tstruct) {
+		if n2 > n1 {
+			goto toomany
+		}
+		if n2 < n1 {
+			goto notenough
+		}
+	} else {
+		if !isddd {
+			if n2 < n1-1 {
+				goto notenough
+			}
+		} else {
+			if n2 > n1 {
+				goto toomany
+			}
+			if n2 < n1 {
+				goto notenough
+			}
+		}
+	}
+
+	for tl := tstruct.Type; tl != nil; tl = tl.Down {
+		t = tl.Type
+		if tl.Isddd {
+			if isddd {
+				if nl == nil {
+					goto notenough
+				}
+				if nl.Next != nil {
+					goto toomany
+				}
+				n = nl.N
+				setlineno(n)
+				if n.Type != nil {
+					nl.N = assignconvfn(n, t, desc)
+				}
+				goto out
+			}
+
+			for ; nl != nil; nl = nl.Next {
+				n = nl.N
+				setlineno(nl.N)
+				if n.Type != nil {
+					nl.N = assignconvfn(n, t.Type, desc)
+				}
+			}
+
+			goto out
+		}
+
+		if nl == nil {
+			goto notenough
+		}
+		n = nl.N
+		setlineno(n)
+		if n.Type != nil {
+			nl.N = assignconvfn(n, t, desc)
+		}
+		nl = nl.Next
+	}
+
+	if nl != nil {
+		goto toomany
+	}
+	if isddd {
+		if call != nil {
+			Yyerror("invalid use of ... in call to %v", call)
+		} else {
+			Yyerror("invalid use of ... in %v", Oconv(int(op), 0))
+		}
+	}
+
+out:
+	lineno = int32(lno)
+	return
+
+notenough:
+	if n == nil || n.Diag == 0 {
+		if call != nil {
+			Yyerror("not enough arguments in call to %v", call)
+		} else {
+			Yyerror("not enough arguments to %v", Oconv(int(op), 0))
+		}
+		if n != nil {
+			n.Diag = 1
+		}
+	}
+
+	goto out
+
+toomany:
+	if call != nil {
+		Yyerror("too many arguments in call to %v", call)
+	} else {
+		Yyerror("too many arguments to %v", Oconv(int(op), 0))
+	}
+	goto out
+}
+
+/*
+ * type check composite
+ */
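+// fielddup reports an error if the field name n repeats an earlier one in the same struct literal, recording seen names in hash.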
+func fielddup(n *Node, hash map[string]bool) {
+	if n.Op != ONAME {
+		Fatal("fielddup: not ONAME")
+	}
+	name := n.Sym.Name
+	if hash[name] {
+		Yyerror("duplicate field name in struct literal: %s", name)
+		return
+	}
+	hash[name] = true
+}
+
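+// keydup reports an error if the constant map key n duplicates a key already recorded in the
+// hash table; entries are chained through the Ntest field.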
+func keydup(n *Node, hash []*Node) {
+	orign := n
+	if n.Op == OCONVIFACE {
+		n = n.Left
+	}
+	evconst(n)
+	if n.Op != OLITERAL {
+		return // we don't check variables
+	}
+
+	var b uint32
+	switch n.Val.Ctype {
+	default: // unknown, bool, nil
+		b = 23
+
+	case CTINT, CTRUNE:
+		b = uint32(Mpgetfix(n.Val.U.(*Mpint)))
+
+	case CTFLT:
+		d := mpgetflt(n.Val.U.(*Mpflt))
+		x := math.Float64bits(d)
+		for i := 0; i < 8; i++ {
+			b = b*PRIME1 + uint32(x&0xFF)
+			x >>= 8
+		}
+
+	case CTSTR:
+		b = 0
+		s := n.Val.U.(string)
+		for i := len(n.Val.U.(string)); i > 0; i-- {
+			b = b*PRIME1 + uint32(s[0])
+			s = s[1:]
+		}
+	}
+
+	h := uint(b % uint32(len(hash)))
+	var cmp Node
+	for a := hash[h]; a != nil; a = a.Ntest {
+		cmp.Op = OEQ
+		cmp.Left = n
+		b = 0
+		if a.Op == OCONVIFACE && orign.Op == OCONVIFACE {
+			if Eqtype(a.Left.Type, n.Type) {
+				cmp.Right = a.Left
+				evconst(&cmp)
+				b = uint32(obj.Bool2int(cmp.Val.U.(bool)))
+			}
+		} else if Eqtype(a.Type, n.Type) {
+			cmp.Right = a
+			evconst(&cmp)
+			b = uint32(obj.Bool2int(cmp.Val.U.(bool)))
+		}
+
+		if b != 0 {
+			Yyerror("duplicate key %v in map literal", n)
+			return
+		}
+	}
+
+	orign.Ntest = hash[h]
+	hash[h] = orign
+}
+
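+// indexdup reports an error if the constant array index n duplicates one already recorded in hash.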
+func indexdup(n *Node, hash []*Node) {
+	if n.Op != OLITERAL {
+		Fatal("indexdup: not OLITERAL")
+	}
+
+	b := uint32(Mpgetfix(n.Val.U.(*Mpint)))
+	h := uint(b % uint32(len(hash)))
+	var c uint32
+	for a := hash[h]; a != nil; a = a.Ntest {
+		c = uint32(Mpgetfix(a.Val.U.(*Mpint)))
+		if b == c {
+			Yyerror("duplicate index in array literal: %d", b)
+			return
+		}
+	}
+
+	n.Ntest = hash[h]
+	hash[h] = n
+}
+
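+// prime reports whether h is divisible by no odd number in [3, sr]; with sr near sqrt(h), this tests an odd h for primality.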
+func prime(h uint32, sr uint32) bool {
+	for n := uint32(3); n <= sr; n += 2 {
+		if h%n == 0 {
+			return false
+		}
+	}
+	return true
+}
+
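+// inithash returns a hash table sized for the entries of composite literal n, reusing autohash
+// when it is large enough and otherwise allocating one with a prime number of buckets.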
+func inithash(n *Node, autohash []*Node) []*Node {
+	// count the number of entries
+	h := uint32(0)
+
+	for ll := n.List; ll != nil; ll = ll.Next {
+		h++
+	}
+
+	// If the caller-provided auto hash table is large enough, use it.
+	if h <= uint32(len(autohash)) {
+		for i := range autohash {
+			autohash[i] = nil
+		}
+		return autohash
+	}
+
+	// make hash size odd and about 12.5% larger than the entry count
+	h += h / 8
+
+	h |= 1
+
+	// approximate sqrt of h using a few Newton iterations
+	sr := h / 2
+
+	for i := 0; i < 5; i++ {
+		sr = (sr + h/sr) / 2
+	}
+
+	// check for primality
+	for !prime(h, sr) {
+		h += 2
+	}
+
+	// build and return a throw-away hash table
+	return make([]*Node, h)
+}
+
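+// iscomptype reports whether t may be used as a composite literal type: an array, struct, or map, or a pointer to one.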
+func iscomptype(t *Type) bool {
+	switch t.Etype {
+	case TARRAY, TSTRUCT, TMAP:
+		return true
+
+	case TPTR32, TPTR64:
+		switch t.Type.Etype {
+		case TARRAY, TSTRUCT, TMAP:
+			return true
+		}
+	}
+
+	return false
+}
+
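+// pushtype fills in the elided type of composite literal n with the expected type t.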
+func pushtype(n *Node, t *Type) {
+	if n == nil || n.Op != OCOMPLIT || !iscomptype(t) {
+		return
+	}
+
+	if n.Right == nil {
+		n.Right = typenod(t)
+		n.Implicit = true       // don't print
+		n.Right.Implicit = true // * is okay
+	} else if Debug['s'] != 0 {
+		typecheck(&n.Right, Etype)
+		if n.Right.Type != nil && Eqtype(n.Right.Type, t) {
+			fmt.Printf("%v: redundant type: %v\n", n.Line(), t)
+		}
+	}
+}
+
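+// typecheckcomplit typechecks the composite literal *np and rewrites it to OARRAYLIT, OMAPLIT,
+// OSTRUCTLIT, or (for &T{...}) OPTRLIT form.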
+func typecheckcomplit(np **Node) {
+	n := *np
+	lno := lineno
+	defer func() {
+		lineno = lno
+		*np = n
+	}()
+
+	if n.Right == nil {
+		if n.List != nil {
+			setlineno(n.List.N)
+		}
+		Yyerror("missing type in composite literal")
+		n.Type = nil
+		return
+	}
+
+	// Save original node (including n->right)
+	norig := Nod(int(n.Op), nil, nil)
+
+	*norig = *n
+
+	setlineno(n.Right)
+	l := typecheck(&n.Right, Etype|Ecomplit) /* sic */
+	t := l.Type
+	if t == nil {
+		n.Type = nil
+		return
+	}
+	nerr := nerrors
+	n.Type = t
+
+	if Isptr[t.Etype] {
+		// For better or worse, we don't allow pointers as the composite literal type,
+		// except when using the &T syntax, which sets implicit on the OIND.
+		if !n.Right.Implicit {
+			Yyerror("invalid pointer type %v for composite literal (use &%v instead)", t, t.Type)
+			n.Type = nil
+			return
+		}
+
+		// Also, the underlying type must be a struct, map, slice, or array.
+		if !iscomptype(t) {
+			Yyerror("invalid pointer type %v for composite literal", t)
+			n.Type = nil
+			return
+		}
+
+		t = t.Type
+	}
+
+	var r *Node
+	switch t.Etype {
+	default:
+		Yyerror("invalid type for composite literal: %v", t)
+		n.Type = nil
+
+	case TARRAY:
+		var autohash [101]*Node
+		hash := inithash(n, autohash[:])
+
+		length := int64(0)
+		i := 0
+		var l *Node
+		for ll := n.List; ll != nil; ll = ll.Next {
+			l = ll.N
+			setlineno(l)
+			if l.Op != OKEY {
+				l = Nod(OKEY, Nodintconst(int64(i)), l)
+				l.Left.Type = Types[TINT]
+				l.Left.Typecheck = 1
+				ll.N = l
+			}
+
+			typecheck(&l.Left, Erv)
+			evconst(l.Left)
+			i = nonnegconst(l.Left)
+			if i < 0 && l.Left.Diag == 0 {
+				Yyerror("array index must be non-negative integer constant")
+				l.Left.Diag = 1
+				i = -(1 << 30) // stay negative for a while
+			}
+
+			if i >= 0 {
+				indexdup(l.Left, hash)
+			}
+			i++
+			if int64(i) > length {
+				length = int64(i)
+				if t.Bound >= 0 && length > t.Bound {
+					setlineno(l)
+					Yyerror("array index %d out of bounds [0:%d]", length-1, t.Bound)
+					t.Bound = -1 // no more errors
+				}
+			}
+
+			r = l.Right
+			pushtype(r, t.Type)
+			typecheck(&r, Erv)
+			defaultlit(&r, t.Type)
+			l.Right = assignconv(r, t.Type, "array element")
+		}
+
+		if t.Bound == -100 {
+			t.Bound = length
+		}
+		if t.Bound < 0 {
+			n.Right = Nodintconst(length)
+		}
+		n.Op = OARRAYLIT
+
+	case TMAP:
+		var autohash [101]*Node
+		hash := inithash(n, autohash[:])
+
+		var l *Node
+		for ll := n.List; ll != nil; ll = ll.Next {
+			l = ll.N
+			setlineno(l)
+			if l.Op != OKEY {
+				typecheck(&ll.N, Erv)
+				Yyerror("missing key in map literal")
+				continue
+			}
+
+			typecheck(&l.Left, Erv)
+			defaultlit(&l.Left, t.Down)
+			l.Left = assignconv(l.Left, t.Down, "map key")
+			if l.Left.Op != OCONV {
+				keydup(l.Left, hash)
+			}
+
+			r = l.Right
+			pushtype(r, t.Type)
+			typecheck(&r, Erv)
+			defaultlit(&r, t.Type)
+			l.Right = assignconv(r, t.Type, "map value")
+		}
+
+		n.Op = OMAPLIT
+
+	case TSTRUCT:
+		bad := 0
+		if n.List != nil && nokeys(n.List) {
+			// simple list of variables
+			f := t.Type
+
+			var s *Sym
+			for ll := n.List; ll != nil; ll = ll.Next {
+				setlineno(ll.N)
+				typecheck(&ll.N, Erv)
+				if f == nil {
+					if bad == 0 {
+						Yyerror("too many values in struct initializer")
+					}
+					bad++
+					continue
+				}
+
+				s = f.Sym
+				if s != nil && !exportname(s.Name) && s.Pkg != localpkg {
+					Yyerror("implicit assignment of unexported field '%s' in %v literal", s.Name, t)
+				}
+
+				// No pushtype allowed here.  Must name fields for that.
+				ll.N = assignconv(ll.N, f.Type, "field value")
+
+				ll.N = Nod(OKEY, newname(f.Sym), ll.N)
+				ll.N.Left.Type = f
+				ll.N.Left.Typecheck = 1
+				f = f.Down
+			}
+
+			if f != nil {
+				Yyerror("too few values in struct initializer")
+			}
+		} else {
+			hash := make(map[string]bool)
+
+			// keyed list
+			var s *Sym
+			var f *Type
+			var l *Node
+			var s1 *Sym
+			for ll := n.List; ll != nil; ll = ll.Next {
+				l = ll.N
+				setlineno(l)
+				if l.Op != OKEY {
+					if bad == 0 {
+						Yyerror("mixture of field:value and value initializers")
+					}
+					bad++
+					typecheck(&ll.N, Erv)
+					continue
+				}
+
+				s = l.Left.Sym
+				if s == nil {
+					Yyerror("invalid field name %v in struct initializer", l.Left)
+					typecheck(&l.Right, Erv)
+					continue
+				}
+
+				// Sym might have resolved to name in other top-level
+				// package, because of import dot.  Redirect to correct sym
+				// before we do the lookup.
+				if s.Pkg != localpkg && exportname(s.Name) {
+					s1 = Lookup(s.Name)
+					if s1.Origpkg == s.Pkg {
+						s = s1
+					}
+				}
+
+				f = lookdot1(nil, s, t, t.Type, 0)
+				if f == nil {
+					Yyerror("unknown %v field '%v' in struct literal", t, s)
+					continue
+				}
+
+				l.Left = newname(s)
+				l.Left.Typecheck = 1
+				l.Left.Type = f
+				s = f.Sym
+				fielddup(newname(s), hash)
+				r = l.Right
+
+				// No pushtype allowed here.  Tried and rejected.
+				typecheck(&r, Erv)
+
+				l.Right = assignconv(r, f.Type, "field value")
+			}
+		}
+
+		n.Op = OSTRUCTLIT
+	}
+
+	if nerr != nerrors {
+		n.Type = nil
+		return
+	}
+
+	n.Orig = norig
+	if Isptr[n.Type.Etype] {
+		n = Nod(OPTRLIT, n, nil)
+		n.Typecheck = 1
+		n.Type = n.Left.Type
+		n.Left.Type = t
+		n.Left.Typecheck = 1
+	}
+
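+	// The Isptr case above may have replaced n with an OPTRLIT wrapper; record the original on the new node too.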
+	n.Orig = norig
+	return
+}
+
+/*
+ * lvalue etc
+ */
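+// islvalue reports whether n is an addressable expression.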
+func islvalue(n *Node) bool {
+	switch n.Op {
+	case OINDEX:
+		if Isfixedarray(n.Left.Type) {
+			return islvalue(n.Left)
+		}
+		if n.Left.Type != nil && n.Left.Type.Etype == TSTRING {
+			return false
+		}
+		fallthrough
+	case OIND, ODOTPTR, OCLOSUREVAR, OPARAM:
+		return true
+
+	case ODOT:
+		return islvalue(n.Left)
+
+	case ONAME:
+		if n.Class == PFUNC {
+			return false
+		}
+		return true
+	}
+
+	return false
+}
+
+func checklvalue(n *Node, verb string) {
+	if !islvalue(n) {
+		Yyerror("cannot %s %v", verb, n)
+	}
+}
+
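+// checkassign checks that n may be assigned to in the statement stmt, marking the variables involved as assigned.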
+func checkassign(stmt *Node, n *Node) {
+	// Variables declared in ORANGE are assigned on every iteration.
+	if n.Defn != stmt || stmt.Op == ORANGE {
+		r := outervalue(n)
+		var l *Node
+		for l = n; l != r; l = l.Left {
+			l.Assigned = true
+			if l.Param != nil && l.Closure != nil {
+				l.Closure.Assigned = true
+			}
+		}
+
+		l.Assigned = true
+		if l.Param != nil && l.Closure != nil {
+			l.Closure.Assigned = true
+		}
+	}
+
+	if islvalue(n) {
+		return
+	}
+	if n.Op == OINDEXMAP {
+		n.Etype = 1
+		return
+	}
+
+	// have already complained about n being undefined
+	if n.Op == ONONAME {
+		return
+	}
+
+	Yyerror("cannot assign to %v", n)
+}
+
+func checkassignlist(stmt *Node, l *NodeList) {
+	for ; l != nil; l = l.Next {
+		checkassign(stmt, l.N)
+	}
+}
+
+// Check whether l and r are the same side effect-free expression,
+// so that it is safe to reuse one instead of computing both.
+func samesafeexpr(l *Node, r *Node) bool {
+	if l.Op != r.Op || !Eqtype(l.Type, r.Type) {
+		return false
+	}
+
+	switch l.Op {
+	case ONAME, OCLOSUREVAR:
+		return l == r
+
+	case ODOT, ODOTPTR:
+		return l.Right != nil && r.Right != nil && l.Right.Sym == r.Right.Sym && samesafeexpr(l.Left, r.Left)
+
+	case OIND:
+		return samesafeexpr(l.Left, r.Left)
+
+	case OINDEX:
+		return samesafeexpr(l.Left, r.Left) && samesafeexpr(l.Right, r.Right)
+	}
+
+	return false
+}
+
+/*
+ * type check assignment.
+ * if this assignment is the definition of a var on the left side,
+ * fill in the var's type.
+ */
+func typecheckas(n *Node) {
+	// delicate little dance.
+	// the definition of n may refer to this assignment
+	// as its definition, in which case it will call typecheckas.
+	// in that case, do not call typecheck back, or it will cycle.
+	// if the variable has a type (ntype) then typechecking
+	// will not look at defn, so it is okay (and desirable,
+	// so that the conversion below happens).
+	n.Left = resolve(n.Left)
+
+	if n.Left.Defn != n || n.Left.Ntype != nil {
+		typecheck(&n.Left, Erv|Easgn)
+	}
+
+	typecheck(&n.Right, Erv)
+	checkassign(n, n.Left)
+	if n.Right != nil && n.Right.Type != nil {
+		if n.Left.Type != nil {
+			n.Right = assignconv(n.Right, n.Left.Type, "assignment")
+		}
+	}
+
+	if n.Left.Defn == n && n.Left.Ntype == nil {
+		defaultlit(&n.Right, nil)
+		n.Left.Type = n.Right.Type
+	}
+
+	// second half of dance.
+	// now that right is done, typecheck the left
+	// just to get it over with.  see dance above.
+	n.Typecheck = 1
+
+	if n.Left.Typecheck == 0 {
+		typecheck(&n.Left, Erv|Easgn)
+	}
+}
+
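+// checkassignto reports an error if a value of type src cannot be assigned to dst.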
+func checkassignto(src *Type, dst *Node) {
+	var why string
+
+	if assignop(src, dst.Type, &why) == 0 {
+		Yyerror("cannot assign %v to %v in multiple assignment%s", src, Nconv(dst, obj.FmtLong), why)
+		return
+	}
+}
+
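+// typecheckas2 typechecks a multiple assignment: x, y = a, b, including the special forms
+// x, y, ... = f() and the two-result forms x, ok = m[k], x, ok = <-c, and x, ok = i.(T).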
+func typecheckas2(n *Node) {
+	for ll := n.List; ll != nil; ll = ll.Next {
+		// delicate little dance.
+		ll.N = resolve(ll.N)
+
+		if ll.N.Defn != n || ll.N.Ntype != nil {
+			typecheck(&ll.N, Erv|Easgn)
+		}
+	}
+
+	cl := count(n.List)
+	cr := count(n.Rlist)
+	if cl > 1 && cr == 1 {
+		typecheck(&n.Rlist.N, Erv|Efnstruct)
+	} else {
+		typechecklist(n.Rlist, Erv)
+	}
+	checkassignlist(n, n.List)
+
+	var l *Node
+	var r *Node
+	if cl == cr {
+		// easy
+		ll := n.List
+		lr := n.Rlist
+		for ; ll != nil; ll, lr = ll.Next, lr.Next {
+			if ll.N.Type != nil && lr.N.Type != nil {
+				lr.N = assignconv(lr.N, ll.N.Type, "assignment")
+			}
+			if ll.N.Defn == n && ll.N.Ntype == nil {
+				defaultlit(&lr.N, nil)
+				ll.N.Type = lr.N.Type
+			}
+		}
+
+		goto out
+	}
+
+	l = n.List.N
+	r = n.Rlist.N
+
+	// x,y,z = f()
+	if cr == 1 {
+		if r.Type == nil {
+			goto out
+		}
+		switch r.Op {
+		case OCALLMETH, OCALLINTER, OCALLFUNC:
+			if r.Type.Etype != TSTRUCT || r.Type.Funarg == 0 {
+				break
+			}
+			cr = structcount(r.Type)
+			if cr != cl {
+				goto mismatch
+			}
+			n.Op = OAS2FUNC
+			var s Iter
+			t := Structfirst(&s, &r.Type)
+			for ll := n.List; ll != nil; ll = ll.Next {
+				if t.Type != nil && ll.N.Type != nil {
+					checkassignto(t.Type, ll.N)
+				}
+				if ll.N.Defn == n && ll.N.Ntype == nil {
+					ll.N.Type = t.Type
+				}
+				t = structnext(&s)
+			}
+
+			goto out
+		}
+	}
+
+	// x, ok = y
+	if cl == 2 && cr == 1 {
+		if r.Type == nil {
+			goto out
+		}
+		switch r.Op {
+		case OINDEXMAP, ORECV, ODOTTYPE:
+			switch r.Op {
+			case OINDEXMAP:
+				n.Op = OAS2MAPR
+
+			case ORECV:
+				n.Op = OAS2RECV
+
+			case ODOTTYPE:
+				n.Op = OAS2DOTTYPE
+				r.Op = ODOTTYPE2
+			}
+
+			if l.Type != nil {
+				checkassignto(r.Type, l)
+			}
+			if l.Defn == n {
+				l.Type = r.Type
+			}
+			l := n.List.Next.N
+			if l.Type != nil && l.Type.Etype != TBOOL {
+				checkassignto(Types[TBOOL], l)
+			}
+			if l.Defn == n && l.Ntype == nil {
+				l.Type = Types[TBOOL]
+			}
+			goto out
+		}
+	}
+
+mismatch:
+	Yyerror("assignment count mismatch: %d = %d", cl, cr)
+
+	// second half of dance
+out:
+	n.Typecheck = 1
+
+	for ll := n.List; ll != nil; ll = ll.Next {
+		if ll.N.Typecheck == 0 {
+			typecheck(&ll.N, Erv|Easgn)
+		}
+	}
+}
+
+/*
+ * type check function definition
+ */
+func typecheckfunc(n *Node) {
+	typecheck(&n.Nname, Erv|Easgn)
+	t := n.Nname.Type
+	if t == nil {
+		return
+	}
+	n.Type = t
+	t.Nname = n.Nname
+	rcvr := getthisx(t).Type
+	if rcvr != nil && n.Func.Shortname != nil && !isblank(n.Func.Shortname) {
+		addmethod(n.Func.Shortname.Sym, t, true, n.Nname.Nointerface)
+	}
+
+	for l := n.Func.Dcl; l != nil; l = l.Next {
+		if l.N.Op == ONAME && (l.N.Class == PPARAM || l.N.Class == PPARAMOUT) {
+			l.N.Name.Decldepth = 1
+		}
+	}
+}
+
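+// stringtoarraylit rewrites *np, a conversion of a constant string to []byte or []rune, as the equivalent composite literal.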
+func stringtoarraylit(np **Node) {
+	n := *np
+	if n.Left.Op != OLITERAL || n.Left.Val.Ctype != CTSTR {
+		Fatal("stringtoarraylit %v", n)
+	}
+
+	s := n.Left.Val.U.(string)
+	var l *NodeList
+	if n.Type.Type.Etype == TUINT8 {
+		// []byte
+		for i := 0; i < len(s); i++ {
+			l = list(l, Nod(OKEY, Nodintconst(int64(i)), Nodintconst(int64(s[i]))))
+		}
+	} else {
+		// []rune
+		i := 0
+		for _, r := range s {
+			l = list(l, Nod(OKEY, Nodintconst(int64(i)), Nodintconst(int64(r))))
+			i++
+		}
+	}
+
+	nn := Nod(OCOMPLIT, nil, typenod(n.Type))
+	nn.List = l
+	typecheck(&nn, Erv)
+	*np = nn
+}
+
+var ntypecheckdeftype int
+
+var methodqueue *NodeList
+
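+// domethod typechecks the signature of method n and copies it into place, dropping any
+// blank (_) parameter names so that generated calls assign all arguments.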
+func domethod(n *Node) {
+	nt := n.Type.Nname
+	typecheck(&nt, Etype)
+	if nt.Type == nil {
+		// type check failed; leave empty func
+		n.Type.Etype = TFUNC
+
+		n.Type.Nod = nil
+		return
+	}
+
+	// If we have
+	//	type I interface {
+	//		M(_ int)
+	//	}
+	// then even though I.M looks like it doesn't care about the
+	// value of its argument, a specific implementation of I may
+	// care.  The _ would suppress the assignment to that argument
+	// while generating a call, so remove it.
+	for t := getinargx(nt.Type).Type; t != nil; t = t.Down {
+		if t.Sym != nil && t.Sym.Name == "_" {
+			t.Sym = nil
+		}
+	}
+
+	*n.Type = *nt.Type
+	n.Type.Nod = nil
+	checkwidth(n.Type)
+}
+
+var mapqueue *NodeList
+
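+// copytype copies the computed type t onto the declared type n.Type, preserving the identity
+// fields tied to n; if t is still a forward type, n is queued on t.Copyto instead.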
+func copytype(n *Node, t *Type) {
+	if t.Etype == TFORW {
+		// This type isn't computed yet; when it is, update n.
+		t.Copyto = list(t.Copyto, n)
+
+		return
+	}
+
+	maplineno := int(n.Type.Maplineno)
+	embedlineno := int(n.Type.Embedlineno)
+
+	l := n.Type.Copyto
+	*n.Type = *t
+
+	t = n.Type
+	t.Sym = n.Sym
+	t.Local = n.Local
+	t.Vargen = n.Vargen
+	t.Siggen = 0
+	t.Method = nil
+	t.Xmethod = nil
+	t.Nod = nil
+	t.Printed = 0
+	t.Deferwidth = 0
+	t.Copyto = nil
+
+	// Update nodes waiting on this type.
+	for ; l != nil; l = l.Next {
+		copytype(l.N, t)
+	}
+
+	// Double-check use of type as embedded type.
+	lno := int(lineno)
+
+	if embedlineno != 0 {
+		lineno = int32(embedlineno)
+		if Isptr[t.Etype] {
+			Yyerror("embedded type cannot be a pointer")
+		}
+	}
+
+	lineno = int32(lno)
+
+	// Queue check for map until all the types are done settling.
+	if maplineno != 0 {
+		t.Maplineno = int32(maplineno)
+		mapqueue = list(mapqueue, n)
+	}
+}
+
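+// typecheckdeftype typechecks the named type declaration n and, once the outermost declaration
+// finishes, resolves any queued interface method signatures and map key checks.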
+func typecheckdeftype(n *Node) {
+	ntypecheckdeftype++
+	lno := int(lineno)
+	setlineno(n)
+	n.Type.Sym = n.Sym
+	n.Typecheck = 1
+	typecheck(&n.Ntype, Etype)
+	t := n.Ntype.Type
+	if t == nil {
+		n.Diag = 1
+		n.Type = nil
+		goto ret
+	}
+
+	if n.Type == nil {
+		n.Diag = 1
+		goto ret
+	}
+
+	// copy new type and clear fields
+	// that don't come along.
+	// anything zeroed here must be zeroed in
+	// typedcl2 too.
+	copytype(n, t)
+
+ret:
+	lineno = int32(lno)
+
+	// if there are no type definitions going on, it's safe to
+	// try to resolve the method types for the interfaces
+	// we just read.
+	if ntypecheckdeftype == 1 {
+		var l *NodeList
+		for {
+			l = methodqueue
+			if l == nil {
+				break
+			}
+			methodqueue = nil
+			for ; l != nil; l = l.Next {
+				domethod(l.N)
+			}
+		}
+
+		for l := mapqueue; l != nil; l = l.Next {
+			lineno = l.N.Type.Maplineno
+			maptype(l.N.Type, Types[TBOOL])
+		}
+
+		lineno = int32(lno)
+	}
+
+	ntypecheckdeftype--
+}
+
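+// queuemethod defers typechecking of method n's signature until the current batch of type
+// definitions settles, or does it immediately if none are in progress.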
+func queuemethod(n *Node) {
+	if ntypecheckdeftype == 0 {
+		domethod(n)
+		return
+	}
+
+	methodqueue = list(methodqueue, n)
+}
+
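+// typecheckdef typechecks the definition of the declared name n (constant, variable, type, or label), detecting definition cycles.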
+func typecheckdef(n *Node) *Node {
+	lno := int(lineno)
+	setlineno(n)
+
+	if n.Op == ONONAME {
+		if n.Diag == 0 {
+			n.Diag = 1
+			if n.Lineno != 0 {
+				lineno = n.Lineno
+			}
+
+			// Note: adderrorname looks for this string and
+			// adds context about the outer expression
+			Yyerror("undefined: %v", n.Sym)
+		}
+
+		return n
+	}
+
+	if n.Walkdef == 1 {
+		return n
+	}
+
+	l := new(NodeList)
+	l.N = n
+	l.Next = typecheckdefstack
+	typecheckdefstack = l
+
+	if n.Walkdef == 2 {
+		Flusherrors()
+		fmt.Printf("typecheckdef loop:")
+		for l := typecheckdefstack; l != nil; l = l.Next {
+			fmt.Printf(" %v", l.N.Sym)
+		}
+		fmt.Printf("\n")
+		Fatal("typecheckdef loop")
+	}
+
+	n.Walkdef = 2
+
+	if n.Type != nil || n.Sym == nil { // builtin or no name
+		goto ret
+	}
+
+	switch n.Op {
+	default:
+		Fatal("typecheckdef %v", Oconv(int(n.Op), 0))
+
+		// not really syms
+	case OGOTO, OLABEL:
+		break
+
+	case OLITERAL:
+		if n.Ntype != nil {
+			typecheck(&n.Ntype, Etype)
+			n.Type = n.Ntype.Type
+			n.Ntype = nil
+			if n.Type == nil {
+				n.Diag = 1
+				goto ret
+			}
+		}
+
+		e := n.Defn
+		n.Defn = nil
+		if e == nil {
+			lineno = n.Lineno
+			Dump("typecheckdef nil defn", n)
+			Yyerror("xxx")
+		}
+
+		typecheck(&e, Erv|Eiota)
+		if Isconst(e, CTNIL) {
+			Yyerror("const initializer cannot be nil")
+			goto ret
+		}
+
+		if e.Type != nil && e.Op != OLITERAL || !isgoconst(e) {
+			if e.Diag == 0 {
+				Yyerror("const initializer %v is not a constant", e)
+				e.Diag = 1
+			}
+
+			goto ret
+		}
+
+		t := n.Type
+		if t != nil {
+			if !okforconst[t.Etype] {
+				Yyerror("invalid constant type %v", t)
+				goto ret
+			}
+
+			if !isideal(e.Type) && !Eqtype(t, e.Type) {
+				Yyerror("cannot use %v as type %v in const initializer", Nconv(e, obj.FmtLong), t)
+				goto ret
+			}
+
+			Convlit(&e, t)
+		}
+
+		n.Val = e.Val
+		n.Type = e.Type
+
+	case ONAME:
+		if n.Ntype != nil {
+			typecheck(&n.Ntype, Etype)
+			n.Type = n.Ntype.Type
+
+			if n.Type == nil {
+				n.Diag = 1
+				goto ret
+			}
+		}
+
+		if n.Type != nil {
+			break
+		}
+		if n.Defn == nil {
+			if n.Etype != 0 { // like OPRINTN
+				break
+			}
+			if nsavederrors+nerrors > 0 {
+				// Can have undefined variables in x := foo
+				// that leave x with a nil Defn.
+				// If there are other errors anyway, don't
+				// bother adding to the noise.
+				break
+			}
+
+			Fatal("var without type, init: %v", n.Sym)
+		}
+
+		if n.Defn.Op == ONAME {
+			typecheck(&n.Defn, Erv)
+			n.Type = n.Defn.Type
+			break
+		}
+
+		typecheck(&n.Defn, Etop) // fills in n->type
+
+	case OTYPE:
+		if Curfn != nil {
+			defercheckwidth()
+		}
+		n.Walkdef = 1
+		n.Type = typ(TFORW)
+		n.Type.Sym = n.Sym
+		nerrors0 := nerrors
+		typecheckdeftype(n)
+		if n.Type.Etype == TFORW && nerrors > nerrors0 {
+			// Something went wrong during type-checking,
+			// but it was reported. Silence future errors.
+			n.Type.Broke = 1
+		}
+
+		if Curfn != nil {
+			resumecheckwidth()
+		}
+
+		// nothing to see here
+	case OPACK:
+		break
+	}
+
+ret:
+	if n.Op != OLITERAL && n.Type != nil && isideal(n.Type) {
+		Fatal("got %v for %v", n.Type, n)
+	}
+	if typecheckdefstack.N != n {
+		Fatal("typecheckdefstack mismatch")
+	}
+	l = typecheckdefstack
+	typecheckdefstack = l.Next
+
+	lineno = int32(lno)
+	n.Walkdef = 1
+	return n
+}
+
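+// checkmake validates the len, cap, size, or buffer argument n of make(t).
+// It returns -1 if an error was reported.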
+func checkmake(t *Type, arg string, n *Node) int {
+	if n.Op == OLITERAL {
+		switch n.Val.Ctype {
+		case CTINT, CTRUNE, CTFLT, CTCPLX:
+			n.Val = toint(n.Val)
+			if mpcmpfixc(n.Val.U.(*Mpint), 0) < 0 {
+				Yyerror("negative %s argument in make(%v)", arg, t)
+				return -1
+			}
+
+			if Mpcmpfixfix(n.Val.U.(*Mpint), Maxintval[TINT]) > 0 {
+				Yyerror("%s argument too large in make(%v)", arg, t)
+				return -1
+			}
+
+			// Delay defaultlit until after we've checked range, to avoid
+			// a redundant "constant NNN overflows int" error.
+			defaultlit(&n, Types[TINT])
+
+			return 0
+
+		default:
+			break
+		}
+	}
+
+	if !Isint[n.Type.Etype] && n.Type.Etype != TIDEAL {
+		Yyerror("non-integer %s argument in make(%v) - %v", arg, t, n.Type)
+		return -1
+	}
+
+	// Defaultlit still necessary for non-constant: n might be 1<<k.
+	defaultlit(&n, Types[TINT])
+
+	return 0
+}
+
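+// markbreak marks the target of each break statement in n by setting
+// Hasbreak: either the statement named by the break's label, or the
+// innermost enclosing loop, switch, or select given by implicit.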
+func markbreak(n *Node, implicit *Node) {
+	if n == nil {
+		return
+	}
+
+	switch n.Op {
+	case OBREAK:
+		if n.Left == nil {
+			if implicit != nil {
+				implicit.Hasbreak = true
+			}
+		} else {
+			lab := n.Left.Sym.Label
+			if lab != nil {
+				lab.Def.Hasbreak = true
+			}
+		}
+
+	case OFOR,
+		OSWITCH,
+		OTYPESW,
+		OSELECT,
+		ORANGE:
+		implicit = n
+		fallthrough
+
+	default:
+		markbreak(n.Left, implicit)
+
+		markbreak(n.Right, implicit)
+		markbreak(n.Ntest, implicit)
+		markbreak(n.Nincr, implicit)
+		markbreaklist(n.Ninit, implicit)
+		markbreaklist(n.Nbody, implicit)
+		markbreaklist(n.Nelse, implicit)
+		markbreaklist(n.List, implicit)
+		markbreaklist(n.Rlist, implicit)
+	}
+}
+
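+// markbreaklist walks the statement list l, attaching each label to the
+// for, switch, or select statement it names so that labeled breaks are
+// attributed to the right statement, and calls markbreak on each node.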
+func markbreaklist(l *NodeList, implicit *Node) {
+	var n *Node
+	var lab *Label
+
+	for ; l != nil; l = l.Next {
+		n = l.N
+		if n.Op == OLABEL && l.Next != nil && n.Defn == l.Next.N {
+			switch n.Defn.Op {
+			case OFOR,
+				OSWITCH,
+				OTYPESW,
+				OSELECT,
+				ORANGE:
+				lab = new(Label)
+				lab.Def = n.Defn
+				n.Left.Sym.Label = lab
+				markbreak(n.Defn, n.Defn)
+				n.Left.Sym.Label = nil
+				l = l.Next
+				continue
+			}
+		}
+
+		markbreak(n, implicit)
+	}
+}
+
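+// isterminating reports whether the statement list l ends in a
+// terminating statement in the sense of the Go spec. A non-zero top
+// marks a function body, for which break targets are computed first.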
+func isterminating(l *NodeList, top int) bool {
+	if l == nil {
+		return false
+	}
+	if top != 0 {
+		for l.Next != nil && l.N.Op != OLABEL {
+			l = l.Next
+		}
+		markbreaklist(l, nil)
+	}
+
+	for l.Next != nil {
+		l = l.Next
+	}
+	n := l.N
+
+	if n == nil {
+		return false
+	}
+
+	switch n.Op {
+	// NOTE: OLABEL is treated as a separate statement,
+	// not a separate prefix, so skipping to the last statement
+	// in the block handles the labeled statement case by
+	// skipping over the label. No case OLABEL here.
+
+	case OBLOCK:
+		return isterminating(n.List, 0)
+
+	case OGOTO,
+		ORETURN,
+		ORETJMP,
+		OPANIC,
+		OXFALL:
+		return true
+
+	case OFOR:
+		if n.Ntest != nil {
+			return false
+		}
+		if n.Hasbreak {
+			return false
+		}
+		return true
+
+	case OIF:
+		return isterminating(n.Nbody, 0) && isterminating(n.Nelse, 0)
+
+	case OSWITCH, OTYPESW, OSELECT:
+		if n.Hasbreak {
+			return false
+		}
+		def := 0
+		for l = n.List; l != nil; l = l.Next {
+			if !isterminating(l.N.Nbody, 0) {
+				return false
+			}
+			if l.N.List == nil { // default
+				def = 1
+			}
+		}
+
+		if n.Op != OSELECT && def == 0 {
+			return false
+		}
+		return true
+	}
+
+	return false
+}
+
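+// checkreturn checks that a function with results ends in a terminating
+// statement and reports "missing return" otherwise.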
+func checkreturn(fn *Node) {
+	if fn.Type.Outtuple != 0 && fn.Nbody != nil {
+		if !isterminating(fn.Nbody, 1) {
+			yyerrorl(int(fn.Func.Endlineno), "missing return at end of function")
+		}
+	}
+}
diff --git a/src/cmd/compile/internal/gc/unsafe.go b/src/cmd/compile/internal/gc/unsafe.go
new file mode 100644
index 0000000..824ecd0
--- /dev/null
+++ b/src/cmd/compile/internal/gc/unsafe.go
@@ -0,0 +1,167 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import "cmd/internal/obj"
+
+/*
+ * look for
+ *	unsafe.Sizeof
+ *	unsafe.Offsetof
+ *	unsafe.Alignof
+ * rewrite with a constant
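+ * for example, unsafe.Sizeof(int64(0)) becomes the uintptr constant 8.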
+ */
+func unsafenmagic(nn *Node) *Node {
+	fn := nn.Left
+	args := nn.List
+
+	if safemode != 0 || fn == nil || fn.Op != ONAME {
+		return nil
+	}
+	s := fn.Sym
+	if s == nil {
+		return nil
+	}
+	if s.Pkg != unsafepkg {
+		return nil
+	}
+
+	if args == nil {
+		Yyerror("missing argument for %v", s)
+		return nil
+	}
+
+	r := args.N
+
+	var v int64
+	if s.Name == "Sizeof" {
+		typecheck(&r, Erv)
+		defaultlit(&r, nil)
+		tr := r.Type
+		if tr == nil {
+			goto bad
+		}
+		dowidth(tr)
+		v = tr.Width
+		goto yes
+	}
+
+	if s.Name == "Offsetof" {
+		// must be a selector.
+		if r.Op != OXDOT {
+			goto bad
+		}
+
+		// Remember the base of the selector to find it again after dot insertion.
+		// Since r.Left may be mutated by typechecking, typecheck it explicitly
+		// first so that it is tracked correctly.
+		typecheck(&r.Left, Erv)
+
+		base := r.Left
+		typecheck(&r, Erv)
+		switch r.Op {
+		case ODOT, ODOTPTR:
+			break
+
+		case OCALLPART:
+			Yyerror("invalid expression %v: argument is a method value", nn)
+			v = 0
+			goto ret
+
+		default:
+			goto bad
+		}
+
+		v = 0
+
+		// add offsets for inserted dots.
+		var r1 *Node
+		for r1 = r; r1.Left != base; r1 = r1.Left {
+			switch r1.Op {
+			case ODOT:
+				v += r1.Xoffset
+
+			case ODOTPTR:
+				Yyerror("invalid expression %v: selector implies indirection of embedded %v", nn, r1.Left)
+				goto ret
+
+			default:
+				Dump("unsafenmagic", r)
+				Fatal("impossible %v node after dot insertion", Oconv(int(r1.Op), obj.FmtSharp))
+				goto bad
+			}
+		}
+
+		v += r1.Xoffset
+		goto yes
+	}
+
+	if s.Name == "Alignof" {
+		typecheck(&r, Erv)
+		defaultlit(&r, nil)
+		tr := r.Type
+		if tr == nil {
+			goto bad
+		}
+
+		// make struct { byte; T; }
+		t := typ(TSTRUCT)
+
+		t.Type = typ(TFIELD)
+		t.Type.Type = Types[TUINT8]
+		t.Type.Down = typ(TFIELD)
+		t.Type.Down.Type = tr
+
+		// compute struct widths
+		dowidth(t)
+
+		// the offset of T is its required alignment
+		v = t.Type.Down.Width
+
+		goto yes
+	}
+
+	return nil
+
+bad:
+	Yyerror("invalid expression %v", nn)
+	v = 0
+	goto ret
+
+yes:
+	if args.Next != nil {
+		Yyerror("extra arguments for %v", s)
+	}
+
+	// any side effects disappear; ignore init
+ret:
+	var val Val
+	val.Ctype = CTINT
+
+	val.U = new(Mpint)
+	Mpmovecfix(val.U.(*Mpint), v)
+	n := Nod(OLITERAL, nil, nil)
+	n.Orig = nn
+	n.Val = val
+	n.Type = Types[TUINTPTR]
+	nn.Type = Types[TUINTPTR]
+	return n
+}
+
+func isunsafebuiltin(n *Node) bool {
+	if n == nil || n.Op != ONAME || n.Sym == nil || n.Sym.Pkg != unsafepkg {
+		return false
+	}
+	if n.Sym.Name == "Sizeof" {
+		return true
+	}
+	if n.Sym.Name == "Offsetof" {
+		return true
+	}
+	if n.Sym.Name == "Alignof" {
+		return true
+	}
+	return false
+}
diff --git a/src/cmd/compile/internal/gc/util.go b/src/cmd/compile/internal/gc/util.go
new file mode 100644
index 0000000..c59af06
--- /dev/null
+++ b/src/cmd/compile/internal/gc/util.go
@@ -0,0 +1,103 @@
+package gc
+
+import (
+	"os"
+	"runtime"
+	"runtime/pprof"
+	"strconv"
+	"strings"
+)
+
+func (n *Node) Line() string {
+	return Ctxt.LineHist.LineString(int(n.Lineno))
+}
+
+func atoi(s string) int {
+	// NOTE: Not strconv.Atoi; base 0 makes ParseInt accept hex and octal prefixes as well.
+	n, _ := strconv.ParseInt(s, 0, 0)
+	return int(n)
+}
+
+func isalnum(c int) bool {
+	return isalpha(c) || isdigit(c)
+}
+
+func isalpha(c int) bool {
+	return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z'
+}
+
+func isdigit(c int) bool {
+	return '0' <= c && c <= '9'
+}
+
+func plan9quote(s string) string {
+	if s == "" {
+		return "'" + strings.Replace(s, "'", "''", -1) + "'"
+	}
+	for i := 0; i < len(s); i++ {
+		if s[i] <= ' ' || s[i] == '\'' {
+			return "'" + strings.Replace(s, "'", "''", -1) + "'"
+		}
+	}
+	return s
+}
+
+// stringsCompare is a copy of strings.Compare, which was introduced in Go 1.5,
+// so that the compiler can still be built with earlier Go releases.
+func stringsCompare(a, b string) int {
+	if a == b {
+		return 0
+	}
+	if a < b {
+		return -1
+	}
+	return +1
+}
+
+var atExitFuncs []func()
+
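+// AtExit registers f to be run when Exit is called.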
+func AtExit(f func()) {
+	atExitFuncs = append(atExitFuncs, f)
+}
+
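+// Exit runs the functions registered with AtExit, most recently
+// registered first, and then exits with the given code.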
+func Exit(code int) {
+	for i := len(atExitFuncs) - 1; i >= 0; i-- {
+		f := atExitFuncs[i]
+		atExitFuncs = atExitFuncs[:i]
+		f()
+	}
+	os.Exit(code)
+}
+
+var (
+	cpuprofile     string
+	memprofile     string
+	memprofilerate int64
+)
+
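+// startProfile starts CPU and heap profiling if cpuprofile or
+// memprofile is set, registering AtExit handlers that flush the
+// profiles on exit.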
+func startProfile() {
+	if cpuprofile != "" {
+		f, err := os.Create(cpuprofile)
+		if err != nil {
+			Fatal("%v", err)
+		}
+		if err := pprof.StartCPUProfile(f); err != nil {
+			Fatal("%v", err)
+		}
+		AtExit(pprof.StopCPUProfile)
+	}
+	if memprofile != "" {
+		if memprofilerate != 0 {
+			runtime.MemProfileRate = int(memprofilerate)
+		}
+		f, err := os.Create(memprofile)
+		if err != nil {
+			Fatal("%v", err)
+		}
+		AtExit(func() {
+			runtime.GC() // profile all outstanding allocations
+			if err := pprof.WriteHeapProfile(f); err != nil {
+				Fatal("%v", err)
+			}
+		})
+	}
+}
diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go
new file mode 100644
index 0000000..b5b8611
--- /dev/null
+++ b/src/cmd/compile/internal/gc/walk.go
@@ -0,0 +1,4090 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+	"cmd/internal/obj"
+	"fmt"
+	"strings"
+)
+
+var mpzero Mpint
+
+// The constant is known to the runtime.
+const (
+	tmpstringbufsize = 32
+)
+
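+// walk rewrites the body of fn into the lower-level form expected by
+// the back end, reporting any variables that were declared and not used.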
+func walk(fn *Node) {
+	Curfn = fn
+
+	if Debug['W'] != 0 {
+		s := fmt.Sprintf("\nbefore %v", Curfn.Nname.Sym)
+		dumplist(s, Curfn.Nbody)
+	}
+
+	lno := int(lineno)
+
+	// Final typecheck for any unused variables.
+	// It's hard for a variable to be on the heap when it is unused, but best to be consistent about &^PHEAP here and below.
+	for l := fn.Func.Dcl; l != nil; l = l.Next {
+		if l.N.Op == ONAME && l.N.Class&^PHEAP == PAUTO {
+			typecheck(&l.N, Erv|Easgn)
+		}
+	}
+
+	// Propagate the used flag for typeswitch variables up to the NONAME in its definition.
+	for l := fn.Func.Dcl; l != nil; l = l.Next {
+		if l.N.Op == ONAME && l.N.Class&^PHEAP == PAUTO && l.N.Defn != nil && l.N.Defn.Op == OTYPESW && l.N.Used {
+			l.N.Defn.Left.Used = true
+		}
+	}
+
+	for l := fn.Func.Dcl; l != nil; l = l.Next {
+		if l.N.Op != ONAME || l.N.Class&^PHEAP != PAUTO || l.N.Sym.Name[0] == '&' || l.N.Used {
+			continue
+		}
+		if l.N.Defn != nil && l.N.Defn.Op == OTYPESW {
+			if l.N.Defn.Left.Used {
+				continue
+			}
+			lineno = l.N.Defn.Left.Lineno
+			Yyerror("%v declared and not used", l.N.Sym)
+			l.N.Defn.Left.Used = true // suppress repeats
+		} else {
+			lineno = l.N.Lineno
+			Yyerror("%v declared and not used", l.N.Sym)
+		}
+	}
+
+	lineno = int32(lno)
+	if nerrors != 0 {
+		return
+	}
+	walkstmtlist(Curfn.Nbody)
+	if Debug['W'] != 0 {
+		s := fmt.Sprintf("after walk %v", Curfn.Nname.Sym)
+		dumplist(s, Curfn.Nbody)
+	}
+
+	heapmoves()
+	if Debug['W'] != 0 && Curfn.Func.Enter != nil {
+		s := fmt.Sprintf("enter %v", Curfn.Nname.Sym)
+		dumplist(s, Curfn.Func.Enter)
+	}
+}
+
+func walkstmtlist(l *NodeList) {
+	for ; l != nil; l = l.Next {
+		walkstmt(&l.N)
+	}
+}
+
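+// samelist reports whether a and b have the same length and contain the same nodes.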
+func samelist(a *NodeList, b *NodeList) bool {
+	for ; a != nil && b != nil; a, b = a.Next, b.Next {
+		if a.N != b.N {
+			return false
+		}
+	}
+	return a == b
+}
+
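+// paramoutheap reports whether fn has an output parameter whose address has been taken.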
+func paramoutheap(fn *Node) bool {
+	for l := fn.Func.Dcl; l != nil; l = l.Next {
+		switch l.N.Class {
+		case PPARAMOUT,
+			PPARAMOUT | PHEAP:
+			return l.N.Addrtaken
+
+			// stop early - parameters are over
+		case PAUTO,
+			PAUTO | PHEAP:
+			return false
+		}
+	}
+
+	return false
+}
+
+// adds "adjust" to all the argument locations for the call n.
+// n must be a defer or go node that has already been walked.
+func adjustargs(n *Node, adjust int) {
+	var arg *Node
+	var lhs *Node
+
+	callfunc := n.Left
+	for args := callfunc.List; args != nil; args = args.Next {
+		arg = args.N
+		if arg.Op != OAS {
+			Yyerror("call arg not assignment")
+		}
+		lhs = arg.Left
+		if lhs.Op == ONAME {
+			// This is a temporary introduced by reorder1.
+			// The real store to the stack appears later in the arg list.
+			continue
+		}
+
+		if lhs.Op != OINDREG {
+			Yyerror("call argument store does not use OINDREG")
+		}
+
+		// can't really check this in machine-indep code.
+		//if(lhs->val.u.reg != D_SP)
+		//      yyerror("call arg assign not indreg(SP)");
+		lhs.Xoffset += int64(adjust)
+	}
+}
+
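+// walkstmt rewrites the statement *np in place, walking its
+// subexpressions and hoisting their side effects into init lists.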
+func walkstmt(np **Node) {
+	n := *np
+	if n == nil {
+		return
+	}
+	if n.Dodata == 2 { // don't walk, generated by anylit.
+		return
+	}
+
+	setlineno(n)
+
+	walkstmtlist(n.Ninit)
+
+	switch n.Op {
+	default:
+		if n.Op == ONAME {
+			Yyerror("%v is not a top level statement", n.Sym)
+		} else {
+			Yyerror("%v is not a top level statement", Oconv(int(n.Op), 0))
+		}
+		Dump("nottop", n)
+
+	case OAS,
+		OASOP,
+		OAS2,
+		OAS2DOTTYPE,
+		OAS2RECV,
+		OAS2FUNC,
+		OAS2MAPR,
+		OCLOSE,
+		OCOPY,
+		OCALLMETH,
+		OCALLINTER,
+		OCALL,
+		OCALLFUNC,
+		ODELETE,
+		OSEND,
+		OPRINT,
+		OPRINTN,
+		OPANIC,
+		OEMPTY,
+		ORECOVER,
+		OGETG:
+		if n.Typecheck == 0 {
+			Fatal("missing typecheck: %v", Nconv(n, obj.FmtSign))
+		}
+		init := n.Ninit
+		n.Ninit = nil
+		walkexpr(&n, &init)
+		addinit(&n, init)
+		if (*np).Op == OCOPY && n.Op == OCONVNOP {
+			n.Op = OEMPTY // don't leave plain values as statements.
+		}
+
+		// special case for a receive where we throw away
+	// the value received.
+	case ORECV:
+		if n.Typecheck == 0 {
+			Fatal("missing typecheck: %v", Nconv(n, obj.FmtSign))
+		}
+		init := n.Ninit
+		n.Ninit = nil
+
+		walkexpr(&n.Left, &init)
+		n = mkcall1(chanfn("chanrecv1", 2, n.Left.Type), nil, &init, typename(n.Left.Type), n.Left, nodnil())
+		walkexpr(&n, &init)
+
+		addinit(&n, init)
+
+	case OBREAK,
+		ODCL,
+		OCONTINUE,
+		OFALL,
+		OGOTO,
+		OLABEL,
+		ODCLCONST,
+		ODCLTYPE,
+		OCHECKNIL,
+		OVARKILL:
+		break
+
+	case OBLOCK:
+		walkstmtlist(n.List)
+
+	case OXCASE:
+		Yyerror("case statement out of place")
+		n.Op = OCASE
+		fallthrough
+
+	case OCASE:
+		walkstmt(&n.Right)
+
+	case ODEFER:
+		Hasdefer = 1
+		switch n.Left.Op {
+		case OPRINT, OPRINTN:
+			walkprintfunc(&n.Left, &n.Ninit)
+
+		case OCOPY:
+			n.Left = copyany(n.Left, &n.Ninit, 1)
+
+		default:
+			walkexpr(&n.Left, &n.Ninit)
+		}
+
+		// make room for size & fn arguments.
+		adjustargs(n, 2*Widthptr)
+
+	case OFOR:
+		if n.Ntest != nil {
+			walkstmtlist(n.Ntest.Ninit)
+			init := n.Ntest.Ninit
+			n.Ntest.Ninit = nil
+			walkexpr(&n.Ntest, &init)
+			addinit(&n.Ntest, init)
+		}
+
+		walkstmt(&n.Nincr)
+		walkstmtlist(n.Nbody)
+
+	case OIF:
+		walkexpr(&n.Ntest, &n.Ninit)
+		walkstmtlist(n.Nbody)
+		walkstmtlist(n.Nelse)
+
+	case OPROC:
+		switch n.Left.Op {
+		case OPRINT, OPRINTN:
+			walkprintfunc(&n.Left, &n.Ninit)
+
+		case OCOPY:
+			n.Left = copyany(n.Left, &n.Ninit, 1)
+
+		default:
+			walkexpr(&n.Left, &n.Ninit)
+		}
+
+		// make room for size & fn arguments.
+		adjustargs(n, 2*Widthptr)
+
+	case ORETURN:
+		walkexprlist(n.List, &n.Ninit)
+		if n.List == nil {
+			break
+		}
+		if (Curfn.Type.Outnamed != 0 && count(n.List) > 1) || paramoutheap(Curfn) {
+			// assign to the function out parameters,
+			// so that reorder3 can fix up conflicts
+			var rl *NodeList
+
+			var cl uint8
+			for ll := Curfn.Func.Dcl; ll != nil; ll = ll.Next {
+				cl = ll.N.Class &^ PHEAP
+				if cl == PAUTO {
+					break
+				}
+				if cl == PPARAMOUT {
+					rl = list(rl, ll.N)
+				}
+			}
+
+			if samelist(rl, n.List) {
+				// special return in disguise
+				n.List = nil
+
+				break
+			}
+
+			if count(n.List) == 1 && count(rl) > 1 {
+				// OAS2FUNC in disguise
+				f := n.List.N
+
+				if f.Op != OCALLFUNC && f.Op != OCALLMETH && f.Op != OCALLINTER {
+					Fatal("expected return of call, have %v", f)
+				}
+				n.List = concat(list1(f), ascompatet(int(n.Op), rl, &f.Type, 0, &n.Ninit))
+				break
+			}
+
+			// move function calls out, to make reorder3's job easier.
+			walkexprlistsafe(n.List, &n.Ninit)
+
+			ll := ascompatee(int(n.Op), rl, n.List, &n.Ninit)
+			n.List = reorder3(ll)
+			break
+		}
+
+		ll := ascompatte(int(n.Op), nil, false, Getoutarg(Curfn.Type), n.List, 1, &n.Ninit)
+		n.List = ll
+
+	case ORETJMP:
+		break
+
+	case OSELECT:
+		walkselect(n)
+
+	case OSWITCH:
+		walkswitch(n)
+
+	case ORANGE:
+		walkrange(n)
+
+	case OXFALL:
+		Yyerror("fallthrough statement out of place")
+		n.Op = OFALL
+	}
+
+	if n.Op == ONAME {
+		Fatal("walkstmt ended up with name: %v", Nconv(n, obj.FmtSign))
+	}
+
+	*np = n
+}
+
+/*
+ * walk the whole tree of the body of an
+ * expression or simple statement.
+ * the types of expressions are calculated.
+ * compile-time constants are evaluated.
+ * complex side effects, such as statements, are appended to init.
+ */
+func walkexprlist(l *NodeList, init **NodeList) {
+	for ; l != nil; l = l.Next {
+		walkexpr(&l.N, init)
+	}
+}
+
+func walkexprlistsafe(l *NodeList, init **NodeList) {
+	for ; l != nil; l = l.Next {
+		l.N = safeexpr(l.N, init)
+		walkexpr(&l.N, init)
+	}
+}
+
+func walkexprlistcheap(l *NodeList, init **NodeList) {
+	for ; l != nil; l = l.Next {
+		l.N = cheapexpr(l.N, init)
+		walkexpr(&l.N, init)
+	}
+}
+
+func walkexpr(np **Node, init **NodeList) {
+	n := *np
+
+	if n == nil {
+		return
+	}
+
+	if init == &n.Ninit {
+		// not okay to use n.Ninit when walking n,
+		// because we might replace n with some other node
+		// and would lose the init list.
+		Fatal("walkexpr init == &n->ninit")
+	}
+
+	if n.Ninit != nil {
+		walkstmtlist(n.Ninit)
+		*init = concat(*init, n.Ninit)
+		n.Ninit = nil
+	}
+
+	// annoying case - not typechecked
+	if n.Op == OKEY {
+		walkexpr(&n.Left, init)
+		walkexpr(&n.Right, init)
+		return
+	}
+
+	lno := setlineno(n)
+
+	if Debug['w'] > 1 {
+		Dump("walk-before", n)
+	}
+
+	if n.Typecheck != 1 {
+		Fatal("missed typecheck: %v\n", Nconv(n, obj.FmtSign))
+	}
+
+	switch n.Op {
+	default:
+		Dump("walk", n)
+		Fatal("walkexpr: switch 1 unknown op %v", Nconv(n, obj.FmtShort|obj.FmtSign))
+
+	case OTYPE,
+		ONONAME,
+		OINDREG,
+		OEMPTY,
+		OPARAM,
+		OGETG:
+		goto ret
+
+	case ONOT,
+		OMINUS,
+		OPLUS,
+		OCOM,
+		OREAL,
+		OIMAG,
+		ODOTMETH,
+		ODOTINTER:
+		walkexpr(&n.Left, init)
+		goto ret
+
+	case OIND:
+		walkexpr(&n.Left, init)
+		goto ret
+
+	case ODOT:
+		usefield(n)
+		walkexpr(&n.Left, init)
+		goto ret
+
+	case ODOTPTR:
+		usefield(n)
+		if n.Op == ODOTPTR && n.Left.Type.Type.Width == 0 {
+			// No actual copy will be generated, so emit an explicit nil check.
+			n.Left = cheapexpr(n.Left, init)
+
+			checknil(n.Left, init)
+		}
+
+		walkexpr(&n.Left, init)
+		goto ret
+
+	case OEFACE:
+		walkexpr(&n.Left, init)
+		walkexpr(&n.Right, init)
+		goto ret
+
+	case OSPTR, OITAB:
+		walkexpr(&n.Left, init)
+		goto ret
+
+	case OLEN, OCAP:
+		walkexpr(&n.Left, init)
+
+		// replace len(*[10]int) with 10.
+		// delayed until now to preserve side effects.
+		t := n.Left.Type
+
+		if Isptr[t.Etype] {
+			t = t.Type
+		}
+		if Isfixedarray(t) {
+			safeexpr(n.Left, init)
+			Nodconst(n, n.Type, t.Bound)
+			n.Typecheck = 1
+		}
+
+		goto ret
+
+	case OLSH, ORSH:
+		walkexpr(&n.Left, init)
+		walkexpr(&n.Right, init)
+		t := n.Left.Type
+		n.Bounded = bounded(n.Right, 8*t.Width)
+		if Debug['m'] != 0 && n.Etype != 0 && !Isconst(n.Right, CTINT) {
+			Warn("shift bounds check elided")
+		}
+		goto ret
+
+		// Use results from call expression as arguments for complex.
+	case OAND,
+		OSUB,
+		OHMUL,
+		OLT,
+		OLE,
+		OGE,
+		OGT,
+		OADD,
+		OCOMPLEX,
+		OLROT:
+		if n.Op == OCOMPLEX && n.Left == nil && n.Right == nil {
+			n.Left = n.List.N
+			n.Right = n.List.Next.N
+		}
+
+		walkexpr(&n.Left, init)
+		walkexpr(&n.Right, init)
+		goto ret
+
+	case OOR, OXOR:
+		walkexpr(&n.Left, init)
+		walkexpr(&n.Right, init)
+		walkrotate(&n)
+		goto ret
+
+	case OEQ, ONE:
+		walkexpr(&n.Left, init)
+		walkexpr(&n.Right, init)
+
+		// Disable safemode while compiling this code: the code we
+		// generate internally can refer to unsafe.Pointer.
+		// For example, this can happen if we need to generate an ==
+		// for a struct containing a reflect.Value, which itself has
+		// an unexported field of type unsafe.Pointer.
+		old_safemode := safemode
+
+		safemode = 0
+		walkcompare(&n, init)
+		safemode = old_safemode
+		goto ret
+
+	case OANDAND, OOROR:
+		walkexpr(&n.Left, init)
+
+		// cannot put side effects from n.Right on init,
+		// because they cannot run before n.Left is checked.
+		// save elsewhere and store on the eventual n.Right.
+		var ll *NodeList
+
+		walkexpr(&n.Right, &ll)
+		addinit(&n.Right, ll)
+		goto ret
+
+	case OPRINT, OPRINTN:
+		walkexprlist(n.List, init)
+		n = walkprint(n, init)
+		goto ret
+
+	case OPANIC:
+		n = mkcall("gopanic", nil, init, n.Left)
+		goto ret
+
+	case ORECOVER:
+		n = mkcall("gorecover", n.Type, init, Nod(OADDR, nodfp, nil))
+		goto ret
+
+	case OLITERAL:
+		n.Addable = true
+		goto ret
+
+	case OCLOSUREVAR, OCFUNC:
+		n.Addable = true
+		goto ret
+
+	case ONAME:
+		if n.Class&PHEAP == 0 && n.Class != PPARAMREF {
+			n.Addable = true
+		}
+		goto ret
+
+	case OCALLINTER:
+		t := n.Left.Type
+		if n.List != nil && n.List.N.Op == OAS {
+			goto ret
+		}
+		walkexpr(&n.Left, init)
+		walkexprlist(n.List, init)
+		ll := ascompatte(int(n.Op), n, n.Isddd, getinarg(t), n.List, 0, init)
+		n.List = reorder1(ll)
+		goto ret
+
+	case OCALLFUNC:
+		if n.Left.Op == OCLOSURE {
+			// Transform direct call of a closure to call of a normal function.
+			// transformclosure already did all preparation work.
+
+			// Append captured variables to argument list.
+			n.List = concat(n.List, n.Left.Func.Enter)
+
+			n.Left.Func.Enter = nil
+
+			// Replace OCLOSURE with ONAME/PFUNC.
+			n.Left = n.Left.Closure.Nname
+
+			// Update type of OCALLFUNC node.
+			// The output arguments have not changed, but their offsets could have.
+			if n.Left.Type.Outtuple == 1 {
+				t := getoutargx(n.Left.Type).Type
+				if t.Etype == TFIELD {
+					t = t.Type
+				}
+				n.Type = t
+			} else {
+				n.Type = getoutargx(n.Left.Type)
+			}
+		}
+
+		t := n.Left.Type
+		if n.List != nil && n.List.N.Op == OAS {
+			goto ret
+		}
+
+		walkexpr(&n.Left, init)
+		walkexprlist(n.List, init)
+
+		if n.Left.Op == ONAME && n.Left.Sym.Name == "Sqrt" && n.Left.Sym.Pkg.Path == "math" {
+			switch Thearch.Thechar {
+			case '5', '6', '7':
+				n.Op = OSQRT
+				n.Left = n.List.N
+				n.List = nil
+				goto ret
+			}
+		}
+
+		ll := ascompatte(int(n.Op), n, n.Isddd, getinarg(t), n.List, 0, init)
+		n.List = reorder1(ll)
+		goto ret
+
+	case OCALLMETH:
+		t := n.Left.Type
+		if n.List != nil && n.List.N.Op == OAS {
+			goto ret
+		}
+		walkexpr(&n.Left, init)
+		walkexprlist(n.List, init)
+		ll := ascompatte(int(n.Op), n, false, getthis(t), list1(n.Left.Left), 0, init)
+		lr := ascompatte(int(n.Op), n, n.Isddd, getinarg(t), n.List, 0, init)
+		ll = concat(ll, lr)
+		n.Left.Left = nil
+		ullmancalc(n.Left)
+		n.List = reorder1(ll)
+		goto ret
+
+	case OAS:
+		*init = concat(*init, n.Ninit)
+		n.Ninit = nil
+
+		walkexpr(&n.Left, init)
+		n.Left = safeexpr(n.Left, init)
+
+		if oaslit(n, init) {
+			goto ret
+		}
+
+		if n.Right == nil || iszero(n.Right) && flag_race == 0 {
+			goto ret
+		}
+
+		switch n.Right.Op {
+		default:
+			walkexpr(&n.Right, init)
+
+		case ODOTTYPE:
+			// TODO(rsc): The Isfat is for consistency with componentgen and orderexpr.
+			// It needs to be removed in all three places.
+			// That would allow inlining x.(struct{*int}) the same as x.(*int).
+			if isdirectiface(n.Right.Type) && !Isfat(n.Right.Type) && flag_race == 0 {
+				// handled directly during cgen
+				walkexpr(&n.Right, init)
+				break
+			}
+
+			// x = i.(T); n.Left is x, n.Right.Left is i.
+			// orderstmt made sure x is addressable.
+			walkexpr(&n.Right.Left, init)
+
+			n1 := Nod(OADDR, n.Left, nil)
+			r := n.Right // i.(T)
+
+			if Debug_typeassert > 0 {
+				Warn("type assertion not inlined")
+			}
+
+			buf := "assert" + type2IET(r.Left.Type) + "2" + type2IET(r.Type)
+			fn := syslook(buf, 1)
+			substArgTypes(fn, r.Left.Type, r.Type)
+
+			n = mkcall1(fn, nil, init, typename(r.Type), r.Left, n1)
+			walkexpr(&n, init)
+			goto ret
+
+		case ORECV:
+			// x = <-c; n.Left is x, n.Right.Left is c.
+			// orderstmt made sure x is addressable.
+			walkexpr(&n.Right.Left, init)
+
+			n1 := Nod(OADDR, n.Left, nil)
+			r := n.Right.Left // the channel
+			n = mkcall1(chanfn("chanrecv1", 2, r.Type), nil, init, typename(r.Type), r, n1)
+			walkexpr(&n, init)
+			goto ret
+
+		case OAPPEND:
+			// x = append(...)
+			r := n.Right
+			if r.Isddd {
+				r = appendslice(r, init) // also works for append(slice, string).
+			} else {
+				r = walkappend(r, init, n)
+			}
+			n.Right = r
+			if r.Op == OAPPEND {
+				// Left in place for back end.
+				// Do not add a new write barrier.
+				goto ret
+			}
+			// Otherwise, lowered for race detector.
+			// Treat as ordinary assignment.
+		}
+
+		if n.Left != nil && n.Right != nil {
+			r := convas(Nod(OAS, n.Left, n.Right), init)
+			r.Dodata = n.Dodata
+			n = r
+			n = applywritebarrier(n, init)
+		}
+
+		goto ret
+
+	case OAS2:
+		*init = concat(*init, n.Ninit)
+		n.Ninit = nil
+		walkexprlistsafe(n.List, init)
+		walkexprlistsafe(n.Rlist, init)
+		ll := ascompatee(OAS, n.List, n.Rlist, init)
+		ll = reorder3(ll)
+		for lr := ll; lr != nil; lr = lr.Next {
+			lr.N = applywritebarrier(lr.N, init)
+		}
+		n = liststmt(ll)
+		goto ret
+
+		// a,b,... = fn()
+	case OAS2FUNC:
+		*init = concat(*init, n.Ninit)
+
+		n.Ninit = nil
+		r := n.Rlist.N
+		walkexprlistsafe(n.List, init)
+		walkexpr(&r, init)
+
+		ll := ascompatet(int(n.Op), n.List, &r.Type, 0, init)
+		for lr := ll; lr != nil; lr = lr.Next {
+			lr.N = applywritebarrier(lr.N, init)
+		}
+		n = liststmt(concat(list1(r), ll))
+		goto ret
+
+		// x, y = <-c
+	// orderstmt made sure x is addressable.
+	case OAS2RECV:
+		*init = concat(*init, n.Ninit)
+
+		n.Ninit = nil
+		r := n.Rlist.N
+		walkexprlistsafe(n.List, init)
+		walkexpr(&r.Left, init)
+		var n1 *Node
+		if isblank(n.List.N) {
+			n1 = nodnil()
+		} else {
+			n1 = Nod(OADDR, n.List.N, nil)
+		}
+		n1.Etype = 1 // addr does not escape
+		fn := chanfn("chanrecv2", 2, r.Left.Type)
+		r = mkcall1(fn, n.List.Next.N.Type, init, typename(r.Left.Type), r.Left, n1)
+		n = Nod(OAS, n.List.Next.N, r)
+		typecheck(&n, Etop)
+		goto ret
+
+		// a,b = m[i];
+	case OAS2MAPR:
+		*init = concat(*init, n.Ninit)
+
+		n.Ninit = nil
+		r := n.Rlist.N
+		walkexprlistsafe(n.List, init)
+		walkexpr(&r.Left, init)
+		walkexpr(&r.Right, init)
+		t := r.Left.Type
+		p := ""
+		if t.Type.Width <= 128 { // Check ../../runtime/hashmap.go:maxValueSize before changing.
+			switch Simsimtype(t.Down) {
+			case TINT32, TUINT32:
+				p = "mapaccess2_fast32"
+
+			case TINT64, TUINT64:
+				p = "mapaccess2_fast64"
+
+			case TSTRING:
+				p = "mapaccess2_faststr"
+			}
+		}
+
+		var key *Node
+		if p != "" {
+			// fast versions take key by value
+			key = r.Right
+		} else {
+			// standard version takes key by reference
+			// orderexpr made sure key is addressable.
+			key = Nod(OADDR, r.Right, nil)
+
+			p = "mapaccess2"
+		}
+
+		// from:
+		//   a,b = m[i]
+		// to:
+		//   var,b = mapaccess2*(t, m, i)
+		//   a = *var
+		a := n.List.N
+
+		fn := mapfn(p, t)
+		r = mkcall1(fn, getoutargx(fn.Type), init, typename(t), r.Left, key)
+
+		// mapaccess2* returns a typed bool, but due to spec changes,
+		// the boolean result of i.(T) is now untyped so we make it the
+		// same type as the variable on the lhs.
+		if !isblank(n.List.Next.N) {
+			r.Type.Type.Down.Type = n.List.Next.N.Type
+		}
+		n.Rlist = list1(r)
+		n.Op = OAS2FUNC
+
+		// don't generate a = *var if a is _
+		if !isblank(a) {
+			var_ := temp(Ptrto(t.Type))
+			var_.Typecheck = 1
+			n.List.N = var_
+			walkexpr(&n, init)
+			*init = list(*init, n)
+			n = Nod(OAS, a, Nod(OIND, var_, nil))
+		}
+
+		typecheck(&n, Etop)
+		walkexpr(&n, init)
+
+		// mapaccess needs a zero value to be at least this big.
+		if zerosize < t.Type.Width {
+			zerosize = t.Type.Width
+		}
+
+		// TODO: ptr is always non-nil, so disable nil check for this OIND op.
+		goto ret
+
+	case ODELETE:
+		*init = concat(*init, n.Ninit)
+		n.Ninit = nil
+		map_ := n.List.N
+		key := n.List.Next.N
+		walkexpr(&map_, init)
+		walkexpr(&key, init)
+
+		// orderstmt made sure key is addressable.
+		key = Nod(OADDR, key, nil)
+
+		t := map_.Type
+		n = mkcall1(mapfndel("mapdelete", t), nil, init, typename(t), map_, key)
+		goto ret
+
+	case OAS2DOTTYPE:
+		e := n.Rlist.N // i.(T)
+		// TODO(rsc): The Isfat is for consistency with componentgen and orderexpr.
+		// It needs to be removed in all three places.
+		// That would allow inlining x.(struct{*int}) the same as x.(*int).
+		if isdirectiface(e.Type) && !Isfat(e.Type) && flag_race == 0 {
+			// handled directly during gen.
+			walkexprlistsafe(n.List, init)
+			walkexpr(&e.Left, init)
+			goto ret
+		}
+
+		// res, ok = i.(T)
+		// orderstmt made sure a is addressable.
+		*init = concat(*init, n.Ninit)
+		n.Ninit = nil
+
+		walkexprlistsafe(n.List, init)
+		walkexpr(&e.Left, init)
+		t := e.Type    // T
+		from := e.Left // i
+
+		oktype := Types[TBOOL]
+		ok := n.List.Next.N
+		if !isblank(ok) {
+			oktype = ok.Type
+		}
+
+		fromKind := type2IET(from.Type)
+		toKind := type2IET(t)
+
+		// Avoid runtime calls in a few cases of the form _, ok := i.(T).
+		// This is faster and shorter and allows the corresponding assertX2X2
+		// routines to skip nil checks on their last argument.
+		if isblank(n.List.N) {
+			var fast *Node
+			switch {
+			case fromKind == "E" && toKind == "T":
+				tab := Nod(OITAB, from, nil) // type:eface::tab:iface
+				typ := Nod(OCONVNOP, typename(t), nil)
+				typ.Type = Ptrto(Types[TUINTPTR])
+				fast = Nod(OEQ, tab, typ)
+			case fromKind == "I" && toKind == "E",
+				fromKind == "E" && toKind == "E":
+				tab := Nod(OITAB, from, nil)
+				fast = Nod(ONE, nodnil(), tab)
+			}
+			if fast != nil {
+				if Debug_typeassert > 0 {
+					Warn("type assertion (ok only) inlined")
+				}
+				n = Nod(OAS, ok, fast)
+				typecheck(&n, Etop)
+				goto ret
+			}
+		}
+
+		var resptr *Node // &res
+		if isblank(n.List.N) {
+			resptr = nodnil()
+		} else {
+			resptr = Nod(OADDR, n.List.N, nil)
+		}
+		resptr.Etype = 1 // addr does not escape
+
+		if Debug_typeassert > 0 {
+			Warn("type assertion not inlined")
+		}
+		buf := "assert" + fromKind + "2" + toKind + "2"
+		fn := syslook(buf, 1)
+		substArgTypes(fn, from.Type, t)
+		call := mkcall1(fn, oktype, init, typename(t), from, resptr)
+		n = Nod(OAS, ok, call)
+		typecheck(&n, Etop)
+		goto ret
+
+	case ODOTTYPE, ODOTTYPE2:
+		if !isdirectiface(n.Type) || Isfat(n.Type) {
+			Fatal("walkexpr ODOTTYPE") // should see inside OAS only
+		}
+		walkexpr(&n.Left, init)
+		goto ret
+
+	case OCONVIFACE:
+		walkexpr(&n.Left, init)
+
+		// Optimize convT2E as a two-word copy when T is pointer-shaped.
+		if isnilinter(n.Type) && isdirectiface(n.Left.Type) {
+			l := Nod(OEFACE, typename(n.Left.Type), n.Left)
+			l.Type = n.Type
+			l.Typecheck = n.Typecheck
+			n = l
+			goto ret
+		}
+
+		// Build name of function: convI2E etc.
+		// Not all names are possible
+		// (e.g., we'll never generate convE2E or convE2I).
+		buf := "conv" + type2IET(n.Left.Type) + "2" + type2IET(n.Type)
+		fn := syslook(buf, 1)
+		var ll *NodeList
+		if !Isinter(n.Left.Type) {
+			ll = list(ll, typename(n.Left.Type))
+		}
+		if !isnilinter(n.Type) {
+			ll = list(ll, typename(n.Type))
+		}
+		if !Isinter(n.Left.Type) && !isnilinter(n.Type) {
+			sym := Pkglookup(Tconv(n.Left.Type, obj.FmtLeft)+"."+Tconv(n.Type, obj.FmtLeft), itabpkg)
+			if sym.Def == nil {
+				l := Nod(ONAME, nil, nil)
+				l.Sym = sym
+				l.Type = Ptrto(Types[TUINT8])
+				l.Addable = true
+				l.Class = PEXTERN
+				l.Xoffset = 0
+				sym.Def = l
+				ggloblsym(sym, int32(Widthptr), obj.DUPOK|obj.NOPTR)
+			}
+
+			l := Nod(OADDR, sym.Def, nil)
+			l.Addable = true
+			ll = list(ll, l)
+
+			if isdirectiface(n.Left.Type) {
+				/* For pointer types, we can make a special form of optimization
+				 *
+				 * These statements are put onto the expression init list:
+				 * 	Itab *tab = atomicloadtype(&cache);
+				 * 	if(tab == nil)
+				 * 		tab = typ2Itab(type, itype, &cache);
+				 *
+				 * The CONVIFACE expression is replaced with this:
+				 * 	OEFACE{tab, ptr};
+				 */
+				l := temp(Ptrto(Types[TUINT8]))
+
+				n1 := Nod(OAS, l, sym.Def)
+				typecheck(&n1, Etop)
+				*init = list(*init, n1)
+
+				fn := syslook("typ2Itab", 1)
+				n1 = Nod(OCALL, fn, nil)
+				n1.List = ll
+				typecheck(&n1, Erv)
+				walkexpr(&n1, init)
+
+				n2 := Nod(OIF, nil, nil)
+				n2.Ntest = Nod(OEQ, l, nodnil())
+				n2.Nbody = list1(Nod(OAS, l, n1))
+				n2.Likely = -1
+				typecheck(&n2, Etop)
+				*init = list(*init, n2)
+
+				l = Nod(OEFACE, l, n.Left)
+				l.Typecheck = n.Typecheck
+				l.Type = n.Type
+				n = l
+				goto ret
+			}
+		}
+
+		if Isinter(n.Left.Type) {
+			ll = list(ll, n.Left)
+		} else {
+			// regular types are passed by reference to avoid C vararg calls
+			// orderexpr arranged for n.Left to be a temporary for all
+			// the conversions it could see. comparison of an interface
+			// with a non-interface, especially in a switch on interface value
+			// with non-interface cases, is not visible to orderstmt, so we
+			// have to fall back on allocating a temp here.
+			if islvalue(n.Left) {
+				ll = list(ll, Nod(OADDR, n.Left, nil))
+			} else {
+				ll = list(ll, Nod(OADDR, copyexpr(n.Left, n.Left.Type, init), nil))
+			}
+			dowidth(n.Left.Type)
+			r := nodnil()
+			if n.Esc == EscNone && n.Left.Type.Width <= 1024 {
+				// Allocate stack buffer for value stored in interface.
+				r = temp(n.Left.Type)
+				r = Nod(OAS, r, nil) // zero temp
+				typecheck(&r, Etop)
+				*init = list(*init, r)
+				r = Nod(OADDR, r.Left, nil)
+				typecheck(&r, Erv)
+			}
+			ll = list(ll, r)
+		}
+
+		if !Isinter(n.Left.Type) {
+			substArgTypes(fn, n.Left.Type, n.Left.Type, n.Type)
+		} else {
+			substArgTypes(fn, n.Left.Type, n.Type)
+		}
+		dowidth(fn.Type)
+		n = Nod(OCALL, fn, nil)
+		n.List = ll
+		typecheck(&n, Erv)
+		walkexpr(&n, init)
+		goto ret
+
+	case OCONV, OCONVNOP:
+		if Thearch.Thechar == '5' {
+			if Isfloat[n.Left.Type.Etype] {
+				if n.Type.Etype == TINT64 {
+					n = mkcall("float64toint64", n.Type, init, conv(n.Left, Types[TFLOAT64]))
+					goto ret
+				}
+
+				if n.Type.Etype == TUINT64 {
+					n = mkcall("float64touint64", n.Type, init, conv(n.Left, Types[TFLOAT64]))
+					goto ret
+				}
+			}
+
+			if Isfloat[n.Type.Etype] {
+				if n.Left.Type.Etype == TINT64 {
+					n = mkcall("int64tofloat64", n.Type, init, conv(n.Left, Types[TINT64]))
+					goto ret
+				}
+
+				if n.Left.Type.Etype == TUINT64 {
+					n = mkcall("uint64tofloat64", n.Type, init, conv(n.Left, Types[TUINT64]))
+					goto ret
+				}
+			}
+		}
+
+		walkexpr(&n.Left, init)
+		goto ret
+
+	case OANDNOT:
+		walkexpr(&n.Left, init)
+		n.Op = OAND
+		n.Right = Nod(OCOM, n.Right, nil)
+		typecheck(&n.Right, Erv)
+		walkexpr(&n.Right, init)
+		goto ret
+
+	case OMUL:
+		walkexpr(&n.Left, init)
+		walkexpr(&n.Right, init)
+		walkmul(&n, init)
+		goto ret
+
+	case ODIV, OMOD:
+		walkexpr(&n.Left, init)
+		walkexpr(&n.Right, init)
+
+		/*
+		 * rewrite complex div into function call.
+		 */
+		et := int(n.Left.Type.Etype)
+
+		if Iscomplex[et] && n.Op == ODIV {
+			t := n.Type
+			n = mkcall("complex128div", Types[TCOMPLEX128], init, conv(n.Left, Types[TCOMPLEX128]), conv(n.Right, Types[TCOMPLEX128]))
+			n = conv(n, t)
+			goto ret
+		}
+
+		// Nothing to do for float divisions.
+		if Isfloat[et] {
+			goto ret
+		}
+
+		// Try rewriting as shifts or magic multiplies.
+		walkdiv(&n, init)
+
+		/*
+		 * rewrite 64-bit div and mod into function calls
+		 * on 32-bit architectures.
+		 */
+		switch n.Op {
+		case OMOD, ODIV:
+			if Widthreg >= 8 || (et != TUINT64 && et != TINT64) {
+				goto ret
+			}
+			var fn string
+			if et == TINT64 {
+				fn = "int64"
+			} else {
+				fn = "uint64"
+			}
+			if n.Op == ODIV {
+				fn += "div"
+			} else {
+				fn += "mod"
+			}
+			n = mkcall(fn, n.Type, init, conv(n.Left, Types[et]), conv(n.Right, Types[et]))
+
+		default:
+			break
+		}
+
+		goto ret
+
+	case OINDEX:
+		walkexpr(&n.Left, init)
+
+		// save the original node for bounds checking elision.
+		// If it was a ODIV/OMOD walk might rewrite it.
+		r := n.Right
+
+		walkexpr(&n.Right, init)
+
+		// if range of type cannot exceed static array bound,
+		// disable bounds check.
+		if n.Bounded {
+			goto ret
+		}
+		t := n.Left.Type
+		if t != nil && Isptr[t.Etype] {
+			t = t.Type
+		}
+		if Isfixedarray(t) {
+			n.Bounded = bounded(r, t.Bound)
+			if Debug['m'] != 0 && n.Bounded && !Isconst(n.Right, CTINT) {
+				Warn("index bounds check elided")
+			}
+			if Smallintconst(n.Right) && !n.Bounded {
+				Yyerror("index out of bounds")
+			}
+		} else if Isconst(n.Left, CTSTR) {
+			n.Bounded = bounded(r, int64(len(n.Left.Val.U.(string))))
+			if Debug['m'] != 0 && n.Bounded && !Isconst(n.Right, CTINT) {
+				Warn("index bounds check elided")
+			}
+			if Smallintconst(n.Right) {
+				if !n.Bounded {
+					Yyerror("index out of bounds")
+				} else {
+					// replace "abc"[1] with 'b'.
+					// delayed until now because "abc"[1] is not
+					// an ideal constant.
+					v := Mpgetfix(n.Right.Val.U.(*Mpint))
+
+					Nodconst(n, n.Type, int64(n.Left.Val.U.(string)[v]))
+					n.Typecheck = 1
+				}
+			}
+		}
+
+		if Isconst(n.Right, CTINT) {
+			if Mpcmpfixfix(n.Right.Val.U.(*Mpint), &mpzero) < 0 || Mpcmpfixfix(n.Right.Val.U.(*Mpint), Maxintval[TINT]) > 0 {
+				Yyerror("index out of bounds")
+			}
+		}
+		goto ret
+
+	case OINDEXMAP:
+		if n.Etype == 1 {
+			goto ret
+		}
+		walkexpr(&n.Left, init)
+		walkexpr(&n.Right, init)
+
+		t := n.Left.Type
+		p := ""
+		if t.Type.Width <= 128 { // Check ../../runtime/hashmap.go:maxValueSize before changing.
+			switch Simsimtype(t.Down) {
+			case TINT32, TUINT32:
+				p = "mapaccess1_fast32"
+
+			case TINT64, TUINT64:
+				p = "mapaccess1_fast64"
+
+			case TSTRING:
+				p = "mapaccess1_faststr"
+			}
+		}
+
+		var key *Node
+		if p != "" {
+			// fast versions take key by value
+			key = n.Right
+		} else {
+			// standard version takes key by reference.
+			// orderexpr made sure key is addressable.
+			key = Nod(OADDR, n.Right, nil)
+
+			p = "mapaccess1"
+		}
+
+		n = mkcall1(mapfn(p, t), Ptrto(t.Type), init, typename(t), n.Left, key)
+		n = Nod(OIND, n, nil)
+		n.Type = t.Type
+		n.Typecheck = 1
+
+		// mapaccess needs a zero value to be at least this big.
+		if zerosize < t.Type.Width {
+			zerosize = t.Type.Width
+		}
+		goto ret
+
+	case ORECV:
+		Fatal("walkexpr ORECV") // should see inside OAS only
+
+	case OSLICE, OSLICEARR, OSLICESTR:
+		walkexpr(&n.Left, init)
+		walkexpr(&n.Right.Left, init)
+		if n.Right.Left != nil && iszero(n.Right.Left) {
+			// Reduce x[0:j] to x[:j].
+			n.Right.Left = nil
+		}
+		walkexpr(&n.Right.Right, init)
+		n = reduceSlice(n)
+		goto ret
+
+	case OSLICE3, OSLICE3ARR:
+		walkexpr(&n.Left, init)
+		walkexpr(&n.Right.Left, init)
+		if n.Right.Left != nil && iszero(n.Right.Left) {
+			// Reduce x[0:j:k] to x[:j:k].
+			n.Right.Left = nil
+		}
+		walkexpr(&n.Right.Right.Left, init)
+		walkexpr(&n.Right.Right.Right, init)
+
+		r := n.Right.Right.Right
+		if r != nil && r.Op == OCAP && samesafeexpr(n.Left, r.Left) {
+			// Reduce x[i:j:cap(x)] to x[i:j].
+			n.Right.Right = n.Right.Right.Left
+			if n.Op == OSLICE3 {
+				n.Op = OSLICE
+			} else {
+				n.Op = OSLICEARR
+			}
+			n = reduceSlice(n)
+			goto ret
+		}
+		goto ret
+
+	case OADDR:
+		walkexpr(&n.Left, init)
+		goto ret
+
+	case ONEW:
+		if n.Esc == EscNone && n.Type.Type.Width < 1<<16 {
+			r := temp(n.Type.Type)
+			r = Nod(OAS, r, nil) // zero temp
+			typecheck(&r, Etop)
+			*init = list(*init, r)
+			r = Nod(OADDR, r.Left, nil)
+			typecheck(&r, Erv)
+			n = r
+		} else {
+			n = callnew(n.Type.Type)
+		}
+
+		goto ret
+
+		// If one argument to the comparison is an empty string,
+	// comparing the lengths instead will yield the same result
+	// without the function call.
+	case OCMPSTR:
+		if (Isconst(n.Left, CTSTR) && len(n.Left.Val.U.(string)) == 0) || (Isconst(n.Right, CTSTR) && len(n.Right.Val.U.(string)) == 0) {
+			r := Nod(int(n.Etype), Nod(OLEN, n.Left, nil), Nod(OLEN, n.Right, nil))
+			typecheck(&r, Erv)
+			walkexpr(&r, init)
+			r.Type = n.Type
+			n = r
+			goto ret
+		}
+
+		// s + "badgerbadgerbadger" == "badgerbadgerbadger"
+		if (n.Etype == OEQ || n.Etype == ONE) && Isconst(n.Right, CTSTR) && n.Left.Op == OADDSTR && count(n.Left.List) == 2 && Isconst(n.Left.List.Next.N, CTSTR) && cmpslit(n.Right, n.Left.List.Next.N) == 0 {
+			r := Nod(int(n.Etype), Nod(OLEN, n.Left.List.N, nil), Nodintconst(0))
+			typecheck(&r, Erv)
+			walkexpr(&r, init)
+			r.Type = n.Type
+			n = r
+			goto ret
+		}
+
+		var r *Node
+		if n.Etype == OEQ || n.Etype == ONE {
+			// prepare for rewrite below
+			n.Left = cheapexpr(n.Left, init)
+
+			n.Right = cheapexpr(n.Right, init)
+
+			r = mkcall("eqstring", Types[TBOOL], init, conv(n.Left, Types[TSTRING]), conv(n.Right, Types[TSTRING]))
+
+			// quick check of len before full compare for == or !=
+			// eqstring assumes that the lengths are equal
+			if n.Etype == OEQ {
+				// len(left) == len(right) && eqstring(left, right)
+				r = Nod(OANDAND, Nod(OEQ, Nod(OLEN, n.Left, nil), Nod(OLEN, n.Right, nil)), r)
+			} else {
+				// len(left) != len(right) || !eqstring(left, right)
+				r = Nod(ONOT, r, nil)
+
+				r = Nod(OOROR, Nod(ONE, Nod(OLEN, n.Left, nil), Nod(OLEN, n.Right, nil)), r)
+			}
+
+			typecheck(&r, Erv)
+			walkexpr(&r, nil)
+		} else {
+			// rewrite as a call: cmpstring(s1, s2) <op> 0
+			r = mkcall("cmpstring", Types[TINT], init, conv(n.Left, Types[TSTRING]), conv(n.Right, Types[TSTRING]))
+
+			r = Nod(int(n.Etype), r, Nodintconst(0))
+		}
+
+		typecheck(&r, Erv)
+		if n.Type.Etype != TBOOL {
+			Fatal("cmp %v", n.Type)
+		}
+		r.Type = n.Type
+		n = r
+		goto ret
+
+	case OADDSTR:
+		n = addstr(n, init)
+		goto ret
+
+	case OAPPEND:
+		// order should make sure we only see OAS(node, OAPPEND), which we handle above.
+		Fatal("append outside assignment")
+
+	case OCOPY:
+		n = copyany(n, init, flag_race)
+		goto ret
+
+		// cannot use chanfn - closechan takes any, not chan any
+	case OCLOSE:
+		fn := syslook("closechan", 1)
+
+		substArgTypes(fn, n.Left.Type)
+		n = mkcall1(fn, nil, init, n.Left)
+		goto ret
+
+	case OMAKECHAN:
+		n = mkcall1(chanfn("makechan", 1, n.Type), n.Type, init, typename(n.Type), conv(n.Left, Types[TINT64]))
+		goto ret
+
+	case OMAKEMAP:
+		t := n.Type
+
+		fn := syslook("makemap", 1)
+
+		a := nodnil() // hmap buffer
+		r := nodnil() // bucket buffer
+		if n.Esc == EscNone {
+			// Allocate hmap buffer on stack.
+			var_ := temp(hmap(t))
+
+			a = Nod(OAS, var_, nil) // zero temp
+			typecheck(&a, Etop)
+			*init = list(*init, a)
+			a = Nod(OADDR, var_, nil)
+
+			// Allocate one bucket on stack.
+			// Maximum key/value size is 128 bytes, larger objects
+			// are stored with an indirection. So max bucket size is 2048+eps.
+			var_ = temp(mapbucket(t))
+
+			r = Nod(OAS, var_, nil) // zero temp
+			typecheck(&r, Etop)
+			*init = list(*init, r)
+			r = Nod(OADDR, var_, nil)
+		}
+
+		substArgTypes(fn, hmap(t), mapbucket(t), t.Down, t.Type)
+		n = mkcall1(fn, n.Type, init, typename(n.Type), conv(n.Left, Types[TINT64]), a, r)
+		goto ret
+
+	case OMAKESLICE:
+		l := n.Left
+		r := n.Right
+		if r == nil {
+			r = safeexpr(l, init)
+			l = r
+		}
+		t := n.Type
+		if n.Esc == EscNone && Smallintconst(l) && Smallintconst(r) && (t.Type.Width == 0 || Mpgetfix(r.Val.U.(*Mpint)) < (1<<16)/t.Type.Width) {
+			// var arr [r]T
+			// n = arr[:l]
+			t = aindex(r, t.Type) // [r]T
+			var_ := temp(t)
+			a := Nod(OAS, var_, nil) // zero temp
+			typecheck(&a, Etop)
+			*init = list(*init, a)
+			r := Nod(OSLICE, var_, Nod(OKEY, nil, l)) // arr[:l]
+			r = conv(r, n.Type)                       // in case n.Type is named.
+			typecheck(&r, Erv)
+			walkexpr(&r, init)
+			n = r
+		} else {
+			// makeslice(t *Type, nel int64, max int64) (ary []any)
+			fn := syslook("makeslice", 1)
+
+			substArgTypes(fn, t.Type) // any-1
+			n = mkcall1(fn, n.Type, init, typename(n.Type), conv(l, Types[TINT64]), conv(r, Types[TINT64]))
+		}
+
+		goto ret
+
+	case ORUNESTR:
+		a := nodnil()
+		if n.Esc == EscNone {
+			t := aindex(Nodintconst(4), Types[TUINT8])
+			var_ := temp(t)
+			a = Nod(OADDR, var_, nil)
+		}
+
+		// intstring(*[4]byte, rune)
+		n = mkcall("intstring", n.Type, init, a, conv(n.Left, Types[TINT64]))
+
+		goto ret
+
+	case OARRAYBYTESTR:
+		a := nodnil()
+		if n.Esc == EscNone {
+			// Create temporary buffer for string on stack.
+			t := aindex(Nodintconst(tmpstringbufsize), Types[TUINT8])
+
+			a = Nod(OADDR, temp(t), nil)
+		}
+
+		// slicebytetostring(*[32]byte, []byte) string;
+		n = mkcall("slicebytetostring", n.Type, init, a, n.Left)
+
+		goto ret
+
+		// slicebytetostringtmp([]byte) string;
+	case OARRAYBYTESTRTMP:
+		n = mkcall("slicebytetostringtmp", n.Type, init, n.Left)
+
+		goto ret
+
+		// slicerunetostring(*[32]byte, []rune) string;
+	case OARRAYRUNESTR:
+		a := nodnil()
+
+		if n.Esc == EscNone {
+			// Create temporary buffer for string on stack.
+			t := aindex(Nodintconst(tmpstringbufsize), Types[TUINT8])
+
+			a = Nod(OADDR, temp(t), nil)
+		}
+
+		n = mkcall("slicerunetostring", n.Type, init, a, n.Left)
+		goto ret
+
+		// stringtoslicebyte(*[32]byte, string) []byte;
+	case OSTRARRAYBYTE:
+		a := nodnil()
+
+		if n.Esc == EscNone {
+			// Create temporary buffer for slice on stack.
+			t := aindex(Nodintconst(tmpstringbufsize), Types[TUINT8])
+
+			a = Nod(OADDR, temp(t), nil)
+		}
+
+		n = mkcall("stringtoslicebyte", n.Type, init, a, conv(n.Left, Types[TSTRING]))
+		goto ret
+
+		// stringtoslicebytetmp(string) []byte;
+	case OSTRARRAYBYTETMP:
+		n = mkcall("stringtoslicebytetmp", n.Type, init, conv(n.Left, Types[TSTRING]))
+
+		goto ret
+
+		// stringtoslicerune(*[32]rune, string) []rune
+	case OSTRARRAYRUNE:
+		a := nodnil()
+
+		if n.Esc == EscNone {
+			// Create temporary buffer for slice on stack.
+			t := aindex(Nodintconst(tmpstringbufsize), Types[TINT32])
+
+			a = Nod(OADDR, temp(t), nil)
+		}
+
+		n = mkcall("stringtoslicerune", n.Type, init, a, n.Left)
+		goto ret
+
+		// ifaceeq(i1 any-1, i2 any-2) (ret bool);
+	case OCMPIFACE:
+		if !Eqtype(n.Left.Type, n.Right.Type) {
+			Fatal("ifaceeq %v %v %v", Oconv(int(n.Op), 0), n.Left.Type, n.Right.Type)
+		}
+		var fn *Node
+		if isnilinter(n.Left.Type) {
+			fn = syslook("efaceeq", 1)
+		} else {
+			fn = syslook("ifaceeq", 1)
+		}
+
+		n.Right = cheapexpr(n.Right, init)
+		n.Left = cheapexpr(n.Left, init)
+		substArgTypes(fn, n.Right.Type, n.Left.Type)
+		r := mkcall1(fn, n.Type, init, n.Left, n.Right)
+		if n.Etype == ONE {
+			r = Nod(ONOT, r, nil)
+		}
+
+		// check itable/type before full compare.
+		if n.Etype == OEQ {
+			r = Nod(OANDAND, Nod(OEQ, Nod(OITAB, n.Left, nil), Nod(OITAB, n.Right, nil)), r)
+		} else {
+			r = Nod(OOROR, Nod(ONE, Nod(OITAB, n.Left, nil), Nod(OITAB, n.Right, nil)), r)
+		}
+		typecheck(&r, Erv)
+		walkexpr(&r, init)
+		r.Type = n.Type
+		n = r
+		goto ret
+
+	case OARRAYLIT, OMAPLIT, OSTRUCTLIT, OPTRLIT:
+		var_ := temp(n.Type)
+		anylit(0, n, var_, init)
+		n = var_
+		goto ret
+
+	case OSEND:
+		n1 := n.Right
+		n1 = assignconv(n1, n.Left.Type.Type, "chan send")
+		walkexpr(&n1, init)
+		n1 = Nod(OADDR, n1, nil)
+		n = mkcall1(chanfn("chansend1", 2, n.Left.Type), nil, init, typename(n.Left.Type), n.Left, n1)
+		goto ret
+
+	case OCLOSURE:
+		n = walkclosure(n, init)
+		goto ret
+
+	case OCALLPART:
+		n = walkpartialcall(n, init)
+		goto ret
+	}
+
+	Fatal("missing switch %v", Oconv(int(n.Op), 0))
+
+	// Expressions that are constant at run time but not
+	// considered const by the language spec are not turned into
+	// constants until walk. For example, if n is y%1 == 0, the
+	// walk of y%1 may have replaced it by 0.
+	// Check whether n with its updated args is itself now a constant.
+ret:
+	t := n.Type
+
+	evconst(n)
+	n.Type = t
+	if n.Op == OLITERAL {
+		typecheck(&n, Erv)
+	}
+
+	ullmancalc(n)
+
+	if Debug['w'] != 0 && n != nil {
+		Dump("walk", n)
+	}
+
+	lineno = lno
+	*np = n
+}
+
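+// reduceSlice drops redundant bounds from the slice expression n:
+// x[i:len(x)] becomes x[i:], and a resulting x[:] reduces to x itself.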
+func reduceSlice(n *Node) *Node {
+	r := n.Right.Right
+	if r != nil && r.Op == OLEN && samesafeexpr(n.Left, r.Left) {
+		// Reduce x[i:len(x)] to x[i:].
+		n.Right.Right = nil
+	}
+	if (n.Op == OSLICE || n.Op == OSLICESTR) && n.Right.Left == nil && n.Right.Right == nil {
+		// Reduce x[:] to x.
+		if Debug_slice > 0 {
+			Warn("slice: omit slice operation")
+		}
+		return n.Left
+	}
+	return n
+}
+
+func ascompatee1(op int, l *Node, r *Node, init **NodeList) *Node {
+	// convas will turn map assigns into function calls,
+	// making it impossible for reorder3 to work.
+	n := Nod(OAS, l, r)
+
+	if l.Op == OINDEXMAP {
+		return n
+	}
+
+	return convas(n, init)
+}
+
+func ascompatee(op int, nl *NodeList, nr *NodeList, init **NodeList) *NodeList {
+	/*
+	 * check assign expression list to
+	 * an expression list. called in
+	 *	expr-list = expr-list
+	 */
+
+	// ensure order of evaluation for function calls
+	for ll := nl; ll != nil; ll = ll.Next {
+		ll.N = safeexpr(ll.N, init)
+	}
+	for lr := nr; lr != nil; lr = lr.Next {
+		lr.N = safeexpr(lr.N, init)
+	}
+
+	var nn *NodeList
+	ll := nl
+	lr := nr
+	for ; ll != nil && lr != nil; ll, lr = ll.Next, lr.Next {
+		// Do not generate 'x = x' during return. See issue 4014.
+		if op == ORETURN && ll.N == lr.N {
+			continue
+		}
+		nn = list(nn, ascompatee1(op, ll.N, lr.N, init))
+	}
+
+	// cannot happen: caller checked that lists had same length
+	if ll != nil || lr != nil {
+		Yyerror("error in shape across %v %v %v / %d %d [%s]", Hconv(nl, obj.FmtSign), Oconv(int(op), 0), Hconv(nr, obj.FmtSign), count(nl), count(nr), Curfn.Nname.Sym.Name)
+	}
+	return nn
+}
+
+/*
+ * l is an lvalue and rt is the type of an rvalue.
+ * return true if assigning an rvalue of type rt to l
+ * implies a function call: either in evaluating the
+ * lvalue or in the conversion between the types.
+ */
+func fncall(l *Node, rt *Type) bool {
+	if l.Ullman >= UINF || l.Op == OINDEXMAP {
+		return true
+	}
+	var r Node
+	if needwritebarrier(l, &r) {
+		return true
+	}
+	if Eqtype(l.Type, rt) {
+		return false
+	}
+	return true
+}
+
+func ascompatet(op int, nl *NodeList, nr **Type, fp int, init **NodeList) *NodeList {
+	var l *Node
+	var tmp *Node
+	var a *Node
+	var ll *NodeList
+	var saver Iter
+
+	/*
+	 * check assign type list to
+	 * an expression list. called in
+	 *	expr-list = func()
+	 */
+	r := Structfirst(&saver, nr)
+
+	var nn *NodeList
+	var mm *NodeList
+	ucount := 0
+	for ll = nl; ll != nil; ll = ll.Next {
+		if r == nil {
+			break
+		}
+		l = ll.N
+		if isblank(l) {
+			r = structnext(&saver)
+			continue
+		}
+
+		// any lv that causes a fn call must be
+		// deferred until all the return arguments
+		// have been pulled from the output arguments
+		if fncall(l, r.Type) {
+			tmp = temp(r.Type)
+			typecheck(&tmp, Erv)
+			a = Nod(OAS, l, tmp)
+			a = convas(a, init)
+			mm = list(mm, a)
+			l = tmp
+		}
+
+		a = Nod(OAS, l, nodarg(r, fp))
+		a = convas(a, init)
+		ullmancalc(a)
+		if a.Ullman >= UINF {
+			Dump("ascompatet ucount", a)
+			ucount++
+		}
+
+		nn = list(nn, a)
+		r = structnext(&saver)
+	}
+
+	if ll != nil || r != nil {
+		Yyerror("ascompatet: assignment count mismatch: %d = %d", count(nl), structcount(*nr))
+	}
+
+	if ucount != 0 {
+		Fatal("ascompatet: too many function calls evaluating parameters")
+	}
+	return concat(nn, mm)
+}
+
+/*
+ * package all the arguments that match a ... T parameter into a []T.
+ */
+func mkdotargslice(lr0 *NodeList, nn *NodeList, l *Type, fp int, init **NodeList, ddd *Node) *NodeList {
+	esc := uint16(EscUnknown)
+	if ddd != nil {
+		esc = ddd.Esc
+	}
+
+	tslice := typ(TARRAY)
+	tslice.Type = l.Type.Type
+	tslice.Bound = -1
+
+	var n *Node
+	if count(lr0) == 0 {
+		n = nodnil()
+		n.Type = tslice
+	} else {
+		n = Nod(OCOMPLIT, nil, typenod(tslice))
+		if ddd != nil {
+			n.Alloc = ddd.Alloc // temporary to use
+		}
+		n.List = lr0
+		n.Esc = esc
+		typecheck(&n, Erv)
+		if n.Type == nil {
+			Fatal("mkdotargslice: typecheck failed")
+		}
+		walkexpr(&n, init)
+	}
+
+	a := Nod(OAS, nodarg(l, fp), n)
+	nn = list(nn, convas(a, init))
+	return nn
+}
+
+/*
+ * helpers for shape errors
+ */
+func dumptypes(nl **Type, what string) string {
+	var savel Iter
+
+	fmt_ := ""
+	fmt_ += "\t"
+	first := 1
+	for l := Structfirst(&savel, nl); l != nil; l = structnext(&savel) {
+		if first != 0 {
+			first = 0
+		} else {
+			fmt_ += ", "
+		}
+		fmt_ += Tconv(l, 0)
+	}
+
+	if first != 0 {
+		fmt_ += fmt.Sprintf("[no arguments %s]", what)
+	}
+	return fmt_
+}
+
+func dumpnodetypes(l *NodeList, what string) string {
+	var r *Node
+
+	fmt_ := ""
+	fmt_ += "\t"
+	first := 1
+	for ; l != nil; l = l.Next {
+		r = l.N
+		if first != 0 {
+			first = 0
+		} else {
+			fmt_ += ", "
+		}
+		fmt_ += Tconv(r.Type, 0)
+	}
+
+	if first != 0 {
+		fmt_ += fmt.Sprintf("[no arguments %s]", what)
+	}
+	return fmt_
+}
+
+/*
+ * check assign expression list to
+ * a type list. called in
+ *	return expr-list
+ *	func(expr-list)
+ */
+func ascompatte(op int, call *Node, isddd bool, nl **Type, lr *NodeList, fp int, init **NodeList) *NodeList {
+	var savel Iter
+
+	lr0 := lr
+	l := Structfirst(&savel, nl)
+	var r *Node
+	if lr != nil {
+		r = lr.N
+	}
+	var nn *NodeList
+
+	// f(g()) where g has multiple return values
+	var a *Node
+	var l2 string
+	var ll *Type
+	var l1 string
+	if r != nil && lr.Next == nil && r.Type.Etype == TSTRUCT && r.Type.Funarg != 0 {
+		// optimization - can do block copy
+		if eqtypenoname(r.Type, *nl) {
+			a := nodarg(*nl, fp)
+			r = Nod(OCONVNOP, r, nil)
+			r.Type = a.Type
+			nn = list1(convas(Nod(OAS, a, r), init))
+			goto ret
+		}
+
+		// conversions involved.
+		// copy into temporaries.
+		var alist *NodeList
+
+		for l := Structfirst(&savel, &r.Type); l != nil; l = structnext(&savel) {
+			a = temp(l.Type)
+			alist = list(alist, a)
+		}
+
+		a = Nod(OAS2, nil, nil)
+		a.List = alist
+		a.Rlist = lr
+		typecheck(&a, Etop)
+		walkstmt(&a)
+		*init = list(*init, a)
+		lr = alist
+		r = lr.N
+		l = Structfirst(&savel, nl)
+	}
+
+loop:
+	if l != nil && l.Isddd {
+		// the ddd parameter must be last
+		ll = structnext(&savel)
+
+		if ll != nil {
+			Yyerror("... must be last argument")
+		}
+
+		// special case --
+		// only when we are assigning a single ddd
+		// argument to a ddd parameter is it
+		// passed through unencapsulated.
+		if r != nil && lr.Next == nil && isddd && Eqtype(l.Type, r.Type) {
+			a = Nod(OAS, nodarg(l, fp), r)
+			a = convas(a, init)
+			nn = list(nn, a)
+			goto ret
+		}
+
+		// normal case -- make a slice of all
+		// remaining arguments and pass it to
+		// the ddd parameter.
+		nn = mkdotargslice(lr, nn, l, fp, init, call.Right)
+
+		goto ret
+	}
+
+	if l == nil || r == nil {
+		if l != nil || r != nil {
+			l1 = dumptypes(nl, "expected")
+			l2 = dumpnodetypes(lr0, "given")
+			if l != nil {
+				Yyerror("not enough arguments to %v\n%s\n%s", Oconv(int(op), 0), l1, l2)
+			} else {
+				Yyerror("too many arguments to %v\n%s\n%s", Oconv(int(op), 0), l1, l2)
+			}
+		}
+
+		goto ret
+	}
+
+	a = Nod(OAS, nodarg(l, fp), r)
+	a = convas(a, init)
+	nn = list(nn, a)
+
+	l = structnext(&savel)
+	r = nil
+	lr = lr.Next
+	if lr != nil {
+		r = lr.N
+	}
+	goto loop
+
+ret:
+	for lr = nn; lr != nil; lr = lr.Next {
+		lr.N.Typecheck = 1
+	}
+	return nn
+}
+
+// generate code for print
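+// for example, println(x, y) with integer arguments becomes, roughly:
+//	printlock(); printint(x); printsp(); printint(y); printnl(); printunlock()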
+func walkprint(nn *Node, init **NodeList) *Node {
+	var r *Node
+	var n *Node
+	var on *Node
+	var t *Type
+	var et int
+
+	op := int(nn.Op)
+	all := nn.List
+	var calls *NodeList
+	notfirst := false
+
+	// Hoist all the argument evaluation up before the lock.
+	walkexprlistcheap(all, init)
+
+	calls = list(calls, mkcall("printlock", nil, init))
+
+	for l := all; l != nil; l = l.Next {
+		if notfirst {
+			calls = list(calls, mkcall("printsp", nil, init))
+		}
+
+		notfirst = op == OPRINTN
+
+		n = l.N
+		if n.Op == OLITERAL {
+			switch n.Val.Ctype {
+			case CTRUNE:
+				defaultlit(&n, runetype)
+
+			case CTINT:
+				defaultlit(&n, Types[TINT64])
+
+			case CTFLT:
+				defaultlit(&n, Types[TFLOAT64])
+			}
+		}
+
+		if n.Op != OLITERAL && n.Type != nil && n.Type.Etype == TIDEAL {
+			defaultlit(&n, Types[TINT64])
+		}
+		defaultlit(&n, nil)
+		l.N = n
+		if n.Type == nil || n.Type.Etype == TFORW {
+			continue
+		}
+
+		t = n.Type
+		et = int(n.Type.Etype)
+		if Isinter(n.Type) {
+			if isnilinter(n.Type) {
+				on = syslook("printeface", 1)
+			} else {
+				on = syslook("printiface", 1)
+			}
+			substArgTypes(on, n.Type) // any-1
+		} else if Isptr[et] || et == TCHAN || et == TMAP || et == TFUNC || et == TUNSAFEPTR {
+			on = syslook("printpointer", 1)
+			substArgTypes(on, n.Type) // any-1
+		} else if Isslice(n.Type) {
+			on = syslook("printslice", 1)
+			substArgTypes(on, n.Type) // any-1
+		} else if Isint[et] {
+			if et == TUINT64 {
+				if (t.Sym.Pkg == Runtimepkg || compiling_runtime != 0) && t.Sym.Name == "hex" {
+					on = syslook("printhex", 0)
+				} else {
+					on = syslook("printuint", 0)
+				}
+			} else {
+				on = syslook("printint", 0)
+			}
+		} else if Isfloat[et] {
+			on = syslook("printfloat", 0)
+		} else if Iscomplex[et] {
+			on = syslook("printcomplex", 0)
+		} else if et == TBOOL {
+			on = syslook("printbool", 0)
+		} else if et == TSTRING {
+			on = syslook("printstring", 0)
+		} else {
+			badtype(OPRINT, n.Type, nil)
+			continue
+		}
+
+		t = *getinarg(on.Type)
+		if t != nil {
+			t = t.Type
+		}
+		if t != nil {
+			t = t.Type
+		}
+
+		if !Eqtype(t, n.Type) {
+			n = Nod(OCONV, n, nil)
+			n.Type = t
+		}
+
+		r = Nod(OCALL, on, nil)
+		r.List = list1(n)
+		calls = list(calls, r)
+	}
+
+	if op == OPRINTN {
+		calls = list(calls, mkcall("printnl", nil, nil))
+	}
+
+	calls = list(calls, mkcall("printunlock", nil, init))
+
+	typechecklist(calls, Etop)
+	walkexprlist(calls, init)
+
+	r = Nod(OEMPTY, nil, nil)
+	typecheck(&r, Etop)
+	walkexpr(&r, init)
+	r.Ninit = calls
+	return r
+}
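+
+// Illustrative sketch: after walkprint, a statement such as
+//
+//	println(x, 3.14)
+//
+// with x an int behaves roughly as if it had been written
+//
+//	printlock()
+//	printint(int64(x))
+//	printsp()
+//	printfloat(3.14)
+//	printnl()
+//	printunlock()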
+
+func callnew(t *Type) *Node {
+	dowidth(t)
+	fn := syslook("newobject", 1)
+	substArgTypes(fn, t)
+	return mkcall1(fn, Ptrto(t), nil, typename(t))
+}
+
+func isstack(n *Node) bool {
+	n = outervalue(n)
+
+	// If n is *autotmp and autotmp = &foo, replace n with foo.
+	// We introduce such temps when initializing struct literals.
+	if n.Op == OIND && n.Left.Op == ONAME && strings.HasPrefix(n.Left.Sym.Name, "autotmp_") {
+		defn := n.Left.Defn
+		if defn != nil && defn.Op == OAS && defn.Right.Op == OADDR {
+			n = defn.Right.Left
+		}
+	}
+
+	switch n.Op {
+	case OINDREG:
+		return n.Reg == int16(Thearch.REGSP)
+
+	case ONAME:
+		switch n.Class {
+		case PAUTO, PPARAM, PPARAMOUT:
+			return true
+		}
+	}
+
+	return false
+}
+
+func isglobal(n *Node) bool {
+	n = outervalue(n)
+
+	switch n.Op {
+	case ONAME:
+		switch n.Class {
+		case PEXTERN:
+			return true
+		}
+	}
+
+	return false
+}
+
+// Do we need a write barrier for the assignment l = r?
+func needwritebarrier(l *Node, r *Node) bool {
+	if use_writebarrier == 0 {
+		return false
+	}
+
+	if l == nil || isblank(l) {
+		return false
+	}
+
+	// No write barrier for write of non-pointers.
+	dowidth(l.Type)
+
+	if !haspointers(l.Type) {
+		return false
+	}
+
+	// No write barrier for write to stack.
+	if isstack(l) {
+		return false
+	}
+
+	// No write barrier for implicit or explicit zeroing.
+	if r == nil || iszero(r) {
+		return false
+	}
+
+	// No write barrier for initialization to constant.
+	if r.Op == OLITERAL {
+		return false
+	}
+
+	// No write barrier for storing static (read-only) data.
+	if r.Op == ONAME && strings.HasPrefix(r.Sym.Name, "statictmp_") {
+		return false
+	}
+
+	// No write barrier for storing address of stack values,
+	// which are guaranteed only to be written to the stack.
+	if r.Op == OADDR && isstack(r.Left) {
+		return false
+	}
+
+	// No write barrier for storing address of global, which
+	// is live no matter what.
+	if r.Op == OADDR && isglobal(r.Left) {
+		return false
+	}
+
+	// Otherwise, be conservative and use write barrier.
+	return true
+}
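+
+// Illustrative examples of the rules above (p and q are pointers into the
+// heap, local is a stack variable, g is a global):
+//
+//	*p = nil      // no barrier: zeroing
+//	local = q     // no barrier: write to stack
+//	*p = &local   // no barrier: address of stack value
+//	*p = &g       // no barrier: address of global
+//	*p = q        // barrier required: pointer write to heap memory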
+
+// TODO(rsc): Perhaps componentgen should run before this.
+
+var applywritebarrier_bv Bvec
+
+func applywritebarrier(n *Node, init **NodeList) *Node {
+	if n.Left != nil && n.Right != nil && needwritebarrier(n.Left, n.Right) {
+		if flag_race == 0 {
+			if Debug_wb > 1 {
+				Warnl(int(n.Lineno), "marking %v for barrier", Nconv(n.Left, 0))
+			}
+			n.Op = OASWB
+			return n
+		}
+		// Use slow path always for race detector.
+		if Curfn != nil && Curfn.Func.Nowritebarrier {
+			Yyerror("write barrier prohibited")
+		}
+		if Debug_wb > 0 {
+			Warnl(int(n.Lineno), "write barrier")
+		}
+		t := n.Left.Type
+		l := Nod(OADDR, n.Left, nil)
+		l.Etype = 1 // addr does not escape
+		if t.Width == int64(Widthptr) {
+			n = mkcall1(writebarrierfn("writebarrierptr", t, n.Right.Type), nil, init, l, n.Right)
+		} else if t.Etype == TSTRING {
+			n = mkcall1(writebarrierfn("writebarrierstring", t, n.Right.Type), nil, init, l, n.Right)
+		} else if Isslice(t) {
+			n = mkcall1(writebarrierfn("writebarrierslice", t, n.Right.Type), nil, init, l, n.Right)
+		} else if Isinter(t) {
+			n = mkcall1(writebarrierfn("writebarrieriface", t, n.Right.Type), nil, init, l, n.Right)
+		} else if t.Width <= int64(4*Widthptr) {
+			x := int64(0)
+			if applywritebarrier_bv.b == nil {
+				applywritebarrier_bv = bvalloc(4)
+			}
+			bvresetall(applywritebarrier_bv)
+			onebitwalktype1(t, &x, applywritebarrier_bv)
+			var name string
+			switch t.Width / int64(Widthptr) {
+			default:
+				Fatal("found writebarrierfat for %d-byte object of type %v", int(t.Width), t)
+
+			case 2:
+				name = fmt.Sprintf("writebarrierfat%d%d", bvget(applywritebarrier_bv, 0), bvget(applywritebarrier_bv, 1))
+
+			case 3:
+				name = fmt.Sprintf("writebarrierfat%d%d%d", bvget(applywritebarrier_bv, 0), bvget(applywritebarrier_bv, 1), bvget(applywritebarrier_bv, 2))
+
+			case 4:
+				name = fmt.Sprintf("writebarrierfat%d%d%d%d", bvget(applywritebarrier_bv, 0), bvget(applywritebarrier_bv, 1), bvget(applywritebarrier_bv, 2), bvget(applywritebarrier_bv, 3))
+			}
+
+			n = mkcall1(writebarrierfn(name, t, n.Right.Type), nil, init, l, Nodintconst(0), n.Right)
+		} else {
+			r := n.Right
+			for r.Op == OCONVNOP {
+				r = r.Left
+			}
+			r = Nod(OADDR, r, nil)
+			r.Etype = 1 // addr does not escape
+
+			//warnl(n->lineno, "typedmemmove %T %N", t, r);
+			n = mkcall1(writebarrierfn("typedmemmove", t, r.Left.Type), nil, init, typename(t), l, r)
+		}
+	}
+	return n
+}
+
+func convas(n *Node, init **NodeList) *Node {
+	if n.Op != OAS {
+		Fatal("convas: not OAS %v", Oconv(int(n.Op), 0))
+	}
+
+	n.Typecheck = 1
+
+	var lt *Type
+	var rt *Type
+	if n.Left == nil || n.Right == nil {
+		goto out
+	}
+
+	lt = n.Left.Type
+	rt = n.Right.Type
+	if lt == nil || rt == nil {
+		goto out
+	}
+
+	if isblank(n.Left) {
+		defaultlit(&n.Right, nil)
+		goto out
+	}
+
+	if n.Left.Op == OINDEXMAP {
+		map_ := n.Left.Left
+		key := n.Left.Right
+		val := n.Right
+		walkexpr(&map_, init)
+		walkexpr(&key, init)
+		walkexpr(&val, init)
+
+		// orderexpr made sure key and val are addressable.
+		key = Nod(OADDR, key, nil)
+
+		val = Nod(OADDR, val, nil)
+		n = mkcall1(mapfn("mapassign1", map_.Type), nil, init, typename(map_.Type), map_, key, val)
+		goto out
+	}
+
+	if !Eqtype(lt, rt) {
+		n.Right = assignconv(n.Right, lt, "assignment")
+		walkexpr(&n.Right, init)
+	}
+
+out:
+	ullmancalc(n)
+	return n
+}
+
+/*
+ * from ascompat[te]
+ * evaluating actual function arguments
+ *	f(a,b)
+ * if there is exactly one function expr,
+ * it is evaluated first. otherwise we must
+ * make temp variables.
+ */
+func reorder1(all *NodeList) *NodeList {
+	var n *Node
+
+	c := 0 // function calls
+	t := 0 // total parameters
+
+	for l := all; l != nil; l = l.Next {
+		n = l.N
+		t++
+		ullmancalc(n)
+		if n.Ullman >= UINF {
+			c++
+		}
+	}
+
+	if c == 0 || t == 1 {
+		return all
+	}
+
+	var g *NodeList // fncalls assigned to tempnames
+	var f *Node     // last fncall assigned to stack
+	var r *NodeList // non fncalls and tempnames assigned to stack
+	d := 0
+	var a *Node
+	for l := all; l != nil; l = l.Next {
+		n = l.N
+		if n.Ullman < UINF {
+			r = list(r, n)
+			continue
+		}
+
+		d++
+		if d == c {
+			f = n
+			continue
+		}
+
+		// make assignment of fncall to tempname
+		a = temp(n.Right.Type)
+
+		a = Nod(OAS, a, n.Right)
+		g = list(g, a)
+
+		// put normal arg assignment on list
+		// with fncall replaced by tempname
+		n.Right = a.Left
+
+		r = list(r, n)
+	}
+
+	if f != nil {
+		g = list(g, f)
+	}
+	return concat(g, r)
+}
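+
+// Illustrative sketch: for a call f(g(), h(), 1) with two function-call
+// arguments, reorder1 assigns the earlier calls to temporaries and leaves
+// only the last call in place, morally:
+//
+//	t := g()
+//	f(t, h(), 1)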
+
+/*
+ * from ascompat[ee]
+ *	a,b = c,d
+ * simultaneous assignment. there cannot
+ * be later use of an earlier lvalue.
+ *
+ * function calls have been removed.
+ */
+func reorder3(all *NodeList) *NodeList {
+	var l *Node
+
+	// If a needed expression may be affected by an
+	// earlier assignment, make an early copy of that
+	// expression and use the copy instead.
+	var early *NodeList
+
+	var mapinit *NodeList
+	for list := all; list != nil; list = list.Next {
+		l = list.N.Left
+
+		// Save subexpressions needed on left side.
+		// Drill through non-dereferences.
+		for {
+			if l.Op == ODOT || l.Op == OPAREN {
+				l = l.Left
+				continue
+			}
+
+			if l.Op == OINDEX && Isfixedarray(l.Left.Type) {
+				reorder3save(&l.Right, all, list, &early)
+				l = l.Left
+				continue
+			}
+
+			break
+		}
+
+		switch l.Op {
+		default:
+			Fatal("reorder3 unexpected lvalue %v", Oconv(int(l.Op), obj.FmtSharp))
+
+		case ONAME:
+			break
+
+		case OINDEX, OINDEXMAP:
+			reorder3save(&l.Left, all, list, &early)
+			reorder3save(&l.Right, all, list, &early)
+			if l.Op == OINDEXMAP {
+				list.N = convas(list.N, &mapinit)
+			}
+
+		case OIND, ODOTPTR:
+			reorder3save(&l.Left, all, list, &early)
+		}
+
+		// Save expression on right side.
+		reorder3save(&list.N.Right, all, list, &early)
+	}
+
+	early = concat(mapinit, early)
+	return concat(early, all)
+}
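+
+// Illustrative sketch: in the simultaneous assignment
+//
+//	x, a[x] = 1, 2
+//
+// the index in a[x] must use the old value of x, so reorder3 copies it
+// into a temporary before the assignments run:
+//
+//	tmp := x
+//	x = 1
+//	a[tmp] = 2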
+
+/*
+ * if the evaluation of *np would be affected by the
+ * assignments in all up to but not including stop,
+ * copy into a temporary during *early and
+ * replace *np with that temp.
+ */
+func reorder3save(np **Node, all *NodeList, stop *NodeList, early **NodeList) {
+	n := *np
+	if !aliased(n, all, stop) {
+		return
+	}
+
+	q := temp(n.Type)
+	q = Nod(OAS, q, n)
+	typecheck(&q, Etop)
+	*early = list(*early, q)
+	*np = q.Left
+}
+
+/*
+ * what's the outer value that a write to n affects?
+ * outer value means containing struct or array.
+ */
+func outervalue(n *Node) *Node {
+	for {
+		if n.Op == OXDOT {
+			Fatal("OXDOT in walk")
+		}
+		if n.Op == ODOT || n.Op == OPAREN || n.Op == OCONVNOP {
+			n = n.Left
+			continue
+		}
+
+		if n.Op == OINDEX && Isfixedarray(n.Left.Type) {
+			n = n.Left
+			continue
+		}
+
+		break
+	}
+
+	return n
+}
+
+/*
+ * Is it possible that the computation of n might be
+ * affected by writes in all up to but not including stop?
+ */
+func aliased(n *Node, all *NodeList, stop *NodeList) bool {
+	if n == nil {
+		return false
+	}
+
+	// Look for obvious aliasing: a variable being assigned
+	// during the all list and appearing in n.
+	// Also record whether there are any writes to main memory.
+	// Also record whether there are any writes to variables
+	// whose addresses have been taken.
+	memwrite := 0
+
+	varwrite := 0
+	var a *Node
+	for l := all; l != stop; l = l.Next {
+		a = outervalue(l.N.Left)
+		if a.Op != ONAME {
+			memwrite = 1
+			continue
+		}
+
+		switch a.Class { // a is the variable being written
+		default:
+			varwrite = 1
+			continue
+
+		case PAUTO, PPARAM, PPARAMOUT:
+			if a.Addrtaken {
+				varwrite = 1
+				continue
+			}
+
+			if vmatch2(a, n) {
+				// Direct hit.
+				return true
+			}
+		}
+	}
+
+	// The variables being written do not appear in n.
+	// However, n might refer to computed addresses
+	// that are being written.
+
+	// If no computed addresses are affected by the writes, no aliasing.
+	if memwrite == 0 && varwrite == 0 {
+		return false
+	}
+
+	// If n does not refer to computed addresses
+	// (that is, if n only refers to variables whose addresses
+	// have not been taken), no aliasing.
+	if varexpr(n) {
+		return false
+	}
+
+	// Otherwise, both the writes and n refer to computed memory addresses.
+	// Assume that they might conflict.
+	return true
+}
+
+/*
+ * does the evaluation of n only refer to variables
+ * whose addresses have not been taken?
+ * (and no other memory)
+ */
+func varexpr(n *Node) bool {
+	if n == nil {
+		return true
+	}
+
+	switch n.Op {
+	case OLITERAL:
+		return true
+
+	case ONAME:
+		switch n.Class {
+		case PAUTO, PPARAM, PPARAMOUT:
+			if !n.Addrtaken {
+				return true
+			}
+		}
+
+		return false
+
+	case OADD,
+		OSUB,
+		OOR,
+		OXOR,
+		OMUL,
+		ODIV,
+		OMOD,
+		OLSH,
+		ORSH,
+		OAND,
+		OANDNOT,
+		OPLUS,
+		OMINUS,
+		OCOM,
+		OPAREN,
+		OANDAND,
+		OOROR,
+		ODOT, // but not ODOTPTR
+		OCONV,
+		OCONVNOP,
+		OCONVIFACE,
+		ODOTTYPE:
+		return varexpr(n.Left) && varexpr(n.Right)
+	}
+
+	// Be conservative.
+	return false
+}
+
+/*
+ * is the name l mentioned in r?
+ */
+func vmatch2(l *Node, r *Node) bool {
+	if r == nil {
+		return false
+	}
+	switch r.Op {
+	// match each right given left
+	case ONAME:
+		return l == r
+
+	case OLITERAL:
+		return false
+	}
+
+	if vmatch2(l, r.Left) {
+		return true
+	}
+	if vmatch2(l, r.Right) {
+		return true
+	}
+	for ll := r.List; ll != nil; ll = ll.Next {
+		if vmatch2(l, ll.N) {
+			return true
+		}
+	}
+	return false
+}
+
+/*
+ * is any name mentioned in l also mentioned in r?
+ * called by sinit.go
+ */
+func vmatch1(l *Node, r *Node) bool {
+	/*
+	 * isolate all left sides
+	 */
+	if l == nil || r == nil {
+		return false
+	}
+	switch l.Op {
+	case ONAME:
+		switch l.Class {
+		case PPARAM, PPARAMREF, PAUTO:
+			break
+
+		// assignment to non-stack variable
+		// must be delayed if right has function calls.
+		default:
+			if r.Ullman >= UINF {
+				return true
+			}
+		}
+
+		return vmatch2(l, r)
+
+	case OLITERAL:
+		return false
+	}
+
+	if vmatch1(l.Left, r) {
+		return true
+	}
+	if vmatch1(l.Right, r) {
+		return true
+	}
+	for ll := l.List; ll != nil; ll = ll.Next {
+		if vmatch1(ll.N, r) {
+			return true
+		}
+	}
+	return false
+}
+
+/*
+ * walk through argin parameters.
+ * generate and return code to allocate
+ * copies of escaped parameters to the heap.
+ */
+func paramstoheap(argin **Type, out int) *NodeList {
+	var savet Iter
+	var v *Node
+	var as *Node
+
+	var nn *NodeList
+	for t := Structfirst(&savet, argin); t != nil; t = structnext(&savet) {
+		v = t.Nname
+		if v != nil && v.Sym != nil && v.Sym.Name[0] == '~' && v.Sym.Name[1] == 'r' { // unnamed result
+			v = nil
+		}
+
+		// For precise stacks, the garbage collector assumes results
+		// are always live, so zero them always.
+		if out != 0 {
+			// Defer might stop a panic and show the
+			// return values as they exist at the time of panic.
+			// Make sure to zero them on entry to the function.
+			nn = list(nn, Nod(OAS, nodarg(t, 1), nil))
+		}
+
+		if v == nil || v.Class&PHEAP == 0 {
+			continue
+		}
+
+		// generate allocation & copying code
+		if compiling_runtime != 0 {
+			Yyerror("%v escapes to heap, not allowed in runtime.", v)
+		}
+		if v.Alloc == nil {
+			v.Alloc = callnew(v.Type)
+		}
+		nn = list(nn, Nod(OAS, v.Name.Heapaddr, v.Alloc))
+		if v.Class&^PHEAP != PPARAMOUT {
+			as = Nod(OAS, v, v.Stackparam)
+			v.Stackparam.Typecheck = 1
+			typecheck(&as, Etop)
+			as = applywritebarrier(as, &nn)
+			nn = list(nn, as)
+		}
+	}
+
+	return nn
+}
+
+/*
+ * walk through argout parameters copying back to stack
+ */
+func returnsfromheap(argout **Type) *NodeList {
+	var savet Iter
+	var v *Node
+
+	var nn *NodeList
+	for t := Structfirst(&savet, argout); t != nil; t = structnext(&savet) {
+		v = t.Nname
+		if v == nil || v.Class != PHEAP|PPARAMOUT {
+			continue
+		}
+		nn = list(nn, Nod(OAS, v.Stackparam, v))
+	}
+
+	return nn
+}
+
+/*
+ * take care of migrating any function in/out args
+ * between the stack and the heap.  adds code to
+ * curfn's before and after lists.
+ */
+func heapmoves() {
+	lno := lineno
+	lineno = Curfn.Lineno
+	nn := paramstoheap(getthis(Curfn.Type), 0)
+	nn = concat(nn, paramstoheap(getinarg(Curfn.Type), 0))
+	nn = concat(nn, paramstoheap(Getoutarg(Curfn.Type), 1))
+	Curfn.Func.Enter = concat(Curfn.Func.Enter, nn)
+	lineno = Curfn.Func.Endlineno
+	Curfn.Func.Exit = returnsfromheap(Getoutarg(Curfn.Type))
+	lineno = lno
+}
+
+func vmkcall(fn *Node, t *Type, init **NodeList, va []*Node) *Node {
+	if fn.Type == nil || fn.Type.Etype != TFUNC {
+		Fatal("mkcall %v %v", fn, fn.Type)
+	}
+
+	var args *NodeList
+	n := fn.Type.Intuple
+	for i := 0; i < n; i++ {
+		args = list(args, va[i])
+	}
+
+	r := Nod(OCALL, fn, nil)
+	r.List = args
+	if fn.Type.Outtuple > 0 {
+		typecheck(&r, Erv|Efnstruct)
+	} else {
+		typecheck(&r, Etop)
+	}
+	walkexpr(&r, init)
+	r.Type = t
+	return r
+}
+
+func mkcall(name string, t *Type, init **NodeList, args ...*Node) *Node {
+	return vmkcall(syslook(name, 0), t, init, args)
+}
+
+func mkcall1(fn *Node, t *Type, init **NodeList, args ...*Node) *Node {
+	return vmkcall(fn, t, init, args)
+}
+
+func conv(n *Node, t *Type) *Node {
+	if Eqtype(n.Type, t) {
+		return n
+	}
+	n = Nod(OCONV, n, nil)
+	n.Type = t
+	typecheck(&n, Erv)
+	return n
+}
+
+func chanfn(name string, n int, t *Type) *Node {
+	if t.Etype != TCHAN {
+		Fatal("chanfn %v", t)
+	}
+	fn := syslook(name, 1)
+	switch n {
+	default:
+		Fatal("chanfn %d", n)
+	case 1:
+		substArgTypes(fn, t.Type)
+	case 2:
+		substArgTypes(fn, t.Type, t.Type)
+	}
+	return fn
+}
+
+func mapfn(name string, t *Type) *Node {
+	if t.Etype != TMAP {
+		Fatal("mapfn %v", t)
+	}
+	fn := syslook(name, 1)
+	substArgTypes(fn, t.Down, t.Type, t.Down, t.Type)
+	return fn
+}
+
+func mapfndel(name string, t *Type) *Node {
+	if t.Etype != TMAP {
+		Fatal("mapfn %v", t)
+	}
+	fn := syslook(name, 1)
+	substArgTypes(fn, t.Down, t.Type, t.Down)
+	return fn
+}
+
+func writebarrierfn(name string, l *Type, r *Type) *Node {
+	fn := syslook(name, 1)
+	substArgTypes(fn, l, r)
+	return fn
+}
+
+func addstr(n *Node, init **NodeList) *Node {
+	// orderexpr rewrote OADDSTR to have a list of strings.
+	c := count(n.List)
+
+	if c < 2 {
+		Yyerror("addstr count %d too small", c)
+	}
+
+	buf := nodnil()
+	if n.Esc == EscNone {
+		sz := int64(0)
+		for l := n.List; l != nil; l = l.Next {
+			if l.N.Op == OLITERAL {
+				sz += int64(len(l.N.Val.U.(string)))
+			}
+		}
+
+		// Don't allocate the buffer if the result won't fit.
+		if sz < tmpstringbufsize {
+			// Create temporary buffer for result string on stack.
+			t := aindex(Nodintconst(tmpstringbufsize), Types[TUINT8])
+
+			buf = Nod(OADDR, temp(t), nil)
+		}
+	}
+
+	// build list of string arguments
+	args := list1(buf)
+
+	for l := n.List; l != nil; l = l.Next {
+		args = list(args, conv(l.N, Types[TSTRING]))
+	}
+
+	var fn string
+	if c <= 5 {
+		// small numbers of strings use direct runtime helpers.
+		// note: orderexpr knows this cutoff too.
+		fn = fmt.Sprintf("concatstring%d", c)
+	} else {
+		// large numbers of strings are passed to the runtime as a slice.
+		fn = "concatstrings"
+
+		t := typ(TARRAY)
+		t.Type = Types[TSTRING]
+		t.Bound = -1
+		slice := Nod(OCOMPLIT, nil, typenod(t))
+		slice.Alloc = n.Alloc
+		slice.List = args.Next // skip buf arg
+		args = list1(buf)
+		args = list(args, slice)
+		slice.Esc = EscNone
+	}
+
+	cat := syslook(fn, 1)
+	r := Nod(OCALL, cat, nil)
+	r.List = args
+	typecheck(&r, Erv)
+	walkexpr(&r, init)
+	r.Type = n.Type
+
+	return r
+}
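+
+// Illustrative sketch: with the rewrite above, the concatenation
+//
+//	s := a + b + c
+//
+// becomes roughly
+//
+//	s := concatstring3(buf, a, b, c)
+//
+// where buf points at a stack scratch buffer when the result does not
+// escape and is nil otherwise; six or more operands go through
+// concatstrings with a slice of the operands instead.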
+
+// expand append(l1, l2...) to
+//   init {
+//     s := l1
+//     if n := len(l1) + len(l2) - cap(s); n > 0 {
+//       s = growslice(s, n)
+//     }
+//     s = s[:len(l1)+len(l2)]
+//     memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T))
+//   }
+//   s
+//
+// l2 is allowed to be a string.
+func appendslice(n *Node, init **NodeList) *Node {
+	walkexprlistsafe(n.List, init)
+
+	// walkexprlistsafe will leave OINDEX (s[n]) alone if both s
+	// and n are name or literal, but those may index the slice we're
+	// modifying here.  Fix explicitly.
+	for l := n.List; l != nil; l = l.Next {
+		l.N = cheapexpr(l.N, init)
+	}
+
+	l1 := n.List.N
+	l2 := n.List.Next.N
+
+	s := temp(l1.Type) // var s []T
+	var l *NodeList
+	l = list(l, Nod(OAS, s, l1)) // s = l1
+
+	nt := temp(Types[TINT])
+
+	nif := Nod(OIF, nil, nil)
+
+	// n := len(s) + len(l2) - cap(s)
+	nif.Ninit = list1(Nod(OAS, nt, Nod(OSUB, Nod(OADD, Nod(OLEN, s, nil), Nod(OLEN, l2, nil)), Nod(OCAP, s, nil))))
+
+	nif.Ntest = Nod(OGT, nt, Nodintconst(0))
+
+	// instantiate growslice(Type*, []any, int) []any
+	fn := syslook("growslice", 1) //   growslice(<type>, old []T, n int64) (ret []T)
+	substArgTypes(fn, s.Type.Type, s.Type.Type)
+
+	// s = growslice(T, s, n)
+	nif.Nbody = list1(Nod(OAS, s, mkcall1(fn, s.Type, &nif.Ninit, typename(s.Type), s, nt)))
+
+	l = list(l, nif)
+
+	if haspointers(l1.Type.Type) {
+		// copy(s[len(l1):len(l1)+len(l2)], l2)
+		nptr1 := Nod(OSLICE, s, Nod(OKEY, Nod(OLEN, l1, nil), Nod(OADD, Nod(OLEN, l1, nil), Nod(OLEN, l2, nil))))
+
+		nptr1.Etype = 1
+		nptr2 := l2
+		fn := syslook("typedslicecopy", 1)
+		substArgTypes(fn, l1.Type, l2.Type)
+		nt := mkcall1(fn, Types[TINT], &l, typename(l1.Type.Type), nptr1, nptr2)
+		l = list(l, nt)
+	} else if flag_race != 0 {
+		// rely on runtime to instrument copy.
+		// copy(s[len(l1):len(l1)+len(l2)], l2)
+		nptr1 := Nod(OSLICE, s, Nod(OKEY, Nod(OLEN, l1, nil), Nod(OADD, Nod(OLEN, l1, nil), Nod(OLEN, l2, nil))))
+
+		nptr1.Etype = 1
+		nptr2 := l2
+		var fn *Node
+		if l2.Type.Etype == TSTRING {
+			fn = syslook("slicestringcopy", 1)
+		} else {
+			fn = syslook("slicecopy", 1)
+		}
+		substArgTypes(fn, l1.Type, l2.Type)
+		nt := mkcall1(fn, Types[TINT], &l, nptr1, nptr2, Nodintconst(s.Type.Type.Width))
+		l = list(l, nt)
+	} else {
+		// memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T))
+		nptr1 := Nod(OINDEX, s, Nod(OLEN, l1, nil))
+
+		nptr1.Bounded = true
+		nptr1 = Nod(OADDR, nptr1, nil)
+
+		nptr2 := Nod(OSPTR, l2, nil)
+
+		fn := syslook("memmove", 1)
+		substArgTypes(fn, s.Type.Type, s.Type.Type)
+
+		nwid := cheapexpr(conv(Nod(OLEN, l2, nil), Types[TUINTPTR]), &l)
+
+		nwid = Nod(OMUL, nwid, Nodintconst(s.Type.Type.Width))
+		nt := mkcall1(fn, nil, &l, nptr1, nptr2, nwid)
+		l = list(l, nt)
+	}
+
+	// s = s[:len(l1)+len(l2)]
+	nt = Nod(OADD, Nod(OLEN, l1, nil), Nod(OLEN, l2, nil))
+
+	nt = Nod(OSLICE, s, Nod(OKEY, nil, nt))
+	nt.Etype = 1
+	l = list(l, Nod(OAS, s, nt))
+
+	typechecklist(l, Etop)
+	walkstmtlist(l)
+	*init = concat(*init, l)
+	return s
+}
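+
+// Illustrative sketch of the expansion above as ordinary Go for a concrete
+// element type (the name is hypothetical; append and copy stand in for the
+// growslice and memmove calls):
+func appendsliceExpanded(l1, l2 []byte) []byte {
+	s := l1
+	if n := len(l1) + len(l2) - cap(s); n > 0 {
+		// Grow the backing array, then restore the old length.
+		s = append(s[:cap(s)], make([]byte, n)...)[:len(l1)]
+	}
+	s = s[:len(l1)+len(l2)]
+	copy(s[len(l1):], l2)
+	return s
+}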
+
+// Rewrite append(src, x, y, z) so that any side effects in
+// x, y, z (including runtime panics) are evaluated in
+// initialization statements before the append.
+// For normal code generation, stop there and leave the
+// rest to cgen_append.
+//
+// For race detector, expand append(src, a [, b]* ) to
+//
+//   init {
+//     s := src
+//     const argc = len(args) - 1
+//     if cap(s) - len(s) < argc {
+//	    s = growslice(s, argc)
+//     }
+//     n := len(s)
+//     s = s[:n+argc]
+//     s[n] = a
+//     s[n+1] = b
+//     ...
+//   }
+//   s
+func walkappend(n *Node, init **NodeList, dst *Node) *Node {
+	if !samesafeexpr(dst, n.List.N) {
+		l := n.List
+		l.N = safeexpr(l.N, init)
+		walkexpr(&l.N, init)
+	}
+	walkexprlistsafe(n.List.Next, init)
+
+	// walkexprlistsafe will leave OINDEX (s[n]) alone if both s
+	// and n are name or literal, but those may index the slice we're
+	// modifying here.  Fix explicitly.
+	// Using cheapexpr also makes sure that the evaluation
+	// of all arguments (and especially any panics) happen
+	// before we begin to modify the slice in a visible way.
+	for l := n.List.Next; l != nil; l = l.Next {
+		l.N = cheapexpr(l.N, init)
+	}
+
+	nsrc := n.List.N
+
+	// Resolve slice type of multi-valued return.
+	if Istype(nsrc.Type, TSTRUCT) {
+		nsrc.Type = nsrc.Type.Type.Type
+	}
+	argc := count(n.List) - 1
+	if argc < 1 {
+		return nsrc
+	}
+
+	// General case, with no function calls left as arguments.
+	// Leave for gen, except that race detector requires old form
+	if flag_race == 0 {
+		return n
+	}
+
+	var l *NodeList
+
+	ns := temp(nsrc.Type)
+	l = list(l, Nod(OAS, ns, nsrc)) // s = src
+
+	na := Nodintconst(int64(argc)) // const argc
+	nx := Nod(OIF, nil, nil)       // if cap(s) - len(s) < argc
+	nx.Ntest = Nod(OLT, Nod(OSUB, Nod(OCAP, ns, nil), Nod(OLEN, ns, nil)), na)
+
+	fn := syslook("growslice", 1) //   growslice(<type>, old []T, n int) (ret []T)
+	substArgTypes(fn, ns.Type.Type, ns.Type.Type)
+
+	nx.Nbody = list1(Nod(OAS, ns, mkcall1(fn, ns.Type, &nx.Ninit, typename(ns.Type), ns, na)))
+
+	l = list(l, nx)
+
+	nn := temp(Types[TINT])
+	l = list(l, Nod(OAS, nn, Nod(OLEN, ns, nil))) // n = len(s)
+
+	nx = Nod(OSLICE, ns, Nod(OKEY, nil, Nod(OADD, nn, na))) // ...s[:n+argc]
+	nx.Etype = 1
+	l = list(l, Nod(OAS, ns, nx)) // s = s[:n+argc]
+
+	for a := n.List.Next; a != nil; a = a.Next {
+		nx = Nod(OINDEX, ns, nn) // s[n] ...
+		nx.Bounded = true
+		l = list(l, Nod(OAS, nx, a.N)) // s[n] = arg
+		if a.Next != nil {
+			l = list(l, Nod(OAS, nn, Nod(OADD, nn, Nodintconst(1)))) // n = n + 1
+		}
+	}
+
+	typechecklist(l, Etop)
+	walkstmtlist(l)
+	*init = concat(*init, l)
+	return ns
+}
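+
+// Illustrative sketch: in race mode, append(s, a, b) expands to the
+// equivalent of (growslice shown as a pseudo-call; the real call also
+// takes the slice type):
+//
+//	if cap(s)-len(s) < 2 {
+//		s = growslice(s, 2)
+//	}
+//	n := len(s)
+//	s = s[:n+2]
+//	s[n] = a
+//	n = n + 1
+//	s[n] = b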
+
+// Lower copy(a, b) to a memmove call or a runtime call.
+//
+// init {
+//   n := len(a)
+//   if n > len(b) { n = len(b) }
+//   memmove(a.ptr, b.ptr, n*sizeof(elem(a)))
+// }
+// n;
+//
+// Also works if b is a string.
+//
+func copyany(n *Node, init **NodeList, runtimecall int) *Node {
+	if haspointers(n.Left.Type.Type) {
+		fn := writebarrierfn("typedslicecopy", n.Left.Type, n.Right.Type)
+		return mkcall1(fn, n.Type, init, typename(n.Left.Type.Type), n.Left, n.Right)
+	}
+
+	if runtimecall != 0 {
+		var fn *Node
+		if n.Right.Type.Etype == TSTRING {
+			fn = syslook("slicestringcopy", 1)
+		} else {
+			fn = syslook("slicecopy", 1)
+		}
+		substArgTypes(fn, n.Left.Type, n.Right.Type)
+		return mkcall1(fn, n.Type, init, n.Left, n.Right, Nodintconst(n.Left.Type.Type.Width))
+	}
+
+	walkexpr(&n.Left, init)
+	walkexpr(&n.Right, init)
+	nl := temp(n.Left.Type)
+	nr := temp(n.Right.Type)
+	var l *NodeList
+	l = list(l, Nod(OAS, nl, n.Left))
+	l = list(l, Nod(OAS, nr, n.Right))
+
+	nfrm := Nod(OSPTR, nr, nil)
+	nto := Nod(OSPTR, nl, nil)
+
+	nlen := temp(Types[TINT])
+
+	// n = len(to)
+	l = list(l, Nod(OAS, nlen, Nod(OLEN, nl, nil)))
+
+	// if n > len(frm) { n = len(frm) }
+	nif := Nod(OIF, nil, nil)
+
+	nif.Ntest = Nod(OGT, nlen, Nod(OLEN, nr, nil))
+	nif.Nbody = list(nif.Nbody, Nod(OAS, nlen, Nod(OLEN, nr, nil)))
+	l = list(l, nif)
+
+	// Call memmove.
+	fn := syslook("memmove", 1)
+
+	substArgTypes(fn, nl.Type.Type, nl.Type.Type)
+	nwid := temp(Types[TUINTPTR])
+	l = list(l, Nod(OAS, nwid, conv(nlen, Types[TUINTPTR])))
+	nwid = Nod(OMUL, nwid, Nodintconst(nl.Type.Type.Width))
+	l = list(l, mkcall1(fn, nil, init, nto, nfrm, nwid))
+
+	typechecklist(l, Etop)
+	walkstmtlist(l)
+	*init = concat(*init, l)
+	return nlen
+}
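+
+// Illustrative sketch of the generic lowering above as ordinary Go (the
+// name is hypothetical; the loop stands in for the memmove call):
+func copyExpanded(to, frm []int) int {
+	n := len(to)
+	if n > len(frm) {
+		n = len(frm)
+	}
+	for i := 0; i < n; i++ {
+		to[i] = frm[i]
+	}
+	return n
+}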
+
+func eqfor(t *Type, needsize *int) *Node {
+	// Should only arrive here with large memory or
+	// a struct/array containing a non-memory field/element.
+	// Small memory is handled inline, and single non-memory
+	// is handled during type check (OCMPSTR etc).
+	a := algtype1(t, nil)
+
+	if a != AMEM && a != -1 {
+		Fatal("eqfor %v", t)
+	}
+
+	if a == AMEM {
+		n := syslook("memequal", 1)
+		substArgTypes(n, t, t)
+		*needsize = 1
+		return n
+	}
+
+	sym := typesymprefix(".eq", t)
+	n := newname(sym)
+	n.Class = PFUNC
+	ntype := Nod(OTFUNC, nil, nil)
+	ntype.List = list(ntype.List, Nod(ODCLFIELD, nil, typenod(Ptrto(t))))
+	ntype.List = list(ntype.List, Nod(ODCLFIELD, nil, typenod(Ptrto(t))))
+	ntype.Rlist = list(ntype.Rlist, Nod(ODCLFIELD, nil, typenod(Types[TBOOL])))
+	typecheck(&ntype, Etype)
+	n.Type = ntype.Type
+	*needsize = 0
+	return n
+}
+
+func countfield(t *Type) int {
+	n := 0
+	for t1 := t.Type; t1 != nil; t1 = t1.Down {
+		n++
+	}
+	return n
+}
+
+func walkcompare(np **Node, init **NodeList) {
+	n := *np
+
+	// Given interface value l and concrete value r, rewrite
+	//   l == r
+	// to
+	//   x, ok := l.(type(r)); ok && x == r
+	// Handle != similarly.
+	// This avoids the allocation that would be required
+	// to convert r to l for comparison.
+	var l *Node
+
+	var r *Node
+	if Isinter(n.Left.Type) && !Isinter(n.Right.Type) {
+		l = n.Left
+		r = n.Right
+	} else if !Isinter(n.Left.Type) && Isinter(n.Right.Type) {
+		l = n.Right
+		r = n.Left
+	}
+
+	if l != nil {
+		x := temp(r.Type)
+		ok := temp(Types[TBOOL])
+
+		// l.(type(r))
+		a := Nod(ODOTTYPE, l, nil)
+
+		a.Type = r.Type
+
+		// x, ok := l.(type(r))
+		expr := Nod(OAS2, nil, nil)
+
+		expr.List = list1(x)
+		expr.List = list(expr.List, ok)
+		expr.Rlist = list1(a)
+		typecheck(&expr, Etop)
+		walkexpr(&expr, init)
+
+		if n.Op == OEQ {
+			r = Nod(OANDAND, ok, Nod(OEQ, x, r))
+		} else {
+			r = Nod(OOROR, Nod(ONOT, ok, nil), Nod(ONE, x, r))
+		}
+		*init = list(*init, expr)
+		finishcompare(np, n, r, init)
+		return
+	}
+
+	// Must be comparison of array or struct.
+	// Otherwise back end handles it.
+	t := n.Left.Type
+
+	switch t.Etype {
+	default:
+		return
+
+	case TARRAY:
+		if Isslice(t) {
+			return
+		}
+
+	case TSTRUCT:
+		break
+	}
+
+	cmpl := n.Left
+	for cmpl != nil && cmpl.Op == OCONVNOP {
+		cmpl = cmpl.Left
+	}
+	cmpr := n.Right
+	for cmpr != nil && cmpr.Op == OCONVNOP {
+		cmpr = cmpr.Left
+	}
+
+	if !islvalue(cmpl) || !islvalue(cmpr) {
+		Fatal("arguments of comparison must be lvalues - %v %v", cmpl, cmpr)
+	}
+
+	l = temp(Ptrto(t))
+	a := Nod(OAS, l, Nod(OADDR, cmpl, nil))
+	a.Right.Etype = 1 // addr does not escape
+	typecheck(&a, Etop)
+	*init = list(*init, a)
+
+	r = temp(Ptrto(t))
+	a = Nod(OAS, r, Nod(OADDR, cmpr, nil))
+	a.Right.Etype = 1 // addr does not escape
+	typecheck(&a, Etop)
+	*init = list(*init, a)
+
+	andor := OANDAND
+	if n.Op == ONE {
+		andor = OOROR
+	}
+
+	var expr *Node
+	if t.Etype == TARRAY && t.Bound <= 4 && issimple[t.Type.Etype] {
+		// Four or fewer elements of a basic type.
+		// Unroll comparisons.
+		var li *Node
+		var ri *Node
+		for i := 0; int64(i) < t.Bound; i++ {
+			li = Nod(OINDEX, l, Nodintconst(int64(i)))
+			ri = Nod(OINDEX, r, Nodintconst(int64(i)))
+			a = Nod(int(n.Op), li, ri)
+			if expr == nil {
+				expr = a
+			} else {
+				expr = Nod(andor, expr, a)
+			}
+		}
+
+		if expr == nil {
+			expr = Nodbool(n.Op == OEQ)
+		}
+		finishcompare(np, n, expr, init)
+		return
+	}
+
+	if t.Etype == TSTRUCT && countfield(t) <= 4 {
+		// Struct of four or fewer fields.
+		// Inline comparisons.
+		var li *Node
+		var ri *Node
+		for t1 := t.Type; t1 != nil; t1 = t1.Down {
+			if isblanksym(t1.Sym) {
+				continue
+			}
+			li = Nod(OXDOT, l, newname(t1.Sym))
+			ri = Nod(OXDOT, r, newname(t1.Sym))
+			a = Nod(int(n.Op), li, ri)
+			if expr == nil {
+				expr = a
+			} else {
+				expr = Nod(andor, expr, a)
+			}
+		}
+
+		if expr == nil {
+			expr = Nodbool(n.Op == OEQ)
+		}
+		finishcompare(np, n, expr, init)
+		return
+	}
+
+	// We chose not to inline; call the equality function directly.
+	var needsize int
+	call := Nod(OCALL, eqfor(t, &needsize), nil)
+
+	call.List = list(call.List, l)
+	call.List = list(call.List, r)
+	if needsize != 0 {
+		call.List = list(call.List, Nodintconst(t.Width))
+	}
+	r = call
+	if n.Op != OEQ {
+		r = Nod(ONOT, r, nil)
+	}
+
+	finishcompare(np, n, r, init)
+	return
+}
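+
+// Illustrative sketch: for small arrays of a basic type, the unrolling
+// above turns
+//
+//	var a, b [3]int
+//	eq := a == b
+//
+// into the equivalent of
+//
+//	eq := a[0] == b[0] && a[1] == b[1] && a[2] == b[2]
+//
+// and a != b into the same comparisons negated and joined with ||.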
+
+func finishcompare(np **Node, n, r *Node, init **NodeList) {
+	// Using np here to avoid passing &r to typecheck.
+	*np = r
+	typecheck(np, Erv)
+	walkexpr(np, init)
+	r = *np
+	if r.Type != n.Type {
+		r = Nod(OCONVNOP, r, nil)
+		r.Type = n.Type
+		r.Typecheck = 1
+		*np = r
+	}
+}
+
+func samecheap(a *Node, b *Node) bool {
+	var ar *Node
+	var br *Node
+	for a != nil && b != nil && a.Op == b.Op {
+		switch a.Op {
+		default:
+			return false
+
+		case ONAME:
+			return a == b
+
+		case ODOT, ODOTPTR:
+			ar = a.Right
+			br = b.Right
+			if ar.Op != ONAME || br.Op != ONAME || ar.Sym != br.Sym {
+				return false
+			}
+
+		case OINDEX:
+			ar = a.Right
+			br = b.Right
+			if !Isconst(ar, CTINT) || !Isconst(br, CTINT) || Mpcmpfixfix(ar.Val.U.(*Mpint), br.Val.U.(*Mpint)) != 0 {
+				return false
+			}
+		}
+
+		a = a.Left
+		b = b.Left
+	}
+
+	return false
+}
+
+func walkrotate(np **Node) {
+	if Thearch.Thechar == '7' || Thearch.Thechar == '9' {
+		return
+	}
+
+	n := *np
+
+	// Want << | >> or >> | << or << ^ >> or >> ^ << on unsigned value.
+	l := n.Left
+
+	r := n.Right
+	if (n.Op != OOR && n.Op != OXOR) || (l.Op != OLSH && l.Op != ORSH) || (r.Op != OLSH && r.Op != ORSH) || n.Type == nil || Issigned[n.Type.Etype] || l.Op == r.Op {
+		return
+	}
+
+	// Want same, side effect-free expression on lhs of both shifts.
+	if !samecheap(l.Left, r.Left) {
+		return
+	}
+
+	// Constants adding to width?
+	w := int(l.Type.Width * 8)
+
+	if Smallintconst(l.Right) && Smallintconst(r.Right) {
+		sl := int(Mpgetfix(l.Right.Val.U.(*Mpint)))
+		if sl >= 0 {
+			sr := int(Mpgetfix(r.Right.Val.U.(*Mpint)))
+			if sr >= 0 && sl+sr == w {
+				// Rewrite left shift half to left rotate.
+				if l.Op == OLSH {
+					n = l
+				} else {
+					n = r
+				}
+				n.Op = OLROT
+
+				// Remove rotate 0 and rotate w.
+				s := int(Mpgetfix(n.Right.Val.U.(*Mpint)))
+
+				if s == 0 || s == w {
+					n = n.Left
+				}
+
+				*np = n
+				return
+			}
+		}
+		return
+	}
+
+	// TODO: Could allow s and 32-s if s is bounded (maybe s&31 and 32-s&31).
+	return
+}
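+
+// Illustrative sketch: the pattern recognized above is an ordinary rotate.
+// For a 32-bit value (the name is hypothetical):
+func rotl32(x uint32, k uint) uint32 {
+	return x<<k | x>>(32-k) // becomes a single OLROT when k is constant
+}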
+
+/*
+ * walkmul rewrites integer multiplication by powers of two as shifts.
+ */
+func walkmul(np **Node, init **NodeList) {
+	n := *np
+	if !Isint[n.Type.Etype] {
+		return
+	}
+
+	var nr *Node
+	var nl *Node
+	if n.Right.Op == OLITERAL {
+		nl = n.Left
+		nr = n.Right
+	} else if n.Left.Op == OLITERAL {
+		nl = n.Right
+		nr = n.Left
+	} else {
+		return
+	}
+
+	neg := 0
+
+	// x*0 is 0 (and side effects of x).
+	var pow int
+	var w int
+	if Mpgetfix(nr.Val.U.(*Mpint)) == 0 {
+		cheapexpr(nl, init)
+		Nodconst(n, n.Type, 0)
+		goto ret
+	}
+
+	// nr is a constant.
+	pow = powtwo(nr)
+
+	if pow < 0 {
+		return
+	}
+	if pow >= 1000 {
+		// negative power of 2, like -16
+		neg = 1
+
+		pow -= 1000
+	}
+
+	w = int(nl.Type.Width * 8)
+	if pow+1 >= w { // too big, shouldn't happen
+		return
+	}
+
+	nl = cheapexpr(nl, init)
+
+	if pow == 0 {
+		// x*1 is x
+		n = nl
+
+		goto ret
+	}
+
+	n = Nod(OLSH, nl, Nodintconst(int64(pow)))
+
+ret:
+	if neg != 0 {
+		n = Nod(OMINUS, n, nil)
+	}
+
+	typecheck(&n, Erv)
+	walkexpr(&n, init)
+	*np = n
+}
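+
+// Illustrative examples of the strength reduction above:
+//
+//	x * 8    becomes  x << 3
+//	x * -16  becomes  -(x << 4)
+//	x * 1    becomes  x
+//	x * 0    becomes  0 (after evaluating x for side effects)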
+
+/*
+ * walkdiv rewrites division by a constant as less expensive
+ * operations.
+ */
+func walkdiv(np **Node, init **NodeList) {
+	// TODO(minux)
+	if Thearch.Thechar == '7' || Thearch.Thechar == '9' {
+		return
+	}
+
+	n := *np
+	if n.Right.Op != OLITERAL {
+		return
+	}
+
+	// nr is a constant.
+	nl := cheapexpr(n.Left, init)
+
+	nr := n.Right
+
+	// special cases of mod/div
+	// by a constant
+	w := int(nl.Type.Width * 8)
+
+	s := 0            // 1 if nr is negative.
+	pow := powtwo(nr) // if >= 0, nr is 1<<pow
+	if pow >= 1000 {
+		// negative power of 2
+		s = 1
+
+		pow -= 1000
+	}
+
+	if pow+1 >= w {
+		// divisor too large.
+		return
+	}
+
+	if pow < 0 {
+		// try to do division by multiply by (2^w)/d
+		// see hacker's delight chapter 10
+		// TODO: support 64-bit magic multiply here.
+		var m Magic
+		m.W = w
+
+		if Issigned[nl.Type.Etype] {
+			m.Sd = Mpgetfix(nr.Val.U.(*Mpint))
+			Smagic(&m)
+		} else {
+			m.Ud = uint64(Mpgetfix(nr.Val.U.(*Mpint)))
+			Umagic(&m)
+		}
+
+		if m.Bad != 0 {
+			return
+		}
+
+		// We have a quick division method so use it
+		// for modulo too.
+		if n.Op == OMOD {
+			// rewrite as A%B = A - (A/B*B).
+			n1 := Nod(ODIV, nl, nr)
+
+			n2 := Nod(OMUL, n1, nr)
+			n = Nod(OSUB, nl, n2)
+			goto ret
+		}
+
+		switch Simtype[nl.Type.Etype] {
+		default:
+			return
+
+		// n1 = nl * magic >> w (HMUL)
+		case TUINT8, TUINT16, TUINT32:
+			nc := Nod(OXXX, nil, nil)
+
+			Nodconst(nc, nl.Type, int64(m.Um))
+			n1 := Nod(OMUL, nl, nc)
+			typecheck(&n1, Erv)
+			n1.Op = OHMUL
+			if m.Ua != 0 {
+				// Select a Go type with (at least) twice the width.
+				var twide *Type
+				switch Simtype[nl.Type.Etype] {
+				default:
+					return
+
+				case TUINT8, TUINT16:
+					twide = Types[TUINT32]
+
+				case TUINT32:
+					twide = Types[TUINT64]
+
+				case TINT8, TINT16:
+					twide = Types[TINT32]
+
+				case TINT32:
+					twide = Types[TINT64]
+				}
+
+				// add numerator (might overflow).
+				// n2 = (n1 + nl)
+				n2 := Nod(OADD, conv(n1, twide), conv(nl, twide))
+
+				// shift by m.s
+				nc := Nod(OXXX, nil, nil)
+
+				Nodconst(nc, Types[TUINT], int64(m.S))
+				n = conv(Nod(ORSH, n2, nc), nl.Type)
+			} else {
+				// n = n1 >> m.s
+				nc := Nod(OXXX, nil, nil)
+
+				Nodconst(nc, Types[TUINT], int64(m.S))
+				n = Nod(ORSH, n1, nc)
+			}
+
+		// n1 = nl * magic >> w
+		case TINT8, TINT16, TINT32:
+			nc := Nod(OXXX, nil, nil)
+
+			Nodconst(nc, nl.Type, m.Sm)
+			n1 := Nod(OMUL, nl, nc)
+			typecheck(&n1, Erv)
+			n1.Op = OHMUL
+			if m.Sm < 0 {
+				// add the numerator.
+				n1 = Nod(OADD, n1, nl)
+			}
+
+			// shift by m.s
+			nc = Nod(OXXX, nil, nil)
+
+			Nodconst(nc, Types[TUINT], int64(m.S))
+			n2 := conv(Nod(ORSH, n1, nc), nl.Type)
+
+			// add 1 iff n1 is negative.
+			nc = Nod(OXXX, nil, nil)
+
+			Nodconst(nc, Types[TUINT], int64(w)-1)
+			n3 := Nod(ORSH, nl, nc) // n3 = -1 iff nl is negative.
+			n = Nod(OSUB, n2, n3)
+
+			// apply sign.
+			if m.Sd < 0 {
+				n = Nod(OMINUS, n, nil)
+			}
+		}
+
+		goto ret
+	}
+
+	switch pow {
+	case 0:
+		if n.Op == OMOD {
+			// nl % 1 is zero.
+			Nodconst(n, n.Type, 0)
+		} else if s != 0 {
+			// divide by -1
+			n.Op = OMINUS
+
+			n.Right = nil
+		} else {
+			// divide by 1
+			n = nl
+		}
+
+	default:
+		if Issigned[n.Type.Etype] {
+			if n.Op == OMOD {
+				// signed modulo 2^pow is like ANDing
+				// with the last pow bits, but if nl < 0,
+				// nl & (2^pow-1) is (nl+1)%2^pow - 1.
+				nc := Nod(OXXX, nil, nil)
+
+				Nodconst(nc, Types[Simtype[TUINT]], int64(w)-1)
+				n1 := Nod(ORSH, nl, nc) // n1 = -1 iff nl < 0.
+				if pow == 1 {
+					typecheck(&n1, Erv)
+					n1 = cheapexpr(n1, init)
+
+					// n = (nl+ε)&1 -ε where ε=1 iff nl<0.
+					n2 := Nod(OSUB, nl, n1)
+
+					nc := Nod(OXXX, nil, nil)
+					Nodconst(nc, nl.Type, 1)
+					n3 := Nod(OAND, n2, nc)
+					n = Nod(OADD, n3, n1)
+				} else {
+					// n = (nl+ε)&(nr-1) - ε where ε=2^pow-1 iff nl<0.
+					nc := Nod(OXXX, nil, nil)
+
+					Nodconst(nc, nl.Type, (1<<uint(pow))-1)
+					n2 := Nod(OAND, n1, nc) // n2 = 2^pow-1 iff nl<0.
+					typecheck(&n2, Erv)
+					n2 = cheapexpr(n2, init)
+
+					n3 := Nod(OADD, nl, n2)
+					n4 := Nod(OAND, n3, nc)
+					n = Nod(OSUB, n4, n2)
+				}
+
+				break
+			} else {
+				// arithmetic right shift does not give the correct rounding.
+				// if nl >= 0, nl >> n == nl / nr
+				// if nl < 0, we want to add 2^n-1 first.
+				nc := Nod(OXXX, nil, nil)
+
+				Nodconst(nc, Types[Simtype[TUINT]], int64(w)-1)
+				n1 := Nod(ORSH, nl, nc) // n1 = -1 iff nl < 0.
+				if pow == 1 {
+					// nl+1 is nl-(-1)
+					n.Left = Nod(OSUB, nl, n1)
+				} else {
+					// Do a logical right shift on -1 to keep pow bits.
+					nc := Nod(OXXX, nil, nil)
+
+					Nodconst(nc, Types[Simtype[TUINT]], int64(w)-int64(pow))
+					n2 := Nod(ORSH, conv(n1, tounsigned(nl.Type)), nc)
+					n.Left = Nod(OADD, nl, conv(n2, nl.Type))
+				}
+
+				// n = (nl + 2^pow-1) >> pow
+				n.Op = ORSH
+
+				nc = Nod(OXXX, nil, nil)
+				Nodconst(nc, Types[Simtype[TUINT]], int64(pow))
+				n.Right = nc
+				n.Typecheck = 0
+			}
+
+			if s != 0 {
+				n = Nod(OMINUS, n, nil)
+			}
+			break
+		}
+
+		nc := Nod(OXXX, nil, nil)
+		if n.Op == OMOD {
+			// n = nl & (nr-1)
+			n.Op = OAND
+
+			Nodconst(nc, nl.Type, Mpgetfix(nr.Val.U.(*Mpint))-1)
+		} else {
+			// n = nl >> pow
+			n.Op = ORSH
+
+			Nodconst(nc, Types[Simtype[TUINT]], int64(pow))
+		}
+
+		n.Typecheck = 0
+		n.Right = nc
+	}
+
+	goto ret
+
+ret:
+	typecheck(&n, Erv)
+	walkexpr(&n, init)
+	*np = n
+}
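+
+// Illustrative sketch: the signed power-of-two case above, written out as
+// ordinary Go for x/4 on int32 (the name is hypothetical):
+func divByFourExpanded(x int32) int32 {
+	t := x >> 31               // t = -1 iff x < 0 (arithmetic shift)
+	t = int32(uint32(t) >> 30) // logical shift keeps pow bits: t = 3 iff x < 0
+	return (x + t) >> 2        // (x + 2^pow - 1) >> pow rounds toward zero
+}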
+
+// bounded reports whether the integer value of n must be in [0, max).
+func bounded(n *Node, max int64) bool {
+	if n.Type == nil || !Isint[n.Type.Etype] {
+		return false
+	}
+
+	sign := Issigned[n.Type.Etype]
+	bits := int32(8 * n.Type.Width)
+
+	if Smallintconst(n) {
+		v := Mpgetfix(n.Val.U.(*Mpint))
+		return 0 <= v && v < max
+	}
+
+	switch n.Op {
+	case OAND:
+		v := int64(-1)
+		if Smallintconst(n.Left) {
+			v = Mpgetfix(n.Left.Val.U.(*Mpint))
+		} else if Smallintconst(n.Right) {
+			v = Mpgetfix(n.Right.Val.U.(*Mpint))
+		}
+
+		if 0 <= v && v < max {
+			return true
+		}
+
+	case OMOD:
+		if !sign && Smallintconst(n.Right) {
+			v := Mpgetfix(n.Right.Val.U.(*Mpint))
+			if 0 <= v && v <= max {
+				return true
+			}
+		}
+
+	case ODIV:
+		if !sign && Smallintconst(n.Right) {
+			v := Mpgetfix(n.Right.Val.U.(*Mpint))
+			for bits > 0 && v >= 2 {
+				bits--
+				v >>= 1
+			}
+		}
+
+	case ORSH:
+		if !sign && Smallintconst(n.Right) {
+			v := Mpgetfix(n.Right.Val.U.(*Mpint))
+			if v > int64(bits) {
+				return true
+			}
+			bits -= int32(v)
+		}
+	}
+
+	if !sign && bits <= 62 && 1<<uint(bits) <= max {
+		return true
+	}
+
+	return false
+}
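+
+// Illustrative examples of what bounded can prove, for a slice s of
+// length 16 and an index i of unknown value:
+//
+//	s[i&15]   // i&15 is always in [0, 16)
+//	s[i%16]   // provable only when i is unsigned
+//	s[i>>60]  // for uint64 i, the result is in [0, 16)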
+
+func usefield(n *Node) {
+	if obj.Fieldtrack_enabled == 0 {
+		return
+	}
+
+	switch n.Op {
+	default:
+		Fatal("usefield %v", Oconv(int(n.Op), 0))
+
+	case ODOT, ODOTPTR:
+		break
+	}
+
+	field := n.Paramfld
+	if field == nil {
+		Fatal("usefield %v %v without paramfld", n.Left.Type, n.Right.Sym)
+	}
+	if field.Note == nil || !strings.Contains(*field.Note, "go:\"track\"") {
+		return
+	}
+
+	// dedup on list
+	if field.Lastfn == Curfn {
+		return
+	}
+	field.Lastfn = Curfn
+	field.Outer = n.Left.Type
+	if Isptr[field.Outer.Etype] {
+		field.Outer = field.Outer.Type
+	}
+	if field.Outer.Sym == nil {
+		Yyerror("tracked field must be in named struct type")
+	}
+	if !exportname(field.Sym.Name) {
+		Yyerror("tracked field must be exported (upper case)")
+	}
+
+	l := typ(0)
+	l.Type = field
+	l.Down = Curfn.Paramfld
+	Curfn.Paramfld = l
+}
+
+func candiscardlist(l *NodeList) bool {
+	for ; l != nil; l = l.Next {
+		if !candiscard(l.N) {
+			return false
+		}
+	}
+	return true
+}
+
+func candiscard(n *Node) bool {
+	if n == nil {
+		return true
+	}
+
+	switch n.Op {
+	default:
+		return false
+
+	// Discardable as long as the subpieces are.
+	case ONAME,
+		ONONAME,
+		OTYPE,
+		OPACK,
+		OLITERAL,
+		OADD,
+		OSUB,
+		OOR,
+		OXOR,
+		OADDSTR,
+		OADDR,
+		OANDAND,
+		OARRAYBYTESTR,
+		OARRAYRUNESTR,
+		OSTRARRAYBYTE,
+		OSTRARRAYRUNE,
+		OCAP,
+		OCMPIFACE,
+		OCMPSTR,
+		OCOMPLIT,
+		OMAPLIT,
+		OSTRUCTLIT,
+		OARRAYLIT,
+		OPTRLIT,
+		OCONV,
+		OCONVIFACE,
+		OCONVNOP,
+		ODOT,
+		OEQ,
+		ONE,
+		OLT,
+		OLE,
+		OGT,
+		OGE,
+		OKEY,
+		OLEN,
+		OMUL,
+		OLSH,
+		ORSH,
+		OAND,
+		OANDNOT,
+		ONEW,
+		ONOT,
+		OCOM,
+		OPLUS,
+		OMINUS,
+		OOROR,
+		OPAREN,
+		ORUNESTR,
+		OREAL,
+		OIMAG,
+		OCOMPLEX:
+		break
+
+	// Discardable as long as we know it's not division by zero.
+	case ODIV, OMOD:
+		if Isconst(n.Right, CTINT) && mpcmpfixc(n.Right.Val.U.(*Mpint), 0) != 0 {
+			break
+		}
+		if Isconst(n.Right, CTFLT) && mpcmpfltc(n.Right.Val.U.(*Mpflt), 0) != 0 {
+			break
+		}
+		return false
+
+	// Discardable as long as we know it won't fail because of a bad size.
+	case OMAKECHAN, OMAKEMAP:
+		if Isconst(n.Left, CTINT) && mpcmpfixc(n.Left.Val.U.(*Mpint), 0) == 0 {
+			break
+		}
+		return false
+
+	// Difficult to tell what sizes are okay.
+	case OMAKESLICE:
+		return false
+	}
+
+	if !candiscard(n.Left) || !candiscard(n.Right) || !candiscard(n.Ntest) || !candiscard(n.Nincr) || !candiscardlist(n.Ninit) || !candiscardlist(n.Nbody) || !candiscardlist(n.Nelse) || !candiscardlist(n.List) || !candiscardlist(n.Rlist) {
+		return false
+	}
+
+	return true
+}
+
+// rewrite
+//	print(x, y, z)
+// into
+//	func(a1, a2, a3) {
+//		print(a1, a2, a3)
+//	}(x, y, z)
+// and same for println.
+
+var walkprintfunc_prgen int
+
+func walkprintfunc(np **Node, init **NodeList) {
+	n := *np
+
+	if n.Ninit != nil {
+		walkstmtlist(n.Ninit)
+		*init = concat(*init, n.Ninit)
+		n.Ninit = nil
+	}
+
+	t := Nod(OTFUNC, nil, nil)
+	num := 0
+	var printargs *NodeList
+	var a *Node
+	var buf string
+	for l := n.List; l != nil; l = l.Next {
+		buf = fmt.Sprintf("a%d", num)
+		num++
+		a = Nod(ODCLFIELD, newname(Lookup(buf)), typenod(l.N.Type))
+		t.List = list(t.List, a)
+		printargs = list(printargs, a.Left)
+	}
+
+	fn := Nod(ODCLFUNC, nil, nil)
+	walkprintfunc_prgen++
+	buf = fmt.Sprintf("print·%d", walkprintfunc_prgen)
+	fn.Nname = newname(Lookup(buf))
+	fn.Nname.Defn = fn
+	fn.Nname.Ntype = t
+	declare(fn.Nname, PFUNC)
+
+	oldfn := Curfn
+	Curfn = nil
+	funchdr(fn)
+
+	a = Nod(int(n.Op), nil, nil)
+	a.List = printargs
+	typecheck(&a, Etop)
+	walkstmt(&a)
+
+	fn.Nbody = list1(a)
+
+	funcbody(fn)
+
+	typecheck(&fn, Etop)
+	typechecklist(fn.Nbody, Etop)
+	xtop = list(xtop, fn)
+	Curfn = oldfn
+
+	a = Nod(OCALL, nil, nil)
+	a.Left = fn.Nname
+	a.List = n.List
+	typecheck(&a, Etop)
+	walkexpr(&a, init)
+	*np = a
+}
diff --git a/src/cmd/compile/internal/gc/y.go b/src/cmd/compile/internal/gc/y.go
new file mode 100644
index 0000000..72bce9a
--- /dev/null
+++ b/src/cmd/compile/internal/gc/y.go
@@ -0,0 +1,3512 @@
+//line go.y:21
+package gc
+
+import __yyfmt__ "fmt"
+
+//line go.y:21
+import (
+	"fmt"
+	"strings"
+)
+
+//line go.y:28
+type yySymType struct {
+	yys  int
+	node *Node
+	list *NodeList
+	typ  *Type
+	sym  *Sym
+	val  Val
+	i    int
+}
+
+const LLITERAL = 57346
+const LASOP = 57347
+const LCOLAS = 57348
+const LBREAK = 57349
+const LCASE = 57350
+const LCHAN = 57351
+const LCONST = 57352
+const LCONTINUE = 57353
+const LDDD = 57354
+const LDEFAULT = 57355
+const LDEFER = 57356
+const LELSE = 57357
+const LFALL = 57358
+const LFOR = 57359
+const LFUNC = 57360
+const LGO = 57361
+const LGOTO = 57362
+const LIF = 57363
+const LIMPORT = 57364
+const LINTERFACE = 57365
+const LMAP = 57366
+const LNAME = 57367
+const LPACKAGE = 57368
+const LRANGE = 57369
+const LRETURN = 57370
+const LSELECT = 57371
+const LSTRUCT = 57372
+const LSWITCH = 57373
+const LTYPE = 57374
+const LVAR = 57375
+const LANDAND = 57376
+const LANDNOT = 57377
+const LBODY = 57378
+const LCOMM = 57379
+const LDEC = 57380
+const LEQ = 57381
+const LGE = 57382
+const LGT = 57383
+const LIGNORE = 57384
+const LINC = 57385
+const LLE = 57386
+const LLSH = 57387
+const LLT = 57388
+const LNE = 57389
+const LOROR = 57390
+const LRSH = 57391
+const NotPackage = 57392
+const NotParen = 57393
+const PreferToRightParen = 57394
+
+var yyToknames = [...]string{
+	"$end",
+	"error",
+	"$unk",
+	"LLITERAL",
+	"LASOP",
+	"LCOLAS",
+	"LBREAK",
+	"LCASE",
+	"LCHAN",
+	"LCONST",
+	"LCONTINUE",
+	"LDDD",
+	"LDEFAULT",
+	"LDEFER",
+	"LELSE",
+	"LFALL",
+	"LFOR",
+	"LFUNC",
+	"LGO",
+	"LGOTO",
+	"LIF",
+	"LIMPORT",
+	"LINTERFACE",
+	"LMAP",
+	"LNAME",
+	"LPACKAGE",
+	"LRANGE",
+	"LRETURN",
+	"LSELECT",
+	"LSTRUCT",
+	"LSWITCH",
+	"LTYPE",
+	"LVAR",
+	"LANDAND",
+	"LANDNOT",
+	"LBODY",
+	"LCOMM",
+	"LDEC",
+	"LEQ",
+	"LGE",
+	"LGT",
+	"LIGNORE",
+	"LINC",
+	"LLE",
+	"LLSH",
+	"LLT",
+	"LNE",
+	"LOROR",
+	"LRSH",
+	"'+'",
+	"'-'",
+	"'|'",
+	"'^'",
+	"'*'",
+	"'/'",
+	"'%'",
+	"'&'",
+	"NotPackage",
+	"NotParen",
+	"'('",
+	"')'",
+	"PreferToRightParen",
+	"';'",
+	"'.'",
+	"'$'",
+	"'='",
+	"':'",
+	"'{'",
+	"'}'",
+	"'!'",
+	"'~'",
+	"'['",
+	"']'",
+	"'?'",
+	"'@'",
+	"','",
+}
+var yyStatenames = [...]string{}
+
+const yyEofCode = 1
+const yyErrCode = 2
+const yyMaxDepth = 200
+
+//line go.y:2304
+func fixlbrace(lbr int) {
+	// If the opening brace was an LBODY,
+	// set up for another one now that we're done.
+	// See comment in lex.go about loophack.
+	if lbr == LBODY {
+		loophack = 1
+	}
+}
+
+//line yacctab:1
+var yyExca = [...]int{
+	-1, 1,
+	1, -1,
+	-2, 0,
+	-1, 17,
+	1, 1,
+	63, 23,
+	-2, 0,
+	-1, 48,
+	6, 276,
+	66, 276,
+	76, 276,
+	-2, 49,
+	-1, 56,
+	67, 153,
+	-2, 162,
+	-1, 74,
+	60, 181,
+	-2, 215,
+	-1, 75,
+	60, 182,
+	-2, 183,
+	-1, 121,
+	60, 134,
+	64, 134,
+	68, 134,
+	72, 134,
+	-2, 266,
+	-1, 125,
+	60, 134,
+	64, 134,
+	68, 134,
+	72, 134,
+	-2, 267,
+	-1, 176,
+	2, 215,
+	36, 215,
+	60, 181,
+	68, 215,
+	-2, 173,
+	-1, 177,
+	36, 183,
+	60, 182,
+	68, 183,
+	-2, 174,
+	-1, 184,
+	63, 251,
+	69, 251,
+	-2, 0,
+	-1, 242,
+	63, 251,
+	69, 251,
+	-2, 0,
+	-1, 252,
+	8, 251,
+	13, 251,
+	63, 251,
+	69, 251,
+	-2, 0,
+	-1, 325,
+	4, 236,
+	63, 236,
+	69, 236,
+	-2, 157,
+	-1, 407,
+	36, 176,
+	60, 176,
+	68, 176,
+	-2, 167,
+	-1, 408,
+	36, 177,
+	60, 177,
+	68, 177,
+	-2, 168,
+	-1, 409,
+	36, 178,
+	60, 178,
+	68, 178,
+	-2, 169,
+	-1, 410,
+	36, 179,
+	60, 179,
+	68, 179,
+	-2, 170,
+	-1, 416,
+	8, 251,
+	13, 251,
+	63, 251,
+	69, 251,
+	-2, 0,
+	-1, 417,
+	63, 251,
+	69, 251,
+	-2, 0,
+	-1, 497,
+	63, 251,
+	69, 251,
+	-2, 0,
+	-1, 552,
+	60, 157,
+	-2, 318,
+	-1, 553,
+	60, 158,
+	-2, 317,
+	-1, 578,
+	8, 251,
+	13, 251,
+	63, 251,
+	69, 251,
+	-2, 0,
+	-1, 592,
+	36, 180,
+	60, 180,
+	68, 180,
+	-2, 171,
+}
+
+const yyNprod = 352
+const yyPrivate = 57344
+
+var yyTokenNames []string
+var yyStates []string
+
+const yyLast = 2282
+
+var yyAct = [...]int{
+
+	74, 381, 304, 285, 291, 486, 610, 398, 545, 478,
+	549, 296, 186, 75, 400, 229, 302, 401, 103, 389,
+	458, 356, 290, 318, 457, 34, 303, 338, 230, 245,
+	466, 109, 339, 101, 337, 332, 85, 104, 374, 248,
+	246, 174, 467, 286, 14, 324, 479, 328, 241, 212,
+	108, 6, 325, 155, 243, 469, 226, 181, 468, 516,
+	413, 320, 373, 392, 325, 219, 13, 208, 176, 10,
+	11, 584, 172, 469, 651, 385, 599, 583, 106, 191,
+	13, 177, 460, 541, 422, 160, 310, 331, 613, 161,
+	309, 446, 192, 322, 193, 626, 327, 162, 198, 321,
+	88, 12, 13, 10, 227, 238, 662, 194, 317, 227,
+	632, 448, 227, 12, 13, 227, 209, 228, 12, 13,
+	447, 10, 228, 203, 175, 228, 108, 393, 228, 461,
+	54, 660, 205, 445, 184, 384, 222, 460, 459, 204,
+	199, 200, 239, 88, 506, 155, 214, 216, 218, 507,
+	427, 631, 12, 13, 233, 625, 624, 202, 10, 88,
+	90, 176, 55, 288, 10, 627, 213, 213, 213, 213,
+	12, 13, 118, 118, 177, 295, 126, 154, 308, 176,
+	10, 416, 282, 282, 461, 282, 603, 620, 416, 10,
+	600, 227, 177, 301, 593, 416, 227, 227, 404, 227,
+	280, 484, 444, 90, 228, 622, 536, 12, 13, 228,
+	228, 506, 228, 12, 13, 86, 507, 175, 527, 90,
+	298, 163, 164, 165, 166, 167, 168, 169, 170, 12,
+	13, 523, 227, 580, 515, 175, 182, 153, 12, 13,
+	242, 171, 325, 397, 416, 228, 330, 155, 227, 334,
+	415, 227, 227, 116, 227, 185, 358, 367, 463, 371,
+	360, 228, 355, 362, 228, 228, 353, 228, 183, 365,
+	210, 322, 504, 369, 434, 314, 68, 321, 91, 379,
+	614, 78, 416, 340, 609, 340, 340, 376, 375, 182,
+	124, 12, 13, 604, 176, 83, 79, 10, 394, 325,
+	407, 336, 82, 351, 352, 10, 378, 177, 380, 414,
+	227, 227, 601, 408, 574, 409, 608, 10, 568, 558,
+	227, 183, 48, 228, 228, 465, 10, 464, 410, 391,
+	323, 329, 67, 228, 331, 348, 443, 656, 442, 412,
+	293, 163, 170, 605, 77, 436, 12, 13, 12, 13,
+	175, 424, 423, 234, 12, 13, 388, 383, 370, 366,
+	359, 114, 435, 333, 655, 227, 12, 13, 100, 129,
+	441, 99, 10, 490, 227, 12, 13, 439, 228, 84,
+	454, 20, 453, 429, 432, 480, 491, 228, 492, 654,
+	173, 10, 508, 473, 176, 10, 653, 645, 511, 619,
+	188, 493, 483, 494, 616, 607, 227, 177, 221, 282,
+	514, 606, 227, 597, 282, 519, 520, 340, 340, 228,
+	596, 12, 13, 227, 595, 228, 110, 498, 340, 489,
+	107, 510, 502, 592, 525, 449, 228, 582, 517, 227,
+	12, 13, 562, 524, 12, 13, 470, 539, 528, 531,
+	175, 522, 228, 254, 513, 512, 255, 256, 257, 258,
+	259, 260, 261, 262, 263, 264, 265, 266, 267, 268,
+	269, 270, 271, 272, 273, 274, 129, 129, 277, 554,
+	559, 227, 330, 173, 537, 294, 509, 557, 561, 227,
+	69, 564, 532, 538, 228, 534, 490, 490, 496, 495,
+	482, 572, 228, 300, 476, 475, 472, 176, 440, 491,
+	491, 492, 492, 567, 573, 340, 10, 340, 420, 553,
+	177, 577, 372, 340, 493, 493, 340, 590, 591, 579,
+	297, 585, 570, 540, 586, 542, 456, 551, 431, 438,
+	249, 555, 340, 571, 556, 354, 253, 129, 251, 180,
+	431, 102, 489, 489, 382, 323, 501, 530, 287, 129,
+	566, 117, 7, 175, 70, 12, 13, 329, 5, 197,
+	431, 227, 211, 433, 24, 16, 529, 19, 617, 430,
+	650, 455, 364, 533, 228, 428, 560, 480, 305, 335,
+	207, 206, 21, 93, 197, 623, 252, 629, 490, 197,
+	282, 630, 197, 635, 120, 197, 26, 386, 121, 125,
+	637, 491, 340, 492, 641, 639, 173, 340, 621, 402,
+	57, 565, 306, 76, 402, 618, 493, 158, 176, 642,
+	598, 387, 511, 340, 157, 602, 640, 665, 652, 581,
+	28, 177, 390, 643, 223, 644, 490, 159, 156, 235,
+	96, 657, 240, 661, 489, 497, 578, 417, 98, 491,
+	663, 492, 94, 664, 122, 122, 31, 22, 667, 666,
+	340, 15, 97, 95, 493, 553, 23, 201, 340, 49,
+	18, 197, 594, 129, 175, 3, 197, 197, 636, 197,
+	129, 282, 8, 551, 4, 2, 1, 450, 215, 543,
+	544, 547, 489, 548, 611, 92, 487, 129, 129, 189,
+	80, 81, 437, 72, 71, 237, 173, 615, 477, 316,
+	188, 220, 197, 326, 340, 244, 128, 340, 648, 628,
+	649, 311, 127, 17, 399, 319, 312, 313, 197, 315,
+	25, 197, 197, 27, 197, 36, 633, 634, 78, 37,
+	281, 66, 111, 638, 39, 38, 35, 124, 279, 278,
+	73, 217, 83, 79, 10, 113, 587, 149, 503, 82,
+	505, 87, 363, 0, 123, 0, 232, 150, 0, 0,
+	9, 151, 141, 142, 143, 144, 145, 146, 147, 148,
+	197, 377, 56, 196, 89, 0, 0, 0, 0, 231,
+	197, 197, 0, 0, 0, 105, 105, 112, 115, 0,
+	197, 77, 0, 12, 13, 426, 119, 119, 0, 0,
+	119, 0, 575, 576, 0, 0, 0, 0, 0, 173,
+	0, 0, 0, 275, 276, 0, 283, 0, 0, 402,
+	406, 588, 402, 402, 0, 0, 0, 0, 0, 0,
+	418, 419, 0, 0, 0, 197, 0, 0, 78, 0,
+	425, 89, 0, 197, 197, 0, 0, 124, 0, 0,
+	0, 0, 83, 79, 10, 0, 0, 105, 149, 82,
+	0, 0, 105, 0, 0, 112, 232, 0, 150, 247,
+	0, 0, 151, 0, 0, 0, 197, 145, 146, 147,
+	148, 0, 197, 196, 361, 406, 0, 188, 0, 231,
+	0, 0, 0, 197, 0, 236, 368, 78, 0, 0,
+	250, 77, 0, 12, 13, 225, 124, 0, 0, 197,
+	0, 83, 79, 10, 0, 0, 292, 0, 82, 0,
+	0, 0, 0, 0, 0, 232, 311, 0, 646, 647,
+	173, 0, 521, 402, 0, 0, 0, 0, 0, 56,
+	0, 0, 196, 526, 0, 0, 0, 0, 231, 0,
+	0, 197, 0, 0, 119, 119, 0, 0, 0, 197,
+	77, 0, 12, 13, 0, 0, 197, 197, 0, 0,
+	0, 0, 134, 149, 357, 152, 0, 135, 139, 140,
+	105, 0, 138, 150, 137, 136, 133, 151, 141, 142,
+	143, 144, 145, 146, 147, 148, 0, 56, 0, 0,
+	0, 569, 0, 0, 0, 0, 247, 56, 247, 0,
+	68, 0, 0, 0, 413, 78, 0, 0, 0, 78,
+	474, 0, 0, 0, 124, 0, 0, 481, 124, 83,
+	79, 10, 0, 83, 79, 10, 82, 0, 0, 0,
+	82, 197, 0, 65, 275, 276, 0, 232, 0, 0,
+	0, 0, 0, 0, 0, 0, 60, 61, 0, 64,
+	58, 0, 0, 59, 196, 0, 67, 0, 197, 421,
+	488, 0, 0, 0, 403, 0, 62, 63, 77, 0,
+	12, 13, 77, 0, 12, 13, 0, 68, 89, 0,
+	0, 0, 78, 0, 0, 0, 0, 0, 0, 0,
+	0, 124, 0, 347, 0, 462, 83, 79, 10, 357,
+	0, 0, 349, 82, 105, 0, 197, 345, 343, 341,
+	65, 105, 0, 0, 344, 112, 0, 485, 247, 0,
+	0, 348, 0, 60, 61, 0, 64, 58, 0, 0,
+	59, 0, 0, 67, 0, 0, 0, 78, 346, 0,
+	0, 589, 0, 62, 63, 77, 124, 12, 13, 0,
+	350, 83, 79, 10, 0, 0, 342, 0, 82, 13,
+	0, 56, 56, 0, 0, 232, 0, 0, 0, 119,
+	0, 119, 0, 0, 0, 0, 0, 0, 0, 535,
+	0, 119, 196, 247, 0, 0, 0, 0, 231, 0,
+	0, 0, 546, 550, 0, 0, 0, 0, 0, 0,
+	77, 357, 12, 13, 462, 0, 0, 0, 462, 0,
+	0, 0, 0, 563, 357, 0, 0, 0, 0, 0,
+	0, 0, 307, 0, 68, 0, 0, 41, 0, 78,
+	47, 42, 0, 247, 44, 0, 40, 50, 124, 43,
+	45, 53, 56, 83, 79, 10, 0, 0, 46, 52,
+	82, 51, 32, 30, 0, 0, 0, 65, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	60, 61, 0, 64, 58, 0, 0, 59, 0, 0,
+	67, 0, 0, 0, 0, 0, 0, 0, 308, 0,
+	62, 63, 77, 0, 12, 13, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 29, 105, 68, 247, 0, 41, 0, 78,
+	47, 42, 0, 56, 44, 0, 40, 50, 33, 43,
+	45, 53, 0, 83, 79, 10, 0, 0, 46, 52,
+	82, 51, 32, 30, 0, 0, 546, 65, 0, 550,
+	357, 0, 0, 462, 0, 0, 0, 357, 0, 357,
+	60, 61, 0, 64, 58, 0, 0, 59, 0, 68,
+	67, 0, 0, 0, 78, 0, 0, 0, 0, 0,
+	62, 63, 77, 124, 12, 13, 0, 0, 83, 79,
+	10, 0, 500, 0, 0, 82, 0, 0, 0, 0,
+	0, 0, 65, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 60, 61, 0, 64, 58,
+	0, 0, 59, 0, 68, 67, 0, 0, 0, 78,
+	0, 0, 0, 0, 0, 62, 63, 77, 124, 12,
+	13, 0, 0, 83, 79, 10, 0, 499, 0, 0,
+	82, 0, 0, 0, 0, 0, 0, 65, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	60, 61, 0, 64, 58, 0, 0, 59, 0, 68,
+	67, 0, 0, 0, 78, 0, 0, 0, 299, 0,
+	62, 63, 77, 124, 12, 13, 0, 124, 83, 79,
+	10, 0, 83, 79, 10, 82, 0, 395, 0, 82,
+	0, 0, 179, 0, 0, 0, 232, 0, 0, 0,
+	0, 0, 68, 0, 0, 60, 61, 78, 64, 178,
+	0, 0, 59, 196, 0, 67, 124, 0, 0, 231,
+	0, 83, 79, 10, 0, 62, 63, 77, 82, 12,
+	13, 77, 0, 12, 13, 179, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 60, 61,
+	0, 64, 178, 0, 0, 59, 0, 68, 67, 289,
+	0, 0, 78, 0, 0, 0, 0, 0, 62, 63,
+	77, 124, 12, 13, 0, 0, 83, 79, 10, 0,
+	284, 0, 0, 82, 0, 0, 0, 0, 0, 0,
+	65, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	68, 0, 0, 60, 61, 78, 64, 58, 187, 0,
+	59, 0, 0, 67, 124, 0, 0, 0, 0, 83,
+	79, 10, 0, 62, 63, 77, 82, 12, 13, 0,
+	0, 0, 0, 65, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 68, 0, 0, 60, 61, 78, 64,
+	58, 0, 0, 59, 0, 0, 67, 124, 0, 0,
+	0, 0, 83, 79, 10, 0, 62, 63, 77, 82,
+	12, 13, 0, 0, 0, 0, 65, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 68, 0, 0, 60,
+	61, 78, 64, 58, 0, 0, 59, 0, 0, 67,
+	124, 0, 0, 0, 0, 83, 79, 10, 0, 62,
+	63, 77, 82, 12, 13, 0, 0, 0, 0, 179,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 60, 61, 0, 64, 178, 0, 0, 59,
+	0, 0, 67, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 62, 63, 77, 0, 12, 13, 134, 149,
+	0, 152, 0, 135, 139, 140, 0, 0, 138, 150,
+	137, 136, 133, 151, 141, 142, 143, 144, 145, 146,
+	147, 148, 68, 0, 0, 0, 0, 299, 0, 0,
+	0, 0, 0, 0, 0, 0, 124, 396, 347, 0,
+	0, 83, 79, 10, 0, 0, 0, 349, 82, 78,
+	0, 0, 345, 343, 552, 65, 0, 0, 124, 344,
+	0, 0, 0, 83, 79, 10, 348, 0, 60, 61,
+	82, 64, 58, 0, 0, 59, 0, 232, 67, 0,
+	0, 0, 0, 346, 0, 0, 0, 0, 62, 63,
+	77, 0, 12, 13, 196, 0, 0, 0, 0, 0,
+	231, 342, 0, 12, 13, 0, 224, 0, 0, 0,
+	0, 0, 77, 0, 12, 13, 225, 134, 149, 0,
+	152, 0, 135, 139, 140, 0, 0, 138, 150, 137,
+	136, 133, 151, 141, 142, 143, 144, 145, 146, 147,
+	148, 134, 149, 0, 152, 0, 135, 139, 140, 0,
+	659, 138, 150, 137, 136, 133, 151, 141, 142, 143,
+	144, 145, 146, 147, 148, 134, 149, 0, 152, 0,
+	135, 139, 140, 0, 658, 138, 150, 137, 136, 133,
+	151, 141, 142, 143, 144, 145, 146, 147, 148, 0,
+	78, 0, 0, 0, 78, 0, 0, 0, 518, 124,
+	0, 0, 0, 124, 83, 79, 10, 0, 83, 79,
+	10, 82, 0, 0, 0, 82, 347, 0, 405, 0,
+	0, 0, 190, 0, 0, 349, 0, 0, 0, 0,
+	345, 343, 341, 0, 0, 196, 0, 344, 0, 196,
+	0, 411, 0, 0, 348, 195, 0, 0, 0, 347,
+	0, 0, 471, 77, 0, 12, 13, 77, 349, 12,
+	13, 346, 0, 345, 343, 341, 0, 612, 0, 347,
+	344, 0, 0, 0, 0, 0, 0, 348, 349, 342,
+	0, 0, 13, 345, 343, 341, 0, 0, 0, 347,
+	344, 0, 0, 0, 346, 0, 0, 452, 349, 0,
+	0, 0, 0, 345, 343, 341, 0, 0, 0, 0,
+	344, 0, 342, 0, 346, 13, 0, 348, 0, 0,
+	451, 0, 0, 0, 130, 0, 0, 0, 0, 0,
+	0, 0, 342, 0, 346, 13, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 342, 134, 149, 13, 152, 132, 135, 139,
+	140, 0, 131, 138, 150, 137, 136, 133, 151, 141,
+	142, 143, 144, 145, 146, 147, 148, 134, 149, 0,
+	152, 0, 135, 139, 140, 0, 0, 138, 150, 137,
+	136, 133, 151, 141, 142, 143, 144, 145, 146, 147,
+	148, 134, 149, 0, 0, 0, 135, 139, 140, 0,
+	0, 138, 150, 137, 136, 133, 151, 141, 142, 143,
+	144, 145, 146, 147, 148, 134, 149, 0, 0, 0,
+	135, 139, 140, 0, 0, 138, 150, 137, 136, 0,
+	151, 141, 142, 143, 144, 145, 146, 147, 148, 149,
+	0, 0, 0, 135, 139, 140, 0, 0, 138, 150,
+	137, 136, 0, 151, 141, 142, 143, 144, 145, 146,
+	147, 148,
+}
+var yyPact = [...]int{
+
+	-1000, -1000, 542, 536, -1000, 164, -1000, 550, 555, 318,
+	-1000, -1000, -1000, 588, -1000, -1000, 549, 1340, 316, 155,
+	-1000, 214, 640, 308, -1000, 305, -1000, -1000, -1000, -1000,
+	491, 370, 366, 301, -1000, -1000, -1000, -1000, -1000, 186,
+	-1000, 164, 164, 272, 272, 164, 1689, -1000, 2129, 171,
+	-1000, -1000, -1000, -1000, -1000, -1000, -1000, 25, 1689, 1689,
+	1689, 1689, 1689, 1689, 1689, 1689, 173, 1732, -1000, -1000,
+	-1000, 489, 200, -1000, -1000, -1000, 253, 1646, 1995, 26,
+	-1000, -1000, 200, 200, -1000, -1000, 96, 536, -1000, 587,
+	586, 42, 205, -1000, 547, -9, -9, -9, 5, -1000,
+	-1000, -1000, 347, 1850, -1000, -1000, -1000, 292, 849, -1000,
+	44, 1158, -1000, 172, 908, 488, -1000, -1000, -1000, -1000,
+	-1000, -1000, 25, -1000, 486, -1000, -1000, -1000, -23, 2153,
+	1689, -1000, -1000, 1689, 1689, 1689, 1689, 1689, 1689, 1689,
+	1689, 1689, 1689, 1689, 1689, 1689, 1689, 1689, 1689, 1689,
+	1689, 1689, 1689, 1689, 1689, 1689, 1603, 1689, 522, 1689,
+	1548, 280, 1689, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+	-1000, -1000, 469, 2153, -1000, -1000, -1000, -1000, 1732, 1828,
+	1689, -1000, -1000, -1000, 1250, -1000, 17, 13, 2153, -1000,
+	1158, -1000, -1000, -1000, -1000, 1158, 1158, 211, 1158, 39,
+	27, 300, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+	-1000, 585, 2090, -1000, 1114, 2090, -1000, 172, 485, 164,
+	297, -1000, -1000, 194, 1689, 164, -1000, -1000, -1000, -1000,
+	-1000, 1158, 573, 296, -1000, 191, 1689, 295, -1000, -1000,
+	-1000, -1000, 1250, 461, -14, -1000, -1000, 908, -1000, -1000,
+	1158, 908, 1250, 908, 2153, 2201, 2224, 732, 732, 732,
+	732, 732, 732, 843, 843, 843, 843, -1000, -1000, -1000,
+	-1000, -1000, -1000, -1000, 2177, -23, -23, 2153, -1000, 518,
+	294, -1000, -1000, 69, 1689, -1000, 293, -1000, -1000, -1000,
+	51, -1000, -1000, 1505, 1774, 176, 1026, 130, -1000, 1991,
+	958, 1026, 181, -1000, -1000, -1000, -1000, -1000, -1000, 1158,
+	1158, -1000, 457, -1000, 164, 11, 288, -1000, -1000, 739,
+	581, 525, 513, -1000, -1000, 210, 282, -1000, -1000, 479,
+	-1000, 545, 447, 139, -1000, 275, 273, -1000, -1000, -1000,
+	-1000, -1000, 129, 19, 52, 43, 2090, 2070, 572, 476,
+	78, 192, 264, 262, 164, -3, -1000, 2050, 445, 164,
+	1689, -23, -1000, 444, 1158, 443, 164, 1689, -23, 439,
+	164, 132, 1030, 908, -1000, -1000, -1000, -1000, 438, -1000,
+	437, -1000, -1000, 1689, 1450, 1395, 2153, 520, 1689, 203,
+	518, 425, -16, 1732, 394, 393, -1000, 1689, 165, -17,
+	-1000, -1000, 1941, -1000, -1000, 1509, -1000, -1000, -1000, -1000,
+	-1000, 1158, 390, -1000, 162, -1000, 1250, 1250, -1000, -1000,
+	-1000, -1000, 1158, 149, 217, 581, 164, -1000, -1000, 388,
+	545, 210, 581, 545, 164, 137, 274, -1000, 908, 386,
+	-1000, -1000, -1000, -1000, 2090, 10, 2090, 164, 1839, -1000,
+	-1000, 298, 2090, -1000, -1000, 2090, 164, 256, -1000, 133,
+	-1000, 582, -1000, 78, -1000, -1000, 381, -21, 164, 164,
+	581, 2090, -1000, -1000, -23, -1000, -1000, 255, -1000, -1000,
+	849, -23, -1000, -1000, -1000, 472, -1000, -1000, 908, -1000,
+	-1000, -1000, -1000, -1000, -1000, 1030, 1030, 1250, 251, 1689,
+	1689, -1000, -1000, -1000, -1000, -1000, 1732, 166, -1000, -1000,
+	376, -1000, -1000, -1000, 4, -1000, 1026, -1000, 1103, 1026,
+	1026, 372, -1000, -1000, -1000, 125, -1000, -1000, -1000, -1000,
+	-1000, 581, 363, -1000, 359, -1000, -1000, -1000, 352, -1000,
+	-1000, 2090, 3, 121, 249, -1000, 2090, 117, 230, -1000,
+	283, -1000, -1000, -1000, 350, -1000, -1000, 344, -1000, 266,
+	-1000, 221, 2017, 220, -1000, -1000, 581, 343, 164, 191,
+	908, 338, -1000, 118, 1689, 2153, 2153, 136, 1250, 89,
+	-1000, -1000, -1000, -1000, 1689, -1000, -1000, -1000, 2153, -1000,
+	82, 41, -1000, -1000, -1000, 581, 581, 1030, -1000, 2090,
+	-1000, 164, 581, -1000, 1839, 164, -1000, 2017, 133, -1000,
+	-1000, -1000, 164, -1000, 164, -1000, -1000, -1000, 336, -1000,
+	-1000, -1000, -1000, 219, -1000, 1689, 1689, 1732, 565, 1,
+	1026, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+	335, -1000, 328, 303, 276, 1030, 1917, 1893, -1000, -1000,
+	110, -1000, 37, 2017, -1000, -1000, 2017, -1000, -1000, -1000,
+	-1000, -1000, -1000, -1000, -1000, 1689, 518, -1000,
+}
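+
+// yyPact[state] is the base offset into yyAct for shifting on the current
+// lookahead token; entries at or below yyFlag (-1000) mark states that
+// have only a default action.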
+var yyPgo = [...]int{
+
+	0, 57, 771, 774, 45, 150, 26, 540, 29, 770,
+	768, 2, 28, 61, 322, 766, 17, 4, 765, 761,
+	760, 759, 758, 756, 3, 755, 622, 47, 14, 754,
+	490, 40, 41, 130, 37, 12, 752, 561, 43, 620,
+	751, 564, 750, 749, 25, 745, 162, 743, 31, 11,
+	740, 48, 5, 1, 18, 735, 679, 734, 7, 22,
+	733, 732, 19, 730, 729, 728, 16, 54, 725, 723,
+	33, 721, 23, 719, 588, 46, 9, 718, 715, 714,
+	713, 39, 712, 711, 710, 15, 56, 709, 13, 706,
+	0, 70, 49, 24, 20, 21, 10, 8, 704, 6,
+	42, 30, 703, 701, 700, 699, 88, 34, 698, 32,
+	27, 697, 696, 695, 694, 692, 685, 51, 44, 680,
+	36, 677, 35, 676, 671, 667, 666, 657, 656, 655,
+	648, 647, 642, 639, 637, 634, 631, 627, 38, 623,
+	596, 593,
+}
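+
+// yyPgo[n] is the base offset into yyAct for the goto table of
+// nonterminal n, consulted after each reduction.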
+var yyR1 = [...]int{
+
+	0, 112, 114, 114, 116, 113, 115, 115, 119, 119,
+	119, 120, 120, 121, 121, 2, 2, 2, 117, 123,
+	123, 124, 118, 50, 50, 50, 50, 50, 74, 74,
+	74, 74, 74, 74, 74, 74, 74, 74, 126, 70,
+	70, 70, 75, 75, 76, 76, 76, 36, 48, 44,
+	44, 44, 44, 44, 44, 9, 9, 9, 9, 127,
+	11, 128, 10, 62, 62, 129, 53, 42, 42, 42,
+	22, 22, 22, 21, 130, 23, 24, 24, 131, 132,
+	133, 25, 134, 63, 64, 64, 65, 65, 135, 136,
+	45, 137, 43, 14, 14, 14, 14, 14, 14, 14,
+	14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+	14, 14, 14, 14, 46, 46, 46, 46, 46, 46,
+	46, 46, 46, 41, 41, 41, 40, 40, 40, 40,
+	40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
+	49, 28, 16, 16, 15, 15, 39, 39, 17, 17,
+	31, 1, 1, 33, 34, 37, 37, 3, 3, 3,
+	91, 91, 30, 29, 81, 81, 7, 7, 7, 7,
+	7, 7, 32, 32, 32, 32, 87, 87, 87, 87,
+	87, 79, 79, 80, 89, 89, 89, 89, 89, 12,
+	12, 88, 88, 88, 88, 88, 88, 88, 85, 86,
+	84, 84, 83, 83, 47, 18, 18, 19, 19, 90,
+	51, 51, 52, 52, 52, 139, 20, 20, 60, 60,
+	71, 71, 77, 77, 78, 78, 73, 73, 69, 69,
+	72, 72, 72, 72, 72, 72, 4, 4, 13, 27,
+	27, 27, 82, 8, 8, 8, 8, 68, 68, 67,
+	67, 6, 6, 6, 6, 6, 26, 26, 26, 26,
+	26, 140, 26, 26, 26, 26, 26, 26, 26, 26,
+	66, 66, 55, 55, 54, 54, 56, 56, 59, 59,
+	57, 57, 57, 57, 58, 58, 122, 122, 138, 138,
+	35, 35, 61, 61, 38, 38, 101, 101, 105, 105,
+	103, 103, 5, 5, 141, 141, 141, 141, 141, 141,
+	92, 108, 106, 106, 106, 111, 111, 107, 107, 107,
+	107, 107, 107, 107, 107, 107, 107, 107, 110, 109,
+	95, 95, 97, 96, 96, 99, 99, 98, 98, 94,
+	94, 94, 93, 93, 125, 125, 100, 100, 104, 104,
+	102, 102,
+}
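+
+// yyR2[n] is the length of production n's right-hand side, which Parse
+// pops off the stack when reducing; yyR1 above maps the same production
+// number to its left-hand-side nonterminal.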
+var yyR2 = [...]int{
+
+	0, 4, 0, 3, 0, 3, 0, 3, 2, 5,
+	3, 3, 2, 1, 3, 1, 2, 2, 4, 0,
+	1, 0, 4, 0, 1, 1, 1, 1, 2, 5,
+	3, 2, 5, 7, 3, 2, 5, 3, 1, 2,
+	4, 3, 4, 3, 1, 2, 1, 1, 2, 1,
+	3, 3, 3, 2, 2, 3, 5, 5, 2, 0,
+	4, 0, 3, 0, 2, 0, 4, 4, 4, 2,
+	5, 1, 1, 2, 0, 3, 1, 3, 0, 0,
+	0, 8, 0, 5, 0, 2, 0, 2, 0, 0,
+	7, 0, 5, 1, 3, 3, 3, 3, 3, 3,
+	3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+	3, 3, 3, 3, 1, 2, 2, 2, 2, 2,
+	2, 2, 2, 3, 5, 6, 1, 1, 3, 5,
+	5, 4, 6, 8, 1, 5, 5, 5, 7, 1,
+	0, 3, 1, 4, 1, 4, 1, 3, 1, 1,
+	1, 1, 1, 1, 1, 0, 1, 1, 1, 1,
+	4, 4, 1, 1, 1, 2, 1, 1, 1, 1,
+	1, 3, 1, 1, 1, 2, 1, 1, 1, 1,
+	3, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+	3, 4, 4, 2, 3, 5, 1, 1, 2, 3,
+	5, 3, 5, 3, 3, 5, 8, 5, 8, 5,
+	0, 3, 0, 1, 3, 1, 4, 2, 0, 3,
+	1, 3, 1, 3, 1, 3, 1, 3, 1, 3,
+	3, 2, 4, 3, 5, 5, 1, 3, 1, 2,
+	1, 3, 4, 1, 2, 2, 1, 1, 3, 0,
+	2, 0, 1, 1, 1, 1, 1, 1, 1, 1,
+	1, 0, 4, 1, 2, 2, 2, 2, 2, 2,
+	1, 3, 1, 3, 1, 3, 1, 3, 1, 3,
+	1, 1, 3, 3, 0, 2, 0, 1, 0, 1,
+	0, 1, 0, 1, 0, 1, 0, 1, 0, 1,
+	0, 1, 0, 1, 4, 4, 5, 6, 4, 4,
+	1, 1, 1, 1, 1, 1, 1, 1, 1, 3,
+	4, 5, 4, 4, 2, 2, 4, 3, 3, 5,
+	3, 4, 3, 5, 1, 0, 1, 3, 1, 1,
+	2, 1, 1, 5, 0, 2, 1, 3, 1, 3,
+	1, 3,
+}
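+
+// yyChk is the consistency check for yyAct: a shift is taken only when
+// yyChk[yyAct[n]] equals the lookahead token, and a goto target is valid
+// only when yyChk[state] equals the negated nonterminal number.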
+var yyChk = [...]int{
+
+	-1000, -112, -113, -116, -114, 26, -117, 26, -115, -3,
+	25, -91, 74, 75, -118, -124, 25, -60, -119, 22,
+	63, 4, -125, -123, 25, -50, -74, -47, -26, 2,
+	33, -126, 32, 18, -44, -23, -45, -43, -25, -29,
+	16, 7, 11, 19, 14, 20, 28, 10, -14, -56,
+	17, 31, 29, 21, -33, -46, -3, -39, 54, 57,
+	50, 51, 70, 71, 53, 37, -40, 60, 4, -30,
+	-41, -79, -80, -20, -90, -88, -139, 72, 9, 24,
+	-84, -83, 30, 23, 63, -120, 60, -2, 4, -3,
+	64, 64, 65, -141, 22, 33, 10, 32, 18, 63,
+	63, -70, 60, -54, -34, -3, -75, 60, -54, -48,
+	60, -36, -3, -18, 60, -3, 67, -37, -33, -3,
+	-37, -41, -39, -3, 18, -41, -33, -61, -56, -14,
+	5, 43, 38, 48, 34, 39, 47, 46, 44, 40,
+	41, 50, 51, 52, 53, 54, 55, 56, 57, 35,
+	45, 49, 37, 66, 6, 76, -130, -135, -137, -131,
+	60, 64, 72, -46, -46, -46, -46, -46, -46, -46,
+	-46, 68, -17, -14, -32, -86, -90, -88, 54, 37,
+	60, -1, 36, 68, -1, 2, -35, 12, -14, -87,
+	37, -90, -88, -85, -12, 60, 54, -30, 72, -1,
+	-1, -121, 61, -120, -117, -118, 4, 4, 25, 74,
+	65, 25, -92, -91, -92, -108, -92, -19, -92, 60,
+	-71, 61, -70, -7, 66, 76, -86, -90, -88, -85,
+	-12, 60, 37, -75, 61, -7, 66, -78, 61, -48,
+	-7, -51, 68, -67, -68, -8, -31, -3, -81, -7,
+	12, 60, -140, 60, -14, -14, -14, -14, -14, -14,
+	-14, -14, -14, -14, -14, -14, -14, -14, -14, -14,
+	-14, -14, -14, -14, -14, -56, -56, -14, -21, -22,
+	-38, -42, -44, -56, 27, -24, -38, 36, -24, 61,
+	-59, -17, -3, 60, -14, -35, -49, 61, -32, 9,
+	-14, -49, -66, -6, -11, -74, -26, 2, 68, 73,
+	73, -7, -7, -7, 64, -7, -73, 69, -72, -55,
+	-13, 60, 54, -33, -4, 25, -69, 69, -27, -33,
+	-4, 60, -122, 63, -118, 4, -106, -107, -110, -109,
+	-91, 25, 72, 24, 30, 23, 54, 9, 37, 18,
+	66, -106, -106, -51, 60, -100, -95, -3, -122, 63,
+	66, -56, -34, -7, 9, -122, 63, 66, -56, -122,
+	63, -66, 61, 76, -138, -31, -81, -7, -67, -6,
+	-67, -53, 36, 63, 66, 6, -14, -136, 63, -62,
+	-132, -138, 12, 76, -17, 32, 73, 67, -58, -57,
+	-28, -16, -14, 68, 68, 37, -7, -90, -88, -85,
+	-12, 60, -138, 76, -58, 69, 63, -127, -7, -7,
+	61, -3, 73, -122, 63, -7, 76, -5, 4, -13,
+	54, 25, -13, 60, 64, -122, 63, -82, 60, -4,
+	61, -120, 63, 63, 73, 4, 72, 68, 68, -106,
+	-111, 60, 37, -107, -109, 9, 60, -93, -94, 60,
+	4, 51, -3, 66, 63, 63, -101, -100, 61, 76,
+	-106, 12, 61, -70, -56, 61, 61, -77, -76, -75,
+	-54, -56, 61, -48, 69, -3, -52, -89, 60, -86,
+	-90, -88, -85, -12, -8, 61, 61, -129, -38, 27,
+	27, 36, -38, -10, 69, -9, 8, 13, -53, 61,
+	-138, -17, 61, 61, -35, 69, 76, -138, 67, -49,
+	-49, -7, 61, 69, -6, -66, -7, 69, -72, -5,
+	-33, 61, -13, -5, -13, -3, 69, -27, -67, 61,
+	-106, 73, -106, -105, -104, -97, -3, -103, -102, -96,
+	-3, -106, 25, -91, -110, -106, -106, -101, 63, -94,
+	4, -93, 61, -3, -95, -5, -106, -122, 63, -7,
+	60, -67, -52, -66, 63, -14, -14, -62, -128, -59,
+	67, -133, 61, 73, 67, -28, -16, -15, -14, 68,
+	-58, -58, 61, 69, -5, 61, 61, 61, -106, 73,
+	69, 63, -106, 69, 63, 60, 61, 61, 50, 63,
+	-99, -98, 60, -106, 60, -5, 61, -76, -67, 61,
+	69, -38, 69, -66, 67, 66, 6, 76, -64, -35,
+	-49, 69, 69, -5, -5, -52, -106, -97, -5, -96,
+	-101, -99, -94, -101, -101, 61, -14, -14, -65, -63,
+	15, 73, -58, 61, 61, 61, 61, -52, 67, 67,
+	21, -11, 69, -99, -99, -134, -24, -53,
+}
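+
+// yyDef[state] is the default action for a state: 0 starts error
+// recovery, and -2 sends the parser to the yyExca exception table.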
+var yyDef = [...]int{
+
+	4, -2, 2, 0, 6, 0, 21, 0, 218, 0,
+	157, 158, 159, 0, 5, 344, 19, -2, 0, 0,
+	3, 0, 0, 0, 20, 0, 24, 25, 26, 27,
+	0, 0, 0, 0, 256, 257, 258, 259, 260, 0,
+	263, 155, 155, 0, 0, 0, 292, 38, -2, 0,
+	74, 88, 91, 78, 163, 93, -2, 114, 0, 0,
+	0, 0, 0, 0, 0, 0, 146, 0, 126, 127,
+	134, 0, 0, 139, -2, -2, 0, 290, 0, 0,
+	196, 197, 0, 0, 7, 8, 0, 21, 15, 0,
+	0, 0, 0, 345, 0, 0, 0, 0, 0, 18,
+	219, 28, 0, 0, 274, 154, 31, 0, 0, 35,
+	0, 0, 47, 210, 249, 0, 261, 264, 156, 153,
+	265, -2, 0, 162, 0, -2, 268, 269, 293, 276,
+	0, 53, 54, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 294, 294, 0, 294,
+	0, 0, 290, 115, 116, 117, 118, 119, 120, 121,
+	122, 140, 0, 148, 149, 172, -2, -2, 0, 0,
+	0, 140, 151, 152, -2, 217, 0, 0, 291, 193,
+	0, 176, 177, 178, 179, 0, 0, 189, 0, 0,
+	0, 286, 10, 13, 21, 12, 16, 17, 160, 161,
+	22, 0, 0, 310, 0, 0, 311, 210, 0, 0,
+	286, 30, 220, 39, 0, 0, 166, 167, 168, 169,
+	170, 0, 0, 286, 34, 0, 0, 286, 37, 224,
+	48, 204, -2, 0, 288, 247, 243, 162, 246, 150,
+	164, 249, -2, 249, 50, 94, 95, 96, 97, 98,
+	99, 100, 101, 102, 103, 104, 105, 106, 107, 108,
+	109, 110, 111, 112, 113, 51, 52, 277, 75, 0,
+	71, 72, 295, 0, 0, 89, 76, 63, 79, 123,
+	288, 278, 128, 0, 291, 0, 284, 147, 175, 0,
+	288, 284, 0, 270, 252, 253, 254, 255, 59, 0,
+	0, 194, 0, 198, 0, 0, 286, 201, 226, 0,
+	302, 0, 0, 272, 238, -2, 286, 203, 228, 0,
+	240, 0, 0, 287, 11, 0, 0, 312, 313, 314,
+	317, 318, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 296, 0, 346, 0, 0, 287,
+	0, 41, 275, 0, 0, 0, 287, 0, 43, 0,
+	287, 0, 212, 289, 250, 244, 245, 165, 0, 262,
+	0, 73, 65, 294, 0, 0, 69, 0, 294, 0,
+	0, 0, 288, 289, 0, 0, 131, 290, 0, 288,
+	280, 281, 142, 140, 140, 0, 199, -2, -2, -2,
+	-2, 0, 0, 289, 0, 216, -2, -2, 191, 192,
+	180, 190, 0, 0, 287, 302, 0, 231, 303, 0,
+	0, 236, 302, 0, 0, 0, 287, 239, 249, 0,
+	9, 14, 304, 305, 0, 0, 0, 298, 300, 324,
+	325, 0, 0, 315, 316, 0, 296, 0, 342, 0,
+	339, 0, 341, 0, 308, 309, 0, 297, 0, 0,
+	302, 0, 29, 221, 40, 171, 32, 286, 222, 44,
+	46, 42, 36, 225, 211, 162, 209, 213, 249, 184,
+	185, 186, 187, 188, 248, 212, 212, -2, 0, 0,
+	0, 63, 77, 64, 92, 61, 0, 0, 80, 124,
+	0, 279, 129, 130, 0, 137, 289, 285, 0, 284,
+	284, 0, 135, 136, 271, 0, 195, 200, 227, 230,
+	273, 302, 0, 233, 0, 237, 202, 229, 0, 241,
+	319, 0, 0, 0, 299, 348, 0, 0, 301, 350,
+	0, 334, -2, -2, 0, 327, 328, 0, 306, 0,
+	340, 0, 335, 0, 347, 330, 302, 0, 287, 45,
+	249, 0, 205, 0, 294, 67, 68, 0, -2, 0,
+	58, 84, 125, 132, 290, 282, 283, 141, 144, 140,
+	0, 0, -2, 60, 232, 302, 302, 212, 320, 0,
+	322, 0, 302, 323, 0, 296, 326, 335, 0, 307,
+	207, 336, 296, 338, 296, 331, 33, 223, 0, 214,
+	66, 70, 90, 62, 55, 0, 0, 0, 86, 0,
+	284, 143, 138, 234, 235, 242, 321, 349, 332, 351,
+	0, 329, 0, 0, 0, 212, 0, 0, 81, 85,
+	0, 133, 0, 335, 343, 337, 335, 206, 56, 57,
+	82, 87, 145, 333, 208, 294, 0, 83,
+}
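+
+// yyTok1, yyTok2, and yyTok3 translate lexer character codes into the
+// grammar's internal token numbering: yyTok1 covers low character codes
+// directly, yyTok2 covers the private token range starting at yyPrivate,
+// and yyTok3 is a sparse list of (char, token) pairs; yylex1 below
+// consults them in that order.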
+var yyTok1 = [...]int{
+
+	1, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+	3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+	3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+	3, 3, 3, 70, 3, 3, 65, 56, 57, 3,
+	60, 61, 54, 50, 76, 51, 64, 55, 3, 3,
+	3, 3, 3, 3, 3, 3, 3, 3, 67, 63,
+	3, 66, 3, 74, 75, 3, 3, 3, 3, 3,
+	3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+	3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+	3, 72, 3, 73, 53, 3, 3, 3, 3, 3,
+	3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+	3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+	3, 3, 3, 68, 52, 69, 71,
+}
+var yyTok2 = [...]int{
+
+	2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+	12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+	22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+	32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+	42, 43, 44, 45, 46, 47, 48, 49, 58, 59,
+	62,
+}
+var yyTok3 = [...]int{
+	0,
+}
+
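+// yyErrorMessages maps specific (parser state, lookahead token) pairs to
+// friendlier diagnostics than the generic "syntax error"; yyErrorMessage
+// below consults it when yyErrorVerbose is enabled.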
+var yyErrorMessages = [...]struct {
+	state int
+	token int
+	msg   string
+}{
+	{332, 76, "unexpected comma during import block"},
+	{89, 63, "missing import path; require quoted string"},
+	{390, 63, "missing { after if clause"},
+	{387, 63, "missing { after switch clause"},
+	{279, 63, "missing { after for clause"},
+	{498, 36, "missing { after for clause"},
+	{17, 68, "unexpected semicolon or newline before {"},
+	{111, 63, "unexpected semicolon or newline in type declaration"},
+	{78, 69, "unexpected } in channel type"},
+	{78, 61, "unexpected ) in channel type"},
+	{78, 76, "unexpected comma in channel type"},
+	{416, 15, "unexpected semicolon or newline before else"},
+	{329, 76, "name list not allowed in interface type"},
+	{279, 33, "var declaration not allowed in for initializer"},
+	{25, 68, "unexpected { at end of statement"},
+	{371, 68, "unexpected { at end of statement"},
+	{122, 63, "argument to go/defer must be function call"},
+	{398, 63, "need trailing comma before newline in composite literal"},
+	{414, 63, "need trailing comma before newline in composite literal"},
+	{124, 25, "nested func not allowed"},
+	{650, 63, "else must be followed by if or statement block"},
+}
+
+//line yaccpar:1
+
+/*	parser for yacc output	*/
+
+var (
+	yyDebug        = 0
+	yyErrorVerbose = false
+)
+
+type yyLexer interface {
+	Lex(lval *yySymType) int
+	Error(s string)
+}
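+
+// A minimal sketch of a yyLexer implementation, illustrative only (the
+// compiler's real lexer lives elsewhere in this package); it replays a
+// fixed slice of token codes and ignores lval, which a real lexer fills
+// in with the token's value:
+//
+//	type sliceLexer struct {
+//		toks []int
+//		pos  int
+//	}
+//
+//	func (l *sliceLexer) Lex(lval *yySymType) int {
+//		if l.pos >= len(l.toks) {
+//			return 0 // a code <= 0 means EOF to yylex1
+//		}
+//		t := l.toks[l.pos]
+//		l.pos++
+//		return t
+//	}
+//
+//	func (l *sliceLexer) Error(s string) { print("parse error: ", s, "\n") }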
+
+type yyParser interface {
+	Parse(yyLexer) int
+	Lookahead() int
+}
+
+type yyParserImpl struct {
+	lookahead func() int
+}
+
+func (p *yyParserImpl) Lookahead() int {
+	return p.lookahead()
+}
+
+func yyNewParser() yyParser {
+	p := &yyParserImpl{
+		lookahead: func() int { return -1 },
+	}
+	return p
+}
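+
+// The initial lookahead func reports -1, meaning no token has been read;
+// Parse replaces it with a closure over its live yychar and resets the
+// state again when it returns.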
+
+const yyFlag = -1000
+
+func yyTokname(c int) string {
+	if c >= 1 && c-1 < len(yyToknames) {
+		if yyToknames[c-1] != "" {
+			return yyToknames[c-1]
+		}
+	}
+	return __yyfmt__.Sprintf("tok-%v", c)
+}
+
+func yyStatname(s int) string {
+	if s >= 0 && s < len(yyStatenames) {
+		if yyStatenames[s] != "" {
+			return yyStatenames[s]
+		}
+	}
+	return __yyfmt__.Sprintf("state-%v", s)
+}
+
+func yyErrorMessage(state, lookAhead int) string {
+	const TOKSTART = 4
+
+	if !yyErrorVerbose {
+		return "syntax error"
+	}
+
+	for _, e := range yyErrorMessages {
+		if e.state == state && e.token == lookAhead {
+			return "syntax error: " + e.msg
+		}
+	}
+
+	res := "syntax error: unexpected " + yyTokname(lookAhead)
+
+	// To match Bison, suggest at most four expected tokens.
+	expected := make([]int, 0, 4)
+
+	// Look for shiftable tokens.
+	base := yyPact[state]
+	for tok := TOKSTART; tok-1 < len(yyToknames); tok++ {
+		if n := base + tok; n >= 0 && n < yyLast && yyChk[yyAct[n]] == tok {
+			if len(expected) == cap(expected) {
+				return res
+			}
+			expected = append(expected, tok)
+		}
+	}
+
+	if yyDef[state] == -2 {
+		i := 0
+		for yyExca[i] != -1 || yyExca[i+1] != state {
+			i += 2
+		}
+
+		// Look for tokens that we accept or reduce.
+		for i += 2; yyExca[i] >= 0; i += 2 {
+			tok := yyExca[i]
+			if tok < TOKSTART || yyExca[i+1] == 0 {
+				continue
+			}
+			if len(expected) == cap(expected) {
+				return res
+			}
+			expected = append(expected, tok)
+		}
+
+		// If the default action is to accept or reduce, give up.
+		if yyExca[i+1] != 0 {
+			return res
+		}
+	}
+
+	for i, tok := range expected {
+		if i == 0 {
+			res += ", expecting "
+		} else {
+			res += " or "
+		}
+		res += yyTokname(tok)
+	}
+	return res
+}
+
+func yylex1(lex yyLexer, lval *yySymType) (char, token int) {
+	token = 0
+	char = lex.Lex(lval)
+	if char <= 0 {
+		token = yyTok1[0]
+		goto out
+	}
+	if char < len(yyTok1) {
+		token = yyTok1[char]
+		goto out
+	}
+	if char >= yyPrivate {
+		if char < yyPrivate+len(yyTok2) {
+			token = yyTok2[char-yyPrivate]
+			goto out
+		}
+	}
+	for i := 0; i < len(yyTok3); i += 2 {
+		token = yyTok3[i+0]
+		if token == char {
+			token = yyTok3[i+1]
+			goto out
+		}
+	}
+
+out:
+	if token == 0 {
+		token = yyTok2[1] /* unknown char */
+	}
+	if yyDebug >= 3 {
+		__yyfmt__.Printf("lex %s(%d)\n", yyTokname(token), uint(char))
+	}
+	return char, token
+}
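+
+// For example, a raw ';' (character code 59) falls below len(yyTok1) and
+// translates to internal token 63, while grammar tokens delivered as
+// codes at or above yyPrivate translate through yyTok2.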
+
+func yyParse(yylex yyLexer) int {
+	return yyNewParser().Parse(yylex)
+}
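+
+// yyParse builds a fresh yyParserImpl per call, so independent parses do
+// not share lookahead state.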
+
+func (yyrcvr *yyParserImpl) Parse(yylex yyLexer) int {
+	var yyn int
+	var yylval yySymType
+	var yyVAL yySymType
+	var yyDollar []yySymType
+	yyS := make([]yySymType, yyMaxDepth)
+
+	Nerrs := 0   /* number of errors */
+	Errflag := 0 /* error recovery flag */
+	yystate := 0
+	yychar := -1
+	yytoken := -1 // yychar translated into internal numbering
+	yyrcvr.lookahead = func() int { return yychar }
+	defer func() {
+		// Make sure we report no lookahead when not parsing.
+		yystate = -1
+		yychar = -1
+		yytoken = -1
+	}()
+	yyp := -1
+	goto yystack
+
+ret0:
+	return 0
+
+ret1:
+	return 1
+
+yystack:
+	/* put a state and value onto the stack */
+	if yyDebug >= 4 {
+		__yyfmt__.Printf("char %v in %v\n", yyTokname(yytoken), yyStatname(yystate))
+	}
+
+	yyp++
+	if yyp >= len(yyS) {
+		nyys := make([]yySymType, len(yyS)*2)
+		copy(nyys, yyS)
+		yyS = nyys
+	}
+	yyS[yyp] = yyVAL
+	yyS[yyp].yys = yystate
+
+yynewstate:
+	yyn = yyPact[yystate]
+	if yyn <= yyFlag {
+		goto yydefault /* simple state */
+	}
+	if yychar < 0 {
+		yychar, yytoken = yylex1(yylex, &yylval)
+	}
+	yyn += yytoken
+	if yyn < 0 || yyn >= yyLast {
+		goto yydefault
+	}
+	yyn = yyAct[yyn]
+	if yyChk[yyn] == yytoken { /* valid shift */
+		yychar = -1
+		yytoken = -1
+		yyVAL = yylval
+		yystate = yyn
+		if Errflag > 0 {
+			Errflag--
+		}
+		goto yystack
+	}
+
+yydefault:
+	/* default state action */
+	yyn = yyDef[yystate]
+	if yyn == -2 {
+		if yychar < 0 {
+			yychar, yytoken = yylex1(yylex, &yylval)
+		}
+
+		/* look through exception table */
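+		// yyExca is laid out as (-1, state) headers, each followed by
+		// (token, action) pairs and terminated by a pair whose negative
+		// token selects the default action.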
+		xi := 0
+		for {
+			if yyExca[xi+0] == -1 && yyExca[xi+1] == yystate {
+				break
+			}
+			xi += 2
+		}
+		for xi += 2; ; xi += 2 {
+			yyn = yyExca[xi+0]
+			if yyn < 0 || yyn == yytoken {
+				break
+			}
+		}
+		yyn = yyExca[xi+1]
+		if yyn < 0 {
+			goto ret0
+		}
+	}
+	if yyn == 0 {
+		/* error ... attempt to resume parsing */
+		switch Errflag {
+		case 0: /* brand new error */
+			yylex.Error(yyErrorMessage(yystate, yytoken))
+			Nerrs++
+			if yyDebug >= 1 {
+				__yyfmt__.Printf("%s", yyStatname(yystate))
+				__yyfmt__.Printf(" saw %s\n", yyTokname(yytoken))
+			}
+			fallthrough
+
+		case 1, 2: /* incompletely recovered error ... try again */
+			Errflag = 3
+
+			/* find a state where "error" is a legal shift action */
+			for yyp >= 0 {
+				yyn = yyPact[yyS[yyp].yys] + yyErrCode
+				if yyn >= 0 && yyn < yyLast {
+					yystate = yyAct[yyn] /* simulate a shift of "error" */
+					if yyChk[yystate] == yyErrCode {
+						goto yystack
+					}
+				}
+
+				/* the current p has no shift on "error", pop stack */
+				if yyDebug >= 2 {
+					__yyfmt__.Printf("error recovery pops state %d\n", yyS[yyp].yys)
+				}
+				yyp--
+			}
+			/* there is no state on the stack with an error shift ... abort */
+			goto ret1
+
+		case 3: /* no shift yet; clobber input char */
+			if yyDebug >= 2 {
+				__yyfmt__.Printf("error recovery discards %s\n", yyTokname(yytoken))
+			}
+			if yytoken == yyEofCode {
+				goto ret1
+			}
+			yychar = -1
+			yytoken = -1
+			goto yynewstate /* try again in the same state */
+		}
+	}
+
+	/* reduction by production yyn */
+	if yyDebug >= 2 {
+		__yyfmt__.Printf("reduce %v in:\n\t%v\n", yyn, yyStatname(yystate))
+	}
+
+	yynt := yyn
+	yypt := yyp
+	_ = yypt // guard against "declared and not used"
+
+	yyp -= yyR2[yyn]
+	// yyp is now the index of $0. Perform the default action. Iff the
+	// reduced production is ε, $1 is possibly out of range.
+	if yyp+1 >= len(yyS) {
+		nyys := make([]yySymType, len(yyS)*2)
+		copy(nyys, yyS)
+		yyS = nyys
+	}
+	yyVAL = yyS[yyp+1]
+
+	/* consult goto table to find next state */
+	yyn = yyR1[yyn]
+	yyg := yyPgo[yyn]
+	yyj := yyg + yyS[yyp].yys + 1
+
+	if yyj >= yyLast {
+		yystate = yyAct[yyg]
+	} else {
+		yystate = yyAct[yyj]
+		if yyChk[yystate] != -yyn {
+			yystate = yyAct[yyg]
+		}
+	}
+	// dummy call; replaced with literal code
+	switch yynt {
+
+	case 1:
+		yyDollar = yyS[yypt-4 : yypt+1]
+		//line go.y:189
+		{
+			xtop = concat(xtop, yyDollar[4].list)
+		}
+	case 2:
+		yyDollar = yyS[yypt-0 : yypt+1]
+		//line go.y:195
+		{
+			prevlineno = lineno
+			Yyerror("package statement must be first")
+			errorexit()
+		}
+	case 3:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:201
+		{
+			mkpackage(yyDollar[2].sym.Name)
+		}
+	case 4:
+		yyDollar = yyS[yypt-0 : yypt+1]
+		//line go.y:211
+		{
+			importpkg = Runtimepkg
+
+			if Debug['A'] != 0 {
+				cannedimports("runtime.Builtin", "package runtime\n\n$$\n\n")
+			} else {
+				cannedimports("runtime.Builtin", runtimeimport)
+			}
+			curio.importsafe = true
+		}
+	case 5:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:223
+		{
+			importpkg = nil
+		}
+	case 11:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:237
+		{
+			ipkg := importpkg
+			my := importmyname
+			importpkg = nil
+			importmyname = nil
+
+			if my == nil {
+				my = Lookup(ipkg.Name)
+			}
+
+			pack := Nod(OPACK, nil, nil)
+			pack.Sym = my
+			pack.Pkg = ipkg
+			pack.Lineno = int32(yyDollar[1].i)
+
+			if strings.HasPrefix(my.Name, ".") {
+				importdot(ipkg, pack)
+				break
+			}
+			if my.Name == "init" {
+				Yyerror("cannot import package as init - init must be a func")
+				break
+			}
+			if my.Name == "_" {
+				break
+			}
+			if my.Def != nil {
+				lineno = int32(yyDollar[1].i)
+				redeclare(my, "as imported package name")
+			}
+			my.Def = pack
+			my.Lastlineno = int32(yyDollar[1].i)
+			my.Block = 1 // at top level
+		}
+	case 12:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		//line go.y:272
+		{
+			// When an invalid import path is passed to importfile,
+			// it calls Yyerror and then sets up a fake import with
+			// no package statement. This allows us to test more
+			// than one invalid import statement in a single file.
+			if nerrors == 0 {
+				Fatal("phase error in import")
+			}
+		}
+	case 15:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:288
+		{
+			// import with original name
+			yyVAL.i = parserline()
+			importmyname = nil
+			importfile(&yyDollar[1].val, yyVAL.i)
+		}
+	case 16:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		//line go.y:295
+		{
+			// import with given name
+			yyVAL.i = parserline()
+			importmyname = yyDollar[1].sym
+			importfile(&yyDollar[2].val, yyVAL.i)
+		}
+	case 17:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		//line go.y:302
+		{
+			// import into my name space
+			yyVAL.i = parserline()
+			importmyname = Lookup(".")
+			importfile(&yyDollar[2].val, yyVAL.i)
+		}
+	case 18:
+		yyDollar = yyS[yypt-4 : yypt+1]
+		//line go.y:311
+		{
+			if importpkg.Name == "" {
+				importpkg.Name = yyDollar[2].sym.Name
+				numImport[yyDollar[2].sym.Name]++
+			} else if importpkg.Name != yyDollar[2].sym.Name {
+				Yyerror("conflicting names %s and %s for package %q", importpkg.Name, yyDollar[2].sym.Name, importpkg.Path)
+			}
+			importpkg.Direct = 1
+			importpkg.Safe = curio.importsafe
+
+			if safemode != 0 && !curio.importsafe {
+				Yyerror("cannot import unsafe package %q", importpkg.Path)
+			}
+		}
+	case 20:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:328
+		{
+			if yyDollar[1].sym.Name == "safe" {
+				curio.importsafe = true
+			}
+		}
+	case 21:
+		yyDollar = yyS[yypt-0 : yypt+1]
+		//line go.y:335
+		{
+			defercheckwidth()
+		}
+	case 22:
+		yyDollar = yyS[yypt-4 : yypt+1]
+		//line go.y:339
+		{
+			resumecheckwidth()
+			unimportfile()
+		}
+	case 23:
+		yyDollar = yyS[yypt-0 : yypt+1]
+		//line go.y:348
+		{
+			Yyerror("empty top-level declaration")
+			yyVAL.list = nil
+		}
+	case 25:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:354
+		{
+			yyVAL.list = list1(yyDollar[1].node)
+		}
+	case 26:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:358
+		{
+			Yyerror("non-declaration statement outside function body")
+			yyVAL.list = nil
+		}
+	case 27:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:363
+		{
+			yyVAL.list = nil
+		}
+	case 28:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		//line go.y:369
+		{
+			yyVAL.list = yyDollar[2].list
+		}
+	case 29:
+		yyDollar = yyS[yypt-5 : yypt+1]
+		//line go.y:373
+		{
+			yyVAL.list = yyDollar[3].list
+		}
+	case 30:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:377
+		{
+			yyVAL.list = nil
+		}
+	case 31:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		//line go.y:381
+		{
+			yyVAL.list = yyDollar[2].list
+			iota_ = -100000
+			lastconst = nil
+		}
+	case 32:
+		yyDollar = yyS[yypt-5 : yypt+1]
+		//line go.y:387
+		{
+			yyVAL.list = yyDollar[3].list
+			iota_ = -100000
+			lastconst = nil
+		}
+	case 33:
+		yyDollar = yyS[yypt-7 : yypt+1]
+		//line go.y:393
+		{
+			yyVAL.list = concat(yyDollar[3].list, yyDollar[5].list)
+			iota_ = -100000
+			lastconst = nil
+		}
+	case 34:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:399
+		{
+			yyVAL.list = nil
+			iota_ = -100000
+		}
+	case 35:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		//line go.y:404
+		{
+			yyVAL.list = list1(yyDollar[2].node)
+		}
+	case 36:
+		yyDollar = yyS[yypt-5 : yypt+1]
+		//line go.y:408
+		{
+			yyVAL.list = yyDollar[3].list
+		}
+	case 37:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:412
+		{
+			yyVAL.list = nil
+		}
+	case 38:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:418
+		{
+			iota_ = 0
+		}
+	case 39:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		//line go.y:424
+		{
+			yyVAL.list = variter(yyDollar[1].list, yyDollar[2].node, nil)
+		}
+	case 40:
+		yyDollar = yyS[yypt-4 : yypt+1]
+		//line go.y:428
+		{
+			yyVAL.list = variter(yyDollar[1].list, yyDollar[2].node, yyDollar[4].list)
+		}
+	case 41:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:432
+		{
+			yyVAL.list = variter(yyDollar[1].list, nil, yyDollar[3].list)
+		}
+	case 42:
+		yyDollar = yyS[yypt-4 : yypt+1]
+		//line go.y:438
+		{
+			yyVAL.list = constiter(yyDollar[1].list, yyDollar[2].node, yyDollar[4].list)
+		}
+	case 43:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:442
+		{
+			yyVAL.list = constiter(yyDollar[1].list, nil, yyDollar[3].list)
+		}
+	case 45:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		//line go.y:449
+		{
+			yyVAL.list = constiter(yyDollar[1].list, yyDollar[2].node, nil)
+		}
+	case 46:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:453
+		{
+			yyVAL.list = constiter(yyDollar[1].list, nil, nil)
+		}
+	case 47:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:459
+		{
+			// different from dclname because the name
+			// becomes visible right here, not at the end
+			// of the declaration.
+			yyVAL.node = typedcl0(yyDollar[1].sym)
+		}
+	case 48:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		//line go.y:468
+		{
+			yyVAL.node = typedcl1(yyDollar[1].node, yyDollar[2].node, true)
+		}
+	case 49:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:474
+		{
+			yyVAL.node = yyDollar[1].node
+
+			// These nodes do not carry line numbers.
+			// Since a bare name used as an expression is an error,
+			// introduce a wrapper node to give the correct line.
+			switch yyVAL.node.Op {
+			case ONAME, ONONAME, OTYPE, OPACK, OLITERAL:
+				yyVAL.node = Nod(OPAREN, yyVAL.node, nil)
+				yyVAL.node.Implicit = true
+				break
+			}
+		}
+	case 50:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:488
+		{
+			yyVAL.node = Nod(OASOP, yyDollar[1].node, yyDollar[3].node)
+			yyVAL.node.Etype = uint8(yyDollar[2].i) // rathole to pass opcode
+		}
+	case 51:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:493
+		{
+			if yyDollar[1].list.Next == nil && yyDollar[3].list.Next == nil {
+				// simple
+				yyVAL.node = Nod(OAS, yyDollar[1].list.N, yyDollar[3].list.N)
+				break
+			}
+			// multiple
+			yyVAL.node = Nod(OAS2, nil, nil)
+			yyVAL.node.List = yyDollar[1].list
+			yyVAL.node.Rlist = yyDollar[3].list
+		}
+	case 52:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:505
+		{
+			if yyDollar[3].list.N.Op == OTYPESW {
+				yyVAL.node = Nod(OTYPESW, nil, yyDollar[3].list.N.Right)
+				if yyDollar[3].list.Next != nil {
+					Yyerror("expr.(type) must be alone in list")
+				}
+				if yyDollar[1].list.Next != nil {
+					Yyerror("argument count mismatch: %d = %d", count(yyDollar[1].list), 1)
+				} else if (yyDollar[1].list.N.Op != ONAME && yyDollar[1].list.N.Op != OTYPE && yyDollar[1].list.N.Op != ONONAME) || isblank(yyDollar[1].list.N) {
+					Yyerror("invalid variable name %s in type switch", yyDollar[1].list.N)
+				} else {
+					yyVAL.node.Left = dclname(yyDollar[1].list.N.Sym)
+				} // it's a colas, so must not re-use an oldname.
+				break
+			}
+			yyVAL.node = colas(yyDollar[1].list, yyDollar[3].list, int32(yyDollar[2].i))
+		}
+	case 53:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		//line go.y:523
+		{
+			yyVAL.node = Nod(OASOP, yyDollar[1].node, Nodintconst(1))
+			yyVAL.node.Implicit = true
+			yyVAL.node.Etype = OADD
+		}
+	case 54:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		//line go.y:529
+		{
+			yyVAL.node = Nod(OASOP, yyDollar[1].node, Nodintconst(1))
+			yyVAL.node.Implicit = true
+			yyVAL.node.Etype = OSUB
+		}
+	case 55:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:537
+		{
+			var n, nn *Node
+
+			// will be converted to OCASE
+			// right will point to next case
+			// done in casebody()
+			markdcl()
+			yyVAL.node = Nod(OXCASE, nil, nil)
+			yyVAL.node.List = yyDollar[2].list
+			if typesw != nil && typesw.Right != nil {
+				n = typesw.Right.Left
+				if n != nil {
+					// type switch - declare variable
+					nn = newname(n.Sym)
+					declare(nn, dclcontext)
+					yyVAL.node.Nname = nn
+
+					// keep track of the instances for reporting unused
+					nn.Defn = typesw.Right
+				}
+			}
+		}
+	case 56:
+		yyDollar = yyS[yypt-5 : yypt+1]
+		//line go.y:560
+		{
+			var n *Node
+
+			// will be converted to OCASE
+			// right will point to next case
+			// done in casebody()
+			markdcl()
+			yyVAL.node = Nod(OXCASE, nil, nil)
+			if yyDollar[2].list.Next == nil {
+				n = Nod(OAS, yyDollar[2].list.N, yyDollar[4].node)
+			} else {
+				n = Nod(OAS2, nil, nil)
+				n.List = yyDollar[2].list
+				n.Rlist = list1(yyDollar[4].node)
+			}
+			yyVAL.node.List = list1(n)
+		}
+	case 57:
+		yyDollar = yyS[yypt-5 : yypt+1]
+		//line go.y:578
+		{
+			// will be converted to OCASE
+			// right will point to next case
+			// done in casebody()
+			markdcl()
+			yyVAL.node = Nod(OXCASE, nil, nil)
+			yyVAL.node.List = list1(colas(yyDollar[2].list, list1(yyDollar[4].node), int32(yyDollar[3].i)))
+		}
+	case 58:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		//line go.y:587
+		{
+			var n, nn *Node
+
+			markdcl()
+			yyVAL.node = Nod(OXCASE, nil, nil)
+			if typesw != nil && typesw.Right != nil {
+				n = typesw.Right.Left
+				if n != nil {
+					// type switch - declare variable
+					nn = newname(n.Sym)
+					declare(nn, dclcontext)
+					yyVAL.node.Nname = nn
+
+					// keep track of the instances for reporting unused
+					nn.Defn = typesw.Right
+				}
+			}
+		}
+	case 59:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:608
+		{
+			markdcl()
+		}
+	case 60:
+		yyDollar = yyS[yypt-4 : yypt+1]
+		//line go.y:612
+		{
+			if yyDollar[3].list == nil {
+				yyVAL.node = Nod(OEMPTY, nil, nil)
+			} else {
+				yyVAL.node = liststmt(yyDollar[3].list)
+			}
+			popdcl()
+		}
+	case 61:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:623
+		{
+			// If the last token read by the lexer was consumed
+			// as part of the case, clear it (parser has cleared yychar).
+			// If the last token read by the lexer was the lookahead
+			// leave it alone (parser has it cached in yychar).
+			// This is so that the stmt_list action doesn't look at
+			// the case tokens if the stmt_list is empty.
+			yylast = yychar
+			yyDollar[1].node.Xoffset = int64(block)
+		}
+	case 62:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:634
+		{
+			// This is the only place in the language where a statement
+			// list is not allowed to drop the final semicolon, because
+			// it's the only place where a statement list is not followed
+			// by a closing brace.  Handle the error for pedantry.
+
+			// Find the final token of the statement list.
+			// yylast is lookahead; yyprev is last of stmt_list
+			last := yyprev
+
+			if last > 0 && last != ';' && yychar != '}' {
+				Yyerror("missing statement after label")
+			}
+			yyVAL.node = yyDollar[1].node
+			yyVAL.node.Nbody = yyDollar[3].list
+			popdcl()
+		}
+	case 63:
+		yyDollar = yyS[yypt-0 : yypt+1]
+		//line go.y:653
+		{
+			yyVAL.list = nil
+		}
+	case 64:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		//line go.y:657
+		{
+			yyVAL.list = list(yyDollar[1].list, yyDollar[2].node)
+		}
+	case 65:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:663
+		{
+			markdcl()
+		}
+	case 66:
+		yyDollar = yyS[yypt-4 : yypt+1]
+		//line go.y:667
+		{
+			yyVAL.list = yyDollar[3].list
+			popdcl()
+		}
+	case 67:
+		yyDollar = yyS[yypt-4 : yypt+1]
+		//line go.y:674
+		{
+			yyVAL.node = Nod(ORANGE, nil, yyDollar[4].node)
+			yyVAL.node.List = yyDollar[1].list
+			yyVAL.node.Etype = 0 // := flag
+		}
+	case 68:
+		yyDollar = yyS[yypt-4 : yypt+1]
+		//line go.y:680
+		{
+			yyVAL.node = Nod(ORANGE, nil, yyDollar[4].node)
+			yyVAL.node.List = yyDollar[1].list
+			yyVAL.node.Colas = true
+			colasdefn(yyDollar[1].list, yyVAL.node)
+		}
+	case 69:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		//line go.y:687
+		{
+			yyVAL.node = Nod(ORANGE, nil, yyDollar[2].node)
+			yyVAL.node.Etype = 0 // := flag
+		}
+	case 70:
+		yyDollar = yyS[yypt-5 : yypt+1]
+		//line go.y:694
+		{
+			// init ; test ; incr
+			if yyDollar[5].node != nil && yyDollar[5].node.Colas {
+				Yyerror("cannot declare in the for-increment")
+			}
+			yyVAL.node = Nod(OFOR, nil, nil)
+			if yyDollar[1].node != nil {
+				yyVAL.node.Ninit = list1(yyDollar[1].node)
+			}
+			yyVAL.node.Ntest = yyDollar[3].node
+			yyVAL.node.Nincr = yyDollar[5].node
+		}
+	case 71:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:707
+		{
+			// normal test
+			yyVAL.node = Nod(OFOR, nil, nil)
+			yyVAL.node.Ntest = yyDollar[1].node
+		}
+	case 73:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		//line go.y:716
+		{
+			yyVAL.node = yyDollar[1].node
+			yyVAL.node.Nbody = concat(yyVAL.node.Nbody, yyDollar[2].list)
+		}
+	case 74:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:723
+		{
+			markdcl()
+		}
+	case 75:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:727
+		{
+			yyVAL.node = yyDollar[3].node
+			popdcl()
+		}
+	case 76:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:734
+		{
+			// test
+			yyVAL.node = Nod(OIF, nil, nil)
+			yyVAL.node.Ntest = yyDollar[1].node
+		}
+	case 77:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:740
+		{
+			// init ; test
+			yyVAL.node = Nod(OIF, nil, nil)
+			if yyDollar[1].node != nil {
+				yyVAL.node.Ninit = list1(yyDollar[1].node)
+			}
+			yyVAL.node.Ntest = yyDollar[3].node
+		}
+	case 78:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:752
+		{
+			markdcl()
+		}
+	case 79:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:756
+		{
+			if yyDollar[3].node.Ntest == nil {
+				Yyerror("missing condition in if statement")
+			}
+		}
+	case 80:
+		yyDollar = yyS[yypt-5 : yypt+1]
+		//line go.y:762
+		{
+			yyDollar[3].node.Nbody = yyDollar[5].list
+		}
+	case 81:
+		yyDollar = yyS[yypt-8 : yypt+1]
+		//line go.y:766
+		{
+			var n *Node
+			var nn *NodeList
+
+			yyVAL.node = yyDollar[3].node
+			n = yyDollar[3].node
+			popdcl()
+			for nn = concat(yyDollar[7].list, yyDollar[8].list); nn != nil; nn = nn.Next {
+				if nn.N.Op == OIF {
+					popdcl()
+				}
+				n.Nelse = list1(nn.N)
+				n = nn.N
+			}
+		}
+	case 82:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		//line go.y:784
+		{
+			markdcl()
+		}
+	case 83:
+		yyDollar = yyS[yypt-5 : yypt+1]
+		//line go.y:788
+		{
+			if yyDollar[4].node.Ntest == nil {
+				Yyerror("missing condition in if statement")
+			}
+			yyDollar[4].node.Nbody = yyDollar[5].list
+			yyVAL.list = list1(yyDollar[4].node)
+		}
+	case 84:
+		yyDollar = yyS[yypt-0 : yypt+1]
+		//line go.y:797
+		{
+			yyVAL.list = nil
+		}
+	case 85:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		//line go.y:801
+		{
+			yyVAL.list = concat(yyDollar[1].list, yyDollar[2].list)
+		}
+	case 86:
+		yyDollar = yyS[yypt-0 : yypt+1]
+		//line go.y:806
+		{
+			yyVAL.list = nil
+		}
+	case 87:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		//line go.y:810
+		{
+			l := &NodeList{N: yyDollar[2].node}
+			l.End = l
+			yyVAL.list = l
+		}
+	case 88:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:818
+		{
+			markdcl()
+		}
+	case 89:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:822
+		{
+			var n *Node
+			n = yyDollar[3].node.Ntest
+			if n != nil && n.Op != OTYPESW {
+				n = nil
+			}
+			typesw = Nod(OXXX, typesw, n)
+		}
+	case 90:
+		yyDollar = yyS[yypt-7 : yypt+1]
+		//line go.y:831
+		{
+			yyVAL.node = yyDollar[3].node
+			yyVAL.node.Op = OSWITCH
+			yyVAL.node.List = yyDollar[6].list
+			typesw = typesw.Left
+			popdcl()
+		}
+	case 91:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:841
+		{
+			typesw = Nod(OXXX, typesw, nil)
+		}
+	case 92:
+		yyDollar = yyS[yypt-5 : yypt+1]
+		//line go.y:845
+		{
+			yyVAL.node = Nod(OSELECT, nil, nil)
+			yyVAL.node.Lineno = typesw.Lineno
+			yyVAL.node.List = yyDollar[4].list
+			typesw = typesw.Left
+		}
+	case 94:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:858
+		{
+			yyVAL.node = Nod(OOROR, yyDollar[1].node, yyDollar[3].node)
+		}
+	case 95:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:862
+		{
+			yyVAL.node = Nod(OANDAND, yyDollar[1].node, yyDollar[3].node)
+		}
+	case 96:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:866
+		{
+			yyVAL.node = Nod(OEQ, yyDollar[1].node, yyDollar[3].node)
+		}
+	case 97:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:870
+		{
+			yyVAL.node = Nod(ONE, yyDollar[1].node, yyDollar[3].node)
+		}
+	case 98:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:874
+		{
+			yyVAL.node = Nod(OLT, yyDollar[1].node, yyDollar[3].node)
+		}
+	case 99:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:878
+		{
+			yyVAL.node = Nod(OLE, yyDollar[1].node, yyDollar[3].node)
+		}
+	case 100:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:882
+		{
+			yyVAL.node = Nod(OGE, yyDollar[1].node, yyDollar[3].node)
+		}
+	case 101:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:886
+		{
+			yyVAL.node = Nod(OGT, yyDollar[1].node, yyDollar[3].node)
+		}
+	case 102:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:890
+		{
+			yyVAL.node = Nod(OADD, yyDollar[1].node, yyDollar[3].node)
+		}
+	case 103:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:894
+		{
+			yyVAL.node = Nod(OSUB, yyDollar[1].node, yyDollar[3].node)
+		}
+	case 104:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:898
+		{
+			yyVAL.node = Nod(OOR, yyDollar[1].node, yyDollar[3].node)
+		}
+	case 105:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:902
+		{
+			yyVAL.node = Nod(OXOR, yyDollar[1].node, yyDollar[3].node)
+		}
+	case 106:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:906
+		{
+			yyVAL.node = Nod(OMUL, yyDollar[1].node, yyDollar[3].node)
+		}
+	case 107:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:910
+		{
+			yyVAL.node = Nod(ODIV, yyDollar[1].node, yyDollar[3].node)
+		}
+	case 108:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:914
+		{
+			yyVAL.node = Nod(OMOD, yyDollar[1].node, yyDollar[3].node)
+		}
+	case 109:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:918
+		{
+			yyVAL.node = Nod(OAND, yyDollar[1].node, yyDollar[3].node)
+		}
+	case 110:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:922
+		{
+			yyVAL.node = Nod(OANDNOT, yyDollar[1].node, yyDollar[3].node)
+		}
+	case 111:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:926
+		{
+			yyVAL.node = Nod(OLSH, yyDollar[1].node, yyDollar[3].node)
+		}
+	case 112:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:930
+		{
+			yyVAL.node = Nod(ORSH, yyDollar[1].node, yyDollar[3].node)
+		}
+	case 113:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:935
+		{
+			yyVAL.node = Nod(OSEND, yyDollar[1].node, yyDollar[3].node)
+		}
+	case 115:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		//line go.y:942
+		{
+			yyVAL.node = Nod(OIND, yyDollar[2].node, nil)
+		}
+	case 116:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		//line go.y:946
+		{
+			if yyDollar[2].node.Op == OCOMPLIT {
+				// Special case for &T{...}: turn into (*T){...}.
+				yyVAL.node = yyDollar[2].node
+				yyVAL.node.Right = Nod(OIND, yyVAL.node.Right, nil)
+				yyVAL.node.Right.Implicit = true
+			} else {
+				yyVAL.node = Nod(OADDR, yyDollar[2].node, nil)
+			}
+		}
+	case 117:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		//line go.y:957
+		{
+			yyVAL.node = Nod(OPLUS, yyDollar[2].node, nil)
+		}
+	case 118:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		//line go.y:961
+		{
+			yyVAL.node = Nod(OMINUS, yyDollar[2].node, nil)
+		}
+	case 119:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		//line go.y:965
+		{
+			yyVAL.node = Nod(ONOT, yyDollar[2].node, nil)
+		}
+	case 120:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		//line go.y:969
+		{
+			Yyerror("the bitwise complement operator is ^")
+			yyVAL.node = Nod(OCOM, yyDollar[2].node, nil)
+		}
+	case 121:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		//line go.y:974
+		{
+			yyVAL.node = Nod(OCOM, yyDollar[2].node, nil)
+		}
+	case 122:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		//line go.y:978
+		{
+			yyVAL.node = Nod(ORECV, yyDollar[2].node, nil)
+		}
+	case 123:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:988
+		{
+			yyVAL.node = Nod(OCALL, yyDollar[1].node, nil)
+		}
+	case 124:
+		yyDollar = yyS[yypt-5 : yypt+1]
+		//line go.y:992
+		{
+			yyVAL.node = Nod(OCALL, yyDollar[1].node, nil)
+			yyVAL.node.List = yyDollar[3].list
+		}
+	case 125:
+		yyDollar = yyS[yypt-6 : yypt+1]
+		//line go.y:997
+		{
+			yyVAL.node = Nod(OCALL, yyDollar[1].node, nil)
+			yyVAL.node.List = yyDollar[3].list
+			yyVAL.node.Isddd = true
+		}
+	case 126:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:1005
+		{
+			yyVAL.node = nodlit(yyDollar[1].val)
+		}
+	case 128:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:1010
+		{
+			if yyDollar[1].node.Op == OPACK {
+				var s *Sym
+				s = restrictlookup(yyDollar[3].sym.Name, yyDollar[1].node.Pkg)
+				yyDollar[1].node.Used = true
+				yyVAL.node = oldname(s)
+				break
+			}
+			yyVAL.node = Nod(OXDOT, yyDollar[1].node, newname(yyDollar[3].sym))
+		}
+	case 129:
+		yyDollar = yyS[yypt-5 : yypt+1]
+		//line go.y:1021
+		{
+			yyVAL.node = Nod(ODOTTYPE, yyDollar[1].node, yyDollar[4].node)
+		}
+	case 130:
+		yyDollar = yyS[yypt-5 : yypt+1]
+		//line go.y:1025
+		{
+			yyVAL.node = Nod(OTYPESW, nil, yyDollar[1].node)
+		}
+	case 131:
+		yyDollar = yyS[yypt-4 : yypt+1]
+		//line go.y:1029
+		{
+			yyVAL.node = Nod(OINDEX, yyDollar[1].node, yyDollar[3].node)
+		}
+	case 132:
+		yyDollar = yyS[yypt-6 : yypt+1]
+		//line go.y:1033
+		{
+			yyVAL.node = Nod(OSLICE, yyDollar[1].node, Nod(OKEY, yyDollar[3].node, yyDollar[5].node))
+		}
+	case 133:
+		yyDollar = yyS[yypt-8 : yypt+1]
+		//line go.y:1037
+		{
+			if yyDollar[5].node == nil {
+				Yyerror("middle index required in 3-index slice")
+			}
+			if yyDollar[7].node == nil {
+				Yyerror("final index required in 3-index slice")
+			}
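+			// e.g. a[1:2:3] is accepted; a[1::3] and a[1:2:] are rejected above.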
+			yyVAL.node = Nod(OSLICE3, yyDollar[1].node, Nod(OKEY, yyDollar[3].node, Nod(OKEY, yyDollar[5].node, yyDollar[7].node)))
+		}
+	case 135:
+		yyDollar = yyS[yypt-5 : yypt+1]
+		//line go.y:1048
+		{
+			// conversion
+			yyVAL.node = Nod(OCALL, yyDollar[1].node, nil)
+			yyVAL.node.List = list1(yyDollar[3].node)
+		}
+	case 136:
+		yyDollar = yyS[yypt-5 : yypt+1]
+		//line go.y:1054
+		{
+			yyVAL.node = yyDollar[3].node
+			yyVAL.node.Right = yyDollar[1].node
+			yyVAL.node.List = yyDollar[4].list
+			fixlbrace(yyDollar[2].i)
+		}
+	case 137:
+		yyDollar = yyS[yypt-5 : yypt+1]
+		//line go.y:1061
+		{
+			yyVAL.node = yyDollar[3].node
+			yyVAL.node.Right = yyDollar[1].node
+			yyVAL.node.List = yyDollar[4].list
+		}
+	case 138:
+		yyDollar = yyS[yypt-7 : yypt+1]
+		//line go.y:1067
+		{
+			Yyerror("cannot parenthesize type in composite literal")
+			yyVAL.node = yyDollar[5].node
+			yyVAL.node.Right = yyDollar[2].node
+			yyVAL.node.List = yyDollar[6].list
+		}
+	case 140:
+		yyDollar = yyS[yypt-0 : yypt+1]
+		//line go.y:1076
+		{
+			// composite expression.
+			// make node early so we get the right line number.
+			yyVAL.node = Nod(OCOMPLIT, nil, nil)
+		}
+	case 141:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:1084
+		{
+			yyVAL.node = Nod(OKEY, yyDollar[1].node, yyDollar[3].node)
+		}
+	case 142:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:1090
+		{
+			// These nodes do not carry line numbers.
+			// Since a composite literal commonly spans several lines,
+			// the line number on errors may be misleading.
+			// Introduce a wrapper node to give the correct line.
+			yyVAL.node = yyDollar[1].node
+			switch yyVAL.node.Op {
+			case ONAME, ONONAME, OTYPE, OPACK, OLITERAL:
+				yyVAL.node = Nod(OPAREN, yyVAL.node, nil)
+				yyVAL.node.Implicit = true
+			}
+		}
+	case 143:
+		yyDollar = yyS[yypt-4 : yypt+1]
+		//line go.y:1103
+		{
+			yyVAL.node = yyDollar[2].node
+			yyVAL.node.List = yyDollar[3].list
+		}
+	case 145:
+		yyDollar = yyS[yypt-4 : yypt+1]
+		//line go.y:1111
+		{
+			yyVAL.node = yyDollar[2].node
+			yyVAL.node.List = yyDollar[3].list
+		}
+	case 147:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:1119
+		{
+			yyVAL.node = yyDollar[2].node
+
+			// Need to know on lhs of := whether there are ( ).
+			// Don't bother with the OPAREN in other cases:
+			// it's just a waste of memory and time.
+			switch yyVAL.node.Op {
+			case ONAME, ONONAME, OPACK, OTYPE, OLITERAL, OTYPESW:
+				yyVAL.node = Nod(OPAREN, yyVAL.node, nil)
+			}
+		}
+	case 151:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:1140
+		{
+			yyVAL.i = LBODY
+		}
+	case 152:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:1144
+		{
+			yyVAL.i = '{'
+		}
+	case 153:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:1155
+		{
+			if yyDollar[1].sym == nil {
+				yyVAL.node = nil
+			} else {
+				yyVAL.node = newname(yyDollar[1].sym)
+			}
+		}
+	case 154:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:1165
+		{
+			yyVAL.node = dclname(yyDollar[1].sym)
+		}
+	case 155:
+		yyDollar = yyS[yypt-0 : yypt+1]
+		//line go.y:1170
+		{
+			yyVAL.node = nil
+		}
+	case 157:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:1177
+		{
+			yyVAL.sym = yyDollar[1].sym
+			// during imports, unqualified non-exported identifiers are from builtinpkg
+			if importpkg != nil && !exportname(yyDollar[1].sym.Name) {
+				yyVAL.sym = Pkglookup(yyDollar[1].sym.Name, builtinpkg)
+			}
+		}
+	case 159:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:1186
+		{
+			yyVAL.sym = nil
+		}
+	case 160:
+		yyDollar = yyS[yypt-4 : yypt+1]
+		//line go.y:1192
+		{
+			var p *Pkg
+
+			if yyDollar[2].val.U.(string) == "" {
+				p = importpkg
+			} else {
+				if isbadimport(yyDollar[2].val.U.(string)) {
+					errorexit()
+				}
+				p = mkpkg(yyDollar[2].val.U.(string))
+			}
+			yyVAL.sym = Pkglookup(yyDollar[4].sym.Name, p)
+		}
+	case 161:
+		yyDollar = yyS[yypt-4 : yypt+1]
+		//line go.y:1206
+		{
+			var p *Pkg
+
+			if yyDollar[2].val.U.(string) == "" {
+				p = importpkg
+			} else {
+				if isbadimport(yyDollar[2].val.U.(string)) {
+					errorexit()
+				}
+				p = mkpkg(yyDollar[2].val.U.(string))
+			}
+			yyVAL.sym = Pkglookup("?", p)
+		}
+	case 162:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:1222
+		{
+			yyVAL.node = oldname(yyDollar[1].sym)
+			if yyVAL.node.Pack != nil {
+				yyVAL.node.Pack.Used = true
+			}
+		}
+	case 164:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:1243
+		{
+			Yyerror("final argument in variadic function missing type")
+			yyVAL.node = Nod(ODDD, typenod(typ(TINTER)), nil)
+		}
+	case 165:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		//line go.y:1248
+		{
+			yyVAL.node = Nod(ODDD, yyDollar[2].node, nil)
+		}
+	case 171:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:1259
+		{
+			yyVAL.node = yyDollar[2].node
+		}
+	case 175:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		//line go.y:1268
+		{
+			yyVAL.node = Nod(OIND, yyDollar[2].node, nil)
+		}
+	case 180:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:1278
+		{
+			yyVAL.node = yyDollar[2].node
+		}
+	case 190:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:1299
+		{
+			if yyDollar[1].node.Op == OPACK {
+				var s *Sym
+				s = restrictlookup(yyDollar[3].sym.Name, yyDollar[1].node.Pkg)
+				yyDollar[1].node.Used = true
+				yyVAL.node = oldname(s)
+				break
+			}
+			yyVAL.node = Nod(OXDOT, yyDollar[1].node, newname(yyDollar[3].sym))
+		}
+	case 191:
+		yyDollar = yyS[yypt-4 : yypt+1]
+		//line go.y:1312
+		{
+			yyVAL.node = Nod(OTARRAY, yyDollar[2].node, yyDollar[4].node)
+		}
+	case 192:
+		yyDollar = yyS[yypt-4 : yypt+1]
+		//line go.y:1316
+		{
+			// array literal of nelem
+			yyVAL.node = Nod(OTARRAY, Nod(ODDD, nil, nil), yyDollar[4].node)
+		}
+	case 193:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		//line go.y:1321
+		{
+			yyVAL.node = Nod(OTCHAN, yyDollar[2].node, nil)
+			yyVAL.node.Etype = Cboth
+		}
+	case 194:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:1326
+		{
+			yyVAL.node = Nod(OTCHAN, yyDollar[3].node, nil)
+			yyVAL.node.Etype = Csend
+		}
+	case 195:
+		yyDollar = yyS[yypt-5 : yypt+1]
+		//line go.y:1331
+		{
+			yyVAL.node = Nod(OTMAP, yyDollar[3].node, yyDollar[5].node)
+		}
+	case 198:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		//line go.y:1339
+		{
+			yyVAL.node = Nod(OIND, yyDollar[2].node, nil)
+		}
+	case 199:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:1345
+		{
+			yyVAL.node = Nod(OTCHAN, yyDollar[3].node, nil)
+			yyVAL.node.Etype = Crecv
+		}
+	case 200:
+		yyDollar = yyS[yypt-5 : yypt+1]
+		//line go.y:1352
+		{
+			yyVAL.node = Nod(OTSTRUCT, nil, nil)
+			yyVAL.node.List = yyDollar[3].list
+			fixlbrace(yyDollar[2].i)
+		}
+	case 201:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:1358
+		{
+			yyVAL.node = Nod(OTSTRUCT, nil, nil)
+			fixlbrace(yyDollar[2].i)
+		}
+	case 202:
+		yyDollar = yyS[yypt-5 : yypt+1]
+		//line go.y:1365
+		{
+			yyVAL.node = Nod(OTINTER, nil, nil)
+			yyVAL.node.List = yyDollar[3].list
+			fixlbrace(yyDollar[2].i)
+		}
+	case 203:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:1371
+		{
+			yyVAL.node = Nod(OTINTER, nil, nil)
+			fixlbrace(yyDollar[2].i)
+		}
+	case 204:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:1382
+		{
+			yyVAL.node = yyDollar[2].node
+			if yyVAL.node == nil {
+				break
+			}
+			if noescape && yyDollar[3].list != nil {
+				Yyerror("can only use //go:noescape with external func implementations")
+			}
+			yyVAL.node.Nbody = yyDollar[3].list
+			yyVAL.node.Func.Endlineno = lineno
+			yyVAL.node.Noescape = noescape
+			yyVAL.node.Func.Nosplit = nosplit
+			yyVAL.node.Func.Nowritebarrier = nowritebarrier
+			funcbody(yyVAL.node)
+		}
+	case 205:
+		yyDollar = yyS[yypt-5 : yypt+1]
+		//line go.y:1400
+		{
+			var t *Node
+
+			yyVAL.node = nil
+			yyDollar[3].list = checkarglist(yyDollar[3].list, 1)
+
+			if yyDollar[1].sym.Name == "init" {
+				yyDollar[1].sym = renameinit()
+				if yyDollar[3].list != nil || yyDollar[5].list != nil {
+					Yyerror("func init must have no arguments and no return values")
+				}
+			}
+			if localpkg.Name == "main" && yyDollar[1].sym.Name == "main" {
+				if yyDollar[3].list != nil || yyDollar[5].list != nil {
+					Yyerror("func main must have no arguments and no return values")
+				}
+			}
+
+			t = Nod(OTFUNC, nil, nil)
+			t.List = yyDollar[3].list
+			t.Rlist = yyDollar[5].list
+
+			yyVAL.node = Nod(ODCLFUNC, nil, nil)
+			yyVAL.node.Nname = newfuncname(yyDollar[1].sym)
+			yyVAL.node.Nname.Defn = yyVAL.node
+			yyVAL.node.Nname.Ntype = t // TODO: check if nname already has an ntype
+			declare(yyVAL.node.Nname, PFUNC)
+
+			funchdr(yyVAL.node)
+		}
+	case 206:
+		yyDollar = yyS[yypt-8 : yypt+1]
+		//line go.y:1431
+		{
+			var rcvr, t *Node
+
+			yyVAL.node = nil
+			yyDollar[2].list = checkarglist(yyDollar[2].list, 0)
+			yyDollar[6].list = checkarglist(yyDollar[6].list, 1)
+
+			if yyDollar[2].list == nil {
+				Yyerror("method has no receiver")
+				break
+			}
+			if yyDollar[2].list.Next != nil {
+				Yyerror("method has multiple receivers")
+				break
+			}
+			rcvr = yyDollar[2].list.N
+			if rcvr.Op != ODCLFIELD {
+				Yyerror("bad receiver in method")
+				break
+			}
+
+			t = Nod(OTFUNC, rcvr, nil)
+			t.List = yyDollar[6].list
+			t.Rlist = yyDollar[8].list
+
+			yyVAL.node = Nod(ODCLFUNC, nil, nil)
+			yyVAL.node.Func.Shortname = newfuncname(yyDollar[4].sym)
+			yyVAL.node.Nname = methodname1(yyVAL.node.Func.Shortname, rcvr.Right)
+			yyVAL.node.Nname.Defn = yyVAL.node
+			yyVAL.node.Nname.Ntype = t
+			yyVAL.node.Nname.Nointerface = nointerface
+			declare(yyVAL.node.Nname, PFUNC)
+
+			funchdr(yyVAL.node)
+		}
+	case 207:
+		yyDollar = yyS[yypt-5 : yypt+1]
+		//line go.y:1469
+		{
+			var s *Sym
+			var t *Type
+
+			yyVAL.node = nil
+
+			s = yyDollar[1].sym
+			t = functype(nil, yyDollar[3].list, yyDollar[5].list)
+
+			importsym(s, ONAME)
+			if s.Def != nil && s.Def.Op == ONAME {
+				if Eqtype(t, s.Def.Type) {
+					dclcontext = PDISCARD // since we skip funchdr below
+					break
+				}
+				Yyerror("inconsistent definition for func %v during import\n\t%v\n\t%v", s, s.Def.Type, t)
+			}
+
+			yyVAL.node = newfuncname(s)
+			yyVAL.node.Type = t
+			declare(yyVAL.node, PFUNC)
+
+			funchdr(yyVAL.node)
+		}
+	case 208:
+		yyDollar = yyS[yypt-8 : yypt+1]
+		//line go.y:1494
+		{
+			yyVAL.node = methodname1(newname(yyDollar[4].sym), yyDollar[2].list.N.Right)
+			yyVAL.node.Type = functype(yyDollar[2].list.N, yyDollar[6].list, yyDollar[8].list)
+
+			checkwidth(yyVAL.node.Type)
+			addmethod(yyDollar[4].sym, yyVAL.node.Type, false, nointerface)
+			nointerface = false
+			funchdr(yyVAL.node)
+
+			// inl.go's inlnode on a dotmeth node expects to find the inlineable body as
+			// (dotmeth's type).Nname.Inl, and dotmeth's type has been pulled
+			// out by typecheck's lookdot as this $$.ttype. So by providing
+			// this back link here we avoid special casing there.
+			yyVAL.node.Type.Nname = yyVAL.node
+		}
+	case 209:
+		yyDollar = yyS[yypt-5 : yypt+1]
+		//line go.y:1512
+		{
+			yyDollar[3].list = checkarglist(yyDollar[3].list, 1)
+			yyVAL.node = Nod(OTFUNC, nil, nil)
+			yyVAL.node.List = yyDollar[3].list
+			yyVAL.node.Rlist = yyDollar[5].list
+		}
+	case 210:
+		yyDollar = yyS[yypt-0 : yypt+1]
+		//line go.y:1520
+		{
+			yyVAL.list = nil
+		}
+	case 211:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:1524
+		{
+			yyVAL.list = yyDollar[2].list
+			if yyVAL.list == nil {
+				yyVAL.list = list1(Nod(OEMPTY, nil, nil))
+			}
+		}
+	case 212:
+		yyDollar = yyS[yypt-0 : yypt+1]
+		//line go.y:1533
+		{
+			yyVAL.list = nil
+		}
+	case 213:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:1537
+		{
+			yyVAL.list = list1(Nod(ODCLFIELD, nil, yyDollar[1].node))
+		}
+	case 214:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:1541
+		{
+			yyDollar[2].list = checkarglist(yyDollar[2].list, 0)
+			yyVAL.list = yyDollar[2].list
+		}
+	case 215:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:1548
+		{
+			closurehdr(yyDollar[1].node)
+		}
+	case 216:
+		yyDollar = yyS[yypt-4 : yypt+1]
+		//line go.y:1554
+		{
+			yyVAL.node = closurebody(yyDollar[3].list)
+			fixlbrace(yyDollar[2].i)
+		}
+	case 217:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		//line go.y:1559
+		{
+			yyVAL.node = closurebody(nil)
+		}
+	case 218:
+		yyDollar = yyS[yypt-0 : yypt+1]
+		//line go.y:1570
+		{
+			yyVAL.list = nil
+		}
+	case 219:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:1574
+		{
+			yyVAL.list = concat(yyDollar[1].list, yyDollar[2].list)
+			if nsyntaxerrors == 0 {
+				testdclstack()
+			}
+			nointerface = false
+			noescape = false
+			nosplit = false
+			nowritebarrier = false
+		}
+	case 221:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:1588
+		{
+			yyVAL.list = concat(yyDollar[1].list, yyDollar[3].list)
+		}
+	case 223:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:1595
+		{
+			yyVAL.list = concat(yyDollar[1].list, yyDollar[3].list)
+		}
+	case 224:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:1601
+		{
+			yyVAL.list = list1(yyDollar[1].node)
+		}
+	case 225:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:1605
+		{
+			yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
+		}
+	case 227:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:1612
+		{
+			yyVAL.list = concat(yyDollar[1].list, yyDollar[3].list)
+		}
+	case 228:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:1618
+		{
+			yyVAL.list = list1(yyDollar[1].node)
+		}
+	case 229:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:1622
+		{
+			yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
+		}
+	case 230:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:1628
+		{
+			var l *NodeList
+
+			var n *Node
+			l = yyDollar[1].list
+			if l == nil {
+				// ? symbol, during import (list1(nil) == nil)
+				n = yyDollar[2].node
+				if n.Op == OIND {
+					n = n.Left
+				}
+				n = embedded(n.Sym, importpkg)
+				n.Right = yyDollar[2].node
+				n.Val = yyDollar[3].val
+				yyVAL.list = list1(n)
+				break
+			}
+
+			for l = yyDollar[1].list; l != nil; l = l.Next {
+				l.N = Nod(ODCLFIELD, l.N, yyDollar[2].node)
+				l.N.Val = yyDollar[3].val
+			}
+		}
+	case 231:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		//line go.y:1652
+		{
+			yyDollar[1].node.Val = yyDollar[2].val
+			yyVAL.list = list1(yyDollar[1].node)
+		}
+	case 232:
+		yyDollar = yyS[yypt-4 : yypt+1]
+		//line go.y:1657
+		{
+			yyDollar[2].node.Val = yyDollar[4].val
+			yyVAL.list = list1(yyDollar[2].node)
+			Yyerror("cannot parenthesize embedded type")
+		}
+	case 233:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:1663
+		{
+			yyDollar[2].node.Right = Nod(OIND, yyDollar[2].node.Right, nil)
+			yyDollar[2].node.Val = yyDollar[3].val
+			yyVAL.list = list1(yyDollar[2].node)
+		}
+	case 234:
+		yyDollar = yyS[yypt-5 : yypt+1]
+		//line go.y:1669
+		{
+			yyDollar[3].node.Right = Nod(OIND, yyDollar[3].node.Right, nil)
+			yyDollar[3].node.Val = yyDollar[5].val
+			yyVAL.list = list1(yyDollar[3].node)
+			Yyerror("cannot parenthesize embedded type")
+		}
+	case 235:
+		yyDollar = yyS[yypt-5 : yypt+1]
+		//line go.y:1676
+		{
+			yyDollar[3].node.Right = Nod(OIND, yyDollar[3].node.Right, nil)
+			yyDollar[3].node.Val = yyDollar[5].val
+			yyVAL.list = list1(yyDollar[3].node)
+			Yyerror("cannot parenthesize embedded type")
+		}
+	case 236:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:1685
+		{
+			var n *Node
+
+			yyVAL.sym = yyDollar[1].sym
+			n = oldname(yyDollar[1].sym)
+			if n.Pack != nil {
+				n.Pack.Used = true
+			}
+		}
+	case 237:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:1695
+		{
+			var pkg *Pkg
+
+			if yyDollar[1].sym.Def == nil || yyDollar[1].sym.Def.Op != OPACK {
+				Yyerror("%v is not a package", yyDollar[1].sym)
+				pkg = localpkg
+			} else {
+				yyDollar[1].sym.Def.Used = true
+				pkg = yyDollar[1].sym.Def.Pkg
+			}
+			yyVAL.sym = restrictlookup(yyDollar[3].sym.Name, pkg)
+		}
+	case 238:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:1710
+		{
+			yyVAL.node = embedded(yyDollar[1].sym, localpkg)
+		}
+	case 239:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		//line go.y:1716
+		{
+			yyVAL.node = Nod(ODCLFIELD, yyDollar[1].node, yyDollar[2].node)
+			ifacedcl(yyVAL.node)
+		}
+	case 240:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:1721
+		{
+			yyVAL.node = Nod(ODCLFIELD, nil, oldname(yyDollar[1].sym))
+		}
+	case 241:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:1725
+		{
+			yyVAL.node = Nod(ODCLFIELD, nil, oldname(yyDollar[2].sym))
+			Yyerror("cannot parenthesize embedded type")
+		}
+	case 242:
+		yyDollar = yyS[yypt-4 : yypt+1]
+		//line go.y:1732
+		{
+			// without func keyword
+			yyDollar[2].list = checkarglist(yyDollar[2].list, 1)
+			yyVAL.node = Nod(OTFUNC, fakethis(), nil)
+			yyVAL.node.List = yyDollar[2].list
+			yyVAL.node.Rlist = yyDollar[4].list
+		}
+	case 244:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		//line go.y:1746
+		{
+			yyVAL.node = Nod(ONONAME, nil, nil)
+			yyVAL.node.Sym = yyDollar[1].sym
+			yyVAL.node = Nod(OKEY, yyVAL.node, yyDollar[2].node)
+		}
+	case 245:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		//line go.y:1752
+		{
+			yyVAL.node = Nod(ONONAME, nil, nil)
+			yyVAL.node.Sym = yyDollar[1].sym
+			yyVAL.node = Nod(OKEY, yyVAL.node, yyDollar[2].node)
+		}
+	case 247:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:1761
+		{
+			yyVAL.list = list1(yyDollar[1].node)
+		}
+	case 248:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:1765
+		{
+			yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
+		}
+	case 249:
+		yyDollar = yyS[yypt-0 : yypt+1]
+		//line go.y:1770
+		{
+			yyVAL.list = nil
+		}
+	case 250:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		//line go.y:1774
+		{
+			yyVAL.list = yyDollar[1].list
+		}
+	case 251:
+		yyDollar = yyS[yypt-0 : yypt+1]
+		//line go.y:1782
+		{
+			yyVAL.node = nil
+		}
+	case 253:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:1787
+		{
+			yyVAL.node = liststmt(yyDollar[1].list)
+		}
+	case 255:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:1792
+		{
+			yyVAL.node = nil
+		}
+	case 261:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		//line go.y:1803
+		{
+			yyDollar[1].node = Nod(OLABEL, yyDollar[1].node, nil)
+			yyDollar[1].node.Sym = dclstack // context, for goto restrictions
+		}
+	case 262:
+		yyDollar = yyS[yypt-4 : yypt+1]
+		//line go.y:1808
+		{
+			var l *NodeList
+
+			yyDollar[1].node.Defn = yyDollar[4].node
+			l = list1(yyDollar[1].node)
+			if yyDollar[4].node != nil {
+				l = list(l, yyDollar[4].node)
+			}
+			yyVAL.node = liststmt(l)
+		}
+	case 263:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:1819
+		{
+			// will be converted to OFALL
+			yyVAL.node = Nod(OXFALL, nil, nil)
+			yyVAL.node.Xoffset = int64(block)
+		}
+	case 264:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		//line go.y:1825
+		{
+			yyVAL.node = Nod(OBREAK, yyDollar[2].node, nil)
+		}
+	case 265:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		//line go.y:1829
+		{
+			yyVAL.node = Nod(OCONTINUE, yyDollar[2].node, nil)
+		}
+	case 266:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		//line go.y:1833
+		{
+			yyVAL.node = Nod(OPROC, yyDollar[2].node, nil)
+		}
+	case 267:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		//line go.y:1837
+		{
+			yyVAL.node = Nod(ODEFER, yyDollar[2].node, nil)
+		}
+	case 268:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		//line go.y:1841
+		{
+			yyVAL.node = Nod(OGOTO, yyDollar[2].node, nil)
+			yyVAL.node.Sym = dclstack // context, for goto restrictions
+		}
+	case 269:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		//line go.y:1846
+		{
+			yyVAL.node = Nod(ORETURN, nil, nil)
+			yyVAL.node.List = yyDollar[2].list
+			if yyVAL.node.List == nil && Curfn != nil {
+				var l *NodeList
+
+				for l = Curfn.Func.Dcl; l != nil; l = l.Next {
+					if l.N.Class == PPARAM {
+						continue
+					}
+					if l.N.Class != PPARAMOUT {
+						break
+					}
+					if l.N.Sym.Def != l.N {
+						Yyerror("%s is shadowed during return", l.N.Sym.Name)
+					}
+				}
+			}
+		}
+	case 270:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:1868
+		{
+			yyVAL.list = nil
+			if yyDollar[1].node != nil {
+				yyVAL.list = list1(yyDollar[1].node)
+			}
+		}
+	case 271:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:1875
+		{
+			yyVAL.list = yyDollar[1].list
+			if yyDollar[3].node != nil {
+				yyVAL.list = list(yyVAL.list, yyDollar[3].node)
+			}
+		}
+	case 272:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:1884
+		{
+			yyVAL.list = list1(yyDollar[1].node)
+		}
+	case 273:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:1888
+		{
+			yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
+		}
+	case 274:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:1894
+		{
+			yyVAL.list = list1(yyDollar[1].node)
+		}
+	case 275:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:1898
+		{
+			yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
+		}
+	case 276:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:1904
+		{
+			yyVAL.list = list1(yyDollar[1].node)
+		}
+	case 277:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:1908
+		{
+			yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
+		}
+	case 278:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:1914
+		{
+			yyVAL.list = list1(yyDollar[1].node)
+		}
+	case 279:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:1918
+		{
+			yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
+		}
+	case 280:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:1927
+		{
+			yyVAL.list = list1(yyDollar[1].node)
+		}
+	case 281:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:1931
+		{
+			yyVAL.list = list1(yyDollar[1].node)
+		}
+	case 282:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:1935
+		{
+			yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
+		}
+	case 283:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:1939
+		{
+			yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
+		}
+	case 284:
+		yyDollar = yyS[yypt-0 : yypt+1]
+		//line go.y:1944
+		{
+			yyVAL.list = nil
+		}
+	case 285:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		//line go.y:1948
+		{
+			yyVAL.list = yyDollar[1].list
+		}
+	case 290:
+		yyDollar = yyS[yypt-0 : yypt+1]
+		//line go.y:1962
+		{
+			yyVAL.node = nil
+		}
+	case 292:
+		yyDollar = yyS[yypt-0 : yypt+1]
+		//line go.y:1968
+		{
+			yyVAL.list = nil
+		}
+	case 294:
+		yyDollar = yyS[yypt-0 : yypt+1]
+		//line go.y:1974
+		{
+			yyVAL.node = nil
+		}
+	case 296:
+		yyDollar = yyS[yypt-0 : yypt+1]
+		//line go.y:1980
+		{
+			yyVAL.list = nil
+		}
+	case 298:
+		yyDollar = yyS[yypt-0 : yypt+1]
+		//line go.y:1986
+		{
+			yyVAL.list = nil
+		}
+	case 300:
+		yyDollar = yyS[yypt-0 : yypt+1]
+		//line go.y:1992
+		{
+			yyVAL.list = nil
+		}
+	case 302:
+		yyDollar = yyS[yypt-0 : yypt+1]
+		//line go.y:1998
+		{
+			yyVAL.val.Ctype = CTxxx
+		}
+	case 304:
+		yyDollar = yyS[yypt-4 : yypt+1]
+		//line go.y:2008
+		{
+			importimport(yyDollar[2].sym, yyDollar[3].val.U.(string))
+		}
+	case 305:
+		yyDollar = yyS[yypt-4 : yypt+1]
+		//line go.y:2012
+		{
+			importvar(yyDollar[2].sym, yyDollar[3].typ)
+		}
+	case 306:
+		yyDollar = yyS[yypt-5 : yypt+1]
+		//line go.y:2016
+		{
+			importconst(yyDollar[2].sym, Types[TIDEAL], yyDollar[4].node)
+		}
+	case 307:
+		yyDollar = yyS[yypt-6 : yypt+1]
+		//line go.y:2020
+		{
+			importconst(yyDollar[2].sym, yyDollar[3].typ, yyDollar[5].node)
+		}
+	case 308:
+		yyDollar = yyS[yypt-4 : yypt+1]
+		//line go.y:2024
+		{
+			importtype(yyDollar[2].typ, yyDollar[3].typ)
+		}
+	case 309:
+		yyDollar = yyS[yypt-4 : yypt+1]
+		//line go.y:2028
+		{
+			if yyDollar[2].node == nil {
+				dclcontext = PEXTERN // since we skip the funcbody below
+				break
+			}
+
+			yyDollar[2].node.Func.Inl = yyDollar[3].list
+
+			funcbody(yyDollar[2].node)
+			importlist = list(importlist, yyDollar[2].node)
+
+			if Debug['E'] > 0 {
+				fmt.Printf("import [%q] func %v \n", importpkg.Path, yyDollar[2].node)
+				if Debug['m'] > 2 && yyDollar[2].node.Func.Inl != nil {
+					fmt.Printf("inl body:%v\n", yyDollar[2].node.Func.Inl)
+				}
+			}
+		}
+	case 310:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:2049
+		{
+			yyVAL.sym = yyDollar[1].sym
+			structpkg = yyVAL.sym.Pkg
+		}
+	case 311:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:2056
+		{
+			yyVAL.typ = pkgtype(yyDollar[1].sym)
+			importsym(yyDollar[1].sym, OTYPE)
+		}
+	case 317:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:2076
+		{
+			yyVAL.typ = pkgtype(yyDollar[1].sym)
+		}
+	case 318:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:2080
+		{
+			// predefined name like uint8
+			yyDollar[1].sym = Pkglookup(yyDollar[1].sym.Name, builtinpkg)
+			if yyDollar[1].sym.Def == nil || yyDollar[1].sym.Def.Op != OTYPE {
+				Yyerror("%s is not a type", yyDollar[1].sym.Name)
+				yyVAL.typ = nil
+			} else {
+				yyVAL.typ = yyDollar[1].sym.Def.Type
+			}
+		}
+	case 319:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:2091
+		{
+			yyVAL.typ = aindex(nil, yyDollar[3].typ)
+		}
+	case 320:
+		yyDollar = yyS[yypt-4 : yypt+1]
+		//line go.y:2095
+		{
+			yyVAL.typ = aindex(nodlit(yyDollar[2].val), yyDollar[4].typ)
+		}
+	case 321:
+		yyDollar = yyS[yypt-5 : yypt+1]
+		//line go.y:2099
+		{
+			yyVAL.typ = maptype(yyDollar[3].typ, yyDollar[5].typ)
+		}
+	case 322:
+		yyDollar = yyS[yypt-4 : yypt+1]
+		//line go.y:2103
+		{
+			yyVAL.typ = tostruct(yyDollar[3].list)
+		}
+	case 323:
+		yyDollar = yyS[yypt-4 : yypt+1]
+		//line go.y:2107
+		{
+			yyVAL.typ = tointerface(yyDollar[3].list)
+		}
+	case 324:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		//line go.y:2111
+		{
+			yyVAL.typ = Ptrto(yyDollar[2].typ)
+		}
+	case 325:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		//line go.y:2115
+		{
+			yyVAL.typ = typ(TCHAN)
+			yyVAL.typ.Type = yyDollar[2].typ
+			yyVAL.typ.Chan = Cboth
+		}
+	case 326:
+		yyDollar = yyS[yypt-4 : yypt+1]
+		//line go.y:2121
+		{
+			yyVAL.typ = typ(TCHAN)
+			yyVAL.typ.Type = yyDollar[3].typ
+			yyVAL.typ.Chan = Cboth
+		}
+	case 327:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:2127
+		{
+			yyVAL.typ = typ(TCHAN)
+			yyVAL.typ.Type = yyDollar[3].typ
+			yyVAL.typ.Chan = Csend
+		}
+	case 328:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:2135
+		{
+			yyVAL.typ = typ(TCHAN)
+			yyVAL.typ.Type = yyDollar[3].typ
+			yyVAL.typ.Chan = Crecv
+		}
+	case 329:
+		yyDollar = yyS[yypt-5 : yypt+1]
+		//line go.y:2143
+		{
+			yyVAL.typ = functype(nil, yyDollar[3].list, yyDollar[5].list)
+		}
+	case 330:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:2149
+		{
+			yyVAL.node = Nod(ODCLFIELD, nil, typenod(yyDollar[2].typ))
+			if yyDollar[1].sym != nil {
+				yyVAL.node.Left = newname(yyDollar[1].sym)
+			}
+			yyVAL.node.Val = yyDollar[3].val
+		}
+	case 331:
+		yyDollar = yyS[yypt-4 : yypt+1]
+		//line go.y:2157
+		{
+			var t *Type
+
+			t = typ(TARRAY)
+			t.Bound = -1
+			t.Type = yyDollar[3].typ
+
+			yyVAL.node = Nod(ODCLFIELD, nil, typenod(t))
+			if yyDollar[1].sym != nil {
+				yyVAL.node.Left = newname(yyDollar[1].sym)
+			}
+			yyVAL.node.Isddd = true
+			yyVAL.node.Val = yyDollar[4].val
+		}
+	case 332:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:2174
+		{
+			var s *Sym
+			var p *Pkg
+
+			if yyDollar[1].sym != nil && yyDollar[1].sym.Name != "?" {
+				yyVAL.node = Nod(ODCLFIELD, newname(yyDollar[1].sym), typenod(yyDollar[2].typ))
+				yyVAL.node.Val = yyDollar[3].val
+			} else {
+				s = yyDollar[2].typ.Sym
+				if s == nil && Isptr[yyDollar[2].typ.Etype] {
+					s = yyDollar[2].typ.Type.Sym
+				}
+				p = importpkg
+				if yyDollar[1].sym != nil {
+					p = yyDollar[1].sym.Pkg
+				}
+				yyVAL.node = embedded(s, p)
+				yyVAL.node.Right = typenod(yyDollar[2].typ)
+				yyVAL.node.Val = yyDollar[3].val
+			}
+		}
+	case 333:
+		yyDollar = yyS[yypt-5 : yypt+1]
+		//line go.y:2198
+		{
+			yyVAL.node = Nod(ODCLFIELD, newname(yyDollar[1].sym), typenod(functype(fakethis(), yyDollar[3].list, yyDollar[5].list)))
+		}
+	case 334:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:2202
+		{
+			yyVAL.node = Nod(ODCLFIELD, nil, typenod(yyDollar[1].typ))
+		}
+	case 335:
+		yyDollar = yyS[yypt-0 : yypt+1]
+		//line go.y:2207
+		{
+			yyVAL.list = nil
+		}
+	case 337:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:2214
+		{
+			yyVAL.list = yyDollar[2].list
+		}
+	case 338:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:2218
+		{
+			yyVAL.list = list1(Nod(ODCLFIELD, nil, typenod(yyDollar[1].typ)))
+		}
+	case 339:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:2228
+		{
+			yyVAL.node = nodlit(yyDollar[1].val)
+		}
+	case 340:
+		yyDollar = yyS[yypt-2 : yypt+1]
+		//line go.y:2232
+		{
+			yyVAL.node = nodlit(yyDollar[2].val)
+			switch yyVAL.node.Val.Ctype {
+			case CTINT, CTRUNE:
+				mpnegfix(yyVAL.node.Val.U.(*Mpint))
+				break
+			case CTFLT:
+				mpnegflt(yyVAL.node.Val.U.(*Mpflt))
+				break
+			case CTCPLX:
+				mpnegflt(&yyVAL.node.Val.U.(*Mpcplx).Real)
+				mpnegflt(&yyVAL.node.Val.U.(*Mpcplx).Imag)
+				break
+			default:
+				Yyerror("bad negated constant")
+			}
+		}
+	case 341:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:2250
+		{
+			yyVAL.node = oldname(Pkglookup(yyDollar[1].sym.Name, builtinpkg))
+			if yyVAL.node.Op != OLITERAL {
+				Yyerror("bad constant %v", yyVAL.node.Sym)
+			}
+		}
+	case 343:
+		yyDollar = yyS[yypt-5 : yypt+1]
+		//line go.y:2260
+		{
+			if yyDollar[2].node.Val.Ctype == CTRUNE && yyDollar[4].node.Val.Ctype == CTINT {
+				yyVAL.node = yyDollar[2].node
+				mpaddfixfix(yyDollar[2].node.Val.U.(*Mpint), yyDollar[4].node.Val.U.(*Mpint), 0)
+				break
+			}
+			yyDollar[4].node.Val.U.(*Mpcplx).Real = yyDollar[4].node.Val.U.(*Mpcplx).Imag
+			Mpmovecflt(&yyDollar[4].node.Val.U.(*Mpcplx).Imag, 0.0)
+			yyVAL.node = nodcplxlit(yyDollar[2].node.Val, yyDollar[4].node.Val)
+		}
+	case 346:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:2276
+		{
+			yyVAL.list = list1(yyDollar[1].node)
+		}
+	case 347:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:2280
+		{
+			yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
+		}
+	case 348:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:2286
+		{
+			yyVAL.list = list1(yyDollar[1].node)
+		}
+	case 349:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:2290
+		{
+			yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
+		}
+	case 350:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line go.y:2296
+		{
+			yyVAL.list = list1(yyDollar[1].node)
+		}
+	case 351:
+		yyDollar = yyS[yypt-3 : yypt+1]
+		//line go.y:2300
+		{
+			yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
+		}
+	}
+	goto yystack /* stack new state and value */
+}
diff --git a/src/cmd/compile/internal/gc/y.output b/src/cmd/compile/internal/gc/y.output
new file mode 100644
index 0000000..2821702
--- /dev/null
+++ b/src/cmd/compile/internal/gc/y.output
@@ -0,0 +1,10411 @@
+
+state 0
+	$accept: .file $end 
+	$$4: .    (4)
+
+	.  reduce 4 (src line 210)
+
+	file  goto 1
+	loadsys  goto 2
+	$$4  goto 3
+
+state 1
+	$accept:  file.$end 
+
+	$end  accept
+	.  error
+
+
+state 2
+	file:  loadsys.package imports xdcl_list 
+	package: .    (2)
+
+	LPACKAGE  shift 5
+	.  reduce 2 (src line 193)
+
+	package  goto 4
+
+state 3
+	loadsys:  $$4.import_package import_there 
+
+	LPACKAGE  shift 7
+	.  error
+
+	import_package  goto 6
+
+state 4
+	file:  loadsys package.imports xdcl_list 
+	imports: .    (6)
+
+	.  reduce 6 (src line 227)
+
+	imports  goto 8
+
+state 5
+	package:  LPACKAGE.sym ';' 
+
+	LNAME  shift 10
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 9
+	hidden_importsym  goto 11
+
+state 6
+	loadsys:  $$4 import_package.import_there 
+	$$21: .    (21)
+
+	.  reduce 21 (src line 334)
+
+	import_there  goto 14
+	$$21  goto 15
+
+state 7
+	import_package:  LPACKAGE.LNAME import_safety ';' 
+
+	LNAME  shift 16
+	.  error
+
+
+state 8
+	file:  loadsys package imports.xdcl_list 
+	imports:  imports.import ';' 
+	xdcl_list: .    (218)
+
+	LIMPORT  shift 19
+	.  reduce 218 (src line 1569)
+
+	xdcl_list  goto 17
+	import  goto 18
+
+state 9
+	package:  LPACKAGE sym.';' 
+
+	';'  shift 20
+	.  error
+
+
+state 10
+	sym:  LNAME.    (157)
+
+	.  reduce 157 (src line 1175)
+
+
+state 11
+	sym:  hidden_importsym.    (158)
+
+	.  reduce 158 (src line 1184)
+
+
+state 12
+	sym:  '?'.    (159)
+
+	.  reduce 159 (src line 1185)
+
+
+state 13
+	hidden_importsym:  '@'.LLITERAL '.' LNAME 
+	hidden_importsym:  '@'.LLITERAL '.' '?' 
+
+	LLITERAL  shift 21
+	.  error
+
+
+state 14
+	loadsys:  $$4 import_package import_there.    (5)
+
+	.  reduce 5 (src line 221)
+
+
+state 15
+	import_there:  $$21.hidden_import_list '$' '$' 
+	hidden_import_list: .    (344)
+
+	.  reduce 344 (src line 2271)
+
+	hidden_import_list  goto 22
+
+state 16
+	import_package:  LPACKAGE LNAME.import_safety ';' 
+	import_safety: .    (19)
+
+	LNAME  shift 24
+	.  reduce 19 (src line 326)
+
+	import_safety  goto 23
+
+state 17
+	file:  loadsys package imports xdcl_list.    (1)
+	xdcl_list:  xdcl_list.xdcl ';' 
+	xdcl: .    (23)
+
+	$end  reduce 1 (src line 184)
+	error  shift 29
+	LLITERAL  shift 68
+	LBREAK  shift 41
+	LCHAN  shift 78
+	LCONST  shift 47
+	LCONTINUE  shift 42
+	LDEFER  shift 44
+	LFALL  shift 40
+	LFOR  shift 50
+	LFUNC  shift 33
+	LGO  shift 43
+	LGOTO  shift 45
+	LIF  shift 53
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LRETURN  shift 46
+	LSELECT  shift 52
+	LSTRUCT  shift 82
+	LSWITCH  shift 51
+	LTYPE  shift 32
+	LVAR  shift 30
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	';'  reduce 23 (src line 347)
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 56
+	expr  goto 48
+	fnliteral  goto 73
+	for_stmt  goto 35
+	if_stmt  goto 38
+	non_dcl_stmt  goto 28
+	labelname  goto 39
+	name  goto 69
+	new_name  goto 54
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	select_stmt  goto 37
+	simple_stmt  goto 34
+	switch_stmt  goto 36
+	uexpr  goto 55
+	xfndcl  goto 27
+	xdcl  goto 25
+	expr_list  goto 49
+	common_dcl  goto 26
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	lconst  goto 31
+	fnlitdcl  goto 76
+
+state 18
+	imports:  imports import.';' 
+
+	';'  shift 84
+	.  error
+
+
+state 19
+	import:  LIMPORT.import_stmt 
+	import:  LIMPORT.'(' import_stmt_list osemi ')' 
+	import:  LIMPORT.'(' ')' 
+
+	LLITERAL  shift 88
+	LNAME  shift 10
+	'('  shift 86
+	'.'  shift 90
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	import_here  goto 87
+	sym  goto 89
+	hidden_importsym  goto 11
+	import_stmt  goto 85
+
+state 20
+	package:  LPACKAGE sym ';'.    (3)
+
+	.  reduce 3 (src line 200)
+
+
+state 21
+	hidden_importsym:  '@' LLITERAL.'.' LNAME 
+	hidden_importsym:  '@' LLITERAL.'.' '?' 
+
+	'.'  shift 91
+	.  error
+
+
+state 22
+	import_there:  $$21 hidden_import_list.'$' '$' 
+	hidden_import_list:  hidden_import_list.hidden_import 
+
+	LCONST  shift 96
+	LFUNC  shift 98
+	LIMPORT  shift 94
+	LTYPE  shift 97
+	LVAR  shift 95
+	'$'  shift 92
+	.  error
+
+	hidden_import  goto 93
+
+state 23
+	import_package:  LPACKAGE LNAME import_safety.';' 
+
+	';'  shift 99
+	.  error
+
+
+state 24
+	import_safety:  LNAME.    (20)
+
+	.  reduce 20 (src line 327)
+
+
+state 25
+	xdcl_list:  xdcl_list xdcl.';' 
+
+	';'  shift 100
+	.  error
+
+
+state 26
+	xdcl:  common_dcl.    (24)
+
+	.  reduce 24 (src line 352)
+
+
+state 27
+	xdcl:  xfndcl.    (25)
+
+	.  reduce 25 (src line 353)
+
+
+state 28
+	xdcl:  non_dcl_stmt.    (26)
+
+	.  reduce 26 (src line 357)
+
+
+state 29
+	xdcl:  error.    (27)
+
+	.  reduce 27 (src line 362)
+
+
+state 30
+	common_dcl:  LVAR.vardcl 
+	common_dcl:  LVAR.'(' vardcl_list osemi ')' 
+	common_dcl:  LVAR.'(' ')' 
+
+	LNAME  shift 10
+	'('  shift 102
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 105
+	dcl_name  goto 104
+	dcl_name_list  goto 103
+	vardcl  goto 101
+	hidden_importsym  goto 11
+
+state 31
+	common_dcl:  lconst.constdcl 
+	common_dcl:  lconst.'(' constdcl osemi ')' 
+	common_dcl:  lconst.'(' constdcl ';' constdcl_list osemi ')' 
+	common_dcl:  lconst.'(' ')' 
+
+	LNAME  shift 10
+	'('  shift 107
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 105
+	dcl_name  goto 104
+	dcl_name_list  goto 108
+	constdcl  goto 106
+	hidden_importsym  goto 11
+
+state 32
+	common_dcl:  LTYPE.typedcl 
+	common_dcl:  LTYPE.'(' typedcl_list osemi ')' 
+	common_dcl:  LTYPE.'(' ')' 
+
+	LNAME  shift 10
+	'('  shift 110
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 112
+	typedclname  goto 111
+	typedcl  goto 109
+	hidden_importsym  goto 11
+
+state 33
+	xfndcl:  LFUNC.fndcl fnbody 
+	fntype:  LFUNC.'(' oarg_type_list_ocomma ')' fnres 
+
+	LNAME  shift 10
+	'('  shift 114
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 115
+	fndcl  goto 113
+	hidden_importsym  goto 11
+
+state 34
+	non_dcl_stmt:  simple_stmt.    (256)
+
+	.  reduce 256 (src line 1796)
+
+
+state 35
+	non_dcl_stmt:  for_stmt.    (257)
+
+	.  reduce 257 (src line 1798)
+
+
+state 36
+	non_dcl_stmt:  switch_stmt.    (258)
+
+	.  reduce 258 (src line 1799)
+
+
+state 37
+	non_dcl_stmt:  select_stmt.    (259)
+
+	.  reduce 259 (src line 1800)
+
+
+state 38
+	non_dcl_stmt:  if_stmt.    (260)
+
+	.  reduce 260 (src line 1801)
+
+
+state 39
+	non_dcl_stmt:  labelname.':' $$261 stmt 
+
+	':'  shift 116
+	.  error
+
+
+state 40
+	non_dcl_stmt:  LFALL.    (263)
+
+	.  reduce 263 (src line 1818)
+
+
+state 41
+	non_dcl_stmt:  LBREAK.onew_name 
+	onew_name: .    (155)
+
+	LNAME  shift 10
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 155 (src line 1169)
+
+	sym  goto 119
+	new_name  goto 118
+	onew_name  goto 117
+	hidden_importsym  goto 11
+
+state 42
+	non_dcl_stmt:  LCONTINUE.onew_name 
+	onew_name: .    (155)
+
+	LNAME  shift 10
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 155 (src line 1169)
+
+	sym  goto 119
+	new_name  goto 118
+	onew_name  goto 120
+	hidden_importsym  goto 11
+
+state 43
+	non_dcl_stmt:  LGO.pseudocall 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	'('  shift 67
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 122
+	pexpr_no_paren  goto 66
+	pseudocall  goto 121
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 44
+	non_dcl_stmt:  LDEFER.pseudocall 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	'('  shift 67
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 122
+	pexpr_no_paren  goto 66
+	pseudocall  goto 125
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 45
+	non_dcl_stmt:  LGOTO.new_name 
+
+	LNAME  shift 10
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 119
+	new_name  goto 126
+	hidden_importsym  goto 11
+
+state 46
+	non_dcl_stmt:  LRETURN.oexpr_list 
+	oexpr_list: .    (292)
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 292 (src line 1967)
+
+	sym  goto 123
+	expr  goto 129
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	expr_list  goto 128
+	oexpr_list  goto 127
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 47
+	lconst:  LCONST.    (38)
+
+	.  reduce 38 (src line 416)
+
+
+state 48
+	simple_stmt:  expr.    (49)
+	simple_stmt:  expr.LASOP expr 
+	simple_stmt:  expr.LINC 
+	simple_stmt:  expr.LDEC 
+	expr:  expr.LOROR expr 
+	expr:  expr.LANDAND expr 
+	expr:  expr.LEQ expr 
+	expr:  expr.LNE expr 
+	expr:  expr.LLT expr 
+	expr:  expr.LLE expr 
+	expr:  expr.LGE expr 
+	expr:  expr.LGT expr 
+	expr:  expr.'+' expr 
+	expr:  expr.'-' expr 
+	expr:  expr.'|' expr 
+	expr:  expr.'^' expr 
+	expr:  expr.'*' expr 
+	expr:  expr.'/' expr 
+	expr:  expr.'%' expr 
+	expr:  expr.'&' expr 
+	expr:  expr.LANDNOT expr 
+	expr:  expr.LLSH expr 
+	expr:  expr.LRSH expr 
+	expr:  expr.LCOMM expr 
+	expr_list:  expr.    (276)
+
+	LASOP  shift 130
+	LCOLAS  reduce 276 (src line 1902)
+	LANDAND  shift 134
+	LANDNOT  shift 149
+	LCOMM  shift 152
+	LDEC  shift 132
+	LEQ  shift 135
+	LGE  shift 139
+	LGT  shift 140
+	LINC  shift 131
+	LLE  shift 138
+	LLSH  shift 150
+	LLT  shift 137
+	LNE  shift 136
+	LOROR  shift 133
+	LRSH  shift 151
+	'+'  shift 141
+	'-'  shift 142
+	'|'  shift 143
+	'^'  shift 144
+	'*'  shift 145
+	'/'  shift 146
+	'%'  shift 147
+	'&'  shift 148
+	'='  reduce 276 (src line 1902)
+	','  reduce 276 (src line 1902)
+	.  reduce 49 (src line 472)
+
+
+state 49
+	simple_stmt:  expr_list.'=' expr_list 
+	simple_stmt:  expr_list.LCOLAS expr_list 
+	expr_list:  expr_list.',' expr 
+
+	LCOLAS  shift 154
+	'='  shift 153
+	','  shift 155
+	.  error
+
+
+state 50
+	for_stmt:  LFOR.$$74 for_body 
+	$$74: .    (74)
+
+	.  reduce 74 (src line 721)
+
+	$$74  goto 156
+
+state 51
+	switch_stmt:  LSWITCH.$$88 if_header $$89 LBODY caseblock_list '}' 
+	$$88: .    (88)
+
+	.  reduce 88 (src line 816)
+
+	$$88  goto 157
+
+state 52
+	select_stmt:  LSELECT.$$91 LBODY caseblock_list '}' 
+	$$91: .    (91)
+
+	.  reduce 91 (src line 839)
+
+	$$91  goto 158
+
+state 53
+	if_stmt:  LIF.$$78 if_header $$79 loop_body $$80 elseif_list else 
+	$$78: .    (78)
+
+	.  reduce 78 (src line 750)
+
+	$$78  goto 159
+
+state 54
+	labelname:  new_name.    (163)
+
+	.  reduce 163 (src line 1229)
+
+
+state 55
+	expr:  uexpr.    (93)
+
+	.  reduce 93 (src line 855)
+
+
+state 56
+	new_name:  sym.    (153)
+	name:  sym.    (162)
+
+	':'  reduce 153 (src line 1153)
+	.  reduce 162 (src line 1220)
+
+
+state 57
+	uexpr:  pexpr.    (114)
+	pseudocall:  pexpr.'(' ')' 
+	pseudocall:  pexpr.'(' expr_or_type_list ocomma ')' 
+	pseudocall:  pexpr.'(' expr_or_type_list LDDD ocomma ')' 
+	pexpr_no_paren:  pexpr.'.' sym 
+	pexpr_no_paren:  pexpr.'.' '(' expr_or_type ')' 
+	pexpr_no_paren:  pexpr.'.' '(' LTYPE ')' 
+	pexpr_no_paren:  pexpr.'[' expr ']' 
+	pexpr_no_paren:  pexpr.'[' oexpr ':' oexpr ']' 
+	pexpr_no_paren:  pexpr.'[' oexpr ':' oexpr ':' oexpr ']' 
+
+	'('  shift 160
+	'.'  shift 161
+	'['  shift 162
+	.  reduce 114 (src line 939)
+
+
+state 58
+	uexpr:  '*'.uexpr 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 163
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 59
+	uexpr:  '&'.uexpr 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 164
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 60
+	uexpr:  '+'.uexpr 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 165
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 61
+	uexpr:  '-'.uexpr 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 166
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 62
+	uexpr:  '!'.uexpr 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 167
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 63
+	uexpr:  '~'.uexpr 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 168
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 64
+	uexpr:  '^'.uexpr 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 169
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 65
+	uexpr:  LCOMM.uexpr 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 170
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 66
+	pexpr_no_paren:  pexpr_no_paren.'{' start_complit braced_keyval_list '}' 
+	pexpr:  pexpr_no_paren.    (146)
+
+	'{'  shift 171
+	.  reduce 146 (src line 1116)
+
+
+state 67
+	pexpr_no_paren:  '('.expr_or_type ')' '{' start_complit braced_keyval_list '}' 
+	pexpr:  '('.expr_or_type ')' 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 179
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 178
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	expr  goto 173
+	expr_or_type  goto 172
+	fnliteral  goto 73
+	name  goto 69
+	non_expr_type  goto 174
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	recvchantype  goto 175
+	othertype  goto 177
+	fntype  goto 176
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 68
+	pexpr_no_paren:  LLITERAL.    (126)
+
+	.  reduce 126 (src line 1003)
+
+
+state 69
+	pexpr_no_paren:  name.    (127)
+
+	.  reduce 127 (src line 1008)
+
+
+state 70
+	pexpr_no_paren:  pseudocall.    (134)
+
+	.  reduce 134 (src line 1046)
+
+
+state 71
+	pexpr_no_paren:  convtype.'(' expr ocomma ')' 
+
+	'('  shift 180
+	.  error
+
+
+state 72
+	pexpr_no_paren:  comptype.lbrace start_complit braced_keyval_list '}' 
+
+	LBODY  shift 182
+	'{'  shift 183
+	.  error
+
+	lbrace  goto 181
+
+state 73
+	pexpr_no_paren:  fnliteral.    (139)
+
+	.  reduce 139 (src line 1073)
+
+
+state 74
+	convtype:  fntype.    (181)
+	fnlitdcl:  fntype.    (215)
+
+	'('  reduce 181 (src line 1282)
+	.  reduce 215 (src line 1546)
+
+
+state 75
+	convtype:  othertype.    (182)
+	comptype:  othertype.    (183)
+
+	'('  reduce 182 (src line 1284)
+	.  reduce 183 (src line 1286)
+
+
+state 76
+	fnliteral:  fnlitdcl.lbrace stmt_list '}' 
+	fnliteral:  fnlitdcl.error 
+
+	error  shift 185
+	LBODY  shift 182
+	'{'  shift 183
+	.  error
+
+	lbrace  goto 184
+
+state 77
+	othertype:  '['.oexpr ']' ntype 
+	othertype:  '['.LDDD ']' ntype 
+	oexpr: .    (290)
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LDDD  shift 187
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 290 (src line 1961)
+
+	sym  goto 123
+	expr  goto 188
+	fnliteral  goto 73
+	name  goto 69
+	oexpr  goto 186
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 78
+	othertype:  LCHAN.non_recvchantype 
+	othertype:  LCHAN.LCOMM ntype 
+
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 190
+	'*'  shift 196
+	'('  shift 195
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	dotname  goto 194
+	name  goto 197
+	interfacetype  goto 81
+	structtype  goto 80
+	ptrtype  goto 193
+	non_recvchantype  goto 189
+	othertype  goto 192
+	fntype  goto 191
+	hidden_importsym  goto 11
+
+state 79
+	othertype:  LMAP.'[' ntype ']' ntype 
+
+	'['  shift 198
+	.  error
+
+
+state 80
+	othertype:  structtype.    (196)
+
+	.  reduce 196 (src line 1334)
+
+
+state 81
+	othertype:  interfacetype.    (197)
+
+	.  reduce 197 (src line 1335)
+
+
+state 82
+	structtype:  LSTRUCT.lbrace structdcl_list osemi '}' 
+	structtype:  LSTRUCT.lbrace '}' 
+
+	LBODY  shift 182
+	'{'  shift 183
+	.  error
+
+	lbrace  goto 199
+
+state 83
+	interfacetype:  LINTERFACE.lbrace interfacedcl_list osemi '}' 
+	interfacetype:  LINTERFACE.lbrace '}' 
+
+	LBODY  shift 182
+	'{'  shift 183
+	.  error
+
+	lbrace  goto 200
+
+state 84
+	imports:  imports import ';'.    (7)
+
+	.  reduce 7 (src line 228)
+
+
+state 85
+	import:  LIMPORT import_stmt.    (8)
+
+	.  reduce 8 (src line 230)
+
+
+state 86
+	import:  LIMPORT '('.import_stmt_list osemi ')' 
+	import:  LIMPORT '('.')' 
+
+	LLITERAL  shift 88
+	LNAME  shift 10
+	')'  shift 202
+	'.'  shift 90
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	import_here  goto 87
+	sym  goto 89
+	hidden_importsym  goto 11
+	import_stmt  goto 203
+	import_stmt_list  goto 201
+
+state 87
+	import_stmt:  import_here.import_package import_there 
+	import_stmt:  import_here.import_there 
+	$$21: .    (21)
+
+	LPACKAGE  shift 7
+	.  reduce 21 (src line 334)
+
+	import_package  goto 204
+	import_there  goto 205
+	$$21  goto 15
+
+state 88
+	import_here:  LLITERAL.    (15)
+
+	.  reduce 15 (src line 286)
+
+
+state 89
+	import_here:  sym.LLITERAL 
+
+	LLITERAL  shift 206
+	.  error
+
+
+state 90
+	import_here:  '.'.LLITERAL 
+
+	LLITERAL  shift 207
+	.  error
+
+
+state 91
+	hidden_importsym:  '@' LLITERAL '.'.LNAME 
+	hidden_importsym:  '@' LLITERAL '.'.'?' 
+
+	LNAME  shift 208
+	'?'  shift 209
+	.  error
+
+
+state 92
+	import_there:  $$21 hidden_import_list '$'.'$' 
+
+	'$'  shift 210
+	.  error
+
+
+state 93
+	hidden_import_list:  hidden_import_list hidden_import.    (345)
+
+	.  reduce 345 (src line 2272)
+
+
+state 94
+	hidden_import:  LIMPORT.LNAME LLITERAL ';' 
+
+	LNAME  shift 211
+	.  error
+
+
+state 95
+	hidden_import:  LVAR.hidden_pkg_importsym hidden_type ';' 
+
+	'@'  shift 13
+	.  error
+
+	hidden_importsym  goto 213
+	hidden_pkg_importsym  goto 212
+
+state 96
+	hidden_import:  LCONST.hidden_pkg_importsym '=' hidden_constant ';' 
+	hidden_import:  LCONST.hidden_pkg_importsym hidden_type '=' hidden_constant ';' 
+
+	'@'  shift 13
+	.  error
+
+	hidden_importsym  goto 213
+	hidden_pkg_importsym  goto 214
+
+state 97
+	hidden_import:  LTYPE.hidden_pkgtype hidden_type ';' 
+
+	'@'  shift 13
+	.  error
+
+	hidden_importsym  goto 213
+	hidden_pkg_importsym  goto 216
+	hidden_pkgtype  goto 215
+
+state 98
+	hidden_import:  LFUNC.hidden_fndcl fnbody ';' 
+
+	'('  shift 219
+	'@'  shift 13
+	.  error
+
+	hidden_fndcl  goto 217
+	hidden_importsym  goto 213
+	hidden_pkg_importsym  goto 218
+
+state 99
+	import_package:  LPACKAGE LNAME import_safety ';'.    (18)
+
+	.  reduce 18 (src line 309)
+
+
+state 100
+	xdcl_list:  xdcl_list xdcl ';'.    (219)
+
+	.  reduce 219 (src line 1573)
+
+
+state 101
+	common_dcl:  LVAR vardcl.    (28)
+
+	.  reduce 28 (src line 367)
+
+
+state 102
+	common_dcl:  LVAR '('.vardcl_list osemi ')' 
+	common_dcl:  LVAR '('.')' 
+
+	LNAME  shift 10
+	')'  shift 221
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 105
+	dcl_name  goto 104
+	dcl_name_list  goto 103
+	vardcl  goto 222
+	vardcl_list  goto 220
+	hidden_importsym  goto 11
+
+state 103
+	vardcl:  dcl_name_list.ntype 
+	vardcl:  dcl_name_list.ntype '=' expr_list 
+	vardcl:  dcl_name_list.'=' expr_list 
+	dcl_name_list:  dcl_name_list.',' dcl_name 
+
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 232
+	'*'  shift 196
+	'('  shift 231
+	'='  shift 224
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	','  shift 225
+	.  error
+
+	sym  goto 123
+	ntype  goto 223
+	dotname  goto 230
+	name  goto 197
+	interfacetype  goto 81
+	structtype  goto 80
+	ptrtype  goto 229
+	recvchantype  goto 226
+	othertype  goto 228
+	fntype  goto 227
+	hidden_importsym  goto 11
+
+state 104
+	dcl_name_list:  dcl_name.    (274)
+
+	.  reduce 274 (src line 1892)
+
+
+state 105
+	dcl_name:  sym.    (154)
+
+	.  reduce 154 (src line 1163)
+
+
+state 106
+	common_dcl:  lconst constdcl.    (31)
+
+	.  reduce 31 (src line 380)
+
+
+state 107
+	common_dcl:  lconst '('.constdcl osemi ')' 
+	common_dcl:  lconst '('.constdcl ';' constdcl_list osemi ')' 
+	common_dcl:  lconst '('.')' 
+
+	LNAME  shift 10
+	')'  shift 234
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 105
+	dcl_name  goto 104
+	dcl_name_list  goto 108
+	constdcl  goto 233
+	hidden_importsym  goto 11
+
+state 108
+	constdcl:  dcl_name_list.ntype '=' expr_list 
+	constdcl:  dcl_name_list.'=' expr_list 
+	dcl_name_list:  dcl_name_list.',' dcl_name 
+
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 232
+	'*'  shift 196
+	'('  shift 231
+	'='  shift 236
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	','  shift 225
+	.  error
+
+	sym  goto 123
+	ntype  goto 235
+	dotname  goto 230
+	name  goto 197
+	interfacetype  goto 81
+	structtype  goto 80
+	ptrtype  goto 229
+	recvchantype  goto 226
+	othertype  goto 228
+	fntype  goto 227
+	hidden_importsym  goto 11
+
+state 109
+	common_dcl:  LTYPE typedcl.    (35)
+
+	.  reduce 35 (src line 403)
+
+
+state 110
+	common_dcl:  LTYPE '('.typedcl_list osemi ')' 
+	common_dcl:  LTYPE '('.')' 
+
+	LNAME  shift 10
+	')'  shift 238
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 112
+	typedclname  goto 111
+	typedcl  goto 239
+	typedcl_list  goto 237
+	hidden_importsym  goto 11
+
+state 111
+	typedcl:  typedclname.ntype 
+
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 232
+	'*'  shift 196
+	'('  shift 231
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	ntype  goto 240
+	dotname  goto 230
+	name  goto 197
+	interfacetype  goto 81
+	structtype  goto 80
+	ptrtype  goto 229
+	recvchantype  goto 226
+	othertype  goto 228
+	fntype  goto 227
+	hidden_importsym  goto 11
+
+state 112
+	typedclname:  sym.    (47)
+
+	.  reduce 47 (src line 457)
+
+
+state 113
+	xfndcl:  LFUNC fndcl.fnbody 
+	fnbody: .    (210)
+
+	'{'  shift 242
+	.  reduce 210 (src line 1519)
+
+	fnbody  goto 241
+
+state 114
+	fndcl:  '('.oarg_type_list_ocomma ')' sym '(' oarg_type_list_ocomma ')' fnres 
+	fntype:  LFUNC '('.oarg_type_list_ocomma ')' fnres 
+	oarg_type_list_ocomma: .    (249)
+
+	LCHAN  shift 78
+	LDDD  shift 250
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 232
+	'*'  shift 196
+	'('  shift 231
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 249 (src line 1769)
+
+	sym  goto 247
+	ntype  goto 249
+	arg_type  goto 245
+	dotname  goto 230
+	name  goto 197
+	name_or_type  goto 246
+	oarg_type_list_ocomma  goto 243
+	arg_type_list  goto 244
+	dotdotdot  goto 248
+	interfacetype  goto 81
+	structtype  goto 80
+	ptrtype  goto 229
+	recvchantype  goto 226
+	othertype  goto 228
+	fntype  goto 227
+	hidden_importsym  goto 11
+
+state 115
+	fndcl:  sym.'(' oarg_type_list_ocomma ')' fnres 
+
+	'('  shift 251
+	.  error
+
+
+state 116
+	non_dcl_stmt:  labelname ':'.$$261 stmt 
+	$$261: .    (261)
+
+	.  reduce 261 (src line 1802)
+
+	$$261  goto 252
+
+state 117
+	non_dcl_stmt:  LBREAK onew_name.    (264)
+
+	.  reduce 264 (src line 1824)
+
+
+state 118
+	onew_name:  new_name.    (156)
+
+	.  reduce 156 (src line 1173)
+
+
+state 119
+	new_name:  sym.    (153)
+
+	.  reduce 153 (src line 1153)
+
+
+state 120
+	non_dcl_stmt:  LCONTINUE onew_name.    (265)
+
+	.  reduce 265 (src line 1828)
+
+
+state 121
+	pexpr_no_paren:  pseudocall.    (134)
+	non_dcl_stmt:  LGO pseudocall.    (266)
+
+	'('  reduce 134 (src line 1046)
+	'.'  reduce 134 (src line 1046)
+	'{'  reduce 134 (src line 1046)
+	'['  reduce 134 (src line 1046)
+	.  reduce 266 (src line 1832)
+
+
+state 122
+	pseudocall:  pexpr.'(' ')' 
+	pseudocall:  pexpr.'(' expr_or_type_list ocomma ')' 
+	pseudocall:  pexpr.'(' expr_or_type_list LDDD ocomma ')' 
+	pexpr_no_paren:  pexpr.'.' sym 
+	pexpr_no_paren:  pexpr.'.' '(' expr_or_type ')' 
+	pexpr_no_paren:  pexpr.'.' '(' LTYPE ')' 
+	pexpr_no_paren:  pexpr.'[' expr ']' 
+	pexpr_no_paren:  pexpr.'[' oexpr ':' oexpr ']' 
+	pexpr_no_paren:  pexpr.'[' oexpr ':' oexpr ':' oexpr ']' 
+
+	'('  shift 160
+	'.'  shift 161
+	'['  shift 162
+	.  error
+
+
+state 123
+	name:  sym.    (162)
+
+	.  reduce 162 (src line 1220)
+
+
+state 124
+	fntype:  LFUNC.'(' oarg_type_list_ocomma ')' fnres 
+
+	'('  shift 253
+	.  error
+
+
+state 125
+	pexpr_no_paren:  pseudocall.    (134)
+	non_dcl_stmt:  LDEFER pseudocall.    (267)
+
+	'('  reduce 134 (src line 1046)
+	'.'  reduce 134 (src line 1046)
+	'{'  reduce 134 (src line 1046)
+	'['  reduce 134 (src line 1046)
+	.  reduce 267 (src line 1836)
+
+
+state 126
+	non_dcl_stmt:  LGOTO new_name.    (268)
+
+	.  reduce 268 (src line 1840)
+
+
+state 127
+	non_dcl_stmt:  LRETURN oexpr_list.    (269)
+
+	.  reduce 269 (src line 1845)
+
+
+state 128
+	expr_list:  expr_list.',' expr 
+	oexpr_list:  expr_list.    (293)
+
+	','  shift 155
+	.  reduce 293 (src line 1971)
+
+
+state 129
+	expr:  expr.LOROR expr 
+	expr:  expr.LANDAND expr 
+	expr:  expr.LEQ expr 
+	expr:  expr.LNE expr 
+	expr:  expr.LLT expr 
+	expr:  expr.LLE expr 
+	expr:  expr.LGE expr 
+	expr:  expr.LGT expr 
+	expr:  expr.'+' expr 
+	expr:  expr.'-' expr 
+	expr:  expr.'|' expr 
+	expr:  expr.'^' expr 
+	expr:  expr.'*' expr 
+	expr:  expr.'/' expr 
+	expr:  expr.'%' expr 
+	expr:  expr.'&' expr 
+	expr:  expr.LANDNOT expr 
+	expr:  expr.LLSH expr 
+	expr:  expr.LRSH expr 
+	expr:  expr.LCOMM expr 
+	expr_list:  expr.    (276)
+
+	LANDAND  shift 134
+	LANDNOT  shift 149
+	LCOMM  shift 152
+	LEQ  shift 135
+	LGE  shift 139
+	LGT  shift 140
+	LLE  shift 138
+	LLSH  shift 150
+	LLT  shift 137
+	LNE  shift 136
+	LOROR  shift 133
+	LRSH  shift 151
+	'+'  shift 141
+	'-'  shift 142
+	'|'  shift 143
+	'^'  shift 144
+	'*'  shift 145
+	'/'  shift 146
+	'%'  shift 147
+	'&'  shift 148
+	.  reduce 276 (src line 1902)
+
+
+state 130
+	simple_stmt:  expr LASOP.expr 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	expr  goto 254
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 131
+	simple_stmt:  expr LINC.    (53)
+
+	.  reduce 53 (src line 522)
+
+
+state 132
+	simple_stmt:  expr LDEC.    (54)
+
+	.  reduce 54 (src line 528)
+
+
+state 133
+	expr:  expr LOROR.expr 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	expr  goto 255
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 134
+	expr:  expr LANDAND.expr 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	expr  goto 256
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 135
+	expr:  expr LEQ.expr 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	expr  goto 257
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 136
+	expr:  expr LNE.expr 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	expr  goto 258
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 137
+	expr:  expr LLT.expr 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	expr  goto 259
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 138
+	expr:  expr LLE.expr 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	expr  goto 260
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 139
+	expr:  expr LGE.expr 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	expr  goto 261
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 140
+	expr:  expr LGT.expr 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	expr  goto 262
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 141
+	expr:  expr '+'.expr 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	expr  goto 263
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 142
+	expr:  expr '-'.expr 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	expr  goto 264
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 143
+	expr:  expr '|'.expr 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	expr  goto 265
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 144
+	expr:  expr '^'.expr 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	expr  goto 266
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 145
+	expr:  expr '*'.expr 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	expr  goto 267
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 146
+	expr:  expr '/'.expr 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	expr  goto 268
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 147
+	expr:  expr '%'.expr 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	expr  goto 269
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 148
+	expr:  expr '&'.expr 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	expr  goto 270
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 149
+	expr:  expr LANDNOT.expr 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	expr  goto 271
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 150
+	expr:  expr LLSH.expr 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	expr  goto 272
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 151
+	expr:  expr LRSH.expr 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	expr  goto 273
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 152
+	expr:  expr LCOMM.expr 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	expr  goto 274
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 153
+	simple_stmt:  expr_list '='.expr_list 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	expr  goto 129
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	expr_list  goto 275
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 154
+	simple_stmt:  expr_list LCOLAS.expr_list 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	expr  goto 129
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	expr_list  goto 276
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 155
+	expr_list:  expr_list ','.expr 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	expr  goto 277
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 156
+	for_stmt:  LFOR $$74.for_body 
+	osimple_stmt: .    (294)
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LRANGE  shift 284
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 294 (src line 1973)
+
+	sym  goto 123
+	expr  goto 48
+	fnliteral  goto 73
+	for_body  goto 278
+	for_header  goto 279
+	name  goto 69
+	osimple_stmt  goto 280
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	range_stmt  goto 281
+	simple_stmt  goto 282
+	uexpr  goto 55
+	expr_list  goto 283
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 157
+	switch_stmt:  LSWITCH $$88.if_header $$89 LBODY caseblock_list '}' 
+	osimple_stmt: .    (294)
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 294 (src line 1973)
+
+	sym  goto 123
+	expr  goto 48
+	fnliteral  goto 73
+	if_header  goto 285
+	name  goto 69
+	osimple_stmt  goto 286
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	simple_stmt  goto 282
+	uexpr  goto 55
+	expr_list  goto 49
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 158
+	select_stmt:  LSELECT $$91.LBODY caseblock_list '}' 
+
+	LBODY  shift 287
+	.  error
+
+
+state 159
+	if_stmt:  LIF $$78.if_header $$79 loop_body $$80 elseif_list else 
+	osimple_stmt: .    (294)
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 294 (src line 1973)
+
+	sym  goto 123
+	expr  goto 48
+	fnliteral  goto 73
+	if_header  goto 288
+	name  goto 69
+	osimple_stmt  goto 286
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	simple_stmt  goto 282
+	uexpr  goto 55
+	expr_list  goto 49
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 160
+	pseudocall:  pexpr '('.')' 
+	pseudocall:  pexpr '('.expr_or_type_list ocomma ')' 
+	pseudocall:  pexpr '('.expr_or_type_list LDDD ocomma ')' 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 179
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 178
+	'&'  shift 59
+	'('  shift 67
+	')'  shift 289
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	expr  goto 173
+	expr_or_type  goto 291
+	fnliteral  goto 73
+	name  goto 69
+	non_expr_type  goto 174
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	expr_or_type_list  goto 290
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	recvchantype  goto 175
+	othertype  goto 177
+	fntype  goto 176
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 161
+	pexpr_no_paren:  pexpr '.'.sym 
+	pexpr_no_paren:  pexpr '.'.'(' expr_or_type ')' 
+	pexpr_no_paren:  pexpr '.'.'(' LTYPE ')' 
+
+	LNAME  shift 10
+	'('  shift 293
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 292
+	hidden_importsym  goto 11
+
+state 162
+	pexpr_no_paren:  pexpr '['.expr ']' 
+	pexpr_no_paren:  pexpr '['.oexpr ':' oexpr ']' 
+	pexpr_no_paren:  pexpr '['.oexpr ':' oexpr ':' oexpr ']' 
+	oexpr: .    (290)
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 290 (src line 1961)
+
+	sym  goto 123
+	expr  goto 294
+	fnliteral  goto 73
+	name  goto 69
+	oexpr  goto 295
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 163
+	uexpr:  '*' uexpr.    (115)
+
+	.  reduce 115 (src line 941)
+
+
+state 164
+	uexpr:  '&' uexpr.    (116)
+
+	.  reduce 116 (src line 945)
+
+
+state 165
+	uexpr:  '+' uexpr.    (117)
+
+	.  reduce 117 (src line 956)
+
+
+state 166
+	uexpr:  '-' uexpr.    (118)
+
+	.  reduce 118 (src line 960)
+
+
+state 167
+	uexpr:  '!' uexpr.    (119)
+
+	.  reduce 119 (src line 964)
+
+
+state 168
+	uexpr:  '~' uexpr.    (120)
+
+	.  reduce 120 (src line 968)
+
+
+state 169
+	uexpr:  '^' uexpr.    (121)
+
+	.  reduce 121 (src line 973)
+
+
+state 170
+	uexpr:  LCOMM uexpr.    (122)
+
+	.  reduce 122 (src line 977)
+
+
+state 171
+	pexpr_no_paren:  pexpr_no_paren '{'.start_complit braced_keyval_list '}' 
+	start_complit: .    (140)
+
+	.  reduce 140 (src line 1075)
+
+	start_complit  goto 296
+
+state 172
+	pexpr_no_paren:  '(' expr_or_type.')' '{' start_complit braced_keyval_list '}' 
+	pexpr:  '(' expr_or_type.')' 
+
+	')'  shift 297
+	.  error
+
+
+state 173
+	expr:  expr.LOROR expr 
+	expr:  expr.LANDAND expr 
+	expr:  expr.LEQ expr 
+	expr:  expr.LNE expr 
+	expr:  expr.LLT expr 
+	expr:  expr.LLE expr 
+	expr:  expr.LGE expr 
+	expr:  expr.LGT expr 
+	expr:  expr.'+' expr 
+	expr:  expr.'-' expr 
+	expr:  expr.'|' expr 
+	expr:  expr.'^' expr 
+	expr:  expr.'*' expr 
+	expr:  expr.'/' expr 
+	expr:  expr.'%' expr 
+	expr:  expr.'&' expr 
+	expr:  expr.LANDNOT expr 
+	expr:  expr.LLSH expr 
+	expr:  expr.LRSH expr 
+	expr:  expr.LCOMM expr 
+	expr_or_type:  expr.    (148)
+
+	LANDAND  shift 134
+	LANDNOT  shift 149
+	LCOMM  shift 152
+	LEQ  shift 135
+	LGE  shift 139
+	LGT  shift 140
+	LLE  shift 138
+	LLSH  shift 150
+	LLT  shift 137
+	LNE  shift 136
+	LOROR  shift 133
+	LRSH  shift 151
+	'+'  shift 141
+	'-'  shift 142
+	'|'  shift 143
+	'^'  shift 144
+	'*'  shift 145
+	'/'  shift 146
+	'%'  shift 147
+	'&'  shift 148
+	.  reduce 148 (src line 1131)
+
+
+state 174
+	expr_or_type:  non_expr_type.    (149)
+
+	.  reduce 149 (src line 1133)
+
+
+state 175
+	non_expr_type:  recvchantype.    (172)
+
+	.  reduce 172 (src line 1263)
+
+
+state 176
+	non_expr_type:  fntype.    (173)
+	convtype:  fntype.    (181)
+	fnlitdcl:  fntype.    (215)
+
+	error  reduce 215 (src line 1546)
+	LBODY  reduce 215 (src line 1546)
+	'('  reduce 181 (src line 1282)
+	'{'  reduce 215 (src line 1546)
+	.  reduce 173 (src line 1265)
+
+
+state 177
+	non_expr_type:  othertype.    (174)
+	convtype:  othertype.    (182)
+	comptype:  othertype.    (183)
+
+	LBODY  reduce 183 (src line 1286)
+	'('  reduce 182 (src line 1284)
+	'{'  reduce 183 (src line 1286)
+	.  reduce 174 (src line 1266)
+
+
+state 178
+	uexpr:  '*'.uexpr 
+	non_expr_type:  '*'.non_expr_type 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 179
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 178
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	fnliteral  goto 73
+	name  goto 69
+	non_expr_type  goto 298
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 163
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	recvchantype  goto 175
+	othertype  goto 177
+	fntype  goto 176
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 179
+	uexpr:  LCOMM.uexpr 
+	recvchantype:  LCOMM.LCHAN ntype 
+
+	LLITERAL  shift 68
+	LCHAN  shift 299
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 170
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 180
+	pexpr_no_paren:  convtype '('.expr ocomma ')' 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	expr  goto 300
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 181
+	pexpr_no_paren:  comptype lbrace.start_complit braced_keyval_list '}' 
+	start_complit: .    (140)
+
+	.  reduce 140 (src line 1075)
+
+	start_complit  goto 301
+
+state 182
+	lbrace:  LBODY.    (151)
+
+	.  reduce 151 (src line 1138)
+
+
+state 183
+	lbrace:  '{'.    (152)
+
+	.  reduce 152 (src line 1143)
+
+
+state 184
+	fnliteral:  fnlitdcl lbrace.stmt_list '}' 
+	stmt: .    (251)
+
+	error  shift 307
+	LLITERAL  shift 68
+	LBREAK  shift 41
+	LCHAN  shift 78
+	LCONST  shift 47
+	LCONTINUE  shift 42
+	LDEFER  shift 44
+	LFALL  shift 40
+	LFOR  shift 50
+	LFUNC  shift 124
+	LGO  shift 43
+	LGOTO  shift 45
+	LIF  shift 53
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LRETURN  shift 46
+	LSELECT  shift 52
+	LSTRUCT  shift 82
+	LSWITCH  shift 51
+	LTYPE  shift 32
+	LVAR  shift 30
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	';'  reduce 251 (src line 1781)
+	'{'  shift 308
+	'}'  reduce 251 (src line 1781)
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 56
+	stmt  goto 303
+	compound_stmt  goto 304
+	expr  goto 48
+	fnliteral  goto 73
+	for_stmt  goto 35
+	if_stmt  goto 38
+	non_dcl_stmt  goto 306
+	labelname  goto 39
+	name  goto 69
+	new_name  goto 54
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	select_stmt  goto 37
+	simple_stmt  goto 34
+	switch_stmt  goto 36
+	uexpr  goto 55
+	expr_list  goto 49
+	stmt_list  goto 302
+	common_dcl  goto 305
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	lconst  goto 31
+	fnlitdcl  goto 76
+
+state 185
+	fnliteral:  fnlitdcl error.    (217)
+
+	.  reduce 217 (src line 1558)
+
+
+state 186
+	othertype:  '[' oexpr.']' ntype 
+
+	']'  shift 309
+	.  error
+
+
+state 187
+	othertype:  '[' LDDD.']' ntype 
+
+	']'  shift 310
+	.  error
+
+
+state 188
+	expr:  expr.LOROR expr 
+	expr:  expr.LANDAND expr 
+	expr:  expr.LEQ expr 
+	expr:  expr.LNE expr 
+	expr:  expr.LLT expr 
+	expr:  expr.LLE expr 
+	expr:  expr.LGE expr 
+	expr:  expr.LGT expr 
+	expr:  expr.'+' expr 
+	expr:  expr.'-' expr 
+	expr:  expr.'|' expr 
+	expr:  expr.'^' expr 
+	expr:  expr.'*' expr 
+	expr:  expr.'/' expr 
+	expr:  expr.'%' expr 
+	expr:  expr.'&' expr 
+	expr:  expr.LANDNOT expr 
+	expr:  expr.LLSH expr 
+	expr:  expr.LRSH expr 
+	expr:  expr.LCOMM expr 
+	oexpr:  expr.    (291)
+
+	LANDAND  shift 134
+	LANDNOT  shift 149
+	LCOMM  shift 152
+	LEQ  shift 135
+	LGE  shift 139
+	LGT  shift 140
+	LLE  shift 138
+	LLSH  shift 150
+	LLT  shift 137
+	LNE  shift 136
+	LOROR  shift 133
+	LRSH  shift 151
+	'+'  shift 141
+	'-'  shift 142
+	'|'  shift 143
+	'^'  shift 144
+	'*'  shift 145
+	'/'  shift 146
+	'%'  shift 147
+	'&'  shift 148
+	.  reduce 291 (src line 1965)
+
+
+state 189
+	othertype:  LCHAN non_recvchantype.    (193)
+
+	.  reduce 193 (src line 1320)
+
+
+state 190
+	othertype:  LCHAN LCOMM.ntype 
+
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 232
+	'*'  shift 196
+	'('  shift 231
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	ntype  goto 311
+	dotname  goto 230
+	name  goto 197
+	interfacetype  goto 81
+	structtype  goto 80
+	ptrtype  goto 229
+	recvchantype  goto 226
+	othertype  goto 228
+	fntype  goto 227
+	hidden_importsym  goto 11
+
+state 191
+	non_recvchantype:  fntype.    (176)
+
+	.  reduce 176 (src line 1272)
+
+
+state 192
+	non_recvchantype:  othertype.    (177)
+
+	.  reduce 177 (src line 1274)
+
+
+state 193
+	non_recvchantype:  ptrtype.    (178)
+
+	.  reduce 178 (src line 1275)
+
+
+state 194
+	non_recvchantype:  dotname.    (179)
+
+	.  reduce 179 (src line 1276)
+
+
+state 195
+	non_recvchantype:  '('.ntype ')' 
+
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 232
+	'*'  shift 196
+	'('  shift 231
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	ntype  goto 312
+	dotname  goto 230
+	name  goto 197
+	interfacetype  goto 81
+	structtype  goto 80
+	ptrtype  goto 229
+	recvchantype  goto 226
+	othertype  goto 228
+	fntype  goto 227
+	hidden_importsym  goto 11
+
+state 196
+	ptrtype:  '*'.ntype 
+
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 232
+	'*'  shift 196
+	'('  shift 231
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	ntype  goto 313
+	dotname  goto 230
+	name  goto 197
+	interfacetype  goto 81
+	structtype  goto 80
+	ptrtype  goto 229
+	recvchantype  goto 226
+	othertype  goto 228
+	fntype  goto 227
+	hidden_importsym  goto 11
+
+state 197
+	dotname:  name.    (189)
+	dotname:  name.'.' sym 
+
+	'.'  shift 314
+	.  reduce 189 (src line 1296)
+
+
+state 198
+	othertype:  LMAP '['.ntype ']' ntype 
+
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 232
+	'*'  shift 196
+	'('  shift 231
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	ntype  goto 315
+	dotname  goto 230
+	name  goto 197
+	interfacetype  goto 81
+	structtype  goto 80
+	ptrtype  goto 229
+	recvchantype  goto 226
+	othertype  goto 228
+	fntype  goto 227
+	hidden_importsym  goto 11
+
+state 199
+	structtype:  LSTRUCT lbrace.structdcl_list osemi '}' 
+	structtype:  LSTRUCT lbrace.'}' 
+
+	LNAME  shift 325
+	'*'  shift 322
+	'('  shift 321
+	'}'  shift 317
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 119
+	packname  goto 324
+	embed  goto 320
+	new_name  goto 323
+	new_name_list  goto 319
+	structdcl  goto 318
+	structdcl_list  goto 316
+	hidden_importsym  goto 11
+
+state 200
+	interfacetype:  LINTERFACE lbrace.interfacedcl_list osemi '}' 
+	interfacetype:  LINTERFACE lbrace.'}' 
+
+	LNAME  shift 325
+	'('  shift 331
+	'}'  shift 327
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 119
+	packname  goto 330
+	interfacedcl  goto 328
+	new_name  goto 329
+	interfacedcl_list  goto 326
+	hidden_importsym  goto 11
+
+state 201
+	import:  LIMPORT '(' import_stmt_list.osemi ')' 
+	import_stmt_list:  import_stmt_list.';' import_stmt 
+	osemi: .    (286)
+
+	';'  shift 333
+	.  reduce 286 (src line 1955)
+
+	osemi  goto 332
+
+state 202
+	import:  LIMPORT '(' ')'.    (10)
+
+	.  reduce 10 (src line 233)
+
+
+state 203
+	import_stmt_list:  import_stmt.    (13)
+
+	.  reduce 13 (src line 282)
+
+
+state 204
+	import_stmt:  import_here import_package.import_there 
+	$$21: .    (21)
+
+	.  reduce 21 (src line 334)
+
+	import_there  goto 334
+	$$21  goto 15
+
+state 205
+	import_stmt:  import_here import_there.    (12)
+
+	.  reduce 12 (src line 271)
+
+
+state 206
+	import_here:  sym LLITERAL.    (16)
+
+	.  reduce 16 (src line 294)
+
+
+state 207
+	import_here:  '.' LLITERAL.    (17)
+
+	.  reduce 17 (src line 301)
+
+
+state 208
+	hidden_importsym:  '@' LLITERAL '.' LNAME.    (160)
+
+	.  reduce 160 (src line 1190)
+
+
+state 209
+	hidden_importsym:  '@' LLITERAL '.' '?'.    (161)
+
+	.  reduce 161 (src line 1205)
+
+
+state 210
+	import_there:  $$21 hidden_import_list '$' '$'.    (22)
+
+	.  reduce 22 (src line 338)
+
+
+state 211
+	hidden_import:  LIMPORT LNAME.LLITERAL ';' 
+
+	LLITERAL  shift 335
+	.  error
+
+
+state 212
+	hidden_import:  LVAR hidden_pkg_importsym.hidden_type ';' 
+
+	LCHAN  shift 347
+	LFUNC  shift 349
+	LINTERFACE  shift 345
+	LMAP  shift 343
+	LNAME  shift 341
+	LSTRUCT  shift 344
+	LCOMM  shift 348
+	'*'  shift 346
+	'['  shift 342
+	'@'  shift 13
+	.  error
+
+	hidden_importsym  goto 340
+	hidden_type  goto 336
+	hidden_type_misc  goto 337
+	hidden_type_func  goto 339
+	hidden_type_recv_chan  goto 338
+
+state 213
+	hidden_pkg_importsym:  hidden_importsym.    (310)
+
+	.  reduce 310 (src line 2047)
+
+
+state 214
+	hidden_import:  LCONST hidden_pkg_importsym.'=' hidden_constant ';' 
+	hidden_import:  LCONST hidden_pkg_importsym.hidden_type '=' hidden_constant ';' 
+
+	LCHAN  shift 347
+	LFUNC  shift 349
+	LINTERFACE  shift 345
+	LMAP  shift 343
+	LNAME  shift 341
+	LSTRUCT  shift 344
+	LCOMM  shift 348
+	'*'  shift 346
+	'='  shift 350
+	'['  shift 342
+	'@'  shift 13
+	.  error
+
+	hidden_importsym  goto 340
+	hidden_type  goto 351
+	hidden_type_misc  goto 337
+	hidden_type_func  goto 339
+	hidden_type_recv_chan  goto 338
+
+state 215
+	hidden_import:  LTYPE hidden_pkgtype.hidden_type ';' 
+
+	LCHAN  shift 347
+	LFUNC  shift 349
+	LINTERFACE  shift 345
+	LMAP  shift 343
+	LNAME  shift 341
+	LSTRUCT  shift 344
+	LCOMM  shift 348
+	'*'  shift 346
+	'['  shift 342
+	'@'  shift 13
+	.  error
+
+	hidden_importsym  goto 340
+	hidden_type  goto 352
+	hidden_type_misc  goto 337
+	hidden_type_func  goto 339
+	hidden_type_recv_chan  goto 338
+
+state 216
+	hidden_pkgtype:  hidden_pkg_importsym.    (311)
+
+	.  reduce 311 (src line 2054)
+
+
+state 217
+	hidden_import:  LFUNC hidden_fndcl.fnbody ';' 
+	fnbody: .    (210)
+
+	'{'  shift 242
+	.  reduce 210 (src line 1519)
+
+	fnbody  goto 353
+
+state 218
+	hidden_fndcl:  hidden_pkg_importsym.'(' ohidden_funarg_list ')' ohidden_funres 
+
+	'('  shift 354
+	.  error
+
+
+state 219
+	hidden_fndcl:  '('.hidden_funarg_list ')' sym '(' ohidden_funarg_list ')' ohidden_funres 
+
+	LNAME  shift 10
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 357
+	hidden_importsym  goto 11
+	hidden_funarg  goto 356
+	hidden_funarg_list  goto 355
+
+state 220
+	common_dcl:  LVAR '(' vardcl_list.osemi ')' 
+	vardcl_list:  vardcl_list.';' vardcl 
+	osemi: .    (286)
+
+	';'  shift 359
+	.  reduce 286 (src line 1955)
+
+	osemi  goto 358
+
+state 221
+	common_dcl:  LVAR '(' ')'.    (30)
+
+	.  reduce 30 (src line 376)
+
+
+state 222
+	vardcl_list:  vardcl.    (220)
+
+	.  reduce 220 (src line 1585)
+
+
+state 223
+	vardcl:  dcl_name_list ntype.    (39)
+	vardcl:  dcl_name_list ntype.'=' expr_list 
+
+	'='  shift 360
+	.  reduce 39 (src line 422)
+
+
+state 224
+	vardcl:  dcl_name_list '='.expr_list 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	expr  goto 129
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	expr_list  goto 361
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 225
+	dcl_name_list:  dcl_name_list ','.dcl_name 
+
+	LNAME  shift 10
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 105
+	dcl_name  goto 362
+	hidden_importsym  goto 11
+
+state 226
+	ntype:  recvchantype.    (166)
+
+	.  reduce 166 (src line 1252)
+
+
+state 227
+	ntype:  fntype.    (167)
+
+	.  reduce 167 (src line 1254)
+
+
+state 228
+	ntype:  othertype.    (168)
+
+	.  reduce 168 (src line 1255)
+
+
+state 229
+	ntype:  ptrtype.    (169)
+
+	.  reduce 169 (src line 1256)
+
+
+state 230
+	ntype:  dotname.    (170)
+
+	.  reduce 170 (src line 1257)
+
+
+state 231
+	ntype:  '('.ntype ')' 
+
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 232
+	'*'  shift 196
+	'('  shift 231
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	ntype  goto 363
+	dotname  goto 230
+	name  goto 197
+	interfacetype  goto 81
+	structtype  goto 80
+	ptrtype  goto 229
+	recvchantype  goto 226
+	othertype  goto 228
+	fntype  goto 227
+	hidden_importsym  goto 11
+
+state 232
+	recvchantype:  LCOMM.LCHAN ntype 
+
+	LCHAN  shift 364
+	.  error
+
+
+state 233
+	common_dcl:  lconst '(' constdcl.osemi ')' 
+	common_dcl:  lconst '(' constdcl.';' constdcl_list osemi ')' 
+	osemi: .    (286)
+
+	';'  shift 366
+	.  reduce 286 (src line 1955)
+
+	osemi  goto 365
+
+state 234
+	common_dcl:  lconst '(' ')'.    (34)
+
+	.  reduce 34 (src line 398)
+
+
+state 235
+	constdcl:  dcl_name_list ntype.'=' expr_list 
+
+	'='  shift 367
+	.  error
+
+
+state 236
+	constdcl:  dcl_name_list '='.expr_list 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	expr  goto 129
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	expr_list  goto 368
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 237
+	common_dcl:  LTYPE '(' typedcl_list.osemi ')' 
+	typedcl_list:  typedcl_list.';' typedcl 
+	osemi: .    (286)
+
+	';'  shift 370
+	.  reduce 286 (src line 1955)
+
+	osemi  goto 369
+
+state 238
+	common_dcl:  LTYPE '(' ')'.    (37)
+
+	.  reduce 37 (src line 411)
+
+
+state 239
+	typedcl_list:  typedcl.    (224)
+
+	.  reduce 224 (src line 1599)
+
+
+state 240
+	typedcl:  typedclname ntype.    (48)
+
+	.  reduce 48 (src line 466)
+
+
+state 241
+	xfndcl:  LFUNC fndcl fnbody.    (204)
+
+	.  reduce 204 (src line 1380)
+
+
+state 242
+	fnbody:  '{'.stmt_list '}' 
+	stmt: .    (251)
+
+	error  shift 307
+	LLITERAL  shift 68
+	LBREAK  shift 41
+	LCHAN  shift 78
+	LCONST  shift 47
+	LCONTINUE  shift 42
+	LDEFER  shift 44
+	LFALL  shift 40
+	LFOR  shift 50
+	LFUNC  shift 124
+	LGO  shift 43
+	LGOTO  shift 45
+	LIF  shift 53
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LRETURN  shift 46
+	LSELECT  shift 52
+	LSTRUCT  shift 82
+	LSWITCH  shift 51
+	LTYPE  shift 32
+	LVAR  shift 30
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	';'  reduce 251 (src line 1781)
+	'{'  shift 308
+	'}'  reduce 251 (src line 1781)
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 56
+	stmt  goto 303
+	compound_stmt  goto 304
+	expr  goto 48
+	fnliteral  goto 73
+	for_stmt  goto 35
+	if_stmt  goto 38
+	non_dcl_stmt  goto 306
+	labelname  goto 39
+	name  goto 69
+	new_name  goto 54
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	select_stmt  goto 37
+	simple_stmt  goto 34
+	switch_stmt  goto 36
+	uexpr  goto 55
+	expr_list  goto 49
+	stmt_list  goto 371
+	common_dcl  goto 305
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	lconst  goto 31
+	fnlitdcl  goto 76
+
+state 243
+	fndcl:  '(' oarg_type_list_ocomma.')' sym '(' oarg_type_list_ocomma ')' fnres 
+	fntype:  LFUNC '(' oarg_type_list_ocomma.')' fnres 
+
+	')'  shift 372
+	.  error
+
+
+state 244
+	arg_type_list:  arg_type_list.',' arg_type 
+	oarg_type_list_ocomma:  arg_type_list.ocomma 
+	ocomma: .    (288)
+
+	','  shift 373
+	.  reduce 288 (src line 1958)
+
+	ocomma  goto 374
+
+state 245
+	arg_type_list:  arg_type.    (247)
+
+	.  reduce 247 (src line 1759)
+
+
+state 246
+	arg_type:  name_or_type.    (243)
+
+	.  reduce 243 (src line 1743)
+
+
+state 247
+	name:  sym.    (162)
+	arg_type:  sym.name_or_type 
+	arg_type:  sym.dotdotdot 
+
+	LCHAN  shift 78
+	LDDD  shift 250
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 232
+	'*'  shift 196
+	'('  shift 231
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 162 (src line 1220)
+
+	sym  goto 123
+	ntype  goto 249
+	dotname  goto 230
+	name  goto 197
+	name_or_type  goto 375
+	dotdotdot  goto 376
+	interfacetype  goto 81
+	structtype  goto 80
+	ptrtype  goto 229
+	recvchantype  goto 226
+	othertype  goto 228
+	fntype  goto 227
+	hidden_importsym  goto 11
+
+state 248
+	arg_type:  dotdotdot.    (246)
+
+	.  reduce 246 (src line 1757)
+
+
+state 249
+	name_or_type:  ntype.    (150)
+
+	.  reduce 150 (src line 1135)
+
+
+state 250
+	dotdotdot:  LDDD.    (164)
+	dotdotdot:  LDDD.ntype 
+
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 232
+	'*'  shift 196
+	'('  shift 231
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 164 (src line 1241)
+
+	sym  goto 123
+	ntype  goto 377
+	dotname  goto 230
+	name  goto 197
+	interfacetype  goto 81
+	structtype  goto 80
+	ptrtype  goto 229
+	recvchantype  goto 226
+	othertype  goto 228
+	fntype  goto 227
+	hidden_importsym  goto 11
+
+state 251
+	fndcl:  sym '('.oarg_type_list_ocomma ')' fnres 
+	oarg_type_list_ocomma: .    (249)
+
+	LCHAN  shift 78
+	LDDD  shift 250
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 232
+	'*'  shift 196
+	'('  shift 231
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 249 (src line 1769)
+
+	sym  goto 247
+	ntype  goto 249
+	arg_type  goto 245
+	dotname  goto 230
+	name  goto 197
+	name_or_type  goto 246
+	oarg_type_list_ocomma  goto 378
+	arg_type_list  goto 244
+	dotdotdot  goto 248
+	interfacetype  goto 81
+	structtype  goto 80
+	ptrtype  goto 229
+	recvchantype  goto 226
+	othertype  goto 228
+	fntype  goto 227
+	hidden_importsym  goto 11
+
+state 252
+	non_dcl_stmt:  labelname ':' $$261.stmt 
+	stmt: .    (251)
+
+	error  shift 307
+	LLITERAL  shift 68
+	LBREAK  shift 41
+	LCASE  reduce 251 (src line 1781)
+	LCHAN  shift 78
+	LCONST  shift 47
+	LCONTINUE  shift 42
+	LDEFAULT  reduce 251 (src line 1781)
+	LDEFER  shift 44
+	LFALL  shift 40
+	LFOR  shift 50
+	LFUNC  shift 124
+	LGO  shift 43
+	LGOTO  shift 45
+	LIF  shift 53
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LRETURN  shift 46
+	LSELECT  shift 52
+	LSTRUCT  shift 82
+	LSWITCH  shift 51
+	LTYPE  shift 32
+	LVAR  shift 30
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	';'  reduce 251 (src line 1781)
+	'{'  shift 308
+	'}'  reduce 251 (src line 1781)
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 56
+	stmt  goto 379
+	compound_stmt  goto 304
+	expr  goto 48
+	fnliteral  goto 73
+	for_stmt  goto 35
+	if_stmt  goto 38
+	non_dcl_stmt  goto 306
+	labelname  goto 39
+	name  goto 69
+	new_name  goto 54
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	select_stmt  goto 37
+	simple_stmt  goto 34
+	switch_stmt  goto 36
+	uexpr  goto 55
+	expr_list  goto 49
+	common_dcl  goto 305
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	lconst  goto 31
+	fnlitdcl  goto 76
+
+state 253
+	fntype:  LFUNC '('.oarg_type_list_ocomma ')' fnres 
+	oarg_type_list_ocomma: .    (249)
+
+	LCHAN  shift 78
+	LDDD  shift 250
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 232
+	'*'  shift 196
+	'('  shift 231
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 249 (src line 1769)
+
+	sym  goto 247
+	ntype  goto 249
+	arg_type  goto 245
+	dotname  goto 230
+	name  goto 197
+	name_or_type  goto 246
+	oarg_type_list_ocomma  goto 380
+	arg_type_list  goto 244
+	dotdotdot  goto 248
+	interfacetype  goto 81
+	structtype  goto 80
+	ptrtype  goto 229
+	recvchantype  goto 226
+	othertype  goto 228
+	fntype  goto 227
+	hidden_importsym  goto 11
+
+state 254
+	simple_stmt:  expr LASOP expr.    (50)
+	expr:  expr.LOROR expr 
+	expr:  expr.LANDAND expr 
+	expr:  expr.LEQ expr 
+	expr:  expr.LNE expr 
+	expr:  expr.LLT expr 
+	expr:  expr.LLE expr 
+	expr:  expr.LGE expr 
+	expr:  expr.LGT expr 
+	expr:  expr.'+' expr 
+	expr:  expr.'-' expr 
+	expr:  expr.'|' expr 
+	expr:  expr.'^' expr 
+	expr:  expr.'*' expr 
+	expr:  expr.'/' expr 
+	expr:  expr.'%' expr 
+	expr:  expr.'&' expr 
+	expr:  expr.LANDNOT expr 
+	expr:  expr.LLSH expr 
+	expr:  expr.LRSH expr 
+	expr:  expr.LCOMM expr 
+
+	LANDAND  shift 134
+	LANDNOT  shift 149
+	LCOMM  shift 152
+	LEQ  shift 135
+	LGE  shift 139
+	LGT  shift 140
+	LLE  shift 138
+	LLSH  shift 150
+	LLT  shift 137
+	LNE  shift 136
+	LOROR  shift 133
+	LRSH  shift 151
+	'+'  shift 141
+	'-'  shift 142
+	'|'  shift 143
+	'^'  shift 144
+	'*'  shift 145
+	'/'  shift 146
+	'%'  shift 147
+	'&'  shift 148
+	.  reduce 50 (src line 487)
+
+
+state 255
+	expr:  expr.LOROR expr 
+	expr:  expr LOROR expr.    (94)
+	expr:  expr.LANDAND expr 
+	expr:  expr.LEQ expr 
+	expr:  expr.LNE expr 
+	expr:  expr.LLT expr 
+	expr:  expr.LLE expr 
+	expr:  expr.LGE expr 
+	expr:  expr.LGT expr 
+	expr:  expr.'+' expr 
+	expr:  expr.'-' expr 
+	expr:  expr.'|' expr 
+	expr:  expr.'^' expr 
+	expr:  expr.'*' expr 
+	expr:  expr.'/' expr 
+	expr:  expr.'%' expr 
+	expr:  expr.'&' expr 
+	expr:  expr.LANDNOT expr 
+	expr:  expr.LLSH expr 
+	expr:  expr.LRSH expr 
+	expr:  expr.LCOMM expr 
+
+	LANDAND  shift 134
+	LANDNOT  shift 149
+	LEQ  shift 135
+	LGE  shift 139
+	LGT  shift 140
+	LLE  shift 138
+	LLSH  shift 150
+	LLT  shift 137
+	LNE  shift 136
+	LRSH  shift 151
+	'+'  shift 141
+	'-'  shift 142
+	'|'  shift 143
+	'^'  shift 144
+	'*'  shift 145
+	'/'  shift 146
+	'%'  shift 147
+	'&'  shift 148
+	.  reduce 94 (src line 857)
+
+
+state 256
+	expr:  expr.LOROR expr 
+	expr:  expr.LANDAND expr 
+	expr:  expr LANDAND expr.    (95)
+	expr:  expr.LEQ expr 
+	expr:  expr.LNE expr 
+	expr:  expr.LLT expr 
+	expr:  expr.LLE expr 
+	expr:  expr.LGE expr 
+	expr:  expr.LGT expr 
+	expr:  expr.'+' expr 
+	expr:  expr.'-' expr 
+	expr:  expr.'|' expr 
+	expr:  expr.'^' expr 
+	expr:  expr.'*' expr 
+	expr:  expr.'/' expr 
+	expr:  expr.'%' expr 
+	expr:  expr.'&' expr 
+	expr:  expr.LANDNOT expr 
+	expr:  expr.LLSH expr 
+	expr:  expr.LRSH expr 
+	expr:  expr.LCOMM expr 
+
+	LANDNOT  shift 149
+	LEQ  shift 135
+	LGE  shift 139
+	LGT  shift 140
+	LLE  shift 138
+	LLSH  shift 150
+	LLT  shift 137
+	LNE  shift 136
+	LRSH  shift 151
+	'+'  shift 141
+	'-'  shift 142
+	'|'  shift 143
+	'^'  shift 144
+	'*'  shift 145
+	'/'  shift 146
+	'%'  shift 147
+	'&'  shift 148
+	.  reduce 95 (src line 861)
+
+
+state 257
+	expr:  expr.LOROR expr 
+	expr:  expr.LANDAND expr 
+	expr:  expr.LEQ expr 
+	expr:  expr LEQ expr.    (96)
+	expr:  expr.LNE expr 
+	expr:  expr.LLT expr 
+	expr:  expr.LLE expr 
+	expr:  expr.LGE expr 
+	expr:  expr.LGT expr 
+	expr:  expr.'+' expr 
+	expr:  expr.'-' expr 
+	expr:  expr.'|' expr 
+	expr:  expr.'^' expr 
+	expr:  expr.'*' expr 
+	expr:  expr.'/' expr 
+	expr:  expr.'%' expr 
+	expr:  expr.'&' expr 
+	expr:  expr.LANDNOT expr 
+	expr:  expr.LLSH expr 
+	expr:  expr.LRSH expr 
+	expr:  expr.LCOMM expr 
+
+	LANDNOT  shift 149
+	LLSH  shift 150
+	LRSH  shift 151
+	'+'  shift 141
+	'-'  shift 142
+	'|'  shift 143
+	'^'  shift 144
+	'*'  shift 145
+	'/'  shift 146
+	'%'  shift 147
+	'&'  shift 148
+	.  reduce 96 (src line 865)
+
+
+state 258
+	expr:  expr.LOROR expr 
+	expr:  expr.LANDAND expr 
+	expr:  expr.LEQ expr 
+	expr:  expr.LNE expr 
+	expr:  expr LNE expr.    (97)
+	expr:  expr.LLT expr 
+	expr:  expr.LLE expr 
+	expr:  expr.LGE expr 
+	expr:  expr.LGT expr 
+	expr:  expr.'+' expr 
+	expr:  expr.'-' expr 
+	expr:  expr.'|' expr 
+	expr:  expr.'^' expr 
+	expr:  expr.'*' expr 
+	expr:  expr.'/' expr 
+	expr:  expr.'%' expr 
+	expr:  expr.'&' expr 
+	expr:  expr.LANDNOT expr 
+	expr:  expr.LLSH expr 
+	expr:  expr.LRSH expr 
+	expr:  expr.LCOMM expr 
+
+	LANDNOT  shift 149
+	LLSH  shift 150
+	LRSH  shift 151
+	'+'  shift 141
+	'-'  shift 142
+	'|'  shift 143
+	'^'  shift 144
+	'*'  shift 145
+	'/'  shift 146
+	'%'  shift 147
+	'&'  shift 148
+	.  reduce 97 (src line 869)
+
+
+state 259
+	expr:  expr.LOROR expr 
+	expr:  expr.LANDAND expr 
+	expr:  expr.LEQ expr 
+	expr:  expr.LNE expr 
+	expr:  expr.LLT expr 
+	expr:  expr LLT expr.    (98)
+	expr:  expr.LLE expr 
+	expr:  expr.LGE expr 
+	expr:  expr.LGT expr 
+	expr:  expr.'+' expr 
+	expr:  expr.'-' expr 
+	expr:  expr.'|' expr 
+	expr:  expr.'^' expr 
+	expr:  expr.'*' expr 
+	expr:  expr.'/' expr 
+	expr:  expr.'%' expr 
+	expr:  expr.'&' expr 
+	expr:  expr.LANDNOT expr 
+	expr:  expr.LLSH expr 
+	expr:  expr.LRSH expr 
+	expr:  expr.LCOMM expr 
+
+	LANDNOT  shift 149
+	LLSH  shift 150
+	LRSH  shift 151
+	'+'  shift 141
+	'-'  shift 142
+	'|'  shift 143
+	'^'  shift 144
+	'*'  shift 145
+	'/'  shift 146
+	'%'  shift 147
+	'&'  shift 148
+	.  reduce 98 (src line 873)
+
+
+state 260
+	expr:  expr.LOROR expr 
+	expr:  expr.LANDAND expr 
+	expr:  expr.LEQ expr 
+	expr:  expr.LNE expr 
+	expr:  expr.LLT expr 
+	expr:  expr.LLE expr 
+	expr:  expr LLE expr.    (99)
+	expr:  expr.LGE expr 
+	expr:  expr.LGT expr 
+	expr:  expr.'+' expr 
+	expr:  expr.'-' expr 
+	expr:  expr.'|' expr 
+	expr:  expr.'^' expr 
+	expr:  expr.'*' expr 
+	expr:  expr.'/' expr 
+	expr:  expr.'%' expr 
+	expr:  expr.'&' expr 
+	expr:  expr.LANDNOT expr 
+	expr:  expr.LLSH expr 
+	expr:  expr.LRSH expr 
+	expr:  expr.LCOMM expr 
+
+	LANDNOT  shift 149
+	LLSH  shift 150
+	LRSH  shift 151
+	'+'  shift 141
+	'-'  shift 142
+	'|'  shift 143
+	'^'  shift 144
+	'*'  shift 145
+	'/'  shift 146
+	'%'  shift 147
+	'&'  shift 148
+	.  reduce 99 (src line 877)
+
+
+state 261
+	expr:  expr.LOROR expr 
+	expr:  expr.LANDAND expr 
+	expr:  expr.LEQ expr 
+	expr:  expr.LNE expr 
+	expr:  expr.LLT expr 
+	expr:  expr.LLE expr 
+	expr:  expr.LGE expr 
+	expr:  expr LGE expr.    (100)
+	expr:  expr.LGT expr 
+	expr:  expr.'+' expr 
+	expr:  expr.'-' expr 
+	expr:  expr.'|' expr 
+	expr:  expr.'^' expr 
+	expr:  expr.'*' expr 
+	expr:  expr.'/' expr 
+	expr:  expr.'%' expr 
+	expr:  expr.'&' expr 
+	expr:  expr.LANDNOT expr 
+	expr:  expr.LLSH expr 
+	expr:  expr.LRSH expr 
+	expr:  expr.LCOMM expr 
+
+	LANDNOT  shift 149
+	LLSH  shift 150
+	LRSH  shift 151
+	'+'  shift 141
+	'-'  shift 142
+	'|'  shift 143
+	'^'  shift 144
+	'*'  shift 145
+	'/'  shift 146
+	'%'  shift 147
+	'&'  shift 148
+	.  reduce 100 (src line 881)
+
+
+state 262
+	expr:  expr.LOROR expr 
+	expr:  expr.LANDAND expr 
+	expr:  expr.LEQ expr 
+	expr:  expr.LNE expr 
+	expr:  expr.LLT expr 
+	expr:  expr.LLE expr 
+	expr:  expr.LGE expr 
+	expr:  expr.LGT expr 
+	expr:  expr LGT expr.    (101)
+	expr:  expr.'+' expr 
+	expr:  expr.'-' expr 
+	expr:  expr.'|' expr 
+	expr:  expr.'^' expr 
+	expr:  expr.'*' expr 
+	expr:  expr.'/' expr 
+	expr:  expr.'%' expr 
+	expr:  expr.'&' expr 
+	expr:  expr.LANDNOT expr 
+	expr:  expr.LLSH expr 
+	expr:  expr.LRSH expr 
+	expr:  expr.LCOMM expr 
+
+	LANDNOT  shift 149
+	LLSH  shift 150
+	LRSH  shift 151
+	'+'  shift 141
+	'-'  shift 142
+	'|'  shift 143
+	'^'  shift 144
+	'*'  shift 145
+	'/'  shift 146
+	'%'  shift 147
+	'&'  shift 148
+	.  reduce 101 (src line 885)
+
+
+state 263
+	expr:  expr.LOROR expr 
+	expr:  expr.LANDAND expr 
+	expr:  expr.LEQ expr 
+	expr:  expr.LNE expr 
+	expr:  expr.LLT expr 
+	expr:  expr.LLE expr 
+	expr:  expr.LGE expr 
+	expr:  expr.LGT expr 
+	expr:  expr.'+' expr 
+	expr:  expr '+' expr.    (102)
+	expr:  expr.'-' expr 
+	expr:  expr.'|' expr 
+	expr:  expr.'^' expr 
+	expr:  expr.'*' expr 
+	expr:  expr.'/' expr 
+	expr:  expr.'%' expr 
+	expr:  expr.'&' expr 
+	expr:  expr.LANDNOT expr 
+	expr:  expr.LLSH expr 
+	expr:  expr.LRSH expr 
+	expr:  expr.LCOMM expr 
+
+	LANDNOT  shift 149
+	LLSH  shift 150
+	LRSH  shift 151
+	'*'  shift 145
+	'/'  shift 146
+	'%'  shift 147
+	'&'  shift 148
+	.  reduce 102 (src line 889)
+
+
+state 264
+	expr:  expr.LOROR expr 
+	expr:  expr.LANDAND expr 
+	expr:  expr.LEQ expr 
+	expr:  expr.LNE expr 
+	expr:  expr.LLT expr 
+	expr:  expr.LLE expr 
+	expr:  expr.LGE expr 
+	expr:  expr.LGT expr 
+	expr:  expr.'+' expr 
+	expr:  expr.'-' expr 
+	expr:  expr '-' expr.    (103)
+	expr:  expr.'|' expr 
+	expr:  expr.'^' expr 
+	expr:  expr.'*' expr 
+	expr:  expr.'/' expr 
+	expr:  expr.'%' expr 
+	expr:  expr.'&' expr 
+	expr:  expr.LANDNOT expr 
+	expr:  expr.LLSH expr 
+	expr:  expr.LRSH expr 
+	expr:  expr.LCOMM expr 
+
+	LANDNOT  shift 149
+	LLSH  shift 150
+	LRSH  shift 151
+	'*'  shift 145
+	'/'  shift 146
+	'%'  shift 147
+	'&'  shift 148
+	.  reduce 103 (src line 893)
+
+
+state 265
+	expr:  expr.LOROR expr 
+	expr:  expr.LANDAND expr 
+	expr:  expr.LEQ expr 
+	expr:  expr.LNE expr 
+	expr:  expr.LLT expr 
+	expr:  expr.LLE expr 
+	expr:  expr.LGE expr 
+	expr:  expr.LGT expr 
+	expr:  expr.'+' expr 
+	expr:  expr.'-' expr 
+	expr:  expr.'|' expr 
+	expr:  expr '|' expr.    (104)
+	expr:  expr.'^' expr 
+	expr:  expr.'*' expr 
+	expr:  expr.'/' expr 
+	expr:  expr.'%' expr 
+	expr:  expr.'&' expr 
+	expr:  expr.LANDNOT expr 
+	expr:  expr.LLSH expr 
+	expr:  expr.LRSH expr 
+	expr:  expr.LCOMM expr 
+
+	LANDNOT  shift 149
+	LLSH  shift 150
+	LRSH  shift 151
+	'*'  shift 145
+	'/'  shift 146
+	'%'  shift 147
+	'&'  shift 148
+	.  reduce 104 (src line 897)
+
+
+state 266
+	expr:  expr.LOROR expr 
+	expr:  expr.LANDAND expr 
+	expr:  expr.LEQ expr 
+	expr:  expr.LNE expr 
+	expr:  expr.LLT expr 
+	expr:  expr.LLE expr 
+	expr:  expr.LGE expr 
+	expr:  expr.LGT expr 
+	expr:  expr.'+' expr 
+	expr:  expr.'-' expr 
+	expr:  expr.'|' expr 
+	expr:  expr.'^' expr 
+	expr:  expr '^' expr.    (105)
+	expr:  expr.'*' expr 
+	expr:  expr.'/' expr 
+	expr:  expr.'%' expr 
+	expr:  expr.'&' expr 
+	expr:  expr.LANDNOT expr 
+	expr:  expr.LLSH expr 
+	expr:  expr.LRSH expr 
+	expr:  expr.LCOMM expr 
+
+	LANDNOT  shift 149
+	LLSH  shift 150
+	LRSH  shift 151
+	'*'  shift 145
+	'/'  shift 146
+	'%'  shift 147
+	'&'  shift 148
+	.  reduce 105 (src line 901)
+
+
+state 267
+	expr:  expr.LOROR expr 
+	expr:  expr.LANDAND expr 
+	expr:  expr.LEQ expr 
+	expr:  expr.LNE expr 
+	expr:  expr.LLT expr 
+	expr:  expr.LLE expr 
+	expr:  expr.LGE expr 
+	expr:  expr.LGT expr 
+	expr:  expr.'+' expr 
+	expr:  expr.'-' expr 
+	expr:  expr.'|' expr 
+	expr:  expr.'^' expr 
+	expr:  expr.'*' expr 
+	expr:  expr '*' expr.    (106)
+	expr:  expr.'/' expr 
+	expr:  expr.'%' expr 
+	expr:  expr.'&' expr 
+	expr:  expr.LANDNOT expr 
+	expr:  expr.LLSH expr 
+	expr:  expr.LRSH expr 
+	expr:  expr.LCOMM expr 
+
+	.  reduce 106 (src line 905)
+
+
+state 268
+	expr:  expr.LOROR expr 
+	expr:  expr.LANDAND expr 
+	expr:  expr.LEQ expr 
+	expr:  expr.LNE expr 
+	expr:  expr.LLT expr 
+	expr:  expr.LLE expr 
+	expr:  expr.LGE expr 
+	expr:  expr.LGT expr 
+	expr:  expr.'+' expr 
+	expr:  expr.'-' expr 
+	expr:  expr.'|' expr 
+	expr:  expr.'^' expr 
+	expr:  expr.'*' expr 
+	expr:  expr.'/' expr 
+	expr:  expr '/' expr.    (107)
+	expr:  expr.'%' expr 
+	expr:  expr.'&' expr 
+	expr:  expr.LANDNOT expr 
+	expr:  expr.LLSH expr 
+	expr:  expr.LRSH expr 
+	expr:  expr.LCOMM expr 
+
+	.  reduce 107 (src line 909)
+
+
+state 269
+	expr:  expr.LOROR expr 
+	expr:  expr.LANDAND expr 
+	expr:  expr.LEQ expr 
+	expr:  expr.LNE expr 
+	expr:  expr.LLT expr 
+	expr:  expr.LLE expr 
+	expr:  expr.LGE expr 
+	expr:  expr.LGT expr 
+	expr:  expr.'+' expr 
+	expr:  expr.'-' expr 
+	expr:  expr.'|' expr 
+	expr:  expr.'^' expr 
+	expr:  expr.'*' expr 
+	expr:  expr.'/' expr 
+	expr:  expr.'%' expr 
+	expr:  expr '%' expr.    (108)
+	expr:  expr.'&' expr 
+	expr:  expr.LANDNOT expr 
+	expr:  expr.LLSH expr 
+	expr:  expr.LRSH expr 
+	expr:  expr.LCOMM expr 
+
+	.  reduce 108 (src line 913)
+
+
+state 270
+	expr:  expr.LOROR expr 
+	expr:  expr.LANDAND expr 
+	expr:  expr.LEQ expr 
+	expr:  expr.LNE expr 
+	expr:  expr.LLT expr 
+	expr:  expr.LLE expr 
+	expr:  expr.LGE expr 
+	expr:  expr.LGT expr 
+	expr:  expr.'+' expr 
+	expr:  expr.'-' expr 
+	expr:  expr.'|' expr 
+	expr:  expr.'^' expr 
+	expr:  expr.'*' expr 
+	expr:  expr.'/' expr 
+	expr:  expr.'%' expr 
+	expr:  expr.'&' expr 
+	expr:  expr '&' expr.    (109)
+	expr:  expr.LANDNOT expr 
+	expr:  expr.LLSH expr 
+	expr:  expr.LRSH expr 
+	expr:  expr.LCOMM expr 
+
+	.  reduce 109 (src line 917)
+
+
+state 271
+	expr:  expr.LOROR expr 
+	expr:  expr.LANDAND expr 
+	expr:  expr.LEQ expr 
+	expr:  expr.LNE expr 
+	expr:  expr.LLT expr 
+	expr:  expr.LLE expr 
+	expr:  expr.LGE expr 
+	expr:  expr.LGT expr 
+	expr:  expr.'+' expr 
+	expr:  expr.'-' expr 
+	expr:  expr.'|' expr 
+	expr:  expr.'^' expr 
+	expr:  expr.'*' expr 
+	expr:  expr.'/' expr 
+	expr:  expr.'%' expr 
+	expr:  expr.'&' expr 
+	expr:  expr.LANDNOT expr 
+	expr:  expr LANDNOT expr.    (110)
+	expr:  expr.LLSH expr 
+	expr:  expr.LRSH expr 
+	expr:  expr.LCOMM expr 
+
+	.  reduce 110 (src line 921)
+
+
+state 272
+	expr:  expr.LOROR expr 
+	expr:  expr.LANDAND expr 
+	expr:  expr.LEQ expr 
+	expr:  expr.LNE expr 
+	expr:  expr.LLT expr 
+	expr:  expr.LLE expr 
+	expr:  expr.LGE expr 
+	expr:  expr.LGT expr 
+	expr:  expr.'+' expr 
+	expr:  expr.'-' expr 
+	expr:  expr.'|' expr 
+	expr:  expr.'^' expr 
+	expr:  expr.'*' expr 
+	expr:  expr.'/' expr 
+	expr:  expr.'%' expr 
+	expr:  expr.'&' expr 
+	expr:  expr.LANDNOT expr 
+	expr:  expr.LLSH expr 
+	expr:  expr LLSH expr.    (111)
+	expr:  expr.LRSH expr 
+	expr:  expr.LCOMM expr 
+
+	.  reduce 111 (src line 925)
+
+
+state 273
+	expr:  expr.LOROR expr 
+	expr:  expr.LANDAND expr 
+	expr:  expr.LEQ expr 
+	expr:  expr.LNE expr 
+	expr:  expr.LLT expr 
+	expr:  expr.LLE expr 
+	expr:  expr.LGE expr 
+	expr:  expr.LGT expr 
+	expr:  expr.'+' expr 
+	expr:  expr.'-' expr 
+	expr:  expr.'|' expr 
+	expr:  expr.'^' expr 
+	expr:  expr.'*' expr 
+	expr:  expr.'/' expr 
+	expr:  expr.'%' expr 
+	expr:  expr.'&' expr 
+	expr:  expr.LANDNOT expr 
+	expr:  expr.LLSH expr 
+	expr:  expr.LRSH expr 
+	expr:  expr LRSH expr.    (112)
+	expr:  expr.LCOMM expr 
+
+	.  reduce 112 (src line 929)
+
+
+state 274
+	expr:  expr.LOROR expr 
+	expr:  expr.LANDAND expr 
+	expr:  expr.LEQ expr 
+	expr:  expr.LNE expr 
+	expr:  expr.LLT expr 
+	expr:  expr.LLE expr 
+	expr:  expr.LGE expr 
+	expr:  expr.LGT expr 
+	expr:  expr.'+' expr 
+	expr:  expr.'-' expr 
+	expr:  expr.'|' expr 
+	expr:  expr.'^' expr 
+	expr:  expr.'*' expr 
+	expr:  expr.'/' expr 
+	expr:  expr.'%' expr 
+	expr:  expr.'&' expr 
+	expr:  expr.LANDNOT expr 
+	expr:  expr.LLSH expr 
+	expr:  expr.LRSH expr 
+	expr:  expr.LCOMM expr 
+	expr:  expr LCOMM expr.    (113)
+
+	LANDAND  shift 134
+	LANDNOT  shift 149
+	LEQ  shift 135
+	LGE  shift 139
+	LGT  shift 140
+	LLE  shift 138
+	LLSH  shift 150
+	LLT  shift 137
+	LNE  shift 136
+	LOROR  shift 133
+	LRSH  shift 151
+	'+'  shift 141
+	'-'  shift 142
+	'|'  shift 143
+	'^'  shift 144
+	'*'  shift 145
+	'/'  shift 146
+	'%'  shift 147
+	'&'  shift 148
+	.  reduce 113 (src line 934)
+
+
+state 275
+	simple_stmt:  expr_list '=' expr_list.    (51)
+	expr_list:  expr_list.',' expr 
+
+	','  shift 155
+	.  reduce 51 (src line 492)
+
+
+state 276
+	simple_stmt:  expr_list LCOLAS expr_list.    (52)
+	expr_list:  expr_list.',' expr 
+
+	','  shift 155
+	.  reduce 52 (src line 504)
+
+
+state 277
+	expr:  expr.LOROR expr 
+	expr:  expr.LANDAND expr 
+	expr:  expr.LEQ expr 
+	expr:  expr.LNE expr 
+	expr:  expr.LLT expr 
+	expr:  expr.LLE expr 
+	expr:  expr.LGE expr 
+	expr:  expr.LGT expr 
+	expr:  expr.'+' expr 
+	expr:  expr.'-' expr 
+	expr:  expr.'|' expr 
+	expr:  expr.'^' expr 
+	expr:  expr.'*' expr 
+	expr:  expr.'/' expr 
+	expr:  expr.'%' expr 
+	expr:  expr.'&' expr 
+	expr:  expr.LANDNOT expr 
+	expr:  expr.LLSH expr 
+	expr:  expr.LRSH expr 
+	expr:  expr.LCOMM expr 
+	expr_list:  expr_list ',' expr.    (277)
+
+	LANDAND  shift 134
+	LANDNOT  shift 149
+	LCOMM  shift 152
+	LEQ  shift 135
+	LGE  shift 139
+	LGT  shift 140
+	LLE  shift 138
+	LLSH  shift 150
+	LLT  shift 137
+	LNE  shift 136
+	LOROR  shift 133
+	LRSH  shift 151
+	'+'  shift 141
+	'-'  shift 142
+	'|'  shift 143
+	'^'  shift 144
+	'*'  shift 145
+	'/'  shift 146
+	'%'  shift 147
+	'&'  shift 148
+	.  reduce 277 (src line 1907)
+
+
+state 278
+	for_stmt:  LFOR $$74 for_body.    (75)
+
+	.  reduce 75 (src line 726)
+
+
+state 279
+	for_body:  for_header.loop_body 
+
+	LBODY  shift 382
+	.  error
+
+	loop_body  goto 381
+
+state 280
+	for_header:  osimple_stmt.';' osimple_stmt ';' osimple_stmt 
+	for_header:  osimple_stmt.    (71)
+
+	';'  shift 383
+	.  reduce 71 (src line 706)
+
+
+state 281
+	for_header:  range_stmt.    (72)
+
+	.  reduce 72 (src line 712)
+
+
+state 282
+	osimple_stmt:  simple_stmt.    (295)
+
+	.  reduce 295 (src line 1977)
+
+
+state 283
+	simple_stmt:  expr_list.'=' expr_list 
+	simple_stmt:  expr_list.LCOLAS expr_list 
+	range_stmt:  expr_list.'=' LRANGE expr 
+	range_stmt:  expr_list.LCOLAS LRANGE expr 
+	expr_list:  expr_list.',' expr 
+
+	LCOLAS  shift 385
+	'='  shift 384
+	','  shift 155
+	.  error
+
+
+state 284
+	range_stmt:  LRANGE.expr 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	expr  goto 386
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 285
+	switch_stmt:  LSWITCH $$88 if_header.$$89 LBODY caseblock_list '}' 
+	$$89: .    (89)
+
+	.  reduce 89 (src line 821)
+
+	$$89  goto 387
+
+state 286
+	if_header:  osimple_stmt.    (76)
+	if_header:  osimple_stmt.';' osimple_stmt 
+
+	';'  shift 388
+	.  reduce 76 (src line 732)
+
+
+state 287
+	select_stmt:  LSELECT $$91 LBODY.caseblock_list '}' 
+	caseblock_list: .    (63)
+
+	.  reduce 63 (src line 652)
+
+	caseblock_list  goto 389
+
+state 288
+	if_stmt:  LIF $$78 if_header.$$79 loop_body $$80 elseif_list else 
+	$$79: .    (79)
+
+	.  reduce 79 (src line 755)
+
+	$$79  goto 390
+
+state 289
+	pseudocall:  pexpr '(' ')'.    (123)
+
+	.  reduce 123 (src line 986)
+
+
+state 290
+	pseudocall:  pexpr '(' expr_or_type_list.ocomma ')' 
+	pseudocall:  pexpr '(' expr_or_type_list.LDDD ocomma ')' 
+	expr_or_type_list:  expr_or_type_list.',' expr_or_type 
+	ocomma: .    (288)
+
+	LDDD  shift 392
+	','  shift 393
+	.  reduce 288 (src line 1958)
+
+	ocomma  goto 391
+
+state 291
+	expr_or_type_list:  expr_or_type.    (278)
+
+	.  reduce 278 (src line 1912)
+
+
+state 292
+	pexpr_no_paren:  pexpr '.' sym.    (128)
+
+	.  reduce 128 (src line 1009)
+
+
+state 293
+	pexpr_no_paren:  pexpr '.' '('.expr_or_type ')' 
+	pexpr_no_paren:  pexpr '.' '('.LTYPE ')' 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LTYPE  shift 395
+	LCOMM  shift 179
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 178
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	expr  goto 173
+	expr_or_type  goto 394
+	fnliteral  goto 73
+	name  goto 69
+	non_expr_type  goto 174
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	recvchantype  goto 175
+	othertype  goto 177
+	fntype  goto 176
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 294
+	expr:  expr.LOROR expr 
+	expr:  expr.LANDAND expr 
+	expr:  expr.LEQ expr 
+	expr:  expr.LNE expr 
+	expr:  expr.LLT expr 
+	expr:  expr.LLE expr 
+	expr:  expr.LGE expr 
+	expr:  expr.LGT expr 
+	expr:  expr.'+' expr 
+	expr:  expr.'-' expr 
+	expr:  expr.'|' expr 
+	expr:  expr.'^' expr 
+	expr:  expr.'*' expr 
+	expr:  expr.'/' expr 
+	expr:  expr.'%' expr 
+	expr:  expr.'&' expr 
+	expr:  expr.LANDNOT expr 
+	expr:  expr.LLSH expr 
+	expr:  expr.LRSH expr 
+	expr:  expr.LCOMM expr 
+	pexpr_no_paren:  pexpr '[' expr.']' 
+	oexpr:  expr.    (291)
+
+	LANDAND  shift 134
+	LANDNOT  shift 149
+	LCOMM  shift 152
+	LEQ  shift 135
+	LGE  shift 139
+	LGT  shift 140
+	LLE  shift 138
+	LLSH  shift 150
+	LLT  shift 137
+	LNE  shift 136
+	LOROR  shift 133
+	LRSH  shift 151
+	'+'  shift 141
+	'-'  shift 142
+	'|'  shift 143
+	'^'  shift 144
+	'*'  shift 145
+	'/'  shift 146
+	'%'  shift 147
+	'&'  shift 148
+	']'  shift 396
+	.  reduce 291 (src line 1965)
+
+
+state 295
+	pexpr_no_paren:  pexpr '[' oexpr.':' oexpr ']' 
+	pexpr_no_paren:  pexpr '[' oexpr.':' oexpr ':' oexpr ']' 
+
+	':'  shift 397
+	.  error
+
+
+state 296
+	pexpr_no_paren:  pexpr_no_paren '{' start_complit.braced_keyval_list '}' 
+	braced_keyval_list: .    (284)
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'{'  shift 403
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 284 (src line 1943)
+
+	sym  goto 123
+	expr  goto 402
+	bare_complitexpr  goto 401
+	fnliteral  goto 73
+	keyval  goto 400
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	keyval_list  goto 399
+	braced_keyval_list  goto 398
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 297
+	pexpr_no_paren:  '(' expr_or_type ')'.'{' start_complit braced_keyval_list '}' 
+	pexpr:  '(' expr_or_type ')'.    (147)
+
+	'{'  shift 404
+	.  reduce 147 (src line 1118)
+
+
+state 298
+	non_expr_type:  '*' non_expr_type.    (175)
+
+	.  reduce 175 (src line 1267)
+
+
+state 299
+	othertype:  LCHAN.non_recvchantype 
+	othertype:  LCHAN.LCOMM ntype 
+	recvchantype:  LCOMM LCHAN.ntype 
+
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 405
+	'*'  shift 196
+	'('  shift 411
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	ntype  goto 406
+	dotname  goto 410
+	name  goto 197
+	interfacetype  goto 81
+	structtype  goto 80
+	ptrtype  goto 409
+	recvchantype  goto 226
+	non_recvchantype  goto 189
+	othertype  goto 408
+	fntype  goto 407
+	hidden_importsym  goto 11
+
+state 300
+	expr:  expr.LOROR expr 
+	expr:  expr.LANDAND expr 
+	expr:  expr.LEQ expr 
+	expr:  expr.LNE expr 
+	expr:  expr.LLT expr 
+	expr:  expr.LLE expr 
+	expr:  expr.LGE expr 
+	expr:  expr.LGT expr 
+	expr:  expr.'+' expr 
+	expr:  expr.'-' expr 
+	expr:  expr.'|' expr 
+	expr:  expr.'^' expr 
+	expr:  expr.'*' expr 
+	expr:  expr.'/' expr 
+	expr:  expr.'%' expr 
+	expr:  expr.'&' expr 
+	expr:  expr.LANDNOT expr 
+	expr:  expr.LLSH expr 
+	expr:  expr.LRSH expr 
+	expr:  expr.LCOMM expr 
+	pexpr_no_paren:  convtype '(' expr.ocomma ')' 
+	ocomma: .    (288)
+
+	LANDAND  shift 134
+	LANDNOT  shift 149
+	LCOMM  shift 152
+	LEQ  shift 135
+	LGE  shift 139
+	LGT  shift 140
+	LLE  shift 138
+	LLSH  shift 150
+	LLT  shift 137
+	LNE  shift 136
+	LOROR  shift 133
+	LRSH  shift 151
+	'+'  shift 141
+	'-'  shift 142
+	'|'  shift 143
+	'^'  shift 144
+	'*'  shift 145
+	'/'  shift 146
+	'%'  shift 147
+	'&'  shift 148
+	','  shift 413
+	.  reduce 288 (src line 1958)
+
+	ocomma  goto 412
+
+state 301
+	pexpr_no_paren:  comptype lbrace start_complit.braced_keyval_list '}' 
+	braced_keyval_list: .    (284)
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'{'  shift 403
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 284 (src line 1943)
+
+	sym  goto 123
+	expr  goto 402
+	bare_complitexpr  goto 401
+	fnliteral  goto 73
+	keyval  goto 400
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	keyval_list  goto 399
+	braced_keyval_list  goto 414
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 302
+	fnliteral:  fnlitdcl lbrace stmt_list.'}' 
+	stmt_list:  stmt_list.';' stmt 
+
+	';'  shift 416
+	'}'  shift 415
+	.  error
+
+
+state 303
+	stmt_list:  stmt.    (270)
+
+	.  reduce 270 (src line 1866)
+
+
+state 304
+	stmt:  compound_stmt.    (252)
+
+	.  reduce 252 (src line 1785)
+
+
+state 305
+	stmt:  common_dcl.    (253)
+
+	.  reduce 253 (src line 1786)
+
+
+state 306
+	stmt:  non_dcl_stmt.    (254)
+
+	.  reduce 254 (src line 1790)
+
+
+state 307
+	stmt:  error.    (255)
+
+	.  reduce 255 (src line 1791)
+
+
+state 308
+	compound_stmt:  '{'.$$59 stmt_list '}' 
+	$$59: .    (59)
+
+	.  reduce 59 (src line 606)
+
+	$$59  goto 417
+
+state 309
+	othertype:  '[' oexpr ']'.ntype 
+
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 232
+	'*'  shift 196
+	'('  shift 231
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	ntype  goto 418
+	dotname  goto 230
+	name  goto 197
+	interfacetype  goto 81
+	structtype  goto 80
+	ptrtype  goto 229
+	recvchantype  goto 226
+	othertype  goto 228
+	fntype  goto 227
+	hidden_importsym  goto 11
+
+state 310
+	othertype:  '[' LDDD ']'.ntype 
+
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 232
+	'*'  shift 196
+	'('  shift 231
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	ntype  goto 419
+	dotname  goto 230
+	name  goto 197
+	interfacetype  goto 81
+	structtype  goto 80
+	ptrtype  goto 229
+	recvchantype  goto 226
+	othertype  goto 228
+	fntype  goto 227
+	hidden_importsym  goto 11
+
+state 311
+	othertype:  LCHAN LCOMM ntype.    (194)
+
+	.  reduce 194 (src line 1325)
+
+
+state 312
+	non_recvchantype:  '(' ntype.')' 
+
+	')'  shift 420
+	.  error
+
+
+state 313
+	ptrtype:  '*' ntype.    (198)
+
+	.  reduce 198 (src line 1337)
+
+
+state 314
+	dotname:  name '.'.sym 
+
+	LNAME  shift 10
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 421
+	hidden_importsym  goto 11
+
+state 315
+	othertype:  LMAP '[' ntype.']' ntype 
+
+	']'  shift 422
+	.  error
+
+
+state 316
+	structtype:  LSTRUCT lbrace structdcl_list.osemi '}' 
+	structdcl_list:  structdcl_list.';' structdcl 
+	osemi: .    (286)
+
+	';'  shift 424
+	.  reduce 286 (src line 1955)
+
+	osemi  goto 423
+
+state 317
+	structtype:  LSTRUCT lbrace '}'.    (201)
+
+	.  reduce 201 (src line 1357)
+
+
+state 318
+	structdcl_list:  structdcl.    (226)
+
+	.  reduce 226 (src line 1609)
+
+
+state 319
+	structdcl:  new_name_list.ntype oliteral 
+	new_name_list:  new_name_list.',' new_name 
+
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 232
+	'*'  shift 196
+	'('  shift 231
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	','  shift 426
+	.  error
+
+	sym  goto 123
+	ntype  goto 425
+	dotname  goto 230
+	name  goto 197
+	interfacetype  goto 81
+	structtype  goto 80
+	ptrtype  goto 229
+	recvchantype  goto 226
+	othertype  goto 228
+	fntype  goto 227
+	hidden_importsym  goto 11
+
+state 320
+	structdcl:  embed.oliteral 
+	oliteral: .    (302)
+
+	LLITERAL  shift 428
+	.  reduce 302 (src line 1997)
+
+	oliteral  goto 427
+
+state 321
+	structdcl:  '('.embed ')' oliteral 
+	structdcl:  '('.'*' embed ')' oliteral 
+
+	LNAME  shift 431
+	'*'  shift 430
+	.  error
+
+	packname  goto 324
+	embed  goto 429
+
+state 322
+	structdcl:  '*'.embed oliteral 
+	structdcl:  '*'.'(' embed ')' oliteral 
+
+	LNAME  shift 431
+	'('  shift 433
+	.  error
+
+	packname  goto 324
+	embed  goto 432
+
+state 323
+	new_name_list:  new_name.    (272)
+
+	.  reduce 272 (src line 1882)
+
+
+state 324
+	embed:  packname.    (238)
+
+	.  reduce 238 (src line 1708)
+
+
+state 325
+	sym:  LNAME.    (157)
+	packname:  LNAME.    (236)
+	packname:  LNAME.'.' sym 
+
+	LLITERAL  reduce 236 (src line 1683)
+	';'  reduce 236 (src line 1683)
+	'.'  shift 434
+	'}'  reduce 236 (src line 1683)
+	.  reduce 157 (src line 1175)
+
+
+state 326
+	interfacetype:  LINTERFACE lbrace interfacedcl_list.osemi '}' 
+	interfacedcl_list:  interfacedcl_list.';' interfacedcl 
+	osemi: .    (286)
+
+	';'  shift 436
+	.  reduce 286 (src line 1955)
+
+	osemi  goto 435
+
+state 327
+	interfacetype:  LINTERFACE lbrace '}'.    (203)
+
+	.  reduce 203 (src line 1370)
+
+
+state 328
+	interfacedcl_list:  interfacedcl.    (228)
+
+	.  reduce 228 (src line 1616)
+
+
+state 329
+	interfacedcl:  new_name.indcl 
+
+	'('  shift 438
+	.  error
+
+	indcl  goto 437
+
+state 330
+	interfacedcl:  packname.    (240)
+
+	.  reduce 240 (src line 1720)
+
+
+state 331
+	interfacedcl:  '('.packname ')' 
+
+	LNAME  shift 431
+	.  error
+
+	packname  goto 439
+
+state 332
+	import:  LIMPORT '(' import_stmt_list osemi.')' 
+
+	')'  shift 440
+	.  error
+
+
+state 333
+	import_stmt_list:  import_stmt_list ';'.import_stmt 
+	osemi:  ';'.    (287)
+
+	LLITERAL  shift 88
+	LNAME  shift 10
+	'.'  shift 90
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 287 (src line 1956)
+
+	import_here  goto 87
+	sym  goto 89
+	hidden_importsym  goto 11
+	import_stmt  goto 441
+
+state 334
+	import_stmt:  import_here import_package import_there.    (11)
+
+	.  reduce 11 (src line 235)
+
+
+state 335
+	hidden_import:  LIMPORT LNAME LLITERAL.';' 
+
+	';'  shift 442
+	.  error
+
+
+state 336
+	hidden_import:  LVAR hidden_pkg_importsym hidden_type.';' 
+
+	';'  shift 443
+	.  error
+
+
+state 337
+	hidden_type:  hidden_type_misc.    (312)
+
+	.  reduce 312 (src line 2065)
+
+
+state 338
+	hidden_type:  hidden_type_recv_chan.    (313)
+
+	.  reduce 313 (src line 2067)
+
+
+state 339
+	hidden_type:  hidden_type_func.    (314)
+
+	.  reduce 314 (src line 2068)
+
+
+state 340
+	hidden_type_misc:  hidden_importsym.    (317)
+
+	.  reduce 317 (src line 2074)
+
+
+state 341
+	hidden_type_misc:  LNAME.    (318)
+
+	.  reduce 318 (src line 2079)
+
+
+state 342
+	hidden_type_misc:  '['.']' hidden_type 
+	hidden_type_misc:  '['.LLITERAL ']' hidden_type 
+
+	LLITERAL  shift 445
+	']'  shift 444
+	.  error
+
+
+state 343
+	hidden_type_misc:  LMAP.'[' hidden_type ']' hidden_type 
+
+	'['  shift 446
+	.  error
+
+
+state 344
+	hidden_type_misc:  LSTRUCT.'{' ohidden_structdcl_list '}' 
+
+	'{'  shift 447
+	.  error
+
+
+state 345
+	hidden_type_misc:  LINTERFACE.'{' ohidden_interfacedcl_list '}' 
+
+	'{'  shift 448
+	.  error
+
+
+state 346
+	hidden_type_misc:  '*'.hidden_type 
+
+	LCHAN  shift 347
+	LFUNC  shift 349
+	LINTERFACE  shift 345
+	LMAP  shift 343
+	LNAME  shift 341
+	LSTRUCT  shift 344
+	LCOMM  shift 348
+	'*'  shift 346
+	'['  shift 342
+	'@'  shift 13
+	.  error
+
+	hidden_importsym  goto 340
+	hidden_type  goto 449
+	hidden_type_misc  goto 337
+	hidden_type_func  goto 339
+	hidden_type_recv_chan  goto 338
+
+state 347
+	hidden_type_misc:  LCHAN.hidden_type_non_recv_chan 
+	hidden_type_misc:  LCHAN.'(' hidden_type_recv_chan ')' 
+	hidden_type_misc:  LCHAN.LCOMM hidden_type 
+
+	LCHAN  shift 347
+	LFUNC  shift 349
+	LINTERFACE  shift 345
+	LMAP  shift 343
+	LNAME  shift 341
+	LSTRUCT  shift 344
+	LCOMM  shift 452
+	'*'  shift 346
+	'('  shift 451
+	'['  shift 342
+	'@'  shift 13
+	.  error
+
+	hidden_importsym  goto 340
+	hidden_type_misc  goto 453
+	hidden_type_func  goto 454
+	hidden_type_non_recv_chan  goto 450
+
+state 348
+	hidden_type_recv_chan:  LCOMM.LCHAN hidden_type 
+
+	LCHAN  shift 455
+	.  error
+
+
+state 349
+	hidden_type_func:  LFUNC.'(' ohidden_funarg_list ')' ohidden_funres 
+
+	'('  shift 456
+	.  error
+
+
+state 350
+	hidden_import:  LCONST hidden_pkg_importsym '='.hidden_constant ';' 
+
+	LLITERAL  shift 460
+	LNAME  shift 10
+	'-'  shift 461
+	'('  shift 459
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 462
+	hidden_importsym  goto 11
+	hidden_constant  goto 457
+	hidden_literal  goto 458
+
+state 351
+	hidden_import:  LCONST hidden_pkg_importsym hidden_type.'=' hidden_constant ';' 
+
+	'='  shift 463
+	.  error
+
+
+state 352
+	hidden_import:  LTYPE hidden_pkgtype hidden_type.';' 
+
+	';'  shift 464
+	.  error
+
+
+state 353
+	hidden_import:  LFUNC hidden_fndcl fnbody.';' 
+
+	';'  shift 465
+	.  error
+
+
+state 354
+	hidden_fndcl:  hidden_pkg_importsym '('.ohidden_funarg_list ')' ohidden_funres 
+	ohidden_funarg_list: .    (296)
+
+	LNAME  shift 10
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 296 (src line 1979)
+
+	sym  goto 357
+	hidden_importsym  goto 11
+	hidden_funarg  goto 356
+	hidden_funarg_list  goto 467
+	ohidden_funarg_list  goto 466
+
+state 355
+	hidden_fndcl:  '(' hidden_funarg_list.')' sym '(' ohidden_funarg_list ')' ohidden_funres 
+	hidden_funarg_list:  hidden_funarg_list.',' hidden_funarg 
+
+	')'  shift 468
+	','  shift 469
+	.  error
+
+
+state 356
+	hidden_funarg_list:  hidden_funarg.    (346)
+
+	.  reduce 346 (src line 2274)
+
+
+state 357
+	hidden_funarg:  sym.hidden_type oliteral 
+	hidden_funarg:  sym.LDDD hidden_type oliteral 
+
+	LCHAN  shift 347
+	LDDD  shift 471
+	LFUNC  shift 349
+	LINTERFACE  shift 345
+	LMAP  shift 343
+	LNAME  shift 341
+	LSTRUCT  shift 344
+	LCOMM  shift 348
+	'*'  shift 346
+	'['  shift 342
+	'@'  shift 13
+	.  error
+
+	hidden_importsym  goto 340
+	hidden_type  goto 470
+	hidden_type_misc  goto 337
+	hidden_type_func  goto 339
+	hidden_type_recv_chan  goto 338
+
+state 358
+	common_dcl:  LVAR '(' vardcl_list osemi.')' 
+
+	')'  shift 472
+	.  error
+
+
+state 359
+	vardcl_list:  vardcl_list ';'.vardcl 
+	osemi:  ';'.    (287)
+
+	LNAME  shift 10
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 287 (src line 1956)
+
+	sym  goto 105
+	dcl_name  goto 104
+	dcl_name_list  goto 103
+	vardcl  goto 473
+	hidden_importsym  goto 11
+
+state 360
+	vardcl:  dcl_name_list ntype '='.expr_list 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	expr  goto 129
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	expr_list  goto 474
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 361
+	vardcl:  dcl_name_list '=' expr_list.    (41)
+	expr_list:  expr_list.',' expr 
+
+	','  shift 155
+	.  reduce 41 (src line 431)
+
+
+state 362
+	dcl_name_list:  dcl_name_list ',' dcl_name.    (275)
+
+	.  reduce 275 (src line 1897)
+
+
+state 363
+	ntype:  '(' ntype.')' 
+
+	')'  shift 475
+	.  error
+
+
+state 364
+	recvchantype:  LCOMM LCHAN.ntype 
+
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 232
+	'*'  shift 196
+	'('  shift 231
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	ntype  goto 406
+	dotname  goto 230
+	name  goto 197
+	interfacetype  goto 81
+	structtype  goto 80
+	ptrtype  goto 229
+	recvchantype  goto 226
+	othertype  goto 228
+	fntype  goto 227
+	hidden_importsym  goto 11
+
+state 365
+	common_dcl:  lconst '(' constdcl osemi.')' 
+
+	')'  shift 476
+	.  error
+
+
+state 366
+	common_dcl:  lconst '(' constdcl ';'.constdcl_list osemi ')' 
+	osemi:  ';'.    (287)
+
+	LNAME  shift 10
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 287 (src line 1956)
+
+	sym  goto 105
+	dcl_name  goto 104
+	dcl_name_list  goto 480
+	constdcl  goto 479
+	constdcl1  goto 478
+	constdcl_list  goto 477
+	hidden_importsym  goto 11
+
+state 367
+	constdcl:  dcl_name_list ntype '='.expr_list 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	expr  goto 129
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	expr_list  goto 481
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 368
+	constdcl:  dcl_name_list '=' expr_list.    (43)
+	expr_list:  expr_list.',' expr 
+
+	','  shift 155
+	.  reduce 43 (src line 441)
+
+
+state 369
+	common_dcl:  LTYPE '(' typedcl_list osemi.')' 
+
+	')'  shift 482
+	.  error
+
+
+state 370
+	typedcl_list:  typedcl_list ';'.typedcl 
+	osemi:  ';'.    (287)
+
+	LNAME  shift 10
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 287 (src line 1956)
+
+	sym  goto 112
+	typedclname  goto 111
+	typedcl  goto 483
+	hidden_importsym  goto 11
+
+state 371
+	fnbody:  '{' stmt_list.'}' 
+	stmt_list:  stmt_list.';' stmt 
+
+	';'  shift 416
+	'}'  shift 484
+	.  error
+
+
+state 372
+	fndcl:  '(' oarg_type_list_ocomma ')'.sym '(' oarg_type_list_ocomma ')' fnres 
+	fntype:  LFUNC '(' oarg_type_list_ocomma ')'.fnres 
+	fnres: .    (212)
+
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 232
+	'*'  shift 196
+	'('  shift 488
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 212 (src line 1531)
+
+	sym  goto 485
+	dotname  goto 493
+	name  goto 197
+	fnres  goto 486
+	interfacetype  goto 81
+	structtype  goto 80
+	ptrtype  goto 492
+	recvchantype  goto 489
+	othertype  goto 491
+	fnret_type  goto 487
+	fntype  goto 490
+	hidden_importsym  goto 11
+
+state 373
+	arg_type_list:  arg_type_list ','.arg_type 
+	ocomma:  ','.    (289)
+
+	LCHAN  shift 78
+	LDDD  shift 250
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 232
+	'*'  shift 196
+	'('  shift 231
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 289 (src line 1959)
+
+	sym  goto 247
+	ntype  goto 249
+	arg_type  goto 494
+	dotname  goto 230
+	name  goto 197
+	name_or_type  goto 246
+	dotdotdot  goto 248
+	interfacetype  goto 81
+	structtype  goto 80
+	ptrtype  goto 229
+	recvchantype  goto 226
+	othertype  goto 228
+	fntype  goto 227
+	hidden_importsym  goto 11
+
+state 374
+	oarg_type_list_ocomma:  arg_type_list ocomma.    (250)
+
+	.  reduce 250 (src line 1773)
+
+
+state 375
+	arg_type:  sym name_or_type.    (244)
+
+	.  reduce 244 (src line 1745)
+
+
+state 376
+	arg_type:  sym dotdotdot.    (245)
+
+	.  reduce 245 (src line 1751)
+
+
+state 377
+	dotdotdot:  LDDD ntype.    (165)
+
+	.  reduce 165 (src line 1247)
+
+
+state 378
+	fndcl:  sym '(' oarg_type_list_ocomma.')' fnres 
+
+	')'  shift 495
+	.  error
+
+
+state 379
+	non_dcl_stmt:  labelname ':' $$261 stmt.    (262)
+
+	.  reduce 262 (src line 1807)
+
+
+state 380
+	fntype:  LFUNC '(' oarg_type_list_ocomma.')' fnres 
+
+	')'  shift 496
+	.  error
+
+
+state 381
+	for_body:  for_header loop_body.    (73)
+
+	.  reduce 73 (src line 714)
+
+
+state 382
+	loop_body:  LBODY.$$65 stmt_list '}' 
+	$$65: .    (65)
+
+	.  reduce 65 (src line 661)
+
+	$$65  goto 497
+
+state 383
+	for_header:  osimple_stmt ';'.osimple_stmt ';' osimple_stmt 
+	osimple_stmt: .    (294)
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 294 (src line 1973)
+
+	sym  goto 123
+	expr  goto 48
+	fnliteral  goto 73
+	name  goto 69
+	osimple_stmt  goto 498
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	simple_stmt  goto 282
+	uexpr  goto 55
+	expr_list  goto 49
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 384
+	simple_stmt:  expr_list '='.expr_list 
+	range_stmt:  expr_list '='.LRANGE expr 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LRANGE  shift 499
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	expr  goto 129
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	expr_list  goto 275
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 385
+	simple_stmt:  expr_list LCOLAS.expr_list 
+	range_stmt:  expr_list LCOLAS.LRANGE expr 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LRANGE  shift 500
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	expr  goto 129
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	expr_list  goto 276
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 386
+	range_stmt:  LRANGE expr.    (69)
+	expr:  expr.LOROR expr 
+	expr:  expr.LANDAND expr 
+	expr:  expr.LEQ expr 
+	expr:  expr.LNE expr 
+	expr:  expr.LLT expr 
+	expr:  expr.LLE expr 
+	expr:  expr.LGE expr 
+	expr:  expr.LGT expr 
+	expr:  expr.'+' expr 
+	expr:  expr.'-' expr 
+	expr:  expr.'|' expr 
+	expr:  expr.'^' expr 
+	expr:  expr.'*' expr 
+	expr:  expr.'/' expr 
+	expr:  expr.'%' expr 
+	expr:  expr.'&' expr 
+	expr:  expr.LANDNOT expr 
+	expr:  expr.LLSH expr 
+	expr:  expr.LRSH expr 
+	expr:  expr.LCOMM expr 
+
+	LANDAND  shift 134
+	LANDNOT  shift 149
+	LCOMM  shift 152
+	LEQ  shift 135
+	LGE  shift 139
+	LGT  shift 140
+	LLE  shift 138
+	LLSH  shift 150
+	LLT  shift 137
+	LNE  shift 136
+	LOROR  shift 133
+	LRSH  shift 151
+	'+'  shift 141
+	'-'  shift 142
+	'|'  shift 143
+	'^'  shift 144
+	'*'  shift 145
+	'/'  shift 146
+	'%'  shift 147
+	'&'  shift 148
+	.  reduce 69 (src line 686)
+
+
+state 387
+	switch_stmt:  LSWITCH $$88 if_header $$89.LBODY caseblock_list '}' 
+
+	LBODY  shift 501
+	.  error
+
+
+state 388
+	if_header:  osimple_stmt ';'.osimple_stmt 
+	osimple_stmt: .    (294)
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 294 (src line 1973)
+
+	sym  goto 123
+	expr  goto 48
+	fnliteral  goto 73
+	name  goto 69
+	osimple_stmt  goto 502
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	simple_stmt  goto 282
+	uexpr  goto 55
+	expr_list  goto 49
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 389
+	caseblock_list:  caseblock_list.caseblock 
+	select_stmt:  LSELECT $$91 LBODY caseblock_list.'}' 
+
+	LCASE  shift 506
+	LDEFAULT  shift 507
+	'}'  shift 504
+	.  error
+
+	case  goto 505
+	caseblock  goto 503
+
+state 390
+	if_stmt:  LIF $$78 if_header $$79.loop_body $$80 elseif_list else 
+
+	LBODY  shift 382
+	.  error
+
+	loop_body  goto 508
+
+state 391
+	pseudocall:  pexpr '(' expr_or_type_list ocomma.')' 
+
+	')'  shift 509
+	.  error
+
+
+state 392
+	pseudocall:  pexpr '(' expr_or_type_list LDDD.ocomma ')' 
+	ocomma: .    (288)
+
+	','  shift 413
+	.  reduce 288 (src line 1958)
+
+	ocomma  goto 510
+
+state 393
+	expr_or_type_list:  expr_or_type_list ','.expr_or_type 
+	ocomma:  ','.    (289)
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 179
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 178
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 289 (src line 1959)
+
+	sym  goto 123
+	expr  goto 173
+	expr_or_type  goto 511
+	fnliteral  goto 73
+	name  goto 69
+	non_expr_type  goto 174
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	recvchantype  goto 175
+	othertype  goto 177
+	fntype  goto 176
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 394
+	pexpr_no_paren:  pexpr '.' '(' expr_or_type.')' 
+
+	')'  shift 512
+	.  error
+
+
+state 395
+	pexpr_no_paren:  pexpr '.' '(' LTYPE.')' 
+
+	')'  shift 513
+	.  error
+
+
+state 396
+	pexpr_no_paren:  pexpr '[' expr ']'.    (131)
+
+	.  reduce 131 (src line 1028)
+
+
+state 397
+	pexpr_no_paren:  pexpr '[' oexpr ':'.oexpr ']' 
+	pexpr_no_paren:  pexpr '[' oexpr ':'.oexpr ':' oexpr ']' 
+	oexpr: .    (290)
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 290 (src line 1961)
+
+	sym  goto 123
+	expr  goto 188
+	fnliteral  goto 73
+	name  goto 69
+	oexpr  goto 514
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 398
+	pexpr_no_paren:  pexpr_no_paren '{' start_complit braced_keyval_list.'}' 
+
+	'}'  shift 515
+	.  error
+
+
+state 399
+	keyval_list:  keyval_list.',' keyval 
+	keyval_list:  keyval_list.',' bare_complitexpr 
+	braced_keyval_list:  keyval_list.ocomma 
+	ocomma: .    (288)
+
+	','  shift 516
+	.  reduce 288 (src line 1958)
+
+	ocomma  goto 517
+
+state 400
+	keyval_list:  keyval.    (280)
+
+	.  reduce 280 (src line 1925)
+
+
+state 401
+	keyval_list:  bare_complitexpr.    (281)
+
+	.  reduce 281 (src line 1930)
+
+
+state 402
+	expr:  expr.LOROR expr 
+	expr:  expr.LANDAND expr 
+	expr:  expr.LEQ expr 
+	expr:  expr.LNE expr 
+	expr:  expr.LLT expr 
+	expr:  expr.LLE expr 
+	expr:  expr.LGE expr 
+	expr:  expr.LGT expr 
+	expr:  expr.'+' expr 
+	expr:  expr.'-' expr 
+	expr:  expr.'|' expr 
+	expr:  expr.'^' expr 
+	expr:  expr.'*' expr 
+	expr:  expr.'/' expr 
+	expr:  expr.'%' expr 
+	expr:  expr.'&' expr 
+	expr:  expr.LANDNOT expr 
+	expr:  expr.LLSH expr 
+	expr:  expr.LRSH expr 
+	expr:  expr.LCOMM expr 
+	keyval:  expr.':' complitexpr 
+	bare_complitexpr:  expr.    (142)
+
+	LANDAND  shift 134
+	LANDNOT  shift 149
+	LCOMM  shift 152
+	LEQ  shift 135
+	LGE  shift 139
+	LGT  shift 140
+	LLE  shift 138
+	LLSH  shift 150
+	LLT  shift 137
+	LNE  shift 136
+	LOROR  shift 133
+	LRSH  shift 151
+	'+'  shift 141
+	'-'  shift 142
+	'|'  shift 143
+	'^'  shift 144
+	'*'  shift 145
+	'/'  shift 146
+	'%'  shift 147
+	'&'  shift 148
+	':'  shift 518
+	.  reduce 142 (src line 1088)
+
+
+state 403
+	bare_complitexpr:  '{'.start_complit braced_keyval_list '}' 
+	start_complit: .    (140)
+
+	.  reduce 140 (src line 1075)
+
+	start_complit  goto 519
+
+state 404
+	pexpr_no_paren:  '(' expr_or_type ')' '{'.start_complit braced_keyval_list '}' 
+	start_complit: .    (140)
+
+	.  reduce 140 (src line 1075)
+
+	start_complit  goto 520
+
+state 405
+	othertype:  LCHAN LCOMM.ntype 
+	recvchantype:  LCOMM.LCHAN ntype 
+
+	LCHAN  shift 299
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 232
+	'*'  shift 196
+	'('  shift 231
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	ntype  goto 311
+	dotname  goto 230
+	name  goto 197
+	interfacetype  goto 81
+	structtype  goto 80
+	ptrtype  goto 229
+	recvchantype  goto 226
+	othertype  goto 228
+	fntype  goto 227
+	hidden_importsym  goto 11
+
+state 406
+	recvchantype:  LCOMM LCHAN ntype.    (199)
+
+	.  reduce 199 (src line 1343)
+
+
+state 407
+	ntype:  fntype.    (167)
+	non_recvchantype:  fntype.    (176)
+
+	LBODY  reduce 176 (src line 1272)
+	'('  reduce 176 (src line 1272)
+	'{'  reduce 176 (src line 1272)
+	.  reduce 167 (src line 1254)
+
+
+state 408
+	ntype:  othertype.    (168)
+	non_recvchantype:  othertype.    (177)
+
+	LBODY  reduce 177 (src line 1274)
+	'('  reduce 177 (src line 1274)
+	'{'  reduce 177 (src line 1274)
+	.  reduce 168 (src line 1255)
+
+
+state 409
+	ntype:  ptrtype.    (169)
+	non_recvchantype:  ptrtype.    (178)
+
+	LBODY  reduce 178 (src line 1275)
+	'('  reduce 178 (src line 1275)
+	'{'  reduce 178 (src line 1275)
+	.  reduce 169 (src line 1256)
+
+
+state 410
+	ntype:  dotname.    (170)
+	non_recvchantype:  dotname.    (179)
+
+	LBODY  reduce 179 (src line 1276)
+	'('  reduce 179 (src line 1276)
+	'{'  reduce 179 (src line 1276)
+	.  reduce 170 (src line 1257)
+
+
+state 411
+	ntype:  '('.ntype ')' 
+	non_recvchantype:  '('.ntype ')' 
+
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 232
+	'*'  shift 196
+	'('  shift 231
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	ntype  goto 521
+	dotname  goto 230
+	name  goto 197
+	interfacetype  goto 81
+	structtype  goto 80
+	ptrtype  goto 229
+	recvchantype  goto 226
+	othertype  goto 228
+	fntype  goto 227
+	hidden_importsym  goto 11
+
+state 412
+	pexpr_no_paren:  convtype '(' expr ocomma.')' 
+
+	')'  shift 522
+	.  error
+
+
+state 413
+	ocomma:  ','.    (289)
+
+	.  reduce 289 (src line 1959)
+
+
+state 414
+	pexpr_no_paren:  comptype lbrace start_complit braced_keyval_list.'}' 
+
+	'}'  shift 523
+	.  error
+
+
+state 415
+	fnliteral:  fnlitdcl lbrace stmt_list '}'.    (216)
+
+	.  reduce 216 (src line 1552)
+
+
+state 416
+	stmt_list:  stmt_list ';'.stmt 
+	stmt: .    (251)
+
+	error  shift 307
+	LLITERAL  shift 68
+	LBREAK  shift 41
+	LCASE  reduce 251 (src line 1781)
+	LCHAN  shift 78
+	LCONST  shift 47
+	LCONTINUE  shift 42
+	LDEFAULT  reduce 251 (src line 1781)
+	LDEFER  shift 44
+	LFALL  shift 40
+	LFOR  shift 50
+	LFUNC  shift 124
+	LGO  shift 43
+	LGOTO  shift 45
+	LIF  shift 53
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LRETURN  shift 46
+	LSELECT  shift 52
+	LSTRUCT  shift 82
+	LSWITCH  shift 51
+	LTYPE  shift 32
+	LVAR  shift 30
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	';'  reduce 251 (src line 1781)
+	'{'  shift 308
+	'}'  reduce 251 (src line 1781)
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 56
+	stmt  goto 524
+	compound_stmt  goto 304
+	expr  goto 48
+	fnliteral  goto 73
+	for_stmt  goto 35
+	if_stmt  goto 38
+	non_dcl_stmt  goto 306
+	labelname  goto 39
+	name  goto 69
+	new_name  goto 54
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	select_stmt  goto 37
+	simple_stmt  goto 34
+	switch_stmt  goto 36
+	uexpr  goto 55
+	expr_list  goto 49
+	common_dcl  goto 305
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	lconst  goto 31
+	fnlitdcl  goto 76
+
+state 417
+	compound_stmt:  '{' $$59.stmt_list '}' 
+	stmt: .    (251)
+
+	error  shift 307
+	LLITERAL  shift 68
+	LBREAK  shift 41
+	LCHAN  shift 78
+	LCONST  shift 47
+	LCONTINUE  shift 42
+	LDEFER  shift 44
+	LFALL  shift 40
+	LFOR  shift 50
+	LFUNC  shift 124
+	LGO  shift 43
+	LGOTO  shift 45
+	LIF  shift 53
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LRETURN  shift 46
+	LSELECT  shift 52
+	LSTRUCT  shift 82
+	LSWITCH  shift 51
+	LTYPE  shift 32
+	LVAR  shift 30
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	';'  reduce 251 (src line 1781)
+	'{'  shift 308
+	'}'  reduce 251 (src line 1781)
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 56
+	stmt  goto 303
+	compound_stmt  goto 304
+	expr  goto 48
+	fnliteral  goto 73
+	for_stmt  goto 35
+	if_stmt  goto 38
+	non_dcl_stmt  goto 306
+	labelname  goto 39
+	name  goto 69
+	new_name  goto 54
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	select_stmt  goto 37
+	simple_stmt  goto 34
+	switch_stmt  goto 36
+	uexpr  goto 55
+	expr_list  goto 49
+	stmt_list  goto 525
+	common_dcl  goto 305
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	lconst  goto 31
+	fnlitdcl  goto 76
+
+state 418
+	othertype:  '[' oexpr ']' ntype.    (191)
+
+	.  reduce 191 (src line 1310)
+
+
+state 419
+	othertype:  '[' LDDD ']' ntype.    (192)
+
+	.  reduce 192 (src line 1315)
+
+
+state 420
+	non_recvchantype:  '(' ntype ')'.    (180)
+
+	.  reduce 180 (src line 1277)
+
+
+state 421
+	dotname:  name '.' sym.    (190)
+
+	.  reduce 190 (src line 1298)
+
+
+state 422
+	othertype:  LMAP '[' ntype ']'.ntype 
+
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 232
+	'*'  shift 196
+	'('  shift 231
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	ntype  goto 526
+	dotname  goto 230
+	name  goto 197
+	interfacetype  goto 81
+	structtype  goto 80
+	ptrtype  goto 229
+	recvchantype  goto 226
+	othertype  goto 228
+	fntype  goto 227
+	hidden_importsym  goto 11
+
+state 423
+	structtype:  LSTRUCT lbrace structdcl_list osemi.'}' 
+
+	'}'  shift 527
+	.  error
+
+
+state 424
+	structdcl_list:  structdcl_list ';'.structdcl 
+	osemi:  ';'.    (287)
+
+	LNAME  shift 325
+	'*'  shift 322
+	'('  shift 321
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 287 (src line 1956)
+
+	sym  goto 119
+	packname  goto 324
+	embed  goto 320
+	new_name  goto 323
+	new_name_list  goto 319
+	structdcl  goto 528
+	hidden_importsym  goto 11
+
+state 425
+	structdcl:  new_name_list ntype.oliteral 
+	oliteral: .    (302)
+
+	LLITERAL  shift 428
+	.  reduce 302 (src line 1997)
+
+	oliteral  goto 529
+
+state 426
+	new_name_list:  new_name_list ','.new_name 
+
+	LNAME  shift 10
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 119
+	new_name  goto 530
+	hidden_importsym  goto 11
+
+state 427
+	structdcl:  embed oliteral.    (231)
+
+	.  reduce 231 (src line 1651)
+
+
+state 428
+	oliteral:  LLITERAL.    (303)
+
+	.  reduce 303 (src line 2001)
+
+
+state 429
+	structdcl:  '(' embed.')' oliteral 
+
+	')'  shift 531
+	.  error
+
+
+state 430
+	structdcl:  '(' '*'.embed ')' oliteral 
+
+	LNAME  shift 431
+	.  error
+
+	packname  goto 324
+	embed  goto 532
+
+state 431
+	packname:  LNAME.    (236)
+	packname:  LNAME.'.' sym 
+
+	'.'  shift 434
+	.  reduce 236 (src line 1683)
+
+
+state 432
+	structdcl:  '*' embed.oliteral 
+	oliteral: .    (302)
+
+	LLITERAL  shift 428
+	.  reduce 302 (src line 1997)
+
+	oliteral  goto 533
+
+state 433
+	structdcl:  '*' '('.embed ')' oliteral 
+
+	LNAME  shift 431
+	.  error
+
+	packname  goto 324
+	embed  goto 534
+
+state 434
+	packname:  LNAME '.'.sym 
+
+	LNAME  shift 10
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 535
+	hidden_importsym  goto 11
+
+state 435
+	interfacetype:  LINTERFACE lbrace interfacedcl_list osemi.'}' 
+
+	'}'  shift 536
+	.  error
+
+
+state 436
+	interfacedcl_list:  interfacedcl_list ';'.interfacedcl 
+	osemi:  ';'.    (287)
+
+	LNAME  shift 325
+	'('  shift 331
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 287 (src line 1956)
+
+	sym  goto 119
+	packname  goto 330
+	interfacedcl  goto 537
+	new_name  goto 329
+	hidden_importsym  goto 11
+
+state 437
+	interfacedcl:  new_name indcl.    (239)
+
+	.  reduce 239 (src line 1714)
+
+
+state 438
+	indcl:  '('.oarg_type_list_ocomma ')' fnres 
+	oarg_type_list_ocomma: .    (249)
+
+	LCHAN  shift 78
+	LDDD  shift 250
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 232
+	'*'  shift 196
+	'('  shift 231
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 249 (src line 1769)
+
+	sym  goto 247
+	ntype  goto 249
+	arg_type  goto 245
+	dotname  goto 230
+	name  goto 197
+	name_or_type  goto 246
+	oarg_type_list_ocomma  goto 538
+	arg_type_list  goto 244
+	dotdotdot  goto 248
+	interfacetype  goto 81
+	structtype  goto 80
+	ptrtype  goto 229
+	recvchantype  goto 226
+	othertype  goto 228
+	fntype  goto 227
+	hidden_importsym  goto 11
+
+state 439
+	interfacedcl:  '(' packname.')' 
+
+	')'  shift 539
+	.  error
+
+
+state 440
+	import:  LIMPORT '(' import_stmt_list osemi ')'.    (9)
+
+	.  reduce 9 (src line 232)
+
+
+state 441
+	import_stmt_list:  import_stmt_list ';' import_stmt.    (14)
+
+	.  reduce 14 (src line 284)
+
+
+state 442
+	hidden_import:  LIMPORT LNAME LLITERAL ';'.    (304)
+
+	.  reduce 304 (src line 2006)
+
+
+state 443
+	hidden_import:  LVAR hidden_pkg_importsym hidden_type ';'.    (305)
+
+	.  reduce 305 (src line 2011)
+
+
+state 444
+	hidden_type_misc:  '[' ']'.hidden_type 
+
+	LCHAN  shift 347
+	LFUNC  shift 349
+	LINTERFACE  shift 345
+	LMAP  shift 343
+	LNAME  shift 341
+	LSTRUCT  shift 344
+	LCOMM  shift 348
+	'*'  shift 346
+	'['  shift 342
+	'@'  shift 13
+	.  error
+
+	hidden_importsym  goto 340
+	hidden_type  goto 540
+	hidden_type_misc  goto 337
+	hidden_type_func  goto 339
+	hidden_type_recv_chan  goto 338
+
+state 445
+	hidden_type_misc:  '[' LLITERAL.']' hidden_type 
+
+	']'  shift 541
+	.  error
+
+
+state 446
+	hidden_type_misc:  LMAP '['.hidden_type ']' hidden_type 
+
+	LCHAN  shift 347
+	LFUNC  shift 349
+	LINTERFACE  shift 345
+	LMAP  shift 343
+	LNAME  shift 341
+	LSTRUCT  shift 344
+	LCOMM  shift 348
+	'*'  shift 346
+	'['  shift 342
+	'@'  shift 13
+	.  error
+
+	hidden_importsym  goto 340
+	hidden_type  goto 542
+	hidden_type_misc  goto 337
+	hidden_type_func  goto 339
+	hidden_type_recv_chan  goto 338
+
+state 447
+	hidden_type_misc:  LSTRUCT '{'.ohidden_structdcl_list '}' 
+	ohidden_structdcl_list: .    (298)
+
+	LNAME  shift 10
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 298 (src line 1985)
+
+	sym  goto 546
+	hidden_importsym  goto 11
+	hidden_structdcl  goto 545
+	hidden_structdcl_list  goto 544
+	ohidden_structdcl_list  goto 543
+
+state 448
+	hidden_type_misc:  LINTERFACE '{'.ohidden_interfacedcl_list '}' 
+	ohidden_interfacedcl_list: .    (300)
+
+	LCHAN  shift 347
+	LFUNC  shift 349
+	LINTERFACE  shift 345
+	LMAP  shift 343
+	LNAME  shift 552
+	LSTRUCT  shift 344
+	LCOMM  shift 348
+	'*'  shift 346
+	'['  shift 342
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 300 (src line 1991)
+
+	sym  goto 550
+	hidden_importsym  goto 553
+	hidden_interfacedcl  goto 549
+	hidden_interfacedcl_list  goto 548
+	ohidden_interfacedcl_list  goto 547
+	hidden_type  goto 551
+	hidden_type_misc  goto 337
+	hidden_type_func  goto 339
+	hidden_type_recv_chan  goto 338
+
+state 449
+	hidden_type_misc:  '*' hidden_type.    (324)
+
+	.  reduce 324 (src line 2110)
+
+
+state 450
+	hidden_type_misc:  LCHAN hidden_type_non_recv_chan.    (325)
+
+	.  reduce 325 (src line 2114)
+
+
+state 451
+	hidden_type_misc:  LCHAN '('.hidden_type_recv_chan ')' 
+
+	LCOMM  shift 348
+	.  error
+
+	hidden_type_recv_chan  goto 554
+
+state 452
+	hidden_type_misc:  LCHAN LCOMM.hidden_type 
+
+	LCHAN  shift 347
+	LFUNC  shift 349
+	LINTERFACE  shift 345
+	LMAP  shift 343
+	LNAME  shift 341
+	LSTRUCT  shift 344
+	LCOMM  shift 348
+	'*'  shift 346
+	'['  shift 342
+	'@'  shift 13
+	.  error
+
+	hidden_importsym  goto 340
+	hidden_type  goto 555
+	hidden_type_misc  goto 337
+	hidden_type_func  goto 339
+	hidden_type_recv_chan  goto 338
+
+state 453
+	hidden_type_non_recv_chan:  hidden_type_misc.    (315)
+
+	.  reduce 315 (src line 2070)
+
+
+state 454
+	hidden_type_non_recv_chan:  hidden_type_func.    (316)
+
+	.  reduce 316 (src line 2072)
+
+
+state 455
+	hidden_type_recv_chan:  LCOMM LCHAN.hidden_type 
+
+	LCHAN  shift 347
+	LFUNC  shift 349
+	LINTERFACE  shift 345
+	LMAP  shift 343
+	LNAME  shift 341
+	LSTRUCT  shift 344
+	LCOMM  shift 348
+	'*'  shift 346
+	'['  shift 342
+	'@'  shift 13
+	.  error
+
+	hidden_importsym  goto 340
+	hidden_type  goto 556
+	hidden_type_misc  goto 337
+	hidden_type_func  goto 339
+	hidden_type_recv_chan  goto 338
+
+state 456
+	hidden_type_func:  LFUNC '('.ohidden_funarg_list ')' ohidden_funres 
+	ohidden_funarg_list: .    (296)
+
+	LNAME  shift 10
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 296 (src line 1979)
+
+	sym  goto 357
+	hidden_importsym  goto 11
+	hidden_funarg  goto 356
+	hidden_funarg_list  goto 467
+	ohidden_funarg_list  goto 557
+
+state 457
+	hidden_import:  LCONST hidden_pkg_importsym '=' hidden_constant.';' 
+
+	';'  shift 558
+	.  error
+
+
+state 458
+	hidden_constant:  hidden_literal.    (342)
+
+	.  reduce 342 (src line 2257)
+
+
+state 459
+	hidden_constant:  '('.hidden_literal '+' hidden_literal ')' 
+
+	LLITERAL  shift 460
+	LNAME  shift 10
+	'-'  shift 461
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 462
+	hidden_importsym  goto 11
+	hidden_literal  goto 559
+
+state 460
+	hidden_literal:  LLITERAL.    (339)
+
+	.  reduce 339 (src line 2226)
+
+
+state 461
+	hidden_literal:  '-'.LLITERAL 
+
+	LLITERAL  shift 560
+	.  error
+
+
+state 462
+	hidden_literal:  sym.    (341)
+
+	.  reduce 341 (src line 2249)
+
+
+state 463
+	hidden_import:  LCONST hidden_pkg_importsym hidden_type '='.hidden_constant ';' 
+
+	LLITERAL  shift 460
+	LNAME  shift 10
+	'-'  shift 461
+	'('  shift 459
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 462
+	hidden_importsym  goto 11
+	hidden_constant  goto 561
+	hidden_literal  goto 458
+
+state 464
+	hidden_import:  LTYPE hidden_pkgtype hidden_type ';'.    (308)
+
+	.  reduce 308 (src line 2023)
+
+
+state 465
+	hidden_import:  LFUNC hidden_fndcl fnbody ';'.    (309)
+
+	.  reduce 309 (src line 2027)
+
+
+state 466
+	hidden_fndcl:  hidden_pkg_importsym '(' ohidden_funarg_list.')' ohidden_funres 
+
+	')'  shift 562
+	.  error
+
+
+state 467
+	ohidden_funarg_list:  hidden_funarg_list.    (297)
+	hidden_funarg_list:  hidden_funarg_list.',' hidden_funarg 
+
+	','  shift 469
+	.  reduce 297 (src line 1983)
+
+
+state 468
+	hidden_fndcl:  '(' hidden_funarg_list ')'.sym '(' ohidden_funarg_list ')' ohidden_funres 
+
+	LNAME  shift 10
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 563
+	hidden_importsym  goto 11
+
+state 469
+	hidden_funarg_list:  hidden_funarg_list ','.hidden_funarg 
+
+	LNAME  shift 10
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 357
+	hidden_importsym  goto 11
+	hidden_funarg  goto 564
+
+state 470
+	hidden_funarg:  sym hidden_type.oliteral 
+	oliteral: .    (302)
+
+	LLITERAL  shift 428
+	.  reduce 302 (src line 1997)
+
+	oliteral  goto 565
+
+state 471
+	hidden_funarg:  sym LDDD.hidden_type oliteral 
+
+	LCHAN  shift 347
+	LFUNC  shift 349
+	LINTERFACE  shift 345
+	LMAP  shift 343
+	LNAME  shift 341
+	LSTRUCT  shift 344
+	LCOMM  shift 348
+	'*'  shift 346
+	'['  shift 342
+	'@'  shift 13
+	.  error
+
+	hidden_importsym  goto 340
+	hidden_type  goto 566
+	hidden_type_misc  goto 337
+	hidden_type_func  goto 339
+	hidden_type_recv_chan  goto 338
+
+state 472
+	common_dcl:  LVAR '(' vardcl_list osemi ')'.    (29)
+
+	.  reduce 29 (src line 372)
+
+
+state 473
+	vardcl_list:  vardcl_list ';' vardcl.    (221)
+
+	.  reduce 221 (src line 1587)
+
+
+state 474
+	vardcl:  dcl_name_list ntype '=' expr_list.    (40)
+	expr_list:  expr_list.',' expr 
+
+	','  shift 155
+	.  reduce 40 (src line 427)
+
+
+state 475
+	ntype:  '(' ntype ')'.    (171)
+
+	.  reduce 171 (src line 1258)
+
+
+state 476
+	common_dcl:  lconst '(' constdcl osemi ')'.    (32)
+
+	.  reduce 32 (src line 386)
+
+
+state 477
+	common_dcl:  lconst '(' constdcl ';' constdcl_list.osemi ')' 
+	constdcl_list:  constdcl_list.';' constdcl1 
+	osemi: .    (286)
+
+	';'  shift 568
+	.  reduce 286 (src line 1955)
+
+	osemi  goto 567
+
+state 478
+	constdcl_list:  constdcl1.    (222)
+
+	.  reduce 222 (src line 1592)
+
+
+state 479
+	constdcl1:  constdcl.    (44)
+
+	.  reduce 44 (src line 446)
+
+
+state 480
+	constdcl:  dcl_name_list.ntype '=' expr_list 
+	constdcl:  dcl_name_list.'=' expr_list 
+	constdcl1:  dcl_name_list.ntype 
+	constdcl1:  dcl_name_list.    (46)
+	dcl_name_list:  dcl_name_list.',' dcl_name 
+
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 232
+	'*'  shift 196
+	'('  shift 231
+	'='  shift 236
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	','  shift 225
+	.  reduce 46 (src line 452)
+
+	sym  goto 123
+	ntype  goto 569
+	dotname  goto 230
+	name  goto 197
+	interfacetype  goto 81
+	structtype  goto 80
+	ptrtype  goto 229
+	recvchantype  goto 226
+	othertype  goto 228
+	fntype  goto 227
+	hidden_importsym  goto 11
+
+state 481
+	constdcl:  dcl_name_list ntype '=' expr_list.    (42)
+	expr_list:  expr_list.',' expr 
+
+	','  shift 155
+	.  reduce 42 (src line 436)
+
+
+state 482
+	common_dcl:  LTYPE '(' typedcl_list osemi ')'.    (36)
+
+	.  reduce 36 (src line 407)
+
+
+state 483
+	typedcl_list:  typedcl_list ';' typedcl.    (225)
+
+	.  reduce 225 (src line 1604)
+
+
+state 484
+	fnbody:  '{' stmt_list '}'.    (211)
+
+	.  reduce 211 (src line 1523)
+
+
+state 485
+	name:  sym.    (162)
+	fndcl:  '(' oarg_type_list_ocomma ')' sym.'(' oarg_type_list_ocomma ')' fnres 
+
+	'('  shift 570
+	.  reduce 162 (src line 1220)
+
+
+state 486
+	fntype:  LFUNC '(' oarg_type_list_ocomma ')' fnres.    (209)
+
+	.  reduce 209 (src line 1510)
+
+
+state 487
+	fnres:  fnret_type.    (213)
+
+	.  reduce 213 (src line 1536)
+
+
+state 488
+	fnres:  '('.oarg_type_list_ocomma ')' 
+	oarg_type_list_ocomma: .    (249)
+
+	LCHAN  shift 78
+	LDDD  shift 250
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 232
+	'*'  shift 196
+	'('  shift 231
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 249 (src line 1769)
+
+	sym  goto 247
+	ntype  goto 249
+	arg_type  goto 245
+	dotname  goto 230
+	name  goto 197
+	name_or_type  goto 246
+	oarg_type_list_ocomma  goto 571
+	arg_type_list  goto 244
+	dotdotdot  goto 248
+	interfacetype  goto 81
+	structtype  goto 80
+	ptrtype  goto 229
+	recvchantype  goto 226
+	othertype  goto 228
+	fntype  goto 227
+	hidden_importsym  goto 11
+
+state 489
+	fnret_type:  recvchantype.    (184)
+
+	.  reduce 184 (src line 1289)
+
+
+state 490
+	fnret_type:  fntype.    (185)
+
+	.  reduce 185 (src line 1291)
+
+
+state 491
+	fnret_type:  othertype.    (186)
+
+	.  reduce 186 (src line 1292)
+
+
+state 492
+	fnret_type:  ptrtype.    (187)
+
+	.  reduce 187 (src line 1293)
+
+
+state 493
+	fnret_type:  dotname.    (188)
+
+	.  reduce 188 (src line 1294)
+
+
+state 494
+	arg_type_list:  arg_type_list ',' arg_type.    (248)
+
+	.  reduce 248 (src line 1764)
+
+
+state 495
+	fndcl:  sym '(' oarg_type_list_ocomma ')'.fnres 
+	fnres: .    (212)
+
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 232
+	'*'  shift 196
+	'('  shift 488
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 212 (src line 1531)
+
+	sym  goto 123
+	dotname  goto 493
+	name  goto 197
+	fnres  goto 572
+	interfacetype  goto 81
+	structtype  goto 80
+	ptrtype  goto 492
+	recvchantype  goto 489
+	othertype  goto 491
+	fnret_type  goto 487
+	fntype  goto 490
+	hidden_importsym  goto 11
+
+state 496
+	fntype:  LFUNC '(' oarg_type_list_ocomma ')'.fnres 
+	fnres: .    (212)
+
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 232
+	'*'  shift 196
+	'('  shift 488
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 212 (src line 1531)
+
+	sym  goto 123
+	dotname  goto 493
+	name  goto 197
+	fnres  goto 486
+	interfacetype  goto 81
+	structtype  goto 80
+	ptrtype  goto 492
+	recvchantype  goto 489
+	othertype  goto 491
+	fnret_type  goto 487
+	fntype  goto 490
+	hidden_importsym  goto 11
+
+state 497
+	loop_body:  LBODY $$65.stmt_list '}' 
+	stmt: .    (251)
+
+	error  shift 307
+	LLITERAL  shift 68
+	LBREAK  shift 41
+	LCHAN  shift 78
+	LCONST  shift 47
+	LCONTINUE  shift 42
+	LDEFER  shift 44
+	LFALL  shift 40
+	LFOR  shift 50
+	LFUNC  shift 124
+	LGO  shift 43
+	LGOTO  shift 45
+	LIF  shift 53
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LRETURN  shift 46
+	LSELECT  shift 52
+	LSTRUCT  shift 82
+	LSWITCH  shift 51
+	LTYPE  shift 32
+	LVAR  shift 30
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	';'  reduce 251 (src line 1781)
+	'{'  shift 308
+	'}'  reduce 251 (src line 1781)
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 56
+	stmt  goto 303
+	compound_stmt  goto 304
+	expr  goto 48
+	fnliteral  goto 73
+	for_stmt  goto 35
+	if_stmt  goto 38
+	non_dcl_stmt  goto 306
+	labelname  goto 39
+	name  goto 69
+	new_name  goto 54
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	select_stmt  goto 37
+	simple_stmt  goto 34
+	switch_stmt  goto 36
+	uexpr  goto 55
+	expr_list  goto 49
+	stmt_list  goto 573
+	common_dcl  goto 305
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	lconst  goto 31
+	fnlitdcl  goto 76
+
+state 498
+	for_header:  osimple_stmt ';' osimple_stmt.';' osimple_stmt 
+
+	';'  shift 574
+	.  error
+
+
+state 499
+	range_stmt:  expr_list '=' LRANGE.expr 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	expr  goto 575
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 500
+	range_stmt:  expr_list LCOLAS LRANGE.expr 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	expr  goto 576
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 501
+	switch_stmt:  LSWITCH $$88 if_header $$89 LBODY.caseblock_list '}' 
+	caseblock_list: .    (63)
+
+	.  reduce 63 (src line 652)
+
+	caseblock_list  goto 577
+
+state 502
+	if_header:  osimple_stmt ';' osimple_stmt.    (77)
+
+	.  reduce 77 (src line 739)
+
+
+state 503
+	caseblock_list:  caseblock_list caseblock.    (64)
+
+	.  reduce 64 (src line 656)
+
+
+state 504
+	select_stmt:  LSELECT $$91 LBODY caseblock_list '}'.    (92)
+
+	.  reduce 92 (src line 844)
+
+
+state 505
+	caseblock:  case.$$61 stmt_list 
+	$$61: .    (61)
+
+	.  reduce 61 (src line 621)
+
+	$$61  goto 578
+
+state 506
+	case:  LCASE.expr_or_type_list ':' 
+	case:  LCASE.expr_or_type_list '=' expr ':' 
+	case:  LCASE.expr_or_type_list LCOLAS expr ':' 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 179
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 178
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	expr  goto 173
+	expr_or_type  goto 291
+	fnliteral  goto 73
+	name  goto 69
+	non_expr_type  goto 174
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	expr_or_type_list  goto 579
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	recvchantype  goto 175
+	othertype  goto 177
+	fntype  goto 176
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 507
+	case:  LDEFAULT.':' 
+
+	':'  shift 580
+	.  error
+
+
+state 508
+	if_stmt:  LIF $$78 if_header $$79 loop_body.$$80 elseif_list else 
+	$$80: .    (80)
+
+	.  reduce 80 (src line 761)
+
+	$$80  goto 581
+
+state 509
+	pseudocall:  pexpr '(' expr_or_type_list ocomma ')'.    (124)
+
+	.  reduce 124 (src line 991)
+
+
+state 510
+	pseudocall:  pexpr '(' expr_or_type_list LDDD ocomma.')' 
+
+	')'  shift 582
+	.  error
+
+
+state 511
+	expr_or_type_list:  expr_or_type_list ',' expr_or_type.    (279)
+
+	.  reduce 279 (src line 1917)
+
+
+state 512
+	pexpr_no_paren:  pexpr '.' '(' expr_or_type ')'.    (129)
+
+	.  reduce 129 (src line 1020)
+
+
+state 513
+	pexpr_no_paren:  pexpr '.' '(' LTYPE ')'.    (130)
+
+	.  reduce 130 (src line 1024)
+
+
+state 514
+	pexpr_no_paren:  pexpr '[' oexpr ':' oexpr.']' 
+	pexpr_no_paren:  pexpr '[' oexpr ':' oexpr.':' oexpr ']' 
+
+	':'  shift 584
+	']'  shift 583
+	.  error
+
+
+state 515
+	pexpr_no_paren:  pexpr_no_paren '{' start_complit braced_keyval_list '}'.    (137)
+
+	.  reduce 137 (src line 1060)
+
+
+state 516
+	keyval_list:  keyval_list ','.keyval 
+	keyval_list:  keyval_list ','.bare_complitexpr 
+	ocomma:  ','.    (289)
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'{'  shift 403
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 289 (src line 1959)
+
+	sym  goto 123
+	expr  goto 402
+	bare_complitexpr  goto 586
+	fnliteral  goto 73
+	keyval  goto 585
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 517
+	braced_keyval_list:  keyval_list ocomma.    (285)
+
+	.  reduce 285 (src line 1947)
+
+
+state 518
+	keyval:  expr ':'.complitexpr 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'{'  shift 589
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	expr  goto 588
+	complitexpr  goto 587
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 519
+	bare_complitexpr:  '{' start_complit.braced_keyval_list '}' 
+	braced_keyval_list: .    (284)
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'{'  shift 403
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 284 (src line 1943)
+
+	sym  goto 123
+	expr  goto 402
+	bare_complitexpr  goto 401
+	fnliteral  goto 73
+	keyval  goto 400
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	keyval_list  goto 399
+	braced_keyval_list  goto 590
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 520
+	pexpr_no_paren:  '(' expr_or_type ')' '{' start_complit.braced_keyval_list '}' 
+	braced_keyval_list: .    (284)
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'{'  shift 403
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 284 (src line 1943)
+
+	sym  goto 123
+	expr  goto 402
+	bare_complitexpr  goto 401
+	fnliteral  goto 73
+	keyval  goto 400
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	keyval_list  goto 399
+	braced_keyval_list  goto 591
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 521
+	ntype:  '(' ntype.')' 
+	non_recvchantype:  '(' ntype.')' 
+
+	')'  shift 592
+	.  error
+
+
+state 522
+	pexpr_no_paren:  convtype '(' expr ocomma ')'.    (135)
+
+	.  reduce 135 (src line 1047)
+
+
+state 523
+	pexpr_no_paren:  comptype lbrace start_complit braced_keyval_list '}'.    (136)
+
+	.  reduce 136 (src line 1053)
+
+
+state 524
+	stmt_list:  stmt_list ';' stmt.    (271)
+
+	.  reduce 271 (src line 1874)
+
+
+state 525
+	compound_stmt:  '{' $$59 stmt_list.'}' 
+	stmt_list:  stmt_list.';' stmt 
+
+	';'  shift 416
+	'}'  shift 593
+	.  error
+
+
+state 526
+	othertype:  LMAP '[' ntype ']' ntype.    (195)
+
+	.  reduce 195 (src line 1330)
+
+
+state 527
+	structtype:  LSTRUCT lbrace structdcl_list osemi '}'.    (200)
+
+	.  reduce 200 (src line 1350)
+
+
+state 528
+	structdcl_list:  structdcl_list ';' structdcl.    (227)
+
+	.  reduce 227 (src line 1611)
+
+
+state 529
+	structdcl:  new_name_list ntype oliteral.    (230)
+
+	.  reduce 230 (src line 1626)
+
+
+state 530
+	new_name_list:  new_name_list ',' new_name.    (273)
+
+	.  reduce 273 (src line 1887)
+
+
+state 531
+	structdcl:  '(' embed ')'.oliteral 
+	oliteral: .    (302)
+
+	LLITERAL  shift 428
+	.  reduce 302 (src line 1997)
+
+	oliteral  goto 594
+
+state 532
+	structdcl:  '(' '*' embed.')' oliteral 
+
+	')'  shift 595
+	.  error
+
+
+state 533
+	structdcl:  '*' embed oliteral.    (233)
+
+	.  reduce 233 (src line 1662)
+
+
+state 534
+	structdcl:  '*' '(' embed.')' oliteral 
+
+	')'  shift 596
+	.  error
+
+
+state 535
+	packname:  LNAME '.' sym.    (237)
+
+	.  reduce 237 (src line 1694)
+
+
+state 536
+	interfacetype:  LINTERFACE lbrace interfacedcl_list osemi '}'.    (202)
+
+	.  reduce 202 (src line 1363)
+
+
+state 537
+	interfacedcl_list:  interfacedcl_list ';' interfacedcl.    (229)
+
+	.  reduce 229 (src line 1621)
+
+
+state 538
+	indcl:  '(' oarg_type_list_ocomma.')' fnres 
+
+	')'  shift 597
+	.  error
+
+
+state 539
+	interfacedcl:  '(' packname ')'.    (241)
+
+	.  reduce 241 (src line 1724)
+
+
+state 540
+	hidden_type_misc:  '[' ']' hidden_type.    (319)
+
+	.  reduce 319 (src line 2090)
+
+
+state 541
+	hidden_type_misc:  '[' LLITERAL ']'.hidden_type 
+
+	LCHAN  shift 347
+	LFUNC  shift 349
+	LINTERFACE  shift 345
+	LMAP  shift 343
+	LNAME  shift 341
+	LSTRUCT  shift 344
+	LCOMM  shift 348
+	'*'  shift 346
+	'['  shift 342
+	'@'  shift 13
+	.  error
+
+	hidden_importsym  goto 340
+	hidden_type  goto 598
+	hidden_type_misc  goto 337
+	hidden_type_func  goto 339
+	hidden_type_recv_chan  goto 338
+
+state 542
+	hidden_type_misc:  LMAP '[' hidden_type.']' hidden_type 
+
+	']'  shift 599
+	.  error
+
+
+state 543
+	hidden_type_misc:  LSTRUCT '{' ohidden_structdcl_list.'}' 
+
+	'}'  shift 600
+	.  error
+
+
+state 544
+	ohidden_structdcl_list:  hidden_structdcl_list.    (299)
+	hidden_structdcl_list:  hidden_structdcl_list.';' hidden_structdcl 
+
+	';'  shift 601
+	.  reduce 299 (src line 1989)
+
+
+state 545
+	hidden_structdcl_list:  hidden_structdcl.    (348)
+
+	.  reduce 348 (src line 2284)
+
+
+state 546
+	hidden_structdcl:  sym.hidden_type oliteral 
+
+	LCHAN  shift 347
+	LFUNC  shift 349
+	LINTERFACE  shift 345
+	LMAP  shift 343
+	LNAME  shift 341
+	LSTRUCT  shift 344
+	LCOMM  shift 348
+	'*'  shift 346
+	'['  shift 342
+	'@'  shift 13
+	.  error
+
+	hidden_importsym  goto 340
+	hidden_type  goto 602
+	hidden_type_misc  goto 337
+	hidden_type_func  goto 339
+	hidden_type_recv_chan  goto 338
+
+state 547
+	hidden_type_misc:  LINTERFACE '{' ohidden_interfacedcl_list.'}' 
+
+	'}'  shift 603
+	.  error
+
+
+state 548
+	ohidden_interfacedcl_list:  hidden_interfacedcl_list.    (301)
+	hidden_interfacedcl_list:  hidden_interfacedcl_list.';' hidden_interfacedcl 
+
+	';'  shift 604
+	.  reduce 301 (src line 1995)
+
+
+state 549
+	hidden_interfacedcl_list:  hidden_interfacedcl.    (350)
+
+	.  reduce 350 (src line 2294)
+
+
+state 550
+	hidden_interfacedcl:  sym.'(' ohidden_funarg_list ')' ohidden_funres 
+
+	'('  shift 605
+	.  error
+
+
+state 551
+	hidden_interfacedcl:  hidden_type.    (334)
+
+	.  reduce 334 (src line 2201)
+
+
+state 552
+	sym:  LNAME.    (157)
+	hidden_type_misc:  LNAME.    (318)
+
+	'('  reduce 157 (src line 1175)
+	.  reduce 318 (src line 2079)
+
+
+state 553
+	sym:  hidden_importsym.    (158)
+	hidden_type_misc:  hidden_importsym.    (317)
+
+	'('  reduce 158 (src line 1184)
+	.  reduce 317 (src line 2074)
+
+
+state 554
+	hidden_type_misc:  LCHAN '(' hidden_type_recv_chan.')' 
+
+	')'  shift 606
+	.  error
+
+
+state 555
+	hidden_type_misc:  LCHAN LCOMM hidden_type.    (327)
+
+	.  reduce 327 (src line 2126)
+
+
+state 556
+	hidden_type_recv_chan:  LCOMM LCHAN hidden_type.    (328)
+
+	.  reduce 328 (src line 2133)
+
+
+state 557
+	hidden_type_func:  LFUNC '(' ohidden_funarg_list.')' ohidden_funres 
+
+	')'  shift 607
+	.  error
+
+
+state 558
+	hidden_import:  LCONST hidden_pkg_importsym '=' hidden_constant ';'.    (306)
+
+	.  reduce 306 (src line 2015)
+
+
+state 559
+	hidden_constant:  '(' hidden_literal.'+' hidden_literal ')' 
+
+	'+'  shift 608
+	.  error
+
+
+state 560
+	hidden_literal:  '-' LLITERAL.    (340)
+
+	.  reduce 340 (src line 2231)
+
+
+state 561
+	hidden_import:  LCONST hidden_pkg_importsym hidden_type '=' hidden_constant.';' 
+
+	';'  shift 609
+	.  error
+
+
+state 562
+	hidden_fndcl:  hidden_pkg_importsym '(' ohidden_funarg_list ')'.ohidden_funres 
+	ohidden_funres: .    (335)
+
+	LCHAN  shift 347
+	LFUNC  shift 349
+	LINTERFACE  shift 345
+	LMAP  shift 343
+	LNAME  shift 341
+	LSTRUCT  shift 344
+	LCOMM  shift 348
+	'*'  shift 346
+	'('  shift 612
+	'['  shift 342
+	'@'  shift 13
+	.  reduce 335 (src line 2206)
+
+	hidden_importsym  goto 340
+	hidden_funres  goto 611
+	ohidden_funres  goto 610
+	hidden_type  goto 613
+	hidden_type_misc  goto 337
+	hidden_type_func  goto 339
+	hidden_type_recv_chan  goto 338
+
+state 563
+	hidden_fndcl:  '(' hidden_funarg_list ')' sym.'(' ohidden_funarg_list ')' ohidden_funres 
+
+	'('  shift 614
+	.  error
+
+
+state 564
+	hidden_funarg_list:  hidden_funarg_list ',' hidden_funarg.    (347)
+
+	.  reduce 347 (src line 2279)
+
+
+state 565
+	hidden_funarg:  sym hidden_type oliteral.    (330)
+
+	.  reduce 330 (src line 2147)
+
+
+state 566
+	hidden_funarg:  sym LDDD hidden_type.oliteral 
+	oliteral: .    (302)
+
+	LLITERAL  shift 428
+	.  reduce 302 (src line 1997)
+
+	oliteral  goto 615
+
+state 567
+	common_dcl:  lconst '(' constdcl ';' constdcl_list osemi.')' 
+
+	')'  shift 616
+	.  error
+
+
+state 568
+	constdcl_list:  constdcl_list ';'.constdcl1 
+	osemi:  ';'.    (287)
+
+	LNAME  shift 10
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 287 (src line 1956)
+
+	sym  goto 105
+	dcl_name  goto 104
+	dcl_name_list  goto 480
+	constdcl  goto 479
+	constdcl1  goto 617
+	hidden_importsym  goto 11
+
+state 569
+	constdcl:  dcl_name_list ntype.'=' expr_list 
+	constdcl1:  dcl_name_list ntype.    (45)
+
+	'='  shift 367
+	.  reduce 45 (src line 448)
+
+
+state 570
+	fndcl:  '(' oarg_type_list_ocomma ')' sym '('.oarg_type_list_ocomma ')' fnres 
+	oarg_type_list_ocomma: .    (249)
+
+	LCHAN  shift 78
+	LDDD  shift 250
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 232
+	'*'  shift 196
+	'('  shift 231
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 249 (src line 1769)
+
+	sym  goto 247
+	ntype  goto 249
+	arg_type  goto 245
+	dotname  goto 230
+	name  goto 197
+	name_or_type  goto 246
+	oarg_type_list_ocomma  goto 618
+	arg_type_list  goto 244
+	dotdotdot  goto 248
+	interfacetype  goto 81
+	structtype  goto 80
+	ptrtype  goto 229
+	recvchantype  goto 226
+	othertype  goto 228
+	fntype  goto 227
+	hidden_importsym  goto 11
+
+state 571
+	fnres:  '(' oarg_type_list_ocomma.')' 
+
+	')'  shift 619
+	.  error
+
+
+state 572
+	fndcl:  sym '(' oarg_type_list_ocomma ')' fnres.    (205)
+
+	.  reduce 205 (src line 1398)
+
+
+state 573
+	loop_body:  LBODY $$65 stmt_list.'}' 
+	stmt_list:  stmt_list.';' stmt 
+
+	';'  shift 416
+	'}'  shift 620
+	.  error
+
+
+state 574
+	for_header:  osimple_stmt ';' osimple_stmt ';'.osimple_stmt 
+	osimple_stmt: .    (294)
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 294 (src line 1973)
+
+	sym  goto 123
+	expr  goto 48
+	fnliteral  goto 73
+	name  goto 69
+	osimple_stmt  goto 621
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	simple_stmt  goto 282
+	uexpr  goto 55
+	expr_list  goto 49
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 575
+	range_stmt:  expr_list '=' LRANGE expr.    (67)
+	expr:  expr.LOROR expr 
+	expr:  expr.LANDAND expr 
+	expr:  expr.LEQ expr 
+	expr:  expr.LNE expr 
+	expr:  expr.LLT expr 
+	expr:  expr.LLE expr 
+	expr:  expr.LGE expr 
+	expr:  expr.LGT expr 
+	expr:  expr.'+' expr 
+	expr:  expr.'-' expr 
+	expr:  expr.'|' expr 
+	expr:  expr.'^' expr 
+	expr:  expr.'*' expr 
+	expr:  expr.'/' expr 
+	expr:  expr.'%' expr 
+	expr:  expr.'&' expr 
+	expr:  expr.LANDNOT expr 
+	expr:  expr.LLSH expr 
+	expr:  expr.LRSH expr 
+	expr:  expr.LCOMM expr 
+
+	LANDAND  shift 134
+	LANDNOT  shift 149
+	LCOMM  shift 152
+	LEQ  shift 135
+	LGE  shift 139
+	LGT  shift 140
+	LLE  shift 138
+	LLSH  shift 150
+	LLT  shift 137
+	LNE  shift 136
+	LOROR  shift 133
+	LRSH  shift 151
+	'+'  shift 141
+	'-'  shift 142
+	'|'  shift 143
+	'^'  shift 144
+	'*'  shift 145
+	'/'  shift 146
+	'%'  shift 147
+	'&'  shift 148
+	.  reduce 67 (src line 672)
+
+
+state 576
+	range_stmt:  expr_list LCOLAS LRANGE expr.    (68)
+	expr:  expr.LOROR expr 
+	expr:  expr.LANDAND expr 
+	expr:  expr.LEQ expr 
+	expr:  expr.LNE expr 
+	expr:  expr.LLT expr 
+	expr:  expr.LLE expr 
+	expr:  expr.LGE expr 
+	expr:  expr.LGT expr 
+	expr:  expr.'+' expr 
+	expr:  expr.'-' expr 
+	expr:  expr.'|' expr 
+	expr:  expr.'^' expr 
+	expr:  expr.'*' expr 
+	expr:  expr.'/' expr 
+	expr:  expr.'%' expr 
+	expr:  expr.'&' expr 
+	expr:  expr.LANDNOT expr 
+	expr:  expr.LLSH expr 
+	expr:  expr.LRSH expr 
+	expr:  expr.LCOMM expr 
+
+	LANDAND  shift 134
+	LANDNOT  shift 149
+	LCOMM  shift 152
+	LEQ  shift 135
+	LGE  shift 139
+	LGT  shift 140
+	LLE  shift 138
+	LLSH  shift 150
+	LLT  shift 137
+	LNE  shift 136
+	LOROR  shift 133
+	LRSH  shift 151
+	'+'  shift 141
+	'-'  shift 142
+	'|'  shift 143
+	'^'  shift 144
+	'*'  shift 145
+	'/'  shift 146
+	'%'  shift 147
+	'&'  shift 148
+	.  reduce 68 (src line 679)
+
+
+state 577
+	caseblock_list:  caseblock_list.caseblock 
+	switch_stmt:  LSWITCH $$88 if_header $$89 LBODY caseblock_list.'}' 
+
+	LCASE  shift 506
+	LDEFAULT  shift 507
+	'}'  shift 622
+	.  error
+
+	case  goto 505
+	caseblock  goto 503
+
+state 578
+	caseblock:  case $$61.stmt_list 
+	stmt: .    (251)
+
+	error  shift 307
+	LLITERAL  shift 68
+	LBREAK  shift 41
+	LCASE  reduce 251 (src line 1781)
+	LCHAN  shift 78
+	LCONST  shift 47
+	LCONTINUE  shift 42
+	LDEFAULT  reduce 251 (src line 1781)
+	LDEFER  shift 44
+	LFALL  shift 40
+	LFOR  shift 50
+	LFUNC  shift 124
+	LGO  shift 43
+	LGOTO  shift 45
+	LIF  shift 53
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LRETURN  shift 46
+	LSELECT  shift 52
+	LSTRUCT  shift 82
+	LSWITCH  shift 51
+	LTYPE  shift 32
+	LVAR  shift 30
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	';'  reduce 251 (src line 1781)
+	'{'  shift 308
+	'}'  reduce 251 (src line 1781)
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 56
+	stmt  goto 303
+	compound_stmt  goto 304
+	expr  goto 48
+	fnliteral  goto 73
+	for_stmt  goto 35
+	if_stmt  goto 38
+	non_dcl_stmt  goto 306
+	labelname  goto 39
+	name  goto 69
+	new_name  goto 54
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	select_stmt  goto 37
+	simple_stmt  goto 34
+	switch_stmt  goto 36
+	uexpr  goto 55
+	expr_list  goto 49
+	stmt_list  goto 623
+	common_dcl  goto 305
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	lconst  goto 31
+	fnlitdcl  goto 76
+
+state 579
+	case:  LCASE expr_or_type_list.':' 
+	case:  LCASE expr_or_type_list.'=' expr ':' 
+	case:  LCASE expr_or_type_list.LCOLAS expr ':' 
+	expr_or_type_list:  expr_or_type_list.',' expr_or_type 
+
+	LCOLAS  shift 626
+	'='  shift 625
+	':'  shift 624
+	','  shift 627
+	.  error
+
+
+state 580
+	case:  LDEFAULT ':'.    (58)
+
+	.  reduce 58 (src line 586)
+
+
+state 581
+	if_stmt:  LIF $$78 if_header $$79 loop_body $$80.elseif_list else 
+	elseif_list: .    (84)
+
+	.  reduce 84 (src line 796)
+
+	elseif_list  goto 628
+
+state 582
+	pseudocall:  pexpr '(' expr_or_type_list LDDD ocomma ')'.    (125)
+
+	.  reduce 125 (src line 996)
+
+
+state 583
+	pexpr_no_paren:  pexpr '[' oexpr ':' oexpr ']'.    (132)
+
+	.  reduce 132 (src line 1032)
+
+
+state 584
+	pexpr_no_paren:  pexpr '[' oexpr ':' oexpr ':'.oexpr ']' 
+	oexpr: .    (290)
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 290 (src line 1961)
+
+	sym  goto 123
+	expr  goto 188
+	fnliteral  goto 73
+	name  goto 69
+	oexpr  goto 629
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 585
+	keyval_list:  keyval_list ',' keyval.    (282)
+
+	.  reduce 282 (src line 1934)
+
+
+state 586
+	keyval_list:  keyval_list ',' bare_complitexpr.    (283)
+
+	.  reduce 283 (src line 1938)
+
+
+state 587
+	keyval:  expr ':' complitexpr.    (141)
+
+	.  reduce 141 (src line 1082)
+
+
+state 588
+	expr:  expr.LOROR expr 
+	expr:  expr.LANDAND expr 
+	expr:  expr.LEQ expr 
+	expr:  expr.LNE expr 
+	expr:  expr.LLT expr 
+	expr:  expr.LLE expr 
+	expr:  expr.LGE expr 
+	expr:  expr.LGT expr 
+	expr:  expr.'+' expr 
+	expr:  expr.'-' expr 
+	expr:  expr.'|' expr 
+	expr:  expr.'^' expr 
+	expr:  expr.'*' expr 
+	expr:  expr.'/' expr 
+	expr:  expr.'%' expr 
+	expr:  expr.'&' expr 
+	expr:  expr.LANDNOT expr 
+	expr:  expr.LLSH expr 
+	expr:  expr.LRSH expr 
+	expr:  expr.LCOMM expr 
+	complitexpr:  expr.    (144)
+
+	LANDAND  shift 134
+	LANDNOT  shift 149
+	LCOMM  shift 152
+	LEQ  shift 135
+	LGE  shift 139
+	LGT  shift 140
+	LLE  shift 138
+	LLSH  shift 150
+	LLT  shift 137
+	LNE  shift 136
+	LOROR  shift 133
+	LRSH  shift 151
+	'+'  shift 141
+	'-'  shift 142
+	'|'  shift 143
+	'^'  shift 144
+	'*'  shift 145
+	'/'  shift 146
+	'%'  shift 147
+	'&'  shift 148
+	.  reduce 144 (src line 1108)
+
+
+state 589
+	complitexpr:  '{'.start_complit braced_keyval_list '}' 
+	start_complit: .    (140)
+
+	.  reduce 140 (src line 1075)
+
+	start_complit  goto 630
+
+state 590
+	bare_complitexpr:  '{' start_complit braced_keyval_list.'}' 
+
+	'}'  shift 631
+	.  error
+
+
+state 591
+	pexpr_no_paren:  '(' expr_or_type ')' '{' start_complit braced_keyval_list.'}' 
+
+	'}'  shift 632
+	.  error
+
+
+state 592
+	ntype:  '(' ntype ')'.    (171)
+	non_recvchantype:  '(' ntype ')'.    (180)
+
+	LBODY  reduce 180 (src line 1277)
+	'('  reduce 180 (src line 1277)
+	'{'  reduce 180 (src line 1277)
+	.  reduce 171 (src line 1258)
+
+
+state 593
+	compound_stmt:  '{' $$59 stmt_list '}'.    (60)
+
+	.  reduce 60 (src line 611)
+
+
+state 594
+	structdcl:  '(' embed ')' oliteral.    (232)
+
+	.  reduce 232 (src line 1656)
+
+
+state 595
+	structdcl:  '(' '*' embed ')'.oliteral 
+	oliteral: .    (302)
+
+	LLITERAL  shift 428
+	.  reduce 302 (src line 1997)
+
+	oliteral  goto 633
+
+state 596
+	structdcl:  '*' '(' embed ')'.oliteral 
+	oliteral: .    (302)
+
+	LLITERAL  shift 428
+	.  reduce 302 (src line 1997)
+
+	oliteral  goto 634
+
+state 597
+	indcl:  '(' oarg_type_list_ocomma ')'.fnres 
+	fnres: .    (212)
+
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 232
+	'*'  shift 196
+	'('  shift 488
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 212 (src line 1531)
+
+	sym  goto 123
+	dotname  goto 493
+	name  goto 197
+	fnres  goto 635
+	interfacetype  goto 81
+	structtype  goto 80
+	ptrtype  goto 492
+	recvchantype  goto 489
+	othertype  goto 491
+	fnret_type  goto 487
+	fntype  goto 490
+	hidden_importsym  goto 11
+
+state 598
+	hidden_type_misc:  '[' LLITERAL ']' hidden_type.    (320)
+
+	.  reduce 320 (src line 2094)
+
+
+state 599
+	hidden_type_misc:  LMAP '[' hidden_type ']'.hidden_type 
+
+	LCHAN  shift 347
+	LFUNC  shift 349
+	LINTERFACE  shift 345
+	LMAP  shift 343
+	LNAME  shift 341
+	LSTRUCT  shift 344
+	LCOMM  shift 348
+	'*'  shift 346
+	'['  shift 342
+	'@'  shift 13
+	.  error
+
+	hidden_importsym  goto 340
+	hidden_type  goto 636
+	hidden_type_misc  goto 337
+	hidden_type_func  goto 339
+	hidden_type_recv_chan  goto 338
+
+state 600
+	hidden_type_misc:  LSTRUCT '{' ohidden_structdcl_list '}'.    (322)
+
+	.  reduce 322 (src line 2102)
+
+
+state 601
+	hidden_structdcl_list:  hidden_structdcl_list ';'.hidden_structdcl 
+
+	LNAME  shift 10
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 546
+	hidden_importsym  goto 11
+	hidden_structdcl  goto 637
+
+state 602
+	hidden_structdcl:  sym hidden_type.oliteral 
+	oliteral: .    (302)
+
+	LLITERAL  shift 428
+	.  reduce 302 (src line 1997)
+
+	oliteral  goto 638
+
+state 603
+	hidden_type_misc:  LINTERFACE '{' ohidden_interfacedcl_list '}'.    (323)
+
+	.  reduce 323 (src line 2106)
+
+
+state 604
+	hidden_interfacedcl_list:  hidden_interfacedcl_list ';'.hidden_interfacedcl 
+
+	LCHAN  shift 347
+	LFUNC  shift 349
+	LINTERFACE  shift 345
+	LMAP  shift 343
+	LNAME  shift 552
+	LSTRUCT  shift 344
+	LCOMM  shift 348
+	'*'  shift 346
+	'['  shift 342
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 550
+	hidden_importsym  goto 553
+	hidden_interfacedcl  goto 639
+	hidden_type  goto 551
+	hidden_type_misc  goto 337
+	hidden_type_func  goto 339
+	hidden_type_recv_chan  goto 338
+
+state 605
+	hidden_interfacedcl:  sym '('.ohidden_funarg_list ')' ohidden_funres 
+	ohidden_funarg_list: .    (296)
+
+	LNAME  shift 10
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 296 (src line 1979)
+
+	sym  goto 357
+	hidden_importsym  goto 11
+	hidden_funarg  goto 356
+	hidden_funarg_list  goto 467
+	ohidden_funarg_list  goto 640
+
+state 606
+	hidden_type_misc:  LCHAN '(' hidden_type_recv_chan ')'.    (326)
+
+	.  reduce 326 (src line 2120)
+
+
+state 607
+	hidden_type_func:  LFUNC '(' ohidden_funarg_list ')'.ohidden_funres 
+	ohidden_funres: .    (335)
+
+	LCHAN  shift 347
+	LFUNC  shift 349
+	LINTERFACE  shift 345
+	LMAP  shift 343
+	LNAME  shift 341
+	LSTRUCT  shift 344
+	LCOMM  shift 348
+	'*'  shift 346
+	'('  shift 612
+	'['  shift 342
+	'@'  shift 13
+	.  reduce 335 (src line 2206)
+
+	hidden_importsym  goto 340
+	hidden_funres  goto 611
+	ohidden_funres  goto 641
+	hidden_type  goto 613
+	hidden_type_misc  goto 337
+	hidden_type_func  goto 339
+	hidden_type_recv_chan  goto 338
+
+state 608
+	hidden_constant:  '(' hidden_literal '+'.hidden_literal ')' 
+
+	LLITERAL  shift 460
+	LNAME  shift 10
+	'-'  shift 461
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 462
+	hidden_importsym  goto 11
+	hidden_literal  goto 642
+
+state 609
+	hidden_import:  LCONST hidden_pkg_importsym hidden_type '=' hidden_constant ';'.    (307)
+
+	.  reduce 307 (src line 2019)
+
+
+state 610
+	hidden_fndcl:  hidden_pkg_importsym '(' ohidden_funarg_list ')' ohidden_funres.    (207)
+
+	.  reduce 207 (src line 1467)
+
+
+state 611
+	ohidden_funres:  hidden_funres.    (336)
+
+	.  reduce 336 (src line 2210)
+
+
+state 612
+	hidden_funres:  '('.ohidden_funarg_list ')' 
+	ohidden_funarg_list: .    (296)
+
+	LNAME  shift 10
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 296 (src line 1979)
+
+	sym  goto 357
+	hidden_importsym  goto 11
+	hidden_funarg  goto 356
+	hidden_funarg_list  goto 467
+	ohidden_funarg_list  goto 643
+
+state 613
+	hidden_funres:  hidden_type.    (338)
+
+	.  reduce 338 (src line 2217)
+
+
+state 614
+	hidden_fndcl:  '(' hidden_funarg_list ')' sym '('.ohidden_funarg_list ')' ohidden_funres 
+	ohidden_funarg_list: .    (296)
+
+	LNAME  shift 10
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 296 (src line 1979)
+
+	sym  goto 357
+	hidden_importsym  goto 11
+	hidden_funarg  goto 356
+	hidden_funarg_list  goto 467
+	ohidden_funarg_list  goto 644
+
+state 615
+	hidden_funarg:  sym LDDD hidden_type oliteral.    (331)
+
+	.  reduce 331 (src line 2156)
+
+
+state 616
+	common_dcl:  lconst '(' constdcl ';' constdcl_list osemi ')'.    (33)
+
+	.  reduce 33 (src line 392)
+
+
+state 617
+	constdcl_list:  constdcl_list ';' constdcl1.    (223)
+
+	.  reduce 223 (src line 1594)
+
+
+state 618
+	fndcl:  '(' oarg_type_list_ocomma ')' sym '(' oarg_type_list_ocomma.')' fnres 
+
+	')'  shift 645
+	.  error
+
+
+state 619
+	fnres:  '(' oarg_type_list_ocomma ')'.    (214)
+
+	.  reduce 214 (src line 1540)
+
+
+state 620
+	loop_body:  LBODY $$65 stmt_list '}'.    (66)
+
+	.  reduce 66 (src line 666)
+
+
+state 621
+	for_header:  osimple_stmt ';' osimple_stmt ';' osimple_stmt.    (70)
+
+	.  reduce 70 (src line 692)
+
+
+state 622
+	switch_stmt:  LSWITCH $$88 if_header $$89 LBODY caseblock_list '}'.    (90)
+
+	.  reduce 90 (src line 830)
+
+
+state 623
+	caseblock:  case $$61 stmt_list.    (62)
+	stmt_list:  stmt_list.';' stmt 
+
+	';'  shift 416
+	.  reduce 62 (src line 633)
+
+
+state 624
+	case:  LCASE expr_or_type_list ':'.    (55)
+
+	.  reduce 55 (src line 535)
+
+
+state 625
+	case:  LCASE expr_or_type_list '='.expr ':' 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	expr  goto 646
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 626
+	case:  LCASE expr_or_type_list LCOLAS.expr ':' 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	expr  goto 647
+	fnliteral  goto 73
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 627
+	expr_or_type_list:  expr_or_type_list ','.expr_or_type 
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 179
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 178
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  error
+
+	sym  goto 123
+	expr  goto 173
+	expr_or_type  goto 511
+	fnliteral  goto 73
+	name  goto 69
+	non_expr_type  goto 174
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	recvchantype  goto 175
+	othertype  goto 177
+	fntype  goto 176
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 628
+	if_stmt:  LIF $$78 if_header $$79 loop_body $$80 elseif_list.else 
+	elseif_list:  elseif_list.elseif 
+	else: .    (86)
+
+	LELSE  shift 650
+	.  reduce 86 (src line 805)
+
+	elseif  goto 649
+	else  goto 648
+
+state 629
+	pexpr_no_paren:  pexpr '[' oexpr ':' oexpr ':' oexpr.']' 
+
+	']'  shift 651
+	.  error
+
+
+state 630
+	complitexpr:  '{' start_complit.braced_keyval_list '}' 
+	braced_keyval_list: .    (284)
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'{'  shift 403
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 284 (src line 1943)
+
+	sym  goto 123
+	expr  goto 402
+	bare_complitexpr  goto 401
+	fnliteral  goto 73
+	keyval  goto 400
+	name  goto 69
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	uexpr  goto 55
+	keyval_list  goto 399
+	braced_keyval_list  goto 652
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 631
+	bare_complitexpr:  '{' start_complit braced_keyval_list '}'.    (143)
+
+	.  reduce 143 (src line 1102)
+
+
+state 632
+	pexpr_no_paren:  '(' expr_or_type ')' '{' start_complit braced_keyval_list '}'.    (138)
+
+	.  reduce 138 (src line 1066)
+
+
+state 633
+	structdcl:  '(' '*' embed ')' oliteral.    (234)
+
+	.  reduce 234 (src line 1668)
+
+
+state 634
+	structdcl:  '*' '(' embed ')' oliteral.    (235)
+
+	.  reduce 235 (src line 1675)
+
+
+state 635
+	indcl:  '(' oarg_type_list_ocomma ')' fnres.    (242)
+
+	.  reduce 242 (src line 1730)
+
+
+state 636
+	hidden_type_misc:  LMAP '[' hidden_type ']' hidden_type.    (321)
+
+	.  reduce 321 (src line 2098)
+
+
+state 637
+	hidden_structdcl_list:  hidden_structdcl_list ';' hidden_structdcl.    (349)
+
+	.  reduce 349 (src line 2289)
+
+
+state 638
+	hidden_structdcl:  sym hidden_type oliteral.    (332)
+
+	.  reduce 332 (src line 2172)
+
+
+state 639
+	hidden_interfacedcl_list:  hidden_interfacedcl_list ';' hidden_interfacedcl.    (351)
+
+	.  reduce 351 (src line 2299)
+
+
+state 640
+	hidden_interfacedcl:  sym '(' ohidden_funarg_list.')' ohidden_funres 
+
+	')'  shift 653
+	.  error
+
+
+state 641
+	hidden_type_func:  LFUNC '(' ohidden_funarg_list ')' ohidden_funres.    (329)
+
+	.  reduce 329 (src line 2141)
+
+
+state 642
+	hidden_constant:  '(' hidden_literal '+' hidden_literal.')' 
+
+	')'  shift 654
+	.  error
+
+
+state 643
+	hidden_funres:  '(' ohidden_funarg_list.')' 
+
+	')'  shift 655
+	.  error
+
+
+state 644
+	hidden_fndcl:  '(' hidden_funarg_list ')' sym '(' ohidden_funarg_list.')' ohidden_funres 
+
+	')'  shift 656
+	.  error
+
+
+state 645
+	fndcl:  '(' oarg_type_list_ocomma ')' sym '(' oarg_type_list_ocomma ')'.fnres 
+	fnres: .    (212)
+
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 232
+	'*'  shift 196
+	'('  shift 488
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 212 (src line 1531)
+
+	sym  goto 123
+	dotname  goto 493
+	name  goto 197
+	fnres  goto 657
+	interfacetype  goto 81
+	structtype  goto 80
+	ptrtype  goto 492
+	recvchantype  goto 489
+	othertype  goto 491
+	fnret_type  goto 487
+	fntype  goto 490
+	hidden_importsym  goto 11
+
+state 646
+	case:  LCASE expr_or_type_list '=' expr.':' 
+	expr:  expr.LOROR expr 
+	expr:  expr.LANDAND expr 
+	expr:  expr.LEQ expr 
+	expr:  expr.LNE expr 
+	expr:  expr.LLT expr 
+	expr:  expr.LLE expr 
+	expr:  expr.LGE expr 
+	expr:  expr.LGT expr 
+	expr:  expr.'+' expr 
+	expr:  expr.'-' expr 
+	expr:  expr.'|' expr 
+	expr:  expr.'^' expr 
+	expr:  expr.'*' expr 
+	expr:  expr.'/' expr 
+	expr:  expr.'%' expr 
+	expr:  expr.'&' expr 
+	expr:  expr.LANDNOT expr 
+	expr:  expr.LLSH expr 
+	expr:  expr.LRSH expr 
+	expr:  expr.LCOMM expr 
+
+	LANDAND  shift 134
+	LANDNOT  shift 149
+	LCOMM  shift 152
+	LEQ  shift 135
+	LGE  shift 139
+	LGT  shift 140
+	LLE  shift 138
+	LLSH  shift 150
+	LLT  shift 137
+	LNE  shift 136
+	LOROR  shift 133
+	LRSH  shift 151
+	'+'  shift 141
+	'-'  shift 142
+	'|'  shift 143
+	'^'  shift 144
+	'*'  shift 145
+	'/'  shift 146
+	'%'  shift 147
+	'&'  shift 148
+	':'  shift 658
+	.  error
+
+
+state 647
+	case:  LCASE expr_or_type_list LCOLAS expr.':' 
+	expr:  expr.LOROR expr 
+	expr:  expr.LANDAND expr 
+	expr:  expr.LEQ expr 
+	expr:  expr.LNE expr 
+	expr:  expr.LLT expr 
+	expr:  expr.LLE expr 
+	expr:  expr.LGE expr 
+	expr:  expr.LGT expr 
+	expr:  expr.'+' expr 
+	expr:  expr.'-' expr 
+	expr:  expr.'|' expr 
+	expr:  expr.'^' expr 
+	expr:  expr.'*' expr 
+	expr:  expr.'/' expr 
+	expr:  expr.'%' expr 
+	expr:  expr.'&' expr 
+	expr:  expr.LANDNOT expr 
+	expr:  expr.LLSH expr 
+	expr:  expr.LRSH expr 
+	expr:  expr.LCOMM expr 
+
+	LANDAND  shift 134
+	LANDNOT  shift 149
+	LCOMM  shift 152
+	LEQ  shift 135
+	LGE  shift 139
+	LGT  shift 140
+	LLE  shift 138
+	LLSH  shift 150
+	LLT  shift 137
+	LNE  shift 136
+	LOROR  shift 133
+	LRSH  shift 151
+	'+'  shift 141
+	'-'  shift 142
+	'|'  shift 143
+	'^'  shift 144
+	'*'  shift 145
+	'/'  shift 146
+	'%'  shift 147
+	'&'  shift 148
+	':'  shift 659
+	.  error
+
+
+state 648
+	if_stmt:  LIF $$78 if_header $$79 loop_body $$80 elseif_list else.    (81)
+
+	.  reduce 81 (src line 765)
+
+
+state 649
+	elseif_list:  elseif_list elseif.    (85)
+
+	.  reduce 85 (src line 800)
+
+
+state 650
+	elseif:  LELSE.LIF $$82 if_header loop_body 
+	else:  LELSE.compound_stmt 
+
+	LIF  shift 660
+	'{'  shift 308
+	.  error
+
+	compound_stmt  goto 661
+
+state 651
+	pexpr_no_paren:  pexpr '[' oexpr ':' oexpr ':' oexpr ']'.    (133)
+
+	.  reduce 133 (src line 1036)
+
+
+state 652
+	complitexpr:  '{' start_complit braced_keyval_list.'}' 
+
+	'}'  shift 662
+	.  error
+
+
+state 653
+	hidden_interfacedcl:  sym '(' ohidden_funarg_list ')'.ohidden_funres 
+	ohidden_funres: .    (335)
+
+	LCHAN  shift 347
+	LFUNC  shift 349
+	LINTERFACE  shift 345
+	LMAP  shift 343
+	LNAME  shift 341
+	LSTRUCT  shift 344
+	LCOMM  shift 348
+	'*'  shift 346
+	'('  shift 612
+	'['  shift 342
+	'@'  shift 13
+	.  reduce 335 (src line 2206)
+
+	hidden_importsym  goto 340
+	hidden_funres  goto 611
+	ohidden_funres  goto 663
+	hidden_type  goto 613
+	hidden_type_misc  goto 337
+	hidden_type_func  goto 339
+	hidden_type_recv_chan  goto 338
+
+state 654
+	hidden_constant:  '(' hidden_literal '+' hidden_literal ')'.    (343)
+
+	.  reduce 343 (src line 2259)
+
+
+state 655
+	hidden_funres:  '(' ohidden_funarg_list ')'.    (337)
+
+	.  reduce 337 (src line 2212)
+
+
+state 656
+	hidden_fndcl:  '(' hidden_funarg_list ')' sym '(' ohidden_funarg_list ')'.ohidden_funres 
+	ohidden_funres: .    (335)
+
+	LCHAN  shift 347
+	LFUNC  shift 349
+	LINTERFACE  shift 345
+	LMAP  shift 343
+	LNAME  shift 341
+	LSTRUCT  shift 344
+	LCOMM  shift 348
+	'*'  shift 346
+	'('  shift 612
+	'['  shift 342
+	'@'  shift 13
+	.  reduce 335 (src line 2206)
+
+	hidden_importsym  goto 340
+	hidden_funres  goto 611
+	ohidden_funres  goto 664
+	hidden_type  goto 613
+	hidden_type_misc  goto 337
+	hidden_type_func  goto 339
+	hidden_type_recv_chan  goto 338
+
+state 657
+	fndcl:  '(' oarg_type_list_ocomma ')' sym '(' oarg_type_list_ocomma ')' fnres.    (206)
+
+	.  reduce 206 (src line 1430)
+
+
+state 658
+	case:  LCASE expr_or_type_list '=' expr ':'.    (56)
+
+	.  reduce 56 (src line 559)
+
+
+state 659
+	case:  LCASE expr_or_type_list LCOLAS expr ':'.    (57)
+
+	.  reduce 57 (src line 577)
+
+
+state 660
+	elseif:  LELSE LIF.$$82 if_header loop_body 
+	$$82: .    (82)
+
+	.  reduce 82 (src line 782)
+
+	$$82  goto 665
+
+state 661
+	else:  LELSE compound_stmt.    (87)
+
+	.  reduce 87 (src line 809)
+
+
+state 662
+	complitexpr:  '{' start_complit braced_keyval_list '}'.    (145)
+
+	.  reduce 145 (src line 1110)
+
+
+state 663
+	hidden_interfacedcl:  sym '(' ohidden_funarg_list ')' ohidden_funres.    (333)
+
+	.  reduce 333 (src line 2196)
+
+
+state 664
+	hidden_fndcl:  '(' hidden_funarg_list ')' sym '(' ohidden_funarg_list ')' ohidden_funres.    (208)
+
+	.  reduce 208 (src line 1493)
+
+
+state 665
+	elseif:  LELSE LIF $$82.if_header loop_body 
+	osimple_stmt: .    (294)
+
+	LLITERAL  shift 68
+	LCHAN  shift 78
+	LFUNC  shift 124
+	LINTERFACE  shift 83
+	LMAP  shift 79
+	LNAME  shift 10
+	LSTRUCT  shift 82
+	LCOMM  shift 65
+	'+'  shift 60
+	'-'  shift 61
+	'^'  shift 64
+	'*'  shift 58
+	'&'  shift 59
+	'('  shift 67
+	'!'  shift 62
+	'~'  shift 63
+	'['  shift 77
+	'?'  shift 12
+	'@'  shift 13
+	.  reduce 294 (src line 1973)
+
+	sym  goto 123
+	expr  goto 48
+	fnliteral  goto 73
+	if_header  goto 666
+	name  goto 69
+	osimple_stmt  goto 286
+	pexpr  goto 57
+	pexpr_no_paren  goto 66
+	pseudocall  goto 70
+	simple_stmt  goto 282
+	uexpr  goto 55
+	expr_list  goto 49
+	convtype  goto 71
+	comptype  goto 72
+	interfacetype  goto 81
+	structtype  goto 80
+	othertype  goto 75
+	fntype  goto 74
+	hidden_importsym  goto 11
+	fnlitdcl  goto 76
+
+state 666
+	elseif:  LELSE LIF $$82 if_header.loop_body 
+
+	LBODY  shift 382
+	.  error
+
+	loop_body  goto 667
+
+state 667
+	elseif:  LELSE LIF $$82 if_header loop_body.    (83)
+
+	.  reduce 83 (src line 787)
+
+
+76 terminals, 142 nonterminals
+352 grammar rules, 668/2000 states
+0 shift/reduce, 0 reduce/reduce conflicts reported
+191 working sets used
+memory: parser 3749/30000
+446 extra closures
+3093 shift entries, 64 exceptions
+603 goto entries
+1650 entries saved by goto default
+Optimizer space used: output 2282/30000
+2282 table entries, 722 zero
+maximum spread: 76, maximum offset: 666
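
The listing above is the generated yacc state dump (y.output-style) for the Go grammar. For orientation, a few of the productions reduced in these states map directly onto familiar Go source forms; a minimal sketch (names are illustrative only, not part of this CL):

	package main

	func grammarExamples(xs []int, b []byte) {
		var i, v int
		for i, v = range xs { // rule 67: range_stmt: expr_list '=' LRANGE expr
			_, _ = i, v
		}
		for j, w := range xs { // rule 68: range_stmt: expr_list LCOLAS LRANGE expr
			_, _ = j, w
		}
		_ = b[1:2:3] // rule 133: pexpr_no_paren: pexpr '[' oexpr ':' oexpr ':' oexpr ']'
	}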
diff --git a/src/cmd/compile/internal/ppc64/cgen.go b/src/cmd/compile/internal/ppc64/cgen.go
new file mode 100644
index 0000000..37dd6ce
--- /dev/null
+++ b/src/cmd/compile/internal/ppc64/cgen.go
@@ -0,0 +1,149 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ppc64
+
+import (
+	"cmd/compile/internal/gc"
+	"cmd/internal/obj"
+	"cmd/internal/obj/ppc64"
+)
+
+func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
+	// Determine alignment.
+	// We want to avoid unaligned access, so we have to use
+	// smaller operations for less-aligned types.
+	// For example, moving [4]byte must use 4 MOVB, not 1 MOVW.
+	align := int(n.Type.Align)
+
+	var op int
+	switch align {
+	default:
+		gc.Fatal("sgen: invalid alignment %d for %v", align, n.Type)
+
+	case 1:
+		op = ppc64.AMOVBU
+
+	case 2:
+		op = ppc64.AMOVHU
+
+	case 4:
+		op = ppc64.AMOVWZU // there is no lwau, only lwaux
+
+	case 8:
+		op = ppc64.AMOVDU
+	}
+
+	if w%int64(align) != 0 {
+		gc.Fatal("sgen: unaligned size %d (align=%d) for %v", w, align, n.Type)
+	}
+	c := int32(w / int64(align))
+
+	// if we are copying forward on the stack and
+	// the src and dst overlap, then reverse direction
+	dir := align
+
+	if osrc < odst && odst < osrc+w {
+		dir = -dir
+	}
+
+	var dst gc.Node
+	var src gc.Node
+	if n.Ullman >= res.Ullman {
+		gc.Agenr(n, &dst, res) // temporarily use dst
+		gc.Regalloc(&src, gc.Types[gc.Tptr], nil)
+		gins(ppc64.AMOVD, &dst, &src)
+		if res.Op == gc.ONAME {
+			gc.Gvardef(res)
+		}
+		gc.Agen(res, &dst)
+	} else {
+		if res.Op == gc.ONAME {
+			gc.Gvardef(res)
+		}
+		gc.Agenr(res, &dst, res)
+		gc.Agenr(n, &src, nil)
+	}
+
+	var tmp gc.Node
+	gc.Regalloc(&tmp, gc.Types[gc.Tptr], nil)
+
+	// set up end marker
+	var nend gc.Node
+
+	// move src and dest to the end of block if necessary
+	if dir < 0 {
+		if c >= 4 {
+			gc.Regalloc(&nend, gc.Types[gc.Tptr], nil)
+			gins(ppc64.AMOVD, &src, &nend)
+		}
+
+		p := gins(ppc64.AADD, nil, &src)
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = w
+
+		p = gins(ppc64.AADD, nil, &dst)
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = w
+	} else {
+		p := gins(ppc64.AADD, nil, &src)
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = int64(-dir)
+
+		p = gins(ppc64.AADD, nil, &dst)
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = int64(-dir)
+
+		if c >= 4 {
+			gc.Regalloc(&nend, gc.Types[gc.Tptr], nil)
+			p := gins(ppc64.AMOVD, &src, &nend)
+			p.From.Type = obj.TYPE_ADDR
+			p.From.Offset = w
+		}
+	}
+
+	// move
+	// TODO: enable duffcopy for larger copies.
+	if c >= 4 {
+		p := gins(op, &src, &tmp)
+		p.From.Type = obj.TYPE_MEM
+		p.From.Offset = int64(dir)
+		ploop := p
+
+		p = gins(op, &tmp, &dst)
+		p.To.Type = obj.TYPE_MEM
+		p.To.Offset = int64(dir)
+
+		p = gins(ppc64.ACMP, &src, &nend)
+
+		gc.Patch(gc.Gbranch(ppc64.ABNE, nil, 0), ploop)
+		gc.Regfree(&nend)
+	} else {
+		// TODO(austin): Instead of generating ADD $-8,R8; ADD
+		// $-8,R7; n*(MOVDU 8(R8),R9; MOVDU R9,8(R7);) just
+		// generate the offsets directly and eliminate the
+		// ADDs.  That will produce shorter, more
+		// pipeline-able code.
+		var p *obj.Prog
+		for ; c > 0; c-- {
+			p = gins(op, &src, &tmp)
+			p.From.Type = obj.TYPE_MEM
+			p.From.Offset = int64(dir)
+
+			p = gins(op, &tmp, &dst)
+			p.To.Type = obj.TYPE_MEM
+			p.To.Offset = int64(dir)
+		}
+	}
+
+	gc.Regfree(&dst)
+	gc.Regfree(&src)
+	gc.Regfree(&tmp)
+}
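+
+// mustCopyBackward is a rough standalone model (illustrative only) of the
+// direction choice above: a forward copy clobbers not-yet-read source bytes
+// exactly when the destination begins inside [osrc, osrc+w).
+func mustCopyBackward(osrc, odst, w int64) bool {
+	return osrc < odst && odst < osrc+w
+}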
diff --git a/src/cmd/compile/internal/ppc64/galign.go b/src/cmd/compile/internal/ppc64/galign.go
new file mode 100644
index 0000000..73aef6f
--- /dev/null
+++ b/src/cmd/compile/internal/ppc64/galign.go
@@ -0,0 +1,100 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ppc64
+
+import (
+	"cmd/compile/internal/gc"
+	"cmd/internal/obj"
+	"cmd/internal/obj/ppc64"
+)
+
+var thechar int = '9'
+
+var thestring string = "ppc64"
+
+var thelinkarch *obj.LinkArch
+
+func linkarchinit() {
+	thestring = obj.Getgoarch()
+	gc.Thearch.Thestring = thestring
+	if thestring == "ppc64le" {
+		thelinkarch = &ppc64.Linkppc64le
+	} else {
+		thelinkarch = &ppc64.Linkppc64
+	}
+	gc.Thearch.Thelinkarch = thelinkarch
+}
+
+var MAXWIDTH int64 = 1 << 50
+
+/*
+ * Go declares several platform-specific type aliases:
+ * int, uint, and uintptr.
+ */
+var typedefs = []gc.Typedef{
+	gc.Typedef{"int", gc.TINT, gc.TINT64},
+	gc.Typedef{"uint", gc.TUINT, gc.TUINT64},
+	gc.Typedef{"uintptr", gc.TUINTPTR, gc.TUINT64},
+}
+
+func betypeinit() {
+	gc.Widthptr = 8
+	gc.Widthint = 8
+	gc.Widthreg = 8
+}
+
+func Main() {
+	gc.Thearch.Thechar = thechar
+	gc.Thearch.Thestring = thestring
+	gc.Thearch.Thelinkarch = thelinkarch
+	gc.Thearch.Typedefs = typedefs
+	gc.Thearch.REGSP = ppc64.REGSP
+	gc.Thearch.REGCTXT = ppc64.REGCTXT
+	gc.Thearch.REGCALLX = ppc64.REG_R3
+	gc.Thearch.REGCALLX2 = ppc64.REG_R4
+	gc.Thearch.REGRETURN = ppc64.REG_R3
+	gc.Thearch.REGMIN = ppc64.REG_R0
+	gc.Thearch.REGMAX = ppc64.REG_R31
+	gc.Thearch.FREGMIN = ppc64.REG_F0
+	gc.Thearch.FREGMAX = ppc64.REG_F31
+	gc.Thearch.MAXWIDTH = MAXWIDTH
+	gc.Thearch.ReservedRegs = resvd
+
+	gc.Thearch.Betypeinit = betypeinit
+	gc.Thearch.Cgen_hmul = cgen_hmul
+	gc.Thearch.Cgen_shift = cgen_shift
+	gc.Thearch.Clearfat = clearfat
+	gc.Thearch.Defframe = defframe
+	gc.Thearch.Dodiv = dodiv
+	gc.Thearch.Excise = excise
+	gc.Thearch.Expandchecks = expandchecks
+	gc.Thearch.Getg = getg
+	gc.Thearch.Gins = gins
+	gc.Thearch.Ginscmp = ginscmp
+	gc.Thearch.Ginscon = ginscon
+	gc.Thearch.Ginsnop = ginsnop
+	gc.Thearch.Gmove = gmove
+	gc.Thearch.Linkarchinit = linkarchinit
+	gc.Thearch.Peep = peep
+	gc.Thearch.Proginfo = proginfo
+	gc.Thearch.Regtyp = regtyp
+	gc.Thearch.Sameaddr = sameaddr
+	gc.Thearch.Smallindir = smallindir
+	gc.Thearch.Stackaddr = stackaddr
+	gc.Thearch.Blockcopy = blockcopy
+	gc.Thearch.Sudoaddable = sudoaddable
+	gc.Thearch.Sudoclean = sudoclean
+	gc.Thearch.Excludedregs = excludedregs
+	gc.Thearch.RtoB = RtoB
+	gc.Thearch.FtoB = RtoB
+	gc.Thearch.BtoR = BtoR
+	gc.Thearch.BtoF = BtoF
+	gc.Thearch.Optoas = optoas
+	gc.Thearch.Doregbits = doregbits
+	gc.Thearch.Regnames = regnames
+
+	gc.Main()
+	gc.Exit(0)
+}
diff --git a/src/cmd/compile/internal/ppc64/ggen.go b/src/cmd/compile/internal/ppc64/ggen.go
new file mode 100644
index 0000000..1b936b8
--- /dev/null
+++ b/src/cmd/compile/internal/ppc64/ggen.go
@@ -0,0 +1,558 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ppc64
+
+import (
+	"cmd/compile/internal/gc"
+	"cmd/internal/obj"
+	"cmd/internal/obj/ppc64"
+	"fmt"
+)
+
+func defframe(ptxt *obj.Prog) {
+	var n *gc.Node
+
+	// fill in argument size, stack size
+	ptxt.To.Type = obj.TYPE_TEXTSIZE
+
+	ptxt.To.Val = int32(gc.Rnd(gc.Curfn.Type.Argwid, int64(gc.Widthptr)))
+	frame := uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
+	ptxt.To.Offset = int64(frame)
+
+	// insert code to zero ambiguously live variables
+	// so that the garbage collector only sees initialized values
+	// when it looks for pointers.
+	p := ptxt
+
+	hi := int64(0)
+	lo := hi
+
+	// iterate through declarations - they are sorted in decreasing xoffset order.
+	for l := gc.Curfn.Func.Dcl; l != nil; l = l.Next {
+		n = l.N
+		if !n.Name.Needzero {
+			continue
+		}
+		if n.Class != gc.PAUTO {
+			gc.Fatal("needzero class %d", n.Class)
+		}
+		if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
+			gc.Fatal("var %v has size %d offset %d", gc.Nconv(n, obj.FmtLong), int(n.Type.Width), int(n.Xoffset))
+		}
+
+		if lo != hi && n.Xoffset+n.Type.Width >= lo-int64(2*gc.Widthreg) {
+			// merge with range we already have
+			lo = n.Xoffset
+
+			continue
+		}
+
+		// zero old range
+		p = zerorange(p, int64(frame), lo, hi)
+
+		// set new range
+		hi = n.Xoffset + n.Type.Width
+
+		lo = n.Xoffset
+	}
+
+	// zero final range
+	zerorange(p, int64(frame), lo, hi)
+}
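+
+// canMergeZeroRange is a rough model (illustrative only) of the merge test in
+// defframe above: declarations arrive in decreasing xoffset order, and a
+// variable at [xoff, xoff+width) is folded into the pending range [lo, hi)
+// when the gap left below lo is at most two register widths.
+func canMergeZeroRange(lo, hi, xoff, width int64) bool {
+	return lo != hi && xoff+width >= lo-int64(2*gc.Widthreg)
+}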
+
+func zerorange(p *obj.Prog, frame int64, lo int64, hi int64) *obj.Prog {
+	cnt := hi - lo
+	if cnt == 0 {
+		return p
+	}
+	if cnt < int64(4*gc.Widthptr) {
+		for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
+			p = appendpp(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, 8+frame+lo+i)
+		}
+	} else if cnt <= int64(128*gc.Widthptr) {
+		p = appendpp(p, ppc64.AADD, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, ppc64.REGRT1, 0)
+		p.Reg = ppc64.REGSP
+		p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
+		f := gc.Sysfunc("duffzero")
+		gc.Naddr(&p.To, f)
+		gc.Afunclit(&p.To, f)
+		p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
+	} else {
+		p = appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, ppc64.REGTMP, 0)
+		p = appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT1, 0)
+		p.Reg = ppc64.REGSP
+		p = appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, ppc64.REGTMP, 0)
+		p = appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
+		p.Reg = ppc64.REGRT1
+		p = appendpp(p, ppc64.AMOVDU, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGRT1, int64(gc.Widthptr))
+		p1 := p
+		p = appendpp(p, ppc64.ACMP, obj.TYPE_REG, ppc64.REGRT1, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
+		p = appendpp(p, ppc64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
+		gc.Patch(p, p1)
+	}
+
+	return p
+}
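+
+// zeroStrategy is a rough model (illustrative only) of the size classes
+// zerorange chooses between above, for cnt bytes to be cleared.
+func zeroStrategy(cnt int64) string {
+	switch {
+	case cnt == 0:
+		return "nothing to do"
+	case cnt < int64(4*gc.Widthptr):
+		return "unrolled MOVD stores"
+	case cnt <= int64(128*gc.Widthptr):
+		return "DUFFZERO entered partway in"
+	default:
+		return "explicit store loop"
+	}
+}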
+
+func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int64, ttype int, treg int, toffset int64) *obj.Prog {
+	q := gc.Ctxt.NewProg()
+	gc.Clearp(q)
+	q.As = int16(as)
+	q.Lineno = p.Lineno
+	q.From.Type = int16(ftype)
+	q.From.Reg = int16(freg)
+	q.From.Offset = foffset
+	q.To.Type = int16(ttype)
+	q.To.Reg = int16(treg)
+	q.To.Offset = toffset
+	q.Link = p.Link
+	p.Link = q
+	return q
+}
+
+func ginsnop() {
+	var reg gc.Node
+	gc.Nodreg(&reg, gc.Types[gc.TINT], ppc64.REG_R0)
+	gins(ppc64.AOR, &reg, &reg)
+}
+
+var panicdiv *gc.Node
+
+/*
+ * generate division.
+ * generates one of:
+ *	res = nl / nr
+ *	res = nl % nr
+ * according to op.
+ */
+func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+	// Have to be careful about handling
+	// the most negative int divided by -1 correctly.
+	// The hardware will generate an undefined result.
+	// Also need to explicitly trap on division by zero;
+	// the hardware will silently generate an undefined result.
+	// DIVW will leave an unpredictable result in the higher 32 bits,
+	// so always use DIVD/DIVDU.
+	t := nl.Type
+
+	t0 := t
+	check := 0
+	if gc.Issigned[t.Etype] {
+		check = 1
+		if gc.Isconst(nl, gc.CTINT) && nl.Int() != -(1<<uint64(t.Width*8-1)) {
+			check = 0
+		} else if gc.Isconst(nr, gc.CTINT) && nr.Int() != -1 {
+			check = 0
+		}
+	}
+
+	if t.Width < 8 {
+		if gc.Issigned[t.Etype] {
+			t = gc.Types[gc.TINT64]
+		} else {
+			t = gc.Types[gc.TUINT64]
+		}
+		check = 0
+	}
+
+	a := optoas(gc.ODIV, t)
+
+	var tl gc.Node
+	gc.Regalloc(&tl, t0, nil)
+	var tr gc.Node
+	gc.Regalloc(&tr, t0, nil)
+	if nl.Ullman >= nr.Ullman {
+		gc.Cgen(nl, &tl)
+		gc.Cgen(nr, &tr)
+	} else {
+		gc.Cgen(nr, &tr)
+		gc.Cgen(nl, &tl)
+	}
+
+	if t != t0 {
+		// Convert
+		tl2 := tl
+
+		tr2 := tr
+		tl.Type = t
+		tr.Type = t
+		gmove(&tl2, &tl)
+		gmove(&tr2, &tr)
+	}
+
+	// Handle divide-by-zero panic.
+	p1 := gins(optoas(gc.OCMP, t), &tr, nil)
+
+	p1.To.Type = obj.TYPE_REG
+	p1.To.Reg = ppc64.REGZERO
+	p1 = gc.Gbranch(optoas(gc.ONE, t), nil, +1)
+	if panicdiv == nil {
+		panicdiv = gc.Sysfunc("panicdivide")
+	}
+	gc.Ginscall(panicdiv, -1)
+	gc.Patch(p1, gc.Pc)
+
+	var p2 *obj.Prog
+	if check != 0 {
+		var nm1 gc.Node
+		gc.Nodconst(&nm1, t, -1)
+		gins(optoas(gc.OCMP, t), &tr, &nm1)
+		p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
+		if op == gc.ODIV {
+			// a / (-1) is -a.
+			gins(optoas(gc.OMINUS, t), nil, &tl)
+
+			gmove(&tl, res)
+		} else {
+			// a % (-1) is 0.
+			var nz gc.Node
+			gc.Nodconst(&nz, t, 0)
+
+			gmove(&nz, res)
+		}
+
+		p2 = gc.Gbranch(obj.AJMP, nil, 0)
+		gc.Patch(p1, gc.Pc)
+	}
+
+	p1 = gins(a, &tr, &tl)
+	if op == gc.ODIV {
+		gc.Regfree(&tr)
+		gmove(&tl, res)
+	} else {
+		// A%B = A-(A/B*B)
+		var tm gc.Node
+		gc.Regalloc(&tm, t, nil)
+
+		// patch div to use the 3 register form
+		// TODO(minux): add gins3?
+		p1.Reg = p1.To.Reg
+
+		p1.To.Reg = tm.Reg
+		gins(optoas(gc.OMUL, t), &tr, &tm)
+		gc.Regfree(&tr)
+		gins(optoas(gc.OSUB, t), &tm, &tl)
+		gc.Regfree(&tm)
+		gmove(&tl, res)
+	}
+
+	gc.Regfree(&tl)
+	if check != 0 {
+		gc.Patch(p2, gc.Pc)
+	}
+}
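+
+// checkedDiv is a rough pure-Go model (illustrative only) of the sequence
+// dodiv emits for signed division: trap a zero divisor explicitly, and
+// special-case -1 so the hardware never sees MinInt64 / -1.
+func checkedDiv(a, b int64) int64 {
+	if b == 0 {
+		panic("integer divide by zero")
+	}
+	if b == -1 {
+		return -a // a / -1 == -a; MinInt64 wraps, matching Go semantics
+	}
+	return a / b
+}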
+
+/*
+ * generate high multiply:
+ *   res = (nl*nr) >> width
+ */
+func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
+	// largest ullman on left.
+	if nl.Ullman < nr.Ullman {
+		nl, nr = nr, nl
+	}
+
+	t := nl.Type
+	w := int(t.Width * 8)
+	var n1 gc.Node
+	gc.Cgenr(nl, &n1, res)
+	var n2 gc.Node
+	gc.Cgenr(nr, &n2, nil)
+	switch gc.Simtype[t.Etype] {
+	case gc.TINT8,
+		gc.TINT16,
+		gc.TINT32:
+		gins(optoas(gc.OMUL, t), &n2, &n1)
+		p := (*obj.Prog)(gins(ppc64.ASRAD, nil, &n1))
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = int64(w)
+
+	case gc.TUINT8,
+		gc.TUINT16,
+		gc.TUINT32:
+		gins(optoas(gc.OMUL, t), &n2, &n1)
+		p := (*obj.Prog)(gins(ppc64.ASRD, nil, &n1))
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = int64(w)
+
+	case gc.TINT64,
+		gc.TUINT64:
+		if gc.Issigned[t.Etype] {
+			gins(ppc64.AMULHD, &n2, &n1)
+		} else {
+			gins(ppc64.AMULHDU, &n2, &n1)
+		}
+
+	default:
+		gc.Fatal("cgen_hmul %v", t)
+	}
+
+	gc.Cgen(&n1, res)
+	gc.Regfree(&n1)
+	gc.Regfree(&n2)
+}
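+
+// hmul32 is a rough model (illustrative only) of what cgen_hmul computes for
+// 32-bit operands: multiply in a double-width register, then keep only the
+// high half, as the OMUL followed by SRAD above does.
+func hmul32(a, b int32) int32 {
+	return int32((int64(a) * int64(b)) >> 32)
+}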
+
+/*
+ * generate shift according to op, one of:
+ *	res = nl << nr
+ *	res = nl >> nr
+ */
+func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+	a := int(optoas(op, nl.Type))
+
+	if nr.Op == gc.OLITERAL {
+		var n1 gc.Node
+		gc.Regalloc(&n1, nl.Type, res)
+		gc.Cgen(nl, &n1)
+		sc := uint64(nr.Int())
+		if sc >= uint64(nl.Type.Width*8) {
+			// large shift gets 2 shifts by width-1
+			var n3 gc.Node
+			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
+
+			gins(a, &n3, &n1)
+			gins(a, &n3, &n1)
+		} else {
+			gins(a, nr, &n1)
+		}
+		gmove(&n1, res)
+		gc.Regfree(&n1)
+		return
+	}
+
+	if nl.Ullman >= gc.UINF {
+		var n4 gc.Node
+		gc.Tempname(&n4, nl.Type)
+		gc.Cgen(nl, &n4)
+		nl = &n4
+	}
+
+	if nr.Ullman >= gc.UINF {
+		var n5 gc.Node
+		gc.Tempname(&n5, nr.Type)
+		gc.Cgen(nr, &n5)
+		nr = &n5
+	}
+
+	// Allow either uint32 or uint64 as shift type,
+	// to avoid unnecessary conversion from uint32 to uint64
+	// just to do the comparison.
+	tcount := gc.Types[gc.Simtype[nr.Type.Etype]]
+
+	if tcount.Etype < gc.TUINT32 {
+		tcount = gc.Types[gc.TUINT32]
+	}
+
+	var n1 gc.Node
+	gc.Regalloc(&n1, nr.Type, nil) // to hold the shift count
+	var n3 gc.Node
+	gc.Regalloc(&n3, tcount, &n1) // to clear high bits of the shift count
+
+	var n2 gc.Node
+	gc.Regalloc(&n2, nl.Type, res)
+
+	if nl.Ullman >= nr.Ullman {
+		gc.Cgen(nl, &n2)
+		gc.Cgen(nr, &n1)
+		gmove(&n1, &n3)
+	} else {
+		gc.Cgen(nr, &n1)
+		gmove(&n1, &n3)
+		gc.Cgen(nl, &n2)
+	}
+
+	gc.Regfree(&n3)
+
+	// test and fix up large shifts
+	if !bounded {
+		gc.Nodconst(&n3, tcount, nl.Type.Width*8)
+		gins(optoas(gc.OCMP, tcount), &n1, &n3)
+		p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, tcount), nil, +1))
+		if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
+			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
+			gins(a, &n3, &n2)
+		} else {
+			gc.Nodconst(&n3, nl.Type, 0)
+			gmove(&n3, &n2)
+		}
+
+		gc.Patch(p1, gc.Pc)
+	}
+
+	gins(a, &n1, &n2)
+
+	gmove(&n2, res)
+
+	gc.Regfree(&n1)
+	gc.Regfree(&n2)
+}
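+
+// shiftModel is a rough model (illustrative only) of the large-shift fixup
+// above: a shift by width or more yields 0, except that a signed right shift
+// saturates at width-1 and yields all sign bits.
+func shiftModel(x int64, s uint, signedRight bool) int64 {
+	if s >= 64 {
+		if signedRight {
+			return x >> 63 // all sign bits: 0 or -1
+		}
+		return 0
+	}
+	if signedRight {
+		return x >> s
+	}
+	return x << s
+}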
+
+func clearfat(nl *gc.Node) {
+	/* clear a fat object */
+	if gc.Debug['g'] != 0 {
+		fmt.Printf("clearfat %v (%v, size: %d)\n", nl, nl.Type, nl.Type.Width)
+	}
+
+	w := uint64(nl.Type.Width)
+
+	// Avoid taking the address for simple enough types.
+	if gc.Componentgen(nil, nl) {
+		return
+	}
+
+	c := uint64(w % 8) // bytes
+	q := uint64(w / 8) // dwords
+
+	if gc.Reginuse(ppc64.REGRT1) {
+		gc.Fatal("%v in use during clearfat", obj.Rconv(ppc64.REGRT1))
+	}
+
+	var r0 gc.Node
+	gc.Nodreg(&r0, gc.Types[gc.TUINT64], ppc64.REGZERO)
+	var dst gc.Node
+	gc.Nodreg(&dst, gc.Types[gc.Tptr], ppc64.REGRT1)
+	gc.Regrealloc(&dst)
+	gc.Agen(nl, &dst)
+
+	var boff uint64
+	if q > 128 {
+		p := gins(ppc64.ASUB, nil, &dst)
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = 8
+
+		var end gc.Node
+		gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
+		p = gins(ppc64.AMOVD, &dst, &end)
+		p.From.Type = obj.TYPE_ADDR
+		p.From.Offset = int64(q * 8)
+
+		p = gins(ppc64.AMOVDU, &r0, &dst)
+		p.To.Type = obj.TYPE_MEM
+		p.To.Offset = 8
+		pl := p
+
+		p = gins(ppc64.ACMP, &dst, &end)
+		gc.Patch(gc.Gbranch(ppc64.ABNE, nil, 0), pl)
+
+		gc.Regfree(&end)
+
+		// The loop leaves R3 on the last zeroed dword
+		boff = 8
+	} else if q >= 4 {
+		p := gins(ppc64.ASUB, nil, &dst)
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = 8
+		f := gc.Sysfunc("duffzero")
+		p = gins(obj.ADUFFZERO, nil, f)
+		gc.Afunclit(&p.To, f)
+
+		// 4 and 128 = magic constants: see ../../runtime/asm_ppc64x.s
+		p.To.Offset = int64(4 * (128 - q))
+
+		// duffzero leaves R3 on the last zeroed dword
+		boff = 8
+	} else {
+		var p *obj.Prog
+		for t := uint64(0); t < q; t++ {
+			p = gins(ppc64.AMOVD, &r0, &dst)
+			p.To.Type = obj.TYPE_MEM
+			p.To.Offset = int64(8 * t)
+		}
+
+		boff = 8 * q
+	}
+
+	var p *obj.Prog
+	for t := uint64(0); t < c; t++ {
+		p = gins(ppc64.AMOVB, &r0, &dst)
+		p.To.Type = obj.TYPE_MEM
+		p.To.Offset = int64(t + boff)
+	}
+
+	gc.Regfree(&dst)
+}
+
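
The DUFFZERO entry offset in the middle branch is worth spelling out. A
sketch of the computation, assuming (per the magic-constant comment) 128
entries of 4 bytes each, every entry zeroing one dword (illustrative
only):

	// Jumping 4*(128-q) bytes into duffzero executes exactly
	// the last q entries, clearing 8*q bytes.
	func duffzeroEntry(q uint64) int64 {
		const entries, entryBytes = 128, 4
		return int64(entryBytes * (entries - q))
	}
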
+// Called after regopt and peep have run.
+// Expand CHECKNIL pseudo-op into actual nil pointer check.
+func expandchecks(firstp *obj.Prog) {
+	var p1 *obj.Prog
+	var p2 *obj.Prog
+
+	for p := firstp; p != nil; p = p.Link {
+		if gc.Debug_checknil != 0 && gc.Ctxt.Debugvlog != 0 {
+			fmt.Printf("expandchecks: %v\n", p)
+		}
+		if p.As != obj.ACHECKNIL {
+			continue
+		}
+		if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
+			gc.Warnl(int(p.Lineno), "generated nil check")
+		}
+		if p.From.Type != obj.TYPE_REG {
+			gc.Fatal("invalid nil check %v\n", p)
+		}
+
+		/*
+			// check is
+			//	TD $4, R0, arg (R0 is always zero)
+			// eqv. to:
+			// 	tdeq r0, arg
+			// NOTE: this needs special runtime support to make SIGTRAP recoverable.
+			reg = p->from.reg;
+			p->as = ATD;
+			p->from = p->to = p->from3 = zprog.from;
+			p->from.type = TYPE_CONST;
+			p->from.offset = 4;
+			p->from.reg = 0;
+			p->reg = REGZERO;
+			p->to.type = TYPE_REG;
+			p->to.reg = reg;
+		*/
+		// check is
+		//	CMP arg, R0
+		//	BNE 2(PC) [likely]
+		//	MOVD R0, 0(R0)
+		p1 = gc.Ctxt.NewProg()
+
+		p2 = gc.Ctxt.NewProg()
+		gc.Clearp(p1)
+		gc.Clearp(p2)
+		p1.Link = p2
+		p2.Link = p.Link
+		p.Link = p1
+		p1.Lineno = p.Lineno
+		p2.Lineno = p.Lineno
+		p1.Pc = 9999
+		p2.Pc = 9999
+		p.As = ppc64.ACMP
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = ppc64.REGZERO
+		p1.As = ppc64.ABNE
+
+		//p1->from.type = TYPE_CONST;
+		//p1->from.offset = 1; // likely
+		p1.To.Type = obj.TYPE_BRANCH
+
+		p1.To.Val = p2.Link
+
+		// crash by write to memory address 0.
+		p2.As = ppc64.AMOVD
+
+		p2.From.Type = obj.TYPE_REG
+		p2.From.Reg = ppc64.REGZERO
+		p2.To.Type = obj.TYPE_MEM
+		p2.To.Reg = ppc64.REGZERO
+		p2.To.Offset = 0
+	}
+}
+
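
The effect of the expanded sequence, expressed in Go (illustrative only;
R0 always reads as zero on ppc64, so MOVD R0, 0(R0) is a store through
address zero and faults):

	func nilcheck(p *int64) {
		if p == nil { // CMP arg, R0; BNE 2(PC)
			*(*int64)(nil) = 0 // MOVD R0, 0(R0): crashes, as intended
		}
	}
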
+// res = runtime.getg()
+func getg(res *gc.Node) {
+	var n1 gc.Node
+	gc.Nodreg(&n1, res.Type, ppc64.REGG)
+	gmove(&n1, res)
+}
diff --git a/src/cmd/compile/internal/ppc64/gsubr.go b/src/cmd/compile/internal/ppc64/gsubr.go
new file mode 100644
index 0000000..2501972
--- /dev/null
+++ b/src/cmd/compile/internal/ppc64/gsubr.go
@@ -0,0 +1,1031 @@
+// Derived from Inferno utils/6c/txt.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/txt.c
+//
+//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
+//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+//	Portions Copyright © 1997-1999 Vita Nuova Limited
+//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+//	Portions Copyright © 2004,2006 Bruce Ellis
+//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+//	Portions Copyright © 2009 The Go Authors.  All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package ppc64
+
+import (
+	"cmd/compile/internal/big"
+	"cmd/compile/internal/gc"
+	"cmd/internal/obj"
+	"cmd/internal/obj/ppc64"
+	"fmt"
+)
+
+var resvd = []int{
+	ppc64.REGZERO,
+	ppc64.REGSP, // reserved for SP
+	// We need to preserve the C ABI TLS pointer because sigtramp
+	// may happen during C code and needs to access the g.  C
+	// clobbers REGG, so if Go were to clobber REGTLS, sigtramp
+	// won't know which convention to use.  By preserving REGTLS,
+	// we can just retrieve g from TLS when we aren't sure.
+	ppc64.REGTLS,
+
+	// TODO(austin): Consolidate REGTLS and REGG?
+	ppc64.REGG,
+	ppc64.REGTMP, // REGTMP
+	ppc64.FREGCVI,
+	ppc64.FREGZERO,
+	ppc64.FREGHALF,
+	ppc64.FREGONE,
+	ppc64.FREGTWO,
+}
+
+/*
+ * generate
+ *	as $c, n
+ */
+func ginscon(as int, c int64, n2 *gc.Node) {
+	var n1 gc.Node
+
+	gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
+
+	if as != ppc64.AMOVD && (c < -ppc64.BIG || c > ppc64.BIG) || n2.Op != gc.OREGISTER || as == ppc64.AMULLD {
+		// cannot fit more than 16 bits of immediate in ADD, etc.;
+		// MOV into a register first instead.
+		var ntmp gc.Node
+		gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)
+
+		rawgins(ppc64.AMOVD, &n1, &ntmp)
+		rawgins(as, &ntmp, n2)
+		gc.Regfree(&ntmp)
+		return
+	}
+
+	rawgins(as, &n1, n2)
+}
+
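
Both ginscon above and ginscon2 below gate on ppc64.BIG. A sketch of the
ranges being tested, assuming BIG is roughly the bound of a signed
16-bit D-form immediate (illustrative only):

	// Signed forms (ADD, CMP, ...) accept [-BIG, BIG];
	// unsigned compares (CMPU) accept [0, 2*BIG].
	func fitsSigned(c, big int64) bool   { return -big <= c && c <= big }
	func fitsUnsigned(c, big int64) bool { return 0 <= c && c <= 2*big }
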
+/*
+ * generate
+ *	as n, $c (CMP/CMPU)
+ */
+func ginscon2(as int, n2 *gc.Node, c int64) {
+	var n1 gc.Node
+
+	gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
+
+	switch as {
+	default:
+		gc.Fatal("ginscon2")
+
+	case ppc64.ACMP:
+		if -ppc64.BIG <= c && c <= ppc64.BIG {
+			rawgins(as, n2, &n1)
+			return
+		}
+
+	case ppc64.ACMPU:
+		if 0 <= c && c <= 2*ppc64.BIG {
+			rawgins(as, n2, &n1)
+			return
+		}
+	}
+
+	// MOV n1 into register first
+	var ntmp gc.Node
+	gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)
+
+	rawgins(ppc64.AMOVD, &n1, &ntmp)
+	rawgins(as, n2, &ntmp)
+	gc.Regfree(&ntmp)
+}
+
+func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
+	if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && n2.Op != gc.OLITERAL {
+		// Reverse comparison to place constant last.
+		op = gc.Brrev(op)
+		n1, n2 = n2, n1
+	}
+
+	var r1, r2, g1, g2 gc.Node
+	gc.Regalloc(&r1, t, n1)
+	gc.Regalloc(&g1, n1.Type, &r1)
+	gc.Cgen(n1, &g1)
+	gmove(&g1, &r1)
+	if gc.Isint[t.Etype] && gc.Isconst(n2, gc.CTINT) {
+		ginscon2(optoas(gc.OCMP, t), &r1, n2.Int())
+	} else {
+		gc.Regalloc(&r2, t, n2)
+		gc.Regalloc(&g2, n1.Type, &r2)
+		gc.Cgen(n2, &g2)
+		gmove(&g2, &r2)
+		rawgins(optoas(gc.OCMP, t), &r1, &r2)
+		gc.Regfree(&g2)
+		gc.Regfree(&r2)
+	}
+	gc.Regfree(&g1)
+	gc.Regfree(&r1)
+	return gc.Gbranch(optoas(op, t), nil, likely)
+}
+
+// set up nodes representing 2^63
+var (
+	bigi         gc.Node
+	bigf         gc.Node
+	bignodes_did bool
+)
+
+func bignodes() {
+	if bignodes_did {
+		return
+	}
+	bignodes_did = true
+
+	var i big.Int
+	i.SetInt64(1)
+	i.Lsh(&i, 63)
+
+	gc.Nodconst(&bigi, gc.Types[gc.TUINT64], 0)
+	bigi.SetBigInt(&i)
+
+	bigi.Convconst(&bigf, gc.Types[gc.TFLOAT64])
+}
+
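
A stand-alone sketch of the constant bignodes materializes (illustrative
only):

	import "math/big"

	func twoTo63() *big.Int {
		i := big.NewInt(1)
		return i.Lsh(i, 63) // 1<<63 == 9223372036854775808
	}
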
+/*
+ * generate move:
+ *	t = f
+ * hard part is conversions.
+ */
+func gmove(f *gc.Node, t *gc.Node) {
+	if gc.Debug['M'] != 0 {
+		fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, obj.FmtLong), gc.Nconv(t, obj.FmtLong))
+	}
+
+	ft := int(gc.Simsimtype(f.Type))
+	tt := int(gc.Simsimtype(t.Type))
+	cvt := (*gc.Type)(t.Type)
+
+	if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
+		gc.Complexmove(f, t)
+		return
+	}
+
+	// cannot have two memory operands
+	var r2 gc.Node
+	var r1 gc.Node
+	var a int
+	if gc.Ismem(f) && gc.Ismem(t) {
+		goto hard
+	}
+
+	// convert constant to desired type
+	if f.Op == gc.OLITERAL {
+		var con gc.Node
+		switch tt {
+		default:
+			f.Convconst(&con, t.Type)
+
+		case gc.TINT32,
+			gc.TINT16,
+			gc.TINT8:
+			var con gc.Node
+			f.Convconst(&con, gc.Types[gc.TINT64])
+			var r1 gc.Node
+			gc.Regalloc(&r1, con.Type, t)
+			gins(ppc64.AMOVD, &con, &r1)
+			gmove(&r1, t)
+			gc.Regfree(&r1)
+			return
+
+		case gc.TUINT32,
+			gc.TUINT16,
+			gc.TUINT8:
+			var con gc.Node
+			f.Convconst(&con, gc.Types[gc.TUINT64])
+			var r1 gc.Node
+			gc.Regalloc(&r1, con.Type, t)
+			gins(ppc64.AMOVD, &con, &r1)
+			gmove(&r1, t)
+			gc.Regfree(&r1)
+			return
+		}
+
+		f = &con
+		ft = tt // so big switch will choose a simple mov
+
+		// constants can't move directly to memory.
+		if gc.Ismem(t) {
+			goto hard
+		}
+	}
+
+	// float constants come from memory.
+	//if(isfloat[tt])
+	//	goto hard;
+
+	// 64-bit immediates are also from memory.
+	//if(isint[tt])
+	//	goto hard;
+	//// 64-bit immediates are really 32-bit sign-extended
+	//// unless moving into a register.
+	//if(isint[tt]) {
+	//	if(mpcmpfixfix(con.val.u.xval, minintval[TINT32]) < 0)
+	//		goto hard;
+	//	if(mpcmpfixfix(con.val.u.xval, maxintval[TINT32]) > 0)
+	//		goto hard;
+	//}
+
+	// value -> value copy, only one memory operand.
+	// figure out the instruction to use.
+	// break out of switch for one-instruction gins.
+	// goto rdst for "destination must be register".
+	// goto hard for "convert to cvt type first".
+	// otherwise handle and return.
+
+	switch uint32(ft)<<16 | uint32(tt) {
+	default:
+		gc.Fatal("gmove %v -> %v", gc.Tconv(f.Type, obj.FmtLong), gc.Tconv(t.Type, obj.FmtLong))
+
+		/*
+		 * integer copy and truncate
+		 */
+	case gc.TINT8<<16 | gc.TINT8, // same size
+		gc.TUINT8<<16 | gc.TINT8,
+		gc.TINT16<<16 | gc.TINT8,
+		// truncate
+		gc.TUINT16<<16 | gc.TINT8,
+		gc.TINT32<<16 | gc.TINT8,
+		gc.TUINT32<<16 | gc.TINT8,
+		gc.TINT64<<16 | gc.TINT8,
+		gc.TUINT64<<16 | gc.TINT8:
+		a = ppc64.AMOVB
+
+	case gc.TINT8<<16 | gc.TUINT8, // same size
+		gc.TUINT8<<16 | gc.TUINT8,
+		gc.TINT16<<16 | gc.TUINT8,
+		// truncate
+		gc.TUINT16<<16 | gc.TUINT8,
+		gc.TINT32<<16 | gc.TUINT8,
+		gc.TUINT32<<16 | gc.TUINT8,
+		gc.TINT64<<16 | gc.TUINT8,
+		gc.TUINT64<<16 | gc.TUINT8:
+		a = ppc64.AMOVBZ
+
+	case gc.TINT16<<16 | gc.TINT16, // same size
+		gc.TUINT16<<16 | gc.TINT16,
+		gc.TINT32<<16 | gc.TINT16,
+		// truncate
+		gc.TUINT32<<16 | gc.TINT16,
+		gc.TINT64<<16 | gc.TINT16,
+		gc.TUINT64<<16 | gc.TINT16:
+		a = ppc64.AMOVH
+
+	case gc.TINT16<<16 | gc.TUINT16, // same size
+		gc.TUINT16<<16 | gc.TUINT16,
+		gc.TINT32<<16 | gc.TUINT16,
+		// truncate
+		gc.TUINT32<<16 | gc.TUINT16,
+		gc.TINT64<<16 | gc.TUINT16,
+		gc.TUINT64<<16 | gc.TUINT16:
+		a = ppc64.AMOVHZ
+
+	case gc.TINT32<<16 | gc.TINT32, // same size
+		gc.TUINT32<<16 | gc.TINT32,
+		gc.TINT64<<16 | gc.TINT32,
+		// truncate
+		gc.TUINT64<<16 | gc.TINT32:
+		a = ppc64.AMOVW
+
+	case gc.TINT32<<16 | gc.TUINT32, // same size
+		gc.TUINT32<<16 | gc.TUINT32,
+		gc.TINT64<<16 | gc.TUINT32,
+		gc.TUINT64<<16 | gc.TUINT32:
+		a = ppc64.AMOVWZ
+
+	case gc.TINT64<<16 | gc.TINT64, // same size
+		gc.TINT64<<16 | gc.TUINT64,
+		gc.TUINT64<<16 | gc.TINT64,
+		gc.TUINT64<<16 | gc.TUINT64:
+		a = ppc64.AMOVD
+
+		/*
+		 * integer up-conversions
+		 */
+	case gc.TINT8<<16 | gc.TINT16, // sign extend int8
+		gc.TINT8<<16 | gc.TUINT16,
+		gc.TINT8<<16 | gc.TINT32,
+		gc.TINT8<<16 | gc.TUINT32,
+		gc.TINT8<<16 | gc.TINT64,
+		gc.TINT8<<16 | gc.TUINT64:
+		a = ppc64.AMOVB
+
+		goto rdst
+
+	case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8
+		gc.TUINT8<<16 | gc.TUINT16,
+		gc.TUINT8<<16 | gc.TINT32,
+		gc.TUINT8<<16 | gc.TUINT32,
+		gc.TUINT8<<16 | gc.TINT64,
+		gc.TUINT8<<16 | gc.TUINT64:
+		a = ppc64.AMOVBZ
+
+		goto rdst
+
+	case gc.TINT16<<16 | gc.TINT32, // sign extend int16
+		gc.TINT16<<16 | gc.TUINT32,
+		gc.TINT16<<16 | gc.TINT64,
+		gc.TINT16<<16 | gc.TUINT64:
+		a = ppc64.AMOVH
+
+		goto rdst
+
+	case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16
+		gc.TUINT16<<16 | gc.TUINT32,
+		gc.TUINT16<<16 | gc.TINT64,
+		gc.TUINT16<<16 | gc.TUINT64:
+		a = ppc64.AMOVHZ
+
+		goto rdst
+
+	case gc.TINT32<<16 | gc.TINT64, // sign extend int32
+		gc.TINT32<<16 | gc.TUINT64:
+		a = ppc64.AMOVW
+
+		goto rdst
+
+	case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
+		gc.TUINT32<<16 | gc.TUINT64:
+		a = ppc64.AMOVWZ
+
+		goto rdst
+
+		//warn("gmove: convert float to int not implemented: %N -> %N\n", f, t);
+	//return;
+	// algorithm is:
+	//	if small enough, use native float64 -> int64 conversion.
+	//	otherwise, subtract 2^63, convert, and add it back.
+	/*
+	* float to integer
+	 */
+	case gc.TFLOAT32<<16 | gc.TINT32,
+		gc.TFLOAT64<<16 | gc.TINT32,
+		gc.TFLOAT32<<16 | gc.TINT64,
+		gc.TFLOAT64<<16 | gc.TINT64,
+		gc.TFLOAT32<<16 | gc.TINT16,
+		gc.TFLOAT32<<16 | gc.TINT8,
+		gc.TFLOAT32<<16 | gc.TUINT16,
+		gc.TFLOAT32<<16 | gc.TUINT8,
+		gc.TFLOAT64<<16 | gc.TINT16,
+		gc.TFLOAT64<<16 | gc.TINT8,
+		gc.TFLOAT64<<16 | gc.TUINT16,
+		gc.TFLOAT64<<16 | gc.TUINT8,
+		gc.TFLOAT32<<16 | gc.TUINT32,
+		gc.TFLOAT64<<16 | gc.TUINT32,
+		gc.TFLOAT32<<16 | gc.TUINT64,
+		gc.TFLOAT64<<16 | gc.TUINT64:
+		bignodes()
+
+		var r1 gc.Node
+		gc.Regalloc(&r1, gc.Types[ft], f)
+		gmove(f, &r1)
+		if tt == gc.TUINT64 {
+			gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], nil)
+			gmove(&bigf, &r2)
+			gins(ppc64.AFCMPU, &r1, &r2)
+			p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1)
+			gins(ppc64.AFSUB, &r2, &r1)
+			gc.Patch(p1, gc.Pc)
+			gc.Regfree(&r2)
+		}
+
+		gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], nil)
+		var r3 gc.Node
+		gc.Regalloc(&r3, gc.Types[gc.TINT64], t)
+		gins(ppc64.AFCTIDZ, &r1, &r2)
+		p1 := gins(ppc64.AFMOVD, &r2, nil)
+		p1.To.Type = obj.TYPE_MEM
+		p1.To.Reg = ppc64.REGSP
+		p1.To.Offset = -8
+		p1 = gins(ppc64.AMOVD, nil, &r3)
+		p1.From.Type = obj.TYPE_MEM
+		p1.From.Reg = ppc64.REGSP
+		p1.From.Offset = -8
+		gc.Regfree(&r2)
+		gc.Regfree(&r1)
+		if tt == gc.TUINT64 {
+			p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1) // use CR0 here again
+			gc.Nodreg(&r1, gc.Types[gc.TINT64], ppc64.REGTMP)
+			gins(ppc64.AMOVD, &bigi, &r1)
+			gins(ppc64.AADD, &r1, &r3)
+			gc.Patch(p1, gc.Pc)
+		}
+
+		gmove(&r3, t)
+		gc.Regfree(&r3)
+		return
+
+		//warn("gmove: convert int to float not implemented: %N -> %N\n", f, t);
+	//return;
+	// algorithm is:
+	//	if small enough, use native int64 -> uint64 conversion.
+	//	otherwise, halve (rounding to odd?), convert, and double.
+	/*
+	 * integer to float
+	 */
+	case gc.TINT32<<16 | gc.TFLOAT32,
+		gc.TINT32<<16 | gc.TFLOAT64,
+		gc.TINT64<<16 | gc.TFLOAT32,
+		gc.TINT64<<16 | gc.TFLOAT64,
+		gc.TINT16<<16 | gc.TFLOAT32,
+		gc.TINT16<<16 | gc.TFLOAT64,
+		gc.TINT8<<16 | gc.TFLOAT32,
+		gc.TINT8<<16 | gc.TFLOAT64,
+		gc.TUINT16<<16 | gc.TFLOAT32,
+		gc.TUINT16<<16 | gc.TFLOAT64,
+		gc.TUINT8<<16 | gc.TFLOAT32,
+		gc.TUINT8<<16 | gc.TFLOAT64,
+		gc.TUINT32<<16 | gc.TFLOAT32,
+		gc.TUINT32<<16 | gc.TFLOAT64,
+		gc.TUINT64<<16 | gc.TFLOAT32,
+		gc.TUINT64<<16 | gc.TFLOAT64:
+		bignodes()
+
+		var r1 gc.Node
+		gc.Regalloc(&r1, gc.Types[gc.TINT64], nil)
+		gmove(f, &r1)
+		if ft == gc.TUINT64 {
+			gc.Nodreg(&r2, gc.Types[gc.TUINT64], ppc64.REGTMP)
+			gmove(&bigi, &r2)
+			gins(ppc64.ACMPU, &r1, &r2)
+			p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1)
+			p2 := gins(ppc64.ASRD, nil, &r1)
+			p2.From.Type = obj.TYPE_CONST
+			p2.From.Offset = 1
+			gc.Patch(p1, gc.Pc)
+		}
+
+		gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], t)
+		p1 := gins(ppc64.AMOVD, &r1, nil)
+		p1.To.Type = obj.TYPE_MEM
+		p1.To.Reg = ppc64.REGSP
+		p1.To.Offset = -8
+		p1 = gins(ppc64.AFMOVD, nil, &r2)
+		p1.From.Type = obj.TYPE_MEM
+		p1.From.Reg = ppc64.REGSP
+		p1.From.Offset = -8
+		gins(ppc64.AFCFID, &r2, &r2)
+		gc.Regfree(&r1)
+		if ft == gc.TUINT64 {
+			p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1) // use CR0 here again
+			gc.Nodreg(&r1, gc.Types[gc.TFLOAT64], ppc64.FREGTWO)
+			gins(ppc64.AFMUL, &r1, &r2)
+			gc.Patch(p1, gc.Pc)
+		}
+
+		gmove(&r2, t)
+		gc.Regfree(&r2)
+		return
+
+		/*
+		 * float to float
+		 */
+	case gc.TFLOAT32<<16 | gc.TFLOAT32:
+		a = ppc64.AFMOVS
+
+	case gc.TFLOAT64<<16 | gc.TFLOAT64:
+		a = ppc64.AFMOVD
+
+	case gc.TFLOAT32<<16 | gc.TFLOAT64:
+		a = ppc64.AFMOVS
+		goto rdst
+
+	case gc.TFLOAT64<<16 | gc.TFLOAT32:
+		a = ppc64.AFRSP
+		goto rdst
+	}
+
+	gins(a, f, t)
+	return
+
+	// requires register destination
+rdst:
+	{
+		gc.Regalloc(&r1, t.Type, t)
+
+		gins(a, f, &r1)
+		gmove(&r1, t)
+		gc.Regfree(&r1)
+		return
+	}
+
+	// requires register intermediate
+hard:
+	gc.Regalloc(&r1, cvt, t)
+
+	gmove(f, &r1)
+	gmove(&r1, t)
+	gc.Regfree(&r1)
+	return
+}
+
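
The float-to-integer algorithm the TUINT64 branch implements, in plain
Go (illustrative only):

	func f64toU64(f float64) uint64 {
		const two63 = 9223372036854775808.0 // 2^63, the bigf constant
		if f < two63 {
			return uint64(int64(f)) // native float64 -> int64 path
		}
		// Bias below 2^63 (AFSUB), convert (AFCTIDZ), then add the
		// high bit back (the AADD of bigi).
		return uint64(int64(f-two63)) + 1<<63
	}
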
+func intLiteral(n *gc.Node) (x int64, ok bool) {
+	switch {
+	case n == nil:
+		return
+	case gc.Isconst(n, gc.CTINT):
+		return n.Int(), true
+	case gc.Isconst(n, gc.CTBOOL):
+		return int64(obj.Bool2int(n.Bool())), true
+	}
+	return
+}
+
+// gins is called by the front end.
+// It synthesizes some multiple-instruction sequences
+// so the front end can stay simpler.
+func gins(as int, f, t *gc.Node) *obj.Prog {
+	if as >= obj.A_ARCHSPECIFIC {
+		if x, ok := intLiteral(f); ok {
+			ginscon(as, x, t)
+			return nil // caller must not use
+		}
+	}
+	if as == ppc64.ACMP || as == ppc64.ACMPU {
+		if x, ok := intLiteral(t); ok {
+			ginscon2(as, f, x)
+			return nil // caller must not use
+		}
+	}
+	return rawgins(as, f, t)
+}
+
+/*
+ * generate one instruction:
+ *	as f, t
+ */
+func rawgins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
+	// TODO(austin): Add self-move test like in 6g (but be careful
+	// of truncation moves)
+
+	p := gc.Prog(as)
+	gc.Naddr(&p.From, f)
+	gc.Naddr(&p.To, t)
+
+	switch as {
+	case obj.ACALL:
+		if p.To.Type == obj.TYPE_REG && p.To.Reg != ppc64.REG_CTR {
+			// Allow front end to emit CALL REG, and rewrite into MOV REG, CTR; CALL CTR.
+			pp := gc.Prog(as)
+			pp.From = p.From
+			pp.To.Type = obj.TYPE_REG
+			pp.To.Reg = ppc64.REG_CTR
+
+			p.As = ppc64.AMOVD
+			p.From = p.To
+			p.To.Type = obj.TYPE_REG
+			p.To.Reg = ppc64.REG_CTR
+
+			if gc.Debug['g'] != 0 {
+				fmt.Printf("%v\n", p)
+				fmt.Printf("%v\n", pp)
+			}
+
+			return pp
+		}
+
+	// Bad things the front end has done to us. Crash to find call stack.
+	case ppc64.AAND, ppc64.AMULLD:
+		if p.From.Type == obj.TYPE_CONST {
+			gc.Debug['h'] = 1
+			gc.Fatal("bad inst: %v", p)
+		}
+	case ppc64.ACMP, ppc64.ACMPU:
+		if p.From.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_MEM {
+			gc.Debug['h'] = 1
+			gc.Fatal("bad inst: %v", p)
+		}
+	}
+
+	if gc.Debug['g'] != 0 {
+		fmt.Printf("%v\n", p)
+	}
+
+	w := int32(0)
+	switch as {
+	case ppc64.AMOVB,
+		ppc64.AMOVBU,
+		ppc64.AMOVBZ,
+		ppc64.AMOVBZU:
+		w = 1
+
+	case ppc64.AMOVH,
+		ppc64.AMOVHU,
+		ppc64.AMOVHZ,
+		ppc64.AMOVHZU:
+		w = 2
+
+	case ppc64.AMOVW,
+		ppc64.AMOVWU,
+		ppc64.AMOVWZ,
+		ppc64.AMOVWZU:
+		w = 4
+
+	case ppc64.AMOVD,
+		ppc64.AMOVDU:
+		if p.From.Type == obj.TYPE_CONST || p.From.Type == obj.TYPE_ADDR {
+			break
+		}
+		w = 8
+	}
+
+	if w != 0 && ((f != nil && p.From.Width < int64(w)) || (t != nil && p.To.Type != obj.TYPE_REG && p.To.Width > int64(w))) {
+		gc.Dump("f", f)
+		gc.Dump("t", t)
+		gc.Fatal("bad width: %v (%d, %d)\n", p, p.From.Width, p.To.Width)
+	}
+
+	return p
+}
+
+/*
+ * return Axxx for Oxxx on type t.
+ */
+func optoas(op int, t *gc.Type) int {
+	if t == nil {
+		gc.Fatal("optoas: t is nil")
+	}
+
+	a := int(obj.AXXX)
+	switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
+	default:
+		gc.Fatal("optoas: no entry for op=%v type=%v", gc.Oconv(int(op), 0), t)
+
+	case gc.OEQ<<16 | gc.TBOOL,
+		gc.OEQ<<16 | gc.TINT8,
+		gc.OEQ<<16 | gc.TUINT8,
+		gc.OEQ<<16 | gc.TINT16,
+		gc.OEQ<<16 | gc.TUINT16,
+		gc.OEQ<<16 | gc.TINT32,
+		gc.OEQ<<16 | gc.TUINT32,
+		gc.OEQ<<16 | gc.TINT64,
+		gc.OEQ<<16 | gc.TUINT64,
+		gc.OEQ<<16 | gc.TPTR32,
+		gc.OEQ<<16 | gc.TPTR64,
+		gc.OEQ<<16 | gc.TFLOAT32,
+		gc.OEQ<<16 | gc.TFLOAT64:
+		a = ppc64.ABEQ
+
+	case gc.ONE<<16 | gc.TBOOL,
+		gc.ONE<<16 | gc.TINT8,
+		gc.ONE<<16 | gc.TUINT8,
+		gc.ONE<<16 | gc.TINT16,
+		gc.ONE<<16 | gc.TUINT16,
+		gc.ONE<<16 | gc.TINT32,
+		gc.ONE<<16 | gc.TUINT32,
+		gc.ONE<<16 | gc.TINT64,
+		gc.ONE<<16 | gc.TUINT64,
+		gc.ONE<<16 | gc.TPTR32,
+		gc.ONE<<16 | gc.TPTR64,
+		gc.ONE<<16 | gc.TFLOAT32,
+		gc.ONE<<16 | gc.TFLOAT64:
+		a = ppc64.ABNE
+
+	case gc.OLT<<16 | gc.TINT8, // ACMP
+		gc.OLT<<16 | gc.TINT16,
+		gc.OLT<<16 | gc.TINT32,
+		gc.OLT<<16 | gc.TINT64,
+		gc.OLT<<16 | gc.TUINT8,
+		// ACMPU
+		gc.OLT<<16 | gc.TUINT16,
+		gc.OLT<<16 | gc.TUINT32,
+		gc.OLT<<16 | gc.TUINT64,
+		gc.OLT<<16 | gc.TFLOAT32,
+		// AFCMPU
+		gc.OLT<<16 | gc.TFLOAT64:
+		a = ppc64.ABLT
+
+	case gc.OLE<<16 | gc.TINT8, // ACMP
+		gc.OLE<<16 | gc.TINT16,
+		gc.OLE<<16 | gc.TINT32,
+		gc.OLE<<16 | gc.TINT64,
+		gc.OLE<<16 | gc.TUINT8,
+		// ACMPU
+		gc.OLE<<16 | gc.TUINT16,
+		gc.OLE<<16 | gc.TUINT32,
+		gc.OLE<<16 | gc.TUINT64:
+		// No OLE for floats, because it mishandles NaN.
+		// Front end must reverse comparison or use OLT and OEQ together.
+		a = ppc64.ABLE
+
+	case gc.OGT<<16 | gc.TINT8,
+		gc.OGT<<16 | gc.TINT16,
+		gc.OGT<<16 | gc.TINT32,
+		gc.OGT<<16 | gc.TINT64,
+		gc.OGT<<16 | gc.TUINT8,
+		gc.OGT<<16 | gc.TUINT16,
+		gc.OGT<<16 | gc.TUINT32,
+		gc.OGT<<16 | gc.TUINT64,
+		gc.OGT<<16 | gc.TFLOAT32,
+		gc.OGT<<16 | gc.TFLOAT64:
+		a = ppc64.ABGT
+
+	case gc.OGE<<16 | gc.TINT8,
+		gc.OGE<<16 | gc.TINT16,
+		gc.OGE<<16 | gc.TINT32,
+		gc.OGE<<16 | gc.TINT64,
+		gc.OGE<<16 | gc.TUINT8,
+		gc.OGE<<16 | gc.TUINT16,
+		gc.OGE<<16 | gc.TUINT32,
+		gc.OGE<<16 | gc.TUINT64:
+		// No OGE for floats, because it mishandles NaN.
+		// Front end must reverse comparison or use OLT and OEQ together.
+		a = ppc64.ABGE
+
+	case gc.OCMP<<16 | gc.TBOOL,
+		gc.OCMP<<16 | gc.TINT8,
+		gc.OCMP<<16 | gc.TINT16,
+		gc.OCMP<<16 | gc.TINT32,
+		gc.OCMP<<16 | gc.TPTR32,
+		gc.OCMP<<16 | gc.TINT64:
+		a = ppc64.ACMP
+
+	case gc.OCMP<<16 | gc.TUINT8,
+		gc.OCMP<<16 | gc.TUINT16,
+		gc.OCMP<<16 | gc.TUINT32,
+		gc.OCMP<<16 | gc.TUINT64,
+		gc.OCMP<<16 | gc.TPTR64:
+		a = ppc64.ACMPU
+
+	case gc.OCMP<<16 | gc.TFLOAT32,
+		gc.OCMP<<16 | gc.TFLOAT64:
+		a = ppc64.AFCMPU
+
+	case gc.OAS<<16 | gc.TBOOL,
+		gc.OAS<<16 | gc.TINT8:
+		a = ppc64.AMOVB
+
+	case gc.OAS<<16 | gc.TUINT8:
+		a = ppc64.AMOVBZ
+
+	case gc.OAS<<16 | gc.TINT16:
+		a = ppc64.AMOVH
+
+	case gc.OAS<<16 | gc.TUINT16:
+		a = ppc64.AMOVHZ
+
+	case gc.OAS<<16 | gc.TINT32:
+		a = ppc64.AMOVW
+
+	case gc.OAS<<16 | gc.TUINT32,
+		gc.OAS<<16 | gc.TPTR32:
+		a = ppc64.AMOVWZ
+
+	case gc.OAS<<16 | gc.TINT64,
+		gc.OAS<<16 | gc.TUINT64,
+		gc.OAS<<16 | gc.TPTR64:
+		a = ppc64.AMOVD
+
+	case gc.OAS<<16 | gc.TFLOAT32:
+		a = ppc64.AFMOVS
+
+	case gc.OAS<<16 | gc.TFLOAT64:
+		a = ppc64.AFMOVD
+
+	case gc.OADD<<16 | gc.TINT8,
+		gc.OADD<<16 | gc.TUINT8,
+		gc.OADD<<16 | gc.TINT16,
+		gc.OADD<<16 | gc.TUINT16,
+		gc.OADD<<16 | gc.TINT32,
+		gc.OADD<<16 | gc.TUINT32,
+		gc.OADD<<16 | gc.TPTR32,
+		gc.OADD<<16 | gc.TINT64,
+		gc.OADD<<16 | gc.TUINT64,
+		gc.OADD<<16 | gc.TPTR64:
+		a = ppc64.AADD
+
+	case gc.OADD<<16 | gc.TFLOAT32:
+		a = ppc64.AFADDS
+
+	case gc.OADD<<16 | gc.TFLOAT64:
+		a = ppc64.AFADD
+
+	case gc.OSUB<<16 | gc.TINT8,
+		gc.OSUB<<16 | gc.TUINT8,
+		gc.OSUB<<16 | gc.TINT16,
+		gc.OSUB<<16 | gc.TUINT16,
+		gc.OSUB<<16 | gc.TINT32,
+		gc.OSUB<<16 | gc.TUINT32,
+		gc.OSUB<<16 | gc.TPTR32,
+		gc.OSUB<<16 | gc.TINT64,
+		gc.OSUB<<16 | gc.TUINT64,
+		gc.OSUB<<16 | gc.TPTR64:
+		a = ppc64.ASUB
+
+	case gc.OSUB<<16 | gc.TFLOAT32:
+		a = ppc64.AFSUBS
+
+	case gc.OSUB<<16 | gc.TFLOAT64:
+		a = ppc64.AFSUB
+
+	case gc.OMINUS<<16 | gc.TINT8,
+		gc.OMINUS<<16 | gc.TUINT8,
+		gc.OMINUS<<16 | gc.TINT16,
+		gc.OMINUS<<16 | gc.TUINT16,
+		gc.OMINUS<<16 | gc.TINT32,
+		gc.OMINUS<<16 | gc.TUINT32,
+		gc.OMINUS<<16 | gc.TPTR32,
+		gc.OMINUS<<16 | gc.TINT64,
+		gc.OMINUS<<16 | gc.TUINT64,
+		gc.OMINUS<<16 | gc.TPTR64:
+		a = ppc64.ANEG
+
+	case gc.OAND<<16 | gc.TINT8,
+		gc.OAND<<16 | gc.TUINT8,
+		gc.OAND<<16 | gc.TINT16,
+		gc.OAND<<16 | gc.TUINT16,
+		gc.OAND<<16 | gc.TINT32,
+		gc.OAND<<16 | gc.TUINT32,
+		gc.OAND<<16 | gc.TPTR32,
+		gc.OAND<<16 | gc.TINT64,
+		gc.OAND<<16 | gc.TUINT64,
+		gc.OAND<<16 | gc.TPTR64:
+		a = ppc64.AAND
+
+	case gc.OOR<<16 | gc.TINT8,
+		gc.OOR<<16 | gc.TUINT8,
+		gc.OOR<<16 | gc.TINT16,
+		gc.OOR<<16 | gc.TUINT16,
+		gc.OOR<<16 | gc.TINT32,
+		gc.OOR<<16 | gc.TUINT32,
+		gc.OOR<<16 | gc.TPTR32,
+		gc.OOR<<16 | gc.TINT64,
+		gc.OOR<<16 | gc.TUINT64,
+		gc.OOR<<16 | gc.TPTR64:
+		a = ppc64.AOR
+
+	case gc.OXOR<<16 | gc.TINT8,
+		gc.OXOR<<16 | gc.TUINT8,
+		gc.OXOR<<16 | gc.TINT16,
+		gc.OXOR<<16 | gc.TUINT16,
+		gc.OXOR<<16 | gc.TINT32,
+		gc.OXOR<<16 | gc.TUINT32,
+		gc.OXOR<<16 | gc.TPTR32,
+		gc.OXOR<<16 | gc.TINT64,
+		gc.OXOR<<16 | gc.TUINT64,
+		gc.OXOR<<16 | gc.TPTR64:
+		a = ppc64.AXOR
+
+		// TODO(minux): handle rotates
+	//case CASE(OLROT, TINT8):
+	//case CASE(OLROT, TUINT8):
+	//case CASE(OLROT, TINT16):
+	//case CASE(OLROT, TUINT16):
+	//case CASE(OLROT, TINT32):
+	//case CASE(OLROT, TUINT32):
+	//case CASE(OLROT, TPTR32):
+	//case CASE(OLROT, TINT64):
+	//case CASE(OLROT, TUINT64):
+	//case CASE(OLROT, TPTR64):
+	//	a = 0//???; RLDC?
+	//	break;
+
+	case gc.OLSH<<16 | gc.TINT8,
+		gc.OLSH<<16 | gc.TUINT8,
+		gc.OLSH<<16 | gc.TINT16,
+		gc.OLSH<<16 | gc.TUINT16,
+		gc.OLSH<<16 | gc.TINT32,
+		gc.OLSH<<16 | gc.TUINT32,
+		gc.OLSH<<16 | gc.TPTR32,
+		gc.OLSH<<16 | gc.TINT64,
+		gc.OLSH<<16 | gc.TUINT64,
+		gc.OLSH<<16 | gc.TPTR64:
+		a = ppc64.ASLD
+
+	case gc.ORSH<<16 | gc.TUINT8,
+		gc.ORSH<<16 | gc.TUINT16,
+		gc.ORSH<<16 | gc.TUINT32,
+		gc.ORSH<<16 | gc.TPTR32,
+		gc.ORSH<<16 | gc.TUINT64,
+		gc.ORSH<<16 | gc.TPTR64:
+		a = ppc64.ASRD
+
+	case gc.ORSH<<16 | gc.TINT8,
+		gc.ORSH<<16 | gc.TINT16,
+		gc.ORSH<<16 | gc.TINT32,
+		gc.ORSH<<16 | gc.TINT64:
+		a = ppc64.ASRAD
+
+		// TODO(minux): handle rotates
+	//case CASE(ORROTC, TINT8):
+	//case CASE(ORROTC, TUINT8):
+	//case CASE(ORROTC, TINT16):
+	//case CASE(ORROTC, TUINT16):
+	//case CASE(ORROTC, TINT32):
+	//case CASE(ORROTC, TUINT32):
+	//case CASE(ORROTC, TINT64):
+	//case CASE(ORROTC, TUINT64):
+	//	a = 0//??? RLDC??
+	//	break;
+
+	case gc.OHMUL<<16 | gc.TINT64:
+		a = ppc64.AMULHD
+
+	case gc.OHMUL<<16 | gc.TUINT64,
+		gc.OHMUL<<16 | gc.TPTR64:
+		a = ppc64.AMULHDU
+
+	case gc.OMUL<<16 | gc.TINT8,
+		gc.OMUL<<16 | gc.TINT16,
+		gc.OMUL<<16 | gc.TINT32,
+		gc.OMUL<<16 | gc.TINT64:
+		a = ppc64.AMULLD
+
+	case gc.OMUL<<16 | gc.TUINT8,
+		gc.OMUL<<16 | gc.TUINT16,
+		gc.OMUL<<16 | gc.TUINT32,
+		gc.OMUL<<16 | gc.TPTR32,
+		// don't use word multiply, the high 32 bits are undefined.
+		gc.OMUL<<16 | gc.TUINT64,
+		gc.OMUL<<16 | gc.TPTR64:
+		// for 64-bit multiplies, signedness doesn't matter.
+		a = ppc64.AMULLD
+
+	case gc.OMUL<<16 | gc.TFLOAT32:
+		a = ppc64.AFMULS
+
+	case gc.OMUL<<16 | gc.TFLOAT64:
+		a = ppc64.AFMUL
+
+	case gc.ODIV<<16 | gc.TINT8,
+		gc.ODIV<<16 | gc.TINT16,
+		gc.ODIV<<16 | gc.TINT32,
+		gc.ODIV<<16 | gc.TINT64:
+		a = ppc64.ADIVD
+
+	case gc.ODIV<<16 | gc.TUINT8,
+		gc.ODIV<<16 | gc.TUINT16,
+		gc.ODIV<<16 | gc.TUINT32,
+		gc.ODIV<<16 | gc.TPTR32,
+		gc.ODIV<<16 | gc.TUINT64,
+		gc.ODIV<<16 | gc.TPTR64:
+		a = ppc64.ADIVDU
+
+	case gc.ODIV<<16 | gc.TFLOAT32:
+		a = ppc64.AFDIVS
+
+	case gc.ODIV<<16 | gc.TFLOAT64:
+		a = ppc64.AFDIV
+	}
+
+	return a
+}
+
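
The switch above keys on a packed (operation, type) pair. A sketch of
the encoding, assuming both codes fit in 16 bits (illustrative only):

	func opTypeKey(op, etype uint32) uint32 {
		return op<<16 | etype // e.g. gc.OADD<<16 | gc.TINT64
	}
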
+const (
+	ODynam   = 1 << 0
+	OAddable = 1 << 1
+)
+
+func xgen(n *gc.Node, a *gc.Node, o int) bool {
+	// TODO(minux)
+	return true
+}
+
+func sudoclean() {
+}
+
+/*
+ * generate code to compute address of n,
+ * a reference to a (perhaps nested) field inside
+ * an array or struct.
+ * return false on failure, true on success.
+ * on success, leaves usable address in a.
+ *
+ * caller is responsible for calling sudoclean
+ * after successful sudoaddable,
+ * to release the register used for a.
+ */
+func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
+	// TODO(minux)
+
+	*a = obj.Addr{}
+	return false
+}
diff --git a/src/cmd/compile/internal/ppc64/opt.go b/src/cmd/compile/internal/ppc64/opt.go
new file mode 100644
index 0000000..1704f63
--- /dev/null
+++ b/src/cmd/compile/internal/ppc64/opt.go
@@ -0,0 +1,12 @@
+// Copyright 2014 The Go Authors.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ppc64
+
+// Many Power ISA arithmetic and logical instructions come in four
+// standard variants.  These bits let us map between variants.
+const (
+	V_CC = 1 << 0 // xCC (affect CR field 0 flags)
+	V_V  = 1 << 1 // xV (affect SO and OV flags)
+)
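
Composing the two bits yields four variants per base opcode, matching
the four-column varianttable in prog.go below. A sketch of the intended
indexing, with stand-in names (illustrative only):

	// Index 0 is the base form; OR-ing in V_CC and/or V_V
	// selects the flag-setting and overflow-detecting forms.
	var addVariants = [4]string{
		0:          "ADD",
		V_CC:       "ADDCC",
		V_V:        "ADDV",
		V_CC | V_V: "ADDVCC",
	}
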
diff --git a/src/cmd/compile/internal/ppc64/peep.go b/src/cmd/compile/internal/ppc64/peep.go
new file mode 100644
index 0000000..16eeb39
--- /dev/null
+++ b/src/cmd/compile/internal/ppc64/peep.go
@@ -0,0 +1,1051 @@
+// Derived from Inferno utils/6c/peep.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/peep.c
+//
+//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
+//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+//	Portions Copyright © 1997-1999 Vita Nuova Limited
+//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+//	Portions Copyright © 2004,2006 Bruce Ellis
+//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+//	Portions Copyright © 2009 The Go Authors.  All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package ppc64
+
+import (
+	"cmd/compile/internal/gc"
+	"cmd/internal/obj"
+	"cmd/internal/obj/ppc64"
+	"fmt"
+)
+
+var gactive uint32
+
+func peep(firstp *obj.Prog) {
+	g := gc.Flowstart(firstp, nil)
+	if g == nil {
+		return
+	}
+	gactive = 0
+
+	var p *obj.Prog
+	var r *gc.Flow
+	var t int
+loop1:
+	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+		gc.Dumpit("loop1", g.Start, 0)
+	}
+
+	t = 0
+	for r = g.Start; r != nil; r = r.Link {
+		p = r.Prog
+
+		// TODO(austin) Handle smaller moves.  arm and amd64
+		// distinguish between moves that *must*
+		// sign/zero extend and moves that don't care so they
+		// can eliminate moves that don't care without
+		// breaking moves that do care.  This might let us
+		// simplify or remove the next peep loop, too.
+		if p.As == ppc64.AMOVD || p.As == ppc64.AFMOVD {
+			if regtyp(&p.To) {
+				// Try to eliminate reg->reg moves
+				if regtyp(&p.From) {
+					if p.From.Type == p.To.Type {
+						if copyprop(r) {
+							excise(r)
+							t++
+						} else if subprop(r) && copyprop(r) {
+							excise(r)
+							t++
+						}
+					}
+				}
+
+				// Convert uses to $0 to uses of R0 and
+				// propagate R0
+				if regzer(&p.From) != 0 {
+					if p.To.Type == obj.TYPE_REG {
+						p.From.Type = obj.TYPE_REG
+						p.From.Reg = ppc64.REGZERO
+						if copyprop(r) {
+							excise(r)
+							t++
+						} else if subprop(r) && copyprop(r) {
+							excise(r)
+							t++
+						}
+					}
+				}
+			}
+		}
+	}
+
+	if t != 0 {
+		goto loop1
+	}
+
+	/*
+	 * look for MOVB x,R; MOVB R,R (for small MOVs not handled above)
+	 */
+	var p1 *obj.Prog
+	var r1 *gc.Flow
+	for r := g.Start; r != nil; r = r.Link {
+		p = r.Prog
+		switch p.As {
+		default:
+			continue
+
+		case ppc64.AMOVH,
+			ppc64.AMOVHZ,
+			ppc64.AMOVB,
+			ppc64.AMOVBZ,
+			ppc64.AMOVW,
+			ppc64.AMOVWZ:
+			if p.To.Type != obj.TYPE_REG {
+				continue
+			}
+		}
+
+		r1 = r.Link
+		if r1 == nil {
+			continue
+		}
+		p1 = r1.Prog
+		if p1.As != p.As {
+			continue
+		}
+		if p1.From.Type != obj.TYPE_REG || p1.From.Reg != p.To.Reg {
+			continue
+		}
+		if p1.To.Type != obj.TYPE_REG || p1.To.Reg != p.To.Reg {
+			continue
+		}
+		excise(r1)
+	}
+
+	if gc.Debug['D'] > 1 {
+		goto ret /* allow the following code improvement to be suppressed */
+	}
+
+	/*
+	 * look for OP x,y,R; CMP R, $0 -> OPCC x,y,R
+	 * when OP can set condition codes correctly
+	 */
+	for r := g.Start; r != nil; r = r.Link {
+		p = r.Prog
+		switch p.As {
+		case ppc64.ACMP,
+			ppc64.ACMPW: /* always safe? */
+			if regzer(&p.To) == 0 {
+				continue
+			}
+			r1 = r.S1
+			if r1 == nil {
+				continue
+			}
+			switch r1.Prog.As {
+			default:
+				continue
+
+				/* the conditions can be complex and these are currently little used */
+			case ppc64.ABCL,
+				ppc64.ABC:
+				continue
+
+			case ppc64.ABEQ,
+				ppc64.ABGE,
+				ppc64.ABGT,
+				ppc64.ABLE,
+				ppc64.ABLT,
+				ppc64.ABNE,
+				ppc64.ABVC,
+				ppc64.ABVS:
+				break
+			}
+
+			r1 = r
+			for {
+				r1 = gc.Uniqp(r1)
+				if r1 == nil || r1.Prog.As != obj.ANOP {
+					break
+				}
+			}
+
+			if r1 == nil {
+				continue
+			}
+			p1 = r1.Prog
+			if p1.To.Type != obj.TYPE_REG || p1.To.Reg != p.From.Reg {
+				continue
+			}
+			switch p1.As {
+			/* irregular instructions */
+			case ppc64.ASUB,
+				ppc64.AADD,
+				ppc64.AXOR,
+				ppc64.AOR:
+				if p1.From.Type == obj.TYPE_CONST || p1.From.Type == obj.TYPE_ADDR {
+					continue
+				}
+			}
+
+			switch p1.As {
+			default:
+				continue
+
+			case ppc64.AMOVW,
+				ppc64.AMOVD:
+				continue
+
+			case ppc64.AANDCC,
+				ppc64.AANDNCC,
+				ppc64.AORCC,
+				ppc64.AORNCC,
+				ppc64.AXORCC,
+				ppc64.ASUBCC,
+				ppc64.ASUBECC,
+				ppc64.ASUBMECC,
+				ppc64.ASUBZECC,
+				ppc64.AADDCC,
+				ppc64.AADDCCC,
+				ppc64.AADDECC,
+				ppc64.AADDMECC,
+				ppc64.AADDZECC,
+				ppc64.ARLWMICC,
+				ppc64.ARLWNMCC,
+				/* don't deal with floating point instructions for now */
+				/*
+					case AFABS:
+					case AFADD:
+					case AFADDS:
+					case AFCTIW:
+					case AFCTIWZ:
+					case AFDIV:
+					case AFDIVS:
+					case AFMADD:
+					case AFMADDS:
+					case AFMOVD:
+					case AFMSUB:
+					case AFMSUBS:
+					case AFMUL:
+					case AFMULS:
+					case AFNABS:
+					case AFNEG:
+					case AFNMADD:
+					case AFNMADDS:
+					case AFNMSUB:
+					case AFNMSUBS:
+					case AFRSP:
+					case AFSUB:
+					case AFSUBS:
+					case ACNTLZW:
+					case AMTFSB0:
+					case AMTFSB1:
+				*/
+				ppc64.AADD,
+				ppc64.AADDV,
+				ppc64.AADDC,
+				ppc64.AADDCV,
+				ppc64.AADDME,
+				ppc64.AADDMEV,
+				ppc64.AADDE,
+				ppc64.AADDEV,
+				ppc64.AADDZE,
+				ppc64.AADDZEV,
+				ppc64.AAND,
+				ppc64.AANDN,
+				ppc64.ADIVW,
+				ppc64.ADIVWV,
+				ppc64.ADIVWU,
+				ppc64.ADIVWUV,
+				ppc64.ADIVD,
+				ppc64.ADIVDV,
+				ppc64.ADIVDU,
+				ppc64.ADIVDUV,
+				ppc64.AEQV,
+				ppc64.AEXTSB,
+				ppc64.AEXTSH,
+				ppc64.AEXTSW,
+				ppc64.AMULHW,
+				ppc64.AMULHWU,
+				ppc64.AMULLW,
+				ppc64.AMULLWV,
+				ppc64.AMULHD,
+				ppc64.AMULHDU,
+				ppc64.AMULLD,
+				ppc64.AMULLDV,
+				ppc64.ANAND,
+				ppc64.ANEG,
+				ppc64.ANEGV,
+				ppc64.ANOR,
+				ppc64.AOR,
+				ppc64.AORN,
+				ppc64.AREM,
+				ppc64.AREMV,
+				ppc64.AREMU,
+				ppc64.AREMUV,
+				ppc64.AREMD,
+				ppc64.AREMDV,
+				ppc64.AREMDU,
+				ppc64.AREMDUV,
+				ppc64.ARLWMI,
+				ppc64.ARLWNM,
+				ppc64.ASLW,
+				ppc64.ASRAW,
+				ppc64.ASRW,
+				ppc64.ASLD,
+				ppc64.ASRAD,
+				ppc64.ASRD,
+				ppc64.ASUB,
+				ppc64.ASUBV,
+				ppc64.ASUBC,
+				ppc64.ASUBCV,
+				ppc64.ASUBME,
+				ppc64.ASUBMEV,
+				ppc64.ASUBE,
+				ppc64.ASUBEV,
+				ppc64.ASUBZE,
+				ppc64.ASUBZEV,
+				ppc64.AXOR:
+				t = variant2as(int(p1.As), as2variant(int(p1.As))|V_CC)
+			}
+
+			if gc.Debug['D'] != 0 {
+				fmt.Printf("cmp %v; %v -> ", p1, p)
+			}
+			p1.As = int16(t)
+			if gc.Debug['D'] != 0 {
+				fmt.Printf("%v\n", p1)
+			}
+			excise(r)
+			continue
+		}
+	}
+
+ret:
+	gc.Flowend(g)
+}
+
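
Schematically, the third pass above performs this rewrite (illustrative
only):

	before:                 after:
		ADD R4, R5, R6          ADDCC R4, R5, R6
		CMP R6, $0              (compare excised)
		BEQ target              BEQ   target
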
+func excise(r *gc.Flow) {
+	p := r.Prog
+	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+		fmt.Printf("%v ===delete===\n", p)
+	}
+	obj.Nopout(p)
+	gc.Ostats.Ndelmov++
+}
+
+/*
+ * regzer returns 1 if a's value is 0 (a is R0 or $0)
+ */
+func regzer(a *obj.Addr) int {
+	if a.Type == obj.TYPE_CONST || a.Type == obj.TYPE_ADDR {
+		if a.Sym == nil && a.Reg == 0 {
+			if a.Offset == 0 {
+				return 1
+			}
+		}
+	}
+	if a.Type == obj.TYPE_REG {
+		if a.Reg == ppc64.REGZERO {
+			return 1
+		}
+	}
+	return 0
+}
+
+func regtyp(a *obj.Addr) bool {
+	// TODO(rsc): Floating point register exclusions?
+	return a.Type == obj.TYPE_REG && ppc64.REG_R0 <= a.Reg && a.Reg <= ppc64.REG_F31 && a.Reg != ppc64.REGZERO
+}
+
+/*
+ * the idea is to substitute
+ * one register for another
+ * from one MOV to another
+ *	MOV	a, R1
+ *	ADD	b, R1	/ no use of R2
+ *	MOV	R1, R2
+ * would be converted to
+ *	MOV	a, R2
+ *	ADD	b, R2
+ *	MOV	R2, R1
+ * hopefully, then the former or latter MOV
+ * will be eliminated by copy propagation.
+ *
+ * r0 (the argument, not the register) is the MOV at the end of the
+ * above sequences.  This returns true if it modified any instructions.
+ */
+func subprop(r0 *gc.Flow) bool {
+	p := r0.Prog
+	v1 := &p.From
+	if !regtyp(v1) {
+		return false
+	}
+	v2 := &p.To
+	if !regtyp(v2) {
+		return false
+	}
+	for r := gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
+		if gc.Uniqs(r) == nil {
+			break
+		}
+		p = r.Prog
+		if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
+			continue
+		}
+		if p.Info.Flags&gc.Call != 0 {
+			return false
+		}
+
+		if p.Info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightWrite {
+			if p.To.Type == v1.Type {
+				if p.To.Reg == v1.Reg {
+					copysub(&p.To, v1, v2, 1)
+					if gc.Debug['P'] != 0 {
+						fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
+						if p.From.Type == v2.Type {
+							fmt.Printf(" excise")
+						}
+						fmt.Printf("\n")
+					}
+
+					for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
+						p = r.Prog
+						copysub(&p.From, v1, v2, 1)
+						copysub1(p, v1, v2, 1)
+						copysub(&p.To, v1, v2, 1)
+						if gc.Debug['P'] != 0 {
+							fmt.Printf("%v\n", r.Prog)
+						}
+					}
+
+					v1.Reg, v2.Reg = v2.Reg, v1.Reg
+					if gc.Debug['P'] != 0 {
+						fmt.Printf("%v last\n", r.Prog)
+					}
+					return true
+				}
+			}
+		}
+
+		if copyau(&p.From, v2) || copyau1(p, v2) || copyau(&p.To, v2) {
+			break
+		}
+		if copysub(&p.From, v1, v2, 0) != 0 || copysub1(p, v1, v2, 0) != 0 || copysub(&p.To, v1, v2, 0) != 0 {
+			break
+		}
+	}
+
+	return false
+}
+
+/*
+ * The idea is to remove redundant copies.
+ *	v1->v2	F=0
+ *	(use v2	s/v2/v1/)*
+ *	set v1	F=1
+ *	use v2	return fail (v1->v2 move must remain)
+ *	-----------------
+ *	v1->v2	F=0
+ *	(use v2	s/v2/v1/)*
+ *	set v1	F=1
+ *	set v2	return success (caller can remove v1->v2 move)
+ */
+func copyprop(r0 *gc.Flow) bool {
+	p := r0.Prog
+	v1 := &p.From
+	v2 := &p.To
+	if copyas(v1, v2) {
+		if gc.Debug['P'] != 0 {
+			fmt.Printf("eliminating self-move: %v\n", r0.Prog)
+		}
+		return true
+	}
+
+	gactive++
+	if gc.Debug['P'] != 0 {
+		fmt.Printf("trying to eliminate %v->%v move from:\n%v\n", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r0.Prog)
+	}
+	return copy1(v1, v2, r0.S1, 0)
+}
+
+// copy1 replaces uses of v2 with v1 starting at r and returns true if
+// all uses were rewritten.
+func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) bool {
+	if uint32(r.Active) == gactive {
+		if gc.Debug['P'] != 0 {
+			fmt.Printf("act set; return 1\n")
+		}
+		return true
+	}
+
+	r.Active = int32(gactive)
+	if gc.Debug['P'] != 0 {
+		fmt.Printf("copy1 replace %v with %v f=%d\n", gc.Ctxt.Dconv(v2), gc.Ctxt.Dconv(v1), f)
+	}
+	var t int
+	var p *obj.Prog
+	for ; r != nil; r = r.S1 {
+		p = r.Prog
+		if gc.Debug['P'] != 0 {
+			fmt.Printf("%v", p)
+		}
+		if f == 0 && gc.Uniqp(r) == nil {
+			// Multiple predecessors; conservatively
+			// assume v1 was set on other path
+			f = 1
+
+			if gc.Debug['P'] != 0 {
+				fmt.Printf("; merge; f=%d", f)
+			}
+		}
+
+		t = copyu(p, v2, nil)
+		switch t {
+		case 2: /* rar, can't split */
+			if gc.Debug['P'] != 0 {
+				fmt.Printf("; %v rar; return 0\n", gc.Ctxt.Dconv(v2))
+			}
+			return false
+
+		case 3: /* set */
+			if gc.Debug['P'] != 0 {
+				fmt.Printf("; %v set; return 1\n", gc.Ctxt.Dconv(v2))
+			}
+			return true
+
+		case 1, /* used, substitute */
+			4: /* use and set */
+			if f != 0 {
+				if gc.Debug['P'] == 0 {
+					return false
+				}
+				if t == 4 {
+					fmt.Printf("; %v used+set and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
+				} else {
+					fmt.Printf("; %v used and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
+				}
+				return false
+			}
+
+			if copyu(p, v2, v1) != 0 {
+				if gc.Debug['P'] != 0 {
+					fmt.Printf("; sub fail; return 0\n")
+				}
+				return false
+			}
+
+			if gc.Debug['P'] != 0 {
+				fmt.Printf("; sub %v->%v\n => %v", gc.Ctxt.Dconv(v2), gc.Ctxt.Dconv(v1), p)
+			}
+			if t == 4 {
+				if gc.Debug['P'] != 0 {
+					fmt.Printf("; %v used+set; return 1\n", gc.Ctxt.Dconv(v2))
+				}
+				return true
+			}
+		}
+
+		if f == 0 {
+			t = copyu(p, v1, nil)
+			if f == 0 && (t == 2 || t == 3 || t == 4) {
+				f = 1
+				if gc.Debug['P'] != 0 {
+					fmt.Printf("; %v set and !f; f=%d", gc.Ctxt.Dconv(v1), f)
+				}
+			}
+		}
+
+		if gc.Debug['P'] != 0 {
+			fmt.Printf("\n")
+		}
+		if r.S2 != nil {
+			if !copy1(v1, v2, r.S2, f) {
+				return false
+			}
+		}
+	}
+
+	return true
+}
+
+// If s==nil, copyu returns the set/use of v in p; otherwise, it
+// modifies p to replace reads of v with reads of s and returns 0 for
+// success or non-zero for failure.
+//
+// If s==nil, copyu returns one of the following values:
+// 	1 if v only used
+//	2 if v is set and used in one address (read-alter-rewrite;
+// 	  can't substitute)
+//	3 if v is only set
+//	4 if v is set in one address and used in another (so addresses
+// 	  can be rewritten independently)
+//	0 otherwise (not touched)
+func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
+	if p.From3.Type != obj.TYPE_NONE {
+		// 9g never generates a from3
+		fmt.Printf("copyu: from3 (%v) not implemented\n", gc.Ctxt.Dconv(&p.From3))
+	}
+
+	switch p.As {
+	default:
+		fmt.Printf("copyu: can't find %v\n", obj.Aconv(int(p.As)))
+		return 2
+
+	case obj.ANOP, /* read p->from, write p->to */
+		ppc64.AMOVH,
+		ppc64.AMOVHZ,
+		ppc64.AMOVB,
+		ppc64.AMOVBZ,
+		ppc64.AMOVW,
+		ppc64.AMOVWZ,
+		ppc64.AMOVD,
+		ppc64.ANEG,
+		ppc64.ANEGCC,
+		ppc64.AADDME,
+		ppc64.AADDMECC,
+		ppc64.AADDZE,
+		ppc64.AADDZECC,
+		ppc64.ASUBME,
+		ppc64.ASUBMECC,
+		ppc64.ASUBZE,
+		ppc64.ASUBZECC,
+		ppc64.AFCTIW,
+		ppc64.AFCTIWZ,
+		ppc64.AFCTID,
+		ppc64.AFCTIDZ,
+		ppc64.AFCFID,
+		ppc64.AFCFIDCC,
+		ppc64.AFMOVS,
+		ppc64.AFMOVD,
+		ppc64.AFRSP,
+		ppc64.AFNEG,
+		ppc64.AFNEGCC:
+		if s != nil {
+			if copysub(&p.From, v, s, 1) != 0 {
+				return 1
+			}
+
+			// Update only indirect uses of v in p->to
+			if !copyas(&p.To, v) {
+				if copysub(&p.To, v, s, 1) != 0 {
+					return 1
+				}
+			}
+			return 0
+		}
+
+		if copyas(&p.To, v) {
+			// Fix up implicit from
+			if p.From.Type == obj.TYPE_NONE {
+				p.From = p.To
+			}
+			if copyau(&p.From, v) {
+				return 4
+			}
+			return 3
+		}
+
+		if copyau(&p.From, v) {
+			return 1
+		}
+		if copyau(&p.To, v) {
+			// p->to only indirectly uses v
+			return 1
+		}
+
+		return 0
+
+	case ppc64.AMOVBU, /* rar p->from, write p->to or read p->from, rar p->to */
+		ppc64.AMOVBZU,
+		ppc64.AMOVHU,
+		ppc64.AMOVHZU,
+		ppc64.AMOVWZU,
+		ppc64.AMOVDU:
+		if p.From.Type == obj.TYPE_MEM {
+			if copyas(&p.From, v) {
+				// No s!=nil check; need to fail
+				// anyway in that case
+				return 2
+			}
+
+			if s != nil {
+				if copysub(&p.To, v, s, 1) != 0 {
+					return 1
+				}
+				return 0
+			}
+
+			if copyas(&p.To, v) {
+				return 3
+			}
+		} else if p.To.Type == obj.TYPE_MEM {
+			if copyas(&p.To, v) {
+				return 2
+			}
+			if s != nil {
+				if copysub(&p.From, v, s, 1) != 0 {
+					return 1
+				}
+				return 0
+			}
+
+			if copyau(&p.From, v) {
+				return 1
+			}
+		} else {
+			fmt.Printf("copyu: bad %v\n", p)
+		}
+
+		return 0
+
+	case ppc64.ARLWMI, /* read p->from, read p->reg, rar p->to */
+		ppc64.ARLWMICC:
+		if copyas(&p.To, v) {
+			return 2
+		}
+		fallthrough
+	case ppc64.AADD,
+		/* read p->from, read p->reg, write p->to */
+		ppc64.AADDC,
+		ppc64.AADDE,
+		ppc64.ASUB,
+		ppc64.ASLW,
+		ppc64.ASRW,
+		ppc64.ASRAW,
+		ppc64.ASLD,
+		ppc64.ASRD,
+		ppc64.ASRAD,
+		ppc64.AOR,
+		ppc64.AORCC,
+		ppc64.AORN,
+		ppc64.AORNCC,
+		ppc64.AAND,
+		ppc64.AANDCC,
+		ppc64.AANDN,
+		ppc64.AANDNCC,
+		ppc64.ANAND,
+		ppc64.ANANDCC,
+		ppc64.ANOR,
+		ppc64.ANORCC,
+		ppc64.AXOR,
+		ppc64.AMULHW,
+		ppc64.AMULHWU,
+		ppc64.AMULLW,
+		ppc64.AMULLD,
+		ppc64.ADIVW,
+		ppc64.ADIVD,
+		ppc64.ADIVWU,
+		ppc64.ADIVDU,
+		ppc64.AREM,
+		ppc64.AREMU,
+		ppc64.AREMD,
+		ppc64.AREMDU,
+		ppc64.ARLWNM,
+		ppc64.ARLWNMCC,
+		ppc64.AFADDS,
+		ppc64.AFADD,
+		ppc64.AFSUBS,
+		ppc64.AFSUB,
+		ppc64.AFMULS,
+		ppc64.AFMUL,
+		ppc64.AFDIVS,
+		ppc64.AFDIV:
+		if s != nil {
+			if copysub(&p.From, v, s, 1) != 0 {
+				return 1
+			}
+			if copysub1(p, v, s, 1) != 0 {
+				return 1
+			}
+
+			// Update only indirect uses of v in p->to
+			if !copyas(&p.To, v) {
+				if copysub(&p.To, v, s, 1) != 0 {
+					return 1
+				}
+			}
+			return 0
+		}
+
+		if copyas(&p.To, v) {
+			if p.Reg == 0 {
+				// Fix up implicit reg (e.g., ADD
+				// R3,R4 -> ADD R3,R4,R4) so we can
+				// update reg and to separately.
+				p.Reg = p.To.Reg
+			}
+
+			if copyau(&p.From, v) {
+				return 4
+			}
+			if copyau1(p, v) {
+				return 4
+			}
+			return 3
+		}
+
+		if copyau(&p.From, v) {
+			return 1
+		}
+		if copyau1(p, v) {
+			return 1
+		}
+		if copyau(&p.To, v) {
+			return 1
+		}
+		return 0
+
+	case ppc64.ABEQ,
+		ppc64.ABGT,
+		ppc64.ABGE,
+		ppc64.ABLT,
+		ppc64.ABLE,
+		ppc64.ABNE,
+		ppc64.ABVC,
+		ppc64.ABVS:
+		return 0
+
+	case obj.ACHECKNIL, /* read p->from */
+		ppc64.ACMP, /* read p->from, read p->to */
+		ppc64.ACMPU,
+		ppc64.ACMPW,
+		ppc64.ACMPWU,
+		ppc64.AFCMPO,
+		ppc64.AFCMPU:
+		if s != nil {
+			if copysub(&p.From, v, s, 1) != 0 {
+				return 1
+			}
+			return copysub(&p.To, v, s, 1)
+		}
+
+		if copyau(&p.From, v) {
+			return 1
+		}
+		if copyau(&p.To, v) {
+			return 1
+		}
+		return 0
+
+		// 9g never generates a branch to a GPR (this isn't
+	// even a normal instruction; liblink turns it into a
+	// mov and a branch).
+	case ppc64.ABR: /* read p->to */
+		if s != nil {
+			if copysub(&p.To, v, s, 1) != 0 {
+				return 1
+			}
+			return 0
+		}
+
+		if copyau(&p.To, v) {
+			return 1
+		}
+		return 0
+
+	case ppc64.ARETURN: /* funny */
+		if s != nil {
+			return 0
+		}
+
+		// All registers die at this point, so claim
+		// everything is set (and not used).
+		return 3
+
+	case ppc64.ABL: /* funny */
+		if v.Type == obj.TYPE_REG {
+			// TODO(rsc): REG_R0 and REG_F0 used to be
+			// (when register numbers started at 0) exregoffset and exfregoffset,
+			// which are unset entirely.
+			// It's strange that this handles R0 and F0 differently from the other
+			// registers. Possible failure to optimize?
+			if ppc64.REG_R0 < v.Reg && v.Reg <= ppc64.REGEXT {
+				return 2
+			}
+			if v.Reg == ppc64.REGARG {
+				return 2
+			}
+			if ppc64.REG_F0 < v.Reg && v.Reg <= ppc64.FREGEXT {
+				return 2
+			}
+		}
+
+		if p.From.Type == obj.TYPE_REG && v.Type == obj.TYPE_REG && p.From.Reg == v.Reg {
+			return 2
+		}
+
+		if s != nil {
+			if copysub(&p.To, v, s, 1) != 0 {
+				return 1
+			}
+			return 0
+		}
+
+		if copyau(&p.To, v) {
+			return 4
+		}
+		return 3
+
+		// R0 is zero, used by DUFFZERO, cannot be substituted.
+	// R3 is ptr to memory, used and set, cannot be substituted.
+	case obj.ADUFFZERO:
+		if v.Type == obj.TYPE_REG {
+			if v.Reg == 0 {
+				return 1
+			}
+			if v.Reg == 3 {
+				return 2
+			}
+		}
+
+		return 0
+
+		// R3, R4 are ptr to src, dst, used and set, cannot be substituted.
+	// R5 is scratch, set by DUFFCOPY, cannot be substituted.
+	case obj.ADUFFCOPY:
+		if v.Type == obj.TYPE_REG {
+			if v.Reg == 3 || v.Reg == 4 {
+				return 2
+			}
+			if v.Reg == 5 {
+				return 3
+			}
+		}
+
+		return 0
+
+	case obj.ATEXT: /* funny */
+		if v.Type == obj.TYPE_REG {
+			if v.Reg == ppc64.REGARG {
+				return 3
+			}
+		}
+		return 0
+
+	case obj.APCDATA,
+		obj.AFUNCDATA,
+		obj.AVARDEF,
+		obj.AVARKILL:
+		return 0
+	}
+}
+
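
copy1 above consumes these codes in its main loop. A condensed sketch of
the protocol (hypothetical helper; the real loop also threads the f flag
across control-flow merges):

	func step(p *obj.Prog, v1, v2 *obj.Addr) (done, ok bool) {
		switch copyu(p, v2, nil) {
		case 2: // read-alter-rewrite: access can't be split
			return true, false
		case 3: // v2 overwritten: every earlier use was rewritten
			return true, true
		case 1, 4: // v2 read here: substitute v1 for it
			if copyu(p, v2, v1) != 0 {
				return true, false // substitution failed
			}
		}
		return false, true // 0: untouched; keep scanning
	}
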
+// copyas returns true if a and v address the same register.
+//
+// If a is the from operand, this means this operation reads the
+// register in v.  If a is the to operand, this means this operation
+// writes the register in v.
+func copyas(a *obj.Addr, v *obj.Addr) bool {
+	if regtyp(v) {
+		if a.Type == v.Type {
+			if a.Reg == v.Reg {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// copyau returns true if a either directly or indirectly addresses the
+// same register as v.
+//
+// If a is the from operand, this means this operation reads the
+// register in v.  If a is the to operand, this means the operation
+// either reads or writes the register in v (if !copyas(a, v), then
+// the operation reads the register in v).
+func copyau(a *obj.Addr, v *obj.Addr) bool {
+	if copyas(a, v) {
+		return true
+	}
+	if v.Type == obj.TYPE_REG {
+		if a.Type == obj.TYPE_MEM || (a.Type == obj.TYPE_ADDR && a.Reg != 0) {
+			if v.Reg == a.Reg {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// copyau1 returns 1 if p->reg references the same register as v and v
+// is a direct reference.
+func copyau1(p *obj.Prog, v *obj.Addr) bool {
+	if regtyp(v) && v.Reg != 0 {
+		if p.Reg == v.Reg {
+			return true
+		}
+	}
+	return false
+}
+
+// copysub replaces v with s in a if f!=0, or reports whether it could if f==0.
+// It would return 1 on failure to substitute; substitution always succeeds on ppc64.
+func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f int) int {
+	if f != 0 {
+		if copyau(a, v) {
+			a.Reg = s.Reg
+		}
+	}
+	return 0
+}
+
+// copysub1 replaces v with s in p1->reg if f!=0 or indicates if it could if f==0.
+// Returns 1 on failure to substitute (it always succeeds on ppc64).
+func copysub1(p1 *obj.Prog, v *obj.Addr, s *obj.Addr, f int) int {
+	if f != 0 {
+		if copyau1(p1, v) {
+			p1.Reg = s.Reg
+		}
+	}
+	return 0
+}
+
+func sameaddr(a *obj.Addr, v *obj.Addr) bool {
+	if a.Type != v.Type {
+		return false
+	}
+	if regtyp(v) && a.Reg == v.Reg {
+		return true
+	}
+	if v.Type == obj.NAME_AUTO || v.Type == obj.NAME_PARAM {
+		if v.Offset == a.Offset {
+			return true
+		}
+	}
+	return false
+}
+
+func smallindir(a *obj.Addr, reg *obj.Addr) bool {
+	return reg.Type == obj.TYPE_REG && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && 0 <= a.Offset && a.Offset < 4096
+}
+
+func stackaddr(a *obj.Addr) bool {
+	return a.Type == obj.TYPE_REG && a.Reg == ppc64.REGSP
+}
diff --git a/src/cmd/compile/internal/ppc64/prog.go b/src/cmd/compile/internal/ppc64/prog.go
new file mode 100644
index 0000000..c7e1827
--- /dev/null
+++ b/src/cmd/compile/internal/ppc64/prog.go
@@ -0,0 +1,314 @@
+// Copyright 2014 The Go Authors.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ppc64
+
+import (
+	"cmd/compile/internal/gc"
+	"cmd/internal/obj"
+	"cmd/internal/obj/ppc64"
+)
+
+const (
+	LeftRdwr  uint32 = gc.LeftRead | gc.LeftWrite
+	RightRdwr uint32 = gc.RightRead | gc.RightWrite
+)
+
+// This table gives the basic information about instruction
+// generated by the compiler and processed in the optimizer.
+// See opt.h for bit definitions.
+//
+// Instructions not generated need not be listed.
+// As an exception to that rule, we typically write down all the
+// size variants of an operation even if we just use a subset.
+//
+// The table is formatted for 8-space tabs.
+var progtable = [ppc64.ALAST]obj.ProgInfo{
+	obj.ATYPE:     {gc.Pseudo | gc.Skip, 0, 0, 0},
+	obj.ATEXT:     {gc.Pseudo, 0, 0, 0},
+	obj.AFUNCDATA: {gc.Pseudo, 0, 0, 0},
+	obj.APCDATA:   {gc.Pseudo, 0, 0, 0},
+	obj.AUNDEF:    {gc.Break, 0, 0, 0},
+	obj.AUSEFIELD: {gc.OK, 0, 0, 0},
+	obj.ACHECKNIL: {gc.LeftRead, 0, 0, 0},
+	obj.AVARDEF:   {gc.Pseudo | gc.RightWrite, 0, 0, 0},
+	obj.AVARKILL:  {gc.Pseudo | gc.RightWrite, 0, 0, 0},
+
+	// NOP is an internal no-op that also stands
+	// for USED and SET annotations, not the Power opcode.
+	obj.ANOP: {gc.LeftRead | gc.RightWrite, 0, 0, 0},
+
+	// Integer
+	ppc64.AADD:    {gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.ASUB:    {gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.ANEG:    {gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.AAND:    {gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.AOR:     {gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.AXOR:    {gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.AMULLD:  {gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.AMULLW:  {gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.AMULHD:  {gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.AMULHDU: {gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.ADIVD:   {gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.ADIVDU:  {gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.ASLD:    {gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.ASRD:    {gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.ASRAD:   {gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.ACMP:    {gc.SizeQ | gc.LeftRead | gc.RightRead, 0, 0, 0},
+	ppc64.ACMPU:   {gc.SizeQ | gc.LeftRead | gc.RightRead, 0, 0, 0},
+	ppc64.ATD:     {gc.SizeQ | gc.RightRead, 0, 0, 0},
+
+	// Floating point.
+	ppc64.AFADD:   {gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.AFADDS:  {gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.AFSUB:   {gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.AFSUBS:  {gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.AFMUL:   {gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.AFMULS:  {gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.AFDIV:   {gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.AFDIVS:  {gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.AFCTIDZ: {gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.AFCFID:  {gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.AFCMPU:  {gc.SizeD | gc.LeftRead | gc.RightRead, 0, 0, 0},
+	ppc64.AFRSP:   {gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+
+	// Moves
+	ppc64.AMOVB:  {gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv, 0, 0, 0},
+	ppc64.AMOVBU: {gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv | gc.PostInc, 0, 0, 0},
+	ppc64.AMOVBZ: {gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv, 0, 0, 0},
+	ppc64.AMOVH:  {gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv, 0, 0, 0},
+	ppc64.AMOVHU: {gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv | gc.PostInc, 0, 0, 0},
+	ppc64.AMOVHZ: {gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv, 0, 0, 0},
+	ppc64.AMOVW:  {gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv, 0, 0, 0},
+
+	// there is no AMOVWU.
+	ppc64.AMOVWZU: {gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv | gc.PostInc, 0, 0, 0},
+	ppc64.AMOVWZ:  {gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv, 0, 0, 0},
+	ppc64.AMOVD:   {gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+	ppc64.AMOVDU:  {gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move | gc.PostInc, 0, 0, 0},
+	ppc64.AFMOVS:  {gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv, 0, 0, 0},
+	ppc64.AFMOVD:  {gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+
+	// Jumps
+	ppc64.ABR:     {gc.Jump | gc.Break, 0, 0, 0},
+	ppc64.ABL:     {gc.Call, 0, 0, 0},
+	ppc64.ABEQ:    {gc.Cjmp, 0, 0, 0},
+	ppc64.ABNE:    {gc.Cjmp, 0, 0, 0},
+	ppc64.ABGE:    {gc.Cjmp, 0, 0, 0},
+	ppc64.ABLT:    {gc.Cjmp, 0, 0, 0},
+	ppc64.ABGT:    {gc.Cjmp, 0, 0, 0},
+	ppc64.ABLE:    {gc.Cjmp, 0, 0, 0},
+	ppc64.ARETURN: {gc.Break, 0, 0, 0},
+	obj.ADUFFZERO: {gc.Call, 0, 0, 0},
+	obj.ADUFFCOPY: {gc.Call, 0, 0, 0},
+}
+
+var initproginfo_initialized bool
+
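+// initproginfo performs a one-time expansion of progtable: each base
+// instruction's flags are copied to its CC, V, and VCC variants.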
+func initproginfo() {
+	addvariant := []int{V_CC, V_V, V_CC | V_V}
+
+	if initproginfo_initialized {
+		return
+	}
+	initproginfo_initialized = true
+
+	// Perform one-time expansion of instructions in progtable to
+	// their CC, V, and VCC variants.
+	for as := 0; as < len(progtable); as++ {
+		if progtable[as].Flags == 0 {
+			continue
+		}
+		variant := as2variant(as)
+		for i := 0; i < len(addvariant); i++ {
+			as2 := variant2as(as, variant|addvariant[i])
+			if as2 != 0 && progtable[as2].Flags == 0 {
+				progtable[as2] = progtable[as]
+			}
+		}
+	}
+}
+
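+// proginfo fills in p.Info with the flags and implicit register use
+// for p, refining the progtable entry for p's actual operands.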
+func proginfo(p *obj.Prog) {
+	initproginfo()
+
+	info := &p.Info
+	*info = progtable[p.As]
+	if info.Flags == 0 {
+		gc.Fatal("proginfo: unknown instruction %v", p)
+	}
+
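+	// A RegRead operation with no middle register reads its
+	// destination as the second source operand instead.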
+	if (info.Flags&gc.RegRead != 0) && p.Reg == 0 {
+		info.Flags &^= gc.RegRead
+		info.Flags |= gc.RightRead /*CanRegRead |*/
+	}
+
+	if (p.From.Type == obj.TYPE_MEM || p.From.Type == obj.TYPE_ADDR) && p.From.Reg != 0 {
+		info.Regindex |= RtoB(int(p.From.Reg))
+		if info.Flags&gc.PostInc != 0 {
+			info.Regset |= RtoB(int(p.From.Reg))
+		}
+	}
+
+	if (p.To.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_ADDR) && p.To.Reg != 0 {
+		info.Regindex |= RtoB(int(p.To.Reg))
+		if info.Flags&gc.PostInc != 0 {
+			info.Regset |= RtoB(int(p.To.Reg))
+		}
+	}
+
+	if p.From.Type == obj.TYPE_ADDR && p.From.Sym != nil && (info.Flags&gc.LeftRead != 0) {
+		info.Flags &^= gc.LeftRead
+		info.Flags |= gc.LeftAddr
+	}
+
+	if p.As == obj.ADUFFZERO {
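+		// duffzero implicitly uses R0 (always zero) and reads and
+		// writes the target pointer in R3.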
+		info.Reguse |= 1<<0 | RtoB(ppc64.REG_R3)
+		info.Regset |= RtoB(ppc64.REG_R3)
+	}
+
+	if p.As == obj.ADUFFCOPY {
+		// TODO(austin) Revisit when duffcopy is implemented
+		info.Reguse |= RtoB(ppc64.REG_R3) | RtoB(ppc64.REG_R4) | RtoB(ppc64.REG_R5)
+
+		info.Regset |= RtoB(ppc64.REG_R3) | RtoB(ppc64.REG_R4)
+	}
+}
+
+// Instruction variants table.  Initially this contains entries only
+// for the "base" form of each instruction.  On the first call to
+// as2variant or variant2as, we'll add the variants to the table.
+var varianttable = [ppc64.ALAST][4]int{
+	ppc64.AADD:     [4]int{ppc64.AADD, ppc64.AADDCC, ppc64.AADDV, ppc64.AADDVCC},
+	ppc64.AADDC:    [4]int{ppc64.AADDC, ppc64.AADDCCC, ppc64.AADDCV, ppc64.AADDCVCC},
+	ppc64.AADDE:    [4]int{ppc64.AADDE, ppc64.AADDECC, ppc64.AADDEV, ppc64.AADDEVCC},
+	ppc64.AADDME:   [4]int{ppc64.AADDME, ppc64.AADDMECC, ppc64.AADDMEV, ppc64.AADDMEVCC},
+	ppc64.AADDZE:   [4]int{ppc64.AADDZE, ppc64.AADDZECC, ppc64.AADDZEV, ppc64.AADDZEVCC},
+	ppc64.AAND:     [4]int{ppc64.AAND, ppc64.AANDCC, 0, 0},
+	ppc64.AANDN:    [4]int{ppc64.AANDN, ppc64.AANDNCC, 0, 0},
+	ppc64.ACNTLZD:  [4]int{ppc64.ACNTLZD, ppc64.ACNTLZDCC, 0, 0},
+	ppc64.ACNTLZW:  [4]int{ppc64.ACNTLZW, ppc64.ACNTLZWCC, 0, 0},
+	ppc64.ADIVD:    [4]int{ppc64.ADIVD, ppc64.ADIVDCC, ppc64.ADIVDV, ppc64.ADIVDVCC},
+	ppc64.ADIVDU:   [4]int{ppc64.ADIVDU, ppc64.ADIVDUCC, ppc64.ADIVDUV, ppc64.ADIVDUVCC},
+	ppc64.ADIVW:    [4]int{ppc64.ADIVW, ppc64.ADIVWCC, ppc64.ADIVWV, ppc64.ADIVWVCC},
+	ppc64.ADIVWU:   [4]int{ppc64.ADIVWU, ppc64.ADIVWUCC, ppc64.ADIVWUV, ppc64.ADIVWUVCC},
+	ppc64.AEQV:     [4]int{ppc64.AEQV, ppc64.AEQVCC, 0, 0},
+	ppc64.AEXTSB:   [4]int{ppc64.AEXTSB, ppc64.AEXTSBCC, 0, 0},
+	ppc64.AEXTSH:   [4]int{ppc64.AEXTSH, ppc64.AEXTSHCC, 0, 0},
+	ppc64.AEXTSW:   [4]int{ppc64.AEXTSW, ppc64.AEXTSWCC, 0, 0},
+	ppc64.AFABS:    [4]int{ppc64.AFABS, ppc64.AFABSCC, 0, 0},
+	ppc64.AFADD:    [4]int{ppc64.AFADD, ppc64.AFADDCC, 0, 0},
+	ppc64.AFADDS:   [4]int{ppc64.AFADDS, ppc64.AFADDSCC, 0, 0},
+	ppc64.AFCFID:   [4]int{ppc64.AFCFID, ppc64.AFCFIDCC, 0, 0},
+	ppc64.AFCTID:   [4]int{ppc64.AFCTID, ppc64.AFCTIDCC, 0, 0},
+	ppc64.AFCTIDZ:  [4]int{ppc64.AFCTIDZ, ppc64.AFCTIDZCC, 0, 0},
+	ppc64.AFCTIW:   [4]int{ppc64.AFCTIW, ppc64.AFCTIWCC, 0, 0},
+	ppc64.AFCTIWZ:  [4]int{ppc64.AFCTIWZ, ppc64.AFCTIWZCC, 0, 0},
+	ppc64.AFDIV:    [4]int{ppc64.AFDIV, ppc64.AFDIVCC, 0, 0},
+	ppc64.AFDIVS:   [4]int{ppc64.AFDIVS, ppc64.AFDIVSCC, 0, 0},
+	ppc64.AFMADD:   [4]int{ppc64.AFMADD, ppc64.AFMADDCC, 0, 0},
+	ppc64.AFMADDS:  [4]int{ppc64.AFMADDS, ppc64.AFMADDSCC, 0, 0},
+	ppc64.AFMOVD:   [4]int{ppc64.AFMOVD, ppc64.AFMOVDCC, 0, 0},
+	ppc64.AFMSUB:   [4]int{ppc64.AFMSUB, ppc64.AFMSUBCC, 0, 0},
+	ppc64.AFMSUBS:  [4]int{ppc64.AFMSUBS, ppc64.AFMSUBSCC, 0, 0},
+	ppc64.AFMUL:    [4]int{ppc64.AFMUL, ppc64.AFMULCC, 0, 0},
+	ppc64.AFMULS:   [4]int{ppc64.AFMULS, ppc64.AFMULSCC, 0, 0},
+	ppc64.AFNABS:   [4]int{ppc64.AFNABS, ppc64.AFNABSCC, 0, 0},
+	ppc64.AFNEG:    [4]int{ppc64.AFNEG, ppc64.AFNEGCC, 0, 0},
+	ppc64.AFNMADD:  [4]int{ppc64.AFNMADD, ppc64.AFNMADDCC, 0, 0},
+	ppc64.AFNMADDS: [4]int{ppc64.AFNMADDS, ppc64.AFNMADDSCC, 0, 0},
+	ppc64.AFNMSUB:  [4]int{ppc64.AFNMSUB, ppc64.AFNMSUBCC, 0, 0},
+	ppc64.AFNMSUBS: [4]int{ppc64.AFNMSUBS, ppc64.AFNMSUBSCC, 0, 0},
+	ppc64.AFRES:    [4]int{ppc64.AFRES, ppc64.AFRESCC, 0, 0},
+	ppc64.AFRSP:    [4]int{ppc64.AFRSP, ppc64.AFRSPCC, 0, 0},
+	ppc64.AFRSQRTE: [4]int{ppc64.AFRSQRTE, ppc64.AFRSQRTECC, 0, 0},
+	ppc64.AFSEL:    [4]int{ppc64.AFSEL, ppc64.AFSELCC, 0, 0},
+	ppc64.AFSQRT:   [4]int{ppc64.AFSQRT, ppc64.AFSQRTCC, 0, 0},
+	ppc64.AFSQRTS:  [4]int{ppc64.AFSQRTS, ppc64.AFSQRTSCC, 0, 0},
+	ppc64.AFSUB:    [4]int{ppc64.AFSUB, ppc64.AFSUBCC, 0, 0},
+	ppc64.AFSUBS:   [4]int{ppc64.AFSUBS, ppc64.AFSUBSCC, 0, 0},
+	ppc64.AMTFSB0:  [4]int{ppc64.AMTFSB0, ppc64.AMTFSB0CC, 0, 0},
+	ppc64.AMTFSB1:  [4]int{ppc64.AMTFSB1, ppc64.AMTFSB1CC, 0, 0},
+	ppc64.AMULHD:   [4]int{ppc64.AMULHD, ppc64.AMULHDCC, 0, 0},
+	ppc64.AMULHDU:  [4]int{ppc64.AMULHDU, ppc64.AMULHDUCC, 0, 0},
+	ppc64.AMULHW:   [4]int{ppc64.AMULHW, ppc64.AMULHWCC, 0, 0},
+	ppc64.AMULHWU:  [4]int{ppc64.AMULHWU, ppc64.AMULHWUCC, 0, 0},
+	ppc64.AMULLD:   [4]int{ppc64.AMULLD, ppc64.AMULLDCC, ppc64.AMULLDV, ppc64.AMULLDVCC},
+	ppc64.AMULLW:   [4]int{ppc64.AMULLW, ppc64.AMULLWCC, ppc64.AMULLWV, ppc64.AMULLWVCC},
+	ppc64.ANAND:    [4]int{ppc64.ANAND, ppc64.ANANDCC, 0, 0},
+	ppc64.ANEG:     [4]int{ppc64.ANEG, ppc64.ANEGCC, ppc64.ANEGV, ppc64.ANEGVCC},
+	ppc64.ANOR:     [4]int{ppc64.ANOR, ppc64.ANORCC, 0, 0},
+	ppc64.AOR:      [4]int{ppc64.AOR, ppc64.AORCC, 0, 0},
+	ppc64.AORN:     [4]int{ppc64.AORN, ppc64.AORNCC, 0, 0},
+	ppc64.AREM:     [4]int{ppc64.AREM, ppc64.AREMCC, ppc64.AREMV, ppc64.AREMVCC},
+	ppc64.AREMD:    [4]int{ppc64.AREMD, ppc64.AREMDCC, ppc64.AREMDV, ppc64.AREMDVCC},
+	ppc64.AREMDU:   [4]int{ppc64.AREMDU, ppc64.AREMDUCC, ppc64.AREMDUV, ppc64.AREMDUVCC},
+	ppc64.AREMU:    [4]int{ppc64.AREMU, ppc64.AREMUCC, ppc64.AREMUV, ppc64.AREMUVCC},
+	ppc64.ARLDC:    [4]int{ppc64.ARLDC, ppc64.ARLDCCC, 0, 0},
+	ppc64.ARLDCL:   [4]int{ppc64.ARLDCL, ppc64.ARLDCLCC, 0, 0},
+	ppc64.ARLDCR:   [4]int{ppc64.ARLDCR, ppc64.ARLDCRCC, 0, 0},
+	ppc64.ARLDMI:   [4]int{ppc64.ARLDMI, ppc64.ARLDMICC, 0, 0},
+	ppc64.ARLWMI:   [4]int{ppc64.ARLWMI, ppc64.ARLWMICC, 0, 0},
+	ppc64.ARLWNM:   [4]int{ppc64.ARLWNM, ppc64.ARLWNMCC, 0, 0},
+	ppc64.ASLD:     [4]int{ppc64.ASLD, ppc64.ASLDCC, 0, 0},
+	ppc64.ASLW:     [4]int{ppc64.ASLW, ppc64.ASLWCC, 0, 0},
+	ppc64.ASRAD:    [4]int{ppc64.ASRAD, ppc64.ASRADCC, 0, 0},
+	ppc64.ASRAW:    [4]int{ppc64.ASRAW, ppc64.ASRAWCC, 0, 0},
+	ppc64.ASRD:     [4]int{ppc64.ASRD, ppc64.ASRDCC, 0, 0},
+	ppc64.ASRW:     [4]int{ppc64.ASRW, ppc64.ASRWCC, 0, 0},
+	ppc64.ASUB:     [4]int{ppc64.ASUB, ppc64.ASUBCC, ppc64.ASUBV, ppc64.ASUBVCC},
+	ppc64.ASUBC:    [4]int{ppc64.ASUBC, ppc64.ASUBCCC, ppc64.ASUBCV, ppc64.ASUBCVCC},
+	ppc64.ASUBE:    [4]int{ppc64.ASUBE, ppc64.ASUBECC, ppc64.ASUBEV, ppc64.ASUBEVCC},
+	ppc64.ASUBME:   [4]int{ppc64.ASUBME, ppc64.ASUBMECC, ppc64.ASUBMEV, ppc64.ASUBMEVCC},
+	ppc64.ASUBZE:   [4]int{ppc64.ASUBZE, ppc64.ASUBZECC, ppc64.ASUBZEV, ppc64.ASUBZEVCC},
+	ppc64.AXOR:     [4]int{ppc64.AXOR, ppc64.AXORCC, 0, 0},
+}
+
+var initvariants_initialized bool
+
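+// initvariants performs a one-time expansion of varianttable so that
+// every variant maps to the full variant set of its base form.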
+func initvariants() {
+	if initvariants_initialized {
+		return
+	}
+	initvariants_initialized = true
+
+	for i := 0; i < len(varianttable); i++ {
+		if varianttable[i][0] == 0 {
+			// Instruction has no variants
+			varianttable[i][0] = i
+
+			continue
+		}
+
+		// Copy base form to other variants
+		if varianttable[i][0] == i {
+			for j := 0; j < len(varianttable[i]); j++ {
+				varianttable[varianttable[i][j]] = varianttable[i]
+			}
+		}
+	}
+}
+
+// as2variant returns the variant (V_*) flags of instruction as.
+func as2variant(as int) int {
+	initvariants()
+	for i := 0; i < len(varianttable[as]); i++ {
+		if varianttable[as][i] == as {
+			return i
+		}
+	}
+	gc.Fatal("as2variant: instruction %v is not a variant of itself", obj.Aconv(as))
+	return 0
+}
+
+// variant2as returns the instruction as with the given variant (V_*) flags.
+// If no such variant exists, this returns 0.
+func variant2as(as int, flags int) int {
+	initvariants()
+	return varianttable[as][flags]
+}
diff --git a/src/cmd/compile/internal/ppc64/reg.go b/src/cmd/compile/internal/ppc64/reg.go
new file mode 100644
index 0000000..fa1cb71
--- /dev/null
+++ b/src/cmd/compile/internal/ppc64/reg.go
@@ -0,0 +1,162 @@
+// Derived from Inferno utils/6c/reg.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/reg.c
+//
+//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
+//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+//	Portions Copyright © 1997-1999 Vita Nuova Limited
+//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+//	Portions Copyright © 2004,2006 Bruce Ellis
+//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+//	Portions Copyright © 2009 The Go Authors.  All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package ppc64
+
+import "cmd/internal/obj/ppc64"
+import "cmd/compile/internal/gc"
+
+const (
+	NREGVAR = 64 /* 32 general + 32 floating */
+)
+
+var regname = []string{
+	".R0",
+	".R1",
+	".R2",
+	".R3",
+	".R4",
+	".R5",
+	".R6",
+	".R7",
+	".R8",
+	".R9",
+	".R10",
+	".R11",
+	".R12",
+	".R13",
+	".R14",
+	".R15",
+	".R16",
+	".R17",
+	".R18",
+	".R19",
+	".R20",
+	".R21",
+	".R22",
+	".R23",
+	".R24",
+	".R25",
+	".R26",
+	".R27",
+	".R28",
+	".R29",
+	".R30",
+	".R31",
+	".F0",
+	".F1",
+	".F2",
+	".F3",
+	".F4",
+	".F5",
+	".F6",
+	".F7",
+	".F8",
+	".F9",
+	".F10",
+	".F11",
+	".F12",
+	".F13",
+	".F14",
+	".F15",
+	".F16",
+	".F17",
+	".F18",
+	".F19",
+	".F20",
+	".F21",
+	".F22",
+	".F23",
+	".F24",
+	".F25",
+	".F26",
+	".F27",
+	".F28",
+	".F29",
+	".F30",
+	".F31",
+}
+
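+// regnames reports the register variable names and stores their
+// count in *n.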
+func regnames(n *int) []string {
+	*n = NREGVAR
+	return regname
+}
+
+func excludedregs() uint64 {
+	// Exclude registers with fixed functions
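+	// (Bit 0 is R0, which the toolchain reserves as a zero register.)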
+	regbits := uint64(1<<0 | RtoB(ppc64.REGSP) | RtoB(ppc64.REGG) | RtoB(ppc64.REGTLS))
+
+	// Also exclude floating point registers with fixed constants
+	regbits |= RtoB(ppc64.REG_F27) | RtoB(ppc64.REG_F28) | RtoB(ppc64.REG_F29) | RtoB(ppc64.REG_F30) | RtoB(ppc64.REG_F31)
+
+	return regbits
+}
+
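+// doregbits always returns 0 on ppc64: all register tracking here
+// goes through RtoB.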
+func doregbits(r int) uint64 {
+	return 0
+}
+
+/*
+ * track register variables including external registers:
+ *	bit	reg
+ *	0	R0
+ *	1	R1
+ *	...	...
+ *	31	R31
+ *	32+0	F0
+ *	32+1	F1
+ *	...	...
+ *	32+31	F31
+ */
+func RtoB(r int) uint64 {
+	if r > ppc64.REG_R0 && r <= ppc64.REG_R31 {
+		return 1 << uint(r-ppc64.REG_R0)
+	}
+	if r >= ppc64.REG_F0 && r <= ppc64.REG_F31 {
+		return 1 << uint(32+r-ppc64.REG_F0)
+	}
+	return 0
+}
+
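+// BtoR returns the lowest general register set in b, or 0 if none.
+// Only the low 32 bits of b encode R0 through R31.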
+func BtoR(b uint64) int {
+	b &= 0xffffffff
+	if b == 0 {
+		return 0
+	}
+	return gc.Bitno(b) + ppc64.REG_R0
+}
+
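+// BtoF returns the lowest floating-point register set in b, or 0 if
+// none; the high 32 bits of b encode F0 through F31.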
+func BtoF(b uint64) int {
+	b >>= 32
+	if b == 0 {
+		return 0
+	}
+	return gc.Bitno(b) + ppc64.REG_F0
+}
diff --git a/src/cmd/compile/internal/x86/cgen.go b/src/cmd/compile/internal/x86/cgen.go
new file mode 100644
index 0000000..1768674
--- /dev/null
+++ b/src/cmd/compile/internal/x86/cgen.go
@@ -0,0 +1,159 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x86
+
+import (
+	"cmd/compile/internal/gc"
+	"cmd/internal/obj"
+	"cmd/internal/obj/x86"
+)
+
+/*
+ * generate an addressable node in res, containing the value of n.
+ * n is an array index, and might be any size; res width is <= 32-bit.
+ * returns a *obj.Prog to patch to the panic call, or nil if no check is needed.
+ */
+func igenindex(n *gc.Node, res *gc.Node, bounded bool) *obj.Prog {
+	if !gc.Is64(n.Type) {
+		if n.Addable && (gc.Simtype[n.Etype] == gc.TUINT32 || gc.Simtype[n.Etype] == gc.TINT32) {
+			// nothing to do.
+			*res = *n
+		} else {
+			gc.Tempname(res, gc.Types[gc.TUINT32])
+			gc.Cgen(n, res)
+		}
+
+		return nil
+	}
+
+	var tmp gc.Node
+	gc.Tempname(&tmp, gc.Types[gc.TINT64])
+	gc.Cgen(n, &tmp)
+	var lo gc.Node
+	var hi gc.Node
+	split64(&tmp, &lo, &hi)
+	gc.Tempname(res, gc.Types[gc.TUINT32])
+	gmove(&lo, res)
+	if bounded {
+		splitclean()
+		return nil
+	}
+
+	var zero gc.Node
+	gc.Nodconst(&zero, gc.Types[gc.TINT32], 0)
+	gins(x86.ACMPL, &hi, &zero)
+	splitclean()
+	return gc.Gbranch(x86.AJNE, nil, +1)
+}
+
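+// blockcopy copies w bytes from n to res. osrc and odst give the
+// stack offsets of the source and destination; when they overlap on a
+// forward copy, the copy runs backward instead.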
+func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
+	var dst gc.Node
+	gc.Nodreg(&dst, gc.Types[gc.Tptr], x86.REG_DI)
+	var src gc.Node
+	gc.Nodreg(&src, gc.Types[gc.Tptr], x86.REG_SI)
+
+	var tsrc gc.Node
+	gc.Tempname(&tsrc, gc.Types[gc.Tptr])
+	var tdst gc.Node
+	gc.Tempname(&tdst, gc.Types[gc.Tptr])
+	if !n.Addable {
+		gc.Agen(n, &tsrc)
+	}
+	if !res.Addable {
+		gc.Agen(res, &tdst)
+	}
+	if n.Addable {
+		gc.Agen(n, &src)
+	} else {
+		gmove(&tsrc, &src)
+	}
+
+	if res.Op == gc.ONAME {
+		gc.Gvardef(res)
+	}
+
+	if res.Addable {
+		gc.Agen(res, &dst)
+	} else {
+		gmove(&tdst, &dst)
+	}
+
+	c := int32(w % 4) // bytes
+	q := int32(w / 4) // doublewords
+
+	// if we are copying forward on the stack and
+	// the src and dst overlap, then reverse direction
+	if osrc < odst && odst < osrc+w {
+		// reverse direction
+		gins(x86.ASTD, nil, nil) // set direction flag
+		if c > 0 {
+			gconreg(x86.AADDL, w-1, x86.REG_SI)
+			gconreg(x86.AADDL, w-1, x86.REG_DI)
+
+			gconreg(x86.AMOVL, int64(c), x86.REG_CX)
+			gins(x86.AREP, nil, nil)   // repeat
+			gins(x86.AMOVSB, nil, nil) // MOVB *(SI)-,*(DI)-
+		}
+
+		if q > 0 {
+			if c > 0 {
+				gconreg(x86.AADDL, -3, x86.REG_SI)
+				gconreg(x86.AADDL, -3, x86.REG_DI)
+			} else {
+				gconreg(x86.AADDL, w-4, x86.REG_SI)
+				gconreg(x86.AADDL, w-4, x86.REG_DI)
+			}
+
+			gconreg(x86.AMOVL, int64(q), x86.REG_CX)
+			gins(x86.AREP, nil, nil)   // repeat
+			gins(x86.AMOVSL, nil, nil) // MOVL *(SI)-,*(DI)-
+		}
+
+		// we leave with the flag clear
+		gins(x86.ACLD, nil, nil)
+	} else {
+		gins(x86.ACLD, nil, nil) // paranoia.  TODO(rsc): remove?
+
+		// normal direction
+		if q > 128 || (q >= 4 && gc.Nacl) {
+			gconreg(x86.AMOVL, int64(q), x86.REG_CX)
+			gins(x86.AREP, nil, nil)   // repeat
+			gins(x86.AMOVSL, nil, nil) // MOVL *(SI)+,*(DI)+
+		} else if q >= 4 {
+			p := gins(obj.ADUFFCOPY, nil, nil)
+			p.To.Type = obj.TYPE_ADDR
+			p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))
+
+			// 10 and 128 = magic constants: see ../../runtime/asm_386.s
+			p.To.Offset = 10 * (128 - int64(q))
+		} else if !gc.Nacl && c == 0 {
+			var cx gc.Node
+			gc.Nodreg(&cx, gc.Types[gc.TINT32], x86.REG_CX)
+
+			// We don't need the MOVSL side-effect of updating SI and DI,
+			// and issuing a sequence of MOVLs directly is faster.
+			src.Op = gc.OINDREG
+
+			dst.Op = gc.OINDREG
+			for q > 0 {
+				gmove(&src, &cx) // MOVL x+(SI),CX
+				gmove(&cx, &dst) // MOVL CX,x+(DI)
+				src.Xoffset += 4
+				dst.Xoffset += 4
+				q--
+			}
+		} else {
+			for q > 0 {
+				gins(x86.AMOVSL, nil, nil) // MOVL *(SI)+,*(DI)+
+				q--
+			}
+		}
+
+		for c > 0 {
+			gins(x86.AMOVSB, nil, nil) // MOVB *(SI)+,*(DI)+
+			c--
+		}
+	}
+}
diff --git a/src/cmd/compile/internal/x86/cgen64.go b/src/cmd/compile/internal/x86/cgen64.go
new file mode 100644
index 0000000..0b061ff
--- /dev/null
+++ b/src/cmd/compile/internal/x86/cgen64.go
@@ -0,0 +1,598 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x86
+
+import (
+	"cmd/compile/internal/gc"
+	"cmd/internal/obj"
+	"cmd/internal/obj/x86"
+)
+
+/*
+ * generate 64-bit
+ *	res = n
+ * res must be OINDREG or ONAME; ops not handled here are fatal errors.
+ */
+func cgen64(n *gc.Node, res *gc.Node) {
+	if res.Op != gc.OINDREG && res.Op != gc.ONAME {
+		gc.Dump("n", n)
+		gc.Dump("res", res)
+		gc.Fatal("cgen64 %v of %v", gc.Oconv(int(n.Op), 0), gc.Oconv(int(res.Op), 0))
+	}
+
+	switch n.Op {
+	default:
+		gc.Fatal("cgen64 %v", gc.Oconv(int(n.Op), 0))
+
+	case gc.OMINUS:
+		gc.Cgen(n.Left, res)
+		var hi1 gc.Node
+		var lo1 gc.Node
+		split64(res, &lo1, &hi1)
+		gins(x86.ANEGL, nil, &lo1)
+		gins(x86.AADCL, ncon(0), &hi1)
+		gins(x86.ANEGL, nil, &hi1)
+		splitclean()
+		return
+
+	case gc.OCOM:
+		gc.Cgen(n.Left, res)
+		var lo1 gc.Node
+		var hi1 gc.Node
+		split64(res, &lo1, &hi1)
+		gins(x86.ANOTL, nil, &lo1)
+		gins(x86.ANOTL, nil, &hi1)
+		splitclean()
+		return
+
+		// binary operators.
+	// common setup below.
+	case gc.OADD,
+		gc.OSUB,
+		gc.OMUL,
+		gc.OLROT,
+		gc.OLSH,
+		gc.ORSH,
+		gc.OAND,
+		gc.OOR,
+		gc.OXOR:
+		break
+	}
+
+	l := n.Left
+	r := n.Right
+	if !l.Addable {
+		var t1 gc.Node
+		gc.Tempname(&t1, l.Type)
+		gc.Cgen(l, &t1)
+		l = &t1
+	}
+
+	if r != nil && !r.Addable {
+		var t2 gc.Node
+		gc.Tempname(&t2, r.Type)
+		gc.Cgen(r, &t2)
+		r = &t2
+	}
+
+	var ax gc.Node
+	gc.Nodreg(&ax, gc.Types[gc.TINT32], x86.REG_AX)
+	var cx gc.Node
+	gc.Nodreg(&cx, gc.Types[gc.TINT32], x86.REG_CX)
+	var dx gc.Node
+	gc.Nodreg(&dx, gc.Types[gc.TINT32], x86.REG_DX)
+
+	// Setup for binary operation.
+	var hi1 gc.Node
+	var lo1 gc.Node
+	split64(l, &lo1, &hi1)
+
+	var lo2 gc.Node
+	var hi2 gc.Node
+	if gc.Is64(r.Type) {
+		split64(r, &lo2, &hi2)
+	}
+
+	// Do op.  Leave result in DX:AX.
+	switch n.Op {
+	// TODO: Constants
+	case gc.OADD:
+		gins(x86.AMOVL, &lo1, &ax)
+
+		gins(x86.AMOVL, &hi1, &dx)
+		gins(x86.AADDL, &lo2, &ax)
+		gins(x86.AADCL, &hi2, &dx)
+
+		// TODO: Constants.
+	case gc.OSUB:
+		gins(x86.AMOVL, &lo1, &ax)
+
+		gins(x86.AMOVL, &hi1, &dx)
+		gins(x86.ASUBL, &lo2, &ax)
+		gins(x86.ASBBL, &hi2, &dx)
+
+		// let's call the next two EX and FX.
+	case gc.OMUL:
+		var ex gc.Node
+		gc.Regalloc(&ex, gc.Types[gc.TPTR32], nil)
+
+		var fx gc.Node
+		gc.Regalloc(&fx, gc.Types[gc.TPTR32], nil)
+
+		// load args into DX:AX and EX:CX.
+		gins(x86.AMOVL, &lo1, &ax)
+
+		gins(x86.AMOVL, &hi1, &dx)
+		gins(x86.AMOVL, &lo2, &cx)
+		gins(x86.AMOVL, &hi2, &ex)
+
+		// if DX and EX are zero, use 32 x 32 -> 64 unsigned multiply.
+		gins(x86.AMOVL, &dx, &fx)
+
+		gins(x86.AORL, &ex, &fx)
+		p1 := gc.Gbranch(x86.AJNE, nil, 0)
+		gins(x86.AMULL, &cx, nil) // implicit &ax
+		p2 := gc.Gbranch(obj.AJMP, nil, 0)
+		gc.Patch(p1, gc.Pc)
+
+		// full 64x64 -> 64, from 32x32 -> 64.
+		gins(x86.AIMULL, &cx, &dx)
+
+		gins(x86.AMOVL, &ax, &fx)
+		gins(x86.AIMULL, &ex, &fx)
+		gins(x86.AADDL, &dx, &fx)
+		gins(x86.AMOVL, &cx, &dx)
+		gins(x86.AMULL, &dx, nil) // implicit &ax
+		gins(x86.AADDL, &fx, &dx)
+		gc.Patch(p2, gc.Pc)
+
+		gc.Regfree(&ex)
+		gc.Regfree(&fx)
+
+		// We only rotate by a constant c in [0,64).
+	// if c >= 32:
+	//	lo, hi = hi, lo
+	//	c -= 32
+	// if c == 0:
+	//	no-op
+	// else:
+	//	t = hi
+	//	shld hi:lo, c
+	//	shld lo:t, c
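+	// For example, rotating by 40 swaps the halves (32) and then
+	// double-shifts each word by the remaining 8 bits.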
+	case gc.OLROT:
+		v := uint64(r.Int())
+
+		if v >= 32 {
+			// reverse during load to do the first 32 bits of rotate
+			v -= 32
+
+			gins(x86.AMOVL, &lo1, &dx)
+			gins(x86.AMOVL, &hi1, &ax)
+		} else {
+			gins(x86.AMOVL, &lo1, &ax)
+			gins(x86.AMOVL, &hi1, &dx)
+		}
+
+		// v == 0 means the rotate was fully handled above.
+		if v != 0 {
+			gins(x86.AMOVL, &dx, &cx)
+			p1 := gins(x86.ASHLL, ncon(uint32(v)), &dx)
+			p1.From.Index = x86.REG_AX // double-width shift
+			p1.From.Scale = 0
+			p1 = gins(x86.ASHLL, ncon(uint32(v)), &ax)
+			p1.From.Index = x86.REG_CX // double-width shift
+			p1.From.Scale = 0
+		}
+
+	case gc.OLSH:
+		if r.Op == gc.OLITERAL {
+			v := uint64(r.Int())
+			if v >= 64 {
+				if gc.Is64(r.Type) {
+					splitclean()
+				}
+				splitclean()
+				split64(res, &lo2, &hi2)
+				gins(x86.AMOVL, ncon(0), &lo2)
+				gins(x86.AMOVL, ncon(0), &hi2)
+				splitclean()
+				return
+			}
+
+			if v >= 32 {
+				if gc.Is64(r.Type) {
+					splitclean()
+				}
+				split64(res, &lo2, &hi2)
+				gmove(&lo1, &hi2)
+				if v > 32 {
+					gins(x86.ASHLL, ncon(uint32(v-32)), &hi2)
+				}
+
+				gins(x86.AMOVL, ncon(0), &lo2)
+				splitclean()
+				splitclean()
+				return
+			}
+
+			// general shift
+			gins(x86.AMOVL, &lo1, &ax)
+
+			gins(x86.AMOVL, &hi1, &dx)
+			p1 := gins(x86.ASHLL, ncon(uint32(v)), &dx)
+			p1.From.Index = x86.REG_AX // double-width shift
+			p1.From.Scale = 0
+			gins(x86.ASHLL, ncon(uint32(v)), &ax)
+			break
+		}
+
+		// load value into DX:AX.
+		gins(x86.AMOVL, &lo1, &ax)
+
+		gins(x86.AMOVL, &hi1, &dx)
+
+		// load shift value into register.
+		// if high bits are set, zero value.
+		var p1 *obj.Prog
+
+		if gc.Is64(r.Type) {
+			gins(x86.ACMPL, &hi2, ncon(0))
+			p1 = gc.Gbranch(x86.AJNE, nil, +1)
+			gins(x86.AMOVL, &lo2, &cx)
+		} else {
+			cx.Type = gc.Types[gc.TUINT32]
+			gmove(r, &cx)
+		}
+
+		// if shift count is >=64, zero value
+		gins(x86.ACMPL, &cx, ncon(64))
+
+		p2 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
+		if p1 != nil {
+			gc.Patch(p1, gc.Pc)
+		}
+		gins(x86.AXORL, &dx, &dx)
+		gins(x86.AXORL, &ax, &ax)
+		gc.Patch(p2, gc.Pc)
+
+		// if shift count is >= 32, zero low.
+		gins(x86.ACMPL, &cx, ncon(32))
+
+		p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
+		gins(x86.AMOVL, &ax, &dx)
+		gins(x86.ASHLL, &cx, &dx) // SHLL only uses bottom 5 bits of count
+		gins(x86.AXORL, &ax, &ax)
+		p2 = gc.Gbranch(obj.AJMP, nil, 0)
+		gc.Patch(p1, gc.Pc)
+
+		// general shift
+		p1 = gins(x86.ASHLL, &cx, &dx)
+
+		p1.From.Index = x86.REG_AX // double-width shift
+		p1.From.Scale = 0
+		gins(x86.ASHLL, &cx, &ax)
+		gc.Patch(p2, gc.Pc)
+
+	case gc.ORSH:
+		if r.Op == gc.OLITERAL {
+			v := uint64(r.Int())
+			if v >= 64 {
+				if gc.Is64(r.Type) {
+					splitclean()
+				}
+				splitclean()
+				split64(res, &lo2, &hi2)
+				if hi1.Type.Etype == gc.TINT32 {
+					gmove(&hi1, &lo2)
+					gins(x86.ASARL, ncon(31), &lo2)
+					gmove(&hi1, &hi2)
+					gins(x86.ASARL, ncon(31), &hi2)
+				} else {
+					gins(x86.AMOVL, ncon(0), &lo2)
+					gins(x86.AMOVL, ncon(0), &hi2)
+				}
+
+				splitclean()
+				return
+			}
+
+			if v >= 32 {
+				if gc.Is64(r.Type) {
+					splitclean()
+				}
+				split64(res, &lo2, &hi2)
+				gmove(&hi1, &lo2)
+				if v > 32 {
+					gins(optoas(gc.ORSH, hi1.Type), ncon(uint32(v-32)), &lo2)
+				}
+				if hi1.Type.Etype == gc.TINT32 {
+					gmove(&hi1, &hi2)
+					gins(x86.ASARL, ncon(31), &hi2)
+				} else {
+					gins(x86.AMOVL, ncon(0), &hi2)
+				}
+				splitclean()
+				splitclean()
+				return
+			}
+
+			// general shift
+			gins(x86.AMOVL, &lo1, &ax)
+
+			gins(x86.AMOVL, &hi1, &dx)
+			p1 := gins(x86.ASHRL, ncon(uint32(v)), &ax)
+			p1.From.Index = x86.REG_DX // double-width shift
+			p1.From.Scale = 0
+			gins(optoas(gc.ORSH, hi1.Type), ncon(uint32(v)), &dx)
+			break
+		}
+
+		// load value into DX:AX.
+		gins(x86.AMOVL, &lo1, &ax)
+
+		gins(x86.AMOVL, &hi1, &dx)
+
+		// load shift value into register.
+		// if high bits are set, zero value.
+		var p1 *obj.Prog
+
+		if gc.Is64(r.Type) {
+			gins(x86.ACMPL, &hi2, ncon(0))
+			p1 = gc.Gbranch(x86.AJNE, nil, +1)
+			gins(x86.AMOVL, &lo2, &cx)
+		} else {
+			cx.Type = gc.Types[gc.TUINT32]
+			gmove(r, &cx)
+		}
+
+		// if shift count is >=64, zero or sign-extend value
+		gins(x86.ACMPL, &cx, ncon(64))
+
+		p2 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
+		if p1 != nil {
+			gc.Patch(p1, gc.Pc)
+		}
+		if hi1.Type.Etype == gc.TINT32 {
+			gins(x86.ASARL, ncon(31), &dx)
+			gins(x86.AMOVL, &dx, &ax)
+		} else {
+			gins(x86.AXORL, &dx, &dx)
+			gins(x86.AXORL, &ax, &ax)
+		}
+
+		gc.Patch(p2, gc.Pc)
+
+		// if shift count is >= 32, sign-extend hi.
+		gins(x86.ACMPL, &cx, ncon(32))
+
+		p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
+		gins(x86.AMOVL, &dx, &ax)
+		if hi1.Type.Etype == gc.TINT32 {
+			gins(x86.ASARL, &cx, &ax) // SARL only uses bottom 5 bits of count
+			gins(x86.ASARL, ncon(31), &dx)
+		} else {
+			gins(x86.ASHRL, &cx, &ax)
+			gins(x86.AXORL, &dx, &dx)
+		}
+
+		p2 = gc.Gbranch(obj.AJMP, nil, 0)
+		gc.Patch(p1, gc.Pc)
+
+		// general shift
+		p1 = gins(x86.ASHRL, &cx, &ax)
+
+		p1.From.Index = x86.REG_DX // double-width shift
+		p1.From.Scale = 0
+		gins(optoas(gc.ORSH, hi1.Type), &cx, &dx)
+		gc.Patch(p2, gc.Pc)
+
+		// make constant the right side (it usually is anyway).
+	case gc.OXOR,
+		gc.OAND,
+		gc.OOR:
+		if lo1.Op == gc.OLITERAL {
+			nswap(&lo1, &lo2)
+			nswap(&hi1, &hi2)
+		}
+
+		if lo2.Op == gc.OLITERAL {
+			// special cases for constants.
+			lv := uint32(lo2.Int())
+			hv := uint32(hi2.Int())
+			splitclean() // right side
+			split64(res, &lo2, &hi2)
+			switch n.Op {
+			case gc.OXOR:
+				gmove(&lo1, &lo2)
+				gmove(&hi1, &hi2)
+				switch lv {
+				case 0:
+					break
+
+				case 0xffffffff:
+					gins(x86.ANOTL, nil, &lo2)
+
+				default:
+					gins(x86.AXORL, ncon(lv), &lo2)
+				}
+
+				switch hv {
+				case 0:
+					break
+
+				case 0xffffffff:
+					gins(x86.ANOTL, nil, &hi2)
+
+				default:
+					gins(x86.AXORL, ncon(hv), &hi2)
+				}
+
+			case gc.OAND:
+				switch lv {
+				case 0:
+					gins(x86.AMOVL, ncon(0), &lo2)
+
+				default:
+					gmove(&lo1, &lo2)
+					if lv != 0xffffffff {
+						gins(x86.AANDL, ncon(lv), &lo2)
+					}
+				}
+
+				switch hv {
+				case 0:
+					gins(x86.AMOVL, ncon(0), &hi2)
+
+				default:
+					gmove(&hi1, &hi2)
+					if hv != 0xffffffff {
+						gins(x86.AANDL, ncon(hv), &hi2)
+					}
+				}
+
+			case gc.OOR:
+				switch lv {
+				case 0:
+					gmove(&lo1, &lo2)
+
+				case 0xffffffff:
+					gins(x86.AMOVL, ncon(0xffffffff), &lo2)
+
+				default:
+					gmove(&lo1, &lo2)
+					gins(x86.AORL, ncon(lv), &lo2)
+				}
+
+				switch hv {
+				case 0:
+					gmove(&hi1, &hi2)
+
+				case 0xffffffff:
+					gins(x86.AMOVL, ncon(0xffffffff), &hi2)
+
+				default:
+					gmove(&hi1, &hi2)
+					gins(x86.AORL, ncon(hv), &hi2)
+				}
+			}
+
+			splitclean()
+			splitclean()
+			return
+		}
+
+		gins(x86.AMOVL, &lo1, &ax)
+		gins(x86.AMOVL, &hi1, &dx)
+		gins(optoas(int(n.Op), lo1.Type), &lo2, &ax)
+		gins(optoas(int(n.Op), lo1.Type), &hi2, &dx)
+	}
+
+	if gc.Is64(r.Type) {
+		splitclean()
+	}
+	splitclean()
+
+	split64(res, &lo1, &hi1)
+	gins(x86.AMOVL, &ax, &lo1)
+	gins(x86.AMOVL, &dx, &hi1)
+	splitclean()
+}
+
+/*
+ * generate comparison of nl, nr, both 64-bit.
+ * nl is memory; nr is constant or memory.
+ */
+func cmp64(nl *gc.Node, nr *gc.Node, op int, likely int, to *obj.Prog) {
+	var lo1 gc.Node
+	var hi1 gc.Node
+	var lo2 gc.Node
+	var hi2 gc.Node
+	var rr gc.Node
+
+	split64(nl, &lo1, &hi1)
+	split64(nr, &lo2, &hi2)
+
+	// compare most significant word;
+	// if they differ, we're done.
+	t := hi1.Type
+
+	if nl.Op == gc.OLITERAL || nr.Op == gc.OLITERAL {
+		gins(x86.ACMPL, &hi1, &hi2)
+	} else {
+		gc.Regalloc(&rr, gc.Types[gc.TINT32], nil)
+		gins(x86.AMOVL, &hi1, &rr)
+		gins(x86.ACMPL, &rr, &hi2)
+		gc.Regfree(&rr)
+	}
+
+	var br *obj.Prog
+	switch op {
+	default:
+		gc.Fatal("cmp64 %v %v", gc.Oconv(int(op), 0), t)
+
+		// cmp hi
+	// jne L
+	// cmp lo
+	// jeq to
+	// L:
+	case gc.OEQ:
+		br = gc.Gbranch(x86.AJNE, nil, -likely)
+
+		// cmp hi
+	// jne to
+	// cmp lo
+	// jne to
+	case gc.ONE:
+		gc.Patch(gc.Gbranch(x86.AJNE, nil, likely), to)
+
+		// cmp hi
+	// jgt to
+	// jlt L
+	// cmp lo
+	// jge to (or jgt to)
+	// L:
+	case gc.OGE,
+		gc.OGT:
+		gc.Patch(gc.Gbranch(optoas(gc.OGT, t), nil, likely), to)
+
+		br = gc.Gbranch(optoas(gc.OLT, t), nil, -likely)
+
+		// cmp hi
+	// jlt to
+	// jgt L
+	// cmp lo
+	// jle to (or jlt to)
+	// L:
+	case gc.OLE,
+		gc.OLT:
+		gc.Patch(gc.Gbranch(optoas(gc.OLT, t), nil, likely), to)
+
+		br = gc.Gbranch(optoas(gc.OGT, t), nil, -likely)
+	}
+
+	// compare least significant word
+	t = lo1.Type
+
+	if nl.Op == gc.OLITERAL || nr.Op == gc.OLITERAL {
+		gins(x86.ACMPL, &lo1, &lo2)
+	} else {
+		gc.Regalloc(&rr, gc.Types[gc.TINT32], nil)
+		gins(x86.AMOVL, &lo1, &rr)
+		gins(x86.ACMPL, &rr, &lo2)
+		gc.Regfree(&rr)
+	}
+
+	// jump again
+	gc.Patch(gc.Gbranch(optoas(op, t), nil, likely), to)
+
+	// point first branch down here if appropriate
+	if br != nil {
+		gc.Patch(br, gc.Pc)
+	}
+
+	splitclean()
+	splitclean()
+}
diff --git a/src/cmd/compile/internal/x86/galign.go b/src/cmd/compile/internal/x86/galign.go
new file mode 100644
index 0000000..2b602e1
--- /dev/null
+++ b/src/cmd/compile/internal/x86/galign.go
@@ -0,0 +1,110 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x86
+
+import (
+	"cmd/compile/internal/gc"
+	"cmd/internal/obj"
+	"cmd/internal/obj/x86"
+	"fmt"
+	"os"
+)
+
+var thechar int = '8'
+
+var thestring string = "386"
+
+var thelinkarch *obj.LinkArch = &x86.Link386
+
+func linkarchinit() {
+}
+
+var MAXWIDTH int64 = (1 << 32) - 1
+
+/*
+ * go declares several platform-specific type aliases:
+ * int, uint, and uintptr
+ */
+var typedefs = []gc.Typedef{
+	gc.Typedef{"int", gc.TINT, gc.TINT32},
+	gc.Typedef{"uint", gc.TUINT, gc.TUINT32},
+	gc.Typedef{"uintptr", gc.TUINTPTR, gc.TUINT32},
+}
+
+func betypeinit() {
+	gc.Widthptr = 4
+	gc.Widthint = 4
+	gc.Widthreg = 4
+}
+
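+// Main initializes gc.Thearch for the 386 back end and then runs the
+// architecture-independent compiler driver.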
+func Main() {
+	gc.Thearch.Thechar = thechar
+	gc.Thearch.Thestring = thestring
+	gc.Thearch.Thelinkarch = thelinkarch
+	gc.Thearch.Typedefs = typedefs
+	gc.Thearch.REGSP = x86.REGSP
+	gc.Thearch.REGCTXT = x86.REGCTXT
+	gc.Thearch.REGCALLX = x86.REG_BX
+	gc.Thearch.REGCALLX2 = x86.REG_AX
+	gc.Thearch.REGRETURN = x86.REG_AX
+	gc.Thearch.REGMIN = x86.REG_AX
+	gc.Thearch.REGMAX = x86.REG_DI
+	switch v := obj.Getgo386(); v {
+	case "387":
+		gc.Thearch.FREGMIN = x86.REG_F0
+		gc.Thearch.FREGMAX = x86.REG_F7
+		gc.Thearch.Use387 = true
+	case "sse2":
+		gc.Thearch.FREGMIN = x86.REG_X0
+		gc.Thearch.FREGMAX = x86.REG_X7
+	default:
+		fmt.Fprintf(os.Stderr, "unsupported setting GO386=%s\n", v)
+		gc.Exit(1)
+	}
+	gc.Thearch.MAXWIDTH = MAXWIDTH
+	gc.Thearch.ReservedRegs = resvd
+
+	gc.Thearch.Betypeinit = betypeinit
+	gc.Thearch.Bgen_float = bgen_float
+	gc.Thearch.Cgen64 = cgen64
+	gc.Thearch.Cgen_bmul = cgen_bmul
+	gc.Thearch.Cgen_float = cgen_float
+	gc.Thearch.Cgen_hmul = cgen_hmul
+	gc.Thearch.Cgen_shift = cgen_shift
+	gc.Thearch.Clearfat = clearfat
+	gc.Thearch.Cmp64 = cmp64
+	gc.Thearch.Defframe = defframe
+	gc.Thearch.Dodiv = cgen_div
+	gc.Thearch.Excise = excise
+	gc.Thearch.Expandchecks = expandchecks
+	gc.Thearch.Getg = getg
+	gc.Thearch.Gins = gins
+	gc.Thearch.Ginscmp = ginscmp
+	gc.Thearch.Ginscon = ginscon
+	gc.Thearch.Ginsnop = ginsnop
+	gc.Thearch.Gmove = gmove
+	gc.Thearch.Igenindex = igenindex
+	gc.Thearch.Linkarchinit = linkarchinit
+	gc.Thearch.Peep = peep
+	gc.Thearch.Proginfo = proginfo
+	gc.Thearch.Regtyp = regtyp
+	gc.Thearch.Sameaddr = sameaddr
+	gc.Thearch.Smallindir = smallindir
+	gc.Thearch.Stackaddr = stackaddr
+	gc.Thearch.Blockcopy = blockcopy
+	gc.Thearch.Sudoaddable = sudoaddable
+	gc.Thearch.Sudoclean = sudoclean
+	gc.Thearch.Excludedregs = excludedregs
+	gc.Thearch.RtoB = RtoB
+	gc.Thearch.FtoB = FtoB
+	gc.Thearch.BtoR = BtoR
+	gc.Thearch.BtoF = BtoF
+	gc.Thearch.Optoas = optoas
+	gc.Thearch.Doregbits = doregbits
+	gc.Thearch.Regnames = regnames
+
+	gc.Main()
+	gc.Exit(0)
+}
diff --git a/src/cmd/compile/internal/x86/ggen.go b/src/cmd/compile/internal/x86/ggen.go
new file mode 100644
index 0000000..dabc139
--- /dev/null
+++ b/src/cmd/compile/internal/x86/ggen.go
@@ -0,0 +1,940 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x86
+
+import (
+	"cmd/compile/internal/gc"
+	"cmd/internal/obj"
+	"cmd/internal/obj/x86"
+)
+
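+// defframe fills in ptxt's argument and frame sizes and emits code to
+// zero any ambiguously live stack ranges.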
+func defframe(ptxt *obj.Prog) {
+	var n *gc.Node
+
+	// fill in argument size, stack size
+	ptxt.To.Type = obj.TYPE_TEXTSIZE
+
+	ptxt.To.Val = int32(gc.Rnd(gc.Curfn.Type.Argwid, int64(gc.Widthptr)))
+	frame := uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
+	ptxt.To.Offset = int64(frame)
+
+	// insert code to zero ambiguously live variables
+	// so that the garbage collector only sees initialized values
+	// when it looks for pointers.
+	p := ptxt
+
+	hi := int64(0)
+	lo := hi
+	ax := uint32(0)
+	for l := gc.Curfn.Func.Dcl; l != nil; l = l.Next {
+		n = l.N
+		if !n.Name.Needzero {
+			continue
+		}
+		if n.Class != gc.PAUTO {
+			gc.Fatal("needzero class %d", n.Class)
+		}
+		if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
+			gc.Fatal("var %v has size %d offset %d", gc.Nconv(n, obj.FmtLong), int(n.Type.Width), int(n.Xoffset))
+		}
+		if lo != hi && n.Xoffset+n.Type.Width == lo-int64(2*gc.Widthptr) {
+			// merge with range we already have
+			lo = n.Xoffset
+
+			continue
+		}
+
+		// zero old range
+		p = zerorange(p, int64(frame), lo, hi, &ax)
+
+		// set new range
+		hi = n.Xoffset + n.Type.Width
+
+		lo = n.Xoffset
+	}
+
+	// zero final range
+	zerorange(p, int64(frame), lo, hi, &ax)
+}
+
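+// zerorange zeroes the stack range [lo, hi) relative to frame,
+// appending instructions after p. It loads AX with 0 first unless
+// *ax records that AX is already zero.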
+func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, ax *uint32) *obj.Prog {
+	cnt := hi - lo
+	if cnt == 0 {
+		return p
+	}
+	if *ax == 0 {
+		p = appendpp(p, x86.AMOVL, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
+		*ax = 1
+	}
+
+	if cnt <= int64(4*gc.Widthreg) {
+		for i := int64(0); i < cnt; i += int64(gc.Widthreg) {
+			p = appendpp(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo+i)
+		}
+	} else if !gc.Nacl && cnt <= int64(128*gc.Widthreg) {
+		p = appendpp(p, x86.ALEAL, obj.TYPE_MEM, x86.REG_SP, frame+lo, obj.TYPE_REG, x86.REG_DI, 0)
+		p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, 1*(128-cnt/int64(gc.Widthreg)))
+		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
+	} else {
+		p = appendpp(p, x86.AMOVL, obj.TYPE_CONST, 0, cnt/int64(gc.Widthreg), obj.TYPE_REG, x86.REG_CX, 0)
+		p = appendpp(p, x86.ALEAL, obj.TYPE_MEM, x86.REG_SP, frame+lo, obj.TYPE_REG, x86.REG_DI, 0)
+		p = appendpp(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
+		p = appendpp(p, x86.ASTOSL, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
+	}
+
+	return p
+}
+
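+// appendpp allocates a new instruction, fills in its operands, links
+// it after p, and returns it.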
+func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int64, ttype int, treg int, toffset int64) *obj.Prog {
+	q := gc.Ctxt.NewProg()
+	gc.Clearp(q)
+	q.As = int16(as)
+	q.Lineno = p.Lineno
+	q.From.Type = int16(ftype)
+	q.From.Reg = int16(freg)
+	q.From.Offset = foffset
+	q.To.Type = int16(ttype)
+	q.To.Reg = int16(treg)
+	q.To.Offset = toffset
+	q.Link = p.Link
+	p.Link = q
+	return q
+}
+
+func clearfat(nl *gc.Node) {
+	/* clear a fat object */
+	if gc.Debug['g'] != 0 {
+		gc.Dump("\nclearfat", nl)
+	}
+
+	w := uint32(nl.Type.Width)
+
+	// Avoid taking the address for simple enough types.
+	if gc.Componentgen(nil, nl) {
+		return
+	}
+
+	c := w % 4 // bytes
+	q := w / 4 // doublewords
+
+	if q < 4 {
+		// Write sequence of MOV 0, off(base) instead of using STOSL.
+		// The hope is that although the code will be slightly longer,
+		// the MOVs will have no dependencies and pipeline better
+		// than the unrolled STOSL loop.
+		// NOTE: Must use agen, not igen, so that optimizer sees address
+		// being taken. We are not writing on field boundaries.
+		var n1 gc.Node
+		gc.Regalloc(&n1, gc.Types[gc.Tptr], nil)
+
+		gc.Agen(nl, &n1)
+		n1.Op = gc.OINDREG
+		var z gc.Node
+		gc.Nodconst(&z, gc.Types[gc.TUINT64], 0)
+		for ; q > 0; q-- {
+			n1.Type = z.Type
+			gins(x86.AMOVL, &z, &n1)
+			n1.Xoffset += 4
+		}
+
+		gc.Nodconst(&z, gc.Types[gc.TUINT8], 0)
+		for ; c > 0; c-- {
+			n1.Type = z.Type
+			gins(x86.AMOVB, &z, &n1)
+			n1.Xoffset++
+		}
+
+		gc.Regfree(&n1)
+		return
+	}
+
+	var n1 gc.Node
+	gc.Nodreg(&n1, gc.Types[gc.Tptr], x86.REG_DI)
+	gc.Agen(nl, &n1)
+	gconreg(x86.AMOVL, 0, x86.REG_AX)
+
+	if q > 128 || (q >= 4 && gc.Nacl) {
+		gconreg(x86.AMOVL, int64(q), x86.REG_CX)
+		gins(x86.AREP, nil, nil)   // repeat
+		gins(x86.ASTOSL, nil, nil) // MOVL AX,*(DI)+
+	} else if q >= 4 {
+		p := gins(obj.ADUFFZERO, nil, nil)
+		p.To.Type = obj.TYPE_ADDR
+		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
+
+		// 1 and 128 = magic constants: see ../../runtime/asm_386.s
+		p.To.Offset = 1 * (128 - int64(q))
+	} else {
+		for q > 0 {
+			gins(x86.ASTOSL, nil, nil) // MOVL AX,*(DI)+
+			q--
+		}
+	}
+
+	for c > 0 {
+		gins(x86.ASTOSB, nil, nil) // MOVB AL,*(DI)+
+		c--
+	}
+}
+
+var panicdiv *gc.Node
+
+/*
+ * generate division.
+ * caller must set:
+ *	ax = allocated AX register
+ *	dx = allocated DX register
+ * generates one of:
+ *	res = nl / nr
+ *	res = nl % nr
+ * according to op.
+ */
+func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node, ax *gc.Node, dx *gc.Node) {
+	// Have to be careful about handling
+	// most negative int divided by -1 correctly.
+	// The hardware will trap.
+	// Also the byte divide instruction needs AH,
+	// which we otherwise don't have to deal with.
+	// Easiest way to avoid for int8, int16: use int32.
+	// For int32 and int64, use explicit test.
+	// Could use int64 hw for int32.
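+	// For example, int32(-1<<31) / -1 overflows int32, so the check
+	// below rewrites a/(-1) as -a and a%(-1) as 0 instead of dividing.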
+	t := nl.Type
+
+	t0 := t
+	check := false
+	if gc.Issigned[t.Etype] {
+		check = true
+		if gc.Isconst(nl, gc.CTINT) && nl.Int() != -1<<uint64(t.Width*8-1) {
+			check = false
+		} else if gc.Isconst(nr, gc.CTINT) && nr.Int() != -1 {
+			check = false
+		}
+	}
+
+	if t.Width < 4 {
+		if gc.Issigned[t.Etype] {
+			t = gc.Types[gc.TINT32]
+		} else {
+			t = gc.Types[gc.TUINT32]
+		}
+		check = false
+	}
+
+	var t1 gc.Node
+	gc.Tempname(&t1, t)
+	var t2 gc.Node
+	gc.Tempname(&t2, t)
+	if t0 != t {
+		var t3 gc.Node
+		gc.Tempname(&t3, t0)
+		var t4 gc.Node
+		gc.Tempname(&t4, t0)
+		gc.Cgen(nl, &t3)
+		gc.Cgen(nr, &t4)
+
+		// Convert.
+		gmove(&t3, &t1)
+
+		gmove(&t4, &t2)
+	} else {
+		gc.Cgen(nl, &t1)
+		gc.Cgen(nr, &t2)
+	}
+
+	var n1 gc.Node
+	if !gc.Samereg(ax, res) && !gc.Samereg(dx, res) {
+		gc.Regalloc(&n1, t, res)
+	} else {
+		gc.Regalloc(&n1, t, nil)
+	}
+	gmove(&t2, &n1)
+	gmove(&t1, ax)
+	var p2 *obj.Prog
+	var n4 gc.Node
+	if gc.Nacl {
+		// Native Client does not relay the divide-by-zero trap
+		// to the executing program, so we must insert a check
+		// for ourselves.
+		gc.Nodconst(&n4, t, 0)
+
+		gins(optoas(gc.OCMP, t), &n1, &n4)
+		p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
+		if panicdiv == nil {
+			panicdiv = gc.Sysfunc("panicdivide")
+		}
+		gc.Ginscall(panicdiv, -1)
+		gc.Patch(p1, gc.Pc)
+	}
+
+	if check {
+		gc.Nodconst(&n4, t, -1)
+		gins(optoas(gc.OCMP, t), &n1, &n4)
+		p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
+		if op == gc.ODIV {
+			// a / (-1) is -a.
+			gins(optoas(gc.OMINUS, t), nil, ax)
+
+			gmove(ax, res)
+		} else {
+			// a % (-1) is 0.
+			gc.Nodconst(&n4, t, 0)
+
+			gmove(&n4, res)
+		}
+
+		p2 = gc.Gbranch(obj.AJMP, nil, 0)
+		gc.Patch(p1, gc.Pc)
+	}
+
+	if !gc.Issigned[t.Etype] {
+		var nz gc.Node
+		gc.Nodconst(&nz, t, 0)
+		gmove(&nz, dx)
+	} else {
+		gins(optoas(gc.OEXTEND, t), nil, nil)
+	}
+	gins(optoas(op, t), &n1, nil)
+	gc.Regfree(&n1)
+
+	if op == gc.ODIV {
+		gmove(ax, res)
+	} else {
+		gmove(dx, res)
+	}
+	if check {
+		gc.Patch(p2, gc.Pc)
+	}
+}
+
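+// savex saves register dr to a stack temporary if it is live and is
+// not the destination, records the saved value in oldx, and then
+// allocates dr as x.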
+func savex(dr int, x *gc.Node, oldx *gc.Node, res *gc.Node, t *gc.Type) {
+	r := int(reg[dr])
+	gc.Nodreg(x, gc.Types[gc.TINT32], dr)
+
+	// save current ax and dx if they are live
+	// and not the destination
+	*oldx = gc.Node{}
+
+	if r > 0 && !gc.Samereg(x, res) {
+		gc.Tempname(oldx, gc.Types[gc.TINT32])
+		gmove(x, oldx)
+	}
+
+	gc.Regalloc(x, t, x)
+}
+
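+// restx frees x and, if savex spilled a value, restores it from oldx.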
+func restx(x *gc.Node, oldx *gc.Node) {
+	gc.Regfree(x)
+
+	if oldx.Op != 0 {
+		x.Type = gc.Types[gc.TINT32]
+		gmove(oldx, x)
+	}
+}
+
+/*
+ * generate division according to op, one of:
+ *	res = nl / nr
+ *	res = nl % nr
+ */
+func cgen_div(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+	if gc.Is64(nl.Type) {
+		gc.Fatal("cgen_div %v", nl.Type)
+	}
+
+	var t *gc.Type
+	if gc.Issigned[nl.Type.Etype] {
+		t = gc.Types[gc.TINT32]
+	} else {
+		t = gc.Types[gc.TUINT32]
+	}
+	var ax gc.Node
+	var oldax gc.Node
+	savex(x86.REG_AX, &ax, &oldax, res, t)
+	var olddx gc.Node
+	var dx gc.Node
+	savex(x86.REG_DX, &dx, &olddx, res, t)
+	dodiv(op, nl, nr, res, &ax, &dx)
+	restx(&dx, &olddx)
+	restx(&ax, &oldax)
+}
+
+/*
+ * generate shift according to op, one of:
+ *	res = nl << nr
+ *	res = nl >> nr
+ */
+func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+	if nl.Type.Width > 4 {
+		gc.Fatal("cgen_shift %v", nl.Type)
+	}
+
+	w := int(nl.Type.Width * 8)
+
+	a := optoas(op, nl.Type)
+
+	if nr.Op == gc.OLITERAL {
+		var n2 gc.Node
+		gc.Tempname(&n2, nl.Type)
+		gc.Cgen(nl, &n2)
+		var n1 gc.Node
+		gc.Regalloc(&n1, nl.Type, res)
+		gmove(&n2, &n1)
+		sc := uint64(nr.Int())
+		if sc >= uint64(nl.Type.Width*8) {
+			// large shift gets 2 shifts by width-1
+			gins(a, ncon(uint32(w)-1), &n1)
+
+			gins(a, ncon(uint32(w)-1), &n1)
+		} else {
+			gins(a, nr, &n1)
+		}
+		gmove(&n1, res)
+		gc.Regfree(&n1)
+		return
+	}
+
+	var oldcx gc.Node
+	var cx gc.Node
+	gc.Nodreg(&cx, gc.Types[gc.TUINT32], x86.REG_CX)
+	if reg[x86.REG_CX] > 1 && !gc.Samereg(&cx, res) {
+		gc.Tempname(&oldcx, gc.Types[gc.TUINT32])
+		gmove(&cx, &oldcx)
+	}
+
+	var n1 gc.Node
+	var nt gc.Node
+	if nr.Type.Width > 4 {
+		gc.Tempname(&nt, nr.Type)
+		n1 = nt
+	} else {
+		gc.Nodreg(&n1, gc.Types[gc.TUINT32], x86.REG_CX)
+		gc.Regalloc(&n1, nr.Type, &n1) // to hold the shift count in CX
+	}
+
+	var n2 gc.Node
+	if gc.Samereg(&cx, res) {
+		gc.Regalloc(&n2, nl.Type, nil)
+	} else {
+		gc.Regalloc(&n2, nl.Type, res)
+	}
+	if nl.Ullman >= nr.Ullman {
+		gc.Cgen(nl, &n2)
+		gc.Cgen(nr, &n1)
+	} else {
+		gc.Cgen(nr, &n1)
+		gc.Cgen(nl, &n2)
+	}
+
+	// test and fix up large shifts
+	if bounded {
+		if nr.Type.Width > 4 {
+			// delayed reg alloc
+			gc.Nodreg(&n1, gc.Types[gc.TUINT32], x86.REG_CX)
+
+			gc.Regalloc(&n1, gc.Types[gc.TUINT32], &n1) // to hold the shift count in CX
+			var lo gc.Node
+			var hi gc.Node
+			split64(&nt, &lo, &hi)
+			gmove(&lo, &n1)
+			splitclean()
+		}
+	} else {
+		var p1 *obj.Prog
+		if nr.Type.Width > 4 {
+			// delayed reg alloc
+			gc.Nodreg(&n1, gc.Types[gc.TUINT32], x86.REG_CX)
+
+			gc.Regalloc(&n1, gc.Types[gc.TUINT32], &n1) // to hold the shift count in CX
+			var lo gc.Node
+			var hi gc.Node
+			split64(&nt, &lo, &hi)
+			gmove(&lo, &n1)
+			gins(optoas(gc.OCMP, gc.Types[gc.TUINT32]), &hi, ncon(0))
+			p2 := gc.Gbranch(optoas(gc.ONE, gc.Types[gc.TUINT32]), nil, +1)
+			gins(optoas(gc.OCMP, gc.Types[gc.TUINT32]), &n1, ncon(uint32(w)))
+			p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
+			splitclean()
+			gc.Patch(p2, gc.Pc)
+		} else {
+			gins(optoas(gc.OCMP, nr.Type), &n1, ncon(uint32(w)))
+			p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
+		}
+
+		if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
+			gins(a, ncon(uint32(w)-1), &n2)
+		} else {
+			gmove(ncon(0), &n2)
+		}
+
+		gc.Patch(p1, gc.Pc)
+	}
+
+	gins(a, &n1, &n2)
+
+	if oldcx.Op != 0 {
+		gmove(&oldcx, &cx)
+	}
+
+	gmove(&n2, res)
+
+	gc.Regfree(&n1)
+	gc.Regfree(&n2)
+}
+
+/*
+ * generate byte multiply:
+ *	res = nl * nr
+ * there is no 2-operand byte multiply instruction so
+ * we do a full-width multiplication and truncate afterwards.
+ */
+func cgen_bmul(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) bool {
+	if optoas(op, nl.Type) != x86.AIMULB {
+		return false
+	}
+
+	// copy from byte to full registers
+	t := gc.Types[gc.TUINT32]
+
+	if gc.Issigned[nl.Type.Etype] {
+		t = gc.Types[gc.TINT32]
+	}
+
+	// largest ullman on left.
+	if nl.Ullman < nr.Ullman {
+		nl, nr = nr, nl
+	}
+
+	var nt gc.Node
+	gc.Tempname(&nt, nl.Type)
+	gc.Cgen(nl, &nt)
+	var n1 gc.Node
+	gc.Regalloc(&n1, t, res)
+	gc.Cgen(nr, &n1)
+	var n2 gc.Node
+	gc.Regalloc(&n2, t, nil)
+	gmove(&nt, &n2)
+	a := optoas(op, t)
+	gins(a, &n2, &n1)
+	gc.Regfree(&n2)
+	gmove(&n1, res)
+	gc.Regfree(&n1)
+
+	return true
+}
+
+/*
+ * generate high multiply:
+ *   res = (nl*nr) >> width
+ */
+func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
+	var n1 gc.Node
+	var n2 gc.Node
+	var ax gc.Node
+	var dx gc.Node
+
+	t := nl.Type
+	a := optoas(gc.OHMUL, t)
+
+	// gen nl in n1.
+	gc.Tempname(&n1, t)
+
+	gc.Cgen(nl, &n1)
+
+	// gen nr in n2.
+	gc.Regalloc(&n2, t, res)
+
+	gc.Cgen(nr, &n2)
+
+	// multiply.
+	gc.Nodreg(&ax, t, x86.REG_AX)
+
+	gmove(&n2, &ax)
+	gins(a, &n1, nil)
+	gc.Regfree(&n2)
+
+	if t.Width == 1 {
+		// byte multiply behaves differently.
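+		// The high byte of the product is in AH, not DX; copy it to
+		// DX so the common code below can read the result from there.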
+		gc.Nodreg(&ax, t, x86.REG_AH)
+
+		gc.Nodreg(&dx, t, x86.REG_DX)
+		gmove(&ax, &dx)
+	}
+
+	gc.Nodreg(&dx, t, x86.REG_DX)
+	gmove(&dx, res)
+}
+
+/*
+ * generate floating-point operation.
+ */
+func cgen_float(n *gc.Node, res *gc.Node) {
+	nl := n.Left
+	switch n.Op {
+	case gc.OEQ,
+		gc.ONE,
+		gc.OLT,
+		gc.OLE,
+		gc.OGE:
+		p1 := gc.Gbranch(obj.AJMP, nil, 0)
+		p2 := gc.Pc
+		gmove(gc.Nodbool(true), res)
+		p3 := gc.Gbranch(obj.AJMP, nil, 0)
+		gc.Patch(p1, gc.Pc)
+		gc.Bgen(n, true, 0, p2)
+		gmove(gc.Nodbool(false), res)
+		gc.Patch(p3, gc.Pc)
+		return
+
+	case gc.OPLUS:
+		gc.Cgen(nl, res)
+		return
+
+	case gc.OCONV:
+		if gc.Eqtype(n.Type, nl.Type) || gc.Noconv(n.Type, nl.Type) {
+			gc.Cgen(nl, res)
+			return
+		}
+
+		var n2 gc.Node
+		gc.Tempname(&n2, n.Type)
+		var n1 gc.Node
+		gc.Mgen(nl, &n1, res)
+		gmove(&n1, &n2)
+		gmove(&n2, res)
+		gc.Mfree(&n1)
+		return
+	}
+
+	if gc.Thearch.Use387 {
+		cgen_float387(n, res)
+	} else {
+		cgen_floatsse(n, res)
+	}
+}
+
+// floating-point.  387 (not SSE2)
+func cgen_float387(n *gc.Node, res *gc.Node) {
+	var f0 gc.Node
+	var f1 gc.Node
+
+	nl := n.Left
+	nr := n.Right
+	gc.Nodreg(&f0, nl.Type, x86.REG_F0)
+	gc.Nodreg(&f1, n.Type, x86.REG_F0+1)
+	if nr != nil {
+		// binary
+		if nl.Ullman >= nr.Ullman {
+			gc.Cgen(nl, &f0)
+			if nr.Addable {
+				gins(foptoas(int(n.Op), n.Type, 0), nr, &f0)
+			} else {
+				gc.Cgen(nr, &f0)
+				gins(foptoas(int(n.Op), n.Type, Fpop), &f0, &f1)
+			}
+		} else {
+			gc.Cgen(nr, &f0)
+			if nl.Addable {
+				gins(foptoas(int(n.Op), n.Type, Frev), nl, &f0)
+			} else {
+				gc.Cgen(nl, &f0)
+				gins(foptoas(int(n.Op), n.Type, Frev|Fpop), &f0, &f1)
+			}
+		}
+
+		gmove(&f0, res)
+		return
+	}
+
+	// unary
+	gc.Cgen(nl, &f0)
+
+	if n.Op != gc.OCONV && n.Op != gc.OPLUS {
+		gins(foptoas(int(n.Op), n.Type, 0), nil, nil)
+	}
+	gmove(&f0, res)
+}
+
+func cgen_floatsse(n *gc.Node, res *gc.Node) {
+	var a int
+
+	nl := n.Left
+	nr := n.Right
+	switch n.Op {
+	default:
+		gc.Dump("cgen_floatsse", n)
+		gc.Fatal("cgen_floatsse %v", gc.Oconv(int(n.Op), 0))
+		return
+
+	case gc.OMINUS,
+		gc.OCOM:
+		nr = gc.Nodintconst(-1)
+		gc.Convlit(&nr, n.Type)
+		a = foptoas(gc.OMUL, nl.Type, 0)
+		goto sbop
+
+		// symmetric binary
+	case gc.OADD,
+		gc.OMUL:
+		a = foptoas(int(n.Op), nl.Type, 0)
+
+		goto sbop
+
+		// asymmetric binary
+	case gc.OSUB,
+		gc.OMOD,
+		gc.ODIV:
+		a = foptoas(int(n.Op), nl.Type, 0)
+
+		goto abop
+	}
+
+sbop: // symmetric binary
+	if nl.Ullman < nr.Ullman || nl.Op == gc.OLITERAL {
+		nl, nr = nr, nl
+	}
+
+abop: // asymmetric binary
+	if nl.Ullman >= nr.Ullman {
+		var nt gc.Node
+		gc.Tempname(&nt, nl.Type)
+		gc.Cgen(nl, &nt)
+		var n2 gc.Node
+		gc.Mgen(nr, &n2, nil)
+		var n1 gc.Node
+		gc.Regalloc(&n1, nl.Type, res)
+		gmove(&nt, &n1)
+		gins(a, &n2, &n1)
+		gmove(&n1, res)
+		gc.Regfree(&n1)
+		gc.Mfree(&n2)
+	} else {
+		var n2 gc.Node
+		gc.Regalloc(&n2, nr.Type, res)
+		gc.Cgen(nr, &n2)
+		var n1 gc.Node
+		gc.Regalloc(&n1, nl.Type, nil)
+		gc.Cgen(nl, &n1)
+		gins(a, &n2, &n1)
+		gc.Regfree(&n2)
+		gmove(&n1, res)
+		gc.Regfree(&n1)
+	}
+}
+
+func bgen_float(n *gc.Node, wantTrue bool, likely int, to *obj.Prog) {
+	nl := n.Left
+	nr := n.Right
+	a := int(n.Op)
+	if !wantTrue {
+		// brcom is not valid on floats when NaN is involved.
+		p1 := gc.Gbranch(obj.AJMP, nil, 0)
+		p2 := gc.Gbranch(obj.AJMP, nil, 0)
+		gc.Patch(p1, gc.Pc)
+
+		// No need to avoid re-genning ninit.
+		bgen_float(n, true, -likely, p2)
+
+		gc.Patch(gc.Gbranch(obj.AJMP, nil, 0), to)
+		gc.Patch(p2, gc.Pc)
+		return
+	}
+
+	if gc.Thearch.Use387 {
+		a = gc.Brrev(a) // because the args are stacked
+		if a == gc.OGE || a == gc.OGT {
+			// only < and <= work right with NaN; reverse if needed
+			nl, nr = nr, nl
+			a = gc.Brrev(a)
+		}
+
+		var ax, n2, tmp gc.Node
+		gc.Nodreg(&tmp, nr.Type, x86.REG_F0)
+		gc.Nodreg(&n2, nr.Type, x86.REG_F0+1)
+		gc.Nodreg(&ax, gc.Types[gc.TUINT16], x86.REG_AX)
+		if gc.Simsimtype(nr.Type) == gc.TFLOAT64 {
+			if nl.Ullman > nr.Ullman {
+				gc.Cgen(nl, &tmp)
+				gc.Cgen(nr, &tmp)
+				gins(x86.AFXCHD, &tmp, &n2)
+			} else {
+				gc.Cgen(nr, &tmp)
+				gc.Cgen(nl, &tmp)
+			}
+
+			gins(x86.AFUCOMIP, &tmp, &n2)
+			gins(x86.AFMOVDP, &tmp, &tmp) // annoying pop but still better than STSW+SAHF
+		} else {
+			// TODO(rsc): The moves back and forth to memory
+			// here are for truncating the value to 32 bits.
+			// This handles 32-bit comparison but presumably
+			// all the other ops have the same problem.
+			// We need to figure out what the right general
+			// solution is, besides telling people to use float64.
+			var t1 gc.Node
+			gc.Tempname(&t1, gc.Types[gc.TFLOAT32])
+
+			var t2 gc.Node
+			gc.Tempname(&t2, gc.Types[gc.TFLOAT32])
+			gc.Cgen(nr, &t1)
+			gc.Cgen(nl, &t2)
+			gmove(&t2, &tmp)
+			gins(x86.AFCOMFP, &t1, &tmp)
+			gins(x86.AFSTSW, nil, &ax)
+			gins(x86.ASAHF, nil, nil)
+		}
+	} else {
+		// Not 387
+		if !nl.Addable {
+			nl = gc.CgenTemp(nl)
+		}
+		if !nr.Addable {
+			nr = gc.CgenTemp(nr)
+		}
+
+		var n2 gc.Node
+		gc.Regalloc(&n2, nr.Type, nil)
+		gmove(nr, &n2)
+		nr = &n2
+
+		if nl.Op != gc.OREGISTER {
+			var n3 gc.Node
+			gc.Regalloc(&n3, nl.Type, nil)
+			gmove(nl, &n3)
+			nl = &n3
+		}
+
+		if a == gc.OGE || a == gc.OGT {
+			// only < and <= work right with NaN; reverse if needed
+			nl, nr = nr, nl
+			a = gc.Brrev(a)
+		}
+
+		gins(foptoas(gc.OCMP, nr.Type, 0), nl, nr)
+		if nl.Op == gc.OREGISTER {
+			gc.Regfree(nl)
+		}
+		gc.Regfree(nr)
+	}
+
+	switch a {
+	case gc.OEQ:
+		// neither NE nor P
+		p1 := gc.Gbranch(x86.AJNE, nil, -likely)
+		p2 := gc.Gbranch(x86.AJPS, nil, -likely)
+		gc.Patch(gc.Gbranch(obj.AJMP, nil, 0), to)
+		gc.Patch(p1, gc.Pc)
+		gc.Patch(p2, gc.Pc)
+	case gc.ONE:
+		// either NE or P
+		gc.Patch(gc.Gbranch(x86.AJNE, nil, likely), to)
+		gc.Patch(gc.Gbranch(x86.AJPS, nil, likely), to)
+	default:
+		gc.Patch(gc.Gbranch(optoas(a, nr.Type), nil, likely), to)
+	}
+}
+
+// Called after regopt and peep have run.
+// Expand CHECKNIL pseudo-op into actual nil pointer check.
+func expandchecks(firstp *obj.Prog) {
+	var p1 *obj.Prog
+	var p2 *obj.Prog
+
+	for p := firstp; p != nil; p = p.Link {
+		if p.As != obj.ACHECKNIL {
+			continue
+		}
+		if gc.Debug_checknil != 0 && p.Lineno > 1 { // p.Lineno==1 in generated wrappers
+			gc.Warnl(int(p.Lineno), "generated nil check")
+		}
+
+		// check is
+		//	CMP arg, $0
+		//	JNE 2(PC) (likely)
+		//	MOV AX, 0
+		p1 = gc.Ctxt.NewProg()
+
+		p2 = gc.Ctxt.NewProg()
+		gc.Clearp(p1)
+		gc.Clearp(p2)
+		p1.Link = p2
+		p2.Link = p.Link
+		p.Link = p1
+		p1.Lineno = p.Lineno
+		p2.Lineno = p.Lineno
+		p1.Pc = 9999
+		p2.Pc = 9999
+		p.As = x86.ACMPL
+		p.To.Type = obj.TYPE_CONST
+		p.To.Offset = 0
+		p1.As = x86.AJNE
+		p1.From.Type = obj.TYPE_CONST
+		p1.From.Offset = 1 // likely
+		p1.To.Type = obj.TYPE_BRANCH
+		p1.To.Val = p2.Link
+
+		// crash by write to memory address 0.
+		// if possible, since we know arg is 0, use 0(arg),
+		// which will be shorter to encode than plain 0.
+		p2.As = x86.AMOVL
+
+		p2.From.Type = obj.TYPE_REG
+		p2.From.Reg = x86.REG_AX
+		if regtyp(&p.From) {
+			p2.To.Type = obj.TYPE_MEM
+			p2.To.Reg = p.From.Reg
+		} else {
+			p2.To.Type = obj.TYPE_MEM
+		}
+		p2.To.Offset = 0
+	}
+}
+
+// addr += index*width if possible.
+func addindex(index *gc.Node, width int64, addr *gc.Node) bool {
+	switch width {
+	case 1, 2, 4, 8:
+		p1 := gins(x86.ALEAL, index, addr)
+		p1.From.Type = obj.TYPE_MEM
+		p1.From.Scale = int16(width)
+		p1.From.Index = p1.From.Reg
+		p1.From.Reg = p1.To.Reg
+		return true
+	}
+	return false
+}
+
+// res = runtime.getg()
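+// On 386, g lives in thread-local storage: the two-move sequence
+// below (MOVL TLS, reg; then a load through 0(reg)(TLS*1)) is a
+// pseudo-form that the linker rewrites for each platform.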
+func getg(res *gc.Node) {
+	var n1 gc.Node
+	gc.Regalloc(&n1, res.Type, res)
+	mov := optoas(gc.OAS, gc.Types[gc.Tptr])
+	p := gins(mov, nil, &n1)
+	p.From.Type = obj.TYPE_REG
+	p.From.Reg = x86.REG_TLS
+	p = gins(mov, nil, &n1)
+	p.From = p.To
+	p.From.Type = obj.TYPE_MEM
+	p.From.Index = x86.REG_TLS
+	p.From.Scale = 1
+	gmove(&n1, res)
+	gc.Regfree(&n1)
+}
diff --git a/src/cmd/compile/internal/x86/gsubr.go b/src/cmd/compile/internal/x86/gsubr.go
new file mode 100644
index 0000000..baf2517
--- /dev/null
+++ b/src/cmd/compile/internal/x86/gsubr.go
@@ -0,0 +1,1798 @@
+// Derived from Inferno utils/8c/txt.c
+// http://code.google.com/p/inferno-os/source/browse/utils/8c/txt.c
+//
+//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
+//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+//	Portions Copyright © 1997-1999 Vita Nuova Limited
+//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+//	Portions Copyright © 2004,2006 Bruce Ellis
+//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+//	Portions Copyright © 2009 The Go Authors.  All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package x86
+
+import (
+	"cmd/compile/internal/big"
+	"cmd/compile/internal/gc"
+	"cmd/internal/obj"
+	"cmd/internal/obj/x86"
+	"fmt"
+)
+
+// TODO(rsc): Can make this bigger if we move
+// the text segment up higher in 8l for all GOOS.
+// At the same time, can raise StackBig in ../../runtime/stack.h.
+var unmappedzero uint32 = 4096
+
+// foptoas flags
+const (
+	Frev  = 1 << 0
+	Fpop  = 1 << 1
+	Fpop2 = 1 << 2
+)
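+
+// Frev selects the reversed-operand form of an instruction (FSUBR rather
+// than FSUB), Fpop the form that pops the x87 register stack once (the
+// ...P opcodes), and Fpop2 the form that pops it twice (FCOMDPP).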
+
+/*
+ * return Axxx for Oxxx on type t.
+ */
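+// For example, optoas(gc.OADD, gc.Types[gc.TINT32]) returns x86.AADDL,
+// and optoas(gc.OEQ, gc.Types[gc.TINT32]) returns the conditional
+// branch x86.AJEQ.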
+func optoas(op int, t *gc.Type) int {
+	if t == nil {
+		gc.Fatal("optoas: t is nil")
+	}
+
+	a := obj.AXXX
+	switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
+	default:
+		gc.Fatal("optoas: no entry %v-%v", gc.Oconv(int(op), 0), t)
+
+	case gc.OADDR<<16 | gc.TPTR32:
+		a = x86.ALEAL
+
+	case gc.OEQ<<16 | gc.TBOOL,
+		gc.OEQ<<16 | gc.TINT8,
+		gc.OEQ<<16 | gc.TUINT8,
+		gc.OEQ<<16 | gc.TINT16,
+		gc.OEQ<<16 | gc.TUINT16,
+		gc.OEQ<<16 | gc.TINT32,
+		gc.OEQ<<16 | gc.TUINT32,
+		gc.OEQ<<16 | gc.TINT64,
+		gc.OEQ<<16 | gc.TUINT64,
+		gc.OEQ<<16 | gc.TPTR32,
+		gc.OEQ<<16 | gc.TPTR64,
+		gc.OEQ<<16 | gc.TFLOAT32,
+		gc.OEQ<<16 | gc.TFLOAT64:
+		a = x86.AJEQ
+
+	case gc.ONE<<16 | gc.TBOOL,
+		gc.ONE<<16 | gc.TINT8,
+		gc.ONE<<16 | gc.TUINT8,
+		gc.ONE<<16 | gc.TINT16,
+		gc.ONE<<16 | gc.TUINT16,
+		gc.ONE<<16 | gc.TINT32,
+		gc.ONE<<16 | gc.TUINT32,
+		gc.ONE<<16 | gc.TINT64,
+		gc.ONE<<16 | gc.TUINT64,
+		gc.ONE<<16 | gc.TPTR32,
+		gc.ONE<<16 | gc.TPTR64,
+		gc.ONE<<16 | gc.TFLOAT32,
+		gc.ONE<<16 | gc.TFLOAT64:
+		a = x86.AJNE
+
+	case gc.OLT<<16 | gc.TINT8,
+		gc.OLT<<16 | gc.TINT16,
+		gc.OLT<<16 | gc.TINT32,
+		gc.OLT<<16 | gc.TINT64:
+		a = x86.AJLT
+
+	case gc.OLT<<16 | gc.TUINT8,
+		gc.OLT<<16 | gc.TUINT16,
+		gc.OLT<<16 | gc.TUINT32,
+		gc.OLT<<16 | gc.TUINT64:
+		a = x86.AJCS
+
+	case gc.OLE<<16 | gc.TINT8,
+		gc.OLE<<16 | gc.TINT16,
+		gc.OLE<<16 | gc.TINT32,
+		gc.OLE<<16 | gc.TINT64:
+		a = x86.AJLE
+
+	case gc.OLE<<16 | gc.TUINT8,
+		gc.OLE<<16 | gc.TUINT16,
+		gc.OLE<<16 | gc.TUINT32,
+		gc.OLE<<16 | gc.TUINT64:
+		a = x86.AJLS
+
+	case gc.OGT<<16 | gc.TINT8,
+		gc.OGT<<16 | gc.TINT16,
+		gc.OGT<<16 | gc.TINT32,
+		gc.OGT<<16 | gc.TINT64:
+		a = x86.AJGT
+
+	case gc.OGT<<16 | gc.TUINT8,
+		gc.OGT<<16 | gc.TUINT16,
+		gc.OGT<<16 | gc.TUINT32,
+		gc.OGT<<16 | gc.TUINT64,
+		gc.OLT<<16 | gc.TFLOAT32,
+		gc.OLT<<16 | gc.TFLOAT64:
+		a = x86.AJHI
+
+	case gc.OGE<<16 | gc.TINT8,
+		gc.OGE<<16 | gc.TINT16,
+		gc.OGE<<16 | gc.TINT32,
+		gc.OGE<<16 | gc.TINT64:
+		a = x86.AJGE
+
+	case gc.OGE<<16 | gc.TUINT8,
+		gc.OGE<<16 | gc.TUINT16,
+		gc.OGE<<16 | gc.TUINT32,
+		gc.OGE<<16 | gc.TUINT64,
+		gc.OLE<<16 | gc.TFLOAT32,
+		gc.OLE<<16 | gc.TFLOAT64:
+		a = x86.AJCC
+
+	case gc.OCMP<<16 | gc.TBOOL,
+		gc.OCMP<<16 | gc.TINT8,
+		gc.OCMP<<16 | gc.TUINT8:
+		a = x86.ACMPB
+
+	case gc.OCMP<<16 | gc.TINT16,
+		gc.OCMP<<16 | gc.TUINT16:
+		a = x86.ACMPW
+
+	case gc.OCMP<<16 | gc.TINT32,
+		gc.OCMP<<16 | gc.TUINT32,
+		gc.OCMP<<16 | gc.TPTR32:
+		a = x86.ACMPL
+
+	case gc.OAS<<16 | gc.TBOOL,
+		gc.OAS<<16 | gc.TINT8,
+		gc.OAS<<16 | gc.TUINT8:
+		a = x86.AMOVB
+
+	case gc.OAS<<16 | gc.TINT16,
+		gc.OAS<<16 | gc.TUINT16:
+		a = x86.AMOVW
+
+	case gc.OAS<<16 | gc.TINT32,
+		gc.OAS<<16 | gc.TUINT32,
+		gc.OAS<<16 | gc.TPTR32:
+		a = x86.AMOVL
+
+	case gc.OAS<<16 | gc.TFLOAT32:
+		a = x86.AMOVSS
+
+	case gc.OAS<<16 | gc.TFLOAT64:
+		a = x86.AMOVSD
+
+	case gc.OADD<<16 | gc.TINT8,
+		gc.OADD<<16 | gc.TUINT8:
+		a = x86.AADDB
+
+	case gc.OADD<<16 | gc.TINT16,
+		gc.OADD<<16 | gc.TUINT16:
+		a = x86.AADDW
+
+	case gc.OADD<<16 | gc.TINT32,
+		gc.OADD<<16 | gc.TUINT32,
+		gc.OADD<<16 | gc.TPTR32:
+		a = x86.AADDL
+
+	case gc.OSUB<<16 | gc.TINT8,
+		gc.OSUB<<16 | gc.TUINT8:
+		a = x86.ASUBB
+
+	case gc.OSUB<<16 | gc.TINT16,
+		gc.OSUB<<16 | gc.TUINT16:
+		a = x86.ASUBW
+
+	case gc.OSUB<<16 | gc.TINT32,
+		gc.OSUB<<16 | gc.TUINT32,
+		gc.OSUB<<16 | gc.TPTR32:
+		a = x86.ASUBL
+
+	case gc.OINC<<16 | gc.TINT8,
+		gc.OINC<<16 | gc.TUINT8:
+		a = x86.AINCB
+
+	case gc.OINC<<16 | gc.TINT16,
+		gc.OINC<<16 | gc.TUINT16:
+		a = x86.AINCW
+
+	case gc.OINC<<16 | gc.TINT32,
+		gc.OINC<<16 | gc.TUINT32,
+		gc.OINC<<16 | gc.TPTR32:
+		a = x86.AINCL
+
+	case gc.ODEC<<16 | gc.TINT8,
+		gc.ODEC<<16 | gc.TUINT8:
+		a = x86.ADECB
+
+	case gc.ODEC<<16 | gc.TINT16,
+		gc.ODEC<<16 | gc.TUINT16:
+		a = x86.ADECW
+
+	case gc.ODEC<<16 | gc.TINT32,
+		gc.ODEC<<16 | gc.TUINT32,
+		gc.ODEC<<16 | gc.TPTR32:
+		a = x86.ADECL
+
+	case gc.OCOM<<16 | gc.TINT8,
+		gc.OCOM<<16 | gc.TUINT8:
+		a = x86.ANOTB
+
+	case gc.OCOM<<16 | gc.TINT16,
+		gc.OCOM<<16 | gc.TUINT16:
+		a = x86.ANOTW
+
+	case gc.OCOM<<16 | gc.TINT32,
+		gc.OCOM<<16 | gc.TUINT32,
+		gc.OCOM<<16 | gc.TPTR32:
+		a = x86.ANOTL
+
+	case gc.OMINUS<<16 | gc.TINT8,
+		gc.OMINUS<<16 | gc.TUINT8:
+		a = x86.ANEGB
+
+	case gc.OMINUS<<16 | gc.TINT16,
+		gc.OMINUS<<16 | gc.TUINT16:
+		a = x86.ANEGW
+
+	case gc.OMINUS<<16 | gc.TINT32,
+		gc.OMINUS<<16 | gc.TUINT32,
+		gc.OMINUS<<16 | gc.TPTR32:
+		a = x86.ANEGL
+
+	case gc.OAND<<16 | gc.TINT8,
+		gc.OAND<<16 | gc.TUINT8:
+		a = x86.AANDB
+
+	case gc.OAND<<16 | gc.TINT16,
+		gc.OAND<<16 | gc.TUINT16:
+		a = x86.AANDW
+
+	case gc.OAND<<16 | gc.TINT32,
+		gc.OAND<<16 | gc.TUINT32,
+		gc.OAND<<16 | gc.TPTR32:
+		a = x86.AANDL
+
+	case gc.OOR<<16 | gc.TINT8,
+		gc.OOR<<16 | gc.TUINT8:
+		a = x86.AORB
+
+	case gc.OOR<<16 | gc.TINT16,
+		gc.OOR<<16 | gc.TUINT16:
+		a = x86.AORW
+
+	case gc.OOR<<16 | gc.TINT32,
+		gc.OOR<<16 | gc.TUINT32,
+		gc.OOR<<16 | gc.TPTR32:
+		a = x86.AORL
+
+	case gc.OXOR<<16 | gc.TINT8,
+		gc.OXOR<<16 | gc.TUINT8:
+		a = x86.AXORB
+
+	case gc.OXOR<<16 | gc.TINT16,
+		gc.OXOR<<16 | gc.TUINT16:
+		a = x86.AXORW
+
+	case gc.OXOR<<16 | gc.TINT32,
+		gc.OXOR<<16 | gc.TUINT32,
+		gc.OXOR<<16 | gc.TPTR32:
+		a = x86.AXORL
+
+	case gc.OLROT<<16 | gc.TINT8,
+		gc.OLROT<<16 | gc.TUINT8:
+		a = x86.AROLB
+
+	case gc.OLROT<<16 | gc.TINT16,
+		gc.OLROT<<16 | gc.TUINT16:
+		a = x86.AROLW
+
+	case gc.OLROT<<16 | gc.TINT32,
+		gc.OLROT<<16 | gc.TUINT32,
+		gc.OLROT<<16 | gc.TPTR32:
+		a = x86.AROLL
+
+	case gc.OLSH<<16 | gc.TINT8,
+		gc.OLSH<<16 | gc.TUINT8:
+		a = x86.ASHLB
+
+	case gc.OLSH<<16 | gc.TINT16,
+		gc.OLSH<<16 | gc.TUINT16:
+		a = x86.ASHLW
+
+	case gc.OLSH<<16 | gc.TINT32,
+		gc.OLSH<<16 | gc.TUINT32,
+		gc.OLSH<<16 | gc.TPTR32:
+		a = x86.ASHLL
+
+	case gc.ORSH<<16 | gc.TUINT8:
+		a = x86.ASHRB
+
+	case gc.ORSH<<16 | gc.TUINT16:
+		a = x86.ASHRW
+
+	case gc.ORSH<<16 | gc.TUINT32,
+		gc.ORSH<<16 | gc.TPTR32:
+		a = x86.ASHRL
+
+	case gc.ORSH<<16 | gc.TINT8:
+		a = x86.ASARB
+
+	case gc.ORSH<<16 | gc.TINT16:
+		a = x86.ASARW
+
+	case gc.ORSH<<16 | gc.TINT32:
+		a = x86.ASARL
+
+	case gc.OHMUL<<16 | gc.TINT8,
+		gc.OMUL<<16 | gc.TINT8,
+		gc.OMUL<<16 | gc.TUINT8:
+		a = x86.AIMULB
+
+	case gc.OHMUL<<16 | gc.TINT16,
+		gc.OMUL<<16 | gc.TINT16,
+		gc.OMUL<<16 | gc.TUINT16:
+		a = x86.AIMULW
+
+	case gc.OHMUL<<16 | gc.TINT32,
+		gc.OMUL<<16 | gc.TINT32,
+		gc.OMUL<<16 | gc.TUINT32,
+		gc.OMUL<<16 | gc.TPTR32:
+		a = x86.AIMULL
+
+	case gc.OHMUL<<16 | gc.TUINT8:
+		a = x86.AMULB
+
+	case gc.OHMUL<<16 | gc.TUINT16:
+		a = x86.AMULW
+
+	case gc.OHMUL<<16 | gc.TUINT32,
+		gc.OHMUL<<16 | gc.TPTR32:
+		a = x86.AMULL
+
+	case gc.ODIV<<16 | gc.TINT8,
+		gc.OMOD<<16 | gc.TINT8:
+		a = x86.AIDIVB
+
+	case gc.ODIV<<16 | gc.TUINT8,
+		gc.OMOD<<16 | gc.TUINT8:
+		a = x86.ADIVB
+
+	case gc.ODIV<<16 | gc.TINT16,
+		gc.OMOD<<16 | gc.TINT16:
+		a = x86.AIDIVW
+
+	case gc.ODIV<<16 | gc.TUINT16,
+		gc.OMOD<<16 | gc.TUINT16:
+		a = x86.ADIVW
+
+	case gc.ODIV<<16 | gc.TINT32,
+		gc.OMOD<<16 | gc.TINT32:
+		a = x86.AIDIVL
+
+	case gc.ODIV<<16 | gc.TUINT32,
+		gc.ODIV<<16 | gc.TPTR32,
+		gc.OMOD<<16 | gc.TUINT32,
+		gc.OMOD<<16 | gc.TPTR32:
+		a = x86.ADIVL
+
+	case gc.OEXTEND<<16 | gc.TINT16:
+		a = x86.ACWD
+
+	case gc.OEXTEND<<16 | gc.TINT32:
+		a = x86.ACDQ
+	}
+
+	return a
+}
+
+func foptoas(op int, t *gc.Type, flg int) int {
+	a := obj.AXXX
+	et := int(gc.Simtype[t.Etype])
+
+	if !gc.Thearch.Use387 {
+		switch uint32(op)<<16 | uint32(et) {
+		default:
+			gc.Fatal("foptoas-sse: no entry %v-%v", gc.Oconv(int(op), 0), t)
+
+		case gc.OCMP<<16 | gc.TFLOAT32:
+			a = x86.AUCOMISS
+
+		case gc.OCMP<<16 | gc.TFLOAT64:
+			a = x86.AUCOMISD
+
+		case gc.OAS<<16 | gc.TFLOAT32:
+			a = x86.AMOVSS
+
+		case gc.OAS<<16 | gc.TFLOAT64:
+			a = x86.AMOVSD
+
+		case gc.OADD<<16 | gc.TFLOAT32:
+			a = x86.AADDSS
+
+		case gc.OADD<<16 | gc.TFLOAT64:
+			a = x86.AADDSD
+
+		case gc.OSUB<<16 | gc.TFLOAT32:
+			a = x86.ASUBSS
+
+		case gc.OSUB<<16 | gc.TFLOAT64:
+			a = x86.ASUBSD
+
+		case gc.OMUL<<16 | gc.TFLOAT32:
+			a = x86.AMULSS
+
+		case gc.OMUL<<16 | gc.TFLOAT64:
+			a = x86.AMULSD
+
+		case gc.ODIV<<16 | gc.TFLOAT32:
+			a = x86.ADIVSS
+
+		case gc.ODIV<<16 | gc.TFLOAT64:
+			a = x86.ADIVSD
+		}
+
+		return a
+	}
+
+	// If we need Fpop, it means we're working on
+	// two different floating-point registers, not memory.
+	// In that case the instruction only has a float64 form.
+	if flg&Fpop != 0 {
+		et = gc.TFLOAT64
+	}
+
+	// clear Frev if unneeded
+	switch op {
+	case gc.OADD,
+		gc.OMUL:
+		flg &^= Frev
+	}
+
+	switch uint32(op)<<16 | (uint32(et)<<8 | uint32(flg)) {
+	case gc.OADD<<16 | (gc.TFLOAT32<<8 | 0):
+		return x86.AFADDF
+
+	case gc.OADD<<16 | (gc.TFLOAT64<<8 | 0):
+		return x86.AFADDD
+
+	case gc.OADD<<16 | (gc.TFLOAT64<<8 | Fpop):
+		return x86.AFADDDP
+
+	case gc.OSUB<<16 | (gc.TFLOAT32<<8 | 0):
+		return x86.AFSUBF
+
+	case gc.OSUB<<16 | (gc.TFLOAT32<<8 | Frev):
+		return x86.AFSUBRF
+
+	case gc.OSUB<<16 | (gc.TFLOAT64<<8 | 0):
+		return x86.AFSUBD
+
+	case gc.OSUB<<16 | (gc.TFLOAT64<<8 | Frev):
+		return x86.AFSUBRD
+
+	case gc.OSUB<<16 | (gc.TFLOAT64<<8 | Fpop):
+		return x86.AFSUBDP
+
+	case gc.OSUB<<16 | (gc.TFLOAT64<<8 | (Fpop | Frev)):
+		return x86.AFSUBRDP
+
+	case gc.OMUL<<16 | (gc.TFLOAT32<<8 | 0):
+		return x86.AFMULF
+
+	case gc.OMUL<<16 | (gc.TFLOAT64<<8 | 0):
+		return x86.AFMULD
+
+	case gc.OMUL<<16 | (gc.TFLOAT64<<8 | Fpop):
+		return x86.AFMULDP
+
+	case gc.ODIV<<16 | (gc.TFLOAT32<<8 | 0):
+		return x86.AFDIVF
+
+	case gc.ODIV<<16 | (gc.TFLOAT32<<8 | Frev):
+		return x86.AFDIVRF
+
+	case gc.ODIV<<16 | (gc.TFLOAT64<<8 | 0):
+		return x86.AFDIVD
+
+	case gc.ODIV<<16 | (gc.TFLOAT64<<8 | Frev):
+		return x86.AFDIVRD
+
+	case gc.ODIV<<16 | (gc.TFLOAT64<<8 | Fpop):
+		return x86.AFDIVDP
+
+	case gc.ODIV<<16 | (gc.TFLOAT64<<8 | (Fpop | Frev)):
+		return x86.AFDIVRDP
+
+	case gc.OCMP<<16 | (gc.TFLOAT32<<8 | 0):
+		return x86.AFCOMF
+
+	case gc.OCMP<<16 | (gc.TFLOAT32<<8 | Fpop):
+		return x86.AFCOMFP
+
+	case gc.OCMP<<16 | (gc.TFLOAT64<<8 | 0):
+		return x86.AFCOMD
+
+	case gc.OCMP<<16 | (gc.TFLOAT64<<8 | Fpop):
+		return x86.AFCOMDP
+
+	case gc.OCMP<<16 | (gc.TFLOAT64<<8 | Fpop2):
+		return x86.AFCOMDPP
+
+	case gc.OMINUS<<16 | (gc.TFLOAT32<<8 | 0):
+		return x86.AFCHS
+
+	case gc.OMINUS<<16 | (gc.TFLOAT64<<8 | 0):
+		return x86.AFCHS
+	}
+
+	gc.Fatal("foptoas %v %v %#x", gc.Oconv(int(op), 0), t, flg)
+	return 0
+}
+
+var resvd = []int{
+	//	REG_DI,	// for movstring
+	//	REG_SI,	// for movstring
+
+	x86.REG_AX, // for divide
+	x86.REG_CX, // for shift
+	x86.REG_DX, // for divide
+	x86.REG_SP, // for stack
+}
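+
+// These registers have fixed roles in generated code (DIV and MOD use
+// AX:DX, variable shifts use CX, SP is the stack pointer), so the
+// register allocator must never hand them out.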
+
+/*
+ * generate
+ *	as $c, reg
+ */
+func gconreg(as int, c int64, reg int) {
+	var n1 gc.Node
+	var n2 gc.Node
+
+	gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
+	gc.Nodreg(&n2, gc.Types[gc.TINT64], reg)
+	gins(as, &n1, &n2)
+}
+
+/*
+ * generate
+ *	as $c, n
+ */
+func ginscon(as int, c int64, n2 *gc.Node) {
+	var n1 gc.Node
+	gc.Nodconst(&n1, gc.Types[gc.TINT32], c)
+	gins(as, &n1, n2)
+}
+
+func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
+	if gc.Isint[t.Etype] || int(t.Etype) == gc.Tptr {
+		if (n1.Op == gc.OLITERAL || n1.Op == gc.OADDR && n1.Left.Op == gc.ONAME) && n2.Op != gc.OLITERAL {
+			// Reverse comparison to place constant (including address constant) last.
+			op = gc.Brrev(op)
+			n1, n2 = n2, n1
+		}
+	}
+
+	// General case.
+	var r1, r2, g1, g2 gc.Node
+	if n1.Op == gc.ONAME && n1.Class&gc.PHEAP == 0 || n1.Op == gc.OINDREG {
+		r1 = *n1
+	} else {
+		gc.Regalloc(&r1, t, n1)
+		gc.Regalloc(&g1, n1.Type, &r1)
+		gc.Cgen(n1, &g1)
+		gmove(&g1, &r1)
+	}
+	if n2.Op == gc.OLITERAL && gc.Isint[t.Etype] || n2.Op == gc.OADDR && n2.Left.Op == gc.ONAME && n2.Left.Class == gc.PEXTERN {
+		r2 = *n2
+	} else {
+		gc.Regalloc(&r2, t, n2)
+		gc.Regalloc(&g2, n1.Type, &r2)
+		gc.Cgen(n2, &g2)
+		gmove(&g2, &r2)
+	}
+	gins(optoas(gc.OCMP, t), &r1, &r2)
+	if r1.Op == gc.OREGISTER {
+		gc.Regfree(&g1)
+		gc.Regfree(&r1)
+	}
+	if r2.Op == gc.OREGISTER {
+		gc.Regfree(&g2)
+		gc.Regfree(&r2)
+	}
+	return gc.Gbranch(optoas(op, t), nil, likely)
+}
+
+/*
+ * swap node contents
+ */
+func nswap(a *gc.Node, b *gc.Node) {
+	t := *a
+	*a = *b
+	*b = t
+}
+
+/*
+ * return constant i node.
+ * overwritten by next call, but useful in calls to gins.
+ */
+
+var ncon_n gc.Node
+
+func ncon(i uint32) *gc.Node {
+	if ncon_n.Type == nil {
+		gc.Nodconst(&ncon_n, gc.Types[gc.TUINT32], 0)
+	}
+	ncon_n.SetInt(int64(i))
+	return &ncon_n
+}
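+
+// Because ncon returns a pointer to a single package-level node, at most
+// one ncon value may be live at a time; it is meant for immediate use as
+// a gins operand, as in gins(x86.AMOVL, ncon(0), &thi).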
+
+var sclean [10]gc.Node
+
+var nsclean int
+
+/*
+ * n is a 64-bit value.  fill in lo and hi to refer to its 32-bit halves.
+ */
+func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
+	if !gc.Is64(n.Type) {
+		gc.Fatal("split64 %v", n.Type)
+	}
+
+	if nsclean >= len(sclean) {
+		gc.Fatal("split64 clean")
+	}
+	sclean[nsclean].Op = gc.OEMPTY
+	nsclean++
+	switch n.Op {
+	default:
+		switch n.Op {
+		default:
+			var n1 gc.Node
+			if !dotaddable(n, &n1) {
+				gc.Igen(n, &n1, nil)
+				sclean[nsclean-1] = n1
+			}
+
+			n = &n1
+
+		case gc.ONAME:
+			if n.Class == gc.PPARAMREF {
+				var n1 gc.Node
+				gc.Cgen(n.Name.Heapaddr, &n1)
+				sclean[nsclean-1] = n1
+				n = &n1
+			}
+
+		case gc.OINDREG:
+			// nothing to do; n is already addressable.
+			break
+		}
+
+		*lo = *n
+		*hi = *n
+		lo.Type = gc.Types[gc.TUINT32]
+		if n.Type.Etype == gc.TINT64 {
+			hi.Type = gc.Types[gc.TINT32]
+		} else {
+			hi.Type = gc.Types[gc.TUINT32]
+		}
+		hi.Xoffset += 4
+
+	case gc.OLITERAL:
+		var n1 gc.Node
+		n.Convconst(&n1, n.Type)
+		i := n1.Int()
+		gc.Nodconst(lo, gc.Types[gc.TUINT32], int64(uint32(i)))
+		i >>= 32
+		if n.Type.Etype == gc.TINT64 {
+			gc.Nodconst(hi, gc.Types[gc.TINT32], int64(int32(i)))
+		} else {
+			gc.Nodconst(hi, gc.Types[gc.TUINT32], int64(uint32(i)))
+		}
+	}
+}
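+
+// A typical use, sketched (lo and hi are illustrative names):
+//	var lo, hi gc.Node
+//	split64(n, &lo, &hi)
+//	// ... operate on the 32-bit halves with ordinary instructions ...
+//	splitclean()
+// Every split64 must be paired with a splitclean, and at most
+// len(sclean) splits may be live at once.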
+
+func splitclean() {
+	if nsclean <= 0 {
+		gc.Fatal("splitclean")
+	}
+	nsclean--
+	if sclean[nsclean].Op != gc.OEMPTY {
+		gc.Regfree(&sclean[nsclean])
+	}
+}
+
+// Nodes for the floating-point constants 0, 2^63, and 2^64, set up once
+// by bignodes and used by the float-to-uint64 conversion in floatmove.
+var (
+	zerof        gc.Node
+	two63f       gc.Node
+	two64f       gc.Node
+	bignodes_did bool
+)
+
+func bignodes() {
+	if bignodes_did {
+		return
+	}
+	bignodes_did = true
+
+	gc.Nodconst(&zerof, gc.Types[gc.TINT64], 0)
+	zerof.Convconst(&zerof, gc.Types[gc.TFLOAT64])
+
+	var i big.Int
+	i.SetInt64(1)
+	i.Lsh(&i, 63)
+	var bigi gc.Node
+
+	gc.Nodconst(&bigi, gc.Types[gc.TUINT64], 0)
+	bigi.SetBigInt(&i)
+	bigi.Convconst(&two63f, gc.Types[gc.TFLOAT64])
+
+	gc.Nodconst(&bigi, gc.Types[gc.TUINT64], 0)
+	i.Lsh(&i, 1)
+	bigi.SetBigInt(&i)
+	bigi.Convconst(&two64f, gc.Types[gc.TFLOAT64])
+}
+
+func memname(n *gc.Node, t *gc.Type) {
+	gc.Tempname(n, t)
+	n.Sym = gc.Lookup("." + n.Sym.Name[1:]) // keep optimizer from registerizing
+	n.Orig.Sym = n.Sym
+}
+
+func gmove(f *gc.Node, t *gc.Node) {
+	if gc.Debug['M'] != 0 {
+		fmt.Printf("gmove %v -> %v\n", f, t)
+	}
+
+	ft := gc.Simsimtype(f.Type)
+	tt := gc.Simsimtype(t.Type)
+	cvt := t.Type
+
+	if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
+		gc.Complexmove(f, t)
+		return
+	}
+
+	if gc.Isfloat[ft] || gc.Isfloat[tt] {
+		floatmove(f, t)
+		return
+	}
+
+	// cannot have two integer memory operands;
+	// except 64-bit, which always copies via registers anyway.
+	var r1 gc.Node
+	var a int
+	if gc.Isint[ft] && gc.Isint[tt] && !gc.Is64(f.Type) && !gc.Is64(t.Type) && gc.Ismem(f) && gc.Ismem(t) {
+		goto hard
+	}
+
+	// convert constant to desired type
+	if f.Op == gc.OLITERAL {
+		var con gc.Node
+		f.Convconst(&con, t.Type)
+		f = &con
+		ft = gc.Simsimtype(con.Type)
+	}
+
+	// value -> value copy, only one memory operand.
+	// figure out the instruction to use.
+	// break out of switch for one-instruction gins.
+	// goto rdst for "destination must be register".
+	// goto hard for "convert to cvt type first".
+	// otherwise handle and return.
+
+	switch uint32(ft)<<16 | uint32(tt) {
+	default:
+		// should not happen
+		gc.Fatal("gmove %v -> %v", f, t)
+		return
+
+		/*
+		 * integer copy and truncate
+		 */
+	case gc.TINT8<<16 | gc.TINT8, // same size
+		gc.TINT8<<16 | gc.TUINT8,
+		gc.TUINT8<<16 | gc.TINT8,
+		gc.TUINT8<<16 | gc.TUINT8:
+		a = x86.AMOVB
+
+	case gc.TINT16<<16 | gc.TINT8, // truncate
+		gc.TUINT16<<16 | gc.TINT8,
+		gc.TINT32<<16 | gc.TINT8,
+		gc.TUINT32<<16 | gc.TINT8,
+		gc.TINT16<<16 | gc.TUINT8,
+		gc.TUINT16<<16 | gc.TUINT8,
+		gc.TINT32<<16 | gc.TUINT8,
+		gc.TUINT32<<16 | gc.TUINT8:
+		a = x86.AMOVB
+
+		goto rsrc
+
+	case gc.TINT64<<16 | gc.TINT8, // truncate low word
+		gc.TUINT64<<16 | gc.TINT8,
+		gc.TINT64<<16 | gc.TUINT8,
+		gc.TUINT64<<16 | gc.TUINT8:
+		var flo gc.Node
+		var fhi gc.Node
+		split64(f, &flo, &fhi)
+
+		var r1 gc.Node
+		gc.Nodreg(&r1, t.Type, x86.REG_AX)
+		gmove(&flo, &r1)
+		gins(x86.AMOVB, &r1, t)
+		splitclean()
+		return
+
+	case gc.TINT16<<16 | gc.TINT16, // same size
+		gc.TINT16<<16 | gc.TUINT16,
+		gc.TUINT16<<16 | gc.TINT16,
+		gc.TUINT16<<16 | gc.TUINT16:
+		a = x86.AMOVW
+
+	case gc.TINT32<<16 | gc.TINT16, // truncate
+		gc.TUINT32<<16 | gc.TINT16,
+		gc.TINT32<<16 | gc.TUINT16,
+		gc.TUINT32<<16 | gc.TUINT16:
+		a = x86.AMOVW
+
+		goto rsrc
+
+	case gc.TINT64<<16 | gc.TINT16, // truncate low word
+		gc.TUINT64<<16 | gc.TINT16,
+		gc.TINT64<<16 | gc.TUINT16,
+		gc.TUINT64<<16 | gc.TUINT16:
+		var flo gc.Node
+		var fhi gc.Node
+		split64(f, &flo, &fhi)
+
+		var r1 gc.Node
+		gc.Nodreg(&r1, t.Type, x86.REG_AX)
+		gmove(&flo, &r1)
+		gins(x86.AMOVW, &r1, t)
+		splitclean()
+		return
+
+	case gc.TINT32<<16 | gc.TINT32, // same size
+		gc.TINT32<<16 | gc.TUINT32,
+		gc.TUINT32<<16 | gc.TINT32,
+		gc.TUINT32<<16 | gc.TUINT32:
+		a = x86.AMOVL
+
+	case gc.TINT64<<16 | gc.TINT32, // truncate
+		gc.TUINT64<<16 | gc.TINT32,
+		gc.TINT64<<16 | gc.TUINT32,
+		gc.TUINT64<<16 | gc.TUINT32:
+		var fhi gc.Node
+		var flo gc.Node
+		split64(f, &flo, &fhi)
+
+		var r1 gc.Node
+		gc.Nodreg(&r1, t.Type, x86.REG_AX)
+		gmove(&flo, &r1)
+		gins(x86.AMOVL, &r1, t)
+		splitclean()
+		return
+
+	case gc.TINT64<<16 | gc.TINT64, // same size
+		gc.TINT64<<16 | gc.TUINT64,
+		gc.TUINT64<<16 | gc.TINT64,
+		gc.TUINT64<<16 | gc.TUINT64:
+		var fhi gc.Node
+		var flo gc.Node
+		split64(f, &flo, &fhi)
+
+		var tlo gc.Node
+		var thi gc.Node
+		split64(t, &tlo, &thi)
+		if f.Op == gc.OLITERAL {
+			gins(x86.AMOVL, &flo, &tlo)
+			gins(x86.AMOVL, &fhi, &thi)
+		} else {
+			var r1 gc.Node
+			gc.Nodreg(&r1, gc.Types[gc.TUINT32], x86.REG_AX)
+			var r2 gc.Node
+			gc.Nodreg(&r2, gc.Types[gc.TUINT32], x86.REG_DX)
+			gins(x86.AMOVL, &flo, &r1)
+			gins(x86.AMOVL, &fhi, &r2)
+			gins(x86.AMOVL, &r1, &tlo)
+			gins(x86.AMOVL, &r2, &thi)
+		}
+
+		splitclean()
+		splitclean()
+		return
+
+		/*
+		 * integer up-conversions
+		 */
+	case gc.TINT8<<16 | gc.TINT16, // sign extend int8
+		gc.TINT8<<16 | gc.TUINT16:
+		a = x86.AMOVBWSX
+
+		goto rdst
+
+	case gc.TINT8<<16 | gc.TINT32,
+		gc.TINT8<<16 | gc.TUINT32:
+		a = x86.AMOVBLSX
+		goto rdst
+
+	case gc.TINT8<<16 | gc.TINT64, // convert via int32
+		gc.TINT8<<16 | gc.TUINT64:
+		cvt = gc.Types[gc.TINT32]
+
+		goto hard
+
+	case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8
+		gc.TUINT8<<16 | gc.TUINT16:
+		a = x86.AMOVBWZX
+
+		goto rdst
+
+	case gc.TUINT8<<16 | gc.TINT32,
+		gc.TUINT8<<16 | gc.TUINT32:
+		a = x86.AMOVBLZX
+		goto rdst
+
+	case gc.TUINT8<<16 | gc.TINT64, // convert via uint32
+		gc.TUINT8<<16 | gc.TUINT64:
+		cvt = gc.Types[gc.TUINT32]
+
+		goto hard
+
+	case gc.TINT16<<16 | gc.TINT32, // sign extend int16
+		gc.TINT16<<16 | gc.TUINT32:
+		a = x86.AMOVWLSX
+
+		goto rdst
+
+	case gc.TINT16<<16 | gc.TINT64, // convert via int32
+		gc.TINT16<<16 | gc.TUINT64:
+		cvt = gc.Types[gc.TINT32]
+
+		goto hard
+
+	case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16
+		gc.TUINT16<<16 | gc.TUINT32:
+		a = x86.AMOVWLZX
+
+		goto rdst
+
+	case gc.TUINT16<<16 | gc.TINT64, // convert via uint32
+		gc.TUINT16<<16 | gc.TUINT64:
+		cvt = gc.Types[gc.TUINT32]
+
+		goto hard
+
+	case gc.TINT32<<16 | gc.TINT64, // sign extend int32
+		gc.TINT32<<16 | gc.TUINT64:
+		var thi gc.Node
+		var tlo gc.Node
+		split64(t, &tlo, &thi)
+
+		var flo gc.Node
+		gc.Nodreg(&flo, tlo.Type, x86.REG_AX)
+		var fhi gc.Node
+		gc.Nodreg(&fhi, thi.Type, x86.REG_DX)
+		gmove(f, &flo)
+		gins(x86.ACDQ, nil, nil)
+		gins(x86.AMOVL, &flo, &tlo)
+		gins(x86.AMOVL, &fhi, &thi)
+		splitclean()
+		return
+
+	case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
+		gc.TUINT32<<16 | gc.TUINT64:
+		var tlo gc.Node
+		var thi gc.Node
+		split64(t, &tlo, &thi)
+
+		gmove(f, &tlo)
+		gins(x86.AMOVL, ncon(0), &thi)
+		splitclean()
+		return
+	}
+
+	gins(a, f, t)
+	return
+
+	// requires register source
+rsrc:
+	gc.Regalloc(&r1, f.Type, t)
+
+	gmove(f, &r1)
+	gins(a, &r1, t)
+	gc.Regfree(&r1)
+	return
+
+	// requires register destination
+rdst:
+	gc.Regalloc(&r1, t.Type, t)
+
+	gins(a, f, &r1)
+	gmove(&r1, t)
+	gc.Regfree(&r1)
+	return
+
+	// requires register intermediate
+hard:
+	gc.Regalloc(&r1, cvt, t)
+
+	gmove(f, &r1)
+	gmove(&r1, t)
+	gc.Regfree(&r1)
+	return
+}
+
+func floatmove(f *gc.Node, t *gc.Node) {
+	var r1 gc.Node
+
+	ft := gc.Simsimtype(f.Type)
+	tt := gc.Simsimtype(t.Type)
+	cvt := t.Type
+
+	// cannot have two floating point memory operands.
+	if gc.Isfloat[ft] && gc.Isfloat[tt] && gc.Ismem(f) && gc.Ismem(t) {
+		goto hard
+	}
+
+	// convert constant to desired type
+	if f.Op == gc.OLITERAL {
+		var con gc.Node
+		f.Convconst(&con, t.Type)
+		f = &con
+		ft = gc.Simsimtype(con.Type)
+
+		// some constants can't move directly to memory.
+		if gc.Ismem(t) {
+			// float constants come from memory.
+			if gc.Isfloat[tt] {
+				goto hard
+			}
+		}
+	}
+
+	// value -> value copy, only one memory operand.
+	// figure out the instruction to use.
+	// break out of switch for one-instruction gins.
+	// goto rdst for "destination must be register".
+	// goto hard for "convert to cvt type first".
+	// otherwise handle and return.
+
+	switch uint32(ft)<<16 | uint32(tt) {
+	default:
+		if gc.Thearch.Use387 {
+			floatmove_387(f, t)
+		} else {
+			floatmove_sse(f, t)
+		}
+		return
+
+		// float to very long integer.
+	case gc.TFLOAT32<<16 | gc.TINT64,
+		gc.TFLOAT64<<16 | gc.TINT64:
+		if f.Op == gc.OREGISTER {
+			cvt = f.Type
+			goto hardmem
+		}
+
+		var r1 gc.Node
+		gc.Nodreg(&r1, gc.Types[ft], x86.REG_F0)
+		if ft == gc.TFLOAT32 {
+			gins(x86.AFMOVF, f, &r1)
+		} else {
+			gins(x86.AFMOVD, f, &r1)
+		}
+
+		// set round to zero mode during conversion
+		var t1 gc.Node
+		memname(&t1, gc.Types[gc.TUINT16])
+
+		var t2 gc.Node
+		memname(&t2, gc.Types[gc.TUINT16])
+		gins(x86.AFSTCW, nil, &t1)
+		gins(x86.AMOVW, ncon(0xf7f), &t2)
+		gins(x86.AFLDCW, &t2, nil)
+		if tt == gc.TINT16 {
+			gins(x86.AFMOVWP, &r1, t)
+		} else if tt == gc.TINT32 {
+			gins(x86.AFMOVLP, &r1, t)
+		} else {
+			gins(x86.AFMOVVP, &r1, t)
+		}
+		gins(x86.AFLDCW, &t1, nil)
+		return
+
+	case gc.TFLOAT32<<16 | gc.TUINT64,
+		gc.TFLOAT64<<16 | gc.TUINT64:
+		if !gc.Ismem(f) {
+			cvt = f.Type
+			goto hardmem
+		}
+
+		bignodes()
+		var f0 gc.Node
+		gc.Nodreg(&f0, gc.Types[ft], x86.REG_F0)
+		var f1 gc.Node
+		gc.Nodreg(&f1, gc.Types[ft], x86.REG_F0+1)
+		var ax gc.Node
+		gc.Nodreg(&ax, gc.Types[gc.TUINT16], x86.REG_AX)
+
+		if ft == gc.TFLOAT32 {
+			gins(x86.AFMOVF, f, &f0)
+		} else {
+			gins(x86.AFMOVD, f, &f0)
+		}
+
+		// if 0 > v { answer = 0 }
+		gins(x86.AFMOVD, &zerof, &f0)
+
+		gins(x86.AFUCOMIP, &f0, &f1)
+		p1 := gc.Gbranch(optoas(gc.OGT, gc.Types[tt]), nil, 0)
+
+		// if 1<<64 <= v { answer = 0 too }
+		gins(x86.AFMOVD, &two64f, &f0)
+
+		gins(x86.AFUCOMIP, &f0, &f1)
+		p2 := gc.Gbranch(optoas(gc.OGT, gc.Types[tt]), nil, 0)
+		gc.Patch(p1, gc.Pc)
+		gins(x86.AFMOVVP, &f0, t) // don't care about t, but will pop the stack
+		var thi gc.Node
+		var tlo gc.Node
+		split64(t, &tlo, &thi)
+		gins(x86.AMOVL, ncon(0), &tlo)
+		gins(x86.AMOVL, ncon(0), &thi)
+		splitclean()
+		p1 = gc.Gbranch(obj.AJMP, nil, 0)
+		gc.Patch(p2, gc.Pc)
+
+		// in range; algorithm is:
+		//	if small enough, use native float64 -> int64 conversion.
+		//	otherwise, subtract 2^63, convert, and add it back.
+
+		// set round to zero mode during conversion
+		var t1 gc.Node
+		memname(&t1, gc.Types[gc.TUINT16])
+
+		var t2 gc.Node
+		memname(&t2, gc.Types[gc.TUINT16])
+		gins(x86.AFSTCW, nil, &t1)
+		gins(x86.AMOVW, ncon(0xf7f), &t2)
+		gins(x86.AFLDCW, &t2, nil)
+
+		// actual work
+		gins(x86.AFMOVD, &two63f, &f0)
+
+		gins(x86.AFUCOMIP, &f0, &f1)
+		p2 = gc.Gbranch(optoas(gc.OLE, gc.Types[tt]), nil, 0)
+		gins(x86.AFMOVVP, &f0, t)
+		p3 := gc.Gbranch(obj.AJMP, nil, 0)
+		gc.Patch(p2, gc.Pc)
+		gins(x86.AFMOVD, &two63f, &f0)
+		gins(x86.AFSUBDP, &f0, &f1)
+		gins(x86.AFMOVVP, &f0, t)
+		split64(t, &tlo, &thi)
+		gins(x86.AXORL, ncon(0x80000000), &thi) // + 2^63
+		gc.Patch(p3, gc.Pc)
+		splitclean()
+
+		// restore rounding mode
+		gins(x86.AFLDCW, &t1, nil)
+
+		gc.Patch(p1, gc.Pc)
+		return
+
+		/*
+		 * integer to float
+		 */
+	case gc.TINT64<<16 | gc.TFLOAT32,
+		gc.TINT64<<16 | gc.TFLOAT64:
+		if t.Op == gc.OREGISTER {
+			goto hardmem
+		}
+		var f0 gc.Node
+		gc.Nodreg(&f0, t.Type, x86.REG_F0)
+		gins(x86.AFMOVV, f, &f0)
+		if tt == gc.TFLOAT32 {
+			gins(x86.AFMOVFP, &f0, t)
+		} else {
+			gins(x86.AFMOVDP, &f0, t)
+		}
+		return
+
+		// algorithm is:
+		//	if small enough, use native int64 -> float64 conversion.
+		//	otherwise, halve (rounding to odd?), convert, and double.
+	case gc.TUINT64<<16 | gc.TFLOAT32,
+		gc.TUINT64<<16 | gc.TFLOAT64:
+		var ax gc.Node
+		gc.Nodreg(&ax, gc.Types[gc.TUINT32], x86.REG_AX)
+
+		var dx gc.Node
+		gc.Nodreg(&dx, gc.Types[gc.TUINT32], x86.REG_DX)
+		var cx gc.Node
+		gc.Nodreg(&cx, gc.Types[gc.TUINT32], x86.REG_CX)
+		var t1 gc.Node
+		gc.Tempname(&t1, f.Type)
+		var tlo gc.Node
+		var thi gc.Node
+		split64(&t1, &tlo, &thi)
+		gmove(f, &t1)
+		gins(x86.ACMPL, &thi, ncon(0))
+		p1 := gc.Gbranch(x86.AJLT, nil, 0)
+
+		// native
+		var r1 gc.Node
+		gc.Nodreg(&r1, gc.Types[tt], x86.REG_F0)
+
+		gins(x86.AFMOVV, &t1, &r1)
+		if tt == gc.TFLOAT32 {
+			gins(x86.AFMOVFP, &r1, t)
+		} else {
+			gins(x86.AFMOVDP, &r1, t)
+		}
+		p2 := gc.Gbranch(obj.AJMP, nil, 0)
+
+		// simulated
+		gc.Patch(p1, gc.Pc)
+
+		gmove(&tlo, &ax)
+		gmove(&thi, &dx)
+		p1 = gins(x86.ASHRL, ncon(1), &ax)
+		p1.From.Index = x86.REG_DX // double-width shift DX -> AX
+		p1.From.Scale = 0
+		gins(x86.AMOVL, ncon(0), &cx)
+		gins(x86.ASETCC, nil, &cx)
+		gins(x86.AORL, &cx, &ax)
+		gins(x86.ASHRL, ncon(1), &dx)
+		gmove(&dx, &thi)
+		gmove(&ax, &tlo)
+		gc.Nodreg(&r1, gc.Types[tt], x86.REG_F0)
+		var r2 gc.Node
+		gc.Nodreg(&r2, gc.Types[tt], x86.REG_F0+1)
+		gins(x86.AFMOVV, &t1, &r1)
+		gins(x86.AFMOVD, &r1, &r1)
+		gins(x86.AFADDDP, &r1, &r2)
+		if tt == gc.TFLOAT32 {
+			gins(x86.AFMOVFP, &r1, t)
+		} else {
+			gins(x86.AFMOVDP, &r1, t)
+		}
+		gc.Patch(p2, gc.Pc)
+		splitclean()
+		return
+	}
+
+	// requires register intermediate
+hard:
+	gc.Regalloc(&r1, cvt, t)
+
+	gmove(f, &r1)
+	gmove(&r1, t)
+	gc.Regfree(&r1)
+	return
+
+	// requires memory intermediate
+hardmem:
+	gc.Tempname(&r1, cvt)
+
+	gmove(f, &r1)
+	gmove(&r1, t)
+	return
+}
+
+func floatmove_387(f *gc.Node, t *gc.Node) {
+	var r1 gc.Node
+	var a int
+
+	ft := gc.Simsimtype(f.Type)
+	tt := gc.Simsimtype(t.Type)
+	cvt := t.Type
+
+	switch uint32(ft)<<16 | uint32(tt) {
+	default:
+		goto fatal
+
+		/*
+		 * float to integer
+		 */
+	case gc.TFLOAT32<<16 | gc.TINT16,
+		gc.TFLOAT32<<16 | gc.TINT32,
+		gc.TFLOAT32<<16 | gc.TINT64,
+		gc.TFLOAT64<<16 | gc.TINT16,
+		gc.TFLOAT64<<16 | gc.TINT32,
+		gc.TFLOAT64<<16 | gc.TINT64:
+		if t.Op == gc.OREGISTER {
+			goto hardmem
+		}
+		var r1 gc.Node
+		gc.Nodreg(&r1, gc.Types[ft], x86.REG_F0)
+		if f.Op != gc.OREGISTER {
+			if ft == gc.TFLOAT32 {
+				gins(x86.AFMOVF, f, &r1)
+			} else {
+				gins(x86.AFMOVD, f, &r1)
+			}
+		}
+
+		// set round to zero mode during conversion
+		var t1 gc.Node
+		memname(&t1, gc.Types[gc.TUINT16])
+
+		var t2 gc.Node
+		memname(&t2, gc.Types[gc.TUINT16])
+		gins(x86.AFSTCW, nil, &t1)
+		gins(x86.AMOVW, ncon(0xf7f), &t2)
+		gins(x86.AFLDCW, &t2, nil)
+		if tt == gc.TINT16 {
+			gins(x86.AFMOVWP, &r1, t)
+		} else if tt == gc.TINT32 {
+			gins(x86.AFMOVLP, &r1, t)
+		} else {
+			gins(x86.AFMOVVP, &r1, t)
+		}
+		gins(x86.AFLDCW, &t1, nil)
+		return
+
+		// convert via int32.
+	case gc.TFLOAT32<<16 | gc.TINT8,
+		gc.TFLOAT32<<16 | gc.TUINT16,
+		gc.TFLOAT32<<16 | gc.TUINT8,
+		gc.TFLOAT64<<16 | gc.TINT8,
+		gc.TFLOAT64<<16 | gc.TUINT16,
+		gc.TFLOAT64<<16 | gc.TUINT8:
+		var t1 gc.Node
+		gc.Tempname(&t1, gc.Types[gc.TINT32])
+
+		gmove(f, &t1)
+		switch tt {
+		default:
+			gc.Fatal("gmove %v", t)
+
+		case gc.TINT8:
+			gins(x86.ACMPL, &t1, ncon(-0x80&(1<<32-1)))
+			p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TINT32]), nil, -1)
+			gins(x86.ACMPL, &t1, ncon(0x7f))
+			p2 := gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TINT32]), nil, -1)
+			p3 := gc.Gbranch(obj.AJMP, nil, 0)
+			gc.Patch(p1, gc.Pc)
+			gc.Patch(p2, gc.Pc)
+			gmove(ncon(-0x80&(1<<32-1)), &t1)
+			gc.Patch(p3, gc.Pc)
+			gmove(&t1, t)
+
+		case gc.TUINT8:
+			gins(x86.ATESTL, ncon(0xffffff00), &t1)
+			p1 := gc.Gbranch(x86.AJEQ, nil, +1)
+			gins(x86.AMOVL, ncon(0), &t1)
+			gc.Patch(p1, gc.Pc)
+			gmove(&t1, t)
+
+		case gc.TUINT16:
+			gins(x86.ATESTL, ncon(0xffff0000), &t1)
+			p1 := gc.Gbranch(x86.AJEQ, nil, +1)
+			gins(x86.AMOVL, ncon(0), &t1)
+			gc.Patch(p1, gc.Pc)
+			gmove(&t1, t)
+		}
+
+		return
+
+		// convert via int64.
+	case gc.TFLOAT32<<16 | gc.TUINT32,
+		gc.TFLOAT64<<16 | gc.TUINT32:
+		cvt = gc.Types[gc.TINT64]
+
+		goto hardmem
+
+		/*
+		 * integer to float
+		 */
+	case gc.TINT16<<16 | gc.TFLOAT32,
+		gc.TINT16<<16 | gc.TFLOAT64,
+		gc.TINT32<<16 | gc.TFLOAT32,
+		gc.TINT32<<16 | gc.TFLOAT64,
+		gc.TINT64<<16 | gc.TFLOAT32,
+		gc.TINT64<<16 | gc.TFLOAT64:
+		if t.Op != gc.OREGISTER {
+			goto hard
+		}
+		if f.Op == gc.OREGISTER {
+			cvt = f.Type
+			goto hardmem
+		}
+
+		switch ft {
+		case gc.TINT16:
+			a = x86.AFMOVW
+
+		case gc.TINT32:
+			a = x86.AFMOVL
+
+		default:
+			a = x86.AFMOVV
+		}
+
+		// convert via int32 memory
+	case gc.TINT8<<16 | gc.TFLOAT32,
+		gc.TINT8<<16 | gc.TFLOAT64,
+		gc.TUINT16<<16 | gc.TFLOAT32,
+		gc.TUINT16<<16 | gc.TFLOAT64,
+		gc.TUINT8<<16 | gc.TFLOAT32,
+		gc.TUINT8<<16 | gc.TFLOAT64:
+		cvt = gc.Types[gc.TINT32]
+
+		goto hardmem
+
+		// convert via int64 memory
+	case gc.TUINT32<<16 | gc.TFLOAT32,
+		gc.TUINT32<<16 | gc.TFLOAT64:
+		cvt = gc.Types[gc.TINT64]
+
+		goto hardmem
+
+		/*
+		 * float to float
+		 */
+		// The way the code generator uses floating-point
+		// registers, a move from F0 to F0 is intended as a no-op.
+		// On the x86, it's not: it pushes a second copy of F0
+		// on the floating point stack.  So toss it away here.
+		// Also, F0 is the *only* register we ever evaluate
+		// into, so we should only see register/register as F0/F0.
+	case gc.TFLOAT32<<16 | gc.TFLOAT32,
+		gc.TFLOAT64<<16 | gc.TFLOAT64:
+		if gc.Ismem(f) && gc.Ismem(t) {
+			goto hard
+		}
+		if f.Op == gc.OREGISTER && t.Op == gc.OREGISTER {
+			if f.Reg != x86.REG_F0 || t.Reg != x86.REG_F0 {
+				goto fatal
+			}
+			return
+		}
+
+		a = x86.AFMOVF
+		if ft == gc.TFLOAT64 {
+			a = x86.AFMOVD
+		}
+		if gc.Ismem(t) {
+			if f.Op != gc.OREGISTER || f.Reg != x86.REG_F0 {
+				gc.Fatal("gmove %v", f)
+			}
+			a = x86.AFMOVFP
+			if ft == gc.TFLOAT64 {
+				a = x86.AFMOVDP
+			}
+		}
+
+	case gc.TFLOAT32<<16 | gc.TFLOAT64:
+		if gc.Ismem(f) && gc.Ismem(t) {
+			goto hard
+		}
+		if f.Op == gc.OREGISTER && t.Op == gc.OREGISTER {
+			if f.Reg != x86.REG_F0 || t.Reg != x86.REG_F0 {
+				goto fatal
+			}
+			return
+		}
+
+		if f.Op == gc.OREGISTER {
+			gins(x86.AFMOVDP, f, t)
+		} else {
+			gins(x86.AFMOVF, f, t)
+		}
+		return
+
+	case gc.TFLOAT64<<16 | gc.TFLOAT32:
+		if gc.Ismem(f) && gc.Ismem(t) {
+			goto hard
+		}
+		if f.Op == gc.OREGISTER && t.Op == gc.OREGISTER {
+			var r1 gc.Node
+			gc.Tempname(&r1, gc.Types[gc.TFLOAT32])
+			gins(x86.AFMOVFP, f, &r1)
+			gins(x86.AFMOVF, &r1, t)
+			return
+		}
+
+		if f.Op == gc.OREGISTER {
+			gins(x86.AFMOVFP, f, t)
+		} else {
+			gins(x86.AFMOVD, f, t)
+		}
+		return
+	}
+
+	gins(a, f, t)
+	return
+
+	// requires register intermediate
+hard:
+	gc.Regalloc(&r1, cvt, t)
+
+	gmove(f, &r1)
+	gmove(&r1, t)
+	gc.Regfree(&r1)
+	return
+
+	// requires memory intermediate
+hardmem:
+	gc.Tempname(&r1, cvt)
+
+	gmove(f, &r1)
+	gmove(&r1, t)
+	return
+
+	// should not happen
+fatal:
+	gc.Fatal("gmove %v -> %v", gc.Nconv(f, obj.FmtLong), gc.Nconv(t, obj.FmtLong))
+
+	return
+}
+
+func floatmove_sse(f *gc.Node, t *gc.Node) {
+	var r1 gc.Node
+	var cvt *gc.Type
+	var a int
+
+	ft := gc.Simsimtype(f.Type)
+	tt := gc.Simsimtype(t.Type)
+
+	switch uint32(ft)<<16 | uint32(tt) {
+	// should not happen
+	default:
+		gc.Fatal("gmove %v -> %v", f, t)
+
+		return
+
+		/*
+		 * float to integer
+		 */
+		// convert via int32.
+	case gc.TFLOAT32<<16 | gc.TINT16,
+		gc.TFLOAT32<<16 | gc.TINT8,
+		gc.TFLOAT32<<16 | gc.TUINT16,
+		gc.TFLOAT32<<16 | gc.TUINT8,
+		gc.TFLOAT64<<16 | gc.TINT16,
+		gc.TFLOAT64<<16 | gc.TINT8,
+		gc.TFLOAT64<<16 | gc.TUINT16,
+		gc.TFLOAT64<<16 | gc.TUINT8:
+		cvt = gc.Types[gc.TINT32]
+
+		goto hard
+
+		// convert via int64.
+	case gc.TFLOAT32<<16 | gc.TUINT32,
+		gc.TFLOAT64<<16 | gc.TUINT32:
+		cvt = gc.Types[gc.TINT64]
+
+		goto hardmem
+
+	case gc.TFLOAT32<<16 | gc.TINT32:
+		a = x86.ACVTTSS2SL
+		goto rdst
+
+	case gc.TFLOAT64<<16 | gc.TINT32:
+		a = x86.ACVTTSD2SL
+		goto rdst
+
+		/*
+		 * integer to float
+		 */
+		// convert via int32 memory
+	case gc.TINT8<<16 | gc.TFLOAT32,
+		gc.TINT8<<16 | gc.TFLOAT64,
+		gc.TINT16<<16 | gc.TFLOAT32,
+		gc.TINT16<<16 | gc.TFLOAT64,
+		gc.TUINT16<<16 | gc.TFLOAT32,
+		gc.TUINT16<<16 | gc.TFLOAT64,
+		gc.TUINT8<<16 | gc.TFLOAT32,
+		gc.TUINT8<<16 | gc.TFLOAT64:
+		cvt = gc.Types[gc.TINT32]
+
+		goto hard
+
+		// convert via int64 memory
+	case gc.TUINT32<<16 | gc.TFLOAT32,
+		gc.TUINT32<<16 | gc.TFLOAT64:
+		cvt = gc.Types[gc.TINT64]
+
+		goto hardmem
+
+	case gc.TINT32<<16 | gc.TFLOAT32:
+		a = x86.ACVTSL2SS
+		goto rdst
+
+	case gc.TINT32<<16 | gc.TFLOAT64:
+		a = x86.ACVTSL2SD
+		goto rdst
+
+		/*
+		 * float to float
+		 */
+	case gc.TFLOAT32<<16 | gc.TFLOAT32:
+		a = x86.AMOVSS
+
+	case gc.TFLOAT64<<16 | gc.TFLOAT64:
+		a = x86.AMOVSD
+
+	case gc.TFLOAT32<<16 | gc.TFLOAT64:
+		a = x86.ACVTSS2SD
+		goto rdst
+
+	case gc.TFLOAT64<<16 | gc.TFLOAT32:
+		a = x86.ACVTSD2SS
+		goto rdst
+	}
+
+	gins(a, f, t)
+	return
+
+	// requires register intermediate
+hard:
+	gc.Regalloc(&r1, cvt, t)
+
+	gmove(f, &r1)
+	gmove(&r1, t)
+	gc.Regfree(&r1)
+	return
+
+	// requires memory intermediate
+hardmem:
+	gc.Tempname(&r1, cvt)
+
+	gmove(f, &r1)
+	gmove(&r1, t)
+	return
+
+	// requires register destination
+rdst:
+	gc.Regalloc(&r1, t.Type, t)
+
+	gins(a, f, &r1)
+	gmove(&r1, t)
+	gc.Regfree(&r1)
+	return
+}
+
+// samaddr reports whether f and t name the same register.
+func samaddr(f *gc.Node, t *gc.Node) bool {
+	return f.Op == gc.OREGISTER && t.Op == gc.OREGISTER && f.Reg == t.Reg
+}
+
+/*
+ * generate one instruction:
+ *	as f, t
+ */
+func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
+	if as == x86.AFMOVF && f != nil && f.Op == gc.OREGISTER && t != nil && t.Op == gc.OREGISTER {
+		gc.Fatal("gins MOVF reg, reg")
+	}
+	if as == x86.ACVTSD2SS && f != nil && f.Op == gc.OLITERAL {
+		gc.Fatal("gins CVTSD2SS const")
+	}
+	if as == x86.AMOVSD && t != nil && t.Op == gc.OREGISTER && t.Reg == x86.REG_F0 {
+		gc.Fatal("gins MOVSD into F0")
+	}
+
+	if as == x86.AMOVL && f != nil && f.Op == gc.OADDR && f.Left.Op == gc.ONAME && f.Left.Class != gc.PEXTERN && f.Left.Class != gc.PFUNC {
+		// Turn MOVL $xxx(FP/SP) into LEAL xxx.
+		// These should be equivalent but most of the backend
+		// only expects to see LEAL, because that's what we had
+		// historically generated. Various hidden assumptions are baked in by now.
+		as = x86.ALEAL
+		f = f.Left
+	}
+
+	switch as {
+	case x86.AMOVB,
+		x86.AMOVW,
+		x86.AMOVL:
+		if f != nil && t != nil && samaddr(f, t) {
+			return nil
+		}
+
+	case x86.ALEAL:
+		if f != nil && gc.Isconst(f, gc.CTNIL) {
+			gc.Fatal("gins LEAL nil %v", f.Type)
+		}
+	}
+
+	p := gc.Prog(as)
+	gc.Naddr(&p.From, f)
+	gc.Naddr(&p.To, t)
+
+	if gc.Debug['g'] != 0 {
+		fmt.Printf("%v\n", p)
+	}
+
+	w := 0
+	switch as {
+	case x86.AMOVB:
+		w = 1
+
+	case x86.AMOVW:
+		w = 2
+
+	case x86.AMOVL:
+		w = 4
+	}
+
+	if w != 0 && f != nil && (p.From.Width > int64(w) || p.To.Width > int64(w)) {
+		gc.Dump("bad width from:", f)
+		gc.Dump("bad width to:", t)
+		gc.Fatal("bad width: %v (%d, %d)\n", p, p.From.Width, p.To.Width)
+	}
+
+	if p.To.Type == obj.TYPE_ADDR && w > 0 {
+		gc.Fatal("bad use of addr: %v", p)
+	}
+
+	return p
+}
+
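+// ginsnop emits XCHGL AX, AX, a real instruction with no effect, for
+// places that need a genuine machine instruction rather than a pseudo-op.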
+func ginsnop() {
+	var reg gc.Node
+	gc.Nodreg(&reg, gc.Types[gc.TINT], x86.REG_AX)
+	gins(x86.AXCHGL, &reg, &reg)
+}
+
+func dotaddable(n *gc.Node, n1 *gc.Node) bool {
+	if n.Op != gc.ODOT {
+		return false
+	}
+
+	var oary [10]int64
+	var nn *gc.Node
+	o := gc.Dotoffset(n, oary[:], &nn)
+	if nn != nil && nn.Addable && o == 1 && oary[0] >= 0 {
+		*n1 = *nn
+		n1.Type = n.Type
+		n1.Xoffset += oary[0]
+		return true
+	}
+
+	return false
+}
+
+func sudoclean() {
+}
+
+func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
+	*a = obj.Addr{}
+	return false
+}
diff --git a/src/cmd/compile/internal/x86/peep.go b/src/cmd/compile/internal/x86/peep.go
new file mode 100644
index 0000000..8b50eab
--- /dev/null
+++ b/src/cmd/compile/internal/x86/peep.go
@@ -0,0 +1,814 @@
+// Derived from Inferno utils/6c/peep.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/peep.c
+//
+//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
+//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+//	Portions Copyright © 1997-1999 Vita Nuova Limited
+//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+//	Portions Copyright © 2004,2006 Bruce Ellis
+//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+//	Portions Copyright © 2009 The Go Authors.  All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package x86
+
+import (
+	"cmd/compile/internal/gc"
+	"cmd/internal/obj"
+	"cmd/internal/obj/x86"
+	"fmt"
+)
+
+const (
+	REGEXT      = 0
+	exregoffset = x86.REG_DI
+)
+
+var gactive uint32
+
+// needc reports whether the carry bit is still needed at p: it scans
+// forward until some instruction uses the carry (true) or sets or kills
+// it without using it (false).
+func needc(p *obj.Prog) bool {
+	for p != nil {
+		if p.Info.Flags&gc.UseCarry != 0 {
+			return true
+		}
+		if p.Info.Flags&(gc.SetCarry|gc.KillCarry) != 0 {
+			return false
+		}
+		p = p.Link
+	}
+
+	return false
+}
+
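+// rnops skips over any ANOP instructions starting at r and returns the
+// first flow node whose instruction does real work.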
+func rnops(r *gc.Flow) *gc.Flow {
+	if r != nil {
+		var p *obj.Prog
+		var r1 *gc.Flow
+		for {
+			p = r.Prog
+			if p.As != obj.ANOP || p.From.Type != obj.TYPE_NONE || p.To.Type != obj.TYPE_NONE {
+				break
+			}
+			r1 = gc.Uniqs(r)
+			if r1 == nil {
+				break
+			}
+			r = r1
+		}
+	}
+
+	return r
+}
+
+func peep(firstp *obj.Prog) {
+	g := gc.Flowstart(firstp, nil)
+	if g == nil {
+		return
+	}
+	gactive = 0
+
+	// byte, word arithmetic elimination.
+	elimshortmov(g)
+
+	// constant propagation
+	// find MOV $con,R followed by
+	// another MOV $con,R without
+	// setting R in the interim
+	var p *obj.Prog
+	for r := g.Start; r != nil; r = r.Link {
+		p = r.Prog
+		switch p.As {
+		case x86.ALEAL:
+			if regtyp(&p.To) && p.From.Sym != nil && p.From.Index == x86.REG_NONE {
+				conprop(r)
+			}
+
+		case x86.AMOVB,
+			x86.AMOVW,
+			x86.AMOVL,
+			x86.AMOVSS,
+			x86.AMOVSD:
+			if regtyp(&p.To) && (p.From.Type == obj.TYPE_CONST || p.From.Type == obj.TYPE_FCONST) {
+				conprop(r)
+			}
+		}
+	}
+
+	var r1 *gc.Flow
+	var p1 *obj.Prog
+	var r *gc.Flow
+	var t int
+loop1:
+	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+		gc.Dumpit("loop1", g.Start, 0)
+	}
+
+	t = 0
+	for r = g.Start; r != nil; r = r.Link {
+		p = r.Prog
+		switch p.As {
+		case x86.AMOVL,
+			x86.AMOVSS,
+			x86.AMOVSD:
+			if regtyp(&p.To) && regtyp(&p.From) {
+				if copyprop(g, r) {
+					excise(r)
+					t++
+				} else if subprop(r) && copyprop(g, r) {
+					excise(r)
+					t++
+				}
+			}
+
+		case x86.AMOVBLZX,
+			x86.AMOVWLZX,
+			x86.AMOVBLSX,
+			x86.AMOVWLSX:
+			if regtyp(&p.To) {
+				r1 = rnops(gc.Uniqs(r))
+				if r1 != nil {
+					p1 = r1.Prog
+					if p.As == p1.As && p.To.Type == p1.From.Type && p.To.Reg == p1.From.Reg {
+						p1.As = x86.AMOVL
+						t++
+					}
+				}
+			}
+
+		case x86.AADDL,
+			x86.AADDW:
+			if p.From.Type != obj.TYPE_CONST || needc(p.Link) {
+				break
+			}
+			if p.From.Offset == -1 {
+				if p.As == x86.AADDL {
+					p.As = x86.ADECL
+				} else {
+					p.As = x86.ADECW
+				}
+				p.From = obj.Addr{}
+				break
+			}
+
+			if p.From.Offset == 1 {
+				if p.As == x86.AADDL {
+					p.As = x86.AINCL
+				} else {
+					p.As = x86.AINCW
+				}
+				p.From = obj.Addr{}
+				break
+			}
+
+		case x86.ASUBL,
+			x86.ASUBW:
+			if p.From.Type != obj.TYPE_CONST || needc(p.Link) {
+				break
+			}
+			if p.From.Offset == -1 {
+				if p.As == x86.ASUBL {
+					p.As = x86.AINCL
+				} else {
+					p.As = x86.AINCW
+				}
+				p.From = obj.Addr{}
+				break
+			}
+
+			if p.From.Offset == 1 {
+				if p.As == x86.ASUBL {
+					p.As = x86.ADECL
+				} else {
+					p.As = x86.ADECW
+				}
+				p.From = obj.Addr{}
+				break
+			}
+		}
+	}
+
+	if t != 0 {
+		goto loop1
+	}
+
+	// MOVSD removal.
+	// We never use packed registers, so a MOVSD between registers
+	// can be replaced by MOVAPD, which moves the pair of float64s
+	// instead of just the lower one.  We only use the lower one, but
+	// the processor can do better if we do moves using both.
+	for r := g.Start; r != nil; r = r.Link {
+		p = r.Prog
+		if p.As == x86.AMOVSD && regtyp(&p.From) && regtyp(&p.To) {
+			p.As = x86.AMOVAPD
+		}
+	}
+
+	gc.Flowend(g)
+}
+
+func excise(r *gc.Flow) {
+	p := r.Prog
+	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+		fmt.Printf("%v ===delete===\n", p)
+	}
+
+	obj.Nopout(p)
+
+	gc.Ostats.Ndelmov++
+}
+
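+// regtyp reports whether a is a plain general-purpose (AX..DI) or SSE
+// (X0..X7) register operand.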
+func regtyp(a *obj.Addr) bool {
+	return a.Type == obj.TYPE_REG && (x86.REG_AX <= a.Reg && a.Reg <= x86.REG_DI || x86.REG_X0 <= a.Reg && a.Reg <= x86.REG_X7)
+}
+
+// movb elimination.
+// movb is simulated by the linker
+// when a register other than ax, bx, cx, dx
+// is used, so rewrite to other instructions
+// when possible.  a movb into a register
+// can smash the entire 32-bit register without
+// causing any trouble.
+func elimshortmov(g *gc.Graph) {
+	var p *obj.Prog
+
+	for r := g.Start; r != nil; r = r.Link {
+		p = r.Prog
+		if regtyp(&p.To) {
+			switch p.As {
+			case x86.AINCB,
+				x86.AINCW:
+				p.As = x86.AINCL
+
+			case x86.ADECB,
+				x86.ADECW:
+				p.As = x86.ADECL
+
+			case x86.ANEGB,
+				x86.ANEGW:
+				p.As = x86.ANEGL
+
+			case x86.ANOTB,
+				x86.ANOTW:
+				p.As = x86.ANOTL
+			}
+
+			if regtyp(&p.From) || p.From.Type == obj.TYPE_CONST {
+				// A move or arithmetic into a partial register
+				// from another register or constant can become movl.
+				// we don't switch to 32-bit arithmetic if it can
+				// change how the carry bit is set (and the carry bit is needed).
+				switch p.As {
+				case x86.AMOVB,
+					x86.AMOVW:
+					p.As = x86.AMOVL
+
+				case x86.AADDB,
+					x86.AADDW:
+					if !needc(p.Link) {
+						p.As = x86.AADDL
+					}
+
+				case x86.ASUBB,
+					x86.ASUBW:
+					if !needc(p.Link) {
+						p.As = x86.ASUBL
+					}
+
+				case x86.AMULB,
+					x86.AMULW:
+					p.As = x86.AMULL
+
+				case x86.AIMULB,
+					x86.AIMULW:
+					p.As = x86.AIMULL
+
+				case x86.AANDB,
+					x86.AANDW:
+					p.As = x86.AANDL
+
+				case x86.AORB,
+					x86.AORW:
+					p.As = x86.AORL
+
+				case x86.AXORB,
+					x86.AXORW:
+					p.As = x86.AXORL
+
+				case x86.ASHLB,
+					x86.ASHLW:
+					p.As = x86.ASHLL
+				}
+			} else {
+				// explicit zero extension
+				switch p.As {
+				case x86.AMOVB:
+					p.As = x86.AMOVBLZX
+
+				case x86.AMOVW:
+					p.As = x86.AMOVWLZX
+				}
+			}
+		}
+	}
+}
+
+/*
+ * the idea is to substitute
+ * one register for another
+ * from one MOV to another
+ *	MOV	a, R0
+ *	ADD	b, R0	/ no use of R1
+ *	MOV	R0, R1
+ * would be converted to
+ *	MOV	a, R1
+ *	ADD	b, R1
+ *	MOV	R1, R0
+ * hopefully, then the former or latter MOV
+ * will be eliminated by copy propagation.
+ */
+func subprop(r0 *gc.Flow) bool {
+	p := r0.Prog
+	v1 := &p.From
+	if !regtyp(v1) {
+		return false
+	}
+	v2 := &p.To
+	if !regtyp(v2) {
+		return false
+	}
+	for r := gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
+		if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+			fmt.Printf("\t? %v\n", r.Prog)
+		}
+		if gc.Uniqs(r) == nil {
+			break
+		}
+		p = r.Prog
+		if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
+			continue
+		}
+		if p.Info.Flags&gc.Call != 0 {
+			return false
+		}
+
+		if p.Info.Reguse|p.Info.Regset != 0 {
+			return false
+		}
+
+		if (p.Info.Flags&gc.Move != 0) && (p.Info.Flags&(gc.SizeL|gc.SizeQ|gc.SizeF|gc.SizeD) != 0) && p.To.Type == v1.Type && p.To.Reg == v1.Reg {
+			copysub(&p.To, v1, v2, 1)
+			if gc.Debug['P'] != 0 {
+				fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
+				if p.From.Type == v2.Type && p.From.Reg == v2.Reg {
+					fmt.Printf(" excise")
+				}
+				fmt.Printf("\n")
+			}
+
+			for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
+				p = r.Prog
+				copysub(&p.From, v1, v2, 1)
+				copysub(&p.To, v1, v2, 1)
+				if gc.Debug['P'] != 0 {
+					fmt.Printf("%v\n", r.Prog)
+				}
+			}
+
+			t := int(v1.Reg)
+			v1.Reg = v2.Reg
+			v2.Reg = int16(t)
+			if gc.Debug['P'] != 0 {
+				fmt.Printf("%v last\n", r.Prog)
+			}
+			return true
+		}
+
+		if copyau(&p.From, v2) || copyau(&p.To, v2) {
+			break
+		}
+		if copysub(&p.From, v1, v2, 0) != 0 || copysub(&p.To, v1, v2, 0) != 0 {
+			break
+		}
+	}
+
+	return false
+}
+
+/*
+ * The idea is to remove redundant copies.
+ *	v1->v2	F=0
+ *	(use v2	s/v2/v1/)*
+ *	set v1	F=1
+ *	use v2	return fail
+ *	-----------------
+ *	v1->v2	F=0
+ *	(use v2	s/v2/v1/)*
+ *	set v1	F=1
+ *	set v2	return success
+ */
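+//
+// In words: after the copy v1->v2, later uses of v2 are rewritten to use
+// v1; the scan succeeds once v2 is redefined, and fails if v2 is still
+// needed after a control-flow merge or after v1 has been clobbered.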
+func copyprop(g *gc.Graph, r0 *gc.Flow) bool {
+	p := r0.Prog
+	v1 := &p.From
+	v2 := &p.To
+	if copyas(v1, v2) {
+		return true
+	}
+	gactive++
+	return copy1(v1, v2, r0.S1, 0)
+}
+
+func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) bool {
+	if uint32(r.Active) == gactive {
+		if gc.Debug['P'] != 0 {
+			fmt.Printf("act set; return 1\n")
+		}
+		return true
+	}
+
+	r.Active = int32(gactive)
+	if gc.Debug['P'] != 0 {
+		fmt.Printf("copy %v->%v f=%d\n", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), f)
+	}
+	var t int
+	var p *obj.Prog
+	for ; r != nil; r = r.S1 {
+		p = r.Prog
+		if gc.Debug['P'] != 0 {
+			fmt.Printf("%v", p)
+		}
+		if f == 0 && gc.Uniqp(r) == nil {
+			f = 1
+			if gc.Debug['P'] != 0 {
+				fmt.Printf("; merge; f=%d", f)
+			}
+		}
+
+		t = copyu(p, v2, nil)
+		switch t {
+		case 2: /* rar, can't split */
+			if gc.Debug['P'] != 0 {
+				fmt.Printf("; %v rar; return 0\n", gc.Ctxt.Dconv(v2))
+			}
+			return false
+
+		case 3: /* set */
+			if gc.Debug['P'] != 0 {
+				fmt.Printf("; %v set; return 1\n", gc.Ctxt.Dconv(v2))
+			}
+			return true
+
+		case 1, /* used, substitute */
+			4: /* use and set */
+			if f != 0 {
+				if gc.Debug['P'] == 0 {
+					return false
+				}
+				if t == 4 {
+					fmt.Printf("; %v used+set and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
+				} else {
+					fmt.Printf("; %v used and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
+				}
+				return false
+			}
+
+			if copyu(p, v2, v1) != 0 {
+				if gc.Debug['P'] != 0 {
+					fmt.Printf("; sub fail; return 0\n")
+				}
+				return false
+			}
+
+			if gc.Debug['P'] != 0 {
+				fmt.Printf("; sub %v/%v", gc.Ctxt.Dconv(v2), gc.Ctxt.Dconv(v1))
+			}
+			if t == 4 {
+				if gc.Debug['P'] != 0 {
+					fmt.Printf("; %v used+set; return 1\n", gc.Ctxt.Dconv(v2))
+				}
+				return true
+			}
+		}
+
+		if f == 0 {
+			t = copyu(p, v1, nil)
+			if t == 2 || t == 3 || t == 4 {
+				f = 1
+				if gc.Debug['P'] != 0 {
+					fmt.Printf("; %v set and !f; f=%d", gc.Ctxt.Dconv(v1), f)
+				}
+			}
+		}
+
+		if gc.Debug['P'] != 0 {
+			fmt.Printf("\n")
+		}
+		if r.S2 != nil {
+			if !copy1(v1, v2, r.S2, f) {
+				return false
+			}
+		}
+	}
+
+	return true
+}
+
+/*
+ * return
+ * 1 if v only used (and substitute),
+ * 2 if read-alter-rewrite
+ * 3 if set
+ * 4 if set and used
+ * 0 otherwise (not touched)
+ */
+func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
+	switch p.As {
+	case obj.AJMP:
+		if s != nil {
+			if copysub(&p.To, v, s, 1) != 0 {
+				return 1
+			}
+			return 0
+		}
+
+		if copyau(&p.To, v) {
+			return 1
+		}
+		return 0
+
+	case obj.ARET:
+		if s != nil {
+			return 1
+		}
+		return 3
+
+	case obj.ACALL:
+		if REGEXT != 0 && v.Type == obj.TYPE_REG && v.Reg <= REGEXT && v.Reg > exregoffset {
+			return 2
+		}
+		if x86.REGARG >= 0 && v.Type == obj.TYPE_REG && v.Reg == x86.REGARG {
+			return 2
+		}
+		if v.Type == p.From.Type && v.Reg == p.From.Reg {
+			return 2
+		}
+
+		if s != nil {
+			if copysub(&p.To, v, s, 1) != 0 {
+				return 1
+			}
+			return 0
+		}
+
+		if copyau(&p.To, v) {
+			return 4
+		}
+		return 3
+
+	case obj.ATEXT:
+		if x86.REGARG >= 0 && v.Type == obj.TYPE_REG && v.Reg == x86.REGARG {
+			return 3
+		}
+		return 0
+	}
+
+	if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
+		return 0
+	}
+
+	if (p.Info.Reguse|p.Info.Regset)&RtoB(int(v.Reg)) != 0 {
+		return 2
+	}
+
+	if p.Info.Flags&gc.LeftAddr != 0 {
+		if copyas(&p.From, v) {
+			return 2
+		}
+	}
+
+	if p.Info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightRead|gc.RightWrite {
+		if copyas(&p.To, v) {
+			return 2
+		}
+	}
+
+	if p.Info.Flags&gc.RightWrite != 0 {
+		if copyas(&p.To, v) {
+			if s != nil {
+				return copysub(&p.From, v, s, 1)
+			}
+			if copyau(&p.From, v) {
+				return 4
+			}
+			return 3
+		}
+	}
+
+	if p.Info.Flags&(gc.LeftAddr|gc.LeftRead|gc.LeftWrite|gc.RightAddr|gc.RightRead|gc.RightWrite) != 0 {
+		if s != nil {
+			if copysub(&p.From, v, s, 1) != 0 {
+				return 1
+			}
+			return copysub(&p.To, v, s, 1)
+		}
+
+		if copyau(&p.From, v) {
+			return 1
+		}
+		if copyau(&p.To, v) {
+			return 1
+		}
+	}
+
+	return 0
+}
+
+/*
+ * direct reference,
+ * could be set/use depending on
+ * semantics
+ */
+func copyas(a *obj.Addr, v *obj.Addr) bool {
+	if x86.REG_AL <= a.Reg && a.Reg <= x86.REG_BL {
+		gc.Fatal("use of byte register")
+	}
+	if x86.REG_AL <= v.Reg && v.Reg <= x86.REG_BL {
+		gc.Fatal("use of byte register")
+	}
+
+	if a.Type != v.Type || a.Name != v.Name || a.Reg != v.Reg {
+		return false
+	}
+	if regtyp(v) {
+		return true
+	}
+	if (v.Type == obj.TYPE_MEM || v.Type == obj.TYPE_ADDR) && (v.Name == obj.NAME_AUTO || v.Name == obj.NAME_PARAM) {
+		if v.Offset == a.Offset {
+			return true
+		}
+	}
+	return false
+}
+
+func sameaddr(a *obj.Addr, v *obj.Addr) bool {
+	if a.Type != v.Type || a.Name != v.Name || a.Reg != v.Reg {
+		return false
+	}
+	if regtyp(v) {
+		return true
+	}
+	if (v.Type == obj.TYPE_MEM || v.Type == obj.TYPE_ADDR) && (v.Name == obj.NAME_AUTO || v.Name == obj.NAME_PARAM) {
+		if v.Offset == a.Offset {
+			return true
+		}
+	}
+	return false
+}
+
+/*
+ * either direct or indirect
+ */
+func copyau(a *obj.Addr, v *obj.Addr) bool {
+	if copyas(a, v) {
+		return true
+	}
+	if regtyp(v) {
+		if (a.Type == obj.TYPE_MEM || a.Type == obj.TYPE_ADDR) && a.Reg == v.Reg {
+			return true
+		}
+		if a.Index == v.Reg {
+			return true
+		}
+	}
+
+	return false
+}
+
+/*
+ * substitute s for v in a
+ * return failure to substitute
+ */
+func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f int) int {
+	if copyas(a, v) {
+		reg := int(s.Reg)
+		if reg >= x86.REG_AX && reg <= x86.REG_DI || reg >= x86.REG_X0 && reg <= x86.REG_X7 {
+			if f != 0 {
+				a.Reg = int16(reg)
+			}
+		}
+
+		return 0
+	}
+
+	if regtyp(v) {
+		reg := int(v.Reg)
+		if (a.Type == obj.TYPE_MEM || a.Type == obj.TYPE_ADDR) && int(a.Reg) == reg {
+			if s.Reg == x86.REG_BP && a.Index != x86.REG_NONE {
+				return 1 /* can't use BP-base with index */
+			}
+			if f != 0 {
+				a.Reg = s.Reg
+			}
+		}
+
+		if int(a.Index) == reg {
+			if f != 0 {
+				a.Index = s.Reg
+			}
+			return 0
+		}
+
+		return 0
+	}
+
+	return 0
+}
+
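+// conprop deletes a following instruction that stores the same constant
+// into the same destination as r0, provided nothing touches that
+// destination in between.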
+func conprop(r0 *gc.Flow) {
+	var p *obj.Prog
+	var t int
+
+	p0 := r0.Prog
+	v0 := &p0.To
+	r := r0
+
+loop:
+	r = gc.Uniqs(r)
+	if r == nil || r == r0 {
+		return
+	}
+	if gc.Uniqp(r) == nil {
+		return
+	}
+
+	p = r.Prog
+	t = copyu(p, v0, nil)
+	switch t {
+	case 0, // miss
+		1: // use
+		goto loop
+
+	case 2, // rar
+		4: // use and set
+		break
+
+	case 3: // set
+		// Note: the TYPE_FCONST test below means only identical
+		// floating-point constants are treated as redundant here.
+		if p.As == p0.As &&
+			p.From.Type == p0.From.Type &&
+			p.From.Reg == p0.From.Reg &&
+			p.From.Node == p0.From.Node &&
+			p.From.Offset == p0.From.Offset &&
+			p.From.Scale == p0.From.Scale &&
+			p.From.Type == obj.TYPE_FCONST && p.From.Val.(float64) == p0.From.Val.(float64) &&
+			p.From.Index == p0.From.Index {
+			excise(r)
+			goto loop
+		}
+	}
+}
+
+func smallindir(a *obj.Addr, reg *obj.Addr) bool {
+	return regtyp(reg) && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && a.Index == x86.REG_NONE && 0 <= a.Offset && a.Offset < 4096
+}
+
+func stackaddr(a *obj.Addr) bool {
+	return a.Type == obj.TYPE_REG && a.Reg == x86.REG_SP
+}
diff --git a/src/cmd/compile/internal/x86/prog.go b/src/cmd/compile/internal/x86/prog.go
new file mode 100644
index 0000000..f96a1aa
--- /dev/null
+++ b/src/cmd/compile/internal/x86/prog.go
@@ -0,0 +1,292 @@
+// Copyright 2013 The Go Authors.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x86
+
+import (
+	"cmd/compile/internal/gc"
+	"cmd/internal/obj"
+	"cmd/internal/obj/x86"
+)
+
+var (
+	AX               = RtoB(x86.REG_AX)
+	BX               = RtoB(x86.REG_BX)
+	CX               = RtoB(x86.REG_CX)
+	DX               = RtoB(x86.REG_DX)
+	DI               = RtoB(x86.REG_DI)
+	SI               = RtoB(x86.REG_SI)
+	LeftRdwr  uint32 = gc.LeftRead | gc.LeftWrite
+	RightRdwr uint32 = gc.RightRead | gc.RightWrite
+)
+
+// This table gives the basic information about the instructions
+// generated by the compiler and processed in the optimizer.
+// See opt.h for bit definitions.
+//
+// Instructions not generated need not be listed.
+// As an exception to that rule, we typically write down all the
+// size variants of an operation even if we just use a subset.
+//
+// The table is formatted for 8-space tabs.
+var progtable = [x86.ALAST]obj.ProgInfo{
+	obj.ATYPE:     {gc.Pseudo | gc.Skip, 0, 0, 0},
+	obj.ATEXT:     {gc.Pseudo, 0, 0, 0},
+	obj.AFUNCDATA: {gc.Pseudo, 0, 0, 0},
+	obj.APCDATA:   {gc.Pseudo, 0, 0, 0},
+	obj.AUNDEF:    {gc.Break, 0, 0, 0},
+	obj.AUSEFIELD: {gc.OK, 0, 0, 0},
+	obj.ACHECKNIL: {gc.LeftRead, 0, 0, 0},
+	obj.AVARDEF:   {gc.Pseudo | gc.RightWrite, 0, 0, 0},
+	obj.AVARKILL:  {gc.Pseudo | gc.RightWrite, 0, 0, 0},
+
+	// NOP is an internal no-op that also stands
+	// for USED and SET annotations, not the Intel opcode.
+	obj.ANOP:       {gc.LeftRead | gc.RightWrite, 0, 0, 0},
+	x86.AADCL:      {gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+	x86.AADCW:      {gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+	x86.AADDB:      {gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+	x86.AADDL:      {gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+	x86.AADDW:      {gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+	x86.AADDSD:     {gc.SizeD | gc.LeftRead | RightRdwr, 0, 0, 0},
+	x86.AADDSS:     {gc.SizeF | gc.LeftRead | RightRdwr, 0, 0, 0},
+	x86.AANDB:      {gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+	x86.AANDL:      {gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+	x86.AANDW:      {gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+	obj.ACALL:      {gc.RightAddr | gc.Call | gc.KillCarry, 0, 0, 0},
+	x86.ACDQ:       {gc.OK, AX, AX | DX, 0},
+	x86.ACWD:       {gc.OK, AX, AX | DX, 0},
+	x86.ACLD:       {gc.OK, 0, 0, 0},
+	x86.ASTD:       {gc.OK, 0, 0, 0},
+	x86.ACMPB:      {gc.SizeB | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+	x86.ACMPL:      {gc.SizeL | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+	x86.ACMPW:      {gc.SizeW | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+	x86.ACOMISD:    {gc.SizeD | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+	x86.ACOMISS:    {gc.SizeF | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+	x86.ACVTSD2SL:  {gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	x86.ACVTSD2SS:  {gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	x86.ACVTSL2SD:  {gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	x86.ACVTSL2SS:  {gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	x86.ACVTSS2SD:  {gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	x86.ACVTSS2SL:  {gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	x86.ACVTTSD2SL: {gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	x86.ACVTTSS2SL: {gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	x86.ADECB:      {gc.SizeB | RightRdwr, 0, 0, 0},
+	x86.ADECL:      {gc.SizeL | RightRdwr, 0, 0, 0},
+	x86.ADECW:      {gc.SizeW | RightRdwr, 0, 0, 0},
+	x86.ADIVB:      {gc.SizeB | gc.LeftRead | gc.SetCarry, AX, AX, 0},
+	x86.ADIVL:      {gc.SizeL | gc.LeftRead | gc.SetCarry, AX | DX, AX | DX, 0},
+	x86.ADIVW:      {gc.SizeW | gc.LeftRead | gc.SetCarry, AX | DX, AX | DX, 0},
+	x86.ADIVSD:     {gc.SizeD | gc.LeftRead | RightRdwr, 0, 0, 0},
+	x86.ADIVSS:     {gc.SizeF | gc.LeftRead | RightRdwr, 0, 0, 0},
+	x86.AFLDCW:     {gc.SizeW | gc.LeftAddr, 0, 0, 0},
+	x86.AFSTCW:     {gc.SizeW | gc.RightAddr, 0, 0, 0},
+	x86.AFSTSW:     {gc.SizeW | gc.RightAddr | gc.RightWrite, 0, 0, 0},
+	x86.AFADDD:     {gc.SizeD | gc.LeftAddr | RightRdwr, 0, 0, 0},
+	x86.AFADDDP:    {gc.SizeD | gc.LeftAddr | RightRdwr, 0, 0, 0},
+	x86.AFADDF:     {gc.SizeF | gc.LeftAddr | RightRdwr, 0, 0, 0},
+	x86.AFCOMD:     {gc.SizeD | gc.LeftAddr | gc.RightRead, 0, 0, 0},
+	x86.AFCOMDP:    {gc.SizeD | gc.LeftAddr | gc.RightRead, 0, 0, 0},
+	x86.AFCOMDPP:   {gc.SizeD | gc.LeftAddr | gc.RightRead, 0, 0, 0},
+	x86.AFCOMF:     {gc.SizeF | gc.LeftAddr | gc.RightRead, 0, 0, 0},
+	x86.AFCOMFP:    {gc.SizeF | gc.LeftAddr | gc.RightRead, 0, 0, 0},
+	x86.AFUCOMIP:   {gc.SizeF | gc.LeftAddr | gc.RightRead, 0, 0, 0},
+	x86.AFCHS:      {gc.SizeD | RightRdwr, 0, 0, 0}, // also SizeF
+
+	x86.AFDIVDP:  {gc.SizeD | gc.LeftAddr | RightRdwr, 0, 0, 0},
+	x86.AFDIVF:   {gc.SizeF | gc.LeftAddr | RightRdwr, 0, 0, 0},
+	x86.AFDIVD:   {gc.SizeD | gc.LeftAddr | RightRdwr, 0, 0, 0},
+	x86.AFDIVRDP: {gc.SizeD | gc.LeftAddr | RightRdwr, 0, 0, 0},
+	x86.AFDIVRF:  {gc.SizeF | gc.LeftAddr | RightRdwr, 0, 0, 0},
+	x86.AFDIVRD:  {gc.SizeD | gc.LeftAddr | RightRdwr, 0, 0, 0},
+	x86.AFXCHD:   {gc.SizeD | LeftRdwr | RightRdwr, 0, 0, 0},
+	x86.AFSUBD:   {gc.SizeD | gc.LeftAddr | RightRdwr, 0, 0, 0},
+	x86.AFSUBDP:  {gc.SizeD | gc.LeftAddr | RightRdwr, 0, 0, 0},
+	x86.AFSUBF:   {gc.SizeF | gc.LeftAddr | RightRdwr, 0, 0, 0},
+	x86.AFSUBRD:  {gc.SizeD | gc.LeftAddr | RightRdwr, 0, 0, 0},
+	x86.AFSUBRDP: {gc.SizeD | gc.LeftAddr | RightRdwr, 0, 0, 0},
+	x86.AFSUBRF:  {gc.SizeF | gc.LeftAddr | RightRdwr, 0, 0, 0},
+	x86.AFMOVD:   {gc.SizeD | gc.LeftAddr | gc.RightWrite, 0, 0, 0},
+	x86.AFMOVF:   {gc.SizeF | gc.LeftAddr | gc.RightWrite, 0, 0, 0},
+	x86.AFMOVL:   {gc.SizeL | gc.LeftAddr | gc.RightWrite, 0, 0, 0},
+	x86.AFMOVW:   {gc.SizeW | gc.LeftAddr | gc.RightWrite, 0, 0, 0},
+	x86.AFMOVV:   {gc.SizeQ | gc.LeftAddr | gc.RightWrite, 0, 0, 0},
+
+	// These instructions are marked as RightAddr
+	// so that the register optimizer does not try to replace the
+	// memory references with integer register references.
+	// But they do not use the previous value at the address, so
+	// we also mark them RightWrite.
+	x86.AFMOVDP:   {gc.SizeD | gc.LeftRead | gc.RightWrite | gc.RightAddr, 0, 0, 0},
+	x86.AFMOVFP:   {gc.SizeF | gc.LeftRead | gc.RightWrite | gc.RightAddr, 0, 0, 0},
+	x86.AFMOVLP:   {gc.SizeL | gc.LeftRead | gc.RightWrite | gc.RightAddr, 0, 0, 0},
+	x86.AFMOVWP:   {gc.SizeW | gc.LeftRead | gc.RightWrite | gc.RightAddr, 0, 0, 0},
+	x86.AFMOVVP:   {gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.RightAddr, 0, 0, 0},
+	x86.AFMULD:    {gc.SizeD | gc.LeftAddr | RightRdwr, 0, 0, 0},
+	x86.AFMULDP:   {gc.SizeD | gc.LeftAddr | RightRdwr, 0, 0, 0},
+	x86.AFMULF:    {gc.SizeF | gc.LeftAddr | RightRdwr, 0, 0, 0},
+	x86.AIDIVB:    {gc.SizeB | gc.LeftRead | gc.SetCarry, AX, AX, 0},
+	x86.AIDIVL:    {gc.SizeL | gc.LeftRead | gc.SetCarry, AX | DX, AX | DX, 0},
+	x86.AIDIVW:    {gc.SizeW | gc.LeftRead | gc.SetCarry, AX | DX, AX | DX, 0},
+	x86.AIMULB:    {gc.SizeB | gc.LeftRead | gc.SetCarry, AX, AX, 0},
+	x86.AIMULL:    {gc.SizeL | gc.LeftRead | gc.ImulAXDX | gc.SetCarry, 0, 0, 0},
+	x86.AIMULW:    {gc.SizeW | gc.LeftRead | gc.ImulAXDX | gc.SetCarry, 0, 0, 0},
+	x86.AINCB:     {gc.SizeB | RightRdwr, 0, 0, 0},
+	x86.AINCL:     {gc.SizeL | RightRdwr, 0, 0, 0},
+	x86.AINCW:     {gc.SizeW | RightRdwr, 0, 0, 0},
+	x86.AJCC:      {gc.Cjmp | gc.UseCarry, 0, 0, 0},
+	x86.AJCS:      {gc.Cjmp | gc.UseCarry, 0, 0, 0},
+	x86.AJEQ:      {gc.Cjmp | gc.UseCarry, 0, 0, 0},
+	x86.AJGE:      {gc.Cjmp | gc.UseCarry, 0, 0, 0},
+	x86.AJGT:      {gc.Cjmp | gc.UseCarry, 0, 0, 0},
+	x86.AJHI:      {gc.Cjmp | gc.UseCarry, 0, 0, 0},
+	x86.AJLE:      {gc.Cjmp | gc.UseCarry, 0, 0, 0},
+	x86.AJLS:      {gc.Cjmp | gc.UseCarry, 0, 0, 0},
+	x86.AJLT:      {gc.Cjmp | gc.UseCarry, 0, 0, 0},
+	x86.AJMI:      {gc.Cjmp | gc.UseCarry, 0, 0, 0},
+	x86.AJNE:      {gc.Cjmp | gc.UseCarry, 0, 0, 0},
+	x86.AJOC:      {gc.Cjmp | gc.UseCarry, 0, 0, 0},
+	x86.AJOS:      {gc.Cjmp | gc.UseCarry, 0, 0, 0},
+	x86.AJPC:      {gc.Cjmp | gc.UseCarry, 0, 0, 0},
+	x86.AJPL:      {gc.Cjmp | gc.UseCarry, 0, 0, 0},
+	x86.AJPS:      {gc.Cjmp | gc.UseCarry, 0, 0, 0},
+	obj.AJMP:      {gc.Jump | gc.Break | gc.KillCarry, 0, 0, 0},
+	x86.ALEAL:     {gc.LeftAddr | gc.RightWrite, 0, 0, 0},
+	x86.AMOVBLSX:  {gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	x86.AMOVBLZX:  {gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	x86.AMOVBWSX:  {gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	x86.AMOVBWZX:  {gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	x86.AMOVWLSX:  {gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	x86.AMOVWLZX:  {gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+	x86.AMOVB:     {gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+	x86.AMOVL:     {gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+	x86.AMOVW:     {gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+	x86.AMOVSB:    {gc.OK, DI | SI, DI | SI, 0},
+	x86.AMOVSL:    {gc.OK, DI | SI, DI | SI, 0},
+	x86.AMOVSW:    {gc.OK, DI | SI, DI | SI, 0},
+	obj.ADUFFCOPY: {gc.OK, DI | SI, DI | SI | CX, 0},
+	x86.AMOVSD:    {gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+	x86.AMOVSS:    {gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+
+	// We use MOVAPD as a faster synonym for MOVSD.
+	x86.AMOVAPD:   {gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+	x86.AMULB:     {gc.SizeB | gc.LeftRead | gc.SetCarry, AX, AX, 0},
+	x86.AMULL:     {gc.SizeL | gc.LeftRead | gc.SetCarry, AX, AX | DX, 0},
+	x86.AMULW:     {gc.SizeW | gc.LeftRead | gc.SetCarry, AX, AX | DX, 0},
+	x86.AMULSD:    {gc.SizeD | gc.LeftRead | RightRdwr, 0, 0, 0},
+	x86.AMULSS:    {gc.SizeF | gc.LeftRead | RightRdwr, 0, 0, 0},
+	x86.ANEGB:     {gc.SizeB | RightRdwr | gc.SetCarry, 0, 0, 0},
+	x86.ANEGL:     {gc.SizeL | RightRdwr | gc.SetCarry, 0, 0, 0},
+	x86.ANEGW:     {gc.SizeW | RightRdwr | gc.SetCarry, 0, 0, 0},
+	x86.ANOTB:     {gc.SizeB | RightRdwr, 0, 0, 0},
+	x86.ANOTL:     {gc.SizeL | RightRdwr, 0, 0, 0},
+	x86.ANOTW:     {gc.SizeW | RightRdwr, 0, 0, 0},
+	x86.AORB:      {gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+	x86.AORL:      {gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+	x86.AORW:      {gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+	x86.APOPL:     {gc.SizeL | gc.RightWrite, 0, 0, 0},
+	x86.APUSHL:    {gc.SizeL | gc.LeftRead, 0, 0, 0},
+	x86.ARCLB:     {gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+	x86.ARCLL:     {gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+	x86.ARCLW:     {gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+	x86.ARCRB:     {gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+	x86.ARCRL:     {gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+	x86.ARCRW:     {gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+	x86.AREP:      {gc.OK, CX, CX, 0},
+	x86.AREPN:     {gc.OK, CX, CX, 0},
+	obj.ARET:      {gc.Break | gc.KillCarry, 0, 0, 0},
+	x86.AROLB:     {gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+	x86.AROLL:     {gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+	x86.AROLW:     {gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+	x86.ARORB:     {gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+	x86.ARORL:     {gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+	x86.ARORW:     {gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+	x86.ASAHF:     {gc.OK, AX, AX, 0},
+	x86.ASALB:     {gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+	x86.ASALL:     {gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+	x86.ASALW:     {gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+	x86.ASARB:     {gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+	x86.ASARL:     {gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+	x86.ASARW:     {gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+	x86.ASBBB:     {gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+	x86.ASBBL:     {gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+	x86.ASBBW:     {gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+	x86.ASETCC:    {gc.SizeB | RightRdwr | gc.UseCarry, 0, 0, 0},
+	x86.ASETCS:    {gc.SizeB | RightRdwr | gc.UseCarry, 0, 0, 0},
+	x86.ASETEQ:    {gc.SizeB | RightRdwr | gc.UseCarry, 0, 0, 0},
+	x86.ASETGE:    {gc.SizeB | RightRdwr | gc.UseCarry, 0, 0, 0},
+	x86.ASETGT:    {gc.SizeB | RightRdwr | gc.UseCarry, 0, 0, 0},
+	x86.ASETHI:    {gc.SizeB | RightRdwr | gc.UseCarry, 0, 0, 0},
+	x86.ASETLE:    {gc.SizeB | RightRdwr | gc.UseCarry, 0, 0, 0},
+	x86.ASETLS:    {gc.SizeB | RightRdwr | gc.UseCarry, 0, 0, 0},
+	x86.ASETLT:    {gc.SizeB | RightRdwr | gc.UseCarry, 0, 0, 0},
+	x86.ASETMI:    {gc.SizeB | RightRdwr | gc.UseCarry, 0, 0, 0},
+	x86.ASETNE:    {gc.SizeB | RightRdwr | gc.UseCarry, 0, 0, 0},
+	x86.ASETOC:    {gc.SizeB | RightRdwr | gc.UseCarry, 0, 0, 0},
+	x86.ASETOS:    {gc.SizeB | RightRdwr | gc.UseCarry, 0, 0, 0},
+	x86.ASETPC:    {gc.SizeB | RightRdwr | gc.UseCarry, 0, 0, 0},
+	x86.ASETPL:    {gc.SizeB | RightRdwr | gc.UseCarry, 0, 0, 0},
+	x86.ASETPS:    {gc.SizeB | RightRdwr | gc.UseCarry, 0, 0, 0},
+	x86.ASHLB:     {gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+	x86.ASHLL:     {gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+	x86.ASHLW:     {gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+	x86.ASHRB:     {gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+	x86.ASHRL:     {gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+	x86.ASHRW:     {gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+	x86.ASTOSB:    {gc.OK, AX | DI, DI, 0},
+	x86.ASTOSL:    {gc.OK, AX | DI, DI, 0},
+	x86.ASTOSW:    {gc.OK, AX | DI, DI, 0},
+	obj.ADUFFZERO: {gc.OK, AX | DI, DI, 0},
+	x86.ASUBB:     {gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+	x86.ASUBL:     {gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+	x86.ASUBW:     {gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+	x86.ASUBSD:    {gc.SizeD | gc.LeftRead | RightRdwr, 0, 0, 0},
+	x86.ASUBSS:    {gc.SizeF | gc.LeftRead | RightRdwr, 0, 0, 0},
+	x86.ATESTB:    {gc.SizeB | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+	x86.ATESTL:    {gc.SizeL | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+	x86.ATESTW:    {gc.SizeW | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+	x86.AUCOMISD:  {gc.SizeD | gc.LeftRead | gc.RightRead, 0, 0, 0},
+	x86.AUCOMISS:  {gc.SizeF | gc.LeftRead | gc.RightRead, 0, 0, 0},
+	x86.AXCHGB:    {gc.SizeB | LeftRdwr | RightRdwr, 0, 0, 0},
+	x86.AXCHGL:    {gc.SizeL | LeftRdwr | RightRdwr, 0, 0, 0},
+	x86.AXCHGW:    {gc.SizeW | LeftRdwr | RightRdwr, 0, 0, 0},
+	x86.AXORB:     {gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+	x86.AXORL:     {gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+	x86.AXORW:     {gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+}
+
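+// proginfo fills in p.Info from progtable and then patches the entry
+// for p's particular operands: shifts by a non-constant count also use
+// CX, one-operand IMUL uses AX and sets AX and DX (two-operand forms
+// instead read and write the destination), and any registers used for
+// addressing are recorded in Regindex.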
+func proginfo(p *obj.Prog) {
+	info := &p.Info
+	*info = progtable[p.As]
+	if info.Flags == 0 {
+		gc.Fatal("unknown instruction %v", p)
+	}
+
+	if (info.Flags&gc.ShiftCX != 0) && p.From.Type != obj.TYPE_CONST {
+		info.Reguse |= CX
+	}
+
+	if info.Flags&gc.ImulAXDX != 0 {
+		if p.To.Type == obj.TYPE_NONE {
+			info.Reguse |= AX
+			info.Regset |= AX | DX
+		} else {
+			info.Flags |= RightRdwr
+		}
+	}
+
+	// Addressing makes some registers used.
+	if p.From.Type == obj.TYPE_MEM && p.From.Name == obj.NAME_NONE {
+		info.Regindex |= RtoB(int(p.From.Reg))
+	}
+	if p.From.Index != x86.REG_NONE {
+		info.Regindex |= RtoB(int(p.From.Index))
+	}
+	if p.To.Type == obj.TYPE_MEM && p.To.Name == obj.NAME_NONE {
+		info.Regindex |= RtoB(int(p.To.Reg))
+	}
+	if p.To.Index != x86.REG_NONE {
+		info.Regindex |= RtoB(int(p.To.Index))
+	}
+}
diff --git a/src/cmd/compile/internal/x86/reg.go b/src/cmd/compile/internal/x86/reg.go
new file mode 100644
index 0000000..8c97171
--- /dev/null
+++ b/src/cmd/compile/internal/x86/reg.go
@@ -0,0 +1,112 @@
+// Derived from Inferno utils/6c/reg.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/reg.c
+//
+//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
+//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+//	Portions Copyright © 1997-1999 Vita Nuova Limited
+//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+//	Portions Copyright © 2004,2006 Bruce Ellis
+//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+//	Portions Copyright © 2009 The Go Authors.  All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package x86
+
+import "cmd/internal/obj/x86"
+import "cmd/compile/internal/gc"
+
+const (
+	NREGVAR = 16 /* 8 integer + 8 floating */
+)
+
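+// reg holds a use count for each machine register, carried over from
+// the old reg.c register allocator.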
+var reg [x86.MAXREG]uint8
+
+var regname = []string{
+	".ax",
+	".cx",
+	".dx",
+	".bx",
+	".sp",
+	".bp",
+	".si",
+	".di",
+	".x0",
+	".x1",
+	".x2",
+	".x3",
+	".x4",
+	".x5",
+	".x6",
+	".x7",
+}
+
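+// regnames stores the number of register variables in *n and returns
+// their print names, in the same order as the allocator's bit mask.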
+func regnames(n *int) []string {
+	*n = NREGVAR
+	return regname
+}
+
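+// excludedregs returns the registers the optimizer must leave alone;
+// on 386 that is just the stack pointer.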
+func excludedregs() uint64 {
+	return RtoB(x86.REG_SP)
+}
+
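+// doregbits returns the bit mask for register r, folding the byte
+// registers AL..BL and AH..BH onto the word registers that contain them.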
+func doregbits(r int) uint64 {
+	b := uint64(0)
+	if r >= x86.REG_AX && r <= x86.REG_DI {
+		b |= RtoB(r)
+	} else if r >= x86.REG_AL && r <= x86.REG_BL {
+		b |= RtoB(r - x86.REG_AL + x86.REG_AX)
+	} else if r >= x86.REG_AH && r <= x86.REG_BH {
+		b |= RtoB(r - x86.REG_AH + x86.REG_AX)
+	} else if r >= x86.REG_X0 && r <= x86.REG_X0+7 {
+		b |= FtoB(r)
+	}
+	return b
+}
+
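+// The optimizer packs registers into a uint64 bit mask: bits 0-7 are
+// the integer registers AX..DI and bits 8-15 are the SSE registers
+// X0..X7.  RtoB and FtoB map a register to its bit; BtoR and BtoF
+// invert the mapping, returning 0 for masks with no bit in range.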
+func RtoB(r int) uint64 {
+	if r < x86.REG_AX || r > x86.REG_DI {
+		return 0
+	}
+	return 1 << uint(r-x86.REG_AX)
+}
+
+func BtoR(b uint64) int {
+	b &= 0xff
+	if b == 0 {
+		return 0
+	}
+	return gc.Bitno(b) + x86.REG_AX
+}
+
+func FtoB(f int) uint64 {
+	if f < x86.REG_X0 || f > x86.REG_X7 {
+		return 0
+	}
+	return 1 << uint(f-x86.REG_X0+8)
+}
+
+func BtoF(b uint64) int {
+	b &= 0xFF00
+	if b == 0 {
+		return 0
+	}
+	return gc.Bitno(b) - 8 + x86.REG_X0
+}
diff --git a/src/cmd/compile/main.go b/src/cmd/compile/main.go
new file mode 100644
index 0000000..7b69c34
--- /dev/null
+++ b/src/cmd/compile/main.go
@@ -0,0 +1,34 @@
+// Copyright 2015 The Go Authors.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+	"cmd/compile/internal/amd64"
+	"cmd/compile/internal/arm"
+	"cmd/compile/internal/arm64"
+	"cmd/compile/internal/ppc64"
+	"cmd/compile/internal/x86"
+	"cmd/internal/obj"
+	"fmt"
+	"os"
+)
+
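+// main dispatches to the backend for the architecture named by GOARCH
+// (via obj.Getgoarch); each backend's Main performs the compilation.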
+func main() {
+	switch obj.Getgoarch() {
+	default:
+		fmt.Fprintf(os.Stderr, "compile: unknown architecture %q\n", obj.Getgoarch())
+		os.Exit(2)
+	case "386":
+		x86.Main()
+	case "amd64", "amd64p32":
+		amd64.Main()
+	case "arm":
+		arm.Main()
+	case "arm64":
+		arm64.Main()
+	case "ppc64", "ppc64le":
+		ppc64.Main()
+	}
+}