cmd/compile: delete lots of the legacy backend

It's not everything, but it is a good start.

I tried to make the CL delete-only. goimports forced
a few exceptions to that rule.

Update #16357

Change-Id: I041925cb2fe68bb7ae1617af862b22c48da649c1
Reviewed-on: https://go-review.googlesource.com/29168
Run-TryBot: Keith Randall <khr@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
Reviewed-by: Josh Bleecher Snyder <josharian@gmail.com>
Reviewed-by: Martin Möhrmann <martisch@uos.de>
diff --git a/src/cmd/compile/internal/amd64/cgen.go b/src/cmd/compile/internal/amd64/cgen.go
deleted file mode 100644
index 1fdb807..0000000
--- a/src/cmd/compile/internal/amd64/cgen.go
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package amd64
-
-import (
-	"cmd/compile/internal/gc"
-	"cmd/internal/obj"
-	"cmd/internal/obj/x86"
-)
-
-func blockcopy(n, ns *gc.Node, osrc, odst, w int64) {
-	var noddi gc.Node
-	gc.Nodreg(&noddi, gc.Types[gc.Tptr], x86.REG_DI)
-	var nodsi gc.Node
-	gc.Nodreg(&nodsi, gc.Types[gc.Tptr], x86.REG_SI)
-
-	var nodl gc.Node
-	var nodr gc.Node
-	if n.Ullman >= ns.Ullman {
-		gc.Agenr(n, &nodr, &nodsi)
-		if ns.Op == gc.ONAME {
-			gc.Gvardef(ns)
-		}
-		gc.Agenr(ns, &nodl, &noddi)
-	} else {
-		if ns.Op == gc.ONAME {
-			gc.Gvardef(ns)
-		}
-		gc.Agenr(ns, &nodl, &noddi)
-		gc.Agenr(n, &nodr, &nodsi)
-	}
-
-	if nodl.Reg != x86.REG_DI {
-		gmove(&nodl, &noddi)
-	}
-	if nodr.Reg != x86.REG_SI {
-		gmove(&nodr, &nodsi)
-	}
-	gc.Regfree(&nodl)
-	gc.Regfree(&nodr)
-
-	c := w % 8 // bytes
-	q := w / 8 // quads
-
-	var oldcx gc.Node
-	var cx gc.Node
-	savex(x86.REG_CX, &cx, &oldcx, nil, gc.Types[gc.TINT64])
-
-	// if we are copying forward on the stack and
-	// the src and dst overlap, then reverse direction
-	if osrc < odst && odst < osrc+w {
-		// reverse direction
-		gins(x86.ASTD, nil, nil) // set direction flag
-		if c > 0 {
-			gconreg(addptr, w-1, x86.REG_SI)
-			gconreg(addptr, w-1, x86.REG_DI)
-
-			gconreg(movptr, c, x86.REG_CX)
-			gins(x86.AREP, nil, nil)   // repeat
-			gins(x86.AMOVSB, nil, nil) // MOVB *(SI)-,*(DI)-
-		}
-
-		if q > 0 {
-			if c > 0 {
-				gconreg(addptr, -7, x86.REG_SI)
-				gconreg(addptr, -7, x86.REG_DI)
-			} else {
-				gconreg(addptr, w-8, x86.REG_SI)
-				gconreg(addptr, w-8, x86.REG_DI)
-			}
-
-			gconreg(movptr, q, x86.REG_CX)
-			gins(x86.AREP, nil, nil)   // repeat
-			gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)-,*(DI)-
-		}
-
-		// we leave with the flag clear
-		gins(x86.ACLD, nil, nil)
-	} else {
-		// normal direction
-		if q > 128 || (gc.Nacl && q >= 4) || (obj.GOOS == "plan9" && q >= 4) {
-			gconreg(movptr, q, x86.REG_CX)
-			gins(x86.AREP, nil, nil)   // repeat
-			gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)+,*(DI)+
-		} else if q >= 4 {
-			var oldx0 gc.Node
-			var x0 gc.Node
-			savex(x86.REG_X0, &x0, &oldx0, nil, gc.Types[gc.TFLOAT64])
-
-			p := gins(obj.ADUFFCOPY, nil, nil)
-			p.To.Type = obj.TYPE_ADDR
-			p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))
-
-			// 64 blocks taking 14 bytes each
-			// see ../../../../runtime/mkduff.go
-			p.To.Offset = 14 * (64 - q/2)
-			restx(&x0, &oldx0)
-
-			if q%2 != 0 {
-				gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)+,*(DI)+
-			}
-		} else if !gc.Nacl && c == 0 {
-			// We don't need the MOVSQ side-effect of updating SI and DI,
-			// and issuing a sequence of MOVQs directly is faster.
-			nodsi.Op = gc.OINDREG
-
-			noddi.Op = gc.OINDREG
-			for q > 0 {
-				gmove(&nodsi, &cx) // MOVQ x+(SI),CX
-				gmove(&cx, &noddi) // MOVQ CX,x+(DI)
-				nodsi.Xoffset += 8
-				noddi.Xoffset += 8
-				q--
-			}
-		} else {
-			for q > 0 {
-				gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)+,*(DI)+
-				q--
-			}
-		}
-
-		// copy the remaining c bytes
-		if w < 4 || c <= 1 || (odst < osrc && osrc < odst+w) {
-			for c > 0 {
-				gins(x86.AMOVSB, nil, nil) // MOVB *(SI)+,*(DI)+
-				c--
-			}
-		} else if w < 8 || c <= 4 {
-			nodsi.Op = gc.OINDREG
-			noddi.Op = gc.OINDREG
-			cx.Type = gc.Types[gc.TINT32]
-			nodsi.Type = gc.Types[gc.TINT32]
-			noddi.Type = gc.Types[gc.TINT32]
-			if c > 4 {
-				nodsi.Xoffset = 0
-				noddi.Xoffset = 0
-				gmove(&nodsi, &cx)
-				gmove(&cx, &noddi)
-			}
-
-			nodsi.Xoffset = c - 4
-			noddi.Xoffset = c - 4
-			gmove(&nodsi, &cx)
-			gmove(&cx, &noddi)
-		} else {
-			nodsi.Op = gc.OINDREG
-			noddi.Op = gc.OINDREG
-			cx.Type = gc.Types[gc.TINT64]
-			nodsi.Type = gc.Types[gc.TINT64]
-			noddi.Type = gc.Types[gc.TINT64]
-			nodsi.Xoffset = c - 8
-			noddi.Xoffset = c - 8
-			gmove(&nodsi, &cx)
-			gmove(&cx, &noddi)
-		}
-	}
-
-	restx(&cx, &oldcx)
-}
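
The deleted blockcopy chose its copy direction with an overlap test: when osrc < odst < osrc+w, a forward REP MOVS would overwrite source bytes before reading them, so it set the direction flag and copied backward. A minimal sketch of the same decision in portable Go (overlapCopy is an illustrative name, not anything from this CL):

	package main

	import "fmt"

	// overlapCopy mirrors blockcopy's direction test: if dst starts inside
	// [src, src+n), a forward copy clobbers unread source bytes, so copy
	// backward instead (the STD + REP MOVS path in the deleted code).
	func overlapCopy(buf []byte, src, dst, n int) {
		if src < dst && dst < src+n {
			for i := n - 1; i >= 0; i-- { // backward
				buf[dst+i] = buf[src+i]
			}
			return
		}
		for i := 0; i < n; i++ { // forward
			buf[dst+i] = buf[src+i]
		}
	}

	func main() {
		b := []byte("abcdef__")
		overlapCopy(b, 0, 2, 6)
		fmt.Println(string(b)) // ababcdef; a naive forward copy would leave abababab
	}
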
diff --git a/src/cmd/compile/internal/amd64/galign.go b/src/cmd/compile/internal/amd64/galign.go
index b7ce5a0..22a25af 100644
--- a/src/cmd/compile/internal/amd64/galign.go
+++ b/src/cmd/compile/internal/amd64/galign.go
@@ -11,20 +11,13 @@
 )
 
 var (
-	addptr = x86.AADDQ
-	movptr = x86.AMOVQ
 	leaptr = x86.ALEAQ
-	cmpptr = x86.ACMPQ
 )
 
 func betypeinit() {
 	if obj.GOARCH == "amd64p32" {
-		addptr = x86.AADDL
-		movptr = x86.AMOVL
 		leaptr = x86.ALEAL
-		cmpptr = x86.ACMPL
 	}
-
 	if gc.Ctxt.Flag_dynlink || obj.GOOS == "nacl" {
 		resvd = append(resvd, x86.REG_R15)
 	}
@@ -50,40 +43,10 @@
 	gc.Thearch.FREGMAX = x86.REG_X15
 	gc.Thearch.MAXWIDTH = 1 << 50
 
-	gc.Thearch.AddIndex = addindex
 	gc.Thearch.Betypeinit = betypeinit
-	gc.Thearch.Cgen_bmul = cgen_bmul
-	gc.Thearch.Cgen_hmul = cgen_hmul
-	gc.Thearch.Cgen_shift = cgen_shift
-	gc.Thearch.Clearfat = clearfat
 	gc.Thearch.Defframe = defframe
-	gc.Thearch.Dodiv = dodiv
-	gc.Thearch.Excise = excise
-	gc.Thearch.Expandchecks = expandchecks
-	gc.Thearch.Getg = getg
 	gc.Thearch.Gins = gins
-	gc.Thearch.Ginsboolval = ginsboolval
-	gc.Thearch.Ginscmp = ginscmp
-	gc.Thearch.Ginscon = ginscon
-	gc.Thearch.Ginsnop = ginsnop
-	gc.Thearch.Gmove = gmove
-	gc.Thearch.Peep = peep
 	gc.Thearch.Proginfo = proginfo
-	gc.Thearch.Regtyp = regtyp
-	gc.Thearch.Sameaddr = sameaddr
-	gc.Thearch.Smallindir = smallindir
-	gc.Thearch.Stackaddr = stackaddr
-	gc.Thearch.Blockcopy = blockcopy
-	gc.Thearch.Sudoaddable = sudoaddable
-	gc.Thearch.Sudoclean = sudoclean
-	gc.Thearch.Excludedregs = excludedregs
-	gc.Thearch.RtoB = RtoB
-	gc.Thearch.FtoB = FtoB
-	gc.Thearch.BtoR = BtoR
-	gc.Thearch.BtoF = BtoF
-	gc.Thearch.Optoas = optoas
-	gc.Thearch.Doregbits = doregbits
-	gc.Thearch.Regnames = regnames
 
 	gc.Thearch.SSARegToReg = ssaRegToReg
 	gc.Thearch.SSAMarkMoves = ssaMarkMoves
diff --git a/src/cmd/compile/internal/amd64/ggen.go b/src/cmd/compile/internal/amd64/ggen.go
index e8390f2..4145ff2 100644
--- a/src/cmd/compile/internal/amd64/ggen.go
+++ b/src/cmd/compile/internal/amd64/ggen.go
@@ -181,627 +181,3 @@
 	p.Link = q
 	return q
 }
-
-var panicdiv *gc.Node
-
-/*
- * generate division.
- * generates one of:
- *	res = nl / nr
- *	res = nl % nr
- * according to op.
- */
-func dodiv(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) {
-	// Have to be careful about handling
-	// most negative int divided by -1 correctly.
-	// The hardware will trap.
-	// Also the byte divide instruction needs AH,
-	// which we otherwise don't have to deal with.
-	// Easiest way to avoid for int8, int16: use int32.
-	// For int32 and int64, use explicit test.
-	// Could use int64 hw for int32.
-	t := nl.Type
-
-	t0 := t
-	check := false
-	if t.IsSigned() {
-		check = true
-		if gc.Isconst(nl, gc.CTINT) && nl.Int64() != -(1<<uint64(t.Width*8-1)) {
-			check = false
-		} else if gc.Isconst(nr, gc.CTINT) && nr.Int64() != -1 {
-			check = false
-		}
-	}
-
-	if t.Width < 4 {
-		if t.IsSigned() {
-			t = gc.Types[gc.TINT32]
-		} else {
-			t = gc.Types[gc.TUINT32]
-		}
-		check = false
-	}
-
-	a := optoas(op, t)
-
-	var n3 gc.Node
-	gc.Regalloc(&n3, t0, nil)
-	var ax gc.Node
-	var oldax gc.Node
-	if nl.Ullman >= nr.Ullman {
-		savex(x86.REG_AX, &ax, &oldax, res, t0)
-		gc.Cgen(nl, &ax)
-		gc.Regalloc(&ax, t0, &ax) // mark ax live during cgen
-		gc.Cgen(nr, &n3)
-		gc.Regfree(&ax)
-	} else {
-		gc.Cgen(nr, &n3)
-		savex(x86.REG_AX, &ax, &oldax, res, t0)
-		gc.Cgen(nl, &ax)
-	}
-
-	if t != t0 {
-		// Convert
-		ax1 := ax
-
-		n31 := n3
-		ax.Type = t
-		n3.Type = t
-		gmove(&ax1, &ax)
-		gmove(&n31, &n3)
-	}
-
-	var n4 gc.Node
-	if gc.Nacl {
-		// Native Client does not relay the divide-by-zero trap
-		// to the executing program, so we must insert a check
-		// for ourselves.
-		gc.Nodconst(&n4, t, 0)
-
-		gins(optoas(gc.OCMP, t), &n3, &n4)
-		p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
-		if panicdiv == nil {
-			panicdiv = gc.Sysfunc("panicdivide")
-		}
-		gc.Ginscall(panicdiv, -1)
-		gc.Patch(p1, gc.Pc)
-	}
-
-	var p2 *obj.Prog
-	if check {
-		gc.Nodconst(&n4, t, -1)
-		gins(optoas(gc.OCMP, t), &n3, &n4)
-		p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
-		if op == gc.ODIV {
-			// a / (-1) is -a.
-			gins(optoas(gc.OMINUS, t), nil, &ax)
-
-			gmove(&ax, res)
-		} else {
-			// a % (-1) is 0.
-			gc.Nodconst(&n4, t, 0)
-
-			gmove(&n4, res)
-		}
-
-		p2 = gc.Gbranch(obj.AJMP, nil, 0)
-		gc.Patch(p1, gc.Pc)
-	}
-
-	var olddx gc.Node
-	var dx gc.Node
-	savex(x86.REG_DX, &dx, &olddx, res, t)
-	if !t.IsSigned() {
-		gc.Nodconst(&n4, t, 0)
-		gmove(&n4, &dx)
-	} else {
-		gins(optoas(gc.OEXTEND, t), nil, nil)
-	}
-	gins(a, &n3, nil)
-	gc.Regfree(&n3)
-	if op == gc.ODIV {
-		gmove(&ax, res)
-	} else {
-		gmove(&dx, res)
-	}
-	restx(&dx, &olddx)
-	if check {
-		gc.Patch(p2, gc.Pc)
-	}
-	restx(&ax, &oldax)
-}
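
The "most negative int divided by -1" hazard that dodiv's preamble describes is concrete: IDIV raises a divide error because the true quotient 2^63 does not fit in int64, while the Go spec defines the result as the dividend itself. The explicit CMP $-1 branch above implements exactly these semantics, which a short program can observe:

	package main

	import (
		"fmt"
		"math"
	)

	func main() {
		a, b := int64(math.MinInt64), int64(-1)
		// A raw IDIV would trap here; the deleted check branched around
		// the instruction so Go programs see the defined results instead.
		fmt.Println(a / b) // -9223372036854775808 (overflow wraps back to a)
		fmt.Println(a % b) // 0
	}
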
-
-/*
- * register dr is one of the special ones (AX, CX, DI, SI, etc.).
- * we need to use it.  if it is already allocated as a temporary
- * (r > 1; can only happen if a routine like sgen passed a
- * special as cgen's res and then cgen used regalloc to reuse
- * it as its own temporary), then move it for now to another
- * register.  caller must call restx to move it back.
- * the move is not necessary if dr == res, because res is
- * known to be dead.
- */
-func savex(dr int, x *gc.Node, oldx *gc.Node, res *gc.Node, t *gc.Type) {
-	r := uint8(gc.GetReg(dr))
-
-	// save current ax and dx if they are live
-	// and not the destination
-	*oldx = gc.Node{}
-
-	gc.Nodreg(x, t, dr)
-	if r > 1 && !gc.Samereg(x, res) {
-		gc.Regalloc(oldx, gc.Types[gc.TINT64], nil)
-		x.Type = gc.Types[gc.TINT64]
-		gmove(x, oldx)
-		x.Type = t
-		// TODO(marvin): Fix Node.EType type union.
-		oldx.Etype = gc.EType(r) // squirrel away old r value
-		gc.SetReg(dr, 1)
-	}
-}
-
-func restx(x *gc.Node, oldx *gc.Node) {
-	if oldx.Op != 0 {
-		x.Type = gc.Types[gc.TINT64]
-		gc.SetReg(int(x.Reg), int(oldx.Etype))
-		gmove(oldx, x)
-		gc.Regfree(oldx)
-	}
-}
-
-/*
- * generate high multiply:
- *   res = (nl*nr) >> width
- */
-func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
-	t := nl.Type
-	a := optoas(gc.OHMUL, t)
-	if nl.Ullman < nr.Ullman {
-		nl, nr = nr, nl
-	}
-
-	var n1 gc.Node
-	gc.Cgenr(nl, &n1, res)
-	var n2 gc.Node
-	gc.Cgenr(nr, &n2, nil)
-	var ax, oldax, dx, olddx gc.Node
-	savex(x86.REG_AX, &ax, &oldax, res, gc.Types[gc.TUINT64])
-	savex(x86.REG_DX, &dx, &olddx, res, gc.Types[gc.TUINT64])
-	gmove(&n1, &ax)
-	gins(a, &n2, nil)
-	gc.Regfree(&n2)
-	gc.Regfree(&n1)
-
-	if t.Width == 1 {
-		// byte multiply behaves differently.
-		var byteAH, byteDX gc.Node
-		gc.Nodreg(&byteAH, t, x86.REG_AH)
-		gc.Nodreg(&byteDX, t, x86.REG_DX)
-		gmove(&byteAH, &byteDX)
-	}
-	gmove(&dx, res)
-
-	restx(&ax, &oldax)
-	restx(&dx, &olddx)
-}
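
cgen_hmul used the one-operand multiply, which leaves the 128-bit product in DX:AX, and then read the high half out of DX (with the AH special case for byte operands). The value it computed is what math/bits exposes portably today; a small sketch:

	package main

	import (
		"fmt"
		"math/bits"
	)

	func main() {
		// bits.Mul64 returns (hi, lo) of the 128-bit product; hi is the
		// word cgen_hmul copied out of DX after the one-operand MUL.
		hi, lo := bits.Mul64(1<<40, 1<<40) // 2^80
		fmt.Println(hi, lo)                // 65536 0, since 2^80 = 65536<<64
	}
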
-
-/*
- * generate shift according to op, one of:
- *	res = nl << nr
- *	res = nl >> nr
- */
-func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
-	a := optoas(op, nl.Type)
-
-	if nr.Op == gc.OLITERAL {
-		var n1 gc.Node
-		gc.Regalloc(&n1, nl.Type, res)
-		gc.Cgen(nl, &n1)
-		sc := uint64(nr.Int64())
-		if sc >= uint64(nl.Type.Width*8) {
-			// large shift gets 2 shifts by width-1
-			var n3 gc.Node
-			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
-
-			gins(a, &n3, &n1)
-			gins(a, &n3, &n1)
-		} else {
-			gins(a, nr, &n1)
-		}
-		gmove(&n1, res)
-		gc.Regfree(&n1)
-		return
-	}
-
-	if nl.Ullman >= gc.UINF {
-		var n4 gc.Node
-		gc.Tempname(&n4, nl.Type)
-		gc.Cgen(nl, &n4)
-		nl = &n4
-	}
-
-	if nr.Ullman >= gc.UINF {
-		var n5 gc.Node
-		gc.Tempname(&n5, nr.Type)
-		gc.Cgen(nr, &n5)
-		nr = &n5
-	}
-
-	rcx := gc.GetReg(x86.REG_CX)
-	var n1 gc.Node
-	gc.Nodreg(&n1, gc.Types[gc.TUINT32], x86.REG_CX)
-
-	// Allow either uint32 or uint64 as shift type,
-	// to avoid unnecessary conversion from uint32 to uint64
-	// just to do the comparison.
-	tcount := gc.Types[gc.Simtype[nr.Type.Etype]]
-
-	if tcount.Etype < gc.TUINT32 {
-		tcount = gc.Types[gc.TUINT32]
-	}
-
-	gc.Regalloc(&n1, nr.Type, &n1) // to hold the shift type in CX
-	var n3 gc.Node
-	gc.Regalloc(&n3, tcount, &n1) // to clear high bits of CX
-
-	var cx gc.Node
-	gc.Nodreg(&cx, gc.Types[gc.TUINT64], x86.REG_CX)
-
-	var oldcx gc.Node
-	if rcx > 0 && !gc.Samereg(&cx, res) {
-		gc.Regalloc(&oldcx, gc.Types[gc.TUINT64], nil)
-		gmove(&cx, &oldcx)
-	}
-
-	cx.Type = tcount
-
-	var n2 gc.Node
-	if gc.Samereg(&cx, res) {
-		gc.Regalloc(&n2, nl.Type, nil)
-	} else {
-		gc.Regalloc(&n2, nl.Type, res)
-	}
-	if nl.Ullman >= nr.Ullman {
-		gc.Cgen(nl, &n2)
-		gc.Cgen(nr, &n1)
-		gmove(&n1, &n3)
-	} else {
-		gc.Cgen(nr, &n1)
-		gmove(&n1, &n3)
-		gc.Cgen(nl, &n2)
-	}
-
-	gc.Regfree(&n3)
-
-	// test and fix up large shifts
-	if !bounded {
-		gc.Nodconst(&n3, tcount, nl.Type.Width*8)
-		gins(optoas(gc.OCMP, tcount), &n1, &n3)
-		p1 := gc.Gbranch(optoas(gc.OLT, tcount), nil, +1)
-		if op == gc.ORSH && nl.Type.IsSigned() {
-			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
-			gins(a, &n3, &n2)
-		} else {
-			gc.Nodconst(&n3, nl.Type, 0)
-			gmove(&n3, &n2)
-		}
-
-		gc.Patch(p1, gc.Pc)
-	}
-
-	gins(a, &n1, &n2)
-
-	if oldcx.Op != 0 {
-		cx.Type = gc.Types[gc.TUINT64]
-		gmove(&oldcx, &cx)
-		gc.Regfree(&oldcx)
-	}
-
-	gmove(&n2, res)
-
-	gc.Regfree(&n1)
-	gc.Regfree(&n2)
-}
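
The "test and fix up large shifts" block exists because x86 shift instructions use the count mod 64 (or mod 32), while Go defines shifts by counts >= the operand width: zero for left and unsigned right shifts, sign-fill for signed right shifts. Those are the semantics the compare-and-branch enforced:

	package main

	import "fmt"

	func main() {
		var x uint32 = 0xFFFFFFFF
		var s uint = 40 // hardware SHL alone would shift by 40%32 = 8
		fmt.Println(x << s) // 0: over-wide shifts are defined to vanish

		var y int32 = -1
		fmt.Println(y >> s) // -1: signed right shift fills with the sign bit
	}
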
-
-/*
- * generate byte multiply:
- *	res = nl * nr
- * there is no 2-operand byte multiply instruction so
- * we do a full-width multiplication and truncate afterwards.
- */
-func cgen_bmul(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) bool {
-	if optoas(op, nl.Type) != x86.AIMULB {
-		return false
-	}
-
-	// largest ullman on left.
-	if nl.Ullman < nr.Ullman {
-		nl, nr = nr, nl
-	}
-
-	// generate operands in "8-bit" registers.
-	var n1b gc.Node
-	gc.Regalloc(&n1b, nl.Type, res)
-
-	gc.Cgen(nl, &n1b)
-	var n2b gc.Node
-	gc.Regalloc(&n2b, nr.Type, nil)
-	gc.Cgen(nr, &n2b)
-
-	// perform full-width multiplication.
-	t := gc.Types[gc.TUINT64]
-
-	if nl.Type.IsSigned() {
-		t = gc.Types[gc.TINT64]
-	}
-	var n1 gc.Node
-	gc.Nodreg(&n1, t, int(n1b.Reg))
-	var n2 gc.Node
-	gc.Nodreg(&n2, t, int(n2b.Reg))
-	a := optoas(op, t)
-	gins(a, &n2, &n1)
-
-	// truncate.
-	gmove(&n1, res)
-
-	gc.Regfree(&n1b)
-	gc.Regfree(&n2b)
-	return true
-}
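
cgen_bmul's trick is sound because the low 8 bits of a product depend only on the low 8 bits of each operand, so a full-width IMUL followed by truncation gives the same answer the missing 2-operand byte multiply would:

	package main

	import "fmt"

	func main() {
		a, b := uint8(200), uint8(3)
		wide := uint64(a) * uint64(b) // full-width multiply, as the deleted code did
		fmt.Println(uint8(wide), a*b) // 88 88: truncation equals the byte multiply
	}
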
-
-func clearfat(nl *gc.Node) {
-	/* clear a fat object */
-	if gc.Debug['g'] != 0 {
-		gc.Dump("\nclearfat", nl)
-	}
-
-	// Avoid taking the address for simple enough types.
-	if gc.Componentgen(nil, nl) {
-		return
-	}
-
-	w := nl.Type.Width
-
-	if w > 1024 || (w >= 64 && (gc.Nacl || isPlan9)) {
-		var oldn1 gc.Node
-		var n1 gc.Node
-		savex(x86.REG_DI, &n1, &oldn1, nil, gc.Types[gc.Tptr])
-		gc.Agen(nl, &n1)
-
-		var ax gc.Node
-		var oldax gc.Node
-		savex(x86.REG_AX, &ax, &oldax, nil, gc.Types[gc.Tptr])
-		gconreg(x86.AMOVL, 0, x86.REG_AX)
-		gconreg(movptr, w/8, x86.REG_CX)
-
-		gins(x86.AREP, nil, nil)   // repeat
-		gins(x86.ASTOSQ, nil, nil) // STOSQ AX,*(DI)+
-
-		if w%8 != 0 {
-			n1.Op = gc.OINDREG
-			clearfat_tail(&n1, w%8)
-		}
-
-		restx(&n1, &oldn1)
-		restx(&ax, &oldax)
-		return
-	}
-
-	if w >= 64 {
-		var oldn1 gc.Node
-		var n1 gc.Node
-		savex(x86.REG_DI, &n1, &oldn1, nil, gc.Types[gc.Tptr])
-		gc.Agen(nl, &n1)
-
-		var vec_zero gc.Node
-		var old_x0 gc.Node
-		savex(x86.REG_X0, &vec_zero, &old_x0, nil, gc.Types[gc.TFLOAT64])
-		gins(x86.AXORPS, &vec_zero, &vec_zero)
-
-		if di := dzDI(w); di != 0 {
-			gconreg(addptr, di, x86.REG_DI)
-		}
-		p := gins(obj.ADUFFZERO, nil, nil)
-		p.To.Type = obj.TYPE_ADDR
-		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
-		p.To.Offset = dzOff(w)
-
-		if w%16 != 0 {
-			n1.Op = gc.OINDREG
-			n1.Xoffset -= 16 - w%16
-			gins(x86.AMOVUPS, &vec_zero, &n1)
-		}
-
-		restx(&vec_zero, &old_x0)
-		restx(&n1, &oldn1)
-		return
-	}
-
-	// NOTE: Must use agen, not igen, so that optimizer sees address
-	// being taken. We are not writing on field boundaries.
-	var n1 gc.Node
-	gc.Agenr(nl, &n1, nil)
-	n1.Op = gc.OINDREG
-
-	clearfat_tail(&n1, w)
-
-	gc.Regfree(&n1)
-}
-
-func clearfat_tail(n1 *gc.Node, b int64) {
-	if b >= 16 && isPlan9 {
-		var z gc.Node
-		gc.Nodconst(&z, gc.Types[gc.TUINT64], 0)
-		q := b / 8
-		for ; q > 0; q-- {
-			n1.Type = z.Type
-			gins(x86.AMOVQ, &z, n1)
-			n1.Xoffset += 8
-			b -= 8
-		}
-		if b != 0 {
-			n1.Xoffset -= 8 - b
-			gins(x86.AMOVQ, &z, n1)
-		}
-		return
-	}
-	if b >= 16 {
-		var vec_zero gc.Node
-		gc.Regalloc(&vec_zero, gc.Types[gc.TFLOAT64], nil)
-		gins(x86.AXORPS, &vec_zero, &vec_zero)
-
-		for b >= 16 {
-			gins(x86.AMOVUPS, &vec_zero, n1)
-			n1.Xoffset += 16
-			b -= 16
-		}
-
-		// MOVUPS X0, off(base) is a few bytes shorter than MOV 0, off(base)
-		if b != 0 {
-			n1.Xoffset -= 16 - b
-			gins(x86.AMOVUPS, &vec_zero, n1)
-		}
-
-		gc.Regfree(&vec_zero)
-		return
-	}
-
-	// Write sequence of MOV 0, off(base) instead of using STOSQ.
-	// The hope is that although the code will be slightly longer,
-	// the MOVs will have no dependencies and pipeline better
-	// than the unrolled STOSQ loop.
-	var z gc.Node
-	gc.Nodconst(&z, gc.Types[gc.TUINT64], 0)
-	if b >= 8 {
-		n1.Type = z.Type
-		gins(x86.AMOVQ, &z, n1)
-		n1.Xoffset += 8
-		b -= 8
-
-		if b != 0 {
-			n1.Xoffset -= 8 - b
-			gins(x86.AMOVQ, &z, n1)
-		}
-		return
-	}
-
-	if b >= 4 {
-		gc.Nodconst(&z, gc.Types[gc.TUINT32], 0)
-		n1.Type = z.Type
-		gins(x86.AMOVL, &z, n1)
-		n1.Xoffset += 4
-		b -= 4
-
-		if b != 0 {
-			n1.Xoffset -= 4 - b
-			gins(x86.AMOVL, &z, n1)
-		}
-		return
-	}
-
-	if b >= 2 {
-		gc.Nodconst(&z, gc.Types[gc.TUINT16], 0)
-		n1.Type = z.Type
-		gins(x86.AMOVW, &z, n1)
-		n1.Xoffset += 2
-		b -= 2
-	}
-
-	gc.Nodconst(&z, gc.Types[gc.TUINT8], 0)
-	for b > 0 {
-		n1.Type = z.Type
-		gins(x86.AMOVB, &z, n1)
-		n1.Xoffset++
-		b--
-	}
-
-}
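
clearfat_tail's final stores overlap on purpose: rather than finishing an odd-sized tail with 4/2/1-byte moves, the 16-byte and Plan 9 paths back the offset up (Xoffset -= 16 - b, or 8 - b) so one more full-width store ends exactly at the end of the object, re-zeroing a few bytes that are already clear. A sketch of the same overlapping-store idea (zeroTail is a hypothetical helper, assuming len(p) >= 8):

	package main

	import (
		"encoding/binary"
		"fmt"
	)

	// zeroTail clears p with 8-byte stores; the last store is backed up to
	// end exactly at len(p), overlapping the previous store by 8-b bytes.
	func zeroTail(p []byte) { // assumes len(p) >= 8
		b := len(p)
		off := 0
		for ; b >= 8; b -= 8 {
			binary.LittleEndian.PutUint64(p[off:], 0)
			off += 8
		}
		if b > 0 {
			binary.LittleEndian.PutUint64(p[len(p)-8:], 0)
		}
	}

	func main() {
		buf := []byte("0123456789abcde") // 15 bytes
		zeroTail(buf)
		fmt.Println(buf) // 15 zeros; the second store covered bytes 7..14
	}
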
-
-// Called after regopt and peep have run.
-// Expand CHECKNIL pseudo-op into actual nil pointer check.
-func expandchecks(firstp *obj.Prog) {
-	var p1 *obj.Prog
-	var p2 *obj.Prog
-
-	for p := firstp; p != nil; p = p.Link {
-		if p.As != obj.ACHECKNIL {
-			continue
-		}
-		if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
-			gc.Warnl(p.Lineno, "generated nil check")
-		}
-
-		// check is
-		//	CMP arg, $0
-		//	JNE 2(PC) (likely)
-		//	MOV AX, 0
-		p1 = gc.Ctxt.NewProg()
-
-		p2 = gc.Ctxt.NewProg()
-		gc.Clearp(p1)
-		gc.Clearp(p2)
-		p1.Link = p2
-		p2.Link = p.Link
-		p.Link = p1
-		p1.Lineno = p.Lineno
-		p2.Lineno = p.Lineno
-		p1.Pc = 9999
-		p2.Pc = 9999
-		p.As = cmpptr
-		p.To.Type = obj.TYPE_CONST
-		p.To.Offset = 0
-		p1.As = x86.AJNE
-		p1.From.Type = obj.TYPE_CONST
-		p1.From.Offset = 1 // likely
-		p1.To.Type = obj.TYPE_BRANCH
-		p1.To.Val = p2.Link
-
-		// crash by write to memory address 0.
-		// if possible, since we know arg is 0, use 0(arg),
-		// which will be shorter to encode than plain 0.
-		p2.As = x86.AMOVL
-
-		p2.From.Type = obj.TYPE_REG
-		p2.From.Reg = x86.REG_AX
-		if regtyp(&p.From) {
-			p2.To.Type = obj.TYPE_MEM
-			p2.To.Reg = p.From.Reg
-		} else {
-			p2.To.Type = obj.TYPE_MEM
-			p2.To.Reg = x86.REG_NONE
-		}
-
-		p2.To.Offset = 0
-	}
-}
-
-// addr += index*width if possible.
-func addindex(index *gc.Node, width int64, addr *gc.Node) bool {
-	switch width {
-	case 1, 2, 4, 8:
-		p1 := gins(x86.ALEAQ, index, addr)
-		p1.From.Type = obj.TYPE_MEM
-		p1.From.Scale = int16(width)
-		p1.From.Index = p1.From.Reg
-		p1.From.Reg = p1.To.Reg
-		return true
-	}
-	return false
-}
-
-// res = runtime.getg()
-func getg(res *gc.Node) {
-	var n1 gc.Node
-	gc.Regalloc(&n1, res.Type, res)
-	mov := optoas(gc.OAS, gc.Types[gc.Tptr])
-	p := gins(mov, nil, &n1)
-	p.From.Type = obj.TYPE_REG
-	p.From.Reg = x86.REG_TLS
-	p = gins(mov, nil, &n1)
-	p.From = p.To
-	p.From.Type = obj.TYPE_MEM
-	p.From.Index = x86.REG_TLS
-	p.From.Scale = 1
-	gmove(&n1, res)
-	gc.Regfree(&n1)
-}
diff --git a/src/cmd/compile/internal/amd64/gsubr.go b/src/cmd/compile/internal/amd64/gsubr.go
index 09bead2..76fca89 100644
--- a/src/cmd/compile/internal/amd64/gsubr.go
+++ b/src/cmd/compile/internal/amd64/gsubr.go
@@ -31,7 +31,6 @@
 package amd64
 
 import (
-	"cmd/compile/internal/big"
 	"cmd/compile/internal/gc"
 	"cmd/internal/obj"
 	"cmd/internal/obj/x86"
@@ -48,523 +47,6 @@
 	x86.REG_SP, // for stack
 }
 
-/*
- * generate
- *	as $c, reg
- */
-func gconreg(as obj.As, c int64, reg int) {
-	var nr gc.Node
-
-	switch as {
-	case x86.AADDL,
-		x86.AMOVL,
-		x86.ALEAL:
-		gc.Nodreg(&nr, gc.Types[gc.TINT32], reg)
-
-	default:
-		gc.Nodreg(&nr, gc.Types[gc.TINT64], reg)
-	}
-
-	ginscon(as, c, &nr)
-}
-
-/*
- * generate
- *	as $c, n
- */
-func ginscon(as obj.As, c int64, n2 *gc.Node) {
-	var n1 gc.Node
-
-	switch as {
-	case x86.AADDL,
-		x86.AMOVL,
-		x86.ALEAL:
-		gc.Nodconst(&n1, gc.Types[gc.TINT32], c)
-
-	default:
-		gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
-	}
-
-	if as != x86.AMOVQ && (c < -(1<<31) || c >= 1<<31) {
-		// cannot have 64-bit immediate in ADD, etc.
-		// instead, MOV into register first.
-		var ntmp gc.Node
-		gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)
-
-		gins(x86.AMOVQ, &n1, &ntmp)
-		gins(as, &ntmp, n2)
-		gc.Regfree(&ntmp)
-		return
-	}
-
-	gins(as, &n1, n2)
-}
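
The range test in ginscon reflects an x86-64 encoding rule: ALU instructions carry at most a sign-extended 32-bit immediate, and only MOVQ accepts a full 64-bit one, so out-of-range constants must be staged through a scratch register. The encodability test, as a tiny sketch (fitsInSext32 is an illustrative name):

	package main

	import "fmt"

	// fitsInSext32 reports whether c fits in the sign-extended 32-bit
	// immediate field that x86-64 ALU instructions accept; ginscon's
	// c < -(1<<31) || c >= 1<<31 test is the negation of this.
	func fitsInSext32(c int64) bool {
		return int64(int32(c)) == c
	}

	func main() {
		fmt.Println(fitsInSext32(1<<31 - 1)) // true
		fmt.Println(fitsInSext32(1 << 31))   // false: needs MOVQ staging
	}
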
-
-func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
-	if t.IsInteger() && n1.Op == gc.OLITERAL && gc.Smallintconst(n1) && n2.Op != gc.OLITERAL {
-		// Reverse comparison to place constant last.
-		op = gc.Brrev(op)
-		n1, n2 = n2, n1
-	}
-	// General case.
-	var r1, r2, g1, g2 gc.Node
-
-	// A special case to make write barriers more efficient.
-	// Comparing the first field of a named struct can be done directly.
-	base := n1
-	if n1.Op == gc.ODOT && n1.Left.Type.IsStruct() && n1.Left.Type.Field(0).Sym == n1.Sym {
-		base = n1.Left
-	}
-
-	if base.Op == gc.ONAME && base.Class != gc.PAUTOHEAP || n1.Op == gc.OINDREG {
-		r1 = *n1
-	} else {
-		gc.Regalloc(&r1, t, n1)
-		gc.Regalloc(&g1, n1.Type, &r1)
-		gc.Cgen(n1, &g1)
-		gmove(&g1, &r1)
-	}
-	if n2.Op == gc.OLITERAL && t.IsInteger() && gc.Smallintconst(n2) {
-		r2 = *n2
-	} else {
-		gc.Regalloc(&r2, t, n2)
-		gc.Regalloc(&g2, n1.Type, &r2)
-		gc.Cgen(n2, &g2)
-		gmove(&g2, &r2)
-	}
-	gins(optoas(gc.OCMP, t), &r1, &r2)
-	if r1.Op == gc.OREGISTER {
-		gc.Regfree(&g1)
-		gc.Regfree(&r1)
-	}
-	if r2.Op == gc.OREGISTER {
-		gc.Regfree(&g2)
-		gc.Regfree(&r2)
-	}
-	return gc.Gbranch(optoas(op, t), nil, likely)
-}
-
-func ginsboolval(a obj.As, n *gc.Node) {
-	gins(jmptoset(a), nil, n)
-}
-
-// set up nodes representing 2^63
-var (
-	bigi         gc.Node
-	bigf         gc.Node
-	bignodes_did bool
-)
-
-func bignodes() {
-	if bignodes_did {
-		return
-	}
-	bignodes_did = true
-
-	var i big.Int
-	i.SetInt64(1)
-	i.Lsh(&i, 63)
-
-	gc.Nodconst(&bigi, gc.Types[gc.TUINT64], 0)
-	bigi.SetBigInt(&i)
-
-	bigi.Convconst(&bigf, gc.Types[gc.TFLOAT64])
-}
-
-/*
- * generate move:
- *	t = f
- * hard part is conversions.
- */
-func gmove(f *gc.Node, t *gc.Node) {
-	if gc.Debug['M'] != 0 {
-		fmt.Printf("gmove %L -> %L\n", f, t)
-	}
-
-	ft := gc.Simsimtype(f.Type)
-	tt := gc.Simsimtype(t.Type)
-	cvt := t.Type
-
-	if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
-		gc.Complexmove(f, t)
-		return
-	}
-
-	// cannot have two memory operands
-	var a obj.As
-	if gc.Ismem(f) && gc.Ismem(t) {
-		goto hard
-	}
-
-	// convert constant to desired type
-	if f.Op == gc.OLITERAL {
-		var con gc.Node
-		f.Convconst(&con, t.Type)
-		f = &con
-		ft = tt // so big switch will choose a simple mov
-
-		// some constants can't move directly to memory.
-		if gc.Ismem(t) {
-			// float constants come from memory.
-			if gc.Isfloat[tt] {
-				goto hard
-			}
-
-			// 64-bit immediates are really 32-bit sign-extended
-			// unless moving into a register.
-			if gc.Isint[tt] {
-				if i := con.Int64(); int64(int32(i)) != i {
-					goto hard
-				}
-			}
-		}
-	}
-
-	// value -> value copy, only one memory operand.
-	// figure out the instruction to use.
-	// break out of switch for one-instruction gins.
-	// goto rdst for "destination must be register".
-	// goto hard for "convert to cvt type first".
-	// otherwise handle and return.
-
-	switch uint32(ft)<<16 | uint32(tt) {
-	default:
-		gc.Dump("f", f)
-		gc.Dump("t", t)
-		gc.Fatalf("gmove %L -> %L", f.Type, t.Type)
-
-		/*
-		 * integer copy and truncate
-		 */
-	case gc.TINT8<<16 | gc.TINT8, // same size
-		gc.TINT8<<16 | gc.TUINT8,
-		gc.TUINT8<<16 | gc.TINT8,
-		gc.TUINT8<<16 | gc.TUINT8,
-		gc.TINT16<<16 | gc.TINT8,
-		// truncate
-		gc.TUINT16<<16 | gc.TINT8,
-		gc.TINT32<<16 | gc.TINT8,
-		gc.TUINT32<<16 | gc.TINT8,
-		gc.TINT64<<16 | gc.TINT8,
-		gc.TUINT64<<16 | gc.TINT8,
-		gc.TINT16<<16 | gc.TUINT8,
-		gc.TUINT16<<16 | gc.TUINT8,
-		gc.TINT32<<16 | gc.TUINT8,
-		gc.TUINT32<<16 | gc.TUINT8,
-		gc.TINT64<<16 | gc.TUINT8,
-		gc.TUINT64<<16 | gc.TUINT8:
-		a = x86.AMOVB
-
-	case gc.TINT16<<16 | gc.TINT16, // same size
-		gc.TINT16<<16 | gc.TUINT16,
-		gc.TUINT16<<16 | gc.TINT16,
-		gc.TUINT16<<16 | gc.TUINT16,
-		gc.TINT32<<16 | gc.TINT16,
-		// truncate
-		gc.TUINT32<<16 | gc.TINT16,
-		gc.TINT64<<16 | gc.TINT16,
-		gc.TUINT64<<16 | gc.TINT16,
-		gc.TINT32<<16 | gc.TUINT16,
-		gc.TUINT32<<16 | gc.TUINT16,
-		gc.TINT64<<16 | gc.TUINT16,
-		gc.TUINT64<<16 | gc.TUINT16:
-		a = x86.AMOVW
-
-	case gc.TINT32<<16 | gc.TINT32, // same size
-		gc.TINT32<<16 | gc.TUINT32,
-		gc.TUINT32<<16 | gc.TINT32,
-		gc.TUINT32<<16 | gc.TUINT32:
-		a = x86.AMOVL
-
-	case gc.TINT64<<16 | gc.TINT32, // truncate
-		gc.TUINT64<<16 | gc.TINT32,
-		gc.TINT64<<16 | gc.TUINT32,
-		gc.TUINT64<<16 | gc.TUINT32:
-		a = x86.AMOVQL
-
-	case gc.TINT64<<16 | gc.TINT64, // same size
-		gc.TINT64<<16 | gc.TUINT64,
-		gc.TUINT64<<16 | gc.TINT64,
-		gc.TUINT64<<16 | gc.TUINT64:
-		a = x86.AMOVQ
-
-		/*
-		 * integer up-conversions
-		 */
-	case gc.TINT8<<16 | gc.TINT16, // sign extend int8
-		gc.TINT8<<16 | gc.TUINT16:
-		a = x86.AMOVBWSX
-
-		goto rdst
-
-	case gc.TINT8<<16 | gc.TINT32,
-		gc.TINT8<<16 | gc.TUINT32:
-		a = x86.AMOVBLSX
-		goto rdst
-
-	case gc.TINT8<<16 | gc.TINT64,
-		gc.TINT8<<16 | gc.TUINT64:
-		a = x86.AMOVBQSX
-		goto rdst
-
-	case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8
-		gc.TUINT8<<16 | gc.TUINT16:
-		a = x86.AMOVBWZX
-
-		goto rdst
-
-	case gc.TUINT8<<16 | gc.TINT32,
-		gc.TUINT8<<16 | gc.TUINT32:
-		a = x86.AMOVBLZX
-		goto rdst
-
-	case gc.TUINT8<<16 | gc.TINT64,
-		gc.TUINT8<<16 | gc.TUINT64:
-		a = x86.AMOVBQZX
-		goto rdst
-
-	case gc.TINT16<<16 | gc.TINT32, // sign extend int16
-		gc.TINT16<<16 | gc.TUINT32:
-		a = x86.AMOVWLSX
-
-		goto rdst
-
-	case gc.TINT16<<16 | gc.TINT64,
-		gc.TINT16<<16 | gc.TUINT64:
-		a = x86.AMOVWQSX
-		goto rdst
-
-	case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16
-		gc.TUINT16<<16 | gc.TUINT32:
-		a = x86.AMOVWLZX
-
-		goto rdst
-
-	case gc.TUINT16<<16 | gc.TINT64,
-		gc.TUINT16<<16 | gc.TUINT64:
-		a = x86.AMOVWQZX
-		goto rdst
-
-	case gc.TINT32<<16 | gc.TINT64, // sign extend int32
-		gc.TINT32<<16 | gc.TUINT64:
-		a = x86.AMOVLQSX
-
-		goto rdst
-
-		// AMOVL into a register zeros the top of the register,
-	// so this is not always necessary, but if we rely on AMOVL
-	// the optimizer is almost certain to screw with us.
-	case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
-		gc.TUINT32<<16 | gc.TUINT64:
-		a = x86.AMOVLQZX
-
-		goto rdst
-
-		/*
-		* float to integer
-		 */
-	case gc.TFLOAT32<<16 | gc.TINT32:
-		a = x86.ACVTTSS2SL
-
-		goto rdst
-
-	case gc.TFLOAT64<<16 | gc.TINT32:
-		a = x86.ACVTTSD2SL
-		goto rdst
-
-	case gc.TFLOAT32<<16 | gc.TINT64:
-		a = x86.ACVTTSS2SQ
-		goto rdst
-
-	case gc.TFLOAT64<<16 | gc.TINT64:
-		a = x86.ACVTTSD2SQ
-		goto rdst
-
-		// convert via int32.
-	case gc.TFLOAT32<<16 | gc.TINT16,
-		gc.TFLOAT32<<16 | gc.TINT8,
-		gc.TFLOAT32<<16 | gc.TUINT16,
-		gc.TFLOAT32<<16 | gc.TUINT8,
-		gc.TFLOAT64<<16 | gc.TINT16,
-		gc.TFLOAT64<<16 | gc.TINT8,
-		gc.TFLOAT64<<16 | gc.TUINT16,
-		gc.TFLOAT64<<16 | gc.TUINT8:
-		cvt = gc.Types[gc.TINT32]
-
-		goto hard
-
-		// convert via int64.
-	case gc.TFLOAT32<<16 | gc.TUINT32,
-		gc.TFLOAT64<<16 | gc.TUINT32:
-		cvt = gc.Types[gc.TINT64]
-
-		goto hard
-
-		// algorithm is:
-	//	if small enough, use native float64 -> int64 conversion.
-	//	otherwise, subtract 2^63, convert, and add it back.
-	case gc.TFLOAT32<<16 | gc.TUINT64,
-		gc.TFLOAT64<<16 | gc.TUINT64:
-		a := x86.ACVTTSS2SQ
-
-		if ft == gc.TFLOAT64 {
-			a = x86.ACVTTSD2SQ
-		}
-		bignodes()
-		var r1 gc.Node
-		gc.Regalloc(&r1, gc.Types[ft], nil)
-		var r2 gc.Node
-		gc.Regalloc(&r2, gc.Types[tt], t)
-		var r3 gc.Node
-		gc.Regalloc(&r3, gc.Types[ft], nil)
-		var r4 gc.Node
-		gc.Regalloc(&r4, gc.Types[tt], nil)
-		gins(optoas(gc.OAS, f.Type), f, &r1)
-		gins(optoas(gc.OCMP, f.Type), &bigf, &r1)
-		p1 := gc.Gbranch(optoas(gc.OLE, f.Type), nil, +1)
-		gins(a, &r1, &r2)
-		p2 := gc.Gbranch(obj.AJMP, nil, 0)
-		gc.Patch(p1, gc.Pc)
-		gins(optoas(gc.OAS, f.Type), &bigf, &r3)
-		gins(optoas(gc.OSUB, f.Type), &r3, &r1)
-		gins(a, &r1, &r2)
-		gins(x86.AMOVQ, &bigi, &r4)
-		gins(x86.AXORQ, &r4, &r2)
-		gc.Patch(p2, gc.Pc)
-		gmove(&r2, t)
-		gc.Regfree(&r4)
-		gc.Regfree(&r3)
-		gc.Regfree(&r2)
-		gc.Regfree(&r1)
-		return
-
-		/*
-		 * integer to float
-		 */
-	case gc.TINT32<<16 | gc.TFLOAT32:
-		a = x86.ACVTSL2SS
-
-		goto rdst
-
-	case gc.TINT32<<16 | gc.TFLOAT64:
-		a = x86.ACVTSL2SD
-		goto rdst
-
-	case gc.TINT64<<16 | gc.TFLOAT32:
-		a = x86.ACVTSQ2SS
-		goto rdst
-
-	case gc.TINT64<<16 | gc.TFLOAT64:
-		a = x86.ACVTSQ2SD
-		goto rdst
-
-		// convert via int32
-	case gc.TINT16<<16 | gc.TFLOAT32,
-		gc.TINT16<<16 | gc.TFLOAT64,
-		gc.TINT8<<16 | gc.TFLOAT32,
-		gc.TINT8<<16 | gc.TFLOAT64,
-		gc.TUINT16<<16 | gc.TFLOAT32,
-		gc.TUINT16<<16 | gc.TFLOAT64,
-		gc.TUINT8<<16 | gc.TFLOAT32,
-		gc.TUINT8<<16 | gc.TFLOAT64:
-		cvt = gc.Types[gc.TINT32]
-
-		goto hard
-
-		// convert via int64.
-	case gc.TUINT32<<16 | gc.TFLOAT32,
-		gc.TUINT32<<16 | gc.TFLOAT64:
-		cvt = gc.Types[gc.TINT64]
-
-		goto hard
-
-		// algorithm is:
-	//	if small enough, use native int64 -> float64 conversion.
-	//	otherwise, halve (rounding to odd), convert, and double.
-	case gc.TUINT64<<16 | gc.TFLOAT32,
-		gc.TUINT64<<16 | gc.TFLOAT64:
-		a := x86.ACVTSQ2SS
-
-		if tt == gc.TFLOAT64 {
-			a = x86.ACVTSQ2SD
-		}
-		var zero gc.Node
-		gc.Nodconst(&zero, gc.Types[gc.TUINT64], 0)
-		var one gc.Node
-		gc.Nodconst(&one, gc.Types[gc.TUINT64], 1)
-		var r1 gc.Node
-		gc.Regalloc(&r1, f.Type, f)
-		var r2 gc.Node
-		gc.Regalloc(&r2, t.Type, t)
-		var r3 gc.Node
-		gc.Regalloc(&r3, f.Type, nil)
-		var r4 gc.Node
-		gc.Regalloc(&r4, f.Type, nil)
-		gmove(f, &r1)
-		gins(x86.ACMPQ, &r1, &zero)
-		p1 := gc.Gbranch(x86.AJLT, nil, +1)
-		gins(a, &r1, &r2)
-		p2 := gc.Gbranch(obj.AJMP, nil, 0)
-		gc.Patch(p1, gc.Pc)
-		gmove(&r1, &r3)
-		gins(x86.ASHRQ, &one, &r3)
-		gmove(&r1, &r4)
-		gins(x86.AANDL, &one, &r4)
-		gins(x86.AORQ, &r4, &r3)
-		gins(a, &r3, &r2)
-		gins(optoas(gc.OADD, t.Type), &r2, &r2)
-		gc.Patch(p2, gc.Pc)
-		gmove(&r2, t)
-		gc.Regfree(&r4)
-		gc.Regfree(&r3)
-		gc.Regfree(&r2)
-		gc.Regfree(&r1)
-		return
-
-		/*
-		 * float to float
-		 */
-	case gc.TFLOAT32<<16 | gc.TFLOAT32:
-		a = x86.AMOVSS
-
-	case gc.TFLOAT64<<16 | gc.TFLOAT64:
-		a = x86.AMOVSD
-
-	case gc.TFLOAT32<<16 | gc.TFLOAT64:
-		a = x86.ACVTSS2SD
-		goto rdst
-
-	case gc.TFLOAT64<<16 | gc.TFLOAT32:
-		a = x86.ACVTSD2SS
-		goto rdst
-	}
-
-	gins(a, f, t)
-	return
-
-	// requires register destination
-rdst:
-	{
-		var r1 gc.Node
-		gc.Regalloc(&r1, t.Type, t)
-
-		gins(a, f, &r1)
-		gmove(&r1, t)
-		gc.Regfree(&r1)
-		return
-	}
-
-	// requires register intermediate
-hard:
-	var r1 gc.Node
-	gc.Regalloc(&r1, cvt, t)
-
-	gmove(f, &r1)
-	gmove(&r1, t)
-	gc.Regfree(&r1)
-	return
-}
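
The two software conversions in gmove are worth restating. float64 -> uint64: values below 2^63 convert with the signed CVTTSD2SQ directly; larger ones have 2^63 subtracted first, and the XORQ with bigi flips the (known clear) top bit of the integer result, which adds 2^63 back. uint64 -> float64: if the top bit is clear the signed CVTSQ2SD suffices; otherwise the value is halved with its low bit ORed back in (round-to-odd, so the final doubling performs the single rounding step) and the result doubled. Portable sketches of both (function names are illustrative only):

	package main

	import "fmt"

	// f64ToU64 mirrors the deleted TFLOAT64 -> TUINT64 path in gmove.
	func f64ToU64(f float64) uint64 {
		if f < 1<<63 {
			return uint64(int64(f)) // native signed conversion suffices
		}
		// subtract 2^63, convert, then add 2^63 back by flipping bit 63
		return uint64(int64(f-(1<<63))) ^ 1<<63
	}

	// u64ToF64 mirrors the deleted TUINT64 -> TFLOAT64 path.
	func u64ToF64(u uint64) float64 {
		if int64(u) >= 0 {
			return float64(int64(u)) // top bit clear: native conversion
		}
		half := u>>1 | u&1 // halve, rounding to odd
		f := float64(int64(half))
		return f + f // the doubling does the only rounding
	}

	func main() {
		fmt.Println(f64ToU64(1 << 63))   // 9223372036854775808
		fmt.Println(u64ToF64(1<<64 - 1)) // 1.8446744073709552e+19
	}
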
-
 func samaddr(f *gc.Node, t *gc.Node) bool {
 	if f.Op != t.Op {
 		return false
@@ -679,745 +161,3 @@
 	gc.Nodreg(&reg, gc.Types[gc.TINT], x86.REG_AX)
 	gins(x86.AXCHGL, &reg, &reg)
 }
-
-/*
- * return Axxx for Oxxx on type t.
- */
-func optoas(op gc.Op, t *gc.Type) obj.As {
-	if t == nil {
-		gc.Fatalf("optoas: t is nil")
-	}
-
-	// avoid constant conversions in switches below
-	const (
-		OMINUS_  = uint32(gc.OMINUS) << 16
-		OLSH_    = uint32(gc.OLSH) << 16
-		ORSH_    = uint32(gc.ORSH) << 16
-		OADD_    = uint32(gc.OADD) << 16
-		OSUB_    = uint32(gc.OSUB) << 16
-		OMUL_    = uint32(gc.OMUL) << 16
-		ODIV_    = uint32(gc.ODIV) << 16
-		OMOD_    = uint32(gc.OMOD) << 16
-		OOR_     = uint32(gc.OOR) << 16
-		OAND_    = uint32(gc.OAND) << 16
-		OXOR_    = uint32(gc.OXOR) << 16
-		OEQ_     = uint32(gc.OEQ) << 16
-		ONE_     = uint32(gc.ONE) << 16
-		OLT_     = uint32(gc.OLT) << 16
-		OLE_     = uint32(gc.OLE) << 16
-		OGE_     = uint32(gc.OGE) << 16
-		OGT_     = uint32(gc.OGT) << 16
-		OCMP_    = uint32(gc.OCMP) << 16
-		OPS_     = uint32(gc.OPS) << 16
-		OPC_     = uint32(gc.OPC) << 16
-		OAS_     = uint32(gc.OAS) << 16
-		OHMUL_   = uint32(gc.OHMUL) << 16
-		OSQRT_   = uint32(gc.OSQRT) << 16
-		OADDR_   = uint32(gc.OADDR) << 16
-		OINC_    = uint32(gc.OINC) << 16
-		ODEC_    = uint32(gc.ODEC) << 16
-		OLROT_   = uint32(gc.OLROT) << 16
-		ORROTC_  = uint32(gc.ORROTC) << 16
-		OEXTEND_ = uint32(gc.OEXTEND) << 16
-	)
-
-	a := obj.AXXX
-	switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
-	default:
-		gc.Fatalf("optoas: no entry %v-%v", op, t)
-
-	case OADDR_ | gc.TPTR32:
-		a = x86.ALEAL
-
-	case OADDR_ | gc.TPTR64:
-		a = x86.ALEAQ
-
-	case OEQ_ | gc.TBOOL,
-		OEQ_ | gc.TINT8,
-		OEQ_ | gc.TUINT8,
-		OEQ_ | gc.TINT16,
-		OEQ_ | gc.TUINT16,
-		OEQ_ | gc.TINT32,
-		OEQ_ | gc.TUINT32,
-		OEQ_ | gc.TINT64,
-		OEQ_ | gc.TUINT64,
-		OEQ_ | gc.TPTR32,
-		OEQ_ | gc.TPTR64,
-		OEQ_ | gc.TFLOAT32,
-		OEQ_ | gc.TFLOAT64:
-		a = x86.AJEQ
-
-	case ONE_ | gc.TBOOL,
-		ONE_ | gc.TINT8,
-		ONE_ | gc.TUINT8,
-		ONE_ | gc.TINT16,
-		ONE_ | gc.TUINT16,
-		ONE_ | gc.TINT32,
-		ONE_ | gc.TUINT32,
-		ONE_ | gc.TINT64,
-		ONE_ | gc.TUINT64,
-		ONE_ | gc.TPTR32,
-		ONE_ | gc.TPTR64,
-		ONE_ | gc.TFLOAT32,
-		ONE_ | gc.TFLOAT64:
-		a = x86.AJNE
-
-	case OPS_ | gc.TBOOL,
-		OPS_ | gc.TINT8,
-		OPS_ | gc.TUINT8,
-		OPS_ | gc.TINT16,
-		OPS_ | gc.TUINT16,
-		OPS_ | gc.TINT32,
-		OPS_ | gc.TUINT32,
-		OPS_ | gc.TINT64,
-		OPS_ | gc.TUINT64,
-		OPS_ | gc.TPTR32,
-		OPS_ | gc.TPTR64,
-		OPS_ | gc.TFLOAT32,
-		OPS_ | gc.TFLOAT64:
-		a = x86.AJPS
-
-	case OPC_ | gc.TBOOL,
-		OPC_ | gc.TINT8,
-		OPC_ | gc.TUINT8,
-		OPC_ | gc.TINT16,
-		OPC_ | gc.TUINT16,
-		OPC_ | gc.TINT32,
-		OPC_ | gc.TUINT32,
-		OPC_ | gc.TINT64,
-		OPC_ | gc.TUINT64,
-		OPC_ | gc.TPTR32,
-		OPC_ | gc.TPTR64,
-		OPC_ | gc.TFLOAT32,
-		OPC_ | gc.TFLOAT64:
-		a = x86.AJPC
-
-	case OLT_ | gc.TINT8,
-		OLT_ | gc.TINT16,
-		OLT_ | gc.TINT32,
-		OLT_ | gc.TINT64:
-		a = x86.AJLT
-
-	case OLT_ | gc.TUINT8,
-		OLT_ | gc.TUINT16,
-		OLT_ | gc.TUINT32,
-		OLT_ | gc.TUINT64:
-		a = x86.AJCS
-
-	case OLE_ | gc.TINT8,
-		OLE_ | gc.TINT16,
-		OLE_ | gc.TINT32,
-		OLE_ | gc.TINT64:
-		a = x86.AJLE
-
-	case OLE_ | gc.TUINT8,
-		OLE_ | gc.TUINT16,
-		OLE_ | gc.TUINT32,
-		OLE_ | gc.TUINT64:
-		a = x86.AJLS
-
-	case OGT_ | gc.TINT8,
-		OGT_ | gc.TINT16,
-		OGT_ | gc.TINT32,
-		OGT_ | gc.TINT64:
-		a = x86.AJGT
-
-	case OGT_ | gc.TUINT8,
-		OGT_ | gc.TUINT16,
-		OGT_ | gc.TUINT32,
-		OGT_ | gc.TUINT64,
-		OLT_ | gc.TFLOAT32,
-		OLT_ | gc.TFLOAT64:
-		a = x86.AJHI
-
-	case OGE_ | gc.TINT8,
-		OGE_ | gc.TINT16,
-		OGE_ | gc.TINT32,
-		OGE_ | gc.TINT64:
-		a = x86.AJGE
-
-	case OGE_ | gc.TUINT8,
-		OGE_ | gc.TUINT16,
-		OGE_ | gc.TUINT32,
-		OGE_ | gc.TUINT64,
-		OLE_ | gc.TFLOAT32,
-		OLE_ | gc.TFLOAT64:
-		a = x86.AJCC
-
-	case OCMP_ | gc.TBOOL,
-		OCMP_ | gc.TINT8,
-		OCMP_ | gc.TUINT8:
-		a = x86.ACMPB
-
-	case OCMP_ | gc.TINT16,
-		OCMP_ | gc.TUINT16:
-		a = x86.ACMPW
-
-	case OCMP_ | gc.TINT32,
-		OCMP_ | gc.TUINT32,
-		OCMP_ | gc.TPTR32:
-		a = x86.ACMPL
-
-	case OCMP_ | gc.TINT64,
-		OCMP_ | gc.TUINT64,
-		OCMP_ | gc.TPTR64:
-		a = x86.ACMPQ
-
-	case OCMP_ | gc.TFLOAT32:
-		a = x86.AUCOMISS
-
-	case OCMP_ | gc.TFLOAT64:
-		a = x86.AUCOMISD
-
-	case OAS_ | gc.TBOOL,
-		OAS_ | gc.TINT8,
-		OAS_ | gc.TUINT8:
-		a = x86.AMOVB
-
-	case OAS_ | gc.TINT16,
-		OAS_ | gc.TUINT16:
-		a = x86.AMOVW
-
-	case OAS_ | gc.TINT32,
-		OAS_ | gc.TUINT32,
-		OAS_ | gc.TPTR32:
-		a = x86.AMOVL
-
-	case OAS_ | gc.TINT64,
-		OAS_ | gc.TUINT64,
-		OAS_ | gc.TPTR64:
-		a = x86.AMOVQ
-
-	case OAS_ | gc.TFLOAT32:
-		a = x86.AMOVSS
-
-	case OAS_ | gc.TFLOAT64:
-		a = x86.AMOVSD
-
-	case OADD_ | gc.TINT8,
-		OADD_ | gc.TUINT8:
-		a = x86.AADDB
-
-	case OADD_ | gc.TINT16,
-		OADD_ | gc.TUINT16:
-		a = x86.AADDW
-
-	case OADD_ | gc.TINT32,
-		OADD_ | gc.TUINT32,
-		OADD_ | gc.TPTR32:
-		a = x86.AADDL
-
-	case OADD_ | gc.TINT64,
-		OADD_ | gc.TUINT64,
-		OADD_ | gc.TPTR64:
-		a = x86.AADDQ
-
-	case OADD_ | gc.TFLOAT32:
-		a = x86.AADDSS
-
-	case OADD_ | gc.TFLOAT64:
-		a = x86.AADDSD
-
-	case OSUB_ | gc.TINT8,
-		OSUB_ | gc.TUINT8:
-		a = x86.ASUBB
-
-	case OSUB_ | gc.TINT16,
-		OSUB_ | gc.TUINT16:
-		a = x86.ASUBW
-
-	case OSUB_ | gc.TINT32,
-		OSUB_ | gc.TUINT32,
-		OSUB_ | gc.TPTR32:
-		a = x86.ASUBL
-
-	case OSUB_ | gc.TINT64,
-		OSUB_ | gc.TUINT64,
-		OSUB_ | gc.TPTR64:
-		a = x86.ASUBQ
-
-	case OSUB_ | gc.TFLOAT32:
-		a = x86.ASUBSS
-
-	case OSUB_ | gc.TFLOAT64:
-		a = x86.ASUBSD
-
-	case OINC_ | gc.TINT8,
-		OINC_ | gc.TUINT8:
-		a = x86.AINCB
-
-	case OINC_ | gc.TINT16,
-		OINC_ | gc.TUINT16:
-		a = x86.AINCW
-
-	case OINC_ | gc.TINT32,
-		OINC_ | gc.TUINT32,
-		OINC_ | gc.TPTR32:
-		a = x86.AINCL
-
-	case OINC_ | gc.TINT64,
-		OINC_ | gc.TUINT64,
-		OINC_ | gc.TPTR64:
-		a = x86.AINCQ
-
-	case ODEC_ | gc.TINT8,
-		ODEC_ | gc.TUINT8:
-		a = x86.ADECB
-
-	case ODEC_ | gc.TINT16,
-		ODEC_ | gc.TUINT16:
-		a = x86.ADECW
-
-	case ODEC_ | gc.TINT32,
-		ODEC_ | gc.TUINT32,
-		ODEC_ | gc.TPTR32:
-		a = x86.ADECL
-
-	case ODEC_ | gc.TINT64,
-		ODEC_ | gc.TUINT64,
-		ODEC_ | gc.TPTR64:
-		a = x86.ADECQ
-
-	case OMINUS_ | gc.TINT8,
-		OMINUS_ | gc.TUINT8:
-		a = x86.ANEGB
-
-	case OMINUS_ | gc.TINT16,
-		OMINUS_ | gc.TUINT16:
-		a = x86.ANEGW
-
-	case OMINUS_ | gc.TINT32,
-		OMINUS_ | gc.TUINT32,
-		OMINUS_ | gc.TPTR32:
-		a = x86.ANEGL
-
-	case OMINUS_ | gc.TINT64,
-		OMINUS_ | gc.TUINT64,
-		OMINUS_ | gc.TPTR64:
-		a = x86.ANEGQ
-
-	case OAND_ | gc.TBOOL,
-		OAND_ | gc.TINT8,
-		OAND_ | gc.TUINT8:
-		a = x86.AANDB
-
-	case OAND_ | gc.TINT16,
-		OAND_ | gc.TUINT16:
-		a = x86.AANDW
-
-	case OAND_ | gc.TINT32,
-		OAND_ | gc.TUINT32,
-		OAND_ | gc.TPTR32:
-		a = x86.AANDL
-
-	case OAND_ | gc.TINT64,
-		OAND_ | gc.TUINT64,
-		OAND_ | gc.TPTR64:
-		a = x86.AANDQ
-
-	case OOR_ | gc.TBOOL,
-		OOR_ | gc.TINT8,
-		OOR_ | gc.TUINT8:
-		a = x86.AORB
-
-	case OOR_ | gc.TINT16,
-		OOR_ | gc.TUINT16:
-		a = x86.AORW
-
-	case OOR_ | gc.TINT32,
-		OOR_ | gc.TUINT32,
-		OOR_ | gc.TPTR32:
-		a = x86.AORL
-
-	case OOR_ | gc.TINT64,
-		OOR_ | gc.TUINT64,
-		OOR_ | gc.TPTR64:
-		a = x86.AORQ
-
-	case OXOR_ | gc.TINT8,
-		OXOR_ | gc.TUINT8:
-		a = x86.AXORB
-
-	case OXOR_ | gc.TINT16,
-		OXOR_ | gc.TUINT16:
-		a = x86.AXORW
-
-	case OXOR_ | gc.TINT32,
-		OXOR_ | gc.TUINT32,
-		OXOR_ | gc.TPTR32:
-		a = x86.AXORL
-
-	case OXOR_ | gc.TINT64,
-		OXOR_ | gc.TUINT64,
-		OXOR_ | gc.TPTR64:
-		a = x86.AXORQ
-
-	case OLROT_ | gc.TINT8,
-		OLROT_ | gc.TUINT8:
-		a = x86.AROLB
-
-	case OLROT_ | gc.TINT16,
-		OLROT_ | gc.TUINT16:
-		a = x86.AROLW
-
-	case OLROT_ | gc.TINT32,
-		OLROT_ | gc.TUINT32,
-		OLROT_ | gc.TPTR32:
-		a = x86.AROLL
-
-	case OLROT_ | gc.TINT64,
-		OLROT_ | gc.TUINT64,
-		OLROT_ | gc.TPTR64:
-		a = x86.AROLQ
-
-	case OLSH_ | gc.TINT8,
-		OLSH_ | gc.TUINT8:
-		a = x86.ASHLB
-
-	case OLSH_ | gc.TINT16,
-		OLSH_ | gc.TUINT16:
-		a = x86.ASHLW
-
-	case OLSH_ | gc.TINT32,
-		OLSH_ | gc.TUINT32,
-		OLSH_ | gc.TPTR32:
-		a = x86.ASHLL
-
-	case OLSH_ | gc.TINT64,
-		OLSH_ | gc.TUINT64,
-		OLSH_ | gc.TPTR64:
-		a = x86.ASHLQ
-
-	case ORSH_ | gc.TUINT8:
-		a = x86.ASHRB
-
-	case ORSH_ | gc.TUINT16:
-		a = x86.ASHRW
-
-	case ORSH_ | gc.TUINT32,
-		ORSH_ | gc.TPTR32:
-		a = x86.ASHRL
-
-	case ORSH_ | gc.TUINT64,
-		ORSH_ | gc.TPTR64:
-		a = x86.ASHRQ
-
-	case ORSH_ | gc.TINT8:
-		a = x86.ASARB
-
-	case ORSH_ | gc.TINT16:
-		a = x86.ASARW
-
-	case ORSH_ | gc.TINT32:
-		a = x86.ASARL
-
-	case ORSH_ | gc.TINT64:
-		a = x86.ASARQ
-
-	case ORROTC_ | gc.TINT8,
-		ORROTC_ | gc.TUINT8:
-		a = x86.ARCRB
-
-	case ORROTC_ | gc.TINT16,
-		ORROTC_ | gc.TUINT16:
-		a = x86.ARCRW
-
-	case ORROTC_ | gc.TINT32,
-		ORROTC_ | gc.TUINT32:
-		a = x86.ARCRL
-
-	case ORROTC_ | gc.TINT64,
-		ORROTC_ | gc.TUINT64:
-		a = x86.ARCRQ
-
-	case OHMUL_ | gc.TINT8,
-		OMUL_ | gc.TINT8,
-		OMUL_ | gc.TUINT8:
-		a = x86.AIMULB
-
-	case OHMUL_ | gc.TINT16,
-		OMUL_ | gc.TINT16,
-		OMUL_ | gc.TUINT16:
-		a = x86.AIMULW
-
-	case OHMUL_ | gc.TINT32,
-		OMUL_ | gc.TINT32,
-		OMUL_ | gc.TUINT32,
-		OMUL_ | gc.TPTR32:
-		a = x86.AIMULL
-
-	case OHMUL_ | gc.TINT64,
-		OMUL_ | gc.TINT64,
-		OMUL_ | gc.TUINT64,
-		OMUL_ | gc.TPTR64:
-		a = x86.AIMULQ
-
-	case OHMUL_ | gc.TUINT8:
-		a = x86.AMULB
-
-	case OHMUL_ | gc.TUINT16:
-		a = x86.AMULW
-
-	case OHMUL_ | gc.TUINT32,
-		OHMUL_ | gc.TPTR32:
-		a = x86.AMULL
-
-	case OHMUL_ | gc.TUINT64,
-		OHMUL_ | gc.TPTR64:
-		a = x86.AMULQ
-
-	case OMUL_ | gc.TFLOAT32:
-		a = x86.AMULSS
-
-	case OMUL_ | gc.TFLOAT64:
-		a = x86.AMULSD
-
-	case ODIV_ | gc.TINT8,
-		OMOD_ | gc.TINT8:
-		a = x86.AIDIVB
-
-	case ODIV_ | gc.TUINT8,
-		OMOD_ | gc.TUINT8:
-		a = x86.ADIVB
-
-	case ODIV_ | gc.TINT16,
-		OMOD_ | gc.TINT16:
-		a = x86.AIDIVW
-
-	case ODIV_ | gc.TUINT16,
-		OMOD_ | gc.TUINT16:
-		a = x86.ADIVW
-
-	case ODIV_ | gc.TINT32,
-		OMOD_ | gc.TINT32:
-		a = x86.AIDIVL
-
-	case ODIV_ | gc.TUINT32,
-		ODIV_ | gc.TPTR32,
-		OMOD_ | gc.TUINT32,
-		OMOD_ | gc.TPTR32:
-		a = x86.ADIVL
-
-	case ODIV_ | gc.TINT64,
-		OMOD_ | gc.TINT64:
-		a = x86.AIDIVQ
-
-	case ODIV_ | gc.TUINT64,
-		ODIV_ | gc.TPTR64,
-		OMOD_ | gc.TUINT64,
-		OMOD_ | gc.TPTR64:
-		a = x86.ADIVQ
-
-	case OEXTEND_ | gc.TINT16:
-		a = x86.ACWD
-
-	case OEXTEND_ | gc.TINT32:
-		a = x86.ACDQ
-
-	case OEXTEND_ | gc.TINT64:
-		a = x86.ACQO
-
-	case ODIV_ | gc.TFLOAT32:
-		a = x86.ADIVSS
-
-	case ODIV_ | gc.TFLOAT64:
-		a = x86.ADIVSD
-
-	case OSQRT_ | gc.TFLOAT64:
-		a = x86.ASQRTSD
-	}
-
-	return a
-}
-
-// jmptoset returns ASETxx for AJxx.
-func jmptoset(jmp obj.As) obj.As {
-	switch jmp {
-	case x86.AJEQ:
-		return x86.ASETEQ
-	case x86.AJNE:
-		return x86.ASETNE
-	case x86.AJLT:
-		return x86.ASETLT
-	case x86.AJCS:
-		return x86.ASETCS
-	case x86.AJLE:
-		return x86.ASETLE
-	case x86.AJLS:
-		return x86.ASETLS
-	case x86.AJGT:
-		return x86.ASETGT
-	case x86.AJHI:
-		return x86.ASETHI
-	case x86.AJGE:
-		return x86.ASETGE
-	case x86.AJCC:
-		return x86.ASETCC
-	case x86.AJMI:
-		return x86.ASETMI
-	case x86.AJOC:
-		return x86.ASETOC
-	case x86.AJOS:
-		return x86.ASETOS
-	case x86.AJPC:
-		return x86.ASETPC
-	case x86.AJPL:
-		return x86.ASETPL
-	case x86.AJPS:
-		return x86.ASETPS
-	}
-	gc.Fatalf("jmptoset: no entry for %v", jmp)
-	panic("unreachable")
-}
-
-const (
-	ODynam   = 1 << 0
-	OAddable = 1 << 1
-)
-
-var clean [20]gc.Node
-
-var cleani int = 0
-
-func sudoclean() {
-	if clean[cleani-1].Op != gc.OEMPTY {
-		gc.Regfree(&clean[cleani-1])
-	}
-	if clean[cleani-2].Op != gc.OEMPTY {
-		gc.Regfree(&clean[cleani-2])
-	}
-	cleani -= 2
-}
-
-/*
- * generate code to compute address of n,
- * a reference to a (perhaps nested) field inside
- * an array or struct.
- * return false on failure, true on success.
- * on success, leaves usable address in a.
- *
- * caller is responsible for calling sudoclean
- * after successful sudoaddable,
- * to release the register used for a.
- */
-func sudoaddable(as obj.As, n *gc.Node, a *obj.Addr) bool {
-	if n.Type == nil {
-		return false
-	}
-
-	*a = obj.Addr{}
-
-	switch n.Op {
-	case gc.OLITERAL:
-		if !gc.Isconst(n, gc.CTINT) {
-			break
-		}
-		v := n.Int64()
-		if v >= 32000 || v <= -32000 {
-			break
-		}
-		switch as {
-		default:
-			return false
-
-		case x86.AADDB,
-			x86.AADDW,
-			x86.AADDL,
-			x86.AADDQ,
-			x86.ASUBB,
-			x86.ASUBW,
-			x86.ASUBL,
-			x86.ASUBQ,
-			x86.AANDB,
-			x86.AANDW,
-			x86.AANDL,
-			x86.AANDQ,
-			x86.AORB,
-			x86.AORW,
-			x86.AORL,
-			x86.AORQ,
-			x86.AXORB,
-			x86.AXORW,
-			x86.AXORL,
-			x86.AXORQ,
-			x86.AINCB,
-			x86.AINCW,
-			x86.AINCL,
-			x86.AINCQ,
-			x86.ADECB,
-			x86.ADECW,
-			x86.ADECL,
-			x86.ADECQ,
-			x86.AMOVB,
-			x86.AMOVW,
-			x86.AMOVL,
-			x86.AMOVQ:
-			break
-		}
-
-		cleani += 2
-		reg := &clean[cleani-1]
-		reg1 := &clean[cleani-2]
-		reg.Op = gc.OEMPTY
-		reg1.Op = gc.OEMPTY
-		gc.Naddr(a, n)
-		return true
-
-	case gc.ODOT,
-		gc.ODOTPTR:
-		cleani += 2
-		reg := &clean[cleani-1]
-		reg1 := &clean[cleani-2]
-		reg.Op = gc.OEMPTY
-		reg1.Op = gc.OEMPTY
-		var nn *gc.Node
-		var oary [10]int64
-		o := gc.Dotoffset(n, oary[:], &nn)
-		if nn == nil {
-			sudoclean()
-			return false
-		}
-
-		if nn.Addable && o == 1 && oary[0] >= 0 {
-			// directly addressable set of DOTs
-			n1 := *nn
-
-			n1.Type = n.Type
-			n1.Xoffset += oary[0]
-			gc.Naddr(a, &n1)
-			return true
-		}
-
-		gc.Regalloc(reg, gc.Types[gc.Tptr], nil)
-		n1 := *reg
-		n1.Op = gc.OINDREG
-		if oary[0] >= 0 {
-			gc.Agen(nn, reg)
-			n1.Xoffset = oary[0]
-		} else {
-			gc.Cgen(nn, reg)
-			gc.Cgen_checknil(reg)
-			n1.Xoffset = -(oary[0] + 1)
-		}
-
-		for i := 1; i < o; i++ {
-			if oary[i] >= 0 {
-				gc.Fatalf("can't happen")
-			}
-			gins(movptr, &n1, reg)
-			gc.Cgen_checknil(reg)
-			n1.Xoffset = -(oary[i] + 1)
-		}
-
-		a.Type = obj.TYPE_NONE
-		a.Index = x86.REG_NONE
-		gc.Fixlargeoffset(&n1)
-		gc.Naddr(a, &n1)
-		return true
-
-	case gc.OINDEX:
-		return false
-	}
-
-	return false
-}
diff --git a/src/cmd/compile/internal/amd64/peep.go b/src/cmd/compile/internal/amd64/peep.go
deleted file mode 100644
index d74f670..0000000
--- a/src/cmd/compile/internal/amd64/peep.go
+++ /dev/null
@@ -1,1025 +0,0 @@
-// Derived from Inferno utils/6c/peep.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/6c/peep.c
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package amd64
-
-import (
-	"cmd/compile/internal/gc"
-	"cmd/internal/obj"
-	"cmd/internal/obj/x86"
-	"fmt"
-)
-
-var gactive uint32
-
-const (
-	exregoffset = x86.REG_R15
-)
-
-// do we need the carry bit
-func needc(p *obj.Prog) bool {
-	for p != nil {
-		flags := progcarryflags(p)
-		if flags&gc.UseCarry != 0 {
-			return true
-		}
-		if flags&(gc.SetCarry|gc.KillCarry) != 0 {
-			return false
-		}
-		p = p.Link
-	}
-
-	return false
-}
-
-func rnops(r *gc.Flow) *gc.Flow {
-	if r != nil {
-		var p *obj.Prog
-		var r1 *gc.Flow
-		for {
-			p = r.Prog
-			if p.As != obj.ANOP || p.From.Type != obj.TYPE_NONE || p.To.Type != obj.TYPE_NONE {
-				break
-			}
-			r1 = gc.Uniqs(r)
-			if r1 == nil {
-				break
-			}
-			r = r1
-		}
-	}
-
-	return r
-}
-
-func peep(firstp *obj.Prog) {
-	g := gc.Flowstart(firstp, nil)
-	if g == nil {
-		return
-	}
-	gactive = 0
-
-	// byte, word arithmetic elimination.
-	elimshortmov(g)
-
-	// constant propagation
-	// find MOV $con,R followed by
-	// another MOV $con,R without
-	// setting R in the interim
-	var p *obj.Prog
-	for r := g.Start; r != nil; r = r.Link {
-		p = r.Prog
-		switch p.As {
-		case x86.ALEAL,
-			x86.ALEAQ:
-			if regtyp(&p.To) {
-				if p.From.Sym != nil {
-					if p.From.Index == x86.REG_NONE {
-						conprop(r)
-					}
-				}
-			}
-
-		case x86.AMOVB,
-			x86.AMOVW,
-			x86.AMOVL,
-			x86.AMOVQ,
-			x86.AMOVSS,
-			x86.AMOVSD:
-			if regtyp(&p.To) {
-				if p.From.Type == obj.TYPE_CONST || p.From.Type == obj.TYPE_FCONST {
-					conprop(r)
-				}
-			}
-		}
-	}
-
-	var r *gc.Flow
-	var r1 *gc.Flow
-	var p1 *obj.Prog
-	var t int
-loop1:
-	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
-		gc.Dumpit("loop1", g.Start, 0)
-	}
-
-	t = 0
-	for r = g.Start; r != nil; r = r.Link {
-		p = r.Prog
-		switch p.As {
-		case x86.AMOVL,
-			x86.AMOVQ,
-			x86.AMOVSS,
-			x86.AMOVSD:
-			if regtyp(&p.To) {
-				if regtyp(&p.From) {
-					if copyprop(g, r) {
-						excise(r)
-						t++
-					} else if subprop(r) && copyprop(g, r) {
-						excise(r)
-						t++
-					}
-				}
-			}
-
-		case x86.AMOVBLZX,
-			x86.AMOVWLZX,
-			x86.AMOVBLSX,
-			x86.AMOVWLSX:
-			if regtyp(&p.To) {
-				r1 = rnops(gc.Uniqs(r))
-				if r1 != nil {
-					p1 = r1.Prog
-					if p.As == p1.As && p.To.Type == p1.From.Type && p.To.Reg == p1.From.Reg {
-						p1.As = x86.AMOVL
-						t++
-					}
-				}
-			}
-
-		case x86.AMOVBQSX,
-			x86.AMOVBQZX,
-			x86.AMOVWQSX,
-			x86.AMOVWQZX,
-			x86.AMOVLQSX,
-			x86.AMOVLQZX,
-			x86.AMOVQL:
-			if regtyp(&p.To) {
-				r1 = rnops(gc.Uniqs(r))
-				if r1 != nil {
-					p1 = r1.Prog
-					if p.As == p1.As && p.To.Type == p1.From.Type && p.To.Reg == p1.From.Reg {
-						p1.As = x86.AMOVQ
-						t++
-					}
-				}
-			}
-
-		case x86.AADDL,
-			x86.AADDQ,
-			x86.AADDW:
-			if p.From.Type != obj.TYPE_CONST || needc(p.Link) {
-				break
-			}
-			if p.From.Offset == -1 {
-				if p.As == x86.AADDQ {
-					p.As = x86.ADECQ
-				} else if p.As == x86.AADDL {
-					p.As = x86.ADECL
-				} else {
-					p.As = x86.ADECW
-				}
-				p.From = obj.Addr{}
-				break
-			}
-
-			if p.From.Offset == 1 {
-				if p.As == x86.AADDQ {
-					p.As = x86.AINCQ
-				} else if p.As == x86.AADDL {
-					p.As = x86.AINCL
-				} else {
-					p.As = x86.AINCW
-				}
-				p.From = obj.Addr{}
-				break
-			}
-
-		case x86.ASUBL,
-			x86.ASUBQ,
-			x86.ASUBW:
-			if p.From.Type != obj.TYPE_CONST || needc(p.Link) {
-				break
-			}
-			if p.From.Offset == -1 {
-				if p.As == x86.ASUBQ {
-					p.As = x86.AINCQ
-				} else if p.As == x86.ASUBL {
-					p.As = x86.AINCL
-				} else {
-					p.As = x86.AINCW
-				}
-				p.From = obj.Addr{}
-				break
-			}
-
-			if p.From.Offset == 1 {
-				if p.As == x86.ASUBQ {
-					p.As = x86.ADECQ
-				} else if p.As == x86.ASUBL {
-					p.As = x86.ADECL
-				} else {
-					p.As = x86.ADECW
-				}
-				p.From = obj.Addr{}
-				break
-			}
-		}
-	}
-
-	if t != 0 {
-		goto loop1
-	}
-
-	// MOVLQZX removal.
-	// The MOVLQZX exists to avoid being confused for a
-	// MOVL that is just copying 32-bit data around during
-	// copyprop. Now that copyprop is done, remove MOVLQZX R1, R2
-	// if it is dominated by an earlier ADDL/MOVL/etc into R1 that
-	// will have already cleared the high bits.
-	//
-	// MOVSD removal.
-	// We never use packed registers, so a MOVSD between registers
-	// can be replaced by MOVAPD, which moves the pair of float64s
-	// instead of just the lower one. We only use the lower one, but
-	// the processor can do better if we do moves using both.
-	for r := g.Start; r != nil; r = r.Link {
-		p = r.Prog
-		if p.As == x86.AMOVLQZX {
-			if regtyp(&p.From) {
-				if p.From.Type == p.To.Type && p.From.Reg == p.To.Reg {
-					if prevl(r, p.From.Reg) {
-						excise(r)
-					}
-				}
-			}
-		}
-
-		if p.As == x86.AMOVSD {
-			if regtyp(&p.From) {
-				if regtyp(&p.To) {
-					p.As = x86.AMOVAPD
-				}
-			}
-		}
-	}
-
-	// load pipelining
-	// push any load from memory as early as possible
-	// to give it time to complete before use.
-	for r := g.Start; r != nil; r = r.Link {
-		p = r.Prog
-		switch p.As {
-		case x86.AMOVB,
-			x86.AMOVW,
-			x86.AMOVL,
-			x86.AMOVQ,
-			x86.AMOVLQZX:
-			if regtyp(&p.To) && !regconsttyp(&p.From) {
-				pushback(r)
-			}
-		}
-	}
-
-	gc.Flowend(g)
-}
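
One detail of the loop above is easy to miss: the ADD/SUB ±1 rewrite trades an immediate-carrying encoding for the shorter INC/DEC forms, but INC/DEC leave the carry flag untouched while ADD/SUB set it, which is why needc first scans forward for a CF consumer. A sketch of the decision table (incDecFor and its string-based inputs are illustrative, not the real compiler API):

	package main

	import "fmt"

	// incDecFor mirrors peep's ADDQ/SUBQ ±1 rewrite: it returns the
	// replacement mnemonic, or "" when the transform doesn't apply.
	// carryNeeded stands in for the needc(p.Link) scan.
	func incDecFor(op string, imm int64, carryNeeded bool) string {
		if carryNeeded {
			return "" // INC/DEC don't set CF; a later ADC/SBB would misread it
		}
		switch {
		case op == "ADDQ" && imm == 1, op == "SUBQ" && imm == -1:
			return "INCQ"
		case op == "ADDQ" && imm == -1, op == "SUBQ" && imm == 1:
			return "DECQ"
		}
		return ""
	}

	func main() {
		fmt.Println(incDecFor("ADDQ", 1, false)) // INCQ
		fmt.Println(incDecFor("SUBQ", 1, true))  // "" since the carry is still live
	}
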
-
-func pushback(r0 *gc.Flow) {
-	var r *gc.Flow
-	var p *obj.Prog
-
-	var b *gc.Flow
-	p0 := r0.Prog
-	for r = gc.Uniqp(r0); r != nil && gc.Uniqs(r) != nil; r = gc.Uniqp(r) {
-		p = r.Prog
-		if p.As != obj.ANOP {
-			if !regconsttyp(&p.From) || !regtyp(&p.To) {
-				break
-			}
-			if copyu(p, &p0.To, nil) != 0 || copyu(p0, &p.To, nil) != 0 {
-				break
-			}
-		}
-
-		if p.As == obj.ACALL {
-			break
-		}
-		b = r
-	}
-
-	if b == nil {
-		if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
-			fmt.Printf("no pushback: %v\n", r0.Prog)
-			if r != nil {
-				fmt.Printf("\t%v [%v]\n", r.Prog, gc.Uniqs(r) != nil)
-			}
-		}
-
-		return
-	}
-
-	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
-		fmt.Printf("pushback\n")
-		for r := b; ; r = r.Link {
-			fmt.Printf("\t%v\n", r.Prog)
-			if r == r0 {
-				break
-			}
-		}
-	}
-
-	t := *r0.Prog
-	for r = gc.Uniqp(r0); ; r = gc.Uniqp(r) {
-		p0 = r.Link.Prog
-		p = r.Prog
-		p0.As = p.As
-		p0.Lineno = p.Lineno
-		p0.From = p.From
-		p0.To = p.To
-
-		if r == b {
-			break
-		}
-	}
-
-	p0 = r.Prog
-	p0.As = t.As
-	p0.Lineno = t.Lineno
-	p0.From = t.From
-	p0.To = t.To
-
-	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
-		fmt.Printf("\tafter\n")
-		for r := b; ; r = r.Link {
-			fmt.Printf("\t%v\n", r.Prog)
-			if r == r0 {
-				break
-			}
-		}
-	}
-}
-
-func excise(r *gc.Flow) {
-	p := r.Prog
-	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
-		fmt.Printf("%v ===delete===\n", p)
-	}
-
-	obj.Nopout(p)
-
-	gc.Ostats.Ndelmov++
-}
-
-func regtyp(a *obj.Addr) bool {
-	return a.Type == obj.TYPE_REG && (x86.REG_AX <= a.Reg && a.Reg <= x86.REG_R15 || x86.REG_X0 <= a.Reg && a.Reg <= x86.REG_X15)
-}
-
-// movb elimination.
-// movb is simulated by the linker
-// when a register other than ax, bx, cx, dx
-// is used, so rewrite to other instructions
-// when possible.  a movb into a register
-// can smash the entire 32-bit register without
-// causing any trouble.
-//
-// TODO: Using the Q forms here instead of the L forms
-// seems unnecessary, and it makes the instructions longer.
-func elimshortmov(g *gc.Graph) {
-	var p *obj.Prog
-
-	for r := g.Start; r != nil; r = r.Link {
-		p = r.Prog
-		if regtyp(&p.To) {
-			switch p.As {
-			case x86.AINCB,
-				x86.AINCW:
-				p.As = x86.AINCQ
-
-			case x86.ADECB,
-				x86.ADECW:
-				p.As = x86.ADECQ
-
-			case x86.ANEGB,
-				x86.ANEGW:
-				p.As = x86.ANEGQ
-
-			case x86.ANOTB,
-				x86.ANOTW:
-				p.As = x86.ANOTQ
-			}
-
-			if regtyp(&p.From) || p.From.Type == obj.TYPE_CONST {
-				// move or arithmetic into partial register.
-				// from another register or constant can be movl.
-				// we don't switch to 64-bit arithmetic if it can
-				// change how the carry bit is set (and the carry bit is needed).
-				switch p.As {
-				case x86.AMOVB,
-					x86.AMOVW:
-					p.As = x86.AMOVQ
-
-				case x86.AADDB,
-					x86.AADDW:
-					if !needc(p.Link) {
-						p.As = x86.AADDQ
-					}
-
-				case x86.ASUBB,
-					x86.ASUBW:
-					if !needc(p.Link) {
-						p.As = x86.ASUBQ
-					}
-
-				case x86.AMULB,
-					x86.AMULW:
-					p.As = x86.AMULQ
-
-				case x86.AIMULB,
-					x86.AIMULW:
-					p.As = x86.AIMULQ
-
-				case x86.AANDB,
-					x86.AANDW:
-					p.As = x86.AANDQ
-
-				case x86.AORB,
-					x86.AORW:
-					p.As = x86.AORQ
-
-				case x86.AXORB,
-					x86.AXORW:
-					p.As = x86.AXORQ
-
-				case x86.ASHLB,
-					x86.ASHLW:
-					p.As = x86.ASHLQ
-				}
-			} else if p.From.Type != obj.TYPE_REG {
-				// explicit zero extension, but don't
-				// do that if source is a byte register
-				// (only AH can occur and it's forbidden).
-				switch p.As {
-				case x86.AMOVB:
-					p.As = x86.AMOVBQZX
-
-				case x86.AMOVW:
-					p.As = x86.AMOVWQZX
-				}
-			}
-		}
-	}
-}
-
-// is 'a' a register or constant?
-func regconsttyp(a *obj.Addr) bool {
-	if regtyp(a) {
-		return true
-	}
-	switch a.Type {
-	case obj.TYPE_CONST,
-		obj.TYPE_FCONST,
-		obj.TYPE_SCONST,
-		obj.TYPE_ADDR: // TODO(rsc): Not all TYPE_ADDRs are constants.
-		return true
-	}
-
-	return false
-}
-
-// is reg guaranteed to be truncated by a previous L instruction?
-func prevl(r0 *gc.Flow, reg int16) bool {
-	for r := gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
-		p := r.Prog
-		if p.To.Type == obj.TYPE_REG && p.To.Reg == reg {
-			flags := progflags(p)
-			if flags&gc.RightWrite != 0 {
-				if flags&gc.SizeL != 0 {
-					return true
-				}
-				return false
-			}
-		}
-	}
-	return false
-}
-
-/*
- * the idea is to substitute
- * one register for another
- * from one MOV to another
- *	MOV	a, R0
- *	ADD	b, R0	/ no use of R1
- *	MOV	R0, R1
- * would be converted to
- *	MOV	a, R1
- *	ADD	b, R1
- *	MOV	R1, R0
- * hopefully, then the former or latter MOV
- * will be eliminated by copy propagation.
- */
-func subprop(r0 *gc.Flow) bool {
-	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
-		fmt.Printf("subprop %v\n", r0.Prog)
-	}
-	p := r0.Prog
-	v1 := &p.From
-	if !regtyp(v1) {
-		if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
-			fmt.Printf("\tnot regtype %v; return 0\n", gc.Ctxt.Dconv(v1))
-		}
-		return false
-	}
-
-	v2 := &p.To
-	if !regtyp(v2) {
-		if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
-			fmt.Printf("\tnot regtype %v; return 0\n", gc.Ctxt.Dconv(v2))
-		}
-		return false
-	}
-
-	for r := gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
-		if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
-			fmt.Printf("\t? %v\n", r.Prog)
-		}
-		if gc.Uniqs(r) == nil {
-			if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
-				fmt.Printf("\tno unique successor\n")
-			}
-			break
-		}
-
-		p = r.Prog
-		if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
-			continue
-		}
-		if p.Info.Flags&gc.Call != 0 {
-			if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
-				fmt.Printf("\tfound %v; return 0\n", p)
-			}
-			return false
-		}
-
-		if p.Info.Reguse|p.Info.Regset != 0 {
-			if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
-				fmt.Printf("\tfound %v; return 0\n", p)
-			}
-			return false
-		}
-
-		if (p.Info.Flags&gc.Move != 0) && (p.Info.Flags&(gc.SizeL|gc.SizeQ|gc.SizeF|gc.SizeD) != 0) && p.To.Type == v1.Type && p.To.Reg == v1.Reg {
-			copysub(&p.To, v1, v2, true)
-			if gc.Debug['P'] != 0 {
-				fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
-				if p.From.Type == v2.Type && p.From.Reg == v2.Reg {
-					fmt.Printf(" excise")
-				}
-				fmt.Printf("\n")
-			}
-
-			for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
-				p = r.Prog
-				copysub(&p.From, v1, v2, true)
-				copysub(&p.To, v1, v2, true)
-				if gc.Debug['P'] != 0 {
-					fmt.Printf("%v\n", r.Prog)
-				}
-			}
-
-			v1.Reg, v2.Reg = v2.Reg, v1.Reg
-			if gc.Debug['P'] != 0 {
-				fmt.Printf("%v last\n", r.Prog)
-			}
-			return true
-		}
-
-		if copyau(&p.From, v2) || copyau(&p.To, v2) {
-			if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
-				fmt.Printf("\tcopyau %v failed\n", gc.Ctxt.Dconv(v2))
-			}
-			break
-		}
-
-		if copysub(&p.From, v1, v2, false) || copysub(&p.To, v1, v2, false) {
-			if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
-				fmt.Printf("\tcopysub failed\n")
-			}
-			break
-		}
-	}
-
-	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
-		fmt.Printf("\tran off end; return 0\n")
-	}
-	return false
-}
-
-/*
- * The idea is to remove redundant copies.
- *	v1->v2	F=0
- *	(use v2	s/v2/v1/)*
- *	set v1	F=1
- *	use v2	return fail
- *	-----------------
- *	v1->v2	F=0
- *	(use v2	s/v2/v1/)*
- *	set v1	F=1
- *	set v2	return success
- */
-func copyprop(g *gc.Graph, r0 *gc.Flow) bool {
-	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
-		fmt.Printf("copyprop %v\n", r0.Prog)
-	}
-	p := r0.Prog
-	v1 := &p.From
-	v2 := &p.To
-	if copyas(v1, v2) {
-		return true
-	}
-	gactive++
-	return copy1(v1, v2, r0.S1, false)
-}
-
-func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f bool) bool {
-	if uint32(r.Active) == gactive {
-		if gc.Debug['P'] != 0 {
-			fmt.Printf("act set; return 1\n")
-		}
-		return true
-	}
-
-	r.Active = int32(gactive)
-	if gc.Debug['P'] != 0 {
-		fmt.Printf("copy %v->%v f=%v\n", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), f)
-	}
-	for ; r != nil; r = r.S1 {
-		p := r.Prog
-		if gc.Debug['P'] != 0 {
-			fmt.Printf("%v", p)
-		}
-		if !f && gc.Uniqp(r) == nil {
-			f = true
-			if gc.Debug['P'] != 0 {
-				fmt.Printf("; merge; f=%v", f)
-			}
-		}
-
-		switch t := copyu(p, v2, nil); t {
-		case 2: /* rar, can't split */
-			if gc.Debug['P'] != 0 {
-				fmt.Printf("; %v rar; return 0\n", gc.Ctxt.Dconv(v2))
-			}
-			return false
-
-		case 3: /* set */
-			if gc.Debug['P'] != 0 {
-				fmt.Printf("; %v set; return 1\n", gc.Ctxt.Dconv(v2))
-			}
-			return true
-
-		case 1, /* used, substitute */
-			4: /* use and set */
-			if f {
-				if gc.Debug['P'] == 0 {
-					return false
-				}
-				if t == 4 {
-					fmt.Printf("; %v used+set and f=%v; return 0\n", gc.Ctxt.Dconv(v2), f)
-				} else {
-					fmt.Printf("; %v used and f=%v; return 0\n", gc.Ctxt.Dconv(v2), f)
-				}
-				return false
-			}
-
-			if copyu(p, v2, v1) != 0 {
-				if gc.Debug['P'] != 0 {
-					fmt.Printf("; sub fail; return 0\n")
-				}
-				return false
-			}
-
-			if gc.Debug['P'] != 0 {
-				fmt.Printf("; sub %v/%v", gc.Ctxt.Dconv(v2), gc.Ctxt.Dconv(v1))
-			}
-			if t == 4 {
-				if gc.Debug['P'] != 0 {
-					fmt.Printf("; %v used+set; return 1\n", gc.Ctxt.Dconv(v2))
-				}
-				return true
-			}
-		}
-
-		if !f {
-			t := copyu(p, v1, nil)
-			if t == 2 || t == 3 || t == 4 {
-				f = true
-				if gc.Debug['P'] != 0 {
-					fmt.Printf("; %v set and !f; f=%v", gc.Ctxt.Dconv(v1), f)
-				}
-			}
-		}
-
-		if gc.Debug['P'] != 0 {
-			fmt.Printf("\n")
-		}
-		if r.S2 != nil {
-			if !copy1(v1, v2, r.S2, f) {
-				return false
-			}
-		}
-	}
-	return true
-}
-
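
The copyprop/copy1 protocol above is easier to see on toy instructions.
A minimal runnable sketch; the ins type and propagate function are
invented for illustration and are not the compiler's obj.Prog/gc.Flow types:

	package main

	import "fmt"

	// ins is a toy two-operand instruction: dst = src for "MOV",
	// dst = dst op src for everything else.
	type ins struct {
		op, src, dst string
	}

	// propagate models copy1: after a copy v1->v2, rewrite uses of
	// v2 to v1 until either register is set again.
	func propagate(code []ins, v1, v2 string) {
		for i := range code {
			p := &code[i]
			if p.src == v2 {
				p.src = v1 // use of v2: substitute
			}
			if p.dst == v1 || p.dst == v2 {
				return // v1 or v2 set: stop propagating
			}
		}
	}

	func main() {
		code := []ins{
			{"ADD", "R1", "R3"}, // uses R1, the copy's destination
			{"MOV", "R9", "R1"}, // sets R1: propagation stops here
			{"ADD", "R1", "R4"}, // keeps using the new R1
		}
		propagate(code, "R0", "R1") // as if MOV R0, R1 preceded code
		fmt.Println(code)           // [{ADD R0 R3} {MOV R9 R1} {ADD R1 R4}]
	}
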
-/*
- * return
- * 1 if v only used (and substitute),
- * 2 if read-alter-rewrite
- * 3 if set
- * 4 if set and used
- * 0 otherwise (not touched)
- */
-func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
-	switch p.As {
-	case obj.AJMP:
-		if s != nil {
-			if copysub(&p.To, v, s, true) {
-				return 1
-			}
-			return 0
-		}
-
-		if copyau(&p.To, v) {
-			return 1
-		}
-		return 0
-
-	case obj.ARET:
-		if s != nil {
-			return 1
-		}
-		return 3
-
-	case obj.ACALL:
-		if x86.REGEXT != 0 && v.Type == obj.TYPE_REG && v.Reg <= x86.REGEXT && v.Reg > exregoffset {
-			return 2
-		}
-		if x86.REGARG >= 0 && v.Type == obj.TYPE_REG && v.Reg == x86.REGARG {
-			return 2
-		}
-		if v.Type == p.From.Type && v.Reg == p.From.Reg {
-			return 2
-		}
-
-		if s != nil {
-			if copysub(&p.To, v, s, true) {
-				return 1
-			}
-			return 0
-		}
-
-		if copyau(&p.To, v) {
-			return 4
-		}
-		return 3
-
-	case obj.ATEXT:
-		if x86.REGARG >= 0 && v.Type == obj.TYPE_REG && v.Reg == x86.REGARG {
-			return 3
-		}
-		return 0
-	}
-
-	if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
-		return 0
-	}
-
-	if (p.Info.Reguse|p.Info.Regset)&RtoB(int(v.Reg)) != 0 {
-		return 2
-	}
-
-	if (p.Info.Reguse|p.Info.Regset)&FtoB(int(v.Reg)) != 0 {
-		return 2
-	}
-
-	if p.Info.Flags&gc.LeftAddr != 0 {
-		if copyas(&p.From, v) {
-			return 2
-		}
-	}
-
-	if p.Info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightRead|gc.RightWrite {
-		if copyas(&p.To, v) {
-			return 2
-		}
-	}
-
-	if p.Info.Flags&gc.RightWrite != 0 {
-		if copyas(&p.To, v) {
-			if s != nil {
-				if copysub(&p.From, v, s, true) {
-					return 1
-				}
-				return 0
-			}
-			if copyau(&p.From, v) {
-				return 4
-			}
-			return 3
-		}
-	}
-
-	if p.Info.Flags&(gc.LeftAddr|gc.LeftRead|gc.LeftWrite|gc.RightAddr|gc.RightRead|gc.RightWrite) != 0 {
-		if s != nil {
-			if copysub(&p.From, v, s, true) {
-				return 1
-			}
-			if copysub(&p.To, v, s, true) {
-				return 1
-			}
-			return 0
-		}
-
-		if copyau(&p.From, v) {
-			return 1
-		}
-		if copyau(&p.To, v) {
-			return 1
-		}
-	}
-	return 0
-}
-
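
A rough analogue of copyu's 0-4 return codes for the toy ins type from
the sketch above (illustrative only; the real copyu must also handle
calls, TEXT symbols, and indirect uses through v):

	// classify is a toy copyu for the ins type above.
	func classify(p ins, v string) int {
		switch {
		case p.op != "MOV" && p.dst == v:
			return 2 // read-alter-rewrite: dst is read and written
		case p.dst == v && p.src == v:
			return 4 // set and used
		case p.dst == v:
			return 3 // set
		case p.src == v:
			return 1 // only used: a substitution candidate
		}
		return 0 // not touched
	}
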
-/*
- * direct reference,
- * could be set/use depending on
- * semantics
- */
-func copyas(a *obj.Addr, v *obj.Addr) bool {
-	if x86.REG_AL <= a.Reg && a.Reg <= x86.REG_R15B {
-		gc.Fatalf("use of byte register")
-	}
-	if x86.REG_AL <= v.Reg && v.Reg <= x86.REG_R15B {
-		gc.Fatalf("use of byte register")
-	}
-
-	if a.Type != v.Type || a.Name != v.Name || a.Reg != v.Reg {
-		return false
-	}
-	if regtyp(v) {
-		return true
-	}
-	if v.Type == obj.TYPE_MEM && (v.Name == obj.NAME_AUTO || v.Name == obj.NAME_PARAM) {
-		if v.Offset == a.Offset {
-			return true
-		}
-	}
-	return false
-}
-
-func sameaddr(a *obj.Addr, v *obj.Addr) bool {
-	if a.Type != v.Type || a.Name != v.Name || a.Reg != v.Reg {
-		return false
-	}
-	if regtyp(v) {
-		return true
-	}
-	if v.Type == obj.TYPE_MEM && (v.Name == obj.NAME_AUTO || v.Name == obj.NAME_PARAM) {
-		if v.Offset == a.Offset {
-			return true
-		}
-	}
-	return false
-}
-
-/*
- * either direct or indirect
- */
-func copyau(a *obj.Addr, v *obj.Addr) bool {
-	if copyas(a, v) {
-		if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
-			fmt.Printf("\tcopyau: copyas returned 1\n")
-		}
-		return true
-	}
-
-	if regtyp(v) {
-		if a.Type == obj.TYPE_MEM && a.Reg == v.Reg {
-			if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
-				fmt.Printf("\tcopyau: found indir use - return 1\n")
-			}
-			return true
-		}
-
-		if a.Index == v.Reg {
-			if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
-				fmt.Printf("\tcopyau: found index use - return 1\n")
-			}
-			return true
-		}
-	}
-	return false
-}
-
-// copysub substitutes s for v in a.
-// copysub returns true on failure to substitute. TODO(dfc): reverse this logic; copysub should return false on failure.
-func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f bool) bool {
-	if copyas(a, v) {
-		if s.Reg >= x86.REG_AX && s.Reg <= x86.REG_R15 || s.Reg >= x86.REG_X0 && s.Reg <= x86.REG_X0+15 {
-			if f {
-				a.Reg = s.Reg
-			}
-		}
-		return false
-	}
-
-	if regtyp(v) {
-		if a.Type == obj.TYPE_MEM && a.Reg == v.Reg {
-			if (s.Reg == x86.REG_BP || s.Reg == x86.REG_R13) && a.Index != x86.REG_NONE {
-				return true /* can't use BP-base with index */
-			}
-			if f {
-				a.Reg = s.Reg
-			}
-		}
-		if a.Index == v.Reg {
-			if f {
-				a.Index = s.Reg
-			}
-		}
-	}
-	return false
-}
-
-func conprop(r0 *gc.Flow) {
-	p0 := r0.Prog
-	v0 := &p0.To
-	r := r0
-
-loop:
-	r = gc.Uniqs(r)
-	if r == nil || r == r0 {
-		return
-	}
-	if gc.Uniqp(r) == nil {
-		return
-	}
-
-	p := r.Prog
-	t := copyu(p, v0, nil)
-	switch t {
-	case 0, // miss
-		1: // use
-		goto loop
-
-	case 2, // rar
-		4: // use and set
-		break
-
-	case 3: // set
-		if p.As == p0.As &&
-			p.From.Type == p0.From.Type &&
-			p.From.Reg == p0.From.Reg &&
-			p.From.Node == p0.From.Node &&
-			p.From.Offset == p0.From.Offset &&
-			p.From.Scale == p0.From.Scale &&
-			p.From.Type == obj.TYPE_FCONST && p.From.Val.(float64) == p0.From.Val.(float64) &&
-			p.From.Index == p0.From.Index {
-			excise(r)
-			goto loop
-		}
-	}
-}
-
-func smallindir(a *obj.Addr, reg *obj.Addr) bool {
-	return regtyp(reg) && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && a.Index == x86.REG_NONE && 0 <= a.Offset && a.Offset < 4096
-}
-
-func stackaddr(a *obj.Addr) bool {
-	return a.Type == obj.TYPE_REG && a.Reg == x86.REG_SP
-}
diff --git a/src/cmd/compile/internal/amd64/reg.go b/src/cmd/compile/internal/amd64/reg.go
index 361b1e2..bff70a2 100644
--- a/src/cmd/compile/internal/amd64/reg.go
+++ b/src/cmd/compile/internal/amd64/reg.go
@@ -30,72 +30,7 @@
 
 package amd64
 
-import (
-	"cmd/compile/internal/gc"
-	"cmd/internal/obj/x86"
-)
-
-const (
-	NREGVAR = 32
-)
-
-var regname = []string{
-	".AX",
-	".CX",
-	".DX",
-	".BX",
-	".SP",
-	".BP",
-	".SI",
-	".DI",
-	".R8",
-	".R9",
-	".R10",
-	".R11",
-	".R12",
-	".R13",
-	".R14",
-	".R15",
-	".X0",
-	".X1",
-	".X2",
-	".X3",
-	".X4",
-	".X5",
-	".X6",
-	".X7",
-	".X8",
-	".X9",
-	".X10",
-	".X11",
-	".X12",
-	".X13",
-	".X14",
-	".X15",
-}
-
-func regnames(n *int) []string {
-	*n = NREGVAR
-	return regname
-}
-
-func excludedregs() uint64 {
-	return RtoB(x86.REG_SP)
-}
-
-func doregbits(r int) uint64 {
-	b := uint64(0)
-	if r >= x86.REG_AX && r <= x86.REG_R15 {
-		b |= RtoB(r)
-	} else if r >= x86.REG_AL && r <= x86.REG_R15B {
-		b |= RtoB(r - x86.REG_AL + x86.REG_AX)
-	} else if r >= x86.REG_AH && r <= x86.REG_BH {
-		b |= RtoB(r - x86.REG_AH + x86.REG_AX)
-	} else if r >= x86.REG_X0 && r <= x86.REG_X0+15 {
-		b |= FtoB(r)
-	}
-	return b
-}
+import "cmd/internal/obj/x86"
 
 // For ProgInfo.
 const (
@@ -115,38 +50,3 @@
 	}
 	return 1 << uint(r-x86.REG_AX)
 }
-
-func BtoR(b uint64) int {
-	b &= 0xffff
-	if gc.Nacl {
-		b &^= (1<<(x86.REG_BP-x86.REG_AX) | 1<<(x86.REG_R15-x86.REG_AX))
-	} else if gc.Ctxt.Framepointer_enabled {
-		// BP is part of the calling convention if framepointer_enabled.
-		b &^= (1 << (x86.REG_BP - x86.REG_AX))
-	}
-	if b == 0 {
-		return 0
-	}
-	return gc.Bitno(b) + x86.REG_AX
-}
-
-/*
- *	bit	reg
- *	16	X0
- *	...
- *	31	X15
- */
-func FtoB(f int) uint64 {
-	if f < x86.REG_X0 || f > x86.REG_X15 {
-		return 0
-	}
-	return 1 << uint(f-x86.REG_X0+16)
-}
-
-func BtoF(b uint64) int {
-	b &= 0xFFFF0000
-	if b == 0 {
-		return 0
-	}
-	return gc.Bitno(b) - 16 + x86.REG_X0
-}
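
RtoB/FtoB/BtoR/BtoF are a plain bijection between register numbers and
bit positions. A self-contained sketch of the same scheme, with stand-in
register constants (the real ones live in cmd/internal/obj/x86) and
math/bits in place of gc.Bitno:

	package main

	import (
		"fmt"
		"math/bits"
	)

	const (
		regAX = 0  // stand-in for x86.REG_AX
		regX0 = 16 // X registers map to bits 16..31, per the table above
	)

	func rtoB(r int) uint64 { return 1 << uint(r-regAX) }
	func ftoB(f int) uint64 { return 1 << uint(f-regX0+16) }

	// btoF inverts ftoB: lowest set bit in the X range back to a register.
	func btoF(b uint64) int {
		b &= 0xFFFF0000
		if b == 0 {
			return 0
		}
		return bits.TrailingZeros64(b) - 16 + regX0
	}

	func main() {
		m := ftoB(regX0 + 3)                          // X3 -> bit 19
		fmt.Printf("%#x %v\n", m, btoF(m) == regX0+3) // 0x80000 true
	}
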
diff --git a/src/cmd/compile/internal/arm/cgen.go b/src/cmd/compile/internal/arm/cgen.go
deleted file mode 100644
index c60df08..0000000
--- a/src/cmd/compile/internal/arm/cgen.go
+++ /dev/null
@@ -1,224 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package arm
-
-import (
-	"cmd/compile/internal/gc"
-	"cmd/internal/obj"
-	"cmd/internal/obj/arm"
-)
-
-/*
- * generate array index into res.
- * n might be any size; res is 32-bit.
- * returns Prog* to patch to panic call.
- */
-func cgenindex(n *gc.Node, res *gc.Node, bounded bool) *obj.Prog {
-	if !gc.Is64(n.Type) {
-		gc.Cgen(n, res)
-		return nil
-	}
-
-	var tmp gc.Node
-	gc.Tempname(&tmp, gc.Types[gc.TINT64])
-	gc.Cgen(n, &tmp)
-	var lo gc.Node
-	var hi gc.Node
-	split64(&tmp, &lo, &hi)
-	gmove(&lo, res)
-	if bounded {
-		splitclean()
-		return nil
-	}
-
-	var n1 gc.Node
-	gc.Regalloc(&n1, gc.Types[gc.TINT32], nil)
-	var n2 gc.Node
-	gc.Regalloc(&n2, gc.Types[gc.TINT32], nil)
-	var zero gc.Node
-	gc.Nodconst(&zero, gc.Types[gc.TINT32], 0)
-	gmove(&hi, &n1)
-	gmove(&zero, &n2)
-	gins(arm.ACMP, &n1, &n2)
-	gc.Regfree(&n2)
-	gc.Regfree(&n1)
-	splitclean()
-	return gc.Gbranch(arm.ABNE, nil, -1)
-}
-
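
Semantically the 64-bit case reduces to the following check, written in
plain Go (a sketch of what the generated code tests, not of the code
generator itself):

	// index32 narrows a 64-bit index for a 32-bit machine. ok is false
	// exactly when the high word is nonzero, the condition cgenindex
	// branches on to reach the panic call (negative indexes fail too,
	// since their high word is all ones).
	func index32(i int64) (idx uint32, ok bool) {
		return uint32(i), uint32(uint64(i)>>32) == 0
	}
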
-func igenindex(n *gc.Node, res *gc.Node, bounded bool) *obj.Prog {
-	gc.Tempname(res, n.Type)
-	return cgenindex(n, res, bounded)
-}
-
-func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
-	// determine alignment.
-	// want to avoid unaligned access, so have to use
-	// smaller operations for less aligned types.
-	// for example moving [4]byte must use 4 MOVB not 1 MOVW.
-	align := int(n.Type.Align)
-
-	var op obj.As
-	switch align {
-	default:
-		gc.Fatalf("sgen: invalid alignment %d for %v", align, n.Type)
-
-	case 1:
-		op = arm.AMOVB
-
-	case 2:
-		op = arm.AMOVH
-
-	case 4:
-		op = arm.AMOVW
-	}
-
-	if w%int64(align) != 0 {
-		gc.Fatalf("sgen: unaligned size %d (align=%d) for %v", w, align, n.Type)
-	}
-	c := int32(w / int64(align))
-
-	if osrc%int64(align) != 0 || odst%int64(align) != 0 {
-		gc.Fatalf("sgen: unaligned offset src %d or dst %d (align %d)", osrc, odst, align)
-	}
-
-	// if we are copying forward on the stack and
-	// the src and dst overlap, then reverse direction
-	dir := align
-	if osrc < odst && odst < osrc+w {
-		dir = -dir
-	}
-
-	if op == arm.AMOVW && !gc.Nacl && dir > 0 && c >= 4 && c <= 128 {
-		var r0 gc.Node
-		r0.Op = gc.OREGISTER
-		r0.Reg = arm.REG_R0
-		var r1 gc.Node
-		r1.Op = gc.OREGISTER
-		r1.Reg = arm.REG_R0 + 1
-		var r2 gc.Node
-		r2.Op = gc.OREGISTER
-		r2.Reg = arm.REG_R0 + 2
-
-		var src gc.Node
-		gc.Regalloc(&src, gc.Types[gc.Tptr], &r1)
-		var dst gc.Node
-		gc.Regalloc(&dst, gc.Types[gc.Tptr], &r2)
-		if n.Ullman >= res.Ullman {
-			// eval n first
-			gc.Agen(n, &src)
-
-			if res.Op == gc.ONAME {
-				gc.Gvardef(res)
-			}
-			gc.Agen(res, &dst)
-		} else {
-			// eval res first
-			if res.Op == gc.ONAME {
-				gc.Gvardef(res)
-			}
-			gc.Agen(res, &dst)
-			gc.Agen(n, &src)
-		}
-
-		var tmp gc.Node
-		gc.Regalloc(&tmp, gc.Types[gc.Tptr], &r0)
-		f := gc.Sysfunc("duffcopy")
-		p := gins(obj.ADUFFCOPY, nil, f)
-		gc.Afunclit(&p.To, f)
-
-		// 8 and 128 = magic constants: see ../../runtime/asm_arm.s
-		p.To.Offset = 8 * (128 - int64(c))
-
-		gc.Regfree(&tmp)
-		gc.Regfree(&src)
-		gc.Regfree(&dst)
-		return
-	}
-
-	var dst gc.Node
-	var src gc.Node
-	if n.Ullman >= res.Ullman {
-		gc.Agenr(n, &dst, res) // temporarily use dst
-		gc.Regalloc(&src, gc.Types[gc.Tptr], nil)
-		gins(arm.AMOVW, &dst, &src)
-		if res.Op == gc.ONAME {
-			gc.Gvardef(res)
-		}
-		gc.Agen(res, &dst)
-	} else {
-		if res.Op == gc.ONAME {
-			gc.Gvardef(res)
-		}
-		gc.Agenr(res, &dst, res)
-		gc.Agenr(n, &src, nil)
-	}
-
-	var tmp gc.Node
-	gc.Regalloc(&tmp, gc.Types[gc.TUINT32], nil)
-
-	// set up end marker
-	var nend gc.Node
-
-	if c >= 4 {
-		gc.Regalloc(&nend, gc.Types[gc.TUINT32], nil)
-
-		p := gins(arm.AMOVW, &src, &nend)
-		p.From.Type = obj.TYPE_ADDR
-		if dir < 0 {
-			p.From.Offset = int64(dir)
-		} else {
-			p.From.Offset = w
-		}
-	}
-
-	// move src and dest to the end of block if necessary
-	if dir < 0 {
-		p := gins(arm.AMOVW, &src, &src)
-		p.From.Type = obj.TYPE_ADDR
-		p.From.Offset = w + int64(dir)
-
-		p = gins(arm.AMOVW, &dst, &dst)
-		p.From.Type = obj.TYPE_ADDR
-		p.From.Offset = w + int64(dir)
-	}
-
-	// move
-	if c >= 4 {
-		p := gins(op, &src, &tmp)
-		p.From.Type = obj.TYPE_MEM
-		p.From.Offset = int64(dir)
-		p.Scond |= arm.C_PBIT
-		ploop := p
-
-		p = gins(op, &tmp, &dst)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Offset = int64(dir)
-		p.Scond |= arm.C_PBIT
-
-		p = gins(arm.ACMP, &src, nil)
-		raddr(&nend, p)
-
-		gc.Patch(gc.Gbranch(arm.ABNE, nil, 0), ploop)
-		gc.Regfree(&nend)
-	} else {
-		var p *obj.Prog
-		for ; c > 0; c-- {
-			p = gins(op, &src, &tmp)
-			p.From.Type = obj.TYPE_MEM
-			p.From.Offset = int64(dir)
-			p.Scond |= arm.C_PBIT
-
-			p = gins(op, &tmp, &dst)
-			p.To.Type = obj.TYPE_MEM
-			p.To.Offset = int64(dir)
-			p.Scond |= arm.C_PBIT
-		}
-	}
-
-	gc.Regfree(&dst)
-	gc.Regfree(&src)
-	gc.Regfree(&tmp)
-}
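
Ignoring the alignment-driven unit size (MOVB/MOVH/MOVW) and the Duff's
device fast path, the direction logic above corresponds to this sketch:

	// moveBlock copies w bytes, walking backward when a forward copy
	// through overlapping stack slots (osrc < odst < osrc+w) would
	// clobber source bytes before they are read. dst and src must
	// hold at least w bytes.
	func moveBlock(dst, src []byte, osrc, odst, w int64) {
		if osrc < odst && odst < osrc+w {
			for i := w - 1; i >= 0; i-- { // reverse direction
				dst[i] = src[i]
			}
			return
		}
		for i := int64(0); i < w; i++ { // normal direction
			dst[i] = src[i]
		}
	}
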
diff --git a/src/cmd/compile/internal/arm/cgen64.go b/src/cmd/compile/internal/arm/cgen64.go
deleted file mode 100644
index 33e8406..0000000
--- a/src/cmd/compile/internal/arm/cgen64.go
+++ /dev/null
@@ -1,859 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package arm
-
-import (
-	"cmd/compile/internal/gc"
-	"cmd/internal/obj"
-	"cmd/internal/obj/arm"
-)
-
-/*
- * attempt to generate 64-bit
- *	res = n
- * unhandled ops are fatal errors rather than a failure return.
- */
-func cgen64(n *gc.Node, res *gc.Node) {
-	if res.Op != gc.OINDREG && res.Op != gc.ONAME {
-		gc.Dump("n", n)
-		gc.Dump("res", res)
-		gc.Fatalf("cgen64 %v of %v", n.Op, res.Op)
-	}
-
-	l := n.Left
-	var t1 gc.Node
-	if !l.Addable {
-		gc.Tempname(&t1, l.Type)
-		gc.Cgen(l, &t1)
-		l = &t1
-	}
-
-	var hi1 gc.Node
-	var lo1 gc.Node
-	split64(l, &lo1, &hi1)
-	switch n.Op {
-	default:
-		gc.Fatalf("cgen64 %v", n.Op)
-
-	case gc.OMINUS:
-		var lo2 gc.Node
-		var hi2 gc.Node
-		split64(res, &lo2, &hi2)
-
-		gc.Regalloc(&t1, lo1.Type, nil)
-		var al gc.Node
-		gc.Regalloc(&al, lo1.Type, nil)
-		var ah gc.Node
-		gc.Regalloc(&ah, hi1.Type, nil)
-
-		gins(arm.AMOVW, &lo1, &al)
-		gins(arm.AMOVW, &hi1, &ah)
-
-		gmove(ncon(0), &t1)
-		p1 := gins(arm.ASUB, &al, &t1)
-		p1.Scond |= arm.C_SBIT
-		gins(arm.AMOVW, &t1, &lo2)
-
-		gmove(ncon(0), &t1)
-		gins(arm.ASBC, &ah, &t1)
-		gins(arm.AMOVW, &t1, &hi2)
-
-		gc.Regfree(&t1)
-		gc.Regfree(&al)
-		gc.Regfree(&ah)
-		splitclean()
-		splitclean()
-		return
-
-	case gc.OCOM:
-		gc.Regalloc(&t1, lo1.Type, nil)
-		gmove(ncon(^uint32(0)), &t1)
-
-		var lo2 gc.Node
-		var hi2 gc.Node
-		split64(res, &lo2, &hi2)
-		var n1 gc.Node
-		gc.Regalloc(&n1, lo1.Type, nil)
-
-		gins(arm.AMOVW, &lo1, &n1)
-		gins(arm.AEOR, &t1, &n1)
-		gins(arm.AMOVW, &n1, &lo2)
-
-		gins(arm.AMOVW, &hi1, &n1)
-		gins(arm.AEOR, &t1, &n1)
-		gins(arm.AMOVW, &n1, &hi2)
-
-		gc.Regfree(&t1)
-		gc.Regfree(&n1)
-		splitclean()
-		splitclean()
-		return
-
-		// binary operators.
-	// common setup below.
-	case gc.OADD,
-		gc.OSUB,
-		gc.OMUL,
-		gc.OLSH,
-		gc.ORSH,
-		gc.OAND,
-		gc.OOR,
-		gc.OXOR,
-		gc.OLROT:
-		break
-	}
-
-	// setup for binary operators
-	r := n.Right
-
-	if r != nil && !r.Addable {
-		var t2 gc.Node
-		gc.Tempname(&t2, r.Type)
-		gc.Cgen(r, &t2)
-		r = &t2
-	}
-
-	var hi2 gc.Node
-	var lo2 gc.Node
-	if gc.Is64(r.Type) {
-		split64(r, &lo2, &hi2)
-	}
-
-	var al gc.Node
-	gc.Regalloc(&al, lo1.Type, nil)
-	var ah gc.Node
-	gc.Regalloc(&ah, hi1.Type, nil)
-
-	// Do op. Leave result in ah:al.
-	switch n.Op {
-	default:
-		gc.Fatalf("cgen64: not implemented: %v\n", n)
-
-		// TODO: Constants
-	case gc.OADD:
-		var bl gc.Node
-		gc.Regalloc(&bl, gc.Types[gc.TPTR32], nil)
-
-		var bh gc.Node
-		gc.Regalloc(&bh, gc.Types[gc.TPTR32], nil)
-		gins(arm.AMOVW, &hi1, &ah)
-		gins(arm.AMOVW, &lo1, &al)
-		gins(arm.AMOVW, &hi2, &bh)
-		gins(arm.AMOVW, &lo2, &bl)
-		p1 := gins(arm.AADD, &bl, &al)
-		p1.Scond |= arm.C_SBIT
-		gins(arm.AADC, &bh, &ah)
-		gc.Regfree(&bl)
-		gc.Regfree(&bh)
-
-		// TODO: Constants.
-	case gc.OSUB:
-		var bl gc.Node
-		gc.Regalloc(&bl, gc.Types[gc.TPTR32], nil)
-
-		var bh gc.Node
-		gc.Regalloc(&bh, gc.Types[gc.TPTR32], nil)
-		gins(arm.AMOVW, &lo1, &al)
-		gins(arm.AMOVW, &hi1, &ah)
-		gins(arm.AMOVW, &lo2, &bl)
-		gins(arm.AMOVW, &hi2, &bh)
-		p1 := gins(arm.ASUB, &bl, &al)
-		p1.Scond |= arm.C_SBIT
-		gins(arm.ASBC, &bh, &ah)
-		gc.Regfree(&bl)
-		gc.Regfree(&bh)
-
-		// TODO(kaib): this can be done with 4 regs and does not need 6
-	case gc.OMUL:
-		var bl gc.Node
-		gc.Regalloc(&bl, gc.Types[gc.TPTR32], nil)
-
-		var bh gc.Node
-		gc.Regalloc(&bh, gc.Types[gc.TPTR32], nil)
-		var cl gc.Node
-		gc.Regalloc(&cl, gc.Types[gc.TPTR32], nil)
-		var ch gc.Node
-		gc.Regalloc(&ch, gc.Types[gc.TPTR32], nil)
-
-		// load args into bh:bl and ch:cl.
-		gins(arm.AMOVW, &hi1, &bh)
-
-		gins(arm.AMOVW, &lo1, &bl)
-		gins(arm.AMOVW, &hi2, &ch)
-		gins(arm.AMOVW, &lo2, &cl)
-
-		// bl * cl -> ah al
-		p1 := gins(arm.AMULLU, nil, nil)
-
-		p1.From.Type = obj.TYPE_REG
-		p1.From.Reg = bl.Reg
-		p1.Reg = cl.Reg
-		p1.To.Type = obj.TYPE_REGREG
-		p1.To.Reg = ah.Reg
-		p1.To.Offset = int64(al.Reg)
-
-		//print("%v\n", p1);
-
-		// bl * ch + ah -> ah
-		p1 = gins(arm.AMULA, nil, nil)
-
-		p1.From.Type = obj.TYPE_REG
-		p1.From.Reg = bl.Reg
-		p1.Reg = ch.Reg
-		p1.To.Type = obj.TYPE_REGREG2
-		p1.To.Reg = ah.Reg
-		p1.To.Offset = int64(ah.Reg)
-
-		//print("%v\n", p1);
-
-		// bh * cl + ah -> ah
-		p1 = gins(arm.AMULA, nil, nil)
-
-		p1.From.Type = obj.TYPE_REG
-		p1.From.Reg = bh.Reg
-		p1.Reg = cl.Reg
-		p1.To.Type = obj.TYPE_REGREG2
-		p1.To.Reg = ah.Reg
-		p1.To.Offset = int64(ah.Reg)
-
-		//print("%v\n", p1);
-
-		gc.Regfree(&bh)
-
-		gc.Regfree(&bl)
-		gc.Regfree(&ch)
-		gc.Regfree(&cl)
-
-		// We only rotate by a constant c in [0,64).
-	// if c >= 32:
-	//	lo, hi = hi, lo
-	//	c -= 32
-	// if c == 0:
-	//	no-op
-	// else:
-	//	t = hi
-	//	shld hi:lo, c
-	//	shld lo:t, c
-	// (shld is x86-style pseudocode; the ARM code below uses shifted
-	// MOVW/ORR to the same effect.)
-	case gc.OLROT:
-		v := uint64(r.Int64())
-
-		var bl gc.Node
-		gc.Regalloc(&bl, lo1.Type, nil)
-		var bh gc.Node
-		gc.Regalloc(&bh, hi1.Type, nil)
-		if v >= 32 {
-			// reverse during load to do the first 32 bits of rotate
-			v -= 32
-
-			gins(arm.AMOVW, &hi1, &bl)
-			gins(arm.AMOVW, &lo1, &bh)
-		} else {
-			gins(arm.AMOVW, &hi1, &bh)
-			gins(arm.AMOVW, &lo1, &bl)
-		}
-
-		if v == 0 {
-			gins(arm.AMOVW, &bh, &ah)
-			gins(arm.AMOVW, &bl, &al)
-		} else {
-			// rotate by 1 <= v <= 31
-			//	MOVW	bl<<v, al
-			//	MOVW	bh<<v, ah
-			//	OR		bl>>(32-v), ah
-			//	OR		bh>>(32-v), al
-			gshift(arm.AMOVW, &bl, arm.SHIFT_LL, int32(v), &al)
-
-			gshift(arm.AMOVW, &bh, arm.SHIFT_LL, int32(v), &ah)
-			gshift(arm.AORR, &bl, arm.SHIFT_LR, int32(32-v), &ah)
-			gshift(arm.AORR, &bh, arm.SHIFT_LR, int32(32-v), &al)
-		}
-
-		gc.Regfree(&bl)
-		gc.Regfree(&bh)
-
-	case gc.OLSH:
-		var bl gc.Node
-		gc.Regalloc(&bl, lo1.Type, nil)
-		var bh gc.Node
-		gc.Regalloc(&bh, hi1.Type, nil)
-		gins(arm.AMOVW, &hi1, &bh)
-		gins(arm.AMOVW, &lo1, &bl)
-
-		var p6 *obj.Prog
-		var s gc.Node
-		var n1 gc.Node
-		var creg gc.Node
-		var p1 *obj.Prog
-		var p2 *obj.Prog
-		var p3 *obj.Prog
-		var p4 *obj.Prog
-		var p5 *obj.Prog
-		if r.Op == gc.OLITERAL {
-			v := uint64(r.Int64())
-			if v >= 64 {
-				// TODO(kaib): replace with gins(AMOVW, nodintconst(0), &al)
-				// here and below (verify it optimizes to EOR)
-				gins(arm.AEOR, &al, &al)
-
-				gins(arm.AEOR, &ah, &ah)
-			} else if v > 32 {
-				gins(arm.AEOR, &al, &al)
-
-				//	MOVW	bl<<(v-32), ah
-				gshift(arm.AMOVW, &bl, arm.SHIFT_LL, int32(v-32), &ah)
-			} else if v == 32 {
-				gins(arm.AEOR, &al, &al)
-				gins(arm.AMOVW, &bl, &ah)
-			} else if v > 0 {
-				//	MOVW	bl<<v, al
-				gshift(arm.AMOVW, &bl, arm.SHIFT_LL, int32(v), &al)
-
-				//	MOVW	bh<<v, ah
-				gshift(arm.AMOVW, &bh, arm.SHIFT_LL, int32(v), &ah)
-
-				//	OR		bl>>(32-v), ah
-				gshift(arm.AORR, &bl, arm.SHIFT_LR, int32(32-v), &ah)
-			} else {
-				gins(arm.AMOVW, &bl, &al)
-				gins(arm.AMOVW, &bh, &ah)
-			}
-
-			goto olsh_break
-		}
-
-		gc.Regalloc(&s, gc.Types[gc.TUINT32], nil)
-		gc.Regalloc(&creg, gc.Types[gc.TUINT32], nil)
-		if gc.Is64(r.Type) {
-			// shift is >= 1<<32
-			var cl gc.Node
-			var ch gc.Node
-			split64(r, &cl, &ch)
-
-			gmove(&ch, &s)
-			gins(arm.ATST, &s, nil)
-			p6 = gc.Gbranch(arm.ABNE, nil, 0)
-			gmove(&cl, &s)
-			splitclean()
-		} else {
-			gmove(r, &s)
-			p6 = nil
-		}
-
-		gins(arm.ATST, &s, nil)
-
-		// shift == 0
-		p1 = gins(arm.AMOVW, &bl, &al)
-
-		p1.Scond = arm.C_SCOND_EQ
-		p1 = gins(arm.AMOVW, &bh, &ah)
-		p1.Scond = arm.C_SCOND_EQ
-		p2 = gc.Gbranch(arm.ABEQ, nil, 0)
-
-		// shift is < 32
-		gc.Nodconst(&n1, gc.Types[gc.TUINT32], 32)
-
-		gmove(&n1, &creg)
-		gins(arm.ACMP, &s, &creg)
-
-		//	MOVW.LO		bl<<s, al
-		p1 = gregshift(arm.AMOVW, &bl, arm.SHIFT_LL, &s, &al)
-
-		p1.Scond = arm.C_SCOND_LO
-
-		//	MOVW.LO		bh<<s, ah
-		p1 = gregshift(arm.AMOVW, &bh, arm.SHIFT_LL, &s, &ah)
-
-		p1.Scond = arm.C_SCOND_LO
-
-		//	SUB.LO		s, creg
-		p1 = gins(arm.ASUB, &s, &creg)
-
-		p1.Scond = arm.C_SCOND_LO
-
-		//	OR.LO		bl>>creg, ah
-		p1 = gregshift(arm.AORR, &bl, arm.SHIFT_LR, &creg, &ah)
-
-		p1.Scond = arm.C_SCOND_LO
-
-		//	BLO	end
-		p3 = gc.Gbranch(arm.ABLO, nil, 0)
-
-		// shift == 32
-		p1 = gins(arm.AEOR, &al, &al)
-
-		p1.Scond = arm.C_SCOND_EQ
-		p1 = gins(arm.AMOVW, &bl, &ah)
-		p1.Scond = arm.C_SCOND_EQ
-		p4 = gc.Gbranch(arm.ABEQ, nil, 0)
-
-		// shift is < 64
-		gc.Nodconst(&n1, gc.Types[gc.TUINT32], 64)
-
-		gmove(&n1, &creg)
-		gins(arm.ACMP, &s, &creg)
-
-		//	EOR.LO	al, al
-		p1 = gins(arm.AEOR, &al, &al)
-
-		p1.Scond = arm.C_SCOND_LO
-
-		//	MOVW.LO		creg>>1, creg
-		p1 = gshift(arm.AMOVW, &creg, arm.SHIFT_LR, 1, &creg)
-
-		p1.Scond = arm.C_SCOND_LO
-
-		//	SUB.LO		creg, s
-		p1 = gins(arm.ASUB, &creg, &s)
-
-		p1.Scond = arm.C_SCOND_LO
-
-		//	MOVW	bl<<s, ah
-		p1 = gregshift(arm.AMOVW, &bl, arm.SHIFT_LL, &s, &ah)
-
-		p1.Scond = arm.C_SCOND_LO
-
-		p5 = gc.Gbranch(arm.ABLO, nil, 0)
-
-		// shift >= 64
-		if p6 != nil {
-			gc.Patch(p6, gc.Pc)
-		}
-		gins(arm.AEOR, &al, &al)
-		gins(arm.AEOR, &ah, &ah)
-
-		gc.Patch(p2, gc.Pc)
-		gc.Patch(p3, gc.Pc)
-		gc.Patch(p4, gc.Pc)
-		gc.Patch(p5, gc.Pc)
-		gc.Regfree(&s)
-		gc.Regfree(&creg)
-
-	olsh_break:
-		gc.Regfree(&bl)
-		gc.Regfree(&bh)
-
-	case gc.ORSH:
-		var bl gc.Node
-		gc.Regalloc(&bl, lo1.Type, nil)
-		var bh gc.Node
-		gc.Regalloc(&bh, hi1.Type, nil)
-		gins(arm.AMOVW, &hi1, &bh)
-		gins(arm.AMOVW, &lo1, &bl)
-
-		var p4 *obj.Prog
-		var p5 *obj.Prog
-		var n1 gc.Node
-		var p6 *obj.Prog
-		var s gc.Node
-		var p1 *obj.Prog
-		var p2 *obj.Prog
-		var creg gc.Node
-		var p3 *obj.Prog
-		if r.Op == gc.OLITERAL {
-			v := uint64(r.Int64())
-			if v >= 64 {
-				if bh.Type.Etype == gc.TINT32 {
-					//	MOVW	bh->31, al
-					gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &al)
-
-					//	MOVW	bh->31, ah
-					gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &ah)
-				} else {
-					gins(arm.AEOR, &al, &al)
-					gins(arm.AEOR, &ah, &ah)
-				}
-			} else if v > 32 {
-				if bh.Type.Etype == gc.TINT32 {
-					//	MOVW	bh->(v-32), al
-					gshift(arm.AMOVW, &bh, arm.SHIFT_AR, int32(v-32), &al)
-
-					//	MOVW	bh->31, ah
-					gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &ah)
-				} else {
-					//	MOVW	bh>>(v-32), al
-					gshift(arm.AMOVW, &bh, arm.SHIFT_LR, int32(v-32), &al)
-
-					gins(arm.AEOR, &ah, &ah)
-				}
-			} else if v == 32 {
-				gins(arm.AMOVW, &bh, &al)
-				if bh.Type.Etype == gc.TINT32 {
-					//	MOVW	bh->31, ah
-					gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &ah)
-				} else {
-					gins(arm.AEOR, &ah, &ah)
-				}
-			} else if v > 0 {
-				//	MOVW	bl>>v, al
-				gshift(arm.AMOVW, &bl, arm.SHIFT_LR, int32(v), &al)
-
-				//	OR		bh<<(32-v), al
-				gshift(arm.AORR, &bh, arm.SHIFT_LL, int32(32-v), &al)
-
-				if bh.Type.Etype == gc.TINT32 {
-					//	MOVW	bh->v, ah
-					gshift(arm.AMOVW, &bh, arm.SHIFT_AR, int32(v), &ah)
-				} else {
-					//	MOVW	bh>>v, ah
-					gshift(arm.AMOVW, &bh, arm.SHIFT_LR, int32(v), &ah)
-				}
-			} else {
-				gins(arm.AMOVW, &bl, &al)
-				gins(arm.AMOVW, &bh, &ah)
-			}
-
-			goto orsh_break
-		}
-
-		gc.Regalloc(&s, gc.Types[gc.TUINT32], nil)
-		gc.Regalloc(&creg, gc.Types[gc.TUINT32], nil)
-		if gc.Is64(r.Type) {
-			// shift is >= 1<<32
-			var ch gc.Node
-			var cl gc.Node
-			split64(r, &cl, &ch)
-
-			gmove(&ch, &s)
-			gins(arm.ATST, &s, nil)
-			var p1 *obj.Prog
-			if bh.Type.Etype == gc.TINT32 {
-				p1 = gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &ah)
-			} else {
-				p1 = gins(arm.AEOR, &ah, &ah)
-			}
-			p1.Scond = arm.C_SCOND_NE
-			p6 = gc.Gbranch(arm.ABNE, nil, 0)
-			gmove(&cl, &s)
-			splitclean()
-		} else {
-			gmove(r, &s)
-			p6 = nil
-		}
-
-		gins(arm.ATST, &s, nil)
-
-		// shift == 0
-		p1 = gins(arm.AMOVW, &bl, &al)
-
-		p1.Scond = arm.C_SCOND_EQ
-		p1 = gins(arm.AMOVW, &bh, &ah)
-		p1.Scond = arm.C_SCOND_EQ
-		p2 = gc.Gbranch(arm.ABEQ, nil, 0)
-
-		// check if shift is < 32
-		gc.Nodconst(&n1, gc.Types[gc.TUINT32], 32)
-
-		gmove(&n1, &creg)
-		gins(arm.ACMP, &s, &creg)
-
-		//	MOVW.LO		bl>>s, al
-		p1 = gregshift(arm.AMOVW, &bl, arm.SHIFT_LR, &s, &al)
-
-		p1.Scond = arm.C_SCOND_LO
-
-		//	SUB.LO		s,creg
-		p1 = gins(arm.ASUB, &s, &creg)
-
-		p1.Scond = arm.C_SCOND_LO
-
-		//	OR.LO		bh<<(32-s), al
-		p1 = gregshift(arm.AORR, &bh, arm.SHIFT_LL, &creg, &al)
-
-		p1.Scond = arm.C_SCOND_LO
-
-		if bh.Type.Etype == gc.TINT32 {
-			//	MOVW	bh->s, ah
-			p1 = gregshift(arm.AMOVW, &bh, arm.SHIFT_AR, &s, &ah)
-		} else {
-			//	MOVW	bh>>s, ah
-			p1 = gregshift(arm.AMOVW, &bh, arm.SHIFT_LR, &s, &ah)
-		}
-
-		p1.Scond = arm.C_SCOND_LO
-
-		//	BLO	end
-		p3 = gc.Gbranch(arm.ABLO, nil, 0)
-
-		// shift == 32
-		p1 = gins(arm.AMOVW, &bh, &al)
-
-		p1.Scond = arm.C_SCOND_EQ
-		if bh.Type.Etype == gc.TINT32 {
-			gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &ah)
-		} else {
-			gins(arm.AEOR, &ah, &ah)
-		}
-		p4 = gc.Gbranch(arm.ABEQ, nil, 0)
-
-		// check if shift is < 64
-		gc.Nodconst(&n1, gc.Types[gc.TUINT32], 64)
-
-		gmove(&n1, &creg)
-		gins(arm.ACMP, &s, &creg)
-
-		//	MOVW.LO		creg>>1, creg
-		p1 = gshift(arm.AMOVW, &creg, arm.SHIFT_LR, 1, &creg)
-
-		p1.Scond = arm.C_SCOND_LO
-
-		//	SUB.LO		creg, s
-		p1 = gins(arm.ASUB, &creg, &s)
-
-		p1.Scond = arm.C_SCOND_LO
-
-		if bh.Type.Etype == gc.TINT32 {
-			//	MOVW	bh->(s-32), al
-			p1 := gregshift(arm.AMOVW, &bh, arm.SHIFT_AR, &s, &al)
-
-			p1.Scond = arm.C_SCOND_LO
-		} else {
-			//	MOVW	bh>>(v-32), al
-			p1 := gregshift(arm.AMOVW, &bh, arm.SHIFT_LR, &s, &al)
-
-			p1.Scond = arm.C_SCOND_LO
-		}
-
-		//	BLO	end
-		p5 = gc.Gbranch(arm.ABLO, nil, 0)
-
-		// s >= 64
-		if p6 != nil {
-			gc.Patch(p6, gc.Pc)
-		}
-		if bh.Type.Etype == gc.TINT32 {
-			//	MOVW	bh->31, al
-			gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &al)
-		} else {
-			gins(arm.AEOR, &al, &al)
-		}
-
-		gc.Patch(p2, gc.Pc)
-		gc.Patch(p3, gc.Pc)
-		gc.Patch(p4, gc.Pc)
-		gc.Patch(p5, gc.Pc)
-		gc.Regfree(&s)
-		gc.Regfree(&creg)
-
-	orsh_break:
-		gc.Regfree(&bl)
-		gc.Regfree(&bh)
-
-		// TODO(kaib): literal optimizations
-	// make constant the right side (it usually is anyway).
-	//		if(lo1.op == OLITERAL) {
-	//			nswap(&lo1, &lo2);
-	//			nswap(&hi1, &hi2);
-	//		}
-	//		if(lo2.op == OLITERAL) {
-	//			// special cases for constants.
-	//			lv = mpgetfix(lo2.val.u.xval);
-	//			hv = mpgetfix(hi2.val.u.xval);
-	//			splitclean();	// right side
-	//			split64(res, &lo2, &hi2);
-	//			switch(n->op) {
-	//			case OXOR:
-	//				gmove(&lo1, &lo2);
-	//				gmove(&hi1, &hi2);
-	//				switch(lv) {
-	//				case 0:
-	//					break;
-	//				case 0xffffffffu:
-	//					gins(ANOTL, N, &lo2);
-	//					break;
-	//				default:
-	//					gins(AXORL, ncon(lv), &lo2);
-	//					break;
-	//				}
-	//				switch(hv) {
-	//				case 0:
-	//					break;
-	//				case 0xffffffffu:
-	//					gins(ANOTL, N, &hi2);
-	//					break;
-	//				default:
-	//					gins(AXORL, ncon(hv), &hi2);
-	//					break;
-	//				}
-	//				break;
-
-	//			case OAND:
-	//				switch(lv) {
-	//				case 0:
-	//					gins(AMOVL, ncon(0), &lo2);
-	//					break;
-	//				default:
-	//					gmove(&lo1, &lo2);
-	//					if(lv != 0xffffffffu)
-	//						gins(AANDL, ncon(lv), &lo2);
-	//					break;
-	//				}
-	//				switch(hv) {
-	//				case 0:
-	//					gins(AMOVL, ncon(0), &hi2);
-	//					break;
-	//				default:
-	//					gmove(&hi1, &hi2);
-	//					if(hv != 0xffffffffu)
-	//						gins(AANDL, ncon(hv), &hi2);
-	//					break;
-	//				}
-	//				break;
-
-	//			case OOR:
-	//				switch(lv) {
-	//				case 0:
-	//					gmove(&lo1, &lo2);
-	//					break;
-	//				case 0xffffffffu:
-	//					gins(AMOVL, ncon(0xffffffffu), &lo2);
-	//					break;
-	//				default:
-	//					gmove(&lo1, &lo2);
-	//					gins(AORL, ncon(lv), &lo2);
-	//					break;
-	//				}
-	//				switch(hv) {
-	//				case 0:
-	//					gmove(&hi1, &hi2);
-	//					break;
-	//				case 0xffffffffu:
-	//					gins(AMOVL, ncon(0xffffffffu), &hi2);
-	//					break;
-	//				default:
-	//					gmove(&hi1, &hi2);
-	//					gins(AORL, ncon(hv), &hi2);
-	//					break;
-	//				}
-	//				break;
-	//			}
-	//			splitclean();
-	//			splitclean();
-	//			goto out;
-	//		}
-	case gc.OXOR,
-		gc.OAND,
-		gc.OOR:
-		var n1 gc.Node
-		gc.Regalloc(&n1, lo1.Type, nil)
-
-		gins(arm.AMOVW, &lo1, &al)
-		gins(arm.AMOVW, &hi1, &ah)
-		gins(arm.AMOVW, &lo2, &n1)
-		gins(optoas(n.Op, lo1.Type), &n1, &al)
-		gins(arm.AMOVW, &hi2, &n1)
-		gins(optoas(n.Op, lo1.Type), &n1, &ah)
-		gc.Regfree(&n1)
-	}
-
-	if gc.Is64(r.Type) {
-		splitclean()
-	}
-	splitclean()
-
-	split64(res, &lo1, &hi1)
-	gins(arm.AMOVW, &al, &lo1)
-	gins(arm.AMOVW, &ah, &hi1)
-	splitclean()
-
-	//out:
-	gc.Regfree(&al)
-
-	gc.Regfree(&ah)
-}
-
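
The OLROT case is the easiest to sanity-check in isolation: a 64-bit
rotate by a constant, built from 32-bit halves exactly as the shift/OR
comments above describe. A runnable sketch:

	package main

	import (
		"fmt"
		"math/bits"
	)

	// rotl64 rotates hi:lo left by c (0 <= c < 64) using only
	// 32-bit operations.
	func rotl64(lo, hi uint32, c uint) (uint32, uint32) {
		if c >= 32 { // first 32 bits of the rotate: swap the halves
			lo, hi = hi, lo
			c -= 32
		}
		if c == 0 {
			return lo, hi
		}
		return lo<<c | hi>>(32-c), hi<<c | lo>>(32-c)
	}

	func main() {
		v := uint64(0x0123456789abcdef)
		lo, hi := rotl64(uint32(v), uint32(v>>32), 40)
		got := uint64(hi)<<32 | uint64(lo)
		fmt.Println(got == bits.RotateLeft64(v, 40)) // true
	}
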
-/*
- * generate comparison of nl, nr, both 64-bit.
- * nl is memory; nr is constant or memory.
- */
-func cmp64(nl *gc.Node, nr *gc.Node, op gc.Op, likely int, to *obj.Prog) {
-	var lo1 gc.Node
-	var hi1 gc.Node
-	var lo2 gc.Node
-	var hi2 gc.Node
-	var r1 gc.Node
-	var r2 gc.Node
-
-	split64(nl, &lo1, &hi1)
-	split64(nr, &lo2, &hi2)
-
-	// compare most significant word;
-	// if they differ, we're done.
-	t := hi1.Type
-
-	gc.Regalloc(&r1, gc.Types[gc.TINT32], nil)
-	gc.Regalloc(&r2, gc.Types[gc.TINT32], nil)
-	gins(arm.AMOVW, &hi1, &r1)
-	gins(arm.AMOVW, &hi2, &r2)
-	gins(arm.ACMP, &r1, &r2)
-	gc.Regfree(&r1)
-	gc.Regfree(&r2)
-
-	var br *obj.Prog
-	switch op {
-	default:
-		gc.Fatalf("cmp64 %v %v", op, t)
-
-		// cmp hi
-	// bne L
-	// cmp lo
-	// beq to
-	// L:
-	case gc.OEQ:
-		br = gc.Gbranch(arm.ABNE, nil, -likely)
-
-		// cmp hi
-	// bne to
-	// cmp lo
-	// bne to
-	case gc.ONE:
-		gc.Patch(gc.Gbranch(arm.ABNE, nil, likely), to)
-
-		// cmp hi
-	// bgt to
-	// blt L
-	// cmp lo
-	// bge to (or bgt to)
-	// L:
-	case gc.OGE,
-		gc.OGT:
-		gc.Patch(gc.Gbranch(optoas(gc.OGT, t), nil, likely), to)
-
-		br = gc.Gbranch(optoas(gc.OLT, t), nil, -likely)
-
-		// cmp hi
-	// blt to
-	// bgt L
-	// cmp lo
-	// ble to (or blt to)
-	// L:
-	case gc.OLE,
-		gc.OLT:
-		gc.Patch(gc.Gbranch(optoas(gc.OLT, t), nil, likely), to)
-
-		br = gc.Gbranch(optoas(gc.OGT, t), nil, -likely)
-	}
-
-	// compare least significant word
-	t = lo1.Type
-
-	gc.Regalloc(&r1, gc.Types[gc.TINT32], nil)
-	gc.Regalloc(&r2, gc.Types[gc.TINT32], nil)
-	gins(arm.AMOVW, &lo1, &r1)
-	gins(arm.AMOVW, &lo2, &r2)
-	gins(arm.ACMP, &r1, &r2)
-	gc.Regfree(&r1)
-	gc.Regfree(&r2)
-
-	// jump again
-	gc.Patch(gc.Gbranch(optoas(op, t), nil, likely), to)
-
-	// point first branch down here if appropriate
-	if br != nil {
-		gc.Patch(br, gc.Pc)
-	}
-
-	splitclean()
-	splitclean()
-}
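
The branch ladder implements an ordinary lexicographic comparison of
(hi, lo) pairs: decide on the high words unless they are equal, then
compare the low words unsigned. In plain Go:

	// less64 reports whether the signed 64-bit value ahi:alo is less
	// than bhi:blo, the way cmp64's two comparisons decide it.
	func less64(ahi int32, alo uint32, bhi int32, blo uint32) bool {
		if ahi != bhi {
			return ahi < bhi
		}
		return alo < blo
	}
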
diff --git a/src/cmd/compile/internal/arm/galign.go b/src/cmd/compile/internal/arm/galign.go
index afd86e4..acd1508 100644
--- a/src/cmd/compile/internal/arm/galign.go
+++ b/src/cmd/compile/internal/arm/galign.go
@@ -28,38 +28,9 @@
 	gc.Thearch.ReservedRegs = resvd
 
 	gc.Thearch.Betypeinit = betypeinit
-	gc.Thearch.Cgen64 = cgen64
-	gc.Thearch.Cgen_hmul = cgen_hmul
-	gc.Thearch.Cgen_shift = cgen_shift
-	gc.Thearch.Clearfat = clearfat
-	gc.Thearch.Cmp64 = cmp64
 	gc.Thearch.Defframe = defframe
-	gc.Thearch.Excise = excise
-	gc.Thearch.Expandchecks = expandchecks
-	gc.Thearch.Getg = getg
 	gc.Thearch.Gins = gins
-	gc.Thearch.Ginscmp = ginscmp
-	gc.Thearch.Ginscon = ginscon
-	gc.Thearch.Ginsnop = ginsnop
-	gc.Thearch.Gmove = gmove
-	gc.Thearch.Cgenindex = cgenindex
-	gc.Thearch.Peep = peep
 	gc.Thearch.Proginfo = proginfo
-	gc.Thearch.Regtyp = regtyp
-	gc.Thearch.Sameaddr = sameaddr
-	gc.Thearch.Smallindir = smallindir
-	gc.Thearch.Stackaddr = stackaddr
-	gc.Thearch.Blockcopy = blockcopy
-	gc.Thearch.Sudoaddable = sudoaddable
-	gc.Thearch.Sudoclean = sudoclean
-	gc.Thearch.Excludedregs = excludedregs
-	gc.Thearch.RtoB = RtoB
-	gc.Thearch.FtoB = RtoB
-	gc.Thearch.BtoR = BtoR
-	gc.Thearch.BtoF = BtoF
-	gc.Thearch.Optoas = optoas
-	gc.Thearch.Doregbits = doregbits
-	gc.Thearch.Regnames = regnames
 
 	gc.Thearch.SSARegToReg = ssaRegToReg
 	gc.Thearch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {}
diff --git a/src/cmd/compile/internal/arm/ggen.go b/src/cmd/compile/internal/arm/ggen.go
index 15d13ed..24b63b4 100644
--- a/src/cmd/compile/internal/arm/ggen.go
+++ b/src/cmd/compile/internal/arm/ggen.go
@@ -111,440 +111,9 @@
 	return q
 }
 
-/*
- * generate high multiply
- *  res = (nl * nr) >> wordsize
- */
-func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
-	if nl.Ullman < nr.Ullman {
-		nl, nr = nr, nl
-	}
-
-	t := nl.Type
-	w := t.Width * 8
-	var n1 gc.Node
-	gc.Regalloc(&n1, t, res)
-	gc.Cgen(nl, &n1)
-	var n2 gc.Node
-	gc.Regalloc(&n2, t, nil)
-	gc.Cgen(nr, &n2)
-	switch gc.Simtype[t.Etype] {
-	case gc.TINT8,
-		gc.TINT16:
-		gins(optoas(gc.OMUL, t), &n2, &n1)
-		gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(w), &n1)
-
-	case gc.TUINT8,
-		gc.TUINT16:
-		gins(optoas(gc.OMUL, t), &n2, &n1)
-		gshift(arm.AMOVW, &n1, arm.SHIFT_LR, int32(w), &n1)
-
-		// perform a long multiplication.
-	case gc.TINT32,
-		gc.TUINT32:
-		var p *obj.Prog
-		if t.IsSigned() {
-			p = gins(arm.AMULL, &n2, nil)
-		} else {
-			p = gins(arm.AMULLU, &n2, nil)
-		}
-
-		// n2 * n1 -> (n1 n2)
-		p.Reg = n1.Reg
-
-		p.To.Type = obj.TYPE_REGREG
-		p.To.Reg = n1.Reg
-		p.To.Offset = int64(n2.Reg)
-
-	default:
-		gc.Fatalf("cgen_hmul %v", t)
-	}
-
-	gc.Cgen(&n1, res)
-	gc.Regfree(&n1)
-	gc.Regfree(&n2)
-}
-
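
Stripped of register management, the 32-bit case computes this (the 8-
and 16-bit cases shift an ordinary product right by the operand width
instead):

	// hmul32 returns the high word of the full 64-bit product,
	// i.e. (nl * nr) >> wordsize from the comment above.
	func hmul32(x, y int32) int32 {
		return int32((int64(x) * int64(y)) >> 32)
	}
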
-/*
- * generate shift according to op, one of:
- *	res = nl << nr
- *	res = nl >> nr
- */
-func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
-	if nl.Type.Width > 4 {
-		gc.Fatalf("cgen_shift %v", nl.Type)
-	}
-
-	w := int(nl.Type.Width * 8)
-
-	if op == gc.OLROT {
-		v := nr.Int64()
-		var n1 gc.Node
-		gc.Regalloc(&n1, nl.Type, res)
-		if w == 32 {
-			gc.Cgen(nl, &n1)
-			gshift(arm.AMOVW, &n1, arm.SHIFT_RR, int32(w)-int32(v), &n1)
-		} else {
-			var n2 gc.Node
-			gc.Regalloc(&n2, nl.Type, nil)
-			gc.Cgen(nl, &n2)
-			gshift(arm.AMOVW, &n2, arm.SHIFT_LL, int32(v), &n1)
-			gshift(arm.AORR, &n2, arm.SHIFT_LR, int32(w)-int32(v), &n1)
-			gc.Regfree(&n2)
-
-			// Ensure sign/zero-extended result.
-			gins(optoas(gc.OAS, nl.Type), &n1, &n1)
-		}
-
-		gmove(&n1, res)
-		gc.Regfree(&n1)
-		return
-	}
-
-	if nr.Op == gc.OLITERAL {
-		var n1 gc.Node
-		gc.Regalloc(&n1, nl.Type, res)
-		gc.Cgen(nl, &n1)
-		sc := uint64(nr.Int64())
-		if sc == 0 {
-			// nothing to do
-		} else if sc >= uint64(nl.Type.Width*8) {
-			if op == gc.ORSH && nl.Type.IsSigned() {
-				gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(w), &n1)
-			} else {
-				gins(arm.AEOR, &n1, &n1)
-			}
-		} else {
-			if op == gc.ORSH && nl.Type.IsSigned() {
-				gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(sc), &n1)
-			} else if op == gc.ORSH {
-				gshift(arm.AMOVW, &n1, arm.SHIFT_LR, int32(sc), &n1)
-			} else { // OLSH
-				gshift(arm.AMOVW, &n1, arm.SHIFT_LL, int32(sc), &n1)
-			}
-		}
-
-		if w < 32 && op == gc.OLSH {
-			gins(optoas(gc.OAS, nl.Type), &n1, &n1)
-		}
-		gmove(&n1, res)
-		gc.Regfree(&n1)
-		return
-	}
-
-	tr := nr.Type
-	var t gc.Node
-	var n1 gc.Node
-	var n2 gc.Node
-	var n3 gc.Node
-	if tr.Width > 4 {
-		var nt gc.Node
-		gc.Tempname(&nt, nr.Type)
-		if nl.Ullman >= nr.Ullman {
-			gc.Regalloc(&n2, nl.Type, res)
-			gc.Cgen(nl, &n2)
-			gc.Cgen(nr, &nt)
-			n1 = nt
-		} else {
-			gc.Cgen(nr, &nt)
-			gc.Regalloc(&n2, nl.Type, res)
-			gc.Cgen(nl, &n2)
-		}
-
-		var hi gc.Node
-		var lo gc.Node
-		split64(&nt, &lo, &hi)
-		gc.Regalloc(&n1, gc.Types[gc.TUINT32], nil)
-		gc.Regalloc(&n3, gc.Types[gc.TUINT32], nil)
-		gmove(&lo, &n1)
-		gmove(&hi, &n3)
-		splitclean()
-		gins(arm.ATST, &n3, nil)
-		gc.Nodconst(&t, gc.Types[gc.TUINT32], int64(w))
-		p1 := gins(arm.AMOVW, &t, &n1)
-		p1.Scond = arm.C_SCOND_NE
-		tr = gc.Types[gc.TUINT32]
-		gc.Regfree(&n3)
-	} else {
-		if nl.Ullman >= nr.Ullman {
-			gc.Regalloc(&n2, nl.Type, res)
-			gc.Cgen(nl, &n2)
-			gc.Regalloc(&n1, nr.Type, nil)
-			gc.Cgen(nr, &n1)
-		} else {
-			gc.Regalloc(&n1, nr.Type, nil)
-			gc.Cgen(nr, &n1)
-			gc.Regalloc(&n2, nl.Type, res)
-			gc.Cgen(nl, &n2)
-		}
-	}
-
-	// test for shift being 0
-	gins(arm.ATST, &n1, nil)
-
-	p3 := gc.Gbranch(arm.ABEQ, nil, -1)
-
-	// test and fix up large shifts
-	// TODO: if(!bounded), don't emit some of this.
-	gc.Regalloc(&n3, tr, nil)
-
-	gc.Nodconst(&t, gc.Types[gc.TUINT32], int64(w))
-	gmove(&t, &n3)
-	gins(arm.ACMP, &n1, &n3)
-	if op == gc.ORSH {
-		var p1 *obj.Prog
-		var p2 *obj.Prog
-		if nl.Type.IsSigned() {
-			p1 = gshift(arm.AMOVW, &n2, arm.SHIFT_AR, int32(w)-1, &n2)
-			p2 = gregshift(arm.AMOVW, &n2, arm.SHIFT_AR, &n1, &n2)
-		} else {
-			p1 = gins(arm.AEOR, &n2, &n2)
-			p2 = gregshift(arm.AMOVW, &n2, arm.SHIFT_LR, &n1, &n2)
-		}
-
-		p1.Scond = arm.C_SCOND_HS
-		p2.Scond = arm.C_SCOND_LO
-	} else {
-		p1 := gins(arm.AEOR, &n2, &n2)
-		p2 := gregshift(arm.AMOVW, &n2, arm.SHIFT_LL, &n1, &n2)
-		p1.Scond = arm.C_SCOND_HS
-		p2.Scond = arm.C_SCOND_LO
-	}
-
-	gc.Regfree(&n3)
-
-	gc.Patch(p3, gc.Pc)
-
-	// Left-shift of smaller word must be sign/zero-extended.
-	if w < 32 && op == gc.OLSH {
-		gins(optoas(gc.OAS, nl.Type), &n2, &n2)
-	}
-	gmove(&n2, res)
-
-	gc.Regfree(&n1)
-	gc.Regfree(&n2)
-}
-
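
The constant-shift ladder (sc == 0, sc >= width, otherwise) exists
because Go defines shifts by counts greater than or equal to the operand
width; the generated code must produce the sign or zero fill rather than
whatever the hardware does. Spelled out for a signed 32-bit right shift:

	// shr32 mimics what cgen_shift arranges for int32 >> s with an
	// arbitrary unsigned count: counts >= 32 yield the sign fill
	// (the unsigned case zeroes with AEOR instead).
	func shr32(x int32, s uint) int32 {
		if s >= 32 {
			return x >> 31
		}
		return x >> s
	}
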
-func clearfat(nl *gc.Node) {
-	/* clear a fat object */
-	if gc.Debug['g'] != 0 {
-		gc.Dump("\nclearfat", nl)
-	}
-
-	w := uint32(nl.Type.Width)
-
-	// Avoid taking the address for simple enough types.
-	if gc.Componentgen(nil, nl) {
-		return
-	}
-
-	c := w % 4 // bytes
-	q := w / 4 // quads
-
-	if nl.Type.Align < 4 {
-		q = 0
-		c = w
-	}
-
-	var r0 gc.Node
-	r0.Op = gc.OREGISTER
-
-	r0.Reg = arm.REG_R0
-	var r1 gc.Node
-	r1.Op = gc.OREGISTER
-	r1.Reg = arm.REG_R1
-	var dst gc.Node
-	gc.Regalloc(&dst, gc.Types[gc.Tptr], &r1)
-	gc.Agen(nl, &dst)
-	var nc gc.Node
-	gc.Nodconst(&nc, gc.Types[gc.TUINT32], 0)
-	var nz gc.Node
-	gc.Regalloc(&nz, gc.Types[gc.TUINT32], &r0)
-	gc.Cgen(&nc, &nz)
-
-	if q > 128 {
-		var end gc.Node
-		gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
-		p := gins(arm.AMOVW, &dst, &end)
-		p.From.Type = obj.TYPE_ADDR
-		p.From.Offset = int64(q) * 4
-
-		p = gins(arm.AMOVW, &nz, &dst)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Offset = 4
-		p.Scond |= arm.C_PBIT
-		pl := p
-
-		p = gins(arm.ACMP, &dst, nil)
-		raddr(&end, p)
-		gc.Patch(gc.Gbranch(arm.ABNE, nil, 0), pl)
-
-		gc.Regfree(&end)
-	} else if q >= 4 && !gc.Nacl {
-		f := gc.Sysfunc("duffzero")
-		p := gins(obj.ADUFFZERO, nil, f)
-		gc.Afunclit(&p.To, f)
-
-		// 4 and 128 = magic constants: see ../../runtime/asm_arm.s
-		p.To.Offset = 4 * (128 - int64(q))
-	} else {
-		var p *obj.Prog
-		for q > 0 {
-			p = gins(arm.AMOVW, &nz, &dst)
-			p.To.Type = obj.TYPE_MEM
-			p.To.Offset = 4
-			p.Scond |= arm.C_PBIT
-
-			//print("1. %v\n", p);
-			q--
-		}
-	}
-
-	if c > 4 {
-		// Loop to zero unaligned memory.
-		var end gc.Node
-		gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
-		p := gins(arm.AMOVW, &dst, &end)
-		p.From.Type = obj.TYPE_ADDR
-		p.From.Offset = int64(c)
-
-		p = gins(arm.AMOVB, &nz, &dst)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Offset = 1
-		p.Scond |= arm.C_PBIT
-		pl := p
-
-		p = gins(arm.ACMP, &dst, nil)
-		raddr(&end, p)
-		gc.Patch(gc.Gbranch(arm.ABNE, nil, 0), pl)
-
-		gc.Regfree(&end)
-		c = 0
-	}
-	var p *obj.Prog
-	for c > 0 {
-		p = gins(arm.AMOVB, &nz, &dst)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Offset = 1
-		p.Scond |= arm.C_PBIT
-
-		//print("2. %v\n", p);
-		c--
-	}
-
-	gc.Regfree(&dst)
-	gc.Regfree(&nz)
-}
-
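
Ignoring the Duff's device and loop-unrolling thresholds, clearfat's
split of the work into 4-byte words plus a byte tail is just:

	// clearBlock zeroes w bytes of b: whole words while they last
	// (q = w/4 of them), then the c = w%4 tail bytes.
	func clearBlock(b []byte, w uint32) {
		i := uint32(0)
		for q := w / 4; q > 0; q-- {
			b[i], b[i+1], b[i+2], b[i+3] = 0, 0, 0, 0
			i += 4
		}
		for c := w % 4; c > 0; c-- {
			b[i] = 0
			i++
		}
	}
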
-// Called after regopt and peep have run.
-// Expand CHECKNIL pseudo-op into actual nil pointer check.
-func expandchecks(firstp *obj.Prog) {
-	var reg int
-	var p1 *obj.Prog
-
-	for p := firstp; p != nil; p = p.Link {
-		if p.As != obj.ACHECKNIL {
-			continue
-		}
-		if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
-			gc.Warnl(p.Lineno, "generated nil check")
-		}
-		if p.From.Type != obj.TYPE_REG {
-			gc.Fatalf("invalid nil check %v", p)
-		}
-		reg = int(p.From.Reg)
-
-		// check is
-		//	CMP arg, $0
-		//	MOV.EQ arg, 0(arg)
-		p1 = gc.Ctxt.NewProg()
-
-		gc.Clearp(p1)
-		p1.Link = p.Link
-		p.Link = p1
-		p1.Lineno = p.Lineno
-		p1.Pc = 9999
-		p1.As = arm.AMOVW
-		p1.From.Type = obj.TYPE_REG
-		p1.From.Reg = int16(reg)
-		p1.To.Type = obj.TYPE_MEM
-		p1.To.Reg = int16(reg)
-		p1.To.Offset = 0
-		p1.Scond = arm.C_SCOND_EQ
-		p.As = arm.ACMP
-		p.From.Type = obj.TYPE_CONST
-		p.From.Reg = 0
-		p.From.Offset = 0
-		p.Reg = int16(reg)
-	}
-}
-
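
The expanded CMP/MOV.EQ pair has the same effect as this Go, where the
conditional memory access through the nil pointer is what raises the
fault:

	// nilCheck is a semantic model of the expanded sequence: compare
	// against zero and, only on equality, touch memory through the
	// pointer so the hardware faults.
	func nilCheck(p *int) {
		if p == nil {
			_ = *p // faults, like MOV.EQ arg, 0(arg)
		}
	}
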
 func ginsnop() {
 	var r gc.Node
 	gc.Nodreg(&r, gc.Types[gc.TINT], arm.REG_R0)
 	p := gins(arm.AAND, &r, &r)
 	p.Scond = arm.C_SCOND_EQ
 }
-
-/*
- * generate
- *	as $c, n
- */
-func ginscon(as obj.As, c int64, n *gc.Node) {
-	var n1 gc.Node
-	gc.Nodconst(&n1, gc.Types[gc.TINT32], c)
-	var n2 gc.Node
-	gc.Regalloc(&n2, gc.Types[gc.TINT32], nil)
-	gmove(&n1, &n2)
-	gins(as, &n2, n)
-	gc.Regfree(&n2)
-}
-
-func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
-	if t.IsInteger() && n1.Op == gc.OLITERAL && n1.Int64() == 0 && n2.Op != gc.OLITERAL {
-		op = gc.Brrev(op)
-		n1, n2 = n2, n1
-	}
-	var r1, r2, g1, g2 gc.Node
-	gc.Regalloc(&r1, t, n1)
-	gc.Regalloc(&g1, n1.Type, &r1)
-	gc.Cgen(n1, &g1)
-	gmove(&g1, &r1)
-	if t.IsInteger() && n2.Op == gc.OLITERAL && n2.Int64() == 0 {
-		gins(arm.ACMP, &r1, n2)
-	} else {
-		gc.Regalloc(&r2, t, n2)
-		gc.Regalloc(&g2, n1.Type, &r2)
-		gc.Cgen(n2, &g2)
-		gmove(&g2, &r2)
-		gins(optoas(gc.OCMP, t), &r1, &r2)
-		gc.Regfree(&g2)
-		gc.Regfree(&r2)
-	}
-	gc.Regfree(&g1)
-	gc.Regfree(&r1)
-	return gc.Gbranch(optoas(op, t), nil, likely)
-}
-
-// addr += index*width if possible.
-func addindex(index *gc.Node, width int64, addr *gc.Node) bool {
-	switch width {
-	case 2:
-		gshift(arm.AADD, index, arm.SHIFT_LL, 1, addr)
-		return true
-	case 4:
-		gshift(arm.AADD, index, arm.SHIFT_LL, 2, addr)
-		return true
-	case 8:
-		gshift(arm.AADD, index, arm.SHIFT_LL, 3, addr)
-		return true
-	}
-	return false
-}
-
-// res = runtime.getg()
-func getg(res *gc.Node) {
-	var n1 gc.Node
-	gc.Nodreg(&n1, res.Type, arm.REGG)
-	gmove(&n1, res)
-}
diff --git a/src/cmd/compile/internal/arm/gsubr.go b/src/cmd/compile/internal/arm/gsubr.go
index 1a7ebbc..2ef4760 100644
--- a/src/cmd/compile/internal/arm/gsubr.go
+++ b/src/cmd/compile/internal/arm/gsubr.go
@@ -43,587 +43,6 @@
 }
 
 /*
- * ncon returns a node for the constant i.
- * The node is overwritten by the next call, but is useful in calls to gins.
- */
-
-var ncon_n gc.Node
-
-func ncon(i uint32) *gc.Node {
-	if ncon_n.Type == nil {
-		gc.Nodconst(&ncon_n, gc.Types[gc.TUINT32], 0)
-	}
-	ncon_n.SetInt(int64(i))
-	return &ncon_n
-}
-
-var sclean [10]gc.Node
-
-var nsclean int
-
-/*
- * n is a 64-bit value.  fill in lo and hi to refer to its 32-bit halves.
- */
-func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
-	if !gc.Is64(n.Type) {
-		gc.Fatalf("split64 %v", n.Type)
-	}
-
-	if nsclean >= len(sclean) {
-		gc.Fatalf("split64 clean")
-	}
-	sclean[nsclean].Op = gc.OEMPTY
-	nsclean++
-	switch n.Op {
-	default:
-		switch n.Op {
-		default:
-			var n1 gc.Node
-			if !dotaddable(n, &n1) {
-				gc.Igen(n, &n1, nil)
-				sclean[nsclean-1] = n1
-			}
-
-			n = &n1
-
-		case gc.ONAME, gc.OINDREG:
-			// nothing
-		}
-
-		*lo = *n
-		*hi = *n
-		lo.Type = gc.Types[gc.TUINT32]
-		if n.Type.Etype == gc.TINT64 {
-			hi.Type = gc.Types[gc.TINT32]
-		} else {
-			hi.Type = gc.Types[gc.TUINT32]
-		}
-		hi.Xoffset += 4
-
-	case gc.OLITERAL:
-		var n1 gc.Node
-		n.Convconst(&n1, n.Type)
-		i := n1.Int64()
-		gc.Nodconst(lo, gc.Types[gc.TUINT32], int64(uint32(i)))
-		i >>= 32
-		if n.Type.Etype == gc.TINT64 {
-			gc.Nodconst(hi, gc.Types[gc.TINT32], int64(int32(i)))
-		} else {
-			gc.Nodconst(hi, gc.Types[gc.TUINT32], int64(uint32(i)))
-		}
-	}
-}
-
-func splitclean() {
-	if nsclean <= 0 {
-		gc.Fatalf("splitclean")
-	}
-	nsclean--
-	if sclean[nsclean].Op != gc.OEMPTY {
-		gc.Regfree(&sclean[nsclean])
-	}
-}
-
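
For the constant case, split64 amounts to the following; for memory
operands it instead aliases the two 4-byte halves at Xoffset and
Xoffset+4 (ARM here is little-endian, so the low word comes first):

	// splitConst64 splits a 64-bit constant into 32-bit halves; the
	// high half keeps the signedness of the original type, which is
	// why split64 gives hi type TINT32 for TINT64 inputs.
	func splitConst64(i int64) (lo uint32, hi int32) {
		return uint32(i), int32(i >> 32)
	}
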
-func gmove(f *gc.Node, t *gc.Node) {
-	if gc.Debug['M'] != 0 {
-		fmt.Printf("gmove %v -> %v\n", f, t)
-	}
-
-	ft := gc.Simsimtype(f.Type)
-	tt := gc.Simsimtype(t.Type)
-	cvt := t.Type
-
-	if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
-		gc.Complexmove(f, t)
-		return
-	}
-
-	// cannot have two memory operands;
-	// except 64-bit, which always copies via registers anyway.
-	var a obj.As
-	var r1 gc.Node
-	if !gc.Is64(f.Type) && !gc.Is64(t.Type) && gc.Ismem(f) && gc.Ismem(t) {
-		goto hard
-	}
-
-	// convert constant to desired type
-	if f.Op == gc.OLITERAL {
-		var con gc.Node
-		switch tt {
-		default:
-			f.Convconst(&con, t.Type)
-
-		case gc.TINT16,
-			gc.TINT8:
-			var con gc.Node
-			f.Convconst(&con, gc.Types[gc.TINT32])
-			var r1 gc.Node
-			gc.Regalloc(&r1, con.Type, t)
-			gins(arm.AMOVW, &con, &r1)
-			gmove(&r1, t)
-			gc.Regfree(&r1)
-			return
-
-		case gc.TUINT16,
-			gc.TUINT8:
-			var con gc.Node
-			f.Convconst(&con, gc.Types[gc.TUINT32])
-			var r1 gc.Node
-			gc.Regalloc(&r1, con.Type, t)
-			gins(arm.AMOVW, &con, &r1)
-			gmove(&r1, t)
-			gc.Regfree(&r1)
-			return
-		}
-
-		f = &con
-		ft = gc.Simsimtype(con.Type)
-
-		// constants can't move directly to memory
-		if gc.Ismem(t) && !gc.Is64(t.Type) {
-			goto hard
-		}
-	}
-
-	// value -> value copy, only one memory operand.
-	// figure out the instruction to use.
-	// break out of switch for one-instruction gins.
-	// goto rdst for "destination must be register".
-	// goto hard for "convert to cvt type first".
-	// otherwise handle and return.
-
-	switch uint32(ft)<<16 | uint32(tt) {
-	default:
-		// should not happen
-		gc.Fatalf("gmove %v -> %v", f, t)
-		return
-
-		/*
-		 * integer copy and truncate
-		 */
-	case gc.TINT8<<16 | gc.TINT8: // same size
-		if !gc.Ismem(f) {
-			a = arm.AMOVB
-			break
-		}
-		fallthrough
-
-	case gc.TUINT8<<16 | gc.TINT8,
-		gc.TINT16<<16 | gc.TINT8, // truncate
-		gc.TUINT16<<16 | gc.TINT8,
-		gc.TINT32<<16 | gc.TINT8,
-		gc.TUINT32<<16 | gc.TINT8:
-		a = arm.AMOVBS
-
-	case gc.TUINT8<<16 | gc.TUINT8:
-		if !gc.Ismem(f) {
-			a = arm.AMOVB
-			break
-		}
-		fallthrough
-
-	case gc.TINT8<<16 | gc.TUINT8,
-		gc.TINT16<<16 | gc.TUINT8,
-		gc.TUINT16<<16 | gc.TUINT8,
-		gc.TINT32<<16 | gc.TUINT8,
-		gc.TUINT32<<16 | gc.TUINT8:
-		a = arm.AMOVBU
-
-	case gc.TINT64<<16 | gc.TINT8, // truncate low word
-		gc.TUINT64<<16 | gc.TINT8:
-		a = arm.AMOVBS
-
-		goto trunc64
-
-	case gc.TINT64<<16 | gc.TUINT8,
-		gc.TUINT64<<16 | gc.TUINT8:
-		a = arm.AMOVBU
-		goto trunc64
-
-	case gc.TINT16<<16 | gc.TINT16: // same size
-		if !gc.Ismem(f) {
-			a = arm.AMOVH
-			break
-		}
-		fallthrough
-
-	case gc.TUINT16<<16 | gc.TINT16,
-		gc.TINT32<<16 | gc.TINT16, // truncate
-		gc.TUINT32<<16 | gc.TINT16:
-		a = arm.AMOVHS
-
-	case gc.TUINT16<<16 | gc.TUINT16:
-		if !gc.Ismem(f) {
-			a = arm.AMOVH
-			break
-		}
-		fallthrough
-
-	case gc.TINT16<<16 | gc.TUINT16,
-		gc.TINT32<<16 | gc.TUINT16,
-		gc.TUINT32<<16 | gc.TUINT16:
-		a = arm.AMOVHU
-
-	case gc.TINT64<<16 | gc.TINT16, // truncate low word
-		gc.TUINT64<<16 | gc.TINT16:
-		a = arm.AMOVHS
-
-		goto trunc64
-
-	case gc.TINT64<<16 | gc.TUINT16,
-		gc.TUINT64<<16 | gc.TUINT16:
-		a = arm.AMOVHU
-		goto trunc64
-
-	case gc.TINT32<<16 | gc.TINT32, // same size
-		gc.TINT32<<16 | gc.TUINT32,
-		gc.TUINT32<<16 | gc.TINT32,
-		gc.TUINT32<<16 | gc.TUINT32:
-		a = arm.AMOVW
-
-	case gc.TINT64<<16 | gc.TINT32, // truncate
-		gc.TUINT64<<16 | gc.TINT32,
-		gc.TINT64<<16 | gc.TUINT32,
-		gc.TUINT64<<16 | gc.TUINT32:
-		var flo gc.Node
-		var fhi gc.Node
-		split64(f, &flo, &fhi)
-
-		var r1 gc.Node
-		gc.Regalloc(&r1, t.Type, nil)
-		gins(arm.AMOVW, &flo, &r1)
-		gins(arm.AMOVW, &r1, t)
-		gc.Regfree(&r1)
-		splitclean()
-		return
-
-	case gc.TINT64<<16 | gc.TINT64, // same size
-		gc.TINT64<<16 | gc.TUINT64,
-		gc.TUINT64<<16 | gc.TINT64,
-		gc.TUINT64<<16 | gc.TUINT64:
-		var fhi gc.Node
-		var flo gc.Node
-		split64(f, &flo, &fhi)
-
-		var tlo gc.Node
-		var thi gc.Node
-		split64(t, &tlo, &thi)
-		var r1 gc.Node
-		gc.Regalloc(&r1, flo.Type, nil)
-		var r2 gc.Node
-		gc.Regalloc(&r2, fhi.Type, nil)
-		gins(arm.AMOVW, &flo, &r1)
-		gins(arm.AMOVW, &fhi, &r2)
-		gins(arm.AMOVW, &r1, &tlo)
-		gins(arm.AMOVW, &r2, &thi)
-		gc.Regfree(&r1)
-		gc.Regfree(&r2)
-		splitclean()
-		splitclean()
-		return
-
-		/*
-		 * integer up-conversions
-		 */
-	case gc.TINT8<<16 | gc.TINT16, // sign extend int8
-		gc.TINT8<<16 | gc.TUINT16,
-		gc.TINT8<<16 | gc.TINT32,
-		gc.TINT8<<16 | gc.TUINT32:
-		a = arm.AMOVBS
-
-		goto rdst
-
-	case gc.TINT8<<16 | gc.TINT64, // convert via int32
-		gc.TINT8<<16 | gc.TUINT64:
-		cvt = gc.Types[gc.TINT32]
-
-		goto hard
-
-	case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8
-		gc.TUINT8<<16 | gc.TUINT16,
-		gc.TUINT8<<16 | gc.TINT32,
-		gc.TUINT8<<16 | gc.TUINT32:
-		a = arm.AMOVBU
-
-		goto rdst
-
-	case gc.TUINT8<<16 | gc.TINT64, // convert via uint32
-		gc.TUINT8<<16 | gc.TUINT64:
-		cvt = gc.Types[gc.TUINT32]
-
-		goto hard
-
-	case gc.TINT16<<16 | gc.TINT32, // sign extend int16
-		gc.TINT16<<16 | gc.TUINT32:
-		a = arm.AMOVHS
-
-		goto rdst
-
-	case gc.TINT16<<16 | gc.TINT64, // convert via int32
-		gc.TINT16<<16 | gc.TUINT64:
-		cvt = gc.Types[gc.TINT32]
-
-		goto hard
-
-	case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16
-		gc.TUINT16<<16 | gc.TUINT32:
-		a = arm.AMOVHU
-
-		goto rdst
-
-	case gc.TUINT16<<16 | gc.TINT64, // convert via uint32
-		gc.TUINT16<<16 | gc.TUINT64:
-		cvt = gc.Types[gc.TUINT32]
-
-		goto hard
-
-	case gc.TINT32<<16 | gc.TINT64, // sign extend int32
-		gc.TINT32<<16 | gc.TUINT64:
-		var tlo gc.Node
-		var thi gc.Node
-		split64(t, &tlo, &thi)
-
-		var r1 gc.Node
-		gc.Regalloc(&r1, tlo.Type, nil)
-		var r2 gc.Node
-		gc.Regalloc(&r2, thi.Type, nil)
-		gmove(f, &r1)
-		p1 := gins(arm.AMOVW, &r1, &r2)
-		p1.From.Type = obj.TYPE_SHIFT
-		p1.From.Offset = 2<<5 | 31<<7 | int64(r1.Reg)&15 // r1->31
-		p1.From.Reg = 0
-
-		//print("gmove: %v\n", p1);
-		gins(arm.AMOVW, &r1, &tlo)
-
-		gins(arm.AMOVW, &r2, &thi)
-		gc.Regfree(&r1)
-		gc.Regfree(&r2)
-		splitclean()
-		return
-
-	case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
-		gc.TUINT32<<16 | gc.TUINT64:
-		var thi gc.Node
-		var tlo gc.Node
-		split64(t, &tlo, &thi)
-
-		gmove(f, &tlo)
-		var r1 gc.Node
-		gc.Regalloc(&r1, thi.Type, nil)
-		gins(arm.AMOVW, ncon(0), &r1)
-		gins(arm.AMOVW, &r1, &thi)
-		gc.Regfree(&r1)
-		splitclean()
-		return
-
-		//	case CASE(TFLOAT64, TUINT64):
-		/*
-		 * float to integer
-		 */
-	case gc.TFLOAT32<<16 | gc.TINT8,
-		gc.TFLOAT32<<16 | gc.TUINT8,
-		gc.TFLOAT32<<16 | gc.TINT16,
-		gc.TFLOAT32<<16 | gc.TUINT16,
-		gc.TFLOAT32<<16 | gc.TINT32,
-		gc.TFLOAT32<<16 | gc.TUINT32,
-
-		//	case CASE(TFLOAT32, TUINT64):
-
-		gc.TFLOAT64<<16 | gc.TINT8,
-		gc.TFLOAT64<<16 | gc.TUINT8,
-		gc.TFLOAT64<<16 | gc.TINT16,
-		gc.TFLOAT64<<16 | gc.TUINT16,
-		gc.TFLOAT64<<16 | gc.TINT32,
-		gc.TFLOAT64<<16 | gc.TUINT32:
-		fa := arm.AMOVF
-
-		a := arm.AMOVFW
-		if ft == gc.TFLOAT64 {
-			fa = arm.AMOVD
-			a = arm.AMOVDW
-		}
-
-		ta := arm.AMOVW
-		switch tt {
-		case gc.TINT8:
-			ta = arm.AMOVBS
-
-		case gc.TUINT8:
-			ta = arm.AMOVBU
-
-		case gc.TINT16:
-			ta = arm.AMOVHS
-
-		case gc.TUINT16:
-			ta = arm.AMOVHU
-		}
-
-		var r1 gc.Node
-		gc.Regalloc(&r1, gc.Types[ft], f)
-		var r2 gc.Node
-		gc.Regalloc(&r2, gc.Types[tt], t)
-		gins(fa, f, &r1)        // load to fpu
-		p1 := gins(a, &r1, &r1) // convert to w
-		switch tt {
-		case gc.TUINT8,
-			gc.TUINT16,
-			gc.TUINT32:
-			p1.Scond |= arm.C_UBIT
-		}
-
-		gins(arm.AMOVW, &r1, &r2) // copy to cpu
-		gins(ta, &r2, t)          // store
-		gc.Regfree(&r1)
-		gc.Regfree(&r2)
-		return
-
-		/*
-		 * integer to float
-		 */
-	case gc.TINT8<<16 | gc.TFLOAT32,
-		gc.TUINT8<<16 | gc.TFLOAT32,
-		gc.TINT16<<16 | gc.TFLOAT32,
-		gc.TUINT16<<16 | gc.TFLOAT32,
-		gc.TINT32<<16 | gc.TFLOAT32,
-		gc.TUINT32<<16 | gc.TFLOAT32,
-		gc.TINT8<<16 | gc.TFLOAT64,
-		gc.TUINT8<<16 | gc.TFLOAT64,
-		gc.TINT16<<16 | gc.TFLOAT64,
-		gc.TUINT16<<16 | gc.TFLOAT64,
-		gc.TINT32<<16 | gc.TFLOAT64,
-		gc.TUINT32<<16 | gc.TFLOAT64:
-		fa := arm.AMOVW
-
-		switch ft {
-		case gc.TINT8:
-			fa = arm.AMOVBS
-
-		case gc.TUINT8:
-			fa = arm.AMOVBU
-
-		case gc.TINT16:
-			fa = arm.AMOVHS
-
-		case gc.TUINT16:
-			fa = arm.AMOVHU
-		}
-
-		a := arm.AMOVWF
-		ta := arm.AMOVF
-		if tt == gc.TFLOAT64 {
-			a = arm.AMOVWD
-			ta = arm.AMOVD
-		}
-
-		var r1 gc.Node
-		gc.Regalloc(&r1, gc.Types[ft], f)
-		var r2 gc.Node
-		gc.Regalloc(&r2, gc.Types[tt], t)
-		gins(fa, f, &r1)          // load to cpu
-		gins(arm.AMOVW, &r1, &r2) // copy to fpu
-		p1 := gins(a, &r2, &r2)   // convert
-		switch ft {
-		case gc.TUINT8,
-			gc.TUINT16,
-			gc.TUINT32:
-			p1.Scond |= arm.C_UBIT
-		}
-
-		gins(ta, &r2, t) // store
-		gc.Regfree(&r1)
-		gc.Regfree(&r2)
-		return
-
-	case gc.TUINT64<<16 | gc.TFLOAT32,
-		gc.TUINT64<<16 | gc.TFLOAT64:
-		gc.Fatalf("gmove UINT64, TFLOAT not implemented")
-		return
-
-		/*
-		 * float to float
-		 */
-	case gc.TFLOAT32<<16 | gc.TFLOAT32:
-		a = arm.AMOVF
-
-	case gc.TFLOAT64<<16 | gc.TFLOAT64:
-		a = arm.AMOVD
-
-	case gc.TFLOAT32<<16 | gc.TFLOAT64:
-		var r1 gc.Node
-		gc.Regalloc(&r1, gc.Types[gc.TFLOAT64], t)
-		gins(arm.AMOVF, f, &r1)
-		gins(arm.AMOVFD, &r1, &r1)
-		gins(arm.AMOVD, &r1, t)
-		gc.Regfree(&r1)
-		return
-
-	case gc.TFLOAT64<<16 | gc.TFLOAT32:
-		var r1 gc.Node
-		gc.Regalloc(&r1, gc.Types[gc.TFLOAT64], t)
-		gins(arm.AMOVD, f, &r1)
-		gins(arm.AMOVDF, &r1, &r1)
-		gins(arm.AMOVF, &r1, t)
-		gc.Regfree(&r1)
-		return
-	}
-
-	gins(a, f, t)
-	return
-
-	// TODO(kaib): we almost always require a register dest anyway, this can probably be
-	// removed.
-	// requires register destination
-rdst:
-	{
-		gc.Regalloc(&r1, t.Type, t)
-
-		gins(a, f, &r1)
-		gmove(&r1, t)
-		gc.Regfree(&r1)
-		return
-	}
-
-	// requires register intermediate
-hard:
-	gc.Regalloc(&r1, cvt, t)
-
-	gmove(f, &r1)
-	gmove(&r1, t)
-	gc.Regfree(&r1)
-	return
-
-	// truncate 64 bit integer
-trunc64:
-	var fhi gc.Node
-	var flo gc.Node
-	split64(f, &flo, &fhi)
-
-	gc.Regalloc(&r1, t.Type, nil)
-	gins(a, &flo, &r1)
-	gins(a, &r1, t)
-	gc.Regfree(&r1)
-	splitclean()
-	return
-}
-
-func samaddr(f *gc.Node, t *gc.Node) bool {
-	if f.Op != t.Op {
-		return false
-	}
-
-	switch f.Op {
-	case gc.OREGISTER:
-		if f.Reg != t.Reg {
-			break
-		}
-		return true
-	}
-
-	return false
-}
-
-/*
  * generate one instruction:
  *	as f, t
  */
@@ -719,507 +138,3 @@
 		p.Reg = a.Reg
 	}
 }
-
-/* generate a constant shift
- * ARM encodes a shift by 32 as 0, so asking for a 0 shift is illegal.
- */
-func gshift(as obj.As, lhs *gc.Node, stype int32, sval int32, rhs *gc.Node) *obj.Prog {
-	if sval <= 0 || sval > 32 {
-		gc.Fatalf("bad shift value: %d", sval)
-	}
-
-	sval &= 0x1f
-
-	p := gins(as, nil, rhs)
-	p.From.Type = obj.TYPE_SHIFT
-	p.From.Offset = int64(stype) | int64(sval)<<7 | int64(lhs.Reg)&15
-	return p
-}
-
-/* generate a register shift
- */
-func gregshift(as obj.As, lhs *gc.Node, stype int32, reg *gc.Node, rhs *gc.Node) *obj.Prog {
-	p := gins(as, nil, rhs)
-	p.From.Type = obj.TYPE_SHIFT
-	p.From.Offset = int64(stype) | (int64(reg.Reg)&15)<<8 | 1<<4 | int64(lhs.Reg)&15
-	return p
-}
-
-/*
- * return Axxx for Oxxx on type t.
- */
-func optoas(op gc.Op, t *gc.Type) obj.As {
-	if t == nil {
-		gc.Fatalf("optoas: t is nil")
-	}
-
-	// avoid constant conversions in switches below
-	const (
-		OMINUS_ = uint32(gc.OMINUS) << 16
-		OLSH_   = uint32(gc.OLSH) << 16
-		ORSH_   = uint32(gc.ORSH) << 16
-		OADD_   = uint32(gc.OADD) << 16
-		OSUB_   = uint32(gc.OSUB) << 16
-		OMUL_   = uint32(gc.OMUL) << 16
-		ODIV_   = uint32(gc.ODIV) << 16
-		OMOD_   = uint32(gc.OMOD) << 16
-		OOR_    = uint32(gc.OOR) << 16
-		OAND_   = uint32(gc.OAND) << 16
-		OXOR_   = uint32(gc.OXOR) << 16
-		OEQ_    = uint32(gc.OEQ) << 16
-		ONE_    = uint32(gc.ONE) << 16
-		OLT_    = uint32(gc.OLT) << 16
-		OLE_    = uint32(gc.OLE) << 16
-		OGE_    = uint32(gc.OGE) << 16
-		OGT_    = uint32(gc.OGT) << 16
-		OCMP_   = uint32(gc.OCMP) << 16
-		OPS_    = uint32(gc.OPS) << 16
-		OAS_    = uint32(gc.OAS) << 16
-		OSQRT_  = uint32(gc.OSQRT) << 16
-	)
-
-	a := obj.AXXX
-	switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
-	default:
-		gc.Fatalf("optoas: no entry %v-%v etype %v simtype %v", op, t, gc.Types[t.Etype], gc.Types[gc.Simtype[t.Etype]])
-
-		/*	case CASE(OADDR, TPTR32):
-				a = ALEAL;
-				break;
-
-			case CASE(OADDR, TPTR64):
-				a = ALEAQ;
-				break;
-		*/
-	// TODO(kaib): make sure the conditional branches work on all edge cases
-	case OEQ_ | gc.TBOOL,
-		OEQ_ | gc.TINT8,
-		OEQ_ | gc.TUINT8,
-		OEQ_ | gc.TINT16,
-		OEQ_ | gc.TUINT16,
-		OEQ_ | gc.TINT32,
-		OEQ_ | gc.TUINT32,
-		OEQ_ | gc.TINT64,
-		OEQ_ | gc.TUINT64,
-		OEQ_ | gc.TPTR32,
-		OEQ_ | gc.TPTR64,
-		OEQ_ | gc.TFLOAT32,
-		OEQ_ | gc.TFLOAT64:
-		a = arm.ABEQ
-
-	case ONE_ | gc.TBOOL,
-		ONE_ | gc.TINT8,
-		ONE_ | gc.TUINT8,
-		ONE_ | gc.TINT16,
-		ONE_ | gc.TUINT16,
-		ONE_ | gc.TINT32,
-		ONE_ | gc.TUINT32,
-		ONE_ | gc.TINT64,
-		ONE_ | gc.TUINT64,
-		ONE_ | gc.TPTR32,
-		ONE_ | gc.TPTR64,
-		ONE_ | gc.TFLOAT32,
-		ONE_ | gc.TFLOAT64:
-		a = arm.ABNE
-
-	case OLT_ | gc.TINT8,
-		OLT_ | gc.TINT16,
-		OLT_ | gc.TINT32,
-		OLT_ | gc.TINT64,
-		OLT_ | gc.TFLOAT32,
-		OLT_ | gc.TFLOAT64:
-		a = arm.ABLT
-
-	case OLT_ | gc.TUINT8,
-		OLT_ | gc.TUINT16,
-		OLT_ | gc.TUINT32,
-		OLT_ | gc.TUINT64:
-		a = arm.ABLO
-
-	case OLE_ | gc.TINT8,
-		OLE_ | gc.TINT16,
-		OLE_ | gc.TINT32,
-		OLE_ | gc.TINT64,
-		OLE_ | gc.TFLOAT32,
-		OLE_ | gc.TFLOAT64:
-		a = arm.ABLE
-
-	case OLE_ | gc.TUINT8,
-		OLE_ | gc.TUINT16,
-		OLE_ | gc.TUINT32,
-		OLE_ | gc.TUINT64:
-		a = arm.ABLS
-
-	case OGT_ | gc.TINT8,
-		OGT_ | gc.TINT16,
-		OGT_ | gc.TINT32,
-		OGT_ | gc.TINT64,
-		OGT_ | gc.TFLOAT32,
-		OGT_ | gc.TFLOAT64:
-		a = arm.ABGT
-
-	case OGT_ | gc.TUINT8,
-		OGT_ | gc.TUINT16,
-		OGT_ | gc.TUINT32,
-		OGT_ | gc.TUINT64:
-		a = arm.ABHI
-
-	case OGE_ | gc.TINT8,
-		OGE_ | gc.TINT16,
-		OGE_ | gc.TINT32,
-		OGE_ | gc.TINT64,
-		OGE_ | gc.TFLOAT32,
-		OGE_ | gc.TFLOAT64:
-		a = arm.ABGE
-
-	case OGE_ | gc.TUINT8,
-		OGE_ | gc.TUINT16,
-		OGE_ | gc.TUINT32,
-		OGE_ | gc.TUINT64:
-		a = arm.ABHS
-
-	case OCMP_ | gc.TBOOL,
-		OCMP_ | gc.TINT8,
-		OCMP_ | gc.TUINT8,
-		OCMP_ | gc.TINT16,
-		OCMP_ | gc.TUINT16,
-		OCMP_ | gc.TINT32,
-		OCMP_ | gc.TUINT32,
-		OCMP_ | gc.TPTR32:
-		a = arm.ACMP
-
-	case OCMP_ | gc.TFLOAT32:
-		a = arm.ACMPF
-
-	case OCMP_ | gc.TFLOAT64:
-		a = arm.ACMPD
-
-	case OPS_ | gc.TFLOAT32,
-		OPS_ | gc.TFLOAT64:
-		a = arm.ABVS
-
-	case OAS_ | gc.TBOOL:
-		a = arm.AMOVB
-
-	case OAS_ | gc.TINT8:
-		a = arm.AMOVBS
-
-	case OAS_ | gc.TUINT8:
-		a = arm.AMOVBU
-
-	case OAS_ | gc.TINT16:
-		a = arm.AMOVHS
-
-	case OAS_ | gc.TUINT16:
-		a = arm.AMOVHU
-
-	case OAS_ | gc.TINT32,
-		OAS_ | gc.TUINT32,
-		OAS_ | gc.TPTR32:
-		a = arm.AMOVW
-
-	case OAS_ | gc.TFLOAT32:
-		a = arm.AMOVF
-
-	case OAS_ | gc.TFLOAT64:
-		a = arm.AMOVD
-
-	case OADD_ | gc.TINT8,
-		OADD_ | gc.TUINT8,
-		OADD_ | gc.TINT16,
-		OADD_ | gc.TUINT16,
-		OADD_ | gc.TINT32,
-		OADD_ | gc.TUINT32,
-		OADD_ | gc.TPTR32:
-		a = arm.AADD
-
-	case OADD_ | gc.TFLOAT32:
-		a = arm.AADDF
-
-	case OADD_ | gc.TFLOAT64:
-		a = arm.AADDD
-
-	case OSUB_ | gc.TINT8,
-		OSUB_ | gc.TUINT8,
-		OSUB_ | gc.TINT16,
-		OSUB_ | gc.TUINT16,
-		OSUB_ | gc.TINT32,
-		OSUB_ | gc.TUINT32,
-		OSUB_ | gc.TPTR32:
-		a = arm.ASUB
-
-	case OSUB_ | gc.TFLOAT32:
-		a = arm.ASUBF
-
-	case OSUB_ | gc.TFLOAT64:
-		a = arm.ASUBD
-
-	case OMINUS_ | gc.TINT8,
-		OMINUS_ | gc.TUINT8,
-		OMINUS_ | gc.TINT16,
-		OMINUS_ | gc.TUINT16,
-		OMINUS_ | gc.TINT32,
-		OMINUS_ | gc.TUINT32,
-		OMINUS_ | gc.TPTR32:
-		a = arm.ARSB
-
-	case OAND_ | gc.TINT8,
-		OAND_ | gc.TUINT8,
-		OAND_ | gc.TINT16,
-		OAND_ | gc.TUINT16,
-		OAND_ | gc.TINT32,
-		OAND_ | gc.TUINT32,
-		OAND_ | gc.TPTR32:
-		a = arm.AAND
-
-	case OOR_ | gc.TINT8,
-		OOR_ | gc.TUINT8,
-		OOR_ | gc.TINT16,
-		OOR_ | gc.TUINT16,
-		OOR_ | gc.TINT32,
-		OOR_ | gc.TUINT32,
-		OOR_ | gc.TPTR32:
-		a = arm.AORR
-
-	case OXOR_ | gc.TINT8,
-		OXOR_ | gc.TUINT8,
-		OXOR_ | gc.TINT16,
-		OXOR_ | gc.TUINT16,
-		OXOR_ | gc.TINT32,
-		OXOR_ | gc.TUINT32,
-		OXOR_ | gc.TPTR32:
-		a = arm.AEOR
-
-	case OLSH_ | gc.TINT8,
-		OLSH_ | gc.TUINT8,
-		OLSH_ | gc.TINT16,
-		OLSH_ | gc.TUINT16,
-		OLSH_ | gc.TINT32,
-		OLSH_ | gc.TUINT32,
-		OLSH_ | gc.TPTR32:
-		a = arm.ASLL
-
-	case ORSH_ | gc.TUINT8,
-		ORSH_ | gc.TUINT16,
-		ORSH_ | gc.TUINT32,
-		ORSH_ | gc.TPTR32:
-		a = arm.ASRL
-
-	case ORSH_ | gc.TINT8,
-		ORSH_ | gc.TINT16,
-		ORSH_ | gc.TINT32:
-		a = arm.ASRA
-
-	case OMUL_ | gc.TUINT8,
-		OMUL_ | gc.TUINT16,
-		OMUL_ | gc.TUINT32,
-		OMUL_ | gc.TPTR32:
-		a = arm.AMULU
-
-	case OMUL_ | gc.TINT8,
-		OMUL_ | gc.TINT16,
-		OMUL_ | gc.TINT32:
-		a = arm.AMUL
-
-	case OMUL_ | gc.TFLOAT32:
-		a = arm.AMULF
-
-	case OMUL_ | gc.TFLOAT64:
-		a = arm.AMULD
-
-	case ODIV_ | gc.TUINT8,
-		ODIV_ | gc.TUINT16,
-		ODIV_ | gc.TUINT32,
-		ODIV_ | gc.TPTR32:
-		a = arm.ADIVU
-
-	case ODIV_ | gc.TINT8,
-		ODIV_ | gc.TINT16,
-		ODIV_ | gc.TINT32:
-		a = arm.ADIV
-
-	case OMOD_ | gc.TUINT8,
-		OMOD_ | gc.TUINT16,
-		OMOD_ | gc.TUINT32,
-		OMOD_ | gc.TPTR32:
-		a = arm.AMODU
-
-	case OMOD_ | gc.TINT8,
-		OMOD_ | gc.TINT16,
-		OMOD_ | gc.TINT32:
-		a = arm.AMOD
-
-		//	case CASE(OEXTEND, TINT16):
-	//		a = ACWD;
-	//		break;
-
-	//	case CASE(OEXTEND, TINT32):
-	//		a = ACDQ;
-	//		break;
-
-	//	case CASE(OEXTEND, TINT64):
-	//		a = ACQO;
-	//		break;
-
-	case ODIV_ | gc.TFLOAT32:
-		a = arm.ADIVF
-
-	case ODIV_ | gc.TFLOAT64:
-		a = arm.ADIVD
-
-	case OSQRT_ | gc.TFLOAT64:
-		a = arm.ASQRTD
-	}
-
-	return a
-}
-
-const (
-	ODynam = 1 << 0
-	OPtrto = 1 << 1
-)
-
-var clean [20]gc.Node
-
-var cleani int = 0
-
-func sudoclean() {
-	if clean[cleani-1].Op != gc.OEMPTY {
-		gc.Regfree(&clean[cleani-1])
-	}
-	if clean[cleani-2].Op != gc.OEMPTY {
-		gc.Regfree(&clean[cleani-2])
-	}
-	cleani -= 2
-}
-
-func dotaddable(n *gc.Node, n1 *gc.Node) bool {
-	if n.Op != gc.ODOT {
-		return false
-	}
-
-	var oary [10]int64
-	var nn *gc.Node
-	o := gc.Dotoffset(n, oary[:], &nn)
-	if nn != nil && nn.Addable && o == 1 && oary[0] >= 0 {
-		*n1 = *nn
-		n1.Type = n.Type
-		n1.Xoffset += oary[0]
-		return true
-	}
-
-	return false
-}
-
-/*
- * generate code to compute address of n,
- * a reference to a (perhaps nested) field inside
- * an array or struct.
- * return false on failure, true on success.
- * on success, leaves usable address in a.
- *
- * caller is responsible for calling sudoclean
- * after successful sudoaddable,
- * to release the register used for a.
- */
-func sudoaddable(as obj.As, n *gc.Node, a *obj.Addr) bool {
-	if n.Type == nil {
-		return false
-	}
-
-	*a = obj.Addr{}
-
-	switch n.Op {
-	case gc.OLITERAL:
-		if !gc.Isconst(n, gc.CTINT) {
-			break
-		}
-		v := n.Int64()
-		if v >= 32000 || v <= -32000 {
-			break
-		}
-		switch as {
-		default:
-			return false
-
-		case arm.AADD,
-			arm.ASUB,
-			arm.AAND,
-			arm.AORR,
-			arm.AEOR,
-			arm.AMOVB,
-			arm.AMOVBS,
-			arm.AMOVBU,
-			arm.AMOVH,
-			arm.AMOVHS,
-			arm.AMOVHU,
-			arm.AMOVW:
-			break
-		}
-
-		cleani += 2
-		reg := &clean[cleani-1]
-		reg1 := &clean[cleani-2]
-		reg.Op = gc.OEMPTY
-		reg1.Op = gc.OEMPTY
-		gc.Naddr(a, n)
-		return true
-
-	case gc.ODOT,
-		gc.ODOTPTR:
-		cleani += 2
-		reg := &clean[cleani-1]
-		reg1 := &clean[cleani-2]
-		reg.Op = gc.OEMPTY
-		reg1.Op = gc.OEMPTY
-		var nn *gc.Node
-		var oary [10]int64
-		o := gc.Dotoffset(n, oary[:], &nn)
-		if nn == nil {
-			sudoclean()
-			return false
-		}
-
-		if nn.Addable && o == 1 && oary[0] >= 0 {
-			// directly addressable set of DOTs
-			n1 := *nn
-
-			n1.Type = n.Type
-			n1.Xoffset += oary[0]
-			gc.Naddr(a, &n1)
-			return true
-		}
-
-		gc.Regalloc(reg, gc.Types[gc.Tptr], nil)
-		n1 := *reg
-		n1.Op = gc.OINDREG
-		if oary[0] >= 0 {
-			gc.Agen(nn, reg)
-			n1.Xoffset = oary[0]
-		} else {
-			gc.Cgen(nn, reg)
-			gc.Cgen_checknil(reg)
-			n1.Xoffset = -(oary[0] + 1)
-		}
-
-		for i := 1; i < o; i++ {
-			if oary[i] >= 0 {
-				gc.Fatalf("can't happen")
-			}
-			gins(arm.AMOVW, &n1, reg)
-			gc.Cgen_checknil(reg)
-			n1.Xoffset = -(oary[i] + 1)
-		}
-
-		a.Type = obj.TYPE_NONE
-		a.Name = obj.NAME_NONE
-		n1.Type = n.Type
-		gc.Naddr(a, &n1)
-		return true
-
-	case gc.OINDEX:
-		return false
-	}
-
-	return false
-}
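
The gshift and gregshift helpers deleted above pack ARM's shifted-operand
form into Addr.Offset: bits 0-3 name the shifted register, the shift kind
arrives pre-shifted in stype, bits 7-11 carry a constant amount (32 encodes
as 0), and bit 4 plus bits 8-11 select the register-shifted form. A minimal
standalone sketch of that encoding (encodeConstShift and encodeRegShift are
illustrative names, not part of the toolchain):

	package main

	import "fmt"

	// encodeConstShift mirrors the deleted gshift: bits 0-3 name the
	// shifted register, stype carries the shift kind already shifted
	// into bits 5-6 by the caller, and bits 7-11 hold the amount,
	// where ARM encodes a shift by 32 as 0.
	func encodeConstShift(stype, sval, reg int64) int64 {
		return stype | (sval&0x1f)<<7 | reg&15
	}

	// encodeRegShift mirrors the deleted gregshift: bit 4 selects the
	// register-shifted form and bits 8-11 name the amount register.
	func encodeRegShift(stype, shiftReg, reg int64) int64 {
		return stype | (shiftReg&15)<<8 | 1<<4 | reg&15
	}

	func main() {
		// The int32->int64 sign extension above builds exactly
		// 2<<5 | 31<<7 | R1, an arithmetic shift of R1 by 31.
		fmt.Printf("%#x\n", encodeConstShift(2<<5, 31, 1))
		fmt.Printf("%#x\n", encodeRegShift(0<<5, 2, 1)) // R1 << R2
	}
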
diff --git a/src/cmd/compile/internal/arm/peep.go b/src/cmd/compile/internal/arm/peep.go
deleted file mode 100644
index 12678df..0000000
--- a/src/cmd/compile/internal/arm/peep.go
+++ /dev/null
@@ -1,1734 +0,0 @@
-// Inferno utils/5c/peep.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/5c/peep.c
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package arm
-
-import (
-	"cmd/compile/internal/gc"
-	"cmd/internal/obj"
-	"cmd/internal/obj/arm"
-	"fmt"
-)
-
-var gactive uint32
-
-// UNUSED
-func peep(firstp *obj.Prog) {
-	g := gc.Flowstart(firstp, nil)
-	if g == nil {
-		return
-	}
-	gactive = 0
-
-	var p *obj.Prog
-	var t int
-loop1:
-	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
-		gc.Dumpit("loop1", g.Start, 0)
-	}
-
-	t = 0
-	for r := g.Start; r != nil; r = r.Link {
-		p = r.Prog
-		switch p.As {
-		/*
-		 * elide shift into TYPE_SHIFT operand of subsequent instruction
-		 */
-		//			if(shiftprop(r)) {
-		//				excise(r);
-		//				t++;
-		//				break;
-		//			}
-		case arm.ASLL,
-			arm.ASRL,
-			arm.ASRA:
-			break
-
-		case arm.AMOVB,
-			arm.AMOVH,
-			arm.AMOVW,
-			arm.AMOVF,
-			arm.AMOVD:
-			if regtyp(&p.From) {
-				if p.From.Type == p.To.Type && isfloatreg(&p.From) == isfloatreg(&p.To) {
-					if p.Scond == arm.C_SCOND_NONE {
-						if copyprop(g, r) {
-							excise(r)
-							t++
-							break
-						}
-
-						if subprop(r) && copyprop(g, r) {
-							excise(r)
-							t++
-							break
-						}
-					}
-				}
-			}
-
-		case arm.AMOVHS,
-			arm.AMOVHU,
-			arm.AMOVBS,
-			arm.AMOVBU:
-			if p.From.Type == obj.TYPE_REG {
-				if shortprop(r) {
-					t++
-				}
-			}
-		}
-	}
-
-	/*
-		if(p->scond == C_SCOND_NONE)
-		if(regtyp(&p->to))
-		if(isdconst(&p->from)) {
-			constprop(&p->from, &p->to, r->s1);
-		}
-		break;
-	*/
-	if t != 0 {
-		goto loop1
-	}
-
-	for r := g.Start; r != nil; r = r.Link {
-		p = r.Prog
-		switch p.As {
-		/*
-		 * EOR -1,x,y => MVN x,y
-		 */
-		case arm.AEOR:
-			if isdconst(&p.From) && p.From.Offset == -1 {
-				p.As = arm.AMVN
-				p.From.Type = obj.TYPE_REG
-				if p.Reg != 0 {
-					p.From.Reg = p.Reg
-				} else {
-					p.From.Reg = p.To.Reg
-				}
-				p.Reg = 0
-			}
-		}
-	}
-
-	for r := g.Start; r != nil; r = r.Link {
-		p = r.Prog
-		switch p.As {
-		case arm.AMOVW,
-			arm.AMOVB,
-			arm.AMOVBS,
-			arm.AMOVBU:
-			if p.From.Type == obj.TYPE_MEM && p.From.Offset == 0 {
-				xtramodes(g, r, &p.From)
-			} else if p.To.Type == obj.TYPE_MEM && p.To.Offset == 0 {
-				xtramodes(g, r, &p.To)
-			} else {
-				continue
-			}
-		}
-	}
-
-	//		case ACMP:
-	//			/*
-	//			 * elide CMP $0,x if calculation of x can set condition codes
-	//			 */
-	//			if(isdconst(&p->from) || p->from.offset != 0)
-	//				continue;
-	//			r2 = r->s1;
-	//			if(r2 == nil)
-	//				continue;
-	//			t = r2->prog->as;
-	//			switch(t) {
-	//			default:
-	//				continue;
-	//			case ABEQ:
-	//			case ABNE:
-	//			case ABMI:
-	//			case ABPL:
-	//				break;
-	//			case ABGE:
-	//				t = ABPL;
-	//				break;
-	//			case ABLT:
-	//				t = ABMI;
-	//				break;
-	//			case ABHI:
-	//				t = ABNE;
-	//				break;
-	//			case ABLS:
-	//				t = ABEQ;
-	//				break;
-	//			}
-	//			r1 = r;
-	//			do
-	//				r1 = uniqp(r1);
-	//			while (r1 != nil && r1->prog->as == ANOP);
-	//			if(r1 == nil)
-	//				continue;
-	//			p1 = r1->prog;
-	//			if(p1->to.type != TYPE_REG)
-	//				continue;
-	//			if(p1->to.reg != p->reg)
-	//			if(!(p1->as == AMOVW && p1->from.type == TYPE_REG && p1->from.reg == p->reg))
-	//				continue;
-	//
-	//			switch(p1->as) {
-	//			default:
-	//				continue;
-	//			case AMOVW:
-	//				if(p1->from.type != TYPE_REG)
-	//					continue;
-	//			case AAND:
-	//			case AEOR:
-	//			case AORR:
-	//			case ABIC:
-	//			case AMVN:
-	//			case ASUB:
-	//			case ARSB:
-	//			case AADD:
-	//			case AADC:
-	//			case ASBC:
-	//			case ARSC:
-	//				break;
-	//			}
-	//			p1->scond |= C_SBIT;
-	//			r2->prog->as = t;
-	//			excise(r);
-	//			continue;
-
-	//	predicate(g);
-
-	gc.Flowend(g)
-}
-
-func regtyp(a *obj.Addr) bool {
-	return a.Type == obj.TYPE_REG && (arm.REG_R0 <= a.Reg && a.Reg <= arm.REG_R15 || arm.REG_F0 <= a.Reg && a.Reg <= arm.REG_F15)
-}
-
-/*
- * the idea is to substitute
- * one register for another
- * from one MOV to another
- *	MOV	a, R0
- *	ADD	b, R0	/ no use of R1
- *	MOV	R0, R1
- * would be converted to
- *	MOV	a, R1
- *	ADD	b, R1
- *	MOV	R1, R0
- * hopefully, then the former or latter MOV
- * will be eliminated by copy propagation.
- */
-func subprop(r0 *gc.Flow) bool {
-	p := r0.Prog
-	v1 := &p.From
-	if !regtyp(v1) {
-		return false
-	}
-	v2 := &p.To
-	if !regtyp(v2) {
-		return false
-	}
-	for r := gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
-		if gc.Uniqs(r) == nil {
-			break
-		}
-		p = r.Prog
-		if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
-			continue
-		}
-		if p.Info.Flags&gc.Call != 0 {
-			return false
-		}
-
-		// TODO(rsc): Whatever invalidated the info should have done this call.
-		proginfo(p)
-
-		if (p.Info.Flags&gc.CanRegRead != 0) && p.To.Type == obj.TYPE_REG {
-			p.Info.Flags |= gc.RegRead
-			p.Info.Flags &^= (gc.CanRegRead | gc.RightRead)
-			p.Reg = p.To.Reg
-		}
-
-		switch p.As {
-		case arm.AMULLU,
-			arm.AMULA,
-			arm.AMVN:
-			return false
-		}
-
-		if p.Info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightWrite {
-			if p.To.Type == v1.Type {
-				if p.To.Reg == v1.Reg {
-					if p.Scond == arm.C_SCOND_NONE {
-						copysub(&p.To, v1, v2, true)
-						if gc.Debug['P'] != 0 {
-							fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
-							if p.From.Type == v2.Type {
-								fmt.Printf(" excise")
-							}
-							fmt.Printf("\n")
-						}
-
-						for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
-							p = r.Prog
-							copysub(&p.From, v1, v2, true)
-							copysub1(p, v1, v2, true)
-							copysub(&p.To, v1, v2, true)
-							if gc.Debug['P'] != 0 {
-								fmt.Printf("%v\n", r.Prog)
-							}
-						}
-
-						v1.Reg, v2.Reg = v2.Reg, v1.Reg
-						if gc.Debug['P'] != 0 {
-							fmt.Printf("%v last\n", r.Prog)
-						}
-						return true
-					}
-				}
-			}
-		}
-
-		if copyau(&p.From, v2) || copyau1(p, v2) || copyau(&p.To, v2) {
-			break
-		}
-		if copysub(&p.From, v1, v2, false) || copysub1(p, v1, v2, false) || copysub(&p.To, v1, v2, false) {
-			break
-		}
-	}
-
-	return false
-}
-
-/*
- * The idea is to remove redundant copies.
- *	v1->v2	F=0
- *	(use v2	s/v2/v1/)*
- *	set v1	F=1
- *	use v2	return fail
- *	-----------------
- *	v1->v2	F=0
- *	(use v2	s/v2/v1/)*
- *	set v1	F=1
- *	set v2	return success
- */
-func copyprop(g *gc.Graph, r0 *gc.Flow) bool {
-	p := r0.Prog
-	v1 := &p.From
-	v2 := &p.To
-	if copyas(v1, v2) {
-		return true
-	}
-	gactive++
-	return copy1(v1, v2, r0.S1, false)
-}
-
-func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f bool) bool {
-	if uint32(r.Active) == gactive {
-		if gc.Debug['P'] != 0 {
-			fmt.Printf("act set; return 1\n")
-		}
-		return true
-	}
-
-	r.Active = int32(gactive)
-	if gc.Debug['P'] != 0 {
-		fmt.Printf("copy %v->%v f=%v\n", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), f)
-	}
-	for ; r != nil; r = r.S1 {
-		p := r.Prog
-		if gc.Debug['P'] != 0 {
-			fmt.Printf("%v", p)
-		}
-		if !f && gc.Uniqp(r) == nil {
-			f = true
-			if gc.Debug['P'] != 0 {
-				fmt.Printf("; merge; f=%v", f)
-			}
-		}
-
-		switch t := copyu(p, v2, nil); t {
-		case 2: /* rar, can't split */
-			if gc.Debug['P'] != 0 {
-				fmt.Printf("; %vrar; return 0\n", gc.Ctxt.Dconv(v2))
-			}
-			return false
-
-		case 3: /* set */
-			if gc.Debug['P'] != 0 {
-				fmt.Printf("; %vset; return 1\n", gc.Ctxt.Dconv(v2))
-			}
-			return true
-
-		case 1, /* used, substitute */
-			4: /* use and set */
-			if f {
-				if gc.Debug['P'] == 0 {
-					return false
-				}
-				if t == 4 {
-					fmt.Printf("; %vused+set and f=%v; return 0\n", gc.Ctxt.Dconv(v2), f)
-				} else {
-					fmt.Printf("; %vused and f=%v; return 0\n", gc.Ctxt.Dconv(v2), f)
-				}
-				return false
-			}
-
-			if copyu(p, v2, v1) != 0 {
-				if gc.Debug['P'] != 0 {
-					fmt.Printf("; sub fail; return 0\n")
-				}
-				return false
-			}
-
-			if gc.Debug['P'] != 0 {
-				fmt.Printf("; sub%v/%v", gc.Ctxt.Dconv(v2), gc.Ctxt.Dconv(v1))
-			}
-			if t == 4 {
-				if gc.Debug['P'] != 0 {
-					fmt.Printf("; %vused+set; return 1\n", gc.Ctxt.Dconv(v2))
-				}
-				return true
-			}
-		}
-
-		if !f {
-			t := copyu(p, v1, nil)
-			if t == 2 || t == 3 || t == 4 {
-				f = true
-				if gc.Debug['P'] != 0 {
-					fmt.Printf("; %vset and !f; f=%v", gc.Ctxt.Dconv(v1), f)
-				}
-			}
-		}
-
-		if gc.Debug['P'] != 0 {
-			fmt.Printf("\n")
-		}
-		if r.S2 != nil {
-			if !copy1(v1, v2, r.S2, f) {
-				return false
-			}
-		}
-	}
-	return true
-}
-
-// UNUSED
-/*
- * The idea is to remove redundant constants.
- *	$c1->v1
- *	($c1->v2 s/$c1/v1)*
- *	set v1  return
- * The v1->v2 should be eliminated by copy propagation.
- */
-func constprop(c1 *obj.Addr, v1 *obj.Addr, r *gc.Flow) {
-	if gc.Debug['P'] != 0 {
-		fmt.Printf("constprop %v->%v\n", gc.Ctxt.Dconv(c1), gc.Ctxt.Dconv(v1))
-	}
-	var p *obj.Prog
-	for ; r != nil; r = r.S1 {
-		p = r.Prog
-		if gc.Debug['P'] != 0 {
-			fmt.Printf("%v", p)
-		}
-		if gc.Uniqp(r) == nil {
-			if gc.Debug['P'] != 0 {
-				fmt.Printf("; merge; return\n")
-			}
-			return
-		}
-
-		if p.As == arm.AMOVW && copyas(&p.From, c1) {
-			if gc.Debug['P'] != 0 {
-				fmt.Printf("; sub%v/%v", gc.Ctxt.Dconv(&p.From), gc.Ctxt.Dconv(v1))
-			}
-			p.From = *v1
-		} else if copyu(p, v1, nil) > 1 {
-			if gc.Debug['P'] != 0 {
-				fmt.Printf("; %vset; return\n", gc.Ctxt.Dconv(v1))
-			}
-			return
-		}
-
-		if gc.Debug['P'] != 0 {
-			fmt.Printf("\n")
-		}
-		if r.S2 != nil {
-			constprop(c1, v1, r.S2)
-		}
-	}
-}
-
-/*
- * shortprop eliminates redundant zero/sign extensions.
- *
- *   MOVBS x, R
- *   <no use R>
- *   MOVBS R, R'
- *
- * changed to
- *
- *   MOVBS x, R
- *   ...
- *   MOVB  R, R' (compiled to mov)
- *
- * MOVBS above can be a MOVBS, MOVBU, MOVHS or MOVHU.
- */
-func shortprop(r *gc.Flow) bool {
-	p := r.Prog
-	r1 := findpre(r, &p.From)
-	if r1 == nil {
-		return false
-	}
-
-	p1 := r1.Prog
-	if p1.As == p.As {
-		// Two consecutive extensions.
-		goto gotit
-	}
-
-	if p1.As == arm.AMOVW && isdconst(&p1.From) && p1.From.Offset >= 0 && p1.From.Offset < 128 {
-		// Loaded an immediate.
-		goto gotit
-	}
-
-	return false
-
-gotit:
-	if gc.Debug['P'] != 0 {
-		fmt.Printf("shortprop\n%v\n%v", p1, p)
-	}
-	switch p.As {
-	case arm.AMOVBS,
-		arm.AMOVBU:
-		p.As = arm.AMOVB
-
-	case arm.AMOVHS,
-		arm.AMOVHU:
-		p.As = arm.AMOVH
-	}
-
-	if gc.Debug['P'] != 0 {
-		fmt.Printf(" => %v\n", p.As)
-	}
-	return true
-}
-
-// UNUSED
-/*
- * ASLL x,y,w
- * .. (not use w, not set x y w)
- * AXXX w,a,b (a != w)
- * .. (not use w)
- * (set w)
- * ----------- changed to
- * ..
- * AXXX (x<<y),a,b
- * ..
- */
-func shiftprop(r *gc.Flow) bool {
-	p := r.Prog
-	if p.To.Type != obj.TYPE_REG {
-		if gc.Debug['P'] != 0 {
-			fmt.Printf("\tBOTCH: result not reg; FAILURE\n")
-		}
-		return false
-	}
-
-	n := p.To.Reg
-	var a obj.Addr
-	if p.Reg != 0 && p.Reg != p.To.Reg {
-		a.Type = obj.TYPE_REG
-		a.Reg = p.Reg
-	}
-
-	if gc.Debug['P'] != 0 {
-		fmt.Printf("shiftprop\n%v", p)
-	}
-	r1 := r
-	var p1 *obj.Prog
-	for {
-		/* find first use of shift result; abort if shift operands or result are changed */
-		r1 = gc.Uniqs(r1)
-
-		if r1 == nil {
-			if gc.Debug['P'] != 0 {
-				fmt.Printf("\tbranch; FAILURE\n")
-			}
-			return false
-		}
-
-		if gc.Uniqp(r1) == nil {
-			if gc.Debug['P'] != 0 {
-				fmt.Printf("\tmerge; FAILURE\n")
-			}
-			return false
-		}
-
-		p1 = r1.Prog
-		if gc.Debug['P'] != 0 {
-			fmt.Printf("\n%v", p1)
-		}
-		switch copyu(p1, &p.To, nil) {
-		case 0: /* not used or set */
-			if (p.From.Type == obj.TYPE_REG && copyu(p1, &p.From, nil) > 1) || (a.Type == obj.TYPE_REG && copyu(p1, &a, nil) > 1) {
-				if gc.Debug['P'] != 0 {
-					fmt.Printf("\targs modified; FAILURE\n")
-				}
-				return false
-			}
-
-			continue
-		case 3: /* set, not used */
-			{
-				if gc.Debug['P'] != 0 {
-					fmt.Printf("\tBOTCH: noref; FAILURE\n")
-				}
-				return false
-			}
-		}
-
-		break
-	}
-
-	/* check whether substitution can be done */
-	switch p1.As {
-	default:
-		if gc.Debug['P'] != 0 {
-			fmt.Printf("\tnon-dpi; FAILURE\n")
-		}
-		return false
-
-	case arm.AAND,
-		arm.AEOR,
-		arm.AADD,
-		arm.AADC,
-		arm.AORR,
-		arm.ASUB,
-		arm.ASBC,
-		arm.ARSB,
-		arm.ARSC:
-		if p1.Reg == n || (p1.Reg == 0 && p1.To.Type == obj.TYPE_REG && p1.To.Reg == n) {
-			if p1.From.Type != obj.TYPE_REG {
-				if gc.Debug['P'] != 0 {
-					fmt.Printf("\tcan't swap; FAILURE\n")
-				}
-				return false
-			}
-
-			p1.Reg = p1.From.Reg
-			p1.From.Reg = n
-			switch p1.As {
-			case arm.ASUB:
-				p1.As = arm.ARSB
-
-			case arm.ARSB:
-				p1.As = arm.ASUB
-
-			case arm.ASBC:
-				p1.As = arm.ARSC
-
-			case arm.ARSC:
-				p1.As = arm.ASBC
-			}
-
-			if gc.Debug['P'] != 0 {
-				fmt.Printf("\t=>%v", p1)
-			}
-		}
-		fallthrough
-
-	case arm.ABIC,
-		arm.ATST,
-		arm.ACMP,
-		arm.ACMN:
-		if p1.Reg == n {
-			if gc.Debug['P'] != 0 {
-				fmt.Printf("\tcan't swap; FAILURE\n")
-			}
-			return false
-		}
-
-		if p1.Reg == 0 && p1.To.Reg == n {
-			if gc.Debug['P'] != 0 {
-				fmt.Printf("\tshift result used twice; FAILURE\n")
-			}
-			return false
-		}
-
-		//	case AMVN:
-		if p1.From.Type == obj.TYPE_SHIFT {
-			if gc.Debug['P'] != 0 {
-				fmt.Printf("\tshift result used in shift; FAILURE\n")
-			}
-			return false
-		}
-
-		if p1.From.Type != obj.TYPE_REG || p1.From.Reg != n {
-			if gc.Debug['P'] != 0 {
-				fmt.Printf("\tBOTCH: where is it used?; FAILURE\n")
-			}
-			return false
-		}
-	}
-
-	/* check whether shift result is used subsequently */
-	p2 := p1
-
-	if p1.To.Reg != n {
-		var p1 *obj.Prog
-		for {
-			r1 = gc.Uniqs(r1)
-			if r1 == nil {
-				if gc.Debug['P'] != 0 {
-					fmt.Printf("\tinconclusive; FAILURE\n")
-				}
-				return false
-			}
-
-			p1 = r1.Prog
-			if gc.Debug['P'] != 0 {
-				fmt.Printf("\n%v", p1)
-			}
-			switch copyu(p1, &p.To, nil) {
-			case 0: /* not used or set */
-				continue
-
-			case 3: /* set, not used */
-				break
-
-			default: /* used */
-				if gc.Debug['P'] != 0 {
-					fmt.Printf("\treused; FAILURE\n")
-				}
-				return false
-			}
-
-			break
-		}
-	}
-
-	/* make the substitution */
-	p2.From.Reg = 0
-	o := p.Reg
-	if o == 0 {
-		o = p.To.Reg
-	}
-	o &= 15
-
-	switch p.From.Type {
-	case obj.TYPE_CONST:
-		o |= int16(p.From.Offset&0x1f) << 7
-
-	case obj.TYPE_REG:
-		o |= 1<<4 | (p.From.Reg&15)<<8
-	}
-
-	switch p.As {
-	case arm.ASLL:
-		o |= 0 << 5
-
-	case arm.ASRL:
-		o |= 1 << 5
-
-	case arm.ASRA:
-		o |= 2 << 5
-	}
-
-	p2.From = obj.Addr{}
-	p2.From.Type = obj.TYPE_SHIFT
-	p2.From.Offset = int64(o)
-	if gc.Debug['P'] != 0 {
-		fmt.Printf("\t=>%v\tSUCCEED\n", p2)
-	}
-	return true
-}
-
-/*
- * findpre returns the last instruction mentioning v
- * before r. It must be a set, and there must be
- * a unique path from that instruction to r.
- */
-func findpre(r *gc.Flow, v *obj.Addr) *gc.Flow {
-	var r1 *gc.Flow
-
-	for r1 = gc.Uniqp(r); r1 != nil; r, r1 = r1, gc.Uniqp(r1) {
-		if gc.Uniqs(r1) != r {
-			return nil
-		}
-		switch copyu(r1.Prog, v, nil) {
-		case 1, /* used */
-			2: /* read-alter-rewrite */
-			return nil
-
-		case 3, /* set */
-			4: /* set and used */
-			return r1
-		}
-	}
-
-	return nil
-}
-
-/*
- * findinc finds ADD instructions with a constant
- * argument which falls within the immed_12 range.
- */
-func findinc(r *gc.Flow, r2 *gc.Flow, v *obj.Addr) *gc.Flow {
-	var r1 *gc.Flow
-	var p *obj.Prog
-
-	for r1 = gc.Uniqs(r); r1 != nil && r1 != r2; r, r1 = r1, gc.Uniqs(r1) {
-		if gc.Uniqp(r1) != r {
-			return nil
-		}
-		switch copyu(r1.Prog, v, nil) {
-		case 0: /* not touched */
-			continue
-
-		case 4: /* set and used */
-			p = r1.Prog
-
-			if p.As == arm.AADD {
-				if isdconst(&p.From) {
-					if p.From.Offset > -4096 && p.From.Offset < 4096 {
-						return r1
-					}
-				}
-			}
-			fallthrough
-
-		default:
-			return nil
-		}
-	}
-
-	return nil
-}
-
-func nochange(r *gc.Flow, r2 *gc.Flow, p *obj.Prog) bool {
-	if r == r2 {
-		return true
-	}
-	n := int(0)
-	var a [3]obj.Addr
-	if p.Reg != 0 && p.Reg != p.To.Reg {
-		a[n].Type = obj.TYPE_REG
-		a[n].Reg = p.Reg
-		n++
-	}
-
-	switch p.From.Type {
-	case obj.TYPE_SHIFT:
-		a[n].Type = obj.TYPE_REG
-		a[n].Reg = int16(arm.REG_R0 + (p.From.Offset & 0xf))
-		n++
-		fallthrough
-
-	case obj.TYPE_REG:
-		a[n].Type = obj.TYPE_REG
-		a[n].Reg = p.From.Reg
-		n++
-	}
-
-	if n == 0 {
-		return true
-	}
-	var i int
-	for ; r != nil && r != r2; r = gc.Uniqs(r) {
-		p = r.Prog
-		for i = 0; i < n; i++ {
-			if copyu(p, &a[i], nil) > 1 {
-				return false
-			}
-		}
-	}
-
-	return true
-}
-
-func findu1(r *gc.Flow, v *obj.Addr) bool {
-	for ; r != nil; r = r.S1 {
-		if r.Active != 0 {
-			return false
-		}
-		r.Active = 1
-		switch copyu(r.Prog, v, nil) {
-		case 1, /* used */
-			2, /* read-alter-rewrite */
-			4: /* set and used */
-			return true
-
-		case 3: /* set */
-			return false
-		}
-
-		if r.S2 != nil {
-			if findu1(r.S2, v) {
-				return true
-			}
-		}
-	}
-
-	return false
-}
-
-func finduse(g *gc.Graph, r *gc.Flow, v *obj.Addr) bool {
-	for r1 := g.Start; r1 != nil; r1 = r1.Link {
-		r1.Active = 0
-	}
-	return findu1(r, v)
-}
-
-/*
- * xtramodes enables the ARM post-increment and
- * shifted-offset addressing modes to transform
- *   MOVW   0(R3),R1
- *   ADD    $4,R3,R3
- * into
- *   MOVW.P 4(R3),R1
- * and
- *   ADD    R0,R1
- *   MOVBU  0(R1),R0
- * into
- *   MOVBU  R0<<0(R1),R0
- */
-func xtramodes(g *gc.Graph, r *gc.Flow, a *obj.Addr) bool {
-	p := r.Prog
-	v := *a
-	v.Type = obj.TYPE_REG
-	r1 := findpre(r, &v)
-	if r1 != nil {
-		p1 := r1.Prog
-		if p1.To.Type == obj.TYPE_REG && p1.To.Reg == v.Reg {
-			switch p1.As {
-			case arm.AADD:
-				if p1.Scond&arm.C_SBIT != 0 {
-					// avoid altering ADD.S/ADC sequences.
-					break
-				}
-
-				if p1.From.Type == obj.TYPE_REG || (p1.From.Type == obj.TYPE_SHIFT && p1.From.Offset&(1<<4) == 0 && ((p.As != arm.AMOVB && p.As != arm.AMOVBS) || (a == &p.From && p1.From.Offset&^0xf == 0))) || ((p1.From.Type == obj.TYPE_ADDR || p1.From.Type == obj.TYPE_CONST) && p1.From.Offset > -4096 && p1.From.Offset < 4096) {
-					if nochange(gc.Uniqs(r1), r, p1) {
-						if a != &p.From || v.Reg != p.To.Reg {
-							if finduse(g, r.S1, &v) {
-								if p1.Reg == 0 || p1.Reg == v.Reg {
-									/* pre-indexing */
-									p.Scond |= arm.C_WBIT
-								} else {
-									return false
-								}
-							}
-						}
-
-						switch p1.From.Type {
-						/* register offset */
-						case obj.TYPE_REG:
-							if gc.Nacl {
-								return false
-							}
-							*a = obj.Addr{}
-							a.Type = obj.TYPE_SHIFT
-							a.Offset = int64(p1.From.Reg) & 15
-
-							/* scaled register offset */
-						case obj.TYPE_SHIFT:
-							if gc.Nacl {
-								return false
-							}
-							*a = obj.Addr{}
-							a.Type = obj.TYPE_SHIFT
-							fallthrough
-
-							/* immediate offset */
-						case obj.TYPE_CONST,
-							obj.TYPE_ADDR:
-							a.Offset = p1.From.Offset
-						}
-
-						if p1.Reg != 0 {
-							a.Reg = p1.Reg
-						}
-						excise(r1)
-						return true
-					}
-				}
-
-			case arm.AMOVW:
-				if p1.From.Type == obj.TYPE_REG {
-					r2 := findinc(r1, r, &p1.From)
-					if r2 != nil {
-						var r3 *gc.Flow
-						for r3 = gc.Uniqs(r2); r3.Prog.As == obj.ANOP; r3 = gc.Uniqs(r3) {
-						}
-						if r3 == r {
-							/* post-indexing */
-							p1 := r2.Prog
-
-							a.Reg = p1.To.Reg
-							a.Offset = p1.From.Offset
-							p.Scond |= arm.C_PBIT
-							if !finduse(g, r, &r1.Prog.To) {
-								excise(r1)
-							}
-							excise(r2)
-							return true
-						}
-					}
-				}
-			}
-		}
-	}
-
-	if a != &p.From || a.Reg != p.To.Reg {
-		r1 := findinc(r, nil, &v)
-		if r1 != nil {
-			/* post-indexing */
-			p1 := r1.Prog
-
-			a.Offset = p1.From.Offset
-			p.Scond |= arm.C_PBIT
-			excise(r1)
-			return true
-		}
-	}
-
-	return false
-}
-
-/*
- * return
- * 1 if v only used (and substitute),
- * 2 if read-alter-rewrite
- * 3 if set
- * 4 if set and used
- * 0 otherwise (not touched)
- */
-func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
-	switch p.As {
-	default:
-		fmt.Printf("copyu: can't find %v\n", p.As)
-		return 2
-
-	case arm.AMOVM:
-		if v.Type != obj.TYPE_REG {
-			return 0
-		}
-		if p.From.Type == obj.TYPE_CONST { /* read reglist, read/rar */
-			if s != nil {
-				if p.From.Offset&(1<<uint(v.Reg)) != 0 {
-					return 1
-				}
-				if copysub(&p.To, v, s, true) {
-					return 1
-				}
-				return 0
-			}
-
-			if copyau(&p.To, v) {
-				if p.Scond&arm.C_WBIT != 0 {
-					return 2
-				}
-				return 1
-			}
-
-			if p.From.Offset&(1<<uint(v.Reg)) != 0 {
-				return 1 /* read/rar, write reglist */
-			}
-		} else {
-			if s != nil {
-				if p.To.Offset&(1<<uint(v.Reg)) != 0 {
-					return 1
-				}
-				if copysub(&p.From, v, s, true) {
-					return 1
-				}
-				return 0
-			}
-
-			if copyau(&p.From, v) {
-				if p.Scond&arm.C_WBIT != 0 {
-					return 2
-				}
-				if p.To.Offset&(1<<uint(v.Reg)) != 0 {
-					return 4
-				}
-				return 1
-			}
-
-			if p.To.Offset&(1<<uint(v.Reg)) != 0 {
-				return 3
-			}
-		}
-
-		return 0
-
-	case obj.ANOP, /* read,, write */
-		arm.ASQRTD,
-		arm.AMOVW,
-		arm.AMOVF,
-		arm.AMOVD,
-		arm.AMOVH,
-		arm.AMOVHS,
-		arm.AMOVHU,
-		arm.AMOVB,
-		arm.AMOVBS,
-		arm.AMOVBU,
-		arm.AMOVFW,
-		arm.AMOVWF,
-		arm.AMOVDW,
-		arm.AMOVWD,
-		arm.AMOVFD,
-		arm.AMOVDF:
-		if p.Scond&(arm.C_WBIT|arm.C_PBIT) != 0 {
-			if v.Type == obj.TYPE_REG {
-				if p.From.Type == obj.TYPE_MEM || p.From.Type == obj.TYPE_SHIFT {
-					if p.From.Reg == v.Reg {
-						return 2
-					}
-				} else {
-					if p.To.Reg == v.Reg {
-						return 2
-					}
-				}
-			}
-		}
-
-		if s != nil {
-			if copysub(&p.From, v, s, true) {
-				return 1
-			}
-			if !copyas(&p.To, v) {
-				if copysub(&p.To, v, s, true) {
-					return 1
-				}
-			}
-			return 0
-		}
-
-		if copyas(&p.To, v) {
-			if p.Scond != arm.C_SCOND_NONE {
-				return 2
-			}
-			if copyau(&p.From, v) {
-				return 4
-			}
-			return 3
-		}
-
-		if copyau(&p.From, v) {
-			return 1
-		}
-		if copyau(&p.To, v) {
-			return 1
-		}
-		return 0
-
-	case arm.AMULLU, /* read, read, write, write */
-		arm.AMULL,
-		arm.AMULA,
-		arm.AMVN:
-		return 2
-
-	case arm.AADD, /* read, read, write */
-		arm.AADC,
-		arm.ASUB,
-		arm.ASBC,
-		arm.ARSB,
-		arm.ASLL,
-		arm.ASRL,
-		arm.ASRA,
-		arm.AORR,
-		arm.AAND,
-		arm.AEOR,
-		arm.AMUL,
-		arm.AMULU,
-		arm.ADIV,
-		arm.ADIVU,
-		arm.AMOD,
-		arm.AMODU,
-		arm.AADDF,
-		arm.AADDD,
-		arm.ASUBF,
-		arm.ASUBD,
-		arm.AMULF,
-		arm.AMULD,
-		arm.ADIVF,
-		arm.ADIVD,
-		obj.ACHECKNIL,
-		/* read */
-		arm.ACMPF, /* read, read, */
-		arm.ACMPD,
-		arm.ACMP,
-		arm.ACMN,
-		arm.ATST:
-		/* read,, */
-		if s != nil {
-			if copysub(&p.From, v, s, true) {
-				return 1
-			}
-			if copysub1(p, v, s, true) {
-				return 1
-			}
-			if !copyas(&p.To, v) {
-				if copysub(&p.To, v, s, true) {
-					return 1
-				}
-			}
-			return 0
-		}
-
-		if copyas(&p.To, v) {
-			if p.Scond != arm.C_SCOND_NONE {
-				return 2
-			}
-			if p.Reg == 0 {
-				p.Reg = p.To.Reg
-			}
-			if copyau(&p.From, v) {
-				return 4
-			}
-			if copyau1(p, v) {
-				return 4
-			}
-			return 3
-		}
-
-		if copyau(&p.From, v) {
-			return 1
-		}
-		if copyau1(p, v) {
-			return 1
-		}
-		if copyau(&p.To, v) {
-			return 1
-		}
-		return 0
-
-	case arm.ABEQ, /* read, read */
-		arm.ABNE,
-		arm.ABCS,
-		arm.ABHS,
-		arm.ABCC,
-		arm.ABLO,
-		arm.ABMI,
-		arm.ABPL,
-		arm.ABVS,
-		arm.ABVC,
-		arm.ABHI,
-		arm.ABLS,
-		arm.ABGE,
-		arm.ABLT,
-		arm.ABGT,
-		arm.ABLE:
-		if s != nil {
-			if copysub(&p.From, v, s, true) {
-				return 1
-			}
-			if copysub1(p, v, s, true) {
-				return 1
-			}
-			return 0
-		}
-
-		if copyau(&p.From, v) {
-			return 1
-		}
-		if copyau1(p, v) {
-			return 1
-		}
-		return 0
-
-	case arm.AB: /* funny */
-		if s != nil {
-			if copysub(&p.To, v, s, true) {
-				return 1
-			}
-			return 0
-		}
-		if copyau(&p.To, v) {
-			return 1
-		}
-		return 0
-
-	case obj.ARET: /* funny */
-		if s != nil {
-			return 1
-		}
-		return 3
-
-	case arm.ABL: /* funny */
-		if v.Type == obj.TYPE_REG {
-			// TODO(rsc): REG_R0 and REG_F0 used to be
-			// (when register numbers started at 0) exregoffset and exfregoffset,
-			// which are unset entirely.
-			// It's strange that this handles R0 and F0 differently from the other
-			// registers. Possible failure to optimize?
-			if arm.REG_R0 < v.Reg && v.Reg <= arm.REGEXT {
-				return 2
-			}
-			if v.Reg == arm.REGARG {
-				return 2
-			}
-			if arm.REG_F0 < v.Reg && v.Reg <= arm.FREGEXT {
-				return 2
-			}
-		}
-
-		if p.From.Type == obj.TYPE_REG && v.Type == obj.TYPE_REG && p.From.Reg == v.Reg {
-			return 2
-		}
-
-		if s != nil {
-			if copysub(&p.To, v, s, true) {
-				return 1
-			}
-			return 0
-		}
-
-		if copyau(&p.To, v) {
-			return 4
-		}
-		return 3
-
-	// R0 is zero, used by DUFFZERO, cannot be substituted.
-	// R1 is ptr to memory, used and set, cannot be substituted.
-	case obj.ADUFFZERO:
-		if v.Type == obj.TYPE_REG {
-			if v.Reg == arm.REG_R0 {
-				return 1
-			}
-			if v.Reg == arm.REG_R0+1 {
-				return 2
-			}
-		}
-
-		return 0
-
-	// R0 is scratch, set by DUFFCOPY, cannot be substituted.
-	// R1, R2 are ptr to src, dst, used and set, cannot be substituted.
-	case obj.ADUFFCOPY:
-		if v.Type == obj.TYPE_REG {
-			if v.Reg == arm.REG_R0 {
-				return 3
-			}
-			if v.Reg == arm.REG_R0+1 || v.Reg == arm.REG_R0+2 {
-				return 2
-			}
-		}
-
-		return 0
-
-	case obj.ATEXT: /* funny */
-		if v.Type == obj.TYPE_REG {
-			if v.Reg == arm.REGARG {
-				return 3
-			}
-		}
-		return 0
-
-	case obj.APCDATA,
-		obj.AFUNCDATA,
-		obj.AVARDEF,
-		obj.AVARKILL,
-		obj.AVARLIVE,
-		obj.AUSEFIELD:
-		return 0
-	}
-}
-
-/*
- * direct reference,
- * could be set/use depending on
- * semantics
- */
-func copyas(a *obj.Addr, v *obj.Addr) bool {
-	if regtyp(v) {
-		if a.Type == v.Type {
-			if a.Reg == v.Reg {
-				return true
-			}
-		}
-	} else if v.Type == obj.TYPE_CONST { /* for constprop */
-		if a.Type == v.Type {
-			if a.Name == v.Name {
-				if a.Sym == v.Sym {
-					if a.Reg == v.Reg {
-						if a.Offset == v.Offset {
-							return true
-						}
-					}
-				}
-			}
-		}
-	}
-
-	return false
-}
-
-func sameaddr(a *obj.Addr, v *obj.Addr) bool {
-	if a.Type != v.Type {
-		return false
-	}
-	if regtyp(v) && a.Reg == v.Reg {
-		return true
-	}
-
-	// TODO(rsc): Change v->type to v->name and enable.
-	//if(v->type == NAME_AUTO || v->type == NAME_PARAM) {
-	//	if(v->offset == a->offset)
-	//		return 1;
-	//}
-	return false
-}
-
-/*
- * either direct or indirect
- */
-func copyau(a *obj.Addr, v *obj.Addr) bool {
-	if copyas(a, v) {
-		return true
-	}
-	if v.Type == obj.TYPE_REG {
-		if a.Type == obj.TYPE_ADDR && a.Reg != 0 {
-			if a.Reg == v.Reg {
-				return true
-			}
-		} else if a.Type == obj.TYPE_MEM {
-			if a.Reg == v.Reg {
-				return true
-			}
-		} else if a.Type == obj.TYPE_REGREG || a.Type == obj.TYPE_REGREG2 {
-			if a.Reg == v.Reg {
-				return true
-			}
-			if a.Offset == int64(v.Reg) {
-				return true
-			}
-		} else if a.Type == obj.TYPE_SHIFT {
-			if a.Offset&0xf == int64(v.Reg-arm.REG_R0) {
-				return true
-			}
-			if (a.Offset&(1<<4) != 0) && (a.Offset>>8)&0xf == int64(v.Reg-arm.REG_R0) {
-				return true
-			}
-		}
-	}
-
-	return false
-}
-
-/*
- * compare v to the center
- * register in p (p->reg)
- */
-func copyau1(p *obj.Prog, v *obj.Addr) bool {
-	if v.Type == obj.TYPE_REG && v.Reg == 0 {
-		return false
-	}
-	return p.Reg == v.Reg
-}
-
-// copysub substitutes s for v in a.
-// copysub returns true on failure to substitute.
-// TODO(dfc) remove unused return value, remove calls with f=false as they do nothing.
-func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f bool) bool {
-	if f && copyau(a, v) {
-		if a.Type == obj.TYPE_SHIFT {
-			if a.Offset&0xf == int64(v.Reg-arm.REG_R0) {
-				a.Offset = a.Offset&^0xf | int64(s.Reg)&0xf
-			}
-			if (a.Offset&(1<<4) != 0) && (a.Offset>>8)&0xf == int64(v.Reg-arm.REG_R0) {
-				a.Offset = a.Offset&^(0xf<<8) | (int64(s.Reg)&0xf)<<8
-			}
-		} else if a.Type == obj.TYPE_REGREG || a.Type == obj.TYPE_REGREG2 {
-			if a.Offset == int64(v.Reg) {
-				a.Offset = int64(s.Reg)
-			}
-			if a.Reg == v.Reg {
-				a.Reg = s.Reg
-			}
-		} else {
-			a.Reg = s.Reg
-		}
-	}
-	return false
-}
-
-// TODO(dfc) remove unused return value, remove calls with f=false as they do nothing.
-func copysub1(p1 *obj.Prog, v *obj.Addr, s *obj.Addr, f bool) bool {
-	if f && copyau1(p1, v) {
-		p1.Reg = s.Reg
-	}
-	return false
-}
-
-var predinfo = []struct {
-	opcode    obj.As
-	notopcode obj.As
-	scond     int
-	notscond  int
-}{
-	{arm.ABEQ, arm.ABNE, 0x0, 0x1},
-	{arm.ABNE, arm.ABEQ, 0x1, 0x0},
-	{arm.ABCS, arm.ABCC, 0x2, 0x3},
-	{arm.ABHS, arm.ABLO, 0x2, 0x3},
-	{arm.ABCC, arm.ABCS, 0x3, 0x2},
-	{arm.ABLO, arm.ABHS, 0x3, 0x2},
-	{arm.ABMI, arm.ABPL, 0x4, 0x5},
-	{arm.ABPL, arm.ABMI, 0x5, 0x4},
-	{arm.ABVS, arm.ABVC, 0x6, 0x7},
-	{arm.ABVC, arm.ABVS, 0x7, 0x6},
-	{arm.ABHI, arm.ABLS, 0x8, 0x9},
-	{arm.ABLS, arm.ABHI, 0x9, 0x8},
-	{arm.ABGE, arm.ABLT, 0xA, 0xB},
-	{arm.ABLT, arm.ABGE, 0xB, 0xA},
-	{arm.ABGT, arm.ABLE, 0xC, 0xD},
-	{arm.ABLE, arm.ABGT, 0xD, 0xC},
-}
-
-type Joininfo struct {
-	start *gc.Flow
-	last  *gc.Flow
-	end   *gc.Flow
-	len   int
-}
-
-const (
-	Join = iota
-	Split
-	End
-	Branch
-	Setcond
-	Toolong
-)
-
-const (
-	Falsecond = iota
-	Truecond
-	Delbranch
-	Keepbranch
-)
-
-func isbranch(p *obj.Prog) bool {
-	return (arm.ABEQ <= p.As) && (p.As <= arm.ABLE)
-}
-
-func predicable(p *obj.Prog) bool {
-	switch p.As {
-	case obj.ANOP,
-		obj.AXXX,
-		obj.AGLOBL,
-		obj.ATEXT,
-		arm.AWORD:
-		return false
-	}
-
-	if isbranch(p) {
-		return false
-	}
-	return true
-}
-
-/*
- * Depends on an analysis of the encodings performed by 5l.
- * These seem to be all of the opcodes that lead to the "S" bit
- * being set in the instruction encodings.
- *
- * C_SBIT may also have been set explicitly in p->scond.
- */
-func modifiescpsr(p *obj.Prog) bool {
-	switch p.As {
-	case arm.AMULLU,
-		arm.AMULA,
-		arm.AMULU,
-		arm.ADIVU,
-		arm.ATEQ,
-		arm.ACMN,
-		arm.ATST,
-		arm.ACMP,
-		arm.AMUL,
-		arm.ADIV,
-		arm.AMOD,
-		arm.AMODU,
-		arm.ABL:
-		return true
-	}
-
-	if p.Scond&arm.C_SBIT != 0 {
-		return true
-	}
-	return false
-}
-
-/*
- * Find the maximal chain of instructions starting with r which could
- * be executed conditionally
- */
-func joinsplit(r *gc.Flow, j *Joininfo) int {
-	j.start = r
-	j.last = r
-	j.len = 0
-	for {
-		if r.P2 != nil && (r.P1 != nil || r.P2.P2link != nil) {
-			j.end = r
-			return Join
-		}
-
-		if r.S1 != nil && r.S2 != nil {
-			j.end = r
-			return Split
-		}
-
-		j.last = r
-		if r.Prog.As != obj.ANOP {
-			j.len++
-		}
-		if r.S1 == nil && r.S2 == nil {
-			j.end = r.Link
-			return End
-		}
-
-		if r.S2 != nil {
-			j.end = r.S2
-			return Branch
-		}
-
-		if modifiescpsr(r.Prog) {
-			j.end = r.S1
-			return Setcond
-		}
-
-		r = r.S1
-		if j.len >= 4 {
-			break
-		}
-	}
-
-	j.end = r
-	return Toolong
-}
-
-func successor(r *gc.Flow) *gc.Flow {
-	if r.S1 != nil {
-		return r.S1
-	}
-	return r.S2
-}
-
-func applypred(rstart *gc.Flow, j *Joininfo, cond int, branch int) {
-	if j.len == 0 {
-		return
-	}
-	var pred int
-	if cond == Truecond {
-		pred = predinfo[rstart.Prog.As-arm.ABEQ].scond
-	} else {
-		pred = predinfo[rstart.Prog.As-arm.ABEQ].notscond
-	}
-
-	for r := j.start; ; r = successor(r) {
-		if r.Prog.As == arm.AB {
-			if r != j.last || branch == Delbranch {
-				excise(r)
-			} else {
-				if cond == Truecond {
-					r.Prog.As = predinfo[rstart.Prog.As-arm.ABEQ].opcode
-				} else {
-					r.Prog.As = predinfo[rstart.Prog.As-arm.ABEQ].notopcode
-				}
-			}
-		} else if predicable(r.Prog) {
-			r.Prog.Scond = uint8(int(r.Prog.Scond&^arm.C_SCOND) | pred)
-		}
-		if r.S1 != r.Link {
-			r.S1 = r.Link
-			r.Link.P1 = r
-		}
-
-		if r == j.last {
-			break
-		}
-	}
-}
-
-func predicate(g *gc.Graph) {
-	var t1 int
-	var t2 int
-	var j1 Joininfo
-	var j2 Joininfo
-
-	for r := g.Start; r != nil; r = r.Link {
-		if isbranch(r.Prog) {
-			t1 = joinsplit(r.S1, &j1)
-			t2 = joinsplit(r.S2, &j2)
-			if j1.last.Link != j2.start {
-				continue
-			}
-			if j1.end == j2.end {
-				if (t1 == Branch && (t2 == Join || t2 == Setcond)) || (t2 == Join && (t1 == Join || t1 == Setcond)) {
-					applypred(r, &j1, Falsecond, Delbranch)
-					applypred(r, &j2, Truecond, Delbranch)
-					excise(r)
-					continue
-				}
-			}
-
-			if t1 == End || t1 == Branch {
-				applypred(r, &j1, Falsecond, Keepbranch)
-				excise(r)
-				continue
-			}
-		}
-	}
-}
-
-func isdconst(a *obj.Addr) bool {
-	return a.Type == obj.TYPE_CONST
-}
-
-func isfloatreg(a *obj.Addr) bool {
-	return arm.REG_F0 <= a.Reg && a.Reg <= arm.REG_F15
-}
-
-func stackaddr(a *obj.Addr) bool {
-	return regtyp(a) && a.Reg == arm.REGSP
-}
-
-func smallindir(a *obj.Addr, reg *obj.Addr) bool {
-	return reg.Type == obj.TYPE_REG && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && 0 <= a.Offset && a.Offset < 4096
-}
-
-func excise(r *gc.Flow) {
-	p := r.Prog
-	obj.Nopout(p)
-}
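
The copyprop/subprop pair deleted above eliminated redundant register
copies: after MOV v1, v2, later reads of v2 are rewritten to v1, and the
copy is excised once v2 is redefined. A toy model of that rewrite on a flat
instruction list (real flow graphs branch, which copy1 handled recursively;
the names here are illustrative, not the compiler's code):

	package main

	import "fmt"

	type inst struct {
		op, src, dst string
	}

	// propagate models what copyprop did on the flow graph: after a
	// register copy, reads of the destination are rewritten to the
	// source until either register is redefined.
	func propagate(prog []inst, copyIdx int) {
		src, dst := prog[copyIdx].src, prog[copyIdx].dst
		for i := copyIdx + 1; i < len(prog); i++ {
			if prog[i].src == dst {
				prog[i].src = src
			}
			if prog[i].dst == src { // source clobbered: stop
				return
			}
			if prog[i].dst == dst { // dest redefined: copy is dead
				prog[copyIdx].op = "NOP"
				return
			}
		}
	}

	func main() {
		prog := []inst{
			{"MOVW", "R0", "R1"},
			{"ADD", "R1", "R2"},
			{"MOVW", "R3", "R1"},
		}
		propagate(prog, 0)
		fmt.Println(prog) // ADD now reads R0; the first MOVW is dead
	}
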
diff --git a/src/cmd/compile/internal/arm/reg.go b/src/cmd/compile/internal/arm/reg.go
index 729cab4..53de694 100644
--- a/src/cmd/compile/internal/arm/reg.go
+++ b/src/cmd/compile/internal/arm/reg.go
@@ -31,59 +31,6 @@
 package arm
 
 import "cmd/internal/obj/arm"
-import "cmd/compile/internal/gc"
-
-const (
-	NREGVAR = 32
-)
-
-var regname = []string{
-	".R0",
-	".R1",
-	".R2",
-	".R3",
-	".R4",
-	".R5",
-	".R6",
-	".R7",
-	".R8",
-	".R9",
-	".R10",
-	".R11",
-	".R12",
-	".R13",
-	".R14",
-	".R15",
-	".F0",
-	".F1",
-	".F2",
-	".F3",
-	".F4",
-	".F5",
-	".F6",
-	".F7",
-	".F8",
-	".F9",
-	".F10",
-	".F11",
-	".F12",
-	".F13",
-	".F14",
-	".F15",
-}
-
-func regnames(n *int) []string {
-	*n = NREGVAR
-	return regname
-}
-
-func excludedregs() uint64 {
-	return RtoB(arm.REGSP) | RtoB(arm.REGLINK) | RtoB(arm.REGPC)
-}
-
-func doregbits(r int) uint64 {
-	return 0
-}
 
 /*
  *	bit	reg
@@ -116,21 +63,3 @@
 
 	return 0
 }
-
-func BtoR(b uint64) int {
-	// TODO Allow R0 and R1, but be careful with a 0 return
-	// TODO Allow R9. Only R10 is reserved now (just g, not m).
-	b &= 0x11fc // exclude R9 and R10 (g and m), but not R12
-	if b == 0 {
-		return 0
-	}
-	return gc.Bitno(b) + arm.REG_R0
-}
-
-func BtoF(b uint64) int {
-	b &= 0xfffc0000
-	if b == 0 {
-		return 0
-	}
-	return gc.Bitno(b) - 16 + arm.REG_F0
-}
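
BtoR and BtoF, deleted here, were the inverses of the surviving RtoB: the
register allocator tracked registers as one-hot bits in a uint64 and
recovered a register number from the lowest set bit via gc.Bitno. A minimal
sketch of that round trip using math/bits in place of gc.Bitno
(illustrative, not the compiler's code):

	package main

	import (
		"fmt"
		"math/bits"
	)

	// rToB maps register index r (0-15 for R0-R15) to a one-hot bit,
	// the same scheme the register allocator's bitmaps used.
	func rToB(r uint) uint64 { return 1 << r }

	// bToR recovers the lowest register named in bitmap b, first
	// masking to R2-R8 and R12 as the deleted BtoR did with 0x11fc.
	func bToR(b uint64) int {
		b &= 0x11fc
		if b == 0 {
			return -1
		}
		return bits.TrailingZeros64(b)
	}

	func main() {
		fmt.Println(bToR(rToB(4))) // 4, i.e. R4
		fmt.Println(bToR(rToB(9))) // -1: R9 is reserved
	}
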
diff --git a/src/cmd/compile/internal/arm64/cgen.go b/src/cmd/compile/internal/arm64/cgen.go
deleted file mode 100644
index 87f3498..0000000
--- a/src/cmd/compile/internal/arm64/cgen.go
+++ /dev/null
@@ -1,151 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package arm64
-
-import (
-	"cmd/compile/internal/gc"
-	"cmd/internal/obj"
-	"cmd/internal/obj/arm64"
-)
-
-func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
-	// determine alignment.
-	// want to avoid unaligned access, so have to use
-	// smaller operations for less aligned types.
-	// for example, moving [4]byte must use 4 MOVB, not 1 MOVW.
-	align := int(n.Type.Align)
-
-	var op obj.As
-	switch align {
-	default:
-		gc.Fatalf("sgen: invalid alignment %d for %v", align, n.Type)
-
-	case 1:
-		op = arm64.AMOVB
-
-	case 2:
-		op = arm64.AMOVH
-
-	case 4:
-		op = arm64.AMOVW
-
-	case 8:
-		op = arm64.AMOVD
-	}
-
-	if w%int64(align) != 0 {
-		gc.Fatalf("sgen: unaligned size %d (align=%d) for %v", w, align, n.Type)
-	}
-	c := int32(w / int64(align))
-
-	if osrc%int64(align) != 0 || odst%int64(align) != 0 {
-		gc.Fatalf("sgen: unaligned offset src %d or dst %d (align %d)", osrc, odst, align)
-	}
-
-	// if we are copying forward on the stack and
-	// the src and dst overlap, then reverse direction
-	dir := align
-
-	if osrc < odst && odst < osrc+w {
-		dir = -dir
-	}
-
-	var dst gc.Node
-	var src gc.Node
-	if n.Ullman >= res.Ullman {
-		gc.Agenr(n, &dst, res) // temporarily use dst
-		gc.Regalloc(&src, gc.Types[gc.Tptr], nil)
-		gins(arm64.AMOVD, &dst, &src)
-		if res.Op == gc.ONAME {
-			gc.Gvardef(res)
-		}
-		gc.Agen(res, &dst)
-	} else {
-		if res.Op == gc.ONAME {
-			gc.Gvardef(res)
-		}
-		gc.Agenr(res, &dst, res)
-		gc.Agenr(n, &src, nil)
-	}
-
-	var tmp gc.Node
-	gc.Regalloc(&tmp, gc.Types[gc.Tptr], nil)
-
-	// set up end marker
-	var nend gc.Node
-
-	// move src and dest to the end of block if necessary
-	if dir < 0 {
-		if c >= 4 {
-			gc.Regalloc(&nend, gc.Types[gc.Tptr], nil)
-			gins(arm64.AMOVD, &src, &nend)
-		}
-
-		p := gins(arm64.AADD, nil, &src)
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = w
-
-		p = gins(arm64.AADD, nil, &dst)
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = w
-	} else {
-		p := gins(arm64.AADD, nil, &src)
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = int64(-dir)
-
-		p = gins(arm64.AADD, nil, &dst)
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = int64(-dir)
-
-		if c >= 4 {
-			gc.Regalloc(&nend, gc.Types[gc.Tptr], nil)
-			p := gins(arm64.AMOVD, &src, &nend)
-			p.From.Type = obj.TYPE_ADDR
-			p.From.Offset = w
-		}
-	}
-
-	// move
-	// TODO: enable duffcopy for larger copies.
-	if c >= 4 {
-		p := gins(op, &src, &tmp)
-		p.From.Type = obj.TYPE_MEM
-		p.From.Offset = int64(dir)
-		p.Scond = arm64.C_XPRE
-		ploop := p
-
-		p = gins(op, &tmp, &dst)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Offset = int64(dir)
-		p.Scond = arm64.C_XPRE
-
-		p = gcmp(arm64.ACMP, &src, &nend)
-
-		gc.Patch(gc.Gbranch(arm64.ABNE, nil, 0), ploop)
-		gc.Regfree(&nend)
-	} else {
-		// TODO(austin): Instead of generating ADD $-8,R8; ADD
-		// $-8,R7; n*(MOVDU 8(R8),R9; MOVDU R9,8(R7);) just
-		// generate the offsets directly and eliminate the
-		// ADDs. That will produce shorter, more
-		// pipeline-able code.
-		var p *obj.Prog
-		for ; c > 0; c-- {
-			p = gins(op, &src, &tmp)
-			p.From.Type = obj.TYPE_MEM
-			p.From.Offset = int64(dir)
-			p.Scond = arm64.C_XPRE
-
-			p = gins(op, &tmp, &dst)
-			p.To.Type = obj.TYPE_MEM
-			p.To.Offset = int64(dir)
-			p.Scond = arm64.C_XPRE
-		}
-	}
-
-	gc.Regfree(&dst)
-	gc.Regfree(&src)
-	gc.Regfree(&tmp)
-}
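
The arm64 blockcopy deleted above chooses the widest move the type's
alignment permits and reverses direction when a forward copy on the stack
would overlap its own source. A plain-Go sketch of that size and direction
logic (blockCopy here is an illustrative stand-in, not the compiler's code):

	package main

	import "fmt"

	// chunkSize picks the move width blockcopy would have used: the
	// alignment itself, since unaligned access must be avoided.
	func chunkSize(align int) int {
		switch align {
		case 1, 2, 4, 8:
			return align
		default:
			panic(fmt.Sprintf("invalid alignment %d", align))
		}
	}

	// blockCopy copies w bytes within buf from offset src to offset
	// dst in align-sized chunks, copying backwards when the ranges
	// overlap forward, just as the deleted code reversed dir for
	// osrc < odst < osrc+w.
	func blockCopy(buf []byte, dst, src, w, align int) {
		c := chunkSize(align)
		if w%c != 0 {
			panic("unaligned size")
		}
		if src < dst && dst < src+w {
			for off := w - c; off >= 0; off -= c {
				copy(buf[dst+off:dst+off+c], buf[src+off:src+off+c])
			}
			return
		}
		for off := 0; off < w; off += c {
			copy(buf[dst+off:dst+off+c], buf[src+off:src+off+c])
		}
	}

	func main() {
		buf := []byte("abcdefgh--------")
		blockCopy(buf, 4, 0, 8, 4) // overlapping forward copy
		fmt.Println(string(buf))   // abcdabcdefgh----
	}
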
diff --git a/src/cmd/compile/internal/arm64/galign.go b/src/cmd/compile/internal/arm64/galign.go
index 12f9b051..696434b 100644
--- a/src/cmd/compile/internal/arm64/galign.go
+++ b/src/cmd/compile/internal/arm64/galign.go
@@ -29,38 +29,9 @@
 	gc.Thearch.ReservedRegs = resvd
 
 	gc.Thearch.Betypeinit = betypeinit
-	gc.Thearch.Cgen_hmul = cgen_hmul
-	gc.Thearch.AddSetCarry = AddSetCarry
-	gc.Thearch.RightShiftWithCarry = RightShiftWithCarry
-	gc.Thearch.Cgen_shift = cgen_shift
-	gc.Thearch.Clearfat = clearfat
 	gc.Thearch.Defframe = defframe
-	gc.Thearch.Dodiv = dodiv
-	gc.Thearch.Excise = excise
-	gc.Thearch.Expandchecks = expandchecks
-	gc.Thearch.Getg = getg
 	gc.Thearch.Gins = gins
-	gc.Thearch.Ginscmp = ginscmp
-	gc.Thearch.Ginscon = ginscon
-	gc.Thearch.Ginsnop = ginsnop
-	gc.Thearch.Gmove = gmove
-	gc.Thearch.Peep = peep
 	gc.Thearch.Proginfo = proginfo
-	gc.Thearch.Regtyp = regtyp
-	gc.Thearch.Sameaddr = sameaddr
-	gc.Thearch.Smallindir = smallindir
-	gc.Thearch.Stackaddr = stackaddr
-	gc.Thearch.Blockcopy = blockcopy
-	gc.Thearch.Sudoaddable = sudoaddable
-	gc.Thearch.Sudoclean = sudoclean
-	gc.Thearch.Excludedregs = excludedregs
-	gc.Thearch.RtoB = RtoB
-	gc.Thearch.FtoB = RtoB
-	gc.Thearch.BtoR = BtoR
-	gc.Thearch.BtoF = BtoF
-	gc.Thearch.Optoas = optoas
-	gc.Thearch.Doregbits = doregbits
-	gc.Thearch.Regnames = regnames
 
 	gc.Thearch.SSARegToReg = ssaRegToReg
 	gc.Thearch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {}
diff --git a/src/cmd/compile/internal/arm64/ggen.go b/src/cmd/compile/internal/arm64/ggen.go
index aebd3d0..48a6a4c 100644
--- a/src/cmd/compile/internal/arm64/ggen.go
+++ b/src/cmd/compile/internal/arm64/ggen.go
@@ -8,7 +8,6 @@
 	"cmd/compile/internal/gc"
 	"cmd/internal/obj"
 	"cmd/internal/obj/arm64"
-	"fmt"
 )
 
 func defframe(ptxt *obj.Prog) {
@@ -127,450 +126,3 @@
 	gc.Nodconst(&con, gc.Types[gc.TINT], 0)
 	gins(arm64.AHINT, &con, nil)
 }
-
-var panicdiv *gc.Node
-
-/*
- * generate division.
- * generates one of:
- *	res = nl / nr
- *	res = nl % nr
- * according to op.
- */
-func dodiv(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) {
-	// Have to be careful about handling
-	// most negative int divided by -1 correctly.
-	// The hardware will generate an undefined result.
-	// Also need to explicitly trap on division by zero;
-	// the hardware will silently generate an undefined result.
-	// DIVW will leave an unpredictable result in the upper 32 bits,
-	// so always use DIVD/DIVDU.
-	t := nl.Type
-
-	t0 := t
-	check := false
-	if t.IsSigned() {
-		check = true
-		if gc.Isconst(nl, gc.CTINT) && nl.Int64() != -(1<<uint64(t.Width*8-1)) {
-			check = false
-		} else if gc.Isconst(nr, gc.CTINT) && nr.Int64() != -1 {
-			check = false
-		}
-	}
-
-	if t.Width < 8 {
-		if t.IsSigned() {
-			t = gc.Types[gc.TINT64]
-		} else {
-			t = gc.Types[gc.TUINT64]
-		}
-		check = false
-	}
-
-	a := optoas(gc.ODIV, t)
-
-	var tl gc.Node
-	gc.Regalloc(&tl, t0, nil)
-	var tr gc.Node
-	gc.Regalloc(&tr, t0, nil)
-	if nl.Ullman >= nr.Ullman {
-		gc.Cgen(nl, &tl)
-		gc.Cgen(nr, &tr)
-	} else {
-		gc.Cgen(nr, &tr)
-		gc.Cgen(nl, &tl)
-	}
-
-	if t != t0 {
-		// Convert
-		tl2 := tl
-
-		tr2 := tr
-		tl.Type = t
-		tr.Type = t
-		gmove(&tl2, &tl)
-		gmove(&tr2, &tr)
-	}
-
-	// Handle divide-by-zero panic.
-	p1 := gins(optoas(gc.OCMP, t), &tr, nil)
-	p1.Reg = arm64.REGZERO
-	p1 = gc.Gbranch(optoas(gc.ONE, t), nil, +1)
-	if panicdiv == nil {
-		panicdiv = gc.Sysfunc("panicdivide")
-	}
-	gc.Ginscall(panicdiv, -1)
-	gc.Patch(p1, gc.Pc)
-
-	var p2 *obj.Prog
-	if check {
-		var nm1 gc.Node
-		gc.Nodconst(&nm1, t, -1)
-		gcmp(optoas(gc.OCMP, t), &tr, &nm1)
-		p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
-		if op == gc.ODIV {
-			// a / (-1) is -a.
-			gins(optoas(gc.OMINUS, t), &tl, &tl)
-
-			gmove(&tl, res)
-		} else {
-			// a % (-1) is 0.
-			var nz gc.Node
-			gc.Nodconst(&nz, t, 0)
-
-			gmove(&nz, res)
-		}
-
-		p2 = gc.Gbranch(obj.AJMP, nil, 0)
-		gc.Patch(p1, gc.Pc)
-	}
-
-	p1 = gins(a, &tr, &tl)
-	if op == gc.ODIV {
-		gc.Regfree(&tr)
-		gmove(&tl, res)
-	} else {
-		// A%B = A-(A/B*B)
-		var tm gc.Node
-		gc.Regalloc(&tm, t, nil)
-
-		// patch div to use the 3 register form
-		// TODO(minux): add gins3?
-		p1.Reg = p1.To.Reg
-
-		p1.To.Reg = tm.Reg
-		gins(optoas(gc.OMUL, t), &tr, &tm)
-		gc.Regfree(&tr)
-		gins(optoas(gc.OSUB, t), &tm, &tl)
-		gc.Regfree(&tm)
-		gmove(&tl, res)
-	}
-
-	gc.Regfree(&tl)
-	if check {
-		gc.Patch(p2, gc.Pc)
-	}
-}
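
For readers tracing what dodiv guarded against, a minimal standalone Go sketch of the two corner cases (not compiler code): the spec-mandated wraparound of the most negative value divided by -1, and the divide-by-zero panic that the emitted compare-and-branch to panicdivide provided.

	package main

	import (
		"fmt"
		"math"
	)

	func main() {
		// The language requires wraparound here; dodiv emitted an
		// explicit divisor == -1 check because the raw hardware
		// result is not reliable across targets.
		x := int64(math.MinInt64)
		fmt.Println(x / -1) // -9223372036854775808: the quotient wraps to x
		fmt.Println(x % -1) // 0

		// ARM64 SDIV does not trap on a zero divisor, so dodiv emitted
		// a compare-and-branch to runtime.panicdivide before dividing.
		defer func() { fmt.Println("recovered:", recover()) }()
		var zero int64
		fmt.Println(x / zero) // panics: integer divide by zero
	}
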
-
-// RightShiftWithCarry generates a constant unsigned
-// right shift with carry.
-//
-// res = n >> shift // with carry
-func RightShiftWithCarry(n *gc.Node, shift uint, res *gc.Node) {
-	// Extra 1 is for carry bit.
-	maxshift := uint(n.Type.Width*8 + 1)
-	if shift == 0 {
-		gmove(n, res)
-	} else if shift < maxshift {
-		// 1. clear rightmost bit of target
-		var n1 gc.Node
-		gc.Nodconst(&n1, n.Type, 1)
-		gins(optoas(gc.ORSH, n.Type), &n1, n)
-		gins(optoas(gc.OLSH, n.Type), &n1, n)
-		// 2. add carry flag to target
-		var n2 gc.Node
-		gc.Nodconst(&n1, n.Type, 0)
-		gc.Regalloc(&n2, n.Type, nil)
-		gins(optoas(gc.OAS, n.Type), &n1, &n2)
-		gins(arm64.AADC, &n2, n)
-		// 3. right rotate 1 bit
-		gc.Nodconst(&n1, n.Type, 1)
-		gins(arm64.AROR, &n1, n)
-
-		// The ARM64 backend doesn't eliminate shifts by 0, so check for that manually here.
-		if shift > 1 {
-			var n3 gc.Node
-			gc.Nodconst(&n3, n.Type, int64(shift-1))
-			cgen_shift(gc.ORSH, true, n, &n3, res)
-		} else {
-			gmove(n, res)
-		}
-		gc.Regfree(&n2)
-	} else {
-		gc.Fatalf("RightShiftWithCarry: shift(%v) is bigger than max size(%v)", shift, maxshift)
-	}
-}
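
The carry-flag dance above is easier to follow at the Go level. A sketch under the assumption 1 <= shift <= 64, treating the carry produced by AddSetCarry as the 65th bit of the sum:

	package main

	import "fmt"

	// shiftWithCarry models the AddSetCarry/RightShiftWithCarry pair:
	// the 65-bit value carry:sum shifted right by shift.
	func shiftWithCarry(a, b uint64, shift uint) uint64 {
		sum := a + b
		var carry uint64
		if sum < a { // unsigned overflow: the hardware carry flag
			carry = 1
		}
		return sum>>shift | carry<<(64-shift)
	}

	func main() {
		// Averaging two values whose plain sum would overflow.
		a, b := uint64(1)<<63, uint64(1)<<63+2
		fmt.Println(shiftWithCarry(a, b, 1)) // 9223372036854775809
	}
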
-
-// AddSetCarry generates an add that sets the carry flag.
-//
-//   res = nl + nr // with carry flag set
-func AddSetCarry(nl *gc.Node, nr *gc.Node, res *gc.Node) {
-	gins(arm64.AADDS, nl, nr)
-	gmove(nr, res)
-}
-
-/*
- * generate high multiply:
- *   res = (nl*nr) >> width
- */
-func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
-	// largest ullman on left.
-	if nl.Ullman < nr.Ullman {
-		nl, nr = nr, nl
-	}
-
-	t := nl.Type
-	w := t.Width * 8
-	var n1 gc.Node
-	gc.Cgenr(nl, &n1, res)
-	var n2 gc.Node
-	gc.Cgenr(nr, &n2, nil)
-	switch gc.Simtype[t.Etype] {
-	case gc.TINT8,
-		gc.TINT16,
-		gc.TINT32:
-		gins(optoas(gc.OMUL, t), &n2, &n1)
-		p := gins(arm64.AASR, nil, &n1)
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = w
-
-	case gc.TUINT8,
-		gc.TUINT16,
-		gc.TUINT32:
-		gins(optoas(gc.OMUL, t), &n2, &n1)
-		p := gins(arm64.ALSR, nil, &n1)
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = w
-
-	case gc.TINT64,
-		gc.TUINT64:
-		if t.IsSigned() {
-			gins(arm64.ASMULH, &n2, &n1)
-		} else {
-			gins(arm64.AUMULH, &n2, &n1)
-		}
-
-	default:
-		gc.Fatalf("cgen_hmul %v", t)
-	}
-
-	gc.Cgen(&n1, res)
-	gc.Regfree(&n1)
-	gc.Regfree(&n2)
-}
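
What a single SMULH/UMULH computes, the high 64 bits of the full 128-bit product, can be reproduced portably from 32-bit halves. A standalone reference sketch (the textbook decomposition, not compiler code):

	package main

	import "fmt"

	// umulh returns the high 64 bits of x*y, which the deleted
	// cgen_hmul obtained in one UMULH instruction.
	func umulh(x, y uint64) uint64 {
		xl, xh := x&0xffffffff, x>>32
		yl, yh := y&0xffffffff, y>>32
		w0 := xl * yl
		t := xh*yl + w0>>32
		w1, w2 := t&0xffffffff, t>>32
		w1 += xl * yh
		return xh*yh + w2 + w1>>32
	}

	func main() {
		fmt.Printf("%#x\n", umulh(0xdeadbeefdeadbeef, 0xfeedfacefeedface))
	}
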
-
-/*
- * generate shift according to op, one of:
- *	res = nl << nr
- *	res = nl >> nr
- */
-func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
-	a := optoas(op, nl.Type)
-
-	if nr.Op == gc.OLITERAL {
-		var n1 gc.Node
-		gc.Regalloc(&n1, nl.Type, res)
-		gc.Cgen(nl, &n1)
-		sc := uint64(nr.Int64())
-		if sc >= uint64(nl.Type.Width)*8 {
-			// large shift gets 2 shifts by width-1
-			var n3 gc.Node
-			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
-
-			gins(a, &n3, &n1)
-			gins(a, &n3, &n1)
-		} else {
-			gins(a, nr, &n1)
-		}
-		gmove(&n1, res)
-		gc.Regfree(&n1)
-		return
-	}
-
-	if nl.Ullman >= gc.UINF {
-		var n4 gc.Node
-		gc.Tempname(&n4, nl.Type)
-		gc.Cgen(nl, &n4)
-		nl = &n4
-	}
-
-	if nr.Ullman >= gc.UINF {
-		var n5 gc.Node
-		gc.Tempname(&n5, nr.Type)
-		gc.Cgen(nr, &n5)
-		nr = &n5
-	}
-
-	// Allow either uint32 or uint64 as shift type,
-	// to avoid unnecessary conversion from uint32 to uint64
-	// just to do the comparison.
-	tcount := gc.Types[gc.Simtype[nr.Type.Etype]]
-
-	if tcount.Etype < gc.TUINT32 {
-		tcount = gc.Types[gc.TUINT32]
-	}
-
-	var n1 gc.Node
-	gc.Regalloc(&n1, nr.Type, nil) // to hold the shift type in CX
-	var n3 gc.Node
-	gc.Regalloc(&n3, tcount, &n1) // to clear high bits of CX
-
-	var n2 gc.Node
-	gc.Regalloc(&n2, nl.Type, res)
-
-	if nl.Ullman >= nr.Ullman {
-		gc.Cgen(nl, &n2)
-		gc.Cgen(nr, &n1)
-		gmove(&n1, &n3)
-	} else {
-		gc.Cgen(nr, &n1)
-		gmove(&n1, &n3)
-		gc.Cgen(nl, &n2)
-	}
-
-	gc.Regfree(&n3)
-
-	// test and fix up large shifts
-	if !bounded {
-		gc.Nodconst(&n3, tcount, nl.Type.Width*8)
-		gcmp(optoas(gc.OCMP, tcount), &n1, &n3)
-		p1 := gc.Gbranch(optoas(gc.OLT, tcount), nil, +1)
-		if op == gc.ORSH && nl.Type.IsSigned() {
-			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
-			gins(a, &n3, &n2)
-		} else {
-			gc.Nodconst(&n3, nl.Type, 0)
-			gmove(&n3, &n2)
-		}
-
-		gc.Patch(p1, gc.Pc)
-	}
-
-	gins(a, &n1, &n2)
-
-	gmove(&n2, res)
-
-	gc.Regfree(&n1)
-	gc.Regfree(&n2)
-}
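
The fix-up block exists because Go defines shifts by counts >= the operand width, while AArch64 reduces the count mod 64. For large constants the code above emits two width-1 shifts (sign-fill or zero); for variables it compares and substitutes. The semantics being synthesized:

	package main

	import "fmt"

	func main() {
		var s uint = 70 // larger than the 64-bit operand width
		x := int64(-8)
		y := uint64(8)
		fmt.Println(x >> s) // -1: signed right shift fills with sign bits
		fmt.Println(y >> s) // 0
		fmt.Println(y << s) // 0
	}
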
-
-func clearfat(nl *gc.Node) {
-	/* clear a fat object */
-	if gc.Debug['g'] != 0 {
-		fmt.Printf("clearfat %v (%v, size: %d)\n", nl, nl.Type, nl.Type.Width)
-	}
-
-	w := uint64(nl.Type.Width)
-
-	// Avoid taking the address for simple enough types.
-	if gc.Componentgen(nil, nl) {
-		return
-	}
-
-	c := w % 8 // bytes
-	q := w / 8 // dwords
-
-	var r0 gc.Node
-	gc.Nodreg(&r0, gc.Types[gc.TUINT64], arm64.REGZERO)
-	var dst gc.Node
-
-	// REGRT1 is reserved on arm64, see arm64/gsubr.go.
-	gc.Nodreg(&dst, gc.Types[gc.Tptr], arm64.REGRT1)
-	gc.Agen(nl, &dst)
-
-	var boff uint64
-	if q > 128 {
-		p := gins(arm64.ASUB, nil, &dst)
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = 8
-
-		var end gc.Node
-		gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
-		p = gins(arm64.AMOVD, &dst, &end)
-		p.From.Type = obj.TYPE_ADDR
-		p.From.Offset = int64(q * 8)
-
-		p = gins(arm64.AMOVD, &r0, &dst)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Offset = 8
-		p.Scond = arm64.C_XPRE
-		pl := p
-
-		p = gcmp(arm64.ACMP, &dst, &end)
-		gc.Patch(gc.Gbranch(arm64.ABNE, nil, 0), pl)
-
-		gc.Regfree(&end)
-
-		// The loop leaves R16 on the last zeroed dword
-		boff = 8
-	} else if q >= 4 && !darwin { // darwin ld64 cannot handle BR26 reloc with non-zero addend
-		p := gins(arm64.ASUB, nil, &dst)
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = 8
-		f := gc.Sysfunc("duffzero")
-		p = gins(obj.ADUFFZERO, nil, f)
-		gc.Afunclit(&p.To, f)
-
-		// 4 and 128 = magic constants: see ../../../../runtime/mkduff.go
-		p.To.Offset = int64(4 * (128 - q))
-
-		// duffzero leaves R16 on the last zeroed dword
-		boff = 8
-	} else {
-		var p *obj.Prog
-		for t := uint64(0); t < q; t++ {
-			p = gins(arm64.AMOVD, &r0, &dst)
-			p.To.Type = obj.TYPE_MEM
-			p.To.Offset = int64(8 * t)
-		}
-
-		boff = 8 * q
-	}
-
-	var p *obj.Prog
-	for t := uint64(0); t < c; t++ {
-		p = gins(arm64.AMOVB, &r0, &dst)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Offset = int64(t + boff)
-	}
-}
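
The q/c split above is a quads-then-bytes decomposition. A plain-Go sketch of the same store pattern (illustrative only; the real code stores through ZR with post-increment addressing or duffzero):

	package main

	import (
		"encoding/binary"
		"fmt"
	)

	// clear zeroes w bytes as q 8-byte stores plus c trailing
	// byte stores, mirroring clearfat's w/8 and w%8 split.
	func clear(mem []byte) {
		w := uint64(len(mem))
		q, c := w/8, w%8
		for i := uint64(0); i < q; i++ {
			binary.LittleEndian.PutUint64(mem[8*i:], 0) // one MOVD ZR store
		}
		for i := uint64(0); i < c; i++ {
			mem[8*q+i] = 0 // one MOVB ZR store
		}
	}

	func main() {
		buf := []byte("0123456789abcdefghij") // 20 bytes: q=2, c=4
		clear(buf)
		fmt.Println(buf)
	}
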
-
-// Called after regopt and peep have run.
-// Expand CHECKNIL pseudo-op into actual nil pointer check.
-func expandchecks(firstp *obj.Prog) {
-	var p1 *obj.Prog
-
-	for p := firstp; p != nil; p = p.Link {
-		if gc.Debug_checknil != 0 && gc.Ctxt.Debugvlog != 0 {
-			fmt.Printf("expandchecks: %v\n", p)
-		}
-		if p.As != obj.ACHECKNIL {
-			continue
-		}
-		if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
-			gc.Warnl(p.Lineno, "generated nil check")
-		}
-		if p.From.Type != obj.TYPE_REG {
-			gc.Fatalf("invalid nil check %v\n", p)
-		}
-
-		// check is
-		//	CBNZ arg, 2(PC)
-		//	MOVD ZR, 0(arg)
-		p1 = gc.Ctxt.NewProg()
-		gc.Clearp(p1)
-		p1.Link = p.Link
-		p.Link = p1
-		p1.Lineno = p.Lineno
-		p1.Pc = 9999
-
-		p.As = arm64.ACBNZ
-		p.To.Type = obj.TYPE_BRANCH
-		p.To.Val = p1.Link
-
-		// crash by write to memory address 0.
-		p1.As = arm64.AMOVD
-		p1.From.Type = obj.TYPE_REG
-		p1.From.Reg = arm64.REGZERO
-		p1.To.Type = obj.TYPE_MEM
-		p1.To.Reg = p.From.Reg
-		p1.To.Offset = 0
-	}
-}
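
The expanded sequence skips the store when the register is non-nil and otherwise faults by storing through address 0, which the runtime converts into the usual nil-pointer panic. Observable from Go as:

	package main

	import "fmt"

	func main() {
		defer func() { fmt.Println("recovered:", recover()) }()
		var p *int64
		*p = 0 // nil check expands to: CBNZ p, 2(PC); MOVD ZR, (p)
	}
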
-
-// res = runtime.getg()
-func getg(res *gc.Node) {
-	var n1 gc.Node
-	gc.Nodreg(&n1, res.Type, arm64.REGG)
-	gmove(&n1, res)
-}
diff --git a/src/cmd/compile/internal/arm64/gsubr.go b/src/cmd/compile/internal/arm64/gsubr.go
index 8bff578..9e73959 100644
--- a/src/cmd/compile/internal/arm64/gsubr.go
+++ b/src/cmd/compile/internal/arm64/gsubr.go
@@ -98,371 +98,6 @@
 	gc.Regfree(&ntmp)
 }
 
-func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
-	if t.IsInteger() && n1.Op == gc.OLITERAL && n2.Op != gc.OLITERAL {
-		// Reverse comparison to place constant last.
-		op = gc.Brrev(op)
-		n1, n2 = n2, n1
-	}
-
-	var r1, r2, g1, g2 gc.Node
-	gc.Regalloc(&r1, t, n1)
-	gc.Regalloc(&g1, n1.Type, &r1)
-	gc.Cgen(n1, &g1)
-	gmove(&g1, &r1)
-	if t.IsInteger() && gc.Isconst(n2, gc.CTINT) {
-		ginscon2(optoas(gc.OCMP, t), &r1, n2.Int64())
-	} else {
-		gc.Regalloc(&r2, t, n2)
-		gc.Regalloc(&g2, n1.Type, &r2)
-		gc.Cgen(n2, &g2)
-		gmove(&g2, &r2)
-		gcmp(optoas(gc.OCMP, t), &r1, &r2)
-		gc.Regfree(&g2)
-		gc.Regfree(&r2)
-	}
-	gc.Regfree(&g1)
-	gc.Regfree(&r1)
-	return gc.Gbranch(optoas(op, t), nil, likely)
-}
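
Brrev supplies the comparison that still holds after swapping the operands, which is what lets ginscmp keep the constant in the CMP immediate field. A trivial sketch of that mapping (stand-in strings, not the real gc.O* constants):

	package main

	import "fmt"

	// brrev models the deleted gc.Brrev: `5 < x` can be emitted
	// as `x > 5`, with 5 landing in the immediate.
	func brrev(op string) string {
		switch op {
		case "<":
			return ">"
		case ">":
			return "<"
		case "<=":
			return ">="
		case ">=":
			return "<="
		}
		return op // == and != are unchanged by the swap
	}

	func main() {
		fmt.Printf("5 < x  is  x %s 5\n", brrev("<"))
	}
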
-
-/*
- * generate move:
- *	t = f
- * hard part is conversions.
- */
-func gmove(f *gc.Node, t *gc.Node) {
-	if gc.Debug['M'] != 0 {
-		fmt.Printf("gmove %L -> %L\n", f, t)
-	}
-
-	ft := int(gc.Simsimtype(f.Type))
-	tt := int(gc.Simsimtype(t.Type))
-	cvt := t.Type
-
-	if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
-		gc.Complexmove(f, t)
-		return
-	}
-
-	// cannot have two memory operands
-	var r1 gc.Node
-	var a obj.As
-	if gc.Ismem(f) && gc.Ismem(t) {
-		goto hard
-	}
-
-	// convert constant to desired type
-	if f.Op == gc.OLITERAL {
-		var con gc.Node
-		switch tt {
-		default:
-			f.Convconst(&con, t.Type)
-
-		case gc.TINT32,
-			gc.TINT16,
-			gc.TINT8:
-			var con gc.Node
-			f.Convconst(&con, gc.Types[gc.TINT64])
-			var r1 gc.Node
-			gc.Regalloc(&r1, con.Type, t)
-			gins(arm64.AMOVD, &con, &r1)
-			gmove(&r1, t)
-			gc.Regfree(&r1)
-			return
-
-		case gc.TUINT32,
-			gc.TUINT16,
-			gc.TUINT8:
-			var con gc.Node
-			f.Convconst(&con, gc.Types[gc.TUINT64])
-			var r1 gc.Node
-			gc.Regalloc(&r1, con.Type, t)
-			gins(arm64.AMOVD, &con, &r1)
-			gmove(&r1, t)
-			gc.Regfree(&r1)
-			return
-		}
-
-		f = &con
-		ft = tt // so big switch will choose a simple mov
-
-		// constants can't move directly to memory.
-		if gc.Ismem(t) {
-			goto hard
-		}
-	}
-
-	// value -> value copy, first operand in memory.
-	// any floating point operand requires register
-	// src, so goto hard to copy to register first.
-	if gc.Ismem(f) && ft != tt && (gc.Isfloat[ft] || gc.Isfloat[tt]) {
-		cvt = gc.Types[ft]
-		goto hard
-	}
-
-	// value -> value copy, only one memory operand.
-	// figure out the instruction to use.
-	// break out of switch for one-instruction gins.
-	// goto rdst for "destination must be register".
-	// goto hard for "convert to cvt type first".
-	// otherwise handle and return.
-
-	switch uint32(ft)<<16 | uint32(tt) {
-	default:
-		gc.Fatalf("gmove %L -> %L", f.Type, t.Type)
-
-		/*
-		 * integer copy and truncate
-		 */
-	case gc.TINT8<<16 | gc.TINT8, // same size
-		gc.TUINT8<<16 | gc.TINT8,
-		gc.TINT16<<16 | gc.TINT8,
-		// truncate
-		gc.TUINT16<<16 | gc.TINT8,
-		gc.TINT32<<16 | gc.TINT8,
-		gc.TUINT32<<16 | gc.TINT8,
-		gc.TINT64<<16 | gc.TINT8,
-		gc.TUINT64<<16 | gc.TINT8:
-		a = arm64.AMOVB
-
-	case gc.TINT8<<16 | gc.TUINT8, // same size
-		gc.TUINT8<<16 | gc.TUINT8,
-		gc.TINT16<<16 | gc.TUINT8,
-		// truncate
-		gc.TUINT16<<16 | gc.TUINT8,
-		gc.TINT32<<16 | gc.TUINT8,
-		gc.TUINT32<<16 | gc.TUINT8,
-		gc.TINT64<<16 | gc.TUINT8,
-		gc.TUINT64<<16 | gc.TUINT8:
-		a = arm64.AMOVBU
-
-	case gc.TINT16<<16 | gc.TINT16, // same size
-		gc.TUINT16<<16 | gc.TINT16,
-		gc.TINT32<<16 | gc.TINT16,
-		// truncate
-		gc.TUINT32<<16 | gc.TINT16,
-		gc.TINT64<<16 | gc.TINT16,
-		gc.TUINT64<<16 | gc.TINT16:
-		a = arm64.AMOVH
-
-	case gc.TINT16<<16 | gc.TUINT16, // same size
-		gc.TUINT16<<16 | gc.TUINT16,
-		gc.TINT32<<16 | gc.TUINT16,
-		// truncate
-		gc.TUINT32<<16 | gc.TUINT16,
-		gc.TINT64<<16 | gc.TUINT16,
-		gc.TUINT64<<16 | gc.TUINT16:
-		a = arm64.AMOVHU
-
-	case gc.TINT32<<16 | gc.TINT32, // same size
-		gc.TUINT32<<16 | gc.TINT32,
-		gc.TINT64<<16 | gc.TINT32,
-		// truncate
-		gc.TUINT64<<16 | gc.TINT32:
-		a = arm64.AMOVW
-
-	case gc.TINT32<<16 | gc.TUINT32, // same size
-		gc.TUINT32<<16 | gc.TUINT32,
-		gc.TINT64<<16 | gc.TUINT32,
-		gc.TUINT64<<16 | gc.TUINT32:
-		a = arm64.AMOVWU
-
-	case gc.TINT64<<16 | gc.TINT64, // same size
-		gc.TINT64<<16 | gc.TUINT64,
-		gc.TUINT64<<16 | gc.TINT64,
-		gc.TUINT64<<16 | gc.TUINT64:
-		a = arm64.AMOVD
-
-		/*
-		 * integer up-conversions
-		 */
-	case gc.TINT8<<16 | gc.TINT16, // sign extend int8
-		gc.TINT8<<16 | gc.TUINT16,
-		gc.TINT8<<16 | gc.TINT32,
-		gc.TINT8<<16 | gc.TUINT32,
-		gc.TINT8<<16 | gc.TINT64,
-		gc.TINT8<<16 | gc.TUINT64:
-		a = arm64.AMOVB
-
-		goto rdst
-
-	case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8
-		gc.TUINT8<<16 | gc.TUINT16,
-		gc.TUINT8<<16 | gc.TINT32,
-		gc.TUINT8<<16 | gc.TUINT32,
-		gc.TUINT8<<16 | gc.TINT64,
-		gc.TUINT8<<16 | gc.TUINT64:
-		a = arm64.AMOVBU
-
-		goto rdst
-
-	case gc.TINT16<<16 | gc.TINT32, // sign extend int16
-		gc.TINT16<<16 | gc.TUINT32,
-		gc.TINT16<<16 | gc.TINT64,
-		gc.TINT16<<16 | gc.TUINT64:
-		a = arm64.AMOVH
-
-		goto rdst
-
-	case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16
-		gc.TUINT16<<16 | gc.TUINT32,
-		gc.TUINT16<<16 | gc.TINT64,
-		gc.TUINT16<<16 | gc.TUINT64:
-		a = arm64.AMOVHU
-
-		goto rdst
-
-	case gc.TINT32<<16 | gc.TINT64, // sign extend int32
-		gc.TINT32<<16 | gc.TUINT64:
-		a = arm64.AMOVW
-
-		goto rdst
-
-	case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
-		gc.TUINT32<<16 | gc.TUINT64:
-		a = arm64.AMOVWU
-
-		goto rdst
-
-	/*
-	* float to integer
-	 */
-	case gc.TFLOAT32<<16 | gc.TINT32:
-		a = arm64.AFCVTZSSW
-		goto rdst
-
-	case gc.TFLOAT64<<16 | gc.TINT32:
-		a = arm64.AFCVTZSDW
-		goto rdst
-
-	case gc.TFLOAT32<<16 | gc.TINT64:
-		a = arm64.AFCVTZSS
-		goto rdst
-
-	case gc.TFLOAT64<<16 | gc.TINT64:
-		a = arm64.AFCVTZSD
-		goto rdst
-
-	case gc.TFLOAT32<<16 | gc.TUINT32:
-		a = arm64.AFCVTZUSW
-		goto rdst
-
-	case gc.TFLOAT64<<16 | gc.TUINT32:
-		a = arm64.AFCVTZUDW
-		goto rdst
-
-	case gc.TFLOAT32<<16 | gc.TUINT64:
-		a = arm64.AFCVTZUS
-		goto rdst
-
-	case gc.TFLOAT64<<16 | gc.TUINT64:
-		a = arm64.AFCVTZUD
-		goto rdst
-
-	case gc.TFLOAT32<<16 | gc.TINT16,
-		gc.TFLOAT32<<16 | gc.TINT8,
-		gc.TFLOAT64<<16 | gc.TINT16,
-		gc.TFLOAT64<<16 | gc.TINT8:
-		cvt = gc.Types[gc.TINT32]
-
-		goto hard
-
-	case gc.TFLOAT32<<16 | gc.TUINT16,
-		gc.TFLOAT32<<16 | gc.TUINT8,
-		gc.TFLOAT64<<16 | gc.TUINT16,
-		gc.TFLOAT64<<16 | gc.TUINT8:
-		cvt = gc.Types[gc.TUINT32]
-
-		goto hard
-
-	/*
-	 * integer to float
-	 */
-	case gc.TINT8<<16 | gc.TFLOAT32,
-		gc.TINT16<<16 | gc.TFLOAT32,
-		gc.TINT32<<16 | gc.TFLOAT32:
-		a = arm64.ASCVTFWS
-
-		goto rdst
-
-	case gc.TINT8<<16 | gc.TFLOAT64,
-		gc.TINT16<<16 | gc.TFLOAT64,
-		gc.TINT32<<16 | gc.TFLOAT64:
-		a = arm64.ASCVTFWD
-
-		goto rdst
-
-	case gc.TINT64<<16 | gc.TFLOAT32:
-		a = arm64.ASCVTFS
-		goto rdst
-
-	case gc.TINT64<<16 | gc.TFLOAT64:
-		a = arm64.ASCVTFD
-		goto rdst
-
-	case gc.TUINT8<<16 | gc.TFLOAT32,
-		gc.TUINT16<<16 | gc.TFLOAT32,
-		gc.TUINT32<<16 | gc.TFLOAT32:
-		a = arm64.AUCVTFWS
-
-		goto rdst
-
-	case gc.TUINT8<<16 | gc.TFLOAT64,
-		gc.TUINT16<<16 | gc.TFLOAT64,
-		gc.TUINT32<<16 | gc.TFLOAT64:
-		a = arm64.AUCVTFWD
-
-		goto rdst
-
-	case gc.TUINT64<<16 | gc.TFLOAT32:
-		a = arm64.AUCVTFS
-		goto rdst
-
-	case gc.TUINT64<<16 | gc.TFLOAT64:
-		a = arm64.AUCVTFD
-		goto rdst
-
-		/*
-		 * float to float
-		 */
-	case gc.TFLOAT32<<16 | gc.TFLOAT32:
-		a = arm64.AFMOVS
-
-	case gc.TFLOAT64<<16 | gc.TFLOAT64:
-		a = arm64.AFMOVD
-
-	case gc.TFLOAT32<<16 | gc.TFLOAT64:
-		a = arm64.AFCVTSD
-		goto rdst
-
-	case gc.TFLOAT64<<16 | gc.TFLOAT32:
-		a = arm64.AFCVTDS
-		goto rdst
-	}
-
-	gins(a, f, t)
-	return
-
-	// requires register destination
-rdst:
-	gc.Regalloc(&r1, t.Type, t)
-
-	gins(a, f, &r1)
-	gmove(&r1, t)
-	gc.Regfree(&r1)
-	return
-
-	// requires register intermediate
-hard:
-	gc.Regalloc(&r1, cvt, t)
-
-	gmove(f, &r1)
-	gmove(&r1, t)
-	gc.Regfree(&r1)
-	return
-}
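
The big switch keys on both type kinds packed into a single uint32, so every (from, to) pair is one distinct constant case. The idiom in isolation (toy kind values, not the real gc.T* constants):

	package main

	import "fmt"

	const (
		TINT32 = iota
		TINT64
		TFLOAT64
	)

	// conv picks an instruction for a source/destination kind pair
	// the way the deleted gmove does.
	func conv(ft, tt uint32) string {
		switch ft<<16 | tt {
		case TINT32<<16 | TINT64:
			return "MOVW (sign-extend)"
		case TINT64<<16 | TFLOAT64:
			return "SCVTFD"
		default:
			return "unhandled pair"
		}
	}

	func main() {
		fmt.Println(conv(TINT32, TINT64))
		fmt.Println(conv(TINT64, TFLOAT64))
	}
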
-
 // gins is called by the front end.
 // It synthesizes some multiple-instruction sequences
 // so the front end can stay simpler.
@@ -582,398 +217,3 @@
 	raddr(lhs, p)
 	return p
 }
-
-/*
- * return Axxx for Oxxx on type t.
- */
-func optoas(op gc.Op, t *gc.Type) obj.As {
-	if t == nil {
-		gc.Fatalf("optoas: t is nil")
-	}
-
-	// avoid constant conversions in switches below
-	const (
-		OMINUS_ = uint32(gc.OMINUS) << 16
-		OLSH_   = uint32(gc.OLSH) << 16
-		ORSH_   = uint32(gc.ORSH) << 16
-		OADD_   = uint32(gc.OADD) << 16
-		OSUB_   = uint32(gc.OSUB) << 16
-		OMUL_   = uint32(gc.OMUL) << 16
-		ODIV_   = uint32(gc.ODIV) << 16
-		OOR_    = uint32(gc.OOR) << 16
-		OAND_   = uint32(gc.OAND) << 16
-		OXOR_   = uint32(gc.OXOR) << 16
-		OEQ_    = uint32(gc.OEQ) << 16
-		ONE_    = uint32(gc.ONE) << 16
-		OLT_    = uint32(gc.OLT) << 16
-		OLE_    = uint32(gc.OLE) << 16
-		OGE_    = uint32(gc.OGE) << 16
-		OGT_    = uint32(gc.OGT) << 16
-		OCMP_   = uint32(gc.OCMP) << 16
-		OAS_    = uint32(gc.OAS) << 16
-		OHMUL_  = uint32(gc.OHMUL) << 16
-		OSQRT_  = uint32(gc.OSQRT) << 16
-	)
-
-	a := obj.AXXX
-	switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
-	default:
-		gc.Fatalf("optoas: no entry for op=%v type=%v", op, t)
-
-	case OEQ_ | gc.TBOOL,
-		OEQ_ | gc.TINT8,
-		OEQ_ | gc.TUINT8,
-		OEQ_ | gc.TINT16,
-		OEQ_ | gc.TUINT16,
-		OEQ_ | gc.TINT32,
-		OEQ_ | gc.TUINT32,
-		OEQ_ | gc.TINT64,
-		OEQ_ | gc.TUINT64,
-		OEQ_ | gc.TPTR32,
-		OEQ_ | gc.TPTR64,
-		OEQ_ | gc.TFLOAT32,
-		OEQ_ | gc.TFLOAT64:
-		a = arm64.ABEQ
-
-	case ONE_ | gc.TBOOL,
-		ONE_ | gc.TINT8,
-		ONE_ | gc.TUINT8,
-		ONE_ | gc.TINT16,
-		ONE_ | gc.TUINT16,
-		ONE_ | gc.TINT32,
-		ONE_ | gc.TUINT32,
-		ONE_ | gc.TINT64,
-		ONE_ | gc.TUINT64,
-		ONE_ | gc.TPTR32,
-		ONE_ | gc.TPTR64,
-		ONE_ | gc.TFLOAT32,
-		ONE_ | gc.TFLOAT64:
-		a = arm64.ABNE
-
-	case OLT_ | gc.TINT8,
-		OLT_ | gc.TINT16,
-		OLT_ | gc.TINT32,
-		OLT_ | gc.TINT64:
-		a = arm64.ABLT
-
-	case OLT_ | gc.TUINT8,
-		OLT_ | gc.TUINT16,
-		OLT_ | gc.TUINT32,
-		OLT_ | gc.TUINT64,
-		OLT_ | gc.TFLOAT32,
-		OLT_ | gc.TFLOAT64:
-		a = arm64.ABLO
-
-	case OLE_ | gc.TINT8,
-		OLE_ | gc.TINT16,
-		OLE_ | gc.TINT32,
-		OLE_ | gc.TINT64:
-		a = arm64.ABLE
-
-	case OLE_ | gc.TUINT8,
-		OLE_ | gc.TUINT16,
-		OLE_ | gc.TUINT32,
-		OLE_ | gc.TUINT64,
-		OLE_ | gc.TFLOAT32,
-		OLE_ | gc.TFLOAT64:
-		a = arm64.ABLS
-
-	case OGT_ | gc.TINT8,
-		OGT_ | gc.TINT16,
-		OGT_ | gc.TINT32,
-		OGT_ | gc.TINT64,
-		OGT_ | gc.TFLOAT32,
-		OGT_ | gc.TFLOAT64:
-		a = arm64.ABGT
-
-	case OGT_ | gc.TUINT8,
-		OGT_ | gc.TUINT16,
-		OGT_ | gc.TUINT32,
-		OGT_ | gc.TUINT64:
-		a = arm64.ABHI
-
-	case OGE_ | gc.TINT8,
-		OGE_ | gc.TINT16,
-		OGE_ | gc.TINT32,
-		OGE_ | gc.TINT64,
-		OGE_ | gc.TFLOAT32,
-		OGE_ | gc.TFLOAT64:
-		a = arm64.ABGE
-
-	case OGE_ | gc.TUINT8,
-		OGE_ | gc.TUINT16,
-		OGE_ | gc.TUINT32,
-		OGE_ | gc.TUINT64:
-		a = arm64.ABHS
-
-	case OCMP_ | gc.TBOOL,
-		OCMP_ | gc.TINT8,
-		OCMP_ | gc.TINT16,
-		OCMP_ | gc.TINT32,
-		OCMP_ | gc.TPTR32,
-		OCMP_ | gc.TINT64,
-		OCMP_ | gc.TUINT8,
-		OCMP_ | gc.TUINT16,
-		OCMP_ | gc.TUINT32,
-		OCMP_ | gc.TUINT64,
-		OCMP_ | gc.TPTR64:
-		a = arm64.ACMP
-
-	case OCMP_ | gc.TFLOAT32:
-		a = arm64.AFCMPS
-
-	case OCMP_ | gc.TFLOAT64:
-		a = arm64.AFCMPD
-
-	case OAS_ | gc.TBOOL,
-		OAS_ | gc.TINT8:
-		a = arm64.AMOVB
-
-	case OAS_ | gc.TUINT8:
-		a = arm64.AMOVBU
-
-	case OAS_ | gc.TINT16:
-		a = arm64.AMOVH
-
-	case OAS_ | gc.TUINT16:
-		a = arm64.AMOVHU
-
-	case OAS_ | gc.TINT32:
-		a = arm64.AMOVW
-
-	case OAS_ | gc.TUINT32,
-		OAS_ | gc.TPTR32:
-		a = arm64.AMOVWU
-
-	case OAS_ | gc.TINT64,
-		OAS_ | gc.TUINT64,
-		OAS_ | gc.TPTR64:
-		a = arm64.AMOVD
-
-	case OAS_ | gc.TFLOAT32:
-		a = arm64.AFMOVS
-
-	case OAS_ | gc.TFLOAT64:
-		a = arm64.AFMOVD
-
-	case OADD_ | gc.TINT8,
-		OADD_ | gc.TUINT8,
-		OADD_ | gc.TINT16,
-		OADD_ | gc.TUINT16,
-		OADD_ | gc.TINT32,
-		OADD_ | gc.TUINT32,
-		OADD_ | gc.TPTR32,
-		OADD_ | gc.TINT64,
-		OADD_ | gc.TUINT64,
-		OADD_ | gc.TPTR64:
-		a = arm64.AADD
-
-	case OADD_ | gc.TFLOAT32:
-		a = arm64.AFADDS
-
-	case OADD_ | gc.TFLOAT64:
-		a = arm64.AFADDD
-
-	case OSUB_ | gc.TINT8,
-		OSUB_ | gc.TUINT8,
-		OSUB_ | gc.TINT16,
-		OSUB_ | gc.TUINT16,
-		OSUB_ | gc.TINT32,
-		OSUB_ | gc.TUINT32,
-		OSUB_ | gc.TPTR32,
-		OSUB_ | gc.TINT64,
-		OSUB_ | gc.TUINT64,
-		OSUB_ | gc.TPTR64:
-		a = arm64.ASUB
-
-	case OSUB_ | gc.TFLOAT32:
-		a = arm64.AFSUBS
-
-	case OSUB_ | gc.TFLOAT64:
-		a = arm64.AFSUBD
-
-	case OMINUS_ | gc.TINT8,
-		OMINUS_ | gc.TUINT8,
-		OMINUS_ | gc.TINT16,
-		OMINUS_ | gc.TUINT16,
-		OMINUS_ | gc.TINT32,
-		OMINUS_ | gc.TUINT32,
-		OMINUS_ | gc.TPTR32,
-		OMINUS_ | gc.TINT64,
-		OMINUS_ | gc.TUINT64,
-		OMINUS_ | gc.TPTR64:
-		a = arm64.ANEG
-
-	case OMINUS_ | gc.TFLOAT32:
-		a = arm64.AFNEGS
-
-	case OMINUS_ | gc.TFLOAT64:
-		a = arm64.AFNEGD
-
-	case OAND_ | gc.TINT8,
-		OAND_ | gc.TUINT8,
-		OAND_ | gc.TINT16,
-		OAND_ | gc.TUINT16,
-		OAND_ | gc.TINT32,
-		OAND_ | gc.TUINT32,
-		OAND_ | gc.TPTR32,
-		OAND_ | gc.TINT64,
-		OAND_ | gc.TUINT64,
-		OAND_ | gc.TPTR64:
-		a = arm64.AAND
-
-	case OOR_ | gc.TINT8,
-		OOR_ | gc.TUINT8,
-		OOR_ | gc.TINT16,
-		OOR_ | gc.TUINT16,
-		OOR_ | gc.TINT32,
-		OOR_ | gc.TUINT32,
-		OOR_ | gc.TPTR32,
-		OOR_ | gc.TINT64,
-		OOR_ | gc.TUINT64,
-		OOR_ | gc.TPTR64:
-		a = arm64.AORR
-
-	case OXOR_ | gc.TINT8,
-		OXOR_ | gc.TUINT8,
-		OXOR_ | gc.TINT16,
-		OXOR_ | gc.TUINT16,
-		OXOR_ | gc.TINT32,
-		OXOR_ | gc.TUINT32,
-		OXOR_ | gc.TPTR32,
-		OXOR_ | gc.TINT64,
-		OXOR_ | gc.TUINT64,
-		OXOR_ | gc.TPTR64:
-		a = arm64.AEOR
-
-		// TODO(minux): handle rotates
-	//case CASE(OLROT, TINT8):
-	//case CASE(OLROT, TUINT8):
-	//case CASE(OLROT, TINT16):
-	//case CASE(OLROT, TUINT16):
-	//case CASE(OLROT, TINT32):
-	//case CASE(OLROT, TUINT32):
-	//case CASE(OLROT, TPTR32):
-	//case CASE(OLROT, TINT64):
-	//case CASE(OLROT, TUINT64):
-	//case CASE(OLROT, TPTR64):
-	//	a = 0//???; RLDC?
-	//	break;
-
-	case OLSH_ | gc.TINT8,
-		OLSH_ | gc.TUINT8,
-		OLSH_ | gc.TINT16,
-		OLSH_ | gc.TUINT16,
-		OLSH_ | gc.TINT32,
-		OLSH_ | gc.TUINT32,
-		OLSH_ | gc.TPTR32,
-		OLSH_ | gc.TINT64,
-		OLSH_ | gc.TUINT64,
-		OLSH_ | gc.TPTR64:
-		a = arm64.ALSL
-
-	case ORSH_ | gc.TUINT8,
-		ORSH_ | gc.TUINT16,
-		ORSH_ | gc.TUINT32,
-		ORSH_ | gc.TPTR32,
-		ORSH_ | gc.TUINT64,
-		ORSH_ | gc.TPTR64:
-		a = arm64.ALSR
-
-	case ORSH_ | gc.TINT8,
-		ORSH_ | gc.TINT16,
-		ORSH_ | gc.TINT32,
-		ORSH_ | gc.TINT64:
-		a = arm64.AASR
-
-	case OHMUL_ | gc.TINT64:
-		a = arm64.ASMULH
-
-	case OHMUL_ | gc.TUINT64,
-		OHMUL_ | gc.TPTR64:
-		a = arm64.AUMULH
-
-	case OMUL_ | gc.TINT8,
-		OMUL_ | gc.TINT16,
-		OMUL_ | gc.TINT32:
-		a = arm64.ASMULL
-
-	case OMUL_ | gc.TINT64:
-		a = arm64.AMUL
-
-	case OMUL_ | gc.TUINT8,
-		OMUL_ | gc.TUINT16,
-		OMUL_ | gc.TUINT32,
-		OMUL_ | gc.TPTR32:
-		// don't use word multiply; the high 32 bits are undefined.
-		a = arm64.AUMULL
-
-	case OMUL_ | gc.TUINT64,
-		OMUL_ | gc.TPTR64:
-		a = arm64.AMUL // for 64-bit multiplies, signedness doesn't matter.
-
-	case OMUL_ | gc.TFLOAT32:
-		a = arm64.AFMULS
-
-	case OMUL_ | gc.TFLOAT64:
-		a = arm64.AFMULD
-
-	case ODIV_ | gc.TINT8,
-		ODIV_ | gc.TINT16,
-		ODIV_ | gc.TINT32,
-		ODIV_ | gc.TINT64:
-		a = arm64.ASDIV
-
-	case ODIV_ | gc.TUINT8,
-		ODIV_ | gc.TUINT16,
-		ODIV_ | gc.TUINT32,
-		ODIV_ | gc.TPTR32,
-		ODIV_ | gc.TUINT64,
-		ODIV_ | gc.TPTR64:
-		a = arm64.AUDIV
-
-	case ODIV_ | gc.TFLOAT32:
-		a = arm64.AFDIVS
-
-	case ODIV_ | gc.TFLOAT64:
-		a = arm64.AFDIVD
-
-	case OSQRT_ | gc.TFLOAT64:
-		a = arm64.AFSQRTD
-	}
-
-	return a
-}
-
-const (
-	ODynam   = 1 << 0
-	OAddable = 1 << 1
-)
-
-func xgen(n *gc.Node, a *gc.Node, o int) bool {
-	// TODO(minux)
-
-	return -1 != 0 /*TypeKind(100016)*/
-}
-
-func sudoclean() {
-	return
-}
-
-/*
- * generate code to compute address of n,
- * a reference to a (perhaps nested) field inside
- * an array or struct.
- * return 0 on failure, 1 on success.
- * on success, leaves usable address in a.
- *
- * caller is responsible for calling sudoclean
- * after successful sudoaddable,
- * to release the register used for a.
- */
-func sudoaddable(as obj.As, n *gc.Node, a *obj.Addr) bool {
-	// TODO(minux)
-
-	*a = obj.Addr{}
-	return false
-}
diff --git a/src/cmd/compile/internal/arm64/peep.go b/src/cmd/compile/internal/arm64/peep.go
deleted file mode 100644
index 6e0b527..0000000
--- a/src/cmd/compile/internal/arm64/peep.go
+++ /dev/null
@@ -1,797 +0,0 @@
-// Derived from Inferno utils/6c/peep.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/6c/peep.c
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package arm64
-
-import (
-	"cmd/compile/internal/gc"
-	"cmd/internal/obj"
-	"cmd/internal/obj/arm64"
-	"fmt"
-)
-
-var gactive uint32
-
-func peep(firstp *obj.Prog) {
-	g := gc.Flowstart(firstp, nil)
-	if g == nil {
-		return
-	}
-	gactive = 0
-
-	var p *obj.Prog
-	var r *gc.Flow
-	var t int
-loop1:
-	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
-		gc.Dumpit("loop1", g.Start, 0)
-	}
-
-	t = 0
-	for r = g.Start; r != nil; r = r.Link {
-		p = r.Prog
-
-		// TODO(minux) Handle smaller moves. arm and amd64
-		// distinguish between moves that *must* sign/zero
-		// extend and moves that don't care so they
-		// can eliminate moves that don't care without
-		// breaking moves that do care. This might let us
-		// simplify or remove the next peep loop, too.
-		if p.As == arm64.AMOVD || p.As == arm64.AFMOVD {
-			if regtyp(&p.To) {
-				// Try to eliminate reg->reg moves
-				if regtyp(&p.From) {
-					if p.From.Type == p.To.Type {
-						if copyprop(r) {
-							excise(r)
-							t++
-						} else if subprop(r) && copyprop(r) {
-							excise(r)
-							t++
-						}
-					}
-				}
-			}
-		}
-	}
-
-	if t != 0 {
-		goto loop1
-	}
-
-	/*
-	 * look for MOVB x,R; MOVB R,R (for small MOVs not handled above)
-	 */
-	var p1 *obj.Prog
-	var r1 *gc.Flow
-	for r := g.Start; r != nil; r = r.Link {
-		p = r.Prog
-		switch p.As {
-		default:
-			continue
-
-		case arm64.AMOVH,
-			arm64.AMOVHU,
-			arm64.AMOVB,
-			arm64.AMOVBU,
-			arm64.AMOVW,
-			arm64.AMOVWU:
-			if p.To.Type != obj.TYPE_REG {
-				continue
-			}
-		}
-
-		r1 = r.Link
-		if r1 == nil {
-			continue
-		}
-		p1 = r1.Prog
-		if p1.As != p.As {
-			continue
-		}
-		if p1.From.Type != obj.TYPE_REG || p1.From.Reg != p.To.Reg {
-			continue
-		}
-		if p1.To.Type != obj.TYPE_REG || p1.To.Reg != p.To.Reg {
-			continue
-		}
-		excise(r1)
-	}
-
-	if gc.Debug['D'] > 1 {
-		goto ret /* allow following code improvement to be suppressed */
-	}
-
-	// MOVD $c, R'; ADD R', R (R' unused) -> ADD $c, R
-	for r := g.Start; r != nil; r = r.Link {
-		p = r.Prog
-		switch p.As {
-		default:
-			continue
-
-		case arm64.AMOVD:
-			if p.To.Type != obj.TYPE_REG {
-				continue
-			}
-			if p.From.Type != obj.TYPE_CONST {
-				continue
-			}
-			if p.From.Offset < 0 || 4096 <= p.From.Offset {
-				continue
-			}
-		}
-		r1 = r.Link
-		if r1 == nil {
-			continue
-		}
-		p1 = r1.Prog
-		if p1.As != arm64.AADD && p1.As != arm64.ASUB { // TODO(aram): also logical after we have bimm.
-			continue
-		}
-		if p1.From.Type != obj.TYPE_REG || p1.From.Reg != p.To.Reg {
-			continue
-		}
-		if p1.To.Type != obj.TYPE_REG {
-			continue
-		}
-		if gc.Debug['P'] != 0 {
-			fmt.Printf("encoding $%d directly into %v in:\n%v\n%v\n", p.From.Offset, p1.As, p, p1)
-		}
-		p1.From.Type = obj.TYPE_CONST
-		p1.From = p.From
-		excise(r)
-	}
-
-	/* TODO(minux):
-	 * look for OP x,y,R; CMP R, $0 -> OP.S x,y,R
-	 * when OP can set condition codes correctly
-	 */
-
-ret:
-	gc.Flowend(g)
-}
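
The MOVD/ADD folding near the end is a classic two-instruction peephole. A toy list-rewriting version (it ignores the liveness side condition; the real pass also had to know R' was dead afterwards):

	package main

	import "fmt"

	type inst struct {
		op, from, to string
	}

	// fold rewrites MOVD $c, Rt; ADD Rt, Rd into ADD $c, Rd, as the
	// deleted loop did for 0 <= c < 4096.
	func fold(prog []inst) []inst {
		var out []inst
		for i := 0; i < len(prog); i++ {
			p := prog[i]
			if p.op == "MOVD" && len(p.from) > 0 && p.from[0] == '$' &&
				i+1 < len(prog) && prog[i+1].op == "ADD" && prog[i+1].from == p.to {
				out = append(out, inst{"ADD", p.from, prog[i+1].to})
				i++ // the MOVD is excised, the ADD replaced
				continue
			}
			out = append(out, p)
		}
		return out
	}

	func main() {
		prog := []inst{{"MOVD", "$8", "R1"}, {"ADD", "R1", "R2"}}
		fmt.Println(fold(prog)) // [{ADD $8 R2}]
	}
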
-
-func excise(r *gc.Flow) {
-	p := r.Prog
-	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
-		fmt.Printf("%v ===delete===\n", p)
-	}
-	obj.Nopout(p)
-	gc.Ostats.Ndelmov++
-}
-
-func regtyp(a *obj.Addr) bool {
-	// TODO(rsc): Floating point register exclusions?
-	return a.Type == obj.TYPE_REG && arm64.REG_R0 <= a.Reg && a.Reg <= arm64.REG_F31 && a.Reg != arm64.REGZERO
-}
-
-/*
- * the idea is to substitute
- * one register for another
- * from one MOV to another
- *	MOV	a, R1
- *	ADD	b, R1	/ no use of R2
- *	MOV	R1, R2
- * would be converted to
- *	MOV	a, R2
- *	ADD	b, R2
- *	MOV	R2, R1
- * hopefully, then the former or latter MOV
- * will be eliminated by copy propagation.
- *
- * r0 (the argument, not the register) is the MOV at the end of the
- * above sequences. This returns true if it modified any instructions.
- */
-func subprop(r0 *gc.Flow) bool {
-	p := r0.Prog
-	v1 := &p.From
-	if !regtyp(v1) {
-		return false
-	}
-	v2 := &p.To
-	if !regtyp(v2) {
-		return false
-	}
-	for r := gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
-		if gc.Uniqs(r) == nil {
-			break
-		}
-		p = r.Prog
-		if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
-			continue
-		}
-		if p.Info.Flags&gc.Call != 0 {
-			return false
-		}
-
-		if p.Info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightWrite {
-			if p.To.Type == v1.Type {
-				if p.To.Reg == v1.Reg {
-					copysub(&p.To, v1, v2, true)
-					if gc.Debug['P'] != 0 {
-						fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
-						if p.From.Type == v2.Type {
-							fmt.Printf(" excise")
-						}
-						fmt.Printf("\n")
-					}
-
-					for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
-						p = r.Prog
-						copysub(&p.From, v1, v2, true)
-						copysub1(p, v1, v2, true)
-						copysub(&p.To, v1, v2, true)
-						if gc.Debug['P'] != 0 {
-							fmt.Printf("%v\n", r.Prog)
-						}
-					}
-
-					v1.Reg, v2.Reg = v2.Reg, v1.Reg
-					if gc.Debug['P'] != 0 {
-						fmt.Printf("%v last\n", r.Prog)
-					}
-					return true
-				}
-			}
-		}
-
-		if copyau(&p.From, v2) || copyau1(p, v2) || copyau(&p.To, v2) {
-			break
-		}
-		if copysub(&p.From, v1, v2, false) || copysub1(p, v1, v2, false) || copysub(&p.To, v1, v2, false) {
-			break
-		}
-	}
-
-	return false
-}
-
-/*
- * The idea is to remove redundant copies.
- *	v1->v2	F=0
- *	(use v2	s/v2/v1/)*
- *	set v1	F=1
- *	use v2	return fail (v1->v2 move must remain)
- *	-----------------
- *	v1->v2	F=0
- *	(use v2	s/v2/v1/)*
- *	set v1	F=1
- *	set v2	return success (caller can remove v1->v2 move)
- */
-func copyprop(r0 *gc.Flow) bool {
-	p := r0.Prog
-	v1 := &p.From
-	v2 := &p.To
-	if copyas(v1, v2) {
-		if gc.Debug['P'] != 0 {
-			fmt.Printf("eliminating self-move: %v\n", r0.Prog)
-		}
-		return true
-	}
-
-	gactive++
-	if gc.Debug['P'] != 0 {
-		fmt.Printf("trying to eliminate %v->%v move from:\n%v\n", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r0.Prog)
-	}
-	return copy1(v1, v2, r0.S1, false)
-}
-
-// copy1 replaces uses of v2 with v1 starting at r and returns true if
-// all uses were rewritten.
-func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f bool) bool {
-	if uint32(r.Active) == gactive {
-		if gc.Debug['P'] != 0 {
-			fmt.Printf("act set; return 1\n")
-		}
-		return true
-	}
-
-	r.Active = int32(gactive)
-	if gc.Debug['P'] != 0 {
-		fmt.Printf("copy1 replace %v with %v f=%v\n", gc.Ctxt.Dconv(v2), gc.Ctxt.Dconv(v1), f)
-	}
-	for ; r != nil; r = r.S1 {
-		p := r.Prog
-		if gc.Debug['P'] != 0 {
-			fmt.Printf("%v", p)
-		}
-		if !f && gc.Uniqp(r) == nil {
-			// Multiple predecessors; conservatively
-			// assume v1 was set on other path
-			f = true
-
-			if gc.Debug['P'] != 0 {
-				fmt.Printf("; merge; f=%v", f)
-			}
-		}
-
-		switch t := copyu(p, v2, nil); t {
-		case 2: /* rar, can't split */
-			if gc.Debug['P'] != 0 {
-				fmt.Printf("; %v rar; return 0\n", gc.Ctxt.Dconv(v2))
-			}
-			return false
-
-		case 3: /* set */
-			if gc.Debug['P'] != 0 {
-				fmt.Printf("; %v set; return 1\n", gc.Ctxt.Dconv(v2))
-			}
-			return true
-
-		case 1, /* used, substitute */
-			4: /* use and set */
-			if f {
-				if gc.Debug['P'] == 0 {
-					return false
-				}
-				if t == 4 {
-					fmt.Printf("; %v used+set and f=%v; return 0\n", gc.Ctxt.Dconv(v2), f)
-				} else {
-					fmt.Printf("; %v used and f=%v; return 0\n", gc.Ctxt.Dconv(v2), f)
-				}
-				return false
-			}
-
-			if copyu(p, v2, v1) != 0 {
-				if gc.Debug['P'] != 0 {
-					fmt.Printf("; sub fail; return 0\n")
-				}
-				return false
-			}
-
-			if gc.Debug['P'] != 0 {
-				fmt.Printf("; sub %v->%v\n => %v", gc.Ctxt.Dconv(v2), gc.Ctxt.Dconv(v1), p)
-			}
-			if t == 4 {
-				if gc.Debug['P'] != 0 {
-					fmt.Printf("; %v used+set; return 1\n", gc.Ctxt.Dconv(v2))
-				}
-				return true
-			}
-		}
-
-		if !f {
-			t := copyu(p, v1, nil)
-			if t == 2 || t == 3 || t == 4 {
-				f = true
-				if gc.Debug['P'] != 0 {
-					fmt.Printf("; %v set and !f; f=%v", gc.Ctxt.Dconv(v1), f)
-				}
-			}
-		}
-
-		if gc.Debug['P'] != 0 {
-			fmt.Printf("\n")
-		}
-		if r.S2 != nil {
-			if !copy1(v1, v2, r.S2, f) {
-				return false
-			}
-		}
-	}
-	return true
-}
-
-// If s==nil, copyu returns the set/use of v in p; otherwise, it
-// modifies p to replace reads of v with reads of s and returns 0 for
-// success or non-zero for failure.
-//
-// If s==nil, copyu returns one of the following values:
-//	1 if v only used
-//	2 if v is set and used in one address (read-alter-rewrite;
-//	  can't substitute)
-//	3 if v is only set
-//	4 if v is set in one address and used in another (so addresses
-//	  can be rewritten independently)
-//	0 otherwise (not touched)
-func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
-	if p.From3Type() != obj.TYPE_NONE {
-		// 7g never generates a from3
-		fmt.Printf("copyu: from3 (%v) not implemented\n", gc.Ctxt.Dconv(p.From3))
-	}
-	if p.RegTo2 != obj.REG_NONE {
-		// 7g never generates a to2
-		fmt.Printf("copyu: RegTo2 (%v) not implemented\n", obj.Rconv(int(p.RegTo2)))
-	}
-
-	switch p.As {
-	default:
-		fmt.Printf("copyu: can't find %v\n", p.As)
-		return 2
-
-	case obj.ANOP, /* read p->from, write p->to */
-		arm64.ANEG,
-		arm64.AFNEGD,
-		arm64.AFNEGS,
-		arm64.AFSQRTD,
-		arm64.AFCVTZSD,
-		arm64.AFCVTZSS,
-		arm64.AFCVTZSDW,
-		arm64.AFCVTZSSW,
-		arm64.AFCVTZUD,
-		arm64.AFCVTZUS,
-		arm64.AFCVTZUDW,
-		arm64.AFCVTZUSW,
-		arm64.AFCVTSD,
-		arm64.AFCVTDS,
-		arm64.ASCVTFD,
-		arm64.ASCVTFS,
-		arm64.ASCVTFWD,
-		arm64.ASCVTFWS,
-		arm64.AUCVTFD,
-		arm64.AUCVTFS,
-		arm64.AUCVTFWD,
-		arm64.AUCVTFWS,
-		arm64.AMOVB,
-		arm64.AMOVBU,
-		arm64.AMOVH,
-		arm64.AMOVHU,
-		arm64.AMOVW,
-		arm64.AMOVWU,
-		arm64.AMOVD,
-		arm64.AFMOVS,
-		arm64.AFMOVD:
-		if p.Scond == 0 {
-			if s != nil {
-				if copysub(&p.From, v, s, true) {
-					return 1
-				}
-
-				// Update only indirect uses of v in p->to
-				if !copyas(&p.To, v) {
-					if copysub(&p.To, v, s, true) {
-						return 1
-					}
-				}
-				return 0
-			}
-
-			if copyas(&p.To, v) {
-				// Fix up implicit from
-				if p.From.Type == obj.TYPE_NONE {
-					p.From = p.To
-				}
-				if copyau(&p.From, v) {
-					return 4
-				}
-				return 3
-			}
-
-			if copyau(&p.From, v) {
-				return 1
-			}
-			if copyau(&p.To, v) {
-				// p->to only indirectly uses v
-				return 1
-			}
-
-			return 0
-		}
-
-		/* rar p->from, write p->to or read p->from, rar p->to */
-		if p.From.Type == obj.TYPE_MEM {
-			if copyas(&p.From, v) {
-				// No s!=nil check; need to fail
-				// anyway in that case
-				return 2
-			}
-
-			if s != nil {
-				if copysub(&p.To, v, s, true) {
-					return 1
-				}
-				return 0
-			}
-
-			if copyas(&p.To, v) {
-				return 3
-			}
-		} else if p.To.Type == obj.TYPE_MEM {
-			if copyas(&p.To, v) {
-				return 2
-			}
-			if s != nil {
-				if copysub(&p.From, v, s, true) {
-					return 1
-				}
-				return 0
-			}
-
-			if copyau(&p.From, v) {
-				return 1
-			}
-		} else {
-			fmt.Printf("copyu: bad %v\n", p)
-		}
-
-		return 0
-
-	case arm64.AADD, /* read p->from, read p->reg, write p->to */
-		arm64.AADDS,
-		arm64.ASUB,
-		arm64.AADC,
-		arm64.AAND,
-		arm64.AORR,
-		arm64.AEOR,
-		arm64.AROR,
-		arm64.AMUL,
-		arm64.ASMULL,
-		arm64.AUMULL,
-		arm64.ASMULH,
-		arm64.AUMULH,
-		arm64.ASDIV,
-		arm64.AUDIV,
-		arm64.ALSL,
-		arm64.ALSR,
-		arm64.AASR,
-		arm64.AFADDD,
-		arm64.AFADDS,
-		arm64.AFSUBD,
-		arm64.AFSUBS,
-		arm64.AFMULD,
-		arm64.AFMULS,
-		arm64.AFDIVD,
-		arm64.AFDIVS:
-		if s != nil {
-			if copysub(&p.From, v, s, true) {
-				return 1
-			}
-			if copysub1(p, v, s, true) {
-				return 1
-			}
-
-			// Update only indirect uses of v in p->to
-			if !copyas(&p.To, v) {
-				if copysub(&p.To, v, s, true) {
-					return 1
-				}
-			}
-			return 0
-		}
-
-		if copyas(&p.To, v) {
-			if p.Reg == 0 {
-				// Fix up implicit reg (e.g., ADD
-				// R3,R4 -> ADD R3,R4,R4) so we can
-				// update reg and to separately.
-				p.Reg = p.To.Reg
-			}
-
-			if copyau(&p.From, v) {
-				return 4
-			}
-			if copyau1(p, v) {
-				return 4
-			}
-			return 3
-		}
-
-		if copyau(&p.From, v) {
-			return 1
-		}
-		if copyau1(p, v) {
-			return 1
-		}
-		if copyau(&p.To, v) {
-			return 1
-		}
-		return 0
-
-	case arm64.ABEQ,
-		arm64.ABNE,
-		arm64.ABGE,
-		arm64.ABLT,
-		arm64.ABGT,
-		arm64.ABLE,
-		arm64.ABLO,
-		arm64.ABLS,
-		arm64.ABHI,
-		arm64.ABHS:
-		return 0
-
-	case obj.ACHECKNIL, /* read p->from */
-		arm64.ACMP, /* read p->from, read p->reg */
-		arm64.AFCMPD,
-		arm64.AFCMPS:
-		if s != nil {
-			if copysub(&p.From, v, s, true) {
-				return 1
-			}
-			if copysub1(p, v, s, true) {
-				return 1
-			}
-			return 0
-		}
-
-		if copyau(&p.From, v) {
-			return 1
-		}
-		if copyau1(p, v) {
-			return 1
-		}
-		return 0
-
-	case arm64.AB: /* read p->to */
-		if s != nil {
-			if copysub(&p.To, v, s, true) {
-				return 1
-			}
-			return 0
-		}
-
-		if copyau(&p.To, v) {
-			return 1
-		}
-		return 0
-
-	case obj.ARET: /* funny */
-		if s != nil {
-			return 0
-		}
-
-		// All registers die at this point, so claim
-		// everything is set (and not used).
-		return 3
-
-	case arm64.ABL: /* funny */
-		if p.From.Type == obj.TYPE_REG && v.Type == obj.TYPE_REG && p.From.Reg == v.Reg {
-			return 2
-		}
-
-		if s != nil {
-			if copysub(&p.To, v, s, true) {
-				return 1
-			}
-			return 0
-		}
-
-		if copyau(&p.To, v) {
-			return 4
-		}
-		return 3
-
-	// R31 is zero, used by DUFFZERO, cannot be substituted.
-	// R16 is ptr to memory, used and set, cannot be substituted.
-	case obj.ADUFFZERO:
-		if v.Type == obj.TYPE_REG {
-			if v.Reg == 31 {
-				return 1
-			}
-			if v.Reg == 16 {
-				return 2
-			}
-		}
-
-		return 0
-
-	// R16, R17 are ptr to src, dst, used and set, cannot be substituted.
-	// R27 is scratch, set by DUFFCOPY, cannot be substituted.
-	case obj.ADUFFCOPY:
-		if v.Type == obj.TYPE_REG {
-			if v.Reg == 16 || v.Reg == 17 {
-				return 2
-			}
-			if v.Reg == 27 {
-				return 3
-			}
-		}
-
-		return 0
-
-	case arm64.AHINT,
-		obj.ATEXT,
-		obj.APCDATA,
-		obj.AFUNCDATA,
-		obj.AVARDEF,
-		obj.AVARKILL,
-		obj.AVARLIVE,
-		obj.AUSEFIELD:
-		return 0
-	}
-}
-
-// copyas returns true if a and v address the same register.
-//
-// If a is the from operand, this means this operation reads the
-// register in v. If a is the to operand, this means this operation
-// writes the register in v.
-func copyas(a *obj.Addr, v *obj.Addr) bool {
-	return regtyp(v) && a.Type == v.Type && a.Reg == v.Reg
-}
-
-// copyau returns true if a either directly or indirectly addresses the
-// same register as v.
-//
-// If a is the from operand, this means this operation reads the
-// register in v. If a is the to operand, this means the operation
-// either reads or writes the register in v (if !copyas(a, v), then
-// the operation reads the register in v).
-func copyau(a *obj.Addr, v *obj.Addr) bool {
-	if copyas(a, v) {
-		return true
-	}
-	if v.Type == obj.TYPE_REG {
-		if a.Type == obj.TYPE_MEM || (a.Type == obj.TYPE_ADDR && a.Reg != 0) {
-			if v.Reg == a.Reg {
-				return true
-			}
-		}
-	}
-	return false
-}
-
-// copyau1 returns true if p->reg references the same register as v and v
-// is a direct reference.
-func copyau1(p *obj.Prog, v *obj.Addr) bool {
-	return regtyp(v) && v.Reg != 0 && p.Reg == v.Reg
-}
-
-// copysub replaces v with s in a if f==true, or reports whether it could if f==false.
-// Returns true on failure to substitute (it always succeeds on arm64).
-// TODO(dfc) remove unused return value, remove calls with f=false as they do nothing.
-func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f bool) bool {
-	if f && copyau(a, v) {
-		a.Reg = s.Reg
-	}
-	return false
-}
-
-// copysub1 replaces v with s in p1->reg if f==true, or reports whether it could if f==false.
-// Returns true on failure to substitute (it always succeeds on arm64).
-// TODO(dfc) remove unused return value, remove calls with f=false as they do nothing.
-func copysub1(p1 *obj.Prog, v *obj.Addr, s *obj.Addr, f bool) bool {
-	if f && copyau1(p1, v) {
-		p1.Reg = s.Reg
-	}
-	return false
-}
-
-func sameaddr(a *obj.Addr, v *obj.Addr) bool {
-	if a.Type != v.Type {
-		return false
-	}
-	if regtyp(v) && a.Reg == v.Reg {
-		return true
-	}
-	if v.Type == obj.NAME_AUTO || v.Type == obj.NAME_PARAM {
-		if v.Offset == a.Offset {
-			return true
-		}
-	}
-	return false
-}
-
-func smallindir(a *obj.Addr, reg *obj.Addr) bool {
-	return reg.Type == obj.TYPE_REG && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && 0 <= a.Offset && a.Offset < 4096
-}
-
-func stackaddr(a *obj.Addr) bool {
-	return a.Type == obj.TYPE_REG && a.Reg == arm64.REGSP
-}
diff --git a/src/cmd/compile/internal/arm64/reg.go b/src/cmd/compile/internal/arm64/reg.go
index dfbbff5..0df68b6 100644
--- a/src/cmd/compile/internal/arm64/reg.go
+++ b/src/cmd/compile/internal/arm64/reg.go
@@ -30,105 +30,7 @@
 
 package arm64
 
-import (
-	"cmd/compile/internal/gc"
-	"cmd/internal/obj/arm64"
-)
-
-const (
-	NREGVAR = 64 /* 32 general + 32 floating */
-)
-
-var regname = []string{
-	".R0",
-	".R1",
-	".R2",
-	".R3",
-	".R4",
-	".R5",
-	".R6",
-	".R7",
-	".R8",
-	".R9",
-	".R10",
-	".R11",
-	".R12",
-	".R13",
-	".R14",
-	".R15",
-	".R16",
-	".R17",
-	".R18",
-	".R19",
-	".R20",
-	".R21",
-	".R22",
-	".R23",
-	".R24",
-	".R25",
-	".R26",
-	".R27",
-	".R28",
-	".R29",
-	".R30",
-	".R31",
-	".F0",
-	".F1",
-	".F2",
-	".F3",
-	".F4",
-	".F5",
-	".F6",
-	".F7",
-	".F8",
-	".F9",
-	".F10",
-	".F11",
-	".F12",
-	".F13",
-	".F14",
-	".F15",
-	".F16",
-	".F17",
-	".F18",
-	".F19",
-	".F20",
-	".F21",
-	".F22",
-	".F23",
-	".F24",
-	".F25",
-	".F26",
-	".F27",
-	".F28",
-	".F29",
-	".F30",
-	".F31",
-}
-
-func regnames(n *int) []string {
-	*n = NREGVAR
-	return regname
-}
-
-func excludedregs() uint64 {
-	// Exclude registers with fixed functions
-	regbits := RtoB(arm64.REGRT1) | RtoB(arm64.REGRT2) | RtoB(arm64.REGPR)
-
-	// Exclude R26 - R31.
-	for r := arm64.REGMAX + 1; r <= arm64.REGZERO; r++ {
-		regbits |= RtoB(r)
-	}
-
-	// Also exclude floating point registers with fixed constants
-	regbits |= RtoB(arm64.REG_F27) | RtoB(arm64.REG_F28) | RtoB(arm64.REG_F29) | RtoB(arm64.REG_F30) | RtoB(arm64.REG_F31)
-
-	return regbits
-}
-
-func doregbits(r int) uint64 {
-	return 0
-}
+import "cmd/internal/obj/arm64"
 
 /*
  * track register variables including external registers:
@@ -151,19 +53,3 @@
 	}
 	return 0
 }
-
-func BtoR(b uint64) int {
-	b &= 0xffffffff
-	if b == 0 {
-		return 0
-	}
-	return gc.Bitno(b) + arm64.REG_R0
-}
-
-func BtoF(b uint64) int {
-	b >>= 32
-	if b == 0 {
-		return 0
-	}
-	return gc.Bitno(b) + arm64.REG_F0
-}
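
RtoB/BtoR/BtoF implemented a 64-bit mask layout: integer registers in the low 32 bits, floating-point registers in the high 32. A self-contained round-trip sketch (regR0/regF0 and bitno are stand-ins for the deleted arm64.REG_* constants and gc.Bitno):

	package main

	import "fmt"

	const (
		regR0 = 0  // stand-in for arm64.REG_R0
		regF0 = 64 // stand-in for arm64.REG_F0
	)

	// bitno returns the index of the lowest set bit; b must be nonzero.
	func bitno(b uint64) int {
		n := 0
		for b&1 == 0 {
			b >>= 1
			n++
		}
		return n
	}

	func rtoB(r int) uint64 { return 1 << uint(r-regR0) }
	func ftoB(f int) uint64 { return 1 << uint(32+f-regF0) }
	func btoR(b uint64) int { return bitno(b&0xffffffff) + regR0 }
	func btoF(b uint64) int { return bitno(b>>32) + regF0 }

	func main() {
		fmt.Println(btoR(rtoB(regR0 + 5))) // 5: the mapping round-trips
		fmt.Println(btoF(ftoB(regF0 + 7))) // 71
	}
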
diff --git a/src/cmd/compile/internal/gc/cgen.go b/src/cmd/compile/internal/gc/cgen.go
index 363079a..3d8a7c1 100644
--- a/src/cmd/compile/internal/gc/cgen.go
+++ b/src/cmd/compile/internal/gc/cgen.go
@@ -4,2634 +4,7 @@
 
 package gc
 
-import (
-	"cmd/internal/obj"
-	"cmd/internal/obj/ppc64"
-	"cmd/internal/sys"
-	"fmt"
-)
-
-// generate:
-//	res = n;
-// simplifies and calls Thearch.Gmove.
-// if wb is true, need to emit write barriers.
-func Cgen(n, res *Node) {
-	cgen_wb(n, res, false)
-}
-
-func cgen_wb(n, res *Node, wb bool) {
-	if Debug['g'] != 0 {
-		op := "cgen"
-		if wb {
-			op = "cgen_wb"
-		}
-		Dump("\n"+op+"-n", n)
-		Dump(op+"-res", res)
-	}
-
-	if n == nil || n.Type == nil {
-		return
-	}
-
-	if res == nil || res.Type == nil {
-		Fatalf("cgen: res nil")
-	}
-
-	for n.Op == OCONVNOP {
-		n = n.Left
-	}
-
-	switch n.Op {
-	case OARRAYBYTESTRTMP:
-		sgen_wb(n.Left, res, n.Type.Width, wb)
-		return
-
-	case OSLICE, OSLICEARR, OSLICESTR, OSLICE3, OSLICE3ARR:
-		cgen_slice(n, res, wb)
-		return
-
-	case OEFACE:
-		if res.Op != ONAME || !res.Addable || wb {
-			var n1 Node
-			Tempname(&n1, n.Type)
-			Cgen_eface(n, &n1)
-			cgen_wb(&n1, res, wb)
-		} else {
-			Cgen_eface(n, res)
-		}
-		return
-
-	case ODOTTYPE:
-		cgen_dottype(n, res, nil, wb)
-		return
-
-	case OAPPEND:
-		cgen_append(n, res)
-		return
-	}
-
-	if n.Ullman >= UINF {
-		if n.Op == OINDREG {
-			Fatalf("cgen: this is going to miscompile")
-		}
-		if res.Ullman >= UINF {
-			var n1 Node
-			Tempname(&n1, n.Type)
-			Cgen(n, &n1)
-			cgen_wb(&n1, res, wb)
-			return
-		}
-	}
-
-	if Isfat(n.Type) {
-		if n.Type.Width < 0 {
-			Fatalf("forgot to compute width for %v", n.Type)
-		}
-		sgen_wb(n, res, n.Type.Width, wb)
-		return
-	}
-
-	if !res.Addable {
-		if n.Ullman > res.Ullman {
-			if Ctxt.Arch.RegSize == 4 && Is64(n.Type) {
-				var n1 Node
-				Tempname(&n1, n.Type)
-				Cgen(n, &n1)
-				cgen_wb(&n1, res, wb)
-				return
-			}
-
-			var n1 Node
-			Regalloc(&n1, n.Type, res)
-			Cgen(n, &n1)
-			if n1.Ullman > res.Ullman {
-				Dump("n1", &n1)
-				Dump("res", res)
-				Fatalf("loop in cgen")
-			}
-
-			cgen_wb(&n1, res, wb)
-			Regfree(&n1)
-			return
-		}
-
-		if res.Ullman < UINF {
-			if Complexop(n, res) {
-				Complexgen(n, res)
-				return
-			}
-
-			f := true // gen through register
-			switch n.Op {
-			case OLITERAL:
-				if Smallintconst(n) {
-					f = false
-				}
-
-			case OREGISTER:
-				f = false
-			}
-
-			if !n.Type.IsComplex() && Ctxt.Arch.RegSize == 8 && !wb {
-				a := Thearch.Optoas(OAS, res.Type)
-				var addr obj.Addr
-				if Thearch.Sudoaddable(a, res, &addr) {
-					var p1 *obj.Prog
-					if f {
-						var n2 Node
-						Regalloc(&n2, res.Type, nil)
-						Cgen(n, &n2)
-						p1 = Thearch.Gins(a, &n2, nil)
-						Regfree(&n2)
-					} else {
-						p1 = Thearch.Gins(a, n, nil)
-					}
-					p1.To = addr
-					if Debug['g'] != 0 {
-						fmt.Printf("%v [ignore previous line]\n", p1)
-					}
-					Thearch.Sudoclean()
-					return
-				}
-			}
-		}
-
-		if Ctxt.Arch.Family == sys.I386 {
-			// no registers to speak of
-			var n1, n2 Node
-			Tempname(&n1, n.Type)
-			Cgen(n, &n1)
-			Igen(res, &n2, nil)
-			cgen_wb(&n1, &n2, wb)
-			Regfree(&n2)
-			return
-		}
-
-		var n1 Node
-		Igen(res, &n1, nil)
-		cgen_wb(n, &n1, wb)
-		Regfree(&n1)
-		return
-	}
-
-	// update addressability for string, slice
-	// can't do in walk because n->left->addable
-	// changes if n->left is an escaping local variable.
-	switch n.Op {
-	case OSPTR, OLEN:
-		if n.Left.Type.IsSlice() || n.Left.Type.IsString() {
-			n.Addable = n.Left.Addable
-		}
-
-	case OCAP:
-		if n.Left.Type.IsSlice() {
-			n.Addable = n.Left.Addable
-		}
-
-	case OITAB, OIDATA:
-		n.Addable = n.Left.Addable
-	}
-
-	if wb {
-		if Simtype[res.Type.Etype] != Tptr {
-			Fatalf("cgen_wb of type %v", res.Type)
-		}
-		if n.Ullman >= UINF {
-			var n1 Node
-			Tempname(&n1, n.Type)
-			Cgen(n, &n1)
-			n = &n1
-		}
-		cgen_wbptr(n, res)
-		return
-	}
-
-	// Write barrier now handled. Code below this line can ignore wb.
-
-	if Ctxt.Arch.Family == sys.ARM { // TODO(rsc): Maybe more often?
-		// if both are addressable, move
-		if n.Addable && res.Addable {
-			if Is64(n.Type) || Is64(res.Type) || n.Op == OREGISTER || res.Op == OREGISTER || n.Type.IsComplex() || res.Type.IsComplex() {
-				Thearch.Gmove(n, res)
-			} else {
-				var n1 Node
-				Regalloc(&n1, n.Type, nil)
-				Thearch.Gmove(n, &n1)
-				Cgen(&n1, res)
-				Regfree(&n1)
-			}
-
-			return
-		}
-
-		// if both are not addressable, use a temporary.
-		if !n.Addable && !res.Addable {
-			// could use regalloc here sometimes,
-			// but have to check for ullman >= UINF.
-			var n1 Node
-			Tempname(&n1, n.Type)
-			Cgen(n, &n1)
-			Cgen(&n1, res)
-			return
-		}
-
-		// if result is not addressable directly but n is,
-		// compute its address and then store via the address.
-		if !res.Addable {
-			var n1 Node
-			Igen(res, &n1, nil)
-			Cgen(n, &n1)
-			Regfree(&n1)
-			return
-		}
-	}
-
-	if Complexop(n, res) {
-		Complexgen(n, res)
-		return
-	}
-
-	if Ctxt.Arch.InFamily(sys.AMD64, sys.I386, sys.S390X) && n.Addable {
-		Thearch.Gmove(n, res)
-		return
-	}
-
-	if Ctxt.Arch.InFamily(sys.ARM64, sys.MIPS64, sys.PPC64) {
-		// if both are addressable, move
-		if n.Addable {
-			if n.Op == OREGISTER || res.Op == OREGISTER {
-				Thearch.Gmove(n, res)
-			} else {
-				var n1 Node
-				Regalloc(&n1, n.Type, nil)
-				Thearch.Gmove(n, &n1)
-				Cgen(&n1, res)
-				Regfree(&n1)
-			}
-			return
-		}
-	}
-
-	// if n is sudoaddable generate addr and move
-	if Ctxt.Arch.Family == sys.ARM && !Is64(n.Type) && !Is64(res.Type) && !n.Type.IsComplex() && !res.Type.IsComplex() {
-		a := Thearch.Optoas(OAS, n.Type)
-		var addr obj.Addr
-		if Thearch.Sudoaddable(a, n, &addr) {
-			if res.Op != OREGISTER {
-				var n2 Node
-				Regalloc(&n2, res.Type, nil)
-				p1 := Thearch.Gins(a, nil, &n2)
-				p1.From = addr
-				if Debug['g'] != 0 {
-					fmt.Printf("%v [ignore previous line]\n", p1)
-				}
-				Thearch.Gmove(&n2, res)
-				Regfree(&n2)
-			} else {
-				p1 := Thearch.Gins(a, nil, res)
-				p1.From = addr
-				if Debug['g'] != 0 {
-					fmt.Printf("%v [ignore previous line]\n", p1)
-				}
-			}
-			Thearch.Sudoclean()
-			return
-		}
-	}
-
-	nl := n.Left
-	nr := n.Right
-
-	if nl != nil && nl.Ullman >= UINF {
-		if nr != nil && nr.Ullman >= UINF {
-			var n1 Node
-			Tempname(&n1, nl.Type)
-			Cgen(nl, &n1)
-			n2 := *n
-			n2.Left = &n1
-			Cgen(&n2, res)
-			return
-		}
-	}
-
-	// 64-bit ops are hard on 32-bit machine.
-	if Ctxt.Arch.RegSize == 4 && (Is64(n.Type) || Is64(res.Type) || n.Left != nil && Is64(n.Left.Type)) {
-		switch n.Op {
-		// math goes to cgen64.
-		case OMINUS,
-			OCOM,
-			OADD,
-			OSUB,
-			OMUL,
-			OLROT,
-			OLSH,
-			ORSH,
-			OAND,
-			OOR,
-			OXOR:
-			Thearch.Cgen64(n, res)
-			return
-		}
-	}
-
-	if Thearch.Cgen_float != nil && nl != nil && n.Type.IsFloat() && nl.Type.IsFloat() {
-		Thearch.Cgen_float(n, res)
-		return
-	}
-
-	if !n.Type.IsComplex() && Ctxt.Arch.RegSize == 8 {
-		a := Thearch.Optoas(OAS, n.Type)
-		var addr obj.Addr
-		if Thearch.Sudoaddable(a, n, &addr) {
-			if res.Op == OREGISTER {
-				p1 := Thearch.Gins(a, nil, res)
-				p1.From = addr
-			} else {
-				var n2 Node
-				Regalloc(&n2, n.Type, nil)
-				p1 := Thearch.Gins(a, nil, &n2)
-				p1.From = addr
-				Thearch.Gins(a, &n2, res)
-				Regfree(&n2)
-			}
-
-			Thearch.Sudoclean()
-			return
-		}
-	}
-
-	var a obj.As
-	switch n.Op {
-	default:
-		Dump("cgen", n)
-		Dump("cgen-res", res)
-		Fatalf("cgen: unknown op %+S", n)
-
-	case OOROR, OANDAND,
-		OEQ, ONE,
-		OLT, OLE,
-		OGE, OGT,
-		ONOT:
-		Bvgen(n, res, true)
-		return
-
-	case OPLUS:
-		Cgen(nl, res)
-		return
-
-		// unary
-	case OCOM:
-		a := Thearch.Optoas(OXOR, nl.Type)
-
-		var n1 Node
-		Regalloc(&n1, nl.Type, nil)
-		Cgen(nl, &n1)
-		var n2 Node
-		Nodconst(&n2, nl.Type, -1)
-		Thearch.Gins(a, &n2, &n1)
-		cgen_norm(n, &n1, res)
-		return
-
-	case OMINUS:
-		if nl.Type.IsFloat() {
-			nr = Nodintconst(-1)
-			nr = convlit(nr, n.Type)
-			a = Thearch.Optoas(OMUL, nl.Type)
-			goto sbop
-		}
-
-		a := Thearch.Optoas(n.Op, nl.Type)
-		// unary
-		var n1 Node
-		Regalloc(&n1, nl.Type, res)
-
-		Cgen(nl, &n1)
-		if Ctxt.Arch.Family == sys.ARM {
-			var n2 Node
-			Nodconst(&n2, nl.Type, 0)
-			Thearch.Gins(a, &n2, &n1)
-		} else if Ctxt.Arch.Family == sys.ARM64 {
-			Thearch.Gins(a, &n1, &n1)
-		} else {
-			Thearch.Gins(a, nil, &n1)
-		}
-		cgen_norm(n, &n1, res)
-		return
-
-	case OSQRT:
-		var n1 Node
-		Regalloc(&n1, nl.Type, res)
-		Cgen(n.Left, &n1)
-		Thearch.Gins(Thearch.Optoas(OSQRT, nl.Type), &n1, &n1)
-		Thearch.Gmove(&n1, res)
-		Regfree(&n1)
-		return
-
-	case OGETG:
-		Thearch.Getg(res)
-		return
-
-		// symmetric binary
-	case OAND,
-		OOR,
-		OXOR,
-		OADD,
-		OMUL:
-		if n.Op == OMUL && Thearch.Cgen_bmul != nil && Thearch.Cgen_bmul(n.Op, nl, nr, res) {
-			break
-		}
-		a = Thearch.Optoas(n.Op, nl.Type)
-		goto sbop
-
-		// asymmetric binary
-	case OSUB:
-		a = Thearch.Optoas(n.Op, nl.Type)
-		goto abop
-
-	case OHMUL:
-		Thearch.Cgen_hmul(nl, nr, res)
-
-	case OCONV:
-		if Eqtype(n.Type, nl.Type) || Noconv(n.Type, nl.Type) {
-			Cgen(nl, res)
-			return
-		}
-
-		if Ctxt.Arch.Family == sys.I386 {
-			var n1 Node
-			var n2 Node
-			Tempname(&n2, n.Type)
-			Mgen(nl, &n1, res)
-			Thearch.Gmove(&n1, &n2)
-			Thearch.Gmove(&n2, res)
-			Mfree(&n1)
-			break
-		}
-
-		var n1 Node
-		var n2 Node
-		if Ctxt.Arch.Family == sys.ARM {
-			if nl.Addable && !Is64(nl.Type) {
-				Regalloc(&n1, nl.Type, res)
-				Thearch.Gmove(nl, &n1)
-			} else {
-				if n.Type.Width > int64(Widthptr) || Is64(nl.Type) || nl.Type.IsFloat() {
-					Tempname(&n1, nl.Type)
-				} else {
-					Regalloc(&n1, nl.Type, res)
-				}
-				Cgen(nl, &n1)
-			}
-			if n.Type.Width > int64(Widthptr) || Is64(n.Type) || n.Type.IsFloat() {
-				Tempname(&n2, n.Type)
-			} else {
-				Regalloc(&n2, n.Type, nil)
-			}
-		} else {
-			if n.Type.Width > nl.Type.Width {
-				// If loading from memory, do conversion during load,
-				// so as to avoid use of 8-bit register in, say, int(*byteptr).
-				switch nl.Op {
-				case ODOT, ODOTPTR, OINDEX, OIND, ONAME:
-					Igen(nl, &n1, res)
-					Regalloc(&n2, n.Type, res)
-					Thearch.Gmove(&n1, &n2)
-					Thearch.Gmove(&n2, res)
-					Regfree(&n2)
-					Regfree(&n1)
-					return
-				}
-			}
-			Regalloc(&n1, nl.Type, res)
-			Regalloc(&n2, n.Type, &n1)
-			Cgen(nl, &n1)
-		}
-
-		// if we do the conversion n1 -> n2 here
-		// reusing the register, then gmove won't
-		// have to allocate its own register.
-		Thearch.Gmove(&n1, &n2)
-		Thearch.Gmove(&n2, res)
-		if n2.Op == OREGISTER {
-			Regfree(&n2)
-		}
-		if n1.Op == OREGISTER {
-			Regfree(&n1)
-		}
-
-	case ODOT,
-		ODOTPTR,
-		OINDEX,
-		OIND:
-		var n1 Node
-		Igen(n, &n1, res)
-
-		Thearch.Gmove(&n1, res)
-		Regfree(&n1)
-
-	case OITAB:
-		// interface table is first word of interface value
-		var n1 Node
-		Igen(nl, &n1, res)
-		n1.Type = n.Type
-		Thearch.Gmove(&n1, res)
-		Regfree(&n1)
-
-	case OIDATA:
-		// interface data is second word of interface value
-		var n1 Node
-		Igen(nl, &n1, res)
-		n1.Type = n.Type
-		n1.Xoffset += int64(Widthptr)
-		Thearch.Gmove(&n1, res)
-		Regfree(&n1)
-
-	case OSPTR:
-		// pointer is the first word of string or slice.
-		if Isconst(nl, CTSTR) {
-			var n1 Node
-			Regalloc(&n1, Types[Tptr], res)
-			p1 := Thearch.Gins(Thearch.Optoas(OAS, n1.Type), nil, &n1)
-			Datastring(nl.Val().U.(string), &p1.From)
-			p1.From.Type = obj.TYPE_ADDR
-			Thearch.Gmove(&n1, res)
-			Regfree(&n1)
-			break
-		}
-
-		var n1 Node
-		Igen(nl, &n1, res)
-		n1.Type = n.Type
-		Thearch.Gmove(&n1, res)
-		Regfree(&n1)
-
-	case OLEN:
-		if nl.Type.IsMap() || nl.Type.IsChan() {
-			// map and chan have len in the first int-sized word.
-			// a zero pointer means zero length
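-			// That is: compare the pointer against zero and skip the
-			// load if equal; since a nil pointer is 0, the register
-			// then already holds the zero length.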
-			var n1 Node
-			Regalloc(&n1, Types[Tptr], res)
-
-			Cgen(nl, &n1)
-
-			var n2 Node
-			Nodconst(&n2, Types[Tptr], 0)
-			p1 := Thearch.Ginscmp(OEQ, Types[Tptr], &n1, &n2, 0)
-
-			n2 = n1
-			n2.Op = OINDREG
-			n2.Type = Types[Simtype[TINT]]
-			Thearch.Gmove(&n2, &n1)
-
-			Patch(p1, Pc)
-
-			Thearch.Gmove(&n1, res)
-			Regfree(&n1)
-			break
-		}
-
-		if nl.Type.IsString() || nl.Type.IsSlice() {
-			// both slice and string have len one pointer into the struct.
-			// a zero pointer means zero length
-			var n1 Node
-			Igen(nl, &n1, res)
-
-			n1.Type = Types[Simtype[TUINT]]
-			n1.Xoffset += int64(Array_nel)
-			Thearch.Gmove(&n1, res)
-			Regfree(&n1)
-			break
-		}
-
-		Fatalf("cgen: OLEN: unknown type %L", nl.Type)
-
-	case OCAP:
-		if nl.Type.IsChan() {
-			// chan has cap in the second int-sized word.
-			// a zero pointer means zero length
-			var n1 Node
-			Regalloc(&n1, Types[Tptr], res)
-
-			Cgen(nl, &n1)
-
-			var n2 Node
-			Nodconst(&n2, Types[Tptr], 0)
-			p1 := Thearch.Ginscmp(OEQ, Types[Tptr], &n1, &n2, 0)
-
-			n2 = n1
-			n2.Op = OINDREG
-			n2.Xoffset = int64(Widthint)
-			n2.Type = Types[Simtype[TINT]]
-			Thearch.Gmove(&n2, &n1)
-
-			Patch(p1, Pc)
-
-			Thearch.Gmove(&n1, res)
-			Regfree(&n1)
-			break
-		}
-
-		if nl.Type.IsSlice() {
-			var n1 Node
-			Igen(nl, &n1, res)
-			n1.Type = Types[Simtype[TUINT]]
-			n1.Xoffset += int64(Array_cap)
-			Thearch.Gmove(&n1, res)
-			Regfree(&n1)
-			break
-		}
-
-		Fatalf("cgen: OCAP: unknown type %L", nl.Type)
-
-	case OADDR:
-		if n.Bounded { // let race detector avoid nil checks
-			Disable_checknil++
-		}
-		Agen(nl, res)
-		if n.Bounded {
-			Disable_checknil--
-		}
-
-	case OCALLMETH:
-		cgen_callmeth(n, 0)
-		cgen_callret(n, res)
-
-	case OCALLINTER:
-		cgen_callinter(n, res, 0)
-		cgen_callret(n, res)
-
-	case OCALLFUNC:
-		cgen_call(n, 0)
-		cgen_callret(n, res)
-
-	case OMOD, ODIV:
-		if n.Type.IsFloat() || Thearch.Dodiv == nil {
-			a = Thearch.Optoas(n.Op, nl.Type)
-			goto abop
-		}
-
-		if nl.Ullman >= nr.Ullman {
-			var n1 Node
-			Regalloc(&n1, nl.Type, res)
-			Cgen(nl, &n1)
-			cgen_div(n.Op, &n1, nr, res)
-			Regfree(&n1)
-		} else {
-			var n2 Node
-			if !Smallintconst(nr) {
-				Regalloc(&n2, nr.Type, res)
-				Cgen(nr, &n2)
-			} else {
-				n2 = *nr
-			}
-
-			cgen_div(n.Op, nl, &n2, res)
-			if n2.Op != OLITERAL {
-				Regfree(&n2)
-			}
-		}
-
-	case OLSH, ORSH, OLROT:
-		Thearch.Cgen_shift(n.Op, n.Bounded, nl, nr, res)
-	}
-
-	return
-
-	// put simplest on right - we'll generate into left
-	// and then adjust it using the computation of right.
-	// constants and variables have the same ullman
-	// count, so look for constants specially.
-	//
-	// an integer constant we can use as an immediate
-	// is simpler than a variable - we can use the immediate
-	// in the adjustment instruction directly - so it goes
-	// on the right.
-	//
-	// other constants, like big integers or floating point
-	// constants, require a mov into a register, so those
-	// might as well go on the left, so we can reuse that
-	// register for the computation.
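-// For example, in x+1 the small integer constant stays on the right
-// and becomes an immediate operand of the ADD, while in x+1.5 the
-// float constant is swapped to the left so the register it is loaded
-// into can be reused for the computation.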
-sbop: // symmetric binary
-	if nl.Ullman < nr.Ullman || (nl.Ullman == nr.Ullman && (Smallintconst(nl) || (nr.Op == OLITERAL && !Smallintconst(nr)))) {
-		nl, nr = nr, nl
-	}
-
-abop: // asymmetric binary
-	var n1 Node
-	var n2 Node
-	if Ctxt.Arch.Family == sys.I386 {
-		// no registers, sigh
-		if Smallintconst(nr) {
-			var n1 Node
-			Mgen(nl, &n1, res)
-			var n2 Node
-			Regalloc(&n2, nl.Type, &n1)
-			Thearch.Gmove(&n1, &n2)
-			Thearch.Gins(a, nr, &n2)
-			Thearch.Gmove(&n2, res)
-			Regfree(&n2)
-			Mfree(&n1)
-		} else if nl.Ullman >= nr.Ullman {
-			var nt Node
-			Tempname(&nt, nl.Type)
-			Cgen(nl, &nt)
-			var n2 Node
-			Mgen(nr, &n2, nil)
-			var n1 Node
-			Regalloc(&n1, nl.Type, res)
-			Thearch.Gmove(&nt, &n1)
-			Thearch.Gins(a, &n2, &n1)
-			Thearch.Gmove(&n1, res)
-			Regfree(&n1)
-			Mfree(&n2)
-		} else {
-			var n2 Node
-			Regalloc(&n2, nr.Type, res)
-			Cgen(nr, &n2)
-			var n1 Node
-			Regalloc(&n1, nl.Type, nil)
-			Cgen(nl, &n1)
-			Thearch.Gins(a, &n2, &n1)
-			Regfree(&n2)
-			Thearch.Gmove(&n1, res)
-			Regfree(&n1)
-		}
-		return
-	}
-
-	if nl.Ullman >= nr.Ullman {
-		Regalloc(&n1, nl.Type, res)
-		Cgen(nl, &n1)
-
-		if Smallintconst(nr) && Ctxt.Arch.Family != sys.MIPS64 && Ctxt.Arch.Family != sys.ARM && Ctxt.Arch.Family != sys.ARM64 && Ctxt.Arch.Family != sys.PPC64 { // TODO(rsc): Check opcode for arm
-			n2 = *nr
-		} else {
-			Regalloc(&n2, nr.Type, nil)
-			Cgen(nr, &n2)
-		}
-	} else {
-		if Smallintconst(nr) && Ctxt.Arch.Family != sys.MIPS64 && Ctxt.Arch.Family != sys.ARM && Ctxt.Arch.Family != sys.ARM64 && Ctxt.Arch.Family != sys.PPC64 { // TODO(rsc): Check opcode for arm
-			n2 = *nr
-		} else {
-			Regalloc(&n2, nr.Type, res)
-			Cgen(nr, &n2)
-		}
-
-		Regalloc(&n1, nl.Type, nil)
-		Cgen(nl, &n1)
-	}
-
-	Thearch.Gins(a, &n2, &n1)
-	if n2.Op != OLITERAL {
-		Regfree(&n2)
-	}
-	cgen_norm(n, &n1, res)
-}
-
-var sys_wbptr *Node
-
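-// cgen_wbptr generates a pointer store with a write barrier check.
-// On amd64 the generated code has roughly this shape:
-//	CMPB runtime.writeBarrier(SB), $0
-//	JNE  wb
-//	MOVQ src, (dst)   // fast path: plain store
-//	JMP  done
-// wb:
-//	MOVQ &dst, 0(SP)
-//	MOVQ src, 8(SP)
-//	CALL runtime.writebarrierptr(SB)
-// done: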
-func cgen_wbptr(n, res *Node) {
-	if Curfn != nil {
-		if Curfn.Func.Pragma&Nowritebarrier != 0 {
-			Yyerror("write barrier prohibited")
-		}
-		if Curfn.Func.WBLineno == 0 {
-			Curfn.Func.WBLineno = lineno
-		}
-	}
-	if Debug_wb > 0 {
-		Warn("write barrier")
-	}
-
-	var dst, src Node
-	Igen(res, &dst, nil)
-	if n.Op == OREGISTER {
-		src = *n
-		Regrealloc(&src)
-	} else {
-		Cgenr(n, &src, nil)
-	}
-
-	wbVar := syslook("writeBarrier")
-	wbEnabled := NodSym(ODOT, wbVar, wbVar.Type.Field(0).Sym)
-	wbEnabled = typecheck(wbEnabled, Erv)
-	pbr := Thearch.Ginscmp(ONE, Types[TUINT8], wbEnabled, Nodintconst(0), -1)
-	Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), &src, &dst)
-	pjmp := Gbranch(obj.AJMP, nil, 0)
-	Patch(pbr, Pc)
-	var adst Node
-	Agenr(&dst, &adst, &dst)
-	p := Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), &adst, nil)
-	a := &p.To
-	a.Type = obj.TYPE_MEM
-	a.Reg = int16(Thearch.REGSP)
-	a.Offset = Ctxt.FixedFrameSize()
-	p2 := Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), &src, nil)
-	p2.To = p.To
-	p2.To.Offset += int64(Widthptr)
-	Regfree(&adst)
-	if sys_wbptr == nil {
-		sys_wbptr = writebarrierfn("writebarrierptr", Types[Tptr], Types[Tptr])
-	}
-	Ginscall(sys_wbptr, 0)
-	Patch(pjmp, Pc)
-
-	Regfree(&dst)
-	Regfree(&src)
-}
-
-func cgen_wbfat(n, res *Node) {
-	if Curfn != nil {
-		if Curfn.Func.Pragma&Nowritebarrier != 0 {
-			Yyerror("write barrier prohibited")
-		}
-		if Curfn.Func.WBLineno == 0 {
-			Curfn.Func.WBLineno = lineno
-		}
-	}
-	if Debug_wb > 0 {
-		Warn("write barrier")
-	}
-	needType := true
-	funcName := "typedmemmove"
-	var dst, src Node
-	if n.Ullman >= res.Ullman {
-		Agenr(n, &src, nil)
-		Agenr(res, &dst, nil)
-	} else {
-		Agenr(res, &dst, nil)
-		Agenr(n, &src, nil)
-	}
-	p := Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), &dst, nil)
-	a := &p.To
-	a.Type = obj.TYPE_MEM
-	a.Reg = int16(Thearch.REGSP)
-	a.Offset = Ctxt.FixedFrameSize()
-	if needType {
-		a.Offset += int64(Widthptr)
-	}
-	p2 := Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), &src, nil)
-	p2.To = p.To
-	p2.To.Offset += int64(Widthptr)
-	Regfree(&dst)
-	if needType {
-		src.Type = Types[Tptr]
-		Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), typename(n.Type), &src)
-		p3 := Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), &src, nil)
-		p3.To = p2.To
-		p3.To.Offset -= 2 * int64(Widthptr)
-	}
-	Regfree(&src)
-	Ginscall(writebarrierfn(funcName, Types[Tptr], Types[Tptr]), 0)
-}
-
-// cgen_norm moves n1 to res, truncating to expected type if necessary.
-// n1 is a register, and cgen_norm frees it.
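-// On architectures that compute in full-width registers, an 8-bit
-// 0xFF+1 leaves 0x100 in the register; moving the value onto itself
-// with its own 8-bit type truncates it back to 0.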
-func cgen_norm(n, n1, res *Node) {
-	switch Ctxt.Arch.Family {
-	case sys.AMD64, sys.I386:
-		// We use sized math, so the result is already truncated.
-	default:
-		switch n.Op {
-		case OADD, OSUB, OMUL, ODIV, OCOM, OMINUS:
-			// TODO(rsc): What about left shift?
-			Thearch.Gins(Thearch.Optoas(OAS, n.Type), n1, n1)
-		}
-	}
-
-	Thearch.Gmove(n1, res)
-	Regfree(n1)
-}
-
-func Mgen(n *Node, n1 *Node, rg *Node) {
-	n1.Op = OEMPTY
-
-	if n.Addable {
-		*n1 = *n
-		if n1.Op == OREGISTER || n1.Op == OINDREG {
-			reg[n.Reg-int16(Thearch.REGMIN)]++
-		}
-		return
-	}
-
-	Tempname(n1, n.Type)
-	Cgen(n, n1)
-	if n.Type.Width <= int64(Widthptr) || n.Type.IsFloat() {
-		n2 := *n1
-		Regalloc(n1, n.Type, rg)
-		Thearch.Gmove(&n2, n1)
-	}
-}
-
-func Mfree(n *Node) {
-	if n.Op == OREGISTER {
-		Regfree(n)
-	}
-}
-
-// allocate a register (reusing res if possible) and generate
-//	a = n
-// The caller must call Regfree(a).
-func Cgenr(n *Node, a *Node, res *Node) {
-	if Debug['g'] != 0 {
-		Dump("cgenr-n", n)
-	}
-
-	if Isfat(n.Type) {
-		Fatalf("cgenr on fat node")
-	}
-
-	if n.Addable {
-		Regalloc(a, n.Type, res)
-		Thearch.Gmove(n, a)
-		return
-	}
-
-	switch n.Op {
-	case ONAME,
-		ODOT,
-		ODOTPTR,
-		OINDEX,
-		OCALLFUNC,
-		OCALLMETH,
-		OCALLINTER:
-		var n1 Node
-		Igen(n, &n1, res)
-		Regalloc(a, n.Type, &n1)
-		Thearch.Gmove(&n1, a)
-		Regfree(&n1)
-
-	default:
-		Regalloc(a, n.Type, res)
-		Cgen(n, a)
-	}
-}
-
-// allocate a register (reusing res if possible) and generate
-//	a = &n
-// The caller must call Regfree(a).
-// The generated code checks that the result is not nil.
-func Agenr(n *Node, a *Node, res *Node) {
-	if Debug['g'] != 0 {
-		Dump("\nagenr-n", n)
-	}
-
-	nl := n.Left
-	nr := n.Right
-
-	switch n.Op {
-	case ODOT, ODOTPTR, OCALLFUNC, OCALLMETH, OCALLINTER:
-		var n1 Node
-		Igen(n, &n1, res)
-		Regalloc(a, Types[Tptr], &n1)
-		Agen(&n1, a)
-		Regfree(&n1)
-
-	case OIND:
-		Cgenr(n.Left, a, res)
-		if !n.Left.NonNil {
-			Cgen_checknil(a)
-		} else if Debug_checknil != 0 && n.Lineno > 1 {
-			Warnl(n.Lineno, "removed nil check")
-		}
-
-	case OINDEX:
-		if Ctxt.Arch.Family == sys.ARM {
-			var p2 *obj.Prog // to be patched to panicindex.
-			w := uint32(n.Type.Width)
-			bounded := Debug['B'] != 0 || n.Bounded
-			var n1 Node
-			var n3 Node
-			if nr.Addable {
-				var tmp Node
-				if !Isconst(nr, CTINT) {
-					Tempname(&tmp, Types[TINT32])
-				}
-				if !Isconst(nl, CTSTR) {
-					Agenr(nl, &n3, res)
-				}
-				if !Isconst(nr, CTINT) {
-					p2 = Thearch.Cgenindex(nr, &tmp, bounded)
-					Regalloc(&n1, tmp.Type, nil)
-					Thearch.Gmove(&tmp, &n1)
-				}
-			} else if nl.Addable {
-				if !Isconst(nr, CTINT) {
-					var tmp Node
-					Tempname(&tmp, Types[TINT32])
-					p2 = Thearch.Cgenindex(nr, &tmp, bounded)
-					Regalloc(&n1, tmp.Type, nil)
-					Thearch.Gmove(&tmp, &n1)
-				}
-
-				if !Isconst(nl, CTSTR) {
-					Agenr(nl, &n3, res)
-				}
-			} else {
-				var tmp Node
-				Tempname(&tmp, Types[TINT32])
-				p2 = Thearch.Cgenindex(nr, &tmp, bounded)
-				nr = &tmp
-				if !Isconst(nl, CTSTR) {
-					Agenr(nl, &n3, res)
-				}
-				Regalloc(&n1, tmp.Type, nil)
-				Thearch.Gins(Thearch.Optoas(OAS, tmp.Type), &tmp, &n1)
-			}
-
-			// &a is in n3 (allocated in res)
-			// i is in n1 (if not constant)
-			// w is width
-
-			// constant index
-			if Isconst(nr, CTINT) {
-				if Isconst(nl, CTSTR) {
-					Fatalf("constant string constant index")
-				}
-				v := uint64(nr.Int64())
-				var n2 Node
-				if nl.Type.IsSlice() || nl.Type.IsString() {
-					if Debug['B'] == 0 && !n.Bounded {
-						n1 = n3
-						n1.Op = OINDREG
-						n1.Type = Types[Tptr]
-						n1.Xoffset = int64(Array_nel)
-						Nodconst(&n2, Types[TUINT32], int64(v))
-						p1 := Thearch.Ginscmp(OGT, Types[TUINT32], &n1, &n2, +1)
-						Ginscall(Panicindex, -1)
-						Patch(p1, Pc)
-					}
-
-					n1 = n3
-					n1.Op = OINDREG
-					n1.Type = Types[Tptr]
-					n1.Xoffset = int64(Array_array)
-					Thearch.Gmove(&n1, &n3)
-				}
-
-				Nodconst(&n2, Types[Tptr], int64(v*uint64(w)))
-				Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), &n2, &n3)
-				*a = n3
-				break
-			}
-
-			var n2 Node
-			Regalloc(&n2, Types[TINT32], &n1) // i
-			Thearch.Gmove(&n1, &n2)
-			Regfree(&n1)
-
-			var n4 Node
-			if Debug['B'] == 0 && !n.Bounded {
-				// check bounds
-				if Isconst(nl, CTSTR) {
-					Nodconst(&n4, Types[TUINT32], int64(len(nl.Val().U.(string))))
-				} else if nl.Type.IsSlice() || nl.Type.IsString() {
-					n1 = n3
-					n1.Op = OINDREG
-					n1.Type = Types[Tptr]
-					n1.Xoffset = int64(Array_nel)
-					Regalloc(&n4, Types[TUINT32], nil)
-					Thearch.Gmove(&n1, &n4)
-				} else {
-					Nodconst(&n4, Types[TUINT32], nl.Type.NumElem())
-				}
-				p1 := Thearch.Ginscmp(OLT, Types[TUINT32], &n2, &n4, +1)
-				if n4.Op == OREGISTER {
-					Regfree(&n4)
-				}
-				if p2 != nil {
-					Patch(p2, Pc)
-				}
-				Ginscall(Panicindex, -1)
-				Patch(p1, Pc)
-			}
-
-			if Isconst(nl, CTSTR) {
-				Regalloc(&n3, Types[Tptr], res)
-				p1 := Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), nil, &n3)
-				Datastring(nl.Val().U.(string), &p1.From)
-				p1.From.Type = obj.TYPE_ADDR
-			} else if nl.Type.IsSlice() || nl.Type.IsString() {
-				n1 = n3
-				n1.Op = OINDREG
-				n1.Type = Types[Tptr]
-				n1.Xoffset = int64(Array_array)
-				Thearch.Gmove(&n1, &n3)
-			}
-
-			if w == 0 {
-				// nothing to do
-			} else if Thearch.AddIndex != nil && Thearch.AddIndex(&n2, int64(w), &n3) {
-				// done by back end
-			} else if w == 1 {
-				Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), &n2, &n3)
-			} else {
-				if w&(w-1) == 0 {
-					// Power of 2.  Use shift.
-					Thearch.Ginscon(Thearch.Optoas(OLSH, Types[TUINT32]), int64(log2(uint64(w))), &n2)
-				} else {
-					// Not a power of 2.  Use multiply.
-					Regalloc(&n4, Types[TUINT32], nil)
-					Nodconst(&n1, Types[TUINT32], int64(w))
-					Thearch.Gmove(&n1, &n4)
-					Thearch.Gins(Thearch.Optoas(OMUL, Types[TUINT32]), &n4, &n2)
-					Regfree(&n4)
-				}
-				Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), &n2, &n3)
-			}
-			*a = n3
-			Regfree(&n2)
-			break
-		}
-		if Ctxt.Arch.Family == sys.I386 {
-			var p2 *obj.Prog // to be patched to panicindex.
-			w := uint32(n.Type.Width)
-			bounded := Debug['B'] != 0 || n.Bounded
-			var n3 Node
-			var tmp Node
-			var n1 Node
-			if nr.Addable {
-				// Generate &nl first, and move nr into register.
-				if !Isconst(nl, CTSTR) {
-					Igen(nl, &n3, res)
-				}
-				if !Isconst(nr, CTINT) {
-					p2 = Thearch.Igenindex(nr, &tmp, bounded)
-					Regalloc(&n1, tmp.Type, nil)
-					Thearch.Gmove(&tmp, &n1)
-				}
-			} else if nl.Addable {
-				// Generate nr first, and move &nl into register.
-				if !Isconst(nr, CTINT) {
-					p2 = Thearch.Igenindex(nr, &tmp, bounded)
-					Regalloc(&n1, tmp.Type, nil)
-					Thearch.Gmove(&tmp, &n1)
-				}
-
-				if !Isconst(nl, CTSTR) {
-					Igen(nl, &n3, res)
-				}
-			} else {
-				p2 = Thearch.Igenindex(nr, &tmp, bounded)
-				nr = &tmp
-				if !Isconst(nl, CTSTR) {
-					Igen(nl, &n3, res)
-				}
-				Regalloc(&n1, tmp.Type, nil)
-				Thearch.Gins(Thearch.Optoas(OAS, tmp.Type), &tmp, &n1)
-			}
-
-			// For fixed array we really want the pointer in n3.
-			var n2 Node
-			if nl.Type.IsArray() {
-				Regalloc(&n2, Types[Tptr], &n3)
-				Agen(&n3, &n2)
-				Regfree(&n3)
-				n3 = n2
-			}
-
-			// &a[0] is in n3 (allocated in res)
-			// i is in n1 (if not constant)
-			// len(a) is in nlen (if needed)
-			// w is width
-
-			// constant index
-			if Isconst(nr, CTINT) {
-				if Isconst(nl, CTSTR) {
-					Fatalf("constant string constant index") // front end should handle
-				}
-				v := uint64(nr.Int64())
-				if nl.Type.IsSlice() || nl.Type.IsString() {
-					if Debug['B'] == 0 && !n.Bounded {
-						nlen := n3
-						nlen.Type = Types[TUINT32]
-						nlen.Xoffset += int64(Array_nel)
-						Nodconst(&n2, Types[TUINT32], int64(v))
-						p1 := Thearch.Ginscmp(OGT, Types[TUINT32], &nlen, &n2, +1)
-						Ginscall(Panicindex, -1)
-						Patch(p1, Pc)
-					}
-				}
-
-				// Load base pointer in n2 = n3.
-				Regalloc(&n2, Types[Tptr], &n3)
-
-				n3.Type = Types[Tptr]
-				n3.Xoffset += int64(Array_array)
-				Thearch.Gmove(&n3, &n2)
-				Regfree(&n3)
-				if v*uint64(w) != 0 {
-					Nodconst(&n1, Types[Tptr], int64(v*uint64(w)))
-					Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), &n1, &n2)
-				}
-				*a = n2
-				break
-			}
-
-			// i is in register n1, extend to 32 bits.
-			t := Types[TUINT32]
-
-			if n1.Type.IsSigned() {
-				t = Types[TINT32]
-			}
-
-			Regalloc(&n2, t, &n1) // i
-			Thearch.Gmove(&n1, &n2)
-			Regfree(&n1)
-
-			if Debug['B'] == 0 && !n.Bounded {
-				// check bounds
-				t := Types[TUINT32]
-
-				var nlen Node
-				if Isconst(nl, CTSTR) {
-					Nodconst(&nlen, t, int64(len(nl.Val().U.(string))))
-				} else if nl.Type.IsSlice() || nl.Type.IsString() {
-					nlen = n3
-					nlen.Type = t
-					nlen.Xoffset += int64(Array_nel)
-				} else {
-					Nodconst(&nlen, t, nl.Type.NumElem())
-				}
-
-				p1 := Thearch.Ginscmp(OLT, t, &n2, &nlen, +1)
-				if p2 != nil {
-					Patch(p2, Pc)
-				}
-				Ginscall(Panicindex, -1)
-				Patch(p1, Pc)
-			}
-
-			if Isconst(nl, CTSTR) {
-				Regalloc(&n3, Types[Tptr], res)
-				p1 := Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), nil, &n3)
-				Datastring(nl.Val().U.(string), &p1.From)
-				p1.From.Type = obj.TYPE_ADDR
-				Thearch.Gins(Thearch.Optoas(OADD, n3.Type), &n2, &n3)
-				goto indexdone1
-			}
-
-			// Load base pointer in n3.
-			Regalloc(&tmp, Types[Tptr], &n3)
-
-			if nl.Type.IsSlice() || nl.Type.IsString() {
-				n3.Type = Types[Tptr]
-				n3.Xoffset += int64(Array_array)
-				Thearch.Gmove(&n3, &tmp)
-			}
-
-			Regfree(&n3)
-			n3 = tmp
-
-			if w == 0 {
-				// nothing to do
-			} else if Thearch.AddIndex != nil && Thearch.AddIndex(&n2, int64(w), &n3) {
-				// done by back end
-			} else if w == 1 {
-				Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), &n2, &n3)
-			} else {
-				if w&(w-1) == 0 {
-					// Power of 2.  Use shift.
-					Thearch.Ginscon(Thearch.Optoas(OLSH, Types[TUINT32]), int64(log2(uint64(w))), &n2)
-				} else {
-					// Not a power of 2.  Use multiply.
-					Thearch.Ginscon(Thearch.Optoas(OMUL, Types[TUINT32]), int64(w), &n2)
-				}
-				Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), &n2, &n3)
-			}
-
-		indexdone1:
-			*a = n3
-			Regfree(&n2)
-			break
-		}
-
-		freelen := 0
-		w := uint64(n.Type.Width)
-
-		// Generate the non-addressable child first.
-		var n3 Node
-		var nlen Node
-		var tmp Node
-		var n1 Node
-		if nr.Addable {
-			goto irad
-		}
-		if nl.Addable {
-			Cgenr(nr, &n1, nil)
-			if !Isconst(nl, CTSTR) {
-				if nl.Type.IsArray() {
-					Agenr(nl, &n3, res)
-				} else {
-					Igen(nl, &nlen, res)
-					freelen = 1
-					nlen.Type = Types[Tptr]
-					nlen.Xoffset += int64(Array_array)
-					Regalloc(&n3, Types[Tptr], res)
-					Thearch.Gmove(&nlen, &n3)
-					nlen.Type = Types[Simtype[TUINT]]
-					nlen.Xoffset += int64(Array_nel) - int64(Array_array)
-				}
-			}
-
-			goto index
-		}
-
-		Tempname(&tmp, nr.Type)
-		Cgen(nr, &tmp)
-		nr = &tmp
-
-	irad:
-		if !Isconst(nl, CTSTR) {
-			if nl.Type.IsArray() {
-				Agenr(nl, &n3, res)
-			} else {
-				if !nl.Addable {
-					if res != nil && res.Op == OREGISTER { // give up res, which we don't need yet.
-						Regfree(res)
-					}
-
-					// igen will need an addressable node.
-					var tmp2 Node
-					Tempname(&tmp2, nl.Type)
-					Cgen(nl, &tmp2)
-					nl = &tmp2
-
-					if res != nil && res.Op == OREGISTER { // reacquire res
-						Regrealloc(res)
-					}
-				}
-
-				Igen(nl, &nlen, res)
-				freelen = 1
-				nlen.Type = Types[Tptr]
-				nlen.Xoffset += int64(Array_array)
-				Regalloc(&n3, Types[Tptr], res)
-				Thearch.Gmove(&nlen, &n3)
-				nlen.Type = Types[Simtype[TUINT]]
-				nlen.Xoffset += int64(Array_nel) - int64(Array_array)
-			}
-		}
-
-		if !Isconst(nr, CTINT) {
-			Cgenr(nr, &n1, nil)
-		}
-
-		goto index
-
-		// &a is in n3 (allocated in res)
-		// i is in n1 (if not constant)
-		// len(a) is in nlen (if needed)
-		// w is width
-
-		// constant index
-	index:
-		if Isconst(nr, CTINT) {
-			if Isconst(nl, CTSTR) {
-				Fatalf("constant string constant index") // front end should handle
-			}
-			v := uint64(nr.Int64())
-			if nl.Type.IsSlice() || nl.Type.IsString() {
-				if Debug['B'] == 0 && !n.Bounded {
-					p1 := Thearch.Ginscmp(OGT, Types[Simtype[TUINT]], &nlen, Nodintconst(int64(v)), +1)
-					Ginscall(Panicindex, -1)
-					Patch(p1, Pc)
-				}
-
-				Regfree(&nlen)
-			}
-
-			if v*w != 0 {
-				Thearch.Ginscon(Thearch.Optoas(OADD, Types[Tptr]), int64(v*w), &n3)
-			}
-			*a = n3
-			break
-		}
-
-		// type of the index
-		t := Types[TUINT64]
-
-		if n1.Type.IsSigned() {
-			t = Types[TINT64]
-		}
-
-		var n2 Node
-		Regalloc(&n2, t, &n1) // i
-		Thearch.Gmove(&n1, &n2)
-		Regfree(&n1)
-
-		if Debug['B'] == 0 && !n.Bounded {
-			// check bounds
-			t = Types[Simtype[TUINT]]
-
-			if Is64(nr.Type) {
-				t = Types[TUINT64]
-			}
-			if Isconst(nl, CTSTR) {
-				Nodconst(&nlen, t, int64(len(nl.Val().U.(string))))
-			} else if nl.Type.IsSlice() || nl.Type.IsString() {
-				// nlen already initialized
-			} else {
-				Nodconst(&nlen, t, nl.Type.NumElem())
-			}
-
-			p1 := Thearch.Ginscmp(OLT, t, &n2, &nlen, +1)
-			Ginscall(Panicindex, -1)
-			Patch(p1, Pc)
-		}
-
-		if Isconst(nl, CTSTR) {
-			Regalloc(&n3, Types[Tptr], res)
-			p1 := Thearch.Gins(Thearch.Optoas(OAS, n3.Type), nil, &n3) // XXX was LEAQ!
-			Datastring(nl.Val().U.(string), &p1.From)
-			p1.From.Type = obj.TYPE_ADDR
-			Thearch.Gins(Thearch.Optoas(OADD, n3.Type), &n2, &n3)
-			goto indexdone
-		}
-
-		if w == 0 {
-			// nothing to do
-		} else if Thearch.AddIndex != nil && Thearch.AddIndex(&n2, int64(w), &n3) {
-			// done by back end
-		} else if w == 1 {
-			Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), &n2, &n3)
-		} else {
-			if w&(w-1) == 0 {
-				// Power of 2.  Use shift.
-				Thearch.Ginscon(Thearch.Optoas(OLSH, t), int64(log2(w)), &n2)
-			} else {
-				// Not a power of 2.  Use multiply.
-				Thearch.Ginscon(Thearch.Optoas(OMUL, t), int64(w), &n2)
-			}
-			Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), &n2, &n3)
-		}
-
-	indexdone:
-		*a = n3
-		Regfree(&n2)
-		if freelen != 0 {
-			Regfree(&nlen)
-		}
-
-	default:
-		Regalloc(a, Types[Tptr], res)
-		Agen(n, a)
-	}
-}
-
-// log2 returns the logarithm base 2 of n.  n must be a power of 2.
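-// For example, log2(1) == 0 and log2(1<<20) == 20. Callers guarantee
-// the precondition by testing w&(w-1) == 0 first; for n == 0 the loop
-// would not terminate.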
-func log2(n uint64) int {
-	x := 0
-	for n>>uint(x) != 1 {
-		x++
-	}
-	return x
-}
-
-// generate:
-//	res = &n;
-// The generated code checks that the result is not nil.
-func Agen(n *Node, res *Node) {
-	if Debug['g'] != 0 {
-		Dump("\nagen-res", res)
-		Dump("agen-r", n)
-	}
-
-	if n == nil || n.Type == nil {
-		return
-	}
-
-	for n.Op == OCONVNOP {
-		n = n.Left
-	}
-
-	if Isconst(n, CTNIL) && n.Type.Width > int64(Widthptr) {
-		// Use of a nil interface or nil slice.
-		// Create a temporary we can take the address of and read.
-		// The generated code is just going to panic, so it need not
-		// be terribly efficient. See issue 3670.
-		var n1 Node
-		Tempname(&n1, n.Type)
-
-		Gvardef(&n1)
-		Thearch.Clearfat(&n1)
-		var n2 Node
-		Regalloc(&n2, Types[Tptr], res)
-		var n3 Node
-		n3.Op = OADDR
-		n3.Left = &n1
-		Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), &n3, &n2)
-		Thearch.Gmove(&n2, res)
-		Regfree(&n2)
-		return
-	}
-
-	if n.Op == OINDREG && n.Xoffset == 0 {
-		// Generate MOVW R0, R1 instead of MOVW $0(R0), R1.
-		// This allows better move propagation in the back ends
-		// (and maybe it helps the processor).
-		n1 := *n
-		n1.Op = OREGISTER
-		n1.Type = res.Type
-		Thearch.Gmove(&n1, res)
-		return
-	}
-
-	if n.Addable {
-		if n.Op == OREGISTER {
-			Fatalf("agen OREGISTER")
-		}
-		var n1 Node
-		n1.Op = OADDR
-		n1.Left = n
-		var n2 Node
-		Regalloc(&n2, Types[Tptr], res)
-		Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), &n1, &n2)
-		Thearch.Gmove(&n2, res)
-		Regfree(&n2)
-		return
-	}
-
-	nl := n.Left
-
-	switch n.Op {
-	default:
-		Dump("bad agen", n)
-		Fatalf("agen: unknown op %+S", n)
-
-	case OCALLMETH:
-		cgen_callmeth(n, 0)
-		cgen_aret(n, res)
-
-	case OCALLINTER:
-		cgen_callinter(n, res, 0)
-		cgen_aret(n, res)
-
-	case OCALLFUNC:
-		cgen_call(n, 0)
-		cgen_aret(n, res)
-
-	case OEFACE, ODOTTYPE, OSLICE, OSLICEARR, OSLICESTR, OSLICE3, OSLICE3ARR, OARRAYBYTESTRTMP:
-		var n1 Node
-		Tempname(&n1, n.Type)
-		Cgen(n, &n1)
-		Agen(&n1, res)
-
-	case OINDEX:
-		var n1 Node
-		Agenr(n, &n1, res)
-		Thearch.Gmove(&n1, res)
-		Regfree(&n1)
-
-	case OIND:
-		Cgen(nl, res)
-		if !nl.NonNil {
-			Cgen_checknil(res)
-		} else if Debug_checknil != 0 && n.Lineno > 1 {
-			Warnl(n.Lineno, "removed nil check")
-		}
-
-	case ODOT:
-		Agen(nl, res)
-		if n.Xoffset != 0 {
-			addOffset(res, n.Xoffset)
-		}
-
-	case ODOTPTR:
-		Cgen(nl, res)
-		if !nl.NonNil {
-			Cgen_checknil(res)
-		} else if Debug_checknil != 0 && n.Lineno > 1 {
-			Warnl(n.Lineno, "removed nil check")
-		}
-		if n.Xoffset != 0 {
-			addOffset(res, n.Xoffset)
-		}
-	}
-}
-
-func addOffset(res *Node, offset int64) {
-	if Ctxt.Arch.InFamily(sys.AMD64, sys.I386) {
-		Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), Nodintconst(offset), res)
-		return
-	}
-
-	var n1, n2 Node
-	Regalloc(&n1, Types[Tptr], nil)
-	Thearch.Gmove(res, &n1)
-	Regalloc(&n2, Types[Tptr], nil)
-	Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), Nodintconst(offset), &n2)
-	Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), &n2, &n1)
-	Thearch.Gmove(&n1, res)
-	Regfree(&n1)
-	Regfree(&n2)
-}
-
-// Igen computes the address &n, stores it in a register r,
-// and rewrites a to refer to *r. The chosen r may be the
-// stack pointer, it may be borrowed from res, or it may
-// be a newly allocated register. The caller must call Regfree(a)
-// to free r when the address is no longer needed.
-// The generated code ensures that &n is not nil.
-func Igen(n *Node, a *Node, res *Node) {
-	if Debug['g'] != 0 {
-		Dump("\nigen-n", n)
-	}
-
-	switch n.Op {
-	case ONAME:
-		if n.Class == PAUTOHEAP {
-			Dump("igen", n)
-			Fatalf("bad name")
-		}
-		*a = *n
-		return
-
-	case OINDREG:
-		// Increase the refcount of the register so that igen's caller
-		// has to call Regfree.
-		if n.Reg != int16(Thearch.REGSP) {
-			reg[n.Reg-int16(Thearch.REGMIN)]++
-		}
-		*a = *n
-		return
-
-	case ODOT:
-		Igen(n.Left, a, res)
-		a.Xoffset += n.Xoffset
-		a.Type = n.Type
-		Fixlargeoffset(a)
-		return
-
-	case ODOTPTR:
-		Cgenr(n.Left, a, res)
-		if !n.Left.NonNil {
-			Cgen_checknil(a)
-		} else if Debug_checknil != 0 && n.Lineno > 1 {
-			Warnl(n.Lineno, "removed nil check")
-		}
-		a.Op = OINDREG
-		a.Xoffset += n.Xoffset
-		a.Type = n.Type
-		Fixlargeoffset(a)
-		return
-
-	case OCALLFUNC, OCALLMETH, OCALLINTER:
-		switch n.Op {
-		case OCALLFUNC:
-			cgen_call(n, 0)
-
-		case OCALLMETH:
-			cgen_callmeth(n, 0)
-
-		case OCALLINTER:
-			cgen_callinter(n, nil, 0)
-		}
-
-		fp := n.Left.Type.Results().Field(0)
-		*a = Node{}
-		a.Op = OINDREG
-		a.Reg = int16(Thearch.REGSP)
-		a.Addable = true
-		a.Xoffset = fp.Offset + Ctxt.FixedFrameSize()
-		a.Type = n.Type
-		return
-
-	case OINDEX:
-		// Index of fixed-size array by constant can
-		// put the offset in the addressing.
-		// Could do the same for slice except that we need
-		// to use the real index for the bounds checking.
-		if n.Left.Type.IsArray() || (n.Left.Type.IsPtr() && n.Left.Left.Type.IsArray()) {
-			if Isconst(n.Right, CTINT) {
-				// Compute &a.
-				if !n.Left.Type.IsPtr() {
-					Igen(n.Left, a, res)
-				} else {
-					var n1 Node
-					Igen(n.Left, &n1, res)
-					Cgen_checknil(&n1)
-					Regalloc(a, Types[Tptr], res)
-					Thearch.Gmove(&n1, a)
-					Regfree(&n1)
-					a.Op = OINDREG
-				}
-
-				// Compute &a[i] as &a + i*width.
-				a.Type = n.Type
-
-				a.Xoffset += n.Right.Int64() * n.Type.Width
-				Fixlargeoffset(a)
-				return
-			}
-		}
-	}
-
-	Agenr(n, a, res)
-	a.Op = OINDREG
-	a.Type = n.Type
-}
-
-// Bgen generates code for branches:
-//
-// 	if n == wantTrue {
-// 		goto to
-// 	}
-func Bgen(n *Node, wantTrue bool, likely int, to *obj.Prog) {
-	bgenx(n, nil, wantTrue, likely, to)
-}
-
-// Bvgen generates code for calculating boolean values:
-// 	res = n == wantTrue
-func Bvgen(n, res *Node, wantTrue bool) {
-	if Thearch.Ginsboolval == nil {
-		// Direct value generation not implemented for this architecture.
-		// Implement using jumps.
-		bvgenjump(n, res, wantTrue, true)
-		return
-	}
-	bgenx(n, res, wantTrue, 0, nil)
-}
-
-// bvgenjump implements boolean value generation using jumps:
-// 	if n == wantTrue {
-// 		res = 1
-// 	} else {
-// 		res = 0
-// 	}
-// geninit controls whether n's Ninit is generated.
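-// The generated layout is:
-//	JMP  test
-// set:
-//	res = true
-//	JMP  done
-// test:
-//	(branch to set if n == wantTrue)
-//	res = false
-// done: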
-func bvgenjump(n, res *Node, wantTrue, geninit bool) {
-	init := n.Ninit
-	if !geninit {
-		n.Ninit.Set(nil)
-	}
-	p1 := Gbranch(obj.AJMP, nil, 0)
-	p2 := Pc
-	Thearch.Gmove(Nodbool(true), res)
-	p3 := Gbranch(obj.AJMP, nil, 0)
-	Patch(p1, Pc)
-	Bgen(n, wantTrue, 0, p2)
-	Thearch.Gmove(Nodbool(false), res)
-	Patch(p3, Pc)
-	n.Ninit.MoveNodes(&init)
-}
-
-// bgenx is the backend for Bgen and Bvgen.
-// If res is nil, it generates a branch.
-// Otherwise, it generates a boolean value.
-func bgenx(n, res *Node, wantTrue bool, likely int, to *obj.Prog) {
-	if Debug['g'] != 0 {
-		fmt.Printf("\nbgenx wantTrue=%t likely=%d to=%v\n", wantTrue, likely, to)
-		Dump("n", n)
-		Dump("res", res)
-	}
-
-	genval := res != nil
-
-	if n == nil {
-		n = Nodbool(true)
-	}
-
-	Genlist(n.Ninit)
-
-	if n.Type == nil {
-		n = convlit(n, Types[TBOOL])
-		if n.Type == nil {
-			return
-		}
-	}
-
-	if !n.Type.IsBoolean() {
-		Fatalf("bgen: bad type %v for %v", n.Type, n.Op)
-	}
-
-	for n.Op == OCONVNOP {
-		n = n.Left
-		Genlist(n.Ninit)
-	}
-
-	if Thearch.Bgen_float != nil && n.Left != nil && n.Left.Type.IsFloat() {
-		if genval {
-			bvgenjump(n, res, wantTrue, false)
-			return
-		}
-		Thearch.Bgen_float(n, wantTrue, likely, to)
-		return
-	}
-
-	switch n.Op {
-	default:
-		if genval {
-			Cgen(n, res)
-			if !wantTrue {
-				Thearch.Gins(Thearch.Optoas(OXOR, Types[TUINT8]), Nodintconst(1), res)
-			}
-			return
-		}
-
-		var tmp Node
-		Regalloc(&tmp, n.Type, nil)
-		Cgen(n, &tmp)
-		bgenNonZero(&tmp, nil, wantTrue, likely, to)
-		Regfree(&tmp)
-		return
-
-	case ONAME:
-		// Some architectures might need a temporary or other help here,
-		// but they don't support direct generation of a bool value yet.
-		// We can fix that as we go.
-		mayNeedTemp := Ctxt.Arch.InFamily(sys.ARM, sys.ARM64, sys.MIPS64, sys.PPC64, sys.S390X)
-
-		if genval {
-			if mayNeedTemp {
-				Fatalf("genval ONAMES not fully implemented")
-			}
-			Cgen(n, res)
-			if !wantTrue {
-				Thearch.Gins(Thearch.Optoas(OXOR, Types[TUINT8]), Nodintconst(1), res)
-			}
-			return
-		}
-
-		if n.Addable && !mayNeedTemp {
-			// no need for a temporary
-			bgenNonZero(n, nil, wantTrue, likely, to)
-			return
-		}
-		var tmp Node
-		Regalloc(&tmp, n.Type, nil)
-		Cgen(n, &tmp)
-		bgenNonZero(&tmp, nil, wantTrue, likely, to)
-		Regfree(&tmp)
-		return
-
-	case OLITERAL:
-		// n is a constant.
-		if !Isconst(n, CTBOOL) {
-			Fatalf("bgen: non-bool const %L\n", n)
-		}
-		if genval {
-			Cgen(Nodbool(wantTrue == n.Val().U.(bool)), res)
-			return
-		}
-		// If n == wantTrue, jump; otherwise do nothing.
-		if wantTrue == n.Val().U.(bool) {
-			Patch(Gbranch(obj.AJMP, nil, likely), to)
-		}
-		return
-
-	case OANDAND, OOROR:
-		and := (n.Op == OANDAND) == wantTrue
-		if genval {
-			p1 := Gbranch(obj.AJMP, nil, 0)
-			p2 := Gbranch(obj.AJMP, nil, 0)
-			Patch(p2, Pc)
-			Cgen(Nodbool(!and), res)
-			p3 := Gbranch(obj.AJMP, nil, 0)
-			Patch(p1, Pc)
-			Bgen(n.Left, wantTrue != and, 0, p2)
-			Bvgen(n.Right, res, wantTrue)
-			Patch(p3, Pc)
-			return
-		}
-
-		if and {
-			p1 := Gbranch(obj.AJMP, nil, 0)
-			p2 := Gbranch(obj.AJMP, nil, 0)
-			Patch(p1, Pc)
-			Bgen(n.Left, !wantTrue, -likely, p2)
-			Bgen(n.Right, !wantTrue, -likely, p2)
-			p1 = Gbranch(obj.AJMP, nil, 0)
-			Patch(p1, to)
-			Patch(p2, Pc)
-		} else {
-			Bgen(n.Left, wantTrue, likely, to)
-			Bgen(n.Right, wantTrue, likely, to)
-		}
-		return
-
-	case ONOT: // unary
-		if n.Left == nil || n.Left.Type == nil {
-			return
-		}
-		bgenx(n.Left, res, !wantTrue, likely, to)
-		return
-
-	case OEQ, ONE, OLT, OGT, OLE, OGE:
-		if n.Left == nil || n.Left.Type == nil || n.Right == nil || n.Right.Type == nil {
-			return
-		}
-	}
-
-	// n.Op is one of OEQ, ONE, OLT, OGT, OLE, OGE
-	nl := n.Left
-	nr := n.Right
-	op := n.Op
-
-	if !wantTrue {
-		if nr.Type.IsFloat() {
-			// Brcom is not valid on floats when NaN is involved.
-			ll := n.Ninit // avoid re-genning Ninit
-			n.Ninit.Set(nil)
-			if genval {
-				bgenx(n, res, true, likely, to)
-				Thearch.Gins(Thearch.Optoas(OXOR, Types[TUINT8]), Nodintconst(1), res) // res = !res
-				n.Ninit.Set(ll.Slice())
-				return
-			}
-			p1 := Gbranch(obj.AJMP, nil, 0)
-			p2 := Gbranch(obj.AJMP, nil, 0)
-			Patch(p1, Pc)
-			bgenx(n, res, true, -likely, p2)
-			Patch(Gbranch(obj.AJMP, nil, 0), to)
-			Patch(p2, Pc)
-			n.Ninit.Set(ll.Slice())
-			return
-		}
-
-		op = Brcom(op)
-	}
-	wantTrue = true
-
-	// make simplest on right
-	if nl.Op == OLITERAL || (nl.Ullman < nr.Ullman && nl.Ullman < UINF) {
-		op = Brrev(op)
-		nl, nr = nr, nl
-	}
-
-	if nl.Type.IsSlice() || nl.Type.IsInterface() {
-		// front end should only leave cmp to literal nil
-		if (op != OEQ && op != ONE) || nr.Op != OLITERAL {
-			if nl.Type.IsSlice() {
-				Yyerror("illegal slice comparison")
-			} else {
-				Yyerror("illegal interface comparison")
-			}
-			return
-		}
-
-		var ptr Node
-		Igen(nl, &ptr, nil)
-		if nl.Type.IsSlice() {
-			ptr.Xoffset += int64(Array_array)
-		}
-		ptr.Type = Types[Tptr]
-		var tmp Node
-		Regalloc(&tmp, ptr.Type, &ptr)
-		Cgen(&ptr, &tmp)
-		Regfree(&ptr)
-		bgenNonZero(&tmp, res, op == OEQ != wantTrue, likely, to)
-		Regfree(&tmp)
-		return
-	}
-
-	if nl.Type.IsComplex() {
-		complexbool(op, nl, nr, res, wantTrue, likely, to)
-		return
-	}
-
-	if Ctxt.Arch.RegSize == 4 && Is64(nr.Type) {
-		if genval {
-			// TODO: Teach Cmp64 to generate boolean values and remove this.
-			bvgenjump(n, res, wantTrue, false)
-			return
-		}
-		if !nl.Addable || Isconst(nl, CTINT) {
-			nl = CgenTemp(nl)
-		}
-		if !nr.Addable {
-			nr = CgenTemp(nr)
-		}
-		Thearch.Cmp64(nl, nr, op, likely, to)
-		return
-	}
-
-	if nr.Ullman >= UINF {
-		var n1 Node
-		Regalloc(&n1, nl.Type, nil)
-		Cgen(nl, &n1)
-		nl = &n1
-
-		var tmp Node
-		Tempname(&tmp, nl.Type)
-		Thearch.Gmove(&n1, &tmp)
-		Regfree(&n1)
-
-		var n2 Node
-		Regalloc(&n2, nr.Type, nil)
-		Cgen(nr, &n2)
-		nr = &n2
-
-		Regalloc(&n1, nl.Type, nil)
-		Cgen(&tmp, &n1)
-		Regfree(&n1)
-		Regfree(&n2)
-	} else {
-		var n1 Node
-		if !nl.Addable && Ctxt.Arch.Family == sys.I386 {
-			Tempname(&n1, nl.Type)
-		} else {
-			Regalloc(&n1, nl.Type, nil)
-			defer Regfree(&n1)
-		}
-		Cgen(nl, &n1)
-		nl = &n1
-
-		if Smallintconst(nr) && Ctxt.Arch.Family != sys.MIPS64 && Ctxt.Arch.Family != sys.PPC64 {
-			Thearch.Gins(Thearch.Optoas(OCMP, nr.Type), nl, nr)
-			bins(nr.Type, res, op, likely, to)
-			return
-		}
-
-		if !nr.Addable && Ctxt.Arch.Family == sys.I386 {
-			nr = CgenTemp(nr)
-		}
-
-		var n2 Node
-		Regalloc(&n2, nr.Type, nil)
-		Cgen(nr, &n2)
-		nr = &n2
-		Regfree(&n2)
-	}
-
-	l, r := nl, nr
-
-	// On x86, only < and <= work right with NaN; reverse if needed
-	if Ctxt.Arch.Family == sys.AMD64 && nl.Type.IsFloat() && (op == OGT || op == OGE) {
-		l, r = r, l
-		op = Brrev(op)
-	}
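-	// That is, a > b is evaluated as b < a with the operands swapped,
-	// so an unordered (NaN) comparison falls out on the false side,
-	// matching Go's rule that comparisons involving NaN are false.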
-
-	// MIPS does not have CMP instruction
-	if Ctxt.Arch.Family == sys.MIPS64 {
-		p := Thearch.Ginscmp(op, nr.Type, l, r, likely)
-		Patch(p, to)
-		return
-	}
-
-	// Do the comparison.
-	Thearch.Gins(Thearch.Optoas(OCMP, nr.Type), l, r)
-
-	// Handle floating point special cases.
-	// Note that 8g has Bgen_float and is handled above.
-	if nl.Type.IsFloat() {
-		switch Ctxt.Arch.Family {
-		case sys.ARM:
-			if genval {
-				Fatalf("genval 5g Isfloat special cases not implemented")
-			}
-			switch n.Op {
-			case ONE:
-				Patch(Gbranch(Thearch.Optoas(OPS, nr.Type), nr.Type, likely), to)
-				Patch(Gbranch(Thearch.Optoas(op, nr.Type), nr.Type, likely), to)
-			default:
-				p := Gbranch(Thearch.Optoas(OPS, nr.Type), nr.Type, -likely)
-				Patch(Gbranch(Thearch.Optoas(op, nr.Type), nr.Type, likely), to)
-				Patch(p, Pc)
-			}
-			return
-		case sys.AMD64:
-			switch n.Op {
-			case OEQ:
-				// neither NE nor P
-				if genval {
-					var reg Node
-					Regalloc(&reg, Types[TBOOL], nil)
-					Thearch.Ginsboolval(Thearch.Optoas(OEQ, nr.Type), &reg)
-					Thearch.Ginsboolval(Thearch.Optoas(OPC, nr.Type), res)
-					Thearch.Gins(Thearch.Optoas(OAND, Types[TBOOL]), &reg, res)
-					Regfree(&reg)
-				} else {
-					p1 := Gbranch(Thearch.Optoas(ONE, nr.Type), nil, -likely)
-					p2 := Gbranch(Thearch.Optoas(OPS, nr.Type), nil, -likely)
-					Patch(Gbranch(obj.AJMP, nil, 0), to)
-					Patch(p1, Pc)
-					Patch(p2, Pc)
-				}
-				return
-			case ONE:
-				// either NE or P
-				if genval {
-					var reg Node
-					Regalloc(&reg, Types[TBOOL], nil)
-					Thearch.Ginsboolval(Thearch.Optoas(ONE, nr.Type), &reg)
-					Thearch.Ginsboolval(Thearch.Optoas(OPS, nr.Type), res)
-					Thearch.Gins(Thearch.Optoas(OOR, Types[TBOOL]), &reg, res)
-					Regfree(&reg)
-				} else {
-					Patch(Gbranch(Thearch.Optoas(ONE, nr.Type), nil, likely), to)
-					Patch(Gbranch(Thearch.Optoas(OPS, nr.Type), nil, likely), to)
-				}
-				return
-			}
-		case sys.ARM64, sys.PPC64:
-			if genval {
-				Fatalf("genval 7g, 9g Isfloat special cases not implemented")
-			}
-			switch n.Op {
-			// On arm64 and ppc64, <= and >= mishandle NaN. Must decompose into < or > and =.
-			// TODO(josh): Convert a <= b to b > a instead?
-			case OLE, OGE:
-				if op == OLE {
-					op = OLT
-				} else {
-					op = OGT
-				}
-				Patch(Gbranch(Thearch.Optoas(op, nr.Type), nr.Type, likely), to)
-				Patch(Gbranch(Thearch.Optoas(OEQ, nr.Type), nr.Type, likely), to)
-				return
-			}
-		}
-	}
-
-	// Not a special case. Insert the conditional jump or value gen.
-	bins(nr.Type, res, op, likely, to)
-}
-
-func bgenNonZero(n, res *Node, wantTrue bool, likely int, to *obj.Prog) {
-	// TODO: Optimize on systems that can compare to zero easily.
-	var op Op = ONE
-	if !wantTrue {
-		op = OEQ
-	}
-
-	// MIPS does not have CMP instruction
-	if Thearch.LinkArch.Family == sys.MIPS64 {
-		p := Gbranch(Thearch.Optoas(op, n.Type), n.Type, likely)
-		Naddr(&p.From, n)
-		Patch(p, to)
-		return
-	}
-
-	var zero Node
-	Nodconst(&zero, n.Type, 0)
-	Thearch.Gins(Thearch.Optoas(OCMP, n.Type), n, &zero)
-	bins(n.Type, res, op, likely, to)
-}
-
-// bins inserts an instruction to handle the result of a compare.
-// If res is non-nil, it inserts appropriate value generation instructions.
-// If res is nil, it inserts a branch to to.
-func bins(typ *Type, res *Node, op Op, likely int, to *obj.Prog) {
-	a := Thearch.Optoas(op, typ)
-	if res != nil {
-		// value gen
-		Thearch.Ginsboolval(a, res)
-	} else {
-		// jump
-		Patch(Gbranch(a, typ, likely), to)
-	}
-}
-
-// stkof returns n's offset from SP if n is on the stack
-// (either a local variable or the return value from a function call
-// or the arguments to a function call).
-// If n is not on the stack, stkof returns -1000.
-// If n is on the stack but in an unknown location
-// (due to array index arithmetic), stkof returns +1000.
-//
-// NOTE(rsc): It is possible that the ODOT and OINDEX cases
-// are not relevant here, since it shouldn't be possible for them
-// to be involved in an overlapping copy. Only function results
-// from one call and the arguments to the next can overlap in
-// any non-trivial way. If they can be dropped, then this function
-// becomes much simpler and also more trustworthy.
-// The fact that it works at all today is probably due to the fact
-// that ODOT and OINDEX are irrelevant.
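-// For example, a function result is addressed as an OINDREG off SP,
-// so stkof returns its fixed frame offset, while a field accessed
-// through a heap pointer falls through and returns -1000.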
-func stkof(n *Node) int64 {
-	switch n.Op {
-	case OINDREG:
-		if n.Reg != int16(Thearch.REGSP) {
-			return -1000 // not on stack
-		}
-		return n.Xoffset
-
-	case ODOT:
-		t := n.Left.Type
-		if t.IsPtr() {
-			break
-		}
-		off := stkof(n.Left)
-		if off == -1000 || off == +1000 {
-			return off
-		}
-		return off + n.Xoffset
-
-	case OINDEX:
-		t := n.Left.Type
-		if !t.IsArray() {
-			break
-		}
-		off := stkof(n.Left)
-		if off == -1000 || off == +1000 {
-			return off
-		}
-		if Isconst(n.Right, CTINT) {
-			return off + t.Elem().Width*n.Right.Int64()
-		}
-		return +1000 // on stack but not sure exactly where
-
-	case OCALLMETH, OCALLINTER, OCALLFUNC:
-		t := n.Left.Type
-		if t.IsPtr() {
-			t = t.Elem()
-		}
-
-		f := t.Results().Field(0)
-		if f != nil {
-			return f.Offset + Ctxt.FixedFrameSize()
-		}
-	}
-
-	// botch - probably failing to recognize address
-	// arithmetic on the above, e.g. OINDEX and ODOT.
-	return -1000 // not on stack
-}
-
-// block copy:
-//	memmove(&ns, &n, w);
-// if wb is true, needs write barrier.
-func sgen_wb(n *Node, ns *Node, w int64, wb bool) {
-	if Debug['g'] != 0 {
-		op := "sgen"
-		if wb {
-			op = "sgen-wb"
-		}
-		fmt.Printf("\n%s w=%d\n", op, w)
-		Dump("r", n)
-		Dump("res", ns)
-	}
-
-	if n.Ullman >= UINF && ns.Ullman >= UINF {
-		Fatalf("sgen UINF")
-	}
-
-	if w < 0 {
-		Fatalf("sgen copy %d", w)
-	}
-
-	// If copying .args, that's all the results, so record definition sites
-	// for them for the liveness analysis.
-	if ns.Op == ONAME && ns.Sym.Name == ".args" {
-		for _, ln := range Curfn.Func.Dcl {
-			if ln.Class == PPARAMOUT {
-				Gvardef(ln)
-			}
-		}
-	}
-
-	// Avoid taking the address for simple enough types.
-	if componentgen_wb(n, ns, wb) {
-		return
-	}
-
-	if w == 0 {
-		// evaluate side effects only
-		var nodr Node
-		Regalloc(&nodr, Types[Tptr], nil)
-		Agen(ns, &nodr)
-		Agen(n, &nodr)
-		Regfree(&nodr)
-		return
-	}
-
-	// offset on the stack
-	osrc := stkof(n)
-	odst := stkof(ns)
-
-	if odst != -1000 {
-		// on stack, write barrier not needed after all
-		wb = false
-	}
-
-	if osrc != -1000 && odst != -1000 && (osrc == 1000 || odst == 1000) || wb && osrc != -1000 {
-		// osrc and odst both on stack, and at least one is in
-		// an unknown position. Could generate code to test
-		// for forward/backward copy, but instead just copy
-		// to a temporary location first.
-		//
-		// OR: write barrier needed and source is on stack.
-		// Invoking the write barrier will use the stack to prepare its call.
-		// Copy to temporary.
-		var tmp Node
-		Tempname(&tmp, n.Type)
-		sgen_wb(n, &tmp, w, false)
-		sgen_wb(&tmp, ns, w, wb)
-		return
-	}
-
-	if wb {
-		cgen_wbfat(n, ns)
-		return
-	}
-
-	Thearch.Blockcopy(n, ns, osrc, odst, w)
-}
-
-// generate:
-//	call f
-//	proc=-1	normal call but no return
-//	proc=0	normal call
-//	proc=1	goroutine run in new proc
-//	proc=2	defer call save away stack
-//	proc=3	normal call to C pointer (not Go func value)
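-// For example, "go f()" uses proc=1: the argument size and the func
-// value are stored at 0(SP) and 8(SP) and runtime.newproc is called.
-// "defer f()" uses proc=2 and runtime.deferproc, followed by a test
-// of the return value: nonzero means a panic was recovered and the
-// frame must return immediately.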
-func Ginscall(f *Node, proc int) {
-	if f.Type != nil {
-		extra := int32(0)
-		if proc == 1 || proc == 2 {
-			extra = 2 * int32(Widthptr)
-		}
-		Setmaxarg(f.Type, extra)
-	}
-
-	switch proc {
-	default:
-		Fatalf("Ginscall: bad proc %d", proc)
-
-	case 0, // normal call
-		-1: // normal call but no return
-		if f.Op == ONAME && f.Class == PFUNC {
-			if f == Deferreturn {
-				// Deferred calls will appear to be returning to the CALL
-				// deferreturn(SB) that we are about to emit. However, the
-				// stack scanning code will think that the instruction
-				// code making bad assumptions (both cosmetic, such as
-				// showing the wrong line number, and fatal, such as being
-				// confused over whether a stack slot contains a pointer
-				// or a scalar), insert an actual hardware NOP that will
-				// or a scalar) insert an actual hardware NOP that will
-				// have the right line number. This is different from
-				// obj.ANOP, which is a virtual no-op that doesn't make it
-				// into the instruction stream.
-				Thearch.Ginsnop()
-
-				if Thearch.LinkArch.Family == sys.PPC64 {
-					// When compiling position-independent Go code on
-					// ppc64le, we insert an instruction to reload the
-					// TOC pointer from the stack as well. See the long
-					// comment near jmpdefer in runtime/asm_ppc64.s
-					// for why.
-					// If the MOVD is not needed, insert a hardware NOP
-					// so that the same number of instructions are used
-					// on ppc64 in both shared and non-shared modes.
-					if Ctxt.Flag_shared {
-						p := Thearch.Gins(ppc64.AMOVD, nil, nil)
-						p.From.Type = obj.TYPE_MEM
-						p.From.Offset = 24
-						p.From.Reg = ppc64.REGSP
-						p.To.Type = obj.TYPE_REG
-						p.To.Reg = ppc64.REG_R2
-					} else {
-						Thearch.Ginsnop()
-					}
-				}
-			}
-
-			p := Thearch.Gins(obj.ACALL, nil, f)
-			Afunclit(&p.To, f)
-			if proc == -1 || Noreturn(p) {
-				Thearch.Gins(obj.AUNDEF, nil, nil)
-			}
-			break
-		}
-
-		var reg Node
-		Nodreg(&reg, Types[Tptr], Thearch.REGCTXT)
-		var r1 Node
-		Nodreg(&r1, Types[Tptr], Thearch.REGCALLX)
-		Thearch.Gmove(f, &reg)
-		reg.Op = OINDREG
-		Thearch.Gmove(&reg, &r1)
-		reg.Op = OREGISTER
-		Thearch.Gins(obj.ACALL, &reg, &r1)
-
-	case 3: // normal call of C function pointer
-		Thearch.Gins(obj.ACALL, nil, f)
-
-	case 1, // call in new proc (go)
-		2: // deferred call (defer)
-		var stk Node
-
-		// size of arguments at 0(SP)
-		stk.Op = OINDREG
-		stk.Reg = int16(Thearch.REGSP)
-		stk.Xoffset = Ctxt.FixedFrameSize()
-		Thearch.Ginscon(Thearch.Optoas(OAS, Types[TINT32]), int64(Argsize(f.Type)), &stk)
-
-		// FuncVal* at 8(SP)
-		stk.Xoffset = int64(Widthptr) + Ctxt.FixedFrameSize()
-
-		var reg Node
-		Nodreg(&reg, Types[Tptr], Thearch.REGCALLX2)
-		Thearch.Gmove(f, &reg)
-		Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), &reg, &stk)
-
-		if proc == 1 {
-			Ginscall(Newproc, 0)
-		} else {
-			if !hasdefer {
-				Fatalf("hasdefer=0 but has defer")
-			}
-			Ginscall(Deferproc, 0)
-		}
-
-		if proc == 2 {
-			Nodreg(&reg, Types[TINT32], Thearch.REGRETURN)
-			p := Thearch.Ginscmp(OEQ, Types[TINT32], &reg, Nodintconst(0), +1)
-			cgen_ret(nil)
-			Patch(p, Pc)
-		}
-	}
-}
-
-// n is call to interface method.
-// generate res = n.
-func cgen_callinter(n *Node, res *Node, proc int) {
-	i := n.Left
-	if i.Op != ODOTINTER {
-		Fatalf("cgen_callinter: not ODOTINTER %v", i.Op)
-	}
-
-	i = i.Left // interface
-
-	if !i.Addable {
-		var tmpi Node
-		Tempname(&tmpi, i.Type)
-		Cgen(i, &tmpi)
-		i = &tmpi
-	}
-
-	Genlist(n.List) // assign the args
-
-	// i is now addable, prepare an indirected
-	// register to hold its address.
-	var nodi Node
-	Igen(i, &nodi, res) // REG = &inter
-
-	var nodsp Node
-	Nodindreg(&nodsp, Types[Tptr], Thearch.REGSP)
-	nodsp.Xoffset = Ctxt.FixedFrameSize()
-	if proc != 0 {
-		nodsp.Xoffset += 2 * int64(Widthptr) // leave room for size & fn
-	}
-	nodi.Type = Types[Tptr]
-	nodi.Xoffset += int64(Widthptr)
-	Cgen(&nodi, &nodsp) // {0, 8(nacl), or 16}(SP) = 8(REG) -- i.data
-
-	var nodo Node
-	Regalloc(&nodo, Types[Tptr], res)
-
-	nodi.Type = Types[Tptr]
-	nodi.Xoffset -= int64(Widthptr)
-	Cgen(&nodi, &nodo) // REG = 0(REG) -- i.tab
-	Regfree(&nodi)
-
-	var nodr Node
-	Regalloc(&nodr, Types[Tptr], &nodo)
-	if n.Left.Xoffset == BADWIDTH {
-		Fatalf("cgen_callinter: badwidth")
-	}
-	Cgen_checknil(&nodo) // in case offset is huge
-	nodo.Op = OINDREG
-	nodo.Xoffset = n.Left.Xoffset + 3*int64(Widthptr) + 8
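-	// 3*Widthptr+8 skips the itab header (the inter, _type, and link
-	// pointers plus two int32 fields) to reach the fun array; the
-	// method's Xoffset selects the entry within it.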
-	if proc == 0 {
-		// plain call: use direct C function pointer - more efficient
-		Cgen(&nodo, &nodr) // REG = 32+offset(REG) -- i.tab->fun[f]
-		proc = 3
-	} else {
-		// go/defer. generate go func value.
-		Agen(&nodo, &nodr) // REG = &(32+offset(REG)) -- i.tab->fun[f]
-	}
-
-	nodr.Type = n.Left.Type
-	Ginscall(&nodr, proc)
-
-	Regfree(&nodr)
-	Regfree(&nodo)
-}
-
-// generate function call;
-//	proc=0	normal call
-//	proc=1	goroutine run in new proc
-//	proc=2	defer call save away stack
-func cgen_call(n *Node, proc int) {
-	if n == nil {
-		return
-	}
-
-	var afun Node
-	if n.Left.Ullman >= UINF {
-		// if name involves a fn call
-		// precompute the address of the fn
-		Tempname(&afun, Types[Tptr])
-
-		Cgen(n.Left, &afun)
-	}
-
-	Genlist(n.List) // assign the args
-	t := n.Left.Type
-
-	// call tempname pointer
-	if n.Left.Ullman >= UINF {
-		var nod Node
-		Regalloc(&nod, Types[Tptr], nil)
-		Cgen_as(&nod, &afun)
-		nod.Type = t
-		Ginscall(&nod, proc)
-		Regfree(&nod)
-		return
-	}
-
-	// call pointer
-	if n.Left.Op != ONAME || n.Left.Class != PFUNC {
-		var nod Node
-		Regalloc(&nod, Types[Tptr], nil)
-		Cgen_as(&nod, n.Left)
-		nod.Type = t
-		Ginscall(&nod, proc)
-		Regfree(&nod)
-		return
-	}
-
-	// call direct
-	n.Left.Name.Method = true
-
-	Ginscall(n.Left, proc)
-}
-
-// call to n has already been generated.
-// generate:
-//	res = return value from call.
-func cgen_callret(n *Node, res *Node) {
-	t := n.Left.Type
-	if t.Etype == TPTR32 || t.Etype == TPTR64 {
-		t = t.Elem()
-	}
-
-	fp := t.Results().Field(0)
-	if fp == nil {
-		Fatalf("cgen_callret: nil")
-	}
-
-	var nod Node
-	nod.Op = OINDREG
-	nod.Reg = int16(Thearch.REGSP)
-	nod.Addable = true
-
-	nod.Xoffset = fp.Offset + Ctxt.FixedFrameSize()
-	nod.Type = fp.Type
-	Cgen_as(res, &nod)
-}
-
-// call to n has already been generated.
-// generate:
-//	res = &return value from call.
-func cgen_aret(n *Node, res *Node) {
-	t := n.Left.Type
-	if t.IsPtr() {
-		t = t.Elem()
-	}
-
-	fp := t.Results().Field(0)
-	if fp == nil {
-		Fatalf("cgen_aret: nil")
-	}
-
-	var nod1 Node
-	nod1.Op = OINDREG
-	nod1.Reg = int16(Thearch.REGSP)
-	nod1.Addable = true
-	nod1.Xoffset = fp.Offset + Ctxt.FixedFrameSize()
-	nod1.Type = fp.Type
-
-	if res.Op != OREGISTER {
-		var nod2 Node
-		Regalloc(&nod2, Types[Tptr], res)
-		Agen(&nod1, &nod2)
-		Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), &nod2, res)
-		Regfree(&nod2)
-	} else {
-		Agen(&nod1, res)
-	}
-}
-
-// generate return.
-// n.List holds the assignments to the return values.
-func cgen_ret(n *Node) {
-	if n != nil {
-		Genlist(n.List) // copy out args
-	}
-	if hasdefer {
-		Ginscall(Deferreturn, 0)
-	}
-	Genlist(Curfn.Func.Exit)
-	p := Thearch.Gins(obj.ARET, nil, nil)
-	if n != nil && n.Op == ORETJMP {
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = Linksym(n.Left.Sym)
-	}
-}
+import "cmd/internal/sys"
 
 // hasHMUL64 reports whether the architecture supports 64-bit
 // signed and unsigned high multiplication (OHMUL).
@@ -2680,933 +53,3 @@
 	Fatalf("unknown architecture")
 	return false
 }
-
-// generate division according to op, one of:
-//	res = nl / nr
-//	res = nl % nr
-func cgen_div(op Op, nl *Node, nr *Node, res *Node) {
-	var w int
-
-	// Architectures need to support 64-bit high multiplications
-	// (OHMUL) in order to perform divide by constant optimizations.
-	if nr.Op != OLITERAL || !hasHMUL64() {
-		goto longdiv
-	}
-	w = int(nl.Type.Width * 8)
-
-	// Front end handled 32-bit division. We only need to handle 64-bit.
-	// Try to do division using multiplication: (2^w)/d.
-	// See Hacker's Delight, chapter 10.
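-	// For example, for uint64 x, x/3 becomes a high multiply by
-	// 0xAAAAAAAAAAAAAAAB (ceil(2^65/3)) followed by a right shift
-	// by 1; no correction step is needed (m.Ua == 0).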
-	switch Simtype[nl.Type.Etype] {
-	default:
-		goto longdiv
-
-	case TUINT64:
-		var m Magic
-		m.W = w
-		m.Ud = uint64(nr.Int64())
-		Umagic(&m)
-		if m.Bad != 0 {
-			break
-		}
-
-		// In order to add the numerator we need to be able to
-		// avoid overflow. This is done by shifting the result of the
-		// addition right by 1 and inserting the carry bit into
-		// the MSB. For now this needs the RROTC instruction.
-		// TODO(mundaym): Hacker's Delight 2nd ed. chapter 10 proposes
-		// an alternative sequence of instructions for architectures
-		// (TODO: MIPS64, PPC64, S390X) that do not have a shift
-		// right with carry instruction.
-		if m.Ua != 0 && !hasRROTC64() && !hasRightShiftWithCarry() {
-			goto longdiv
-		}
-		if op == OMOD {
-			goto longmod
-		}
-
-		var n1 Node
-		Cgenr(nl, &n1, nil)
-		var n2 Node
-		Nodconst(&n2, nl.Type, int64(m.Um))
-		var n3 Node
-		Regalloc(&n3, nl.Type, res)
-		Thearch.Cgen_hmul(&n1, &n2, &n3)
-
-		if m.Ua != 0 {
-			// Need to add numerator accounting for overflow.
-			if hasAddSetCarry() {
-				Thearch.AddSetCarry(&n1, &n3, &n3)
-			} else {
-				Thearch.Gins(Thearch.Optoas(OADD, nl.Type), &n1, &n3)
-			}
-
-			if !hasRROTC64() {
-				Thearch.RightShiftWithCarry(&n3, uint(m.S), &n3)
-			} else {
-				Nodconst(&n2, nl.Type, 1)
-				Thearch.Gins(Thearch.Optoas(ORROTC, nl.Type), &n2, &n3)
-				Nodconst(&n2, nl.Type, int64(m.S)-1)
-				Thearch.Gins(Thearch.Optoas(ORSH, nl.Type), &n2, &n3)
-			}
-		} else {
-			Nodconst(&n2, nl.Type, int64(m.S))
-			Thearch.Gins(Thearch.Optoas(ORSH, nl.Type), &n2, &n3) // shift dx
-		}
-
-		Thearch.Gmove(&n3, res)
-		Regfree(&n1)
-		Regfree(&n3)
-		return
-
-	case TINT64:
-		var m Magic
-		m.W = w
-		m.Sd = nr.Int64()
-		Smagic(&m)
-		if m.Bad != 0 {
-			break
-		}
-		if op == OMOD {
-			goto longmod
-		}
-
-		var n1 Node
-		Cgenr(nl, &n1, res)
-		var n2 Node
-		Nodconst(&n2, nl.Type, m.Sm)
-		var n3 Node
-		Regalloc(&n3, nl.Type, nil)
-		Thearch.Cgen_hmul(&n1, &n2, &n3)
-
-		if m.Sm < 0 {
-			// Need to add numerator (cannot overflow).
-			Thearch.Gins(Thearch.Optoas(OADD, nl.Type), &n1, &n3)
-		}
-
-		Nodconst(&n2, nl.Type, int64(m.S))
-		Thearch.Gins(Thearch.Optoas(ORSH, nl.Type), &n2, &n3) // shift n3
-
-		Nodconst(&n2, nl.Type, int64(w)-1)
-
-		Thearch.Gins(Thearch.Optoas(ORSH, nl.Type), &n2, &n1) // -1 iff num is neg
-		Thearch.Gins(Thearch.Optoas(OSUB, nl.Type), &n1, &n3) // added
-
-		if m.Sd < 0 {
-			// This could probably be removed by factoring it into
-			// the multiplier.
-			Thearch.Gins(Thearch.Optoas(OMINUS, nl.Type), nil, &n3)
-		}
-
-		Thearch.Gmove(&n3, res)
-		Regfree(&n1)
-		Regfree(&n3)
-		return
-	}
-
-	goto longdiv
-
-	// Division and mod using (slow) hardware instruction.
-longdiv:
-	Thearch.Dodiv(op, nl, nr, res)
-
-	return
-
-	// Mod using the formula A%B = A-(A/B*B), where
-	// we know that there is a fast algorithm for A/B.
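-	// For example, 17%5 == 17-(17/5)*5 == 17-15 == 2.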
-longmod:
-	var n1 Node
-	Regalloc(&n1, nl.Type, res)
-
-	Cgen(nl, &n1)
-	var n2 Node
-	Regalloc(&n2, nl.Type, nil)
-	cgen_div(ODIV, &n1, nr, &n2)
-	a := Thearch.Optoas(OMUL, nl.Type)
-
-	if !Smallintconst(nr) {
-		var n3 Node
-		Regalloc(&n3, nl.Type, nil)
-		Cgen(nr, &n3)
-		Thearch.Gins(a, &n3, &n2)
-		Regfree(&n3)
-	} else {
-		Thearch.Gins(a, nr, &n2)
-	}
-	Thearch.Gins(Thearch.Optoas(OSUB, nl.Type), &n2, &n1)
-	Thearch.Gmove(&n1, res)
-	Regfree(&n1)
-	Regfree(&n2)
-}
-
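-// Fixlargeoffset rewrites an OINDREG whose offset does not fit in a
-// signed 32-bit displacement: the offset is added into the base
-// register (after a nil check) and n.Xoffset is reset to 0. Stack
-// offsets are never that large, so SP-based nodes are left alone.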
-func Fixlargeoffset(n *Node) {
-	if n == nil {
-		return
-	}
-	if n.Op != OINDREG {
-		return
-	}
-	if n.Reg == int16(Thearch.REGSP) { // stack offset cannot be large
-		return
-	}
-	if n.Xoffset != int64(int32(n.Xoffset)) {
-		// offset too large, add to register instead.
-		a := *n
-
-		a.Op = OREGISTER
-		a.Type = Types[Tptr]
-		a.Xoffset = 0
-		Cgen_checknil(&a)
-		Thearch.Ginscon(Thearch.Optoas(OADD, Types[Tptr]), n.Xoffset, &a)
-		n.Xoffset = 0
-	}
-}
-
-func cgen_append(n, res *Node) {
-	if Debug['g'] != 0 {
-		Dump("cgen_append-n", n)
-		Dump("cgen_append-res", res)
-	}
-	for _, n1 := range n.List.Slice() {
-		if n1.Ullman >= UINF {
-			Fatalf("append with function call arguments")
-		}
-	}
-
-	// res = append(src, x, y, z)
-	//
-	// If res and src are the same, we can avoid writing to base and cap
-	// unless we grow the underlying array.
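-	//
-	// In Go terms, the generated code corresponds roughly to:
-	//	base, len, cap := src.ptr, src.len, src.cap
-	//	newlen := len + argc
-	//	if newlen > cap {
-	//		base, len, cap = growslice(elemType, base, len, cap, newlen)
-	//	}
-	//	update res's header (fully, or len-only on the fast path)
-	//	copy x, y, z into base[len:]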
-	needFullUpdate := !samesafeexpr(res, n.List.First())
-
-	// Copy src triple into base, len, cap.
-	base := temp(Types[Tptr])
-	len := temp(Types[TUINT])
-	cap := temp(Types[TUINT])
-
-	var src Node
-	Igen(n.List.First(), &src, nil)
-	src.Type = Types[Tptr]
-	Thearch.Gmove(&src, base)
-	src.Type = Types[TUINT]
-	src.Xoffset += int64(Widthptr)
-	Thearch.Gmove(&src, len)
-	src.Xoffset += int64(Widthptr)
-	Thearch.Gmove(&src, cap)
-
-	// if len+argc <= cap goto L1
-	var rlen Node
-	Regalloc(&rlen, Types[TUINT], nil)
-	Thearch.Gmove(len, &rlen)
-	Thearch.Ginscon(Thearch.Optoas(OADD, Types[TUINT]), int64(n.List.Len()-1), &rlen)
-	p := Thearch.Ginscmp(OLE, Types[TUINT], &rlen, cap, +1)
-	// Note: rlen and src are Regrealloc'ed below at the target of the
-	// branch we just emitted; do not reuse these Go variables for
-	// other purposes. They must still describe the same things
-	// below that they describe right here.
-	Regfree(&src)
-
-	// base, len, cap = growslice(type, base, len, cap, newlen)
-	var arg Node
-	arg.Op = OINDREG
-	arg.Reg = int16(Thearch.REGSP)
-	arg.Addable = true
-	arg.Xoffset = Ctxt.FixedFrameSize()
-	arg.Type = Ptrto(Types[TUINT8])
-	Cgen(typename(res.Type.Elem()), &arg)
-	arg.Xoffset += int64(Widthptr)
-
-	arg.Type = Types[Tptr]
-	Cgen(base, &arg)
-	arg.Xoffset += int64(Widthptr)
-
-	arg.Type = Types[TUINT]
-	Cgen(len, &arg)
-	arg.Xoffset += int64(Widthptr)
-
-	arg.Type = Types[TUINT]
-	Cgen(cap, &arg)
-	arg.Xoffset += int64(Widthptr)
-
-	arg.Type = Types[TUINT]
-	Cgen(&rlen, &arg)
-	arg.Xoffset += int64(Widthptr)
-	Regfree(&rlen)
-
-	fn := syslook("growslice")
-	fn = substArgTypes(fn, res.Type.Elem(), res.Type.Elem())
-	Ginscall(fn, 0)
-
-	if Widthptr == 4 && Widthreg == 8 {
-		arg.Xoffset += 4
-	}
-
-	arg.Type = Types[Tptr]
-	Cgen(&arg, base)
-	arg.Xoffset += int64(Widthptr)
-
-	arg.Type = Types[TUINT]
-	Cgen(&arg, len)
-	arg.Xoffset += int64(Widthptr)
-
-	arg.Type = Types[TUINT]
-	Cgen(&arg, cap)
-
-	// Update res with base, len+argc, cap.
-	if needFullUpdate {
-		if Debug_append > 0 {
-			Warn("append: full update")
-		}
-		Patch(p, Pc)
-	}
-	if res.Op == ONAME {
-		Gvardef(res)
-	}
-	var dst, r1 Node
-	Igen(res, &dst, nil)
-	dst.Type = Types[TUINT]
-	dst.Xoffset += int64(Widthptr)
-	Regalloc(&r1, Types[TUINT], nil)
-	Thearch.Gmove(len, &r1)
-	Thearch.Ginscon(Thearch.Optoas(OADD, Types[TUINT]), int64(n.List.Len()-1), &r1)
-	Thearch.Gmove(&r1, &dst)
-	Regfree(&r1)
-	dst.Xoffset += int64(Widthptr)
-	Thearch.Gmove(cap, &dst)
-	dst.Type = Types[Tptr]
-	dst.Xoffset -= 2 * int64(Widthptr)
-	cgen_wb(base, &dst, needwritebarrier(&dst, base))
-	Regfree(&dst)
-
-	if !needFullUpdate {
-		if Debug_append > 0 {
-			Warn("append: len-only update")
-		}
-		// goto L2;
-		// L1:
-		//	update len only
-		// L2:
-		q := Gbranch(obj.AJMP, nil, 0)
-		Patch(p, Pc)
-		// At the goto above, src refers to cap and rlen holds the new len
-		if src.Op == OREGISTER || src.Op == OINDREG {
-			Regrealloc(&src)
-		}
-		Regrealloc(&rlen)
-		src.Xoffset -= int64(Widthptr)
-		Thearch.Gmove(&rlen, &src)
-		Regfree(&src)
-		Regfree(&rlen)
-		Patch(q, Pc)
-	}
-
-	// Copy data into place.
-	// Could do write barrier check around entire copy instead of each element.
-	// Could avoid reloading registers on each iteration if we know the cgen_wb
-	// is not going to use a write barrier.
-	i := 0
-	var r2 Node
-	for _, n2 := range n.List.Slice()[1:] {
-		Regalloc(&r1, Types[Tptr], nil)
-		Thearch.Gmove(base, &r1)
-		Regalloc(&r2, Types[TUINT], nil)
-		Thearch.Gmove(len, &r2)
-		if i > 0 {
-			Thearch.Gins(Thearch.Optoas(OADD, Types[TUINT]), Nodintconst(int64(i)), &r2)
-		}
-		w := res.Type.Elem().Width
-		if Thearch.AddIndex != nil && Thearch.AddIndex(&r2, w, &r1) {
-			// r1 updated by back end
-		} else if w == 1 {
-			Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), &r2, &r1)
-		} else {
-			Thearch.Ginscon(Thearch.Optoas(OMUL, Types[TUINT]), w, &r2)
-			Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), &r2, &r1)
-		}
-		Regfree(&r2)
-
-		r1.Op = OINDREG
-		r1.Type = res.Type.Elem()
-		cgen_wb(n2, &r1, needwritebarrier(&r1, n2))
-		Regfree(&r1)
-		i++
-	}
-}
-
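-// A Go-level sketch of the fast/slow path cgen_append open-codes above:
-// compare len+argc against cap, grow only on overflow, then store the
-// new elements. The names and the int element type are hypothetical, and
-// make/copy stands in for the runtime.growslice call.
-func appendSketch(s []int, xs ...int) []int {
-	if len(s)+len(xs) > cap(s) {
-		t := make([]int, len(s), 2*(len(s)+len(xs))) // grow
-		copy(t, s)
-		s = t
-	}
-	n := len(s)
-	s = s[:n+len(xs)]
-	for i, x := range xs {
-		s[n+i] = x
-	}
-	return s
-}
-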
-// Generate res = n, where n is x[i:j] or x[i:j:k].
-// If wb is true, need write barrier updating res's base pointer.
-// On systems with 32-bit ints, i, j, k are guaranteed to be 32-bit values.
-func cgen_slice(n, res *Node, wb bool) {
-	if Debug['g'] != 0 {
-		Dump("cgen_slice-n", n)
-		Dump("cgen_slice-res", res)
-	}
-
-	needFullUpdate := !samesafeexpr(n.Left, res)
-
-	// orderexpr has made sure that x is safe (but possibly expensive)
-	// and i, j, k are cheap. On a system with registers (anything but 386)
-	// we can evaluate x first and then know we have enough registers
-	// for i, j, k as well.
-	var x, xbase, xlen, xcap, i, j, k Node
-	if n.Op != OSLICEARR && n.Op != OSLICE3ARR {
-		Igen(n.Left, &x, nil)
-	}
-
-	indexRegType := Types[TUINT]
-	if Widthreg > Widthptr { // amd64p32
-		indexRegType = Types[TUINT64]
-	}
-
-	// On most systems, we use registers.
-	// The 386 has basically no registers, so substitute functions
-	// that can work with temporaries instead.
-	regalloc := Regalloc
-	ginscon := Thearch.Ginscon
-	gins := Thearch.Gins
-	if Thearch.LinkArch.Family == sys.I386 {
-		regalloc = func(n *Node, t *Type, reuse *Node) {
-			Tempname(n, t)
-		}
-		ginscon = func(as obj.As, c int64, n *Node) {
-			var n1 Node
-			Regalloc(&n1, n.Type, n)
-			Thearch.Gmove(n, &n1)
-			Thearch.Ginscon(as, c, &n1)
-			Thearch.Gmove(&n1, n)
-			Regfree(&n1)
-		}
-		gins = func(as obj.As, f, t *Node) *obj.Prog {
-			var n1 Node
-			Regalloc(&n1, t.Type, t)
-			Thearch.Gmove(t, &n1)
-			Thearch.Gins(as, f, &n1)
-			Thearch.Gmove(&n1, t)
-			Regfree(&n1)
-			return nil
-		}
-	}
-
-	panics := make([]*obj.Prog, 0, 6) // 3 loads + 3 checks
-
-	loadlen := func() {
-		if xlen.Op != 0 {
-			return
-		}
-		if n.Op == OSLICEARR || n.Op == OSLICE3ARR {
-			Nodconst(&xlen, indexRegType, n.Left.Type.Elem().NumElem())
-			return
-		}
-		if n.Op == OSLICESTR && Isconst(n.Left, CTSTR) {
-			Nodconst(&xlen, indexRegType, int64(len(n.Left.Val().U.(string))))
-			return
-		}
-		regalloc(&xlen, indexRegType, nil)
-		x.Xoffset += int64(Widthptr)
-		x.Type = Types[TUINT]
-		Thearch.Gmove(&x, &xlen)
-		x.Xoffset -= int64(Widthptr)
-	}
-
-	loadcap := func() {
-		if xcap.Op != 0 {
-			return
-		}
-		if n.Op == OSLICEARR || n.Op == OSLICE3ARR || n.Op == OSLICESTR {
-			loadlen()
-			xcap = xlen
-			if xcap.Op == OREGISTER {
-				Regrealloc(&xcap)
-			}
-			return
-		}
-		regalloc(&xcap, indexRegType, nil)
-		x.Xoffset += 2 * int64(Widthptr)
-		x.Type = Types[TUINT]
-		Thearch.Gmove(&x, &xcap)
-		x.Xoffset -= 2 * int64(Widthptr)
-	}
-
-	x1, x2, x3 := n.SliceBounds() // unevaluated index arguments
-
-	// load computes src into targ, but if src refers to the len or cap of n.Left,
-	// load copies those from xlen, xcap, loading xlen if needed.
-	// If targ.Op == OREGISTER on return, it must be Regfreed,
-	// but it should not be modified without first checking whether it is
-	// xlen or xcap's register.
-	load := func(src, targ *Node) {
-		if src == nil {
-			return
-		}
-		switch src.Op {
-		case OLITERAL:
-			*targ = *src
-			return
-		case OLEN:
-			// NOTE(rsc): This doesn't actually trigger, because order.go
-			// has pulled all the len and cap calls into separate assignments
-			// to temporaries. There are tests in test/sliceopt.go that could
-			// be enabled if this is fixed.
-			if samesafeexpr(n.Left, src.Left) {
-				if Debug_slice > 0 {
-					Warn("slice: reuse len")
-				}
-				loadlen()
-				*targ = xlen
-				if targ.Op == OREGISTER {
-					Regrealloc(targ)
-				}
-				return
-			}
-		case OCAP:
-			// NOTE(rsc): This doesn't actually trigger; see note in case OLEN above.
-			if samesafeexpr(n.Left, src.Left) {
-				if Debug_slice > 0 {
-					Warn("slice: reuse cap")
-				}
-				loadcap()
-				*targ = xcap
-				if targ.Op == OREGISTER {
-					Regrealloc(targ)
-				}
-				return
-			}
-		}
-		if i.Op != 0 && samesafeexpr(x1, src) {
-			if Debug_slice > 0 {
-				Warn("slice: reuse 1st index")
-			}
-			*targ = i
-			if targ.Op == OREGISTER {
-				Regrealloc(targ)
-			}
-			return
-		}
-		if j.Op != 0 && samesafeexpr(x2, src) {
-			if Debug_slice > 0 {
-				Warn("slice: reuse 2nd index")
-			}
-			*targ = j
-			if targ.Op == OREGISTER {
-				Regrealloc(targ)
-			}
-			return
-		}
-		if Thearch.Cgenindex != nil {
-			regalloc(targ, indexRegType, nil)
-			p := Thearch.Cgenindex(src, targ, false)
-			if p != nil {
-				panics = append(panics, p)
-			}
-		} else if Thearch.Igenindex != nil {
-			p := Thearch.Igenindex(src, targ, false)
-			if p != nil {
-				panics = append(panics, p)
-			}
-		} else {
-			regalloc(targ, indexRegType, nil)
-			var tmp Node
-			Cgenr(src, &tmp, targ)
-			Thearch.Gmove(&tmp, targ)
-			Regfree(&tmp)
-		}
-	}
-
-	load(x1, &i)
-	load(x2, &j)
-	load(x3, &k)
-
-	// i defaults to 0.
-	if i.Op == 0 {
-		Nodconst(&i, indexRegType, 0)
-	}
-
-	// j defaults to len(x)
-	if j.Op == 0 {
-		loadlen()
-		j = xlen
-		if j.Op == OREGISTER {
-			Regrealloc(&j)
-		}
-	}
-
-	// k defaults to cap(x)
-	// Only need to load it if we're recalculating cap or doing a full update.
-	if k.Op == 0 && n.Op != OSLICESTR && (!iszero(&i) || needFullUpdate) {
-		loadcap()
-		k = xcap
-		if k.Op == OREGISTER {
-			Regrealloc(&k)
-		}
-	}
-
-	// Check constant indexes for negative values, and against constant length if known.
-	// The func obvious below checks for out-of-order constant indexes.
-	var bound int64 = -1
-	if n.Op == OSLICEARR || n.Op == OSLICE3ARR {
-		bound = n.Left.Type.Elem().NumElem()
-	} else if n.Op == OSLICESTR && Isconst(n.Left, CTSTR) {
-		bound = int64(len(n.Left.Val().U.(string)))
-	}
-	if Isconst(&i, CTINT) {
-		if i.Val().U.(*Mpint).CmpInt64(0) < 0 || bound >= 0 && i.Val().U.(*Mpint).CmpInt64(bound) > 0 {
-			Yyerror("slice index out of bounds")
-		}
-	}
-	if Isconst(&j, CTINT) {
-		if j.Val().U.(*Mpint).CmpInt64(0) < 0 || bound >= 0 && j.Val().U.(*Mpint).CmpInt64(bound) > 0 {
-			Yyerror("slice index out of bounds")
-		}
-	}
-	if Isconst(&k, CTINT) {
-		if k.Val().U.(*Mpint).CmpInt64(0) < 0 || bound >= 0 && k.Val().U.(*Mpint).CmpInt64(bound) > 0 {
-			Yyerror("slice index out of bounds")
-		}
-	}
-
-	// same reports whether n1 and n2 are the same register or constant.
-	same := func(n1, n2 *Node) bool {
-		return n1.Op == OREGISTER && n2.Op == OREGISTER && n1.Reg == n2.Reg ||
-			n1.Op == ONAME && n2.Op == ONAME && n1.Orig == n2.Orig && n1.Type == n2.Type && n1.Xoffset == n2.Xoffset ||
-			n1.Op == OLITERAL && n2.Op == OLITERAL && n1.Val().U.(*Mpint).Cmp(n2.Val().U.(*Mpint)) == 0
-	}
-
-	// obvious reports whether n1 <= n2 is obviously true,
-	// and it calls Yyerror if n1 <= n2 is obviously false.
-	obvious := func(n1, n2 *Node) bool {
-		if Debug['B'] != 0 { // -B disables bounds checks
-			return true
-		}
-		if same(n1, n2) {
-			return true // n1 == n2
-		}
-		if iszero(n1) {
-			return true // using unsigned compare, so 0 <= n2 always true
-		}
-		if xlen.Op != 0 && same(n1, &xlen) && xcap.Op != 0 && same(n2, &xcap) {
-			return true // len(x) <= cap(x) always true
-		}
-		if Isconst(n1, CTINT) && Isconst(n2, CTINT) {
-			if n1.Val().U.(*Mpint).Cmp(n2.Val().U.(*Mpint)) <= 0 {
-				return true // n1, n2 constants such that n1 <= n2
-			}
-			Yyerror("slice index out of bounds")
-			return true
-		}
-		return false
-	}
-
-	compare := func(n1, n2 *Node) {
-		// n1 might be a 64-bit constant, even on 32-bit architectures,
-		// but by this point its value must fit in 32 bits;
-		// otherwise an earlier bounds check was missed.
-		if Ctxt.Arch.RegSize == 4 && Is64(n1.Type) {
-			if n1.Val().U.(*Mpint).CmpInt64(1<<31) >= 0 {
-				Fatalf("missed slice out of bounds check")
-			}
-			var tmp Node
-			Nodconst(&tmp, indexRegType, n1.Int64())
-			n1 = &tmp
-		}
-		p := Thearch.Ginscmp(OGT, indexRegType, n1, n2, -1)
-		panics = append(panics, p)
-	}
-
-	loadcap()
-	max := &xcap
-	if k.Op != 0 && (n.Op == OSLICE3 || n.Op == OSLICE3ARR) {
-		if obvious(&k, max) {
-			if Debug_slice > 0 {
-				Warn("slice: omit check for 3rd index")
-			}
-		} else {
-			compare(&k, max)
-		}
-		max = &k
-	}
-	if j.Op != 0 {
-		if obvious(&j, max) {
-			if Debug_slice > 0 {
-				Warn("slice: omit check for 2nd index")
-			}
-		} else {
-			compare(&j, max)
-		}
-		max = &j
-	}
-	if i.Op != 0 {
-		if obvious(&i, max) {
-			if Debug_slice > 0 {
-				Warn("slice: omit check for 1st index")
-			}
-		} else {
-			compare(&i, max)
-		}
-		max = &i
-	}
-	if k.Op != 0 && i.Op != 0 {
-		obvious(&i, &k) // emit compile-time error for x[3:n:2]
-	}
-
-	if len(panics) > 0 {
-		p := Gbranch(obj.AJMP, nil, 0)
-		for _, q := range panics {
-			Patch(q, Pc)
-		}
-		Ginscall(panicslice, -1)
-		Patch(p, Pc)
-	}
-
-	// Checks are done.
-	// Compute new len as j-i, cap as k-i.
-	// If i and j are same register, len is constant 0.
-	// If i and k are same register, cap is constant 0.
-	// If j and k are same register, len and cap are same.
-
-	// Done with xlen and xcap.
-	// Now safe to modify j and k even if they alias xlen, xcap.
-	if xlen.Op == OREGISTER {
-		Regfree(&xlen)
-	}
-	if xcap.Op == OREGISTER {
-		Regfree(&xcap)
-	}
-
-	// are j and k the same value?
-	sameJK := same(&j, &k)
-
-	if i.Op != 0 {
-		// j -= i
-		if same(&i, &j) {
-			if Debug_slice > 0 {
-				Warn("slice: result len == 0")
-			}
-			if j.Op == OREGISTER {
-				Regfree(&j)
-			}
-			Nodconst(&j, indexRegType, 0)
-		} else {
-			switch j.Op {
-			case OLITERAL:
-				if Isconst(&i, CTINT) {
-					Nodconst(&j, indexRegType, j.Int64()-i.Int64())
-					if Debug_slice > 0 {
-						Warn("slice: result len == %d", j.Int64())
-					}
-					break
-				}
-				fallthrough
-			case ONAME:
-				if !istemp(&j) {
-					var r Node
-					regalloc(&r, indexRegType, nil)
-					Thearch.Gmove(&j, &r)
-					j = r
-				}
-				fallthrough
-			case OREGISTER:
-				if i.Op == OLITERAL {
-					v := i.Int64()
-					if v != 0 {
-						ginscon(Thearch.Optoas(OSUB, indexRegType), v, &j)
-					}
-				} else {
-					gins(Thearch.Optoas(OSUB, indexRegType), &i, &j)
-				}
-			}
-		}
-
-	// k -= i if k is different from j and cap is needed.
-		// (The modifications to j above cannot affect i: if j and i were aliased,
-		// we replace j with a constant 0 instead of doing a subtraction,
-		// leaving i unmodified.)
-		if k.Op == 0 {
-			if Debug_slice > 0 && n.Op != OSLICESTR {
-				Warn("slice: result cap not computed")
-			}
-			// no need
-		} else if same(&i, &k) {
-			if k.Op == OREGISTER {
-				Regfree(&k)
-			}
-			Nodconst(&k, indexRegType, 0)
-			if Debug_slice > 0 {
-				Warn("slice: result cap == 0")
-			}
-		} else if sameJK {
-			if Debug_slice > 0 {
-				Warn("slice: result cap == result len")
-			}
-			// k and j were the same value; make k-i the same as j-i.
-			if k.Op == OREGISTER {
-				Regfree(&k)
-			}
-			k = j
-			if k.Op == OREGISTER {
-				Regrealloc(&k)
-			}
-		} else {
-			switch k.Op {
-			case OLITERAL:
-				if Isconst(&i, CTINT) {
-					Nodconst(&k, indexRegType, k.Int64()-i.Int64())
-					if Debug_slice > 0 {
-						Warn("slice: result cap == %d", k.Int64())
-					}
-					break
-				}
-				fallthrough
-			case ONAME:
-				if !istemp(&k) {
-					var r Node
-					regalloc(&r, indexRegType, nil)
-					Thearch.Gmove(&k, &r)
-					k = r
-				}
-				fallthrough
-			case OREGISTER:
-				if same(&i, &k) {
-					Regfree(&k)
-					Nodconst(&k, indexRegType, 0)
-					if Debug_slice > 0 {
-						Warn("slice: result cap == 0")
-					}
-				} else if i.Op == OLITERAL {
-					v := i.Int64()
-					if v != 0 {
-						ginscon(Thearch.Optoas(OSUB, indexRegType), v, &k)
-					}
-				} else {
-					gins(Thearch.Optoas(OSUB, indexRegType), &i, &k)
-				}
-			}
-		}
-	}
-
-	adjustBase := true
-	if i.Op == 0 || iszero(&i) {
-		if Debug_slice > 0 {
-			Warn("slice: skip base adjustment for 1st index 0")
-		}
-		adjustBase = false
-	} else if k.Op != 0 && iszero(&k) || k.Op == 0 && iszero(&j) {
-		if Debug_slice > 0 {
-			if n.Op == OSLICESTR {
-				Warn("slice: skip base adjustment for string len == 0")
-			} else {
-				Warn("slice: skip base adjustment for cap == 0")
-			}
-		}
-		adjustBase = false
-	}
-
-	if !adjustBase && !needFullUpdate {
-		if Debug_slice > 0 {
-			if k.Op != 0 {
-				Warn("slice: len/cap-only update")
-			} else {
-				Warn("slice: len-only update")
-			}
-		}
-		if i.Op == OREGISTER {
-			Regfree(&i)
-		}
-		// Write len (and cap if needed) back to x.
-		x.Xoffset += int64(Widthptr)
-		x.Type = Types[TUINT]
-		Thearch.Gmove(&j, &x)
-		x.Xoffset -= int64(Widthptr)
-		if k.Op != 0 {
-			x.Xoffset += 2 * int64(Widthptr)
-			x.Type = Types[TUINT]
-			Thearch.Gmove(&k, &x)
-			x.Xoffset -= 2 * int64(Widthptr)
-		}
-		Regfree(&x)
-	} else {
-		// Compute new base. May smash i.
-		if n.Op == OSLICEARR || n.Op == OSLICE3ARR {
-			Cgenr(n.Left, &xbase, nil)
-			Cgen_checknil(&xbase)
-		} else {
-			var ptr *Type
-			if n.Op == OSLICESTR {
-				ptr = ptrToUint8
-			} else {
-				ptr = Ptrto(n.Type.Elem())
-			}
-			regalloc(&xbase, ptr, nil)
-			x.Type = xbase.Type
-			Thearch.Gmove(&x, &xbase)
-			Regfree(&x)
-		}
-		if i.Op != 0 && adjustBase {
-			// Branch around the base adjustment if the resulting cap will be 0.
-			var p *obj.Prog
-			size := &k
-			if k.Op == 0 {
-				size = &j
-			}
-			if Isconst(size, CTINT) {
-				// a constant size was checked for zero above, so it must be non-zero here.
-			} else {
-				var tmp Node
-				Nodconst(&tmp, indexRegType, 0)
-				p = Thearch.Ginscmp(OEQ, indexRegType, size, &tmp, -1)
-			}
-			var w int64
-			if n.Op == OSLICESTR {
-				w = 1 // res is string, elem size is 1 (byte)
-			} else {
-				w = res.Type.Elem().Width // res is []T, elem size is T.width
-			}
-			if Isconst(&i, CTINT) {
-				ginscon(Thearch.Optoas(OADD, xbase.Type), i.Int64()*w, &xbase)
-			} else if Thearch.AddIndex != nil && Thearch.AddIndex(&i, w, &xbase) {
-				// done by back end
-			} else if w == 1 {
-				gins(Thearch.Optoas(OADD, xbase.Type), &i, &xbase)
-			} else {
-				if i.Op == ONAME && !istemp(&i) {
-					var tmp Node
-					Tempname(&tmp, i.Type)
-					Thearch.Gmove(&i, &tmp)
-					i = tmp
-				}
-				ginscon(Thearch.Optoas(OMUL, i.Type), w, &i)
-				gins(Thearch.Optoas(OADD, xbase.Type), &i, &xbase)
-			}
-			if p != nil {
-				Patch(p, Pc)
-			}
-		}
-		if i.Op == OREGISTER {
-			Regfree(&i)
-		}
-
-		// Write len, cap, base to result.
-		if res.Op == ONAME {
-			Gvardef(res)
-		}
-		Igen(res, &x, nil)
-		x.Xoffset += int64(Widthptr)
-		x.Type = Types[TUINT]
-		Thearch.Gmove(&j, &x)
-		x.Xoffset -= int64(Widthptr)
-		if k.Op != 0 {
-			x.Xoffset += 2 * int64(Widthptr)
-			Thearch.Gmove(&k, &x)
-			x.Xoffset -= 2 * int64(Widthptr)
-		}
-		x.Type = xbase.Type
-		cgen_wb(&xbase, &x, wb)
-		Regfree(&xbase)
-		Regfree(&x)
-	}
-
-	if j.Op == OREGISTER {
-		Regfree(&j)
-	}
-	if k.Op == OREGISTER {
-		Regfree(&k)
-	}
-}
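-
-// A hedged sketch of the arithmetic cgen_slice emits above: len = j-i,
-// cap = k-i, and the base advances by i elements unless the result is
-// empty. Function and parameter names are hypothetical.
-func sliceParts(base, elemSize, i, j, k uintptr) (newBase, newLen, newCap uintptr) {
-	newLen = j - i
-	newCap = k - i
-	newBase = base
-	if newCap != 0 { // base adjustment is skipped when the result cap is 0
-		newBase += i * elemSize
-	}
-	return newBase, newLen, newCap
-}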
diff --git a/src/cmd/compile/internal/gc/cplx.go b/src/cmd/compile/internal/gc/cplx.go
deleted file mode 100644
index 96a1dfb..0000000
--- a/src/cmd/compile/internal/gc/cplx.go
+++ /dev/null
@@ -1,474 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import "cmd/internal/obj"
-
-func overlap_cplx(f *Node, t *Node) bool {
-	// check whether f and t could be overlapping stack references.
-	// not exact, because it's hard to check for the stack register
-	// in portable code.  close enough: worst case we will allocate
-	// an extra temporary and the registerizer will clean it up.
-	return f.Op == OINDREG && t.Op == OINDREG && f.Xoffset+f.Type.Width >= t.Xoffset && t.Xoffset+t.Type.Width >= f.Xoffset
-}
-
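-// overlap_cplx above is a conservative interval test: the spans
-// [f.off, f.off+f.w] and [t.off, t.off+t.w] are treated as overlapping
-// whenever each start is at or below the other's end. A standalone
-// sketch with hypothetical names:
-func spansMayOverlap(aOff, aWidth, bOff, bWidth int64) bool {
-	return aOff+aWidth >= bOff && bOff+bWidth >= aOff
-}
-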
-func complexbool(op Op, nl, nr, res *Node, wantTrue bool, likely int, to *obj.Prog) {
-	// make both sides addable in ullman order
-	if nr != nil {
-		if nl.Ullman > nr.Ullman && !nl.Addable {
-			nl = CgenTemp(nl)
-		}
-
-		if !nr.Addable {
-			nr = CgenTemp(nr)
-		}
-	}
-	if !nl.Addable {
-		nl = CgenTemp(nl)
-	}
-
-	// Break nl and nr into real and imaginary components.
-	var lreal, limag, rreal, rimag Node
-	subnode(&lreal, &limag, nl)
-	subnode(&rreal, &rimag, nr)
-
-	// build tree
-	// if branching:
-	// 	real(l) == real(r) && imag(l) == imag(r)
-	// if generating a value, use a branch-free version:
-	// 	real(l) == real(r) & imag(l) == imag(r)
-	realeq := Node{
-		Op:    OEQ,
-		Left:  &lreal,
-		Right: &rreal,
-		Type:  Types[TBOOL],
-	}
-	imageq := Node{
-		Op:    OEQ,
-		Left:  &limag,
-		Right: &rimag,
-		Type:  Types[TBOOL],
-	}
-	and := Node{
-		Op:    OANDAND,
-		Left:  &realeq,
-		Right: &imageq,
-		Type:  Types[TBOOL],
-	}
-
-	if res != nil {
-		// generating a value
-		and.Op = OAND
-		if op == ONE {
-			and.Op = OOR
-			realeq.Op = ONE
-			imageq.Op = ONE
-		}
-		Bvgen(&and, res, true)
-		return
-	}
-
-	// generating a branch
-	if op == ONE {
-		wantTrue = !wantTrue
-	}
-
-	Bgen(&and, wantTrue, likely, to)
-}
-
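-// complexbool above lowers complex comparison to per-component
-// comparisons: == is a conjunction over real and imaginary parts, and
-// != is its De Morgan dual. A runnable sketch, names hypothetical:
-func complexEq(l, r complex128) bool {
-	return real(l) == real(r) && imag(l) == imag(r)
-}
-
-func complexNe(l, r complex128) bool {
-	return real(l) != real(r) || imag(l) != imag(r)
-}
-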
-// break addable nc-complex into nr-real and ni-imaginary
-func subnode(nr *Node, ni *Node, nc *Node) {
-	if !nc.Addable {
-		Fatalf("subnode not addable")
-	}
-
-	tc := Simsimtype(nc.Type)
-	tc = cplxsubtype(tc)
-	t := Types[tc]
-
-	if nc.Op == OLITERAL {
-		u := nc.Val().U.(*Mpcplx)
-		nodfconst(nr, t, &u.Real)
-		nodfconst(ni, t, &u.Imag)
-		return
-	}
-
-	*nr = *nc
-	nr.Type = t
-
-	*ni = *nc
-	ni.Type = t
-	ni.Xoffset += t.Width
-}
-
-// generate code res = -nl
-func minus(nl *Node, res *Node) {
-	var ra Node
-	ra.Op = OMINUS
-	ra.Left = nl
-	ra.Type = nl.Type
-	Cgen(&ra, res)
-}
-
-// build and execute tree
-//	real(res) = -real(nl)
-//	imag(res) = -imag(nl)
-func complexminus(nl *Node, res *Node) {
-	var n1 Node
-	var n2 Node
-	var n5 Node
-	var n6 Node
-
-	subnode(&n1, &n2, nl)
-	subnode(&n5, &n6, res)
-
-	minus(&n1, &n5)
-	minus(&n2, &n6)
-}
-
-// build and execute tree
-//	real(res) = real(nl) op real(nr)
-//	imag(res) = imag(nl) op imag(nr)
-func complexadd(op Op, nl *Node, nr *Node, res *Node) {
-	var n1 Node
-	var n2 Node
-	var n3 Node
-	var n4 Node
-	var n5 Node
-	var n6 Node
-
-	subnode(&n1, &n2, nl)
-	subnode(&n3, &n4, nr)
-	subnode(&n5, &n6, res)
-
-	var ra Node
-	ra.Op = op
-	ra.Left = &n1
-	ra.Right = &n3
-	ra.Type = n1.Type
-	Cgen(&ra, &n5)
-
-	ra = Node{}
-	ra.Op = op
-	ra.Left = &n2
-	ra.Right = &n4
-	ra.Type = n2.Type
-	Cgen(&ra, &n6)
-}
-
-// build and execute tree
-//	tmp       = real(nl)*real(nr) - imag(nl)*imag(nr)
-//	imag(res) = real(nl)*imag(nr) + imag(nl)*real(nr)
-//	real(res) = tmp
-func complexmul(nl *Node, nr *Node, res *Node) {
-	var n1 Node
-	var n2 Node
-	var n3 Node
-	var n4 Node
-	var n5 Node
-	var n6 Node
-	var tmp Node
-
-	subnode(&n1, &n2, nl)
-	subnode(&n3, &n4, nr)
-	subnode(&n5, &n6, res)
-	Tempname(&tmp, n5.Type)
-
-	// real part -> tmp
-	var rm1 Node
-
-	rm1.Op = OMUL
-	rm1.Left = &n1
-	rm1.Right = &n3
-	rm1.Type = n1.Type
-
-	var rm2 Node
-	rm2.Op = OMUL
-	rm2.Left = &n2
-	rm2.Right = &n4
-	rm2.Type = n2.Type
-
-	var ra Node
-	ra.Op = OSUB
-	ra.Left = &rm1
-	ra.Right = &rm2
-	ra.Type = rm1.Type
-	Cgen(&ra, &tmp)
-
-	// imag part
-	rm1 = Node{}
-
-	rm1.Op = OMUL
-	rm1.Left = &n1
-	rm1.Right = &n4
-	rm1.Type = n1.Type
-
-	rm2 = Node{}
-	rm2.Op = OMUL
-	rm2.Left = &n2
-	rm2.Right = &n3
-	rm2.Type = n2.Type
-
-	ra = Node{}
-	ra.Op = OADD
-	ra.Left = &rm1
-	ra.Right = &rm2
-	ra.Type = rm1.Type
-	Cgen(&ra, &n6)
-
-	// tmp -> real part
-	Cgen(&tmp, &n5)
-}
-
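-// complexmul above encodes the textbook identity
-// (a+bi)(c+di) = (ac-bd) + (ad+bc)i, computing the real part into a
-// temporary first so res may alias an operand. A value-level sketch
-// with hypothetical names:
-func complexMulParts(a, b, c, d float64) (re, im float64) {
-	tmp := a*c - b*d // real part, held aside like tmp above
-	im = a*d + b*c   // imaginary part is written first
-	re = tmp
-	return re, im
-}
-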
-func nodfconst(n *Node, t *Type, fval *Mpflt) {
-	*n = Node{}
-	n.Op = OLITERAL
-	n.Addable = true
-	ullmancalc(n)
-	n.SetVal(Val{fval})
-	n.Type = t
-
-	if !t.IsFloat() {
-		Fatalf("nodfconst: bad type %v", t)
-	}
-}
-
-func Complexop(n *Node, res *Node) bool {
-	if n != nil && n.Type != nil {
-		if n.Type.IsComplex() {
-			goto maybe
-		}
-	}
-
-	if res != nil && res.Type != nil {
-		if res.Type.IsComplex() {
-			goto maybe
-		}
-	}
-
-	if n.Op == OREAL || n.Op == OIMAG {
-		//dump("\ncomplex-yes", n);
-		return true
-	}
-
-	//dump("\ncomplex-no", n);
-	return false
-
-maybe:
-	switch n.Op {
-	case OCONV, // implemented ops
-		OADD,
-		OSUB,
-		OMUL,
-		OMINUS,
-		OCOMPLEX,
-		OREAL,
-		OIMAG:
-		//dump("\ncomplex-yes", n);
-		return true
-
-	case ODOT,
-		ODOTPTR,
-		OINDEX,
-		OIND,
-		ONAME:
-		//dump("\ncomplex-yes", n);
-		return true
-	}
-
-	//dump("\ncomplex-no", n);
-	return false
-}
-
-func Complexmove(f *Node, t *Node) {
-	if Debug['g'] != 0 {
-		Dump("\ncomplexmove-f", f)
-		Dump("complexmove-t", t)
-	}
-
-	if !t.Addable {
-		Fatalf("complexmove: to not addable")
-	}
-
-	ft := Simsimtype(f.Type)
-	tt := Simsimtype(t.Type)
-	// complex to complex move/convert.
-	// make f addable.
-	// also use a temporary if f and t might overlap on the stack.
-	if (ft == TCOMPLEX64 || ft == TCOMPLEX128) && (tt == TCOMPLEX64 || tt == TCOMPLEX128) {
-		if !f.Addable || overlap_cplx(f, t) {
-			var tmp Node
-			Tempname(&tmp, f.Type)
-			Complexmove(f, &tmp)
-			f = &tmp
-		}
-
-		var n1 Node
-		var n2 Node
-		subnode(&n1, &n2, f)
-		var n4 Node
-		var n3 Node
-		subnode(&n3, &n4, t)
-
-		Cgen(&n1, &n3)
-		Cgen(&n2, &n4)
-	} else {
-		Fatalf("complexmove: unknown conversion: %v -> %v\n", f.Type, t.Type)
-	}
-}
-
-func Complexgen(n *Node, res *Node) {
-	if Debug['g'] != 0 {
-		Dump("\ncomplexgen-n", n)
-		Dump("complexgen-res", res)
-	}
-
-	for n.Op == OCONVNOP {
-		n = n.Left
-	}
-
-	// pick off float/complex opcodes
-	switch n.Op {
-	case OCOMPLEX:
-		if res.Addable {
-			var n1 Node
-			var n2 Node
-			subnode(&n1, &n2, res)
-			var tmp Node
-			Tempname(&tmp, n1.Type)
-			Cgen(n.Left, &tmp)
-			Cgen(n.Right, &n2)
-			Cgen(&tmp, &n1)
-			return
-		}
-
-	case OREAL, OIMAG:
-		nl := n.Left
-		if !nl.Addable {
-			var tmp Node
-			Tempname(&tmp, nl.Type)
-			Complexgen(nl, &tmp)
-			nl = &tmp
-		}
-
-		var n1 Node
-		var n2 Node
-		subnode(&n1, &n2, nl)
-		if n.Op == OREAL {
-			Cgen(&n1, res)
-			return
-		}
-
-		Cgen(&n2, res)
-		return
-	}
-
-	// perform conversion from n to res
-	tl := Simsimtype(res.Type)
-
-	tl = cplxsubtype(tl)
-	tr := Simsimtype(n.Type)
-	tr = cplxsubtype(tr)
-	if tl != tr {
-		if !n.Addable {
-			var n1 Node
-			Tempname(&n1, n.Type)
-			Complexmove(n, &n1)
-			n = &n1
-		}
-
-		Complexmove(n, res)
-		return
-	}
-
-	if !res.Addable {
-		var n1 Node
-		Igen(res, &n1, nil)
-		Cgen(n, &n1)
-		Regfree(&n1)
-		return
-	}
-
-	if n.Addable {
-		Complexmove(n, res)
-		return
-	}
-
-	switch n.Op {
-	default:
-		Dump("complexgen: unknown op", n)
-		Fatalf("complexgen: unknown op %v", n.Op)
-
-	case ODOT,
-		ODOTPTR,
-		OINDEX,
-		OIND,
-		OCALLFUNC,
-		OCALLMETH,
-		OCALLINTER:
-		var n1 Node
-		Igen(n, &n1, res)
-
-		Complexmove(&n1, res)
-		Regfree(&n1)
-		return
-
-	case OCONV,
-		OADD,
-		OSUB,
-		OMUL,
-		OMINUS,
-		OCOMPLEX,
-		OREAL,
-		OIMAG:
-		break
-	}
-
-	nl := n.Left
-	if nl == nil {
-		return
-	}
-	nr := n.Right
-
-	// make both sides addable in ullman order
-	var tnl Node
-	if nr != nil {
-		if nl.Ullman > nr.Ullman && !nl.Addable {
-			Tempname(&tnl, nl.Type)
-			Cgen(nl, &tnl)
-			nl = &tnl
-		}
-
-		if !nr.Addable {
-			var tnr Node
-			Tempname(&tnr, nr.Type)
-			Cgen(nr, &tnr)
-			nr = &tnr
-		}
-	}
-
-	if !nl.Addable {
-		Tempname(&tnl, nl.Type)
-		Cgen(nl, &tnl)
-		nl = &tnl
-	}
-
-	switch n.Op {
-	default:
-		Fatalf("complexgen: unknown op %v", n.Op)
-
-	case OCONV:
-		Complexmove(nl, res)
-
-	case OMINUS:
-		complexminus(nl, res)
-
-	case OADD, OSUB:
-		complexadd(n.Op, nl, nr, res)
-
-	case OMUL:
-		complexmul(nl, nr, res)
-	}
-}
diff --git a/src/cmd/compile/internal/gc/gen.go b/src/cmd/compile/internal/gc/gen.go
index 21594c6..f948fc6 100644
--- a/src/cmd/compile/internal/gc/gen.go
+++ b/src/cmd/compile/internal/gc/gen.go
@@ -6,11 +6,7 @@
 
 package gc
 
-import (
-	"cmd/internal/obj"
-	"cmd/internal/sys"
-	"fmt"
-)
+import "fmt"
 
 // TODO: labellist should become part of a "compilation state" for functions.
 var labellist []*Label
@@ -291,370 +287,6 @@
 	return nil
 }
 
-// compile statements
-func Genlist(l Nodes) {
-	for _, n := range l.Slice() {
-		gen(n)
-	}
-}
-
-// generate code to start new proc running call n.
-func cgen_proc(n *Node, proc int) {
-	switch n.Left.Op {
-	default:
-		Fatalf("cgen_proc: unknown call %v", n.Left.Op)
-
-	case OCALLMETH:
-		cgen_callmeth(n.Left, proc)
-
-	case OCALLINTER:
-		cgen_callinter(n.Left, nil, proc)
-
-	case OCALLFUNC:
-		cgen_call(n.Left, proc)
-	}
-}
-
-// generate declaration.
-// have to allocate heap copy
-// for escaped variables.
-func cgen_dcl(n *Node) {
-	if Debug['g'] != 0 {
-		Dump("\ncgen-dcl", n)
-	}
-	if n.Op != ONAME {
-		Dump("cgen_dcl", n)
-		Fatalf("cgen_dcl")
-	}
-
-	if n.Class == PAUTOHEAP {
-		Fatalf("cgen_dcl %v", n)
-	}
-}
-
-// generate discard of value
-func cgen_discard(nr *Node) {
-	if nr == nil {
-		return
-	}
-
-	switch nr.Op {
-	case ONAME:
-		if nr.Class != PAUTOHEAP && nr.Class != PEXTERN && nr.Class != PFUNC {
-			gused(nr)
-		}
-
-		// binary
-	case OADD,
-		OAND,
-		ODIV,
-		OEQ,
-		OGE,
-		OGT,
-		OLE,
-		OLSH,
-		OLT,
-		OMOD,
-		OMUL,
-		ONE,
-		OOR,
-		ORSH,
-		OSUB,
-		OXOR:
-		cgen_discard(nr.Left)
-
-		cgen_discard(nr.Right)
-
-		// unary
-	case OCAP,
-		OCOM,
-		OLEN,
-		OMINUS,
-		ONOT,
-		OPLUS:
-		cgen_discard(nr.Left)
-
-	case OIND:
-		Cgen_checknil(nr.Left)
-
-		// special enough to just evaluate
-	default:
-		var tmp Node
-		Tempname(&tmp, nr.Type)
-
-		Cgen_as(&tmp, nr)
-		gused(&tmp)
-	}
-}
-
-// clearslim generates code to zero a slim node.
-func Clearslim(n *Node) {
-	var z Node
-	z.Op = OLITERAL
-	z.Type = n.Type
-	z.Addable = true
-
-	switch Simtype[n.Type.Etype] {
-	case TCOMPLEX64, TCOMPLEX128:
-		z.SetVal(Val{new(Mpcplx)})
-		z.Val().U.(*Mpcplx).Real.SetFloat64(0.0)
-		z.Val().U.(*Mpcplx).Imag.SetFloat64(0.0)
-
-	case TFLOAT32, TFLOAT64:
-		var zero Mpflt
-		zero.SetFloat64(0.0)
-		z.SetVal(Val{&zero})
-
-	case TPTR32, TPTR64, TCHAN, TMAP:
-		z.SetVal(Val{new(NilVal)})
-
-	case TBOOL:
-		z.SetVal(Val{false})
-
-	case TINT8,
-		TINT16,
-		TINT32,
-		TINT64,
-		TUINT8,
-		TUINT16,
-		TUINT32,
-		TUINT64:
-		z.SetVal(Val{new(Mpint)})
-		z.Val().U.(*Mpint).SetInt64(0)
-
-	default:
-		Fatalf("clearslim called on type %v", n.Type)
-	}
-
-	ullmancalc(&z)
-	Cgen(&z, n)
-}
-
-// generate:
-//	res = iface{typ, data}
-// n->left is typ
-// n->right is data
-func Cgen_eface(n *Node, res *Node) {
-	// the right node of an eface may contain function calls that use res
-	// as an argument, so it's important that it is evaluated first
-
-	tmp := temp(Types[Tptr])
-	Cgen(n.Right, tmp)
-
-	Gvardef(res)
-
-	dst := *res
-	dst.Type = Types[Tptr]
-	dst.Xoffset += int64(Widthptr)
-	Cgen(tmp, &dst)
-
-	dst.Xoffset -= int64(Widthptr)
-	Cgen(n.Left, &dst)
-}
-
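-// Cgen_eface above fills in the two-word empty-interface header
-// {typ, data}; the data expression is evaluated first because it may
-// itself read res. A layout sketch, names hypothetical:
-type efaceWords struct {
-	typ  uintptr // runtime type descriptor word
-	data uintptr // data word
-}
-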
-// generate one of:
-//	res, resok = x.(T)
-//	res = x.(T) (when resok == nil)
-// n.Left is x
-// n.Type is T
-func cgen_dottype(n *Node, res, resok *Node, wb bool) {
-	if Debug_typeassert > 0 {
-		Warn("type assertion inlined")
-	}
-	//	iface := n.Left
-	//	r1 := iword(iface)
-	//	if n.Left is non-empty interface {
-	//		r1 = *r1
-	//	}
-	//	if r1 == T {
-	//		res = idata(iface)
-	//		resok = true
-	//	} else {
-	//		assert[EI]2T(x, T, nil) // (when resok == nil; does not return)
-	//		resok = false // (when resok != nil)
-	//	}
-	//
-	var iface Node
-	Igen(n.Left, &iface, res)
-	var r1, r2 Node
-	byteptr := Ptrto(Types[TUINT8]) // type used in runtime prototypes for runtime type (*byte)
-	Regalloc(&r1, byteptr, nil)
-	iface.Type = byteptr
-	Cgen(&iface, &r1)
-	if !n.Left.Type.IsEmptyInterface() {
-		// Holding itab, want concrete type in second word.
-		p := Thearch.Ginscmp(OEQ, byteptr, &r1, Nodintconst(0), -1)
-		r2 = r1
-		r2.Op = OINDREG
-		r2.Xoffset = int64(Widthptr)
-		Cgen(&r2, &r1)
-		Patch(p, Pc)
-	}
-	Regalloc(&r2, byteptr, nil)
-	Cgen(typename(n.Type), &r2)
-	p := Thearch.Ginscmp(ONE, byteptr, &r1, &r2, -1)
-	Regfree(&r2) // not needed for success path; reclaimed on one failure path
-	iface.Xoffset += int64(Widthptr)
-	Cgen(&iface, &r1)
-	Regfree(&iface)
-
-	if resok == nil {
-		r1.Type = res.Type
-		cgen_wb(&r1, res, wb)
-		q := Gbranch(obj.AJMP, nil, 0)
-		Patch(p, Pc)
-		Regrealloc(&r2) // reclaim from above, for this failure path
-		fn := syslook("panicdottype")
-		dowidth(fn.Type)
-		call := Nod(OCALLFUNC, fn, nil)
-		r1.Type = byteptr
-		r2.Type = byteptr
-		call.List.Set([]*Node{&r1, &r2, typename(n.Left.Type)})
-		call.List.Set(ascompatte(OCALLFUNC, call, false, fn.Type.Params(), call.List.Slice(), 0, nil))
-		gen(call)
-		Regfree(&r1)
-		Regfree(&r2)
-		Thearch.Gins(obj.AUNDEF, nil, nil)
-		Patch(q, Pc)
-	} else {
-		// This half is handling the res, resok = x.(T) case,
-		// which is called from gen, not cgen, and is consequently fussier
-		// about blank assignments. We have to avoid calling cgen for those.
-		r1.Type = res.Type
-		if !isblank(res) {
-			cgen_wb(&r1, res, wb)
-		}
-		Regfree(&r1)
-		if !isblank(resok) {
-			Cgen(Nodbool(true), resok)
-		}
-		q := Gbranch(obj.AJMP, nil, 0)
-		Patch(p, Pc)
-		if !isblank(res) {
-			n := nodnil()
-			n.Type = res.Type
-			Cgen(n, res)
-		}
-		if !isblank(resok) {
-			Cgen(Nodbool(false), resok)
-		}
-		Patch(q, Pc)
-	}
-}
-
-// generate:
-//	res, resok = x.(T)
-// n.Left is x
-// n.Type is T
-func Cgen_As2dottype(n, res, resok *Node) {
-	if Debug_typeassert > 0 {
-		Warn("type assertion inlined")
-	}
-	//	iface := n.Left
-	//	r1 := iword(iface)
-	//	if n.Left is non-empty interface {
-	//		r1 = *r1
-	//	}
-	//	if r1 == T {
-	//		res = idata(iface)
-	//		resok = true
-	//	} else {
-	//		res = nil
-	//		resok = false
-	//	}
-	//
-	var iface Node
-	Igen(n.Left, &iface, nil)
-	var r1, r2 Node
-	byteptr := Ptrto(Types[TUINT8]) // type used in runtime prototypes for runtime type (*byte)
-	Regalloc(&r1, byteptr, res)
-	iface.Type = byteptr
-	Cgen(&iface, &r1)
-	if !n.Left.Type.IsEmptyInterface() {
-		// Holding itab, want concrete type in second word.
-		p := Thearch.Ginscmp(OEQ, byteptr, &r1, Nodintconst(0), -1)
-		r2 = r1
-		r2.Op = OINDREG
-		r2.Xoffset = int64(Widthptr)
-		Cgen(&r2, &r1)
-		Patch(p, Pc)
-	}
-	Regalloc(&r2, byteptr, nil)
-	Cgen(typename(n.Type), &r2)
-	p := Thearch.Ginscmp(ONE, byteptr, &r1, &r2, -1)
-	iface.Type = n.Type
-	iface.Xoffset += int64(Widthptr)
-	Cgen(&iface, &r1)
-	if iface.Op != 0 {
-		Regfree(&iface)
-	}
-	Cgen(&r1, res)
-	q := Gbranch(obj.AJMP, nil, 0)
-	Patch(p, Pc)
-
-	fn := syslook("panicdottype")
-	dowidth(fn.Type)
-	call := Nod(OCALLFUNC, fn, nil)
-	call.List.Set([]*Node{&r1, &r2, typename(n.Left.Type)})
-	call.List.Set(ascompatte(OCALLFUNC, call, false, fn.Type.Params(), call.List.Slice(), 0, nil))
-	gen(call)
-	Regfree(&r1)
-	Regfree(&r2)
-	Thearch.Gins(obj.AUNDEF, nil, nil)
-	Patch(q, Pc)
-}
-
-// gather series of offsets
-// >=0 is direct addressed field
-// <0 is pointer to next field (+1)
-func Dotoffset(n *Node, oary []int64, nn **Node) int {
-	var i int
-
-	switch n.Op {
-	case ODOT:
-		if n.Xoffset == BADWIDTH {
-			Dump("bad width in dotoffset", n)
-			Fatalf("bad width in dotoffset")
-		}
-
-		i = Dotoffset(n.Left, oary, nn)
-		if i > 0 {
-			if oary[i-1] >= 0 {
-				oary[i-1] += n.Xoffset
-			} else {
-				oary[i-1] -= n.Xoffset
-			}
-			break
-		}
-
-		if i < 10 {
-			oary[i] = n.Xoffset
-			i++
-		}
-
-	case ODOTPTR:
-		if n.Xoffset == BADWIDTH {
-			Dump("bad width in dotoffset", n)
-			Fatalf("bad width in dotoffset")
-		}
-
-		i = Dotoffset(n.Left, oary, nn)
-		if i < 10 {
-			oary[i] = -(n.Xoffset + 1)
-			i++
-		}
-
-	default:
-		*nn = n
-		return 0
-	}
-
-	if i >= 10 {
-		*nn = nil
-	}
-	return i
-}
-
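-// Dotoffset above packs a selector chain into signed offsets: an entry
-// >= 0 is a directly addressed field, and an entry < 0 means "follow
-// the pointer, then address at -(entry+1)". A decoding sketch with a
-// hypothetical name:
-func decodeDotOffset(entry int64) (offset int64, indirect bool) {
-	if entry >= 0 {
-		return entry, false
-	}
-	return -(entry + 1), true
-}
-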
 // make a new temporary off the books
 func Tempname(nn *Node, t *Type) {
 	if Curfn == nil {
@@ -697,347 +329,6 @@
 	return n.Orig
 }
 
-func gen(n *Node) {
-	//dump("gen", n);
-
-	lno := setlineno(n)
-
-	wasregalloc := Anyregalloc()
-
-	if n == nil {
-		goto ret
-	}
-
-	if n.Ninit.Len() > 0 {
-		Genlist(n.Ninit)
-	}
-
-	setlineno(n)
-
-	switch n.Op {
-	default:
-		Fatalf("gen: unknown op %+S", n)
-
-	case OCASE,
-		OFALL,
-		OXCASE,
-		OXFALL,
-		ODCLCONST,
-		ODCLFUNC,
-		ODCLTYPE:
-		break
-
-	case OEMPTY:
-		break
-
-	case OBLOCK:
-		Genlist(n.List)
-
-	case OLABEL:
-		if isblanksym(n.Left.Sym) {
-			break
-		}
-
-		lab := newlab(n)
-
-		// if there are pending gotos, resolve them all to the current pc.
-		var p2 *obj.Prog
-		for p1 := lab.Gotopc; p1 != nil; p1 = p2 {
-			p2 = unpatch(p1)
-			Patch(p1, Pc)
-		}
-
-		lab.Gotopc = nil
-		if lab.Labelpc == nil {
-			lab.Labelpc = Pc
-		}
-
-		if n.Name.Defn != nil {
-			switch n.Name.Defn.Op {
-			// so stmtlabel can find the label
-			case OFOR, OSWITCH, OSELECT:
-				n.Name.Defn.Sym = lab.Sym
-			}
-		}
-
-		// if label is defined, emit jump to it.
-	// otherwise save list of pending gotos in lab->gotopc.
-	// the list is linked through the normal jump target field
-	// to avoid a second list.  (the jumps are actually still
-	// valid code, since they're just going to another goto
-	// to the same label.  we'll unwind it when we learn the pc
-	// of the label in the OLABEL case above.)
-	case OGOTO:
-		lab := newlab(n)
-
-		if lab.Labelpc != nil {
-			gjmp(lab.Labelpc)
-		} else {
-			lab.Gotopc = gjmp(lab.Gotopc)
-		}
-
-	case OBREAK:
-		if n.Left != nil {
-			lab := n.Left.Sym.Label
-			if lab == nil {
-				Yyerror("break label not defined: %v", n.Left.Sym)
-				break
-			}
-
-			lab.Used = true
-			if lab.Breakpc == nil {
-				Yyerror("invalid break label %v", n.Left.Sym)
-				break
-			}
-
-			gjmp(lab.Breakpc)
-			break
-		}
-
-		if breakpc == nil {
-			Yyerror("break is not in a loop")
-			break
-		}
-
-		gjmp(breakpc)
-
-	case OCONTINUE:
-		if n.Left != nil {
-			lab := n.Left.Sym.Label
-			if lab == nil {
-				Yyerror("continue label not defined: %v", n.Left.Sym)
-				break
-			}
-
-			lab.Used = true
-			if lab.Continpc == nil {
-				Yyerror("invalid continue label %v", n.Left.Sym)
-				break
-			}
-
-			gjmp(lab.Continpc)
-			break
-		}
-
-		if continpc == nil {
-			Yyerror("continue is not in a loop")
-			break
-		}
-
-		gjmp(continpc)
-
-	case OFOR:
-		sbreak := breakpc
-		p1 := gjmp(nil)     //		goto test
-		breakpc = gjmp(nil) // break:	goto done
-		scontin := continpc
-		continpc = Pc
-
-		// define break and continue labels
-		lab := stmtlabel(n)
-		if lab != nil {
-			lab.Breakpc = breakpc
-			lab.Continpc = continpc
-		}
-
-		gen(n.Right)                     // contin:	incr
-		Patch(p1, Pc)                    // test:
-		Bgen(n.Left, false, -1, breakpc) //		if(!test) goto break
-		Genlist(n.Nbody)                 //		body
-		gjmp(continpc)
-		Patch(breakpc, Pc) // done:
-		continpc = scontin
-		breakpc = sbreak
-		if lab != nil {
-			lab.Breakpc = nil
-			lab.Continpc = nil
-		}
-
-	case OIF:
-		p1 := gjmp(nil)                         //		goto test
-		p2 := gjmp(nil)                         // p2:		goto else
-		Patch(p1, Pc)                           // test:
-		Bgen(n.Left, false, int(-n.Likely), p2) //		if(!test) goto p2
-		Genlist(n.Nbody)                        //		then
-		p3 := gjmp(nil)                         //		goto done
-		Patch(p2, Pc)                           // else:
-		Genlist(n.Rlist)                        //		else
-		Patch(p3, Pc)                           // done:
-
-	case OSWITCH:
-		sbreak := breakpc
-		p1 := gjmp(nil)     //		goto test
-		breakpc = gjmp(nil) // break:	goto done
-
-		// define break label
-		lab := stmtlabel(n)
-		if lab != nil {
-			lab.Breakpc = breakpc
-		}
-
-		Patch(p1, Pc)      // test:
-		Genlist(n.Nbody)   //		switch(test) body
-		Patch(breakpc, Pc) // done:
-		breakpc = sbreak
-		if lab != nil {
-			lab.Breakpc = nil
-		}
-
-	case OSELECT:
-		sbreak := breakpc
-		p1 := gjmp(nil)     //		goto test
-		breakpc = gjmp(nil) // break:	goto done
-
-		// define break label
-		lab := stmtlabel(n)
-		if lab != nil {
-			lab.Breakpc = breakpc
-		}
-
-		Patch(p1, Pc)      // test:
-		Genlist(n.Nbody)   //		select() body
-		Patch(breakpc, Pc) // done:
-		breakpc = sbreak
-		if lab != nil {
-			lab.Breakpc = nil
-		}
-
-	case ODCL:
-		cgen_dcl(n.Left)
-
-	case OAS:
-		if gen_as_init(n, false) {
-			break
-		}
-		Cgen_as(n.Left, n.Right)
-
-	case OASWB:
-		Cgen_as_wb(n.Left, n.Right, true)
-
-	case OAS2DOTTYPE:
-		cgen_dottype(n.Rlist.First(), n.List.First(), n.List.Second(), needwritebarrier(n.List.First(), n.Rlist.First()))
-
-	case OCALLMETH:
-		cgen_callmeth(n, 0)
-
-	case OCALLINTER:
-		cgen_callinter(n, nil, 0)
-
-	case OCALLFUNC:
-		cgen_call(n, 0)
-
-	case OPROC:
-		cgen_proc(n, 1)
-
-	case ODEFER:
-		cgen_proc(n, 2)
-
-	case ORETURN, ORETJMP:
-		cgen_ret(n)
-
-	// Function calls turned into compiler intrinsics.
-	// At top level, can just ignore the call and make sure to preserve side effects in the argument, if any.
-	case OGETG:
-		// nothing
-	case OSQRT:
-		cgen_discard(n.Left)
-
-	case OCHECKNIL:
-		Cgen_checknil(n.Left)
-
-	case OVARKILL:
-		Gvarkill(n.Left)
-
-	case OVARLIVE:
-		Gvarlive(n.Left)
-	}
-
-ret:
-	if Anyregalloc() != wasregalloc {
-		Dump("node", n)
-		Fatalf("registers left allocated")
-	}
-
-	lineno = lno
-}
-
-func Cgen_as(nl, nr *Node) {
-	Cgen_as_wb(nl, nr, false)
-}
-
-func Cgen_as_wb(nl, nr *Node, wb bool) {
-	if Debug['g'] != 0 {
-		op := "cgen_as"
-		if wb {
-			op = "cgen_as_wb"
-		}
-		Dump(op, nl)
-		Dump(op+" = ", nr)
-	}
-
-	for nr != nil && nr.Op == OCONVNOP {
-		nr = nr.Left
-	}
-
-	if nl == nil || isblank(nl) {
-		cgen_discard(nr)
-		return
-	}
-
-	if nr == nil || iszero(nr) {
-		tl := nl.Type
-		if tl == nil {
-			return
-		}
-		if Isfat(tl) {
-			if nl.Op == ONAME {
-				Gvardef(nl)
-			}
-			Thearch.Clearfat(nl)
-			return
-		}
-
-		Clearslim(nl)
-		return
-	}
-
-	tl := nl.Type
-	if tl == nil {
-		return
-	}
-
-	cgen_wb(nr, nl, wb)
-}
-
-func cgen_callmeth(n *Node, proc int) {
-	// generate a rewrite in n2 for the method call
-	// (p.f)(...) goes to (f)(p,...)
-
-	l := n.Left
-
-	if l.Op != ODOTMETH {
-		Fatalf("cgen_callmeth: not dotmethod: %v", l)
-	}
-
-	n2 := *n
-	n2.Op = OCALLFUNC
-	n2.Left = newname(l.Sym)
-	n2.Left.Type = l.Type
-
-	if n2.Left.Op == ONAME {
-		n2.Left.Class = PFUNC
-	}
-	cgen_call(&n2, proc)
-}
-
-// CgenTemp creates a temporary node, assigns n to it, and returns it.
-func CgenTemp(n *Node) *Node {
-	var tmp Node
-	Tempname(&tmp, n.Type)
-	Cgen(n, &tmp)
-	return &tmp
-}
-
 func checklabels() {
 	for _, lab := range labellist {
 		if lab.Def == nil {
@@ -1060,252 +351,3 @@
 		}
 	}
 }
-
-// Componentgen copies a composite value by moving its individual components.
-// Slices, strings and interfaces are supported. Small structs or arrays with
-// elements of basic type are also supported.
-// nr is nil when assigning a zero value.
-func Componentgen(nr, nl *Node) bool {
-	return componentgen_wb(nr, nl, false)
-}
-
-// componentgen_wb is like componentgen but if wb==true emits write barriers for pointer updates.
-func componentgen_wb(nr, nl *Node, wb bool) bool {
-	// Don't generate any code for complete copy of a variable into itself.
-	// It's useless, and the VARDEF will incorrectly mark the old value as dead.
-	// (This check assumes that the arguments passed to componentgen did not
-	// themselves come from Igen, or else we could have Op==ONAME but
-	// with a Type and Xoffset describing an individual field, not the entire
-	// variable.)
-	if nl.Op == ONAME && nl == nr {
-		return true
-	}
-
-	// Count number of moves required to move components.
-	// If using write barrier, can only emit one pointer.
-	// TODO(rsc): Allow more pointers, for reflect.Value.
-	const maxMoves = 8
-	n := 0
-	numPtr := 0
-	visitComponents(nl.Type, 0, func(t *Type, offset int64) bool {
-		n++
-		if Simtype[t.Etype] == Tptr && t != itable {
-			numPtr++
-		}
-		return n <= maxMoves && (!wb || numPtr <= 1)
-	})
-	if n > maxMoves || wb && numPtr > 1 {
-		return false
-	}
-
-	// Must call emitVardef after evaluating rhs but before writing to lhs.
-	emitVardef := func() {
-		// Emit vardef if needed.
-		if nl.Op == ONAME {
-			switch nl.Type.Etype {
-			case TARRAY, TSLICE, TSTRING, TINTER, TSTRUCT:
-				Gvardef(nl)
-			}
-		}
-	}
-
-	isConstString := Isconst(nr, CTSTR)
-
-	if !cadable(nl) && nr != nil && !cadable(nr) && !isConstString {
-		return false
-	}
-
-	var nodl Node
-	if cadable(nl) {
-		nodl = *nl
-	} else {
-		if nr != nil && !cadable(nr) && !isConstString {
-			return false
-		}
-		if nr == nil || isConstString || nl.Ullman >= nr.Ullman {
-			Igen(nl, &nodl, nil)
-			defer Regfree(&nodl)
-		}
-	}
-	lbase := nodl.Xoffset
-
-	// Special case: zeroing.
-	var nodr Node
-	if nr == nil {
-		// When zeroing, prepare a register containing zero.
-		// TODO(rsc): Check that this is actually generating the best code.
-		if Thearch.REGZERO != 0 {
-			// cpu has a dedicated zero register
-			Nodreg(&nodr, Types[TUINT], Thearch.REGZERO)
-		} else {
-			// no dedicated zero register
-			var zero Node
-			Nodconst(&zero, nl.Type, 0)
-			Regalloc(&nodr, Types[TUINT], nil)
-			Thearch.Gmove(&zero, &nodr)
-			defer Regfree(&nodr)
-		}
-
-		emitVardef()
-		visitComponents(nl.Type, 0, func(t *Type, offset int64) bool {
-			nodl.Type = t
-			nodl.Xoffset = lbase + offset
-			nodr.Type = t
-			if t.IsFloat() {
-				// TODO(rsc): Cache zero register like we do for integers?
-				Clearslim(&nodl)
-			} else {
-				Thearch.Gmove(&nodr, &nodl)
-			}
-			return true
-		})
-		return true
-	}
-
-	// Special case: assignment of string constant.
-	if isConstString {
-		emitVardef()
-
-		// base
-		nodl.Type = Ptrto(Types[TUINT8])
-		Regalloc(&nodr, Types[Tptr], nil)
-		p := Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), nil, &nodr)
-		Datastring(nr.Val().U.(string), &p.From)
-		p.From.Type = obj.TYPE_ADDR
-		Thearch.Gmove(&nodr, &nodl)
-		Regfree(&nodr)
-
-		// length
-		nodl.Type = Types[Simtype[TUINT]]
-		nodl.Xoffset += int64(Array_nel) - int64(Array_array)
-		Nodconst(&nodr, nodl.Type, int64(len(nr.Val().U.(string))))
-		Thearch.Gmove(&nodr, &nodl)
-		return true
-	}
-
-	// General case: copy nl = nr.
-	nodr = *nr
-	if !cadable(nr) {
-		if nr.Ullman >= UINF && nodl.Op == OINDREG {
-			Fatalf("miscompile")
-		}
-		Igen(nr, &nodr, nil)
-		defer Regfree(&nodr)
-	}
-	rbase := nodr.Xoffset
-
-	if nodl.Op == 0 {
-		Igen(nl, &nodl, nil)
-		defer Regfree(&nodl)
-		lbase = nodl.Xoffset
-	}
-
-	emitVardef()
-	var (
-		ptrType   *Type
-		ptrOffset int64
-	)
-	visitComponents(nl.Type, 0, func(t *Type, offset int64) bool {
-		if wb && Simtype[t.Etype] == Tptr && t != itable {
-			if ptrType != nil {
-				Fatalf("componentgen_wb %v", nl.Type)
-			}
-			ptrType = t
-			ptrOffset = offset
-			return true
-		}
-		nodl.Type = t
-		nodl.Xoffset = lbase + offset
-		nodr.Type = t
-		nodr.Xoffset = rbase + offset
-		Thearch.Gmove(&nodr, &nodl)
-		return true
-	})
-	if ptrType != nil {
-		nodl.Type = ptrType
-		nodl.Xoffset = lbase + ptrOffset
-		nodr.Type = ptrType
-		nodr.Xoffset = rbase + ptrOffset
-		cgen_wbptr(&nodr, &nodl)
-	}
-	return true
-}
-
-// visitComponents walks the individual components of the type t,
-// walking into array elements, struct fields, the real and imaginary
-// parts of complex numbers, and on 32-bit systems the high and
-// low halves of 64-bit integers.
-// It calls f for each such component, passing the component (aka element)
-// type and memory offset, assuming t starts at startOffset.
-// If f ever returns false, visitComponents returns false without any more
-// calls to f. Otherwise visitComponents returns true.
-func visitComponents(t *Type, startOffset int64, f func(elem *Type, elemOffset int64) bool) bool {
-	switch t.Etype {
-	case TINT64:
-		if Widthreg == 8 {
-			break
-		}
-		// NOTE: Assuming little endian (signed top half at offset 4).
-		// We don't have any 32-bit big-endian systems.
-		if !Thearch.LinkArch.InFamily(sys.ARM, sys.I386) {
-			Fatalf("unknown 32-bit architecture")
-		}
-		return f(Types[TUINT32], startOffset) &&
-			f(Types[TINT32], startOffset+4)
-
-	case TUINT64:
-		if Widthreg == 8 {
-			break
-		}
-		return f(Types[TUINT32], startOffset) &&
-			f(Types[TUINT32], startOffset+4)
-
-	case TCOMPLEX64:
-		return f(Types[TFLOAT32], startOffset) &&
-			f(Types[TFLOAT32], startOffset+4)
-
-	case TCOMPLEX128:
-		return f(Types[TFLOAT64], startOffset) &&
-			f(Types[TFLOAT64], startOffset+8)
-
-	case TINTER:
-		return f(itable, startOffset) &&
-			f(Ptrto(Types[TUINT8]), startOffset+int64(Widthptr))
-
-	case TSTRING:
-		return f(Ptrto(Types[TUINT8]), startOffset) &&
-			f(Types[Simtype[TUINT]], startOffset+int64(Widthptr))
-
-	case TSLICE:
-		return f(Ptrto(t.Elem()), startOffset+int64(Array_array)) &&
-			f(Types[Simtype[TUINT]], startOffset+int64(Array_nel)) &&
-			f(Types[Simtype[TUINT]], startOffset+int64(Array_cap))
-
-	case TARRAY:
-		// Short-circuit [1e6]struct{}.
-		if t.Elem().Width == 0 {
-			return true
-		}
-
-		for i := int64(0); i < t.NumElem(); i++ {
-			if !visitComponents(t.Elem(), startOffset+i*t.Elem().Width, f) {
-				return false
-			}
-		}
-		return true
-
-	case TSTRUCT:
-		for _, field := range t.Fields().Slice() {
-			if !visitComponents(field.Type, startOffset+field.Offset, f) {
-				return false
-			}
-		}
-		return true
-	}
-	return f(t, startOffset)
-}
-
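-// On a 64-bit system visitComponents above visits a slice as three
-// word-sized moves: pointer, len, cap. A sketch of just that case,
-// assuming the usual 0/8/16 layout; the names are hypothetical:
-func sliceComponentOffsets(ptrSize int64) [3]int64 {
-	return [3]int64{
-		0 * ptrSize, // array pointer
-		1 * ptrSize, // len
-		2 * ptrSize, // cap
-	}
-}
-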
-func cadable(n *Node) bool {
-	// Note: Not sure why you can have n.Op == ONAME without n.Addable, but you can.
-	return n.Addable && n.Op == ONAME
-}
diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go
index 97fa7e2..ecd8b227 100644
--- a/src/cmd/compile/internal/gc/go.go
+++ b/src/cmd/compile/internal/gc/go.go
@@ -368,68 +368,11 @@
 	MAXWIDTH     int64
 	ReservedRegs []int
 
-	AddIndex            func(*Node, int64, *Node) bool // optional
-	Betypeinit          func()
-	Bgen_float          func(*Node, bool, int, *obj.Prog) // optional
-	Cgen64              func(*Node, *Node)                // only on 32-bit systems
-	Cgenindex           func(*Node, *Node, bool) *obj.Prog
-	Cgen_bmul           func(Op, *Node, *Node, *Node) bool
-	Cgen_float          func(*Node, *Node) // optional
-	Cgen_hmul           func(*Node, *Node, *Node)
-	RightShiftWithCarry func(*Node, uint, *Node)  // only on systems without RROTC instruction
-	AddSetCarry         func(*Node, *Node, *Node) // only on systems when ADD does not update carry flag
-	Cgen_shift          func(Op, bool, *Node, *Node, *Node)
-	Clearfat            func(*Node)
-	Cmp64               func(*Node, *Node, Op, int, *obj.Prog) // only on 32-bit systems
-	Defframe            func(*obj.Prog)
-	Dodiv               func(Op, *Node, *Node, *Node)
-	Excise              func(*Flow)
-	Expandchecks        func(*obj.Prog)
-	Getg                func(*Node)
-	Gins                func(obj.As, *Node, *Node) *obj.Prog
-
-	// Ginscmp generates code comparing n1 to n2 and jumping away if op is satisfied.
-	// The returned prog should be Patch'ed with the jump target.
-	// If op is not satisfied, code falls through to the next emitted instruction.
-	// Likely is the branch prediction hint: +1 for likely, -1 for unlikely, 0 for no opinion.
-	//
-	// Ginscmp must be able to handle all kinds of arguments for n1 and n2,
-	// not just simple registers, although it can assume that there are no
-	// function calls needed during the evaluation, and on 32-bit systems
-	// the values are guaranteed not to be 64-bit values, so no in-memory
-	// temporaries are necessary.
-	Ginscmp func(op Op, t *Type, n1, n2 *Node, likely int) *obj.Prog
-
-	// Ginsboolval inserts instructions to convert the result
-	// of a just-completed comparison to a boolean value.
-	// The first argument is the conditional jump instruction
-	// corresponding to the desired value.
-	// The second argument is the destination.
-	// If not present, Ginsboolval will be emulated with jumps.
-	Ginsboolval func(obj.As, *Node)
-
-	Ginscon      func(obj.As, int64, *Node)
-	Ginsnop      func()
-	Gmove        func(*Node, *Node)
-	Igenindex    func(*Node, *Node, bool) *obj.Prog
-	Peep         func(*obj.Prog)
-	Proginfo     func(*obj.Prog) // fills in Prog.Info
-	Regtyp       func(*obj.Addr) bool
-	Sameaddr     func(*obj.Addr, *obj.Addr) bool
-	Smallindir   func(*obj.Addr, *obj.Addr) bool
-	Stackaddr    func(*obj.Addr) bool
-	Blockcopy    func(*Node, *Node, int64, int64, int64)
-	Sudoaddable  func(obj.As, *Node, *obj.Addr) bool
-	Sudoclean    func()
-	Excludedregs func() uint64
-	RtoB         func(int) uint64
-	FtoB         func(int) uint64
-	BtoR         func(uint64) int
-	BtoF         func(uint64) int
-	Optoas       func(Op, *Type) obj.As
-	Doregbits    func(int) uint64
-	Regnames     func(*int) []string
-	Use387       bool // should 8g use 387 FP instructions instead of sse2.
+	Betypeinit func()
+	Defframe   func(*obj.Prog)
+	Gins       func(obj.As, *Node, *Node) *obj.Prog
+	Proginfo   func(*obj.Prog) // fills in Prog.Info
+	Use387     bool            // should 8g use 387 FP instructions instead of sse2.
 
 	// SSARegToReg maps ssa register numbers to obj register numbers.
 	SSARegToReg []int16
diff --git a/src/cmd/compile/internal/gc/gsubr.go b/src/cmd/compile/internal/gc/gsubr.go
index 02d09df..59ac9af 100644
--- a/src/cmd/compile/internal/gc/gsubr.go
+++ b/src/cmd/compile/internal/gc/gsubr.go
@@ -692,42 +692,6 @@
 	}
 }
 
-func gclean() {
-	for _, r := range Thearch.ReservedRegs {
-		reg[r-Thearch.REGMIN]--
-	}
-
-	for r := Thearch.REGMIN; r <= Thearch.REGMAX; r++ {
-		n := reg[r-Thearch.REGMIN]
-		if n != 0 {
-			if Debug['v'] != 0 {
-				Regdump()
-			}
-			Yyerror("reg %v left allocated", obj.Rconv(r))
-		}
-	}
-
-	for r := Thearch.FREGMIN; r <= Thearch.FREGMAX; r++ {
-		n := reg[r-Thearch.REGMIN]
-		if n != 0 {
-			if Debug['v'] != 0 {
-				Regdump()
-			}
-			Yyerror("reg %v left allocated", obj.Rconv(r))
-		}
-	}
-}
-
-func Anyregalloc() bool {
-	n := 0
-	for r := Thearch.REGMIN; r <= Thearch.REGMAX; r++ {
-		if reg[r-Thearch.REGMIN] == 0 {
-			n++
-		}
-	}
-	return n > len(Thearch.ReservedRegs)
-}
-
 // allocate register of type t, leave in n.
 // if o != N, o may be reusable register.
 // caller must Regfree(n).
@@ -829,49 +793,6 @@
 	}
 }
 
-// Reginuse reports whether r is in use.
-func Reginuse(r int) bool {
-	switch {
-	case Thearch.REGMIN <= r && r <= Thearch.REGMAX,
-		Thearch.FREGMIN <= r && r <= Thearch.FREGMAX:
-		// ok
-	default:
-		Fatalf("reginuse: reg out of range")
-	}
-
-	return reg[r-Thearch.REGMIN] > 0
-}
-
-// Regrealloc(n) undoes the effect of Regfree(n),
-// so that a register can be given up but then reclaimed.
-func Regrealloc(n *Node) {
-	if n.Op != OREGISTER && n.Op != OINDREG {
-		Fatalf("regrealloc: not a register")
-	}
-	i := int(n.Reg)
-	if i == Thearch.REGSP {
-		return
-	}
-	switch {
-	case Thearch.REGMIN <= i && i <= Thearch.REGMAX,
-		Thearch.FREGMIN <= i && i <= Thearch.FREGMAX:
-		// ok
-	default:
-		Fatalf("regrealloc: reg out of range")
-	}
-
-	i -= Thearch.REGMIN
-	if reg[i] == 0 && Debug['v'] > 0 {
-		if regstk[i] == nil {
-			regstk[i] = make([]byte, 4096)
-		}
-		stk := regstk[i]
-		n := runtime.Stack(stk[:cap(stk)], false)
-		regstk[i] = stk[:n]
-	}
-	reg[i]++
-}
-
 func Regdump() {
 	if Debug['v'] == 0 {
 		fmt.Printf("run compiler with -v for register allocation sites\n")
diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go
index 16e62a3..0fcc78ab 100644
--- a/src/cmd/compile/internal/gc/pgen.go
+++ b/src/cmd/compile/internal/gc/pgen.go
@@ -302,34 +302,6 @@
 	}
 }
 
-func Cgen_checknil(n *Node) {
-	if Disable_checknil != 0 {
-		return
-	}
-
-	// Ideally we wouldn't see any integer types here, but we do.
-	if n.Type == nil || (!n.Type.IsPtr() && !n.Type.IsInteger() && n.Type.Etype != TUNSAFEPTR) {
-		Dump("checknil", n)
-		Fatalf("bad checknil")
-	}
-
-	// Most architectures require the address being checked to be in a
-	// register; it may currently live in memory.
-	needsReg := !Thearch.LinkArch.InFamily(sys.AMD64, sys.I386)
-
-	// Move the address to be checked into a register if necessary.
-	if (needsReg && n.Op != OREGISTER) || !n.Addable || n.Op == OLITERAL {
-		var reg Node
-		Regalloc(&reg, Types[Tptr], n)
-		Cgen(n, &reg)
-		Thearch.Gins(obj.ACHECKNIL, &reg, nil)
-		Regfree(&reg)
-		return
-	}
-
-	Thearch.Gins(obj.ACHECKNIL, n, nil)
-}
-
 func compile(fn *Node) {
 	if Newproc == nil {
 		Newproc = Sysfunc("newproc")
diff --git a/src/cmd/compile/internal/gc/popt.go b/src/cmd/compile/internal/gc/popt.go
deleted file mode 100644
index 520dcab..0000000
--- a/src/cmd/compile/internal/gc/popt.go
+++ /dev/null
@@ -1,1094 +0,0 @@
-// Derived from Inferno utils/6c/gc.h
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/6c/gc.h
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-// "Portable" optimizations.
-
-package gc
-
-import (
-	"cmd/internal/obj"
-	"fmt"
-	"sort"
-	"strings"
-)
-
-type OptStats struct {
-	Ncvtreg int32
-	Nspill  int32
-	Nreload int32
-	Ndelmov int32
-	Nvar    int32
-	Naddr   int32
-}
-
-var Ostats OptStats
-
-var noreturn_symlist [10]*Sym
-
-// p is a call instruction. Does the call fail to return?
-func Noreturn(p *obj.Prog) bool {
-	if noreturn_symlist[0] == nil {
-		noreturn_symlist[0] = Pkglookup("panicindex", Runtimepkg)
-		noreturn_symlist[1] = Pkglookup("panicslice", Runtimepkg)
-		noreturn_symlist[2] = Pkglookup("throwinit", Runtimepkg)
-		noreturn_symlist[3] = Pkglookup("gopanic", Runtimepkg)
-		noreturn_symlist[4] = Pkglookup("panicwrap", Runtimepkg)
-		noreturn_symlist[5] = Pkglookup("throwreturn", Runtimepkg)
-		noreturn_symlist[6] = Pkglookup("selectgo", Runtimepkg)
-		noreturn_symlist[7] = Pkglookup("block", Runtimepkg)
-	}
-
-	if p.To.Node == nil {
-		return false
-	}
-	s := ((p.To.Node).(*Node)).Sym
-	if s == nil {
-		return false
-	}
-	for i := 0; noreturn_symlist[i] != nil; i++ {
-		if s == noreturn_symlist[i] {
-			return true
-		}
-	}
-	return false
-}
-
-// JMP chasing and removal.
-//
-// The code generator depends on being able to write out jump
-// instructions that it can jump to now but fill in later.
-// The linker will resolve them nicely, but they make the code
-// longer and more difficult to follow during debugging.
-// Remove them.
-
-// what instruction does a JMP to p eventually land on?
-func chasejmp(p *obj.Prog, jmploop *int) *obj.Prog {
-	n := 0
-	for p != nil && p.As == obj.AJMP && p.To.Type == obj.TYPE_BRANCH {
-		n++
-		if n > 10 {
-			*jmploop = 1
-			break
-		}
-
-		p = p.To.Val.(*obj.Prog)
-	}
-
-	return p
-}
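-
-// To make the chase concrete, here is a minimal standalone sketch
-// (hypothetical, not from this file) of the same idea on a toy
-// representation: targets[i] >= 0 means instruction i is a JMP to
-// instruction targets[i]. Chains longer than 10 hops are treated as
-// loops, exactly as chasejmp does above.
-//
-//	func chase(targets []int, i int, jmploop *bool) int {
-//		for n := 0; targets[i] >= 0; n++ {
-//			if n > 10 {
-//				*jmploop = true
-//				break
-//			}
-//			i = targets[i]
-//		}
-//		return i
-//	}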
-
-// Reuse the Prog.Opt pointer for mark/sweep state.
-// Opt is left nil at the end, because alive == nil.
-var alive interface{} = nil
-var dead interface{} = 1
-
-// mark all code reachable from firstp as alive
-func mark(firstp *obj.Prog) {
-	for p := firstp; p != nil; p = p.Link {
-		if p.Opt != dead {
-			break
-		}
-		p.Opt = alive
-		if p.As != obj.ACALL && p.To.Type == obj.TYPE_BRANCH && p.To.Val.(*obj.Prog) != nil {
-			mark(p.To.Val.(*obj.Prog))
-		}
-		if p.As == obj.AJMP || p.As == obj.ARET || p.As == obj.AUNDEF {
-			break
-		}
-	}
-}
-
-func fixjmp(firstp *obj.Prog) {
-	if Debug['R'] != 0 && Debug['v'] != 0 {
-		fmt.Printf("\nfixjmp\n")
-	}
-
-	// pass 1: resolve jump to jump, mark all code as dead.
-	jmploop := 0
-
-	for p := firstp; p != nil; p = p.Link {
-		if Debug['R'] != 0 && Debug['v'] != 0 {
-			fmt.Printf("%v\n", p)
-		}
-		if p.As != obj.ACALL && p.To.Type == obj.TYPE_BRANCH && p.To.Val.(*obj.Prog) != nil && p.To.Val.(*obj.Prog).As == obj.AJMP {
-			if Debug['N'] == 0 {
-				p.To.Val = chasejmp(p.To.Val.(*obj.Prog), &jmploop)
-				if Debug['R'] != 0 && Debug['v'] != 0 {
-					fmt.Printf("->%v\n", p)
-				}
-			}
-		}
-
-		p.Opt = dead
-	}
-	if Debug['R'] != 0 && Debug['v'] != 0 {
-		fmt.Printf("\n")
-	}
-
-	// pass 2: mark all reachable code alive
-	mark(firstp)
-
-	// pass 3: delete dead code (mostly JMPs).
-	var last *obj.Prog
-
-	for p := firstp; p != nil; p = p.Link {
-		if p.Opt == dead {
-			if p.Link == nil && p.As == obj.ARET && last != nil && last.As != obj.ARET {
-				// This is the final ARET, and the code so far doesn't have one.
-				// Let it stay. The register allocator assumes that all live code in
-				// the function can be traversed by starting at all the RET instructions
-				// and following predecessor links. If we remove the final RET,
-				// this assumption will not hold in the case of an infinite loop
-				// at the end of a function.
-				// Keep the RET but mark it dead for the liveness analysis.
-				p.Mode = 1
-			} else {
-				if Debug['R'] != 0 && Debug['v'] != 0 {
-					fmt.Printf("del %v\n", p)
-				}
-				continue
-			}
-		}
-
-		if last != nil {
-			last.Link = p
-		}
-		last = p
-	}
-
-	last.Link = nil
-
-	// pass 4: elide JMP to next instruction.
-	// only safe if there are no jumps to JMPs anymore.
-	if jmploop == 0 && Debug['N'] == 0 {
-		var last *obj.Prog
-		for p := firstp; p != nil; p = p.Link {
-			if p.As == obj.AJMP && p.To.Type == obj.TYPE_BRANCH && p.To.Val == p.Link {
-				if Debug['R'] != 0 && Debug['v'] != 0 {
-					fmt.Printf("del %v\n", p)
-				}
-				continue
-			}
-
-			if last != nil {
-				last.Link = p
-			}
-			last = p
-		}
-
-		last.Link = nil
-	}
-
-	if Debug['R'] != 0 && Debug['v'] != 0 {
-		fmt.Printf("\n")
-		for p := firstp; p != nil; p = p.Link {
-			fmt.Printf("%v\n", p)
-		}
-		fmt.Printf("\n")
-	}
-}
-
-// Control flow analysis. The Flow structures hold predecessor and successor
-// information as well as basic loop analysis.
-//
-//	graph = Flowstart(firstp, nil)
-//	... use flow graph ...
-//	Flowend(graph) // free graph
-//
-// Typical uses of the flow graph are to iterate over all the flow-relevant instructions:
-//
-//	for f := graph.Start; f != nil; f = f.Link {}
-//
-// or, given an instruction f, to iterate over all the predecessors, which is
-// f.P1 and this list:
-//
-//	for f2 := f.P2; f2 != nil; f2 = f2.P2link {}
-//
-// The second argument (newData) to Flowstart specifies a func to create an object
-// for every f.Data field, for use by the client.
-// If newData is nil, f.Data will be nil.
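-//
-// For instance, a hypothetical client that attaches a counter to every
-// instruction and then frees the graph might look like this (MyData is
-// an assumed client-side type, not part of this package):
-//
-//	type MyData struct{ visits int }
-//
-//	g := Flowstart(firstp, func() interface{} { return new(MyData) })
-//	if g != nil {
-//		for f := g.Start; f != nil; f = f.Link {
-//			f.Data.(*MyData).visits++ // analyze f.Prog here
-//		}
-//		Flowend(g)
-//	}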
-
-type Graph struct {
-	Start *Flow
-	Num   int
-
-	// After calling flowrpo, rpo lists the flow nodes in reverse postorder,
-	// and each non-dead Flow node f has g.Rpo[f.Rpo] == f.
-	Rpo []*Flow
-}
-
-type Flow struct {
-	Prog   *obj.Prog // actual instruction
-	P1     *Flow     // predecessors of this instruction: p1,
-	P2     *Flow     // and then p2, linked through P2link.
-	P2link *Flow
-	S1     *Flow // successors of this instruction (at most two: s1 and s2).
-	S2     *Flow
-	Link   *Flow // next instruction in function code
-
-	Active int32 // usable by client
-
-	Id     int32  // sequence number in flow graph
-	Rpo    int32  // reverse post ordering
-	Loop   uint16 // increased by LOOP (3) for each enclosing loop
-	Refset bool   // diagnostic generated
-
-	Data interface{} // for use by client
-}
-
-var flowmark int
-
-// MaxFlowProg is the maximum size program (counted in instructions)
-// for which the flow code will build a graph. Functions larger than this limit
-// will not have flow graphs and consequently will not be optimized.
-const MaxFlowProg = 50000
-
-var ffcache []Flow // reusable []Flow, to reduce allocation
-
-func growffcache(n int) {
-	if n > cap(ffcache) {
-		n = (n * 5) / 4
-		if n > MaxFlowProg {
-			n = MaxFlowProg
-		}
-		ffcache = make([]Flow, n)
-	}
-	ffcache = ffcache[:n]
-}
-
-func Flowstart(firstp *obj.Prog, newData func() interface{}) *Graph {
-	// Count and mark instructions to annotate.
-	nf := 0
-
-	for p := firstp; p != nil; p = p.Link {
-		p.Opt = nil // should be already, but just in case
-		Thearch.Proginfo(p)
-		if p.Info.Flags&Skip != 0 {
-			continue
-		}
-		p.Opt = &flowmark
-		nf++
-	}
-
-	if nf == 0 {
-		return nil
-	}
-
-	if nf >= MaxFlowProg {
-		if Debug['v'] != 0 {
-			Warn("%v is too big (%d instructions)", Curfn.Func.Nname.Sym, nf)
-		}
-		return nil
-	}
-
-	// Allocate annotations and assign to instructions.
-	graph := new(Graph)
-
-	growffcache(nf)
-	ff := ffcache
-	start := &ff[0]
-	id := 0
-	var last *Flow
-	for p := firstp; p != nil; p = p.Link {
-		if p.Opt == nil {
-			continue
-		}
-		f := &ff[0]
-		ff = ff[1:]
-		p.Opt = f
-		f.Prog = p
-		if last != nil {
-			last.Link = f
-		}
-		last = f
-		if newData != nil {
-			f.Data = newData()
-		}
-		f.Id = int32(id)
-		id++
-	}
-
-	// Fill in pred/succ information.
-	var f1 *Flow
-	var p *obj.Prog
-	for f := start; f != nil; f = f.Link {
-		p = f.Prog
-		if p.Info.Flags&Break == 0 {
-			f1 = f.Link
-			f.S1 = f1
-			f1.P1 = f
-		}
-
-		if p.To.Type == obj.TYPE_BRANCH {
-			if p.To.Val == nil {
-				Fatalf("pnil %v", p)
-			}
-			f1 = p.To.Val.(*obj.Prog).Opt.(*Flow)
-			if f1 == nil {
-				Fatalf("fnil %v / %v", p, p.To.Val.(*obj.Prog))
-			}
-			if f1 == f {
-				//fatal("self loop %v", p);
-				continue
-			}
-
-			f.S2 = f1
-			f.P2link = f1.P2
-			f1.P2 = f
-		}
-	}
-
-	graph.Start = start
-	graph.Num = nf
-	return graph
-}
-
-func Flowend(graph *Graph) {
-	for f := graph.Start; f != nil; f = f.Link {
-		f.Prog.Info.Flags = 0 // drop cached proginfo
-		f.Prog.Opt = nil
-	}
-	clear := ffcache[:graph.Num]
-	for i := range clear {
-		clear[i] = Flow{}
-	}
-}
-
-// find looping structure
-//
-// 1) find reverse postordering
-// 2) find approximate dominators,
-//	the actual dominators if the flow graph is reducible
-//	otherwise, dominators plus some other non-dominators.
-//	See Matthew S. Hecht and Jeffrey D. Ullman,
-//	"Analysis of a Simple Algorithm for Global Data Flow Problems",
-//	Conf.  Record of ACM Symp. on Principles of Prog. Langs, Boston, Massachusetts,
-//	Oct. 1-3, 1973, pp.  207-217.
-// 3) find all nodes with a predecessor dominated by the current node.
-//	such a node is a loop head.
-//	recursively, all preds with a greater rpo number are in the loop
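-//
-// For example, in the hypothetical flow graph
-//
-//	b0 -> b1 -> b2 -> b1
-//
-// b1 dominates its predecessor b2, so b1 is a loop head, and loopmark
-// adds LOOP to the Loop weight of both b1 and b2.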
-func postorder(r *Flow, rpo2r []*Flow, n int32) int32 {
-	r.Rpo = 1
-	r1 := r.S1
-	if r1 != nil && r1.Rpo == 0 {
-		n = postorder(r1, rpo2r, n)
-	}
-	r1 = r.S2
-	if r1 != nil && r1.Rpo == 0 {
-		n = postorder(r1, rpo2r, n)
-	}
-	rpo2r[n] = r
-	n++
-	return n
-}
-
-func rpolca(idom []int32, rpo1 int32, rpo2 int32) int32 {
-	if rpo1 == -1 {
-		return rpo2
-	}
-	var t int32
-	for rpo1 != rpo2 {
-		if rpo1 > rpo2 {
-			t = rpo2
-			rpo2 = rpo1
-			rpo1 = t
-		}
-
-		for rpo1 < rpo2 {
-			t = idom[rpo2]
-			if t >= rpo2 {
-				Fatalf("bad idom")
-			}
-			rpo2 = t
-		}
-	}
-
-	return rpo1
-}
-
-func doms(idom []int32, r int32, s int32) bool {
-	for s > r {
-		s = idom[s]
-	}
-	return s == r
-}
-
-func loophead(idom []int32, r *Flow) bool {
-	src := r.Rpo
-	if r.P1 != nil && doms(idom, src, r.P1.Rpo) {
-		return true
-	}
-	for r = r.P2; r != nil; r = r.P2link {
-		if doms(idom, src, r.Rpo) {
-			return true
-		}
-	}
-	return false
-}
-
-func loopmark(rpo2r **Flow, head int32, r *Flow) {
-	if r.Rpo < head || r.Active == head {
-		return
-	}
-	r.Active = head
-	r.Loop += LOOP
-	if r.P1 != nil {
-		loopmark(rpo2r, head, r.P1)
-	}
-	for r = r.P2; r != nil; r = r.P2link {
-		loopmark(rpo2r, head, r)
-	}
-}
-
-func flowrpo(g *Graph) {
-	g.Rpo = make([]*Flow, g.Num)
-	idom := make([]int32, g.Num)
-
-	for r1 := g.Start; r1 != nil; r1 = r1.Link {
-		r1.Active = 0
-	}
-
-	rpo2r := g.Rpo
-	d := postorder(g.Start, rpo2r, 0)
-	nr := int32(g.Num)
-	if d > nr {
-		Fatalf("too many reg nodes %d %d", d, nr)
-	}
-	nr = d
-	var r1 *Flow
-	for i := int32(0); i < nr/2; i++ {
-		r1 = rpo2r[i]
-		rpo2r[i] = rpo2r[nr-1-i]
-		rpo2r[nr-1-i] = r1
-	}
-
-	for i := int32(0); i < nr; i++ {
-		rpo2r[i].Rpo = i
-	}
-
-	idom[0] = 0
-	var me int32
-	for i := int32(0); i < nr; i++ {
-		r1 = rpo2r[i]
-		me = r1.Rpo
-		d = -1
-
-		// rpo2r[r.Rpo] == r protects against considering dead code,
-		// which has r.Rpo == 0.
-		if r1.P1 != nil && rpo2r[r1.P1.Rpo] == r1.P1 && r1.P1.Rpo < me {
-			d = r1.P1.Rpo
-		}
-		for r1 = r1.P2; r1 != nil; r1 = r1.P2link {
-			if rpo2r[r1.Rpo] == r1 && r1.Rpo < me {
-				d = rpolca(idom, d, r1.Rpo)
-			}
-		}
-		idom[i] = d
-	}
-
-	for i := int32(0); i < nr; i++ {
-		r1 = rpo2r[i]
-		r1.Loop++
-		if r1.P2 != nil && loophead(idom, r1) {
-			loopmark(&rpo2r[0], i, r1)
-		}
-	}
-
-	for r1 := g.Start; r1 != nil; r1 = r1.Link {
-		r1.Active = 0
-	}
-}
-
-// Uniqp returns the unique predecessor of r,
-// or nil if r has zero or multiple predecessors.
-func Uniqp(r *Flow) *Flow {
-	r1 := r.P1
-	if r1 == nil {
-		r1 = r.P2
-		if r1 == nil || r1.P2link != nil {
-			return nil
-		}
-	} else if r.P2 != nil {
-		return nil
-	}
-	return r1
-}
-
-// Uniqs returns the unique successor of r,
-// or nil if r has zero or multiple successors.
-func Uniqs(r *Flow) *Flow {
-	r1 := r.S1
-	if r1 == nil {
-		r1 = r.S2
-		if r1 == nil {
-			return nil
-		}
-	} else if r.S2 != nil {
-		return nil
-	}
-	return r1
-}
-
-// The compilers assume they can generate temporary variables
-// as needed to preserve the right semantics or simplify code
-// generation and the back end will still generate good code.
-// This results in a large number of ephemeral temporary variables.
-// Merge temps with non-overlapping lifetimes and equal types using the
-// greedy algorithm in Poletto and Sarkar, "Linear Scan Register Allocation",
-// ACM TOPLAS 1999.
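-//
-// A minimal standalone sketch (hypothetical, not part of this file) of
-// that greedy merge on bare intervals, assuming the sort package is
-// imported: sort by start, then reuse any slot whose previous occupant
-// has already ended. The real mergetemp below additionally requires
-// equal types and matching addrtaken bits.
-//
-//	type interval struct {
-//		start, end int64
-//		slot       int
-//	}
-//
-//	func mergeIntervals(ivs []*interval) {
-//		sort.Slice(ivs, func(i, j int) bool { return ivs[i].start < ivs[j].start })
-//		var slotEnd []int64 // slotEnd[s] is the end of the last interval placed in slot s
-//		for _, iv := range ivs {
-//			iv.slot = -1
-//			for s, end := range slotEnd {
-//				if end < iv.start { // slot s is free again: merge into it
-//					iv.slot = s
-//					break
-//				}
-//			}
-//			if iv.slot < 0 { // no free slot: allocate a new one
-//				iv.slot = len(slotEnd)
-//				slotEnd = append(slotEnd, 0)
-//			}
-//			slotEnd[iv.slot] = iv.end
-//		}
-//	}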
-
-type TempVar struct {
-	node    *Node
-	def     *Flow    // definition of temp var
-	use     *Flow    // use list, chained through Flow.data
-	merge   *TempVar // merge var with this one
-	start   int64    // smallest Prog.pc in live range
-	end     int64    // largest Prog.pc in live range
-	addr    bool     // address taken - no accurate end
-	removed bool     // removed from program
-}
-
-// startcmp sorts TempVars by start, then id, then symbol name.
-type startcmp []*TempVar
-
-func (x startcmp) Len() int      { return len(x) }
-func (x startcmp) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-func (x startcmp) Less(i, j int) bool {
-	a := x[i]
-	b := x[j]
-
-	if a.start < b.start {
-		return true
-	}
-	if a.start > b.start {
-		return false
-	}
-
-	// Order what's left by def id, then symbol name, just to force a
-	// deterministic ordering, so that the result of the sort does not
-	// depend on the sort implementation.
-	if a.def != b.def {
-		return int(a.def.Id-b.def.Id) < 0
-	}
-	if a.node != b.node {
-		return a.node.Sym.Name < b.node.Sym.Name
-	}
-	return false
-}
-
-// Is n available for merging?
-func canmerge(n *Node) bool {
-	return n.Class == PAUTO && strings.HasPrefix(n.Sym.Name, "autotmp")
-}
-
-func mergetemp(firstp *obj.Prog) {
-	const (
-		debugmerge = 0
-	)
-
-	g := Flowstart(firstp, nil)
-	if g == nil {
-		return
-	}
-
-	// Build list of all mergeable variables.
-	var vars []*TempVar
-	for _, n := range Curfn.Func.Dcl {
-		if canmerge(n) {
-			v := &TempVar{}
-			vars = append(vars, v)
-			n.SetOpt(v)
-			v.node = n
-		}
-	}
-
-	// Build list of uses.
-	// We assume that the earliest reference to a temporary is its definition.
-	// This is not true of variables in general but our temporaries are all
-	// single-use (that's why we have so many!).
-	for f := g.Start; f != nil; f = f.Link {
-		p := f.Prog
-		if p.From.Node != nil && ((p.From.Node).(*Node)).Opt() != nil && p.To.Node != nil && ((p.To.Node).(*Node)).Opt() != nil {
-			Fatalf("double node %v", p)
-		}
-		var v *TempVar
-		n, _ := p.From.Node.(*Node)
-		if n != nil {
-			v, _ = n.Opt().(*TempVar)
-		}
-		if v == nil {
-			n, _ = p.To.Node.(*Node)
-			if n != nil {
-				v, _ = n.Opt().(*TempVar)
-			}
-		}
-		if v != nil {
-			if v.def == nil {
-				v.def = f
-			}
-			f.Data = v.use
-			v.use = f
-			if n == p.From.Node && (p.Info.Flags&LeftAddr != 0) {
-				v.addr = true
-			}
-		}
-	}
-
-	if debugmerge > 1 && Debug['v'] != 0 {
-		Dumpit("before", g.Start, 0)
-	}
-
-	nkill := 0
-
-	// Special case.
-	for _, v := range vars {
-		if v.addr {
-			continue
-		}
-
-		// Used in only one instruction, which had better be a write.
-		f := v.use
-		if f != nil && f.Data.(*Flow) == nil {
-			p := f.Prog
-			if p.To.Node == v.node && (p.Info.Flags&RightWrite != 0) && p.Info.Flags&RightRead == 0 {
-				p.As = obj.ANOP
-				p.To = obj.Addr{}
-				v.removed = true
-				if debugmerge > 0 && Debug['v'] != 0 {
-					fmt.Printf("drop write-only %v\n", v.node.Sym)
-				}
-			} else {
-				Fatalf("temp used and not set: %v", p)
-			}
-			nkill++
-			continue
-		}
-
-		// Written in one instruction, read in the next, otherwise unused,
-		// no jumps to the next instruction. This happens mainly in the 386 compiler.
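-		// Illustrative instruction pattern (hypothetical 386-style sequence):
-		//	MOVL $1, autotmp_0   // temp written once...
-		//	MOVL autotmp_0, AX   // ...read immediately: rewritten to MOVL $1, AX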
-		f = v.use
-		if f != nil && f.Link == f.Data.(*Flow) && (f.Data.(*Flow)).Data.(*Flow) == nil && Uniqp(f.Link) == f {
-			p := f.Prog
-			p1 := f.Link.Prog
-			const (
-				SizeAny = SizeB | SizeW | SizeL | SizeQ | SizeF | SizeD
-			)
-			if p.From.Node == v.node && p1.To.Node == v.node && (p.Info.Flags&Move != 0) && (p.Info.Flags|p1.Info.Flags)&(LeftAddr|RightAddr) == 0 && p.Info.Flags&SizeAny == p1.Info.Flags&SizeAny {
-				p1.From = p.From
-				Thearch.Excise(f)
-				v.removed = true
-				if debugmerge > 0 && Debug['v'] != 0 {
-					fmt.Printf("drop immediate-use %v\n", v.node.Sym)
-				}
-			}
-
-			nkill++
-			continue
-		}
-	}
-
-	// Traverse live range of each variable to set start, end.
-	// Each flood uses a new value of gen so that we don't have
-	// to clear all the r.Active words after each variable.
-	gen := uint32(0)
-
-	for _, v := range vars {
-		gen++
-		for f := v.use; f != nil; f = f.Data.(*Flow) {
-			mergewalk(v, f, gen)
-		}
-		if v.addr {
-			gen++
-			for f := v.use; f != nil; f = f.Data.(*Flow) {
-				varkillwalk(v, f, gen)
-			}
-		}
-	}
-
-	// Sort variables by start.
-	bystart := make([]*TempVar, len(vars))
-	copy(bystart, vars)
-	sort.Sort(startcmp(bystart))
-
-	// List of in-use variables, sorted by end, so that the ones that
-	// will last the longest are the earliest ones in the array.
-	// The tail inuse[nfree:] holds no-longer-used variables.
-	// In theory we should use a sorted tree so that insertions are
-	// guaranteed O(log n) and then the loop is guaranteed O(n log n).
-	// In practice, it doesn't really matter.
-	inuse := make([]*TempVar, len(bystart))
-
-	ninuse := 0
-	nfree := len(bystart)
-	for _, v := range bystart {
-		if debugmerge > 0 && Debug['v'] != 0 {
-			fmt.Printf("consider %#v: removed=%t\n", v.node, v.removed)
-		}
-
-		if v.removed {
-			continue
-		}
-
-		// Expire no longer in use.
-		for ninuse > 0 && inuse[ninuse-1].end < v.start {
-			ninuse--
-			nfree--
-			inuse[nfree] = inuse[ninuse]
-		}
-
-		if debugmerge > 0 && Debug['v'] != 0 {
-			fmt.Printf("consider %#v: removed=%t nfree=%d nvar=%d\n", v.node, v.removed, nfree, len(bystart))
-		}
-
-		// Find old temp to reuse if possible.
-		t := v.node.Type
-
-		for j := nfree; j < len(inuse); j++ {
-			v1 := inuse[j]
-			if debugmerge > 0 && Debug['v'] != 0 {
-				fmt.Printf("consider %#v: maybe %#v: type=%v,%v addrtaken=%v,%v\n", v.node, v1.node, t, v1.node.Type, v.node.Addrtaken, v1.node.Addrtaken)
-			}
-
-			// Require the types to match but also require the addrtaken bits to match.
-			// If a variable's address is taken, that disables registerization for the individual
-			// words of the variable (for example, the base, len, cap of a slice).
-			// We don't want to merge a non-addressed var with an addressed one and
-			// inhibit registerization of the former.
-			if Eqtype(t, v1.node.Type) && v.node.Addrtaken == v1.node.Addrtaken {
-				inuse[j] = inuse[nfree]
-				nfree++
-				if v1.merge != nil {
-					v.merge = v1.merge
-				} else {
-					v.merge = v1
-				}
-				nkill++
-				break
-			}
-		}
-
-		// Sort v into inuse.
-		j := ninuse
-		ninuse++
-
-		for j > 0 && inuse[j-1].end < v.end {
-			inuse[j] = inuse[j-1]
-			j--
-		}
-
-		inuse[j] = v
-	}
-
-	if debugmerge > 0 && Debug['v'] != 0 {
-		fmt.Printf("%v [%d - %d]\n", Curfn.Func.Nname.Sym, len(vars), nkill)
-		for _, v := range vars {
-			fmt.Printf("var %#v %v %d-%d", v.node, v.node.Type, v.start, v.end)
-			if v.addr {
-				fmt.Printf(" addr=true")
-			}
-			if v.removed {
-				fmt.Printf(" removed=true")
-			}
-			if v.merge != nil {
-				fmt.Printf(" merge %#v", v.merge.node)
-			}
-			if v.start == v.end && v.def != nil {
-				fmt.Printf(" %v", v.def.Prog)
-			}
-			fmt.Printf("\n")
-		}
-
-		if debugmerge > 1 && Debug['v'] != 0 {
-			Dumpit("after", g.Start, 0)
-		}
-	}
-
-	// Update node references to use merged temporaries.
-	for f := g.Start; f != nil; f = f.Link {
-		p := f.Prog
-		n, _ := p.From.Node.(*Node)
-		if n != nil {
-			v, _ := n.Opt().(*TempVar)
-			if v != nil && v.merge != nil {
-				p.From.Node = v.merge.node
-			}
-		}
-		n, _ = p.To.Node.(*Node)
-		if n != nil {
-			v, _ := n.Opt().(*TempVar)
-			if v != nil && v.merge != nil {
-				p.To.Node = v.merge.node
-			}
-		}
-	}
-
-	// Delete merged nodes from declaration list.
-	dcl := make([]*Node, 0, len(Curfn.Func.Dcl)-nkill)
-	for _, n := range Curfn.Func.Dcl {
-		v, _ := n.Opt().(*TempVar)
-		if v != nil && (v.merge != nil || v.removed) {
-			continue
-		}
-		dcl = append(dcl, n)
-	}
-	Curfn.Func.Dcl = dcl
-
-	// Clear aux structures.
-	for _, v := range vars {
-		v.node.SetOpt(nil)
-	}
-
-	Flowend(g)
-}
-
-func mergewalk(v *TempVar, f0 *Flow, gen uint32) {
-	var p *obj.Prog
-	var f1 *Flow
-
-	for f1 = f0; f1 != nil; f1 = f1.P1 {
-		if uint32(f1.Active) == gen {
-			break
-		}
-		f1.Active = int32(gen)
-		p = f1.Prog
-		if v.end < p.Pc {
-			v.end = p.Pc
-		}
-		if f1 == v.def {
-			v.start = p.Pc
-			break
-		}
-	}
-
-	var f2 *Flow
-	for f := f0; f != f1; f = f.P1 {
-		for f2 = f.P2; f2 != nil; f2 = f2.P2link {
-			mergewalk(v, f2, gen)
-		}
-	}
-}
-
-func varkillwalk(v *TempVar, f0 *Flow, gen uint32) {
-	var p *obj.Prog
-	var f1 *Flow
-
-	for f1 = f0; f1 != nil; f1 = f1.S1 {
-		if uint32(f1.Active) == gen {
-			break
-		}
-		f1.Active = int32(gen)
-		p = f1.Prog
-		if v.end < p.Pc {
-			v.end = p.Pc
-		}
-		if v.start > p.Pc {
-			v.start = p.Pc
-		}
-		if p.As == obj.ARET || (p.As == obj.AVARKILL && p.To.Node == v.node) {
-			break
-		}
-	}
-
-	for f := f0; f != f1; f = f.S1 {
-		varkillwalk(v, f.S2, gen)
-	}
-}
-
-// Eliminate redundant nil pointer checks.
-//
-// The code generation pass emits a CHECKNIL for every possibly nil pointer.
-// This pass removes a CHECKNIL if every predecessor path has already
-// checked this value for nil.
-//
-// Simple backwards flood from check to definition.
-// Run prog loop backward from end of program to beginning to avoid quadratic
-// behavior removing a run of checks.
-//
-// Assume that stack variables with address not taken can be loaded multiple times
-// from memory without being rechecked. Other variables need to be checked on
-// each load.
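-//
-// Source-level illustration (hypothetical type with fields x and y):
-//
-//	func f(p *T) int {
-//		a := p.x // CHECKNIL p emitted for this dereference
-//		b := p.y // this second CHECKNIL p is redundant and removed
-//		return a + b
-//	}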
-
-var killed int // f.Data is either nil or &killed
-
-func nilopt(firstp *obj.Prog) {
-	g := Flowstart(firstp, nil)
-	if g == nil {
-		return
-	}
-
-	if Debug_checknil > 1 { // || strcmp(curfn->nname->sym->name, "f1") == 0
-		Dumpit("nilopt", g.Start, 0)
-	}
-
-	ncheck := 0
-	nkill := 0
-	var p *obj.Prog
-	for f := g.Start; f != nil; f = f.Link {
-		p = f.Prog
-		if p.As != obj.ACHECKNIL || !Thearch.Regtyp(&p.From) {
-			continue
-		}
-		ncheck++
-		if Thearch.Stackaddr(&p.From) {
-			if Debug_checknil != 0 && p.Lineno > 1 {
-				Warnl(p.Lineno, "removed nil check of SP address")
-			}
-			f.Data = &killed
-			continue
-		}
-
-		nilwalkfwd(f)
-		if f.Data != nil {
-			if Debug_checknil != 0 && p.Lineno > 1 {
-				Warnl(p.Lineno, "removed nil check before indirect")
-			}
-			continue
-		}
-
-		nilwalkback(f)
-		if f.Data != nil {
-			if Debug_checknil != 0 && p.Lineno > 1 {
-				Warnl(p.Lineno, "removed repeated nil check")
-			}
-			continue
-		}
-	}
-
-	for f := g.Start; f != nil; f = f.Link {
-		if f.Data != nil {
-			nkill++
-			Thearch.Excise(f)
-		}
-	}
-
-	Flowend(g)
-
-	if Debug_checknil > 1 {
-		fmt.Printf("%v: removed %d of %d nil checks\n", Curfn.Func.Nname.Sym, nkill, ncheck)
-	}
-}
-
-func nilwalkback(fcheck *Flow) {
-	for f := fcheck; f != nil; f = Uniqp(f) {
-		p := f.Prog
-		if (p.Info.Flags&RightWrite != 0) && Thearch.Sameaddr(&p.To, &fcheck.Prog.From) {
-			// Found initialization of the value we're checking for nil
-			// without first finding the check, so this one is unchecked.
-			return
-		}
-
-		if f != fcheck && p.As == obj.ACHECKNIL && Thearch.Sameaddr(&p.From, &fcheck.Prog.From) {
-			fcheck.Data = &killed
-			return
-		}
-	}
-}
-
-// Here is a more complex version that scans backward across branches.
-// It assumes fcheck->kill = 1 has been set on entry, and its job is to find a reason
-// to keep the check (setting fcheck->kill = 0).
-// It doesn't handle copying of aggregates as well as I would like,
-// nor variables with their address taken,
-// and it's too subtle to turn on this late in Go 1.2. Perhaps for Go 1.3.
-/*
-for(f1 = f0; f1 != nil; f1 = f1->p1) {
-	if(f1->active == gen)
-		break;
-	f1->active = gen;
-	p = f1->prog;
-
-	// If same check, stop this loop but still check
-	// alternate predecessors up to this point.
-	if(f1 != fcheck && p->as == ACHECKNIL && thearch.sameaddr(&p->from, &fcheck->prog->from))
-		break;
-
-	if((p.Info.flags & RightWrite) && thearch.sameaddr(&p->to, &fcheck->prog->from)) {
-		// Found initialization of value we're checking for nil.
-		// without first finding the check, so this one is unchecked.
-		fcheck->kill = 0;
-		return;
-	}
-
-	if(f1->p1 == nil && f1->p2 == nil) {
-		print("lost pred for %v\n", fcheck->prog);
-		for(f1=f0; f1!=nil; f1=f1->p1) {
-			thearch.proginfo(&info, f1->prog);
-			print("\t%v %d %d %D %D\n", r1->prog, info.flags&RightWrite, thearch.sameaddr(&f1->prog->to, &fcheck->prog->from), &f1->prog->to, &fcheck->prog->from);
-		}
-		fatal("lost pred trail");
-	}
-}
-
-for(f = f0; f != f1; f = f->p1)
-	for(f2 = f->p2; f2 != nil; f2 = f2->p2link)
-		nilwalkback(fcheck, f2, gen);
-*/
-
-func nilwalkfwd(fcheck *Flow) {
-	// If the path down from rcheck dereferences the address
-	// (possibly with a small offset) before writing to memory
-	// and before any subsequent checks, it's okay to wait for
-	// that implicit check. Only consider this basic block to
-	// avoid problems like:
-	//	_ = *x // should panic
-	//	for {} // no writes but infinite loop may be considered visible
-
-	var last *Flow
-	for f := Uniqs(fcheck); f != nil; f = Uniqs(f) {
-		p := f.Prog
-		if (p.Info.Flags&LeftRead != 0) && Thearch.Smallindir(&p.From, &fcheck.Prog.From) {
-			fcheck.Data = &killed
-			return
-		}
-
-		if (p.Info.Flags&(RightRead|RightWrite) != 0) && Thearch.Smallindir(&p.To, &fcheck.Prog.From) {
-			fcheck.Data = &killed
-			return
-		}
-
-		// Stop if another nil check happens.
-		if p.As == obj.ACHECKNIL {
-			return
-		}
-
-		// Stop if value is lost.
-		if (p.Info.Flags&RightWrite != 0) && Thearch.Sameaddr(&p.To, &fcheck.Prog.From) {
-			return
-		}
-
-		// Stop if memory write.
-		if (p.Info.Flags&RightWrite != 0) && !Thearch.Regtyp(&p.To) {
-			return
-		}
-
-		// Stop if we jump backward.
-		if last != nil && f.Id <= last.Id {
-			return
-		}
-		last = f
-	}
-}
diff --git a/src/cmd/compile/internal/gc/reg.go b/src/cmd/compile/internal/gc/reg.go
deleted file mode 100644
index 6ccb99f3..0000000
--- a/src/cmd/compile/internal/gc/reg.go
+++ /dev/null
@@ -1,1532 +0,0 @@
-// Derived from Inferno utils/6c/reg.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/6c/reg.c
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package gc
-
-import (
-	"bytes"
-	"cmd/internal/obj"
-	"cmd/internal/sys"
-	"fmt"
-	"sort"
-	"strings"
-)
-
-// A Var represents a single variable that may be stored in a register.
-// That variable may itself correspond to a hardware register,
-// to represent the use of registers in the unoptimized instruction stream.
-type Var struct {
-	offset     int64
-	node       *Node
-	nextinnode *Var
-	width      int
-	id         int // index in vars
-	name       int8
-	etype      EType
-	addr       int8
-}
-
-// Bits represents a set of Vars, stored as a bit set of var numbers
-// (the index in vars, or equivalently v.id).
-type Bits struct {
-	b [BITS]uint64
-}
-
-const (
-	BITS = 3
-	NVAR = BITS * 64
-)
-
-var (
-	vars [NVAR]Var // variables under consideration
-	nvar int       // number of vars
-
-	regbits uint64 // bits for hardware registers
-
-	zbits   Bits // zero
-	externs Bits // global variables
-	params  Bits // function parameters and results
-	ivar    Bits // function parameters (inputs)
-	ovar    Bits // function results (outputs)
-	consts  Bits // constant values
-	addrs   Bits // variables with address taken
-)
-
-// A Reg is a wrapper around a single Prog (one instruction) that holds
-// register optimization information while the optimizer runs.
-// The Reg is attached to the instruction's Flow node as Flow.Data.
-type Reg struct {
-	set  Bits // regopt variables written by this instruction.
-	use1 Bits // regopt variables read by p.From.
-	use2 Bits // regopt variables read by p.To.
-
-	// refahead/refbehind are the regopt variables whose current
-	// value may be used in the following/preceding instructions
-	// up to a CALL (or the value is clobbered).
-	refbehind Bits
-	refahead  Bits
-
-	// calahead/calbehind are similar, but for variables in
-	// instructions that are reachable after hitting at least one
-	// CALL.
-	calbehind Bits
-	calahead  Bits
-
-	regdiff Bits
-	act     Bits
-	regu    uint64 // register used bitmap
-}
-
-// A Rgn represents a single regopt variable over a region of code
-// where a register could potentially be dedicated to that variable.
-// The code encompassed by a Rgn is defined by the flow graph,
-// starting at enter, flood-filling forward while varno is refahead
-// and backward while varno is refbehind, and following branches.
-// A single variable may be represented by multiple disjoint Rgns and
-// each Rgn may choose a different register for that variable.
-// Registers are allocated to regions greedily in order of descending
-// cost.
-type Rgn struct {
-	enter *Flow
-	cost  int16
-	varno int16
-	regno int16
-}
-
-// The Plan 9 C compilers used a limit of 600 regions,
-// but the yacc-generated parser in y.go has 3100 regions.
-// We set MaxRgn large enough to handle that.
-// There's not a huge cost to having too many regions:
-// the main processing traces the live area for each variable,
-// which is limited by the number of variables times the area,
-// not the raw region count. If there are many regions, they
-// are almost certainly small and easy to trace.
-// The only operation that scales with region count is the
-// sorting by cost, which uses sort.Sort and is therefore
-// guaranteed n log n.
-const MaxRgn = 6000
-
-var (
-	region  []Rgn
-	nregion int
-)
-
-type rcmp []Rgn
-
-func (x rcmp) Len() int {
-	return len(x)
-}
-
-func (x rcmp) Swap(i, j int) {
-	x[i], x[j] = x[j], x[i]
-}
-
-func (x rcmp) Less(i, j int) bool {
-	p1 := &x[i]
-	p2 := &x[j]
-	if p1.cost != p2.cost {
-		return int(p2.cost)-int(p1.cost) < 0
-	}
-	if p1.varno != p2.varno {
-		return int(p2.varno)-int(p1.varno) < 0
-	}
-	if p1.enter != p2.enter {
-		return int(p2.enter.Id-p1.enter.Id) < 0
-	}
-	return false
-}
-
-func setaddrs(bit Bits) {
-	var i int
-	var n int
-	var v *Var
-	var node *Node
-
-	for bany(&bit) {
-		// convert each bit to a variable
-		i = bnum(&bit)
-
-		node = vars[i].node
-		n = int(vars[i].name)
-		biclr(&bit, uint(i))
-
-		// disable all pieces of that variable
-		for i = 0; i < nvar; i++ {
-			v = &vars[i]
-			if v.node == node && int(v.name) == n {
-				v.addr = 2
-			}
-		}
-	}
-}
-
-var regnodes [64]*Node
-
-func walkvardef(n *Node, f *Flow, active int) {
-	var f1 *Flow
-	var bn int
-	var v *Var
-
-	for f1 = f; f1 != nil; f1 = f1.S1 {
-		if f1.Active == int32(active) {
-			break
-		}
-		f1.Active = int32(active)
-		if f1.Prog.As == obj.AVARKILL && f1.Prog.To.Node == n {
-			break
-		}
-		for v, _ = n.Opt().(*Var); v != nil; v = v.nextinnode {
-			bn = v.id
-			biset(&(f1.Data.(*Reg)).act, uint(bn))
-		}
-
-		if f1.Prog.As == obj.ACALL {
-			break
-		}
-	}
-
-	for f2 := f; f2 != f1; f2 = f2.S1 {
-		if f2.S2 != nil {
-			walkvardef(n, f2.S2, active)
-		}
-	}
-}
-
-// add mov b,rn
-// just after r
-func addmove(r *Flow, bn int, rn int, f int) {
-	p1 := Ctxt.NewProg()
-	Clearp(p1)
-	p1.Pc = 9999
-
-	p := r.Prog
-	p1.Link = p.Link
-	p.Link = p1
-	p1.Lineno = p.Lineno
-
-	v := &vars[bn]
-
-	a := &p1.To
-	a.Offset = v.offset
-	a.Etype = uint8(v.etype)
-	a.Type = obj.TYPE_MEM
-	a.Name = v.name
-	a.Node = v.node
-	a.Sym = Linksym(v.node.Sym)
-
-	/* NOTE(rsc): 9g did
-	if(a->etype == TARRAY)
-		a->type = TYPE_ADDR;
-	else if(a->sym == nil)
-		a->type = TYPE_CONST;
-	*/
-	p1.As = Thearch.Optoas(OAS, Types[uint8(v.etype)])
-
-	// TODO(rsc): Remove special case here.
-	if Thearch.LinkArch.InFamily(sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64) && v.etype == TBOOL {
-		p1.As = Thearch.Optoas(OAS, Types[TUINT8])
-	}
-	p1.From.Type = obj.TYPE_REG
-	p1.From.Reg = int16(rn)
-	p1.From.Name = obj.NAME_NONE
-	if f == 0 {
-		p1.From = *a
-		*a = obj.Addr{}
-		a.Type = obj.TYPE_REG
-		a.Reg = int16(rn)
-	}
-
-	if Debug['R'] != 0 && Debug['v'] != 0 {
-		fmt.Printf("%v ===add=== %v\n", p, p1)
-	}
-	Ostats.Nspill++
-}
-
-func overlap_reg(o1 int64, w1 int, o2 int64, w2 int) bool {
-	t1 := o1 + int64(w1)
-	t2 := o2 + int64(w2)
-
-	if t1 <= o2 || t2 <= o1 {
-		return false
-	}
-
-	return true
-}
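-
-// For example, overlap_reg(0, 8, 4, 8) reports true: the byte ranges
-// [0,8) and [4,12) intersect, so the two words must be treated as one.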
-
-func mkvar(f *Flow, a *obj.Addr) Bits {
-	// mark registers used
-	if a.Type == obj.TYPE_NONE {
-		return zbits
-	}
-
-	r := f.Data.(*Reg)
-	r.use1.b[0] |= Thearch.Doregbits(int(a.Index)) // TODO: Use RtoB
-
-	var n int
-	switch a.Type {
-	default:
-		regu := Thearch.Doregbits(int(a.Reg)) | Thearch.RtoB(int(a.Reg)) // TODO: Use RtoB
-		if regu == 0 {
-			return zbits
-		}
-		bit := zbits
-		bit.b[0] = regu
-		return bit
-
-		// TODO(rsc): Remove special case here.
-	case obj.TYPE_ADDR:
-		var bit Bits
-		if Thearch.LinkArch.InFamily(sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64) {
-			goto memcase
-		}
-		a.Type = obj.TYPE_MEM
-		bit = mkvar(f, a)
-		setaddrs(bit)
-		a.Type = obj.TYPE_ADDR
-		Ostats.Naddr++
-		return zbits
-
-	memcase:
-		fallthrough
-
-	case obj.TYPE_MEM:
-		if r != nil {
-			r.use1.b[0] |= Thearch.RtoB(int(a.Reg))
-		}
-
-		/* NOTE: 5g did
-		if(r->f.prog->scond & (C_PBIT|C_WBIT))
-			r->set.b[0] |= RtoB(a->reg);
-		*/
-		switch a.Name {
-		default:
-			// Note: This case handles NAME_EXTERN and NAME_STATIC.
-			// We treat these as requiring eager writes to memory, due to
-			// the possibility of a fault handler looking at them, so there is
-			// not much point in registerizing the loads.
-			// If we later choose the set of candidate variables from a
-			// larger list, these cases could be deprioritized instead of
-			// removed entirely.
-			return zbits
-
-		case obj.NAME_PARAM,
-			obj.NAME_AUTO:
-			n = int(a.Name)
-		}
-	}
-
-	node, _ := a.Node.(*Node)
-	if node == nil || node.Op != ONAME || node.Orig == nil {
-		return zbits
-	}
-	node = node.Orig
-	if node.Orig != node {
-		Fatalf("%v: bad node", Ctxt.Dconv(a))
-	}
-	if node.Sym == nil || node.Sym.Name[0] == '.' {
-		return zbits
-	}
-	et := EType(a.Etype)
-	o := a.Offset
-	w := a.Width
-	if w < 0 {
-		Fatalf("bad width %d for %v", w, Ctxt.Dconv(a))
-	}
-
-	flag := 0
-	var v *Var
-	for i := 0; i < nvar; i++ {
-		v = &vars[i]
-		if v.node == node && int(v.name) == n {
-			if v.offset == o {
-				if v.etype == et {
-					if int64(v.width) == w {
-						// TODO(rsc): Remove special case for arm here.
-						if flag == 0 || Thearch.LinkArch.Family != sys.ARM {
-							return blsh(uint(i))
-						}
-					}
-				}
-			}
-
-			// if they overlap, disable both
-			if overlap_reg(v.offset, v.width, o, int(w)) {
-				//				print("disable overlap %s %d %d %d %d, %E != %E\n", s->name, v->offset, v->width, o, w, v->etype, et);
-				v.addr = 1
-
-				flag = 1
-			}
-		}
-	}
-
-	switch et {
-	case 0, TFUNC:
-		return zbits
-	}
-
-	if nvar >= NVAR {
-		if Debug['w'] > 1 && node != nil {
-			Fatalf("variable not optimized: %#v", node)
-		}
-		if Debug['v'] > 0 {
-			Warn("variable not optimized: %#v", node)
-		}
-
-		// If we're not tracking a word in a variable, mark the rest as
-		// having its address taken, so that we keep the whole thing
-		// live at all calls. Otherwise we might optimize away part of
-		// a variable but not all of it.
-		var v *Var
-		for i := 0; i < nvar; i++ {
-			v = &vars[i]
-			if v.node == node {
-				v.addr = 1
-			}
-		}
-
-		return zbits
-	}
-
-	i := nvar
-	nvar++
-	v = &vars[i]
-	v.id = i
-	v.offset = o
-	v.name = int8(n)
-	v.etype = et
-	v.width = int(w)
-	v.addr = int8(flag) // funny punning
-	v.node = node
-
-	// node.Opt() is the head of a linked list
-	// of Vars within the given Node, so that
-	// we can start at a Var and find all the other
-	// Vars in the same Go variable.
-	v.nextinnode, _ = node.Opt().(*Var)
-
-	node.SetOpt(v)
-
-	bit := blsh(uint(i))
-	if n == obj.NAME_EXTERN || n == obj.NAME_STATIC {
-		for z := 0; z < BITS; z++ {
-			externs.b[z] |= bit.b[z]
-		}
-	}
-	if n == obj.NAME_PARAM {
-		for z := 0; z < BITS; z++ {
-			params.b[z] |= bit.b[z]
-		}
-	}
-
-	if node.Class == PPARAM {
-		for z := 0; z < BITS; z++ {
-			ivar.b[z] |= bit.b[z]
-		}
-	}
-	if node.Class == PPARAMOUT {
-		for z := 0; z < BITS; z++ {
-			ovar.b[z] |= bit.b[z]
-		}
-	}
-
-	// Treat values with their address taken as live at calls,
-	// because the garbage collector's liveness analysis in plive.go does.
-	// These must be consistent or else we will elide stores and the garbage
-	// collector will see uninitialized data.
-	// The typical case where our own analysis is out of sync is when the
-	// node appears to have its address taken but that code doesn't actually
-	// get generated and therefore doesn't show up as an address being
-	// taken when we analyze the instruction stream.
-	// One instance of this case is when a closure uses the same name as
-	// an outer variable for one of its own variables declared with :=.
-	// The parser flags the outer variable as possibly shared, and therefore
-	// sets addrtaken, even though it ends up not being actually shared.
-	// If we were better about _ elision, _ = &x would suffice too.
-	// The broader := in a closure problem is mentioned in a comment in
-	// closure.go:/^typecheckclosure and dcl.go:/^oldname.
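-	//
-	// Source-level illustration of that closure case (hypothetical):
-	//
-	//	x := f()
-	//	func() {
-	//		x := g() // inner x declared with :=, same name as the outer x
-	//		_ = x
-	//	}()
-	//
-	// The outer x is flagged as possibly shared, so it gets Addrtaken
-	// even though the closure never actually shares it.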
-	if node.Addrtaken {
-		v.addr = 1
-	}
-
-	// Disable registerization for globals, because:
-	// (1) we might panic at any time and we want the recovery code
-	// to see the latest values (issue 1304).
-	// (2) we don't know what pointers might point at them and we want
-	// loads via those pointers to see updated values and vice versa (issue 7995).
-	//
-	// Disable registerization for results if using defer, because the deferred func
-	// might recover and return, causing the current values to be used.
-	if node.Class == PEXTERN || (hasdefer && node.Class == PPARAMOUT) {
-		v.addr = 1
-	}
-
-	if Debug['R'] != 0 {
-		fmt.Printf("bit=%2d et=%v w=%d+%d %#v %v flag=%d\n", i, et, o, w, node, Ctxt.Dconv(a), v.addr)
-	}
-	Ostats.Nvar++
-
-	return bit
-}
-
-var change int
-
-func prop(f *Flow, ref Bits, cal Bits) {
-	var f1 *Flow
-	var r1 *Reg
-	var z int
-	var i int
-	var v *Var
-	var v1 *Var
-
-	for f1 = f; f1 != nil; f1 = f1.P1 {
-		r1 = f1.Data.(*Reg)
-		for z = 0; z < BITS; z++ {
-			ref.b[z] |= r1.refahead.b[z]
-			if ref.b[z] != r1.refahead.b[z] {
-				r1.refahead.b[z] = ref.b[z]
-				change = 1
-			}
-
-			cal.b[z] |= r1.calahead.b[z]
-			if cal.b[z] != r1.calahead.b[z] {
-				r1.calahead.b[z] = cal.b[z]
-				change = 1
-			}
-		}
-
-		switch f1.Prog.As {
-		case obj.ACALL:
-			if Noreturn(f1.Prog) {
-				break
-			}
-
-			// Mark all input variables (ivar) as used, because that's what the
-			// liveness bitmaps say. The liveness bitmaps say that so that a
-			// panic will not show stale values in the parameter dump.
-			// Mark variables with a recent VARDEF (r1->act) as used,
-			// so that the optimizer flushes initializations to memory,
-			// so that if a garbage collection happens during this CALL,
-			// the collector will see initialized memory. Again this is to
-			// match what the liveness bitmaps say.
-			for z = 0; z < BITS; z++ {
-				cal.b[z] |= ref.b[z] | externs.b[z] | ivar.b[z] | r1.act.b[z]
-				ref.b[z] = 0
-			}
-
-			// cal.b is the current approximation of what's live across the call.
-			// Every bit in cal.b is a single stack word. For each such word,
-			// find all the other tracked stack words in the same Go variable
-			// (struct/slice/string/interface) and mark them live too.
-			// This is necessary because the liveness analysis for the garbage
-			// collector works at variable granularity, not at word granularity.
-			// It is fundamental for slice/string/interface: the garbage collector
-			// needs the whole value, not just some of the words, in order to
-			// interpret the other bits correctly. Specifically, slice needs a consistent
-			// ptr and cap, string needs a consistent ptr and len, and interface
-			// needs a consistent type word and data word.
-			for z = 0; z < BITS; z++ {
-				if cal.b[z] == 0 {
-					continue
-				}
-				for i = 0; i < 64; i++ {
-					if z*64+i >= nvar || (cal.b[z]>>uint(i))&1 == 0 {
-						continue
-					}
-					v = &vars[z*64+i]
-					if v.node.Opt() == nil { // v represents fixed register, not Go variable
-						continue
-					}
-
-					// v.node.Opt() is the head of a linked list of Vars
-					// corresponding to tracked words from the Go variable v.node.
-					// Walk the list and set all the bits.
-					// For a large struct this could end up being quadratic:
-					// after the first setting, the outer loop (for z, i) would see a 1 bit
-					// for all of the remaining words in the struct, and for each such
-					// word would go through and turn on all the bits again.
-					// To avoid the quadratic behavior, we only turn on the bits if
-					// v is the head of the list or if the head's bit is not yet turned on.
-					// This will set the bits at most twice, keeping the overall loop linear.
-					v1, _ = v.node.Opt().(*Var)
-
-					if v == v1 || !btest(&cal, uint(v1.id)) {
-						for ; v1 != nil; v1 = v1.nextinnode {
-							biset(&cal, uint(v1.id))
-						}
-					}
-				}
-			}
-
-		case obj.ATEXT:
-			for z = 0; z < BITS; z++ {
-				cal.b[z] = 0
-				ref.b[z] = 0
-			}
-
-		case obj.ARET:
-			for z = 0; z < BITS; z++ {
-				cal.b[z] = externs.b[z] | ovar.b[z]
-				ref.b[z] = 0
-			}
-		}
-
-		for z = 0; z < BITS; z++ {
-			ref.b[z] = ref.b[z]&^r1.set.b[z] | r1.use1.b[z] | r1.use2.b[z]
-			cal.b[z] &^= (r1.set.b[z] | r1.use1.b[z] | r1.use2.b[z])
-			r1.refbehind.b[z] = ref.b[z]
-			r1.calbehind.b[z] = cal.b[z]
-		}
-
-		if f1.Active != 0 {
-			break
-		}
-		f1.Active = 1
-	}
-
-	var r *Reg
-	var f2 *Flow
-	for ; f != f1; f = f.P1 {
-		r = f.Data.(*Reg)
-		for f2 = f.P2; f2 != nil; f2 = f2.P2link {
-			prop(f2, r.refbehind, r.calbehind)
-		}
-	}
-}
-
-func synch(f *Flow, dif Bits) {
-	var r1 *Reg
-	var z int
-
-	for f1 := f; f1 != nil; f1 = f1.S1 {
-		r1 = f1.Data.(*Reg)
-		for z = 0; z < BITS; z++ {
-			dif.b[z] = dif.b[z]&^(^r1.refbehind.b[z]&r1.refahead.b[z]) | r1.set.b[z] | r1.regdiff.b[z]
-			if dif.b[z] != r1.regdiff.b[z] {
-				r1.regdiff.b[z] = dif.b[z]
-				change = 1
-			}
-		}
-
-		if f1.Active != 0 {
-			break
-		}
-		f1.Active = 1
-		for z = 0; z < BITS; z++ {
-			dif.b[z] &^= (^r1.calbehind.b[z] & r1.calahead.b[z])
-		}
-		if f1.S2 != nil {
-			synch(f1.S2, dif)
-		}
-	}
-}
-
-func allreg(b uint64, r *Rgn) uint64 {
-	v := &vars[r.varno]
-	r.regno = 0
-	switch v.etype {
-	default:
-		Fatalf("unknown etype %d/%v", Bitno(b), v.etype)
-
-	case TINT8,
-		TUINT8,
-		TINT16,
-		TUINT16,
-		TINT32,
-		TUINT32,
-		TINT64,
-		TUINT64,
-		TINT,
-		TUINT,
-		TUINTPTR,
-		TBOOL,
-		TPTR32,
-		TPTR64:
-		i := Thearch.BtoR(^b)
-		if i != 0 && r.cost > 0 {
-			r.regno = int16(i)
-			return Thearch.RtoB(i)
-		}
-
-	case TFLOAT32, TFLOAT64:
-		i := Thearch.BtoF(^b)
-		if i != 0 && r.cost > 0 {
-			r.regno = int16(i)
-			return Thearch.FtoB(i)
-		}
-	}
-
-	return 0
-}
-
-// LOAD reports the variables (in bit word z) that are referenced ahead
-// of r but not behind it, i.e. values that must be loaded at r.
-func LOAD(r *Reg, z int) uint64 {
-	return ^r.refbehind.b[z] & r.refahead.b[z]
-}
-
-// STORE reports the variables (in bit word z) that are live across a
-// call ahead of r but not behind it, i.e. values whose registerized
-// copy must be flushed back to memory at r.
-func STORE(r *Reg, z int) uint64 {
-	return ^r.calbehind.b[z] & r.calahead.b[z]
-}
-
-// Cost parameters
-const (
-	CLOAD = 5 // cost of load
-	CREF  = 5 // cost of reference if not registerized
-	LOOP  = 3 // loop execution count (applied in popt.go)
-)
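-
-// Rough, illustrative cost arithmetic: a variable read twice inside a
-// single loop (where f.Loop == 1+LOOP == 4) and loaded once on entry
-// (where f.Loop == 1) scores change = 2*CREF*4 - CLOAD*1 = 35, so
-// paint1 reports a profitable region and it becomes a register candidate.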
-
-func paint1(f *Flow, bn int) {
-	z := bn / 64
-	bb := uint64(1 << uint(bn%64))
-	r := f.Data.(*Reg)
-	if r.act.b[z]&bb != 0 {
-		return
-	}
-	var f1 *Flow
-	var r1 *Reg
-	for {
-		if r.refbehind.b[z]&bb == 0 {
-			break
-		}
-		f1 = f.P1
-		if f1 == nil {
-			break
-		}
-		r1 = f1.Data.(*Reg)
-		if r1.refahead.b[z]&bb == 0 {
-			break
-		}
-		if r1.act.b[z]&bb != 0 {
-			break
-		}
-		f = f1
-		r = r1
-	}
-
-	if LOAD(r, z)&^(r.set.b[z]&^(r.use1.b[z]|r.use2.b[z]))&bb != 0 {
-		change -= CLOAD * int(f.Loop)
-	}
-
-	for {
-		r.act.b[z] |= bb
-
-		if f.Prog.As != obj.ANOP { // don't give credit for NOPs
-			if r.use1.b[z]&bb != 0 {
-				change += CREF * int(f.Loop)
-			}
-			if (r.use2.b[z]|r.set.b[z])&bb != 0 {
-				change += CREF * int(f.Loop)
-			}
-		}
-
-		if STORE(r, z)&r.regdiff.b[z]&bb != 0 {
-			change -= CLOAD * int(f.Loop)
-		}
-
-		if r.refbehind.b[z]&bb != 0 {
-			for f1 = f.P2; f1 != nil; f1 = f1.P2link {
-				if (f1.Data.(*Reg)).refahead.b[z]&bb != 0 {
-					paint1(f1, bn)
-				}
-			}
-		}
-
-		if r.refahead.b[z]&bb == 0 {
-			break
-		}
-		f1 = f.S2
-		if f1 != nil {
-			if (f1.Data.(*Reg)).refbehind.b[z]&bb != 0 {
-				paint1(f1, bn)
-			}
-		}
-		f = f.S1
-		if f == nil {
-			break
-		}
-		r = f.Data.(*Reg)
-		if r.act.b[z]&bb != 0 {
-			break
-		}
-		if r.refbehind.b[z]&bb == 0 {
-			break
-		}
-	}
-}
-
-func paint2(f *Flow, bn int, depth int) uint64 {
-	z := bn / 64
-	bb := uint64(1 << uint(bn%64))
-	vreg := regbits
-	r := f.Data.(*Reg)
-	if r.act.b[z]&bb == 0 {
-		return vreg
-	}
-	var r1 *Reg
-	var f1 *Flow
-	for {
-		if r.refbehind.b[z]&bb == 0 {
-			break
-		}
-		f1 = f.P1
-		if f1 == nil {
-			break
-		}
-		r1 = f1.Data.(*Reg)
-		if r1.refahead.b[z]&bb == 0 {
-			break
-		}
-		if r1.act.b[z]&bb == 0 {
-			break
-		}
-		f = f1
-		r = r1
-	}
-
-	for {
-		if Debug['R'] != 0 && Debug['v'] != 0 {
-			fmt.Printf("  paint2 %d %v\n", depth, f.Prog)
-		}
-
-		r.act.b[z] &^= bb
-
-		vreg |= r.regu
-
-		if r.refbehind.b[z]&bb != 0 {
-			for f1 = f.P2; f1 != nil; f1 = f1.P2link {
-				if (f1.Data.(*Reg)).refahead.b[z]&bb != 0 {
-					vreg |= paint2(f1, bn, depth+1)
-				}
-			}
-		}
-
-		if r.refahead.b[z]&bb == 0 {
-			break
-		}
-		f1 = f.S2
-		if f1 != nil {
-			if (f1.Data.(*Reg)).refbehind.b[z]&bb != 0 {
-				vreg |= paint2(f1, bn, depth+1)
-			}
-		}
-		f = f.S1
-		if f == nil {
-			break
-		}
-		r = f.Data.(*Reg)
-		if r.act.b[z]&bb == 0 {
-			break
-		}
-		if r.refbehind.b[z]&bb == 0 {
-			break
-		}
-	}
-
-	return vreg
-}
-
-func paint3(f *Flow, bn int, rb uint64, rn int) {
-	z := bn / 64
-	bb := uint64(1 << uint(bn%64))
-	r := f.Data.(*Reg)
-	if r.act.b[z]&bb != 0 {
-		return
-	}
-	var r1 *Reg
-	var f1 *Flow
-	for {
-		if r.refbehind.b[z]&bb == 0 {
-			break
-		}
-		f1 = f.P1
-		if f1 == nil {
-			break
-		}
-		r1 = f1.Data.(*Reg)
-		if r1.refahead.b[z]&bb == 0 {
-			break
-		}
-		if r1.act.b[z]&bb != 0 {
-			break
-		}
-		f = f1
-		r = r1
-	}
-
-	if LOAD(r, z)&^(r.set.b[z]&^(r.use1.b[z]|r.use2.b[z]))&bb != 0 {
-		addmove(f, bn, rn, 0)
-	}
-	var p *obj.Prog
-	for {
-		r.act.b[z] |= bb
-		p = f.Prog
-
-		if r.use1.b[z]&bb != 0 {
-			if Debug['R'] != 0 && Debug['v'] != 0 {
-				fmt.Printf("%v", p)
-			}
-			addreg(&p.From, rn)
-			if Debug['R'] != 0 && Debug['v'] != 0 {
-				fmt.Printf(" ===change== %v\n", p)
-			}
-		}
-
-		if (r.use2.b[z]|r.set.b[z])&bb != 0 {
-			if Debug['R'] != 0 && Debug['v'] != 0 {
-				fmt.Printf("%v", p)
-			}
-			addreg(&p.To, rn)
-			if Debug['R'] != 0 && Debug['v'] != 0 {
-				fmt.Printf(" ===change== %v\n", p)
-			}
-		}
-
-		if STORE(r, z)&r.regdiff.b[z]&bb != 0 {
-			addmove(f, bn, rn, 1)
-		}
-		r.regu |= rb
-
-		if r.refbehind.b[z]&bb != 0 {
-			for f1 = f.P2; f1 != nil; f1 = f1.P2link {
-				if (f1.Data.(*Reg)).refahead.b[z]&bb != 0 {
-					paint3(f1, bn, rb, rn)
-				}
-			}
-		}
-
-		if r.refahead.b[z]&bb == 0 {
-			break
-		}
-		f1 = f.S2
-		if f1 != nil {
-			if (f1.Data.(*Reg)).refbehind.b[z]&bb != 0 {
-				paint3(f1, bn, rb, rn)
-			}
-		}
-		f = f.S1
-		if f == nil {
-			break
-		}
-		r = f.Data.(*Reg)
-		if r.act.b[z]&bb != 0 {
-			break
-		}
-		if r.refbehind.b[z]&bb == 0 {
-			break
-		}
-	}
-}
-
-func addreg(a *obj.Addr, rn int) {
-	a.Sym = nil
-	a.Node = nil
-	a.Offset = 0
-	a.Type = obj.TYPE_REG
-	a.Reg = int16(rn)
-	a.Name = 0
-
-	Ostats.Ncvtreg++
-}
-
-func dumpone(f *Flow, isreg int) {
-	fmt.Printf("%d:%v", f.Loop, f.Prog)
-	if isreg != 0 {
-		r := f.Data.(*Reg)
-		var bit Bits
-		for z := 0; z < BITS; z++ {
-			bit.b[z] = r.set.b[z] | r.use1.b[z] | r.use2.b[z] | r.refbehind.b[z] | r.refahead.b[z] | r.calbehind.b[z] | r.calahead.b[z] | r.regdiff.b[z] | r.act.b[z] | 0
-		}
-		if bany(&bit) {
-			fmt.Printf("\t")
-			if bany(&r.set) {
-				fmt.Printf(" s:%v", &r.set)
-			}
-			if bany(&r.use1) {
-				fmt.Printf(" u1:%v", &r.use1)
-			}
-			if bany(&r.use2) {
-				fmt.Printf(" u2:%v", &r.use2)
-			}
-			if bany(&r.refbehind) {
-				fmt.Printf(" rb:%v ", &r.refbehind)
-			}
-			if bany(&r.refahead) {
-				fmt.Printf(" ra:%v ", &r.refahead)
-			}
-			if bany(&r.calbehind) {
-				fmt.Printf(" cb:%v ", &r.calbehind)
-			}
-			if bany(&r.calahead) {
-				fmt.Printf(" ca:%v ", &r.calahead)
-			}
-			if bany(&r.regdiff) {
-				fmt.Printf(" d:%v ", &r.regdiff)
-			}
-			if bany(&r.act) {
-				fmt.Printf(" a:%v ", &r.act)
-			}
-		}
-	}
-
-	fmt.Printf("\n")
-}
-
-func Dumpit(str string, r0 *Flow, isreg int) {
-	var r1 *Flow
-
-	fmt.Printf("\n%s\n", str)
-	for r := r0; r != nil; r = r.Link {
-		dumpone(r, isreg)
-		r1 = r.P2
-		if r1 != nil {
-			fmt.Printf("\tpred:")
-			for ; r1 != nil; r1 = r1.P2link {
-				fmt.Printf(" %.4d", uint(int(r1.Prog.Pc)))
-			}
-			if r.P1 != nil {
-				fmt.Printf(" (and %.4d)", uint(int(r.P1.Prog.Pc)))
-			} else {
-				fmt.Printf(" (only)")
-			}
-			fmt.Printf("\n")
-		}
-
-		// Print successors if it's not just the next one
-		if r.S1 != r.Link || r.S2 != nil {
-			fmt.Printf("\tsucc:")
-			if r.S1 != nil {
-				fmt.Printf(" %.4d", uint(int(r.S1.Prog.Pc)))
-			}
-			if r.S2 != nil {
-				fmt.Printf(" %.4d", uint(int(r.S2.Prog.Pc)))
-			}
-			fmt.Printf("\n")
-		}
-	}
-}
-
-// regopt allocates machine registers to profitable regions of variable
-// use and rewrites the function's instructions to use them.
-func regopt(firstp *obj.Prog) {
-	mergetemp(firstp)
-
-	// control flow is more complicated in generated Go code
-	// than in generated C code. Define pseudo-variables for
-	// registers, so we have complete register usage information.
-	var nreg int
-	regnames := Thearch.Regnames(&nreg)
-
-	nvar = nreg
-	for i := 0; i < nreg; i++ {
-		vars[i] = Var{}
-	}
-	for i := 0; i < nreg; i++ {
-		if regnodes[i] == nil {
-			regnodes[i] = newname(Lookup(regnames[i]))
-		}
-		vars[i].node = regnodes[i]
-	}
-
-	regbits = Thearch.Excludedregs()
-	externs = zbits
-	params = zbits
-	consts = zbits
-	addrs = zbits
-	ivar = zbits
-	ovar = zbits
-
-	// pass 1
-	// build aux data structure
-	// allocate pcs
-	// find use and set of variables
-	g := Flowstart(firstp, func() interface{} { return new(Reg) })
-	if g == nil {
-		for i := 0; i < nvar; i++ {
-			vars[i].node.SetOpt(nil)
-		}
-		return
-	}
-
-	firstf := g.Start
-
-	for f := firstf; f != nil; f = f.Link {
-		p := f.Prog
-		// AVARLIVE must be considered a use, do not skip it.
-		// Otherwise the variable will be optimized away,
-		// and the whole point of AVARLIVE is to keep it on the stack.
-		if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
-			continue
-		}
-
-		// Avoid making variables for direct-called functions.
-		if p.As == obj.ACALL && p.To.Type == obj.TYPE_MEM && p.To.Name == obj.NAME_EXTERN {
-			continue
-		}
-
-		// from vs to doesn't matter for registers.
-		r := f.Data.(*Reg)
-		r.use1.b[0] |= p.Info.Reguse | p.Info.Regindex
-		r.set.b[0] |= p.Info.Regset
-
-		bit := mkvar(f, &p.From)
-		if bany(&bit) {
-			if p.Info.Flags&LeftAddr != 0 {
-				setaddrs(bit)
-			}
-			if p.Info.Flags&LeftRead != 0 {
-				for z := 0; z < BITS; z++ {
-					r.use1.b[z] |= bit.b[z]
-				}
-			}
-			if p.Info.Flags&LeftWrite != 0 {
-				for z := 0; z < BITS; z++ {
-					r.set.b[z] |= bit.b[z]
-				}
-			}
-		}
-
-		// Compute used register for reg
-		if p.Info.Flags&RegRead != 0 {
-			r.use1.b[0] |= Thearch.RtoB(int(p.Reg))
-		}
-
-		// Currently we never generate three register forms.
-		// If we do, this will need to change.
-		if p.From3Type() != obj.TYPE_NONE && p.From3Type() != obj.TYPE_CONST {
-			Fatalf("regopt not implemented for from3")
-		}
-
-		bit = mkvar(f, &p.To)
-		if bany(&bit) {
-			if p.Info.Flags&RightAddr != 0 {
-				setaddrs(bit)
-			}
-			if p.Info.Flags&RightRead != 0 {
-				for z := 0; z < BITS; z++ {
-					r.use2.b[z] |= bit.b[z]
-				}
-			}
-			if p.Info.Flags&RightWrite != 0 {
-				for z := 0; z < BITS; z++ {
-					r.set.b[z] |= bit.b[z]
-				}
-			}
-		}
-	}
-
-	for i := 0; i < nvar; i++ {
-		v := &vars[i]
-		if v.addr != 0 {
-			bit := blsh(uint(i))
-			for z := 0; z < BITS; z++ {
-				addrs.b[z] |= bit.b[z]
-			}
-		}
-
-		if Debug['R'] != 0 && Debug['v'] != 0 {
-			fmt.Printf("bit=%2d addr=%d et=%v w=%-2d s=%v + %d\n", i, v.addr, v.etype, v.width, v.node, v.offset)
-		}
-	}
-
-	if Debug['R'] != 0 && Debug['v'] != 0 {
-		Dumpit("pass1", firstf, 1)
-	}
-
-	// pass 2
-	// find looping structure
-	flowrpo(g)
-
-	if Debug['R'] != 0 && Debug['v'] != 0 {
-		Dumpit("pass2", firstf, 1)
-	}
-
-	// pass 2.5
-	// iterate propagating fat vardef covering forward
-	// r->act records vars with a VARDEF since the last CALL.
-	// (r->act will be reused in pass 5 for something else,
-	// but we'll be done with it by then.)
-	active := 0
-
-	for f := firstf; f != nil; f = f.Link {
-		f.Active = 0
-		r := f.Data.(*Reg)
-		r.act = zbits
-	}
-
-	for f := firstf; f != nil; f = f.Link {
-		p := f.Prog
-		if p.As == obj.AVARDEF && Isfat(((p.To.Node).(*Node)).Type) && ((p.To.Node).(*Node)).Opt() != nil {
-			active++
-			walkvardef(p.To.Node.(*Node), f, active)
-		}
-	}
-
-	// pass 3
-	// iterate propagating usage
-	// 	back until flow graph is complete
-	var f1 *Flow
-	var i int
-	var f *Flow
-loop1:
-	change = 0
-
-	for f = firstf; f != nil; f = f.Link {
-		f.Active = 0
-	}
-	for f = firstf; f != nil; f = f.Link {
-		if f.Prog.As == obj.ARET {
-			prop(f, zbits, zbits)
-		}
-	}
-
-	// pick up unreachable code
-loop11:
-	i = 0
-
-	for f = firstf; f != nil; f = f1 {
-		f1 = f.Link
-		if f1 != nil && f1.Active != 0 && f.Active == 0 {
-			prop(f, zbits, zbits)
-			i = 1
-		}
-	}
-
-	if i != 0 {
-		goto loop11
-	}
-	if change != 0 {
-		goto loop1
-	}
-
-	if Debug['R'] != 0 && Debug['v'] != 0 {
-		Dumpit("pass3", firstf, 1)
-	}
-
-	// pass 4
-	// iterate propagating register/variable synchrony
-	// 	forward until graph is complete
-loop2:
-	change = 0
-
-	for f = firstf; f != nil; f = f.Link {
-		f.Active = 0
-	}
-	synch(firstf, zbits)
-	if change != 0 {
-		goto loop2
-	}
-
-	if Debug['R'] != 0 && Debug['v'] != 0 {
-		Dumpit("pass4", firstf, 1)
-	}
-
-	// pass 4.5
-	// move register pseudo-variables into regu.
-	mask := uint64((1 << uint(nreg)) - 1)
-	for f := firstf; f != nil; f = f.Link {
-		r := f.Data.(*Reg)
-		r.regu = (r.refbehind.b[0] | r.set.b[0]) & mask
-		r.set.b[0] &^= mask
-		r.use1.b[0] &^= mask
-		r.use2.b[0] &^= mask
-		r.refbehind.b[0] &^= mask
-		r.refahead.b[0] &^= mask
-		r.calbehind.b[0] &^= mask
-		r.calahead.b[0] &^= mask
-		r.regdiff.b[0] &^= mask
-		r.act.b[0] &^= mask
-	}
-
-	if Debug['R'] != 0 && Debug['v'] != 0 {
-		Dumpit("pass4.5", firstf, 1)
-	}
-
-	// pass 5
-	// isolate regions
-	// calculate costs (paint1)
-	var bit Bits
-	if f := firstf; f != nil {
-		r := f.Data.(*Reg)
-		for z := 0; z < BITS; z++ {
-			bit.b[z] = (r.refahead.b[z] | r.calahead.b[z]) &^ (externs.b[z] | params.b[z] | addrs.b[z] | consts.b[z])
-		}
-		if bany(&bit) && !f.Refset {
-			// should never happen - all variables are preset
-			if Debug['w'] != 0 {
-				fmt.Printf("%v: used and not set: %v\n", f.Prog.Line(), &bit)
-			}
-			f.Refset = true
-		}
-	}
-
-	for f := firstf; f != nil; f = f.Link {
-		(f.Data.(*Reg)).act = zbits
-	}
-	nregion = 0
-	region = region[:0]
-	for f := firstf; f != nil; f = f.Link {
-		r := f.Data.(*Reg)
-		for z := 0; z < BITS; z++ {
-			bit.b[z] = r.set.b[z] &^ (r.refahead.b[z] | r.calahead.b[z] | addrs.b[z])
-		}
-		if bany(&bit) && !f.Refset {
-			if Debug['w'] != 0 {
-				fmt.Printf("%v: set and not used: %v\n", f.Prog.Line(), &bit)
-			}
-			f.Refset = true
-			Thearch.Excise(f)
-		}
-
-		for z := 0; z < BITS; z++ {
-			bit.b[z] = LOAD(r, z) &^ (r.act.b[z] | addrs.b[z])
-		}
-		for bany(&bit) {
-			i = bnum(&bit)
-			change = 0
-			paint1(f, i)
-			biclr(&bit, uint(i))
-			if change <= 0 {
-				continue
-			}
-			if nregion >= MaxRgn {
-				nregion++
-				continue
-			}
-
-			region = append(region, Rgn{
-				enter: f,
-				cost:  int16(change),
-				varno: int16(i),
-			})
-			nregion++
-		}
-	}
-
-	if false && Debug['v'] != 0 && strings.Contains(Curfn.Func.Nname.Sym.Name, "Parse") {
-		Warn("regions: %d\n", nregion)
-	}
-	if nregion >= MaxRgn {
-		if Debug['v'] != 0 {
-			Warn("too many regions: %d\n", nregion)
-		}
-		nregion = MaxRgn
-	}
-
-	sort.Sort(rcmp(region[:nregion]))
-
-	if Debug['R'] != 0 && Debug['v'] != 0 {
-		Dumpit("pass5", firstf, 1)
-	}
-
-	// pass 6
-	// determine used registers (paint2)
-	// replace code (paint3)
-	if Debug['R'] != 0 && Debug['v'] != 0 {
-		fmt.Printf("\nregisterizing\n")
-	}
-	for i := 0; i < nregion; i++ {
-		rgp := &region[i]
-		if Debug['R'] != 0 && Debug['v'] != 0 {
-			fmt.Printf("region %d: cost %d varno %d enter %d\n", i, rgp.cost, rgp.varno, rgp.enter.Prog.Pc)
-		}
-		bit = blsh(uint(rgp.varno))
-		usedreg := paint2(rgp.enter, int(rgp.varno), 0)
-		vreg := allreg(usedreg, rgp)
-		if rgp.regno != 0 {
-			if Debug['R'] != 0 && Debug['v'] != 0 {
-				v := &vars[rgp.varno]
-				fmt.Printf("registerize %v+%d (bit=%2d et=%v) in %v usedreg=%#x vreg=%#x\n", v.node, v.offset, rgp.varno, v.etype, obj.Rconv(int(rgp.regno)), usedreg, vreg)
-			}
-
-			paint3(rgp.enter, int(rgp.varno), vreg, int(rgp.regno))
-		}
-	}
-
-	// free aux structures. peep allocates new ones.
-	for i := 0; i < nvar; i++ {
-		vars[i].node.SetOpt(nil)
-	}
-	Flowend(g)
-	firstf = nil
-
-	if Debug['R'] != 0 && Debug['v'] != 0 {
-		// Rebuild flow graph, since we inserted instructions
-		g := Flowstart(firstp, nil)
-		firstf = g.Start
-		Dumpit("pass6", firstf, 0)
-		Flowend(g)
-		firstf = nil
-	}
-
-	// pass 7
-	// peep-hole on basic block
-	if Debug['R'] == 0 || Debug['P'] != 0 {
-		Thearch.Peep(firstp)
-	}
-
-	// eliminate nops
-	for p := firstp; p != nil; p = p.Link {
-		for p.Link != nil && p.Link.As == obj.ANOP {
-			p.Link = p.Link.Link
-		}
-		if p.To.Type == obj.TYPE_BRANCH {
-			for p.To.Val.(*obj.Prog) != nil && p.To.Val.(*obj.Prog).As == obj.ANOP {
-				p.To.Val = p.To.Val.(*obj.Prog).Link
-			}
-		}
-	}
-
-	if Debug['R'] != 0 {
-		if Ostats.Ncvtreg != 0 || Ostats.Nspill != 0 || Ostats.Nreload != 0 || Ostats.Ndelmov != 0 || Ostats.Nvar != 0 || Ostats.Naddr != 0 || false {
-			fmt.Printf("\nstats\n")
-		}
-
-		if Ostats.Ncvtreg != 0 {
-			fmt.Printf("\t%4d cvtreg\n", Ostats.Ncvtreg)
-		}
-		if Ostats.Nspill != 0 {
-			fmt.Printf("\t%4d spill\n", Ostats.Nspill)
-		}
-		if Ostats.Nreload != 0 {
-			fmt.Printf("\t%4d reload\n", Ostats.Nreload)
-		}
-		if Ostats.Ndelmov != 0 {
-			fmt.Printf("\t%4d delmov\n", Ostats.Ndelmov)
-		}
-		if Ostats.Nvar != 0 {
-			fmt.Printf("\t%4d var\n", Ostats.Nvar)
-		}
-		if Ostats.Naddr != 0 {
-			fmt.Printf("\t%4d addr\n", Ostats.Naddr)
-		}
-
-		Ostats = OptStats{}
-	}
-}
-
-// bany reports whether any bits in a are set.
-func bany(a *Bits) bool {
-	for _, x := range &a.b { // & to avoid making a copy of a.b
-		if x != 0 {
-			return true
-		}
-	}
-	return false
-}
-
-// bnum reports the lowest index of a 1 bit in a.
-func bnum(a *Bits) int {
-	for i, x := range &a.b { // & to avoid making a copy of a.b
-		if x != 0 {
-			return 64*i + Bitno(x)
-		}
-	}
-
-	Fatalf("bad in bnum")
-	return 0
-}
-
-// blsh returns a Bits with 1 at index n, 0 elsewhere (1<<n).
-func blsh(n uint) Bits {
-	c := zbits
-	c.b[n/64] = 1 << (n % 64)
-	return c
-}
-
-// btest reports whether bit n is 1.
-func btest(a *Bits, n uint) bool {
-	return a.b[n/64]&(1<<(n%64)) != 0
-}
-
-// biset sets bit n to 1.
-func biset(a *Bits, n uint) {
-	a.b[n/64] |= 1 << (n % 64)
-}
-
-// biclr sets bit n to 0.
-func biclr(a *Bits, n uint) {
-	a.b[n/64] &^= (1 << (n % 64))
-}
-
-// Bitno reports the lowest index of a 1 bit in b.
-// It calls Fatalf if there is no 1 bit.
-func Bitno(b uint64) int {
-	if b == 0 {
-		Fatalf("bad in bitno")
-	}
-	n := 0
-	if b&(1<<32-1) == 0 {
-		n += 32
-		b >>= 32
-	}
-	if b&(1<<16-1) == 0 {
-		n += 16
-		b >>= 16
-	}
-	if b&(1<<8-1) == 0 {
-		n += 8
-		b >>= 8
-	}
-	if b&(1<<4-1) == 0 {
-		n += 4
-		b >>= 4
-	}
-	if b&(1<<2-1) == 0 {
-		n += 2
-		b >>= 2
-	}
-	if b&1 == 0 {
-		n++
-	}
-	return n
-}
-
-// String returns a space-separated list of the variables represented by bits.
-func (bits Bits) String() string {
-	// Note: This method takes a value receiver, both for convenience
-	// and to make it safe to modify the bits as we process them.
-	// Even so, most prints above use &bits, because then the value
-	// being stored in the interface{} is a pointer and does not require
-	// an allocation and copy to create the interface{}.
-	var buf bytes.Buffer
-	sep := ""
-	for bany(&bits) {
-		i := bnum(&bits)
-		buf.WriteString(sep)
-		sep = " "
-		v := &vars[i]
-		if v.node == nil || v.node.Sym == nil {
-			fmt.Fprintf(&buf, "$%d", i)
-		} else {
-			fmt.Fprintf(&buf, "%s(%d)", v.node.Sym.Name, i)
-			if v.offset != 0 {
-				fmt.Fprintf(&buf, "%+d", v.offset)
-			}
-		}
-		biclr(&bits, uint(i))
-	}
-	return buf.String()
-}
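The helpers above implement a fixed-width bit vector stored as an array of 64-bit words: variable n lives at word n/64, bit n%64. A self-contained sketch of the same indexing (nwords and the helper names are illustrative, not the deleted API):

	package main

	import "fmt"

	const nwords = 4 // like BITS: a handful of 64-bit words

	type bitvec [nwords]uint64

	func set(a *bitvec, n uint)       { a[n/64] |= 1 << (n % 64) }
	func clr(a *bitvec, n uint)       { a[n/64] &^= 1 << (n % 64) }
	func test(a *bitvec, n uint) bool { return a[n/64]&(1<<(n%64)) != 0 }

	func main() {
		var a bitvec
		set(&a, 70)                     // word 1, bit 6
		fmt.Println(test(&a, 70), a[1]) // true 64
		clr(&a, 70)
		fmt.Println(test(&a, 70)) // false
	}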
diff --git a/src/cmd/compile/internal/gc/sizeof_test.go b/src/cmd/compile/internal/gc/sizeof_test.go
index a01da13..1a0e530 100644
--- a/src/cmd/compile/internal/gc/sizeof_test.go
+++ b/src/cmd/compile/internal/gc/sizeof_test.go
@@ -22,7 +22,6 @@
 		_32bit uintptr     // size on 32bit platforms
 		_64bit uintptr     // size on 64bit platforms
 	}{
-		{Flow{}, 52, 88},
 		{Func{}, 96, 168},
 		{Name{}, 52, 80},
 		{Node{}, 92, 144},
diff --git a/src/cmd/compile/internal/mips64/cgen.go b/src/cmd/compile/internal/mips64/cgen.go
deleted file mode 100644
index 998afea..0000000
--- a/src/cmd/compile/internal/mips64/cgen.go
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package mips64
-
-import (
-	"cmd/compile/internal/gc"
-	"cmd/internal/obj"
-	"cmd/internal/obj/mips"
-)
-
-func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
-	// determine alignment.
-	// want to avoid unaligned access, so have to use
-	// smaller operations for less aligned types.
-	// for example moving [4]byte must use 4 MOVB not 1 MOVW.
-	align := int(n.Type.Align)
-
-	var op obj.As
-	switch align {
-	default:
-		gc.Fatalf("sgen: invalid alignment %d for %v", align, n.Type)
-
-	case 1:
-		op = mips.AMOVB
-
-	case 2:
-		op = mips.AMOVH
-
-	case 4:
-		op = mips.AMOVW
-
-	case 8:
-		op = mips.AMOVV
-	}
-
-	if w%int64(align) != 0 {
-		gc.Fatalf("sgen: unaligned size %d (align=%d) for %v", w, align, n.Type)
-	}
-	c := int32(w / int64(align))
-
-	// if we are copying forward on the stack and
-	// the src and dst overlap, then reverse direction
-	dir := align
-
-	if osrc < odst && odst < osrc+w {
-		dir = -dir
-	}
-
-	var dst gc.Node
-	var src gc.Node
-	if n.Ullman >= res.Ullman {
-		gc.Agenr(n, &dst, res) // temporarily use dst
-		gc.Regalloc(&src, gc.Types[gc.Tptr], nil)
-		gins(mips.AMOVV, &dst, &src)
-		if res.Op == gc.ONAME {
-			gc.Gvardef(res)
-		}
-		gc.Agen(res, &dst)
-	} else {
-		if res.Op == gc.ONAME {
-			gc.Gvardef(res)
-		}
-		gc.Agenr(res, &dst, res)
-		gc.Agenr(n, &src, nil)
-	}
-
-	var tmp gc.Node
-	gc.Regalloc(&tmp, gc.Types[gc.Tptr], nil)
-
-	// set up end marker
-	var nend gc.Node
-
-	// move src and dest to the end of block if necessary
-	if dir < 0 {
-		if c >= 4 {
-			gc.Regalloc(&nend, gc.Types[gc.Tptr], nil)
-			gins(mips.AMOVV, &src, &nend)
-		}
-
-		p := gins(mips.AADDV, nil, &src)
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = w
-
-		p = gins(mips.AADDV, nil, &dst)
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = w
-	} else {
-		p := gins(mips.AADDV, nil, &src)
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = int64(-dir)
-
-		p = gins(mips.AADDV, nil, &dst)
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = int64(-dir)
-
-		if c >= 4 {
-			gc.Regalloc(&nend, gc.Types[gc.Tptr], nil)
-			p := gins(mips.AMOVV, &src, &nend)
-			p.From.Type = obj.TYPE_ADDR
-			p.From.Offset = w
-		}
-	}
-
-	// move
-	// TODO: enable duffcopy for larger copies.
-	if c >= 4 {
-		p := gins(op, &src, &tmp)
-		p.From.Type = obj.TYPE_MEM
-		p.From.Offset = int64(dir)
-		ploop := p
-
-		p = gins(mips.AADDV, nil, &src)
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = int64(dir)
-
-		p = gins(op, &tmp, &dst)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Offset = int64(dir)
-
-		p = gins(mips.AADDV, nil, &dst)
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = int64(dir)
-
-		gc.Patch(ginsbranch(mips.ABNE, nil, &src, &nend, 0), ploop)
-		gc.Regfree(&nend)
-	} else {
-		// TODO: Instead of generating ADDV $-8,R8; ADDV
-		// $-8,R7; n*(MOVV 8(R8),R9; ADDV $8,R8; MOVV R9,8(R7);
-		// ADDV $8,R7;) just generate the offsets directly and
-		// eliminate the ADDs. That will produce shorter, more
-		// pipeline-able code.
-		var p *obj.Prog
-		for ; c > 0; c-- {
-			p = gins(op, &src, &tmp)
-			p.From.Type = obj.TYPE_MEM
-			p.From.Offset = int64(dir)
-
-			p = gins(mips.AADDV, nil, &src)
-			p.From.Type = obj.TYPE_CONST
-			p.From.Offset = int64(dir)
-
-			p = gins(op, &tmp, &dst)
-			p.To.Type = obj.TYPE_MEM
-			p.To.Offset = int64(dir)
-
-			p = gins(mips.AADDV, nil, &dst)
-			p.From.Type = obj.TYPE_CONST
-			p.From.Offset = int64(dir)
-		}
-	}
-
-	gc.Regfree(&dst)
-	gc.Regfree(&src)
-	gc.Regfree(&tmp)
-}
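The direction flip above (osrc < odst && odst < osrc+w) is the standard memmove rule: when the destination starts inside the source region, a forward copy would clobber bytes before they are read, so the copy runs backward. The same test in plain Go, reduced to byte granularity (a sketch, not the generated code):

	package main

	import "fmt"

	// moveWithin copies w bytes from offset src to offset dst in buf,
	// copying backward only when src < dst && dst < src+w, exactly the
	// overlap test blockcopy uses.
	func moveWithin(buf []byte, dst, src, w int) {
		if src < dst && dst < src+w {
			for i := w - 1; i >= 0; i-- { // reverse direction
				buf[dst+i] = buf[src+i]
			}
			return
		}
		for i := 0; i < w; i++ { // normal direction
			buf[dst+i] = buf[src+i]
		}
	}

	func main() {
		b := []byte("abcdef")
		moveWithin(b, 2, 0, 4) // overlapping: forward copy would clobber
		fmt.Println(string(b)) // ababcd
	}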
diff --git a/src/cmd/compile/internal/mips64/galign.go b/src/cmd/compile/internal/mips64/galign.go
index 8abe651..ca1cb68 100644
--- a/src/cmd/compile/internal/mips64/galign.go
+++ b/src/cmd/compile/internal/mips64/galign.go
@@ -32,36 +32,9 @@
 	gc.Thearch.ReservedRegs = resvd
 
 	gc.Thearch.Betypeinit = betypeinit
-	gc.Thearch.Cgen_hmul = cgen_hmul
-	gc.Thearch.Cgen_shift = cgen_shift
-	gc.Thearch.Clearfat = clearfat
 	gc.Thearch.Defframe = defframe
-	gc.Thearch.Dodiv = dodiv
-	gc.Thearch.Excise = excise
-	gc.Thearch.Expandchecks = expandchecks
-	gc.Thearch.Getg = getg
 	gc.Thearch.Gins = gins
-	gc.Thearch.Ginscmp = ginscmp
-	gc.Thearch.Ginscon = ginscon
-	gc.Thearch.Ginsnop = ginsnop
-	gc.Thearch.Gmove = gmove
-	gc.Thearch.Peep = peep
 	gc.Thearch.Proginfo = proginfo
-	gc.Thearch.Regtyp = regtyp
-	gc.Thearch.Sameaddr = sameaddr
-	gc.Thearch.Smallindir = smallindir
-	gc.Thearch.Stackaddr = stackaddr
-	gc.Thearch.Blockcopy = blockcopy
-	gc.Thearch.Sudoaddable = sudoaddable
-	gc.Thearch.Sudoclean = sudoclean
-	gc.Thearch.Excludedregs = excludedregs
-	gc.Thearch.RtoB = RtoB
-	gc.Thearch.FtoB = RtoB
-	gc.Thearch.BtoR = BtoR
-	gc.Thearch.BtoF = BtoF
-	gc.Thearch.Optoas = optoas
-	gc.Thearch.Doregbits = doregbits
-	gc.Thearch.Regnames = regnames
 
 	gc.Thearch.SSARegToReg = ssaRegToReg
 	gc.Thearch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {}
diff --git a/src/cmd/compile/internal/mips64/ggen.go b/src/cmd/compile/internal/mips64/ggen.go
index 599ca06..49ec8da 100644
--- a/src/cmd/compile/internal/mips64/ggen.go
+++ b/src/cmd/compile/internal/mips64/ggen.go
@@ -8,7 +8,6 @@
 	"cmd/compile/internal/gc"
 	"cmd/internal/obj"
 	"cmd/internal/obj/mips"
-	"fmt"
 )
 
 func defframe(ptxt *obj.Prog) {
@@ -122,370 +121,3 @@
 	gc.Nodreg(&reg, gc.Types[gc.TINT], mips.REG_R0)
 	gins(mips.ANOR, &reg, &reg)
 }
-
-var panicdiv *gc.Node
-
-/*
- * generate division.
- * generates one of:
- *	res = nl / nr
- *	res = nl % nr
- * according to op.
- */
-func dodiv(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) {
-	t := nl.Type
-
-	t0 := t
-
-	if t.Width < 8 {
-		if t.IsSigned() {
-			t = gc.Types[gc.TINT64]
-		} else {
-			t = gc.Types[gc.TUINT64]
-		}
-	}
-
-	a := optoas(gc.ODIV, t)
-
-	var tl gc.Node
-	gc.Regalloc(&tl, t0, nil)
-	var tr gc.Node
-	gc.Regalloc(&tr, t0, nil)
-	if nl.Ullman >= nr.Ullman {
-		gc.Cgen(nl, &tl)
-		gc.Cgen(nr, &tr)
-	} else {
-		gc.Cgen(nr, &tr)
-		gc.Cgen(nl, &tl)
-	}
-
-	if t != t0 {
-		// Convert
-		tl2 := tl
-
-		tr2 := tr
-		tl.Type = t
-		tr.Type = t
-		gmove(&tl2, &tl)
-		gmove(&tr2, &tr)
-	}
-
-	// Handle divide-by-zero panic.
-	p1 := ginsbranch(mips.ABNE, nil, &tr, nil, 0)
-	if panicdiv == nil {
-		panicdiv = gc.Sysfunc("panicdivide")
-	}
-	gc.Ginscall(panicdiv, -1)
-	gc.Patch(p1, gc.Pc)
-
-	gins3(a, &tr, &tl, nil)
-	gc.Regfree(&tr)
-	if op == gc.ODIV {
-		var lo gc.Node
-		gc.Nodreg(&lo, gc.Types[gc.TUINT64], mips.REG_LO)
-		gins(mips.AMOVV, &lo, &tl)
-	} else { // remainder in REG_HI
-		var hi gc.Node
-		gc.Nodreg(&hi, gc.Types[gc.TUINT64], mips.REG_HI)
-		gins(mips.AMOVV, &hi, &tl)
-	}
-	gmove(&tl, res)
-	gc.Regfree(&tl)
-}
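On mips64 a single DIVV leaves the quotient in LO and the remainder in HI, so dodiv reads whichever register op asks for. Both results satisfy Go's truncated-division invariant, x == (x/y)*y + x%y, which is what the emitted code must preserve. A quick check in plain Go:

	package main

	import "fmt"

	func main() {
		for _, p := range [][2]int64{{7, 3}, {-7, 3}, {7, -3}} {
			x, y := p[0], p[1]
			q, r := x/y, x%y              // quotient (LO), remainder (HI)
			fmt.Println(q, r, x == q*y+r) // invariant always holds
		}
	}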
-
-/*
- * generate high multiply:
- *   res = (nl*nr) >> width
- */
-func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
-	// largest ullman on left.
-	if nl.Ullman < nr.Ullman {
-		nl, nr = nr, nl
-	}
-
-	t := nl.Type
-	w := t.Width * 8
-	var n1 gc.Node
-	gc.Cgenr(nl, &n1, res)
-	var n2 gc.Node
-	gc.Cgenr(nr, &n2, nil)
-	switch gc.Simtype[t.Etype] {
-	case gc.TINT8,
-		gc.TINT16,
-		gc.TINT32:
-		gins3(optoas(gc.OMUL, t), &n2, &n1, nil)
-		var lo gc.Node
-		gc.Nodreg(&lo, gc.Types[gc.TUINT64], mips.REG_LO)
-		gins(mips.AMOVV, &lo, &n1)
-		p := gins(mips.ASRAV, nil, &n1)
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = w
-
-	case gc.TUINT8,
-		gc.TUINT16,
-		gc.TUINT32:
-		gins3(optoas(gc.OMUL, t), &n2, &n1, nil)
-		var lo gc.Node
-		gc.Nodreg(&lo, gc.Types[gc.TUINT64], mips.REG_LO)
-		gins(mips.AMOVV, &lo, &n1)
-		p := gins(mips.ASRLV, nil, &n1)
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = w
-
-	case gc.TINT64,
-		gc.TUINT64:
-		if t.IsSigned() {
-			gins3(mips.AMULV, &n2, &n1, nil)
-		} else {
-			gins3(mips.AMULVU, &n2, &n1, nil)
-		}
-		var hi gc.Node
-		gc.Nodreg(&hi, gc.Types[gc.TUINT64], mips.REG_HI)
-		gins(mips.AMOVV, &hi, &n1)
-
-	default:
-		gc.Fatalf("cgen_hmul %v", t)
-	}
-
-	gc.Cgen(&n1, res)
-	gc.Regfree(&n1)
-	gc.Regfree(&n2)
-}
-
-/*
- * generate shift according to op, one of:
- *	res = nl << nr
- *	res = nl >> nr
- */
-func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
-	a := optoas(op, nl.Type)
-
-	if nr.Op == gc.OLITERAL {
-		var n1 gc.Node
-		gc.Regalloc(&n1, nl.Type, res)
-		gc.Cgen(nl, &n1)
-		sc := uint64(nr.Int64())
-		if sc >= uint64(nl.Type.Width*8) {
-			// large shift gets 2 shifts by width-1
-			var n3 gc.Node
-			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
-
-			gins(a, &n3, &n1)
-			gins(a, &n3, &n1)
-		} else {
-			gins(a, nr, &n1)
-		}
-		gmove(&n1, res)
-		gc.Regfree(&n1)
-		return
-	}
-
-	if nl.Ullman >= gc.UINF {
-		var n4 gc.Node
-		gc.Tempname(&n4, nl.Type)
-		gc.Cgen(nl, &n4)
-		nl = &n4
-	}
-
-	if nr.Ullman >= gc.UINF {
-		var n5 gc.Node
-		gc.Tempname(&n5, nr.Type)
-		gc.Cgen(nr, &n5)
-		nr = &n5
-	}
-
-	// Allow either uint32 or uint64 as shift type,
-	// to avoid unnecessary conversion from uint32 to uint64
-	// just to do the comparison.
-	tcount := gc.Types[gc.Simtype[nr.Type.Etype]]
-
-	if tcount.Etype < gc.TUINT32 {
-		tcount = gc.Types[gc.TUINT32]
-	}
-
-	var n1 gc.Node
-	gc.Regalloc(&n1, nr.Type, nil) // to hold the shift type in CX
-	var n3 gc.Node
-	gc.Regalloc(&n3, tcount, &n1) // to clear high bits of CX
-
-	var n2 gc.Node
-	gc.Regalloc(&n2, nl.Type, res)
-
-	if nl.Ullman >= nr.Ullman {
-		gc.Cgen(nl, &n2)
-		gc.Cgen(nr, &n1)
-		gmove(&n1, &n3)
-	} else {
-		gc.Cgen(nr, &n1)
-		gmove(&n1, &n3)
-		gc.Cgen(nl, &n2)
-	}
-
-	gc.Regfree(&n3)
-
-	// test and fix up large shifts
-	if !bounded {
-		var rtmp gc.Node
-		gc.Nodreg(&rtmp, tcount, mips.REGTMP)
-		gc.Nodconst(&n3, tcount, nl.Type.Width*8)
-		gins3(mips.ASGTU, &n3, &n1, &rtmp)
-		p1 := ginsbranch(mips.ABNE, nil, &rtmp, nil, 0)
-		if op == gc.ORSH && nl.Type.IsSigned() {
-			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
-			gins(a, &n3, &n2)
-		} else {
-			gc.Nodconst(&n3, nl.Type, 0)
-			gmove(&n3, &n2)
-		}
-
-		gc.Patch(p1, gc.Pc)
-	}
-
-	gins(a, &n1, &n2)
-
-	gmove(&n2, res)
-
-	gc.Regfree(&n1)
-	gc.Regfree(&n2)
-}
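The constant-shift path above handles counts of width or more by issuing two shifts of width-1, pinning the result to 0, or to all sign bits for an arithmetic right shift, which is what Go's shift semantics require. A small demonstration of that equivalence (plain Go, illustrative only):

	package main

	import "fmt"

	func main() {
		const w = 64

		// Two width-1 arithmetic shifts emulate one oversized shift:
		// a negative value collapses to -1 (all sign bits).
		var x int64 = -8
		fmt.Println((x >> (w - 1)) >> (w - 1)) // -1

		// For unsigned (or left) shifts the result collapses to 0.
		var u uint64 = 0xFF
		fmt.Println((u << (w - 1)) << (w - 1)) // 0
	}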
-
-func clearfat(nl *gc.Node) {
-	/* clear a fat object */
-	if gc.Debug['g'] != 0 {
-		fmt.Printf("clearfat %v (%v, size: %d)\n", nl, nl.Type, nl.Type.Width)
-	}
-
-	w := uint64(nl.Type.Width)
-
-	// Avoid taking the address for simple enough types.
-	if gc.Componentgen(nil, nl) {
-		return
-	}
-
-	c := w % 8 // bytes
-	q := w / 8 // dwords
-
-	if gc.Reginuse(mips.REGRT1) {
-		gc.Fatalf("%v in use during clearfat", obj.Rconv(mips.REGRT1))
-	}
-
-	var r0 gc.Node
-	gc.Nodreg(&r0, gc.Types[gc.TUINT64], mips.REGZERO)
-	var dst gc.Node
-	gc.Nodreg(&dst, gc.Types[gc.Tptr], mips.REGRT1)
-	gc.Regrealloc(&dst)
-	gc.Agen(nl, &dst)
-
-	var boff uint64
-	if q > 128 {
-		p := gins(mips.ASUBV, nil, &dst)
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = 8
-
-		var end gc.Node
-		gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
-		p = gins(mips.AMOVV, &dst, &end)
-		p.From.Type = obj.TYPE_ADDR
-		p.From.Offset = int64(q * 8)
-
-		p = gins(mips.AMOVV, &r0, &dst)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Offset = 8
-		pl := p
-
-		p = gins(mips.AADDV, nil, &dst)
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = 8
-
-		gc.Patch(ginsbranch(mips.ABNE, nil, &dst, &end, 0), pl)
-
-		gc.Regfree(&end)
-
-		// The loop leaves R1 on the last zeroed dword
-		boff = 8
-		// TODO(dfc): https://golang.org/issue/12108
-		// If DUFFZERO is used inside a tail call (see genwrapper) it will
-		// overwrite the link register.
-	} else if false && q >= 4 {
-		p := gins(mips.ASUBV, nil, &dst)
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = 8
-		f := gc.Sysfunc("duffzero")
-		p = gins(obj.ADUFFZERO, nil, f)
-		gc.Afunclit(&p.To, f)
-
-		// 8 and 128 = magic constants: see ../../runtime/asm_mips64x.s
-		p.To.Offset = int64(8 * (128 - q))
-
-		// duffzero leaves R1 on the last zeroed dword
-		boff = 8
-	} else {
-		var p *obj.Prog
-		for t := uint64(0); t < q; t++ {
-			p = gins(mips.AMOVV, &r0, &dst)
-			p.To.Type = obj.TYPE_MEM
-			p.To.Offset = int64(8 * t)
-		}
-
-		boff = 8 * q
-	}
-
-	var p *obj.Prog
-	for t := uint64(0); t < c; t++ {
-		p = gins(mips.AMOVB, &r0, &dst)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Offset = int64(t + boff)
-	}
-
-	gc.Regfree(&dst)
-}
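clearfat splits the width into q doubleword stores plus c trailing byte stores (w = 8q + c). The same decomposition over a byte slice (a sketch with a hypothetical helper, not the emitted stores):

	package main

	import (
		"bytes"
		"encoding/binary"
		"fmt"
	)

	// zero clears buf the way clearfat schedules its stores:
	// q 8-byte stores followed by c single-byte stores.
	func zero(buf []byte) {
		w := uint64(len(buf))
		c := w % 8 // bytes
		q := w / 8 // dwords
		for t := uint64(0); t < q; t++ {
			binary.LittleEndian.PutUint64(buf[8*t:], 0)
		}
		for t := uint64(0); t < c; t++ {
			buf[8*q+t] = 0
		}
	}

	func main() {
		b := []byte("nineteen bytes here")
		zero(b)
		fmt.Println(bytes.Equal(b, make([]byte, len(b)))) // true
	}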
-
-// Called after regopt and peep have run.
-// Expand CHECKNIL pseudo-op into actual nil pointer check.
-func expandchecks(firstp *obj.Prog) {
-	var p1 *obj.Prog
-
-	for p := firstp; p != nil; p = p.Link {
-		if gc.Debug_checknil != 0 && gc.Ctxt.Debugvlog != 0 {
-			fmt.Printf("expandchecks: %v\n", p)
-		}
-		if p.As != obj.ACHECKNIL {
-			continue
-		}
-		if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
-			gc.Warnl(p.Lineno, "generated nil check")
-		}
-		if p.From.Type != obj.TYPE_REG {
-			gc.Fatalf("invalid nil check %v\n", p)
-		}
-
-		// check is
-		//	BNE arg, 2(PC)
-		//	MOVV R0, 0(R0)
-		p1 = gc.Ctxt.NewProg()
-		gc.Clearp(p1)
-		p1.Link = p.Link
-		p.Link = p1
-		p1.Lineno = p.Lineno
-		p1.Pc = 9999
-
-		p.As = mips.ABNE
-		p.To.Type = obj.TYPE_BRANCH
-		p.To.Val = p1.Link
-
-		// crash by write to memory address 0.
-		p1.As = mips.AMOVV
-		p1.From.Type = obj.TYPE_REG
-		p1.From.Reg = mips.REGZERO
-		p1.To.Type = obj.TYPE_MEM
-		p1.To.Reg = mips.REGZERO
-		p1.To.Offset = 0
-	}
-}
-
-// res = runtime.getg()
-func getg(res *gc.Node) {
-	var n1 gc.Node
-	gc.Nodreg(&n1, res.Type, mips.REGG)
-	gmove(&n1, res)
-}
diff --git a/src/cmd/compile/internal/mips64/gsubr.go b/src/cmd/compile/internal/mips64/gsubr.go
index fd0efc3..ecf8359 100644
--- a/src/cmd/compile/internal/mips64/gsubr.go
+++ b/src/cmd/compile/internal/mips64/gsubr.go
@@ -31,7 +31,6 @@
 package mips64
 
 import (
-	"cmd/compile/internal/big"
 	"cmd/compile/internal/gc"
 	"cmd/internal/obj"
 	"cmd/internal/obj/mips"
@@ -51,7 +50,7 @@
 
 /*
  * generate
- *	as $c, n
+ *      as $c, n
  */
 func ginscon(as obj.As, c int64, n2 *gc.Node) {
 	var n1 gc.Node
@@ -73,495 +72,6 @@
 	rawgins(as, &n1, n2)
 }
 
-// generate branch
-// n1, n2 are registers
-func ginsbranch(as obj.As, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
-	p := gc.Gbranch(as, t, likely)
-	gc.Naddr(&p.From, n1)
-	if n2 != nil {
-		p.Reg = n2.Reg
-	}
-	return p
-}
-
-func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
-	if !t.IsFloat() && (op == gc.OLT || op == gc.OGE) {
-		// swap nodes to fit SGT instruction
-		n1, n2 = n2, n1
-	}
-	if t.IsFloat() && (op == gc.OLT || op == gc.OLE) {
-		// swap nodes to fit CMPGT, CMPGE instructions and reverse relation
-		n1, n2 = n2, n1
-		if op == gc.OLT {
-			op = gc.OGT
-		} else {
-			op = gc.OGE
-		}
-	}
-
-	var r1, r2, g1, g2 gc.Node
-	gc.Regalloc(&r1, t, n1)
-	gc.Regalloc(&g1, n1.Type, &r1)
-	gc.Cgen(n1, &g1)
-	gmove(&g1, &r1)
-
-	gc.Regalloc(&r2, t, n2)
-	gc.Regalloc(&g2, n1.Type, &r2)
-	gc.Cgen(n2, &g2)
-	gmove(&g2, &r2)
-
-	var p *obj.Prog
-	var ntmp gc.Node
-	gc.Nodreg(&ntmp, gc.Types[gc.TINT], mips.REGTMP)
-
-	switch gc.Simtype[t.Etype] {
-	case gc.TINT8,
-		gc.TINT16,
-		gc.TINT32,
-		gc.TINT64:
-		if op == gc.OEQ || op == gc.ONE {
-			p = ginsbranch(optoas(op, t), nil, &r1, &r2, likely)
-		} else {
-			gins3(mips.ASGT, &r1, &r2, &ntmp)
-
-			p = ginsbranch(optoas(op, t), nil, &ntmp, nil, likely)
-		}
-
-	case gc.TBOOL,
-		gc.TUINT8,
-		gc.TUINT16,
-		gc.TUINT32,
-		gc.TUINT64,
-		gc.TPTR32,
-		gc.TPTR64:
-		if op == gc.OEQ || op == gc.ONE {
-			p = ginsbranch(optoas(op, t), nil, &r1, &r2, likely)
-		} else {
-			gins3(mips.ASGTU, &r1, &r2, &ntmp)
-
-			p = ginsbranch(optoas(op, t), nil, &ntmp, nil, likely)
-		}
-
-	case gc.TFLOAT32:
-		switch op {
-		default:
-			gc.Fatalf("ginscmp: no entry for op=%v type=%v", op, t)
-
-		case gc.OEQ,
-			gc.ONE:
-			gins3(mips.ACMPEQF, &r1, &r2, nil)
-
-		case gc.OGE:
-			gins3(mips.ACMPGEF, &r1, &r2, nil)
-
-		case gc.OGT:
-			gins3(mips.ACMPGTF, &r1, &r2, nil)
-		}
-		p = gc.Gbranch(optoas(op, t), nil, likely)
-
-	case gc.TFLOAT64:
-		switch op {
-		default:
-			gc.Fatalf("ginscmp: no entry for op=%v type=%v", op, t)
-
-		case gc.OEQ,
-			gc.ONE:
-			gins3(mips.ACMPEQD, &r1, &r2, nil)
-
-		case gc.OGE:
-			gins3(mips.ACMPGED, &r1, &r2, nil)
-
-		case gc.OGT:
-			gins3(mips.ACMPGTD, &r1, &r2, nil)
-		}
-		p = gc.Gbranch(optoas(op, t), nil, likely)
-	}
-
-	gc.Regfree(&g2)
-	gc.Regfree(&r2)
-	gc.Regfree(&g1)
-	gc.Regfree(&r1)
-
-	return p
-}
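ginscmp has only greater-than primitives to work with (SGT/SGTU for integers, CMPGT/CMPGE for floats), so OLT and OGE are lowered by swapping the operands: a < b is evaluated as b > a, with the float cases also reversing the relation. A one-line illustration of the swap (plain Go):

	package main

	import "fmt"

	// lessViaGreater evaluates a < b using only a greater-than
	// primitive, the operand swap ginscmp performs to fit SGT.
	func lessViaGreater(a, b int64) bool {
		greater := func(x, y int64) bool { return x > y } // SGT stand-in
		return greater(b, a)
	}

	func main() {
		fmt.Println(lessViaGreater(1, 2), lessViaGreater(2, 1)) // true false
	}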
-
-// set up nodes representing 2^63
-var (
-	bigi         gc.Node
-	bigf         gc.Node
-	bignodes_did bool
-)
-
-func bignodes() {
-	if bignodes_did {
-		return
-	}
-	bignodes_did = true
-
-	var i big.Int
-	i.SetInt64(1)
-	i.Lsh(&i, 63)
-
-	gc.Nodconst(&bigi, gc.Types[gc.TUINT64], 0)
-	bigi.SetBigInt(&i)
-
-	bigi.Convconst(&bigf, gc.Types[gc.TFLOAT64])
-}
-
-/*
- * generate move:
- *	t = f
- * hard part is conversions.
- */
-func gmove(f *gc.Node, t *gc.Node) {
-	if gc.Debug['M'] != 0 {
-		fmt.Printf("gmove %L -> %L\n", f, t)
-	}
-
-	ft := int(gc.Simsimtype(f.Type))
-	tt := int(gc.Simsimtype(t.Type))
-	cvt := t.Type
-
-	if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
-		gc.Complexmove(f, t)
-		return
-	}
-
-	// cannot have two memory operands
-	var r2 gc.Node
-	var r1 gc.Node
-	var a obj.As
-	if gc.Ismem(f) && gc.Ismem(t) {
-		goto hard
-	}
-
-	// convert constant to desired type
-	if f.Op == gc.OLITERAL {
-		var con gc.Node
-		switch tt {
-		default:
-			f.Convconst(&con, t.Type)
-
-		case gc.TINT32,
-			gc.TINT16,
-			gc.TINT8:
-			var con gc.Node
-			f.Convconst(&con, gc.Types[gc.TINT64])
-			var r1 gc.Node
-			gc.Regalloc(&r1, con.Type, t)
-			gins(mips.AMOVV, &con, &r1)
-			gmove(&r1, t)
-			gc.Regfree(&r1)
-			return
-
-		case gc.TUINT32,
-			gc.TUINT16,
-			gc.TUINT8:
-			var con gc.Node
-			f.Convconst(&con, gc.Types[gc.TUINT64])
-			var r1 gc.Node
-			gc.Regalloc(&r1, con.Type, t)
-			gins(mips.AMOVV, &con, &r1)
-			gmove(&r1, t)
-			gc.Regfree(&r1)
-			return
-		}
-
-		f = &con
-		ft = tt // so big switch will choose a simple mov
-
-		// constants can't move directly to memory.
-		if gc.Ismem(t) {
-			goto hard
-		}
-	}
-
-	// value -> value copy, first operand in memory.
-	// any floating point operand requires register
-	// src, so goto hard to copy to register first.
-	if gc.Ismem(f) && ft != tt && (gc.Isfloat[ft] || gc.Isfloat[tt]) {
-		cvt = gc.Types[ft]
-		goto hard
-	}
-
-	// value -> value copy, only one memory operand.
-	// figure out the instruction to use.
-	// break out of switch for one-instruction gins.
-	// goto rdst for "destination must be register".
-	// goto hard for "convert to cvt type first".
-	// otherwise handle and return.
-
-	switch uint32(ft)<<16 | uint32(tt) {
-	default:
-		gc.Fatalf("gmove %L -> %L", f.Type, t.Type)
-
-		/*
-		 * integer copy and truncate
-		 */
-	case gc.TINT8<<16 | gc.TINT8, // same size
-		gc.TUINT8<<16 | gc.TINT8,
-		gc.TINT16<<16 | gc.TINT8, // truncate
-		gc.TUINT16<<16 | gc.TINT8,
-		gc.TINT32<<16 | gc.TINT8,
-		gc.TUINT32<<16 | gc.TINT8,
-		gc.TINT64<<16 | gc.TINT8,
-		gc.TUINT64<<16 | gc.TINT8:
-		a = mips.AMOVB
-
-	case gc.TINT8<<16 | gc.TUINT8, // same size
-		gc.TUINT8<<16 | gc.TUINT8,
-		gc.TINT16<<16 | gc.TUINT8, // truncate
-		gc.TUINT16<<16 | gc.TUINT8,
-		gc.TINT32<<16 | gc.TUINT8,
-		gc.TUINT32<<16 | gc.TUINT8,
-		gc.TINT64<<16 | gc.TUINT8,
-		gc.TUINT64<<16 | gc.TUINT8:
-		a = mips.AMOVBU
-
-	case gc.TINT16<<16 | gc.TINT16, // same size
-		gc.TUINT16<<16 | gc.TINT16,
-		gc.TINT32<<16 | gc.TINT16, // truncate
-		gc.TUINT32<<16 | gc.TINT16,
-		gc.TINT64<<16 | gc.TINT16,
-		gc.TUINT64<<16 | gc.TINT16:
-		a = mips.AMOVH
-
-	case gc.TINT16<<16 | gc.TUINT16, // same size
-		gc.TUINT16<<16 | gc.TUINT16,
-		gc.TINT32<<16 | gc.TUINT16, // truncate
-		gc.TUINT32<<16 | gc.TUINT16,
-		gc.TINT64<<16 | gc.TUINT16,
-		gc.TUINT64<<16 | gc.TUINT16:
-		a = mips.AMOVHU
-
-	case gc.TINT32<<16 | gc.TINT32, // same size
-		gc.TUINT32<<16 | gc.TINT32,
-		gc.TINT64<<16 | gc.TINT32, // truncate
-		gc.TUINT64<<16 | gc.TINT32:
-		a = mips.AMOVW
-
-	case gc.TINT32<<16 | gc.TUINT32, // same size
-		gc.TUINT32<<16 | gc.TUINT32,
-		gc.TINT64<<16 | gc.TUINT32, // truncate
-		gc.TUINT64<<16 | gc.TUINT32:
-		a = mips.AMOVWU
-
-	case gc.TINT64<<16 | gc.TINT64, // same size
-		gc.TINT64<<16 | gc.TUINT64,
-		gc.TUINT64<<16 | gc.TINT64,
-		gc.TUINT64<<16 | gc.TUINT64:
-		a = mips.AMOVV
-
-		/*
-		 * integer up-conversions
-		 */
-	case gc.TINT8<<16 | gc.TINT16, // sign extend int8
-		gc.TINT8<<16 | gc.TUINT16,
-		gc.TINT8<<16 | gc.TINT32,
-		gc.TINT8<<16 | gc.TUINT32,
-		gc.TINT8<<16 | gc.TINT64,
-		gc.TINT8<<16 | gc.TUINT64:
-		a = mips.AMOVB
-
-		goto rdst
-
-	case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8
-		gc.TUINT8<<16 | gc.TUINT16,
-		gc.TUINT8<<16 | gc.TINT32,
-		gc.TUINT8<<16 | gc.TUINT32,
-		gc.TUINT8<<16 | gc.TINT64,
-		gc.TUINT8<<16 | gc.TUINT64:
-		a = mips.AMOVBU
-
-		goto rdst
-
-	case gc.TINT16<<16 | gc.TINT32, // sign extend int16
-		gc.TINT16<<16 | gc.TUINT32,
-		gc.TINT16<<16 | gc.TINT64,
-		gc.TINT16<<16 | gc.TUINT64:
-		a = mips.AMOVH
-
-		goto rdst
-
-	case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16
-		gc.TUINT16<<16 | gc.TUINT32,
-		gc.TUINT16<<16 | gc.TINT64,
-		gc.TUINT16<<16 | gc.TUINT64:
-		a = mips.AMOVHU
-
-		goto rdst
-
-	case gc.TINT32<<16 | gc.TINT64, // sign extend int32
-		gc.TINT32<<16 | gc.TUINT64:
-		a = mips.AMOVW
-
-		goto rdst
-
-	case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
-		gc.TUINT32<<16 | gc.TUINT64:
-		a = mips.AMOVWU
-
-		goto rdst
-
-		//warn("gmove: convert float to int not implemented: %N -> %N\n", f, t);
-	//return;
-	// algorithm is:
-	//	if small enough, use native float64 -> int64 conversion.
-	//	otherwise, subtract 2^63, convert, and add it back.
-	/*
-	 * float to integer
-	 */
-	case gc.TFLOAT32<<16 | gc.TINT32,
-		gc.TFLOAT64<<16 | gc.TINT32,
-		gc.TFLOAT32<<16 | gc.TINT64,
-		gc.TFLOAT64<<16 | gc.TINT64,
-		gc.TFLOAT32<<16 | gc.TINT16,
-		gc.TFLOAT32<<16 | gc.TINT8,
-		gc.TFLOAT32<<16 | gc.TUINT16,
-		gc.TFLOAT32<<16 | gc.TUINT8,
-		gc.TFLOAT64<<16 | gc.TINT16,
-		gc.TFLOAT64<<16 | gc.TINT8,
-		gc.TFLOAT64<<16 | gc.TUINT16,
-		gc.TFLOAT64<<16 | gc.TUINT8,
-		gc.TFLOAT32<<16 | gc.TUINT32,
-		gc.TFLOAT64<<16 | gc.TUINT32,
-		gc.TFLOAT32<<16 | gc.TUINT64,
-		gc.TFLOAT64<<16 | gc.TUINT64:
-		bignodes()
-
-		gc.Regalloc(&r1, gc.Types[gc.TFLOAT64], nil)
-		gmove(f, &r1)
-		if tt == gc.TUINT64 {
-			gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], nil)
-			gmove(&bigf, &r2)
-			gins3(mips.ACMPGED, &r1, &r2, nil)
-			p1 := gc.Gbranch(mips.ABFPF, nil, 0)
-			gins(mips.ASUBD, &r2, &r1)
-			gc.Patch(p1, gc.Pc)
-			gc.Regfree(&r2)
-		}
-
-		gc.Regalloc(&r2, gc.Types[gc.TINT64], t)
-		gins(mips.ATRUNCDV, &r1, &r1)
-		gins(mips.AMOVV, &r1, &r2)
-		gc.Regfree(&r1)
-
-		if tt == gc.TUINT64 {
-			p1 := gc.Gbranch(mips.ABFPF, nil, 0) // use FCR0 here again
-			gc.Nodreg(&r1, gc.Types[gc.TINT64], mips.REGTMP)
-			gmove(&bigi, &r1)
-			gins(mips.AADDVU, &r1, &r2)
-			gc.Patch(p1, gc.Pc)
-		}
-
-		gmove(&r2, t)
-		gc.Regfree(&r2)
-		return
-
-		//warn("gmove: convert int to float not implemented: %N -> %N\n", f, t);
-	//return;
-	// algorithm is:
-	//	if small enough, use native int64 -> float64 conversion.
-	//	otherwise, halve (x -> (x>>1)|(x&1)), convert, and double.
-	/*
-	 * integer to float
-	 */
-	case gc.TINT32<<16 | gc.TFLOAT32,
-		gc.TINT32<<16 | gc.TFLOAT64,
-		gc.TINT64<<16 | gc.TFLOAT32,
-		gc.TINT64<<16 | gc.TFLOAT64,
-		gc.TINT16<<16 | gc.TFLOAT32,
-		gc.TINT16<<16 | gc.TFLOAT64,
-		gc.TINT8<<16 | gc.TFLOAT32,
-		gc.TINT8<<16 | gc.TFLOAT64,
-		gc.TUINT16<<16 | gc.TFLOAT32,
-		gc.TUINT16<<16 | gc.TFLOAT64,
-		gc.TUINT8<<16 | gc.TFLOAT32,
-		gc.TUINT8<<16 | gc.TFLOAT64,
-		gc.TUINT32<<16 | gc.TFLOAT32,
-		gc.TUINT32<<16 | gc.TFLOAT64,
-		gc.TUINT64<<16 | gc.TFLOAT32,
-		gc.TUINT64<<16 | gc.TFLOAT64:
-		bignodes()
-
-		var rtmp gc.Node
-		gc.Regalloc(&r1, gc.Types[gc.TINT64], nil)
-		gmove(f, &r1)
-		if ft == gc.TUINT64 {
-			gc.Nodreg(&rtmp, gc.Types[gc.TUINT64], mips.REGTMP)
-			gmove(&bigi, &rtmp)
-			gins(mips.AAND, &r1, &rtmp)
-			p1 := ginsbranch(mips.ABEQ, nil, &rtmp, nil, 0)
-			var r3 gc.Node
-			gc.Regalloc(&r3, gc.Types[gc.TUINT64], nil)
-			p2 := gins3(mips.AAND, nil, &r1, &r3)
-			p2.From.Type = obj.TYPE_CONST
-			p2.From.Offset = 1
-			p3 := gins(mips.ASRLV, nil, &r1)
-			p3.From.Type = obj.TYPE_CONST
-			p3.From.Offset = 1
-			gins(mips.AOR, &r3, &r1)
-			gc.Regfree(&r3)
-			gc.Patch(p1, gc.Pc)
-		}
-
-		gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], t)
-		gins(mips.AMOVV, &r1, &r2)
-		gins(mips.AMOVVD, &r2, &r2)
-		gc.Regfree(&r1)
-
-		if ft == gc.TUINT64 {
-			p1 := ginsbranch(mips.ABEQ, nil, &rtmp, nil, 0)
-			gins(mips.AADDD, &r2, &r2)
-			gc.Patch(p1, gc.Pc)
-		}
-
-		gmove(&r2, t)
-		gc.Regfree(&r2)
-		return
-
-		/*
-		 * float to float
-		 */
-	case gc.TFLOAT32<<16 | gc.TFLOAT32:
-		a = mips.AMOVF
-
-	case gc.TFLOAT64<<16 | gc.TFLOAT64:
-		a = mips.AMOVD
-
-	case gc.TFLOAT32<<16 | gc.TFLOAT64:
-		a = mips.AMOVFD
-		goto rdst
-
-	case gc.TFLOAT64<<16 | gc.TFLOAT32:
-		a = mips.AMOVDF
-		goto rdst
-	}
-
-	gins(a, f, t)
-	return
-
-	// requires register destination
-rdst:
-	{
-		gc.Regalloc(&r1, t.Type, t)
-
-		gins(a, f, &r1)
-		gmove(&r1, t)
-		gc.Regfree(&r1)
-		return
-	}
-
-	// requires register intermediate
-hard:
-	gc.Regalloc(&r1, cvt, t)
-
-	gmove(f, &r1)
-	gmove(&r1, t)
-	gc.Regfree(&r1)
-	return
-}
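The unsigned cases in gmove lean on two classic tricks spelled out in its comments: float64-to-uint64 subtracts 2^63 before the signed truncation and adds it back after, and uint64-to-float64 halves the operand (keeping the low bit for correct rounding) and doubles the result when the top bit is set. Both in plain Go, as a sketch of the algorithm rather than the emitted code:

	package main

	import "fmt"

	// f64toU64 converts using only a signed float64->int64 truncation,
	// biasing by 2^63 when the value is out of signed range.
	func f64toU64(f float64) uint64 {
		const two63 = 9223372036854775808.0 // 2^63
		if f < two63 {
			return uint64(int64(f))
		}
		return uint64(int64(f-two63)) + 1<<63
	}

	// u64toF64 converts using only a signed int64->float64 conversion,
	// halving (x>>1 | x&1) and doubling when the top bit is set.
	func u64toF64(x uint64) float64 {
		if x>>63 == 0 {
			return float64(int64(x))
		}
		return 2 * float64(int64(x>>1|x&1))
	}

	func main() {
		fmt.Println(f64toU64(1e19))      // 10000000000000000000
		fmt.Println(u64toF64(1<<63 + 1)) // 9.223372036854776e+18
	}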
-
 // gins is called by the front end.
 // It synthesizes some multiple-instruction sequences
 // so the front end can stay simpler.
@@ -577,19 +87,6 @@
 
 /*
  * generate one instruction:
- *	as f, r, t
- * r must be register, if not nil
- */
-func gins3(as obj.As, f, r, t *gc.Node) *obj.Prog {
-	p := rawgins(as, f, t)
-	if r != nil {
-		p.Reg = r.Reg
-	}
-	return p
-}
-
-/*
- * generate one instruction:
  *	as f, t
  */
 func rawgins(as obj.As, f *gc.Node, t *gc.Node) *obj.Prog {
@@ -683,384 +180,3 @@
 
 	return p
 }
-
-/*
- * return Axxx for Oxxx on type t.
- */
-func optoas(op gc.Op, t *gc.Type) obj.As {
-	if t == nil {
-		gc.Fatalf("optoas: t is nil")
-	}
-
-	// avoid constant conversions in switches below
-	const (
-		OMINUS_ = uint32(gc.OMINUS) << 16
-		OLSH_   = uint32(gc.OLSH) << 16
-		ORSH_   = uint32(gc.ORSH) << 16
-		OADD_   = uint32(gc.OADD) << 16
-		OSUB_   = uint32(gc.OSUB) << 16
-		OMUL_   = uint32(gc.OMUL) << 16
-		ODIV_   = uint32(gc.ODIV) << 16
-		OOR_    = uint32(gc.OOR) << 16
-		OAND_   = uint32(gc.OAND) << 16
-		OXOR_   = uint32(gc.OXOR) << 16
-		OEQ_    = uint32(gc.OEQ) << 16
-		ONE_    = uint32(gc.ONE) << 16
-		OLT_    = uint32(gc.OLT) << 16
-		OLE_    = uint32(gc.OLE) << 16
-		OGE_    = uint32(gc.OGE) << 16
-		OGT_    = uint32(gc.OGT) << 16
-		OCMP_   = uint32(gc.OCMP) << 16
-		OAS_    = uint32(gc.OAS) << 16
-		OHMUL_  = uint32(gc.OHMUL) << 16
-	)
-
-	a := obj.AXXX
-	switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
-	default:
-		gc.Fatalf("optoas: no entry for op=%v type=%v", op, t)
-
-	case OEQ_ | gc.TBOOL,
-		OEQ_ | gc.TINT8,
-		OEQ_ | gc.TUINT8,
-		OEQ_ | gc.TINT16,
-		OEQ_ | gc.TUINT16,
-		OEQ_ | gc.TINT32,
-		OEQ_ | gc.TUINT32,
-		OEQ_ | gc.TINT64,
-		OEQ_ | gc.TUINT64,
-		OEQ_ | gc.TPTR32,
-		OEQ_ | gc.TPTR64:
-		a = mips.ABEQ
-
-	case OEQ_ | gc.TFLOAT32, // ACMPEQF
-		OEQ_ | gc.TFLOAT64: // ACMPEQD
-		a = mips.ABFPT
-
-	case ONE_ | gc.TBOOL,
-		ONE_ | gc.TINT8,
-		ONE_ | gc.TUINT8,
-		ONE_ | gc.TINT16,
-		ONE_ | gc.TUINT16,
-		ONE_ | gc.TINT32,
-		ONE_ | gc.TUINT32,
-		ONE_ | gc.TINT64,
-		ONE_ | gc.TUINT64,
-		ONE_ | gc.TPTR32,
-		ONE_ | gc.TPTR64:
-		a = mips.ABNE
-
-	case ONE_ | gc.TFLOAT32, // ACMPEQF
-		ONE_ | gc.TFLOAT64: // ACMPEQD
-		a = mips.ABFPF
-
-	case OLT_ | gc.TINT8, // ASGT
-		OLT_ | gc.TINT16,
-		OLT_ | gc.TINT32,
-		OLT_ | gc.TINT64,
-		OLT_ | gc.TUINT8, // ASGTU
-		OLT_ | gc.TUINT16,
-		OLT_ | gc.TUINT32,
-		OLT_ | gc.TUINT64:
-		a = mips.ABNE
-
-	case OLT_ | gc.TFLOAT32, // ACMPGEF
-		OLT_ | gc.TFLOAT64: // ACMPGED
-		a = mips.ABFPT
-
-	case OLE_ | gc.TINT8, // ASGT
-		OLE_ | gc.TINT16,
-		OLE_ | gc.TINT32,
-		OLE_ | gc.TINT64,
-		OLE_ | gc.TUINT8, // ASGTU
-		OLE_ | gc.TUINT16,
-		OLE_ | gc.TUINT32,
-		OLE_ | gc.TUINT64:
-		a = mips.ABEQ
-
-	case OLE_ | gc.TFLOAT32, // ACMPGTF
-		OLE_ | gc.TFLOAT64: // ACMPGTD
-		a = mips.ABFPT
-
-	case OGT_ | gc.TINT8, // ASGT
-		OGT_ | gc.TINT16,
-		OGT_ | gc.TINT32,
-		OGT_ | gc.TINT64,
-		OGT_ | gc.TUINT8, // ASGTU
-		OGT_ | gc.TUINT16,
-		OGT_ | gc.TUINT32,
-		OGT_ | gc.TUINT64:
-		a = mips.ABNE
-
-	case OGT_ | gc.TFLOAT32, // ACMPGTF
-		OGT_ | gc.TFLOAT64: // ACMPGTD
-		a = mips.ABFPT
-
-	case OGE_ | gc.TINT8, // ASGT
-		OGE_ | gc.TINT16,
-		OGE_ | gc.TINT32,
-		OGE_ | gc.TINT64,
-		OGE_ | gc.TUINT8, // ASGTU
-		OGE_ | gc.TUINT16,
-		OGE_ | gc.TUINT32,
-		OGE_ | gc.TUINT64:
-		a = mips.ABEQ
-
-	case OGE_ | gc.TFLOAT32, // ACMPGEF
-		OGE_ | gc.TFLOAT64: // ACMPGED
-		a = mips.ABFPT
-
-	case OAS_ | gc.TBOOL,
-		OAS_ | gc.TINT8:
-		a = mips.AMOVB
-
-	case OAS_ | gc.TUINT8:
-		a = mips.AMOVBU
-
-	case OAS_ | gc.TINT16:
-		a = mips.AMOVH
-
-	case OAS_ | gc.TUINT16:
-		a = mips.AMOVHU
-
-	case OAS_ | gc.TINT32:
-		a = mips.AMOVW
-
-	case OAS_ | gc.TUINT32,
-		OAS_ | gc.TPTR32:
-		a = mips.AMOVWU
-
-	case OAS_ | gc.TINT64,
-		OAS_ | gc.TUINT64,
-		OAS_ | gc.TPTR64:
-		a = mips.AMOVV
-
-	case OAS_ | gc.TFLOAT32:
-		a = mips.AMOVF
-
-	case OAS_ | gc.TFLOAT64:
-		a = mips.AMOVD
-
-	case OADD_ | gc.TINT8,
-		OADD_ | gc.TUINT8,
-		OADD_ | gc.TINT16,
-		OADD_ | gc.TUINT16,
-		OADD_ | gc.TINT32,
-		OADD_ | gc.TUINT32,
-		OADD_ | gc.TPTR32:
-		a = mips.AADDU
-
-	case OADD_ | gc.TINT64,
-		OADD_ | gc.TUINT64,
-		OADD_ | gc.TPTR64:
-		a = mips.AADDVU
-
-	case OADD_ | gc.TFLOAT32:
-		a = mips.AADDF
-
-	case OADD_ | gc.TFLOAT64:
-		a = mips.AADDD
-
-	case OSUB_ | gc.TINT8,
-		OSUB_ | gc.TUINT8,
-		OSUB_ | gc.TINT16,
-		OSUB_ | gc.TUINT16,
-		OSUB_ | gc.TINT32,
-		OSUB_ | gc.TUINT32,
-		OSUB_ | gc.TPTR32:
-		a = mips.ASUBU
-
-	case OSUB_ | gc.TINT64,
-		OSUB_ | gc.TUINT64,
-		OSUB_ | gc.TPTR64:
-		a = mips.ASUBVU
-
-	case OSUB_ | gc.TFLOAT32:
-		a = mips.ASUBF
-
-	case OSUB_ | gc.TFLOAT64:
-		a = mips.ASUBD
-
-	case OMINUS_ | gc.TINT8,
-		OMINUS_ | gc.TUINT8,
-		OMINUS_ | gc.TINT16,
-		OMINUS_ | gc.TUINT16,
-		OMINUS_ | gc.TINT32,
-		OMINUS_ | gc.TUINT32,
-		OMINUS_ | gc.TPTR32,
-		OMINUS_ | gc.TINT64,
-		OMINUS_ | gc.TUINT64,
-		OMINUS_ | gc.TPTR64:
-		a = mips.ASUBVU
-
-	case OAND_ | gc.TINT8,
-		OAND_ | gc.TUINT8,
-		OAND_ | gc.TINT16,
-		OAND_ | gc.TUINT16,
-		OAND_ | gc.TINT32,
-		OAND_ | gc.TUINT32,
-		OAND_ | gc.TPTR32,
-		OAND_ | gc.TINT64,
-		OAND_ | gc.TUINT64,
-		OAND_ | gc.TPTR64:
-		a = mips.AAND
-
-	case OOR_ | gc.TINT8,
-		OOR_ | gc.TUINT8,
-		OOR_ | gc.TINT16,
-		OOR_ | gc.TUINT16,
-		OOR_ | gc.TINT32,
-		OOR_ | gc.TUINT32,
-		OOR_ | gc.TPTR32,
-		OOR_ | gc.TINT64,
-		OOR_ | gc.TUINT64,
-		OOR_ | gc.TPTR64:
-		a = mips.AOR
-
-	case OXOR_ | gc.TINT8,
-		OXOR_ | gc.TUINT8,
-		OXOR_ | gc.TINT16,
-		OXOR_ | gc.TUINT16,
-		OXOR_ | gc.TINT32,
-		OXOR_ | gc.TUINT32,
-		OXOR_ | gc.TPTR32,
-		OXOR_ | gc.TINT64,
-		OXOR_ | gc.TUINT64,
-		OXOR_ | gc.TPTR64:
-		a = mips.AXOR
-
-		// TODO(minux): handle rotates
-	//case CASE(OLROT, TINT8):
-	//case CASE(OLROT, TUINT8):
-	//case CASE(OLROT, TINT16):
-	//case CASE(OLROT, TUINT16):
-	//case CASE(OLROT, TINT32):
-	//case CASE(OLROT, TUINT32):
-	//case CASE(OLROT, TPTR32):
-	//case CASE(OLROT, TINT64):
-	//case CASE(OLROT, TUINT64):
-	//case CASE(OLROT, TPTR64):
-	//	a = 0//???; RLDC?
-	//	break;
-
-	case OLSH_ | gc.TINT8,
-		OLSH_ | gc.TUINT8,
-		OLSH_ | gc.TINT16,
-		OLSH_ | gc.TUINT16,
-		OLSH_ | gc.TINT32,
-		OLSH_ | gc.TUINT32,
-		OLSH_ | gc.TPTR32,
-		OLSH_ | gc.TINT64,
-		OLSH_ | gc.TUINT64,
-		OLSH_ | gc.TPTR64:
-		a = mips.ASLLV
-
-	case ORSH_ | gc.TUINT8,
-		ORSH_ | gc.TUINT16,
-		ORSH_ | gc.TUINT32,
-		ORSH_ | gc.TPTR32,
-		ORSH_ | gc.TUINT64,
-		ORSH_ | gc.TPTR64:
-		a = mips.ASRLV
-
-	case ORSH_ | gc.TINT8,
-		ORSH_ | gc.TINT16,
-		ORSH_ | gc.TINT32,
-		ORSH_ | gc.TINT64:
-		a = mips.ASRAV
-
-		// TODO(minux): handle rotates
-	//case CASE(ORROTC, TINT8):
-	//case CASE(ORROTC, TUINT8):
-	//case CASE(ORROTC, TINT16):
-	//case CASE(ORROTC, TUINT16):
-	//case CASE(ORROTC, TINT32):
-	//case CASE(ORROTC, TUINT32):
-	//case CASE(ORROTC, TINT64):
-	//case CASE(ORROTC, TUINT64):
-	//	a = 0//??? RLDC??
-	//	break;
-
-	case OHMUL_ | gc.TINT64:
-		a = mips.AMULV
-
-	case OHMUL_ | gc.TUINT64,
-		OHMUL_ | gc.TPTR64:
-		a = mips.AMULVU
-
-	case OMUL_ | gc.TINT8,
-		OMUL_ | gc.TINT16,
-		OMUL_ | gc.TINT32,
-		OMUL_ | gc.TINT64:
-		a = mips.AMULV
-
-	case OMUL_ | gc.TUINT8,
-		OMUL_ | gc.TUINT16,
-		OMUL_ | gc.TUINT32,
-		OMUL_ | gc.TPTR32,
-		OMUL_ | gc.TUINT64,
-		OMUL_ | gc.TPTR64:
-		a = mips.AMULVU
-
-	case OMUL_ | gc.TFLOAT32:
-		a = mips.AMULF
-
-	case OMUL_ | gc.TFLOAT64:
-		a = mips.AMULD
-
-	case ODIV_ | gc.TINT8,
-		ODIV_ | gc.TINT16,
-		ODIV_ | gc.TINT32,
-		ODIV_ | gc.TINT64:
-		a = mips.ADIVV
-
-	case ODIV_ | gc.TUINT8,
-		ODIV_ | gc.TUINT16,
-		ODIV_ | gc.TUINT32,
-		ODIV_ | gc.TPTR32,
-		ODIV_ | gc.TUINT64,
-		ODIV_ | gc.TPTR64:
-		a = mips.ADIVVU
-
-	case ODIV_ | gc.TFLOAT32:
-		a = mips.ADIVF
-
-	case ODIV_ | gc.TFLOAT64:
-		a = mips.ADIVD
-	}
-
-	return a
-}
-
-const (
-	ODynam   = 1 << 0
-	OAddable = 1 << 1
-)
-
-func xgen(n *gc.Node, a *gc.Node, o int) bool {
-	// TODO(minux)
-
-	return -1 != 0 /*TypeKind(100016)*/
-}
-
-func sudoclean() {
-	return
-}
-
-/*
- * generate code to compute address of n,
- * a reference to a (perhaps nested) field inside
- * an array or struct.
- * return 0 on failure, 1 on success.
- * on success, leaves usable address in a.
- *
- * caller is responsible for calling sudoclean
- * after successful sudoaddable,
- * to release the register used for a.
- */
-func sudoaddable(as obj.As, n *gc.Node, a *obj.Addr) bool {
-	// TODO(minux)
-
-	*a = obj.Addr{}
-	return false
-}
diff --git a/src/cmd/compile/internal/mips64/peep.go b/src/cmd/compile/internal/mips64/peep.go
deleted file mode 100644
index ce639ac..0000000
--- a/src/cmd/compile/internal/mips64/peep.go
+++ /dev/null
@@ -1,772 +0,0 @@
-// Derived from Inferno utils/6c/peep.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/6c/peep.c
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package mips64
-
-import (
-	"cmd/compile/internal/gc"
-	"cmd/internal/obj"
-	"cmd/internal/obj/mips"
-	"fmt"
-)
-
-var gactive uint32
-
-func peep(firstp *obj.Prog) {
-	g := gc.Flowstart(firstp, nil)
-	if g == nil {
-		return
-	}
-	gactive = 0
-
-	var p *obj.Prog
-	var r *gc.Flow
-	var t int
-loop1:
-	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
-		gc.Dumpit("loop1", g.Start, 0)
-	}
-
-	t = 0
-	for r = g.Start; r != nil; r = r.Link {
-		p = r.Prog
-
-		// TODO(austin) Handle smaller moves.  arm and amd64
-		// distinguish between moves that moves that *must*
-		// sign/zero extend and moves that don't care so they
-		// can eliminate moves that don't care without
-		// breaking moves that do care. This might let us
-		// simplify or remove the next peep loop, too.
-		if p.As == mips.AMOVV || p.As == mips.AMOVF || p.As == mips.AMOVD {
-			if regtyp(&p.To) {
-				// Try to eliminate reg->reg moves
-				if regtyp(&p.From) {
-					if isfreg(&p.From) == isfreg(&p.To) {
-						if copyprop(r) {
-							excise(r)
-							t++
-						} else if subprop(r) && copyprop(r) {
-							excise(r)
-							t++
-						}
-					}
-				}
-
-				// Convert uses to $0 to uses of R0 and
-				// propagate R0
-				if regzer(&p.From) {
-					if p.To.Type == obj.TYPE_REG && !isfreg(&p.To) {
-						p.From.Type = obj.TYPE_REG
-						p.From.Reg = mips.REGZERO
-						if copyprop(r) {
-							excise(r)
-							t++
-						} else if subprop(r) && copyprop(r) {
-							excise(r)
-							t++
-						}
-					}
-				}
-			}
-		}
-	}
-
-	if t != 0 {
-		goto loop1
-	}
-
-	/*
-	 * look for MOVB x,R; MOVB R,R (for small MOVs not handled above)
-	 */
-	var p1 *obj.Prog
-	var r1 *gc.Flow
-	for r := g.Start; r != nil; r = r.Link {
-		p = r.Prog
-		switch p.As {
-		default:
-			continue
-
-		case mips.AMOVH,
-			mips.AMOVHU,
-			mips.AMOVB,
-			mips.AMOVBU,
-			mips.AMOVW,
-			mips.AMOVWU:
-			if p.To.Type != obj.TYPE_REG {
-				continue
-			}
-		}
-
-		r1 = r.Link
-		if r1 == nil {
-			continue
-		}
-		p1 = r1.Prog
-		if p1.As != p.As {
-			continue
-		}
-		if p1.From.Type != obj.TYPE_REG || p1.From.Reg != p.To.Reg {
-			continue
-		}
-		if p1.To.Type != obj.TYPE_REG || p1.To.Reg != p.To.Reg {
-			continue
-		}
-		excise(r1)
-	}
-
-	gc.Flowend(g)
-}
-
-func excise(r *gc.Flow) {
-	p := r.Prog
-	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
-		fmt.Printf("%v ===delete===\n", p)
-	}
-	obj.Nopout(p)
-	gc.Ostats.Ndelmov++
-}
-
-// regzer returns true if a's value is 0 (a is R0 or $0)
-func regzer(a *obj.Addr) bool {
-	if a.Type == obj.TYPE_CONST || a.Type == obj.TYPE_ADDR {
-		if a.Sym == nil && a.Reg == 0 {
-			if a.Offset == 0 {
-				return true
-			}
-		}
-	}
-	return a.Type == obj.TYPE_REG && a.Reg == mips.REGZERO
-}
-
-func regtyp(a *obj.Addr) bool {
-	// TODO(rsc): Floating point register exclusions?
-	return a.Type == obj.TYPE_REG && mips.REG_R0 <= a.Reg && a.Reg <= mips.REG_F31 && a.Reg != mips.REGZERO
-}
-
-func isfreg(a *obj.Addr) bool {
-	return mips.REG_F0 <= a.Reg && a.Reg <= mips.REG_F31
-}
-
-/*
- * the idea is to substitute
- * one register for another
- * from one MOV to another
- *	MOV	a, R1
- *	ADD	b, R1	/ no use of R2
- *	MOV	R1, R2
- * would be converted to
- *	MOV	a, R2
- *	ADD	b, R2
- *	MOV	R2, R1
- * hopefully, then the former or latter MOV
- * will be eliminated by copy propagation.
- *
- * r0 (the argument, not the register) is the MOV at the end of the
- * above sequences.  This returns true if it modified any instructions.
- */
-func subprop(r0 *gc.Flow) bool {
-	p := r0.Prog
-	v1 := &p.From
-	if !regtyp(v1) {
-		return false
-	}
-	v2 := &p.To
-	if !regtyp(v2) {
-		return false
-	}
-	for r := gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
-		if gc.Uniqs(r) == nil {
-			break
-		}
-		p = r.Prog
-		if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
-			continue
-		}
-		if p.Info.Flags&gc.Call != 0 {
-			return false
-		}
-
-		if p.Info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightWrite {
-			if p.To.Type == v1.Type {
-				if p.To.Reg == v1.Reg {
-					copysub(&p.To, v1, v2, true)
-					if gc.Debug['P'] != 0 {
-						fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
-						if p.From.Type == v2.Type {
-							fmt.Printf(" excise")
-						}
-						fmt.Printf("\n")
-					}
-
-					for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
-						p = r.Prog
-						copysub(&p.From, v1, v2, true)
-						copysub1(p, v1, v2, true)
-						copysub(&p.To, v1, v2, true)
-						if gc.Debug['P'] != 0 {
-							fmt.Printf("%v\n", r.Prog)
-						}
-					}
-
-					v1.Reg, v2.Reg = v2.Reg, v1.Reg
-					if gc.Debug['P'] != 0 {
-						fmt.Printf("%v last\n", r.Prog)
-					}
-					return true
-				}
-			}
-		}
-
-		if copyau(&p.From, v2) || copyau1(p, v2) || copyau(&p.To, v2) {
-			break
-		}
-		if copysub(&p.From, v1, v2, false) || copysub1(p, v1, v2, false) || copysub(&p.To, v1, v2, false) {
-			break
-		}
-	}
-
-	return false
-}
-
-/*
- * The idea is to remove redundant copies.
- *	v1->v2	F=0
- *	(use v2	s/v2/v1/)*
- *	set v1	F=1
- *	use v2	return fail (v1->v2 move must remain)
- *	-----------------
- *	v1->v2	F=0
- *	(use v2	s/v2/v1/)*
- *	set v1	F=1
- *	set v2	return success (caller can remove v1->v2 move)
- */
-func copyprop(r0 *gc.Flow) bool {
-	p := r0.Prog
-	v1 := &p.From
-	v2 := &p.To
-	if copyas(v1, v2) {
-		if gc.Debug['P'] != 0 {
-			fmt.Printf("eliminating self-move: %v\n", r0.Prog)
-		}
-		return true
-	}
-
-	gactive++
-	if gc.Debug['P'] != 0 {
-		fmt.Printf("trying to eliminate %v->%v move from:\n%v\n", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r0.Prog)
-	}
-	return copy1(v1, v2, r0.S1, false)
-}
-
-// copy1 replaces uses of v2 with v1 starting at r and returns true if
-// all uses were rewritten.
-func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f bool) bool {
-	if uint32(r.Active) == gactive {
-		if gc.Debug['P'] != 0 {
-			fmt.Printf("act set; return 1\n")
-		}
-		return true
-	}
-
-	r.Active = int32(gactive)
-	if gc.Debug['P'] != 0 {
-		fmt.Printf("copy1 replace %v with %v f=%v\n", gc.Ctxt.Dconv(v2), gc.Ctxt.Dconv(v1), f)
-	}
-	for ; r != nil; r = r.S1 {
-		p := r.Prog
-		if gc.Debug['P'] != 0 {
-			fmt.Printf("%v", p)
-		}
-		if !f && gc.Uniqp(r) == nil {
-			// Multiple predecessors; conservatively
-			// assume v1 was set on other path
-			f = true
-
-			if gc.Debug['P'] != 0 {
-				fmt.Printf("; merge; f=%v", f)
-			}
-		}
-
-		switch t := copyu(p, v2, nil); t {
-		case 2: /* rar, can't split */
-			if gc.Debug['P'] != 0 {
-				fmt.Printf("; %v rar; return 0\n", gc.Ctxt.Dconv(v2))
-			}
-			return false
-
-		case 3: /* set */
-			if gc.Debug['P'] != 0 {
-				fmt.Printf("; %v set; return 1\n", gc.Ctxt.Dconv(v2))
-			}
-			return true
-
-		case 1, /* used, substitute */
-			4: /* use and set */
-			if f {
-				if gc.Debug['P'] == 0 {
-					return false
-				}
-				if t == 4 {
-					fmt.Printf("; %v used+set and f=%v; return 0\n", gc.Ctxt.Dconv(v2), f)
-				} else {
-					fmt.Printf("; %v used and f=%v; return 0\n", gc.Ctxt.Dconv(v2), f)
-				}
-				return false
-			}
-
-			if copyu(p, v2, v1) != 0 {
-				if gc.Debug['P'] != 0 {
-					fmt.Printf("; sub fail; return 0\n")
-				}
-				return false
-			}
-
-			if gc.Debug['P'] != 0 {
-				fmt.Printf("; sub %v->%v\n => %v", gc.Ctxt.Dconv(v2), gc.Ctxt.Dconv(v1), p)
-			}
-			if t == 4 {
-				if gc.Debug['P'] != 0 {
-					fmt.Printf("; %v used+set; return 1\n", gc.Ctxt.Dconv(v2))
-				}
-				return true
-			}
-		}
-
-		if !f {
-			t := copyu(p, v1, nil)
-			if t == 2 || t == 3 || t == 4 {
-				f = true
-				if gc.Debug['P'] != 0 {
-					fmt.Printf("; %v set and !f; f=%v", gc.Ctxt.Dconv(v1), f)
-				}
-			}
-		}
-
-		if gc.Debug['P'] != 0 {
-			fmt.Printf("\n")
-		}
-		if r.S2 != nil {
-			if !copy1(v1, v2, r.S2, f) {
-				return false
-			}
-		}
-	}
-
-	return true
-}
-
-// If s==nil, copyu returns the set/use of v in p; otherwise, it
-// modifies p to replace reads of v with reads of s and returns 0 for
-// success or non-zero for failure.
-//
-// If s==nil, copyu returns one of the following values:
-//	1 if v is only used
-//	2 if v is set and used in one address (read-alter-rewrite;
-//	  can't substitute)
-//	3 if v is only set
-//	4 if v is set in one address and used in another (so addresses
-//	  can be rewritten independently)
-//	0 otherwise (not touched)
-func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
-	if p.From3Type() != obj.TYPE_NONE {
-		// never generates a from3
-		fmt.Printf("copyu: from3 (%v) not implemented\n", gc.Ctxt.Dconv(p.From3))
-	}
-
-	switch p.As {
-	default:
-		fmt.Printf("copyu: can't find %v\n", p.As)
-		return 2
-
-	case obj.ANOP, /* read p->from, write p->to */
-		mips.AMOVV,
-		mips.AMOVF,
-		mips.AMOVD,
-		mips.AMOVH,
-		mips.AMOVHU,
-		mips.AMOVB,
-		mips.AMOVBU,
-		mips.AMOVW,
-		mips.AMOVWU,
-		mips.AMOVFD,
-		mips.AMOVDF,
-		mips.AMOVDW,
-		mips.AMOVWD,
-		mips.AMOVFW,
-		mips.AMOVWF,
-		mips.AMOVDV,
-		mips.AMOVVD,
-		mips.AMOVFV,
-		mips.AMOVVF,
-		mips.ATRUNCFV,
-		mips.ATRUNCDV,
-		mips.ATRUNCFW,
-		mips.ATRUNCDW:
-		if s != nil {
-			if copysub(&p.From, v, s, true) {
-				return 1
-			}
-
-			// Update only indirect uses of v in p->to
-			if !copyas(&p.To, v) {
-				if copysub(&p.To, v, s, true) {
-					return 1
-				}
-			}
-			return 0
-		}
-
-		if copyas(&p.To, v) {
-			// Fix up implicit from
-			if p.From.Type == obj.TYPE_NONE {
-				p.From = p.To
-			}
-			if copyau(&p.From, v) {
-				return 4
-			}
-			return 3
-		}
-
-		if copyau(&p.From, v) {
-			return 1
-		}
-		if copyau(&p.To, v) {
-			// p->to only indirectly uses v
-			return 1
-		}
-
-		return 0
-
-	case mips.ASGT, /* read p->from, read p->reg, write p->to */
-		mips.ASGTU,
-
-		mips.AADD,
-		mips.AADDU,
-		mips.ASUB,
-		mips.ASUBU,
-		mips.ASLL,
-		mips.ASRL,
-		mips.ASRA,
-		mips.AOR,
-		mips.ANOR,
-		mips.AAND,
-		mips.AXOR,
-
-		mips.AADDV,
-		mips.AADDVU,
-		mips.ASUBV,
-		mips.ASUBVU,
-		mips.ASLLV,
-		mips.ASRLV,
-		mips.ASRAV,
-
-		mips.AADDF,
-		mips.AADDD,
-		mips.ASUBF,
-		mips.ASUBD,
-		mips.AMULF,
-		mips.AMULD,
-		mips.ADIVF,
-		mips.ADIVD:
-		if s != nil {
-			if copysub(&p.From, v, s, true) {
-				return 1
-			}
-			if copysub1(p, v, s, true) {
-				return 1
-			}
-
-			// Update only indirect uses of v in p->to
-			if !copyas(&p.To, v) {
-				if copysub(&p.To, v, s, true) {
-					return 1
-				}
-			}
-			return 0
-		}
-
-		if copyas(&p.To, v) {
-			if p.Reg == 0 {
-				// Fix up implicit reg (e.g., ADD
-				// R3,R4 -> ADD R3,R4,R4) so we can
-				// update reg and to separately.
-				p.Reg = p.To.Reg
-			}
-
-			if copyau(&p.From, v) {
-				return 4
-			}
-			if copyau1(p, v) {
-				return 4
-			}
-			return 3
-		}
-
-		if copyau(&p.From, v) {
-			return 1
-		}
-		if copyau1(p, v) {
-			return 1
-		}
-		if copyau(&p.To, v) {
-			return 1
-		}
-		return 0
-
-	case obj.ACHECKNIL, /* read p->from */
-		mips.ABEQ, /* read p->from, read p->reg */
-		mips.ABNE,
-		mips.ABGTZ,
-		mips.ABGEZ,
-		mips.ABLTZ,
-		mips.ABLEZ,
-
-		mips.ACMPEQD,
-		mips.ACMPEQF,
-		mips.ACMPGED,
-		mips.ACMPGEF,
-		mips.ACMPGTD,
-		mips.ACMPGTF,
-		mips.ABFPF,
-		mips.ABFPT,
-
-		mips.AMUL,
-		mips.AMULU,
-		mips.ADIV,
-		mips.ADIVU,
-		mips.AMULV,
-		mips.AMULVU,
-		mips.ADIVV,
-		mips.ADIVVU:
-		if s != nil {
-			if copysub(&p.From, v, s, true) {
-				return 1
-			}
-			if copysub1(p, v, s, true) {
-				return 1
-			}
-			return 0
-		}
-
-		if copyau(&p.From, v) {
-			return 1
-		}
-		if copyau1(p, v) {
-			return 1
-		}
-		return 0
-
-	case mips.AJMP: /* read p->to */
-		if s != nil {
-			if copysub(&p.To, v, s, true) {
-				return 1
-			}
-			return 0
-		}
-
-		if copyau(&p.To, v) {
-			return 1
-		}
-		return 0
-
-	case mips.ARET: /* funny */
-		if s != nil {
-			return 0
-		}
-
-		// All registers die at this point, so claim
-		// everything is set (and not used).
-		return 3
-
-	case mips.AJAL: /* funny */
-		if v.Type == obj.TYPE_REG {
-			// TODO(rsc): REG_R0 and REG_F0 used to be
-			// (when register numbers started at 0) exregoffset and exfregoffset,
-			// which are unset entirely.
-			// It's strange that this handles R0 and F0 differently from the other
-			// registers. Possible failure to optimize?
-			if mips.REG_R0 < v.Reg && v.Reg <= mips.REG_R31 {
-				return 2
-			}
-			if v.Reg == mips.REGARG {
-				return 2
-			}
-			if mips.REG_F0 < v.Reg && v.Reg <= mips.REG_F31 {
-				return 2
-			}
-		}
-
-		if p.From.Type == obj.TYPE_REG && v.Type == obj.TYPE_REG && p.From.Reg == v.Reg {
-			return 2
-		}
-
-		if s != nil {
-			if copysub(&p.To, v, s, true) {
-				return 1
-			}
-			return 0
-		}
-
-		if copyau(&p.To, v) {
-			return 4
-		}
-		return 3
-
-	// R0 is zero, used by DUFFZERO, cannot be substituted.
-	// R1 is ptr to memory, used and set, cannot be substituted.
-	case obj.ADUFFZERO:
-		if v.Type == obj.TYPE_REG {
-			if v.Reg == 0 {
-				return 1
-			}
-			if v.Reg == 1 {
-				return 2
-			}
-		}
-
-		return 0
-
-	// R1, R2 are ptr to src, dst, used and set, cannot be substituted.
-	// R3 is scratch, set by DUFFCOPY, cannot be substituted.
-	case obj.ADUFFCOPY:
-		if v.Type == obj.TYPE_REG {
-			if v.Reg == 1 || v.Reg == 2 {
-				return 2
-			}
-			if v.Reg == 3 {
-				return 3
-			}
-		}
-
-		return 0
-
-	case obj.ATEXT: /* funny */
-		if v.Type == obj.TYPE_REG {
-			if v.Reg == mips.REGARG {
-				return 3
-			}
-		}
-		return 0
-
-	case obj.APCDATA,
-		obj.AFUNCDATA,
-		obj.AVARDEF,
-		obj.AVARKILL,
-		obj.AVARLIVE,
-		obj.AUSEFIELD:
-		return 0
-	}
-}
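The numeric protocol above is easy to misread, so here is the contract spelled out as a hypothetical decoding helper (the codes are the deleted code's real contract; the helper is not):

	package main

	import "fmt"

	// copyuName names copyu's return codes.
	func copyuName(t int) string {
		switch t {
		case 0:
			return "not touched"
		case 1:
			return "used only (substitutable)"
		case 2:
			return "read-alter-rewrite (can't substitute)"
		case 3:
			return "set only"
		case 4:
			return "set in one address, used in another"
		}
		return fmt.Sprintf("unknown(%d)", t)
	}

	func main() {
		for t := 0; t <= 4; t++ {
			fmt.Printf("%d: %s\n", t, copyuName(t))
		}
	}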
-
-// copyas reports whether a and v address the same register.
-//
-// If a is the from operand, this means this operation reads the
-// register in v. If a is the to operand, this means this operation
-// writes the register in v.
-func copyas(a *obj.Addr, v *obj.Addr) bool {
-	if regtyp(v) {
-		if a.Type == v.Type {
-			if a.Reg == v.Reg {
-				return true
-			}
-		}
-	}
-	return false
-}
-
-// copyau reports whether a either directly or indirectly addresses the
-// same register as v.
-//
-// If a is the from operand, this means this operation reads the
-// register in v. If a is the to operand, this means the operation
-// either reads or writes the register in v (if !copyas(a, v), then
-// the operation reads the register in v).
-func copyau(a *obj.Addr, v *obj.Addr) bool {
-	if copyas(a, v) {
-		return true
-	}
-	if v.Type == obj.TYPE_REG {
-		if a.Type == obj.TYPE_MEM || (a.Type == obj.TYPE_ADDR && a.Reg != 0) {
-			if v.Reg == a.Reg {
-				return true
-			}
-		}
-	}
-	return false
-}
-
-// copyau1 returns true if p->reg references the same register as v and v
-// is a direct reference.
-func copyau1(p *obj.Prog, v *obj.Addr) bool {
-	return regtyp(v) && v.Reg != 0 && p.Reg == v.Reg
-}
-
-// copysub replaces v with s in a if f==true or reports whether it could if f==false.
-// It returns true on failure to substitute, which never happens on mips.
-// TODO(dfc) remove unused return value, remove calls with f=false as they do nothing.
-func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f bool) bool {
-	if f && copyau(a, v) {
-		a.Reg = s.Reg
-	}
-	return false
-}
-
-// copysub1 replaces v with s in p1->reg if f==true or reports whether it could if f==false.
-// It returns true on failure to substitute, which never happens on mips.
-// TODO(dfc) remove unused return value, remove calls with f=false as they do nothing.
-func copysub1(p1 *obj.Prog, v *obj.Addr, s *obj.Addr, f bool) bool {
-	if f && copyau1(p1, v) {
-		p1.Reg = s.Reg
-	}
-	return false
-}
-
-func sameaddr(a *obj.Addr, v *obj.Addr) bool {
-	if a.Type != v.Type {
-		return false
-	}
-	if regtyp(v) && a.Reg == v.Reg {
-		return true
-	}
-	if v.Type == obj.NAME_AUTO || v.Type == obj.NAME_PARAM {
-		if v.Offset == a.Offset {
-			return true
-		}
-	}
-	return false
-}
-
-func smallindir(a *obj.Addr, reg *obj.Addr) bool {
-	return reg.Type == obj.TYPE_REG && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && 0 <= a.Offset && a.Offset < 4096
-}
-
-func stackaddr(a *obj.Addr) bool {
-	return a.Type == obj.TYPE_REG && a.Reg == mips.REGSP
-}
diff --git a/src/cmd/compile/internal/mips64/reg.go b/src/cmd/compile/internal/mips64/reg.go
index d384933..8655f5b 100644
--- a/src/cmd/compile/internal/mips64/reg.go
+++ b/src/cmd/compile/internal/mips64/reg.go
@@ -31,93 +31,6 @@
 package mips64
 
 import "cmd/internal/obj/mips"
-import "cmd/compile/internal/gc"
-
-const (
-	NREGVAR = 64 /* 32 general + 32 floating */
-)
-
-var regname = []string{
-	".R0",
-	".R1",
-	".R2",
-	".R3",
-	".R4",
-	".R5",
-	".R6",
-	".R7",
-	".R8",
-	".R9",
-	".R10",
-	".R11",
-	".R12",
-	".R13",
-	".R14",
-	".R15",
-	".R16",
-	".R17",
-	".R18",
-	".R19",
-	".R20",
-	".R21",
-	".R22",
-	".R23",
-	".R24",
-	".R25",
-	".R26",
-	".R27",
-	".R28",
-	".R29",
-	".R30",
-	".R31",
-	".F0",
-	".F1",
-	".F2",
-	".F3",
-	".F4",
-	".F5",
-	".F6",
-	".F7",
-	".F8",
-	".F9",
-	".F10",
-	".F11",
-	".F12",
-	".F13",
-	".F14",
-	".F15",
-	".F16",
-	".F17",
-	".F18",
-	".F19",
-	".F20",
-	".F21",
-	".F22",
-	".F23",
-	".F24",
-	".F25",
-	".F26",
-	".F27",
-	".F28",
-	".F29",
-	".F30",
-	".F31",
-}
-
-func regnames(n *int) []string {
-	*n = NREGVAR
-	return regname
-}
-
-func excludedregs() uint64 {
-	// Exclude registers with fixed functions
-	regbits := 1<<0 | RtoB(mips.REGSP) | RtoB(mips.REGG) | RtoB(mips.REGSB) | RtoB(mips.REGTMP) | RtoB(mips.REGLINK) | RtoB(mips.REG_R26) | RtoB(mips.REG_R27)
-	return regbits
-}
-
-func doregbits(r int) uint64 {
-	return 0
-}
 
 /*
  * track register variables including external registers:
@@ -140,19 +53,3 @@
 	}
 	return 0
 }
-
-func BtoR(b uint64) int {
-	b &= 0xffffffff
-	if b == 0 {
-		return 0
-	}
-	return gc.Bitno(b) + mips.REG_R0
-}
-
-func BtoF(b uint64) int {
-	b >>= 32
-	if b == 0 {
-		return 0
-	}
-	return gc.Bitno(b) + mips.REG_F0
-}
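
The BtoR/BtoF pair deleted above inverted the register-variable bitmask: GPRs live in the low 32 bits of the mask and FPRs in the high 32. A sketch of the round trip, using a local bitno in place of gc.Bitno and made-up base values in place of mips.REG_R0/REG_F0:

package main

import "fmt"

// bitno returns the index of the lowest set bit, like gc.Bitno.
// b must be nonzero.
func bitno(b uint64) int {
	n := 0
	for b&1 == 0 {
		b >>= 1
		n++
	}
	return n
}

// Hypothetical stand-ins for mips.REG_R0 and mips.REG_F0.
const (
	regR0 = 0
	regF0 = 32
)

// btoR maps a bitmask back to a GPR: only the low 32 bits count.
func btoR(b uint64) int {
	b &= 0xffffffff
	if b == 0 {
		return 0
	}
	return bitno(b) + regR0
}

// btoF maps a bitmask back to an FPR: only the high 32 bits count.
func btoF(b uint64) int {
	b >>= 32
	if b == 0 {
		return 0
	}
	return bitno(b) + regF0
}

func main() {
	fmt.Println(btoR(1 << 5))        // GPR 5
	fmt.Println(btoF(1 << (32 + 7))) // FPR 39 = regF0 + 7
}
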
diff --git a/src/cmd/compile/internal/ppc64/cgen.go b/src/cmd/compile/internal/ppc64/cgen.go
deleted file mode 100644
index f4cc9c4..0000000
--- a/src/cmd/compile/internal/ppc64/cgen.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ppc64
-
-import (
-	"cmd/compile/internal/gc"
-	"cmd/internal/obj"
-	"cmd/internal/obj/ppc64"
-)
-
-func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
-	// determine alignment.
-	// want to avoid unaligned access, so have to use
-	// smaller operations for less aligned types.
-	// for example moving [4]byte must use 4 MOVB not 1 MOVW.
-	align := int(n.Type.Align)
-
-	var op obj.As
-	switch align {
-	default:
-		gc.Fatalf("sgen: invalid alignment %d for %v", align, n.Type)
-
-	case 1:
-		op = ppc64.AMOVBU
-
-	case 2:
-		op = ppc64.AMOVHU
-
-	case 4:
-		op = ppc64.AMOVWZU // there is no lwau, only lwaux
-
-	case 8:
-		op = ppc64.AMOVDU
-	}
-
-	if w%int64(align) != 0 {
-		gc.Fatalf("sgen: unaligned size %d (align=%d) for %v", w, align, n.Type)
-	}
-	c := int32(w / int64(align))
-
-	// if we are copying forward on the stack and
-	// the src and dst overlap, then reverse direction
-	dir := align
-
-	if osrc < odst && odst < osrc+w {
-		dir = -dir
-	}
-
-	var dst gc.Node
-	var src gc.Node
-	if n.Ullman >= res.Ullman {
-		gc.Agenr(n, &dst, res) // temporarily use dst
-		gc.Regalloc(&src, gc.Types[gc.Tptr], nil)
-		gins(ppc64.AMOVD, &dst, &src)
-		if res.Op == gc.ONAME {
-			gc.Gvardef(res)
-		}
-		gc.Agen(res, &dst)
-	} else {
-		if res.Op == gc.ONAME {
-			gc.Gvardef(res)
-		}
-		gc.Agenr(res, &dst, res)
-		gc.Agenr(n, &src, nil)
-	}
-
-	var tmp gc.Node
-	gc.Regalloc(&tmp, gc.Types[gc.Tptr], nil)
-
-	// set up end marker
-	var nend gc.Node
-
-	// move src and dest to the end of block if necessary
-	if dir < 0 {
-		if c >= 4 {
-			gc.Regalloc(&nend, gc.Types[gc.Tptr], nil)
-			gins(ppc64.AMOVD, &src, &nend)
-		}
-
-		p := gins(ppc64.AADD, nil, &src)
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = w
-
-		p = gins(ppc64.AADD, nil, &dst)
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = w
-	} else {
-		p := gins(ppc64.AADD, nil, &src)
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = int64(-dir)
-
-		p = gins(ppc64.AADD, nil, &dst)
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = int64(-dir)
-
-		if c >= 4 {
-			gc.Regalloc(&nend, gc.Types[gc.Tptr], nil)
-			p := gins(ppc64.AMOVD, &src, &nend)
-			p.From.Type = obj.TYPE_ADDR
-			p.From.Offset = w
-		}
-	}
-
-	// move
-	// TODO: enable duffcopy for larger copies.
-	if c >= 4 {
-		p := gins(op, &src, &tmp)
-		p.From.Type = obj.TYPE_MEM
-		p.From.Offset = int64(dir)
-		ploop := p
-
-		p = gins(op, &tmp, &dst)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Offset = int64(dir)
-
-		p = gins(ppc64.ACMP, &src, &nend)
-
-		gc.Patch(gc.Gbranch(ppc64.ABNE, nil, 0), ploop)
-		gc.Regfree(&nend)
-	} else {
-		// TODO(austin): Instead of generating ADD $-8,R8; ADD
-		// $-8,R7; n*(MOVDU 8(R8),R9; MOVDU R9,8(R7);) just
-		// generate the offsets directly and eliminate the
-		// ADDs. That will produce shorter, more
-		// pipeline-able code.
-		var p *obj.Prog
-		for ; c > 0; c-- {
-			p = gins(op, &src, &tmp)
-			p.From.Type = obj.TYPE_MEM
-			p.From.Offset = int64(dir)
-
-			p = gins(op, &tmp, &dst)
-			p.To.Type = obj.TYPE_MEM
-			p.To.Offset = int64(dir)
-		}
-	}
-
-	gc.Regfree(&dst)
-	gc.Regfree(&src)
-	gc.Regfree(&tmp)
-}
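
The forward/backward decision in the deleted blockcopy (here and in the other cgen.go files) reduces to one interval test: a forward copy clobbers its own source exactly when the destination starts inside [osrc, osrc+w). A minimal illustration of the predicate:

package main

import "fmt"

// backward reports whether a block copy of w bytes from osrc to odst
// must run back-to-front to be memmove-safe, as in the deleted blockcopy.
func backward(osrc, odst, w int64) bool {
	return osrc < odst && odst < osrc+w
}

func main() {
	fmt.Println(backward(0, 4, 8))  // true: dst overlaps the tail of src
	fmt.Println(backward(4, 0, 8))  // false: dst precedes src, forward is safe
	fmt.Println(backward(0, 16, 8)) // false: no overlap at all
}
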
diff --git a/src/cmd/compile/internal/ppc64/galign.go b/src/cmd/compile/internal/ppc64/galign.go
index a3fab79..5491c12 100644
--- a/src/cmd/compile/internal/ppc64/galign.go
+++ b/src/cmd/compile/internal/ppc64/galign.go
@@ -35,36 +35,9 @@
 	gc.Thearch.ReservedRegs = resvd
 
 	gc.Thearch.Betypeinit = betypeinit
-	gc.Thearch.Cgen_hmul = cgen_hmul
-	gc.Thearch.Cgen_shift = cgen_shift
-	gc.Thearch.Clearfat = clearfat
 	gc.Thearch.Defframe = defframe
-	gc.Thearch.Dodiv = dodiv
-	gc.Thearch.Excise = excise
-	gc.Thearch.Expandchecks = expandchecks
-	gc.Thearch.Getg = getg
 	gc.Thearch.Gins = gins
-	gc.Thearch.Ginscmp = ginscmp
-	gc.Thearch.Ginscon = ginscon
-	gc.Thearch.Ginsnop = ginsnop
-	gc.Thearch.Gmove = gmove
-	gc.Thearch.Peep = peep
 	gc.Thearch.Proginfo = proginfo
-	gc.Thearch.Regtyp = regtyp
-	gc.Thearch.Sameaddr = sameaddr
-	gc.Thearch.Smallindir = smallindir
-	gc.Thearch.Stackaddr = stackaddr
-	gc.Thearch.Blockcopy = blockcopy
-	gc.Thearch.Sudoaddable = sudoaddable
-	gc.Thearch.Sudoclean = sudoclean
-	gc.Thearch.Excludedregs = excludedregs
-	gc.Thearch.RtoB = RtoB
-	gc.Thearch.FtoB = RtoB
-	gc.Thearch.BtoR = BtoR
-	gc.Thearch.BtoF = BtoF
-	gc.Thearch.Optoas = optoas
-	gc.Thearch.Doregbits = doregbits
-	gc.Thearch.Regnames = regnames
 
 	gc.Thearch.SSARegToReg = ssaRegToReg
 	gc.Thearch.SSAMarkMoves = ssaMarkMoves
diff --git a/src/cmd/compile/internal/ppc64/ggen.go b/src/cmd/compile/internal/ppc64/ggen.go
index eeda4a2..f36eedc 100644
--- a/src/cmd/compile/internal/ppc64/ggen.go
+++ b/src/cmd/compile/internal/ppc64/ggen.go
@@ -8,7 +8,6 @@
 	"cmd/compile/internal/gc"
 	"cmd/internal/obj"
 	"cmd/internal/obj/ppc64"
-	"fmt"
 )
 
 func defframe(ptxt *obj.Prog) {
@@ -114,440 +113,3 @@
 	gc.Nodreg(&reg, gc.Types[gc.TINT], ppc64.REG_R0)
 	gins(ppc64.AOR, &reg, &reg)
 }
-
-var panicdiv *gc.Node
-
-/*
- * generate division.
- * generates one of:
- *	res = nl / nr
- *	res = nl % nr
- * according to op.
- */
-func dodiv(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) {
-	// Have to be careful about handling
-	// most negative int divided by -1 correctly.
-	// The hardware will generate undefined result.
-	// Also need to explicitly trap on division on zero,
-	// the hardware will silently generate undefined result.
-	// DIVW will leave unpredictable result in higher 32-bit,
-	// so always use DIVD/DIVDU.
-	t := nl.Type
-
-	t0 := t
-	check := false
-	if t.IsSigned() {
-		check = true
-		if gc.Isconst(nl, gc.CTINT) && nl.Int64() != -(1<<uint64(t.Width*8-1)) {
-			check = false
-		} else if gc.Isconst(nr, gc.CTINT) && nr.Int64() != -1 {
-			check = false
-		}
-	}
-
-	if t.Width < 8 {
-		if t.IsSigned() {
-			t = gc.Types[gc.TINT64]
-		} else {
-			t = gc.Types[gc.TUINT64]
-		}
-		check = false
-	}
-
-	a := optoas(gc.ODIV, t)
-
-	var tl gc.Node
-	gc.Regalloc(&tl, t0, nil)
-	var tr gc.Node
-	gc.Regalloc(&tr, t0, nil)
-	if nl.Ullman >= nr.Ullman {
-		gc.Cgen(nl, &tl)
-		gc.Cgen(nr, &tr)
-	} else {
-		gc.Cgen(nr, &tr)
-		gc.Cgen(nl, &tl)
-	}
-
-	if t != t0 {
-		// Convert
-		tl2 := tl
-
-		tr2 := tr
-		tl.Type = t
-		tr.Type = t
-		gmove(&tl2, &tl)
-		gmove(&tr2, &tr)
-	}
-
-	// Handle divide-by-zero panic.
-	p1 := gins(optoas(gc.OCMP, t), &tr, nil)
-
-	p1.To.Type = obj.TYPE_REG
-	p1.To.Reg = ppc64.REGZERO
-	p1 = gc.Gbranch(optoas(gc.ONE, t), nil, +1)
-	if panicdiv == nil {
-		panicdiv = gc.Sysfunc("panicdivide")
-	}
-	gc.Ginscall(panicdiv, -1)
-	gc.Patch(p1, gc.Pc)
-
-	var p2 *obj.Prog
-	if check {
-		var nm1 gc.Node
-		gc.Nodconst(&nm1, t, -1)
-		gins(optoas(gc.OCMP, t), &tr, &nm1)
-		p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
-		if op == gc.ODIV {
-			// a / (-1) is -a.
-			gins(optoas(gc.OMINUS, t), nil, &tl)
-
-			gmove(&tl, res)
-		} else {
-			// a % (-1) is 0.
-			var nz gc.Node
-			gc.Nodconst(&nz, t, 0)
-
-			gmove(&nz, res)
-		}
-
-		p2 = gc.Gbranch(obj.AJMP, nil, 0)
-		gc.Patch(p1, gc.Pc)
-	}
-
-	p1 = gins(a, &tr, &tl)
-	if op == gc.ODIV {
-		gc.Regfree(&tr)
-		gmove(&tl, res)
-	} else {
-		// A%B = A-(A/B*B)
-		var tm gc.Node
-		gc.Regalloc(&tm, t, nil)
-
-		// patch div to use the 3 register form
-		// TODO(minux): add gins3?
-		p1.Reg = p1.To.Reg
-
-		p1.To.Reg = tm.Reg
-		gins(optoas(gc.OMUL, t), &tr, &tm)
-		gc.Regfree(&tr)
-		gins(optoas(gc.OSUB, t), &tm, &tl)
-		gc.Regfree(&tm)
-		gmove(&tl, res)
-	}
-
-	gc.Regfree(&tl)
-	if check {
-		gc.Patch(p2, gc.Pc)
-	}
-}
-
-/*
- * generate high multiply:
- *   res = (nl*nr) >> width
- */
-func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
-	// largest ullman on left.
-	if nl.Ullman < nr.Ullman {
-		nl, nr = nr, nl
-	}
-
-	t := nl.Type
-	w := t.Width * 8
-	var n1 gc.Node
-	gc.Cgenr(nl, &n1, res)
-	var n2 gc.Node
-	gc.Cgenr(nr, &n2, nil)
-	switch gc.Simtype[t.Etype] {
-	case gc.TINT8,
-		gc.TINT16,
-		gc.TINT32:
-		gins(optoas(gc.OMUL, t), &n2, &n1)
-		p := gins(ppc64.ASRAD, nil, &n1)
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = w
-
-	case gc.TUINT8,
-		gc.TUINT16,
-		gc.TUINT32:
-		gins(optoas(gc.OMUL, t), &n2, &n1)
-		p := gins(ppc64.ASRD, nil, &n1)
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = w
-
-	case gc.TINT64,
-		gc.TUINT64:
-		if t.IsSigned() {
-			gins(ppc64.AMULHD, &n2, &n1)
-		} else {
-			gins(ppc64.AMULHDU, &n2, &n1)
-		}
-
-	default:
-		gc.Fatalf("cgen_hmul %v", t)
-	}
-
-	gc.Cgen(&n1, res)
-	gc.Regfree(&n1)
-	gc.Regfree(&n2)
-}
-
-/*
- * generate shift according to op, one of:
- *	res = nl << nr
- *	res = nl >> nr
- */
-func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
-	a := optoas(op, nl.Type)
-
-	if nr.Op == gc.OLITERAL {
-		var n1 gc.Node
-		gc.Regalloc(&n1, nl.Type, res)
-		gc.Cgen(nl, &n1)
-		sc := uint64(nr.Int64())
-		if sc >= uint64(nl.Type.Width*8) {
-			// large shift gets 2 shifts by width-1
-			var n3 gc.Node
-			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
-
-			gins(a, &n3, &n1)
-			gins(a, &n3, &n1)
-		} else {
-			gins(a, nr, &n1)
-		}
-		gmove(&n1, res)
-		gc.Regfree(&n1)
-		return
-	}
-
-	if nl.Ullman >= gc.UINF {
-		var n4 gc.Node
-		gc.Tempname(&n4, nl.Type)
-		gc.Cgen(nl, &n4)
-		nl = &n4
-	}
-
-	if nr.Ullman >= gc.UINF {
-		var n5 gc.Node
-		gc.Tempname(&n5, nr.Type)
-		gc.Cgen(nr, &n5)
-		nr = &n5
-	}
-
-	// Allow either uint32 or uint64 as shift type,
-	// to avoid unnecessary conversion from uint32 to uint64
-	// just to do the comparison.
-	tcount := gc.Types[gc.Simtype[nr.Type.Etype]]
-
-	if tcount.Etype < gc.TUINT32 {
-		tcount = gc.Types[gc.TUINT32]
-	}
-
-	var n1 gc.Node
-	gc.Regalloc(&n1, nr.Type, nil) // to hold the shift type in CX
-	var n3 gc.Node
-	gc.Regalloc(&n3, tcount, &n1) // to clear high bits of CX
-
-	var n2 gc.Node
-	gc.Regalloc(&n2, nl.Type, res)
-
-	if nl.Ullman >= nr.Ullman {
-		gc.Cgen(nl, &n2)
-		gc.Cgen(nr, &n1)
-		gmove(&n1, &n3)
-	} else {
-		gc.Cgen(nr, &n1)
-		gmove(&n1, &n3)
-		gc.Cgen(nl, &n2)
-	}
-
-	gc.Regfree(&n3)
-
-	// test and fix up large shifts
-	if !bounded {
-		gc.Nodconst(&n3, tcount, nl.Type.Width*8)
-		gins(optoas(gc.OCMP, tcount), &n1, &n3)
-		p1 := gc.Gbranch(optoas(gc.OLT, tcount), nil, +1)
-		if op == gc.ORSH && nl.Type.IsSigned() {
-			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
-			gins(a, &n3, &n2)
-		} else {
-			gc.Nodconst(&n3, nl.Type, 0)
-			gmove(&n3, &n2)
-		}
-
-		gc.Patch(p1, gc.Pc)
-	}
-
-	gins(a, &n1, &n2)
-
-	gmove(&n2, res)
-
-	gc.Regfree(&n1)
-	gc.Regfree(&n2)
-}
-
-func clearfat(nl *gc.Node) {
-	/* clear a fat object */
-	if gc.Debug['g'] != 0 {
-		fmt.Printf("clearfat %v (%v, size: %d)\n", nl, nl.Type, nl.Type.Width)
-	}
-
-	w := uint64(nl.Type.Width)
-
-	// Avoid taking the address for simple enough types.
-	if gc.Componentgen(nil, nl) {
-		return
-	}
-
-	c := w % 8 // bytes
-	q := w / 8 // dwords
-
-	if gc.Reginuse(ppc64.REGRT1) {
-		gc.Fatalf("%v in use during clearfat", obj.Rconv(ppc64.REGRT1))
-	}
-
-	var r0 gc.Node
-	gc.Nodreg(&r0, gc.Types[gc.TUINT64], ppc64.REGZERO)
-	var dst gc.Node
-	gc.Nodreg(&dst, gc.Types[gc.Tptr], ppc64.REGRT1)
-	gc.Regrealloc(&dst)
-	gc.Agen(nl, &dst)
-
-	var boff uint64
-	if q > 128 {
-		p := gins(ppc64.ASUB, nil, &dst)
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = 8
-
-		var end gc.Node
-		gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
-		p = gins(ppc64.AMOVD, &dst, &end)
-		p.From.Type = obj.TYPE_ADDR
-		p.From.Offset = int64(q * 8)
-
-		p = gins(ppc64.AMOVDU, &r0, &dst)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Offset = 8
-		pl := p
-
-		p = gins(ppc64.ACMP, &dst, &end)
-		gc.Patch(gc.Gbranch(ppc64.ABNE, nil, 0), pl)
-
-		gc.Regfree(&end)
-
-		// The loop leaves R3 on the last zeroed dword
-		boff = 8
-	} else if q >= 4 {
-		p := gins(ppc64.ASUB, nil, &dst)
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = 8
-		f := gc.Sysfunc("duffzero")
-		p = gins(obj.ADUFFZERO, nil, f)
-		gc.Afunclit(&p.To, f)
-
-		// 4 and 128 = magic constants: see ../../runtime/asm_ppc64x.s
-		p.To.Offset = int64(4 * (128 - q))
-
-		// duffzero leaves R3 on the last zeroed dword
-		boff = 8
-	} else {
-		var p *obj.Prog
-		for t := uint64(0); t < q; t++ {
-			p = gins(ppc64.AMOVD, &r0, &dst)
-			p.To.Type = obj.TYPE_MEM
-			p.To.Offset = int64(8 * t)
-		}
-
-		boff = 8 * q
-	}
-
-	var p *obj.Prog
-	for t := uint64(0); t < c; t++ {
-		p = gins(ppc64.AMOVB, &r0, &dst)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Offset = int64(t + boff)
-	}
-
-	gc.Regfree(&dst)
-}
-
-// Called after regopt and peep have run.
-// Expand CHECKNIL pseudo-op into actual nil pointer check.
-func expandchecks(firstp *obj.Prog) {
-	var p1 *obj.Prog
-	var p2 *obj.Prog
-
-	for p := firstp; p != nil; p = p.Link {
-		if gc.Debug_checknil != 0 && gc.Ctxt.Debugvlog != 0 {
-			fmt.Printf("expandchecks: %v\n", p)
-		}
-		if p.As != obj.ACHECKNIL {
-			continue
-		}
-		if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
-			gc.Warnl(p.Lineno, "generated nil check")
-		}
-		if p.From.Type != obj.TYPE_REG {
-			gc.Fatalf("invalid nil check %v\n", p)
-		}
-
-		/*
-			// check is
-			//	TD $4, R0, arg (R0 is always zero)
-			// eqv. to:
-			// 	tdeq r0, arg
-			// NOTE: this needs special runtime support to make SIGTRAP recoverable.
-			reg = p->from.reg;
-			p->as = ATD;
-			p->from = p->to = p->from3 = zprog.from;
-			p->from.type = TYPE_CONST;
-			p->from.offset = 4;
-			p->from.reg = 0;
-			p->reg = REGZERO;
-			p->to.type = TYPE_REG;
-			p->to.reg = reg;
-		*/
-		// check is
-		//	CMP arg, R0
-		//	BNE 2(PC) [likely]
-		//	MOVD R0, 0(R0)
-		p1 = gc.Ctxt.NewProg()
-
-		p2 = gc.Ctxt.NewProg()
-		gc.Clearp(p1)
-		gc.Clearp(p2)
-		p1.Link = p2
-		p2.Link = p.Link
-		p.Link = p1
-		p1.Lineno = p.Lineno
-		p2.Lineno = p.Lineno
-		p1.Pc = 9999
-		p2.Pc = 9999
-		p.As = ppc64.ACMP
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = ppc64.REGZERO
-		p1.As = ppc64.ABNE
-
-		//p1->from.type = TYPE_CONST;
-		//p1->from.offset = 1; // likely
-		p1.To.Type = obj.TYPE_BRANCH
-
-		p1.To.Val = p2.Link
-
-		// crash by write to memory address 0.
-		p2.As = ppc64.AMOVD
-
-		p2.From.Type = obj.TYPE_REG
-		p2.From.Reg = ppc64.REGZERO
-		p2.To.Type = obj.TYPE_MEM
-		p2.To.Reg = ppc64.REGZERO
-		p2.To.Offset = 0
-	}
-}
-
-// res = runtime.getg()
-func getg(res *gc.Node) {
-	var n1 gc.Node
-	gc.Nodreg(&n1, res.Type, ppc64.REGG)
-	gmove(&n1, res)
-}
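
Of the deletions above, dodiv carried the most subtle logic: PPC64's DIVD leaves the result undefined both for a zero divisor and for the most negative value divided by -1, so the generated code branched around both cases. The Go-level semantics it enforced, restated as a plain function (a sketch of the idea, not the emitted code):

package main

import "fmt"

const minInt64 = -1 << 63 // most negative int64

// div mirrors the checks the deleted dodiv emitted inline: panic on a
// zero divisor, and special-case division by -1, where the hardware
// result is undefined for minInt64.
func div(a, b int64) (quo, rem int64) {
	if b == 0 {
		panic("runtime error: integer divide by zero")
	}
	if b == -1 {
		return -a, 0 // a/(-1) is -a (wrapping for minInt64), a%(-1) is 0
	}
	return a / b, a % b
}

func main() {
	fmt.Println(div(minInt64, -1)) // -9223372036854775808 0
	fmt.Println(div(7, 2))         // 3 1
}
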
diff --git a/src/cmd/compile/internal/ppc64/gsubr.go b/src/cmd/compile/internal/ppc64/gsubr.go
index 86dc241..3fa151f 100644
--- a/src/cmd/compile/internal/ppc64/gsubr.go
+++ b/src/cmd/compile/internal/ppc64/gsubr.go
@@ -31,7 +31,6 @@
 package ppc64
 
 import (
-	"cmd/compile/internal/big"
 	"cmd/compile/internal/gc"
 	"cmd/internal/obj"
 	"cmd/internal/obj/ppc64"
@@ -112,438 +111,6 @@
 	gc.Regfree(&ntmp)
 }
 
-func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
-	if t.IsInteger() && n1.Op == gc.OLITERAL && n2.Op != gc.OLITERAL {
-		// Reverse comparison to place constant last.
-		op = gc.Brrev(op)
-		n1, n2 = n2, n1
-	}
-
-	var r1, r2, g1, g2 gc.Node
-	gc.Regalloc(&r1, t, n1)
-	gc.Regalloc(&g1, n1.Type, &r1)
-	gc.Cgen(n1, &g1)
-	gmove(&g1, &r1)
-	if t.IsInteger() && gc.Isconst(n2, gc.CTINT) {
-		ginscon2(optoas(gc.OCMP, t), &r1, n2.Int64())
-	} else {
-		gc.Regalloc(&r2, t, n2)
-		gc.Regalloc(&g2, n1.Type, &r2)
-		gc.Cgen(n2, &g2)
-		gmove(&g2, &r2)
-		rawgins(optoas(gc.OCMP, t), &r1, &r2)
-		gc.Regfree(&g2)
-		gc.Regfree(&r2)
-	}
-	gc.Regfree(&g1)
-	gc.Regfree(&r1)
-	return gc.Gbranch(optoas(op, t), nil, likely)
-}
-
-// set up nodes representing 2^63
-var (
-	bigi         gc.Node
-	bigf         gc.Node
-	bignodes_did bool
-)
-
-func bignodes() {
-	if bignodes_did {
-		return
-	}
-	bignodes_did = true
-
-	var i big.Int
-	i.SetInt64(1)
-	i.Lsh(&i, 63)
-
-	gc.Nodconst(&bigi, gc.Types[gc.TUINT64], 0)
-	bigi.SetBigInt(&i)
-
-	bigi.Convconst(&bigf, gc.Types[gc.TFLOAT64])
-}
-
-/*
- * generate move:
- *	t = f
- * hard part is conversions.
- */
-func gmove(f *gc.Node, t *gc.Node) {
-	if gc.Debug['M'] != 0 {
-		fmt.Printf("gmove %L -> %L\n", f, t)
-	}
-
-	ft := int(gc.Simsimtype(f.Type))
-	tt := int(gc.Simsimtype(t.Type))
-	cvt := t.Type
-
-	if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
-		gc.Complexmove(f, t)
-		return
-	}
-
-	// cannot have two memory operands
-	var r2 gc.Node
-	var r1 gc.Node
-	var a obj.As
-	if gc.Ismem(f) && gc.Ismem(t) {
-		goto hard
-	}
-
-	// convert constant to desired type
-	if f.Op == gc.OLITERAL {
-		var con gc.Node
-		switch tt {
-		default:
-			f.Convconst(&con, t.Type)
-
-		case gc.TINT32,
-			gc.TINT16,
-			gc.TINT8:
-			var con gc.Node
-			f.Convconst(&con, gc.Types[gc.TINT64])
-			var r1 gc.Node
-			gc.Regalloc(&r1, con.Type, t)
-			gins(ppc64.AMOVD, &con, &r1)
-			gmove(&r1, t)
-			gc.Regfree(&r1)
-			return
-
-		case gc.TUINT32,
-			gc.TUINT16,
-			gc.TUINT8:
-			var con gc.Node
-			f.Convconst(&con, gc.Types[gc.TUINT64])
-			var r1 gc.Node
-			gc.Regalloc(&r1, con.Type, t)
-			gins(ppc64.AMOVD, &con, &r1)
-			gmove(&r1, t)
-			gc.Regfree(&r1)
-			return
-		}
-
-		f = &con
-		ft = tt // so big switch will choose a simple mov
-
-		// constants can't move directly to memory.
-		if gc.Ismem(t) {
-			goto hard
-		}
-	}
-
-	// float constants come from memory.
-	//if(isfloat[tt])
-	//	goto hard;
-
-	// 64-bit immediates are also from memory.
-	//if(isint[tt])
-	//	goto hard;
-	//// 64-bit immediates are really 32-bit sign-extended
-	//// unless moving into a register.
-	//if(isint[tt]) {
-	//	if(mpcmpfixfix(con.val.u.xval, minintval[TINT32]) < 0)
-	//		goto hard;
-	//	if(mpcmpfixfix(con.val.u.xval, maxintval[TINT32]) > 0)
-	//		goto hard;
-	//}
-
-	// value -> value copy, only one memory operand.
-	// figure out the instruction to use.
-	// break out of switch for one-instruction gins.
-	// goto rdst for "destination must be register".
-	// goto hard for "convert to cvt type first".
-	// otherwise handle and return.
-
-	switch uint32(ft)<<16 | uint32(tt) {
-	default:
-		gc.Fatalf("gmove %L -> %L", f.Type, t.Type)
-
-		/*
-		 * integer copy and truncate
-		 */
-	case gc.TINT8<<16 | gc.TINT8, // same size
-		gc.TUINT8<<16 | gc.TINT8,
-		gc.TINT16<<16 | gc.TINT8,
-		// truncate
-		gc.TUINT16<<16 | gc.TINT8,
-		gc.TINT32<<16 | gc.TINT8,
-		gc.TUINT32<<16 | gc.TINT8,
-		gc.TINT64<<16 | gc.TINT8,
-		gc.TUINT64<<16 | gc.TINT8:
-		a = ppc64.AMOVB
-
-	case gc.TINT8<<16 | gc.TUINT8, // same size
-		gc.TUINT8<<16 | gc.TUINT8,
-		gc.TINT16<<16 | gc.TUINT8,
-		// truncate
-		gc.TUINT16<<16 | gc.TUINT8,
-		gc.TINT32<<16 | gc.TUINT8,
-		gc.TUINT32<<16 | gc.TUINT8,
-		gc.TINT64<<16 | gc.TUINT8,
-		gc.TUINT64<<16 | gc.TUINT8:
-		a = ppc64.AMOVBZ
-
-	case gc.TINT16<<16 | gc.TINT16, // same size
-		gc.TUINT16<<16 | gc.TINT16,
-		gc.TINT32<<16 | gc.TINT16,
-		// truncate
-		gc.TUINT32<<16 | gc.TINT16,
-		gc.TINT64<<16 | gc.TINT16,
-		gc.TUINT64<<16 | gc.TINT16:
-		a = ppc64.AMOVH
-
-	case gc.TINT16<<16 | gc.TUINT16, // same size
-		gc.TUINT16<<16 | gc.TUINT16,
-		gc.TINT32<<16 | gc.TUINT16,
-		// truncate
-		gc.TUINT32<<16 | gc.TUINT16,
-		gc.TINT64<<16 | gc.TUINT16,
-		gc.TUINT64<<16 | gc.TUINT16:
-		a = ppc64.AMOVHZ
-
-	case gc.TINT32<<16 | gc.TINT32, // same size
-		gc.TUINT32<<16 | gc.TINT32,
-		gc.TINT64<<16 | gc.TINT32,
-		// truncate
-		gc.TUINT64<<16 | gc.TINT32:
-		a = ppc64.AMOVW
-
-	case gc.TINT32<<16 | gc.TUINT32, // same size
-		gc.TUINT32<<16 | gc.TUINT32,
-		gc.TINT64<<16 | gc.TUINT32,
-		gc.TUINT64<<16 | gc.TUINT32:
-		a = ppc64.AMOVWZ
-
-	case gc.TINT64<<16 | gc.TINT64, // same size
-		gc.TINT64<<16 | gc.TUINT64,
-		gc.TUINT64<<16 | gc.TINT64,
-		gc.TUINT64<<16 | gc.TUINT64:
-		a = ppc64.AMOVD
-
-		/*
-		 * integer up-conversions
-		 */
-	case gc.TINT8<<16 | gc.TINT16, // sign extend int8
-		gc.TINT8<<16 | gc.TUINT16,
-		gc.TINT8<<16 | gc.TINT32,
-		gc.TINT8<<16 | gc.TUINT32,
-		gc.TINT8<<16 | gc.TINT64,
-		gc.TINT8<<16 | gc.TUINT64:
-		a = ppc64.AMOVB
-
-		goto rdst
-
-	case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8
-		gc.TUINT8<<16 | gc.TUINT16,
-		gc.TUINT8<<16 | gc.TINT32,
-		gc.TUINT8<<16 | gc.TUINT32,
-		gc.TUINT8<<16 | gc.TINT64,
-		gc.TUINT8<<16 | gc.TUINT64:
-		a = ppc64.AMOVBZ
-
-		goto rdst
-
-	case gc.TINT16<<16 | gc.TINT32, // sign extend int16
-		gc.TINT16<<16 | gc.TUINT32,
-		gc.TINT16<<16 | gc.TINT64,
-		gc.TINT16<<16 | gc.TUINT64:
-		a = ppc64.AMOVH
-
-		goto rdst
-
-	case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16
-		gc.TUINT16<<16 | gc.TUINT32,
-		gc.TUINT16<<16 | gc.TINT64,
-		gc.TUINT16<<16 | gc.TUINT64:
-		a = ppc64.AMOVHZ
-
-		goto rdst
-
-	case gc.TINT32<<16 | gc.TINT64, // sign extend int32
-		gc.TINT32<<16 | gc.TUINT64:
-		a = ppc64.AMOVW
-
-		goto rdst
-
-	case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
-		gc.TUINT32<<16 | gc.TUINT64:
-		a = ppc64.AMOVWZ
-
-		goto rdst
-
-		//warn("gmove: convert float to int not implemented: %N -> %N\n", f, t);
-	//return;
-	// algorithm is:
-	//	if small enough, use native float64 -> int64 conversion.
-	//	otherwise, subtract 2^63, convert, and add it back.
-	/*
-	* float to integer
-	 */
-	case gc.TFLOAT32<<16 | gc.TINT32,
-		gc.TFLOAT64<<16 | gc.TINT32,
-		gc.TFLOAT32<<16 | gc.TINT64,
-		gc.TFLOAT64<<16 | gc.TINT64,
-		gc.TFLOAT32<<16 | gc.TINT16,
-		gc.TFLOAT32<<16 | gc.TINT8,
-		gc.TFLOAT32<<16 | gc.TUINT16,
-		gc.TFLOAT32<<16 | gc.TUINT8,
-		gc.TFLOAT64<<16 | gc.TINT16,
-		gc.TFLOAT64<<16 | gc.TINT8,
-		gc.TFLOAT64<<16 | gc.TUINT16,
-		gc.TFLOAT64<<16 | gc.TUINT8,
-		gc.TFLOAT32<<16 | gc.TUINT32,
-		gc.TFLOAT64<<16 | gc.TUINT32,
-		gc.TFLOAT32<<16 | gc.TUINT64,
-		gc.TFLOAT64<<16 | gc.TUINT64:
-		bignodes()
-
-		var r1 gc.Node
-		gc.Regalloc(&r1, gc.Types[ft], f)
-		gmove(f, &r1)
-		if tt == gc.TUINT64 {
-			gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], nil)
-			gmove(&bigf, &r2)
-			gins(ppc64.AFCMPU, &r1, &r2)
-			p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1)
-			gins(ppc64.AFSUB, &r2, &r1)
-			gc.Patch(p1, gc.Pc)
-			gc.Regfree(&r2)
-		}
-
-		gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], nil)
-		var r3 gc.Node
-		gc.Regalloc(&r3, gc.Types[gc.TINT64], t)
-		gins(ppc64.AFCTIDZ, &r1, &r2)
-		p1 := gins(ppc64.AFMOVD, &r2, nil)
-		p1.To.Type = obj.TYPE_MEM
-		p1.To.Reg = ppc64.REGSP
-		p1.To.Offset = -8
-		p1 = gins(ppc64.AMOVD, nil, &r3)
-		p1.From.Type = obj.TYPE_MEM
-		p1.From.Reg = ppc64.REGSP
-		p1.From.Offset = -8
-		gc.Regfree(&r2)
-		gc.Regfree(&r1)
-		if tt == gc.TUINT64 {
-			p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1) // use CR0 here again
-			gc.Nodreg(&r1, gc.Types[gc.TINT64], ppc64.REGTMP)
-			gins(ppc64.AMOVD, &bigi, &r1)
-			gins(ppc64.AADD, &r1, &r3)
-			gc.Patch(p1, gc.Pc)
-		}
-
-		gmove(&r3, t)
-		gc.Regfree(&r3)
-		return
-
-	/*
-	 * integer to float
-	 */
-	case gc.TINT32<<16 | gc.TFLOAT32,
-		gc.TINT32<<16 | gc.TFLOAT64,
-		gc.TINT64<<16 | gc.TFLOAT32,
-		gc.TINT64<<16 | gc.TFLOAT64,
-		gc.TINT16<<16 | gc.TFLOAT32,
-		gc.TINT16<<16 | gc.TFLOAT64,
-		gc.TINT8<<16 | gc.TFLOAT32,
-		gc.TINT8<<16 | gc.TFLOAT64,
-		gc.TUINT16<<16 | gc.TFLOAT32,
-		gc.TUINT16<<16 | gc.TFLOAT64,
-		gc.TUINT8<<16 | gc.TFLOAT32,
-		gc.TUINT8<<16 | gc.TFLOAT64,
-		gc.TUINT32<<16 | gc.TFLOAT32,
-		gc.TUINT32<<16 | gc.TFLOAT64,
-		gc.TUINT64<<16 | gc.TFLOAT32,
-		gc.TUINT64<<16 | gc.TFLOAT64:
-		bignodes()
-
-		// The algorithm is:
-		//	if small enough, use native int64 -> float64 conversion,
-		//	otherwise halve (x -> (x>>1)|(x&1)), convert, and double.
-		// Note: could use FCFIDU instead if target supports it.
-		var r1 gc.Node
-		gc.Regalloc(&r1, gc.Types[gc.TINT64], nil)
-		gmove(f, &r1)
-		if ft == gc.TUINT64 {
-			gc.Nodreg(&r2, gc.Types[gc.TUINT64], ppc64.REGTMP)
-			gmove(&bigi, &r2)
-			gins(ppc64.ACMPU, &r1, &r2)
-			p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1)
-			var r3 gc.Node
-			gc.Regalloc(&r3, gc.Types[gc.TUINT64], nil)
-			p2 := gins(ppc64.AANDCC, nil, &r3) // andi.
-			p2.Reg = r1.Reg
-			p2.From.Type = obj.TYPE_CONST
-			p2.From.Offset = 1
-			p3 := gins(ppc64.ASRD, nil, &r1)
-			p3.From.Type = obj.TYPE_CONST
-			p3.From.Offset = 1
-			gins(ppc64.AOR, &r3, &r1)
-			gc.Regfree(&r3)
-			gc.Patch(p1, gc.Pc)
-		}
-		gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], t)
-		p1 := gins(ppc64.AMOVD, &r1, nil)
-		p1.To.Type = obj.TYPE_MEM
-		p1.To.Reg = ppc64.REGSP
-		p1.To.Offset = -8
-		p1 = gins(ppc64.AFMOVD, nil, &r2)
-		p1.From.Type = obj.TYPE_MEM
-		p1.From.Reg = ppc64.REGSP
-		p1.From.Offset = -8
-		gins(ppc64.AFCFID, &r2, &r2)
-		gc.Regfree(&r1)
-		if ft == gc.TUINT64 {
-			p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1) // use CR0 here again
-			gins(ppc64.AFADD, &r2, &r2)
-			gc.Patch(p1, gc.Pc)
-		}
-		gmove(&r2, t)
-		gc.Regfree(&r2)
-		return
-
-		/*
-		 * float to float
-		 */
-	case gc.TFLOAT32<<16 | gc.TFLOAT32:
-		a = ppc64.AFMOVS
-
-	case gc.TFLOAT64<<16 | gc.TFLOAT64:
-		a = ppc64.AFMOVD
-
-	case gc.TFLOAT32<<16 | gc.TFLOAT64:
-		a = ppc64.AFMOVS
-		goto rdst
-
-	case gc.TFLOAT64<<16 | gc.TFLOAT32:
-		a = ppc64.AFRSP
-		goto rdst
-	}
-
-	gins(a, f, t)
-	return
-
-	// requires register destination
-rdst:
-	{
-		gc.Regalloc(&r1, t.Type, t)
-
-		gins(a, f, &r1)
-		gmove(&r1, t)
-		gc.Regfree(&r1)
-		return
-	}
-
-	// requires register intermediate
-hard:
-	gc.Regalloc(&r1, cvt, t)
-
-	gmove(f, &r1)
-	gmove(&r1, t)
-	gc.Regfree(&r1)
-	return
-}
-
 // gins is called by the front end.
 // It synthesizes some multiple-instruction sequences
 // so the front end can stay simpler.
@@ -675,396 +242,3 @@
 
 	return p
 }
-
-/*
- * return Axxx for Oxxx on type t.
- */
-func optoas(op gc.Op, t *gc.Type) obj.As {
-	if t == nil {
-		gc.Fatalf("optoas: t is nil")
-	}
-
-	// avoid constant conversions in switches below
-	const (
-		OMINUS_ = uint32(gc.OMINUS) << 16
-		OLSH_   = uint32(gc.OLSH) << 16
-		ORSH_   = uint32(gc.ORSH) << 16
-		OADD_   = uint32(gc.OADD) << 16
-		OSUB_   = uint32(gc.OSUB) << 16
-		OMUL_   = uint32(gc.OMUL) << 16
-		ODIV_   = uint32(gc.ODIV) << 16
-		OOR_    = uint32(gc.OOR) << 16
-		OAND_   = uint32(gc.OAND) << 16
-		OXOR_   = uint32(gc.OXOR) << 16
-		OEQ_    = uint32(gc.OEQ) << 16
-		ONE_    = uint32(gc.ONE) << 16
-		OLT_    = uint32(gc.OLT) << 16
-		OLE_    = uint32(gc.OLE) << 16
-		OGE_    = uint32(gc.OGE) << 16
-		OGT_    = uint32(gc.OGT) << 16
-		OCMP_   = uint32(gc.OCMP) << 16
-		OAS_    = uint32(gc.OAS) << 16
-		OHMUL_  = uint32(gc.OHMUL) << 16
-		OSQRT_  = uint32(gc.OSQRT) << 16
-	)
-
-	a := obj.AXXX
-	switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
-	default:
-		gc.Fatalf("optoas: no entry for op=%v type=%v", op, t)
-
-	case OEQ_ | gc.TBOOL,
-		OEQ_ | gc.TINT8,
-		OEQ_ | gc.TUINT8,
-		OEQ_ | gc.TINT16,
-		OEQ_ | gc.TUINT16,
-		OEQ_ | gc.TINT32,
-		OEQ_ | gc.TUINT32,
-		OEQ_ | gc.TINT64,
-		OEQ_ | gc.TUINT64,
-		OEQ_ | gc.TPTR32,
-		OEQ_ | gc.TPTR64,
-		OEQ_ | gc.TFLOAT32,
-		OEQ_ | gc.TFLOAT64:
-		a = ppc64.ABEQ
-
-	case ONE_ | gc.TBOOL,
-		ONE_ | gc.TINT8,
-		ONE_ | gc.TUINT8,
-		ONE_ | gc.TINT16,
-		ONE_ | gc.TUINT16,
-		ONE_ | gc.TINT32,
-		ONE_ | gc.TUINT32,
-		ONE_ | gc.TINT64,
-		ONE_ | gc.TUINT64,
-		ONE_ | gc.TPTR32,
-		ONE_ | gc.TPTR64,
-		ONE_ | gc.TFLOAT32,
-		ONE_ | gc.TFLOAT64:
-		a = ppc64.ABNE
-
-	case OLT_ | gc.TINT8, // ACMP
-		OLT_ | gc.TINT16,
-		OLT_ | gc.TINT32,
-		OLT_ | gc.TINT64,
-		OLT_ | gc.TUINT8,
-		// ACMPU
-		OLT_ | gc.TUINT16,
-		OLT_ | gc.TUINT32,
-		OLT_ | gc.TUINT64,
-		OLT_ | gc.TFLOAT32,
-		// AFCMPU
-		OLT_ | gc.TFLOAT64:
-		a = ppc64.ABLT
-
-	case OLE_ | gc.TINT8, // ACMP
-		OLE_ | gc.TINT16,
-		OLE_ | gc.TINT32,
-		OLE_ | gc.TINT64,
-		OLE_ | gc.TUINT8,
-		// ACMPU
-		OLE_ | gc.TUINT16,
-		OLE_ | gc.TUINT32,
-		OLE_ | gc.TUINT64:
-		// No OLE for floats, because it mishandles NaN.
-		// Front end must reverse comparison or use OLT and OEQ together.
-		a = ppc64.ABLE
-
-	case OGT_ | gc.TINT8,
-		OGT_ | gc.TINT16,
-		OGT_ | gc.TINT32,
-		OGT_ | gc.TINT64,
-		OGT_ | gc.TUINT8,
-		OGT_ | gc.TUINT16,
-		OGT_ | gc.TUINT32,
-		OGT_ | gc.TUINT64,
-		OGT_ | gc.TFLOAT32,
-		OGT_ | gc.TFLOAT64:
-		a = ppc64.ABGT
-
-	case OGE_ | gc.TINT8,
-		OGE_ | gc.TINT16,
-		OGE_ | gc.TINT32,
-		OGE_ | gc.TINT64,
-		OGE_ | gc.TUINT8,
-		OGE_ | gc.TUINT16,
-		OGE_ | gc.TUINT32,
-		OGE_ | gc.TUINT64:
-		// No OGE for floats, because it mishandles NaN.
-		// Front end must reverse comparison or use OLT and OEQ together.
-		a = ppc64.ABGE
-
-	case OCMP_ | gc.TBOOL,
-		OCMP_ | gc.TINT8,
-		OCMP_ | gc.TINT16,
-		OCMP_ | gc.TINT32,
-		OCMP_ | gc.TPTR32,
-		OCMP_ | gc.TINT64:
-		a = ppc64.ACMP
-
-	case OCMP_ | gc.TUINT8,
-		OCMP_ | gc.TUINT16,
-		OCMP_ | gc.TUINT32,
-		OCMP_ | gc.TUINT64,
-		OCMP_ | gc.TPTR64:
-		a = ppc64.ACMPU
-
-	case OCMP_ | gc.TFLOAT32,
-		OCMP_ | gc.TFLOAT64:
-		a = ppc64.AFCMPU
-
-	case OAS_ | gc.TBOOL,
-		OAS_ | gc.TINT8:
-		a = ppc64.AMOVB
-
-	case OAS_ | gc.TUINT8:
-		a = ppc64.AMOVBZ
-
-	case OAS_ | gc.TINT16:
-		a = ppc64.AMOVH
-
-	case OAS_ | gc.TUINT16:
-		a = ppc64.AMOVHZ
-
-	case OAS_ | gc.TINT32:
-		a = ppc64.AMOVW
-
-	case OAS_ | gc.TUINT32,
-		OAS_ | gc.TPTR32:
-		a = ppc64.AMOVWZ
-
-	case OAS_ | gc.TINT64,
-		OAS_ | gc.TUINT64,
-		OAS_ | gc.TPTR64:
-		a = ppc64.AMOVD
-
-	case OAS_ | gc.TFLOAT32:
-		a = ppc64.AFMOVS
-
-	case OAS_ | gc.TFLOAT64:
-		a = ppc64.AFMOVD
-
-	case OADD_ | gc.TINT8,
-		OADD_ | gc.TUINT8,
-		OADD_ | gc.TINT16,
-		OADD_ | gc.TUINT16,
-		OADD_ | gc.TINT32,
-		OADD_ | gc.TUINT32,
-		OADD_ | gc.TPTR32,
-		OADD_ | gc.TINT64,
-		OADD_ | gc.TUINT64,
-		OADD_ | gc.TPTR64:
-		a = ppc64.AADD
-
-	case OADD_ | gc.TFLOAT32:
-		a = ppc64.AFADDS
-
-	case OADD_ | gc.TFLOAT64:
-		a = ppc64.AFADD
-
-	case OSUB_ | gc.TINT8,
-		OSUB_ | gc.TUINT8,
-		OSUB_ | gc.TINT16,
-		OSUB_ | gc.TUINT16,
-		OSUB_ | gc.TINT32,
-		OSUB_ | gc.TUINT32,
-		OSUB_ | gc.TPTR32,
-		OSUB_ | gc.TINT64,
-		OSUB_ | gc.TUINT64,
-		OSUB_ | gc.TPTR64:
-		a = ppc64.ASUB
-
-	case OSUB_ | gc.TFLOAT32:
-		a = ppc64.AFSUBS
-
-	case OSUB_ | gc.TFLOAT64:
-		a = ppc64.AFSUB
-
-	case OMINUS_ | gc.TINT8,
-		OMINUS_ | gc.TUINT8,
-		OMINUS_ | gc.TINT16,
-		OMINUS_ | gc.TUINT16,
-		OMINUS_ | gc.TINT32,
-		OMINUS_ | gc.TUINT32,
-		OMINUS_ | gc.TPTR32,
-		OMINUS_ | gc.TINT64,
-		OMINUS_ | gc.TUINT64,
-		OMINUS_ | gc.TPTR64:
-		a = ppc64.ANEG
-
-	case OAND_ | gc.TINT8,
-		OAND_ | gc.TUINT8,
-		OAND_ | gc.TINT16,
-		OAND_ | gc.TUINT16,
-		OAND_ | gc.TINT32,
-		OAND_ | gc.TUINT32,
-		OAND_ | gc.TPTR32,
-		OAND_ | gc.TINT64,
-		OAND_ | gc.TUINT64,
-		OAND_ | gc.TPTR64:
-		a = ppc64.AAND
-
-	case OOR_ | gc.TINT8,
-		OOR_ | gc.TUINT8,
-		OOR_ | gc.TINT16,
-		OOR_ | gc.TUINT16,
-		OOR_ | gc.TINT32,
-		OOR_ | gc.TUINT32,
-		OOR_ | gc.TPTR32,
-		OOR_ | gc.TINT64,
-		OOR_ | gc.TUINT64,
-		OOR_ | gc.TPTR64:
-		a = ppc64.AOR
-
-	case OXOR_ | gc.TINT8,
-		OXOR_ | gc.TUINT8,
-		OXOR_ | gc.TINT16,
-		OXOR_ | gc.TUINT16,
-		OXOR_ | gc.TINT32,
-		OXOR_ | gc.TUINT32,
-		OXOR_ | gc.TPTR32,
-		OXOR_ | gc.TINT64,
-		OXOR_ | gc.TUINT64,
-		OXOR_ | gc.TPTR64:
-		a = ppc64.AXOR
-
-		// TODO(minux): handle rotates
-	//case CASE(OLROT, TINT8):
-	//case CASE(OLROT, TUINT8):
-	//case CASE(OLROT, TINT16):
-	//case CASE(OLROT, TUINT16):
-	//case CASE(OLROT, TINT32):
-	//case CASE(OLROT, TUINT32):
-	//case CASE(OLROT, TPTR32):
-	//case CASE(OLROT, TINT64):
-	//case CASE(OLROT, TUINT64):
-	//case CASE(OLROT, TPTR64):
-	//	a = 0//???; RLDC?
-	//	break;
-
-	case OLSH_ | gc.TINT8,
-		OLSH_ | gc.TUINT8,
-		OLSH_ | gc.TINT16,
-		OLSH_ | gc.TUINT16,
-		OLSH_ | gc.TINT32,
-		OLSH_ | gc.TUINT32,
-		OLSH_ | gc.TPTR32,
-		OLSH_ | gc.TINT64,
-		OLSH_ | gc.TUINT64,
-		OLSH_ | gc.TPTR64:
-		a = ppc64.ASLD
-
-	case ORSH_ | gc.TUINT8,
-		ORSH_ | gc.TUINT16,
-		ORSH_ | gc.TUINT32,
-		ORSH_ | gc.TPTR32,
-		ORSH_ | gc.TUINT64,
-		ORSH_ | gc.TPTR64:
-		a = ppc64.ASRD
-
-	case ORSH_ | gc.TINT8,
-		ORSH_ | gc.TINT16,
-		ORSH_ | gc.TINT32,
-		ORSH_ | gc.TINT64:
-		a = ppc64.ASRAD
-
-		// TODO(minux): handle rotates
-	//case CASE(ORROTC, TINT8):
-	//case CASE(ORROTC, TUINT8):
-	//case CASE(ORROTC, TINT16):
-	//case CASE(ORROTC, TUINT16):
-	//case CASE(ORROTC, TINT32):
-	//case CASE(ORROTC, TUINT32):
-	//case CASE(ORROTC, TINT64):
-	//case CASE(ORROTC, TUINT64):
-	//	a = 0//??? RLDC??
-	//	break;
-
-	case OHMUL_ | gc.TINT64:
-		a = ppc64.AMULHD
-
-	case OHMUL_ | gc.TUINT64,
-		OHMUL_ | gc.TPTR64:
-		a = ppc64.AMULHDU
-
-	case OMUL_ | gc.TINT8,
-		OMUL_ | gc.TINT16,
-		OMUL_ | gc.TINT32,
-		OMUL_ | gc.TINT64:
-		a = ppc64.AMULLD
-
-	case OMUL_ | gc.TUINT8,
-		OMUL_ | gc.TUINT16,
-		OMUL_ | gc.TUINT32,
-		OMUL_ | gc.TPTR32,
-		// don't use word multiply, the high 32-bit are undefined.
-		OMUL_ | gc.TUINT64,
-		OMUL_ | gc.TPTR64:
-		// for 64-bit multiplies, signedness doesn't matter.
-		a = ppc64.AMULLD
-
-	case OMUL_ | gc.TFLOAT32:
-		a = ppc64.AFMULS
-
-	case OMUL_ | gc.TFLOAT64:
-		a = ppc64.AFMUL
-
-	case ODIV_ | gc.TINT8,
-		ODIV_ | gc.TINT16,
-		ODIV_ | gc.TINT32,
-		ODIV_ | gc.TINT64:
-		a = ppc64.ADIVD
-
-	case ODIV_ | gc.TUINT8,
-		ODIV_ | gc.TUINT16,
-		ODIV_ | gc.TUINT32,
-		ODIV_ | gc.TPTR32,
-		ODIV_ | gc.TUINT64,
-		ODIV_ | gc.TPTR64:
-		a = ppc64.ADIVDU
-
-	case ODIV_ | gc.TFLOAT32:
-		a = ppc64.AFDIVS
-
-	case ODIV_ | gc.TFLOAT64:
-		a = ppc64.AFDIV
-
-	case OSQRT_ | gc.TFLOAT64:
-		a = ppc64.AFSQRT
-	}
-
-	return a
-}
-
-const (
-	ODynam   = 1 << 0
-	OAddable = 1 << 1
-)
-
-func xgen(n *gc.Node, a *gc.Node, o int) bool {
-	// TODO(minux)
-
-	return true
-}
-
-func sudoclean() {
-	return
-}
-
-/*
- * generate code to compute address of n,
- * a reference to a (perhaps nested) field inside
- * an array or struct.
- * return false on failure, true on success.
- * on success, leaves usable address in a.
- *
- * caller is responsible for calling sudoclean
- * after successful sudoaddable,
- * to release the register used for a.
- */
-func sudoaddable(as obj.As, n *gc.Node, a *obj.Addr) bool {
-	// TODO(minux)
-
-	*a = obj.Addr{}
-	return false
-}
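
The deleted gmove contains the two classic conversion workarounds for machines without unsigned float-conversion instructions: unsigned-to-float halves the operand (x>>1 | x&1) before converting and doubles after, and float-to-unsigned subtracts 2^63 before the signed convert and adds it back. The arithmetic, restated in plain Go as a sketch of the idea rather than the emitted instruction sequence:

package main

import "fmt"

// u64ToF64 follows the deleted gmove's unsigned-to-float path: values
// with the top bit set are halved with the low bit folded back in
// (x>>1 | x&1, so rounding still sees it), converted signed, doubled.
func u64ToF64(x uint64) float64 {
	if x < 1<<63 {
		return float64(int64(x))
	}
	return float64(int64(x>>1|x&1)) * 2
}

// f64ToU64 follows the float-to-unsigned path: values of 2^63 or more
// have 2^63 subtracted before the signed convert and added back after.
func f64ToU64(f float64) uint64 {
	const big = 1 << 63
	if f < big {
		return uint64(int64(f))
	}
	return uint64(int64(f-big)) + big
}

func main() {
	const x = uint64(1)<<63 + 12345
	fmt.Println(u64ToF64(x))          // 9.223372036854788e+18
	fmt.Println(f64ToU64(float64(x))) // 9223372036854788096
}
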
diff --git a/src/cmd/compile/internal/ppc64/peep.go b/src/cmd/compile/internal/ppc64/peep.go
deleted file mode 100644
index ebdcc03..0000000
--- a/src/cmd/compile/internal/ppc64/peep.go
+++ /dev/null
@@ -1,1032 +0,0 @@
-// Derived from Inferno utils/6c/peep.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/6c/peep.c
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package ppc64
-
-import (
-	"cmd/compile/internal/gc"
-	"cmd/internal/obj"
-	"cmd/internal/obj/ppc64"
-	"fmt"
-)
-
-var gactive uint32
-
-func peep(firstp *obj.Prog) {
-	g := gc.Flowstart(firstp, nil)
-	if g == nil {
-		return
-	}
-	gactive = 0
-
-	var p *obj.Prog
-	var r *gc.Flow
-	var t obj.As
-loop1:
-	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
-		gc.Dumpit("loop1", g.Start, 0)
-	}
-
-	t = 0
-	for r = g.Start; r != nil; r = r.Link {
-		p = r.Prog
-
-		// TODO(austin) Handle smaller moves.  arm and amd64
-		// distinguish between moves that *must*
-		// sign/zero extend and moves that don't care so they
-		// can eliminate moves that don't care without
-		// breaking moves that do care. This might let us
-		// simplify or remove the next peep loop, too.
-		if p.As == ppc64.AMOVD || p.As == ppc64.AFMOVD {
-			if regtyp(&p.To) {
-				// Try to eliminate reg->reg moves
-				if regtyp(&p.From) {
-					if p.From.Type == p.To.Type {
-						if copyprop(r) {
-							excise(r)
-							t++
-						} else if subprop(r) && copyprop(r) {
-							excise(r)
-							t++
-						}
-					}
-				}
-
-				// Convert uses to $0 to uses of R0 and
-				// propagate R0
-				if regzer(&p.From) {
-					if p.To.Type == obj.TYPE_REG {
-						p.From.Type = obj.TYPE_REG
-						p.From.Reg = ppc64.REGZERO
-						if copyprop(r) {
-							excise(r)
-							t++
-						} else if subprop(r) && copyprop(r) {
-							excise(r)
-							t++
-						}
-					}
-				}
-			}
-		}
-	}
-
-	if t != 0 {
-		goto loop1
-	}
-
-	/*
-	 * look for MOVB x,R; MOVB R,R (for small MOVs not handled above)
-	 */
-	var p1 *obj.Prog
-	var r1 *gc.Flow
-	for r := g.Start; r != nil; r = r.Link {
-		p = r.Prog
-		switch p.As {
-		default:
-			continue
-
-		case ppc64.AMOVH,
-			ppc64.AMOVHZ,
-			ppc64.AMOVB,
-			ppc64.AMOVBZ,
-			ppc64.AMOVW,
-			ppc64.AMOVWZ:
-			if p.To.Type != obj.TYPE_REG {
-				continue
-			}
-		}
-
-		r1 = r.Link
-		if r1 == nil {
-			continue
-		}
-		p1 = r1.Prog
-		if p1.As != p.As {
-			continue
-		}
-		if p1.From.Type != obj.TYPE_REG || p1.From.Reg != p.To.Reg {
-			continue
-		}
-		if p1.To.Type != obj.TYPE_REG || p1.To.Reg != p.To.Reg {
-			continue
-		}
-		excise(r1)
-	}
-
-	if gc.Debug['D'] > 1 {
-		goto ret /* allow following code improvement to be suppressed */
-	}
-
-	/*
-	 * look for OP x,y,R; CMP R, $0 -> OPCC x,y,R
-	 * when OP can set condition codes correctly
-	 */
-	for r := g.Start; r != nil; r = r.Link {
-		p = r.Prog
-		switch p.As {
-		case ppc64.ACMP,
-			ppc64.ACMPW: /* always safe? */
-			if !regzer(&p.To) {
-				continue
-			}
-			r1 = r.S1
-			if r1 == nil {
-				continue
-			}
-			switch r1.Prog.As {
-			default:
-				continue
-
-				/* the conditions can be complex and these are currently little used */
-			case ppc64.ABCL,
-				ppc64.ABC:
-				continue
-
-			case ppc64.ABEQ,
-				ppc64.ABGE,
-				ppc64.ABGT,
-				ppc64.ABLE,
-				ppc64.ABLT,
-				ppc64.ABNE,
-				ppc64.ABVC,
-				ppc64.ABVS:
-				break
-			}
-
-			r1 = r
-			for {
-				r1 = gc.Uniqp(r1)
-				if r1 == nil || r1.Prog.As != obj.ANOP {
-					break
-				}
-			}
-
-			if r1 == nil {
-				continue
-			}
-			p1 = r1.Prog
-			if p1.To.Type != obj.TYPE_REG || p1.To.Reg != p.From.Reg {
-				continue
-			}
-			switch p1.As {
-			/* irregular instructions */
-			case ppc64.ASUB,
-				ppc64.AADD,
-				ppc64.AXOR,
-				ppc64.AOR:
-				if p1.From.Type == obj.TYPE_CONST || p1.From.Type == obj.TYPE_ADDR {
-					continue
-				}
-			}
-
-			switch p1.As {
-			default:
-				continue
-
-			case ppc64.AMOVW,
-				ppc64.AMOVD:
-				if p1.From.Type != obj.TYPE_REG {
-					continue
-				}
-				continue
-
-			case ppc64.AANDCC,
-				ppc64.AANDNCC,
-				ppc64.AORCC,
-				ppc64.AORNCC,
-				ppc64.AXORCC,
-				ppc64.ASUBCC,
-				ppc64.ASUBECC,
-				ppc64.ASUBMECC,
-				ppc64.ASUBZECC,
-				ppc64.AADDCC,
-				ppc64.AADDCCC,
-				ppc64.AADDECC,
-				ppc64.AADDMECC,
-				ppc64.AADDZECC,
-				ppc64.ARLWMICC,
-				ppc64.ARLWNMCC,
-				/* don't deal with floating point instructions for now */
-				/*
-					case AFABS:
-					case AFADD:
-					case AFADDS:
-					case AFCTIW:
-					case AFCTIWZ:
-					case AFDIV:
-					case AFDIVS:
-					case AFMADD:
-					case AFMADDS:
-					case AFMOVD:
-					case AFMSUB:
-					case AFMSUBS:
-					case AFMUL:
-					case AFMULS:
-					case AFNABS:
-					case AFNEG:
-					case AFNMADD:
-					case AFNMADDS:
-					case AFNMSUB:
-					case AFNMSUBS:
-					case AFRSP:
-					case AFSUB:
-					case AFSUBS:
-					case ACNTLZW:
-					case AMTFSB0:
-					case AMTFSB1:
-				*/
-				ppc64.AADD,
-				ppc64.AADDV,
-				ppc64.AADDC,
-				ppc64.AADDCV,
-				ppc64.AADDME,
-				ppc64.AADDMEV,
-				ppc64.AADDE,
-				ppc64.AADDEV,
-				ppc64.AADDZE,
-				ppc64.AADDZEV,
-				ppc64.AAND,
-				ppc64.AANDN,
-				ppc64.ADIVW,
-				ppc64.ADIVWV,
-				ppc64.ADIVWU,
-				ppc64.ADIVWUV,
-				ppc64.ADIVD,
-				ppc64.ADIVDV,
-				ppc64.ADIVDU,
-				ppc64.ADIVDUV,
-				ppc64.AEQV,
-				ppc64.AEXTSB,
-				ppc64.AEXTSH,
-				ppc64.AEXTSW,
-				ppc64.AMULHW,
-				ppc64.AMULHWU,
-				ppc64.AMULLW,
-				ppc64.AMULLWV,
-				ppc64.AMULHD,
-				ppc64.AMULHDU,
-				ppc64.AMULLD,
-				ppc64.AMULLDV,
-				ppc64.ANAND,
-				ppc64.ANEG,
-				ppc64.ANEGV,
-				ppc64.ANOR,
-				ppc64.AOR,
-				ppc64.AORN,
-				ppc64.AREM,
-				ppc64.AREMV,
-				ppc64.AREMU,
-				ppc64.AREMUV,
-				ppc64.AREMD,
-				ppc64.AREMDV,
-				ppc64.AREMDU,
-				ppc64.AREMDUV,
-				ppc64.ARLWMI,
-				ppc64.ARLWNM,
-				ppc64.ASLW,
-				ppc64.ASRAW,
-				ppc64.ASRW,
-				ppc64.ASLD,
-				ppc64.ASRAD,
-				ppc64.ASRD,
-				ppc64.ASUB,
-				ppc64.ASUBV,
-				ppc64.ASUBC,
-				ppc64.ASUBCV,
-				ppc64.ASUBME,
-				ppc64.ASUBMEV,
-				ppc64.ASUBE,
-				ppc64.ASUBEV,
-				ppc64.ASUBZE,
-				ppc64.ASUBZEV,
-				ppc64.AXOR:
-				t = variant2as(p1.As, as2variant(p1.As)|V_CC)
-			}
-
-			if gc.Debug['D'] != 0 {
-				fmt.Printf("cmp %v; %v -> ", p1, p)
-			}
-			p1.As = t
-			if gc.Debug['D'] != 0 {
-				fmt.Printf("%v\n", p1)
-			}
-			excise(r)
-			continue
-		}
-	}
-
-ret:
-	gc.Flowend(g)
-}
-
-func excise(r *gc.Flow) {
-	p := r.Prog
-	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
-		fmt.Printf("%v ===delete===\n", p)
-	}
-	obj.Nopout(p)
-	gc.Ostats.Ndelmov++
-}
-
-// regzer returns true if a's value is 0 (a is R0 or $0)
-func regzer(a *obj.Addr) bool {
-	if a.Type == obj.TYPE_CONST || a.Type == obj.TYPE_ADDR {
-		if a.Sym == nil && a.Reg == 0 {
-			if a.Offset == 0 {
-				return true
-			}
-		}
-	}
-	return a.Type == obj.TYPE_REG && a.Reg == ppc64.REGZERO
-}
-
-func regtyp(a *obj.Addr) bool {
-	// TODO(rsc): Floating point register exclusions?
-	return a.Type == obj.TYPE_REG && ppc64.REG_R0 <= a.Reg && a.Reg <= ppc64.REG_F31 && a.Reg != ppc64.REGZERO
-}
-
-/*
- * the idea is to substitute
- * one register for another
- * from one MOV to another
- *	MOV	a, R1
- *	ADD	b, R1	/ no use of R2
- *	MOV	R1, R2
- * would be converted to
- *	MOV	a, R2
- *	ADD	b, R2
- *	MOV	R2, R1
- * hopefully, then the former or latter MOV
- * will be eliminated by copy propagation.
- *
- * r0 (the argument, not the register) is the MOV at the end of the
- * above sequences.  This returns true if it modified any instructions.
- */
-func subprop(r0 *gc.Flow) bool {
-	p := r0.Prog
-	v1 := &p.From
-	if !regtyp(v1) {
-		return false
-	}
-	v2 := &p.To
-	if !regtyp(v2) {
-		return false
-	}
-	for r := gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
-		if gc.Uniqs(r) == nil {
-			break
-		}
-		p = r.Prog
-		if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
-			continue
-		}
-		if p.Info.Flags&gc.Call != 0 {
-			return false
-		}
-
-		if p.Info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightWrite {
-			if p.To.Type == v1.Type {
-				if p.To.Reg == v1.Reg {
-					copysub(&p.To, v1, v2, true)
-					if gc.Debug['P'] != 0 {
-						fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
-						if p.From.Type == v2.Type {
-							fmt.Printf(" excise")
-						}
-						fmt.Printf("\n")
-					}
-
-					for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
-						p = r.Prog
-						copysub(&p.From, v1, v2, true)
-						copysub1(p, v1, v2, true)
-						copysub(&p.To, v1, v2, true)
-						if gc.Debug['P'] != 0 {
-							fmt.Printf("%v\n", r.Prog)
-						}
-					}
-
-					v1.Reg, v2.Reg = v2.Reg, v1.Reg
-					if gc.Debug['P'] != 0 {
-						fmt.Printf("%v last\n", r.Prog)
-					}
-					return true
-				}
-			}
-		}
-
-		if copyau(&p.From, v2) || copyau1(p, v2) || copyau(&p.To, v2) {
-			break
-		}
-		if copysub(&p.From, v1, v2, false) || copysub1(p, v1, v2, false) || copysub(&p.To, v1, v2, false) {
-			break
-		}
-	}
-
-	return false
-}
-
-/*
- * The idea is to remove redundant copies.
- *	v1->v2	F=0
- *	(use v2	s/v2/v1/)*
- *	set v1	F=1
- *	use v2	return fail (v1->v2 move must remain)
- *	-----------------
- *	v1->v2	F=0
- *	(use v2	s/v2/v1/)*
- *	set v1	F=1
- *	set v2	return success (caller can remove v1->v2 move)
- */
-func copyprop(r0 *gc.Flow) bool {
-	p := r0.Prog
-	v1 := &p.From
-	v2 := &p.To
-	if copyas(v1, v2) {
-		if gc.Debug['P'] != 0 {
-			fmt.Printf("eliminating self-move: %v\n", r0.Prog)
-		}
-		return true
-	}
-
-	gactive++
-	if gc.Debug['P'] != 0 {
-		fmt.Printf("trying to eliminate %v->%v move from:\n%v\n", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r0.Prog)
-	}
-	return copy1(v1, v2, r0.S1, false)
-}
-
-// copy1 replaces uses of v2 with v1 starting at r and returns true if
-// all uses were rewritten.
-func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f bool) bool {
-	if uint32(r.Active) == gactive {
-		if gc.Debug['P'] != 0 {
-			fmt.Printf("act set; return 1\n")
-		}
-		return true
-	}
-
-	r.Active = int32(gactive)
-	if gc.Debug['P'] != 0 {
-		fmt.Printf("copy1 replace %v with %v f=%v\n", gc.Ctxt.Dconv(v2), gc.Ctxt.Dconv(v1), f)
-	}
-	for ; r != nil; r = r.S1 {
-		p := r.Prog
-		if gc.Debug['P'] != 0 {
-			fmt.Printf("%v", p)
-		}
-		if !f && gc.Uniqp(r) == nil {
-			// Multiple predecessors; conservatively
-			// assume v1 was set on other path
-			f = true
-
-			if gc.Debug['P'] != 0 {
-				fmt.Printf("; merge; f=%v", f)
-			}
-		}
-
-		switch t := copyu(p, v2, nil); t {
-		case 2: /* rar, can't split */
-			if gc.Debug['P'] != 0 {
-				fmt.Printf("; %v rar; return 0\n", gc.Ctxt.Dconv(v2))
-			}
-			return false
-
-		case 3: /* set */
-			if gc.Debug['P'] != 0 {
-				fmt.Printf("; %v set; return 1\n", gc.Ctxt.Dconv(v2))
-			}
-			return true
-
-		case 1, /* used, substitute */
-			4: /* use and set */
-			if f {
-				if gc.Debug['P'] == 0 {
-					return false
-				}
-				if t == 4 {
-					fmt.Printf("; %v used+set and f=%v; return 0\n", gc.Ctxt.Dconv(v2), f)
-				} else {
-					fmt.Printf("; %v used and f=%v; return 0\n", gc.Ctxt.Dconv(v2), f)
-				}
-				return false
-			}
-
-			if copyu(p, v2, v1) != 0 {
-				if gc.Debug['P'] != 0 {
-					fmt.Printf("; sub fail; return 0\n")
-				}
-				return false
-			}
-
-			if gc.Debug['P'] != 0 {
-				fmt.Printf("; sub %v->%v\n => %v", gc.Ctxt.Dconv(v2), gc.Ctxt.Dconv(v1), p)
-			}
-			if t == 4 {
-				if gc.Debug['P'] != 0 {
-					fmt.Printf("; %v used+set; return 1\n", gc.Ctxt.Dconv(v2))
-				}
-				return true
-			}
-		}
-
-		if !f {
-			t := copyu(p, v1, nil)
-			if t == 2 || t == 3 || t == 4 {
-				f = true
-				if gc.Debug['P'] != 0 {
-					fmt.Printf("; %v set and !f; f=%v", gc.Ctxt.Dconv(v1), f)
-				}
-			}
-		}
-
-		if gc.Debug['P'] != 0 {
-			fmt.Printf("\n")
-		}
-		if r.S2 != nil {
-			if !copy1(v1, v2, r.S2, f) {
-				return false
-			}
-		}
-	}
-
-	return true
-}
-
-// If s==nil, copyu returns the set/use of v in p; otherwise, it
-// modifies p to replace reads of v with reads of s and returns 0 for
-// success or non-zero for failure.
-//
-// If s==nil, copyu returns one of the following values:
-// 	1 if v only used
-//	2 if v is set and used in one address (read-alter-rewrite;
-// 	  can't substitute)
-//	3 if v is only set
-//	4 if v is set in one address and used in another (so addresses
-// 	  can be rewritten independently)
-//	0 otherwise (not touched)
-func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
-	if p.From3Type() != obj.TYPE_NONE {
-		// 9g never generates a from3
-		fmt.Printf("copyu: from3 (%v) not implemented\n", gc.Ctxt.Dconv(p.From3))
-	}
-
-	switch p.As {
-	default:
-		fmt.Printf("copyu: can't find %v\n", p.As)
-		return 2
-
-	case obj.ANOP, /* read p->from, write p->to */
-		ppc64.AMOVH,
-		ppc64.AMOVHZ,
-		ppc64.AMOVB,
-		ppc64.AMOVBZ,
-		ppc64.AMOVW,
-		ppc64.AMOVWZ,
-		ppc64.AMOVD,
-		ppc64.ANEG,
-		ppc64.ANEGCC,
-		ppc64.AADDME,
-		ppc64.AADDMECC,
-		ppc64.AADDZE,
-		ppc64.AADDZECC,
-		ppc64.ASUBME,
-		ppc64.ASUBMECC,
-		ppc64.ASUBZE,
-		ppc64.ASUBZECC,
-		ppc64.AFCTIW,
-		ppc64.AFCTIWZ,
-		ppc64.AFCTID,
-		ppc64.AFCTIDZ,
-		ppc64.AFCFID,
-		ppc64.AFCFIDCC,
-		ppc64.AFCFIDU,
-		ppc64.AFCFIDUCC,
-		ppc64.AFMOVS,
-		ppc64.AFMOVD,
-		ppc64.AFRSP,
-		ppc64.AFNEG,
-		ppc64.AFNEGCC,
-		ppc64.AFSQRT:
-		if s != nil {
-			if copysub(&p.From, v, s, true) {
-				return 1
-			}
-
-			// Update only indirect uses of v in p->to
-			if !copyas(&p.To, v) {
-				if copysub(&p.To, v, s, true) {
-					return 1
-				}
-			}
-			return 0
-		}
-
-		if copyas(&p.To, v) {
-			// Fix up implicit from
-			if p.From.Type == obj.TYPE_NONE {
-				p.From = p.To
-			}
-			if copyau(&p.From, v) {
-				return 4
-			}
-			return 3
-		}
-
-		if copyau(&p.From, v) {
-			return 1
-		}
-		if copyau(&p.To, v) {
-			// p->to only indirectly uses v
-			return 1
-		}
-
-		return 0
-
-	case ppc64.AMOVBU, /* rar p->from, write p->to or read p->from, rar p->to */
-		ppc64.AMOVBZU,
-		ppc64.AMOVHU,
-		ppc64.AMOVHZU,
-		ppc64.AMOVWZU,
-		ppc64.AMOVDU:
-		if p.From.Type == obj.TYPE_MEM {
-			if copyas(&p.From, v) {
-				// No s!=nil check; need to fail
-				// anyway in that case
-				return 2
-			}
-
-			if s != nil {
-				if copysub(&p.To, v, s, true) {
-					return 1
-				}
-				return 0
-			}
-
-			if copyas(&p.To, v) {
-				return 3
-			}
-		} else if p.To.Type == obj.TYPE_MEM {
-			if copyas(&p.To, v) {
-				return 2
-			}
-			if s != nil {
-				if copysub(&p.From, v, s, true) {
-					return 1
-				}
-				return 0
-			}
-
-			if copyau(&p.From, v) {
-				return 1
-			}
-		} else {
-			fmt.Printf("copyu: bad %v\n", p)
-		}
-
-		return 0
-
-	case ppc64.ARLWMI, /* read p->from, read p->reg, rar p->to */
-		ppc64.ARLWMICC:
-		if copyas(&p.To, v) {
-			return 2
-		}
-		fallthrough
-
-		/* fall through */
-	case ppc64.AADD,
-		/* read p->from, read p->reg, write p->to */
-		ppc64.AADDC,
-		ppc64.AADDE,
-		ppc64.ASUB,
-		ppc64.ASLW,
-		ppc64.ASRW,
-		ppc64.ASRAW,
-		ppc64.ASLD,
-		ppc64.ASRD,
-		ppc64.ASRAD,
-		ppc64.AOR,
-		ppc64.AORCC,
-		ppc64.AORN,
-		ppc64.AORNCC,
-		ppc64.AAND,
-		ppc64.AANDCC,
-		ppc64.AANDN,
-		ppc64.AANDNCC,
-		ppc64.ANAND,
-		ppc64.ANANDCC,
-		ppc64.ANOR,
-		ppc64.ANORCC,
-		ppc64.AXOR,
-		ppc64.AMULHW,
-		ppc64.AMULHWU,
-		ppc64.AMULLW,
-		ppc64.AMULLD,
-		ppc64.ADIVW,
-		ppc64.ADIVD,
-		ppc64.ADIVWU,
-		ppc64.ADIVDU,
-		ppc64.AREM,
-		ppc64.AREMU,
-		ppc64.AREMD,
-		ppc64.AREMDU,
-		ppc64.ARLWNM,
-		ppc64.ARLWNMCC,
-		ppc64.AFADDS,
-		ppc64.AFADD,
-		ppc64.AFSUBS,
-		ppc64.AFSUB,
-		ppc64.AFMULS,
-		ppc64.AFMUL,
-		ppc64.AFDIVS,
-		ppc64.AFDIV:
-		if s != nil {
-			if copysub(&p.From, v, s, true) {
-				return 1
-			}
-			if copysub1(p, v, s, true) {
-				return 1
-			}
-
-			// Update only indirect uses of v in p->to
-			if !copyas(&p.To, v) {
-				if copysub(&p.To, v, s, true) {
-					return 1
-				}
-			}
-			return 0
-		}
-
-		if copyas(&p.To, v) {
-			if p.Reg == 0 {
-				// Fix up implicit reg (e.g., ADD
-				// R3,R4 -> ADD R3,R4,R4) so we can
-				// update reg and to separately.
-				p.Reg = p.To.Reg
-			}
-
-			if copyau(&p.From, v) {
-				return 4
-			}
-			if copyau1(p, v) {
-				return 4
-			}
-			return 3
-		}
-
-		if copyau(&p.From, v) {
-			return 1
-		}
-		if copyau1(p, v) {
-			return 1
-		}
-		if copyau(&p.To, v) {
-			return 1
-		}
-		return 0
-
-	case ppc64.ABEQ,
-		ppc64.ABGT,
-		ppc64.ABGE,
-		ppc64.ABLT,
-		ppc64.ABLE,
-		ppc64.ABNE,
-		ppc64.ABVC,
-		ppc64.ABVS:
-		return 0
-
-	case obj.ACHECKNIL, /* read p->from */
-		ppc64.ACMP, /* read p->from, read p->to */
-		ppc64.ACMPU,
-		ppc64.ACMPW,
-		ppc64.ACMPWU,
-		ppc64.AFCMPO,
-		ppc64.AFCMPU:
-		if s != nil {
-			if copysub(&p.From, v, s, true) {
-				return 1
-			}
-			if copysub(&p.To, v, s, true) {
-				return 1
-			}
-			return 0
-		}
-
-		if copyau(&p.From, v) {
-			return 1
-		}
-		if copyau(&p.To, v) {
-			return 1
-		}
-		return 0
-
-	// 9g never generates a branch to a GPR (this isn't
-	// even a normal instruction; liblink turns it into a
-	// mov and a branch).
-	case ppc64.ABR: /* read p->to */
-		if s != nil {
-			if copysub(&p.To, v, s, true) {
-				return 1
-			}
-			return 0
-		}
-
-		if copyau(&p.To, v) {
-			return 1
-		}
-		return 0
-
-	case obj.ARET: /* funny */
-		if s != nil {
-			return 0
-		}
-
-		// All registers die at this point, so claim
-		// everything is set (and not used).
-		return 3
-
-	case ppc64.ABL: /* funny */
-		if v.Type == obj.TYPE_REG {
-			// TODO(rsc): REG_R0 and REG_F0 used to be
-			// (when register numbers started at 0) exregoffset and exfregoffset,
-			// which are unset entirely.
-			// It's strange that this handles R0 and F0 differently from the other
-			// registers. Possible failure to optimize?
-			if ppc64.REG_R0 < v.Reg && v.Reg <= ppc64.REGEXT {
-				return 2
-			}
-			if v.Reg == ppc64.REGARG {
-				return 2
-			}
-			if ppc64.REG_F0 < v.Reg && v.Reg <= ppc64.FREGEXT {
-				return 2
-			}
-		}
-
-		if p.From.Type == obj.TYPE_REG && v.Type == obj.TYPE_REG && p.From.Reg == v.Reg {
-			return 2
-		}
-
-		if s != nil {
-			if copysub(&p.To, v, s, true) {
-				return 1
-			}
-			return 0
-		}
-		if copyau(&p.To, v) {
-			return 4
-		}
-		return 3
-
-	// R0 is zero, used by DUFFZERO, cannot be substituted.
-	// R3 is ptr to memory, used and set, cannot be substituted.
-	case obj.ADUFFZERO:
-		if v.Type == obj.TYPE_REG {
-			if v.Reg == 0 {
-				return 1
-			}
-			if v.Reg == 3 {
-				return 2
-			}
-		}
-
-		return 0
-
-	// R3, R4 are ptr to src, dst, used and set, cannot be substituted.
-	// R5 is scratch, set by DUFFCOPY, cannot be substituted.
-	case obj.ADUFFCOPY:
-		if v.Type == obj.TYPE_REG {
-			if v.Reg == 3 || v.Reg == 4 {
-				return 2
-			}
-			if v.Reg == 5 {
-				return 3
-			}
-		}
-
-		return 0
-
-	case obj.ATEXT: /* funny */
-		if v.Type == obj.TYPE_REG {
-			if v.Reg == ppc64.REGARG {
-				return 3
-			}
-		}
-		return 0
-
-	case obj.APCDATA,
-		obj.AFUNCDATA,
-		obj.AVARDEF,
-		obj.AVARKILL,
-		obj.AVARLIVE,
-		obj.AUSEFIELD:
-		return 0
-	}
-}
-
-// copyas returns true if a and v address the same register.
-//
-// If a is the from operand, this means this operation reads the
-// register in v. If a is the to operand, this means this operation
-// writes the register in v.
-func copyas(a *obj.Addr, v *obj.Addr) bool {
-	return regtyp(v) && a.Type == v.Type && a.Reg == v.Reg
-}
-
-// copyau returns true if a either directly or indirectly addresses the
-// same register as v.
-//
-// If a is the from operand, this means this operation reads the
-// register in v. If a is the to operand, this means the operation
-// either reads or writes the register in v (if !copyas(a, v), then
-// the operation reads the register in v).
-func copyau(a *obj.Addr, v *obj.Addr) bool {
-	if copyas(a, v) {
-		return true
-	}
-	if v.Type == obj.TYPE_REG {
-		if a.Type == obj.TYPE_MEM || (a.Type == obj.TYPE_ADDR && a.Reg != 0) {
-			if v.Reg == a.Reg {
-				return true
-			}
-		}
-	}
-	return false
-}
-
-// copyau1 returns true if p->reg references the same register as v and v
-// is a direct reference.
-func copyau1(p *obj.Prog, v *obj.Addr) bool {
-	return regtyp(v) && v.Reg != 0 && p.Reg == v.Reg
-}
-
-// copysub replaces v with s in a if f==true, or reports whether it could if f==false.
-// The return value reports failure to substitute; substitution always succeeds on ppc64, so it always returns false.
-// TODO(dfc) remove unused return value and callers where f=false.
-func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f bool) bool {
-	if f && copyau(a, v) {
-		a.Reg = s.Reg
-	}
-	return false
-}
-
-// copysub1 replaces v with s in p1->reg if f==true or indicates whether it could if f==false.
-// Returns true on failure to substitute (it always succeeds on ppc64).
-// TODO(dfc) remove unused return value and callers where f=false.
-func copysub1(p1 *obj.Prog, v *obj.Addr, s *obj.Addr, f bool) bool {
-	if f && copyau1(p1, v) {
-		p1.Reg = s.Reg
-	}
-	return false
-}
-
-func sameaddr(a *obj.Addr, v *obj.Addr) bool {
-	if a.Type != v.Type {
-		return false
-	}
-	if regtyp(v) && a.Reg == v.Reg {
-		return true
-	}
-	if v.Type == obj.NAME_AUTO || v.Type == obj.NAME_PARAM {
-		if v.Offset == a.Offset {
-			return true
-		}
-	}
-	return false
-}
-
-func smallindir(a *obj.Addr, reg *obj.Addr) bool {
-	return reg.Type == obj.TYPE_REG && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && 0 <= a.Offset && a.Offset < 4096
-}
-
-func stackaddr(a *obj.Addr) bool {
-	return a.Type == obj.TYPE_REG && a.Reg == ppc64.REGSP
-}
diff --git a/src/cmd/compile/internal/ppc64/reg.go b/src/cmd/compile/internal/ppc64/reg.go
index f7033f6..2dce8cd 100644
--- a/src/cmd/compile/internal/ppc64/reg.go
+++ b/src/cmd/compile/internal/ppc64/reg.go
@@ -31,100 +31,6 @@
 package ppc64
 
 import "cmd/internal/obj/ppc64"
-import "cmd/compile/internal/gc"
-
-const (
-	NREGVAR = 64 /* 32 general + 32 floating */
-)
-
-var regname = []string{
-	".R0",
-	".R1",
-	".R2",
-	".R3",
-	".R4",
-	".R5",
-	".R6",
-	".R7",
-	".R8",
-	".R9",
-	".R10",
-	".R11",
-	".R12",
-	".R13",
-	".R14",
-	".R15",
-	".R16",
-	".R17",
-	".R18",
-	".R19",
-	".R20",
-	".R21",
-	".R22",
-	".R23",
-	".R24",
-	".R25",
-	".R26",
-	".R27",
-	".R28",
-	".R29",
-	".R30",
-	".R31",
-	".F0",
-	".F1",
-	".F2",
-	".F3",
-	".F4",
-	".F5",
-	".F6",
-	".F7",
-	".F8",
-	".F9",
-	".F10",
-	".F11",
-	".F12",
-	".F13",
-	".F14",
-	".F15",
-	".F16",
-	".F17",
-	".F18",
-	".F19",
-	".F20",
-	".F21",
-	".F22",
-	".F23",
-	".F24",
-	".F25",
-	".F26",
-	".F27",
-	".F28",
-	".F29",
-	".F30",
-	".F31",
-}
-
-func regnames(n *int) []string {
-	*n = NREGVAR
-	return regname
-}
-
-func excludedregs() uint64 {
-	// Exclude registers with fixed functions
-	regbits := 1<<0 | RtoB(ppc64.REGSP) | RtoB(ppc64.REGG) | RtoB(ppc64.REGTLS) | RtoB(ppc64.REGTMP)
-
-	if gc.Ctxt.Flag_shared {
-		// When compiling Go into PIC, R2 is reserved as the TOC pointer
-		// and R12 is reserved so that calls via function pointer can stomp on it.
-		regbits |= RtoB(ppc64.REG_R2)
-		regbits |= RtoB(ppc64.REG_R12)
-	}
-	return regbits
-}
-
-func doregbits(r int) uint64 {
-	return 0
-}
 
 /*
  * track register variables including external registers:
@@ -147,19 +53,3 @@
 	}
 	return 0
 }
-
-func BtoR(b uint64) int {
-	b &= 0xffffffff
-	if b == 0 {
-		return 0
-	}
-	return gc.Bitno(b) + ppc64.REG_R0
-}
-
-func BtoF(b uint64) int {
-	b >>= 32
-	if b == 0 {
-		return 0
-	}
-	return gc.Bitno(b) + ppc64.REG_F0
-}
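
The deleted BtoR/BtoF helpers decode the register allocator's 64-bit liveness
mask: general registers occupy the low 32 bits and floating-point registers the
high 32, with gc.Bitno recovering the index of the lowest set bit. A minimal
standalone sketch of that convention (invented names; bits.TrailingZeros64
stands in for gc.Bitno, and -1 rather than 0 marks an empty mask):

	package main

	import (
		"fmt"
		"math/bits"
	)

	// Register numbers, loosely standing in for ppc64.REG_R0 and ppc64.REG_F0.
	const (
		regR0 = 0
		regF0 = 32
	)

	// rtoB maps a general register to its bit in the low 32 bits of the mask.
	func rtoB(r int) uint64 { return 1 << uint(r-regR0) }

	// ftoB maps a floating-point register to its bit in the high 32 bits.
	func ftoB(r int) uint64 { return 1 << uint(32+r-regF0) }

	// btoR recovers the lowest general register in the mask (-1 if none).
	func btoR(b uint64) int {
		b &= 0xffffffff
		if b == 0 {
			return -1
		}
		return bits.TrailingZeros64(b) + regR0
	}

	// btoF recovers the lowest floating-point register in the mask (-1 if none).
	func btoF(b uint64) int {
		b >>= 32
		if b == 0 {
			return -1
		}
		return bits.TrailingZeros64(b) + regF0
	}

	func main() {
		mask := rtoB(3) | ftoB(34)
		fmt.Println(btoR(mask), btoF(mask)) // 3 34
	}
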
diff --git a/src/cmd/compile/internal/s390x/cgen.go b/src/cmd/compile/internal/s390x/cgen.go
deleted file mode 100644
index 28bb34e..0000000
--- a/src/cmd/compile/internal/s390x/cgen.go
+++ /dev/null
@@ -1,178 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package s390x
-
-import (
-	"cmd/compile/internal/gc"
-	"cmd/internal/obj"
-	"cmd/internal/obj/s390x"
-)
-
-type direction int
-
-const (
-	_FORWARDS direction = iota
-	_BACKWARDS
-)
-
-// blockcopy copies w bytes from &n to &res
-func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
-	var dst gc.Node
-	var src gc.Node
-	if n.Ullman >= res.Ullman {
-		gc.Agenr(n, &dst, res) // temporarily use dst
-		gc.Regalloc(&src, gc.Types[gc.Tptr], nil)
-		gins(s390x.AMOVD, &dst, &src)
-		if res.Op == gc.ONAME {
-			gc.Gvardef(res)
-		}
-		gc.Agen(res, &dst)
-	} else {
-		if res.Op == gc.ONAME {
-			gc.Gvardef(res)
-		}
-		gc.Agenr(res, &dst, res)
-		gc.Agenr(n, &src, nil)
-	}
-	defer gc.Regfree(&src)
-	defer gc.Regfree(&dst)
-
-	var tmp gc.Node
-	gc.Regalloc(&tmp, gc.Types[gc.Tptr], nil)
-	defer gc.Regfree(&tmp)
-
-	offset := int64(0)
-	dir := _FORWARDS
-	if osrc < odst && odst < osrc+w {
-		// Reverse. Can't use MVC, fall back to basic moves.
-		dir = _BACKWARDS
-		const copiesPerIter = 2
-		if w >= 8*copiesPerIter {
-			cnt := w - (w % (8 * copiesPerIter))
-			ginscon(s390x.AADD, w, &src)
-			ginscon(s390x.AADD, w, &dst)
-
-			var end gc.Node
-			gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
-			p := gins(s390x.ASUB, nil, &end)
-			p.From.Type = obj.TYPE_CONST
-			p.From.Offset = cnt
-			p.Reg = src.Reg
-
-			var label *obj.Prog
-			for i := 0; i < copiesPerIter; i++ {
-				offset := int64(-8 * (i + 1))
-				p := gins(s390x.AMOVD, &src, &tmp)
-				p.From.Type = obj.TYPE_MEM
-				p.From.Offset = offset
-				if i == 0 {
-					label = p
-				}
-				p = gins(s390x.AMOVD, &tmp, &dst)
-				p.To.Type = obj.TYPE_MEM
-				p.To.Offset = offset
-			}
-
-			ginscon(s390x.ASUB, 8*copiesPerIter, &src)
-			ginscon(s390x.ASUB, 8*copiesPerIter, &dst)
-			gins(s390x.ACMP, &src, &end)
-			gc.Patch(gc.Gbranch(s390x.ABNE, nil, 0), label)
-			gc.Regfree(&end)
-
-			w -= cnt
-		} else {
-			offset = w
-		}
-	}
-
-	if dir == _FORWARDS && w > 1024 {
-		// Loop over MVCs
-		cnt := w - (w % 256)
-
-		var end gc.Node
-		gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
-		add := gins(s390x.AADD, nil, &end)
-		add.From.Type = obj.TYPE_CONST
-		add.From.Offset = cnt
-		add.Reg = src.Reg
-
-		mvc := gins(s390x.AMVC, &src, &dst)
-		mvc.From.Type = obj.TYPE_MEM
-		mvc.From.Offset = 0
-		mvc.To.Type = obj.TYPE_MEM
-		mvc.To.Offset = 0
-		mvc.From3 = new(obj.Addr)
-		mvc.From3.Type = obj.TYPE_CONST
-		mvc.From3.Offset = 256
-
-		ginscon(s390x.AADD, 256, &src)
-		ginscon(s390x.AADD, 256, &dst)
-		gins(s390x.ACMP, &src, &end)
-		gc.Patch(gc.Gbranch(s390x.ABNE, nil, 0), mvc)
-		gc.Regfree(&end)
-
-		w -= cnt
-	}
-
-	for w > 0 {
-		cnt := w
-		// If in reverse we can only do 8, 4, 2 or 1 bytes at a time.
-		if dir == _BACKWARDS {
-			switch {
-			case cnt >= 8:
-				cnt = 8
-			case cnt >= 4:
-				cnt = 4
-			case cnt >= 2:
-				cnt = 2
-			}
-		} else if cnt > 256 {
-			cnt = 256
-		}
-
-		switch cnt {
-		case 8, 4, 2, 1:
-			op := s390x.AMOVB
-			switch cnt {
-			case 8:
-				op = s390x.AMOVD
-			case 4:
-				op = s390x.AMOVW
-			case 2:
-				op = s390x.AMOVH
-			}
-			load := gins(op, &src, &tmp)
-			load.From.Type = obj.TYPE_MEM
-			load.From.Offset = offset
-
-			store := gins(op, &tmp, &dst)
-			store.To.Type = obj.TYPE_MEM
-			store.To.Offset = offset
-
-			if dir == _BACKWARDS {
-				load.From.Offset -= cnt
-				store.To.Offset -= cnt
-			}
-
-		default:
-			p := gins(s390x.AMVC, &src, &dst)
-			p.From.Type = obj.TYPE_MEM
-			p.From.Offset = offset
-			p.To.Type = obj.TYPE_MEM
-			p.To.Offset = offset
-			p.From3 = new(obj.Addr)
-			p.From3.Type = obj.TYPE_CONST
-			p.From3.Offset = cnt
-		}
-
-		switch dir {
-		case _FORWARDS:
-			offset += cnt
-		case _BACKWARDS:
-			offset -= cnt
-		}
-		w -= cnt
-	}
-}
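
The forward/backward decision above reduces to one interval test: a forward copy
is unsafe only when the destination begins inside the source range [osrc, osrc+w).
A small sketch of that check (hypothetical helper name):

	package main

	import "fmt"

	// copyDirection reports whether a w-byte copy from osrc to odst must run
	// backwards: exactly when the destination starts inside the source range,
	// so a forward pass would overwrite bytes it has yet to read.
	func copyDirection(osrc, odst, w int64) string {
		if osrc < odst && odst < osrc+w {
			return "backwards"
		}
		return "forwards"
	}

	func main() {
		fmt.Println(copyDirection(0, 4, 16))  // backwards: dst inside src range
		fmt.Println(copyDirection(4, 0, 16))  // forwards: writes trail reads
		fmt.Println(copyDirection(0, 32, 16)) // forwards: ranges are disjoint
	}
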
diff --git a/src/cmd/compile/internal/s390x/galign.go b/src/cmd/compile/internal/s390x/galign.go
index f7a0f56..93ece5a 100644
--- a/src/cmd/compile/internal/s390x/galign.go
+++ b/src/cmd/compile/internal/s390x/galign.go
@@ -27,36 +27,9 @@
 	gc.Thearch.ReservedRegs = resvd
 
 	gc.Thearch.Betypeinit = betypeinit
-	gc.Thearch.Cgen_hmul = cgen_hmul
-	gc.Thearch.Cgen_shift = cgen_shift
-	gc.Thearch.Clearfat = clearfat
 	gc.Thearch.Defframe = defframe
-	gc.Thearch.Dodiv = dodiv
-	gc.Thearch.Excise = excise
-	gc.Thearch.Expandchecks = expandchecks
-	gc.Thearch.Getg = getg
 	gc.Thearch.Gins = gins
-	gc.Thearch.Ginscmp = ginscmp
-	gc.Thearch.Ginscon = ginscon
-	gc.Thearch.Ginsnop = ginsnop
-	gc.Thearch.Gmove = gmove
-	gc.Thearch.Peep = peep
 	gc.Thearch.Proginfo = proginfo
-	gc.Thearch.Regtyp = isReg
-	gc.Thearch.Sameaddr = sameaddr
-	gc.Thearch.Smallindir = smallindir
-	gc.Thearch.Stackaddr = stackaddr
-	gc.Thearch.Blockcopy = blockcopy
-	gc.Thearch.Sudoaddable = sudoaddable
-	gc.Thearch.Sudoclean = sudoclean
-	gc.Thearch.Excludedregs = excludedregs
-	gc.Thearch.RtoB = RtoB
-	gc.Thearch.FtoB = RtoB
-	gc.Thearch.BtoR = BtoR
-	gc.Thearch.BtoF = BtoF
-	gc.Thearch.Optoas = optoas
-	gc.Thearch.Doregbits = doregbits
-	gc.Thearch.Regnames = regnames
 
 	gc.Thearch.SSARegToReg = ssaRegToReg
 	gc.Thearch.SSAMarkMoves = ssaMarkMoves
diff --git a/src/cmd/compile/internal/s390x/ggen.go b/src/cmd/compile/internal/s390x/ggen.go
index 1dd353a..cfaf88d 100644
--- a/src/cmd/compile/internal/s390x/ggen.go
+++ b/src/cmd/compile/internal/s390x/ggen.go
@@ -8,7 +8,6 @@
 	"cmd/compile/internal/gc"
 	"cmd/internal/obj"
 	"cmd/internal/obj/s390x"
-	"fmt"
 )
 
 // clearLoopCutOff is the (somewhat arbitrary) value above which it is better
@@ -165,413 +164,3 @@
 	gc.Nodreg(&reg, gc.Types[gc.TINT], s390x.REG_R0)
 	gins(s390x.AOR, &reg, &reg)
 }
-
-var panicdiv *gc.Node
-
-/*
- * generate division.
- * generates one of:
- *	res = nl / nr
- *	res = nl % nr
- * according to op.
- */
-func dodiv(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) {
-	// Have to be careful about handling
-	// most negative int divided by -1 correctly.
-	// The hardware will generate an undefined result.
-	// Also need to explicitly trap on division by zero;
-	// the hardware will silently generate an undefined result.
-	// DIVW will leave an unpredictable result in the higher 32 bits,
-	// so always use DIVD/DIVDU.
-	t := nl.Type
-
-	t0 := t
-	check := 0
-	if t.IsSigned() {
-		check = 1
-		if gc.Isconst(nl, gc.CTINT) && nl.Int64() != -(1<<uint64(t.Width*8-1)) {
-			check = 0
-		} else if gc.Isconst(nr, gc.CTINT) && nr.Int64() != -1 {
-			check = 0
-		}
-	}
-
-	if t.Width < 8 {
-		if t.IsSigned() {
-			t = gc.Types[gc.TINT64]
-		} else {
-			t = gc.Types[gc.TUINT64]
-		}
-		check = 0
-	}
-
-	a := optoas(gc.ODIV, t)
-
-	var tl gc.Node
-	gc.Regalloc(&tl, t0, nil)
-	var tr gc.Node
-	gc.Regalloc(&tr, t0, nil)
-	if nl.Ullman >= nr.Ullman {
-		gc.Cgen(nl, &tl)
-		gc.Cgen(nr, &tr)
-	} else {
-		gc.Cgen(nr, &tr)
-		gc.Cgen(nl, &tl)
-	}
-
-	if t != t0 {
-		// Convert
-		tl2 := tl
-
-		tr2 := tr
-		tl.Type = t
-		tr.Type = t
-		gmove(&tl2, &tl)
-		gmove(&tr2, &tr)
-	}
-
-	// Handle divide-by-zero panic.
-	p1 := gins(optoas(gc.OCMP, t), &tr, nil)
-
-	p1.To.Type = obj.TYPE_CONST
-	p1.To.Offset = 0
-	p1 = gc.Gbranch(optoas(gc.ONE, t), nil, +1)
-	if panicdiv == nil {
-		panicdiv = gc.Sysfunc("panicdivide")
-	}
-	gc.Ginscall(panicdiv, -1)
-	gc.Patch(p1, gc.Pc)
-
-	var p2 *obj.Prog
-	if check != 0 {
-		var nm1 gc.Node
-		gc.Nodconst(&nm1, t, -1)
-		gins(optoas(gc.OCMP, t), &tr, &nm1)
-		p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
-		if op == gc.ODIV {
-			// a / (-1) is -a.
-			gins(optoas(gc.OMINUS, t), nil, &tl)
-
-			gmove(&tl, res)
-		} else {
-			// a % (-1) is 0.
-			var nz gc.Node
-			gc.Nodconst(&nz, t, 0)
-
-			gmove(&nz, res)
-		}
-
-		p2 = gc.Gbranch(obj.AJMP, nil, 0)
-		gc.Patch(p1, gc.Pc)
-	}
-
-	p1 = gins(a, &tr, &tl)
-	if op == gc.ODIV {
-		gc.Regfree(&tr)
-		gmove(&tl, res)
-	} else {
-		// A%B = A-(A/B*B)
-		var tm gc.Node
-		gc.Regalloc(&tm, t, nil)
-
-		// patch div to use the 3 register form
-		// TODO(minux): add gins3?
-		p1.Reg = p1.To.Reg
-
-		p1.To.Reg = tm.Reg
-		gins(optoas(gc.OMUL, t), &tr, &tm)
-		gc.Regfree(&tr)
-		gins(optoas(gc.OSUB, t), &tm, &tl)
-		gc.Regfree(&tm)
-		gmove(&tl, res)
-	}
-
-	gc.Regfree(&tl)
-	if check != 0 {
-		gc.Patch(p2, gc.Pc)
-	}
-}
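
The guards emitted above implement the language semantics in software because
the hardware result is undefined in the corner cases: a zero divisor must
panic, and a divisor of -1 is rewritten so that MinInt64 / -1 stays defined. A
sketch of the intended behavior in plain Go (hypothetical function, not
compiler code):

	package main

	import "fmt"

	// divmod mirrors the checks dodiv emits: trap on a zero divisor,
	// special-case -1 so MinInt64 / -1 is handled (negation wraps back to
	// MinInt64 and the remainder is 0), then divide normally.
	func divmod(a, b int64) (q, r int64) {
		if b == 0 {
			panic("runtime error: integer divide by zero")
		}
		if b == -1 {
			return -a, 0 // a / -1 == -a; a % -1 == 0
		}
		return a / b, a % b
	}

	func main() {
		const minInt64 = -1 << 63
		q, r := divmod(minInt64, -1)
		fmt.Println(q, r) // -9223372036854775808 0
	}
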
-
-/*
- * generate high multiply:
- *   res = (nl*nr) >> width
- */
-func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
-	// largest ullman on left.
-	if nl.Ullman < nr.Ullman {
-		nl, nr = nr, nl
-	}
-
-	t := nl.Type
-	w := int(t.Width) * 8
-	var n1 gc.Node
-	gc.Cgenr(nl, &n1, res)
-	var n2 gc.Node
-	gc.Cgenr(nr, &n2, nil)
-	switch gc.Simtype[t.Etype] {
-	case gc.TINT8,
-		gc.TINT16,
-		gc.TINT32:
-		gins(optoas(gc.OMUL, t), &n2, &n1)
-		p := gins(s390x.ASRAD, nil, &n1)
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = int64(w)
-
-	case gc.TUINT8,
-		gc.TUINT16,
-		gc.TUINT32:
-		gins(optoas(gc.OMUL, t), &n2, &n1)
-		p := gins(s390x.ASRD, nil, &n1)
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = int64(w)
-
-	case gc.TINT64:
-		gins(s390x.AMULHD, &n2, &n1)
-
-	case gc.TUINT64:
-		gins(s390x.AMULHDU, &n2, &n1)
-
-	default:
-		gc.Fatalf("cgen_hmul %v", t)
-	}
-
-	gc.Cgen(&n1, res)
-	gc.Regfree(&n1)
-	gc.Regfree(&n2)
-}
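
For operands narrower than 64 bits, cgen_hmul widens the multiply and shifts the
product right by the operand width; only 64-bit operands need the dedicated
MULHD/MULHDU instructions. The narrow case, written directly in Go as a sketch:

	package main

	import "fmt"

	// hmul32 returns the high 32 bits of the 64-bit product of a and b,
	// matching what multiply-then-SRAD computes for TINT32.
	func hmul32(a, b int32) int32 {
		return int32((int64(a) * int64(b)) >> 32)
	}

	// hmul32u is the unsigned variant (SRD instead of SRAD).
	func hmul32u(a, b uint32) uint32 {
		return uint32((uint64(a) * uint64(b)) >> 32)
	}

	func main() {
		fmt.Println(hmul32(1<<30, 6))  // 1: (2^30 * 6) >> 32
		fmt.Println(hmul32u(1<<31, 4)) // 2: (2^31 * 4) >> 32
	}
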
-
-/*
- * generate shift according to op, one of:
- *	res = nl << nr
- *	res = nl >> nr
- */
-func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
-	a := optoas(op, nl.Type)
-
-	if nr.Op == gc.OLITERAL {
-		var n1 gc.Node
-		gc.Regalloc(&n1, nl.Type, res)
-		gc.Cgen(nl, &n1)
-		sc := uint64(nr.Int64())
-		if sc >= uint64(nl.Type.Width*8) {
-			// large shift gets 2 shifts by width-1
-			var n3 gc.Node
-			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
-
-			gins(a, &n3, &n1)
-			gins(a, &n3, &n1)
-		} else {
-			gins(a, nr, &n1)
-		}
-		gmove(&n1, res)
-		gc.Regfree(&n1)
-		return
-	}
-
-	if nl.Ullman >= gc.UINF {
-		var n4 gc.Node
-		gc.Tempname(&n4, nl.Type)
-		gc.Cgen(nl, &n4)
-		nl = &n4
-	}
-
-	if nr.Ullman >= gc.UINF {
-		var n5 gc.Node
-		gc.Tempname(&n5, nr.Type)
-		gc.Cgen(nr, &n5)
-		nr = &n5
-	}
-
-	// Allow either uint32 or uint64 as shift type,
-	// to avoid unnecessary conversion from uint32 to uint64
-	// just to do the comparison.
-	tcount := gc.Types[gc.Simtype[nr.Type.Etype]]
-
-	if tcount.Etype < gc.TUINT32 {
-		tcount = gc.Types[gc.TUINT32]
-	}
-
-	var n1 gc.Node
-	gc.Regalloc(&n1, nr.Type, nil) // to hold the shift amount
-	var n3 gc.Node
-	gc.Regalloc(&n3, tcount, &n1) // to clear high bits of the shift amount
-
-	var n2 gc.Node
-	gc.Regalloc(&n2, nl.Type, res)
-
-	if nl.Ullman >= nr.Ullman {
-		gc.Cgen(nl, &n2)
-		gc.Cgen(nr, &n1)
-		gmove(&n1, &n3)
-	} else {
-		gc.Cgen(nr, &n1)
-		gmove(&n1, &n3)
-		gc.Cgen(nl, &n2)
-	}
-
-	gc.Regfree(&n3)
-
-	// test and fix up large shifts
-	if !bounded {
-		gc.Nodconst(&n3, tcount, nl.Type.Width*8)
-		gins(optoas(gc.OCMP, tcount), &n1, &n3)
-		p1 := gc.Gbranch(optoas(gc.OLT, tcount), nil, 1)
-		if op == gc.ORSH && nl.Type.IsSigned() {
-			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
-			gins(a, &n3, &n2)
-		} else {
-			gc.Nodconst(&n3, nl.Type, 0)
-			gmove(&n3, &n2)
-		}
-
-		gc.Patch(p1, gc.Pc)
-	}
-
-	gins(a, &n1, &n2)
-
-	gmove(&n2, res)
-
-	gc.Regfree(&n1)
-	gc.Regfree(&n2)
-}
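
The fix-up branch above exists because Go defines over-wide shift counts while
the shift instructions do not: a count >= the operand width must produce 0,
except for a signed right shift, which fills with the sign bit. The semantics
being enforced, as a standalone sketch:

	package main

	import "fmt"

	// shr mirrors the !bounded signed right-shift path: counts >= the width
	// are clamped to width-1, which fills the result with the sign bit.
	func shr(x int32, count uint) int32 {
		const width = 32
		if count >= width {
			count = width - 1 // sign fill: 0 for x >= 0, -1 for x < 0
		}
		return x >> count
	}

	// shl mirrors the left-shift/unsigned case: over-wide counts yield 0.
	func shl(x uint32, count uint) uint32 {
		const width = 32
		if count >= width {
			return 0
		}
		return x << count
	}

	func main() {
		fmt.Println(shr(-8, 100)) // -1
		fmt.Println(shl(1, 40))   // 0
	}
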
-
-// clearfat clears (i.e. replaces with zeros) the value pointed to by nl.
-func clearfat(nl *gc.Node) {
-	if gc.Debug['g'] != 0 {
-		fmt.Printf("clearfat %v (%v, size: %d)\n", nl, nl.Type, nl.Type.Width)
-	}
-
-	// Avoid taking the address for simple enough types.
-	if gc.Componentgen(nil, nl) {
-		return
-	}
-
-	var dst gc.Node
-	gc.Regalloc(&dst, gc.Types[gc.Tptr], nil)
-	gc.Agen(nl, &dst)
-
-	var boff int64
-	w := nl.Type.Width
-	if w > clearLoopCutoff {
-		// Generate a loop clearing 256 bytes per iteration using XCs.
-		var end gc.Node
-		gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
-		p := gins(s390x.AMOVD, &dst, &end)
-		p.From.Type = obj.TYPE_ADDR
-		p.From.Offset = w - (w % 256)
-
-		p = gins(s390x.AXC, &dst, &dst)
-		p.From.Type = obj.TYPE_MEM
-		p.From.Offset = 0
-		p.To.Type = obj.TYPE_MEM
-		p.To.Offset = 0
-		p.From3 = new(obj.Addr)
-		p.From3.Offset = 256
-		p.From3.Type = obj.TYPE_CONST
-		pl := p
-
-		ginscon(s390x.AADD, 256, &dst)
-		gins(s390x.ACMP, &dst, &end)
-		gc.Patch(gc.Gbranch(s390x.ABNE, nil, 0), pl)
-		gc.Regfree(&end)
-		w = w % 256
-	}
-
-	// Generate instructions to clear the remaining memory.
-	for w > 0 {
-		n := w
-
-		// Can clear at most 256 bytes per instruction.
-		if n > 256 {
-			n = 256
-		}
-
-		switch n {
-		// Handle very small clears using moves.
-		case 8, 4, 2, 1:
-			ins := s390x.AMOVB
-			switch n {
-			case 8:
-				ins = s390x.AMOVD
-			case 4:
-				ins = s390x.AMOVW
-			case 2:
-				ins = s390x.AMOVH
-			}
-			p := gins(ins, nil, &dst)
-			p.From.Type = obj.TYPE_CONST
-			p.From.Offset = 0
-			p.To.Type = obj.TYPE_MEM
-			p.To.Offset = boff
-
-		// Handle clears that would require multiple moves with a XC.
-		default:
-			p := gins(s390x.AXC, &dst, &dst)
-			p.From.Type = obj.TYPE_MEM
-			p.From.Offset = boff
-			p.To.Type = obj.TYPE_MEM
-			p.To.Offset = boff
-			p.From3 = new(obj.Addr)
-			p.From3.Offset = n
-			p.From3.Type = obj.TYPE_CONST
-		}
-
-		boff += n
-		w -= n
-	}
-
-	gc.Regfree(&dst)
-}
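
clearfat's shape — a loop for large blocks, then ever-smaller chunks for the
tail — is easiest to see with the instruction selection stripped away. A sketch
of the same chunking decision (sizes only; 8/4/2/1 map to MOVD/MOVW/MOVH/MOVB
and anything else to an XC of that length):

	package main

	import "fmt"

	// clearChunks returns the chunk sizes a w-byte clear is split into:
	// 256-byte pieces while possible, then one final remainder chunk.
	func clearChunks(w int64) []int64 {
		var chunks []int64
		for w > 0 {
			n := w
			if n > 256 {
				n = 256 // XC clears at most 256 bytes per instruction
			}
			chunks = append(chunks, n)
			w -= n
		}
		return chunks
	}

	func main() {
		fmt.Println(clearChunks(600)) // [256 256 88]
		fmt.Println(clearChunks(8))   // [8]
	}
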
-
-// Called after regopt and peep have run.
-// Expand CHECKNIL pseudo-op into actual nil pointer check.
-func expandchecks(firstp *obj.Prog) {
-	for p := firstp; p != nil; p = p.Link {
-		if gc.Debug_checknil != 0 && gc.Ctxt.Debugvlog != 0 {
-			fmt.Printf("expandchecks: %v\n", p)
-		}
-		if p.As != obj.ACHECKNIL {
-			continue
-		}
-		if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
-			gc.Warnl(p.Lineno, "generated nil check")
-		}
-		if p.From.Type != obj.TYPE_REG {
-			gc.Fatalf("invalid nil check %v\n", p)
-		}
-
-		// check is
-		//	CMPBNE arg, $0, 2(PC) [likely]
-		//	MOVD   R0, 0(R0)
-		p1 := gc.Ctxt.NewProg()
-
-		gc.Clearp(p1)
-		p1.Link = p.Link
-		p.Link = p1
-		p1.Lineno = p.Lineno
-		p1.Pc = 9999
-		p.As = s390x.ACMPBNE
-		p.From3 = new(obj.Addr)
-		p.From3.Type = obj.TYPE_CONST
-		p.From3.Offset = 0
-
-		p.To.Type = obj.TYPE_BRANCH
-		p.To.Val = p1.Link
-
-		// crash by write to memory address 0.
-		p1.As = s390x.AMOVD
-
-		p1.From.Type = obj.TYPE_CONST
-		p1.From.Offset = 0
-		p1.To.Type = obj.TYPE_MEM
-		p1.To.Reg = s390x.REGZERO
-		p1.To.Offset = 0
-	}
-}
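
The expansion above replaces each CHECKNIL pseudo-op with a compare-and-branch
that skips a deliberately faulting store through address zero. Its effect,
spelled out in Go (a sketch; unsafe is used only to express the faulting store):

	package main

	import "unsafe"

	// nilcheck is the software shape of the expanded sequence:
	//	CMPBNE reg, $0, 2(PC)  -> the if condition
	//	MOVD   $0, 0(R0)       -> the faulting store when reg is zero
	func nilcheck(p unsafe.Pointer) {
		if p == nil {
			// Write to address 0 to raise the fault that the runtime
			// turns into a nil pointer panic.
			*(*byte)(unsafe.Pointer(uintptr(0))) = 0
		}
	}

	func main() {
		x := 1
		nilcheck(unsafe.Pointer(&x)) // non-nil: no fault
	}
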
-
-// res = runtime.getg()
-func getg(res *gc.Node) {
-	var n1 gc.Node
-	gc.Nodreg(&n1, res.Type, s390x.REGG)
-	gmove(&n1, res)
-}
diff --git a/src/cmd/compile/internal/s390x/gsubr.go b/src/cmd/compile/internal/s390x/gsubr.go
index 66b6588..b8925ff 100644
--- a/src/cmd/compile/internal/s390x/gsubr.go
+++ b/src/cmd/compile/internal/s390x/gsubr.go
@@ -102,34 +102,6 @@
 	gc.Regfree(&ntmp)
 }
 
-func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
-	if t.IsInteger() && n1.Op == gc.OLITERAL && n2.Op != gc.OLITERAL {
-		// Reverse comparison to place constant last.
-		op = gc.Brrev(op)
-		n1, n2 = n2, n1
-	}
-
-	var r1, r2, g1, g2 gc.Node
-	gc.Regalloc(&r1, t, n1)
-	gc.Regalloc(&g1, n1.Type, &r1)
-	gc.Cgen(n1, &g1)
-	gmove(&g1, &r1)
-	if t.IsInteger() && gc.Isconst(n2, gc.CTINT) {
-		ginscon2(optoas(gc.OCMP, t), &r1, n2.Int64())
-	} else {
-		gc.Regalloc(&r2, t, n2)
-		gc.Regalloc(&g2, n1.Type, &r2)
-		gc.Cgen(n2, &g2)
-		gmove(&g2, &r2)
-		rawgins(optoas(gc.OCMP, t), &r1, &r2)
-		gc.Regfree(&g2)
-		gc.Regfree(&r2)
-	}
-	gc.Regfree(&g1)
-	gc.Regfree(&r1)
-	return gc.Gbranch(optoas(op, t), nil, likely)
-}
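
ginscmp's first step leans on comparison reversal: "const OP x" holds exactly
when "x revOP const", which lets the constant become the second (immediate)
operand. A sketch of the table gc.Brrev implements (local Op names, not the
real gc constants):

	package main

	import "fmt"

	type op int

	const (
		opEQ op = iota // ==
		opNE           // !=
		opLT           // <
		opLE           // <=
		opGT           // >
		opGE           // >=
	)

	// brrev returns the comparison that holds once the operands are
	// swapped: a < b iff b > a, and so on; equality tests are symmetric.
	func brrev(o op) op {
		switch o {
		case opLT:
			return opGT
		case opGT:
			return opLT
		case opLE:
			return opGE
		case opGE:
			return opLE
		}
		return o // opEQ, opNE unchanged
	}

	func main() {
		// "3 < x" becomes "x > 3", putting the constant last.
		fmt.Println(brrev(opLT) == opGT) // true
	}
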
-
 // gmvc tries to move f to t using a mvc instruction.
 // If successful it returns true, otherwise it returns false.
 func gmvc(f, t *gc.Node) bool {
@@ -177,338 +149,6 @@
 	return true
 }
 
-// generate move:
-//	t = f
-// hard part is conversions.
-func gmove(f *gc.Node, t *gc.Node) {
-	if gc.Debug['M'] != 0 {
-		fmt.Printf("gmove %L -> %L\n", f, t)
-	}
-
-	ft := int(gc.Simsimtype(f.Type))
-	tt := int(gc.Simsimtype(t.Type))
-	cvt := t.Type
-
-	if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
-		gc.Complexmove(f, t)
-		return
-	}
-
-	var a obj.As
-
-	// cannot have two memory operands
-	if gc.Ismem(f) && gc.Ismem(t) {
-		if gmvc(f, t) {
-			return
-		}
-		goto hard
-	}
-
-	// convert constant to desired type
-	if f.Op == gc.OLITERAL {
-		var con gc.Node
-		f.Convconst(&con, t.Type)
-		f = &con
-		ft = tt // so big switch will choose a simple mov
-
-		// some constants can't move directly to memory.
-		if gc.Ismem(t) {
-			// float constants come from memory.
-			if t.Type.IsFloat() {
-				goto hard
-			}
-
-			// all immediates are 16-bit sign-extended
-			// unless moving into a register.
-			if t.Type.IsInteger() {
-				if i := con.Int64(); int64(int16(i)) != i {
-					goto hard
-				}
-			}
-
-			// immediate moves to memory have a 12-bit unsigned displacement
-			if t.Xoffset < 0 || t.Xoffset >= 4096-8 {
-				goto hard
-			}
-		}
-	}
-
-	// a float-to-int or int-to-float conversion requires the source operand in a register
-	if gc.Ismem(f) && ((f.Type.IsFloat() && t.Type.IsInteger()) || (f.Type.IsInteger() && t.Type.IsFloat())) {
-		cvt = f.Type
-		goto hard
-	}
-
-	// a float32-to-float64 or float64-to-float32 conversion requires the source operand in a register
-	if gc.Ismem(f) && f.Type.IsFloat() && t.Type.IsFloat() && (ft != tt) {
-		cvt = f.Type
-		goto hard
-	}
-
-	// value -> value copy, only one memory operand.
-	// figure out the instruction to use.
-	// break out of switch for one-instruction gins.
-	// goto rdst for "destination must be register".
-	// goto hard for "convert to cvt type first".
-	// otherwise handle and return.
-	switch uint32(ft)<<16 | uint32(tt) {
-	default:
-		gc.Fatalf("gmove %L -> %L", f.Type, t.Type)
-
-	// integer copy and truncate
-	case gc.TINT8<<16 | gc.TINT8,
-		gc.TUINT8<<16 | gc.TINT8,
-		gc.TINT16<<16 | gc.TINT8,
-		gc.TUINT16<<16 | gc.TINT8,
-		gc.TINT32<<16 | gc.TINT8,
-		gc.TUINT32<<16 | gc.TINT8,
-		gc.TINT64<<16 | gc.TINT8,
-		gc.TUINT64<<16 | gc.TINT8:
-		a = s390x.AMOVB
-
-	case gc.TINT8<<16 | gc.TUINT8,
-		gc.TUINT8<<16 | gc.TUINT8,
-		gc.TINT16<<16 | gc.TUINT8,
-		gc.TUINT16<<16 | gc.TUINT8,
-		gc.TINT32<<16 | gc.TUINT8,
-		gc.TUINT32<<16 | gc.TUINT8,
-		gc.TINT64<<16 | gc.TUINT8,
-		gc.TUINT64<<16 | gc.TUINT8:
-		a = s390x.AMOVBZ
-
-	case gc.TINT16<<16 | gc.TINT16,
-		gc.TUINT16<<16 | gc.TINT16,
-		gc.TINT32<<16 | gc.TINT16,
-		gc.TUINT32<<16 | gc.TINT16,
-		gc.TINT64<<16 | gc.TINT16,
-		gc.TUINT64<<16 | gc.TINT16:
-		a = s390x.AMOVH
-
-	case gc.TINT16<<16 | gc.TUINT16,
-		gc.TUINT16<<16 | gc.TUINT16,
-		gc.TINT32<<16 | gc.TUINT16,
-		gc.TUINT32<<16 | gc.TUINT16,
-		gc.TINT64<<16 | gc.TUINT16,
-		gc.TUINT64<<16 | gc.TUINT16:
-		a = s390x.AMOVHZ
-
-	case gc.TINT32<<16 | gc.TINT32,
-		gc.TUINT32<<16 | gc.TINT32,
-		gc.TINT64<<16 | gc.TINT32,
-		gc.TUINT64<<16 | gc.TINT32:
-		a = s390x.AMOVW
-
-	case gc.TINT32<<16 | gc.TUINT32,
-		gc.TUINT32<<16 | gc.TUINT32,
-		gc.TINT64<<16 | gc.TUINT32,
-		gc.TUINT64<<16 | gc.TUINT32:
-		a = s390x.AMOVWZ
-
-	case gc.TINT64<<16 | gc.TINT64,
-		gc.TINT64<<16 | gc.TUINT64,
-		gc.TUINT64<<16 | gc.TINT64,
-		gc.TUINT64<<16 | gc.TUINT64:
-		a = s390x.AMOVD
-
-	// sign extend int8
-	case gc.TINT8<<16 | gc.TINT16,
-		gc.TINT8<<16 | gc.TUINT16,
-		gc.TINT8<<16 | gc.TINT32,
-		gc.TINT8<<16 | gc.TUINT32,
-		gc.TINT8<<16 | gc.TINT64,
-		gc.TINT8<<16 | gc.TUINT64:
-		a = s390x.AMOVB
-		goto rdst
-
-	// zero extend uint8
-	case gc.TUINT8<<16 | gc.TINT16,
-		gc.TUINT8<<16 | gc.TUINT16,
-		gc.TUINT8<<16 | gc.TINT32,
-		gc.TUINT8<<16 | gc.TUINT32,
-		gc.TUINT8<<16 | gc.TINT64,
-		gc.TUINT8<<16 | gc.TUINT64:
-		a = s390x.AMOVBZ
-		goto rdst
-
-	// sign extend int16
-	case gc.TINT16<<16 | gc.TINT32,
-		gc.TINT16<<16 | gc.TUINT32,
-		gc.TINT16<<16 | gc.TINT64,
-		gc.TINT16<<16 | gc.TUINT64:
-		a = s390x.AMOVH
-		goto rdst
-
-	// zero extend uint16
-	case gc.TUINT16<<16 | gc.TINT32,
-		gc.TUINT16<<16 | gc.TUINT32,
-		gc.TUINT16<<16 | gc.TINT64,
-		gc.TUINT16<<16 | gc.TUINT64:
-		a = s390x.AMOVHZ
-		goto rdst
-
-	// sign extend int32
-	case gc.TINT32<<16 | gc.TINT64,
-		gc.TINT32<<16 | gc.TUINT64:
-		a = s390x.AMOVW
-		goto rdst
-
-	// zero extend uint32
-	case gc.TUINT32<<16 | gc.TINT64,
-		gc.TUINT32<<16 | gc.TUINT64:
-		a = s390x.AMOVWZ
-		goto rdst
-
-	// float to integer
-	case gc.TFLOAT32<<16 | gc.TUINT8,
-		gc.TFLOAT32<<16 | gc.TUINT16:
-		cvt = gc.Types[gc.TUINT32]
-		goto hard
-
-	case gc.TFLOAT32<<16 | gc.TUINT32:
-		a = s390x.ACLFEBR
-		goto rdst
-
-	case gc.TFLOAT32<<16 | gc.TUINT64:
-		a = s390x.ACLGEBR
-		goto rdst
-
-	case gc.TFLOAT64<<16 | gc.TUINT8,
-		gc.TFLOAT64<<16 | gc.TUINT16:
-		cvt = gc.Types[gc.TUINT32]
-		goto hard
-
-	case gc.TFLOAT64<<16 | gc.TUINT32:
-		a = s390x.ACLFDBR
-		goto rdst
-
-	case gc.TFLOAT64<<16 | gc.TUINT64:
-		a = s390x.ACLGDBR
-		goto rdst
-
-	case gc.TFLOAT32<<16 | gc.TINT8,
-		gc.TFLOAT32<<16 | gc.TINT16:
-		cvt = gc.Types[gc.TINT32]
-		goto hard
-
-	case gc.TFLOAT32<<16 | gc.TINT32:
-		a = s390x.ACFEBRA
-		goto rdst
-
-	case gc.TFLOAT32<<16 | gc.TINT64:
-		a = s390x.ACGEBRA
-		goto rdst
-
-	case gc.TFLOAT64<<16 | gc.TINT8,
-		gc.TFLOAT64<<16 | gc.TINT16:
-		cvt = gc.Types[gc.TINT32]
-		goto hard
-
-	case gc.TFLOAT64<<16 | gc.TINT32:
-		a = s390x.ACFDBRA
-		goto rdst
-
-	case gc.TFLOAT64<<16 | gc.TINT64:
-		a = s390x.ACGDBRA
-		goto rdst
-
-	// integer to float
-	case gc.TUINT8<<16 | gc.TFLOAT32,
-		gc.TUINT16<<16 | gc.TFLOAT32:
-		cvt = gc.Types[gc.TUINT32]
-		goto hard
-
-	case gc.TUINT32<<16 | gc.TFLOAT32:
-		a = s390x.ACELFBR
-		goto rdst
-
-	case gc.TUINT64<<16 | gc.TFLOAT32:
-		a = s390x.ACELGBR
-		goto rdst
-
-	case gc.TUINT8<<16 | gc.TFLOAT64,
-		gc.TUINT16<<16 | gc.TFLOAT64:
-		cvt = gc.Types[gc.TUINT32]
-		goto hard
-
-	case gc.TUINT32<<16 | gc.TFLOAT64:
-		a = s390x.ACDLFBR
-		goto rdst
-
-	case gc.TUINT64<<16 | gc.TFLOAT64:
-		a = s390x.ACDLGBR
-		goto rdst
-
-	case gc.TINT8<<16 | gc.TFLOAT32,
-		gc.TINT16<<16 | gc.TFLOAT32:
-		cvt = gc.Types[gc.TINT32]
-		goto hard
-
-	case gc.TINT32<<16 | gc.TFLOAT32:
-		a = s390x.ACEFBRA
-		goto rdst
-
-	case gc.TINT64<<16 | gc.TFLOAT32:
-		a = s390x.ACEGBRA
-		goto rdst
-
-	case gc.TINT8<<16 | gc.TFLOAT64,
-		gc.TINT16<<16 | gc.TFLOAT64:
-		cvt = gc.Types[gc.TINT32]
-		goto hard
-
-	case gc.TINT32<<16 | gc.TFLOAT64:
-		a = s390x.ACDFBRA
-		goto rdst
-
-	case gc.TINT64<<16 | gc.TFLOAT64:
-		a = s390x.ACDGBRA
-		goto rdst
-
-	// float to float
-	case gc.TFLOAT32<<16 | gc.TFLOAT32:
-		a = s390x.AFMOVS
-
-	case gc.TFLOAT64<<16 | gc.TFLOAT64:
-		a = s390x.AFMOVD
-
-	case gc.TFLOAT32<<16 | gc.TFLOAT64:
-		a = s390x.ALDEBR
-		goto rdst
-
-	case gc.TFLOAT64<<16 | gc.TFLOAT32:
-		a = s390x.ALEDBR
-		goto rdst
-	}
-
-	gins(a, f, t)
-	return
-
-	// requires register destination
-rdst:
-	if t != nil && t.Op == gc.OREGISTER {
-		gins(a, f, t)
-		return
-	} else {
-		var r1 gc.Node
-		gc.Regalloc(&r1, t.Type, t)
-
-		gins(a, f, &r1)
-		gmove(&r1, t)
-		gc.Regfree(&r1)
-		return
-	}
-
-	// requires register intermediate
-hard:
-	var r1 gc.Node
-	gc.Regalloc(&r1, cvt, t)
-
-	gmove(f, &r1)
-	gmove(&r1, t)
-	gc.Regfree(&r1)
-	return
-}
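
The big switch above dispatches on a single integer that packs the source and
destination simple types into one key, uint32(ft)<<16 | uint32(tt), so every
(from, to) pair gets its own case. The packing trick in isolation (toy type
codes, not the real gc ones):

	package main

	import "fmt"

	type etype uint32

	const (
		tInt8 etype = iota
		tInt32
		tFloat64
	)

	// key packs a (from, to) type pair into one switch value; 16 bits per
	// half is ample, since the compiler's type codes are small.
	func key(ft, tt etype) uint32 { return uint32(ft)<<16 | uint32(tt) }

	// moveName picks a stand-in "instruction" per conversion pair, the way
	// gmove selects a move or conversion opcode.
	func moveName(ft, tt etype) string {
		switch key(ft, tt) {
		case key(tInt8, tInt32):
			return "sign-extend byte"
		case key(tInt32, tFloat64):
			return "int-to-float convert"
		case key(tInt32, tInt32):
			return "plain move"
		default:
			return "multi-step (goto hard)"
		}
	}

	func main() {
		fmt.Println(moveName(tInt8, tInt32))   // sign-extend byte
		fmt.Println(moveName(tFloat64, tInt8)) // multi-step (goto hard)
	}
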
-
 func intLiteral(n *gc.Node) (x int64, ok bool) {
 	switch {
 	case n == nil:
@@ -599,512 +239,3 @@
 
 	return p
 }
-
-// optoas returns the Axxx equivalent of Oxxx for type t
-func optoas(op gc.Op, t *gc.Type) obj.As {
-	if t == nil {
-		gc.Fatalf("optoas: t is nil")
-	}
-
-	// avoid constant conversions in switches below
-	const (
-		OMINUS_ = uint32(gc.OMINUS) << 16
-		OLSH_   = uint32(gc.OLSH) << 16
-		ORSH_   = uint32(gc.ORSH) << 16
-		OADD_   = uint32(gc.OADD) << 16
-		OSUB_   = uint32(gc.OSUB) << 16
-		OMUL_   = uint32(gc.OMUL) << 16
-		ODIV_   = uint32(gc.ODIV) << 16
-		OOR_    = uint32(gc.OOR) << 16
-		OAND_   = uint32(gc.OAND) << 16
-		OXOR_   = uint32(gc.OXOR) << 16
-		OEQ_    = uint32(gc.OEQ) << 16
-		ONE_    = uint32(gc.ONE) << 16
-		OLT_    = uint32(gc.OLT) << 16
-		OLE_    = uint32(gc.OLE) << 16
-		OGE_    = uint32(gc.OGE) << 16
-		OGT_    = uint32(gc.OGT) << 16
-		OCMP_   = uint32(gc.OCMP) << 16
-		OAS_    = uint32(gc.OAS) << 16
-		OHMUL_  = uint32(gc.OHMUL) << 16
-		OSQRT_  = uint32(gc.OSQRT) << 16
-		OLROT_  = uint32(gc.OLROT) << 16
-	)
-
-	a := obj.AXXX
-	switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
-	default:
-		gc.Fatalf("optoas: no entry for op=%v type=%v", op, t)
-
-	case OEQ_ | gc.TBOOL,
-		OEQ_ | gc.TINT8,
-		OEQ_ | gc.TUINT8,
-		OEQ_ | gc.TINT16,
-		OEQ_ | gc.TUINT16,
-		OEQ_ | gc.TINT32,
-		OEQ_ | gc.TUINT32,
-		OEQ_ | gc.TINT64,
-		OEQ_ | gc.TUINT64,
-		OEQ_ | gc.TPTR32,
-		OEQ_ | gc.TPTR64,
-		OEQ_ | gc.TFLOAT32,
-		OEQ_ | gc.TFLOAT64:
-		a = s390x.ABEQ
-
-	case ONE_ | gc.TBOOL,
-		ONE_ | gc.TINT8,
-		ONE_ | gc.TUINT8,
-		ONE_ | gc.TINT16,
-		ONE_ | gc.TUINT16,
-		ONE_ | gc.TINT32,
-		ONE_ | gc.TUINT32,
-		ONE_ | gc.TINT64,
-		ONE_ | gc.TUINT64,
-		ONE_ | gc.TPTR32,
-		ONE_ | gc.TPTR64,
-		ONE_ | gc.TFLOAT32,
-		ONE_ | gc.TFLOAT64:
-		a = s390x.ABNE
-
-	case OLT_ | gc.TINT8, // ACMP
-		OLT_ | gc.TINT16,
-		OLT_ | gc.TINT32,
-		OLT_ | gc.TINT64,
-		OLT_ | gc.TUINT8,
-		// ACMPU
-		OLT_ | gc.TUINT16,
-		OLT_ | gc.TUINT32,
-		OLT_ | gc.TUINT64,
-		OLT_ | gc.TFLOAT32,
-		// AFCMPU
-		OLT_ | gc.TFLOAT64:
-		a = s390x.ABLT
-
-	case OLE_ | gc.TINT8, // ACMP
-		OLE_ | gc.TINT16,
-		OLE_ | gc.TINT32,
-		OLE_ | gc.TINT64,
-		OLE_ | gc.TUINT8,
-		// ACMPU
-		OLE_ | gc.TUINT16,
-		OLE_ | gc.TUINT32,
-		OLE_ | gc.TUINT64,
-		OLE_ | gc.TFLOAT32,
-		OLE_ | gc.TFLOAT64:
-		a = s390x.ABLE
-
-	case OGT_ | gc.TINT8,
-		OGT_ | gc.TINT16,
-		OGT_ | gc.TINT32,
-		OGT_ | gc.TINT64,
-		OGT_ | gc.TUINT8,
-		OGT_ | gc.TUINT16,
-		OGT_ | gc.TUINT32,
-		OGT_ | gc.TUINT64,
-		OGT_ | gc.TFLOAT32,
-		OGT_ | gc.TFLOAT64:
-		a = s390x.ABGT
-
-	case OGE_ | gc.TINT8,
-		OGE_ | gc.TINT16,
-		OGE_ | gc.TINT32,
-		OGE_ | gc.TINT64,
-		OGE_ | gc.TUINT8,
-		OGE_ | gc.TUINT16,
-		OGE_ | gc.TUINT32,
-		OGE_ | gc.TUINT64,
-		OGE_ | gc.TFLOAT32,
-		OGE_ | gc.TFLOAT64:
-		a = s390x.ABGE
-
-	case OCMP_ | gc.TBOOL,
-		OCMP_ | gc.TINT8,
-		OCMP_ | gc.TINT16,
-		OCMP_ | gc.TINT32,
-		OCMP_ | gc.TPTR32,
-		OCMP_ | gc.TINT64:
-		a = s390x.ACMP
-
-	case OCMP_ | gc.TUINT8,
-		OCMP_ | gc.TUINT16,
-		OCMP_ | gc.TUINT32,
-		OCMP_ | gc.TUINT64,
-		OCMP_ | gc.TPTR64:
-		a = s390x.ACMPU
-
-	case OCMP_ | gc.TFLOAT32:
-		a = s390x.ACEBR
-
-	case OCMP_ | gc.TFLOAT64:
-		a = s390x.AFCMPU
-
-	case OAS_ | gc.TBOOL,
-		OAS_ | gc.TINT8:
-		a = s390x.AMOVB
-
-	case OAS_ | gc.TUINT8:
-		a = s390x.AMOVBZ
-
-	case OAS_ | gc.TINT16:
-		a = s390x.AMOVH
-
-	case OAS_ | gc.TUINT16:
-		a = s390x.AMOVHZ
-
-	case OAS_ | gc.TINT32:
-		a = s390x.AMOVW
-
-	case OAS_ | gc.TUINT32,
-		OAS_ | gc.TPTR32:
-		a = s390x.AMOVWZ
-
-	case OAS_ | gc.TINT64,
-		OAS_ | gc.TUINT64,
-		OAS_ | gc.TPTR64:
-		a = s390x.AMOVD
-
-	case OAS_ | gc.TFLOAT32:
-		a = s390x.AFMOVS
-
-	case OAS_ | gc.TFLOAT64:
-		a = s390x.AFMOVD
-
-	case OADD_ | gc.TINT8,
-		OADD_ | gc.TUINT8,
-		OADD_ | gc.TINT16,
-		OADD_ | gc.TUINT16,
-		OADD_ | gc.TINT32,
-		OADD_ | gc.TUINT32,
-		OADD_ | gc.TPTR32,
-		OADD_ | gc.TINT64,
-		OADD_ | gc.TUINT64,
-		OADD_ | gc.TPTR64:
-		a = s390x.AADD
-
-	case OADD_ | gc.TFLOAT32:
-		a = s390x.AFADDS
-
-	case OADD_ | gc.TFLOAT64:
-		a = s390x.AFADD
-
-	case OSUB_ | gc.TINT8,
-		OSUB_ | gc.TUINT8,
-		OSUB_ | gc.TINT16,
-		OSUB_ | gc.TUINT16,
-		OSUB_ | gc.TINT32,
-		OSUB_ | gc.TUINT32,
-		OSUB_ | gc.TPTR32,
-		OSUB_ | gc.TINT64,
-		OSUB_ | gc.TUINT64,
-		OSUB_ | gc.TPTR64:
-		a = s390x.ASUB
-
-	case OSUB_ | gc.TFLOAT32:
-		a = s390x.AFSUBS
-
-	case OSUB_ | gc.TFLOAT64:
-		a = s390x.AFSUB
-
-	case OMINUS_ | gc.TINT8,
-		OMINUS_ | gc.TUINT8,
-		OMINUS_ | gc.TINT16,
-		OMINUS_ | gc.TUINT16,
-		OMINUS_ | gc.TINT32,
-		OMINUS_ | gc.TUINT32,
-		OMINUS_ | gc.TPTR32,
-		OMINUS_ | gc.TINT64,
-		OMINUS_ | gc.TUINT64,
-		OMINUS_ | gc.TPTR64:
-		a = s390x.ANEG
-
-	case OAND_ | gc.TINT8,
-		OAND_ | gc.TUINT8,
-		OAND_ | gc.TINT16,
-		OAND_ | gc.TUINT16,
-		OAND_ | gc.TINT32,
-		OAND_ | gc.TUINT32,
-		OAND_ | gc.TPTR32,
-		OAND_ | gc.TINT64,
-		OAND_ | gc.TUINT64,
-		OAND_ | gc.TPTR64:
-		a = s390x.AAND
-
-	case OOR_ | gc.TINT8,
-		OOR_ | gc.TUINT8,
-		OOR_ | gc.TINT16,
-		OOR_ | gc.TUINT16,
-		OOR_ | gc.TINT32,
-		OOR_ | gc.TUINT32,
-		OOR_ | gc.TPTR32,
-		OOR_ | gc.TINT64,
-		OOR_ | gc.TUINT64,
-		OOR_ | gc.TPTR64:
-		a = s390x.AOR
-
-	case OXOR_ | gc.TINT8,
-		OXOR_ | gc.TUINT8,
-		OXOR_ | gc.TINT16,
-		OXOR_ | gc.TUINT16,
-		OXOR_ | gc.TINT32,
-		OXOR_ | gc.TUINT32,
-		OXOR_ | gc.TPTR32,
-		OXOR_ | gc.TINT64,
-		OXOR_ | gc.TUINT64,
-		OXOR_ | gc.TPTR64:
-		a = s390x.AXOR
-
-	case OLSH_ | gc.TINT8,
-		OLSH_ | gc.TUINT8,
-		OLSH_ | gc.TINT16,
-		OLSH_ | gc.TUINT16,
-		OLSH_ | gc.TINT32,
-		OLSH_ | gc.TUINT32,
-		OLSH_ | gc.TPTR32,
-		OLSH_ | gc.TINT64,
-		OLSH_ | gc.TUINT64,
-		OLSH_ | gc.TPTR64:
-		a = s390x.ASLD
-
-	case ORSH_ | gc.TUINT8,
-		ORSH_ | gc.TUINT16,
-		ORSH_ | gc.TUINT32,
-		ORSH_ | gc.TPTR32,
-		ORSH_ | gc.TUINT64,
-		ORSH_ | gc.TPTR64:
-		a = s390x.ASRD
-
-	case ORSH_ | gc.TINT8,
-		ORSH_ | gc.TINT16,
-		ORSH_ | gc.TINT32,
-		ORSH_ | gc.TINT64:
-		a = s390x.ASRAD
-
-	case OHMUL_ | gc.TINT64:
-		a = s390x.AMULHD
-
-	case OHMUL_ | gc.TUINT64,
-		OHMUL_ | gc.TPTR64:
-		a = s390x.AMULHDU
-
-	case OMUL_ | gc.TINT8,
-		OMUL_ | gc.TINT16,
-		OMUL_ | gc.TINT32,
-		OMUL_ | gc.TINT64:
-		a = s390x.AMULLD
-
-	case OMUL_ | gc.TUINT8,
-		OMUL_ | gc.TUINT16,
-		OMUL_ | gc.TUINT32,
-		OMUL_ | gc.TPTR32,
-		// don't use word multiply; the high 32 bits are undefined.
-		OMUL_ | gc.TUINT64,
-		OMUL_ | gc.TPTR64:
-		// for 64-bit multiplies, signedness doesn't matter.
-		a = s390x.AMULLD
-
-	case OMUL_ | gc.TFLOAT32:
-		a = s390x.AFMULS
-
-	case OMUL_ | gc.TFLOAT64:
-		a = s390x.AFMUL
-
-	case ODIV_ | gc.TINT8,
-		ODIV_ | gc.TINT16,
-		ODIV_ | gc.TINT32,
-		ODIV_ | gc.TINT64:
-		a = s390x.ADIVD
-
-	case ODIV_ | gc.TUINT8,
-		ODIV_ | gc.TUINT16,
-		ODIV_ | gc.TUINT32,
-		ODIV_ | gc.TPTR32,
-		ODIV_ | gc.TUINT64,
-		ODIV_ | gc.TPTR64:
-		a = s390x.ADIVDU
-
-	case ODIV_ | gc.TFLOAT32:
-		a = s390x.AFDIVS
-
-	case ODIV_ | gc.TFLOAT64:
-		a = s390x.AFDIV
-
-	case OSQRT_ | gc.TFLOAT64:
-		a = s390x.AFSQRT
-
-	case OLROT_ | gc.TUINT32,
-		OLROT_ | gc.TPTR32,
-		OLROT_ | gc.TINT32:
-		a = s390x.ARLL
-
-	case OLROT_ | gc.TUINT64,
-		OLROT_ | gc.TPTR64,
-		OLROT_ | gc.TINT64:
-		a = s390x.ARLLG
-	}
-
-	return a
-}
-
-const (
-	ODynam   = 1 << 0
-	OAddable = 1 << 1
-)
-
-var clean [20]gc.Node
-
-var cleani int = 0
-
-func sudoclean() {
-	if clean[cleani-1].Op != gc.OEMPTY {
-		gc.Regfree(&clean[cleani-1])
-	}
-	if clean[cleani-2].Op != gc.OEMPTY {
-		gc.Regfree(&clean[cleani-2])
-	}
-	cleani -= 2
-}
-
-/*
- * generate code to compute address of n,
- * a reference to a (perhaps nested) field inside
- * an array or struct.
- * return false on failure, true on success.
- * on success, leaves usable address in a.
- *
- * caller is responsible for calling sudoclean
- * after successful sudoaddable,
- * to release the register used for a.
- */
-func sudoaddable(as obj.As, n *gc.Node, a *obj.Addr) bool {
-	if n.Type == nil {
-		return false
-	}
-
-	*a = obj.Addr{}
-
-	switch n.Op {
-	case gc.OLITERAL:
-		if !gc.Isconst(n, gc.CTINT) {
-			return false
-		}
-		v := n.Int64()
-		switch as {
-		default:
-			return false
-
-		// operations that can cope with a 32-bit immediate
-		// TODO(mundaym): logical operations can work on high bits
-		case s390x.AADD,
-			s390x.AADDC,
-			s390x.ASUB,
-			s390x.AMULLW,
-			s390x.AAND,
-			s390x.AOR,
-			s390x.AXOR,
-			s390x.ASLD,
-			s390x.ASLW,
-			s390x.ASRAW,
-			s390x.ASRAD,
-			s390x.ASRW,
-			s390x.ASRD,
-			s390x.AMOVB,
-			s390x.AMOVBZ,
-			s390x.AMOVH,
-			s390x.AMOVHZ,
-			s390x.AMOVW,
-			s390x.AMOVWZ,
-			s390x.AMOVD:
-			if int64(int32(v)) != v {
-				return false
-			}
-
-		// for comparisons avoid immediates unless they can
-		// fit into an int8/uint8
-		// this favours combined compare and branch instructions
-		case s390x.ACMP:
-			if int64(int8(v)) != v {
-				return false
-			}
-		case s390x.ACMPU:
-			if int64(uint8(v)) != v {
-				return false
-			}
-		}
-
-		cleani += 2
-		reg := &clean[cleani-1]
-		reg1 := &clean[cleani-2]
-		reg.Op = gc.OEMPTY
-		reg1.Op = gc.OEMPTY
-		gc.Naddr(a, n)
-		return true
-
-	case gc.ODOT,
-		gc.ODOTPTR:
-		cleani += 2
-		reg := &clean[cleani-1]
-		reg1 := &clean[cleani-2]
-		reg.Op = gc.OEMPTY
-		reg1.Op = gc.OEMPTY
-		var nn *gc.Node
-		var oary [10]int64
-		o := gc.Dotoffset(n, oary[:], &nn)
-		if nn == nil {
-			sudoclean()
-			return false
-		}
-
-		if nn.Addable && o == 1 && oary[0] >= 0 {
-			// directly addressable set of DOTs
-			n1 := *nn
-
-			n1.Type = n.Type
-			n1.Xoffset += oary[0]
-			// check that the offset fits into a 12-bit displacement
-			if n1.Xoffset < 0 || n1.Xoffset >= (1<<12)-8 {
-				sudoclean()
-				return false
-			}
-			gc.Naddr(a, &n1)
-			return true
-		}
-
-		gc.Regalloc(reg, gc.Types[gc.Tptr], nil)
-		n1 := *reg
-		n1.Op = gc.OINDREG
-		if oary[0] >= 0 {
-			gc.Agen(nn, reg)
-			n1.Xoffset = oary[0]
-		} else {
-			gc.Cgen(nn, reg)
-			gc.Cgen_checknil(reg)
-			n1.Xoffset = -(oary[0] + 1)
-		}
-
-		for i := 1; i < o; i++ {
-			if oary[i] >= 0 {
-				gc.Fatalf("can't happen")
-			}
-			gins(s390x.AMOVD, &n1, reg)
-			gc.Cgen_checknil(reg)
-			n1.Xoffset = -(oary[i] + 1)
-		}
-
-		a.Type = obj.TYPE_NONE
-		a.Index = 0
-		// check that the offset fits into a 12-bit displacement
-		if n1.Xoffset < 0 || n1.Xoffset >= (1<<12)-8 {
-			tmp := n1
-			tmp.Op = gc.OREGISTER
-			tmp.Type = gc.Types[gc.Tptr]
-			tmp.Xoffset = 0
-			gc.Cgen_checknil(&tmp)
-			ginscon(s390x.AADD, n1.Xoffset, &tmp)
-			n1.Xoffset = 0
-		}
-		gc.Naddr(a, &n1)
-		return true
-	}
-
-	return false
-}
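
Twice above, sudoaddable re-checks that a folded offset still fits the 12-bit
unsigned displacement field of a base+displacement operand, keeping 8 bytes of
headroom so the last word of a wide access stays in range. The check itself, as
a tiny sketch:

	package main

	import "fmt"

	// fitsBD reports whether off is encodable as a base+displacement
	// operand: displacements are 12-bit unsigned, minus an 8-byte margin.
	func fitsBD(off int64) bool {
		return off >= 0 && off < (1<<12)-8
	}

	func main() {
		fmt.Println(fitsBD(4000)) // true
		fmt.Println(fitsBD(4090)) // false: 4090 >= 4096-8
		fmt.Println(fitsBD(-8))   // false: displacement is unsigned
	}
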
diff --git a/src/cmd/compile/internal/s390x/peep.go b/src/cmd/compile/internal/s390x/peep.go
deleted file mode 100644
index 6400f61..0000000
--- a/src/cmd/compile/internal/s390x/peep.go
+++ /dev/null
@@ -1,1664 +0,0 @@
-// Derived from Inferno utils/6c/peep.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/6c/peep.c
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors.  All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package s390x
-
-import (
-	"cmd/compile/internal/gc"
-	"cmd/internal/obj"
-	"cmd/internal/obj/s390x"
-	"fmt"
-)
-
-type usage int
-
-const (
-	_None          usage = iota // no usage found
-	_Read                       // only read from
-	_ReadWriteSame              // both read from and written to in a single operand
-	_Write                      // only written to
-	_ReadWriteDiff              // both read from and written to in different operands
-)
-
-var gactive uint32
-
-func peep(firstp *obj.Prog) {
-	g := gc.Flowstart(firstp, nil)
-	if g == nil {
-		return
-	}
-	gactive = 0
-
-	run := func(name string, pass func(r *gc.Flow) int) int {
-		n := pass(g.Start)
-		if gc.Debug['P'] != 0 {
-			fmt.Println(name, ":", n)
-		}
-		if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
-			gc.Dumpit(name, g.Start, 0)
-		}
-		return n
-	}
-
-	for {
-		n := 0
-		n += run("constant propagation", constantPropagation)
-		n += run("copy propagation", copyPropagation)
-		n += run("cast propagation", castPropagation)
-		n += run("remove load-hit-stores", removeLoadHitStores)
-		n += run("dead code elimination", deadCodeElimination)
-		if n == 0 {
-			break
-		}
-	}
-	run("fuse op moves", fuseOpMoves)
-	run("fuse clears", fuseClear)
-	run("load pipelining", loadPipelining)
-	run("fuse compare branch", fuseCompareBranch)
-	run("simplify ops", simplifyOps)
-	run("dead code elimination", deadCodeElimination)
-
-	// TODO(mundaym): load/store multiple aren't currently handled by copyu
-	// so this pass must be last.
-	run("fuse multiple", fuseMultiple)
-
-	gc.Flowend(g)
-}
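
The driver above runs the cleanup passes to a fixed point: each pass reports how
many rewrites it made, and the outer loop repeats until a full round changes
nothing. A generic sketch of that shape (placeholder pass, not real flow-graph
code):

	package main

	import "fmt"

	// runToFixpoint applies every pass each round until one whole round
	// makes zero changes, mirroring peep's outer for loop.
	func runToFixpoint(passes []func() int) {
		for round := 1; ; round++ {
			n := 0
			for _, pass := range passes {
				n += pass()
			}
			fmt.Printf("round %d: %d changes\n", round, n)
			if n == 0 {
				return
			}
		}
	}

	func main() {
		// A toy pass that "optimizes" a counter down to zero.
		work := 3
		shrink := func() int {
			if work == 0 {
				return 0
			}
			work--
			return 1
		}
		runToFixpoint([]func() int{shrink})
	}
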
-
-func pushback(r0 *gc.Flow) {
-	var r *gc.Flow
-
-	var b *gc.Flow
-	p0 := r0.Prog
-	for r = gc.Uniqp(r0); r != nil && gc.Uniqs(r) != nil; r = gc.Uniqp(r) {
-		p := r.Prog
-		if p.As != obj.ANOP {
-			if !(isReg(&p.From) || isConst(&p.From)) || !isReg(&p.To) {
-				break
-			}
-			if copyu(p, &p0.To, nil) != _None || copyu(p0, &p.To, nil) != _None {
-				break
-			}
-		}
-
-		if p.As == obj.ACALL {
-			break
-		}
-		b = r
-	}
-
-	if b == nil {
-		if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
-			fmt.Printf("no pushback: %v\n", r0.Prog)
-			if r != nil {
-				fmt.Printf("\t%v [%v]\n", r.Prog, gc.Uniqs(r) != nil)
-			}
-		}
-
-		return
-	}
-
-	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
-		fmt.Printf("pushback\n")
-		for r := b; ; r = r.Link {
-			fmt.Printf("\t%v\n", r.Prog)
-			if r == r0 {
-				break
-			}
-		}
-	}
-
-	t := *r0.Prog
-	for r = gc.Uniqp(r0); ; r = gc.Uniqp(r) {
-		p0 = r.Link.Prog
-		p := r.Prog
-		p0.As = p.As
-		p0.Lineno = p.Lineno
-		p0.From = p.From
-		p0.To = p.To
-		p0.From3 = p.From3
-		p0.Reg = p.Reg
-		p0.RegTo2 = p.RegTo2
-		if r == b {
-			break
-		}
-	}
-
-	p0 = r.Prog
-	p0.As = t.As
-	p0.Lineno = t.Lineno
-	p0.From = t.From
-	p0.To = t.To
-	p0.From3 = t.From3
-	p0.Reg = t.Reg
-	p0.RegTo2 = t.RegTo2
-
-	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
-		fmt.Printf("\tafter\n")
-		for r := b; ; r = r.Link {
-			fmt.Printf("\t%v\n", r.Prog)
-			if r == r0 {
-				break
-			}
-		}
-	}
-}
-
-// excise replaces the given instruction with a NOP and clears
-// its operands.
-func excise(r *gc.Flow) {
-	p := r.Prog
-	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
-		fmt.Printf("%v ===delete===\n", p)
-	}
-	obj.Nopout(p)
-	gc.Ostats.Ndelmov++
-}
-
-// isZero returns true if a is either the constant 0 or the register
-// REGZERO.
-func isZero(a *obj.Addr) bool {
-	if a.Type == obj.TYPE_CONST && a.Offset == 0 {
-		return true
-	}
-	if a.Type == obj.TYPE_REG && a.Reg == s390x.REGZERO {
-		return true
-	}
-	return false
-}
-
-// isReg returns true if a is a general purpose or floating point
-// register (GPR or FPR).
-//
-// TODO(mundaym): currently this excludes REGZERO, but not other
-// special registers.
-func isReg(a *obj.Addr) bool {
-	return a.Type == obj.TYPE_REG &&
-		s390x.REG_R0 <= a.Reg &&
-		a.Reg <= s390x.REG_F15 &&
-		a.Reg != s390x.REGZERO
-}
-
-// isGPR returns true if a is a general purpose register (GPR).
-// REGZERO is treated as a GPR.
-func isGPR(a *obj.Addr) bool {
-	return a.Type == obj.TYPE_REG &&
-		s390x.REG_R0 <= a.Reg &&
-		a.Reg <= s390x.REG_R15
-}
-
-// isFPR returns true if a is a floating point register (FPR).
-func isFPR(a *obj.Addr) bool {
-	return a.Type == obj.TYPE_REG &&
-		s390x.REG_F0 <= a.Reg &&
-		a.Reg <= s390x.REG_F15
-}
-
-// isConst returns true if a refers to a constant (integer or
-// floating point; strings are not currently handled).
-func isConst(a *obj.Addr) bool {
-	return a.Type == obj.TYPE_CONST || a.Type == obj.TYPE_FCONST
-}
-
-// isBDMem returns true if a refers to a memory location addressable by a
-// base register (B) and a displacement (D), such as:
-// 	x+8(R1)
-// and
-//	0(R10)
-// It returns false if the address contains an index register (X) such as:
-// 	16(R1)(R2*1)
-// or if a relocation is required.
-func isBDMem(a *obj.Addr) bool {
-	return a.Type == obj.TYPE_MEM &&
-		a.Index == 0 &&
-		(a.Name == obj.NAME_NONE || a.Name == obj.NAME_AUTO || a.Name == obj.NAME_PARAM)
-}
-
-// the idea is to substitute
-// one register for another
-// from one MOV to another
-//	MOV	a, R1
-//	ADD	b, R1	/ no use of R2
-//	MOV	R1, R2
-// would be converted to
-//	MOV	a, R2
-//	ADD	b, R2
-//	MOV	R2, R1
-// hopefully, then the former or latter MOV
-// will be eliminated by copy propagation.
-//
-// r0 (the argument, not the register) is the MOV at the end of the
-// above sequences. subprop returns true if it modified any instructions.
-func subprop(r0 *gc.Flow) bool {
-	p := r0.Prog
-	v1 := &p.From
-	if !isReg(v1) {
-		return false
-	}
-	v2 := &p.To
-	if !isReg(v2) {
-		return false
-	}
-	cast := false
-	switch p.As {
-	case s390x.AMOVW, s390x.AMOVWZ,
-		s390x.AMOVH, s390x.AMOVHZ,
-		s390x.AMOVB, s390x.AMOVBZ:
-		cast = true
-	}
-	for r := gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
-		if gc.Uniqs(r) == nil {
-			break
-		}
-		p = r.Prog
-		switch copyu(p, v1, nil) {
-		case _Write, _ReadWriteDiff:
-			if p.As == obj.ACALL {
-				return false
-			}
-			if (!cast || p.As == r0.Prog.As) && p.To.Type == v1.Type && p.To.Reg == v1.Reg {
-				copysub(&p.To, v1, v2)
-				for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
-					p = r.Prog
-					copysub(&p.From, v1, v2)
-					copysub1(p, v1, v2)
-					copysub(&p.To, v1, v2)
-				}
-				v1.Reg, v2.Reg = v2.Reg, v1.Reg
-				return true
-			}
-			if cast {
-				return false
-			}
-		case _ReadWriteSame:
-			if cast {
-				return false
-			}
-		}
-		if copyu(p, v2, nil) != _None {
-			return false
-		}
-	}
-	return false
-}
-
-// The idea is to remove redundant copies.
-//     v1->v2  F=0
-//     (use v2 s/v2/v1/)*
-//     set v1  F=1
-//     use v2  return fail (v1->v2 move must remain)
-//     -----------------
-//     v1->v2  F=0
-//     (use v2 s/v2/v1/)*
-//     set v1  F=1
-//     set v2  return success (caller can remove v1->v2 move)
-func copyprop(r *gc.Flow) bool {
-	p := r.Prog
-
-	canSub := false
-	switch p.As {
-	case s390x.AFMOVS, s390x.AFMOVD, s390x.AMOVD:
-		canSub = true
-	default:
-		for rr := gc.Uniqp(r); rr != nil; rr = gc.Uniqp(rr) {
-			if gc.Uniqs(rr) == nil {
-				break
-			}
-			switch copyu(rr.Prog, &p.From, nil) {
-			case _Read, _None:
-				continue
-			}
-			// write
-			if rr.Prog.As == p.As {
-				canSub = true
-			}
-			break
-		}
-	}
-	if !canSub {
-		return false
-	}
-	if copyas(&p.From, &p.To) {
-		return true
-	}
-
-	gactive++
-	return copy1(&p.From, &p.To, r.S1, 0)
-}
-
-// copy1 replaces uses of v2 with v1 starting at r and returns true if
-// all uses were rewritten.
-func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) bool {
-	if uint32(r.Active) == gactive {
-		return true
-	}
-	r.Active = int32(gactive)
-	for ; r != nil; r = r.S1 {
-		p := r.Prog
-		if f == 0 && gc.Uniqp(r) == nil {
-			// Multiple predecessors; conservatively
-			// assume v1 was set on other path
-			f = 1
-		}
-		t := copyu(p, v2, nil)
-		switch t {
-		case _ReadWriteSame:
-			return false
-		case _Write:
-			return true
-		case _Read, _ReadWriteDiff:
-			if f != 0 {
-				return false
-			}
-			if copyu(p, v2, v1) != 0 {
-				return false
-			}
-			if t == _ReadWriteDiff {
-				return true
-			}
-		}
-		if f == 0 {
-			switch copyu(p, v1, nil) {
-			case _ReadWriteSame, _ReadWriteDiff, _Write:
-				f = 1
-			}
-		}
-		if r.S2 != nil {
-			if !copy1(v1, v2, r.S2, f) {
-				return false
-			}
-		}
-	}
-	return true
-}
-
-// If s==nil, copyu returns the set/use of v in p; otherwise, it
-// modifies p to replace reads of v with reads of s and returns 0 for
-// success or non-zero for failure.
-//
-// If s==nil, copyu returns one of the following values:
-// 	_Read           if v only used
-//	_ReadWriteSame  if v is set and used in one address (read-alter-rewrite;
-// 	                can't substitute)
-//	_Write          if v is only set
-//	_ReadWriteDiff  if v is set in one address and used in another (so addresses
-// 	                can be rewritten independently)
-//	_None           otherwise (not touched)
-func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) usage {
-	if p.From3Type() != obj.TYPE_NONE && p.From3Type() != obj.TYPE_CONST {
-		// Currently we never generate a From3 with anything other than a constant in it.
-		fmt.Printf("copyu: From3 (%v) not implemented\n", gc.Ctxt.Dconv(p.From3))
-	}
-
-	switch p.As {
-	default:
-		fmt.Printf("copyu: can't find %v\n", p.As)
-		return _ReadWriteSame
-
-	case // read p.From, write p.To
-		s390x.AMOVH,
-		s390x.AMOVHZ,
-		s390x.AMOVB,
-		s390x.AMOVBZ,
-		s390x.AMOVW,
-		s390x.AMOVWZ,
-		s390x.AMOVD,
-		s390x.ANEG,
-		s390x.AADDME,
-		s390x.AADDZE,
-		s390x.ASUBME,
-		s390x.ASUBZE,
-		s390x.AFMOVS,
-		s390x.AFMOVD,
-		s390x.ALEDBR,
-		s390x.AFNEG,
-		s390x.ALDEBR,
-		s390x.ACLFEBR,
-		s390x.ACLGEBR,
-		s390x.ACLFDBR,
-		s390x.ACLGDBR,
-		s390x.ACFEBRA,
-		s390x.ACGEBRA,
-		s390x.ACFDBRA,
-		s390x.ACGDBRA,
-		s390x.ACELFBR,
-		s390x.ACELGBR,
-		s390x.ACDLFBR,
-		s390x.ACDLGBR,
-		s390x.ACEFBRA,
-		s390x.ACEGBRA,
-		s390x.ACDFBRA,
-		s390x.ACDGBRA,
-		s390x.AFSQRT:
-
-		if s != nil {
-			copysub(&p.From, v, s)
-
-			// Update only indirect uses of v in p.To
-			if !copyas(&p.To, v) {
-				copysub(&p.To, v, s)
-			}
-			return _None
-		}
-
-		if copyas(&p.To, v) {
-			// Fix up implicit from
-			if p.From.Type == obj.TYPE_NONE {
-				p.From = p.To
-			}
-			if copyau(&p.From, v) {
-				return _ReadWriteDiff
-			}
-			return _Write
-		}
-
-		if copyau(&p.From, v) {
-			return _Read
-		}
-		if copyau(&p.To, v) {
-			// p.To only indirectly uses v
-			return _Read
-		}
-
-		return _None
-
-	// read p.From, read p.Reg, write p.To
-	case s390x.AADD,
-		s390x.AADDC,
-		s390x.AADDE,
-		s390x.ASUB,
-		s390x.ASLW,
-		s390x.ASRW,
-		s390x.ASRAW,
-		s390x.ASLD,
-		s390x.ASRD,
-		s390x.ASRAD,
-		s390x.ARLL,
-		s390x.ARLLG,
-		s390x.AOR,
-		s390x.AORN,
-		s390x.AAND,
-		s390x.AANDN,
-		s390x.ANAND,
-		s390x.ANOR,
-		s390x.AXOR,
-		s390x.AMULLW,
-		s390x.AMULLD,
-		s390x.AMULHD,
-		s390x.AMULHDU,
-		s390x.ADIVW,
-		s390x.ADIVD,
-		s390x.ADIVWU,
-		s390x.ADIVDU,
-		s390x.AFADDS,
-		s390x.AFADD,
-		s390x.AFSUBS,
-		s390x.AFSUB,
-		s390x.AFMULS,
-		s390x.AFMUL,
-		s390x.AFDIVS,
-		s390x.AFDIV:
-		if s != nil {
-			copysub(&p.From, v, s)
-			copysub1(p, v, s)
-
-			// Update only indirect uses of v in p.To
-			if !copyas(&p.To, v) {
-				copysub(&p.To, v, s)
-			}
-		}
-
-		if copyas(&p.To, v) {
-			if p.Reg == 0 {
-				p.Reg = p.To.Reg
-			}
-			if copyau(&p.From, v) || copyau1(p, v) {
-				return _ReadWriteDiff
-			}
-			return _Write
-		}
-
-		if copyau(&p.From, v) {
-			return _Read
-		}
-		if copyau1(p, v) {
-			return _Read
-		}
-		if copyau(&p.To, v) {
-			return _Read
-		}
-		return _None
-
-	case s390x.ABEQ,
-		s390x.ABGT,
-		s390x.ABGE,
-		s390x.ABLT,
-		s390x.ABLE,
-		s390x.ABNE,
-		s390x.ABVC,
-		s390x.ABVS:
-		return _None
-
-	case obj.ACHECKNIL, // read p.From
-		s390x.ACMP, // read p.From, read p.To
-		s390x.ACMPU,
-		s390x.ACMPW,
-		s390x.ACMPWU,
-		s390x.AFCMPO,
-		s390x.AFCMPU,
-		s390x.ACEBR,
-		s390x.AMVC,
-		s390x.ACLC,
-		s390x.AXC,
-		s390x.AOC,
-		s390x.ANC:
-		if s != nil {
-			copysub(&p.From, v, s)
-			copysub(&p.To, v, s)
-			return _None
-		}
-
-		if copyau(&p.From, v) {
-			return _Read
-		}
-		if copyau(&p.To, v) {
-			return _Read
-		}
-		return _None
-
-	case s390x.ACMPBNE, s390x.ACMPBEQ,
-		s390x.ACMPBLT, s390x.ACMPBLE,
-		s390x.ACMPBGT, s390x.ACMPBGE,
-		s390x.ACMPUBNE, s390x.ACMPUBEQ,
-		s390x.ACMPUBLT, s390x.ACMPUBLE,
-		s390x.ACMPUBGT, s390x.ACMPUBGE:
-		if s != nil {
-			copysub(&p.From, v, s)
-			copysub1(p, v, s)
-			return _None
-		}
-		if copyau(&p.From, v) {
-			return _Read
-		}
-		if copyau1(p, v) {
-			return _Read
-		}
-		return _None
-
-	case s390x.ACLEAR:
-		if s != nil {
-			copysub(&p.To, v, s)
-			return _None
-		}
-		if copyau(&p.To, v) {
-			return _Read
-		}
-		return _None
-
-	// go never generates a branch to a GPR
-	// read p.To
-	case s390x.ABR:
-		if s != nil {
-			copysub(&p.To, v, s)
-			return _None
-		}
-
-		if copyau(&p.To, v) {
-			return _Read
-		}
-		return _None
-
-	case obj.ARET, obj.AUNDEF:
-		if s != nil {
-			return _None
-		}
-
-		// All registers die at this point, so claim
-		// everything is set (and not used).
-		return _Write
-
-	case s390x.ABL:
-		if v.Type == obj.TYPE_REG {
-			if s390x.REGARG != -1 && v.Reg == s390x.REGARG {
-				return _ReadWriteSame
-			}
-			if p.From.Type == obj.TYPE_REG && p.From.Reg == v.Reg {
-				return _ReadWriteSame
-			}
-			if v.Reg == s390x.REGZERO {
-				// Deliberately inserted nops set R0.
-				return _ReadWriteSame
-			}
-			if v.Reg == s390x.REGCTXT {
-				// Context register for closures.
-				// TODO(mundaym): not sure if we need to exclude this.
-				return _ReadWriteSame
-			}
-		}
-		if s != nil {
-			copysub(&p.To, v, s)
-			return _None
-		}
-		if copyau(&p.To, v) {
-			return _ReadWriteDiff
-		}
-		return _Write
-
-	case obj.ATEXT:
-		if v.Type == obj.TYPE_REG {
-			if v.Reg == s390x.REGARG {
-				return _Write
-			}
-		}
-		return _None
-
-	case obj.APCDATA,
-		obj.AFUNCDATA,
-		obj.AVARDEF,
-		obj.AVARKILL,
-		obj.AVARLIVE,
-		obj.AUSEFIELD,
-		obj.ANOP:
-		return _None
-	}
-}
-
-// copyas returns true if a and v address the same register.
-//
-// If a is the from operand, this means this operation reads the
-// register in v.  If a is the to operand, this means this operation
-// writes the register in v.
-func copyas(a *obj.Addr, v *obj.Addr) bool {
-	if isReg(v) {
-		if a.Type == v.Type {
-			if a.Reg == v.Reg {
-				return true
-			}
-		}
-	}
-	return false
-}
-
-// copyau returns true if a either directly or indirectly addresses the
-// same register as v.
-//
-// If a is the from operand, this means this operation reads the
-// register in v.  If a is the to operand, this means the operation
-// either reads or writes the register in v (if !copyas(a, v), then
-// the operation reads the register in v).
-func copyau(a *obj.Addr, v *obj.Addr) bool {
-	if copyas(a, v) {
-		return true
-	}
-	if v.Type == obj.TYPE_REG {
-		if a.Type == obj.TYPE_MEM || (a.Type == obj.TYPE_ADDR && a.Reg != 0) {
-			if v.Reg == a.Reg {
-				return true
-			}
-		}
-	}
-	return false
-}
-
-// copyau1 returns true if p.Reg references the same register as v and v
-// is a direct reference.
-func copyau1(p *obj.Prog, v *obj.Addr) bool {
-	if isReg(v) && v.Reg != 0 {
-		if p.Reg == v.Reg {
-			return true
-		}
-	}
-	return false
-}
-
-// copysub replaces v.Reg with s.Reg if a.Reg and v.Reg are direct
-// references to the same register.
-func copysub(a, v, s *obj.Addr) {
-	if copyau(a, v) {
-		a.Reg = s.Reg
-	}
-}
-
-// copysub1 replaces p.Reg with s.Reg if p.Reg and v.Reg are direct
-// references to the same register.
-func copysub1(p *obj.Prog, v, s *obj.Addr) {
-	if copyau1(p, v) {
-		p.Reg = s.Reg
-	}
-}
-
-func sameaddr(a *obj.Addr, v *obj.Addr) bool {
-	if a.Type != v.Type {
-		return false
-	}
-	if isReg(v) && a.Reg == v.Reg {
-		return true
-	}
-	if v.Type == obj.NAME_AUTO || v.Type == obj.NAME_PARAM {
-		// TODO(mundaym): is the offset enough here? Node?
-		if v.Offset == a.Offset {
-			return true
-		}
-	}
-	return false
-}
-
-func smallindir(a *obj.Addr, reg *obj.Addr) bool {
-	return reg.Type == obj.TYPE_REG &&
-		a.Type == obj.TYPE_MEM &&
-		a.Reg == reg.Reg &&
-		0 <= a.Offset && a.Offset < 4096
-}
-
-func stackaddr(a *obj.Addr) bool {
-	// TODO(mundaym): the name implies this should check
-	// for TYPE_ADDR with a base register REGSP.
-	return a.Type == obj.TYPE_REG && a.Reg == s390x.REGSP
-}
-
-// isMove returns true if p is a move. Moves may imply
-// sign/zero extension.
-func isMove(p *obj.Prog) bool {
-	switch p.As {
-	case s390x.AMOVD,
-		s390x.AMOVW, s390x.AMOVWZ,
-		s390x.AMOVH, s390x.AMOVHZ,
-		s390x.AMOVB, s390x.AMOVBZ,
-		s390x.AFMOVD, s390x.AFMOVS:
-		return true
-	}
-	return false
-}
-
-// isLoad returns true if p is a move from memory to a register.
-func isLoad(p *obj.Prog) bool {
-	if !isMove(p) {
-		return false
-	}
-	if !(isGPR(&p.To) || isFPR(&p.To)) {
-		return false
-	}
-	if p.From.Type != obj.TYPE_MEM {
-		return false
-	}
-	return true
-}
-
-// isStore returns true if p is a move from a register to memory.
-func isStore(p *obj.Prog) bool {
-	if !isMove(p) {
-		return false
-	}
-	if !(isGPR(&p.From) || isFPR(&p.From) || isConst(&p.From)) {
-		return false
-	}
-	if p.To.Type != obj.TYPE_MEM {
-		return false
-	}
-	return true
-}
-
-// sameStackMem returns true if a and b are both memory operands
-// and address the same location which must reside on the stack.
-func sameStackMem(a, b *obj.Addr) bool {
-	if a.Type != obj.TYPE_MEM ||
-		b.Type != obj.TYPE_MEM ||
-		a.Name != b.Name ||
-		a.Sym != b.Sym ||
-		a.Node != b.Node ||
-		a.Reg != b.Reg ||
-		a.Index != b.Index ||
-		a.Offset != b.Offset {
-		return false
-	}
-	switch a.Name {
-	case obj.NAME_NONE:
-		return a.Reg == s390x.REGSP
-	case obj.NAME_PARAM, obj.NAME_AUTO:
-		// params and autos are always on the stack
-		return true
-	}
-	return false
-}
-
-// removeLoadHitStores tries to remove loads that take place
-// immediately after a store to the same location. Returns
-// the number of load-hit-stores removed.
-//
-// For example:
-// 	MOVD	R1, 0(R15)
-// 	MOVD	0(R15), R2
-// Would become:
-// 	MOVD	R1, 0(R15)
-// 	MOVD	R1, R2
-func removeLoadHitStores(r *gc.Flow) int {
-	n := 0
-	for ; r != nil; r = r.Link {
-		p := r.Prog
-		if !isStore(p) {
-			continue
-		}
-		for rr := gc.Uniqs(r); rr != nil; rr = gc.Uniqs(rr) {
-			pp := rr.Prog
-			if gc.Uniqp(rr) == nil {
-				break
-			}
-			if pp.As == obj.ANOP {
-				continue
-			}
-			if isLoad(pp) && sameStackMem(&p.To, &pp.From) {
-				if size(p.As) >= size(pp.As) && isGPR(&p.From) == isGPR(&pp.To) {
-					pp.From = p.From
-					n++
-				}
-			}
-			if !isMove(pp) || isStore(pp) {
-				break
-			}
-			if copyau(&p.From, &pp.To) {
-				break
-			}
-		}
-	}
-	return n
-}
-
-// size returns the width of the given move.
-func size(as obj.As) int {
-	switch as {
-	case s390x.AMOVD, s390x.AFMOVD:
-		return 8
-	case s390x.AMOVW, s390x.AMOVWZ, s390x.AFMOVS:
-		return 4
-	case s390x.AMOVH, s390x.AMOVHZ:
-		return 2
-	case s390x.AMOVB, s390x.AMOVBZ:
-		return 1
-	}
-	return -1
-}
-
-// castPropagation tries to eliminate unnecessary casts.
-//
-// For example:
-// 	MOVHZ	R1, R2     // uint16
-//	MOVB	R2, 0(R15) // int8
-// Can be simplified to:
-//	MOVB	R1, 0(R15)
-func castPropagation(r *gc.Flow) int {
-	n := 0
-	for ; r != nil; r = r.Link {
-		p := r.Prog
-		if !isMove(p) || !isGPR(&p.To) {
-			continue
-		}
-
-		// r is a move with a destination register
-		var move *gc.Flow
-		for rr := gc.Uniqs(r); rr != nil; rr = gc.Uniqs(rr) {
-			if gc.Uniqp(rr) == nil {
-				// branch target: leave alone
-				break
-			}
-			pp := rr.Prog
-			if isMove(pp) && copyas(&pp.From, &p.To) {
-				if pp.To.Type == obj.TYPE_MEM {
-					if p.From.Type == obj.TYPE_MEM ||
-						p.From.Type == obj.TYPE_ADDR {
-						break
-					}
-					if p.From.Type == obj.TYPE_CONST &&
-						int64(int16(p.From.Offset)) != p.From.Offset {
-						break
-					}
-				}
-				move = rr
-				break
-			}
-			if pp.As == obj.ANOP {
-				continue
-			}
-			break
-		}
-		if move == nil {
-			continue
-		}
-
-		// we have a move that reads from our destination reg, check if any future
-		// instructions also read from the reg
-		mp := move.Prog
-		if !copyas(&mp.From, &mp.To) {
-			safe := false
-			for rr := gc.Uniqs(move); rr != nil; rr = gc.Uniqs(rr) {
-				if gc.Uniqp(rr) == nil {
-					break
-				}
-				switch copyu(rr.Prog, &p.To, nil) {
-				case _None:
-					continue
-				case _Write:
-					safe = true
-				}
-				break
-			}
-			if !safe {
-				continue
-			}
-		}
-
-		// at this point we have something like:
-		// MOV* const/mem/reg, reg
-		// MOV* reg, reg/mem
-		// now check if this is a cast that cannot be forward propagated
-		execute := false
-		if p.As == mp.As || isZero(&p.From) || size(p.As) == size(mp.As) {
-			execute = true
-		} else if isGPR(&p.From) && size(p.As) >= size(mp.As) {
-			execute = true
-		}
-
-		if execute {
-			mp.From = p.From
-			excise(r)
-			n++
-		}
-	}
-	return n
-}
-
-// fuseClear merges memory clear operations.
-//
-// Looks for this pattern (sequence of clears):
-// 	MOVD	R0, n(R15)
-// 	MOVD	R0, n+8(R15)
-// 	MOVD	R0, n+16(R15)
-// Replaces with:
-//	CLEAR	$24, n(R15)
-func fuseClear(r *gc.Flow) int {
-	n := 0
-	var align int64
-	var clear *obj.Prog
-	for ; r != nil; r = r.Link {
-		// If there is a branch into the instruction stream then
-		// we can't fuse into previous instructions.
-		if gc.Uniqp(r) == nil {
-			clear = nil
-		}
-
-		p := r.Prog
-		if p.As == obj.ANOP {
-			continue
-		}
-		if p.As == s390x.AXC {
-			if p.From.Reg == p.To.Reg && p.From.Offset == p.To.Offset {
-				// TODO(mundaym): merge clears?
-				p.As = s390x.ACLEAR
-				p.From.Offset = p.From3.Offset
-				p.From3 = nil
-				p.From.Type = obj.TYPE_CONST
-				p.From.Reg = 0
-				clear = p
-			} else {
-				clear = nil
-			}
-			continue
-		}
-
-		// Is our source a constant zero?
-		if !isZero(&p.From) {
-			clear = nil
-			continue
-		}
-
-		// Are we moving to memory?
-		if p.To.Type != obj.TYPE_MEM ||
-			p.To.Index != 0 ||
-			p.To.Offset >= 4096 ||
-			!(p.To.Name == obj.NAME_NONE || p.To.Name == obj.NAME_AUTO || p.To.Name == obj.NAME_PARAM) {
-			clear = nil
-			continue
-		}
-
-		size := int64(0)
-		switch p.As {
-		default:
-			clear = nil
-			continue
-		case s390x.AMOVB, s390x.AMOVBZ:
-			size = 1
-		case s390x.AMOVH, s390x.AMOVHZ:
-			size = 2
-		case s390x.AMOVW, s390x.AMOVWZ:
-			size = 4
-		case s390x.AMOVD:
-			size = 8
-		}
-
-		// doubleword-aligned clears should be kept doubleword
-		// aligned
-		if (size == 8 && align != 8) || (size != 8 && align == 8) {
-			clear = nil
-		}
-
-		if clear != nil &&
-			clear.To.Reg == p.To.Reg &&
-			clear.To.Name == p.To.Name &&
-			clear.To.Node == p.To.Node &&
-			clear.To.Sym == p.To.Sym {
-
-			min := clear.To.Offset
-			max := clear.To.Offset + clear.From.Offset
-
-			// previous clear is already clearing this region
-			if min <= p.To.Offset && max >= p.To.Offset+size {
-				excise(r)
-				n++
-				continue
-			}
-
-			// merge forwards
-			if max == p.To.Offset {
-				clear.From.Offset += size
-				excise(r)
-				n++
-				continue
-			}
-
-			// merge backwards
-			if min-size == p.To.Offset {
-				clear.From.Offset += size
-				clear.To.Offset -= size
-				excise(r)
-				n++
-				continue
-			}
-		}
-
-		// transform into clear
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = size
-		p.From.Reg = 0
-		p.As = s390x.ACLEAR
-		clear = p
-		align = size
-	}
-	return n
-}
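
The merge logic above is pure interval arithmetic over [offset, offset+size). The following is a minimal standalone Go sketch of that bookkeeping; clearOp and absorb are illustrative names, not compiler types:

package main

import "fmt"

// clearOp models a CLEAR $size, off(R15) instruction.
type clearOp struct{ off, size int64 }

// absorb reports whether c can absorb a zero store of n bytes at off,
// growing c as needed. It mirrors the three merge cases in fuseClear:
// already covered, merge forwards, merge backwards.
func absorb(c *clearOp, off, n int64) bool {
	min, max := c.off, c.off+c.size
	switch {
	case min <= off && off+n <= max: // region already cleared
		return true
	case off == max: // extend the clear forwards
		c.size += n
		return true
	case off+n == min: // extend the clear backwards
		c.off -= n
		c.size += n
		return true
	}
	return false
}

func main() {
	c := clearOp{off: 0, size: 8}     // CLEAR $8, 0(R15)
	fmt.Println(absorb(&c, 8, 8), c)  // true {0 16}
	fmt.Println(absorb(&c, 16, 8), c) // true {0 24} => CLEAR $24, 0(R15)
}
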
-
-// fuseMultiple merges memory loads and stores into load multiple and
-// store multiple operations.
-//
-// Looks for this pattern (sequence of loads or stores):
-// 	MOVD	R1, 0(R15)
-//	MOVD	R2, 8(R15)
-//	MOVD	R3, 16(R15)
-// Replaces with:
-//	STMG	R1, R3, 0(R15)
-func fuseMultiple(r *gc.Flow) int {
-	n := 0
-	var fused *obj.Prog
-	for ; r != nil; r = r.Link {
-		// If there is a branch into the instruction stream then
-		// we can't fuse into previous instructions.
-		if gc.Uniqp(r) == nil {
-			fused = nil
-		}
-
-		p := r.Prog
-
-		isStore := isGPR(&p.From) && isBDMem(&p.To)
-		isLoad := isGPR(&p.To) && isBDMem(&p.From)
-
-		// are we a candidate?
-		size := int64(0)
-		switch p.As {
-		default:
-			fused = nil
-			continue
-		case obj.ANOP:
-			// skip over nops
-			continue
-		case s390x.AMOVW, s390x.AMOVWZ:
-			size = 4
-			// TODO(mundaym): 32-bit load multiple is currently not supported
-			// as it requires sign/zero extension.
-			if !isStore {
-				fused = nil
-				continue
-			}
-		case s390x.AMOVD:
-			size = 8
-			if !isLoad && !isStore {
-				fused = nil
-				continue
-			}
-		}
-
-		// If we merge two loads/stores with different source/destination Nodes
-		// then we will lose a reference to the second Node, which means that the
-		// compiler might mark the Node as unused and free its slot on the stack.
-		// TODO(mundaym): allow this by adding a dummy reference to the Node.
-		if fused == nil ||
-			fused.From.Node != p.From.Node ||
-			fused.From.Type != p.From.Type ||
-			fused.To.Node != p.To.Node ||
-			fused.To.Type != p.To.Type {
-			fused = p
-			continue
-		}
-
-		// check two addresses
-		ca := func(a, b *obj.Addr, offset int64) bool {
-			return a.Reg == b.Reg && a.Offset+offset == b.Offset &&
-				a.Sym == b.Sym && a.Name == b.Name
-		}
-
-		switch fused.As {
-		default:
-			fused = p
-		case s390x.AMOVW, s390x.AMOVWZ:
-			if size == 4 && fused.From.Reg+1 == p.From.Reg && ca(&fused.To, &p.To, 4) {
-				fused.As = s390x.ASTMY
-				fused.Reg = p.From.Reg
-				excise(r)
-				n++
-			} else {
-				fused = p
-			}
-		case s390x.AMOVD:
-			if size == 8 && fused.From.Reg+1 == p.From.Reg && ca(&fused.To, &p.To, 8) {
-				fused.As = s390x.ASTMG
-				fused.Reg = p.From.Reg
-				excise(r)
-				n++
-			} else if size == 8 && fused.To.Reg+1 == p.To.Reg && ca(&fused.From, &p.From, 8) {
-				fused.As = s390x.ALMG
-				fused.Reg = fused.To.Reg
-				fused.To.Reg = p.To.Reg
-				excise(r)
-				n++
-			} else {
-				fused = p
-			}
-		case s390x.ASTMG, s390x.ASTMY:
-			if (fused.As == s390x.ASTMY && size != 4) ||
-				(fused.As == s390x.ASTMG && size != 8) {
-				fused = p
-				continue
-			}
-			offset := size * int64(fused.Reg-fused.From.Reg+1)
-			if fused.Reg+1 == p.From.Reg && ca(&fused.To, &p.To, offset) {
-				fused.Reg = p.From.Reg
-				excise(r)
-				n++
-			} else {
-				fused = p
-			}
-		case s390x.ALMG:
-			offset := 8 * int64(fused.To.Reg-fused.Reg+1)
-			if size == 8 && fused.To.Reg+1 == p.To.Reg && ca(&fused.From, &p.From, offset) {
-				fused.To.Reg = p.To.Reg
-				excise(r)
-				n++
-			} else {
-				fused = p
-			}
-		}
-	}
-	return n
-}
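
The adjacency test gating the fusion above is plain arithmetic on register numbers and stack offsets. A minimal sketch under an illustrative store type (not obj.Prog):

package main

import "fmt"

// store models "MOVD Rreg, offset(R15)".
type store struct {
	reg    int16
	offset int64
}

// fusible reports whether b extends a multi-store ending with a:
// the next register up, stored size bytes further along.
func fusible(a, b store, size int64) bool {
	return b.reg == a.reg+1 && b.offset == a.offset+size
}

func main() {
	a := store{reg: 1, offset: 0} // MOVD R1, 0(R15)
	b := store{reg: 2, offset: 8} // MOVD R2, 8(R15)
	fmt.Println(fusible(a, b, 8)) // true => STMG R1, R2, 0(R15)
}
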
-
-// simplifyOps looks for side-effect free ops that can be removed or
-// replaced with moves.
-//
-// For example:
-// 	XOR $0, R1 => NOP
-//	ADD $0, R1, R2 => MOVD R1, R2
-func simplifyOps(r *gc.Flow) int {
-	n := 0
-	for ; r != nil; r = r.Link {
-		p := r.Prog
-
-		// if the target is R0 then this is a required NOP
-		if isGPR(&p.To) && p.To.Reg == s390x.REGZERO {
-			continue
-		}
-
-		switch p.As {
-		case s390x.AADD, s390x.ASUB,
-			s390x.AOR, s390x.AXOR,
-			s390x.ASLW, s390x.ASRW, s390x.ASRAW,
-			s390x.ASLD, s390x.ASRD, s390x.ASRAD,
-			s390x.ARLL, s390x.ARLLG:
-			if isZero(&p.From) && isGPR(&p.To) {
-				if p.Reg == 0 || p.Reg == p.To.Reg {
-					excise(r)
-					n++
-				} else {
-					p.As = s390x.AMOVD
-					p.From.Type = obj.TYPE_REG
-					p.From.Reg = p.Reg
-					p.Reg = 0
-				}
-			}
-		case s390x.AMULLW, s390x.AAND:
-			if isZero(&p.From) && isGPR(&p.To) {
-				p.As = s390x.AMOVD
-				p.From.Type = obj.TYPE_REG
-				p.From.Reg = s390x.REGZERO
-				p.Reg = 0
-			}
-		}
-	}
-	return n
-}
-
-// fuseOpMoves looks for moves following 2-operand operations and tries to merge them into
-// a 3-operand operation.
-//
-// For example:
-//	ADD R1, R2
-//	MOVD R2, R3
-// might become
-//	ADD R1, R2, R3
-func fuseOpMoves(r *gc.Flow) int {
-	n := 0
-	for ; r != nil; r = r.Link {
-		p := r.Prog
-		switch p.As {
-		case s390x.AADD:
-		case s390x.ASUB:
-			if isConst(&p.From) && int64(int16(p.From.Offset)) != p.From.Offset {
-				continue
-			}
-		case s390x.ASLW,
-			s390x.ASRW,
-			s390x.ASRAW,
-			s390x.ASLD,
-			s390x.ASRD,
-			s390x.ASRAD,
-			s390x.ARLL,
-			s390x.ARLLG:
-			// ok - p.From will be a reg or a constant
-		case s390x.AOR,
-			s390x.AORN,
-			s390x.AAND,
-			s390x.AANDN,
-			s390x.ANAND,
-			s390x.ANOR,
-			s390x.AXOR,
-			s390x.AMULLW,
-			s390x.AMULLD:
-			if isConst(&p.From) {
-				// these instructions can either use the 3-register form
-				// or take an immediate, but not both
-				continue
-			}
-		default:
-			continue
-		}
-
-		if p.Reg != 0 && p.Reg != p.To.Reg {
-			continue
-		}
-
-		var move *gc.Flow
-		rr := gc.Uniqs(r)
-		for {
-			if rr == nil || gc.Uniqp(rr) == nil || rr == r {
-				break
-			}
-			pp := rr.Prog
-			switch copyu(pp, &p.To, nil) {
-			case _None:
-				rr = gc.Uniqs(rr)
-				continue
-			case _Read:
-				if move == nil && pp.As == s390x.AMOVD && isGPR(&pp.From) && isGPR(&pp.To) {
-					move = rr
-					rr = gc.Uniqs(rr)
-					continue
-				}
-			case _Write:
-				if move == nil {
-					// dead code
-					excise(r)
-					n++
-				} else {
-					for prev := gc.Uniqp(move); prev != r; prev = gc.Uniqp(prev) {
-						if copyu(prev.Prog, &move.Prog.To, nil) != 0 {
-							move = nil
-							break
-						}
-					}
-					if move == nil {
-						break
-					}
-					p.Reg, p.To.Reg = p.To.Reg, move.Prog.To.Reg
-					excise(move)
-					n++
-
-					// clean up
-					if p.From.Reg == p.To.Reg && isCommutative(p.As) {
-						p.From.Reg, p.Reg = p.Reg, 0
-					}
-					if p.To.Reg == p.Reg {
-						p.Reg = 0
-					}
-					// we could try again if p has become a 2-operand op
-					// but in testing nothing extra was extracted
-				}
-			}
-			break
-		}
-	}
-	return n
-}
-
-// isCommutative returns true if the order of input operands
-// does not affect the result. For example:
-//	x + y == y + x so ADD is commutative
-//	x ^ y == y ^ x so XOR is commutative
-func isCommutative(as obj.As) bool {
-	switch as {
-	case s390x.AADD,
-		s390x.AOR,
-		s390x.AAND,
-		s390x.AXOR,
-		s390x.AMULLW,
-		s390x.AMULLD:
-		return true
-	}
-	return false
-}
-
-// applyCast applies the cast implied by the given move
-// instruction to v and returns the result.
-func applyCast(cast obj.As, v int64) int64 {
-	switch cast {
-	case s390x.AMOVWZ:
-		return int64(uint32(v))
-	case s390x.AMOVHZ:
-		return int64(uint16(v))
-	case s390x.AMOVBZ:
-		return int64(uint8(v))
-	case s390x.AMOVW:
-		return int64(int32(v))
-	case s390x.AMOVH:
-		return int64(int16(v))
-	case s390x.AMOVB:
-		return int64(int8(v))
-	}
-	return v
-}
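
Each case above is an ordinary Go conversion. A tiny self-contained demonstration of what each mnemonic does to a value; the mnemonics appear in comments only and nothing from the s390x package is used:

package main

import "fmt"

func main() {
	v := int64(0x1FF) // 511: low byte 0xFF, bit 8 set
	fmt.Println(int64(uint8(v)))  // MOVBZ: zero-extend byte -> 255
	fmt.Println(int64(int8(v)))   // MOVB:  sign-extend byte -> -1
	fmt.Println(int64(uint16(v))) // MOVHZ: zero-extend half -> 511
	fmt.Println(int64(int16(v)))  // MOVH:  sign-extend half -> 511
}
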
-
-// constantPropagation removes redundant constant copies.
-func constantPropagation(r *gc.Flow) int {
-	n := 0
-	// find MOV $con,R followed by
-	// another MOV $con,R without
-	// setting R in the interim
-	for ; r != nil; r = r.Link {
-		p := r.Prog
-		if isMove(p) {
-			if !isReg(&p.To) {
-				continue
-			}
-			if !isConst(&p.From) {
-				continue
-			}
-		} else {
-			continue
-		}
-
-		rr := r
-		for {
-			rr = gc.Uniqs(rr)
-			if rr == nil || rr == r {
-				break
-			}
-			if gc.Uniqp(rr) == nil {
-				break
-			}
-
-			pp := rr.Prog
-			t := copyu(pp, &p.To, nil)
-			switch t {
-			case _None:
-				continue
-			case _Read:
-				if !isGPR(&pp.From) || !isMove(pp) {
-					continue
-				}
-				if p.From.Type == obj.TYPE_CONST {
-					v := applyCast(p.As, p.From.Offset)
-					if isGPR(&pp.To) {
-						if int64(int32(v)) == v || ((v>>32)<<32) == v {
-							pp.From.Reg = 0
-							pp.From.Offset = v
-							pp.From.Type = obj.TYPE_CONST
-							n++
-						}
-					} else if int64(int16(v)) == v {
-						pp.From.Reg = 0
-						pp.From.Offset = v
-						pp.From.Type = obj.TYPE_CONST
-						n++
-					}
-				}
-				continue
-			case _Write:
-				if p.As != pp.As || p.From.Type != pp.From.Type {
-					break
-				}
-				if p.From.Type == obj.TYPE_CONST && p.From.Offset == pp.From.Offset {
-					excise(rr)
-					n++
-					continue
-				} else if p.From.Type == obj.TYPE_FCONST {
-					if p.From.Val.(float64) == pp.From.Val.(float64) {
-						excise(rr)
-						n++
-						continue
-					}
-				}
-			}
-			break
-		}
-	}
-	return n
-}
-
-// copyPropagation tries to eliminate register-to-register moves.
-func copyPropagation(r *gc.Flow) int {
-	n := 0
-	for ; r != nil; r = r.Link {
-		p := r.Prog
-		if isMove(p) && isReg(&p.To) {
-			// Convert uses of $0 to uses of R0 and
-			// propagate R0
-			if isGPR(&p.To) && isZero(&p.From) {
-				p.From.Type = obj.TYPE_REG
-				p.From.Reg = s390x.REGZERO
-			}
-
-			// Try to eliminate reg->reg moves
-			if isGPR(&p.From) || isFPR(&p.From) {
-				if copyprop(r) || (subprop(r) && copyprop(r)) {
-					excise(r)
-					n++
-				}
-			}
-		}
-	}
-	return n
-}
-
-// loadPipelining pushes any load from memory as early as possible.
-func loadPipelining(r *gc.Flow) int {
-	for ; r != nil; r = r.Link {
-		p := r.Prog
-		if isLoad(p) {
-			pushback(r)
-		}
-	}
-	return 0
-}
-
-// fuseCompareBranch finds comparisons followed by a branch and converts
-// them into a compare-and-branch instruction (which avoids setting the
-// condition code).
-func fuseCompareBranch(r *gc.Flow) int {
-	n := 0
-	for ; r != nil; r = r.Link {
-		p := r.Prog
-		r1 := gc.Uniqs(r)
-		if r1 == nil {
-			continue
-		}
-		p1 := r1.Prog
-
-		var ins obj.As
-		switch p.As {
-		case s390x.ACMP:
-			switch p1.As {
-			case s390x.ABCL, s390x.ABC:
-				continue
-			case s390x.ABEQ:
-				ins = s390x.ACMPBEQ
-			case s390x.ABGE:
-				ins = s390x.ACMPBGE
-			case s390x.ABGT:
-				ins = s390x.ACMPBGT
-			case s390x.ABLE:
-				ins = s390x.ACMPBLE
-			case s390x.ABLT:
-				ins = s390x.ACMPBLT
-			case s390x.ABNE:
-				ins = s390x.ACMPBNE
-			default:
-				continue
-			}
-
-		case s390x.ACMPU:
-			switch p1.As {
-			case s390x.ABCL, s390x.ABC:
-				continue
-			case s390x.ABEQ:
-				ins = s390x.ACMPUBEQ
-			case s390x.ABGE:
-				ins = s390x.ACMPUBGE
-			case s390x.ABGT:
-				ins = s390x.ACMPUBGT
-			case s390x.ABLE:
-				ins = s390x.ACMPUBLE
-			case s390x.ABLT:
-				ins = s390x.ACMPUBLT
-			case s390x.ABNE:
-				ins = s390x.ACMPUBNE
-			default:
-				continue
-			}
-
-		case s390x.ACMPW, s390x.ACMPWU:
-			continue
-
-		default:
-			continue
-		}
-
-		if p1.To.Sym != nil {
-			continue
-		}
-
-		if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
-			fmt.Printf("cnb %v; %v  ", p, p1)
-		}
-
-		if p.To.Type == obj.TYPE_REG {
-			p1.As = ins
-			p1.From = p.From
-			p1.Reg = p.To.Reg
-			p1.From3 = nil
-		} else if p.To.Type == obj.TYPE_CONST {
-			switch p.As {
-			case s390x.ACMP, s390x.ACMPW:
-				if (p.To.Offset < -(1 << 7)) || (p.To.Offset >= ((1 << 7) - 1)) {
-					continue
-				}
-			case s390x.ACMPU, s390x.ACMPWU:
-				if p.To.Offset >= (1 << 8) {
-					continue
-				}
-			default:
-			}
-			p1.As = ins
-			p1.From = p.From
-			p1.Reg = 0
-			p1.From3 = new(obj.Addr)
-			*(p1.From3) = p.To
-		} else {
-			continue
-		}
-
-		if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
-			fmt.Printf("%v\n", p1)
-		}
-		excise(r)
-		n++
-	}
-	return n
-}
-
-// deadCodeElimination removes writes to registers which are written
-// to again before they are next read.
-func deadCodeElimination(r *gc.Flow) int {
-	n := 0
-	for ; r != nil; r = r.Link {
-		p := r.Prog
-		// Currently there are no instructions which write to multiple
-		// registers in copyu. This check will need to change if there
-		// ever are.
-		if !(isGPR(&p.To) || isFPR(&p.To)) || copyu(p, &p.To, nil) != _Write {
-			continue
-		}
-		for rr := gc.Uniqs(r); rr != nil; rr = gc.Uniqs(rr) {
-			t := copyu(rr.Prog, &p.To, nil)
-			if t == _None {
-				continue
-			}
-			if t == _Write {
-				excise(r)
-				n++
-			}
-			break
-		}
-	}
-	return n
-}
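
The scan is the usual rule: a register write is dead if the next instruction to touch that register along a single-successor chain is another write. A minimal sketch of the same rule on a toy linear program; the inst type is hypothetical:

package main

import "fmt"

// inst is a toy instruction: op is for display, dst is the register
// written (if any), reads lists the registers read.
type inst struct {
	op    string
	dst   string
	reads []string
}

// deadWrites returns the indices of writes that are overwritten
// before the register is read again, mirroring deadCodeElimination.
func deadWrites(prog []inst) []int {
	var dead []int
scan:
	for i, p := range prog {
		if p.dst == "" {
			continue
		}
		for _, q := range prog[i+1:] {
			for _, r := range q.reads {
				if r == p.dst {
					continue scan // read before rewrite: live
				}
			}
			if q.dst == p.dst {
				dead = append(dead, i) // rewritten first: dead
				continue scan
			}
		}
	}
	return dead
}

func main() {
	prog := []inst{
		{op: "MOVD $1, R1", dst: "R1"}, // dead: R1 rewritten below
		{op: "MOVD $2, R1", dst: "R1"},
		{op: "ADD R1, R2", dst: "R2", reads: []string{"R1", "R2"}},
	}
	fmt.Println(deadWrites(prog)) // [0]
}
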
diff --git a/src/cmd/compile/internal/s390x/reg.go b/src/cmd/compile/internal/s390x/reg.go
index b42314d..835abc1 100644
--- a/src/cmd/compile/internal/s390x/reg.go
+++ b/src/cmd/compile/internal/s390x/reg.go
@@ -31,65 +31,6 @@
 package s390x
 
 import "cmd/internal/obj/s390x"
-import "cmd/compile/internal/gc"
-
-const (
-	NREGVAR = 32 /* 16 general + 16 floating */
-)
-
-var regname = []string{
-	".R0",
-	".R1",
-	".R2",
-	".R3",
-	".R4",
-	".R5",
-	".R6",
-	".R7",
-	".R8",
-	".R9",
-	".R10",
-	".R11",
-	".R12",
-	".R13",
-	".R14",
-	".R15",
-	".F0",
-	".F1",
-	".F2",
-	".F3",
-	".F4",
-	".F5",
-	".F6",
-	".F7",
-	".F8",
-	".F9",
-	".F10",
-	".F11",
-	".F12",
-	".F13",
-	".F14",
-	".F15",
-}
-
-func regnames(n *int) []string {
-	*n = NREGVAR
-	return regname
-}
-
-func excludedregs() uint64 {
-	// Exclude registers with fixed functions
-	return RtoB(s390x.REG_R0) |
-		RtoB(s390x.REGSP) |
-		RtoB(s390x.REGG) |
-		RtoB(s390x.REGTMP) |
-		RtoB(s390x.REGTMP2) |
-		RtoB(s390x.REG_LR)
-}
-
-func doregbits(r int) uint64 {
-	return 0
-}
 
 /*
  * track register variables including external registers:
@@ -111,20 +52,3 @@
 	}
 	return 0
 }
-
-func BtoR(b uint64) int {
-	b &= 0xffff
-	if b == 0 {
-		return 0
-	}
-	return gc.Bitno(b) + s390x.REG_R0
-}
-
-func BtoF(b uint64) int {
-	b >>= 16
-	b &= 0xffff
-	if b == 0 {
-		return 0
-	}
-	return gc.Bitno(b) + s390x.REG_F0
-}
diff --git a/src/cmd/compile/internal/x86/cgen.go b/src/cmd/compile/internal/x86/cgen.go
deleted file mode 100644
index 90a773d..0000000
--- a/src/cmd/compile/internal/x86/cgen.go
+++ /dev/null
@@ -1,159 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package x86
-
-import (
-	"cmd/compile/internal/gc"
-	"cmd/internal/obj"
-	"cmd/internal/obj/x86"
-)
-
-/*
- * generate an addressable node in res, containing the value of n.
- * n is an array index, and might be any size; res width is <= 32 bits.
- * returns the branch Prog to patch to the panic call, or nil if no
- * check is needed.
- */
-func igenindex(n *gc.Node, res *gc.Node, bounded bool) *obj.Prog {
-	if !gc.Is64(n.Type) {
-		if n.Addable && (gc.Simtype[n.Etype] == gc.TUINT32 || gc.Simtype[n.Etype] == gc.TINT32) {
-			// nothing to do.
-			*res = *n
-		} else {
-			gc.Tempname(res, gc.Types[gc.TUINT32])
-			gc.Cgen(n, res)
-		}
-
-		return nil
-	}
-
-	var tmp gc.Node
-	gc.Tempname(&tmp, gc.Types[gc.TINT64])
-	gc.Cgen(n, &tmp)
-	var lo gc.Node
-	var hi gc.Node
-	split64(&tmp, &lo, &hi)
-	gc.Tempname(res, gc.Types[gc.TUINT32])
-	gmove(&lo, res)
-	if bounded {
-		splitclean()
-		return nil
-	}
-
-	var zero gc.Node
-	gc.Nodconst(&zero, gc.Types[gc.TINT32], 0)
-	gins(x86.ACMPL, &hi, &zero)
-	splitclean()
-	return gc.Gbranch(x86.AJNE, nil, +1)
-}
-
-func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
-	var dst gc.Node
-	gc.Nodreg(&dst, gc.Types[gc.Tptr], x86.REG_DI)
-	var src gc.Node
-	gc.Nodreg(&src, gc.Types[gc.Tptr], x86.REG_SI)
-
-	var tsrc gc.Node
-	gc.Tempname(&tsrc, gc.Types[gc.Tptr])
-	var tdst gc.Node
-	gc.Tempname(&tdst, gc.Types[gc.Tptr])
-	if !n.Addable {
-		gc.Agen(n, &tsrc)
-	}
-	if !res.Addable {
-		gc.Agen(res, &tdst)
-	}
-	if n.Addable {
-		gc.Agen(n, &src)
-	} else {
-		gmove(&tsrc, &src)
-	}
-
-	if res.Op == gc.ONAME {
-		gc.Gvardef(res)
-	}
-
-	if res.Addable {
-		gc.Agen(res, &dst)
-	} else {
-		gmove(&tdst, &dst)
-	}
-
-	c := int32(w % 4) // bytes
-	q := int32(w / 4) // doublewords
-
-	// if we are copying forward on the stack and
-	// the src and dst overlap, then reverse direction
-	if osrc < odst && odst < osrc+w {
-		// reverse direction
-		gins(x86.ASTD, nil, nil) // set direction flag
-		if c > 0 {
-			gconreg(x86.AADDL, w-1, x86.REG_SI)
-			gconreg(x86.AADDL, w-1, x86.REG_DI)
-
-			gconreg(x86.AMOVL, int64(c), x86.REG_CX)
-			gins(x86.AREP, nil, nil)   // repeat
-			gins(x86.AMOVSB, nil, nil) // MOVB *(SI)-,*(DI)-
-		}
-
-		if q > 0 {
-			if c > 0 {
-				gconreg(x86.AADDL, -3, x86.REG_SI)
-				gconreg(x86.AADDL, -3, x86.REG_DI)
-			} else {
-				gconreg(x86.AADDL, w-4, x86.REG_SI)
-				gconreg(x86.AADDL, w-4, x86.REG_DI)
-			}
-
-			gconreg(x86.AMOVL, int64(q), x86.REG_CX)
-			gins(x86.AREP, nil, nil)   // repeat
-			gins(x86.AMOVSL, nil, nil) // MOVL *(SI)-,*(DI)-
-		}
-
-		// we leave with the flag clear
-		gins(x86.ACLD, nil, nil)
-	} else {
-		gins(x86.ACLD, nil, nil) // paranoia.  TODO(rsc): remove?
-
-		// normal direction
-		if q > 128 || (q >= 4 && gc.Nacl) {
-			gconreg(x86.AMOVL, int64(q), x86.REG_CX)
-			gins(x86.AREP, nil, nil)   // repeat
-			gins(x86.AMOVSL, nil, nil) // MOVL *(SI)+,*(DI)+
-		} else if q >= 4 {
-			p := gins(obj.ADUFFCOPY, nil, nil)
-			p.To.Type = obj.TYPE_ADDR
-			p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))
-
-			// 10 and 128 = magic constants: see ../../runtime/asm_386.s
-			p.To.Offset = 10 * (128 - int64(q))
-		} else if !gc.Nacl && c == 0 {
-			var cx gc.Node
-			gc.Nodreg(&cx, gc.Types[gc.TINT32], x86.REG_CX)
-
-			// We don't need the MOVSL side-effect of updating SI and DI,
-			// and issuing a sequence of MOVLs directly is faster.
-			src.Op = gc.OINDREG
-
-			dst.Op = gc.OINDREG
-			for q > 0 {
-				gmove(&src, &cx) // MOVL x+(SI),CX
-				gmove(&cx, &dst) // MOVL CX,x+(DI)
-				src.Xoffset += 4
-				dst.Xoffset += 4
-				q--
-			}
-		} else {
-			for q > 0 {
-				gins(x86.AMOVSL, nil, nil) // MOVL *(SI)+,*(DI)+
-				q--
-			}
-		}
-
-		for c > 0 {
-			gins(x86.AMOVSB, nil, nil) // MOVB *(SI)+,*(DI)+
-			c--
-		}
-	}
-}
diff --git a/src/cmd/compile/internal/x86/cgen64.go b/src/cmd/compile/internal/x86/cgen64.go
deleted file mode 100644
index ea52d69..0000000
--- a/src/cmd/compile/internal/x86/cgen64.go
+++ /dev/null
@@ -1,598 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package x86
-
-import (
-	"cmd/compile/internal/gc"
-	"cmd/internal/obj"
-	"cmd/internal/obj/x86"
-)
-
-/*
- * generate 64-bit
- *	res = n
- * calls Fatalf if the op is not handled.
- */
-func cgen64(n *gc.Node, res *gc.Node) {
-	if res.Op != gc.OINDREG && res.Op != gc.ONAME {
-		gc.Dump("n", n)
-		gc.Dump("res", res)
-		gc.Fatalf("cgen64 %v of %v", n.Op, res.Op)
-	}
-
-	switch n.Op {
-	default:
-		gc.Fatalf("cgen64 %v", n.Op)
-
-	case gc.OMINUS:
-		gc.Cgen(n.Left, res)
-		var hi1 gc.Node
-		var lo1 gc.Node
-		split64(res, &lo1, &hi1)
-		gins(x86.ANEGL, nil, &lo1)
-		gins(x86.AADCL, ncon(0), &hi1)
-		gins(x86.ANEGL, nil, &hi1)
-		splitclean()
-		return
-
-	case gc.OCOM:
-		gc.Cgen(n.Left, res)
-		var lo1 gc.Node
-		var hi1 gc.Node
-		split64(res, &lo1, &hi1)
-		gins(x86.ANOTL, nil, &lo1)
-		gins(x86.ANOTL, nil, &hi1)
-		splitclean()
-		return
-
-		// binary operators.
-	// common setup below.
-	case gc.OADD,
-		gc.OSUB,
-		gc.OMUL,
-		gc.OLROT,
-		gc.OLSH,
-		gc.ORSH,
-		gc.OAND,
-		gc.OOR,
-		gc.OXOR:
-		break
-	}
-
-	l := n.Left
-	r := n.Right
-	if !l.Addable {
-		var t1 gc.Node
-		gc.Tempname(&t1, l.Type)
-		gc.Cgen(l, &t1)
-		l = &t1
-	}
-
-	if r != nil && !r.Addable {
-		var t2 gc.Node
-		gc.Tempname(&t2, r.Type)
-		gc.Cgen(r, &t2)
-		r = &t2
-	}
-
-	var ax gc.Node
-	gc.Nodreg(&ax, gc.Types[gc.TINT32], x86.REG_AX)
-	var cx gc.Node
-	gc.Nodreg(&cx, gc.Types[gc.TINT32], x86.REG_CX)
-	var dx gc.Node
-	gc.Nodreg(&dx, gc.Types[gc.TINT32], x86.REG_DX)
-
-	// Setup for binary operation.
-	var hi1 gc.Node
-	var lo1 gc.Node
-	split64(l, &lo1, &hi1)
-
-	var lo2 gc.Node
-	var hi2 gc.Node
-	if gc.Is64(r.Type) {
-		split64(r, &lo2, &hi2)
-	}
-
-	// Do op. Leave result in DX:AX.
-	switch n.Op {
-	// TODO: Constants
-	case gc.OADD:
-		gins(x86.AMOVL, &lo1, &ax)
-
-		gins(x86.AMOVL, &hi1, &dx)
-		gins(x86.AADDL, &lo2, &ax)
-		gins(x86.AADCL, &hi2, &dx)
-
-		// TODO: Constants.
-	case gc.OSUB:
-		gins(x86.AMOVL, &lo1, &ax)
-
-		gins(x86.AMOVL, &hi1, &dx)
-		gins(x86.ASUBL, &lo2, &ax)
-		gins(x86.ASBBL, &hi2, &dx)
-
-	case gc.OMUL:
-		// let's call the next three EX, FX and GX
-		var ex, fx, gx gc.Node
-		gc.Regalloc(&ex, gc.Types[gc.TPTR32], nil)
-		gc.Regalloc(&fx, gc.Types[gc.TPTR32], nil)
-		gc.Regalloc(&gx, gc.Types[gc.TPTR32], nil)
-
-		// load args into DX:AX and EX:GX.
-		gins(x86.AMOVL, &lo1, &ax)
-
-		gins(x86.AMOVL, &hi1, &dx)
-		gins(x86.AMOVL, &lo2, &gx)
-		gins(x86.AMOVL, &hi2, &ex)
-
-		// if DX and EX are zero, use 32 x 32 -> 64 unsigned multiply.
-		gins(x86.AMOVL, &dx, &fx)
-
-		gins(x86.AORL, &ex, &fx)
-		p1 := gc.Gbranch(x86.AJNE, nil, 0)
-		gins(x86.AMULL, &gx, nil) // implicit &ax
-		p2 := gc.Gbranch(obj.AJMP, nil, 0)
-		gc.Patch(p1, gc.Pc)
-
-		// full 64x64 -> 64, from 32x32 -> 64.
-		gins(x86.AIMULL, &gx, &dx)
-
-		gins(x86.AMOVL, &ax, &fx)
-		gins(x86.AIMULL, &ex, &fx)
-		gins(x86.AADDL, &dx, &fx)
-		gins(x86.AMOVL, &gx, &dx)
-		gins(x86.AMULL, &dx, nil) // implicit &ax
-		gins(x86.AADDL, &fx, &dx)
-		gc.Patch(p2, gc.Pc)
-
-		gc.Regfree(&ex)
-		gc.Regfree(&fx)
-		gc.Regfree(&gx)
-
-	// We only rotate by a constant c in [0,64).
-	// if c >= 32:
-	//	lo, hi = hi, lo
-	//	c -= 32
-	// if c == 0:
-	//	no-op
-	// else:
-	//	t = hi
-	//	shld hi:lo, c
-	//	shld lo:t, c
-	case gc.OLROT:
-		v := uint64(r.Int64())
-
-		if v >= 32 {
-			// reverse during load to do the first 32 bits of rotate
-			v -= 32
-
-			gins(x86.AMOVL, &lo1, &dx)
-			gins(x86.AMOVL, &hi1, &ax)
-		} else {
-			gins(x86.AMOVL, &lo1, &ax)
-			gins(x86.AMOVL, &hi1, &dx)
-		}
-
-		if v != 0 { // v == 0 needs no further rotation
-			gins(x86.AMOVL, &dx, &cx)
-			p1 := gins(x86.ASHLL, ncon(uint32(v)), &dx)
-			p1.From.Index = x86.REG_AX // double-width shift
-			p1.From.Scale = 0
-			p1 = gins(x86.ASHLL, ncon(uint32(v)), &ax)
-			p1.From.Index = x86.REG_CX // double-width shift
-			p1.From.Scale = 0
-		}
-
-	case gc.OLSH:
-		if r.Op == gc.OLITERAL {
-			v := uint64(r.Int64())
-			if v >= 64 {
-				if gc.Is64(r.Type) {
-					splitclean()
-				}
-				splitclean()
-				split64(res, &lo2, &hi2)
-				gins(x86.AMOVL, ncon(0), &lo2)
-				gins(x86.AMOVL, ncon(0), &hi2)
-				splitclean()
-				return
-			}
-
-			if v >= 32 {
-				if gc.Is64(r.Type) {
-					splitclean()
-				}
-				split64(res, &lo2, &hi2)
-				gmove(&lo1, &hi2)
-				if v > 32 {
-					gins(x86.ASHLL, ncon(uint32(v-32)), &hi2)
-				}
-
-				gins(x86.AMOVL, ncon(0), &lo2)
-				splitclean()
-				splitclean()
-				return
-			}
-
-			// general shift
-			gins(x86.AMOVL, &lo1, &ax)
-
-			gins(x86.AMOVL, &hi1, &dx)
-			p1 := gins(x86.ASHLL, ncon(uint32(v)), &dx)
-			p1.From.Index = x86.REG_AX // double-width shift
-			p1.From.Scale = 0
-			gins(x86.ASHLL, ncon(uint32(v)), &ax)
-			break
-		}
-
-		// load value into DX:AX.
-		gins(x86.AMOVL, &lo1, &ax)
-
-		gins(x86.AMOVL, &hi1, &dx)
-
-		// load shift value into register.
-		// if high bits are set, zero value.
-		var p1 *obj.Prog
-
-		if gc.Is64(r.Type) {
-			gins(x86.ACMPL, &hi2, ncon(0))
-			p1 = gc.Gbranch(x86.AJNE, nil, +1)
-			gins(x86.AMOVL, &lo2, &cx)
-		} else {
-			cx.Type = gc.Types[gc.TUINT32]
-			gmove(r, &cx)
-		}
-
-		// if shift count is >=64, zero value
-		gins(x86.ACMPL, &cx, ncon(64))
-
-		p2 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
-		if p1 != nil {
-			gc.Patch(p1, gc.Pc)
-		}
-		gins(x86.AXORL, &dx, &dx)
-		gins(x86.AXORL, &ax, &ax)
-		gc.Patch(p2, gc.Pc)
-
-		// if shift count is >= 32, zero low.
-		gins(x86.ACMPL, &cx, ncon(32))
-
-		p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
-		gins(x86.AMOVL, &ax, &dx)
-		gins(x86.ASHLL, &cx, &dx) // SHLL only uses bottom 5 bits of count
-		gins(x86.AXORL, &ax, &ax)
-		p2 = gc.Gbranch(obj.AJMP, nil, 0)
-		gc.Patch(p1, gc.Pc)
-
-		// general shift
-		p1 = gins(x86.ASHLL, &cx, &dx)
-
-		p1.From.Index = x86.REG_AX // double-width shift
-		p1.From.Scale = 0
-		gins(x86.ASHLL, &cx, &ax)
-		gc.Patch(p2, gc.Pc)
-
-	case gc.ORSH:
-		if r.Op == gc.OLITERAL {
-			v := uint64(r.Int64())
-			if v >= 64 {
-				if gc.Is64(r.Type) {
-					splitclean()
-				}
-				splitclean()
-				split64(res, &lo2, &hi2)
-				if hi1.Type.Etype == gc.TINT32 {
-					gmove(&hi1, &lo2)
-					gins(x86.ASARL, ncon(31), &lo2)
-					gmove(&hi1, &hi2)
-					gins(x86.ASARL, ncon(31), &hi2)
-				} else {
-					gins(x86.AMOVL, ncon(0), &lo2)
-					gins(x86.AMOVL, ncon(0), &hi2)
-				}
-
-				splitclean()
-				return
-			}
-
-			if v >= 32 {
-				if gc.Is64(r.Type) {
-					splitclean()
-				}
-				split64(res, &lo2, &hi2)
-				gmove(&hi1, &lo2)
-				if v > 32 {
-					gins(optoas(gc.ORSH, hi1.Type), ncon(uint32(v-32)), &lo2)
-				}
-				if hi1.Type.Etype == gc.TINT32 {
-					gmove(&hi1, &hi2)
-					gins(x86.ASARL, ncon(31), &hi2)
-				} else {
-					gins(x86.AMOVL, ncon(0), &hi2)
-				}
-				splitclean()
-				splitclean()
-				return
-			}
-
-			// general shift
-			gins(x86.AMOVL, &lo1, &ax)
-
-			gins(x86.AMOVL, &hi1, &dx)
-			p1 := gins(x86.ASHRL, ncon(uint32(v)), &ax)
-			p1.From.Index = x86.REG_DX // double-width shift
-			p1.From.Scale = 0
-			gins(optoas(gc.ORSH, hi1.Type), ncon(uint32(v)), &dx)
-			break
-		}
-
-		// load value into DX:AX.
-		gins(x86.AMOVL, &lo1, &ax)
-
-		gins(x86.AMOVL, &hi1, &dx)
-
-		// load shift value into register.
-		// if high bits are set, zero value.
-		var p1 *obj.Prog
-
-		if gc.Is64(r.Type) {
-			gins(x86.ACMPL, &hi2, ncon(0))
-			p1 = gc.Gbranch(x86.AJNE, nil, +1)
-			gins(x86.AMOVL, &lo2, &cx)
-		} else {
-			cx.Type = gc.Types[gc.TUINT32]
-			gmove(r, &cx)
-		}
-
-		// if shift count is >=64, zero or sign-extend value
-		gins(x86.ACMPL, &cx, ncon(64))
-
-		p2 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
-		if p1 != nil {
-			gc.Patch(p1, gc.Pc)
-		}
-		if hi1.Type.Etype == gc.TINT32 {
-			gins(x86.ASARL, ncon(31), &dx)
-			gins(x86.AMOVL, &dx, &ax)
-		} else {
-			gins(x86.AXORL, &dx, &dx)
-			gins(x86.AXORL, &ax, &ax)
-		}
-
-		gc.Patch(p2, gc.Pc)
-
-		// if shift count is >= 32, sign-extend hi.
-		gins(x86.ACMPL, &cx, ncon(32))
-
-		p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
-		gins(x86.AMOVL, &dx, &ax)
-		if hi1.Type.Etype == gc.TINT32 {
-			gins(x86.ASARL, &cx, &ax) // SARL only uses bottom 5 bits of count
-			gins(x86.ASARL, ncon(31), &dx)
-		} else {
-			gins(x86.ASHRL, &cx, &ax)
-			gins(x86.AXORL, &dx, &dx)
-		}
-
-		p2 = gc.Gbranch(obj.AJMP, nil, 0)
-		gc.Patch(p1, gc.Pc)
-
-		// general shift
-		p1 = gins(x86.ASHRL, &cx, &ax)
-
-		p1.From.Index = x86.REG_DX // double-width shift
-		p1.From.Scale = 0
-		gins(optoas(gc.ORSH, hi1.Type), &cx, &dx)
-		gc.Patch(p2, gc.Pc)
-
-		// make constant the right side (it usually is anyway).
-	case gc.OXOR,
-		gc.OAND,
-		gc.OOR:
-		if lo1.Op == gc.OLITERAL {
-			nswap(&lo1, &lo2)
-			nswap(&hi1, &hi2)
-		}
-
-		if lo2.Op == gc.OLITERAL {
-			// special cases for constants.
-			lv := uint32(lo2.Int64())
-			hv := uint32(hi2.Int64())
-			splitclean() // right side
-			split64(res, &lo2, &hi2)
-			switch n.Op {
-			case gc.OXOR:
-				gmove(&lo1, &lo2)
-				gmove(&hi1, &hi2)
-				switch lv {
-				case 0:
-					break
-
-				case 0xffffffff:
-					gins(x86.ANOTL, nil, &lo2)
-
-				default:
-					gins(x86.AXORL, ncon(lv), &lo2)
-				}
-
-				switch hv {
-				case 0:
-					break
-
-				case 0xffffffff:
-					gins(x86.ANOTL, nil, &hi2)
-
-				default:
-					gins(x86.AXORL, ncon(hv), &hi2)
-				}
-
-			case gc.OAND:
-				switch lv {
-				case 0:
-					gins(x86.AMOVL, ncon(0), &lo2)
-
-				default:
-					gmove(&lo1, &lo2)
-					if lv != 0xffffffff {
-						gins(x86.AANDL, ncon(lv), &lo2)
-					}
-				}
-
-				switch hv {
-				case 0:
-					gins(x86.AMOVL, ncon(0), &hi2)
-
-				default:
-					gmove(&hi1, &hi2)
-					if hv != 0xffffffff {
-						gins(x86.AANDL, ncon(hv), &hi2)
-					}
-				}
-
-			case gc.OOR:
-				switch lv {
-				case 0:
-					gmove(&lo1, &lo2)
-
-				case 0xffffffff:
-					gins(x86.AMOVL, ncon(0xffffffff), &lo2)
-
-				default:
-					gmove(&lo1, &lo2)
-					gins(x86.AORL, ncon(lv), &lo2)
-				}
-
-				switch hv {
-				case 0:
-					gmove(&hi1, &hi2)
-
-				case 0xffffffff:
-					gins(x86.AMOVL, ncon(0xffffffff), &hi2)
-
-				default:
-					gmove(&hi1, &hi2)
-					gins(x86.AORL, ncon(hv), &hi2)
-				}
-			}
-
-			splitclean()
-			splitclean()
-			return
-		}
-
-		gins(x86.AMOVL, &lo1, &ax)
-		gins(x86.AMOVL, &hi1, &dx)
-		gins(optoas(n.Op, lo1.Type), &lo2, &ax)
-		gins(optoas(n.Op, lo1.Type), &hi2, &dx)
-	}
-
-	if gc.Is64(r.Type) {
-		splitclean()
-	}
-	splitclean()
-
-	split64(res, &lo1, &hi1)
-	gins(x86.AMOVL, &ax, &lo1)
-	gins(x86.AMOVL, &dx, &hi1)
-	splitclean()
-}
-
-/*
- * generate comparison of nl, nr, both 64-bit.
- * nl is memory; nr is constant or memory.
- */
-func cmp64(nl *gc.Node, nr *gc.Node, op gc.Op, likely int, to *obj.Prog) {
-	var lo1 gc.Node
-	var hi1 gc.Node
-	var lo2 gc.Node
-	var hi2 gc.Node
-	var rr gc.Node
-
-	split64(nl, &lo1, &hi1)
-	split64(nr, &lo2, &hi2)
-
-	// compare most significant word;
-	// if they differ, we're done.
-	t := hi1.Type
-
-	if nl.Op == gc.OLITERAL || nr.Op == gc.OLITERAL {
-		gins(x86.ACMPL, &hi1, &hi2)
-	} else {
-		gc.Regalloc(&rr, gc.Types[gc.TINT32], nil)
-		gins(x86.AMOVL, &hi1, &rr)
-		gins(x86.ACMPL, &rr, &hi2)
-		gc.Regfree(&rr)
-	}
-
-	var br *obj.Prog
-	switch op {
-	default:
-		gc.Fatalf("cmp64 %v %v", op, t)
-
-		// cmp hi
-	// jne L
-	// cmp lo
-	// jeq to
-	// L:
-	case gc.OEQ:
-		br = gc.Gbranch(x86.AJNE, nil, -likely)
-
-		// cmp hi
-	// jne to
-	// cmp lo
-	// jne to
-	case gc.ONE:
-		gc.Patch(gc.Gbranch(x86.AJNE, nil, likely), to)
-
-		// cmp hi
-	// jgt to
-	// jlt L
-	// cmp lo
-	// jge to (or jgt to)
-	// L:
-	case gc.OGE,
-		gc.OGT:
-		gc.Patch(gc.Gbranch(optoas(gc.OGT, t), nil, likely), to)
-
-		br = gc.Gbranch(optoas(gc.OLT, t), nil, -likely)
-
-		// cmp hi
-	// jlt to
-	// jgt L
-	// cmp lo
-	// jle to (or jlt to)
-	// L:
-	case gc.OLE,
-		gc.OLT:
-		gc.Patch(gc.Gbranch(optoas(gc.OLT, t), nil, likely), to)
-
-		br = gc.Gbranch(optoas(gc.OGT, t), nil, -likely)
-	}
-
-	// compare least significant word
-	t = lo1.Type
-
-	if nl.Op == gc.OLITERAL || nr.Op == gc.OLITERAL {
-		gins(x86.ACMPL, &lo1, &lo2)
-	} else {
-		gc.Regalloc(&rr, gc.Types[gc.TINT32], nil)
-		gins(x86.AMOVL, &lo1, &rr)
-		gins(x86.ACMPL, &rr, &lo2)
-		gc.Regfree(&rr)
-	}
-
-	// jump again
-	gc.Patch(gc.Gbranch(optoas(op, t), nil, likely), to)
-
-	// point first branch down here if appropriate
-	if br != nil {
-		gc.Patch(br, gc.Pc)
-	}
-
-	splitclean()
-	splitclean()
-}
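
The branch structure above implements the standard two-word comparison: a signed compare of the high words decides unless they are equal, in which case an unsigned compare of the low words decides. A minimal standalone sketch of the predicate being compiled, for the less-than case:

package main

import "fmt"

// less64 reports whether the signed 64-bit value (hi1,lo1) is less
// than (hi2,lo2) using only 32-bit comparisons, as the generated
// code does: signed on the high word, unsigned on the low word.
func less64(hi1 int32, lo1 uint32, hi2 int32, lo2 uint32) bool {
	if hi1 != hi2 {
		return hi1 < hi2
	}
	return lo1 < lo2
}

func main() {
	fmt.Println(less64(-1, 0xFFFFFFFF, 0, 0)) // -1 < 0: true
	fmt.Println(less64(0, 2, 0, 3))           // 2 < 3: true
}
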
diff --git a/src/cmd/compile/internal/x86/galign.go b/src/cmd/compile/internal/x86/galign.go
index 9fb4712..269ff56 100644
--- a/src/cmd/compile/internal/x86/galign.go
+++ b/src/cmd/compile/internal/x86/galign.go
@@ -40,42 +40,9 @@
 	gc.Thearch.ReservedRegs = resvd
 
 	gc.Thearch.Betypeinit = betypeinit
-	gc.Thearch.Bgen_float = bgen_float
-	gc.Thearch.Cgen64 = cgen64
-	gc.Thearch.Cgen_bmul = cgen_bmul
-	gc.Thearch.Cgen_float = cgen_float
-	gc.Thearch.Cgen_hmul = cgen_hmul
-	gc.Thearch.Cgen_shift = cgen_shift
-	gc.Thearch.Clearfat = clearfat
-	gc.Thearch.Cmp64 = cmp64
 	gc.Thearch.Defframe = defframe
-	gc.Thearch.Dodiv = cgen_div
-	gc.Thearch.Excise = excise
-	gc.Thearch.Expandchecks = expandchecks
-	gc.Thearch.Getg = getg
 	gc.Thearch.Gins = gins
-	gc.Thearch.Ginscmp = ginscmp
-	gc.Thearch.Ginscon = ginscon
-	gc.Thearch.Ginsnop = ginsnop
-	gc.Thearch.Gmove = gmove
-	gc.Thearch.Igenindex = igenindex
-	gc.Thearch.Peep = peep
 	gc.Thearch.Proginfo = proginfo
-	gc.Thearch.Regtyp = regtyp
-	gc.Thearch.Sameaddr = sameaddr
-	gc.Thearch.Smallindir = smallindir
-	gc.Thearch.Stackaddr = stackaddr
-	gc.Thearch.Blockcopy = blockcopy
-	gc.Thearch.Sudoaddable = sudoaddable
-	gc.Thearch.Sudoclean = sudoclean
-	gc.Thearch.Excludedregs = excludedregs
-	gc.Thearch.RtoB = RtoB
-	gc.Thearch.FtoB = FtoB
-	gc.Thearch.BtoR = BtoR
-	gc.Thearch.BtoF = BtoF
-	gc.Thearch.Optoas = optoas
-	gc.Thearch.Doregbits = doregbits
-	gc.Thearch.Regnames = regnames
 
 	gc.Thearch.SSARegToReg = ssaRegToReg
 	gc.Thearch.SSAMarkMoves = ssaMarkMoves
diff --git a/src/cmd/compile/internal/x86/ggen.go b/src/cmd/compile/internal/x86/ggen.go
index 71dbd74..f701266 100644
--- a/src/cmd/compile/internal/x86/ggen.go
+++ b/src/cmd/compile/internal/x86/ggen.go
@@ -99,821 +99,3 @@
 	p.Link = q
 	return q
 }
-
-func clearfat(nl *gc.Node) {
-	/* clear a fat object */
-	if gc.Debug['g'] != 0 {
-		gc.Dump("\nclearfat", nl)
-	}
-
-	w := uint32(nl.Type.Width)
-
-	// Avoid taking the address for simple enough types.
-	if gc.Componentgen(nil, nl) {
-		return
-	}
-
-	c := w % 4 // bytes
-	q := w / 4 // doublewords
-
-	if q < 4 {
-		// Write sequence of MOV 0, off(base) instead of using STOSL.
-		// The hope is that although the code will be slightly longer,
-		// the MOVs will have no dependencies and pipeline better
-		// than the unrolled STOSL loop.
-		// NOTE: Must use agen, not igen, so that optimizer sees address
-		// being taken. We are not writing on field boundaries.
-		var n1 gc.Node
-		gc.Regalloc(&n1, gc.Types[gc.Tptr], nil)
-
-		gc.Agen(nl, &n1)
-		n1.Op = gc.OINDREG
-		var z gc.Node
-		gc.Nodconst(&z, gc.Types[gc.TUINT64], 0)
-		for ; q > 0; q-- {
-			n1.Type = z.Type
-			gins(x86.AMOVL, &z, &n1)
-			n1.Xoffset += 4
-		}
-
-		gc.Nodconst(&z, gc.Types[gc.TUINT8], 0)
-		for ; c > 0; c-- {
-			n1.Type = z.Type
-			gins(x86.AMOVB, &z, &n1)
-			n1.Xoffset++
-		}
-
-		gc.Regfree(&n1)
-		return
-	}
-
-	var n1 gc.Node
-	gc.Nodreg(&n1, gc.Types[gc.Tptr], x86.REG_DI)
-	gc.Agen(nl, &n1)
-	gconreg(x86.AMOVL, 0, x86.REG_AX)
-
-	if q > 128 || (q >= 4 && gc.Nacl) {
-		gconreg(x86.AMOVL, int64(q), x86.REG_CX)
-		gins(x86.AREP, nil, nil)   // repeat
-		gins(x86.ASTOSL, nil, nil) // STOL AL,*(DI)+
-	} else if q >= 4 {
-		p := gins(obj.ADUFFZERO, nil, nil)
-		p.To.Type = obj.TYPE_ADDR
-		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
-
-		// 1 and 128 = magic constants: see ../../runtime/asm_386.s
-		p.To.Offset = 1 * (128 - int64(q))
-	} else {
-		for q > 0 {
-			gins(x86.ASTOSL, nil, nil) // STOL AL,*(DI)+
-			q--
-		}
-	}
-
-	for c > 0 {
-		gins(x86.ASTOSB, nil, nil) // STOB AL,*(DI)+
-		c--
-	}
-}
-
-var panicdiv *gc.Node
-
-/*
- * generate division.
- * caller must set:
- *	ax = allocated AX register
- *	dx = allocated DX register
- * generates one of:
- *	res = nl / nr
- *	res = nl % nr
- * according to op.
- */
-func dodiv(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node, ax *gc.Node, dx *gc.Node) {
-	// Have to be careful about handling
-	// most negative int divided by -1 correctly.
-	// The hardware will trap.
-	// Also the byte divide instruction needs AH,
-	// which we otherwise don't have to deal with.
-	// Easiest way to avoid for int8, int16: use int32.
-	// For int32 and int64, use explicit test.
-	// Could use int64 hw for int32.
-	t := nl.Type
-
-	t0 := t
-	check := false
-	if t.IsSigned() {
-		check = true
-		if gc.Isconst(nl, gc.CTINT) && nl.Int64() != -1<<uint64(t.Width*8-1) {
-			check = false
-		} else if gc.Isconst(nr, gc.CTINT) && nr.Int64() != -1 {
-			check = false
-		}
-	}
-
-	if t.Width < 4 {
-		if t.IsSigned() {
-			t = gc.Types[gc.TINT32]
-		} else {
-			t = gc.Types[gc.TUINT32]
-		}
-		check = false
-	}
-
-	var t1 gc.Node
-	gc.Tempname(&t1, t)
-	var t2 gc.Node
-	gc.Tempname(&t2, t)
-	if t0 != t {
-		var t3 gc.Node
-		gc.Tempname(&t3, t0)
-		var t4 gc.Node
-		gc.Tempname(&t4, t0)
-		gc.Cgen(nl, &t3)
-		gc.Cgen(nr, &t4)
-
-		// Convert.
-		gmove(&t3, &t1)
-
-		gmove(&t4, &t2)
-	} else {
-		gc.Cgen(nl, &t1)
-		gc.Cgen(nr, &t2)
-	}
-
-	var n1 gc.Node
-	if !gc.Samereg(ax, res) && !gc.Samereg(dx, res) {
-		gc.Regalloc(&n1, t, res)
-	} else {
-		gc.Regalloc(&n1, t, nil)
-	}
-	gmove(&t2, &n1)
-	gmove(&t1, ax)
-	var p2 *obj.Prog
-	var n4 gc.Node
-	if gc.Nacl {
-		// Native Client does not relay the divide-by-zero trap
-		// to the executing program, so we must insert a check
-		// for ourselves.
-		gc.Nodconst(&n4, t, 0)
-
-		gins(optoas(gc.OCMP, t), &n1, &n4)
-		p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
-		if panicdiv == nil {
-			panicdiv = gc.Sysfunc("panicdivide")
-		}
-		gc.Ginscall(panicdiv, -1)
-		gc.Patch(p1, gc.Pc)
-	}
-
-	if check {
-		gc.Nodconst(&n4, t, -1)
-		gins(optoas(gc.OCMP, t), &n1, &n4)
-		p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
-		if op == gc.ODIV {
-			// a / (-1) is -a.
-			gins(optoas(gc.OMINUS, t), nil, ax)
-
-			gmove(ax, res)
-		} else {
-			// a % (-1) is 0.
-			gc.Nodconst(&n4, t, 0)
-
-			gmove(&n4, res)
-		}
-
-		p2 = gc.Gbranch(obj.AJMP, nil, 0)
-		gc.Patch(p1, gc.Pc)
-	}
-
-	if !t.IsSigned() {
-		var nz gc.Node
-		gc.Nodconst(&nz, t, 0)
-		gmove(&nz, dx)
-	} else {
-		gins(optoas(gc.OEXTEND, t), nil, nil)
-	}
-	gins(optoas(op, t), &n1, nil)
-	gc.Regfree(&n1)
-
-	if op == gc.ODIV {
-		gmove(ax, res)
-	} else {
-		gmove(dx, res)
-	}
-	if check {
-		gc.Patch(p2, gc.Pc)
-	}
-}
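
One note on the check flag above: hardware division traps when the most negative value is divided by -1, so the generated code rewrites a / (-1) as -a (which wraps back to a) and a % (-1) as 0. The same identities are visible directly from Go source; a small demonstration:

package main

import (
	"fmt"
	"math"
)

func main() {
	a := int32(math.MinInt32)
	fmt.Println(a / -1) // -2147483648: -a overflows back to a
	fmt.Println(a % -1) // 0
}
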
-
-func savex(dr int, x *gc.Node, oldx *gc.Node, res *gc.Node, t *gc.Type) {
-	r := gc.GetReg(dr)
-	gc.Nodreg(x, gc.Types[gc.TINT32], dr)
-
-	// save current ax and dx if they are live
-	// and not the destination
-	*oldx = gc.Node{}
-
-	if r > 0 && !gc.Samereg(x, res) {
-		gc.Tempname(oldx, gc.Types[gc.TINT32])
-		gmove(x, oldx)
-	}
-
-	gc.Regalloc(x, t, x)
-}
-
-func restx(x *gc.Node, oldx *gc.Node) {
-	gc.Regfree(x)
-
-	if oldx.Op != 0 {
-		x.Type = gc.Types[gc.TINT32]
-		gmove(oldx, x)
-	}
-}
-
-/*
- * generate division according to op, one of:
- *	res = nl / nr
- *	res = nl % nr
- */
-func cgen_div(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) {
-	if gc.Is64(nl.Type) {
-		gc.Fatalf("cgen_div %v", nl.Type)
-	}
-
-	var t *gc.Type
-	if nl.Type.IsSigned() {
-		t = gc.Types[gc.TINT32]
-	} else {
-		t = gc.Types[gc.TUINT32]
-	}
-	var ax gc.Node
-	var oldax gc.Node
-	savex(x86.REG_AX, &ax, &oldax, res, t)
-	var olddx gc.Node
-	var dx gc.Node
-	savex(x86.REG_DX, &dx, &olddx, res, t)
-	dodiv(op, nl, nr, res, &ax, &dx)
-	restx(&dx, &olddx)
-	restx(&ax, &oldax)
-}
-
-/*
- * generate shift according to op, one of:
- *	res = nl << nr
- *	res = nl >> nr
- */
-func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
-	if nl.Type.Width > 4 {
-		gc.Fatalf("cgen_shift %v", nl.Type)
-	}
-
-	w := int(nl.Type.Width * 8)
-
-	a := optoas(op, nl.Type)
-
-	if nr.Op == gc.OLITERAL {
-		var n2 gc.Node
-		gc.Tempname(&n2, nl.Type)
-		gc.Cgen(nl, &n2)
-		var n1 gc.Node
-		gc.Regalloc(&n1, nl.Type, res)
-		gmove(&n2, &n1)
-		sc := uint64(nr.Int64())
-		if sc >= uint64(nl.Type.Width*8) {
-			// large shift gets 2 shifts by width-1
-			gins(a, ncon(uint32(w)-1), &n1)
-
-			gins(a, ncon(uint32(w)-1), &n1)
-		} else {
-			gins(a, nr, &n1)
-		}
-		gmove(&n1, res)
-		gc.Regfree(&n1)
-		return
-	}
-
-	var oldcx gc.Node
-	var cx gc.Node
-	gc.Nodreg(&cx, gc.Types[gc.TUINT32], x86.REG_CX)
-	if gc.GetReg(x86.REG_CX) > 1 && !gc.Samereg(&cx, res) {
-		gc.Tempname(&oldcx, gc.Types[gc.TUINT32])
-		gmove(&cx, &oldcx)
-	}
-
-	var n1 gc.Node
-	var nt gc.Node
-	if nr.Type.Width > 4 {
-		gc.Tempname(&nt, nr.Type)
-		n1 = nt
-	} else {
-		gc.Nodreg(&n1, gc.Types[gc.TUINT32], x86.REG_CX)
-		gc.Regalloc(&n1, nr.Type, &n1) // to hold the shift count in CX
-	}
-
-	var n2 gc.Node
-	if gc.Samereg(&cx, res) {
-		gc.Regalloc(&n2, nl.Type, nil)
-	} else {
-		gc.Regalloc(&n2, nl.Type, res)
-	}
-	if nl.Ullman >= nr.Ullman {
-		gc.Cgen(nl, &n2)
-		gc.Cgen(nr, &n1)
-	} else {
-		gc.Cgen(nr, &n1)
-		gc.Cgen(nl, &n2)
-	}
-
-	// test and fix up large shifts
-	if bounded {
-		if nr.Type.Width > 4 {
-			// delayed reg alloc
-			gc.Nodreg(&n1, gc.Types[gc.TUINT32], x86.REG_CX)
-
-			gc.Regalloc(&n1, gc.Types[gc.TUINT32], &n1) // to hold the shift count in CX
-			var lo gc.Node
-			var hi gc.Node
-			split64(&nt, &lo, &hi)
-			gmove(&lo, &n1)
-			splitclean()
-		}
-	} else {
-		var p1 *obj.Prog
-		if nr.Type.Width > 4 {
-			// delayed reg alloc
-			gc.Nodreg(&n1, gc.Types[gc.TUINT32], x86.REG_CX)
-
-			gc.Regalloc(&n1, gc.Types[gc.TUINT32], &n1) // to hold the shift count in CX
-			var lo gc.Node
-			var hi gc.Node
-			split64(&nt, &lo, &hi)
-			gmove(&lo, &n1)
-			gins(optoas(gc.OCMP, gc.Types[gc.TUINT32]), &hi, ncon(0))
-			p2 := gc.Gbranch(optoas(gc.ONE, gc.Types[gc.TUINT32]), nil, +1)
-			gins(optoas(gc.OCMP, gc.Types[gc.TUINT32]), &n1, ncon(uint32(w)))
-			p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
-			splitclean()
-			gc.Patch(p2, gc.Pc)
-		} else {
-			gins(optoas(gc.OCMP, nr.Type), &n1, ncon(uint32(w)))
-			p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
-		}
-
-		if op == gc.ORSH && nl.Type.IsSigned() {
-			gins(a, ncon(uint32(w)-1), &n2)
-		} else {
-			gmove(ncon(0), &n2)
-		}
-
-		gc.Patch(p1, gc.Pc)
-	}
-
-	gins(a, &n1, &n2)
-
-	if oldcx.Op != 0 {
-		gmove(&oldcx, &cx)
-	}
-
-	gmove(&n2, res)
-
-	gc.Regfree(&n1)
-	gc.Regfree(&n2)
-}
-
-/*
- * generate byte multiply:
- *	res = nl * nr
- * there is no 2-operand byte multiply instruction so
- * we do a full-width multiplication and truncate afterwards.
- */
-func cgen_bmul(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) bool {
-	if optoas(op, nl.Type) != x86.AIMULB {
-		return false
-	}
-
-	// copy from byte to full registers
-	t := gc.Types[gc.TUINT32]
-
-	if nl.Type.IsSigned() {
-		t = gc.Types[gc.TINT32]
-	}
-
-	// largest ullman on left.
-	if nl.Ullman < nr.Ullman {
-		nl, nr = nr, nl
-	}
-
-	var nt gc.Node
-	gc.Tempname(&nt, nl.Type)
-	gc.Cgen(nl, &nt)
-	var n1 gc.Node
-	gc.Regalloc(&n1, t, res)
-	gc.Cgen(nr, &n1)
-	var n2 gc.Node
-	gc.Regalloc(&n2, t, nil)
-	gmove(&nt, &n2)
-	a := optoas(op, t)
-	gins(a, &n2, &n1)
-	gc.Regfree(&n2)
-	gmove(&n1, res)
-	gc.Regfree(&n1)
-
-	return true
-}
-
-/*
- * generate high multiply:
- *   res = (nl*nr) >> width
- */
-func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
-	var n1 gc.Node
-	var n2 gc.Node
-
-	t := nl.Type
-	a := optoas(gc.OHMUL, t)
-
-	// gen nl in n1.
-	gc.Tempname(&n1, t)
-	gc.Cgen(nl, &n1)
-
-	// gen nr in n2.
-	gc.Regalloc(&n2, t, res)
-	gc.Cgen(nr, &n2)
-
-	var ax, oldax, dx, olddx gc.Node
-	savex(x86.REG_AX, &ax, &oldax, res, gc.Types[gc.TUINT32])
-	savex(x86.REG_DX, &dx, &olddx, res, gc.Types[gc.TUINT32])
-
-	gmove(&n2, &ax)
-	gins(a, &n1, nil)
-	gc.Regfree(&n2)
-
-	if t.Width == 1 {
-		// byte multiply behaves differently.
-		var byteAH, byteDX gc.Node
-		gc.Nodreg(&byteAH, t, x86.REG_AH)
-		gc.Nodreg(&byteDX, t, x86.REG_DX)
-		gmove(&byteAH, &byteDX)
-	}
-
-	gmove(&dx, res)
-
-	restx(&ax, &oldax)
-	restx(&dx, &olddx)
-}
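
OHMUL is simply the upper half of the double-width product; the DX:AX shuffling above exists because the one-operand MUL/IMUL leaves that half in DX. A minimal standalone equivalent for 32-bit operands:

package main

import "fmt"

// hmul32 computes the high 32 bits of the 64-bit product a*b,
// which is what OHMUL produces for 32-bit inputs.
func hmul32(a, b int32) int32 {
	return int32((int64(a) * int64(b)) >> 32)
}

func main() {
	fmt.Println(hmul32(1<<30, 4)) // 1: the product 2^32 has bit 32 set
	fmt.Println(hmul32(-1, 1))    // -1: sign bits fill the high word
}
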
-
-/*
- * generate floating-point operation.
- */
-func cgen_float(n *gc.Node, res *gc.Node) {
-	nl := n.Left
-	switch n.Op {
-	case gc.OEQ,
-		gc.ONE,
-		gc.OLT,
-		gc.OLE,
-		gc.OGE:
-		p1 := gc.Gbranch(obj.AJMP, nil, 0)
-		p2 := gc.Pc
-		gmove(gc.Nodbool(true), res)
-		p3 := gc.Gbranch(obj.AJMP, nil, 0)
-		gc.Patch(p1, gc.Pc)
-		gc.Bgen(n, true, 0, p2)
-		gmove(gc.Nodbool(false), res)
-		gc.Patch(p3, gc.Pc)
-		return
-
-	case gc.OPLUS:
-		gc.Cgen(nl, res)
-		return
-
-	case gc.OCONV:
-		if gc.Eqtype(n.Type, nl.Type) || gc.Noconv(n.Type, nl.Type) {
-			gc.Cgen(nl, res)
-			return
-		}
-
-		var n2 gc.Node
-		gc.Tempname(&n2, n.Type)
-		var n1 gc.Node
-		gc.Mgen(nl, &n1, res)
-		gmove(&n1, &n2)
-		gmove(&n2, res)
-		gc.Mfree(&n1)
-		return
-	}
-
-	if gc.Thearch.Use387 {
-		cgen_float387(n, res)
-	} else {
-		cgen_floatsse(n, res)
-	}
-}
-
-// floating-point.  387 (not SSE2)
-func cgen_float387(n *gc.Node, res *gc.Node) {
-	var f0 gc.Node
-	var f1 gc.Node
-
-	nl := n.Left
-	nr := n.Right
-	gc.Nodreg(&f0, nl.Type, x86.REG_F0)
-	gc.Nodreg(&f1, n.Type, x86.REG_F0+1)
-	if nr != nil {
-		// binary
-		if nl.Ullman >= nr.Ullman {
-			gc.Cgen(nl, &f0)
-			if nr.Addable {
-				gins(foptoas(n.Op, n.Type, 0), nr, &f0)
-			} else {
-				gc.Cgen(nr, &f0)
-				gins(foptoas(n.Op, n.Type, Fpop), &f0, &f1)
-			}
-		} else {
-			gc.Cgen(nr, &f0)
-			if nl.Addable {
-				gins(foptoas(n.Op, n.Type, Frev), nl, &f0)
-			} else {
-				gc.Cgen(nl, &f0)
-				gins(foptoas(n.Op, n.Type, Frev|Fpop), &f0, &f1)
-			}
-		}
-
-		gmove(&f0, res)
-		return
-	}
-
-	// unary
-	gc.Cgen(nl, &f0)
-
-	if n.Op != gc.OCONV && n.Op != gc.OPLUS {
-		gins(foptoas(n.Op, n.Type, 0), nil, nil)
-	}
-	gmove(&f0, res)
-	return
-}
-
-func cgen_floatsse(n *gc.Node, res *gc.Node) {
-	var a obj.As
-
-	nl := n.Left
-	nr := n.Right
-	switch n.Op {
-	default:
-		gc.Dump("cgen_floatsse", n)
-		gc.Fatalf("cgen_floatsse %v", n.Op)
-		return
-
-	case gc.OMINUS,
-		gc.OCOM:
-		nr = gc.NegOne(n.Type)
-		a = foptoas(gc.OMUL, nl.Type, 0)
-		goto sbop
-
-		// symmetric binary
-	case gc.OADD,
-		gc.OMUL:
-		a = foptoas(n.Op, nl.Type, 0)
-
-		goto sbop
-
-		// asymmetric binary
-	case gc.OSUB,
-		gc.OMOD,
-		gc.ODIV:
-		a = foptoas(n.Op, nl.Type, 0)
-
-		goto abop
-	}
-
-sbop: // symmetric binary
-	if nl.Ullman < nr.Ullman || nl.Op == gc.OLITERAL {
-		nl, nr = nr, nl
-	}
-
-abop: // asymmetric binary
-	if nl.Ullman >= nr.Ullman {
-		var nt gc.Node
-		gc.Tempname(&nt, nl.Type)
-		gc.Cgen(nl, &nt)
-		var n2 gc.Node
-		gc.Mgen(nr, &n2, nil)
-		var n1 gc.Node
-		gc.Regalloc(&n1, nl.Type, res)
-		gmove(&nt, &n1)
-		gins(a, &n2, &n1)
-		gmove(&n1, res)
-		gc.Regfree(&n1)
-		gc.Mfree(&n2)
-	} else {
-		var n2 gc.Node
-		gc.Regalloc(&n2, nr.Type, res)
-		gc.Cgen(nr, &n2)
-		var n1 gc.Node
-		gc.Regalloc(&n1, nl.Type, nil)
-		gc.Cgen(nl, &n1)
-		gins(a, &n2, &n1)
-		gc.Regfree(&n2)
-		gmove(&n1, res)
-		gc.Regfree(&n1)
-	}
-
-	return
-}
-
-func bgen_float(n *gc.Node, wantTrue bool, likely int, to *obj.Prog) {
-	nl := n.Left
-	nr := n.Right
-	op := n.Op
-	if !wantTrue {
-		// brcom is not valid on floats when NaN is involved.
-		p1 := gc.Gbranch(obj.AJMP, nil, 0)
-		p2 := gc.Gbranch(obj.AJMP, nil, 0)
-		gc.Patch(p1, gc.Pc)
-
-		// No need to avoid re-genning ninit.
-		bgen_float(n, true, -likely, p2)
-
-		gc.Patch(gc.Gbranch(obj.AJMP, nil, 0), to)
-		gc.Patch(p2, gc.Pc)
-		return
-	}
-
-	if gc.Thearch.Use387 {
-		op = gc.Brrev(op) // because the args are stacked
-		if op == gc.OGE || op == gc.OGT {
-			// only < and <= work right with NaN; reverse if needed
-			nl, nr = nr, nl
-			op = gc.Brrev(op)
-		}
-
-		var ax, n2, tmp gc.Node
-		gc.Nodreg(&tmp, nr.Type, x86.REG_F0)
-		gc.Nodreg(&n2, nr.Type, x86.REG_F0+1)
-		gc.Nodreg(&ax, gc.Types[gc.TUINT16], x86.REG_AX)
-		if gc.Simsimtype(nr.Type) == gc.TFLOAT64 {
-			if nl.Ullman > nr.Ullman {
-				gc.Cgen(nl, &tmp)
-				gc.Cgen(nr, &tmp)
-				gins(x86.AFXCHD, &tmp, &n2)
-			} else {
-				gc.Cgen(nr, &tmp)
-				gc.Cgen(nl, &tmp)
-			}
-			gins(x86.AFUCOMPP, &tmp, &n2)
-		} else {
-			// TODO(rsc): The moves back and forth to memory
-			// here are for truncating the value to 32 bits.
-			// This handles 32-bit comparison but presumably
-			// all the other ops have the same problem.
-			// We need to figure out what the right general
-			// solution is, besides telling people to use float64.
-			var t1 gc.Node
-			gc.Tempname(&t1, gc.Types[gc.TFLOAT32])
-
-			var t2 gc.Node
-			gc.Tempname(&t2, gc.Types[gc.TFLOAT32])
-			gc.Cgen(nr, &t1)
-			gc.Cgen(nl, &t2)
-			gmove(&t2, &tmp)
-			gins(x86.AFCOMFP, &t1, &tmp)
-		}
-		gins(x86.AFSTSW, nil, &ax)
-		gins(x86.ASAHF, nil, nil)
-	} else {
-		// Not 387
-		if !nl.Addable {
-			nl = gc.CgenTemp(nl)
-		}
-		if !nr.Addable {
-			nr = gc.CgenTemp(nr)
-		}
-
-		var n2 gc.Node
-		gc.Regalloc(&n2, nr.Type, nil)
-		gmove(nr, &n2)
-		nr = &n2
-
-		if nl.Op != gc.OREGISTER {
-			var n3 gc.Node
-			gc.Regalloc(&n3, nl.Type, nil)
-			gmove(nl, &n3)
-			nl = &n3
-		}
-
-		if op == gc.OGE || op == gc.OGT {
-			// only < and <= work right with NaN; reverse if needed
-			nl, nr = nr, nl
-			op = gc.Brrev(op)
-		}
-
-		gins(foptoas(gc.OCMP, nr.Type, 0), nl, nr)
-		if nl.Op == gc.OREGISTER {
-			gc.Regfree(nl)
-		}
-		gc.Regfree(nr)
-	}
-
-	switch op {
-	case gc.OEQ:
-		// neither NE nor P
-		p1 := gc.Gbranch(x86.AJNE, nil, -likely)
-		p2 := gc.Gbranch(x86.AJPS, nil, -likely)
-		gc.Patch(gc.Gbranch(obj.AJMP, nil, 0), to)
-		gc.Patch(p1, gc.Pc)
-		gc.Patch(p2, gc.Pc)
-	case gc.ONE:
-		// either NE or P
-		gc.Patch(gc.Gbranch(x86.AJNE, nil, likely), to)
-		gc.Patch(gc.Gbranch(x86.AJPS, nil, likely), to)
-	default:
-		gc.Patch(gc.Gbranch(optoas(op, nr.Type), nil, likely), to)
-	}
-}
-
-// Called after regopt and peep have run.
-// Expand CHECKNIL pseudo-op into actual nil pointer check.
-func expandchecks(firstp *obj.Prog) {
-	var p1 *obj.Prog
-	var p2 *obj.Prog
-
-	for p := firstp; p != nil; p = p.Link {
-		if p.As != obj.ACHECKNIL {
-			continue
-		}
-		if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
-			gc.Warnl(p.Lineno, "generated nil check")
-		}
-
-		// check is
-		//	CMP arg, $0
-		//	JNE 2(PC) (likely)
-		//	MOV AX, 0
-		p1 = gc.Ctxt.NewProg()
-
-		p2 = gc.Ctxt.NewProg()
-		gc.Clearp(p1)
-		gc.Clearp(p2)
-		p1.Link = p2
-		p2.Link = p.Link
-		p.Link = p1
-		p1.Lineno = p.Lineno
-		p2.Lineno = p.Lineno
-		p1.Pc = 9999
-		p2.Pc = 9999
-		p.As = x86.ACMPL
-		p.To.Type = obj.TYPE_CONST
-		p.To.Offset = 0
-		p1.As = x86.AJNE
-		p1.From.Type = obj.TYPE_CONST
-		p1.From.Offset = 1 // likely
-		p1.To.Type = obj.TYPE_BRANCH
-		p1.To.Val = p2.Link
-
-		// crash by write to memory address 0.
-		// if possible, since we know arg is 0, use 0(arg),
-		// which will be shorter to encode than plain 0.
-		p2.As = x86.AMOVL
-
-		p2.From.Type = obj.TYPE_REG
-		p2.From.Reg = x86.REG_AX
-		if regtyp(&p.From) {
-			p2.To.Type = obj.TYPE_MEM
-			p2.To.Reg = p.From.Reg
-		} else {
-			p2.To.Type = obj.TYPE_MEM
-		}
-		p2.To.Offset = 0
-	}
-}
-
-// addr += index*width if possible.
-func addindex(index *gc.Node, width int64, addr *gc.Node) bool {
-	switch width {
-	case 1, 2, 4, 8:
-		p1 := gins(x86.ALEAL, index, addr)
-		p1.From.Type = obj.TYPE_MEM
-		p1.From.Scale = int16(width)
-		p1.From.Index = p1.From.Reg
-		p1.From.Reg = p1.To.Reg
-		return true
-	}
-	return false
-}
-
-// res = runtime.getg()
-func getg(res *gc.Node) {
-	var n1 gc.Node
-	gc.Regalloc(&n1, res.Type, res)
-	mov := optoas(gc.OAS, gc.Types[gc.Tptr])
-	p := gins(mov, nil, &n1)
-	p.From.Type = obj.TYPE_REG
-	p.From.Reg = x86.REG_TLS
-	p = gins(mov, nil, &n1)
-	p.From = p.To
-	p.From.Type = obj.TYPE_MEM
-	p.From.Index = x86.REG_TLS
-	p.From.Scale = 1
-	gmove(&n1, res)
-	gc.Regfree(&n1)
-}
diff --git a/src/cmd/compile/internal/x86/gsubr.go b/src/cmd/compile/internal/x86/gsubr.go
index 9ad6305..a0d36a2 100644
--- a/src/cmd/compile/internal/x86/gsubr.go
+++ b/src/cmd/compile/internal/x86/gsubr.go
@@ -31,566 +31,12 @@
 package x86
 
 import (
-	"cmd/compile/internal/big"
 	"cmd/compile/internal/gc"
 	"cmd/internal/obj"
 	"cmd/internal/obj/x86"
 	"fmt"
 )
 
-// TODO(rsc): Can make this bigger if we move
-// the text segment up higher in 8l for all GOOS.
-// At the same time, can raise StackBig in ../../runtime/stack.h.
-var unmappedzero uint32 = 4096
-
-// foptoas flags
-const (
-	Frev  = 1 << 0
-	Fpop  = 1 << 1
-	Fpop2 = 1 << 2
-)
-
-/*
- * return Axxx for Oxxx on type t.
- */
-func optoas(op gc.Op, t *gc.Type) obj.As {
-	if t == nil {
-		gc.Fatalf("optoas: t is nil")
-	}
-
-	// avoid constant conversions in switches below
-	const (
-		OMINUS_  = uint32(gc.OMINUS) << 16
-		OLSH_    = uint32(gc.OLSH) << 16
-		ORSH_    = uint32(gc.ORSH) << 16
-		OADD_    = uint32(gc.OADD) << 16
-		OSUB_    = uint32(gc.OSUB) << 16
-		OMUL_    = uint32(gc.OMUL) << 16
-		ODIV_    = uint32(gc.ODIV) << 16
-		OMOD_    = uint32(gc.OMOD) << 16
-		OOR_     = uint32(gc.OOR) << 16
-		OAND_    = uint32(gc.OAND) << 16
-		OXOR_    = uint32(gc.OXOR) << 16
-		OEQ_     = uint32(gc.OEQ) << 16
-		ONE_     = uint32(gc.ONE) << 16
-		OLT_     = uint32(gc.OLT) << 16
-		OLE_     = uint32(gc.OLE) << 16
-		OGE_     = uint32(gc.OGE) << 16
-		OGT_     = uint32(gc.OGT) << 16
-		OCMP_    = uint32(gc.OCMP) << 16
-		OAS_     = uint32(gc.OAS) << 16
-		OHMUL_   = uint32(gc.OHMUL) << 16
-		OADDR_   = uint32(gc.OADDR) << 16
-		OINC_    = uint32(gc.OINC) << 16
-		ODEC_    = uint32(gc.ODEC) << 16
-		OLROT_   = uint32(gc.OLROT) << 16
-		OEXTEND_ = uint32(gc.OEXTEND) << 16
-		OCOM_    = uint32(gc.OCOM) << 16
-	)
-
-	a := obj.AXXX
-	switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
-	default:
-		gc.Fatalf("optoas: no entry %v-%v", op, t)
-
-	case OADDR_ | gc.TPTR32:
-		a = x86.ALEAL
-
-	case OEQ_ | gc.TBOOL,
-		OEQ_ | gc.TINT8,
-		OEQ_ | gc.TUINT8,
-		OEQ_ | gc.TINT16,
-		OEQ_ | gc.TUINT16,
-		OEQ_ | gc.TINT32,
-		OEQ_ | gc.TUINT32,
-		OEQ_ | gc.TINT64,
-		OEQ_ | gc.TUINT64,
-		OEQ_ | gc.TPTR32,
-		OEQ_ | gc.TPTR64,
-		OEQ_ | gc.TFLOAT32,
-		OEQ_ | gc.TFLOAT64:
-		a = x86.AJEQ
-
-	case ONE_ | gc.TBOOL,
-		ONE_ | gc.TINT8,
-		ONE_ | gc.TUINT8,
-		ONE_ | gc.TINT16,
-		ONE_ | gc.TUINT16,
-		ONE_ | gc.TINT32,
-		ONE_ | gc.TUINT32,
-		ONE_ | gc.TINT64,
-		ONE_ | gc.TUINT64,
-		ONE_ | gc.TPTR32,
-		ONE_ | gc.TPTR64,
-		ONE_ | gc.TFLOAT32,
-		ONE_ | gc.TFLOAT64:
-		a = x86.AJNE
-
-	case OLT_ | gc.TINT8,
-		OLT_ | gc.TINT16,
-		OLT_ | gc.TINT32,
-		OLT_ | gc.TINT64:
-		a = x86.AJLT
-
-	case OLT_ | gc.TUINT8,
-		OLT_ | gc.TUINT16,
-		OLT_ | gc.TUINT32,
-		OLT_ | gc.TUINT64:
-		a = x86.AJCS
-
-	case OLE_ | gc.TINT8,
-		OLE_ | gc.TINT16,
-		OLE_ | gc.TINT32,
-		OLE_ | gc.TINT64:
-		a = x86.AJLE
-
-	case OLE_ | gc.TUINT8,
-		OLE_ | gc.TUINT16,
-		OLE_ | gc.TUINT32,
-		OLE_ | gc.TUINT64:
-		a = x86.AJLS
-
-	case OGT_ | gc.TINT8,
-		OGT_ | gc.TINT16,
-		OGT_ | gc.TINT32,
-		OGT_ | gc.TINT64:
-		a = x86.AJGT
-
-	case OGT_ | gc.TUINT8,
-		OGT_ | gc.TUINT16,
-		OGT_ | gc.TUINT32,
-		OGT_ | gc.TUINT64,
-		OLT_ | gc.TFLOAT32,
-		OLT_ | gc.TFLOAT64:
-		a = x86.AJHI
-
-	case OGE_ | gc.TINT8,
-		OGE_ | gc.TINT16,
-		OGE_ | gc.TINT32,
-		OGE_ | gc.TINT64:
-		a = x86.AJGE
-
-	case OGE_ | gc.TUINT8,
-		OGE_ | gc.TUINT16,
-		OGE_ | gc.TUINT32,
-		OGE_ | gc.TUINT64,
-		OLE_ | gc.TFLOAT32,
-		OLE_ | gc.TFLOAT64:
-		a = x86.AJCC
-
-	case OCMP_ | gc.TBOOL,
-		OCMP_ | gc.TINT8,
-		OCMP_ | gc.TUINT8:
-		a = x86.ACMPB
-
-	case OCMP_ | gc.TINT16,
-		OCMP_ | gc.TUINT16:
-		a = x86.ACMPW
-
-	case OCMP_ | gc.TINT32,
-		OCMP_ | gc.TUINT32,
-		OCMP_ | gc.TPTR32:
-		a = x86.ACMPL
-
-	case OAS_ | gc.TBOOL,
-		OAS_ | gc.TINT8,
-		OAS_ | gc.TUINT8:
-		a = x86.AMOVB
-
-	case OAS_ | gc.TINT16,
-		OAS_ | gc.TUINT16:
-		a = x86.AMOVW
-
-	case OAS_ | gc.TINT32,
-		OAS_ | gc.TUINT32,
-		OAS_ | gc.TPTR32:
-		a = x86.AMOVL
-
-	case OAS_ | gc.TFLOAT32:
-		a = x86.AMOVSS
-
-	case OAS_ | gc.TFLOAT64:
-		a = x86.AMOVSD
-
-	case OADD_ | gc.TINT8,
-		OADD_ | gc.TUINT8:
-		a = x86.AADDB
-
-	case OADD_ | gc.TINT16,
-		OADD_ | gc.TUINT16:
-		a = x86.AADDW
-
-	case OADD_ | gc.TINT32,
-		OADD_ | gc.TUINT32,
-		OADD_ | gc.TPTR32:
-		a = x86.AADDL
-
-	case OSUB_ | gc.TINT8,
-		OSUB_ | gc.TUINT8:
-		a = x86.ASUBB
-
-	case OSUB_ | gc.TINT16,
-		OSUB_ | gc.TUINT16:
-		a = x86.ASUBW
-
-	case OSUB_ | gc.TINT32,
-		OSUB_ | gc.TUINT32,
-		OSUB_ | gc.TPTR32:
-		a = x86.ASUBL
-
-	case OINC_ | gc.TINT8,
-		OINC_ | gc.TUINT8:
-		a = x86.AINCB
-
-	case OINC_ | gc.TINT16,
-		OINC_ | gc.TUINT16:
-		a = x86.AINCW
-
-	case OINC_ | gc.TINT32,
-		OINC_ | gc.TUINT32,
-		OINC_ | gc.TPTR32:
-		a = x86.AINCL
-
-	case ODEC_ | gc.TINT8,
-		ODEC_ | gc.TUINT8:
-		a = x86.ADECB
-
-	case ODEC_ | gc.TINT16,
-		ODEC_ | gc.TUINT16:
-		a = x86.ADECW
-
-	case ODEC_ | gc.TINT32,
-		ODEC_ | gc.TUINT32,
-		ODEC_ | gc.TPTR32:
-		a = x86.ADECL
-
-	case OCOM_ | gc.TINT8,
-		OCOM_ | gc.TUINT8:
-		a = x86.ANOTB
-
-	case OCOM_ | gc.TINT16,
-		OCOM_ | gc.TUINT16:
-		a = x86.ANOTW
-
-	case OCOM_ | gc.TINT32,
-		OCOM_ | gc.TUINT32,
-		OCOM_ | gc.TPTR32:
-		a = x86.ANOTL
-
-	case OMINUS_ | gc.TINT8,
-		OMINUS_ | gc.TUINT8:
-		a = x86.ANEGB
-
-	case OMINUS_ | gc.TINT16,
-		OMINUS_ | gc.TUINT16:
-		a = x86.ANEGW
-
-	case OMINUS_ | gc.TINT32,
-		OMINUS_ | gc.TUINT32,
-		OMINUS_ | gc.TPTR32:
-		a = x86.ANEGL
-
-	case OAND_ | gc.TINT8,
-		OAND_ | gc.TUINT8:
-		a = x86.AANDB
-
-	case OAND_ | gc.TINT16,
-		OAND_ | gc.TUINT16:
-		a = x86.AANDW
-
-	case OAND_ | gc.TINT32,
-		OAND_ | gc.TUINT32,
-		OAND_ | gc.TPTR32:
-		a = x86.AANDL
-
-	case OOR_ | gc.TINT8,
-		OOR_ | gc.TUINT8:
-		a = x86.AORB
-
-	case OOR_ | gc.TINT16,
-		OOR_ | gc.TUINT16:
-		a = x86.AORW
-
-	case OOR_ | gc.TINT32,
-		OOR_ | gc.TUINT32,
-		OOR_ | gc.TPTR32:
-		a = x86.AORL
-
-	case OXOR_ | gc.TINT8,
-		OXOR_ | gc.TUINT8:
-		a = x86.AXORB
-
-	case OXOR_ | gc.TINT16,
-		OXOR_ | gc.TUINT16:
-		a = x86.AXORW
-
-	case OXOR_ | gc.TINT32,
-		OXOR_ | gc.TUINT32,
-		OXOR_ | gc.TPTR32:
-		a = x86.AXORL
-
-	case OLROT_ | gc.TINT8,
-		OLROT_ | gc.TUINT8:
-		a = x86.AROLB
-
-	case OLROT_ | gc.TINT16,
-		OLROT_ | gc.TUINT16:
-		a = x86.AROLW
-
-	case OLROT_ | gc.TINT32,
-		OLROT_ | gc.TUINT32,
-		OLROT_ | gc.TPTR32:
-		a = x86.AROLL
-
-	case OLSH_ | gc.TINT8,
-		OLSH_ | gc.TUINT8:
-		a = x86.ASHLB
-
-	case OLSH_ | gc.TINT16,
-		OLSH_ | gc.TUINT16:
-		a = x86.ASHLW
-
-	case OLSH_ | gc.TINT32,
-		OLSH_ | gc.TUINT32,
-		OLSH_ | gc.TPTR32:
-		a = x86.ASHLL
-
-	case ORSH_ | gc.TUINT8:
-		a = x86.ASHRB
-
-	case ORSH_ | gc.TUINT16:
-		a = x86.ASHRW
-
-	case ORSH_ | gc.TUINT32,
-		ORSH_ | gc.TPTR32:
-		a = x86.ASHRL
-
-	case ORSH_ | gc.TINT8:
-		a = x86.ASARB
-
-	case ORSH_ | gc.TINT16:
-		a = x86.ASARW
-
-	case ORSH_ | gc.TINT32:
-		a = x86.ASARL
-
-	case OHMUL_ | gc.TINT8,
-		OMUL_ | gc.TINT8,
-		OMUL_ | gc.TUINT8:
-		a = x86.AIMULB
-
-	case OHMUL_ | gc.TINT16,
-		OMUL_ | gc.TINT16,
-		OMUL_ | gc.TUINT16:
-		a = x86.AIMULW
-
-	case OHMUL_ | gc.TINT32,
-		OMUL_ | gc.TINT32,
-		OMUL_ | gc.TUINT32,
-		OMUL_ | gc.TPTR32:
-		a = x86.AIMULL
-
-	case OHMUL_ | gc.TUINT8:
-		a = x86.AMULB
-
-	case OHMUL_ | gc.TUINT16:
-		a = x86.AMULW
-
-	case OHMUL_ | gc.TUINT32,
-		OHMUL_ | gc.TPTR32:
-		a = x86.AMULL
-
-	case ODIV_ | gc.TINT8,
-		OMOD_ | gc.TINT8:
-		a = x86.AIDIVB
-
-	case ODIV_ | gc.TUINT8,
-		OMOD_ | gc.TUINT8:
-		a = x86.ADIVB
-
-	case ODIV_ | gc.TINT16,
-		OMOD_ | gc.TINT16:
-		a = x86.AIDIVW
-
-	case ODIV_ | gc.TUINT16,
-		OMOD_ | gc.TUINT16:
-		a = x86.ADIVW
-
-	case ODIV_ | gc.TINT32,
-		OMOD_ | gc.TINT32:
-		a = x86.AIDIVL
-
-	case ODIV_ | gc.TUINT32,
-		ODIV_ | gc.TPTR32,
-		OMOD_ | gc.TUINT32,
-		OMOD_ | gc.TPTR32:
-		a = x86.ADIVL
-
-	case OEXTEND_ | gc.TINT16:
-		a = x86.ACWD
-
-	case OEXTEND_ | gc.TINT32:
-		a = x86.ACDQ
-	}
-
-	return a
-}
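The dispatch above leans on one idiom: pack the op into the high 16 bits of a switch key and the type into the low 16, with pre-shifted OXXX_ constants so the cases stay constant expressions. A minimal self-contained sketch of the idiom (all names here are illustrative, not from the compiler):

	package main

	import "fmt"

	// Illustrative stand-ins for the compiler's op and type enums.
	const (
		OADD = 1
		OSUB = 2

		TINT32 = 1
		TINT64 = 2
	)

	// Pre-shifted op constants keep the switch cases constant
	// expressions, mirroring the OADD_ constants above.
	const (
		OADD_ = OADD << 16
		OSUB_ = OSUB << 16
	)

	func optoasSketch(op, t uint32) string {
		switch op<<16 | t {
		case OADD_ | TINT32:
			return "ADDL"
		case OSUB_ | TINT32:
			return "SUBL"
		}
		return "UNKNOWN"
	}

	func main() {
		fmt.Println(optoasSketch(OADD, TINT32)) // ADDL
	}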
-
-func foptoas(op gc.Op, t *gc.Type, flg int) obj.As {
-	a := obj.AXXX
-	et := gc.Simtype[t.Etype]
-
-	// avoid constant conversions in switches below
-	const (
-		OCMP_   = uint32(gc.OCMP) << 16
-		OAS_    = uint32(gc.OAS) << 16
-		OADD_   = uint32(gc.OADD) << 16
-		OSUB_   = uint32(gc.OSUB) << 16
-		OMUL_   = uint32(gc.OMUL) << 16
-		ODIV_   = uint32(gc.ODIV) << 16
-		OMINUS_ = uint32(gc.OMINUS) << 16
-	)
-
-	if !gc.Thearch.Use387 {
-		switch uint32(op)<<16 | uint32(et) {
-		default:
-			gc.Fatalf("foptoas-sse: no entry %v-%v", op, t)
-
-		case OCMP_ | gc.TFLOAT32:
-			a = x86.AUCOMISS
-
-		case OCMP_ | gc.TFLOAT64:
-			a = x86.AUCOMISD
-
-		case OAS_ | gc.TFLOAT32:
-			a = x86.AMOVSS
-
-		case OAS_ | gc.TFLOAT64:
-			a = x86.AMOVSD
-
-		case OADD_ | gc.TFLOAT32:
-			a = x86.AADDSS
-
-		case OADD_ | gc.TFLOAT64:
-			a = x86.AADDSD
-
-		case OSUB_ | gc.TFLOAT32:
-			a = x86.ASUBSS
-
-		case OSUB_ | gc.TFLOAT64:
-			a = x86.ASUBSD
-
-		case OMUL_ | gc.TFLOAT32:
-			a = x86.AMULSS
-
-		case OMUL_ | gc.TFLOAT64:
-			a = x86.AMULSD
-
-		case ODIV_ | gc.TFLOAT32:
-			a = x86.ADIVSS
-
-		case ODIV_ | gc.TFLOAT64:
-			a = x86.ADIVSD
-		}
-
-		return a
-	}
-
-	// If we need Fpop, it means we're working on
-	// two different floating-point registers, not memory.
-	// In that case the instruction has only a float64 form.
-	if flg&Fpop != 0 {
-		et = gc.TFLOAT64
-	}
-
-	// clear Frev if unneeded
-	switch op {
-	case gc.OADD,
-		gc.OMUL:
-		flg &^= Frev
-	}
-
-	switch uint32(op)<<16 | (uint32(et)<<8 | uint32(flg)) {
-	case OADD_ | (gc.TFLOAT32<<8 | 0):
-		return x86.AFADDF
-
-	case OADD_ | (gc.TFLOAT64<<8 | 0):
-		return x86.AFADDD
-
-	case OADD_ | (gc.TFLOAT64<<8 | Fpop):
-		return x86.AFADDDP
-
-	case OSUB_ | (gc.TFLOAT32<<8 | 0):
-		return x86.AFSUBF
-
-	case OSUB_ | (gc.TFLOAT32<<8 | Frev):
-		return x86.AFSUBRF
-
-	case OSUB_ | (gc.TFLOAT64<<8 | 0):
-		return x86.AFSUBD
-
-	case OSUB_ | (gc.TFLOAT64<<8 | Frev):
-		return x86.AFSUBRD
-
-	case OSUB_ | (gc.TFLOAT64<<8 | Fpop):
-		return x86.AFSUBDP
-
-	case OSUB_ | (gc.TFLOAT64<<8 | (Fpop | Frev)):
-		return x86.AFSUBRDP
-
-	case OMUL_ | (gc.TFLOAT32<<8 | 0):
-		return x86.AFMULF
-
-	case OMUL_ | (gc.TFLOAT64<<8 | 0):
-		return x86.AFMULD
-
-	case OMUL_ | (gc.TFLOAT64<<8 | Fpop):
-		return x86.AFMULDP
-
-	case ODIV_ | (gc.TFLOAT32<<8 | 0):
-		return x86.AFDIVF
-
-	case ODIV_ | (gc.TFLOAT32<<8 | Frev):
-		return x86.AFDIVRF
-
-	case ODIV_ | (gc.TFLOAT64<<8 | 0):
-		return x86.AFDIVD
-
-	case ODIV_ | (gc.TFLOAT64<<8 | Frev):
-		return x86.AFDIVRD
-
-	case ODIV_ | (gc.TFLOAT64<<8 | Fpop):
-		return x86.AFDIVDP
-
-	case ODIV_ | (gc.TFLOAT64<<8 | (Fpop | Frev)):
-		return x86.AFDIVRDP
-
-	case OCMP_ | (gc.TFLOAT32<<8 | 0):
-		return x86.AFCOMF
-
-	case OCMP_ | (gc.TFLOAT32<<8 | Fpop):
-		return x86.AFCOMFP
-
-	case OCMP_ | (gc.TFLOAT64<<8 | 0):
-		return x86.AFCOMD
-
-	case OCMP_ | (gc.TFLOAT64<<8 | Fpop):
-		return x86.AFCOMDP
-
-	case OCMP_ | (gc.TFLOAT64<<8 | Fpop2):
-		return x86.AFCOMDPP
-
-	case OMINUS_ | (gc.TFLOAT32<<8 | 0):
-		return x86.AFCHS
-
-	case OMINUS_ | (gc.TFLOAT64<<8 | 0):
-		return x86.AFCHS
-	}
-
-	gc.Fatalf("foptoas %v %v %#x", op, t, flg)
-	return 0
-}
-
 var resvd = []int{
 	//	REG_DI,	// for movstring
 	//	REG_SI,	// for movstring
@@ -601,1129 +47,6 @@
 	x86.REG_SP, // for stack
 }
 
-/*
- * generate
- *	as $c, reg
- */
-func gconreg(as obj.As, c int64, reg int) {
-	var n1 gc.Node
-	var n2 gc.Node
-
-	gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
-	gc.Nodreg(&n2, gc.Types[gc.TINT64], reg)
-	gins(as, &n1, &n2)
-}
-
-/*
- * generate
- *	as $c, n
- */
-func ginscon(as obj.As, c int64, n2 *gc.Node) {
-	var n1 gc.Node
-	gc.Nodconst(&n1, gc.Types[gc.TINT32], c)
-	gins(as, &n1, n2)
-}
-
-func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
-	if t.IsInteger() || t.Etype == gc.Tptr {
-		if (n1.Op == gc.OLITERAL || n1.Op == gc.OADDR && n1.Left.Op == gc.ONAME) && n2.Op != gc.OLITERAL {
-			// Reverse comparison to place constant (including address constant) last.
-			op = gc.Brrev(op)
-			n1, n2 = n2, n1
-		}
-	}
-
-	// General case.
-	var r1, r2, g1, g2 gc.Node
-
-	// A special case to make write barriers more efficient.
-	// Comparing the first field of a named struct can be done directly.
-	base := n1
-	if n1.Op == gc.ODOT && n1.Left.Type.IsStruct() && n1.Left.Type.Field(0).Sym == n1.Sym {
-		base = n1.Left
-	}
-
-	if base.Op == gc.ONAME && base.Class != gc.PAUTOHEAP || n1.Op == gc.OINDREG {
-		r1 = *n1
-	} else {
-		gc.Regalloc(&r1, t, n1)
-		gc.Regalloc(&g1, n1.Type, &r1)
-		gc.Cgen(n1, &g1)
-		gmove(&g1, &r1)
-	}
-	if n2.Op == gc.OLITERAL && t.IsInteger() || n2.Op == gc.OADDR && n2.Left.Op == gc.ONAME && n2.Left.Class == gc.PEXTERN {
-		r2 = *n2
-	} else {
-		gc.Regalloc(&r2, t, n2)
-		gc.Regalloc(&g2, n1.Type, &r2)
-		gc.Cgen(n2, &g2)
-		gmove(&g2, &r2)
-	}
-	gins(optoas(gc.OCMP, t), &r1, &r2)
-	if r1.Op == gc.OREGISTER {
-		gc.Regfree(&g1)
-		gc.Regfree(&r1)
-	}
-	if r2.Op == gc.OREGISTER {
-		gc.Regfree(&g2)
-		gc.Regfree(&r2)
-	}
-	return gc.Gbranch(optoas(op, t), nil, likely)
-}
-
-/*
- * swap node contents
- */
-func nswap(a *gc.Node, b *gc.Node) {
-	t := *a
-	*a = *b
-	*b = t
-}
-
-/*
- * return constant i node.
- * overwritten by next call, but useful in calls to gins.
- */
-
-var ncon_n gc.Node
-
-func ncon(i uint32) *gc.Node {
-	if ncon_n.Type == nil {
-		gc.Nodconst(&ncon_n, gc.Types[gc.TUINT32], 0)
-	}
-	ncon_n.SetInt(int64(i))
-	return &ncon_n
-}
-
-var sclean [10]gc.Node
-
-var nsclean int
-
-/*
- * n is a 64-bit value.  fill in lo and hi to refer to its 32-bit halves.
- */
-func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
-	if !gc.Is64(n.Type) {
-		gc.Fatalf("split64 %v", n.Type)
-	}
-
-	if nsclean >= len(sclean) {
-		gc.Fatalf("split64 clean")
-	}
-	sclean[nsclean].Op = gc.OEMPTY
-	nsclean++
-	switch n.Op {
-	default:
-		switch n.Op {
-		default:
-			var n1 gc.Node
-			if !dotaddable(n, &n1) {
-				gc.Igen(n, &n1, nil)
-				sclean[nsclean-1] = n1
-			}
-
-			n = &n1
-
-		case gc.ONAME, gc.OINDREG:
-			// nothing
-		}
-
-		*lo = *n
-		*hi = *n
-		lo.Type = gc.Types[gc.TUINT32]
-		if n.Type.Etype == gc.TINT64 {
-			hi.Type = gc.Types[gc.TINT32]
-		} else {
-			hi.Type = gc.Types[gc.TUINT32]
-		}
-		hi.Xoffset += 4
-
-	case gc.OLITERAL:
-		var n1 gc.Node
-		n.Convconst(&n1, n.Type)
-		i := n1.Int64()
-		gc.Nodconst(lo, gc.Types[gc.TUINT32], int64(uint32(i)))
-		i >>= 32
-		if n.Type.Etype == gc.TINT64 {
-			gc.Nodconst(hi, gc.Types[gc.TINT32], int64(int32(i)))
-		} else {
-			gc.Nodconst(hi, gc.Types[gc.TUINT32], int64(uint32(i)))
-		}
-	}
-}
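split64's layout assumption is the little-endian one: the low 32-bit word lives at the node's offset and the high word 4 bytes above it, with the high half signed only for TINT64. A value-level sketch of the same split:

	// splitUint64 mirrors what split64 exposes through lo and hi:
	// lo is the low 32 bits (offset 0), hi the high 32 (offset +4).
	func splitUint64(v uint64) (lo, hi uint32) {
		return uint32(v), uint32(v >> 32)
	}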
-
-func splitclean() {
-	if nsclean <= 0 {
-		gc.Fatalf("splitclean")
-	}
-	nsclean--
-	if sclean[nsclean].Op != gc.OEMPTY {
-		gc.Regfree(&sclean[nsclean])
-	}
-}
-
-// set up nodes representing fp constants
-var (
-	zerof        gc.Node
-	two63f       gc.Node
-	two64f       gc.Node
-	bignodes_did bool
-)
-
-func bignodes() {
-	if bignodes_did {
-		return
-	}
-	bignodes_did = true
-
-	gc.Nodconst(&zerof, gc.Types[gc.TINT64], 0)
-	zerof.Convconst(&zerof, gc.Types[gc.TFLOAT64])
-
-	var i big.Int
-	i.SetInt64(1)
-	i.Lsh(&i, 63)
-	var bigi gc.Node
-
-	gc.Nodconst(&bigi, gc.Types[gc.TUINT64], 0)
-	bigi.SetBigInt(&i)
-	bigi.Convconst(&two63f, gc.Types[gc.TFLOAT64])
-
-	gc.Nodconst(&bigi, gc.Types[gc.TUINT64], 0)
-	i.Lsh(&i, 1)
-	bigi.SetBigInt(&i)
-	bigi.Convconst(&two64f, gc.Types[gc.TFLOAT64])
-}
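The two constants built here are exactly 2^63 and 2^64; both are powers of two, so their float64 representations are exact. A standalone sketch of the same construction using the standard math/big (the compiler used its vendored copy):

	package main

	import (
		"fmt"
		"math/big"
	)

	func main() {
		var i big.Int
		i.SetInt64(1)
		i.Lsh(&i, 63) // 2^63
		two63, _ := new(big.Float).SetInt(&i).Float64()
		i.Lsh(&i, 1) // 2^64
		two64, _ := new(big.Float).SetInt(&i).Float64()
		fmt.Println(two63, two64)
	}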
-
-func memname(n *gc.Node, t *gc.Type) {
-	gc.Tempname(n, t)
-	n.Sym = gc.Lookup("." + n.Sym.Name[1:]) // keep optimizer from registerizing
-	n.Orig.Sym = n.Sym
-}
-
-func gmove(f *gc.Node, t *gc.Node) {
-	if gc.Debug['M'] != 0 {
-		fmt.Printf("gmove %v -> %v\n", f, t)
-	}
-
-	ft := gc.Simsimtype(f.Type)
-	tt := gc.Simsimtype(t.Type)
-	cvt := t.Type
-
-	if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
-		gc.Complexmove(f, t)
-		return
-	}
-
-	if gc.Isfloat[ft] || gc.Isfloat[tt] {
-		floatmove(f, t)
-		return
-	}
-
-	// cannot have two integer memory operands;
-	// except 64-bit, which always copies via registers anyway.
-	var r1 gc.Node
-	var a obj.As
-	if gc.Isint[ft] && gc.Isint[tt] && !gc.Is64(f.Type) && !gc.Is64(t.Type) && gc.Ismem(f) && gc.Ismem(t) {
-		goto hard
-	}
-
-	// convert constant to desired type
-	if f.Op == gc.OLITERAL {
-		var con gc.Node
-		f.Convconst(&con, t.Type)
-		f = &con
-		ft = gc.Simsimtype(con.Type)
-	}
-
-	// value -> value copy, only one memory operand.
-	// figure out the instruction to use.
-	// break out of switch for one-instruction gins.
-	// goto rdst for "destination must be register".
-	// goto hard for "convert to cvt type first".
-	// otherwise handle and return.
-
-	switch uint32(ft)<<16 | uint32(tt) {
-	default:
-		// should not happen
-		gc.Fatalf("gmove %v -> %v", f, t)
-		return
-
-		/*
-		 * integer copy and truncate
-		 */
-	case gc.TINT8<<16 | gc.TINT8, // same size
-		gc.TINT8<<16 | gc.TUINT8,
-		gc.TUINT8<<16 | gc.TINT8,
-		gc.TUINT8<<16 | gc.TUINT8:
-		a = x86.AMOVB
-
-	case gc.TINT16<<16 | gc.TINT8, // truncate
-		gc.TUINT16<<16 | gc.TINT8,
-		gc.TINT32<<16 | gc.TINT8,
-		gc.TUINT32<<16 | gc.TINT8,
-		gc.TINT16<<16 | gc.TUINT8,
-		gc.TUINT16<<16 | gc.TUINT8,
-		gc.TINT32<<16 | gc.TUINT8,
-		gc.TUINT32<<16 | gc.TUINT8:
-		a = x86.AMOVB
-
-		goto rsrc
-
-	case gc.TINT64<<16 | gc.TINT8, // truncate low word
-		gc.TUINT64<<16 | gc.TINT8,
-		gc.TINT64<<16 | gc.TUINT8,
-		gc.TUINT64<<16 | gc.TUINT8:
-		var flo gc.Node
-		var fhi gc.Node
-		split64(f, &flo, &fhi)
-
-		var r1 gc.Node
-		gc.Nodreg(&r1, t.Type, x86.REG_AX)
-		gmove(&flo, &r1)
-		gins(x86.AMOVB, &r1, t)
-		splitclean()
-		return
-
-	case gc.TINT16<<16 | gc.TINT16, // same size
-		gc.TINT16<<16 | gc.TUINT16,
-		gc.TUINT16<<16 | gc.TINT16,
-		gc.TUINT16<<16 | gc.TUINT16:
-		a = x86.AMOVW
-
-	case gc.TINT32<<16 | gc.TINT16, // truncate
-		gc.TUINT32<<16 | gc.TINT16,
-		gc.TINT32<<16 | gc.TUINT16,
-		gc.TUINT32<<16 | gc.TUINT16:
-		a = x86.AMOVW
-
-		goto rsrc
-
-	case gc.TINT64<<16 | gc.TINT16, // truncate low word
-		gc.TUINT64<<16 | gc.TINT16,
-		gc.TINT64<<16 | gc.TUINT16,
-		gc.TUINT64<<16 | gc.TUINT16:
-		var flo gc.Node
-		var fhi gc.Node
-		split64(f, &flo, &fhi)
-
-		var r1 gc.Node
-		gc.Nodreg(&r1, t.Type, x86.REG_AX)
-		gmove(&flo, &r1)
-		gins(x86.AMOVW, &r1, t)
-		splitclean()
-		return
-
-	case gc.TINT32<<16 | gc.TINT32, // same size
-		gc.TINT32<<16 | gc.TUINT32,
-		gc.TUINT32<<16 | gc.TINT32,
-		gc.TUINT32<<16 | gc.TUINT32:
-		a = x86.AMOVL
-
-	case gc.TINT64<<16 | gc.TINT32, // truncate
-		gc.TUINT64<<16 | gc.TINT32,
-		gc.TINT64<<16 | gc.TUINT32,
-		gc.TUINT64<<16 | gc.TUINT32:
-		var fhi gc.Node
-		var flo gc.Node
-		split64(f, &flo, &fhi)
-
-		var r1 gc.Node
-		gc.Nodreg(&r1, t.Type, x86.REG_AX)
-		gmove(&flo, &r1)
-		gins(x86.AMOVL, &r1, t)
-		splitclean()
-		return
-
-	case gc.TINT64<<16 | gc.TINT64, // same size
-		gc.TINT64<<16 | gc.TUINT64,
-		gc.TUINT64<<16 | gc.TINT64,
-		gc.TUINT64<<16 | gc.TUINT64:
-		var fhi gc.Node
-		var flo gc.Node
-		split64(f, &flo, &fhi)
-
-		var tlo gc.Node
-		var thi gc.Node
-		split64(t, &tlo, &thi)
-		if f.Op == gc.OLITERAL {
-			gins(x86.AMOVL, &flo, &tlo)
-			gins(x86.AMOVL, &fhi, &thi)
-		} else {
-			// Implementation of conversion-free x = y for int64 or uint64 x.
-			// This is generated by the code that copies small values out of closures,
-			// and that code has DX live, so avoid DX and just use AX twice.
-			var r1 gc.Node
-			gc.Nodreg(&r1, gc.Types[gc.TUINT32], x86.REG_AX)
-			gins(x86.AMOVL, &flo, &r1)
-			gins(x86.AMOVL, &r1, &tlo)
-			gins(x86.AMOVL, &fhi, &r1)
-			gins(x86.AMOVL, &r1, &thi)
-		}
-
-		splitclean()
-		splitclean()
-		return
-
-		/*
-		 * integer up-conversions
-		 */
-	case gc.TINT8<<16 | gc.TINT16, // sign extend int8
-		gc.TINT8<<16 | gc.TUINT16:
-		a = x86.AMOVBWSX
-
-		goto rdst
-
-	case gc.TINT8<<16 | gc.TINT32,
-		gc.TINT8<<16 | gc.TUINT32:
-		a = x86.AMOVBLSX
-		goto rdst
-
-	case gc.TINT8<<16 | gc.TINT64, // convert via int32
-		gc.TINT8<<16 | gc.TUINT64:
-		cvt = gc.Types[gc.TINT32]
-
-		goto hard
-
-	case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8
-		gc.TUINT8<<16 | gc.TUINT16:
-		a = x86.AMOVBWZX
-
-		goto rdst
-
-	case gc.TUINT8<<16 | gc.TINT32,
-		gc.TUINT8<<16 | gc.TUINT32:
-		a = x86.AMOVBLZX
-		goto rdst
-
-	case gc.TUINT8<<16 | gc.TINT64, // convert via uint32
-		gc.TUINT8<<16 | gc.TUINT64:
-		cvt = gc.Types[gc.TUINT32]
-
-		goto hard
-
-	case gc.TINT16<<16 | gc.TINT32, // sign extend int16
-		gc.TINT16<<16 | gc.TUINT32:
-		a = x86.AMOVWLSX
-
-		goto rdst
-
-	case gc.TINT16<<16 | gc.TINT64, // convert via int32
-		gc.TINT16<<16 | gc.TUINT64:
-		cvt = gc.Types[gc.TINT32]
-
-		goto hard
-
-	case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16
-		gc.TUINT16<<16 | gc.TUINT32:
-		a = x86.AMOVWLZX
-
-		goto rdst
-
-	case gc.TUINT16<<16 | gc.TINT64, // convert via uint32
-		gc.TUINT16<<16 | gc.TUINT64:
-		cvt = gc.Types[gc.TUINT32]
-
-		goto hard
-
-	case gc.TINT32<<16 | gc.TINT64, // sign extend int32
-		gc.TINT32<<16 | gc.TUINT64:
-		var thi gc.Node
-		var tlo gc.Node
-		split64(t, &tlo, &thi)
-
-		var flo gc.Node
-		gc.Nodreg(&flo, tlo.Type, x86.REG_AX)
-		var fhi gc.Node
-		gc.Nodreg(&fhi, thi.Type, x86.REG_DX)
-		gmove(f, &flo)
-		gins(x86.ACDQ, nil, nil)
-		gins(x86.AMOVL, &flo, &tlo)
-		gins(x86.AMOVL, &fhi, &thi)
-		splitclean()
-		return
-
-	case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
-		gc.TUINT32<<16 | gc.TUINT64:
-		var tlo gc.Node
-		var thi gc.Node
-		split64(t, &tlo, &thi)
-
-		gmove(f, &tlo)
-		gins(x86.AMOVL, ncon(0), &thi)
-		splitclean()
-		return
-	}
-
-	gins(a, f, t)
-	return
-
-	// requires register source
-rsrc:
-	gc.Regalloc(&r1, f.Type, t)
-
-	gmove(f, &r1)
-	gins(a, &r1, t)
-	gc.Regfree(&r1)
-	return
-
-	// requires register destination
-rdst:
-	{
-		gc.Regalloc(&r1, t.Type, t)
-
-		gins(a, f, &r1)
-		gmove(&r1, t)
-		gc.Regfree(&r1)
-		return
-	}
-
-	// requires register intermediate
-hard:
-	gc.Regalloc(&r1, cvt, t)
-
-	gmove(f, &r1)
-	gmove(&r1, t)
-	gc.Regfree(&r1)
-	return
-}
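The 32-to-64-bit cases are the ones worth a second look: sign extension materializes the high word with CDQ (the sign bit of the low word replicated 32 times), while zero extension just stores a constant 0 high word. In value terms, a sketch:

	// signExtend mirrors the CDQ path: hi is the sign bit of lo
	// replicated, i.e. 0 or 0xFFFFFFFF.
	func signExtend(lo int32) (loOut, hi uint32) {
		return uint32(lo), uint32(lo >> 31) // arithmetic shift
	}

	// zeroExtend mirrors the MOVL f, tlo / MOVL $0, thi path.
	func zeroExtend(lo uint32) (loOut, hi uint32) {
		return lo, 0
	}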
-
-func floatmove(f *gc.Node, t *gc.Node) {
-	var r1 gc.Node
-
-	ft := gc.Simsimtype(f.Type)
-	tt := gc.Simsimtype(t.Type)
-	cvt := t.Type
-
-	// cannot have two floating point memory operands.
-	if gc.Isfloat[ft] && gc.Isfloat[tt] && gc.Ismem(f) && gc.Ismem(t) {
-		goto hard
-	}
-
-	// convert constant to desired type
-	if f.Op == gc.OLITERAL {
-		var con gc.Node
-		f.Convconst(&con, t.Type)
-		f = &con
-		ft = gc.Simsimtype(con.Type)
-
-		// some constants can't move directly to memory.
-		if gc.Ismem(t) {
-			// float constants come from memory.
-			if gc.Isfloat[tt] {
-				goto hard
-			}
-		}
-	}
-
-	// value -> value copy, only one memory operand.
-	// figure out the instruction to use.
-	// break out of switch for one-instruction gins.
-	// goto rdst for "destination must be register".
-	// goto hard for "convert to cvt type first".
-	// otherwise handle and return.
-
-	switch uint32(ft)<<16 | uint32(tt) {
-	default:
-		if gc.Thearch.Use387 {
-			floatmove_387(f, t)
-		} else {
-			floatmove_sse(f, t)
-		}
-		return
-
-		// float to very long integer.
-	case gc.TFLOAT32<<16 | gc.TINT64,
-		gc.TFLOAT64<<16 | gc.TINT64:
-		if f.Op == gc.OREGISTER {
-			cvt = f.Type
-			goto hardmem
-		}
-
-		var r1 gc.Node
-		gc.Nodreg(&r1, gc.Types[ft], x86.REG_F0)
-		if ft == gc.TFLOAT32 {
-			gins(x86.AFMOVF, f, &r1)
-		} else {
-			gins(x86.AFMOVD, f, &r1)
-		}
-
-		// set round to zero mode during conversion
-		var t1 gc.Node
-		memname(&t1, gc.Types[gc.TUINT16])
-
-		var t2 gc.Node
-		memname(&t2, gc.Types[gc.TUINT16])
-		gins(x86.AFSTCW, nil, &t1)
-		gins(x86.AMOVW, ncon(0xf7f), &t2)
-		gins(x86.AFLDCW, &t2, nil)
-		if tt == gc.TINT16 {
-			gins(x86.AFMOVWP, &r1, t)
-		} else if tt == gc.TINT32 {
-			gins(x86.AFMOVLP, &r1, t)
-		} else {
-			gins(x86.AFMOVVP, &r1, t)
-		}
-		gins(x86.AFLDCW, &t1, nil)
-		return
-
-	case gc.TFLOAT32<<16 | gc.TUINT64,
-		gc.TFLOAT64<<16 | gc.TUINT64:
-		if !gc.Ismem(f) {
-			cvt = f.Type
-			goto hardmem
-		}
-
-		bignodes()
-		var f0 gc.Node
-		gc.Nodreg(&f0, gc.Types[ft], x86.REG_F0)
-		var f1 gc.Node
-		gc.Nodreg(&f1, gc.Types[ft], x86.REG_F0+1)
-		var ax gc.Node
-		gc.Nodreg(&ax, gc.Types[gc.TUINT16], x86.REG_AX)
-
-		if ft == gc.TFLOAT32 {
-			gins(x86.AFMOVF, f, &f0)
-		} else {
-			gins(x86.AFMOVD, f, &f0)
-		}
-
-		// if 0 > v { answer = 0 }
-		gins(x86.AFMOVD, &zerof, &f0)
-		gins(x86.AFUCOMP, &f0, &f1)
-		gins(x86.AFSTSW, nil, &ax)
-		gins(x86.ASAHF, nil, nil)
-		p1 := gc.Gbranch(optoas(gc.OGT, gc.Types[tt]), nil, 0)
-
-		// if 1<<64 <= v { answer = 0 too }
-		gins(x86.AFMOVD, &two64f, &f0)
-
-		gins(x86.AFUCOMP, &f0, &f1)
-		gins(x86.AFSTSW, nil, &ax)
-		gins(x86.ASAHF, nil, nil)
-		p2 := gc.Gbranch(optoas(gc.OGT, gc.Types[tt]), nil, 0)
-		gc.Patch(p1, gc.Pc)
-		gins(x86.AFMOVVP, &f0, t) // don't care about t, but will pop the stack
-		var thi gc.Node
-		var tlo gc.Node
-		split64(t, &tlo, &thi)
-		gins(x86.AMOVL, ncon(0), &tlo)
-		gins(x86.AMOVL, ncon(0), &thi)
-		splitclean()
-		p1 = gc.Gbranch(obj.AJMP, nil, 0)
-		gc.Patch(p2, gc.Pc)
-
-		// in range; algorithm is:
-		//	if small enough, use native float64 -> int64 conversion.
-		//	otherwise, subtract 2^63, convert, and add it back.
-
-		// set round to zero mode during conversion
-		var t1 gc.Node
-		memname(&t1, gc.Types[gc.TUINT16])
-
-		var t2 gc.Node
-		memname(&t2, gc.Types[gc.TUINT16])
-		gins(x86.AFSTCW, nil, &t1)
-		gins(x86.AMOVW, ncon(0xf7f), &t2)
-		gins(x86.AFLDCW, &t2, nil)
-
-		// actual work
-		gins(x86.AFMOVD, &two63f, &f0)
-
-		gins(x86.AFUCOMP, &f0, &f1)
-		gins(x86.AFSTSW, nil, &ax)
-		gins(x86.ASAHF, nil, nil)
-		p2 = gc.Gbranch(optoas(gc.OLE, gc.Types[tt]), nil, 0)
-		gins(x86.AFMOVVP, &f0, t)
-		p3 := gc.Gbranch(obj.AJMP, nil, 0)
-		gc.Patch(p2, gc.Pc)
-		gins(x86.AFMOVD, &two63f, &f0)
-		gins(x86.AFSUBDP, &f0, &f1)
-		gins(x86.AFMOVVP, &f0, t)
-		split64(t, &tlo, &thi)
-		gins(x86.AXORL, ncon(0x80000000), &thi) // + 2^63
-		gc.Patch(p3, gc.Pc)
-		splitclean()
-
-		// restore rounding mode
-		gins(x86.AFLDCW, &t1, nil)
-
-		gc.Patch(p1, gc.Pc)
-		return
-
-		/*
-		 * integer to float
-		 */
-	case gc.TINT64<<16 | gc.TFLOAT32,
-		gc.TINT64<<16 | gc.TFLOAT64:
-		if t.Op == gc.OREGISTER {
-			goto hardmem
-		}
-		var f0 gc.Node
-		gc.Nodreg(&f0, t.Type, x86.REG_F0)
-		gins(x86.AFMOVV, f, &f0)
-		if tt == gc.TFLOAT32 {
-			gins(x86.AFMOVFP, &f0, t)
-		} else {
-			gins(x86.AFMOVDP, &f0, t)
-		}
-		return
-
-		// algorithm is:
-	//	if small enough, use native int64 -> float64 conversion.
-	//	otherwise, halve (rounding to odd?), convert, and double.
-	case gc.TUINT64<<16 | gc.TFLOAT32,
-		gc.TUINT64<<16 | gc.TFLOAT64:
-		var ax gc.Node
-		gc.Nodreg(&ax, gc.Types[gc.TUINT32], x86.REG_AX)
-
-		var dx gc.Node
-		gc.Nodreg(&dx, gc.Types[gc.TUINT32], x86.REG_DX)
-		var cx gc.Node
-		gc.Nodreg(&cx, gc.Types[gc.TUINT32], x86.REG_CX)
-		var t1 gc.Node
-		gc.Tempname(&t1, f.Type)
-		var tlo gc.Node
-		var thi gc.Node
-		split64(&t1, &tlo, &thi)
-		gmove(f, &t1)
-		gins(x86.ACMPL, &thi, ncon(0))
-		p1 := gc.Gbranch(x86.AJLT, nil, 0)
-
-		// native
-		var r1 gc.Node
-		gc.Nodreg(&r1, gc.Types[tt], x86.REG_F0)
-
-		gins(x86.AFMOVV, &t1, &r1)
-		if tt == gc.TFLOAT32 {
-			gins(x86.AFMOVFP, &r1, t)
-		} else {
-			gins(x86.AFMOVDP, &r1, t)
-		}
-		p2 := gc.Gbranch(obj.AJMP, nil, 0)
-
-		// simulated
-		gc.Patch(p1, gc.Pc)
-
-		gmove(&tlo, &ax)
-		gmove(&thi, &dx)
-		p1 = gins(x86.ASHRL, ncon(1), &ax)
-		p1.From.Index = x86.REG_DX // double-width shift DX -> AX
-		p1.From.Scale = 0
-		gins(x86.AMOVL, ncon(0), &cx)
-		gins(x86.ASETCC, nil, &cx)
-		gins(x86.AORL, &cx, &ax)
-		gins(x86.ASHRL, ncon(1), &dx)
-		gmove(&dx, &thi)
-		gmove(&ax, &tlo)
-		gc.Nodreg(&r1, gc.Types[tt], x86.REG_F0)
-		var r2 gc.Node
-		gc.Nodreg(&r2, gc.Types[tt], x86.REG_F0+1)
-		gins(x86.AFMOVV, &t1, &r1)
-		gins(x86.AFMOVD, &r1, &r1)
-		gins(x86.AFADDDP, &r1, &r2)
-		if tt == gc.TFLOAT32 {
-			gins(x86.AFMOVFP, &r1, t)
-		} else {
-			gins(x86.AFMOVDP, &r1, t)
-		}
-		gc.Patch(p2, gc.Pc)
-		splitclean()
-		return
-	}
-
-	// requires register intermediate
-hard:
-	gc.Regalloc(&r1, cvt, t)
-
-	gmove(f, &r1)
-	gmove(&r1, t)
-	gc.Regfree(&r1)
-	return
-
-	// requires memory intermediate
-hardmem:
-	gc.Tempname(&r1, cvt)
-
-	gmove(f, &r1)
-	gmove(&r1, t)
-	return
-}
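The "halve (rounding to odd?), convert, and double" route for uint64 values with the top bit set is the standard guard against double rounding: ORing the lost low bit back into bit 0 lets the final float64 rounding see whether anything nonzero was shifted out. A value-level sketch of the algorithm (the emitted code above does the same with a double-width shift and SETCC, not with this Go):

	// uint64ToFloat64: no unsigned conversion instruction exists,
	// so use the signed one when the value fits, otherwise halve
	// with round-to-odd, convert, and double.
	func uint64ToFloat64(v uint64) float64 {
		if int64(v) >= 0 {
			return float64(int64(v)) // fits in int64: native path
		}
		half := v>>1 | v&1 // round to odd: remember the lost bit
		return 2 * float64(int64(half))
	}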
-
-func floatmove_387(f *gc.Node, t *gc.Node) {
-	var r1 gc.Node
-	var a obj.As
-
-	ft := gc.Simsimtype(f.Type)
-	tt := gc.Simsimtype(t.Type)
-	cvt := t.Type
-
-	switch uint32(ft)<<16 | uint32(tt) {
-	default:
-		goto fatal
-
-		/*
-		 * float to integer
-		 */
-	case gc.TFLOAT32<<16 | gc.TINT16,
-		gc.TFLOAT32<<16 | gc.TINT32,
-		gc.TFLOAT32<<16 | gc.TINT64,
-		gc.TFLOAT64<<16 | gc.TINT16,
-		gc.TFLOAT64<<16 | gc.TINT32,
-		gc.TFLOAT64<<16 | gc.TINT64:
-		if t.Op == gc.OREGISTER {
-			goto hardmem
-		}
-		var r1 gc.Node
-		gc.Nodreg(&r1, gc.Types[ft], x86.REG_F0)
-		if f.Op != gc.OREGISTER {
-			if ft == gc.TFLOAT32 {
-				gins(x86.AFMOVF, f, &r1)
-			} else {
-				gins(x86.AFMOVD, f, &r1)
-			}
-		}
-
-		// set round to zero mode during conversion
-		var t1 gc.Node
-		memname(&t1, gc.Types[gc.TUINT16])
-
-		var t2 gc.Node
-		memname(&t2, gc.Types[gc.TUINT16])
-		gins(x86.AFSTCW, nil, &t1)
-		gins(x86.AMOVW, ncon(0xf7f), &t2)
-		gins(x86.AFLDCW, &t2, nil)
-		if tt == gc.TINT16 {
-			gins(x86.AFMOVWP, &r1, t)
-		} else if tt == gc.TINT32 {
-			gins(x86.AFMOVLP, &r1, t)
-		} else {
-			gins(x86.AFMOVVP, &r1, t)
-		}
-		gins(x86.AFLDCW, &t1, nil)
-		return
-
-		// convert via int32.
-	case gc.TFLOAT32<<16 | gc.TINT8,
-		gc.TFLOAT32<<16 | gc.TUINT16,
-		gc.TFLOAT32<<16 | gc.TUINT8,
-		gc.TFLOAT64<<16 | gc.TINT8,
-		gc.TFLOAT64<<16 | gc.TUINT16,
-		gc.TFLOAT64<<16 | gc.TUINT8:
-		var t1 gc.Node
-		gc.Tempname(&t1, gc.Types[gc.TINT32])
-
-		gmove(f, &t1)
-		switch tt {
-		default:
-			gc.Fatalf("gmove %v", t)
-
-		case gc.TINT8:
-			gins(x86.ACMPL, &t1, ncon(-0x80&(1<<32-1)))
-			p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TINT32]), nil, -1)
-			gins(x86.ACMPL, &t1, ncon(0x7f))
-			p2 := gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TINT32]), nil, -1)
-			p3 := gc.Gbranch(obj.AJMP, nil, 0)
-			gc.Patch(p1, gc.Pc)
-			gc.Patch(p2, gc.Pc)
-			gmove(ncon(-0x80&(1<<32-1)), &t1)
-			gc.Patch(p3, gc.Pc)
-			gmove(&t1, t)
-
-		case gc.TUINT8:
-			gins(x86.ATESTL, ncon(0xffffff00), &t1)
-			p1 := gc.Gbranch(x86.AJEQ, nil, +1)
-			gins(x86.AMOVL, ncon(0), &t1)
-			gc.Patch(p1, gc.Pc)
-			gmove(&t1, t)
-
-		case gc.TUINT16:
-			gins(x86.ATESTL, ncon(0xffff0000), &t1)
-			p1 := gc.Gbranch(x86.AJEQ, nil, +1)
-			gins(x86.AMOVL, ncon(0), &t1)
-			gc.Patch(p1, gc.Pc)
-			gmove(&t1, t)
-		}
-
-		return
-
-		// convert via int64.
-	case gc.TFLOAT32<<16 | gc.TUINT32,
-		gc.TFLOAT64<<16 | gc.TUINT32:
-		cvt = gc.Types[gc.TINT64]
-
-		goto hardmem
-
-		/*
-		 * integer to float
-		 */
-	case gc.TINT16<<16 | gc.TFLOAT32,
-		gc.TINT16<<16 | gc.TFLOAT64,
-		gc.TINT32<<16 | gc.TFLOAT32,
-		gc.TINT32<<16 | gc.TFLOAT64,
-		gc.TINT64<<16 | gc.TFLOAT32,
-		gc.TINT64<<16 | gc.TFLOAT64:
-		if t.Op != gc.OREGISTER {
-			goto hard
-		}
-		if f.Op == gc.OREGISTER {
-			cvt = f.Type
-			goto hardmem
-		}
-
-		switch ft {
-		case gc.TINT16:
-			a = x86.AFMOVW
-
-		case gc.TINT32:
-			a = x86.AFMOVL
-
-		default:
-			a = x86.AFMOVV
-		}
-
-		// convert via int32 memory
-	case gc.TINT8<<16 | gc.TFLOAT32,
-		gc.TINT8<<16 | gc.TFLOAT64,
-		gc.TUINT16<<16 | gc.TFLOAT32,
-		gc.TUINT16<<16 | gc.TFLOAT64,
-		gc.TUINT8<<16 | gc.TFLOAT32,
-		gc.TUINT8<<16 | gc.TFLOAT64:
-		cvt = gc.Types[gc.TINT32]
-
-		goto hardmem
-
-		// convert via int64 memory
-	case gc.TUINT32<<16 | gc.TFLOAT32,
-		gc.TUINT32<<16 | gc.TFLOAT64:
-		cvt = gc.Types[gc.TINT64]
-
-		goto hardmem
-
-		// The way the code generator uses floating-point
-	// registers, a move from F0 to F0 is intended as a no-op.
-	// On the x86, it's not: it pushes a second copy of F0
-	// on the floating point stack. So toss it away here.
-	// Also, F0 is the *only* register we ever evaluate
-	// into, so we should only see register/register as F0/F0.
-	/*
-	 * float to float
-	 */
-	case gc.TFLOAT32<<16 | gc.TFLOAT32,
-		gc.TFLOAT64<<16 | gc.TFLOAT64:
-		if gc.Ismem(f) && gc.Ismem(t) {
-			goto hard
-		}
-		if f.Op == gc.OREGISTER && t.Op == gc.OREGISTER {
-			if f.Reg != x86.REG_F0 || t.Reg != x86.REG_F0 {
-				goto fatal
-			}
-			return
-		}
-
-		a = x86.AFMOVF
-		if ft == gc.TFLOAT64 {
-			a = x86.AFMOVD
-		}
-		if gc.Ismem(t) {
-			if f.Op != gc.OREGISTER || f.Reg != x86.REG_F0 {
-				gc.Fatalf("gmove %v", f)
-			}
-			a = x86.AFMOVFP
-			if ft == gc.TFLOAT64 {
-				a = x86.AFMOVDP
-			}
-		}
-
-	case gc.TFLOAT32<<16 | gc.TFLOAT64:
-		if gc.Ismem(f) && gc.Ismem(t) {
-			goto hard
-		}
-		if f.Op == gc.OREGISTER && t.Op == gc.OREGISTER {
-			if f.Reg != x86.REG_F0 || t.Reg != x86.REG_F0 {
-				goto fatal
-			}
-			return
-		}
-
-		if f.Op == gc.OREGISTER {
-			gins(x86.AFMOVDP, f, t)
-		} else {
-			gins(x86.AFMOVF, f, t)
-		}
-		return
-
-	case gc.TFLOAT64<<16 | gc.TFLOAT32:
-		if gc.Ismem(f) && gc.Ismem(t) {
-			goto hard
-		}
-		if f.Op == gc.OREGISTER && t.Op == gc.OREGISTER {
-			var r1 gc.Node
-			gc.Tempname(&r1, gc.Types[gc.TFLOAT32])
-			gins(x86.AFMOVFP, f, &r1)
-			gins(x86.AFMOVF, &r1, t)
-			return
-		}
-
-		if f.Op == gc.OREGISTER {
-			gins(x86.AFMOVFP, f, t)
-		} else {
-			gins(x86.AFMOVD, f, t)
-		}
-		return
-	}
-
-	gins(a, f, t)
-	return
-
-	// requires register intermediate
-hard:
-	gc.Regalloc(&r1, cvt, t)
-
-	gmove(f, &r1)
-	gmove(&r1, t)
-	gc.Regfree(&r1)
-	return
-
-	// requires memory intermediate
-hardmem:
-	gc.Tempname(&r1, cvt)
-
-	gmove(f, &r1)
-	gmove(&r1, t)
-	return
-
-	// should not happen
-fatal:
-	gc.Fatalf("gmove %L -> %L", f, t)
-
-	return
-}
-
-func floatmove_sse(f *gc.Node, t *gc.Node) {
-	var r1 gc.Node
-	var cvt *gc.Type
-	var a obj.As
-
-	ft := gc.Simsimtype(f.Type)
-	tt := gc.Simsimtype(t.Type)
-
-	switch uint32(ft)<<16 | uint32(tt) {
-	// should not happen
-	default:
-		gc.Fatalf("gmove %v -> %v", f, t)
-
-		return
-
-		// convert via int32.
-	/*
-	 * float to integer
-	 */
-	case gc.TFLOAT32<<16 | gc.TINT16,
-		gc.TFLOAT32<<16 | gc.TINT8,
-		gc.TFLOAT32<<16 | gc.TUINT16,
-		gc.TFLOAT32<<16 | gc.TUINT8,
-		gc.TFLOAT64<<16 | gc.TINT16,
-		gc.TFLOAT64<<16 | gc.TINT8,
-		gc.TFLOAT64<<16 | gc.TUINT16,
-		gc.TFLOAT64<<16 | gc.TUINT8:
-		cvt = gc.Types[gc.TINT32]
-
-		goto hard
-
-		// convert via int64.
-	case gc.TFLOAT32<<16 | gc.TUINT32,
-		gc.TFLOAT64<<16 | gc.TUINT32:
-		cvt = gc.Types[gc.TINT64]
-
-		goto hardmem
-
-	case gc.TFLOAT32<<16 | gc.TINT32:
-		a = x86.ACVTTSS2SL
-		goto rdst
-
-	case gc.TFLOAT64<<16 | gc.TINT32:
-		a = x86.ACVTTSD2SL
-		goto rdst
-
-		// convert via int32 memory
-	/*
-	 * integer to float
-	 */
-	case gc.TINT8<<16 | gc.TFLOAT32,
-		gc.TINT8<<16 | gc.TFLOAT64,
-		gc.TINT16<<16 | gc.TFLOAT32,
-		gc.TINT16<<16 | gc.TFLOAT64,
-		gc.TUINT16<<16 | gc.TFLOAT32,
-		gc.TUINT16<<16 | gc.TFLOAT64,
-		gc.TUINT8<<16 | gc.TFLOAT32,
-		gc.TUINT8<<16 | gc.TFLOAT64:
-		cvt = gc.Types[gc.TINT32]
-
-		goto hard
-
-		// convert via int64 memory
-	case gc.TUINT32<<16 | gc.TFLOAT32,
-		gc.TUINT32<<16 | gc.TFLOAT64:
-		cvt = gc.Types[gc.TINT64]
-
-		goto hardmem
-
-	case gc.TINT32<<16 | gc.TFLOAT32:
-		a = x86.ACVTSL2SS
-		goto rdst
-
-	case gc.TINT32<<16 | gc.TFLOAT64:
-		a = x86.ACVTSL2SD
-		goto rdst
-
-		/*
-		 * float to float
-		 */
-	case gc.TFLOAT32<<16 | gc.TFLOAT32:
-		a = x86.AMOVSS
-
-	case gc.TFLOAT64<<16 | gc.TFLOAT64:
-		a = x86.AMOVSD
-
-	case gc.TFLOAT32<<16 | gc.TFLOAT64:
-		a = x86.ACVTSS2SD
-		goto rdst
-
-	case gc.TFLOAT64<<16 | gc.TFLOAT32:
-		a = x86.ACVTSD2SS
-		goto rdst
-	}
-
-	gins(a, f, t)
-	return
-
-	// requires register intermediate
-hard:
-	gc.Regalloc(&r1, cvt, t)
-
-	gmove(f, &r1)
-	gmove(&r1, t)
-	gc.Regfree(&r1)
-	return
-
-	// requires memory intermediate
-hardmem:
-	gc.Tempname(&r1, cvt)
-
-	gmove(f, &r1)
-	gmove(&r1, t)
-	return
-
-	// requires register destination
-rdst:
-	gc.Regalloc(&r1, t.Type, t)
-
-	gins(a, f, &r1)
-	gmove(&r1, t)
-	gc.Regfree(&r1)
-	return
-}
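One pattern in the SSE table deserves a note: SSE has no unsigned conversions, so float-to-uint32 detours through a signed int64 memory temporary (cvt = TINT64, hardmem) and uint32-to-float takes the same detour in reverse. The value-level equivalent, as a sketch (valid for in-range inputs):

	// float64 -> uint32: the truncating conversions produce signed
	// results, so go through a signed 64-bit intermediate.
	func f64ToUint32(f float64) uint32 { return uint32(int64(f)) }

	// uint32 -> float64: widen to the signed type that covers all
	// of uint32, then use the signed conversion.
	func uint32ToF64(v uint32) float64 { return float64(int64(v)) }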
-
 func samaddr(f *gc.Node, t *gc.Node) bool {
 	if f.Op != t.Op {
 		return false
@@ -1816,29 +139,3 @@
 	gc.Nodreg(&reg, gc.Types[gc.TINT], x86.REG_AX)
 	gins(x86.AXCHGL, &reg, &reg)
 }
-
-func dotaddable(n *gc.Node, n1 *gc.Node) bool {
-	if n.Op != gc.ODOT {
-		return false
-	}
-
-	var oary [10]int64
-	var nn *gc.Node
-	o := gc.Dotoffset(n, oary[:], &nn)
-	if nn != nil && nn.Addable && o == 1 && oary[0] >= 0 {
-		*n1 = *nn
-		n1.Type = n.Type
-		n1.Xoffset += oary[0]
-		return true
-	}
-
-	return false
-}
-
-func sudoclean() {
-}
-
-func sudoaddable(as obj.As, n *gc.Node, a *obj.Addr) bool {
-	*a = obj.Addr{}
-	return false
-}
diff --git a/src/cmd/compile/internal/x86/peep.go b/src/cmd/compile/internal/x86/peep.go
deleted file mode 100644
index 6a07fe2..0000000
--- a/src/cmd/compile/internal/x86/peep.go
+++ /dev/null
@@ -1,807 +0,0 @@
-// Derived from Inferno utils/6c/peep.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/6c/peep.c
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package x86
-
-import (
-	"cmd/compile/internal/gc"
-	"cmd/internal/obj"
-	"cmd/internal/obj/x86"
-	"fmt"
-)
-
-const (
-	REGEXT      = 0
-	exregoffset = x86.REG_DI
-)
-
-var gactive uint32
-
-// needc reports whether the carry bit is used by any instruction
-// from p onward before it is next set or killed.
-func needc(p *obj.Prog) bool {
-	for p != nil {
-		if p.Info.Flags&gc.UseCarry != 0 {
-			return true
-		}
-		if p.Info.Flags&(gc.SetCarry|gc.KillCarry) != 0 {
-			return false
-		}
-		p = p.Link
-	}
-
-	return false
-}
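needc is what keeps the ADD/SUB ±1 to INC/DEC rewrite below honest: ADD and SUB write the carry flag, but INC and DEC leave it untouched, so the rewrite is safe only if nothing reads the carry before the next instruction that sets or kills it. The classic sequence that must not be rewritten looks roughly like:

	ADDL $1, AX   // sets CF on wraparound of the low word
	ADCL $0, DX   // consumes CF; turning the ADDL into INCL breaks this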
-
-func rnops(r *gc.Flow) *gc.Flow {
-	if r != nil {
-		var p *obj.Prog
-		var r1 *gc.Flow
-		for {
-			p = r.Prog
-			if p.As != obj.ANOP || p.From.Type != obj.TYPE_NONE || p.To.Type != obj.TYPE_NONE {
-				break
-			}
-			r1 = gc.Uniqs(r)
-			if r1 == nil {
-				break
-			}
-			r = r1
-		}
-	}
-
-	return r
-}
-
-func peep(firstp *obj.Prog) {
-	g := gc.Flowstart(firstp, nil)
-	if g == nil {
-		return
-	}
-	gactive = 0
-
-	// byte, word arithmetic elimination.
-	elimshortmov(g)
-
-	// constant propagation
-	// find MOV $con,R followed by
-	// another MOV $con,R without
-	// setting R in the interim
-	var p *obj.Prog
-	for r := g.Start; r != nil; r = r.Link {
-		p = r.Prog
-		switch p.As {
-		case x86.ALEAL:
-			if regtyp(&p.To) {
-				if p.From.Sym != nil {
-					if p.From.Index == x86.REG_NONE {
-						conprop(r)
-					}
-				}
-			}
-
-		case x86.AMOVB,
-			x86.AMOVW,
-			x86.AMOVL,
-			x86.AMOVSS,
-			x86.AMOVSD:
-			if regtyp(&p.To) {
-				if p.From.Type == obj.TYPE_CONST || p.From.Type == obj.TYPE_FCONST {
-					conprop(r)
-				}
-			}
-		}
-	}
-
-	var r1 *gc.Flow
-	var p1 *obj.Prog
-	var r *gc.Flow
-	var t int
-loop1:
-	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
-		gc.Dumpit("loop1", g.Start, 0)
-	}
-
-	t = 0
-	for r = g.Start; r != nil; r = r.Link {
-		p = r.Prog
-		switch p.As {
-		case x86.AMOVL,
-			x86.AMOVSS,
-			x86.AMOVSD:
-			if regtyp(&p.To) {
-				if regtyp(&p.From) {
-					if copyprop(g, r) {
-						excise(r)
-						t++
-					} else if subprop(r) && copyprop(g, r) {
-						excise(r)
-						t++
-					}
-				}
-			}
-
-		case x86.AMOVBLZX,
-			x86.AMOVWLZX,
-			x86.AMOVBLSX,
-			x86.AMOVWLSX:
-			if regtyp(&p.To) {
-				r1 = rnops(gc.Uniqs(r))
-				if r1 != nil {
-					p1 = r1.Prog
-					if p.As == p1.As && p.To.Type == p1.From.Type && p.To.Reg == p1.From.Reg {
-						p1.As = x86.AMOVL
-						t++
-					}
-				}
-			}
-
-		case x86.AADDL,
-			x86.AADDW:
-			if p.From.Type != obj.TYPE_CONST || needc(p.Link) {
-				break
-			}
-			if p.From.Offset == -1 {
-				if p.As == x86.AADDL {
-					p.As = x86.ADECL
-				} else {
-					p.As = x86.ADECW
-				}
-				p.From = obj.Addr{}
-				break
-			}
-
-			if p.From.Offset == 1 {
-				if p.As == x86.AADDL {
-					p.As = x86.AINCL
-				} else {
-					p.As = x86.AINCW
-				}
-				p.From = obj.Addr{}
-				break
-			}
-
-		case x86.ASUBL,
-			x86.ASUBW:
-			if p.From.Type != obj.TYPE_CONST || needc(p.Link) {
-				break
-			}
-			if p.From.Offset == -1 {
-				if p.As == x86.ASUBL {
-					p.As = x86.AINCL
-				} else {
-					p.As = x86.AINCW
-				}
-				p.From = obj.Addr{}
-				break
-			}
-
-			if p.From.Offset == 1 {
-				if p.As == x86.ASUBL {
-					p.As = x86.ADECL
-				} else {
-					p.As = x86.ADECW
-				}
-				p.From = obj.Addr{}
-				break
-			}
-		}
-	}
-
-	if t != 0 {
-		goto loop1
-	}
-
-	// MOVSD removal.
-	// We never use packed registers, so a MOVSD between registers
-	// can be replaced by MOVAPD, which moves the pair of float64s
-	// instead of just the lower one. We only use the lower one, but
-	// the processor can do better if we do moves using both.
-	for r := g.Start; r != nil; r = r.Link {
-		p = r.Prog
-		if p.As == x86.AMOVSD {
-			if regtyp(&p.From) {
-				if regtyp(&p.To) {
-					p.As = x86.AMOVAPD
-				}
-			}
-		}
-	}
-
-	gc.Flowend(g)
-}
-
-func excise(r *gc.Flow) {
-	p := r.Prog
-	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
-		fmt.Printf("%v ===delete===\n", p)
-	}
-
-	obj.Nopout(p)
-
-	gc.Ostats.Ndelmov++
-}
-
-func regtyp(a *obj.Addr) bool {
-	if gc.Ctxt.Flag_shared && a.Type == obj.TYPE_REG && a.Reg == x86.REG_CX {
-		// don't propagate CX, it is used implicitly by PIC global references
-		return false
-	}
-	return a.Type == obj.TYPE_REG && (x86.REG_AX <= a.Reg && a.Reg <= x86.REG_DI || x86.REG_X0 <= a.Reg && a.Reg <= x86.REG_X7)
-}
-
-// movb elimination.
-// movb is simulated by the linker
-// when a register other than ax, bx, cx, dx
-// is used, so rewrite to other instructions
-// when possible. a movb into a register
-// can smash the entire 32-bit register without
-// causing any trouble.
-func elimshortmov(g *gc.Graph) {
-	var p *obj.Prog
-
-	for r := g.Start; r != nil; r = r.Link {
-		p = r.Prog
-		if regtyp(&p.To) {
-			switch p.As {
-			case x86.AINCB,
-				x86.AINCW:
-				p.As = x86.AINCL
-
-			case x86.ADECB,
-				x86.ADECW:
-				p.As = x86.ADECL
-
-			case x86.ANEGB,
-				x86.ANEGW:
-				p.As = x86.ANEGL
-
-			case x86.ANOTB,
-				x86.ANOTW:
-				p.As = x86.ANOTL
-			}
-
-			if regtyp(&p.From) || p.From.Type == obj.TYPE_CONST {
-				// move or arithmetic into partial register.
-				// from another register or constant can be movl.
-				// we don't switch to 32-bit arithmetic if it can
-				// change how the carry bit is set (and the carry bit is needed).
-				switch p.As {
-				case x86.AMOVB,
-					x86.AMOVW:
-					p.As = x86.AMOVL
-
-				case x86.AADDB,
-					x86.AADDW:
-					if !needc(p.Link) {
-						p.As = x86.AADDL
-					}
-
-				case x86.ASUBB,
-					x86.ASUBW:
-					if !needc(p.Link) {
-						p.As = x86.ASUBL
-					}
-
-				case x86.AMULB,
-					x86.AMULW:
-					p.As = x86.AMULL
-
-				case x86.AIMULB,
-					x86.AIMULW:
-					p.As = x86.AIMULL
-
-				case x86.AANDB,
-					x86.AANDW:
-					p.As = x86.AANDL
-
-				case x86.AORB,
-					x86.AORW:
-					p.As = x86.AORL
-
-				case x86.AXORB,
-					x86.AXORW:
-					p.As = x86.AXORL
-
-				case x86.ASHLB,
-					x86.ASHLW:
-					p.As = x86.ASHLL
-				}
-			} else {
-				// explicit zero extension
-				switch p.As {
-				case x86.AMOVB:
-					p.As = x86.AMOVBLZX
-
-				case x86.AMOVW:
-					p.As = x86.AMOVWLZX
-				}
-			}
-		}
-	}
-}
-
-/*
- * the idea is to substitute
- * one register for another
- * from one MOV to another
- *	MOV	a, R0
- *	ADD	b, R0	/ no use of R1
- *	MOV	R0, R1
- * would be converted to
- *	MOV	a, R1
- *	ADD	b, R1
- *	MOV	R1, R0
- * hopefully, then the former or latter MOV
- * will be eliminated by copy propagation.
- */
-func subprop(r0 *gc.Flow) bool {
-	p := r0.Prog
-	v1 := &p.From
-	if !regtyp(v1) {
-		return false
-	}
-	v2 := &p.To
-	if !regtyp(v2) {
-		return false
-	}
-	for r := gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
-		if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
-			fmt.Printf("\t? %v\n", r.Prog)
-		}
-		if gc.Uniqs(r) == nil {
-			break
-		}
-		p = r.Prog
-		if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
-			continue
-		}
-		if p.Info.Flags&gc.Call != 0 {
-			return false
-		}
-
-		if p.Info.Reguse|p.Info.Regset != 0 {
-			return false
-		}
-
-		if (p.Info.Flags&gc.Move != 0) && (p.Info.Flags&(gc.SizeL|gc.SizeQ|gc.SizeF|gc.SizeD) != 0) && p.To.Type == v1.Type && p.To.Reg == v1.Reg {
-			copysub(&p.To, v1, v2, true)
-			if gc.Debug['P'] != 0 {
-				fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
-				if p.From.Type == v2.Type && p.From.Reg == v2.Reg {
-					fmt.Printf(" excise")
-				}
-				fmt.Printf("\n")
-			}
-
-			for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
-				p = r.Prog
-				copysub(&p.From, v1, v2, true)
-				copysub(&p.To, v1, v2, true)
-				if gc.Debug['P'] != 0 {
-					fmt.Printf("%v\n", r.Prog)
-				}
-			}
-
-			t := int(v1.Reg)
-			v1.Reg = v2.Reg
-			v2.Reg = int16(t)
-			if gc.Debug['P'] != 0 {
-				fmt.Printf("%v last\n", r.Prog)
-			}
-			return true
-		}
-
-		if copyau(&p.From, v2) || copyau(&p.To, v2) {
-			break
-		}
-		if copysub(&p.From, v1, v2, false) || copysub(&p.To, v1, v2, false) {
-			break
-		}
-	}
-
-	return false
-}
-
-/*
- * The idea is to remove redundant copies.
- *	v1->v2	F=0
- *	(use v2	s/v2/v1/)*
- *	set v1	F=1
- *	use v2	return fail
- *	-----------------
- *	v1->v2	F=0
- *	(use v2	s/v2/v1/)*
- *	set v1	F=1
- *	set v2	return success
- */
-func copyprop(g *gc.Graph, r0 *gc.Flow) bool {
-	p := r0.Prog
-	v1 := &p.From
-	v2 := &p.To
-	if copyas(v1, v2) {
-		return true
-	}
-	gactive++
-	return copy1(v1, v2, r0.S1, false)
-}
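A worked instance of the pattern (illustrative registers): given

	MOVL AX, BX    // the copy under test: v1=AX, v2=BX
	ADDL BX, CX    // use of BX, rewritten to ADDL AX, CX
	MOVL DX, BX    // BX set again: success

copy1 walks every path from the copy, substituting AX for each use of BX; once BX is overwritten on all paths before AX changes, the original MOVL is dead and peep excises it.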
-
-func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f bool) bool {
-	if uint32(r.Active) == gactive {
-		if gc.Debug['P'] != 0 {
-			fmt.Printf("act set; return 1\n")
-		}
-		return true
-	}
-
-	r.Active = int32(gactive)
-	if gc.Debug['P'] != 0 {
-		fmt.Printf("copy %v->%v f=%v\n", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), f)
-	}
-	for ; r != nil; r = r.S1 {
-		p := r.Prog
-		if gc.Debug['P'] != 0 {
-			fmt.Printf("%v", p)
-		}
-		if !f && gc.Uniqp(r) == nil {
-			f = true
-			if gc.Debug['P'] != 0 {
-				fmt.Printf("; merge; f=%v", f)
-			}
-		}
-
-		switch t := copyu(p, v2, nil); t {
-		case 2: /* rar, can't split */
-			if gc.Debug['P'] != 0 {
-				fmt.Printf("; %v rar; return 0\n", gc.Ctxt.Dconv(v2))
-			}
-			return false
-
-		case 3: /* set */
-			if gc.Debug['P'] != 0 {
-				fmt.Printf("; %v set; return 1\n", gc.Ctxt.Dconv(v2))
-			}
-			return true
-
-		case 1, /* used, substitute */
-			4: /* use and set */
-			if f {
-				if gc.Debug['P'] == 0 {
-					return false
-				}
-				if t == 4 {
-					fmt.Printf("; %v used+set and f=%v; return 0\n", gc.Ctxt.Dconv(v2), f)
-				} else {
-					fmt.Printf("; %v used and f=%v; return 0\n", gc.Ctxt.Dconv(v2), f)
-				}
-				return false
-			}
-
-			if copyu(p, v2, v1) != 0 {
-				if gc.Debug['P'] != 0 {
-					fmt.Printf("; sub fail; return 0\n")
-				}
-				return false
-			}
-
-			if gc.Debug['P'] != 0 {
-				fmt.Printf("; sub %v/%v", gc.Ctxt.Dconv(v2), gc.Ctxt.Dconv(v1))
-			}
-			if t == 4 {
-				if gc.Debug['P'] != 0 {
-					fmt.Printf("; %v used+set; return 1\n", gc.Ctxt.Dconv(v2))
-				}
-				return true
-			}
-		}
-
-		if !f {
-			t := copyu(p, v1, nil)
-			if t == 2 || t == 3 || t == 4 {
-				f = true
-				if gc.Debug['P'] != 0 {
-					fmt.Printf("; %v set and !f; f=%v", gc.Ctxt.Dconv(v1), f)
-				}
-			}
-		}
-
-		if gc.Debug['P'] != 0 {
-			fmt.Printf("\n")
-		}
-		if r.S2 != nil {
-			if !copy1(v1, v2, r.S2, f) {
-				return false
-			}
-		}
-	}
-	return true
-}
-
-/*
- * return
- * 1 if v only used (and substitute),
- * 2 if read-alter-rewrite
- * 3 if set
- * 4 if set and used
- * 0 otherwise (not touched)
- */
-func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
-	switch p.As {
-	case obj.AJMP:
-		if s != nil {
-			if copysub(&p.To, v, s, true) {
-				return 1
-			}
-			return 0
-		}
-
-		if copyau(&p.To, v) {
-			return 1
-		}
-		return 0
-
-	case obj.ARET:
-		if s != nil {
-			return 1
-		}
-		return 3
-
-	case obj.ACALL:
-		if REGEXT != 0 && v.Type == obj.TYPE_REG && v.Reg <= REGEXT && v.Reg > exregoffset {
-			return 2
-		}
-		if x86.REGARG >= 0 && v.Type == obj.TYPE_REG && v.Reg == x86.REGARG {
-			return 2
-		}
-		if v.Type == p.From.Type && v.Reg == p.From.Reg {
-			return 2
-		}
-
-		if s != nil {
-			if copysub(&p.To, v, s, true) {
-				return 1
-			}
-			return 0
-		}
-
-		if copyau(&p.To, v) {
-			return 4
-		}
-		return 3
-
-	case obj.ATEXT:
-		if x86.REGARG >= 0 && v.Type == obj.TYPE_REG && v.Reg == x86.REGARG {
-			return 3
-		}
-		return 0
-	}
-
-	if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
-		return 0
-	}
-
-	if (p.Info.Reguse|p.Info.Regset)&RtoB(int(v.Reg)) != 0 {
-		return 2
-	}
-
-	if p.Info.Flags&gc.LeftAddr != 0 {
-		if copyas(&p.From, v) {
-			return 2
-		}
-	}
-
-	if p.Info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightRead|gc.RightWrite {
-		if copyas(&p.To, v) {
-			return 2
-		}
-	}
-
-	if p.Info.Flags&gc.RightWrite != 0 {
-		if copyas(&p.To, v) {
-			if s != nil {
-				if copysub(&p.From, v, s, true) {
-					return 1
-				}
-				return 0
-			}
-			if copyau(&p.From, v) {
-				return 4
-			}
-			return 3
-		}
-	}
-
-	if p.Info.Flags&(gc.LeftAddr|gc.LeftRead|gc.LeftWrite|gc.RightAddr|gc.RightRead|gc.RightWrite) != 0 {
-		if s != nil {
-			if copysub(&p.From, v, s, true) {
-				return 1
-			}
-			if copysub(&p.To, v, s, true) {
-				return 1
-			}
-			return 0
-		}
-		if copyau(&p.From, v) {
-			return 1
-		}
-		if copyau(&p.To, v) {
-			return 1
-		}
-	}
-	return 0
-}
-
-/*
- * direct reference,
- * could be set/use depending on
- * semantics
- */
-func copyas(a *obj.Addr, v *obj.Addr) bool {
-	if x86.REG_AL <= a.Reg && a.Reg <= x86.REG_BL {
-		gc.Fatalf("use of byte register")
-	}
-	if x86.REG_AL <= v.Reg && v.Reg <= x86.REG_BL {
-		gc.Fatalf("use of byte register")
-	}
-
-	if a.Type != v.Type || a.Name != v.Name || a.Reg != v.Reg {
-		return false
-	}
-	if regtyp(v) {
-		return true
-	}
-	if (v.Type == obj.TYPE_MEM || v.Type == obj.TYPE_ADDR) && (v.Name == obj.NAME_AUTO || v.Name == obj.NAME_PARAM) {
-		if v.Offset == a.Offset {
-			return true
-		}
-	}
-	return false
-}
-
-func sameaddr(a *obj.Addr, v *obj.Addr) bool {
-	if a.Type != v.Type || a.Name != v.Name || a.Reg != v.Reg {
-		return false
-	}
-	if regtyp(v) {
-		return true
-	}
-	if (v.Type == obj.TYPE_MEM || v.Type == obj.TYPE_ADDR) && (v.Name == obj.NAME_AUTO || v.Name == obj.NAME_PARAM) {
-		if v.Offset == a.Offset {
-			return true
-		}
-	}
-	return false
-}
-
-/*
- * either direct or indirect
- */
-func copyau(a *obj.Addr, v *obj.Addr) bool {
-	if copyas(a, v) {
-		return true
-	}
-	if regtyp(v) {
-		if (a.Type == obj.TYPE_MEM || a.Type == obj.TYPE_ADDR) && a.Reg == v.Reg {
-			return true
-		}
-		if a.Index == v.Reg {
-			return true
-		}
-	}
-
-	return false
-}
-
-// copysub substitutes s for v in a.
-// copysub returns true on failure to substitute.
-// TODO(dfc) reverse this logic to return false on substitution failure.
-func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f bool) bool {
-	if copyas(a, v) {
-		if s.Reg >= x86.REG_AX && s.Reg <= x86.REG_DI || s.Reg >= x86.REG_X0 && s.Reg <= x86.REG_X7 {
-			if f {
-				a.Reg = s.Reg
-			}
-		}
-		return false
-	}
-
-	if regtyp(v) {
-		if (a.Type == obj.TYPE_MEM || a.Type == obj.TYPE_ADDR) && a.Reg == v.Reg {
-			if (s.Reg == x86.REG_BP) && a.Index != x86.REG_NONE {
-				return true /* can't use BP-base with index */
-			}
-			if f {
-				a.Reg = s.Reg
-			}
-		}
-
-		if a.Index == v.Reg {
-			if f {
-				a.Index = s.Reg
-			}
-		}
-	}
-	return false
-}
-
-func conprop(r0 *gc.Flow) {
-	var p *obj.Prog
-
-	p0 := r0.Prog
-	v0 := &p0.To
-	r := r0
-
-loop:
-	r = gc.Uniqs(r)
-	if r == nil || r == r0 {
-		return
-	}
-	if gc.Uniqp(r) == nil {
-		return
-	}
-
-	p = r.Prog
-	switch copyu(p, v0, nil) {
-	case 0, // miss
-		1: // use
-		goto loop
-
-	case 2, // rar
-		4: // use and set
-		break
-
-	case 3: // set
-		if p.As == p0.As &&
-			p.From.Type == p0.From.Type &&
-			p.From.Reg == p0.From.Reg &&
-			p.From.Node == p0.From.Node &&
-			p.From.Offset == p0.From.Offset &&
-			p.From.Scale == p0.From.Scale &&
-			p.From.Type == obj.TYPE_FCONST && p.From.Val.(float64) == p0.From.Val.(float64) &&
-			p.From.Index == p0.From.Index {
-			excise(r)
-			goto loop
-		}
-	}
-}
-
-func smallindir(a *obj.Addr, reg *obj.Addr) bool {
-	return regtyp(reg) && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && a.Index == x86.REG_NONE && 0 <= a.Offset && a.Offset < 4096
-}
-
-func stackaddr(a *obj.Addr) bool {
-	return a.Type == obj.TYPE_REG && a.Reg == x86.REG_SP
-}
diff --git a/src/cmd/compile/internal/x86/reg.go b/src/cmd/compile/internal/x86/reg.go
index 2d7bd22..af9da3f 100644
--- a/src/cmd/compile/internal/x86/reg.go
+++ b/src/cmd/compile/internal/x86/reg.go
@@ -31,57 +31,6 @@
 package x86
 
 import "cmd/internal/obj/x86"
-import "cmd/compile/internal/gc"
-
-const (
-	NREGVAR = 16 /* 8 integer + 8 floating */
-)
-
-var regname = []string{
-	".ax",
-	".cx",
-	".dx",
-	".bx",
-	".sp",
-	".bp",
-	".si",
-	".di",
-	".x0",
-	".x1",
-	".x2",
-	".x3",
-	".x4",
-	".x5",
-	".x6",
-	".x7",
-}
-
-func regnames(n *int) []string {
-	*n = NREGVAR
-	return regname
-}
-
-func excludedregs() uint64 {
-	if gc.Ctxt.Flag_shared {
-		return RtoB(x86.REG_SP) | RtoB(x86.REG_CX)
-	} else {
-		return RtoB(x86.REG_SP)
-	}
-}
-
-func doregbits(r int) uint64 {
-	b := uint64(0)
-	if r >= x86.REG_AX && r <= x86.REG_DI {
-		b |= RtoB(r)
-	} else if r >= x86.REG_AL && r <= x86.REG_BL {
-		b |= RtoB(r - x86.REG_AL + x86.REG_AX)
-	} else if r >= x86.REG_AH && r <= x86.REG_BH {
-		b |= RtoB(r - x86.REG_AH + x86.REG_AX)
-	} else if r >= x86.REG_X0 && r <= x86.REG_X0+7 {
-		b |= FtoB(r)
-	}
-	return b
-}
 
 func RtoB(r int) uint64 {
 	if r < x86.REG_AX || r > x86.REG_DI {
@@ -89,26 +38,3 @@
 	}
 	return 1 << uint(r-x86.REG_AX)
 }
-
-func BtoR(b uint64) int {
-	b &= 0xff
-	if b == 0 {
-		return 0
-	}
-	return gc.Bitno(b) + x86.REG_AX
-}
-
-func FtoB(f int) uint64 {
-	if f < x86.REG_X0 || f > x86.REG_X7 {
-		return 0
-	}
-	return 1 << uint(f-x86.REG_X0+8)
-}
-
-func BtoF(b uint64) int {
-	b &= 0xFF00
-	if b == 0 {
-		return 0
-	}
-	return gc.Bitno(b) - 8 + x86.REG_X0
-}
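Taken together, the kept RtoB and the deleted FtoB/BtoR/BtoF describe one 16-bit register bitmap, matching NREGVAR = 16 above:

	bits 0-7  : AX CX DX BX SP BP SI DI   (RtoB: 1 << (r - REG_AX))
	bits 8-15 : X0 .. X7                  (FtoB: 1 << (f - REG_X0 + 8))

BtoR and BtoF invert the mapping by masking their half of the word and taking the number of the lowest set bit via gc.Bitno.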