cmd/internal/obj: add As type for assembly opcodes

Introduce a dedicated obj.As type for assembler opcodes and thread it
through the compiler back ends, replacing the plain int (and the int16
stored in Prog.As) used until now. Opcodes are no longer
interchangeable with arbitrary integers, and the scattered int16(as)
conversions can be dropped.

Passes toolstash/buildall.

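A minimal sketch of the idea (the opcode names and the gins signature
below are illustrative only, assuming As is a small integer type
declared in cmd/internal/obj with the same int16 width Prog.As had
before):

	package main

	import "fmt"

	// As denotes an assembler opcode. As a distinct type, values
	// of type int no longer convert to it implicitly.
	type As int16

	// Hypothetical opcode values for illustration only.
	const (
		AXXX As = iota // invalid opcode
		AADDQ
		AMOVQ
	)

	// gins stands in for the back ends' instruction emitters,
	// which now take an obj.As rather than an int.
	func gins(as As) {
		fmt.Println("emit", as)
	}

	func main() {
		gins(AMOVQ) // ok: a typed opcode

		n := int(AADDQ)
		_ = n
		// gins(n) no longer compiles: an int variable does
		// not convert implicitly to As, which is the point
		// of the new type.
	}
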
Fixes #14692.

Change-Id: I4352678d8251309f2b8b7793674c550fac948006
Reviewed-on: https://go-review.googlesource.com/20350
Reviewed-by: Dave Cheney <dave@cheney.net>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
diff --git a/src/cmd/compile/internal/amd64/galign.go b/src/cmd/compile/internal/amd64/galign.go
index 376fdf9..3491bb9 100644
--- a/src/cmd/compile/internal/amd64/galign.go
+++ b/src/cmd/compile/internal/amd64/galign.go
@@ -28,10 +28,10 @@
 var MAXWIDTH int64 = 1 << 50
 
 var (
-	addptr int = x86.AADDQ
-	movptr int = x86.AMOVQ
-	leaptr int = x86.ALEAQ
-	cmpptr int = x86.ACMPQ
+	addptr = x86.AADDQ
+	movptr = x86.AMOVQ
+	leaptr = x86.ALEAQ
+	cmpptr = x86.ACMPQ
 )
 
 func betypeinit() {
diff --git a/src/cmd/compile/internal/amd64/ggen.go b/src/cmd/compile/internal/amd64/ggen.go
index a6706ee..9721616 100644
--- a/src/cmd/compile/internal/amd64/ggen.go
+++ b/src/cmd/compile/internal/amd64/ggen.go
@@ -166,10 +166,10 @@
 	return p
 }
 
-func appendpp(p *obj.Prog, as int, ftype obj.AddrType, freg int, foffset int64, ttype obj.AddrType, treg int, toffset int64) *obj.Prog {
+func appendpp(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int, foffset int64, ttype obj.AddrType, treg int, toffset int64) *obj.Prog {
 	q := gc.Ctxt.NewProg()
 	gc.Clearp(q)
-	q.As = int16(as)
+	q.As = as
 	q.Lineno = p.Lineno
 	q.From.Type = ftype
 	q.From.Reg = int16(freg)
@@ -747,7 +747,7 @@
 		p2.Lineno = p.Lineno
 		p1.Pc = 9999
 		p2.Pc = 9999
-		p.As = int16(cmpptr)
+		p.As = cmpptr
 		p.To.Type = obj.TYPE_CONST
 		p.To.Offset = 0
 		p1.As = x86.AJNE
diff --git a/src/cmd/compile/internal/amd64/gsubr.go b/src/cmd/compile/internal/amd64/gsubr.go
index d305076..4d99474 100644
--- a/src/cmd/compile/internal/amd64/gsubr.go
+++ b/src/cmd/compile/internal/amd64/gsubr.go
@@ -52,7 +52,7 @@
  * generate
  *	as $c, reg
  */
-func gconreg(as int, c int64, reg int) {
+func gconreg(as obj.As, c int64, reg int) {
 	var nr gc.Node
 
 	switch as {
@@ -72,7 +72,7 @@
  * generate
  *	as $c, n
  */
-func ginscon(as int, c int64, n2 *gc.Node) {
+func ginscon(as obj.As, c int64, n2 *gc.Node) {
 	var n1 gc.Node
 
 	switch as {
@@ -144,7 +144,7 @@
 	return gc.Gbranch(optoas(op, t), nil, likely)
 }
 
-func ginsboolval(a int, n *gc.Node) {
+func ginsboolval(a obj.As, n *gc.Node) {
 	gins(jmptoset(a), nil, n)
 }
 
@@ -191,7 +191,7 @@
 	}
 
 	// cannot have two memory operands
-	var a int
+	var a obj.As
 	if gc.Ismem(f) && gc.Ismem(t) {
 		goto hard
 	}
@@ -583,7 +583,7 @@
  * generate one instruction:
  *	as f, t
  */
-func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
+func gins(as obj.As, f *gc.Node, t *gc.Node) *obj.Prog {
 	//	Node nod;
 
 	//	if(f != N && f->op == OINDEX) {
@@ -681,7 +681,7 @@
 /*
  * return Axxx for Oxxx on type t.
  */
-func optoas(op gc.Op, t *gc.Type) int {
+func optoas(op gc.Op, t *gc.Type) obj.As {
 	if t == nil {
 		gc.Fatalf("optoas: t is nil")
 	}
@@ -1229,7 +1229,7 @@
 }
 
 // jmptoset returns ASETxx for AJxx.
-func jmptoset(jmp int) int {
+func jmptoset(jmp obj.As) obj.As {
 	switch jmp {
 	case x86.AJEQ:
 		return x86.ASETEQ
@@ -1298,7 +1298,7 @@
  * after successful sudoaddable,
  * to release the register used for a.
  */
-func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
+func sudoaddable(as obj.As, n *gc.Node, a *obj.Addr) bool {
 	if n.Type == nil {
 		return false
 	}
diff --git a/src/cmd/compile/internal/arm/cgen.go b/src/cmd/compile/internal/arm/cgen.go
index 1eabbf4..c60df08 100644
--- a/src/cmd/compile/internal/arm/cgen.go
+++ b/src/cmd/compile/internal/arm/cgen.go
@@ -60,7 +60,7 @@
 	// for example moving [4]byte must use 4 MOVB not 1 MOVW.
 	align := int(n.Type.Align)
 
-	var op int
+	var op obj.As
 	switch align {
 	default:
 		gc.Fatalf("sgen: invalid alignment %d for %v", align, n.Type)
diff --git a/src/cmd/compile/internal/arm/ggen.go b/src/cmd/compile/internal/arm/ggen.go
index ffe4f55..4d06354 100644
--- a/src/cmd/compile/internal/arm/ggen.go
+++ b/src/cmd/compile/internal/arm/ggen.go
@@ -95,10 +95,10 @@
 	return p
 }
 
-func appendpp(p *obj.Prog, as int, ftype obj.AddrType, freg int, foffset int32, ttype obj.AddrType, treg int, toffset int32) *obj.Prog {
+func appendpp(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int, foffset int32, ttype obj.AddrType, treg int, toffset int32) *obj.Prog {
 	q := gc.Ctxt.NewProg()
 	gc.Clearp(q)
-	q.As = int16(as)
+	q.As = as
 	q.Lineno = p.Lineno
 	q.From.Type = ftype
 	q.From.Reg = int16(freg)
@@ -464,7 +464,7 @@
  * generate
  *	as $c, n
  */
-func ginscon(as int, c int64, n *gc.Node) {
+func ginscon(as obj.As, c int64, n *gc.Node) {
 	var n1 gc.Node
 	gc.Nodconst(&n1, gc.Types[gc.TINT32], c)
 	var n2 gc.Node
diff --git a/src/cmd/compile/internal/arm/gsubr.go b/src/cmd/compile/internal/arm/gsubr.go
index 97ca0cd..a985633 100644
--- a/src/cmd/compile/internal/arm/gsubr.go
+++ b/src/cmd/compile/internal/arm/gsubr.go
@@ -149,7 +149,7 @@
 
 	// cannot have two memory operands;
 	// except 64-bit, which always copies via registers anyway.
-	var a int
+	var a obj.As
 	var r1 gc.Node
 	if !gc.Is64(f.Type) && !gc.Is64(t.Type) && gc.Ismem(f) && gc.Ismem(t) {
 		goto hard
@@ -636,7 +636,7 @@
  * generate one instruction:
  *	as f, t
  */
-func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
+func gins(as obj.As, f *gc.Node, t *gc.Node) *obj.Prog {
 	//	Node nod;
 	//	int32 v;
 
@@ -732,7 +732,7 @@
 /* generate a constant shift
  * arm encodes a shift by 32 as 0, thus asking for 0 shift is illegal.
  */
-func gshift(as int, lhs *gc.Node, stype int32, sval int32, rhs *gc.Node) *obj.Prog {
+func gshift(as obj.As, lhs *gc.Node, stype int32, sval int32, rhs *gc.Node) *obj.Prog {
 	if sval <= 0 || sval > 32 {
 		gc.Fatalf("bad shift value: %d", sval)
 	}
@@ -747,7 +747,7 @@
 
 /* generate a register shift
  */
-func gregshift(as int, lhs *gc.Node, stype int32, reg *gc.Node, rhs *gc.Node) *obj.Prog {
+func gregshift(as obj.As, lhs *gc.Node, stype int32, reg *gc.Node, rhs *gc.Node) *obj.Prog {
 	p := gins(as, nil, rhs)
 	p.From.Type = obj.TYPE_SHIFT
 	p.From.Offset = int64(stype) | (int64(reg.Reg)&15)<<8 | 1<<4 | int64(lhs.Reg)&15
@@ -757,7 +757,7 @@
 /*
  * return Axxx for Oxxx on type t.
  */
-func optoas(op gc.Op, t *gc.Type) int {
+func optoas(op gc.Op, t *gc.Type) obj.As {
 	if t == nil {
 		gc.Fatalf("optoas: t is nil")
 	}
@@ -1131,7 +1131,7 @@
  * after successful sudoaddable,
  * to release the register used for a.
  */
-func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
+func sudoaddable(as obj.As, n *gc.Node, a *obj.Addr) bool {
 	if n.Type == nil {
 		return false
 	}
diff --git a/src/cmd/compile/internal/arm/peep.go b/src/cmd/compile/internal/arm/peep.go
index 3f13b49..3638a98 100644
--- a/src/cmd/compile/internal/arm/peep.go
+++ b/src/cmd/compile/internal/arm/peep.go
@@ -543,7 +543,7 @@
 	}
 
 	if gc.Debug['P'] != 0 {
-		fmt.Printf(" => %v\n", obj.Aconv(int(p.As)))
+		fmt.Printf(" => %v\n", obj.Aconv(p.As))
 	}
 	return true
 }
@@ -1041,7 +1041,7 @@
 func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
 	switch p.As {
 	default:
-		fmt.Printf("copyu: can't find %v\n", obj.Aconv(int(p.As)))
+		fmt.Printf("copyu: can't find %v\n", obj.Aconv(p.As))
 		return 2
 
 	case arm.AMOVM:
@@ -1501,8 +1501,8 @@
 }
 
 var predinfo = []struct {
-	opcode    int
-	notopcode int
+	opcode    obj.As
+	notopcode obj.As
 	scond     int
 	notscond  int
 }{
@@ -1672,9 +1672,9 @@
 				excise(r)
 			} else {
 				if cond == Truecond {
-					r.Prog.As = int16(predinfo[rstart.Prog.As-arm.ABEQ].opcode)
+					r.Prog.As = predinfo[rstart.Prog.As-arm.ABEQ].opcode
 				} else {
-					r.Prog.As = int16(predinfo[rstart.Prog.As-arm.ABEQ].notopcode)
+					r.Prog.As = predinfo[rstart.Prog.As-arm.ABEQ].notopcode
 				}
 			}
 		} else if predicable(r.Prog) {
diff --git a/src/cmd/compile/internal/arm64/cgen.go b/src/cmd/compile/internal/arm64/cgen.go
index 4d0071c..87f3498 100644
--- a/src/cmd/compile/internal/arm64/cgen.go
+++ b/src/cmd/compile/internal/arm64/cgen.go
@@ -17,7 +17,7 @@
 	// for example moving [4]byte must use 4 MOVB not 1 MOVW.
 	align := int(n.Type.Align)
 
-	var op int
+	var op obj.As
 	switch align {
 	default:
 		gc.Fatalf("sgen: invalid alignment %d for %v", align, n.Type)
diff --git a/src/cmd/compile/internal/arm64/ggen.go b/src/cmd/compile/internal/arm64/ggen.go
index 9e1149d..6e74905 100644
--- a/src/cmd/compile/internal/arm64/ggen.go
+++ b/src/cmd/compile/internal/arm64/ggen.go
@@ -106,10 +106,10 @@
 	return p
 }
 
-func appendpp(p *obj.Prog, as int, ftype obj.AddrType, freg int, foffset int64, ttype obj.AddrType, treg int, toffset int64) *obj.Prog {
+func appendpp(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int, foffset int64, ttype obj.AddrType, treg int, toffset int64) *obj.Prog {
 	q := gc.Ctxt.NewProg()
 	gc.Clearp(q)
-	q.As = int16(as)
+	q.As = as
 	q.Lineno = p.Lineno
 	q.From.Type = ftype
 	q.From.Reg = int16(freg)
diff --git a/src/cmd/compile/internal/arm64/gsubr.go b/src/cmd/compile/internal/arm64/gsubr.go
index 86d6530..73be9c6 100644
--- a/src/cmd/compile/internal/arm64/gsubr.go
+++ b/src/cmd/compile/internal/arm64/gsubr.go
@@ -53,7 +53,7 @@
  * generate
  *	as $c, n
  */
-func ginscon(as int, c int64, n2 *gc.Node) {
+func ginscon(as obj.As, c int64, n2 *gc.Node) {
 	var n1 gc.Node
 
 	gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
@@ -77,7 +77,7 @@
  * generate
  *	as n, $c (CMP)
  */
-func ginscon2(as int, n2 *gc.Node, c int64) {
+func ginscon2(as obj.As, n2 *gc.Node, c int64) {
 	var n1 gc.Node
 
 	gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
@@ -151,7 +151,7 @@
 
 	// cannot have two memory operands
 	var r1 gc.Node
-	var a int
+	var a obj.As
 	if gc.Ismem(f) && gc.Ismem(t) {
 		goto hard
 	}
@@ -470,7 +470,7 @@
 // gins is called by the front end.
 // It synthesizes some multiple-instruction sequences
 // so the front end can stay simpler.
-func gins(as int, f, t *gc.Node) *obj.Prog {
+func gins(as obj.As, f, t *gc.Node) *obj.Prog {
 	if as >= obj.A_ARCHSPECIFIC {
 		if x, ok := f.IntLiteral(); ok {
 			ginscon(as, x, t)
@@ -490,7 +490,7 @@
  * generate one instruction:
  *	as f, t
  */
-func rawgins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
+func rawgins(as obj.As, f *gc.Node, t *gc.Node) *obj.Prog {
 	// TODO(austin): Add self-move test like in 6g (but be careful
 	// of truncation moves)
 
@@ -577,7 +577,7 @@
 	}
 }
 
-func gcmp(as int, lhs *gc.Node, rhs *gc.Node) *obj.Prog {
+func gcmp(as obj.As, lhs *gc.Node, rhs *gc.Node) *obj.Prog {
 	if lhs.Op != gc.OREGISTER {
 		gc.Fatalf("bad operands to gcmp: %v %v", gc.Oconv(lhs.Op, 0), gc.Oconv(rhs.Op, 0))
 	}
@@ -590,7 +590,7 @@
 /*
  * return Axxx for Oxxx on type t.
  */
-func optoas(op gc.Op, t *gc.Type) int {
+func optoas(op gc.Op, t *gc.Type) obj.As {
 	if t == nil {
 		gc.Fatalf("optoas: t is nil")
 	}
@@ -619,7 +619,7 @@
 		OSQRT_  = uint32(gc.OSQRT) << 16
 	)
 
-	a := int(obj.AXXX)
+	a := obj.AXXX
 	switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
 	default:
 		gc.Fatalf("optoas: no entry for op=%v type=%v", gc.Oconv(op, 0), t)
@@ -987,7 +987,7 @@
  * after successful sudoaddable,
  * to release the register used for a.
  */
-func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
+func sudoaddable(as obj.As, n *gc.Node, a *obj.Addr) bool {
 	// TODO(minux)
 
 	*a = obj.Addr{}
diff --git a/src/cmd/compile/internal/arm64/peep.go b/src/cmd/compile/internal/arm64/peep.go
index d821edf..6a5b222 100644
--- a/src/cmd/compile/internal/arm64/peep.go
+++ b/src/cmd/compile/internal/arm64/peep.go
@@ -162,7 +162,7 @@
 			continue
 		}
 		if gc.Debug['P'] != 0 {
-			fmt.Printf("encoding $%d directly into %v in:\n%v\n%v\n", p.From.Offset, obj.Aconv(int(p1.As)), p, p1)
+			fmt.Printf("encoding $%d directly into %v in:\n%v\n%v\n", p.From.Offset, obj.Aconv(p1.As), p, p1)
 		}
 		p1.From.Type = obj.TYPE_CONST
 		p1.From = p.From
@@ -427,7 +427,7 @@
 
 	switch p.As {
 	default:
-		fmt.Printf("copyu: can't find %v\n", obj.Aconv(int(p.As)))
+		fmt.Printf("copyu: can't find %v\n", obj.Aconv(p.As))
 		return 2
 
 	case obj.ANOP, /* read p->from, write p->to */
diff --git a/src/cmd/compile/internal/gc/cgen.go b/src/cmd/compile/internal/gc/cgen.go
index 389764c..7286d39 100644
--- a/src/cmd/compile/internal/gc/cgen.go
+++ b/src/cmd/compile/internal/gc/cgen.go
@@ -356,7 +356,7 @@
 		}
 	}
 
-	var a int
+	var a obj.As
 	switch n.Op {
 	default:
 		Dump("cgen", n)
@@ -3011,7 +3011,7 @@
 		regalloc = func(n *Node, t *Type, reuse *Node) {
 			Tempname(n, t)
 		}
-		ginscon = func(as int, c int64, n *Node) {
+		ginscon = func(as obj.As, c int64, n *Node) {
 			var n1 Node
 			Regalloc(&n1, n.Type, n)
 			Thearch.Gmove(n, &n1)
@@ -3019,7 +3019,7 @@
 			Thearch.Gmove(&n1, n)
 			Regfree(&n1)
 		}
-		gins = func(as int, f, t *Node) *obj.Prog {
+		gins = func(as obj.As, f, t *Node) *obj.Prog {
 			var n1 Node
 			Regalloc(&n1, t.Type, t)
 			Thearch.Gmove(t, &n1)
diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go
index d890909..397e278 100644
--- a/src/cmd/compile/internal/gc/go.go
+++ b/src/cmd/compile/internal/gc/go.go
@@ -691,7 +691,7 @@
 	Excise       func(*Flow)
 	Expandchecks func(*obj.Prog)
 	Getg         func(*Node)
-	Gins         func(int, *Node, *Node) *obj.Prog
+	Gins         func(obj.As, *Node, *Node) *obj.Prog
 
 	// Ginscmp generates code comparing n1 to n2 and jumping away if op is satisfied.
 	// The returned prog should be Patch'ed with the jump target.
@@ -711,9 +711,9 @@
 	// corresponding to the desired value.
 	// The second argument is the destination.
 	// If not present, Ginsboolval will be emulated with jumps.
-	Ginsboolval func(int, *Node)
+	Ginsboolval func(obj.As, *Node)
 
-	Ginscon      func(int, int64, *Node)
+	Ginscon      func(obj.As, int64, *Node)
 	Ginsnop      func()
 	Gmove        func(*Node, *Node)
 	Igenindex    func(*Node, *Node, bool) *obj.Prog
@@ -725,14 +725,14 @@
 	Smallindir   func(*obj.Addr, *obj.Addr) bool
 	Stackaddr    func(*obj.Addr) bool
 	Blockcopy    func(*Node, *Node, int64, int64, int64)
-	Sudoaddable  func(int, *Node, *obj.Addr) bool
+	Sudoaddable  func(obj.As, *Node, *obj.Addr) bool
 	Sudoclean    func()
 	Excludedregs func() uint64
 	RtoB         func(int) uint64
 	FtoB         func(int) uint64
 	BtoR         func(uint64) int
 	BtoF         func(uint64) int
-	Optoas       func(Op, *Type) int
+	Optoas       func(Op, *Type) obj.As
 	Doregbits    func(int) uint64
 	Regnames     func(*int) []string
 	Use387       bool // should 8g use 387 FP instructions instead of sse2.
diff --git a/src/cmd/compile/internal/gc/gsubr.go b/src/cmd/compile/internal/gc/gsubr.go
index c533bd1..a194821 100644
--- a/src/cmd/compile/internal/gc/gsubr.go
+++ b/src/cmd/compile/internal/gc/gsubr.go
@@ -79,7 +79,7 @@
 	return true
 }
 
-func Gbranch(as int, t *Type, likely int) *obj.Prog {
+func Gbranch(as obj.As, t *Type, likely int) *obj.Prog {
 	p := Prog(as)
 	p.To.Type = obj.TYPE_BRANCH
 	p.To.Val = nil
@@ -97,7 +97,7 @@
 	return p
 }
 
-func Prog(as int) *obj.Prog {
+func Prog(as obj.As) *obj.Prog {
 	var p *obj.Prog
 
 	if as == obj.ADATA || as == obj.AGLOBL {
@@ -125,7 +125,7 @@
 		}
 	}
 
-	p.As = int16(as)
+	p.As = as
 	p.Lineno = lineno
 	return p
 }
diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go
index c41d7fe..cd6018e 100644
--- a/src/cmd/compile/internal/gc/pgen.go
+++ b/src/cmd/compile/internal/gc/pgen.go
@@ -85,7 +85,7 @@
 // that its argument is certainly dead, for use when the liveness analysis
 // would not otherwise be able to deduce that fact.
 
-func gvardefx(n *Node, as int) {
+func gvardefx(n *Node, as obj.As) {
 	if n == nil {
 		Fatalf("gvardef nil")
 	}
diff --git a/src/cmd/compile/internal/gc/plive.go b/src/cmd/compile/internal/gc/plive.go
index bebad8f..e710478 100644
--- a/src/cmd/compile/internal/gc/plive.go
+++ b/src/cmd/compile/internal/gc/plive.go
@@ -1018,10 +1018,10 @@
 }
 
 // Construct a disembodied instruction.
-func unlinkedprog(as int) *obj.Prog {
+func unlinkedprog(as obj.As) *obj.Prog {
 	p := Ctxt.NewProg()
 	Clearp(p)
-	p.As = int16(as)
+	p.As = as
 	return p
 }
 
diff --git a/src/cmd/compile/internal/gc/reg.go b/src/cmd/compile/internal/gc/reg.go
index 59a4a3e..09cf7f5 100644
--- a/src/cmd/compile/internal/gc/reg.go
+++ b/src/cmd/compile/internal/gc/reg.go
@@ -246,11 +246,11 @@
 	else if(a->sym == nil)
 		a->type = TYPE_CONST;
 	*/
-	p1.As = int16(Thearch.Optoas(OAS, Types[uint8(v.etype)]))
+	p1.As = Thearch.Optoas(OAS, Types[uint8(v.etype)])
 
 	// TODO(rsc): Remove special case here.
 	if (Thearch.Thechar == '0' || Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9') && v.etype == TBOOL {
-		p1.As = int16(Thearch.Optoas(OAS, Types[TUINT8]))
+		p1.As = Thearch.Optoas(OAS, Types[TUINT8])
 	}
 	p1.From.Type = obj.TYPE_REG
 	p1.From.Reg = int16(rn)
diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go
index 66792e7..b13bc86 100644
--- a/src/cmd/compile/internal/gc/ssa.go
+++ b/src/cmd/compile/internal/gc/ssa.go
@@ -3766,7 +3766,7 @@
 //     dest := dest(To) op src(From)
 // and also returns the created obj.Prog so it
 // may be further adjusted (offset, scale, etc).
-func opregreg(op int, dest, src int16) *obj.Prog {
+func opregreg(op obj.As, dest, src int16) *obj.Prog {
 	p := Prog(op)
 	p.From.Type = obj.TYPE_REG
 	p.To.Type = obj.TYPE_REG
@@ -3796,7 +3796,7 @@
 			p.To.Type = obj.TYPE_REG
 			p.To.Reg = r
 		default:
-			var asm int
+			var asm obj.As
 			switch v.Op {
 			case ssa.OpAMD64ADDQ:
 				asm = x86.ALEAQ
@@ -4039,7 +4039,7 @@
 		a := regnum(v.Args[0])
 		if r == a {
 			if v.AuxInt2Int64() == 1 {
-				var asm int
+				var asm obj.As
 				switch v.Op {
 				// Software optimization manual recommends add $1,reg.
 				// But inc/dec is 1 byte smaller. ICC always uses inc
@@ -4058,7 +4058,7 @@
 				p.To.Reg = r
 				return
 			} else if v.AuxInt2Int64() == -1 {
-				var asm int
+				var asm obj.As
 				switch v.Op {
 				case ssa.OpAMD64ADDQconst:
 					asm = x86.ADECQ
@@ -4080,7 +4080,7 @@
 				return
 			}
 		}
-		var asm int
+		var asm obj.As
 		switch v.Op {
 		case ssa.OpAMD64ADDQconst:
 			asm = x86.ALEAQ
@@ -4138,7 +4138,7 @@
 			p.To.Type = obj.TYPE_REG
 			p.To.Reg = r
 		} else if x == r && v.AuxInt2Int64() == -1 {
-			var asm int
+			var asm obj.As
 			// x = x - (-1) is the same as x++
 			// See OpAMD64ADDQconst comments about inc vs add $1,reg
 			switch v.Op {
@@ -4153,7 +4153,7 @@
 			p.To.Type = obj.TYPE_REG
 			p.To.Reg = r
 		} else if x == r && v.AuxInt2Int64() == 1 {
-			var asm int
+			var asm obj.As
 			switch v.Op {
 			case ssa.OpAMD64SUBQconst:
 				asm = x86.ADECQ
@@ -4166,7 +4166,7 @@
 			p.To.Type = obj.TYPE_REG
 			p.To.Reg = r
 		} else {
-			var asm int
+			var asm obj.As
 			switch v.Op {
 			case ssa.OpAMD64SUBQconst:
 				asm = x86.ALEAQ
@@ -4735,7 +4735,7 @@
 }
 
 // movZero generates a register indirect move with a 0 immediate and keeps track of bytes left and next offset
-func movZero(as int, width int64, nbytes int64, offset int64, regnum int16) (nleft int64, noff int64) {
+func movZero(as obj.As, width int64, nbytes int64, offset int64, regnum int16) (nleft int64, noff int64) {
 	p := Prog(as)
 	// TODO: use zero register on archs that support it.
 	p.From.Type = obj.TYPE_CONST
@@ -4749,7 +4749,7 @@
 }
 
 var blockJump = [...]struct {
-	asm, invasm int
+	asm, invasm obj.As
 }{
 	ssa.BlockAMD64EQ:  {x86.AJEQ, x86.AJNE},
 	ssa.BlockAMD64NE:  {x86.AJNE, x86.AJEQ},
@@ -4766,7 +4766,8 @@
 }
 
 type floatingEQNEJump struct {
-	jump, index int
+	jump  obj.As
+	index int
 }
 
 var eqfJumps = [2][2]floatingEQNEJump{
@@ -5034,7 +5035,7 @@
 }
 
 // loadByType returns the load instruction of the given type.
-func loadByType(t ssa.Type) int {
+func loadByType(t ssa.Type) obj.As {
 	// Avoid partial register write
 	if !t.IsFloat() && t.Size() <= 2 {
 		if t.Size() == 1 {
@@ -5048,7 +5049,7 @@
 }
 
 // storeByType returns the store instruction of the given type.
-func storeByType(t ssa.Type) int {
+func storeByType(t ssa.Type) obj.As {
 	width := t.Size()
 	if t.IsFloat() {
 		switch width {
@@ -5073,7 +5074,7 @@
 }
 
 // moveByType returns the reg->reg move instruction of the given type.
-func moveByType(t ssa.Type) int {
+func moveByType(t ssa.Type) obj.As {
 	if t.IsFloat() {
 		// Moving the whole sse2 register is faster
 		// than moving just the correct low portion of it.
diff --git a/src/cmd/compile/internal/mips64/cgen.go b/src/cmd/compile/internal/mips64/cgen.go
index 67d2e0f..998afea 100644
--- a/src/cmd/compile/internal/mips64/cgen.go
+++ b/src/cmd/compile/internal/mips64/cgen.go
@@ -17,7 +17,7 @@
 	// for example moving [4]byte must use 4 MOVB not 1 MOVW.
 	align := int(n.Type.Align)
 
-	var op int
+	var op obj.As
 	switch align {
 	default:
 		gc.Fatalf("sgen: invalid alignment %d for %v", align, n.Type)
diff --git a/src/cmd/compile/internal/mips64/ggen.go b/src/cmd/compile/internal/mips64/ggen.go
index 57508c8..338e3f1 100644
--- a/src/cmd/compile/internal/mips64/ggen.go
+++ b/src/cmd/compile/internal/mips64/ggen.go
@@ -101,10 +101,10 @@
 	return p
 }
 
-func appendpp(p *obj.Prog, as int, ftype obj.AddrType, freg int, foffset int64, ttype obj.AddrType, treg int, toffset int64) *obj.Prog {
+func appendpp(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int, foffset int64, ttype obj.AddrType, treg int, toffset int64) *obj.Prog {
 	q := gc.Ctxt.NewProg()
 	gc.Clearp(q)
-	q.As = int16(as)
+	q.As = as
 	q.Lineno = p.Lineno
 	q.From.Type = ftype
 	q.From.Reg = int16(freg)
diff --git a/src/cmd/compile/internal/mips64/gsubr.go b/src/cmd/compile/internal/mips64/gsubr.go
index caa4a61..0ca8cfb 100644
--- a/src/cmd/compile/internal/mips64/gsubr.go
+++ b/src/cmd/compile/internal/mips64/gsubr.go
@@ -56,7 +56,7 @@
  * generate
  *	as $c, n
  */
-func ginscon(as int, c int64, n2 *gc.Node) {
+func ginscon(as obj.As, c int64, n2 *gc.Node) {
 	var n1 gc.Node
 
 	gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
@@ -78,7 +78,7 @@
 
 // generate branch
 // n1, n2 are registers
-func ginsbranch(as int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
+func ginsbranch(as obj.As, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
 	p := gc.Gbranch(as, t, likely)
 	gc.Naddr(&p.From, n1)
 	if n2 != nil {
@@ -233,7 +233,7 @@
 	// cannot have two memory operands
 	var r2 gc.Node
 	var r1 gc.Node
-	var a int
+	var a obj.As
 	if gc.Ismem(f) && gc.Ismem(t) {
 		goto hard
 	}
@@ -562,7 +562,7 @@
 // gins is called by the front end.
 // It synthesizes some multiple-instruction sequences
 // so the front end can stay simpler.
-func gins(as int, f, t *gc.Node) *obj.Prog {
+func gins(as obj.As, f, t *gc.Node) *obj.Prog {
 	if as >= obj.A_ARCHSPECIFIC {
 		if x, ok := f.IntLiteral(); ok {
 			ginscon(as, x, t)
@@ -577,7 +577,7 @@
  *	as f, r, t
  * r must be register, if not nil
  */
-func gins3(as int, f, r, t *gc.Node) *obj.Prog {
+func gins3(as obj.As, f, r, t *gc.Node) *obj.Prog {
 	p := rawgins(as, f, t)
 	if r != nil {
 		p.Reg = r.Reg
@@ -589,7 +589,7 @@
  * generate one instruction:
  *	as f, t
  */
-func rawgins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
+func rawgins(as obj.As, f *gc.Node, t *gc.Node) *obj.Prog {
 	// TODO(austin): Add self-move test like in 6g (but be careful
 	// of truncation moves)
 
@@ -684,7 +684,7 @@
 /*
  * return Axxx for Oxxx on type t.
  */
-func optoas(op gc.Op, t *gc.Type) int {
+func optoas(op gc.Op, t *gc.Type) obj.As {
 	if t == nil {
 		gc.Fatalf("optoas: t is nil")
 	}
@@ -712,7 +712,7 @@
 		OHMUL_  = uint32(gc.OHMUL) << 16
 	)
 
-	a := int(obj.AXXX)
+	a := obj.AXXX
 	switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
 	default:
 		gc.Fatalf("optoas: no entry for op=%v type=%v", gc.Oconv(op, 0), t)
@@ -1055,7 +1055,7 @@
  * after successful sudoaddable,
  * to release the register used for a.
  */
-func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
+func sudoaddable(as obj.As, n *gc.Node, a *obj.Addr) bool {
 	// TODO(minux)
 
 	*a = obj.Addr{}
diff --git a/src/cmd/compile/internal/mips64/peep.go b/src/cmd/compile/internal/mips64/peep.go
index 1da5500..0e3ea31 100644
--- a/src/cmd/compile/internal/mips64/peep.go
+++ b/src/cmd/compile/internal/mips64/peep.go
@@ -412,7 +412,7 @@
 
 	switch p.As {
 	default:
-		fmt.Printf("copyu: can't find %v\n", obj.Aconv(int(p.As)))
+		fmt.Printf("copyu: can't find %v\n", obj.Aconv(p.As))
 		return 2
 
 	case obj.ANOP, /* read p->from, write p->to */
diff --git a/src/cmd/compile/internal/ppc64/cgen.go b/src/cmd/compile/internal/ppc64/cgen.go
index c049530..f4cc9c4 100644
--- a/src/cmd/compile/internal/ppc64/cgen.go
+++ b/src/cmd/compile/internal/ppc64/cgen.go
@@ -17,7 +17,7 @@
 	// for example moving [4]byte must use 4 MOVB not 1 MOVW.
 	align := int(n.Type.Align)
 
-	var op int
+	var op obj.As
 	switch align {
 	default:
 		gc.Fatalf("sgen: invalid alignment %d for %v", align, n.Type)
diff --git a/src/cmd/compile/internal/ppc64/ggen.go b/src/cmd/compile/internal/ppc64/ggen.go
index 5030630..884f492 100644
--- a/src/cmd/compile/internal/ppc64/ggen.go
+++ b/src/cmd/compile/internal/ppc64/ggen.go
@@ -93,10 +93,10 @@
 	return p
 }
 
-func appendpp(p *obj.Prog, as int, ftype obj.AddrType, freg int, foffset int64, ttype obj.AddrType, treg int, toffset int64) *obj.Prog {
+func appendpp(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int, foffset int64, ttype obj.AddrType, treg int, toffset int64) *obj.Prog {
 	q := gc.Ctxt.NewProg()
 	gc.Clearp(q)
-	q.As = int16(as)
+	q.As = as
 	q.Lineno = p.Lineno
 	q.From.Type = ftype
 	q.From.Reg = int16(freg)
diff --git a/src/cmd/compile/internal/ppc64/gsubr.go b/src/cmd/compile/internal/ppc64/gsubr.go
index 3ec81cd..ce1d550 100644
--- a/src/cmd/compile/internal/ppc64/gsubr.go
+++ b/src/cmd/compile/internal/ppc64/gsubr.go
@@ -62,7 +62,7 @@
  * generate
  *	as $c, n
  */
-func ginscon(as int, c int64, n2 *gc.Node) {
+func ginscon(as obj.As, c int64, n2 *gc.Node) {
 	var n1 gc.Node
 
 	gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
@@ -86,7 +86,7 @@
  * generate
  *	as n, $c (CMP/CMPU)
  */
-func ginscon2(as int, n2 *gc.Node, c int64) {
+func ginscon2(as obj.As, n2 *gc.Node, c int64) {
 	var n1 gc.Node
 
 	gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
@@ -190,7 +190,7 @@
 	// cannot have two memory operands
 	var r2 gc.Node
 	var r1 gc.Node
-	var a int
+	var a obj.As
 	if gc.Ismem(f) && gc.Ismem(t) {
 		goto hard
 	}
@@ -548,7 +548,7 @@
 // gins is called by the front end.
 // It synthesizes some multiple-instruction sequences
 // so the front end can stay simpler.
-func gins(as int, f, t *gc.Node) *obj.Prog {
+func gins(as obj.As, f, t *gc.Node) *obj.Prog {
 	if as >= obj.A_ARCHSPECIFIC {
 		if x, ok := f.IntLiteral(); ok {
 			ginscon(as, x, t)
@@ -568,7 +568,7 @@
  * generate one instruction:
  *	as f, t
  */
-func rawgins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
+func rawgins(as obj.As, f *gc.Node, t *gc.Node) *obj.Prog {
 	// TODO(austin): Add self-move test like in 6g (but be careful
 	// of truncation moves)
 
@@ -680,7 +680,7 @@
 /*
  * return Axxx for Oxxx on type t.
  */
-func optoas(op gc.Op, t *gc.Type) int {
+func optoas(op gc.Op, t *gc.Type) obj.As {
 	if t == nil {
 		gc.Fatalf("optoas: t is nil")
 	}
@@ -708,7 +708,7 @@
 		OHMUL_  = uint32(gc.OHMUL) << 16
 	)
 
-	a := int(obj.AXXX)
+	a := obj.AXXX
 	switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
 	default:
 		gc.Fatalf("optoas: no entry for op=%v type=%v", gc.Oconv(op, 0), t)
@@ -1059,7 +1059,7 @@
  * after successful sudoaddable,
  * to release the register used for a.
  */
-func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
+func sudoaddable(as obj.As, n *gc.Node, a *obj.Addr) bool {
 	// TODO(minux)
 
 	*a = obj.Addr{}
diff --git a/src/cmd/compile/internal/ppc64/peep.go b/src/cmd/compile/internal/ppc64/peep.go
index a23ed10..c6fb615 100644
--- a/src/cmd/compile/internal/ppc64/peep.go
+++ b/src/cmd/compile/internal/ppc64/peep.go
@@ -48,7 +48,7 @@
 
 	var p *obj.Prog
 	var r *gc.Flow
-	var t int
+	var t obj.As
 loop1:
 	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
 		gc.Dumpit("loop1", g.Start, 0)
@@ -328,13 +328,13 @@
 				ppc64.ASUBZE,
 				ppc64.ASUBZEV,
 				ppc64.AXOR:
-				t = variant2as(int(p1.As), as2variant(int(p1.As))|V_CC)
+				t = variant2as(p1.As, as2variant(p1.As)|V_CC)
 			}
 
 			if gc.Debug['D'] != 0 {
 				fmt.Printf("cmp %v; %v -> ", p1, p)
 			}
-			p1.As = int16(t)
+			p1.As = t
 			if gc.Debug['D'] != 0 {
 				fmt.Printf("%v\n", p1)
 			}
@@ -611,7 +611,7 @@
 
 	switch p.As {
 	default:
-		fmt.Printf("copyu: can't find %v\n", obj.Aconv(int(p.As)))
+		fmt.Printf("copyu: can't find %v\n", obj.Aconv(p.As))
 		return 2
 
 	case obj.ANOP, /* read p->from, write p->to */
diff --git a/src/cmd/compile/internal/ppc64/prog.go b/src/cmd/compile/internal/ppc64/prog.go
index c028c59..272707a 100644
--- a/src/cmd/compile/internal/ppc64/prog.go
+++ b/src/cmd/compile/internal/ppc64/prog.go
@@ -110,7 +110,8 @@
 
 	// Perform one-time expansion of instructions in progtable to
 	// their CC, V, and VCC variants
-	for as := range progtable {
+	for i := range progtable {
+		as := obj.As(i)
 		if progtable[as].Flags == 0 {
 			continue
 		}
@@ -171,7 +172,7 @@
 // Instruction variants table. Initially this contains entries only
 // for the "base" form of each instruction. On the first call to
 // as2variant or variant2as, we'll add the variants to the table.
-var varianttable = [ppc64.ALAST][4]int{
+var varianttable = [ppc64.ALAST][4]obj.As{
 	ppc64.AADD:     {ppc64.AADD, ppc64.AADDCC, ppc64.AADDV, ppc64.AADDVCC},
 	ppc64.AADDC:    {ppc64.AADDC, ppc64.AADDCCC, ppc64.AADDCV, ppc64.AADDCVCC},
 	ppc64.AADDE:    {ppc64.AADDE, ppc64.AADDECC, ppc64.AADDEV, ppc64.AADDEVCC},
@@ -261,12 +262,12 @@
 	for i := range varianttable {
 		if varianttable[i][0] == 0 {
 			// Instruction has no variants
-			varianttable[i][0] = i
+			varianttable[i][0] = obj.As(i)
 			continue
 		}
 
 		// Copy base form to other variants
-		if varianttable[i][0] == i {
+		if varianttable[i][0] == obj.As(i) {
 			for j := range varianttable[i] {
 				varianttable[varianttable[i][j]] = varianttable[i]
 			}
@@ -275,7 +276,7 @@
 }
 
 // as2variant returns the variant (V_*) flags of instruction as.
-func as2variant(as int) int {
+func as2variant(as obj.As) int {
 	for i := range varianttable[as] {
 		if varianttable[as][i] == as {
 			return i
@@ -287,6 +288,6 @@
 
 // variant2as returns the instruction as with the given variant (V_*) flags.
 // If no such variant exists, this returns 0.
-func variant2as(as int, flags int) int {
+func variant2as(as obj.As, flags int) obj.As {
 	return varianttable[as][flags]
 }
diff --git a/src/cmd/compile/internal/ssa/gen/main.go b/src/cmd/compile/internal/ssa/gen/main.go
index 2736ed7..087633c 100644
--- a/src/cmd/compile/internal/ssa/gen/main.go
+++ b/src/cmd/compile/internal/ssa/gen/main.go
@@ -78,7 +78,10 @@
 	fmt.Fprintln(w)
 	fmt.Fprintln(w, "package ssa")
 
-	fmt.Fprintln(w, "import \"cmd/internal/obj/x86\"")
+	fmt.Fprintln(w, "import (")
+	fmt.Fprintln(w, "\"cmd/internal/obj\"")
+	fmt.Fprintln(w, "\"cmd/internal/obj/x86\"")
+	fmt.Fprintln(w, ")")
 
 	// generate Block* declarations
 	fmt.Fprintln(w, "const (")
@@ -184,7 +187,7 @@
 	}
 	fmt.Fprintln(w, "}")
 
-	fmt.Fprintln(w, "func (o Op) Asm() int {return opcodeTable[o].asm}")
+	fmt.Fprintln(w, "func (o Op) Asm() obj.As {return opcodeTable[o].asm}")
 
 	// generate op string method
 	fmt.Fprintln(w, "func (o Op) String() string {return opcodeTable[o].name }")
diff --git a/src/cmd/compile/internal/ssa/op.go b/src/cmd/compile/internal/ssa/op.go
index d64a41e..daba6f4 100644
--- a/src/cmd/compile/internal/ssa/op.go
+++ b/src/cmd/compile/internal/ssa/op.go
@@ -4,7 +4,10 @@
 
 package ssa
 
-import "fmt"
+import (
+	"cmd/internal/obj"
+	"fmt"
+)
 
 // An Op encodes the specific operation that a Value performs.
 // Opcodes' semantics can be modified by the type and aux fields of the Value.
@@ -16,13 +19,13 @@
 
 type opInfo struct {
 	name              string
-	asm               int
 	reg               regInfo
 	auxType           auxType
 	argLen            int32 // the number of arguments, -1 if variable length
-	generic           bool  // this is a generic (arch-independent) opcode
-	rematerializeable bool  // this op is rematerializeable
-	commutative       bool  // this operation is commutative (e.g. addition)
+	asm               obj.As
+	generic           bool // this is a generic (arch-independent) opcode
+	rematerializeable bool // this op is rematerializeable
+	commutative       bool // this operation is commutative (e.g. addition)
 }
 
 type inputInfo struct {
diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go
index cbd5ece..f1f3f7b 100644
--- a/src/cmd/compile/internal/ssa/opGen.go
+++ b/src/cmd/compile/internal/ssa/opGen.go
@@ -3,7 +3,10 @@
 
 package ssa
 
-import "cmd/internal/obj/x86"
+import (
+	"cmd/internal/obj"
+	"cmd/internal/obj/x86"
+)
 
 const (
 	BlockInvalid BlockKind = iota
@@ -5261,5 +5264,5 @@
 	},
 }
 
-func (o Op) Asm() int       { return opcodeTable[o].asm }
+func (o Op) Asm() obj.As    { return opcodeTable[o].asm }
 func (o Op) String() string { return opcodeTable[o].name }
diff --git a/src/cmd/compile/internal/x86/ggen.go b/src/cmd/compile/internal/x86/ggen.go
index fccb553..8b0a953 100644
--- a/src/cmd/compile/internal/x86/ggen.go
+++ b/src/cmd/compile/internal/x86/ggen.go
@@ -84,10 +84,10 @@
 	return p
 }
 
-func appendpp(p *obj.Prog, as int, ftype obj.AddrType, freg int, foffset int64, ttype obj.AddrType, treg int, toffset int64) *obj.Prog {
+func appendpp(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int, foffset int64, ttype obj.AddrType, treg int, toffset int64) *obj.Prog {
 	q := gc.Ctxt.NewProg()
 	gc.Clearp(q)
-	q.As = int16(as)
+	q.As = as
 	q.Lineno = p.Lineno
 	q.From.Type = ftype
 	q.From.Reg = int16(freg)
@@ -654,7 +654,7 @@
 }
 
 func cgen_floatsse(n *gc.Node, res *gc.Node) {
-	var a int
+	var a obj.As
 
 	nl := n.Left
 	nr := n.Right
diff --git a/src/cmd/compile/internal/x86/gsubr.go b/src/cmd/compile/internal/x86/gsubr.go
index 555606c..ee9f6c2 100644
--- a/src/cmd/compile/internal/x86/gsubr.go
+++ b/src/cmd/compile/internal/x86/gsubr.go
@@ -53,7 +53,7 @@
 /*
  * return Axxx for Oxxx on type t.
  */
-func optoas(op gc.Op, t *gc.Type) int {
+func optoas(op gc.Op, t *gc.Type) obj.As {
 	if t == nil {
 		gc.Fatalf("optoas: t is nil")
 	}
@@ -436,7 +436,7 @@
 	return a
 }
 
-func foptoas(op gc.Op, t *gc.Type, flg int) int {
+func foptoas(op gc.Op, t *gc.Type, flg int) obj.As {
 	a := obj.AXXX
 	et := gc.Simtype[t.Etype]
 
@@ -605,7 +605,7 @@
  * generate
  *	as $c, reg
  */
-func gconreg(as int, c int64, reg int) {
+func gconreg(as obj.As, c int64, reg int) {
 	var n1 gc.Node
 	var n2 gc.Node
 
@@ -618,7 +618,7 @@
  * generate
  *	as $c, n
  */
-func ginscon(as int, c int64, n2 *gc.Node) {
+func ginscon(as obj.As, c int64, n2 *gc.Node) {
 	var n1 gc.Node
 	gc.Nodconst(&n1, gc.Types[gc.TINT32], c)
 	gins(as, &n1, n2)
@@ -831,7 +831,7 @@
 	// cannot have two integer memory operands;
 	// except 64-bit, which always copies via registers anyway.
 	var r1 gc.Node
-	var a int
+	var a obj.As
 	if gc.Isint[ft] && gc.Isint[tt] && !gc.Is64(f.Type) && !gc.Is64(t.Type) && gc.Ismem(f) && gc.Ismem(t) {
 		goto hard
 	}
@@ -1360,7 +1360,7 @@
 
 func floatmove_387(f *gc.Node, t *gc.Node) {
 	var r1 gc.Node
-	var a int
+	var a obj.As
 
 	ft := gc.Simsimtype(f.Type)
 	tt := gc.Simsimtype(t.Type)
@@ -1611,7 +1611,7 @@
 func floatmove_sse(f *gc.Node, t *gc.Node) {
 	var r1 gc.Node
 	var cvt *gc.Type
-	var a int
+	var a obj.As
 
 	ft := gc.Simsimtype(f.Type)
 	tt := gc.Simsimtype(t.Type)
@@ -1753,7 +1753,7 @@
  * generate one instruction:
  *	as f, t
  */
-func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
+func gins(as obj.As, f *gc.Node, t *gc.Node) *obj.Prog {
 	if as == x86.AFMOVF && f != nil && f.Op == gc.OREGISTER && t != nil && t.Op == gc.OREGISTER {
 		gc.Fatalf("gins MOVF reg, reg")
 	}
@@ -1847,7 +1847,7 @@
 func sudoclean() {
 }
 
-func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
+func sudoaddable(as obj.As, n *gc.Node, a *obj.Addr) bool {
 	*a = obj.Addr{}
 	return false
 }