cmd/internal/obj: add As type for assembly opcodes

Passes toolstash/buildall.

Fixes #14692.

Change-Id: I4352678d8251309f2b8b7793674c550fac948006
Reviewed-on: https://go-review.googlesource.com/20350
Reviewed-by: Dave Cheney <dave@cheney.net>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
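
Before the hunks, some orientation: this CL threads a dedicated opcode type, obj.As, through cmd/compile in place of plain int, which is also why the int16(as) conversions at Prog.As assignments disappear below. The declaration itself lives in cmd/internal/obj and is outside the hunks shown here, so the following is only a minimal sketch of what it amounts to, assuming As is a small integer type; the elided Prog struct is shown for illustration only.

package obj

// An As denotes an assembler opcode, e.g. obj.AJMP or x86.AMOVQ.
type As int16 // assumption: sized like the old int16 Prog.As field

// Prog is reduced here to the one field this CL is about: As is now a
// named type, so call sites assign opcodes directly rather than via
// int16(as).
type Prog struct {
	As As
	// ... remaining fields elided
}
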
diff --git a/src/cmd/compile/internal/gc/cgen.go b/src/cmd/compile/internal/gc/cgen.go
index 389764c..7286d39 100644
--- a/src/cmd/compile/internal/gc/cgen.go
+++ b/src/cmd/compile/internal/gc/cgen.go
@@ -356,7 +356,7 @@
}
}
- var a int
+ var a obj.As
switch n.Op {
default:
Dump("cgen", n)
@@ -3011,7 +3011,7 @@
regalloc = func(n *Node, t *Type, reuse *Node) {
Tempname(n, t)
}
- ginscon = func(as int, c int64, n *Node) {
+ ginscon = func(as obj.As, c int64, n *Node) {
var n1 Node
Regalloc(&n1, n.Type, n)
Thearch.Gmove(n, &n1)
@@ -3019,7 +3019,7 @@
Thearch.Gmove(&n1, n)
Regfree(&n1)
}
- gins = func(as int, f, t *Node) *obj.Prog {
+ gins = func(as obj.As, f, t *Node) *obj.Prog {
var n1 Node
Regalloc(&n1, t.Type, t)
Thearch.Gmove(t, &n1)
diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go
index d890909..397e278 100644
--- a/src/cmd/compile/internal/gc/go.go
+++ b/src/cmd/compile/internal/gc/go.go
@@ -691,7 +691,7 @@
Excise func(*Flow)
Expandchecks func(*obj.Prog)
Getg func(*Node)
- Gins func(int, *Node, *Node) *obj.Prog
+ Gins func(obj.As, *Node, *Node) *obj.Prog
// Ginscmp generates code comparing n1 to n2 and jumping away if op is satisfied.
// The returned prog should be Patch'ed with the jump target.
@@ -711,9 +711,9 @@
// corresponding to the desired value.
// The second argument is the destination.
// If not present, Ginsboolval will be emulated with jumps.
- Ginsboolval func(int, *Node)
+ Ginsboolval func(obj.As, *Node)
- Ginscon func(int, int64, *Node)
+ Ginscon func(obj.As, int64, *Node)
Ginsnop func()
Gmove func(*Node, *Node)
Igenindex func(*Node, *Node, bool) *obj.Prog
@@ -725,14 +725,14 @@
Smallindir func(*obj.Addr, *obj.Addr) bool
Stackaddr func(*obj.Addr) bool
Blockcopy func(*Node, *Node, int64, int64, int64)
- Sudoaddable func(int, *Node, *obj.Addr) bool
+ Sudoaddable func(obj.As, *Node, *obj.Addr) bool
Sudoclean func()
Excludedregs func() uint64
RtoB func(int) uint64
FtoB func(int) uint64
BtoR func(uint64) int
BtoF func(uint64) int
- Optoas func(Op, *Type) int
+ Optoas func(Op, *Type) obj.As
Doregbits func(int) uint64
Regnames func(*int) []string
Use387 bool // should 8g use 387 FP instructions instead of sse2.
diff --git a/src/cmd/compile/internal/gc/gsubr.go b/src/cmd/compile/internal/gc/gsubr.go
index c533bd1..a194821 100644
--- a/src/cmd/compile/internal/gc/gsubr.go
+++ b/src/cmd/compile/internal/gc/gsubr.go
@@ -79,7 +79,7 @@
return true
}
-func Gbranch(as int, t *Type, likely int) *obj.Prog {
+func Gbranch(as obj.As, t *Type, likely int) *obj.Prog {
p := Prog(as)
p.To.Type = obj.TYPE_BRANCH
p.To.Val = nil
@@ -97,7 +97,7 @@
return p
}
-func Prog(as int) *obj.Prog {
+func Prog(as obj.As) *obj.Prog {
var p *obj.Prog
if as == obj.ADATA || as == obj.AGLOBL {
@@ -125,7 +125,7 @@
}
}
- p.As = int16(as)
+ p.As = as
p.Lineno = lineno
return p
}
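
The gsubr.go hunks above change Gbranch and Prog to take obj.As and drop the int16 conversion when storing into p.As. A minimal sketch of the resulting pattern, assuming the gc package context (where obj is already imported); emit is a hypothetical helper, not code from this CL:

// emit is a hypothetical helper in the style of Prog and Gbranch above:
// the opcode stays an obj.As from parameter to Prog.As, with no int16
// conversion anywhere.
func emit(as obj.As, from, to obj.Addr) *obj.Prog {
	p := Prog(as) // Prog now accepts obj.As directly
	p.From = from
	p.To = to
	return p
}
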
diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go
index c41d7fe..cd6018e 100644
--- a/src/cmd/compile/internal/gc/pgen.go
+++ b/src/cmd/compile/internal/gc/pgen.go
@@ -85,7 +85,7 @@
// that its argument is certainly dead, for use when the liveness analysis
// would not otherwise be able to deduce that fact.
-func gvardefx(n *Node, as int) {
+func gvardefx(n *Node, as obj.As) {
if n == nil {
Fatalf("gvardef nil")
}
diff --git a/src/cmd/compile/internal/gc/plive.go b/src/cmd/compile/internal/gc/plive.go
index bebad8f..e710478 100644
--- a/src/cmd/compile/internal/gc/plive.go
+++ b/src/cmd/compile/internal/gc/plive.go
@@ -1018,10 +1018,10 @@
}
// Construct a disembodied instruction.
-func unlinkedprog(as int) *obj.Prog {
+func unlinkedprog(as obj.As) *obj.Prog {
p := Ctxt.NewProg()
Clearp(p)
- p.As = int16(as)
+ p.As = as
return p
}
diff --git a/src/cmd/compile/internal/gc/reg.go b/src/cmd/compile/internal/gc/reg.go
index 59a4a3e..09cf7f5 100644
--- a/src/cmd/compile/internal/gc/reg.go
+++ b/src/cmd/compile/internal/gc/reg.go
@@ -246,11 +246,11 @@
else if(a->sym == nil)
a->type = TYPE_CONST;
*/
- p1.As = int16(Thearch.Optoas(OAS, Types[uint8(v.etype)]))
+ p1.As = Thearch.Optoas(OAS, Types[uint8(v.etype)])
// TODO(rsc): Remove special case here.
if (Thearch.Thechar == '0' || Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9') && v.etype == TBOOL {
- p1.As = int16(Thearch.Optoas(OAS, Types[TUINT8]))
+ p1.As = Thearch.Optoas(OAS, Types[TUINT8])
}
p1.From.Type = obj.TYPE_REG
p1.From.Reg = int16(rn)
diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go
index 66792e7..b13bc86 100644
--- a/src/cmd/compile/internal/gc/ssa.go
+++ b/src/cmd/compile/internal/gc/ssa.go
@@ -3766,7 +3766,7 @@
// dest := dest(To) op src(From)
// and also returns the created obj.Prog so it
// may be further adjusted (offset, scale, etc).
-func opregreg(op int, dest, src int16) *obj.Prog {
+func opregreg(op obj.As, dest, src int16) *obj.Prog {
p := Prog(op)
p.From.Type = obj.TYPE_REG
p.To.Type = obj.TYPE_REG
@@ -3796,7 +3796,7 @@
p.To.Type = obj.TYPE_REG
p.To.Reg = r
default:
- var asm int
+ var asm obj.As
switch v.Op {
case ssa.OpAMD64ADDQ:
asm = x86.ALEAQ
@@ -4039,7 +4039,7 @@
a := regnum(v.Args[0])
if r == a {
if v.AuxInt2Int64() == 1 {
- var asm int
+ var asm obj.As
switch v.Op {
// Software optimization manual recommends add $1,reg.
// But inc/dec is 1 byte smaller. ICC always uses inc
@@ -4058,7 +4058,7 @@
p.To.Reg = r
return
} else if v.AuxInt2Int64() == -1 {
- var asm int
+ var asm obj.As
switch v.Op {
case ssa.OpAMD64ADDQconst:
asm = x86.ADECQ
@@ -4080,7 +4080,7 @@
return
}
}
- var asm int
+ var asm obj.As
switch v.Op {
case ssa.OpAMD64ADDQconst:
asm = x86.ALEAQ
@@ -4138,7 +4138,7 @@
p.To.Type = obj.TYPE_REG
p.To.Reg = r
} else if x == r && v.AuxInt2Int64() == -1 {
- var asm int
+ var asm obj.As
// x = x - (-1) is the same as x++
// See OpAMD64ADDQconst comments about inc vs add $1,reg
switch v.Op {
@@ -4153,7 +4153,7 @@
p.To.Type = obj.TYPE_REG
p.To.Reg = r
} else if x == r && v.AuxInt2Int64() == 1 {
- var asm int
+ var asm obj.As
switch v.Op {
case ssa.OpAMD64SUBQconst:
asm = x86.ADECQ
@@ -4166,7 +4166,7 @@
p.To.Type = obj.TYPE_REG
p.To.Reg = r
} else {
- var asm int
+ var asm obj.As
switch v.Op {
case ssa.OpAMD64SUBQconst:
asm = x86.ALEAQ
@@ -4735,7 +4735,7 @@
}
// movZero generates a register indirect move with a 0 immediate and keeps track of bytes left and next offset
-func movZero(as int, width int64, nbytes int64, offset int64, regnum int16) (nleft int64, noff int64) {
+func movZero(as obj.As, width int64, nbytes int64, offset int64, regnum int16) (nleft int64, noff int64) {
p := Prog(as)
// TODO: use zero register on archs that support it.
p.From.Type = obj.TYPE_CONST
@@ -4749,7 +4749,7 @@
}
var blockJump = [...]struct {
- asm, invasm int
+ asm, invasm obj.As
}{
ssa.BlockAMD64EQ: {x86.AJEQ, x86.AJNE},
ssa.BlockAMD64NE: {x86.AJNE, x86.AJEQ},
@@ -4766,7 +4766,8 @@
}
type floatingEQNEJump struct {
- jump, index int
+ jump obj.As
+ index int
}
var eqfJumps = [2][2]floatingEQNEJump{
@@ -5034,7 +5035,7 @@
}
// loadByType returns the load instruction of the given type.
-func loadByType(t ssa.Type) int {
+func loadByType(t ssa.Type) obj.As {
// Avoid partial register write
if !t.IsFloat() && t.Size() <= 2 {
if t.Size() == 1 {
@@ -5048,7 +5049,7 @@
}
// storeByType returns the store instruction of the given type.
-func storeByType(t ssa.Type) int {
+func storeByType(t ssa.Type) obj.As {
width := t.Size()
if t.IsFloat() {
switch width {
@@ -5073,7 +5074,7 @@
}
// moveByType returns the reg->reg move instruction of the given type.
-func moveByType(t ssa.Type) int {
+func moveByType(t ssa.Type) obj.As {
if t.IsFloat() {
// Moving the whole sse2 register is faster
// than moving just the correct low portion of it.
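
One consequence of the typed opcode, visible in the loadByType, storeByType, and moveByType hunks above, is that instruction-selection helpers now return obj.As, and an unrelated int variable can no longer be stored into Prog.As without an explicit conversion. The sketch below is purely illustrative; movByWidth and its width-based selection are assumptions, not part of this CL:

// movByWidth is a hypothetical selector: it returns an obj.As, so its
// result can be passed to Prog or stored into p.As without conversion.
func movByWidth(width int64) obj.As {
	switch width {
	case 1:
		return x86.AMOVB
	case 2:
		return x86.AMOVW
	case 4:
		return x86.AMOVL
	default:
		return x86.AMOVQ
	}
}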