cmd/compile: add support for mips64{,le}
It is based on the ppc64 compiler.
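The main differences from ppc64, with a rough sketch of the generated code
(register names and the branch target below are illustrative placeholders,
not part of this CL):

  - MIPS has no integer CMP instruction or condition codes, so ginscmp
    lowers comparisons to SGT/SGTU into a temporary register followed by a
    BEQ/BNE on that register. For a signed a < b, roughly:

	SGT	Rb, Ra, Rtmp	// Rtmp = 1 if b > a, i.e. a < b
	BNE	Rtmp, target	// branch when Rtmp != 0

    Floating-point comparisons use CMPEQ/CMPGE/CMPGT (F and D variants)
    followed by BFPT/BFPF on the FP condition flag.

  - Multiply and divide leave their results in HI/LO, so MULV/DIVV are
    followed by a MOVV from LO (product, quotient) or HI (remainder, high
    word for hmul); the explicit most-negative-int / -1 special case from
    ppc64's dodiv is dropped, and only the divide-by-zero check remains.

  - Nil checks expand to BNE arg, 2(PC); MOVV R0, 0(R0) instead of the
    ppc64 CMP; BNE; MOVD sequence.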
Change-Id: I15a101df05f2919ba5292136957ba0009227d067
Reviewed-on: https://go-review.googlesource.com/14445
Reviewed-by: Minux Ma <minux@golang.org>
diff --git a/src/cmd/compile/internal/gc/cgen.go b/src/cmd/compile/internal/gc/cgen.go
index cbb84f9d..42b7792 100644
--- a/src/cmd/compile/internal/gc/cgen.go
+++ b/src/cmd/compile/internal/gc/cgen.go
@@ -251,7 +251,7 @@
return
}
- if Ctxt.Arch.Thechar == '7' || Ctxt.Arch.Thechar == '9' {
+ if Ctxt.Arch.Thechar == '0' || Ctxt.Arch.Thechar == '7' || Ctxt.Arch.Thechar == '9' {
// if both are addressable, move
if n.Addable {
if n.Op == OREGISTER || res.Op == OREGISTER {
@@ -751,14 +751,14 @@
Regalloc(&n1, nl.Type, res)
Cgen(nl, &n1)
- if Smallintconst(nr) && Ctxt.Arch.Thechar != '5' && Ctxt.Arch.Thechar != '7' && Ctxt.Arch.Thechar != '9' { // TODO(rsc): Check opcode for arm
+ if Smallintconst(nr) && Ctxt.Arch.Thechar != '0' && Ctxt.Arch.Thechar != '5' && Ctxt.Arch.Thechar != '7' && Ctxt.Arch.Thechar != '9' { // TODO(rsc): Check opcode for arm
n2 = *nr
} else {
Regalloc(&n2, nr.Type, nil)
Cgen(nr, &n2)
}
} else {
- if Smallintconst(nr) && Ctxt.Arch.Thechar != '5' && Ctxt.Arch.Thechar != '7' && Ctxt.Arch.Thechar != '9' { // TODO(rsc): Check opcode for arm
+ if Smallintconst(nr) && Ctxt.Arch.Thechar != '0' && Ctxt.Arch.Thechar != '5' && Ctxt.Arch.Thechar != '7' && Ctxt.Arch.Thechar != '9' { // TODO(rsc): Check opcode for arm
n2 = *nr
} else {
Regalloc(&n2, nr.Type, res)
@@ -1829,8 +1829,8 @@
// but they don't support direct generation of a bool value yet.
// We can fix that as we go.
switch Ctxt.Arch.Thechar {
- case '5', '7', '9':
- Fatalf("genval 5g, 7g, 9g ONAMES not fully implemented")
+ case '0', '5', '7', '9':
+ Fatalf("genval 0g, 5g, 7g, 9g ONAMES not fully implemented")
}
Cgen(n, res)
if !wantTrue {
@@ -1839,7 +1839,7 @@
return
}
- if n.Addable && Ctxt.Arch.Thechar != '5' && Ctxt.Arch.Thechar != '7' && Ctxt.Arch.Thechar != '9' {
+ if n.Addable && Ctxt.Arch.Thechar != '0' && Ctxt.Arch.Thechar != '5' && Ctxt.Arch.Thechar != '7' && Ctxt.Arch.Thechar != '9' {
// no need for a temporary
bgenNonZero(n, nil, wantTrue, likely, to)
return
@@ -2023,7 +2023,7 @@
Cgen(nl, &n1)
nl = &n1
- if Smallintconst(nr) && Ctxt.Arch.Thechar != '9' {
+ if Smallintconst(nr) && Ctxt.Arch.Thechar != '0' && Ctxt.Arch.Thechar != '9' {
Thearch.Gins(Thearch.Optoas(OCMP, nr.Type), nl, nr)
bins(nr.Type, res, op, likely, to)
return
@@ -2048,6 +2048,13 @@
op = Brrev(op)
}
+ // MIPS does not have CMP instruction
+ if Ctxt.Arch.Thechar == '0' {
+ p := Thearch.Ginscmp(op, nr.Type, l, r, likely)
+ Patch(p, to)
+ return
+ }
+
// Do the comparison.
Thearch.Gins(Thearch.Optoas(OCMP, nr.Type), l, r)
@@ -2133,6 +2140,15 @@
if !wantTrue {
op = OEQ
}
+
+ // MIPS does not have CMP instruction
+ if Thearch.Thechar == '0' {
+ p := Gbranch(Thearch.Optoas(op, n.Type), n.Type, likely)
+ Naddr(&p.From, n)
+ Patch(p, to)
+ return
+ }
+
var zero Node
Nodconst(&zero, n.Type, 0)
Thearch.Gins(Thearch.Optoas(OCMP, n.Type), n, &zero)
@@ -2597,7 +2613,7 @@
// in peep and optoas in order to enable this.
// TODO(rsc): ppc64 needs to support the relevant instructions
// in peep and optoas in order to enable this.
- if nr.Op != OLITERAL || Ctxt.Arch.Thechar == '7' || Ctxt.Arch.Thechar == '9' {
+ if nr.Op != OLITERAL || Ctxt.Arch.Thechar == '0' || Ctxt.Arch.Thechar == '7' || Ctxt.Arch.Thechar == '9' {
goto longdiv
}
w = int(nl.Type.Width * 8)
diff --git a/src/cmd/compile/internal/gc/gsubr.go b/src/cmd/compile/internal/gc/gsubr.go
index 6b11ed2..ba2c601 100644
--- a/src/cmd/compile/internal/gc/gsubr.go
+++ b/src/cmd/compile/internal/gc/gsubr.go
@@ -83,7 +83,7 @@
p := Prog(as)
p.To.Type = obj.TYPE_BRANCH
p.To.Val = nil
- if as != obj.AJMP && likely != 0 && Thearch.Thechar != '9' && Thearch.Thechar != '7' {
+ if as != obj.AJMP && likely != 0 && Thearch.Thechar != '9' && Thearch.Thechar != '7' && Thearch.Thechar != '0' {
p.From.Type = obj.TYPE_CONST
if likely > 0 {
p.From.Offset = 1
@@ -438,7 +438,7 @@
case OADDR:
Naddr(a, n.Left)
a.Etype = uint8(Tptr)
- if Thearch.Thechar != '5' && Thearch.Thechar != '7' && Thearch.Thechar != '9' { // TODO(rsc): Do this even for arm, ppc64.
+ if Thearch.Thechar != '0' && Thearch.Thechar != '5' && Thearch.Thechar != '7' && Thearch.Thechar != '9' { // TODO(rsc): Do this even for arm, ppc64.
a.Width = int64(Widthptr)
}
if a.Type != obj.TYPE_MEM {
diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go
index 015d2fd..a407383 100644
--- a/src/cmd/compile/internal/gc/pgen.go
+++ b/src/cmd/compile/internal/gc/pgen.go
@@ -282,7 +282,7 @@
if haspointers(n.Type) {
stkptrsize = Stksize
}
- if Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9' {
+ if Thearch.Thechar == '0' || Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9' {
Stksize = Rnd(Stksize, int64(Widthptr))
}
if Stksize >= 1<<31 {
@@ -319,7 +319,7 @@
Fatalf("bad checknil")
}
- if ((Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9') && n.Op != OREGISTER) || !n.Addable || n.Op == OLITERAL {
+ if ((Thearch.Thechar == '0' || Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9') && n.Op != OREGISTER) || !n.Addable || n.Op == OLITERAL {
var reg Node
Regalloc(&reg, Types[Tptr], n)
Cgen(n, &reg)
diff --git a/src/cmd/compile/internal/gc/reg.go b/src/cmd/compile/internal/gc/reg.go
index b4ef9931..f575094 100644
--- a/src/cmd/compile/internal/gc/reg.go
+++ b/src/cmd/compile/internal/gc/reg.go
@@ -249,7 +249,7 @@
p1.As = int16(Thearch.Optoas(OAS, Types[uint8(v.etype)]))
// TODO(rsc): Remove special case here.
- if (Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9') && v.etype == TBOOL {
+ if (Thearch.Thechar == '0' || Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9') && v.etype == TBOOL {
p1.As = int16(Thearch.Optoas(OAS, Types[TUINT8]))
}
p1.From.Type = obj.TYPE_REG
@@ -302,7 +302,7 @@
// TODO(rsc): Remove special case here.
case obj.TYPE_ADDR:
var bit Bits
- if Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9' {
+ if Thearch.Thechar == '0' || Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9' {
goto memcase
}
a.Type = obj.TYPE_MEM
diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go
index a2bdbdc..9051be0 100644
--- a/src/cmd/compile/internal/gc/walk.go
+++ b/src/cmd/compile/internal/gc/walk.go
@@ -3290,7 +3290,7 @@
}
func walkrotate(np **Node) {
- if Thearch.Thechar == '7' || Thearch.Thechar == '9' {
+ if Thearch.Thechar == '0' || Thearch.Thechar == '7' || Thearch.Thechar == '9' {
return
}
@@ -3418,7 +3418,7 @@
// if >= 0, nr is 1<<pow // 1 if nr is negative.
// TODO(minux)
- if Thearch.Thechar == '7' || Thearch.Thechar == '9' {
+ if Thearch.Thechar == '0' || Thearch.Thechar == '7' || Thearch.Thechar == '9' {
return
}
diff --git a/src/cmd/compile/internal/mips64/cgen.go b/src/cmd/compile/internal/mips64/cgen.go
index 4f3092c..434bfc7 100644
--- a/src/cmd/compile/internal/mips64/cgen.go
+++ b/src/cmd/compile/internal/mips64/cgen.go
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package ppc64
+package mips64
import (
"cmd/compile/internal/gc"
"cmd/internal/obj"
- "cmd/internal/obj/ppc64"
+ "cmd/internal/obj/mips"
)
func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
@@ -23,16 +23,16 @@
gc.Fatalf("sgen: invalid alignment %d for %v", align, n.Type)
case 1:
- op = ppc64.AMOVBU
+ op = mips.AMOVB
case 2:
- op = ppc64.AMOVHU
+ op = mips.AMOVH
case 4:
- op = ppc64.AMOVWZU // there is no lwau, only lwaux
+ op = mips.AMOVW
case 8:
- op = ppc64.AMOVDU
+ op = mips.AMOVV
}
if w%int64(align) != 0 {
@@ -53,7 +53,7 @@
if n.Ullman >= res.Ullman {
gc.Agenr(n, &dst, res) // temporarily use dst
gc.Regalloc(&src, gc.Types[gc.Tptr], nil)
- gins(ppc64.AMOVD, &dst, &src)
+ gins(mips.AMOVV, &dst, &src)
if res.Op == gc.ONAME {
gc.Gvardef(res)
}
@@ -76,28 +76,28 @@
if dir < 0 {
if c >= 4 {
gc.Regalloc(&nend, gc.Types[gc.Tptr], nil)
- gins(ppc64.AMOVD, &src, &nend)
+ gins(mips.AMOVV, &src, &nend)
}
- p := gins(ppc64.AADD, nil, &src)
+ p := gins(mips.AADDV, nil, &src)
p.From.Type = obj.TYPE_CONST
p.From.Offset = w
- p = gins(ppc64.AADD, nil, &dst)
+ p = gins(mips.AADDV, nil, &dst)
p.From.Type = obj.TYPE_CONST
p.From.Offset = w
} else {
- p := gins(ppc64.AADD, nil, &src)
+ p := gins(mips.AADDV, nil, &src)
p.From.Type = obj.TYPE_CONST
p.From.Offset = int64(-dir)
- p = gins(ppc64.AADD, nil, &dst)
+ p = gins(mips.AADDV, nil, &dst)
p.From.Type = obj.TYPE_CONST
p.From.Offset = int64(-dir)
if c >= 4 {
gc.Regalloc(&nend, gc.Types[gc.Tptr], nil)
- p := gins(ppc64.AMOVD, &src, &nend)
+ p := gins(mips.AMOVV, &src, &nend)
p.From.Type = obj.TYPE_ADDR
p.From.Offset = w
}
@@ -111,35 +111,43 @@
p.From.Offset = int64(dir)
ploop := p
+ p = gins(mips.AADDV, nil, &src)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(dir)
+
p = gins(op, &tmp, &dst)
p.To.Type = obj.TYPE_MEM
p.To.Offset = int64(dir)
- p = gins(ppc64.ACMP, &src, &nend)
+ p = gins(mips.AADDV, nil, &dst)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(dir)
- gc.Patch(gc.Gbranch(ppc64.ABNE, nil, 0), ploop)
+ gc.Patch(ginsbranch(mips.ABNE, nil, &src, &nend, 0), ploop)
gc.Regfree(&nend)
} else {
- // TODO(austin): Instead of generating ADD $-8,R8; ADD
- // $-8,R7; n*(MOVDU 8(R8),R9; MOVDU R9,8(R7);) just
- // generate the offsets directly and eliminate the
- // ADDs. That will produce shorter, more
+ // TODO: Instead of generating ADDV $-8,R8; ADDV
+ // $-8,R7; n*(MOVV 8(R8),R9; ADDV $8,R8; MOVV R9,8(R7);
+ // ADDV $8,R7;) just generate the offsets directly and
+ // eliminate the ADDs. That will produce shorter, more
// pipeline-able code.
var p *obj.Prog
- for {
- tmp14 := c
- c--
- if tmp14 <= 0 {
- break
- }
-
+ for ; c > 0; c-- {
p = gins(op, &src, &tmp)
p.From.Type = obj.TYPE_MEM
p.From.Offset = int64(dir)
+ p = gins(mips.AADDV, nil, &src)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(dir)
+
p = gins(op, &tmp, &dst)
p.To.Type = obj.TYPE_MEM
p.To.Offset = int64(dir)
+
+ p = gins(mips.AADDV, nil, &dst)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(dir)
}
}
diff --git a/src/cmd/compile/internal/mips64/galign.go b/src/cmd/compile/internal/mips64/galign.go
index 16509da..00ffe17 100644
--- a/src/cmd/compile/internal/mips64/galign.go
+++ b/src/cmd/compile/internal/mips64/galign.go
@@ -2,27 +2,27 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package ppc64
+package mips64
import (
"cmd/compile/internal/gc"
"cmd/internal/obj"
- "cmd/internal/obj/ppc64"
+ "cmd/internal/obj/mips"
)
-var thechar int = '9'
+var thechar int = '0'
-var thestring string = "ppc64"
+var thestring string = "mips64"
var thelinkarch *obj.LinkArch
func linkarchinit() {
thestring = obj.Getgoarch()
gc.Thearch.Thestring = thestring
- if thestring == "ppc64le" {
- thelinkarch = &ppc64.Linkppc64le
+ if thestring == "mips64le" {
+ thelinkarch = &mips.Linkmips64le
} else {
- thelinkarch = &ppc64.Linkppc64
+ thelinkarch = &mips.Linkmips64
}
gc.Thearch.Thelinkarch = thelinkarch
}
@@ -50,15 +50,15 @@
gc.Thearch.Thestring = thestring
gc.Thearch.Thelinkarch = thelinkarch
gc.Thearch.Typedefs = typedefs
- gc.Thearch.REGSP = ppc64.REGSP
- gc.Thearch.REGCTXT = ppc64.REGCTXT
- gc.Thearch.REGCALLX = ppc64.REG_R3
- gc.Thearch.REGCALLX2 = ppc64.REG_R4
- gc.Thearch.REGRETURN = ppc64.REG_R3
- gc.Thearch.REGMIN = ppc64.REG_R0
- gc.Thearch.REGMAX = ppc64.REG_R31
- gc.Thearch.FREGMIN = ppc64.REG_F0
- gc.Thearch.FREGMAX = ppc64.REG_F31
+ gc.Thearch.REGSP = mips.REGSP
+ gc.Thearch.REGCTXT = mips.REGCTXT
+ gc.Thearch.REGCALLX = mips.REG_R1
+ gc.Thearch.REGCALLX2 = mips.REG_R2
+ gc.Thearch.REGRETURN = mips.REGRET
+ gc.Thearch.REGMIN = mips.REG_R0
+ gc.Thearch.REGMAX = mips.REG_R31
+ gc.Thearch.FREGMIN = mips.REG_F0
+ gc.Thearch.FREGMAX = mips.REG_F31
gc.Thearch.MAXWIDTH = MAXWIDTH
gc.Thearch.ReservedRegs = resvd
diff --git a/src/cmd/compile/internal/mips64/ggen.go b/src/cmd/compile/internal/mips64/ggen.go
index 0322e85..8c285a2 100644
--- a/src/cmd/compile/internal/mips64/ggen.go
+++ b/src/cmd/compile/internal/mips64/ggen.go
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package ppc64
+package mips64
import (
"cmd/compile/internal/gc"
"cmd/internal/obj"
- "cmd/internal/obj/ppc64"
+ "cmd/internal/obj/mips"
"fmt"
)
@@ -69,30 +69,35 @@
}
if cnt < int64(4*gc.Widthptr) {
for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
- p = appendpp(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, 8+frame+lo+i)
+ p = appendpp(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, 8+frame+lo+i)
}
// TODO(dfc): https://golang.org/issue/12108
// If DUFFZERO is used inside a tail call (see genwrapper) it will
// overwrite the link register.
} else if false && cnt <= int64(128*gc.Widthptr) {
- p = appendpp(p, ppc64.AADD, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, ppc64.REGRT1, 0)
- p.Reg = ppc64.REGSP
+ p = appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, mips.REGRT1, 0)
+ p.Reg = mips.REGSP
p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
f := gc.Sysfunc("duffzero")
gc.Naddr(&p.To, f)
gc.Afunclit(&p.To, f)
- p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
+ p.To.Offset = 8 * (128 - cnt/int64(gc.Widthptr))
} else {
- p = appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, ppc64.REGTMP, 0)
- p = appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT1, 0)
- p.Reg = ppc64.REGSP
- p = appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, ppc64.REGTMP, 0)
- p = appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
- p.Reg = ppc64.REGRT1
- p = appendpp(p, ppc64.AMOVDU, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGRT1, int64(gc.Widthptr))
+ // ADDV $(8+frame+lo-8), SP, r1
+ // ADDV $cnt, r1, r2
+ // loop:
+ // MOVV R0, (Widthptr)r1
+ // ADDV $Widthptr, r1
+ // BNE r1, r2, loop
+ p = appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, mips.REGRT1, 0)
+ p.Reg = mips.REGSP
+ p = appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0)
+ p.Reg = mips.REGRT1
+ p = appendpp(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGRT1, int64(gc.Widthptr))
p1 := p
- p = appendpp(p, ppc64.ACMP, obj.TYPE_REG, ppc64.REGRT1, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
- p = appendpp(p, ppc64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
+ p = appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, int64(gc.Widthptr), obj.TYPE_REG, mips.REGRT1, 0)
+ p = appendpp(p, mips.ABNE, obj.TYPE_REG, mips.REGRT1, 0, obj.TYPE_BRANCH, 0, 0)
+ p.Reg = mips.REGRT2
gc.Patch(p, p1)
}
@@ -117,8 +122,8 @@
func ginsnop() {
var reg gc.Node
- gc.Nodreg(&reg, gc.Types[gc.TINT], ppc64.REG_R0)
- gins(ppc64.AOR, &reg, &reg)
+ gc.Nodreg(&reg, gc.Types[gc.TINT], mips.REG_R0)
+ gins(mips.ANOR, &reg, &reg)
}
var panicdiv *gc.Node
@@ -131,25 +136,9 @@
* according to op.
*/
func dodiv(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) {
- // Have to be careful about handling
- // most negative int divided by -1 correctly.
- // The hardware will generate undefined result.
- // Also need to explicitly trap on division on zero,
- // the hardware will silently generate undefined result.
- // DIVW will leave unpredicable result in higher 32-bit,
- // so always use DIVD/DIVDU.
t := nl.Type
t0 := t
- check := 0
- if gc.Issigned[t.Etype] {
- check = 1
- if gc.Isconst(nl, gc.CTINT) && nl.Int() != -(1<<uint64(t.Width*8-1)) {
- check = 0
- } else if gc.Isconst(nr, gc.CTINT) && nr.Int() != -1 {
- check = 0
- }
- }
if t.Width < 8 {
if gc.Issigned[t.Etype] {
@@ -157,7 +146,6 @@
} else {
t = gc.Types[gc.TUINT64]
}
- check = 0
}
a := optoas(gc.ODIV, t)
@@ -186,65 +174,26 @@
}
// Handle divide-by-zero panic.
- p1 := gins(optoas(gc.OCMP, t), &tr, nil)
-
- p1.To.Type = obj.TYPE_REG
- p1.To.Reg = ppc64.REGZERO
- p1 = gc.Gbranch(optoas(gc.ONE, t), nil, +1)
+ p1 := ginsbranch(mips.ABNE, nil, &tr, nil, 0)
if panicdiv == nil {
panicdiv = gc.Sysfunc("panicdivide")
}
gc.Ginscall(panicdiv, -1)
gc.Patch(p1, gc.Pc)
- var p2 *obj.Prog
- if check != 0 {
- var nm1 gc.Node
- gc.Nodconst(&nm1, t, -1)
- gins(optoas(gc.OCMP, t), &tr, &nm1)
- p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
- if op == gc.ODIV {
- // a / (-1) is -a.
- gins(optoas(gc.OMINUS, t), nil, &tl)
-
- gmove(&tl, res)
- } else {
- // a % (-1) is 0.
- var nz gc.Node
- gc.Nodconst(&nz, t, 0)
-
- gmove(&nz, res)
- }
-
- p2 = gc.Gbranch(obj.AJMP, nil, 0)
- gc.Patch(p1, gc.Pc)
- }
-
- p1 = gins(a, &tr, &tl)
+ gins3(a, &tr, &tl, nil)
+ gc.Regfree(&tr)
if op == gc.ODIV {
- gc.Regfree(&tr)
- gmove(&tl, res)
- } else {
- // A%B = A-(A/B*B)
- var tm gc.Node
- gc.Regalloc(&tm, t, nil)
-
- // patch div to use the 3 register form
- // TODO(minux): add gins3?
- p1.Reg = p1.To.Reg
-
- p1.To.Reg = tm.Reg
- gins(optoas(gc.OMUL, t), &tr, &tm)
- gc.Regfree(&tr)
- gins(optoas(gc.OSUB, t), &tm, &tl)
- gc.Regfree(&tm)
- gmove(&tl, res)
+ var lo gc.Node
+ gc.Nodreg(&lo, gc.Types[gc.TUINT64], mips.REG_LO)
+ gins(mips.AMOVV, &lo, &tl)
+ } else { // remainder in REG_HI
+ var hi gc.Node
+ gc.Nodreg(&hi, gc.Types[gc.TUINT64], mips.REG_HI)
+ gins(mips.AMOVV, &hi, &tl)
}
-
+ gmove(&tl, res)
gc.Regfree(&tl)
- if check != 0 {
- gc.Patch(p2, gc.Pc)
- }
}
/*
@@ -254,9 +203,7 @@
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
// largest ullman on left.
if nl.Ullman < nr.Ullman {
- tmp := (*gc.Node)(nl)
- nl = nr
- nr = tmp
+ nl, nr = nr, nl
}
t := (*gc.Type)(nl.Type)
@@ -269,26 +216,35 @@
case gc.TINT8,
gc.TINT16,
gc.TINT32:
- gins(optoas(gc.OMUL, t), &n2, &n1)
- p := (*obj.Prog)(gins(ppc64.ASRAD, nil, &n1))
+ gins3(optoas(gc.OMUL, t), &n2, &n1, nil)
+ var lo gc.Node
+ gc.Nodreg(&lo, gc.Types[gc.TUINT64], mips.REG_LO)
+ gins(mips.AMOVV, &lo, &n1)
+ p := (*obj.Prog)(gins(mips.ASRAV, nil, &n1))
p.From.Type = obj.TYPE_CONST
p.From.Offset = int64(w)
case gc.TUINT8,
gc.TUINT16,
gc.TUINT32:
- gins(optoas(gc.OMUL, t), &n2, &n1)
- p := (*obj.Prog)(gins(ppc64.ASRD, nil, &n1))
+ gins3(optoas(gc.OMUL, t), &n2, &n1, nil)
+ var lo gc.Node
+ gc.Nodreg(&lo, gc.Types[gc.TUINT64], mips.REG_LO)
+ gins(mips.AMOVV, &lo, &n1)
+ p := (*obj.Prog)(gins(mips.ASRLV, nil, &n1))
p.From.Type = obj.TYPE_CONST
p.From.Offset = int64(w)
case gc.TINT64,
gc.TUINT64:
if gc.Issigned[t.Etype] {
- gins(ppc64.AMULHD, &n2, &n1)
+ gins3(mips.AMULV, &n2, &n1, nil)
} else {
- gins(ppc64.AMULHDU, &n2, &n1)
+ gins3(mips.AMULVU, &n2, &n1, nil)
}
+ var hi gc.Node
+ gc.Nodreg(&hi, gc.Types[gc.TUINT64], mips.REG_HI)
+ gins(mips.AMOVV, &hi, &n1)
default:
gc.Fatalf("cgen_hmul %v", t)
@@ -372,9 +328,11 @@
// test and fix up large shifts
if !bounded {
+ var rtmp gc.Node
+ gc.Nodreg(&rtmp, tcount, mips.REGTMP)
gc.Nodconst(&n3, tcount, nl.Type.Width*8)
- gins(optoas(gc.OCMP, tcount), &n1, &n3)
- p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, tcount), nil, +1))
+ gins3(mips.ASGTU, &n3, &n1, &rtmp)
+ p1 := ginsbranch(mips.ABNE, nil, &rtmp, nil, 0)
if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
gins(a, &n3, &n2)
@@ -410,61 +368,64 @@
c := uint64(w % 8) // bytes
q := uint64(w / 8) // dwords
- if gc.Reginuse(ppc64.REGRT1) {
- gc.Fatalf("%v in use during clearfat", obj.Rconv(ppc64.REGRT1))
+ if gc.Reginuse(mips.REGRT1) {
+ gc.Fatalf("%v in use during clearfat", obj.Rconv(mips.REGRT1))
}
var r0 gc.Node
- gc.Nodreg(&r0, gc.Types[gc.TUINT64], ppc64.REGZERO)
+ gc.Nodreg(&r0, gc.Types[gc.TUINT64], mips.REGZERO)
var dst gc.Node
- gc.Nodreg(&dst, gc.Types[gc.Tptr], ppc64.REGRT1)
+ gc.Nodreg(&dst, gc.Types[gc.Tptr], mips.REGRT1)
gc.Regrealloc(&dst)
gc.Agen(nl, &dst)
var boff uint64
if q > 128 {
- p := gins(ppc64.ASUB, nil, &dst)
+ p := gins(mips.ASUBV, nil, &dst)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 8
var end gc.Node
gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
- p = gins(ppc64.AMOVD, &dst, &end)
+ p = gins(mips.AMOVV, &dst, &end)
p.From.Type = obj.TYPE_ADDR
p.From.Offset = int64(q * 8)
- p = gins(ppc64.AMOVDU, &r0, &dst)
+ p = gins(mips.AMOVV, &r0, &dst)
p.To.Type = obj.TYPE_MEM
p.To.Offset = 8
pl := (*obj.Prog)(p)
- p = gins(ppc64.ACMP, &dst, &end)
- gc.Patch(gc.Gbranch(ppc64.ABNE, nil, 0), pl)
+ p = gins(mips.AADDV, nil, &dst)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 8
+
+ gc.Patch(ginsbranch(mips.ABNE, nil, &dst, &end, 0), pl)
gc.Regfree(&end)
- // The loop leaves R3 on the last zeroed dword
+ // The loop leaves R1 on the last zeroed dword
boff = 8
// TODO(dfc): https://golang.org/issue/12108
// If DUFFZERO is used inside a tail call (see genwrapper) it will
// overwrite the link register.
} else if false && q >= 4 {
- p := gins(ppc64.ASUB, nil, &dst)
+ p := gins(mips.ASUBV, nil, &dst)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 8
f := (*gc.Node)(gc.Sysfunc("duffzero"))
p = gins(obj.ADUFFZERO, nil, f)
gc.Afunclit(&p.To, f)
- // 4 and 128 = magic constants: see ../../runtime/asm_ppc64x.s
- p.To.Offset = int64(4 * (128 - q))
+ // 8 and 128 = magic constants: see ../../runtime/asm_mips64x.s
+ p.To.Offset = int64(8 * (128 - q))
- // duffzero leaves R3 on the last zeroed dword
+ // duffzero leaves R1 on the last zeroed dword
boff = 8
} else {
var p *obj.Prog
for t := uint64(0); t < q; t++ {
- p = gins(ppc64.AMOVD, &r0, &dst)
+ p = gins(mips.AMOVV, &r0, &dst)
p.To.Type = obj.TYPE_MEM
p.To.Offset = int64(8 * t)
}
@@ -474,7 +435,7 @@
var p *obj.Prog
for t := uint64(0); t < c; t++ {
- p = gins(ppc64.AMOVB, &r0, &dst)
+ p = gins(mips.AMOVB, &r0, &dst)
p.To.Type = obj.TYPE_MEM
p.To.Offset = int64(t + boff)
}
@@ -486,7 +447,6 @@
// Expand CHECKNIL pseudo-op into actual nil pointer check.
func expandchecks(firstp *obj.Prog) {
var p1 *obj.Prog
- var p2 *obj.Prog
for p := (*obj.Prog)(firstp); p != nil; p = p.Link {
if gc.Debug_checknil != 0 && gc.Ctxt.Debugvlog != 0 {
@@ -502,63 +462,33 @@
gc.Fatalf("invalid nil check %v\n", p)
}
- /*
- // check is
- // TD $4, R0, arg (R0 is always zero)
- // eqv. to:
- // tdeq r0, arg
- // NOTE: this needs special runtime support to make SIGTRAP recoverable.
- reg = p->from.reg;
- p->as = ATD;
- p->from = p->to = p->from3 = zprog.from;
- p->from.type = TYPE_CONST;
- p->from.offset = 4;
- p->from.reg = 0;
- p->reg = REGZERO;
- p->to.type = TYPE_REG;
- p->to.reg = reg;
- */
// check is
- // CMP arg, R0
- // BNE 2(PC) [likely]
- // MOVD R0, 0(R0)
+ // BNE arg, 2(PC)
+ // MOVV R0, 0(R0)
p1 = gc.Ctxt.NewProg()
-
- p2 = gc.Ctxt.NewProg()
gc.Clearp(p1)
- gc.Clearp(p2)
- p1.Link = p2
- p2.Link = p.Link
+ p1.Link = p.Link
p.Link = p1
p1.Lineno = p.Lineno
- p2.Lineno = p.Lineno
p1.Pc = 9999
- p2.Pc = 9999
- p.As = ppc64.ACMP
- p.To.Type = obj.TYPE_REG
- p.To.Reg = ppc64.REGZERO
- p1.As = ppc64.ABNE
- //p1->from.type = TYPE_CONST;
- //p1->from.offset = 1; // likely
- p1.To.Type = obj.TYPE_BRANCH
-
- p1.To.Val = p2.Link
+ p.As = mips.ABNE
+ p.To.Type = obj.TYPE_BRANCH
+ p.To.Val = p1.Link
// crash by write to memory address 0.
- p2.As = ppc64.AMOVD
-
- p2.From.Type = obj.TYPE_REG
- p2.From.Reg = ppc64.REGZERO
- p2.To.Type = obj.TYPE_MEM
- p2.To.Reg = ppc64.REGZERO
- p2.To.Offset = 0
+ p1.As = mips.AMOVV
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = mips.REGZERO
+ p1.To.Type = obj.TYPE_MEM
+ p1.To.Reg = mips.REGZERO
+ p1.To.Offset = 0
}
}
// res = runtime.getg()
func getg(res *gc.Node) {
var n1 gc.Node
- gc.Nodreg(&n1, res.Type, ppc64.REGG)
+ gc.Nodreg(&n1, res.Type, mips.REGG)
gmove(&n1, res)
}
diff --git a/src/cmd/compile/internal/mips64/gsubr.go b/src/cmd/compile/internal/mips64/gsubr.go
index f0cf2e1..d2065d9 100644
--- a/src/cmd/compile/internal/mips64/gsubr.go
+++ b/src/cmd/compile/internal/mips64/gsubr.go
@@ -28,34 +28,28 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
-package ppc64
+package mips64
import (
"cmd/compile/internal/big"
"cmd/compile/internal/gc"
"cmd/internal/obj"
- "cmd/internal/obj/ppc64"
+ "cmd/internal/obj/mips"
"fmt"
)
var resvd = []int{
- ppc64.REGZERO,
- ppc64.REGSP, // reserved for SP
- // We need to preserve the C ABI TLS pointer because sigtramp
- // may happen during C code and needs to access the g. C
- // clobbers REGG, so if Go were to clobber REGTLS, sigtramp
- // won't know which convention to use. By preserving REGTLS,
- // we can just retrieve g from TLS when we aren't sure.
- ppc64.REGTLS,
-
- // TODO(austin): Consolidate REGTLS and REGG?
- ppc64.REGG,
- ppc64.REGTMP, // REGTMP
- ppc64.FREGCVI,
- ppc64.FREGZERO,
- ppc64.FREGHALF,
- ppc64.FREGONE,
- ppc64.FREGTWO,
+ mips.REGZERO,
+ mips.REGSP, // reserved for SP
+ mips.REGLINK, // reserved for link
+ mips.REGG,
+ mips.REGTMP,
+ mips.REG_R26, // kernel
+ mips.REG_R27, // kernel
+ mips.FREGZERO,
+ mips.FREGHALF,
+ mips.FREGONE,
+ mips.FREGTWO,
}
/*
@@ -67,13 +61,13 @@
gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
- if as != ppc64.AMOVD && (c < -ppc64.BIG || c > ppc64.BIG) || n2.Op != gc.OREGISTER || as == ppc64.AMULLD {
+ if as != mips.AMOVV && (c < -mips.BIG || c > mips.BIG) || n2.Op != gc.OREGISTER || as == mips.AMUL || as == mips.AMULU || as == mips.AMULV || as == mips.AMULVU {
// cannot have more than 16-bit of immediate in ADD, etc.
// instead, MOV into register first.
var ntmp gc.Node
gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)
- rawgins(ppc64.AMOVD, &n1, &ntmp)
+ rawgins(mips.AMOVV, &n1, &ntmp)
rawgins(as, &ntmp, n2)
gc.Regfree(&ntmp)
return
@@ -82,67 +76,116 @@
rawgins(as, &n1, n2)
}
-/*
- * generate
- * as n, $c (CMP/CMPU)
- */
-func ginscon2(as int, n2 *gc.Node, c int64) {
- var n1 gc.Node
-
- gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
-
- switch as {
- default:
- gc.Fatalf("ginscon2")
-
- case ppc64.ACMP:
- if -ppc64.BIG <= c && c <= ppc64.BIG {
- rawgins(as, n2, &n1)
- return
- }
-
- case ppc64.ACMPU:
- if 0 <= c && c <= 2*ppc64.BIG {
- rawgins(as, n2, &n1)
- return
- }
+// generate branch
+// n1, n2 are registers
+func ginsbranch(as int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
+ p := gc.Gbranch(as, t, likely)
+ gc.Naddr(&p.From, n1)
+ if n2 != nil {
+ p.Reg = n2.Reg
}
-
- // MOV n1 into register first
- var ntmp gc.Node
- gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)
-
- rawgins(ppc64.AMOVD, &n1, &ntmp)
- rawgins(as, n2, &ntmp)
- gc.Regfree(&ntmp)
+ return p
}
func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
- if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && n2.Op != gc.OLITERAL {
- // Reverse comparison to place constant last.
- op = gc.Brrev(op)
+ if !gc.Isfloat[t.Etype] && (op == gc.OLT || op == gc.OGE) {
+ // swap nodes to fit SGT instruction
n1, n2 = n2, n1
}
+ if gc.Isfloat[t.Etype] && (op == gc.OLT || op == gc.OLE) {
+ // swap nodes to fit CMPGT, CMPGE instructions and reverse relation
+ n1, n2 = n2, n1
+ if op == gc.OLT {
+ op = gc.OGT
+ } else {
+ op = gc.OGE
+ }
+ }
var r1, r2, g1, g2 gc.Node
gc.Regalloc(&r1, t, n1)
gc.Regalloc(&g1, n1.Type, &r1)
gc.Cgen(n1, &g1)
gmove(&g1, &r1)
- if gc.Isint[t.Etype] && gc.Isconst(n2, gc.CTINT) {
- ginscon2(optoas(gc.OCMP, t), &r1, n2.Int())
- } else {
- gc.Regalloc(&r2, t, n2)
- gc.Regalloc(&g2, n1.Type, &r2)
- gc.Cgen(n2, &g2)
- gmove(&g2, &r2)
- rawgins(optoas(gc.OCMP, t), &r1, &r2)
- gc.Regfree(&g2)
- gc.Regfree(&r2)
+
+ gc.Regalloc(&r2, t, n2)
+ gc.Regalloc(&g2, n1.Type, &r2)
+ gc.Cgen(n2, &g2)
+ gmove(&g2, &r2)
+
+ var p *obj.Prog
+ var ntmp gc.Node
+ gc.Nodreg(&ntmp, gc.Types[gc.TINT], mips.REGTMP)
+
+ switch gc.Simtype[t.Etype] {
+ case gc.TINT8,
+ gc.TINT16,
+ gc.TINT32,
+ gc.TINT64:
+ if op == gc.OEQ || op == gc.ONE {
+ p = ginsbranch(optoas(op, t), nil, &r1, &r2, likely)
+ } else {
+ gins3(mips.ASGT, &r1, &r2, &ntmp)
+
+ p = ginsbranch(optoas(op, t), nil, &ntmp, nil, likely)
+ }
+
+ case gc.TBOOL,
+ gc.TUINT8,
+ gc.TUINT16,
+ gc.TUINT32,
+ gc.TUINT64,
+ gc.TPTR32,
+ gc.TPTR64:
+ if op == gc.OEQ || op == gc.ONE {
+ p = ginsbranch(optoas(op, t), nil, &r1, &r2, likely)
+ } else {
+ gins3(mips.ASGTU, &r1, &r2, &ntmp)
+
+ p = ginsbranch(optoas(op, t), nil, &ntmp, nil, likely)
+ }
+
+ case gc.TFLOAT32:
+ switch op {
+ default:
+ gc.Fatalf("ginscmp: no entry for op=%v type=%v", gc.Oconv(int(op), 0), t)
+
+ case gc.OEQ,
+ gc.ONE:
+ gins3(mips.ACMPEQF, &r1, &r2, nil)
+
+ case gc.OGE:
+ gins3(mips.ACMPGEF, &r1, &r2, nil)
+
+ case gc.OGT:
+ gins3(mips.ACMPGTF, &r1, &r2, nil)
+ }
+ p = gc.Gbranch(optoas(op, t), nil, likely)
+
+ case gc.TFLOAT64:
+ switch op {
+ default:
+ gc.Fatalf("ginscmp: no entry for op=%v type=%v", gc.Oconv(int(op), 0), t)
+
+ case gc.OEQ,
+ gc.ONE:
+ gins3(mips.ACMPEQD, &r1, &r2, nil)
+
+ case gc.OGE:
+ gins3(mips.ACMPGED, &r1, &r2, nil)
+
+ case gc.OGT:
+ gins3(mips.ACMPGTD, &r1, &r2, nil)
+ }
+ p = gc.Gbranch(optoas(op, t), nil, likely)
}
+
+ gc.Regfree(&g2)
+ gc.Regfree(&r2)
gc.Regfree(&g1)
gc.Regfree(&r1)
- return gc.Gbranch(optoas(op, t), nil, likely)
+
+ return p
}
// set up nodes representing 2^63
@@ -209,7 +252,7 @@
f.Convconst(&con, gc.Types[gc.TINT64])
var r1 gc.Node
gc.Regalloc(&r1, con.Type, t)
- gins(ppc64.AMOVD, &con, &r1)
+ gins(mips.AMOVV, &con, &r1)
gmove(&r1, t)
gc.Regfree(&r1)
return
@@ -221,7 +264,7 @@
f.Convconst(&con, gc.Types[gc.TUINT64])
var r1 gc.Node
gc.Regalloc(&r1, con.Type, t)
- gins(ppc64.AMOVD, &con, &r1)
+ gins(mips.AMOVV, &con, &r1)
gmove(&r1, t)
gc.Regfree(&r1)
return
@@ -236,21 +279,13 @@
}
}
- // float constants come from memory.
- //if(isfloat[tt])
- // goto hard;
-
- // 64-bit immediates are also from memory.
- //if(isint[tt])
- // goto hard;
- //// 64-bit immediates are really 32-bit sign-extended
- //// unless moving into a register.
- //if(isint[tt]) {
- // if(mpcmpfixfix(con.val.u.xval, minintval[TINT32]) < 0)
- // goto hard;
- // if(mpcmpfixfix(con.val.u.xval, maxintval[TINT32]) > 0)
- // goto hard;
- //}
+ // value -> value copy, first operand in memory.
+ // any floating point operand requires register
+ // src, so goto hard to copy to register first.
+ if gc.Ismem(f) && ft != tt && (gc.Isfloat[ft] || gc.Isfloat[tt]) {
+ cvt = gc.Types[ft]
+ goto hard
+ }
// value -> value copy, only one memory operand.
// figure out the instruction to use.
@@ -268,62 +303,57 @@
*/
case gc.TINT8<<16 | gc.TINT8, // same size
gc.TUINT8<<16 | gc.TINT8,
- gc.TINT16<<16 | gc.TINT8,
- // truncate
+ gc.TINT16<<16 | gc.TINT8, // truncate
gc.TUINT16<<16 | gc.TINT8,
gc.TINT32<<16 | gc.TINT8,
gc.TUINT32<<16 | gc.TINT8,
gc.TINT64<<16 | gc.TINT8,
gc.TUINT64<<16 | gc.TINT8:
- a = ppc64.AMOVB
+ a = mips.AMOVB
case gc.TINT8<<16 | gc.TUINT8, // same size
gc.TUINT8<<16 | gc.TUINT8,
- gc.TINT16<<16 | gc.TUINT8,
- // truncate
+ gc.TINT16<<16 | gc.TUINT8, // truncate
gc.TUINT16<<16 | gc.TUINT8,
gc.TINT32<<16 | gc.TUINT8,
gc.TUINT32<<16 | gc.TUINT8,
gc.TINT64<<16 | gc.TUINT8,
gc.TUINT64<<16 | gc.TUINT8:
- a = ppc64.AMOVBZ
+ a = mips.AMOVBU
case gc.TINT16<<16 | gc.TINT16, // same size
gc.TUINT16<<16 | gc.TINT16,
- gc.TINT32<<16 | gc.TINT16,
- // truncate
+ gc.TINT32<<16 | gc.TINT16, // truncate
gc.TUINT32<<16 | gc.TINT16,
gc.TINT64<<16 | gc.TINT16,
gc.TUINT64<<16 | gc.TINT16:
- a = ppc64.AMOVH
+ a = mips.AMOVH
case gc.TINT16<<16 | gc.TUINT16, // same size
gc.TUINT16<<16 | gc.TUINT16,
- gc.TINT32<<16 | gc.TUINT16,
- // truncate
+ gc.TINT32<<16 | gc.TUINT16, // truncate
gc.TUINT32<<16 | gc.TUINT16,
gc.TINT64<<16 | gc.TUINT16,
gc.TUINT64<<16 | gc.TUINT16:
- a = ppc64.AMOVHZ
+ a = mips.AMOVHU
case gc.TINT32<<16 | gc.TINT32, // same size
gc.TUINT32<<16 | gc.TINT32,
- gc.TINT64<<16 | gc.TINT32,
- // truncate
+ gc.TINT64<<16 | gc.TINT32, // truncate
gc.TUINT64<<16 | gc.TINT32:
- a = ppc64.AMOVW
+ a = mips.AMOVW
case gc.TINT32<<16 | gc.TUINT32, // same size
gc.TUINT32<<16 | gc.TUINT32,
- gc.TINT64<<16 | gc.TUINT32,
+ gc.TINT64<<16 | gc.TUINT32, // truncate
gc.TUINT64<<16 | gc.TUINT32:
- a = ppc64.AMOVWZ
+ a = mips.AMOVWU
case gc.TINT64<<16 | gc.TINT64, // same size
gc.TINT64<<16 | gc.TUINT64,
gc.TUINT64<<16 | gc.TINT64,
gc.TUINT64<<16 | gc.TUINT64:
- a = ppc64.AMOVD
+ a = mips.AMOVV
/*
* integer up-conversions
@@ -334,7 +364,7 @@
gc.TINT8<<16 | gc.TUINT32,
gc.TINT8<<16 | gc.TINT64,
gc.TINT8<<16 | gc.TUINT64:
- a = ppc64.AMOVB
+ a = mips.AMOVB
goto rdst
@@ -344,7 +374,7 @@
gc.TUINT8<<16 | gc.TUINT32,
gc.TUINT8<<16 | gc.TINT64,
gc.TUINT8<<16 | gc.TUINT64:
- a = ppc64.AMOVBZ
+ a = mips.AMOVBU
goto rdst
@@ -352,7 +382,7 @@
gc.TINT16<<16 | gc.TUINT32,
gc.TINT16<<16 | gc.TINT64,
gc.TINT16<<16 | gc.TUINT64:
- a = ppc64.AMOVH
+ a = mips.AMOVH
goto rdst
@@ -360,19 +390,19 @@
gc.TUINT16<<16 | gc.TUINT32,
gc.TUINT16<<16 | gc.TINT64,
gc.TUINT16<<16 | gc.TUINT64:
- a = ppc64.AMOVHZ
+ a = mips.AMOVHU
goto rdst
case gc.TINT32<<16 | gc.TINT64, // sign extend int32
gc.TINT32<<16 | gc.TUINT64:
- a = ppc64.AMOVW
+ a = mips.AMOVW
goto rdst
case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
gc.TUINT32<<16 | gc.TUINT64:
- a = ppc64.AMOVWZ
+ a = mips.AMOVWU
goto rdst
@@ -402,49 +432,39 @@
gc.TFLOAT64<<16 | gc.TUINT64:
bignodes()
- var r1 gc.Node
- gc.Regalloc(&r1, gc.Types[ft], f)
+ gc.Regalloc(&r1, gc.Types[gc.TFLOAT64], nil)
gmove(f, &r1)
if tt == gc.TUINT64 {
gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], nil)
gmove(&bigf, &r2)
- gins(ppc64.AFCMPU, &r1, &r2)
- p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1))
- gins(ppc64.AFSUB, &r2, &r1)
+ gins3(mips.ACMPGED, &r1, &r2, nil)
+ p1 := gc.Gbranch(mips.ABFPF, nil, 0)
+ gins(mips.ASUBD, &r2, &r1)
gc.Patch(p1, gc.Pc)
gc.Regfree(&r2)
}
- gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], nil)
- var r3 gc.Node
- gc.Regalloc(&r3, gc.Types[gc.TINT64], t)
- gins(ppc64.AFCTIDZ, &r1, &r2)
- p1 := (*obj.Prog)(gins(ppc64.AFMOVD, &r2, nil))
- p1.To.Type = obj.TYPE_MEM
- p1.To.Reg = ppc64.REGSP
- p1.To.Offset = -8
- p1 = gins(ppc64.AMOVD, nil, &r3)
- p1.From.Type = obj.TYPE_MEM
- p1.From.Reg = ppc64.REGSP
- p1.From.Offset = -8
- gc.Regfree(&r2)
+ gc.Regalloc(&r2, gc.Types[gc.TINT64], t)
+ gins(mips.ATRUNCDV, &r1, &r1)
+ gins(mips.AMOVV, &r1, &r2)
gc.Regfree(&r1)
+
if tt == gc.TUINT64 {
- p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1)) // use CR0 here again
- gc.Nodreg(&r1, gc.Types[gc.TINT64], ppc64.REGTMP)
- gins(ppc64.AMOVD, &bigi, &r1)
- gins(ppc64.AADD, &r1, &r3)
+ p1 := gc.Gbranch(mips.ABFPF, nil, 0) // use FCR0 here again
+ gc.Nodreg(&r1, gc.Types[gc.TINT64], mips.REGTMP)
+ gmove(&bigi, &r1)
+ gins(mips.AADDVU, &r1, &r2)
gc.Patch(p1, gc.Pc)
}
- gmove(&r3, t)
- gc.Regfree(&r3)
+ gmove(&r2, t)
+ gc.Regfree(&r2)
return
//warn("gmove: convert int to float not implemented: %N -> %N\n", f, t);
//return;
// algorithm is:
- // if small enough, use native int64 -> uint64 conversion.
+ // if small enough, use native int64 -> float64 conversion.
// otherwise, halve (rounding to odd?), convert, and double.
/*
* integer to float
@@ -467,35 +487,29 @@
gc.TUINT64<<16 | gc.TFLOAT64:
bignodes()
- var r1 gc.Node
+ var rtmp gc.Node
gc.Regalloc(&r1, gc.Types[gc.TINT64], nil)
gmove(f, &r1)
if ft == gc.TUINT64 {
- gc.Nodreg(&r2, gc.Types[gc.TUINT64], ppc64.REGTMP)
- gmove(&bigi, &r2)
- gins(ppc64.ACMPU, &r1, &r2)
- p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1))
- p2 := (*obj.Prog)(gins(ppc64.ASRD, nil, &r1))
+ gc.Nodreg(&rtmp, gc.Types[gc.TUINT64], mips.REGTMP)
+ gmove(&bigi, &rtmp)
+ gins(mips.AAND, &r1, &rtmp)
+ p1 := ginsbranch(mips.ABEQ, nil, &rtmp, nil, 0)
+ p2 := gins(mips.ASRLV, nil, &r1)
p2.From.Type = obj.TYPE_CONST
p2.From.Offset = 1
gc.Patch(p1, gc.Pc)
}
gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], t)
- p1 := (*obj.Prog)(gins(ppc64.AMOVD, &r1, nil))
- p1.To.Type = obj.TYPE_MEM
- p1.To.Reg = ppc64.REGSP
- p1.To.Offset = -8
- p1 = gins(ppc64.AFMOVD, nil, &r2)
- p1.From.Type = obj.TYPE_MEM
- p1.From.Reg = ppc64.REGSP
- p1.From.Offset = -8
- gins(ppc64.AFCFID, &r2, &r2)
+ gins(mips.AMOVV, &r1, &r2)
+ gins(mips.AMOVVD, &r2, &r2)
gc.Regfree(&r1)
+
if ft == gc.TUINT64 {
- p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1)) // use CR0 here again
- gc.Nodreg(&r1, gc.Types[gc.TFLOAT64], ppc64.FREGTWO)
- gins(ppc64.AFMUL, &r1, &r2)
+ p1 := ginsbranch(mips.ABEQ, nil, &rtmp, nil, 0)
+ gc.Nodreg(&r1, gc.Types[gc.TFLOAT64], mips.FREGTWO)
+ gins(mips.AMULD, &r1, &r2)
gc.Patch(p1, gc.Pc)
}
@@ -507,17 +521,17 @@
* float to float
*/
case gc.TFLOAT32<<16 | gc.TFLOAT32:
- a = ppc64.AFMOVS
+ a = mips.AMOVF
case gc.TFLOAT64<<16 | gc.TFLOAT64:
- a = ppc64.AFMOVD
+ a = mips.AMOVD
case gc.TFLOAT32<<16 | gc.TFLOAT64:
- a = ppc64.AFMOVS
+ a = mips.AMOVFD
goto rdst
case gc.TFLOAT64<<16 | gc.TFLOAT32:
- a = ppc64.AFRSP
+ a = mips.AMOVDF
goto rdst
}
@@ -555,17 +569,24 @@
return nil // caller must not use
}
}
- if as == ppc64.ACMP || as == ppc64.ACMPU {
- if x, ok := t.IntLiteral(); ok {
- ginscon2(as, f, x)
- return nil // caller must not use
- }
- }
return rawgins(as, f, t)
}
/*
* generate one instruction:
+ * as f, r, t
+ * r must be register, if not nil
+ */
+func gins3(as int, f, r, t *gc.Node) *obj.Prog {
+ p := rawgins(as, f, t)
+ if r != nil {
+ p.Reg = r.Reg
+ }
+ return p
+}
+
+/*
+ * generate one instruction:
* as f, t
*/
func rawgins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
@@ -578,37 +599,52 @@
switch as {
case obj.ACALL:
- if p.To.Type == obj.TYPE_REG && p.To.Reg != ppc64.REG_CTR {
- // Allow front end to emit CALL REG, and rewrite into MOV REG, CTR; CALL CTR.
- pp := gc.Prog(as)
- pp.From = p.From
- pp.To.Type = obj.TYPE_REG
- pp.To.Reg = ppc64.REG_CTR
-
- p.As = ppc64.AMOVD
- p.From = p.To
- p.To.Type = obj.TYPE_REG
- p.To.Reg = ppc64.REG_CTR
+ if p.To.Type == obj.TYPE_REG {
+ // Allow front end to emit CALL REG, and rewrite into CALL (REG).
+ p.From = obj.Addr{}
+ p.To.Type = obj.TYPE_MEM
+ p.To.Offset = 0
if gc.Debug['g'] != 0 {
fmt.Printf("%v\n", p)
- fmt.Printf("%v\n", pp)
}
- return pp
+ return p
}
// Bad things the front end has done to us. Crash to find call stack.
- case ppc64.AAND, ppc64.AMULLD:
+ case mips.AAND:
if p.From.Type == obj.TYPE_CONST {
gc.Debug['h'] = 1
gc.Fatalf("bad inst: %v", p)
}
- case ppc64.ACMP, ppc64.ACMPU:
+ case mips.ASGT, mips.ASGTU:
if p.From.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_MEM {
gc.Debug['h'] = 1
gc.Fatalf("bad inst: %v", p)
}
+
+ // Special cases
+ case mips.AMUL, mips.AMULU, mips.AMULV, mips.AMULVU:
+ if p.From.Type == obj.TYPE_CONST {
+ gc.Debug['h'] = 1
+ gc.Fatalf("bad inst: %v", p)
+ }
+
+ pp := gc.Prog(mips.AMOVV)
+ pp.From.Type = obj.TYPE_REG
+ pp.From.Reg = mips.REG_LO
+ pp.To = p.To
+
+ p.Reg = p.To.Reg
+ p.To = obj.Addr{}
+
+ case mips.ASUBVU:
+ // unary
+ if f == nil {
+ p.From = p.To
+ p.Reg = mips.REGZERO
+ }
}
if gc.Debug['g'] != 0 {
@@ -617,26 +653,19 @@
w := int32(0)
switch as {
- case ppc64.AMOVB,
- ppc64.AMOVBU,
- ppc64.AMOVBZ,
- ppc64.AMOVBZU:
+ case mips.AMOVB,
+ mips.AMOVBU:
w = 1
- case ppc64.AMOVH,
- ppc64.AMOVHU,
- ppc64.AMOVHZ,
- ppc64.AMOVHZU:
+ case mips.AMOVH,
+ mips.AMOVHU:
w = 2
- case ppc64.AMOVW,
- ppc64.AMOVWU,
- ppc64.AMOVWZ,
- ppc64.AMOVWZU:
+ case mips.AMOVW,
+ mips.AMOVWU:
w = 4
- case ppc64.AMOVD,
- ppc64.AMOVDU:
+ case mips.AMOVV:
if p.From.Type == obj.TYPE_CONST || p.From.Type == obj.TYPE_ADDR {
break
}
@@ -698,10 +727,12 @@
OEQ_ | gc.TINT64,
OEQ_ | gc.TUINT64,
OEQ_ | gc.TPTR32,
- OEQ_ | gc.TPTR64,
- OEQ_ | gc.TFLOAT32,
- OEQ_ | gc.TFLOAT64:
- a = ppc64.ABEQ
+ OEQ_ | gc.TPTR64:
+ a = mips.ABEQ
+
+ case OEQ_ | gc.TFLOAT32, // ACMPEQF
+ OEQ_ | gc.TFLOAT64: // ACMPEQD
+ a = mips.ABFPT
case ONE_ | gc.TBOOL,
ONE_ | gc.TINT8,
@@ -713,111 +744,99 @@
ONE_ | gc.TINT64,
ONE_ | gc.TUINT64,
ONE_ | gc.TPTR32,
- ONE_ | gc.TPTR64,
- ONE_ | gc.TFLOAT32,
- ONE_ | gc.TFLOAT64:
- a = ppc64.ABNE
+ ONE_ | gc.TPTR64:
+ a = mips.ABNE
- case OLT_ | gc.TINT8, // ACMP
+ case ONE_ | gc.TFLOAT32, // ACMPEQF
+ ONE_ | gc.TFLOAT64: // ACMPEQD
+ a = mips.ABFPF
+
+ case OLT_ | gc.TINT8, // ASGT
OLT_ | gc.TINT16,
OLT_ | gc.TINT32,
OLT_ | gc.TINT64,
- OLT_ | gc.TUINT8,
- // ACMPU
+ OLT_ | gc.TUINT8, // ASGTU
OLT_ | gc.TUINT16,
OLT_ | gc.TUINT32,
- OLT_ | gc.TUINT64,
- OLT_ | gc.TFLOAT32,
- // AFCMPU
- OLT_ | gc.TFLOAT64:
- a = ppc64.ABLT
+ OLT_ | gc.TUINT64:
+ a = mips.ABNE
- case OLE_ | gc.TINT8, // ACMP
+ case OLT_ | gc.TFLOAT32, // ACMPGEF
+ OLT_ | gc.TFLOAT64: // ACMPGED
+ a = mips.ABFPT
+
+ case OLE_ | gc.TINT8, // ASGT
OLE_ | gc.TINT16,
OLE_ | gc.TINT32,
OLE_ | gc.TINT64,
- OLE_ | gc.TUINT8,
- // ACMPU
+ OLE_ | gc.TUINT8, // ASGTU
OLE_ | gc.TUINT16,
OLE_ | gc.TUINT32,
OLE_ | gc.TUINT64:
- // No OLE for floats, because it mishandles NaN.
- // Front end must reverse comparison or use OLT and OEQ together.
- a = ppc64.ABLE
+ a = mips.ABEQ
- case OGT_ | gc.TINT8,
+ case OLE_ | gc.TFLOAT32, // ACMPGTF
+ OLE_ | gc.TFLOAT64: // ACMPGTD
+ a = mips.ABFPT
+
+ case OGT_ | gc.TINT8, // ASGT
OGT_ | gc.TINT16,
OGT_ | gc.TINT32,
OGT_ | gc.TINT64,
- OGT_ | gc.TUINT8,
+ OGT_ | gc.TUINT8, // ASGTU
OGT_ | gc.TUINT16,
OGT_ | gc.TUINT32,
- OGT_ | gc.TUINT64,
- OGT_ | gc.TFLOAT32,
- OGT_ | gc.TFLOAT64:
- a = ppc64.ABGT
+ OGT_ | gc.TUINT64:
+ a = mips.ABNE
- case OGE_ | gc.TINT8,
+ case OGT_ | gc.TFLOAT32, // ACMPGTF
+ OGT_ | gc.TFLOAT64: // ACMPGTD
+ a = mips.ABFPT
+
+ case OGE_ | gc.TINT8, // ASGT
OGE_ | gc.TINT16,
OGE_ | gc.TINT32,
OGE_ | gc.TINT64,
- OGE_ | gc.TUINT8,
+ OGE_ | gc.TUINT8, // ASGTU
OGE_ | gc.TUINT16,
OGE_ | gc.TUINT32,
OGE_ | gc.TUINT64:
- // No OGE for floats, because it mishandles NaN.
- // Front end must reverse comparison or use OLT and OEQ together.
- a = ppc64.ABGE
+ a = mips.ABEQ
- case OCMP_ | gc.TBOOL,
- OCMP_ | gc.TINT8,
- OCMP_ | gc.TINT16,
- OCMP_ | gc.TINT32,
- OCMP_ | gc.TPTR32,
- OCMP_ | gc.TINT64:
- a = ppc64.ACMP
-
- case OCMP_ | gc.TUINT8,
- OCMP_ | gc.TUINT16,
- OCMP_ | gc.TUINT32,
- OCMP_ | gc.TUINT64,
- OCMP_ | gc.TPTR64:
- a = ppc64.ACMPU
-
- case OCMP_ | gc.TFLOAT32,
- OCMP_ | gc.TFLOAT64:
- a = ppc64.AFCMPU
+ case OGE_ | gc.TFLOAT32, // ACMPGEF
+ OGE_ | gc.TFLOAT64: // ACMPGED
+ a = mips.ABFPT
case OAS_ | gc.TBOOL,
OAS_ | gc.TINT8:
- a = ppc64.AMOVB
+ a = mips.AMOVB
case OAS_ | gc.TUINT8:
- a = ppc64.AMOVBZ
+ a = mips.AMOVBU
case OAS_ | gc.TINT16:
- a = ppc64.AMOVH
+ a = mips.AMOVH
case OAS_ | gc.TUINT16:
- a = ppc64.AMOVHZ
+ a = mips.AMOVHU
case OAS_ | gc.TINT32:
- a = ppc64.AMOVW
+ a = mips.AMOVW
case OAS_ | gc.TUINT32,
OAS_ | gc.TPTR32:
- a = ppc64.AMOVWZ
+ a = mips.AMOVWU
case OAS_ | gc.TINT64,
OAS_ | gc.TUINT64,
OAS_ | gc.TPTR64:
- a = ppc64.AMOVD
+ a = mips.AMOVV
case OAS_ | gc.TFLOAT32:
- a = ppc64.AFMOVS
+ a = mips.AMOVF
case OAS_ | gc.TFLOAT64:
- a = ppc64.AFMOVD
+ a = mips.AMOVD
case OADD_ | gc.TINT8,
OADD_ | gc.TUINT8,
@@ -825,17 +844,19 @@
OADD_ | gc.TUINT16,
OADD_ | gc.TINT32,
OADD_ | gc.TUINT32,
- OADD_ | gc.TPTR32,
- OADD_ | gc.TINT64,
+ OADD_ | gc.TPTR32:
+ a = mips.AADDU
+
+ case OADD_ | gc.TINT64,
OADD_ | gc.TUINT64,
OADD_ | gc.TPTR64:
- a = ppc64.AADD
+ a = mips.AADDVU
case OADD_ | gc.TFLOAT32:
- a = ppc64.AFADDS
+ a = mips.AADDF
case OADD_ | gc.TFLOAT64:
- a = ppc64.AFADD
+ a = mips.AADDD
case OSUB_ | gc.TINT8,
OSUB_ | gc.TUINT8,
@@ -843,17 +864,19 @@
OSUB_ | gc.TUINT16,
OSUB_ | gc.TINT32,
OSUB_ | gc.TUINT32,
- OSUB_ | gc.TPTR32,
- OSUB_ | gc.TINT64,
+ OSUB_ | gc.TPTR32:
+ a = mips.ASUBU
+
+ case OSUB_ | gc.TINT64,
OSUB_ | gc.TUINT64,
OSUB_ | gc.TPTR64:
- a = ppc64.ASUB
+ a = mips.ASUBVU
case OSUB_ | gc.TFLOAT32:
- a = ppc64.AFSUBS
+ a = mips.ASUBF
case OSUB_ | gc.TFLOAT64:
- a = ppc64.AFSUB
+ a = mips.ASUBD
case OMINUS_ | gc.TINT8,
OMINUS_ | gc.TUINT8,
@@ -865,7 +888,7 @@
OMINUS_ | gc.TINT64,
OMINUS_ | gc.TUINT64,
OMINUS_ | gc.TPTR64:
- a = ppc64.ANEG
+ a = mips.ASUBVU
case OAND_ | gc.TINT8,
OAND_ | gc.TUINT8,
@@ -877,7 +900,7 @@
OAND_ | gc.TINT64,
OAND_ | gc.TUINT64,
OAND_ | gc.TPTR64:
- a = ppc64.AAND
+ a = mips.AAND
case OOR_ | gc.TINT8,
OOR_ | gc.TUINT8,
@@ -889,7 +912,7 @@
OOR_ | gc.TINT64,
OOR_ | gc.TUINT64,
OOR_ | gc.TPTR64:
- a = ppc64.AOR
+ a = mips.AOR
case OXOR_ | gc.TINT8,
OXOR_ | gc.TUINT8,
@@ -901,7 +924,7 @@
OXOR_ | gc.TINT64,
OXOR_ | gc.TUINT64,
OXOR_ | gc.TPTR64:
- a = ppc64.AXOR
+ a = mips.AXOR
// TODO(minux): handle rotates
//case CASE(OLROT, TINT8):
@@ -927,7 +950,7 @@
OLSH_ | gc.TINT64,
OLSH_ | gc.TUINT64,
OLSH_ | gc.TPTR64:
- a = ppc64.ASLD
+ a = mips.ASLLV
case ORSH_ | gc.TUINT8,
ORSH_ | gc.TUINT16,
@@ -935,13 +958,13 @@
ORSH_ | gc.TPTR32,
ORSH_ | gc.TUINT64,
ORSH_ | gc.TPTR64:
- a = ppc64.ASRD
+ a = mips.ASRLV
case ORSH_ | gc.TINT8,
ORSH_ | gc.TINT16,
ORSH_ | gc.TINT32,
ORSH_ | gc.TINT64:
- a = ppc64.ASRAD
+ a = mips.ASRAV
// TODO(minux): handle rotates
//case CASE(ORROTC, TINT8):
@@ -956,39 +979,37 @@
// break;
case OHMUL_ | gc.TINT64:
- a = ppc64.AMULHD
+ a = mips.AMULV
case OHMUL_ | gc.TUINT64,
OHMUL_ | gc.TPTR64:
- a = ppc64.AMULHDU
+ a = mips.AMULVU
case OMUL_ | gc.TINT8,
OMUL_ | gc.TINT16,
OMUL_ | gc.TINT32,
OMUL_ | gc.TINT64:
- a = ppc64.AMULLD
+ a = mips.AMULV
case OMUL_ | gc.TUINT8,
OMUL_ | gc.TUINT16,
OMUL_ | gc.TUINT32,
OMUL_ | gc.TPTR32,
- // don't use word multiply, the high 32-bit are undefined.
OMUL_ | gc.TUINT64,
OMUL_ | gc.TPTR64:
- // for 64-bit multiplies, signedness doesn't matter.
- a = ppc64.AMULLD
+ a = mips.AMULVU
case OMUL_ | gc.TFLOAT32:
- a = ppc64.AFMULS
+ a = mips.AMULF
case OMUL_ | gc.TFLOAT64:
- a = ppc64.AFMUL
+ a = mips.AMULD
case ODIV_ | gc.TINT8,
ODIV_ | gc.TINT16,
ODIV_ | gc.TINT32,
ODIV_ | gc.TINT64:
- a = ppc64.ADIVD
+ a = mips.ADIVV
case ODIV_ | gc.TUINT8,
ODIV_ | gc.TUINT16,
@@ -996,13 +1017,13 @@
ODIV_ | gc.TPTR32,
ODIV_ | gc.TUINT64,
ODIV_ | gc.TPTR64:
- a = ppc64.ADIVDU
+ a = mips.ADIVVU
case ODIV_ | gc.TFLOAT32:
- a = ppc64.AFDIVS
+ a = mips.ADIVF
case ODIV_ | gc.TFLOAT64:
- a = ppc64.AFDIV
+ a = mips.ADIVD
}
return a
diff --git a/src/cmd/compile/internal/mips64/opt.go b/src/cmd/compile/internal/mips64/opt.go
deleted file mode 100644
index 1704f63..0000000
--- a/src/cmd/compile/internal/mips64/opt.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ppc64
-
-// Many Power ISA arithmetic and logical instructions come in four
-// standard variants. These bits let us map between variants.
-const (
- V_CC = 1 << 0 // xCC (affect CR field 0 flags)
- V_V = 1 << 1 // xV (affect SO and OV flags)
-)
diff --git a/src/cmd/compile/internal/mips64/peep.go b/src/cmd/compile/internal/mips64/peep.go
index 9c3f1ed..681a31c 100644
--- a/src/cmd/compile/internal/mips64/peep.go
+++ b/src/cmd/compile/internal/mips64/peep.go
@@ -28,12 +28,12 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
-package ppc64
+package mips64
import (
"cmd/compile/internal/gc"
"cmd/internal/obj"
- "cmd/internal/obj/ppc64"
+ "cmd/internal/obj/mips"
"fmt"
)
@@ -64,11 +64,11 @@
// can eliminate moves that don't care without
// breaking moves that do care. This might let us
// simplify or remove the next peep loop, too.
- if p.As == ppc64.AMOVD || p.As == ppc64.AFMOVD {
+ if p.As == mips.AMOVV || p.As == mips.AMOVF || p.As == mips.AMOVD {
if regtyp(&p.To) {
// Try to eliminate reg->reg moves
if regtyp(&p.From) {
- if p.From.Type == p.To.Type {
+ if isfreg(&p.From) == isfreg(&p.To) {
if copyprop(r) {
excise(r)
t++
@@ -82,9 +82,9 @@
// Convert uses to $0 to uses of R0 and
// propagate R0
if regzer(&p.From) != 0 {
- if p.To.Type == obj.TYPE_REG {
+ if p.To.Type == obj.TYPE_REG && !isfreg(&p.To) {
p.From.Type = obj.TYPE_REG
- p.From.Reg = ppc64.REGZERO
+ p.From.Reg = mips.REGZERO
if copyprop(r) {
excise(r)
t++
@@ -113,12 +113,12 @@
default:
continue
- case ppc64.AMOVH,
- ppc64.AMOVHZ,
- ppc64.AMOVB,
- ppc64.AMOVBZ,
- ppc64.AMOVW,
- ppc64.AMOVWZ:
+ case mips.AMOVH,
+ mips.AMOVHU,
+ mips.AMOVB,
+ mips.AMOVBU,
+ mips.AMOVW,
+ mips.AMOVWU:
if p.To.Type != obj.TYPE_REG {
continue
}
@@ -141,209 +141,6 @@
excise(r1)
}
- if gc.Debug['D'] > 1 {
- goto ret /* allow following code improvement to be suppressed */
- }
-
- /*
- * look for OP x,y,R; CMP R, $0 -> OPCC x,y,R
- * when OP can set condition codes correctly
- */
- for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
- p = r.Prog
- switch p.As {
- case ppc64.ACMP,
- ppc64.ACMPW: /* always safe? */
- if regzer(&p.To) == 0 {
- continue
- }
- r1 = r.S1
- if r1 == nil {
- continue
- }
- switch r1.Prog.As {
- default:
- continue
-
- /* the conditions can be complex and these are currently little used */
- case ppc64.ABCL,
- ppc64.ABC:
- continue
-
- case ppc64.ABEQ,
- ppc64.ABGE,
- ppc64.ABGT,
- ppc64.ABLE,
- ppc64.ABLT,
- ppc64.ABNE,
- ppc64.ABVC,
- ppc64.ABVS:
- break
- }
-
- r1 = r
- for {
- r1 = gc.Uniqp(r1)
- if r1 == nil || r1.Prog.As != obj.ANOP {
- break
- }
- }
-
- if r1 == nil {
- continue
- }
- p1 = r1.Prog
- if p1.To.Type != obj.TYPE_REG || p1.To.Reg != p.From.Reg {
- continue
- }
- switch p1.As {
- /* irregular instructions */
- case ppc64.ASUB,
- ppc64.AADD,
- ppc64.AXOR,
- ppc64.AOR:
- if p1.From.Type == obj.TYPE_CONST || p1.From.Type == obj.TYPE_ADDR {
- continue
- }
- }
-
- switch p1.As {
- default:
- continue
-
- case ppc64.AMOVW,
- ppc64.AMOVD:
- if p1.From.Type != obj.TYPE_REG {
- continue
- }
- continue
-
- case ppc64.AANDCC,
- ppc64.AANDNCC,
- ppc64.AORCC,
- ppc64.AORNCC,
- ppc64.AXORCC,
- ppc64.ASUBCC,
- ppc64.ASUBECC,
- ppc64.ASUBMECC,
- ppc64.ASUBZECC,
- ppc64.AADDCC,
- ppc64.AADDCCC,
- ppc64.AADDECC,
- ppc64.AADDMECC,
- ppc64.AADDZECC,
- ppc64.ARLWMICC,
- ppc64.ARLWNMCC,
- /* don't deal with floating point instructions for now */
- /*
- case AFABS:
- case AFADD:
- case AFADDS:
- case AFCTIW:
- case AFCTIWZ:
- case AFDIV:
- case AFDIVS:
- case AFMADD:
- case AFMADDS:
- case AFMOVD:
- case AFMSUB:
- case AFMSUBS:
- case AFMUL:
- case AFMULS:
- case AFNABS:
- case AFNEG:
- case AFNMADD:
- case AFNMADDS:
- case AFNMSUB:
- case AFNMSUBS:
- case AFRSP:
- case AFSUB:
- case AFSUBS:
- case ACNTLZW:
- case AMTFSB0:
- case AMTFSB1:
- */
- ppc64.AADD,
- ppc64.AADDV,
- ppc64.AADDC,
- ppc64.AADDCV,
- ppc64.AADDME,
- ppc64.AADDMEV,
- ppc64.AADDE,
- ppc64.AADDEV,
- ppc64.AADDZE,
- ppc64.AADDZEV,
- ppc64.AAND,
- ppc64.AANDN,
- ppc64.ADIVW,
- ppc64.ADIVWV,
- ppc64.ADIVWU,
- ppc64.ADIVWUV,
- ppc64.ADIVD,
- ppc64.ADIVDV,
- ppc64.ADIVDU,
- ppc64.ADIVDUV,
- ppc64.AEQV,
- ppc64.AEXTSB,
- ppc64.AEXTSH,
- ppc64.AEXTSW,
- ppc64.AMULHW,
- ppc64.AMULHWU,
- ppc64.AMULLW,
- ppc64.AMULLWV,
- ppc64.AMULHD,
- ppc64.AMULHDU,
- ppc64.AMULLD,
- ppc64.AMULLDV,
- ppc64.ANAND,
- ppc64.ANEG,
- ppc64.ANEGV,
- ppc64.ANOR,
- ppc64.AOR,
- ppc64.AORN,
- ppc64.AREM,
- ppc64.AREMV,
- ppc64.AREMU,
- ppc64.AREMUV,
- ppc64.AREMD,
- ppc64.AREMDV,
- ppc64.AREMDU,
- ppc64.AREMDUV,
- ppc64.ARLWMI,
- ppc64.ARLWNM,
- ppc64.ASLW,
- ppc64.ASRAW,
- ppc64.ASRW,
- ppc64.ASLD,
- ppc64.ASRAD,
- ppc64.ASRD,
- ppc64.ASUB,
- ppc64.ASUBV,
- ppc64.ASUBC,
- ppc64.ASUBCV,
- ppc64.ASUBME,
- ppc64.ASUBMEV,
- ppc64.ASUBE,
- ppc64.ASUBEV,
- ppc64.ASUBZE,
- ppc64.ASUBZEV,
- ppc64.AXOR:
- t = variant2as(int(p1.As), as2variant(int(p1.As))|V_CC)
- }
-
- if gc.Debug['D'] != 0 {
- fmt.Printf("cmp %v; %v -> ", p1, p)
- }
- p1.As = int16(t)
- if gc.Debug['D'] != 0 {
- fmt.Printf("%v\n", p1)
- }
- excise(r)
- continue
- }
- }
-
-ret:
gc.Flowend(g)
}
@@ -368,7 +165,7 @@
}
}
if a.Type == obj.TYPE_REG {
- if a.Reg == ppc64.REGZERO {
+ if a.Reg == mips.REGZERO {
return 1
}
}
@@ -377,7 +174,11 @@
func regtyp(a *obj.Addr) bool {
// TODO(rsc): Floating point register exclusions?
- return a.Type == obj.TYPE_REG && ppc64.REG_R0 <= a.Reg && a.Reg <= ppc64.REG_F31 && a.Reg != ppc64.REGZERO
+ return a.Type == obj.TYPE_REG && mips.REG_R0 <= a.Reg && a.Reg <= mips.REG_F31 && a.Reg != mips.REGZERO
+}
+
+func isfreg(a *obj.Addr) bool {
+ return mips.REG_F0 <= a.Reg && a.Reg <= mips.REG_F31
}
/*
@@ -607,7 +408,7 @@
// 0 otherwise (not touched)
func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
if p.From3Type() != obj.TYPE_NONE {
- // 9g never generates a from3
+ // never generates a from3
fmt.Printf("copyu: from3 (%v) not implemented\n", gc.Ctxt.Dconv(p.From3))
}
@@ -617,34 +418,29 @@
return 2
case obj.ANOP, /* read p->from, write p->to */
- ppc64.AMOVH,
- ppc64.AMOVHZ,
- ppc64.AMOVB,
- ppc64.AMOVBZ,
- ppc64.AMOVW,
- ppc64.AMOVWZ,
- ppc64.AMOVD,
- ppc64.ANEG,
- ppc64.ANEGCC,
- ppc64.AADDME,
- ppc64.AADDMECC,
- ppc64.AADDZE,
- ppc64.AADDZECC,
- ppc64.ASUBME,
- ppc64.ASUBMECC,
- ppc64.ASUBZE,
- ppc64.ASUBZECC,
- ppc64.AFCTIW,
- ppc64.AFCTIWZ,
- ppc64.AFCTID,
- ppc64.AFCTIDZ,
- ppc64.AFCFID,
- ppc64.AFCFIDCC,
- ppc64.AFMOVS,
- ppc64.AFMOVD,
- ppc64.AFRSP,
- ppc64.AFNEG,
- ppc64.AFNEGCC:
+ mips.AMOVV,
+ mips.AMOVF,
+ mips.AMOVD,
+ mips.AMOVH,
+ mips.AMOVHU,
+ mips.AMOVB,
+ mips.AMOVBU,
+ mips.AMOVW,
+ mips.AMOVWU,
+ mips.AMOVFD,
+ mips.AMOVDF,
+ mips.AMOVDW,
+ mips.AMOVWD,
+ mips.AMOVFW,
+ mips.AMOVWF,
+ mips.AMOVDV,
+ mips.AMOVVD,
+ mips.AMOVFV,
+ mips.AMOVVF,
+ mips.ATRUNCFV,
+ mips.ATRUNCDV,
+ mips.ATRUNCFW,
+ mips.ATRUNCDW:
if s != nil {
if copysub(&p.From, v, s, 1) != 0 {
return 1
@@ -680,103 +476,37 @@
return 0
- case ppc64.AMOVBU, /* rar p->from, write p->to or read p->from, rar p->to */
- ppc64.AMOVBZU,
- ppc64.AMOVHU,
- ppc64.AMOVHZU,
- ppc64.AMOVWZU,
- ppc64.AMOVDU:
- if p.From.Type == obj.TYPE_MEM {
- if copyas(&p.From, v) {
- // No s!=nil check; need to fail
- // anyway in that case
- return 2
- }
+ case mips.ASGT, /* read p->from, read p->reg, write p->to */
+ mips.ASGTU,
- if s != nil {
- if copysub(&p.To, v, s, 1) != 0 {
- return 1
- }
- return 0
- }
+ mips.AADD,
+ mips.AADDU,
+ mips.ASUB,
+ mips.ASUBU,
+ mips.ASLL,
+ mips.ASRL,
+ mips.ASRA,
+ mips.AOR,
+ mips.ANOR,
+ mips.AAND,
+ mips.AXOR,
- if copyas(&p.To, v) {
- return 3
- }
- } else if p.To.Type == obj.TYPE_MEM {
- if copyas(&p.To, v) {
- return 2
- }
- if s != nil {
- if copysub(&p.From, v, s, 1) != 0 {
- return 1
- }
- return 0
- }
+ mips.AADDV,
+ mips.AADDVU,
+ mips.ASUBV,
+ mips.ASUBVU,
+ mips.ASLLV,
+ mips.ASRLV,
+ mips.ASRAV,
- if copyau(&p.From, v) {
- return 1
- }
- } else {
- fmt.Printf("copyu: bad %v\n", p)
- }
-
- return 0
-
- case ppc64.ARLWMI, /* read p->from, read p->reg, rar p->to */
- ppc64.ARLWMICC:
- if copyas(&p.To, v) {
- return 2
- }
- fallthrough
-
- /* fall through */
- case ppc64.AADD,
- /* read p->from, read p->reg, write p->to */
- ppc64.AADDC,
- ppc64.AADDE,
- ppc64.ASUB,
- ppc64.ASLW,
- ppc64.ASRW,
- ppc64.ASRAW,
- ppc64.ASLD,
- ppc64.ASRD,
- ppc64.ASRAD,
- ppc64.AOR,
- ppc64.AORCC,
- ppc64.AORN,
- ppc64.AORNCC,
- ppc64.AAND,
- ppc64.AANDCC,
- ppc64.AANDN,
- ppc64.AANDNCC,
- ppc64.ANAND,
- ppc64.ANANDCC,
- ppc64.ANOR,
- ppc64.ANORCC,
- ppc64.AXOR,
- ppc64.AMULHW,
- ppc64.AMULHWU,
- ppc64.AMULLW,
- ppc64.AMULLD,
- ppc64.ADIVW,
- ppc64.ADIVD,
- ppc64.ADIVWU,
- ppc64.ADIVDU,
- ppc64.AREM,
- ppc64.AREMU,
- ppc64.AREMD,
- ppc64.AREMDU,
- ppc64.ARLWNM,
- ppc64.ARLWNMCC,
- ppc64.AFADDS,
- ppc64.AFADD,
- ppc64.AFSUBS,
- ppc64.AFSUB,
- ppc64.AFMULS,
- ppc64.AFMUL,
- ppc64.AFDIVS,
- ppc64.AFDIV:
+ mips.AADDF,
+ mips.AADDD,
+ mips.ASUBF,
+ mips.ASUBD,
+ mips.AMULF,
+ mips.AMULD,
+ mips.ADIVF,
+ mips.ADIVD:
if s != nil {
if copysub(&p.From, v, s, 1) != 0 {
return 1
@@ -822,42 +552,47 @@
}
return 0
- case ppc64.ABEQ,
- ppc64.ABGT,
- ppc64.ABGE,
- ppc64.ABLT,
- ppc64.ABLE,
- ppc64.ABNE,
- ppc64.ABVC,
- ppc64.ABVS:
- return 0
-
case obj.ACHECKNIL, /* read p->from */
- ppc64.ACMP, /* read p->from, read p->to */
- ppc64.ACMPU,
- ppc64.ACMPW,
- ppc64.ACMPWU,
- ppc64.AFCMPO,
- ppc64.AFCMPU:
+ mips.ABEQ, /* read p->from, read p->reg */
+ mips.ABNE,
+ mips.ABGTZ,
+ mips.ABGEZ,
+ mips.ABLTZ,
+ mips.ABLEZ,
+
+ mips.ACMPEQD,
+ mips.ACMPEQF,
+ mips.ACMPGED,
+ mips.ACMPGEF,
+ mips.ACMPGTD,
+ mips.ACMPGTF,
+ mips.ABFPF,
+ mips.ABFPT,
+
+ mips.AMUL,
+ mips.AMULU,
+ mips.ADIV,
+ mips.ADIVU,
+ mips.AMULV,
+ mips.AMULVU,
+ mips.ADIVV,
+ mips.ADIVVU:
if s != nil {
if copysub(&p.From, v, s, 1) != 0 {
return 1
}
- return copysub(&p.To, v, s, 1)
+ return copysub1(p, v, s, 1)
}
if copyau(&p.From, v) {
return 1
}
- if copyau(&p.To, v) {
+ if copyau1(p, v) {
return 1
}
return 0
- // 9g never generates a branch to a GPR (this isn't
- // even a normal instruction; liblink turns it in to a
- // mov and a branch).
- case ppc64.ABR: /* read p->to */
+ case mips.AJMP: /* read p->to */
if s != nil {
if copysub(&p.To, v, s, 1) != 0 {
return 1
@@ -870,7 +605,7 @@
}
return 0
- case obj.ARET: /* funny */
+ case mips.ARET: /* funny */
if s != nil {
return 0
}
@@ -879,20 +614,20 @@
// everything is set (and not used).
return 3
- case ppc64.ABL: /* funny */
+ case mips.AJAL: /* funny */
if v.Type == obj.TYPE_REG {
// TODO(rsc): REG_R0 and REG_F0 used to be
// (when register numbers started at 0) exregoffset and exfregoffset,
// which are unset entirely.
// It's strange that this handles R0 and F0 differently from the other
// registers. Possible failure to optimize?
- if ppc64.REG_R0 < v.Reg && v.Reg <= ppc64.REGEXT {
+ if mips.REG_R0 < v.Reg && v.Reg <= mips.REG_R31 {
return 2
}
- if v.Reg == ppc64.REGARG {
+ if v.Reg == mips.REGARG {
return 2
}
- if ppc64.REG_F0 < v.Reg && v.Reg <= ppc64.FREGEXT {
+ if mips.REG_F0 < v.Reg && v.Reg <= mips.REG_F31 {
return 2
}
}
@@ -913,28 +648,28 @@
}
return 3
- // R0 is zero, used by DUFFZERO, cannot be substituted.
- // R3 is ptr to memory, used and set, cannot be substituted.
+ // R0 is zero, used by DUFFZERO, cannot be substituted.
+ // R1 is ptr to memory, used and set, cannot be substituted.
case obj.ADUFFZERO:
if v.Type == obj.TYPE_REG {
if v.Reg == 0 {
return 1
}
- if v.Reg == 3 {
+ if v.Reg == 1 {
return 2
}
}
return 0
- // R3, R4 are ptr to src, dst, used and set, cannot be substituted.
- // R5 is scratch, set by DUFFCOPY, cannot be substituted.
+ // R1, R2 are ptr to src, dst, used and set, cannot be substituted.
+ // R3 is scratch, set by DUFFCOPY, cannot be substituted.
case obj.ADUFFCOPY:
if v.Type == obj.TYPE_REG {
- if v.Reg == 3 || v.Reg == 4 {
+ if v.Reg == 1 || v.Reg == 2 {
return 2
}
- if v.Reg == 5 {
+ if v.Reg == 3 {
return 3
}
}
@@ -943,7 +678,7 @@
case obj.ATEXT: /* funny */
if v.Type == obj.TYPE_REG {
- if v.Reg == ppc64.REGARG {
+ if v.Reg == mips.REGARG {
return 3
}
}
@@ -1006,7 +741,7 @@
}
// copysub replaces v with s in a if f!=0 or indicates if it could if f==0.
-// Returns 1 on failure to substitute (it always succeeds on ppc64).
+// Returns 1 on failure to substitute (it always succeeds on mips).
func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f int) int {
if f != 0 {
if copyau(a, v) {
@@ -1017,7 +752,7 @@
}
// copysub1 replaces v with s in p1->reg if f!=0 or indicates if it could if f==0.
-// Returns 1 on failure to substitute (it always succeeds on ppc64).
+// Returns 1 on failure to substitute (it always succeeds on mips).
func copysub1(p1 *obj.Prog, v *obj.Addr, s *obj.Addr, f int) int {
if f != 0 {
if copyau1(p1, v) {
@@ -1047,5 +782,5 @@
}
func stackaddr(a *obj.Addr) bool {
- return a.Type == obj.TYPE_REG && a.Reg == ppc64.REGSP
+ return a.Type == obj.TYPE_REG && a.Reg == mips.REGSP
}
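
The copysub and copysub1 helpers above follow the peephole optimizer's two-mode convention: with f==0 they only report whether substituting s for v would be possible, and with f!=0 they actually perform the rewrite. A minimal standalone sketch of that convention, using a simplified stand-in type rather than the real obj.Addr (names here are illustrative only):

	package main

	import "fmt"

	// addr is a simplified stand-in for obj.Addr: just a register number.
	type addr struct{ reg int16 }

	// copyau reports whether a mentions the same register as v.
	func copyau(a, v *addr) bool { return a.reg == v.reg }

	// copysub mirrors the two-mode convention: with f == 0 it only checks
	// whether v occurs in a; with f != 0 it rewrites v to s. It returns 0
	// unconditionally because, as the patched comment notes, substitution
	// always succeeds on this port.
	func copysub(a, v, s *addr, f int) int {
		if f != 0 {
			if copyau(a, v) {
				a.reg = s.reg
			}
		}
		return 0
	}

	func main() {
		a := addr{reg: 5}
		v := addr{reg: 5}
		s := addr{reg: 7}
		copysub(&a, &v, &s, 1)
		fmt.Println(a.reg) // 7: a now refers to s's register
	}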
diff --git a/src/cmd/compile/internal/mips64/prog.go b/src/cmd/compile/internal/mips64/prog.go
index 9b8719b..bf13d82 100644
--- a/src/cmd/compile/internal/mips64/prog.go
+++ b/src/cmd/compile/internal/mips64/prog.go
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package ppc64
+package mips64
import (
"cmd/compile/internal/gc"
"cmd/internal/obj"
- "cmd/internal/obj/ppc64"
+ "cmd/internal/obj/mips"
)
const (
@@ -24,7 +24,7 @@
// size variants of an operation even if we just use a subset.
//
// The table is formatted for 8-space tabs.
-var progtable = [ppc64.ALAST]obj.ProgInfo{
+var progtable = [mips.ALAST]obj.ProgInfo{
obj.ATYPE: {Flags: gc.Pseudo | gc.Skip},
obj.ATEXT: {Flags: gc.Pseudo},
obj.AFUNCDATA: {Flags: gc.Pseudo},
@@ -36,106 +36,105 @@
obj.AVARKILL: {Flags: gc.Pseudo | gc.RightWrite},
// NOP is an internal no-op that also stands
- // for USED and SET annotations, not the Power opcode.
+ // for USED and SET annotations, not the MIPS opcode.
obj.ANOP: {Flags: gc.LeftRead | gc.RightWrite},
// Integer
- ppc64.AADD: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
- ppc64.ASUB: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
- ppc64.ANEG: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
- ppc64.AAND: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
- ppc64.AOR: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
- ppc64.AXOR: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
- ppc64.AMULLD: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
- ppc64.AMULLW: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
- ppc64.AMULHD: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
- ppc64.AMULHDU: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
- ppc64.ADIVD: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
- ppc64.ADIVDU: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
- ppc64.ASLD: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
- ppc64.ASRD: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
- ppc64.ASRAD: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
- ppc64.ACMP: {Flags: gc.SizeQ | gc.LeftRead | gc.RightRead},
- ppc64.ACMPU: {Flags: gc.SizeQ | gc.LeftRead | gc.RightRead},
- ppc64.ATD: {Flags: gc.SizeQ | gc.RightRead},
+ mips.AADD: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ mips.AADDU: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ mips.AADDV: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ mips.AADDVU: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ mips.ASUB: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ mips.ASUBU: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ mips.ASUBV: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ mips.ASUBVU: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ mips.AAND: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ mips.AOR: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ mips.AXOR: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ mips.ANOR: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ mips.AMUL: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead},
+ mips.AMULU: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead},
+ mips.AMULV: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead},
+ mips.AMULVU: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead},
+ mips.ADIV: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead},
+ mips.ADIVU: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead},
+ mips.ADIVV: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead},
+ mips.ADIVVU: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead},
+ mips.AREM: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead},
+ mips.AREMU: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead},
+ mips.AREMV: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead},
+ mips.AREMVU: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead},
+ mips.ASLL: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ mips.ASLLV: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ mips.ASRA: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ mips.ASRAV: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ mips.ASRL: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ mips.ASRLV: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ mips.ASGT: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ mips.ASGTU: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
// Floating point.
- ppc64.AFADD: {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
- ppc64.AFADDS: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
- ppc64.AFSUB: {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
- ppc64.AFSUBS: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
- ppc64.AFMUL: {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
- ppc64.AFMULS: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
- ppc64.AFDIV: {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
- ppc64.AFDIVS: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
- ppc64.AFCTIDZ: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
- ppc64.AFCFID: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
- ppc64.AFCMPU: {Flags: gc.SizeD | gc.LeftRead | gc.RightRead},
- ppc64.AFRSP: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
+ mips.AADDF: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ mips.AADDD: {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ mips.ASUBF: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ mips.ASUBD: {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ mips.AMULF: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ mips.AMULD: {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ mips.ADIVF: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ mips.ADIVD: {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ mips.AABSF: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite},
+ mips.AABSD: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite},
+ mips.ANEGF: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite},
+ mips.ANEGD: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite},
+ mips.ACMPEQF: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead},
+ mips.ACMPEQD: {Flags: gc.SizeD | gc.LeftRead | gc.RegRead},
+ mips.ACMPGTF: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead},
+ mips.ACMPGTD: {Flags: gc.SizeD | gc.LeftRead | gc.RegRead},
+ mips.ACMPGEF: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead},
+ mips.ACMPGED: {Flags: gc.SizeD | gc.LeftRead | gc.RegRead},
+ mips.AMOVFD: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
+ mips.AMOVDF: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv},
+ mips.AMOVFW: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
+ mips.AMOVWF: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv},
+ mips.AMOVDW: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
+ mips.AMOVWD: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
+ mips.AMOVFV: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv},
+ mips.AMOVVF: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv},
+ mips.AMOVDV: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv},
+ mips.AMOVVD: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
+ mips.ATRUNCFW: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
+ mips.ATRUNCDW: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
+ mips.ATRUNCFV: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv},
+ mips.ATRUNCDV: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv},
// Moves
- ppc64.AMOVB: {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
- ppc64.AMOVBU: {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv | gc.PostInc},
- ppc64.AMOVBZ: {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
- ppc64.AMOVH: {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
- ppc64.AMOVHU: {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv | gc.PostInc},
- ppc64.AMOVHZ: {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
- ppc64.AMOVW: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
-
- // there is no AMOVWU.
- ppc64.AMOVWZU: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv | gc.PostInc},
- ppc64.AMOVWZ: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
- ppc64.AMOVD: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move},
- ppc64.AMOVDU: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move | gc.PostInc},
- ppc64.AFMOVS: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
- ppc64.AFMOVD: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move},
+ mips.AMOVB: {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
+ mips.AMOVBU: {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
+ mips.AMOVH: {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
+ mips.AMOVHU: {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
+ mips.AMOVW: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
+ mips.AMOVWU: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
+ mips.AMOVV: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move},
+ mips.AMOVF: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
+ mips.AMOVD: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move},
// Jumps
- ppc64.ABR: {Flags: gc.Jump | gc.Break},
- ppc64.ABL: {Flags: gc.Call},
- ppc64.ABEQ: {Flags: gc.Cjmp},
- ppc64.ABNE: {Flags: gc.Cjmp},
- ppc64.ABGE: {Flags: gc.Cjmp},
- ppc64.ABLT: {Flags: gc.Cjmp},
- ppc64.ABGT: {Flags: gc.Cjmp},
- ppc64.ABLE: {Flags: gc.Cjmp},
- obj.ARET: {Flags: gc.Break},
+ mips.AJMP: {Flags: gc.Jump | gc.Break},
+ mips.AJAL: {Flags: gc.Call},
+ mips.ABEQ: {Flags: gc.Cjmp},
+ mips.ABNE: {Flags: gc.Cjmp},
+ mips.ABGEZ: {Flags: gc.Cjmp},
+ mips.ABLTZ: {Flags: gc.Cjmp},
+ mips.ABGTZ: {Flags: gc.Cjmp},
+ mips.ABLEZ: {Flags: gc.Cjmp},
+ mips.ABFPF: {Flags: gc.Cjmp},
+ mips.ABFPT: {Flags: gc.Cjmp},
+ mips.ARET: {Flags: gc.Break},
obj.ADUFFZERO: {Flags: gc.Call},
obj.ADUFFCOPY: {Flags: gc.Call},
}
-var initproginfo_initialized int
-
-func initproginfo() {
- var addvariant = []int{V_CC, V_V, V_CC | V_V}
-
- if initproginfo_initialized != 0 {
- return
- }
- initproginfo_initialized = 1
-
- // Perform one-time expansion of instructions in progtable to
- // their CC, V, and VCC variants
- var as2 int
- var i int
- var variant int
- for as := int(0); as < len(progtable); as++ {
- if progtable[as].Flags == 0 {
- continue
- }
- variant = as2variant(as)
- for i = 0; i < len(addvariant); i++ {
- as2 = variant2as(as, variant|addvariant[i])
- if as2 != 0 && progtable[as2].Flags == 0 {
- progtable[as2] = progtable[as]
- }
- }
- }
-}
-
func proginfo(p *obj.Prog) {
- initproginfo()
-
info := &p.Info
*info = progtable[p.As]
if info.Flags == 0 {
@@ -149,16 +148,10 @@
if (p.From.Type == obj.TYPE_MEM || p.From.Type == obj.TYPE_ADDR) && p.From.Reg != 0 {
info.Regindex |= RtoB(int(p.From.Reg))
- if info.Flags&gc.PostInc != 0 {
- info.Regset |= RtoB(int(p.From.Reg))
- }
}
if (p.To.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_ADDR) && p.To.Reg != 0 {
info.Regindex |= RtoB(int(p.To.Reg))
- if info.Flags&gc.PostInc != 0 {
- info.Regset |= RtoB(int(p.To.Reg))
- }
}
if p.From.Type == obj.TYPE_ADDR && p.From.Sym != nil && (info.Flags&gc.LeftRead != 0) {
@@ -167,148 +160,14 @@
}
if p.As == obj.ADUFFZERO {
- info.Reguse |= 1<<0 | RtoB(ppc64.REG_R3)
- info.Regset |= RtoB(ppc64.REG_R3)
+ info.Reguse |= 1<<0 | RtoB(mips.REGRT1)
+ info.Regset |= RtoB(mips.REGRT1)
}
if p.As == obj.ADUFFCOPY {
// TODO(austin) Revisit when duffcopy is implemented
- info.Reguse |= RtoB(ppc64.REG_R3) | RtoB(ppc64.REG_R4) | RtoB(ppc64.REG_R5)
+ info.Reguse |= RtoB(mips.REGRT1) | RtoB(mips.REGRT2) | RtoB(mips.REG_R3)
- info.Regset |= RtoB(ppc64.REG_R3) | RtoB(ppc64.REG_R4)
+ info.Regset |= RtoB(mips.REGRT1) | RtoB(mips.REGRT2)
}
}
-
-// Instruction variants table. Initially this contains entries only
-// for the "base" form of each instruction. On the first call to
-// as2variant or variant2as, we'll add the variants to the table.
-var varianttable = [ppc64.ALAST][4]int{
- ppc64.AADD: [4]int{ppc64.AADD, ppc64.AADDCC, ppc64.AADDV, ppc64.AADDVCC},
- ppc64.AADDC: [4]int{ppc64.AADDC, ppc64.AADDCCC, ppc64.AADDCV, ppc64.AADDCVCC},
- ppc64.AADDE: [4]int{ppc64.AADDE, ppc64.AADDECC, ppc64.AADDEV, ppc64.AADDEVCC},
- ppc64.AADDME: [4]int{ppc64.AADDME, ppc64.AADDMECC, ppc64.AADDMEV, ppc64.AADDMEVCC},
- ppc64.AADDZE: [4]int{ppc64.AADDZE, ppc64.AADDZECC, ppc64.AADDZEV, ppc64.AADDZEVCC},
- ppc64.AAND: [4]int{ppc64.AAND, ppc64.AANDCC, 0, 0},
- ppc64.AANDN: [4]int{ppc64.AANDN, ppc64.AANDNCC, 0, 0},
- ppc64.ACNTLZD: [4]int{ppc64.ACNTLZD, ppc64.ACNTLZDCC, 0, 0},
- ppc64.ACNTLZW: [4]int{ppc64.ACNTLZW, ppc64.ACNTLZWCC, 0, 0},
- ppc64.ADIVD: [4]int{ppc64.ADIVD, ppc64.ADIVDCC, ppc64.ADIVDV, ppc64.ADIVDVCC},
- ppc64.ADIVDU: [4]int{ppc64.ADIVDU, ppc64.ADIVDUCC, ppc64.ADIVDUV, ppc64.ADIVDUVCC},
- ppc64.ADIVW: [4]int{ppc64.ADIVW, ppc64.ADIVWCC, ppc64.ADIVWV, ppc64.ADIVWVCC},
- ppc64.ADIVWU: [4]int{ppc64.ADIVWU, ppc64.ADIVWUCC, ppc64.ADIVWUV, ppc64.ADIVWUVCC},
- ppc64.AEQV: [4]int{ppc64.AEQV, ppc64.AEQVCC, 0, 0},
- ppc64.AEXTSB: [4]int{ppc64.AEXTSB, ppc64.AEXTSBCC, 0, 0},
- ppc64.AEXTSH: [4]int{ppc64.AEXTSH, ppc64.AEXTSHCC, 0, 0},
- ppc64.AEXTSW: [4]int{ppc64.AEXTSW, ppc64.AEXTSWCC, 0, 0},
- ppc64.AFABS: [4]int{ppc64.AFABS, ppc64.AFABSCC, 0, 0},
- ppc64.AFADD: [4]int{ppc64.AFADD, ppc64.AFADDCC, 0, 0},
- ppc64.AFADDS: [4]int{ppc64.AFADDS, ppc64.AFADDSCC, 0, 0},
- ppc64.AFCFID: [4]int{ppc64.AFCFID, ppc64.AFCFIDCC, 0, 0},
- ppc64.AFCTID: [4]int{ppc64.AFCTID, ppc64.AFCTIDCC, 0, 0},
- ppc64.AFCTIDZ: [4]int{ppc64.AFCTIDZ, ppc64.AFCTIDZCC, 0, 0},
- ppc64.AFCTIW: [4]int{ppc64.AFCTIW, ppc64.AFCTIWCC, 0, 0},
- ppc64.AFCTIWZ: [4]int{ppc64.AFCTIWZ, ppc64.AFCTIWZCC, 0, 0},
- ppc64.AFDIV: [4]int{ppc64.AFDIV, ppc64.AFDIVCC, 0, 0},
- ppc64.AFDIVS: [4]int{ppc64.AFDIVS, ppc64.AFDIVSCC, 0, 0},
- ppc64.AFMADD: [4]int{ppc64.AFMADD, ppc64.AFMADDCC, 0, 0},
- ppc64.AFMADDS: [4]int{ppc64.AFMADDS, ppc64.AFMADDSCC, 0, 0},
- ppc64.AFMOVD: [4]int{ppc64.AFMOVD, ppc64.AFMOVDCC, 0, 0},
- ppc64.AFMSUB: [4]int{ppc64.AFMSUB, ppc64.AFMSUBCC, 0, 0},
- ppc64.AFMSUBS: [4]int{ppc64.AFMSUBS, ppc64.AFMSUBSCC, 0, 0},
- ppc64.AFMUL: [4]int{ppc64.AFMUL, ppc64.AFMULCC, 0, 0},
- ppc64.AFMULS: [4]int{ppc64.AFMULS, ppc64.AFMULSCC, 0, 0},
- ppc64.AFNABS: [4]int{ppc64.AFNABS, ppc64.AFNABSCC, 0, 0},
- ppc64.AFNEG: [4]int{ppc64.AFNEG, ppc64.AFNEGCC, 0, 0},
- ppc64.AFNMADD: [4]int{ppc64.AFNMADD, ppc64.AFNMADDCC, 0, 0},
- ppc64.AFNMADDS: [4]int{ppc64.AFNMADDS, ppc64.AFNMADDSCC, 0, 0},
- ppc64.AFNMSUB: [4]int{ppc64.AFNMSUB, ppc64.AFNMSUBCC, 0, 0},
- ppc64.AFNMSUBS: [4]int{ppc64.AFNMSUBS, ppc64.AFNMSUBSCC, 0, 0},
- ppc64.AFRES: [4]int{ppc64.AFRES, ppc64.AFRESCC, 0, 0},
- ppc64.AFRSP: [4]int{ppc64.AFRSP, ppc64.AFRSPCC, 0, 0},
- ppc64.AFRSQRTE: [4]int{ppc64.AFRSQRTE, ppc64.AFRSQRTECC, 0, 0},
- ppc64.AFSEL: [4]int{ppc64.AFSEL, ppc64.AFSELCC, 0, 0},
- ppc64.AFSQRT: [4]int{ppc64.AFSQRT, ppc64.AFSQRTCC, 0, 0},
- ppc64.AFSQRTS: [4]int{ppc64.AFSQRTS, ppc64.AFSQRTSCC, 0, 0},
- ppc64.AFSUB: [4]int{ppc64.AFSUB, ppc64.AFSUBCC, 0, 0},
- ppc64.AFSUBS: [4]int{ppc64.AFSUBS, ppc64.AFSUBSCC, 0, 0},
- ppc64.AMTFSB0: [4]int{ppc64.AMTFSB0, ppc64.AMTFSB0CC, 0, 0},
- ppc64.AMTFSB1: [4]int{ppc64.AMTFSB1, ppc64.AMTFSB1CC, 0, 0},
- ppc64.AMULHD: [4]int{ppc64.AMULHD, ppc64.AMULHDCC, 0, 0},
- ppc64.AMULHDU: [4]int{ppc64.AMULHDU, ppc64.AMULHDUCC, 0, 0},
- ppc64.AMULHW: [4]int{ppc64.AMULHW, ppc64.AMULHWCC, 0, 0},
- ppc64.AMULHWU: [4]int{ppc64.AMULHWU, ppc64.AMULHWUCC, 0, 0},
- ppc64.AMULLD: [4]int{ppc64.AMULLD, ppc64.AMULLDCC, ppc64.AMULLDV, ppc64.AMULLDVCC},
- ppc64.AMULLW: [4]int{ppc64.AMULLW, ppc64.AMULLWCC, ppc64.AMULLWV, ppc64.AMULLWVCC},
- ppc64.ANAND: [4]int{ppc64.ANAND, ppc64.ANANDCC, 0, 0},
- ppc64.ANEG: [4]int{ppc64.ANEG, ppc64.ANEGCC, ppc64.ANEGV, ppc64.ANEGVCC},
- ppc64.ANOR: [4]int{ppc64.ANOR, ppc64.ANORCC, 0, 0},
- ppc64.AOR: [4]int{ppc64.AOR, ppc64.AORCC, 0, 0},
- ppc64.AORN: [4]int{ppc64.AORN, ppc64.AORNCC, 0, 0},
- ppc64.AREM: [4]int{ppc64.AREM, ppc64.AREMCC, ppc64.AREMV, ppc64.AREMVCC},
- ppc64.AREMD: [4]int{ppc64.AREMD, ppc64.AREMDCC, ppc64.AREMDV, ppc64.AREMDVCC},
- ppc64.AREMDU: [4]int{ppc64.AREMDU, ppc64.AREMDUCC, ppc64.AREMDUV, ppc64.AREMDUVCC},
- ppc64.AREMU: [4]int{ppc64.AREMU, ppc64.AREMUCC, ppc64.AREMUV, ppc64.AREMUVCC},
- ppc64.ARLDC: [4]int{ppc64.ARLDC, ppc64.ARLDCCC, 0, 0},
- ppc64.ARLDCL: [4]int{ppc64.ARLDCL, ppc64.ARLDCLCC, 0, 0},
- ppc64.ARLDCR: [4]int{ppc64.ARLDCR, ppc64.ARLDCRCC, 0, 0},
- ppc64.ARLDMI: [4]int{ppc64.ARLDMI, ppc64.ARLDMICC, 0, 0},
- ppc64.ARLWMI: [4]int{ppc64.ARLWMI, ppc64.ARLWMICC, 0, 0},
- ppc64.ARLWNM: [4]int{ppc64.ARLWNM, ppc64.ARLWNMCC, 0, 0},
- ppc64.ASLD: [4]int{ppc64.ASLD, ppc64.ASLDCC, 0, 0},
- ppc64.ASLW: [4]int{ppc64.ASLW, ppc64.ASLWCC, 0, 0},
- ppc64.ASRAD: [4]int{ppc64.ASRAD, ppc64.ASRADCC, 0, 0},
- ppc64.ASRAW: [4]int{ppc64.ASRAW, ppc64.ASRAWCC, 0, 0},
- ppc64.ASRD: [4]int{ppc64.ASRD, ppc64.ASRDCC, 0, 0},
- ppc64.ASRW: [4]int{ppc64.ASRW, ppc64.ASRWCC, 0, 0},
- ppc64.ASUB: [4]int{ppc64.ASUB, ppc64.ASUBCC, ppc64.ASUBV, ppc64.ASUBVCC},
- ppc64.ASUBC: [4]int{ppc64.ASUBC, ppc64.ASUBCCC, ppc64.ASUBCV, ppc64.ASUBCVCC},
- ppc64.ASUBE: [4]int{ppc64.ASUBE, ppc64.ASUBECC, ppc64.ASUBEV, ppc64.ASUBEVCC},
- ppc64.ASUBME: [4]int{ppc64.ASUBME, ppc64.ASUBMECC, ppc64.ASUBMEV, ppc64.ASUBMEVCC},
- ppc64.ASUBZE: [4]int{ppc64.ASUBZE, ppc64.ASUBZECC, ppc64.ASUBZEV, ppc64.ASUBZEVCC},
- ppc64.AXOR: [4]int{ppc64.AXOR, ppc64.AXORCC, 0, 0},
-}
-
-var initvariants_initialized int
-
-func initvariants() {
- if initvariants_initialized != 0 {
- return
- }
- initvariants_initialized = 1
-
- var j int
- for i := int(0); i < len(varianttable); i++ {
- if varianttable[i][0] == 0 {
- // Instruction has no variants
- varianttable[i][0] = i
-
- continue
- }
-
- // Copy base form to other variants
- if varianttable[i][0] == i {
- for j = 0; j < len(varianttable[i]); j++ {
- varianttable[varianttable[i][j]] = varianttable[i]
- }
- }
- }
-}
-
-// as2variant returns the variant (V_*) flags of instruction as.
-func as2variant(as int) int {
- initvariants()
- for i := int(0); i < len(varianttable[as]); i++ {
- if varianttable[as][i] == as {
- return i
- }
- }
- gc.Fatalf("as2variant: instruction %v is not a variant of itself", obj.Aconv(as))
- return 0
-}
-
-// variant2as returns the instruction as with the given variant (V_*) flags.
-// If no such variant exists, this returns 0.
-func variant2as(as int, flags int) int {
- initvariants()
- return varianttable[as][flags]
-}
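
With the ppc64 variant-expansion machinery (initproginfo, varianttable, as2variant, variant2as) gone, proginfo consults the static progtable directly; the MIPS opcodes have no CC/V/VCC variants to expand. A minimal standalone sketch of that table-lookup pattern, with illustrative flag values and opcode constants standing in for the real gc and mips definitions:

	package main

	import "fmt"

	// Illustrative stand-ins for the gc flag bits; the names are borrowed,
	// the values are not the real ones.
	const (
		LeftRead   uint32 = 1 << 0
		RegRead    uint32 = 1 << 1
		RightWrite uint32 = 1 << 2
	)

	// Illustrative opcode constants standing in for mips.A*.
	const (
		AADDV = iota
		ASUBV
		ALAST
	)

	// progtable-style lookup: indexed by opcode, each entry describes how
	// the instruction treats its operands.
	var progtable = [ALAST]uint32{
		AADDV: LeftRead | RegRead | RightWrite,
		ASUBV: LeftRead | RegRead | RightWrite,
	}

	func main() {
		flags := progtable[AADDV]
		fmt.Println(flags&RightWrite != 0) // true: ADDV writes its destination
	}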
diff --git a/src/cmd/compile/internal/mips64/reg.go b/src/cmd/compile/internal/mips64/reg.go
index fa1cb71..5c46588 100644
--- a/src/cmd/compile/internal/mips64/reg.go
+++ b/src/cmd/compile/internal/mips64/reg.go
@@ -28,9 +28,9 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
-package ppc64
+package mips64
-import "cmd/internal/obj/ppc64"
+import "cmd/internal/obj/mips"
import "cmd/compile/internal/gc"
const (
@@ -111,10 +111,10 @@
func excludedregs() uint64 {
// Exclude registers with fixed functions
- regbits := uint64(1<<0 | RtoB(ppc64.REGSP) | RtoB(ppc64.REGG) | RtoB(ppc64.REGTLS))
+ regbits := uint64(1<<0 | RtoB(mips.REGSP) | RtoB(mips.REGG) | RtoB(mips.REGTMP) | RtoB(mips.REGLINK) | RtoB(mips.REG_R26) | RtoB(mips.REG_R27))
// Also exclude floating point registers with fixed constants
- regbits |= RtoB(ppc64.REG_F27) | RtoB(ppc64.REG_F28) | RtoB(ppc64.REG_F29) | RtoB(ppc64.REG_F30) | RtoB(ppc64.REG_F31)
+ regbits |= RtoB(mips.FREGZERO) | RtoB(mips.FREGHALF) | RtoB(mips.FREGONE) | RtoB(mips.FREGTWO)
return regbits
}
@@ -136,11 +136,11 @@
* 32+31 F31
*/
func RtoB(r int) uint64 {
- if r > ppc64.REG_R0 && r <= ppc64.REG_R31 {
- return 1 << uint(r-ppc64.REG_R0)
+ if r > mips.REG_R0 && r <= mips.REG_R31 {
+ return 1 << uint(r-mips.REG_R0)
}
- if r >= ppc64.REG_F0 && r <= ppc64.REG_F31 {
- return 1 << uint(32+r-ppc64.REG_F0)
+ if r >= mips.REG_F0 && r <= mips.REG_F31 {
+ return 1 << uint(32+r-mips.REG_F0)
}
return 0
}
@@ -150,7 +150,7 @@
if b == 0 {
return 0
}
- return gc.Bitno(b) + ppc64.REG_R0
+ return gc.Bitno(b) + mips.REG_R0
}
func BtoF(b uint64) int {
@@ -158,5 +158,5 @@
if b == 0 {
return 0
}
- return gc.Bitno(b) + ppc64.REG_F0
+ return gc.Bitno(b) + mips.REG_F0
}
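
The register-to-bitmask mapping in RtoB gives general registers R1-R31 bits 1-31 and floating point registers F0-F31 bits 32-63, with R0 (the hardwired zero register) deliberately mapped to no bit so it is never allocated. A self-contained sketch of the same mapping, using assumed base values in place of the real mips.REG_R0 and mips.REG_F0 constants:

	package main

	import "fmt"

	// Assumed stand-in register bases; the real REG_R0 and REG_F0 live in
	// cmd/internal/obj/mips and have different values.
	const (
		regR0 = 0
		regF0 = 64
	)

	// rtoB mirrors RtoB above: R1..R31 map to bits 1..31, F0..F31 map to
	// bits 32..63, and R0 maps to no bit at all.
	func rtoB(r int) uint64 {
		if r > regR0 && r <= regR0+31 {
			return 1 << uint(r-regR0)
		}
		if r >= regF0 && r <= regF0+31 {
			return 1 << uint(32+r-regF0)
		}
		return 0
	}

	func main() {
		fmt.Println(rtoB(regR0))             // 0: R0 is never allocated
		fmt.Println(rtoB(regR0+1) == 1<<1)   // true: R1 is bit 1
		fmt.Println(rtoB(regF0+31) == 1<<63) // true: F31 is bit 63
	}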
diff --git a/src/cmd/compile/main.go b/src/cmd/compile/main.go
index 7b69c34..91c4831 100644
--- a/src/cmd/compile/main.go
+++ b/src/cmd/compile/main.go
@@ -8,6 +8,7 @@
"cmd/compile/internal/amd64"
"cmd/compile/internal/arm"
"cmd/compile/internal/arm64"
+ "cmd/compile/internal/mips64"
"cmd/compile/internal/ppc64"
"cmd/compile/internal/x86"
"cmd/internal/obj"
@@ -28,6 +29,8 @@
arm.Main()
case "arm64":
arm64.Main()
+ case "mips64", "mips64le":
+ mips64.Main()
case "ppc64", "ppc64le":
ppc64.Main()
}
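
The driver dispatches on $GOARCH, with mips64 and mips64le sharing the single mips64 backend in the same way ppc64 and ppc64le share theirs. Once the remaining pieces of the port are in place, building for the new targets would look roughly like this (illustrative invocation only):

	GOOS=linux GOARCH=mips64 go build hello.go
	GOOS=linux GOARCH=mips64le go build hello.go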