[dev.ssa] Merge remote-tracking branch 'origin/master' into mergebranch
Semi-regular merge of master into dev.ssa.
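Notable changes coming in from master:

- gc.Fatal is renamed gc.Fatalf throughout gc and the backends.
- Several int-valued flags become bool: the div/mod "check" flag,
  Type.Broke, Type.Funarg, Type.Deferwidth, Type.Printed,
  Type.Outnamed, Pkg.Exported, Pkg.Direct, typecheckok, and
  Hasdefer (now hasdefer).
- exportlist, Label.Use, and the bottomUpVisitor stack change from
  *NodeList to []*Node, so visitBottomUp's analyze callback now
  takes []*Node.
- Idiomatic cleanups: element types elided in the typedefs slice
  literals, temporary-variable swaps replaced by parallel
  assignment, and manual decrement loops rewritten as for clauses.
- arm.ACASE and arm.ABCASE are removed from the peep.go instruction
  lists.
- fmt.go now calls v.Ctype() where it previously formatted the
  method value itself.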
Change-Id: I48aa17700096a14f2a20ad07491ebfcd7529f6d5
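Most hunks below apply one of a handful of mechanical rewrites. The
following is a minimal, self-contained Go sketch of those patterns;
typedef and the literal values are placeholders, not code from this
patch:

package main

import "fmt"

// typedef stands in for gc.Typedef; the real struct has more fields.
type typedef struct {
	name string
}

// In a slice literal the element type may be elided, so each
// gc.Typedef{...} entry becomes a plain {...}.
var typedefs = []typedef{
	{"int"},
	{"uint"},
	{"uintptr"},
}

func main() {
	// Parallel assignment replaces the three-line temporary swap.
	nl, nr := 1, 2
	nl, nr = nr, nl

	// A manual decrement-and-break loop collapses to a for clause.
	for c := len(typedefs); c > 0; c-- {
		fmt.Println(nl, nr, c)
	}

	// Integer flags tested with != 0 become real booleans.
	check := false
	if nl > nr {
		check = true
	}
	if check {
		fmt.Println("check")
	}
}

The one structural change is in esc.go, where the bottom-up visitor's
linked-list stack becomes a slice. A sketch of the slice-as-stack
pattern it adopts, with node standing in for *gc.Node:

package main

import "fmt"

// node stands in for *gc.Node; only pointer identity matters here.
type node struct{ name string }

func main() {
	var stack []*node

	// Push by appending instead of prepending a NodeList cell.
	root := &node{"f"}
	stack = append(stack, root)
	stack = append(stack, &node{"g"}, &node{"h"})

	// Pop a strongly connected component: scan back to the root,
	// then split the slice in two. root was pushed above, so the
	// scan always terminates with i >= 0.
	var i int
	for i = len(stack) - 1; i >= 0; i-- {
		if stack[i] == root {
			break
		}
	}
	block := stack[i:] // the component, oldest node first
	stack = stack[:i]  // everything below it stays on the stack

	fmt.Println(len(block), len(stack)) // 3 0
}

Because appending leaves the oldest node first while the old NodeList
kept the newest at its head, escAnalyze now walks the resulting slice
backwards (for i := len(all) - 1; i >= 0; i--), preserving the visit
order of the list-based loop it replaces.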
diff --git a/src/cmd/compile/internal/amd64/galign.go b/src/cmd/compile/internal/amd64/galign.go
index 79bf94a..9837ea6 100644
--- a/src/cmd/compile/internal/amd64/galign.go
+++ b/src/cmd/compile/internal/amd64/galign.go
@@ -39,9 +39,9 @@
* int, uint, and uintptr
*/
var typedefs = []gc.Typedef{
- gc.Typedef{"int", gc.TINT, gc.TINT64},
- gc.Typedef{"uint", gc.TUINT, gc.TUINT64},
- gc.Typedef{"uintptr", gc.TUINTPTR, gc.TUINT64},
+ {"int", gc.TINT, gc.TINT64},
+ {"uint", gc.TUINT, gc.TUINT64},
+ {"uintptr", gc.TUINTPTR, gc.TUINT64},
}
func betypeinit() {
diff --git a/src/cmd/compile/internal/amd64/ggen.go b/src/cmd/compile/internal/amd64/ggen.go
index 65cf694..f1f4955 100644
--- a/src/cmd/compile/internal/amd64/ggen.go
+++ b/src/cmd/compile/internal/amd64/ggen.go
@@ -36,10 +36,10 @@
continue
}
if n.Class != gc.PAUTO {
- gc.Fatal("needzero class %d", n.Class)
+ gc.Fatalf("needzero class %d", n.Class)
}
if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
- gc.Fatal("var %v has size %d offset %d", gc.Nconv(n, obj.FmtLong), int(n.Type.Width), int(n.Xoffset))
+ gc.Fatalf("var %v has size %d offset %d", gc.Nconv(n, obj.FmtLong), int(n.Type.Width), int(n.Xoffset))
}
if lo != hi && n.Xoffset+n.Type.Width >= lo-int64(2*gc.Widthreg) {
@@ -124,7 +124,7 @@
if cnt%int64(gc.Widthreg) != 0 {
// should only happen with nacl
if cnt%int64(gc.Widthptr) != 0 {
- gc.Fatal("zerorange count not a multiple of widthptr %d", cnt)
+ gc.Fatalf("zerorange count not a multiple of widthptr %d", cnt)
}
p = appendpp(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo)
lo += int64(gc.Widthptr)
@@ -187,13 +187,13 @@
t := nl.Type
t0 := t
- check := 0
+ check := false
if gc.Issigned[t.Etype] {
- check = 1
+ check = true
if gc.Isconst(nl, gc.CTINT) && nl.Int() != -(1<<uint64(t.Width*8-1)) {
- check = 0
+ check = false
} else if gc.Isconst(nr, gc.CTINT) && nr.Int() != -1 {
- check = 0
+ check = false
}
}
@@ -203,7 +203,7 @@
} else {
t = gc.Types[gc.TUINT32]
}
- check = 0
+ check = false
}
a := optoas(op, t)
@@ -252,7 +252,7 @@
}
var p2 *obj.Prog
- if check != 0 {
+ if check {
gc.Nodconst(&n4, t, -1)
gins(optoas(gc.OCMP, t), &n3, &n4)
p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
@@ -289,7 +289,7 @@
gmove(&dx, res)
}
restx(&dx, &olddx)
- if check != 0 {
+ if check {
gc.Patch(p2, gc.Pc)
}
restx(&ax, &oldax)
@@ -340,9 +340,7 @@
t := nl.Type
a := optoas(gc.OHMUL, t)
if nl.Ullman < nr.Ullman {
- tmp := nl
- nl = nr
- nr = tmp
+ nl, nr = nr, nl
}
var n1 gc.Node
@@ -500,9 +498,7 @@
// largest ullman on left.
if nl.Ullman < nr.Ullman {
- tmp := nl
- nl = nr
- nr = tmp
+ nl, nr = nr, nl
}
// generate operands in "8-bit" registers.
@@ -564,12 +560,7 @@
n1.Op = gc.OINDREG
var z gc.Node
gc.Nodconst(&z, gc.Types[gc.TUINT64], 0)
- for {
- tmp14 := q
- q--
- if tmp14 <= 0 {
- break
- }
+ for ; q > 0; q-- {
n1.Type = z.Type
gins(x86.AMOVQ, &z, &n1)
n1.Xoffset += 8
@@ -584,12 +575,7 @@
}
gc.Nodconst(&z, gc.Types[gc.TUINT8], 0)
- for {
- tmp15 := c
- c--
- if tmp15 <= 0 {
- break
- }
+ for ; c > 0; c-- {
n1.Type = z.Type
gins(x86.AMOVB, &z, &n1)
n1.Xoffset++
diff --git a/src/cmd/compile/internal/amd64/gsubr.go b/src/cmd/compile/internal/amd64/gsubr.go
index a8e4170..7b57902 100644
--- a/src/cmd/compile/internal/amd64/gsubr.go
+++ b/src/cmd/compile/internal/amd64/gsubr.go
@@ -221,7 +221,7 @@
switch uint32(ft)<<16 | uint32(tt) {
default:
- gc.Fatal("gmove %v -> %v", gc.Tconv(f.Type, obj.FmtLong), gc.Tconv(t.Type, obj.FmtLong))
+ gc.Fatalf("gmove %v -> %v", gc.Tconv(f.Type, obj.FmtLong), gc.Tconv(t.Type, obj.FmtLong))
/*
* integer copy and truncate
@@ -621,7 +621,7 @@
case x86.ALEAQ:
if f != nil && gc.Isconst(f, gc.CTNIL) {
- gc.Fatal("gins LEAQ nil %v", f.Type)
+ gc.Fatalf("gins LEAQ nil %v", f.Type)
}
}
@@ -651,11 +651,11 @@
if w != 0 && ((f != nil && p.From.Width < int64(w)) || (t != nil && p.To.Width > int64(w))) {
gc.Dump("f", f)
gc.Dump("t", t)
- gc.Fatal("bad width: %v (%d, %d)\n", p, p.From.Width, p.To.Width)
+ gc.Fatalf("bad width: %v (%d, %d)\n", p, p.From.Width, p.To.Width)
}
if p.To.Type == obj.TYPE_ADDR && w > 0 {
- gc.Fatal("bad use of addr: %v", p)
+ gc.Fatalf("bad use of addr: %v", p)
}
return p
@@ -675,13 +675,13 @@
*/
func optoas(op int, t *gc.Type) int {
if t == nil {
- gc.Fatal("optoas: t is nil")
+ gc.Fatalf("optoas: t is nil")
}
a := obj.AXXX
switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
default:
- gc.Fatal("optoas: no entry %v-%v", gc.Oconv(int(op), 0), t)
+ gc.Fatalf("optoas: no entry %v-%v", gc.Oconv(int(op), 0), t)
case gc.OADDR<<16 | gc.TPTR32:
a = x86.ALEAL
@@ -1223,7 +1223,7 @@
case x86.AJPS:
return x86.ASETPS
}
- gc.Fatal("jmptoset: no entry for %v", gc.Oconv(jmp, 0))
+ gc.Fatalf("jmptoset: no entry for %v", gc.Oconv(jmp, 0))
panic("unreachable")
}
@@ -1359,7 +1359,7 @@
for i := 1; i < o; i++ {
if oary[i] >= 0 {
- gc.Fatal("can't happen")
+ gc.Fatalf("can't happen")
}
gins(movptr, &n1, reg)
gc.Cgen_checknil(reg)
diff --git a/src/cmd/compile/internal/amd64/peep.go b/src/cmd/compile/internal/amd64/peep.go
index 19db68e9..130f369 100644
--- a/src/cmd/compile/internal/amd64/peep.go
+++ b/src/cmd/compile/internal/amd64/peep.go
@@ -873,10 +873,10 @@
*/
func copyas(a *obj.Addr, v *obj.Addr) bool {
if x86.REG_AL <= a.Reg && a.Reg <= x86.REG_R15B {
- gc.Fatal("use of byte register")
+ gc.Fatalf("use of byte register")
}
if x86.REG_AL <= v.Reg && v.Reg <= x86.REG_R15B {
- gc.Fatal("use of byte register")
+ gc.Fatalf("use of byte register")
}
if a.Type != v.Type || a.Name != v.Name || a.Reg != v.Reg {
diff --git a/src/cmd/compile/internal/amd64/prog.go b/src/cmd/compile/internal/amd64/prog.go
index 56d402a..59fb0761 100644
--- a/src/cmd/compile/internal/amd64/prog.go
+++ b/src/cmd/compile/internal/amd64/prog.go
@@ -266,7 +266,7 @@
info := &p.Info
*info = progtable[p.As]
if info.Flags == 0 {
- gc.Fatal("unknown instruction %v", p)
+ gc.Fatalf("unknown instruction %v", p)
}
if (info.Flags&gc.ShiftCX != 0) && p.From.Type != obj.TYPE_CONST {
diff --git a/src/cmd/compile/internal/arm/cgen.go b/src/cmd/compile/internal/arm/cgen.go
index 8ea6c5f..289da5d 100644
--- a/src/cmd/compile/internal/arm/cgen.go
+++ b/src/cmd/compile/internal/arm/cgen.go
@@ -63,7 +63,7 @@
var op int
switch align {
default:
- gc.Fatal("sgen: invalid alignment %d for %v", align, n.Type)
+ gc.Fatalf("sgen: invalid alignment %d for %v", align, n.Type)
case 1:
op = arm.AMOVB
@@ -76,12 +76,12 @@
}
if w%int64(align) != 0 {
- gc.Fatal("sgen: unaligned size %d (align=%d) for %v", w, align, n.Type)
+ gc.Fatalf("sgen: unaligned size %d (align=%d) for %v", w, align, n.Type)
}
c := int32(w / int64(align))
if osrc%int64(align) != 0 || odst%int64(align) != 0 {
- gc.Fatal("sgen: unaligned offset src %d or dst %d (align %d)", osrc, odst, align)
+ gc.Fatalf("sgen: unaligned offset src %d or dst %d (align %d)", osrc, odst, align)
}
// if we are copying forward on the stack and
@@ -205,12 +205,7 @@
gc.Regfree(&nend)
} else {
var p *obj.Prog
- for {
- tmp14 := c
- c--
- if tmp14 <= 0 {
- break
- }
+ for ; c > 0; c-- {
p = gins(op, &src, &tmp)
p.From.Type = obj.TYPE_MEM
p.From.Offset = int64(dir)
diff --git a/src/cmd/compile/internal/arm/cgen64.go b/src/cmd/compile/internal/arm/cgen64.go
index cc969b8..a9fe77b 100644
--- a/src/cmd/compile/internal/arm/cgen64.go
+++ b/src/cmd/compile/internal/arm/cgen64.go
@@ -19,7 +19,7 @@
if res.Op != gc.OINDREG && res.Op != gc.ONAME {
gc.Dump("n", n)
gc.Dump("res", res)
- gc.Fatal("cgen64 %v of %v", gc.Oconv(int(n.Op), 0), gc.Oconv(int(res.Op), 0))
+ gc.Fatalf("cgen64 %v of %v", gc.Oconv(int(n.Op), 0), gc.Oconv(int(res.Op), 0))
}
l := n.Left
@@ -35,7 +35,7 @@
split64(l, &lo1, &hi1)
switch n.Op {
default:
- gc.Fatal("cgen64 %v", gc.Oconv(int(n.Op), 0))
+ gc.Fatalf("cgen64 %v", gc.Oconv(int(n.Op), 0))
case gc.OMINUS:
var lo2 gc.Node
@@ -129,7 +129,7 @@
// Do op. Leave result in ah:al.
switch n.Op {
default:
- gc.Fatal("cgen64: not implemented: %v\n", n)
+ gc.Fatalf("cgen64: not implemented: %v\n", n)
// TODO: Constants
case gc.OADD:
@@ -793,7 +793,7 @@
var br *obj.Prog
switch op {
default:
- gc.Fatal("cmp64 %v %v", gc.Oconv(int(op), 0), t)
+ gc.Fatalf("cmp64 %v %v", gc.Oconv(int(op), 0), t)
// cmp hi
// bne L
diff --git a/src/cmd/compile/internal/arm/galign.go b/src/cmd/compile/internal/arm/galign.go
index 60a39d3..58c7f7b 100644
--- a/src/cmd/compile/internal/arm/galign.go
+++ b/src/cmd/compile/internal/arm/galign.go
@@ -26,9 +26,9 @@
* int, uint, and uintptr
*/
var typedefs = []gc.Typedef{
- gc.Typedef{"int", gc.TINT, gc.TINT32},
- gc.Typedef{"uint", gc.TUINT, gc.TUINT32},
- gc.Typedef{"uintptr", gc.TUINTPTR, gc.TUINT32},
+ {"int", gc.TINT, gc.TINT32},
+ {"uint", gc.TUINT, gc.TUINT32},
+ {"uintptr", gc.TUINTPTR, gc.TUINT32},
}
func betypeinit() {
diff --git a/src/cmd/compile/internal/arm/ggen.go b/src/cmd/compile/internal/arm/ggen.go
index 2d19d75..193d4af 100644
--- a/src/cmd/compile/internal/arm/ggen.go
+++ b/src/cmd/compile/internal/arm/ggen.go
@@ -34,10 +34,10 @@
continue
}
if n.Class != gc.PAUTO {
- gc.Fatal("needzero class %d", n.Class)
+ gc.Fatalf("needzero class %d", n.Class)
}
if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
- gc.Fatal("var %v has size %d offset %d", gc.Nconv(n, obj.FmtLong), int(n.Type.Width), int(n.Xoffset))
+ gc.Fatalf("var %v has size %d offset %d", gc.Nconv(n, obj.FmtLong), int(n.Type.Width), int(n.Xoffset))
}
if lo != hi && n.Xoffset+n.Type.Width >= lo-int64(2*gc.Widthptr) {
// merge with range we already have
@@ -120,9 +120,7 @@
*/
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
if nl.Ullman < nr.Ullman {
- tmp := nl
- nl = nr
- nr = tmp
+ nl, nr = nr, nl
}
t := nl.Type
@@ -162,7 +160,7 @@
p.To.Offset = int64(n2.Reg)
default:
- gc.Fatal("cgen_hmul %v", t)
+ gc.Fatalf("cgen_hmul %v", t)
}
gc.Cgen(&n1, res)
@@ -177,7 +175,7 @@
*/
func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
if nl.Type.Width > 4 {
- gc.Fatal("cgen_shift %v", nl.Type)
+ gc.Fatalf("cgen_shift %v", nl.Type)
}
w := int(nl.Type.Width * 8)
@@ -429,7 +427,7 @@
gc.Warnl(int(p.Lineno), "generated nil check")
}
if p.From.Type != obj.TYPE_REG {
- gc.Fatal("invalid nil check %v", p)
+ gc.Fatalf("invalid nil check %v", p)
}
reg = int(p.From.Reg)
diff --git a/src/cmd/compile/internal/arm/gsubr.go b/src/cmd/compile/internal/arm/gsubr.go
index a0a7ba2..acc6765 100644
--- a/src/cmd/compile/internal/arm/gsubr.go
+++ b/src/cmd/compile/internal/arm/gsubr.go
@@ -66,11 +66,11 @@
*/
func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
if !gc.Is64(n.Type) {
- gc.Fatal("split64 %v", n.Type)
+ gc.Fatalf("split64 %v", n.Type)
}
if nsclean >= len(sclean) {
- gc.Fatal("split64 clean")
+ gc.Fatalf("split64 clean")
}
sclean[nsclean].Op = gc.OEMPTY
nsclean++
@@ -125,7 +125,7 @@
func splitclean() {
if nsclean <= 0 {
- gc.Fatal("splitclean")
+ gc.Fatalf("splitclean")
}
nsclean--
if sclean[nsclean].Op != gc.OEMPTY {
@@ -204,7 +204,7 @@
switch uint32(ft)<<16 | uint32(tt) {
default:
// should not happen
- gc.Fatal("gmove %v -> %v", f, t)
+ gc.Fatalf("gmove %v -> %v", f, t)
return
/*
@@ -546,7 +546,7 @@
case gc.TUINT64<<16 | gc.TFLOAT32,
gc.TUINT64<<16 | gc.TFLOAT64:
- gc.Fatal("gmove UINT64, TFLOAT not implemented")
+ gc.Fatalf("gmove UINT64, TFLOAT not implemented")
return
/*
@@ -641,7 +641,7 @@
// int32 v;
if f != nil && f.Op == gc.OINDEX {
- gc.Fatal("gins OINDEX not implemented")
+ gc.Fatalf("gins OINDEX not implemented")
}
// gc.Regalloc(&nod, ®node, Z);
@@ -651,7 +651,7 @@
// idx.reg = nod.reg;
// gc.Regfree(&nod);
if t != nil && t.Op == gc.OINDEX {
- gc.Fatal("gins OINDEX not implemented")
+ gc.Fatalf("gins OINDEX not implemented")
}
// gc.Regalloc(&nod, ®node, Z);
@@ -677,7 +677,7 @@
/* generate a comparison
TODO(kaib): one of the args can actually be a small constant. relax the constraint and fix call sites.
*/
- gc.Fatal("bad operands to gcmp")
+ gc.Fatalf("bad operands to gcmp")
}
p.From = p.To
p.To = obj.Addr{}
@@ -686,22 +686,22 @@
case arm.AMULU:
if f != nil && f.Op != gc.OREGISTER {
- gc.Fatal("bad operands to mul")
+ gc.Fatalf("bad operands to mul")
}
case arm.AMOVW:
if (p.From.Type == obj.TYPE_MEM || p.From.Type == obj.TYPE_ADDR || p.From.Type == obj.TYPE_CONST) && (p.To.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_ADDR) {
- gc.Fatal("gins double memory")
+ gc.Fatalf("gins double memory")
}
case arm.AADD:
if p.To.Type == obj.TYPE_MEM {
- gc.Fatal("gins arith to mem")
+ gc.Fatalf("gins arith to mem")
}
case arm.ARSB:
if p.From.Type == obj.TYPE_NONE {
- gc.Fatal("rsb with no from")
+ gc.Fatalf("rsb with no from")
}
}
@@ -719,9 +719,9 @@
gc.Naddr(&a, n)
if a.Type != obj.TYPE_REG {
if n != nil {
- gc.Fatal("bad in raddr: %v", gc.Oconv(int(n.Op), 0))
+ gc.Fatalf("bad in raddr: %v", gc.Oconv(int(n.Op), 0))
} else {
- gc.Fatal("bad in raddr: <null>")
+ gc.Fatalf("bad in raddr: <null>")
}
p.Reg = 0
} else {
@@ -734,7 +734,7 @@
*/
func gshift(as int, lhs *gc.Node, stype int32, sval int32, rhs *gc.Node) *obj.Prog {
if sval <= 0 || sval > 32 {
- gc.Fatal("bad shift value: %d", sval)
+ gc.Fatalf("bad shift value: %d", sval)
}
sval = sval & 0x1f
@@ -759,13 +759,13 @@
*/
func optoas(op int, t *gc.Type) int {
if t == nil {
- gc.Fatal("optoas: t is nil")
+ gc.Fatalf("optoas: t is nil")
}
a := obj.AXXX
switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
default:
- gc.Fatal("optoas: no entry %v-%v etype %v simtype %v", gc.Oconv(int(op), 0), t, gc.Types[t.Etype], gc.Types[gc.Simtype[t.Etype]])
+ gc.Fatalf("optoas: no entry %v-%v etype %v simtype %v", gc.Oconv(int(op), 0), t, gc.Types[t.Etype], gc.Types[gc.Simtype[t.Etype]])
/* case CASE(OADDR, TPTR32):
a = ALEAL;
@@ -1188,7 +1188,7 @@
for i := 1; i < o; i++ {
if oary[i] >= 0 {
- gc.Fatal("can't happen")
+ gc.Fatalf("can't happen")
}
gins(arm.AMOVW, &n1, reg)
gc.Cgen_checknil(reg)
diff --git a/src/cmd/compile/internal/arm/peep.go b/src/cmd/compile/internal/arm/peep.go
index 66eba41..4fba434 100644
--- a/src/cmd/compile/internal/arm/peep.go
+++ b/src/cmd/compile/internal/arm/peep.go
@@ -1198,7 +1198,6 @@
arm.ACMPD,
arm.ACMP,
arm.ACMN,
- arm.ACASE,
arm.ATST:
/* read,, */
if s != nil {
@@ -1560,9 +1559,7 @@
obj.ADATA,
obj.AGLOBL,
obj.ATEXT,
- arm.AWORD,
- arm.ABCASE,
- arm.ACASE:
+ arm.AWORD:
return false
}
diff --git a/src/cmd/compile/internal/arm/prog.go b/src/cmd/compile/internal/arm/prog.go
index 9dcec46..8a304e2 100644
--- a/src/cmd/compile/internal/arm/prog.go
+++ b/src/cmd/compile/internal/arm/prog.go
@@ -138,7 +138,7 @@
info := &p.Info
*info = progtable[p.As]
if info.Flags == 0 {
- gc.Fatal("unknown instruction %v", p)
+ gc.Fatalf("unknown instruction %v", p)
}
if p.From.Type == obj.TYPE_ADDR && p.From.Sym != nil && (info.Flags&gc.LeftRead != 0) {
diff --git a/src/cmd/compile/internal/arm64/cgen.go b/src/cmd/compile/internal/arm64/cgen.go
index 30326d7..a7f1c18 100644
--- a/src/cmd/compile/internal/arm64/cgen.go
+++ b/src/cmd/compile/internal/arm64/cgen.go
@@ -20,7 +20,7 @@
var op int
switch align {
default:
- gc.Fatal("sgen: invalid alignment %d for %v", align, n.Type)
+ gc.Fatalf("sgen: invalid alignment %d for %v", align, n.Type)
case 1:
op = arm64.AMOVB
@@ -36,12 +36,12 @@
}
if w%int64(align) != 0 {
- gc.Fatal("sgen: unaligned size %d (align=%d) for %v", w, align, n.Type)
+ gc.Fatalf("sgen: unaligned size %d (align=%d) for %v", w, align, n.Type)
}
c := int32(w / int64(align))
if osrc%int64(align) != 0 || odst%int64(align) != 0 {
- gc.Fatal("sgen: unaligned offset src %d or dst %d (align %d)", osrc, odst, align)
+ gc.Fatalf("sgen: unaligned offset src %d or dst %d (align %d)", osrc, odst, align)
}
// if we are copying forward on the stack and
@@ -132,13 +132,7 @@
// ADDs. That will produce shorter, more
// pipeline-able code.
var p *obj.Prog
- for {
- tmp14 := c
- c--
- if tmp14 <= 0 {
- break
- }
-
+ for ; c > 0; c-- {
p = gins(op, &src, &tmp)
p.From.Type = obj.TYPE_MEM
p.From.Offset = int64(dir)
diff --git a/src/cmd/compile/internal/arm64/galign.go b/src/cmd/compile/internal/arm64/galign.go
index 38def8f..d61fd3c 100644
--- a/src/cmd/compile/internal/arm64/galign.go
+++ b/src/cmd/compile/internal/arm64/galign.go
@@ -26,9 +26,9 @@
* int, uint, and uintptr
*/
var typedefs = []gc.Typedef{
- gc.Typedef{"int", gc.TINT, gc.TINT64},
- gc.Typedef{"uint", gc.TUINT, gc.TUINT64},
- gc.Typedef{"uintptr", gc.TUINTPTR, gc.TUINT64},
+ {"int", gc.TINT, gc.TINT64},
+ {"uint", gc.TUINT, gc.TUINT64},
+ {"uintptr", gc.TUINTPTR, gc.TUINT64},
}
func betypeinit() {
diff --git a/src/cmd/compile/internal/arm64/ggen.go b/src/cmd/compile/internal/arm64/ggen.go
index cba4d99..2cbd663 100644
--- a/src/cmd/compile/internal/arm64/ggen.go
+++ b/src/cmd/compile/internal/arm64/ggen.go
@@ -43,10 +43,10 @@
continue
}
if n.Class != gc.PAUTO {
- gc.Fatal("needzero class %d", n.Class)
+ gc.Fatalf("needzero class %d", n.Class)
}
if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
- gc.Fatal("var %v has size %d offset %d", gc.Nconv(n, obj.FmtLong), int(n.Type.Width), int(n.Xoffset))
+ gc.Fatalf("var %v has size %d offset %d", gc.Nconv(n, obj.FmtLong), int(n.Type.Width), int(n.Xoffset))
}
if lo != hi && n.Xoffset+n.Type.Width >= lo-int64(2*gc.Widthreg) {
@@ -151,13 +151,13 @@
t := nl.Type
t0 := t
- check := 0
+ check := false
if gc.Issigned[t.Etype] {
- check = 1
+ check = true
if gc.Isconst(nl, gc.CTINT) && nl.Int() != -(1<<uint64(t.Width*8-1)) {
- check = 0
+ check = false
} else if gc.Isconst(nr, gc.CTINT) && nr.Int() != -1 {
- check = 0
+ check = false
}
}
@@ -167,7 +167,7 @@
} else {
t = gc.Types[gc.TUINT64]
}
- check = 0
+ check = false
}
a := optoas(gc.ODIV, t)
@@ -206,7 +206,7 @@
gc.Patch(p1, gc.Pc)
var p2 *obj.Prog
- if check != 0 {
+ if check {
var nm1 gc.Node
gc.Nodconst(&nm1, t, -1)
gcmp(optoas(gc.OCMP, t), &tr, &nm1)
@@ -250,7 +250,7 @@
}
gc.Regfree(&tl)
- if check != 0 {
+ if check {
gc.Patch(p2, gc.Pc)
}
}
@@ -262,9 +262,7 @@
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
// largest ullman on left.
if nl.Ullman < nr.Ullman {
- tmp := (*gc.Node)(nl)
- nl = nr
- nr = tmp
+ nl, nr = nr, nl
}
t := (*gc.Type)(nl.Type)
@@ -299,7 +297,7 @@
}
default:
- gc.Fatal("cgen_hmul %v", t)
+ gc.Fatalf("cgen_hmul %v", t)
}
gc.Cgen(&n1, res)
@@ -499,7 +497,7 @@
gc.Warnl(int(p.Lineno), "generated nil check")
}
if p.From.Type != obj.TYPE_REG {
- gc.Fatal("invalid nil check %v\n", p)
+ gc.Fatalf("invalid nil check %v\n", p)
}
// check is
diff --git a/src/cmd/compile/internal/arm64/gsubr.go b/src/cmd/compile/internal/arm64/gsubr.go
index 0a14654..1121478 100644
--- a/src/cmd/compile/internal/arm64/gsubr.go
+++ b/src/cmd/compile/internal/arm64/gsubr.go
@@ -84,7 +84,7 @@
switch as {
default:
- gc.Fatal("ginscon2")
+ gc.Fatalf("ginscon2")
case arm64.ACMP:
if -arm64.BIG <= c && c <= arm64.BIG {
@@ -214,7 +214,7 @@
switch uint32(ft)<<16 | uint32(tt) {
default:
- gc.Fatal("gmove %v -> %v", gc.Tconv(f.Type, obj.FmtLong), gc.Tconv(t.Type, obj.FmtLong))
+ gc.Fatalf("gmove %v -> %v", gc.Tconv(f.Type, obj.FmtLong), gc.Tconv(t.Type, obj.FmtLong))
/*
* integer copy and truncate
@@ -514,7 +514,7 @@
case arm64.ACMP, arm64.AFCMPS, arm64.AFCMPD:
if t != nil {
if f.Op != gc.OREGISTER {
- gc.Fatal("bad operands to gcmp")
+ gc.Fatalf("bad operands to gcmp")
}
p.From = p.To
p.To = obj.Addr{}
@@ -527,12 +527,12 @@
case arm64.AAND, arm64.AMUL:
if p.From.Type == obj.TYPE_CONST {
gc.Debug['h'] = 1
- gc.Fatal("bad inst: %v", p)
+ gc.Fatalf("bad inst: %v", p)
}
case arm64.ACMP:
if p.From.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_MEM {
gc.Debug['h'] = 1
- gc.Fatal("bad inst: %v", p)
+ gc.Fatalf("bad inst: %v", p)
}
}
@@ -564,7 +564,7 @@
if w != 0 && ((f != nil && p.From.Width < int64(w)) || (t != nil && p.To.Type != obj.TYPE_REG && p.To.Width > int64(w))) {
gc.Dump("f", f)
gc.Dump("t", t)
- gc.Fatal("bad width: %v (%d, %d)\n", p, p.From.Width, p.To.Width)
+ gc.Fatalf("bad width: %v (%d, %d)\n", p, p.From.Width, p.To.Width)
}
return p
@@ -579,9 +579,9 @@
gc.Naddr(&a, n)
if a.Type != obj.TYPE_REG {
if n != nil {
- gc.Fatal("bad in raddr: %v", gc.Oconv(int(n.Op), 0))
+ gc.Fatalf("bad in raddr: %v", gc.Oconv(int(n.Op), 0))
} else {
- gc.Fatal("bad in raddr: <null>")
+ gc.Fatalf("bad in raddr: <null>")
}
p.Reg = 0
} else {
@@ -591,7 +591,7 @@
func gcmp(as int, lhs *gc.Node, rhs *gc.Node) *obj.Prog {
if lhs.Op != gc.OREGISTER {
- gc.Fatal("bad operands to gcmp: %v %v", gc.Oconv(int(lhs.Op), 0), gc.Oconv(int(rhs.Op), 0))
+ gc.Fatalf("bad operands to gcmp: %v %v", gc.Oconv(int(lhs.Op), 0), gc.Oconv(int(rhs.Op), 0))
}
p := rawgins(as, rhs, nil)
@@ -604,13 +604,13 @@
*/
func optoas(op int, t *gc.Type) int {
if t == nil {
- gc.Fatal("optoas: t is nil")
+ gc.Fatalf("optoas: t is nil")
}
a := int(obj.AXXX)
switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
default:
- gc.Fatal("optoas: no entry for op=%v type=%v", gc.Oconv(int(op), 0), t)
+ gc.Fatalf("optoas: no entry for op=%v type=%v", gc.Oconv(int(op), 0), t)
case gc.OEQ<<16 | gc.TBOOL,
gc.OEQ<<16 | gc.TINT8,
diff --git a/src/cmd/compile/internal/arm64/prog.go b/src/cmd/compile/internal/arm64/prog.go
index 4b498b7..a4b8ebe 100644
--- a/src/cmd/compile/internal/arm64/prog.go
+++ b/src/cmd/compile/internal/arm64/prog.go
@@ -133,7 +133,7 @@
info := &p.Info
*info = progtable[p.As]
if info.Flags == 0 {
- gc.Fatal("proginfo: unknown instruction %v", p)
+ gc.Fatalf("proginfo: unknown instruction %v", p)
}
if (info.Flags&gc.RegRead != 0) && p.Reg == 0 {
diff --git a/src/cmd/compile/internal/gc/align.go b/src/cmd/compile/internal/gc/align.go
index 60c59fc..741588e 100644
--- a/src/cmd/compile/internal/gc/align.go
+++ b/src/cmd/compile/internal/gc/align.go
@@ -16,7 +16,7 @@
func Rnd(o int64, r int64) int64 {
if r < 1 || r > 8 || r&(r-1) != 0 {
- Fatal("rnd %d", r)
+ Fatalf("rnd %d", r)
}
return (o + r - 1) &^ (r - 1)
}
@@ -25,7 +25,7 @@
o := int32(0)
for f := t.Type; f != nil; f = f.Down {
if f.Etype != TFIELD {
- Fatal("offmod: not TFIELD: %v", Tconv(f, obj.FmtLong))
+ Fatalf("offmod: not TFIELD: %v", Tconv(f, obj.FmtLong))
}
f.Width = int64(o)
o += int32(Widthptr)
@@ -46,7 +46,7 @@
var w int64
for f := t.Type; f != nil; f = f.Down {
if f.Etype != TFIELD {
- Fatal("widstruct: not TFIELD: %v", Tconv(f, obj.FmtLong))
+ Fatalf("widstruct: not TFIELD: %v", Tconv(f, obj.FmtLong))
}
if f.Type == nil {
// broken field, just skip it so that other valid fields
@@ -59,7 +59,7 @@
maxalign = int32(f.Type.Align)
}
if f.Type.Width < 0 {
- Fatal("invalid width %d", f.Type.Width)
+ Fatalf("invalid width %d", f.Type.Width)
}
w = f.Type.Width
if f.Type.Align > 0 {
@@ -111,7 +111,7 @@
func dowidth(t *Type) {
if Widthptr == 0 {
- Fatal("dowidth without betypeinit")
+ Fatalf("dowidth without betypeinit")
}
if t == nil {
@@ -121,7 +121,7 @@
if t.Width > 0 {
if t.Align == 0 {
// See issue 11354
- Fatal("zero alignment with nonzero size %v", t)
+ Fatalf("zero alignment with nonzero size %v", t)
}
return
}
@@ -129,8 +129,8 @@
if t.Width == -2 {
lno := int(lineno)
lineno = int32(t.Lineno)
- if t.Broke == 0 {
- t.Broke = 1
+ if !t.Broke {
+ t.Broke = true
Yyerror("invalid recursive type %v", t)
}
@@ -141,7 +141,7 @@
// break infinite recursion if the broken recursive type
// is referenced again
- if t.Broke != 0 && t.Width == 0 {
+ if t.Broke && t.Width == 0 {
return
}
@@ -168,7 +168,7 @@
w := int64(0)
switch et {
default:
- Fatal("dowidth: unknown type: %v", t)
+ Fatalf("dowidth: unknown type: %v", t)
/* compiler-specific stuff */
case TINT8, TUINT8, TBOOL:
@@ -233,7 +233,7 @@
checkwidth(t.Down)
case TFORW: // should have been filled in
- if t.Broke == 0 {
+ if !t.Broke {
Yyerror("invalid recursive type %v", t)
}
w = 1 // anything will do
@@ -241,13 +241,13 @@
// dummy type; should be replaced before use.
case TANY:
if Debug['A'] == 0 {
- Fatal("dowidth any")
+ Fatalf("dowidth any")
}
w = 1 // anything will do
case TSTRING:
if sizeof_String == 0 {
- Fatal("early dowidth string")
+ Fatalf("early dowidth string")
}
w = int64(sizeof_String)
t.Align = uint8(Widthptr)
@@ -272,17 +272,17 @@
checkwidth(t.Type)
t.Align = uint8(Widthptr)
} else if t.Bound == -100 {
- if t.Broke == 0 {
+ if !t.Broke {
Yyerror("use of [...] array outside of array literal")
- t.Broke = 1
+ t.Broke = true
}
} else {
- Fatal("dowidth %v", t) // probably [...]T
+ Fatalf("dowidth %v", t) // probably [...]T
}
case TSTRUCT:
- if t.Funarg != 0 {
- Fatal("dowidth fn struct %v", t)
+ if t.Funarg {
+ Fatalf("dowidth fn struct %v", t)
}
w = widstruct(t, t, 0, 1)
@@ -319,7 +319,7 @@
t.Width = w
if t.Align == 0 {
if w > 8 || w&(w-1) != 0 {
- Fatal("invalid alignment for %v", t)
+ Fatalf("invalid alignment for %v", t)
}
t.Align = uint8(w)
}
@@ -366,8 +366,8 @@
// function arg structs should not be checked
// outside of the enclosing function.
- if t.Funarg != 0 {
- Fatal("checkwidth %v", t)
+ if t.Funarg {
+ Fatalf("checkwidth %v", t)
}
if defercalc == 0 {
@@ -375,10 +375,10 @@
return
}
- if t.Deferwidth != 0 {
+ if t.Deferwidth {
return
}
- t.Deferwidth = 1
+ t.Deferwidth = true
l := tlfree
if l != nil {
@@ -395,17 +395,17 @@
func defercheckwidth() {
// we get out of sync on syntax errors, so don't be pedantic.
if defercalc != 0 && nerrors == 0 {
- Fatal("defercheckwidth")
+ Fatalf("defercheckwidth")
}
defercalc = 1
}
func resumecheckwidth() {
if defercalc == 0 {
- Fatal("resumecheckwidth")
+ Fatalf("resumecheckwidth")
}
for l := tlq; l != nil; l = tlq {
- l.t.Deferwidth = 0
+ l.t.Deferwidth = false
tlq = l.next
dowidth(l.t)
l.next = tlfree
@@ -419,7 +419,7 @@
func typeinit() {
if Widthptr == 0 {
- Fatal("typeinit before betypeinit")
+ Fatalf("typeinit before betypeinit")
}
for i := 0; i < NTYPE; i++ {
@@ -637,11 +637,11 @@
etype = Thearch.Typedefs[i].Etype
if etype < 0 || etype >= len(Types) {
- Fatal("typeinit: %s bad etype", s.Name)
+ Fatalf("typeinit: %s bad etype", s.Name)
}
sameas = Thearch.Typedefs[i].Sameas
if sameas < 0 || sameas >= len(Types) {
- Fatal("typeinit: %s bad sameas", s.Name)
+ Fatalf("typeinit: %s bad sameas", s.Name)
}
Simtype[etype] = uint8(sameas)
minfltval[etype] = minfltval[sameas]
@@ -651,7 +651,7 @@
t = Types[etype]
if t != nil {
- Fatal("typeinit: %s already defined", s.Name)
+ Fatalf("typeinit: %s already defined", s.Name)
}
t = typ(etype)
@@ -707,7 +707,7 @@
w = (w + int64(Widthptr) - 1) &^ (int64(Widthptr) - 1)
if int64(int(w)) != w {
- Fatal("argsize too big")
+ Fatalf("argsize too big")
}
return int(w)
}
diff --git a/src/cmd/compile/internal/gc/bv.go b/src/cmd/compile/internal/gc/bv.go
index 2b988e6..b40339e 100644
--- a/src/cmd/compile/internal/gc/bv.go
+++ b/src/cmd/compile/internal/gc/bv.go
@@ -65,7 +65,7 @@
func bvcmp(bv1 Bvec, bv2 Bvec) int {
if bv1.n != bv2.n {
- Fatal("bvequal: lengths %d and %d are not equal", bv1.n, bv2.n)
+ Fatalf("bvequal: lengths %d and %d are not equal", bv1.n, bv2.n)
}
for i, x := range bv1.b {
if x != bv2.b[i] {
@@ -98,7 +98,7 @@
func bvget(bv Bvec, i int32) int {
if i < 0 || i >= bv.n {
- Fatal("bvget: index %d is out of bounds with length %d\n", i, bv.n)
+ Fatalf("bvget: index %d is out of bounds with length %d\n", i, bv.n)
}
return int((bv.b[i>>WORDSHIFT] >> uint(i&WORDMASK)) & 1)
}
@@ -174,7 +174,7 @@
func bvreset(bv Bvec, i int32) {
if i < 0 || i >= bv.n {
- Fatal("bvreset: index %d is out of bounds with length %d\n", i, bv.n)
+ Fatalf("bvreset: index %d is out of bounds with length %d\n", i, bv.n)
}
mask := uint32(^(1 << uint(i%WORDBITS)))
bv.b[i/WORDBITS] &= mask
@@ -188,7 +188,7 @@
func bvset(bv Bvec, i int32) {
if i < 0 || i >= bv.n {
- Fatal("bvset: index %d is out of bounds with length %d\n", i, bv.n)
+ Fatalf("bvset: index %d is out of bounds with length %d\n", i, bv.n)
}
mask := uint32(1 << uint(i%WORDBITS))
bv.b[i/WORDBITS] |= mask
diff --git a/src/cmd/compile/internal/gc/cgen.go b/src/cmd/compile/internal/gc/cgen.go
index b6a3e5b..951f84f 100644
--- a/src/cmd/compile/internal/gc/cgen.go
+++ b/src/cmd/compile/internal/gc/cgen.go
@@ -34,7 +34,7 @@
}
if res == nil || res.Type == nil {
- Fatal("cgen: res nil")
+ Fatalf("cgen: res nil")
}
for n.Op == OCONVNOP {
@@ -68,7 +68,7 @@
if n.Ullman >= UINF {
if n.Op == OINDREG {
- Fatal("cgen: this is going to miscompile")
+ Fatalf("cgen: this is going to miscompile")
}
if res.Ullman >= UINF {
var n1 Node
@@ -81,7 +81,7 @@
if Isfat(n.Type) {
if n.Type.Width < 0 {
- Fatal("forgot to compute width for %v", n.Type)
+ Fatalf("forgot to compute width for %v", n.Type)
}
sgen_wb(n, res, n.Type.Width, wb)
return
@@ -103,7 +103,7 @@
if n1.Ullman > res.Ullman {
Dump("n1", &n1)
Dump("res", res)
- Fatal("loop in cgen")
+ Fatalf("loop in cgen")
}
cgen_wb(&n1, res, wb)
@@ -191,7 +191,7 @@
if wb {
if int(Simtype[res.Type.Etype]) != Tptr {
- Fatal("cgen_wb of type %v", res.Type)
+ Fatalf("cgen_wb of type %v", res.Type)
}
if n.Ullman >= UINF {
var n1 Node
@@ -362,7 +362,7 @@
default:
Dump("cgen", n)
Dump("cgen-res", res)
- Fatal("cgen: unknown op %v", Nconv(n, obj.FmtShort|obj.FmtSign))
+ Fatalf("cgen: unknown op %v", Nconv(n, obj.FmtShort|obj.FmtSign))
case OOROR, OANDAND,
OEQ, ONE,
@@ -593,7 +593,7 @@
break
}
- Fatal("cgen: OLEN: unknown type %v", Tconv(nl.Type, obj.FmtLong))
+ Fatalf("cgen: OLEN: unknown type %v", Tconv(nl.Type, obj.FmtLong))
case OCAP:
if Istype(nl.Type, TCHAN) {
@@ -631,7 +631,7 @@
break
}
- Fatal("cgen: OCAP: unknown type %v", Tconv(nl.Type, obj.FmtLong))
+ Fatalf("cgen: OCAP: unknown type %v", Tconv(nl.Type, obj.FmtLong))
case OADDR:
if n.Bounded { // let race detector avoid nil checks
@@ -705,9 +705,7 @@
*/
sbop: // symmetric binary
if nl.Ullman < nr.Ullman || (nl.Ullman == nr.Ullman && (Smallintconst(nl) || (nr.Op == OLITERAL && !Smallintconst(nr)))) {
- r := nl
- nl = nr
- nr = r
+ nl, nr = nr, nl
}
abop: // asymmetric binary
@@ -928,7 +926,7 @@
}
if Isfat(n.Type) {
- Fatal("cgenr on fat node")
+ Fatalf("cgenr on fat node")
}
if n.Addable {
@@ -1034,7 +1032,7 @@
// constant index
if Isconst(nr, CTINT) {
if Isconst(nl, CTSTR) {
- Fatal("constant string constant index")
+ Fatalf("constant string constant index")
}
v := uint64(Mpgetfix(nr.Val().U.(*Mpint)))
var n2 Node
@@ -1186,7 +1184,7 @@
// constant index
if Isconst(nr, CTINT) {
if Isconst(nl, CTSTR) {
- Fatal("constant string constant index") // front end should handle
+ Fatalf("constant string constant index") // front end should handle
}
v := uint64(Mpgetfix(nr.Val().U.(*Mpint)))
if Isslice(nl.Type) || nl.Type.Etype == TSTRING {
@@ -1376,7 +1374,7 @@
index:
if Isconst(nr, CTINT) {
if Isconst(nl, CTSTR) {
- Fatal("constant string constant index") // front end should handle
+ Fatalf("constant string constant index") // front end should handle
}
v := uint64(Mpgetfix(nr.Val().U.(*Mpint)))
if Isslice(nl.Type) || nl.Type.Etype == TSTRING {
@@ -1529,7 +1527,7 @@
if n.Addable {
if n.Op == OREGISTER {
- Fatal("agen OREGISTER")
+ Fatalf("agen OREGISTER")
}
var n1 Node
n1.Op = OADDR
@@ -1546,7 +1544,7 @@
switch n.Op {
default:
- Fatal("agen: unknown op %v", Nconv(n, obj.FmtShort|obj.FmtSign))
+ Fatalf("agen: unknown op %v", Nconv(n, obj.FmtShort|obj.FmtSign))
case OCALLMETH:
cgen_callmeth(n, 0)
@@ -1576,13 +1574,13 @@
// should only get here with names in this func.
if n.Name.Funcdepth > 0 && n.Name.Funcdepth != Funcdepth {
Dump("bad agen", n)
- Fatal("agen: bad ONAME funcdepth %d != %d", n.Name.Funcdepth, Funcdepth)
+ Fatalf("agen: bad ONAME funcdepth %d != %d", n.Name.Funcdepth, Funcdepth)
}
// should only get here for heap vars or paramref
if n.Class&PHEAP == 0 && n.Class != PPARAMREF {
Dump("bad agen", n)
- Fatal("agen: bad ONAME class %#x", n.Class)
+ Fatalf("agen: bad ONAME class %#x", n.Class)
}
Cgen(n.Name.Heapaddr, res)
@@ -1800,7 +1798,7 @@
}
if n.Type.Etype != TBOOL {
- Fatal("bgen: bad type %v for %v", n.Type, Oconv(int(n.Op), 0))
+ Fatalf("bgen: bad type %v for %v", n.Type, Oconv(int(n.Op), 0))
}
for n.Op == OCONVNOP {
@@ -1841,7 +1839,7 @@
// We can fix that as we go.
switch Ctxt.Arch.Thechar {
case '5', '7', '9':
- Fatal("genval 5g, 7g, 9g ONAMES not fully implemented")
+ Fatalf("genval 5g, 7g, 9g ONAMES not fully implemented")
}
Cgen(n, res)
if !wantTrue {
@@ -1865,7 +1863,7 @@
case OLITERAL:
// n is a constant.
if !Isconst(n, CTBOOL) {
- Fatal("bgen: non-bool const %v\n", Nconv(n, obj.FmtLong))
+ Fatalf("bgen: non-bool const %v\n", Nconv(n, obj.FmtLong))
}
if genval {
Cgen(Nodbool(wantTrue == n.Val().U.(bool)), res)
@@ -2068,7 +2066,7 @@
switch Ctxt.Arch.Thechar {
case '5':
if genval {
- Fatal("genval 5g Isfloat special cases not implemented")
+ Fatalf("genval 5g Isfloat special cases not implemented")
}
switch n.Op {
case ONE:
@@ -2116,7 +2114,7 @@
}
case '7', '9':
if genval {
- Fatal("genval 7g, 9g Isfloat special cases not implemented")
+ Fatalf("genval 7g, 9g Isfloat special cases not implemented")
}
switch n.Op {
// On arm64 and ppc64, <= and >= mishandle NaN. Must decompose into < or > and =.
@@ -2251,11 +2249,11 @@
}
if n.Ullman >= UINF && ns.Ullman >= UINF {
- Fatal("sgen UINF")
+ Fatalf("sgen UINF")
}
if w < 0 {
- Fatal("sgen copy %d", w)
+ Fatalf("sgen copy %d", w)
}
// If copying .args, that's all the results, so record definition sites
@@ -2336,7 +2334,7 @@
switch proc {
default:
- Fatal("Ginscall: bad proc %d", proc)
+ Fatalf("Ginscall: bad proc %d", proc)
case 0, // normal call
-1: // normal call but no return
@@ -2401,8 +2399,8 @@
if proc == 1 {
Ginscall(Newproc, 0)
} else {
- if Hasdefer == 0 {
- Fatal("hasdefer=0 but has defer")
+ if !hasdefer {
+ Fatalf("hasdefer=0 but has defer")
}
Ginscall(Deferproc, 0)
}
@@ -2423,12 +2421,12 @@
func cgen_callinter(n *Node, res *Node, proc int) {
i := n.Left
if i.Op != ODOTINTER {
- Fatal("cgen_callinter: not ODOTINTER %v", Oconv(int(i.Op), 0))
+ Fatalf("cgen_callinter: not ODOTINTER %v", Oconv(int(i.Op), 0))
}
f := i.Right // field
if f.Op != ONAME {
- Fatal("cgen_callinter: not ONAME %v", Oconv(int(f.Op), 0))
+ Fatalf("cgen_callinter: not ONAME %v", Oconv(int(f.Op), 0))
}
i = i.Left // interface
@@ -2471,7 +2469,7 @@
var nodr Node
Regalloc(&nodr, Types[Tptr], &nodo)
if n.Left.Xoffset == BADWIDTH {
- Fatal("cgen_callinter: badwidth")
+ Fatalf("cgen_callinter: badwidth")
}
Cgen_checknil(&nodo) // in case offset is huge
nodo.Op = OINDREG
@@ -2562,7 +2560,7 @@
var flist Iter
fp := Structfirst(&flist, Getoutarg(t))
if fp == nil {
- Fatal("cgen_callret: nil")
+ Fatalf("cgen_callret: nil")
}
var nod Node
@@ -2592,7 +2590,7 @@
var flist Iter
fp := Structfirst(&flist, Getoutarg(t))
if fp == nil {
- Fatal("cgen_aret: nil")
+ Fatalf("cgen_aret: nil")
}
var nod1 Node
@@ -2624,7 +2622,7 @@
if n != nil {
Genlist(n.List) // copy out args
}
- if Hasdefer != 0 {
+ if hasdefer {
Ginscall(Deferreturn, 0)
}
Genlist(Curfn.Func.Exit)
@@ -2814,11 +2812,11 @@
if res.Op != ONAME && !samesafeexpr(res, n.List.N) {
Dump("cgen_append-n", n)
Dump("cgen_append-res", res)
- Fatal("append not lowered")
+ Fatalf("append not lowered")
}
for l := n.List; l != nil; l = l.Next {
if l.N.Ullman >= UINF {
- Fatal("append with function call arguments")
+ Fatalf("append with function call arguments")
}
}
@@ -3261,7 +3259,7 @@
// but it will be represented in 32 bits.
if Ctxt.Arch.Regsize == 4 && Is64(n1.Type) {
if mpcmpfixc(n1.Val().U.(*Mpint), 1<<31) >= 0 {
- Fatal("missed slice out of bounds check")
+ Fatalf("missed slice out of bounds check")
}
var tmp Node
Nodconst(&tmp, indexRegType, Mpgetfix(n1.Val().U.(*Mpint)))
diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go
index a0dfa0b..e7bece8 100644
--- a/src/cmd/compile/internal/gc/closure.go
+++ b/src/cmd/compile/internal/gc/closure.go
@@ -87,7 +87,7 @@
if !n.Name.Captured {
n.Name.Captured = true
if n.Name.Decldepth == 0 {
- Fatal("typecheckclosure: var %v does not have decldepth assigned", Nconv(n, obj.FmtShort))
+ Fatalf("typecheckclosure: var %v does not have decldepth assigned", Nconv(n, obj.FmtShort))
}
// Ignore assignments to the variable in straightline code
@@ -172,7 +172,7 @@
n.Func.Outerfunc.Func.Closgen++
gen = n.Func.Outerfunc.Func.Closgen
} else {
- Fatal("closurename called for %v", Nconv(n, obj.FmtShort))
+ Fatalf("closurename called for %v", Nconv(n, obj.FmtShort))
}
n.Sym = Lookupf("%s.%s%d", outer, prefix, gen)
return n.Sym
@@ -204,7 +204,7 @@
xfunc.Nbody = func_.Nbody
xfunc.Func.Dcl = concat(func_.Func.Dcl, xfunc.Func.Dcl)
if xfunc.Nbody == nil {
- Fatal("empty body - won't generate any code")
+ Fatalf("empty body - won't generate any code")
}
typecheck(&xfunc, Etop)
@@ -322,7 +322,7 @@
continue
}
fld = typ(TFIELD)
- fld.Funarg = 1
+ fld.Funarg = true
if v.Name.Byval {
// If v is captured by value, we merely downgrade it to PPARAM.
v.Class = PPARAM
@@ -355,7 +355,7 @@
// Recalculate param offsets.
if f.Type.Width > 0 {
- Fatal("transformclosure: width is already calculated")
+ Fatalf("transformclosure: width is already calculated")
}
dowidth(f.Type)
xfunc.Type = f.Type // update type of ODCLFUNC
@@ -491,7 +491,7 @@
break
default:
- Fatal("invalid typecheckpartialcall")
+ Fatalf("invalid typecheckpartialcall")
}
// Create top-level function.
@@ -518,7 +518,7 @@
basetype = basetype.Type
}
if basetype.Etype != TINTER && basetype.Sym == nil {
- Fatal("missing base type for %v", rcvrtype)
+ Fatalf("missing base type for %v", rcvrtype)
}
var spkg *Pkg
diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go
index 9eb4983..de23190 100644
--- a/src/cmd/compile/internal/gc/const.go
+++ b/src/cmd/compile/internal/gc/const.go
@@ -14,7 +14,7 @@
// n must be an integer constant.
func (n *Node) Int() int64 {
if !Isconst(n, CTINT) {
- Fatal("Int(%v)", n)
+ Fatalf("Int(%v)", n)
}
return Mpgetfix(n.Val().U.(*Mpint))
}
@@ -23,7 +23,7 @@
// n must be an integer constant.
func (n *Node) SetInt(i int64) {
if !Isconst(n, CTINT) {
- Fatal("SetInt(%v)", n)
+ Fatalf("SetInt(%v)", n)
}
Mpmovecfix(n.Val().U.(*Mpint), i)
}
@@ -32,7 +32,7 @@
// n must be an integer constant.
func (n *Node) SetBigInt(x *big.Int) {
if !Isconst(n, CTINT) {
- Fatal("SetBigInt(%v)", n)
+ Fatalf("SetBigInt(%v)", n)
}
n.Val().U.(*Mpint).Val.Set(x)
}
@@ -41,7 +41,7 @@
// n must be an boolean constant.
func (n *Node) Bool() bool {
if !Isconst(n, CTBOOL) {
- Fatal("Int(%v)", n)
+ Fatalf("Int(%v)", n)
}
return n.Val().U.(bool)
}
@@ -292,7 +292,7 @@
bad:
if n.Diag == 0 {
- if t.Broke == 0 {
+ if !t.Broke {
Yyerror("cannot convert %v to type %v", n, t)
}
n.Diag = 1
@@ -396,7 +396,7 @@
switch v.Ctype() {
case CTINT, CTRUNE:
if !Isint[t.Etype] {
- Fatal("overflow: %v integer constant", t)
+ Fatalf("overflow: %v integer constant", t)
}
if Mpcmpfixfix(v.U.(*Mpint), Minintval[t.Etype]) < 0 || Mpcmpfixfix(v.U.(*Mpint), Maxintval[t.Etype]) > 0 {
return true
@@ -404,7 +404,7 @@
case CTFLT:
if !Isfloat[t.Etype] {
- Fatal("overflow: %v floating-point constant", t)
+ Fatalf("overflow: %v floating-point constant", t)
}
if mpcmpfltflt(v.U.(*Mpflt), minfltval[t.Etype]) <= 0 || mpcmpfltflt(v.U.(*Mpflt), maxfltval[t.Etype]) >= 0 {
return true
@@ -412,7 +412,7 @@
case CTCPLX:
if !Iscomplex[t.Etype] {
- Fatal("overflow: %v complex constant", t)
+ Fatalf("overflow: %v complex constant", t)
}
if mpcmpfltflt(&v.U.(*Mpcplx).Real, minfltval[t.Etype]) <= 0 || mpcmpfltflt(&v.U.(*Mpcplx).Real, maxfltval[t.Etype]) >= 0 || mpcmpfltflt(&v.U.(*Mpcplx).Imag, minfltval[t.Etype]) <= 0 || mpcmpfltflt(&v.U.(*Mpcplx).Imag, maxfltval[t.Etype]) >= 0 {
return true
@@ -787,7 +787,7 @@
if (v.Ctype() == 0 || rv.Ctype() == 0) && nerrors > 0 {
return
}
- Fatal("constant type mismatch %v(%d) %v(%d)", nl.Type, v.Ctype(), nr.Type, rv.Ctype())
+ Fatalf("constant type mismatch %v(%d) %v(%d)", nl.Type, v.Ctype(), nr.Type, rv.Ctype())
}
// run op
@@ -1106,7 +1106,7 @@
n.SetVal(v)
switch v.Ctype() {
default:
- Fatal("nodlit ctype %d", v.Ctype())
+ Fatalf("nodlit ctype %d", v.Ctype())
case CTSTR:
n.Type = idealstring
@@ -1134,7 +1134,7 @@
n.SetVal(Val{c})
if r.Ctype() != CTFLT || i.Ctype() != CTFLT {
- Fatal("nodcplxlit ctype %d/%d", r.Ctype(), i.Ctype())
+ Fatalf("nodcplxlit ctype %d/%d", r.Ctype(), i.Ctype())
}
mpmovefltflt(&c.Real, r.U.(*Mpflt))
@@ -1249,7 +1249,7 @@
Yyerror("defaultlit: unknown literal: %v", n)
case CTxxx:
- Fatal("defaultlit: idealkind is CTxxx: %v", Nconv(n, obj.FmtSign))
+ Fatalf("defaultlit: idealkind is CTxxx: %v", Nconv(n, obj.FmtSign))
case CTBOOL:
t1 := Types[TBOOL]
@@ -1450,7 +1450,7 @@
var i int64
switch n.Val().Ctype() {
default:
- Fatal("convconst ctype=%d %v", n.Val().Ctype(), Tconv(t, obj.FmtLong))
+ Fatalf("convconst ctype=%d %v", n.Val().Ctype(), Tconv(t, obj.FmtLong))
case CTINT, CTRUNE:
i = Mpgetfix(n.Val().U.(*Mpint))
@@ -1470,7 +1470,7 @@
if Isfloat[tt] {
con.SetVal(toflt(con.Val()))
if con.Val().Ctype() != CTFLT {
- Fatal("convconst ctype=%d %v", con.Val().Ctype(), t)
+ Fatalf("convconst ctype=%d %v", con.Val().Ctype(), t)
}
if tt == TFLOAT32 {
con.SetVal(Val{truncfltlit(con.Val().U.(*Mpflt), t)})
@@ -1487,7 +1487,7 @@
return
}
- Fatal("convconst %v constant", Tconv(t, obj.FmtLong))
+ Fatalf("convconst %v constant", Tconv(t, obj.FmtLong))
}
// complex multiply v *= rv
diff --git a/src/cmd/compile/internal/gc/cplx.go b/src/cmd/compile/internal/gc/cplx.go
index 1643f26..9f11b96 100644
--- a/src/cmd/compile/internal/gc/cplx.go
+++ b/src/cmd/compile/internal/gc/cplx.go
@@ -81,7 +81,7 @@
// break addable nc-complex into nr-real and ni-imaginary
func subnode(nr *Node, ni *Node, nc *Node) {
if !nc.Addable {
- Fatal("subnode not addable")
+ Fatalf("subnode not addable")
}
tc := Simsimtype(nc.Type)
@@ -230,7 +230,7 @@
n.Type = t
if !Isfloat[t.Etype] {
- Fatal("nodfconst: bad type %v", t)
+ Fatalf("nodfconst: bad type %v", t)
}
}
@@ -288,14 +288,14 @@
}
if !t.Addable {
- Fatal("complexmove: to not addable")
+ Fatalf("complexmove: to not addable")
}
ft := Simsimtype(f.Type)
tt := Simsimtype(t.Type)
switch uint32(ft)<<16 | uint32(tt) {
default:
- Fatal("complexmove: unknown conversion: %v -> %v\n", f.Type, t.Type)
+ Fatalf("complexmove: unknown conversion: %v -> %v\n", f.Type, t.Type)
// complex to complex move/convert.
// make f addable.
@@ -403,7 +403,7 @@
switch n.Op {
default:
Dump("complexgen: unknown op", n)
- Fatal("complexgen: unknown op %v", Oconv(int(n.Op), 0))
+ Fatalf("complexgen: unknown op %v", Oconv(int(n.Op), 0))
case ODOT,
ODOTPTR,
@@ -462,7 +462,7 @@
switch n.Op {
default:
- Fatal("complexgen: unknown op %v", Oconv(int(n.Op), 0))
+ Fatalf("complexgen: unknown op %v", Oconv(int(n.Op), 0))
case OCONV:
Complexmove(nl, res)
diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go
index c8864f3..0609274 100644
--- a/src/cmd/compile/internal/gc/dcl.go
+++ b/src/cmd/compile/internal/gc/dcl.go
@@ -73,7 +73,7 @@
}
if d == nil {
- Fatal("popdcl: no mark")
+ Fatalf("popdcl: no mark")
}
dclstack = d.Link
block = d.Block
@@ -182,7 +182,7 @@
s := n.Sym
// kludgy: typecheckok means we're past parsing. Eg genwrapper may declare out of package names later.
- if importpkg == nil && typecheckok == 0 && s.Pkg != localpkg {
+ if importpkg == nil && !typecheckok && s.Pkg != localpkg {
Yyerror("cannot declare name %v", s)
}
@@ -198,7 +198,7 @@
}
} else {
if Curfn == nil && ctxt == PAUTO {
- Fatal("automatic outside function")
+ Fatalf("automatic outside function")
}
if Curfn != nil {
Curfn.Func.Dcl = list(Curfn.Func.Dcl, n)
@@ -238,7 +238,7 @@
func addvar(n *Node, t *Type, ctxt uint8) {
if n == nil || n.Sym == nil || (n.Op != ONAME && n.Op != ONONAME) || t == nil {
- Fatal("addvar: n=%v t=%v nil", n, t)
+ Fatalf("addvar: n=%v t=%v nil", n, t)
}
n.Op = ONAME
@@ -366,7 +366,7 @@
*/
func newname(s *Sym) *Node {
if s == nil {
- Fatal("newname nil")
+ Fatalf("newname nil")
}
n := Nod(ONAME, nil, nil)
@@ -548,7 +548,7 @@
*/
func ifacedcl(n *Node) {
if n.Op != ODCLFIELD || n.Right == nil {
- Fatal("ifacedcl")
+ Fatalf("ifacedcl")
}
if isblank(n.Left) {
@@ -582,7 +582,7 @@
func funchdr(n *Node) {
// change the declaration context from extern to auto
if Funcdepth == 0 && dclcontext != PEXTERN {
- Fatal("funchdr: dclcontext")
+ Fatalf("funchdr: dclcontext")
}
if importpkg == nil && n.Func.Nname != nil {
@@ -607,7 +607,7 @@
func funcargs(nt *Node) {
if nt.Op != OTFUNC {
- Fatal("funcargs %v", Oconv(int(nt.Op), 0))
+ Fatalf("funcargs %v", Oconv(int(nt.Op), 0))
}
// re-start the variable generation number
@@ -621,7 +621,7 @@
if nt.Left != nil {
n := nt.Left
if n.Op != ODCLFIELD {
- Fatal("funcargs receiver %v", Oconv(int(n.Op), 0))
+ Fatalf("funcargs receiver %v", Oconv(int(n.Op), 0))
}
if n.Left != nil {
n.Left.Op = ONAME
@@ -638,7 +638,7 @@
for l := nt.List; l != nil; l = l.Next {
n = l.N
if n.Op != ODCLFIELD {
- Fatal("funcargs in %v", Oconv(int(n.Op), 0))
+ Fatalf("funcargs in %v", Oconv(int(n.Op), 0))
}
if n.Left != nil {
n.Left.Op = ONAME
@@ -659,7 +659,7 @@
n = l.N
if n.Op != ODCLFIELD {
- Fatal("funcargs out %v", Oconv(int(n.Op), 0))
+ Fatalf("funcargs out %v", Oconv(int(n.Op), 0))
}
if n.Left == nil {
@@ -705,7 +705,7 @@
*/
func funcargs2(t *Type) {
if t.Etype != TFUNC {
- Fatal("funcargs2 %v", t)
+ Fatalf("funcargs2 %v", t)
}
if t.Thistuple != 0 {
@@ -753,7 +753,7 @@
func funcbody(n *Node) {
// change the declaration context from auto to extern
if dclcontext != PAUTO {
- Fatal("funcbody: dclcontext")
+ Fatalf("funcbody: dclcontext")
}
popdcl()
Funcdepth--
@@ -813,7 +813,7 @@
lineno = n.Lineno
if n.Op != ODCLFIELD {
- Fatal("structfield: oops %v\n", n)
+ Fatalf("structfield: oops %v\n", n)
}
f := typ(TFIELD)
@@ -834,7 +834,7 @@
f.Type = n.Type
if f.Type == nil {
- f.Broke = 1
+ f.Broke = true
}
switch n.Val().Ctype() {
@@ -894,16 +894,16 @@
tp = &f.Down
}
- for f := t.Type; f != nil && t.Broke == 0; f = f.Down {
- if f.Broke != 0 {
- t.Broke = 1
+ for f := t.Type; f != nil && !t.Broke; f = f.Down {
+ if f.Broke {
+ t.Broke = true
}
}
uniqgen++
checkdupfields(t.Type, "field")
- if t.Broke == 0 {
+ if !t.Broke {
checkwidth(t)
}
@@ -914,11 +914,11 @@
var f *Type
t := typ(TSTRUCT)
- t.Funarg = 1
+ t.Funarg = true
for tp := &t.Type; l != nil; l = l.Next {
f = structfield(l.N)
- f.Funarg = 1
+ f.Funarg = true
// esc.c needs to find f given a PPARAM to add the tag.
if l.N.Left != nil && l.N.Left.Class == PPARAM {
@@ -929,9 +929,9 @@
tp = &f.Down
}
- for f := t.Type; f != nil && t.Broke == 0; f = f.Down {
- if f.Broke != 0 {
- t.Broke = 1
+ for f := t.Type; f != nil && !t.Broke; f = f.Down {
+ if f.Broke {
+ t.Broke = true
}
}
@@ -943,7 +943,7 @@
lineno = n.Lineno
if n.Op != ODCLFIELD {
- Fatal("interfacefield: oops %v\n", n)
+ Fatalf("interfacefield: oops %v\n", n)
}
if n.Val().Ctype() != CTxxx {
@@ -984,11 +984,11 @@
case TFORW:
Yyerror("interface type loop involving %v", n.Type)
- f.Broke = 1
+ f.Broke = true
default:
Yyerror("interface contains embedded non-interface %v", n.Type)
- f.Broke = 1
+ f.Broke = true
}
}
}
@@ -998,7 +998,7 @@
f.Type = n.Type
if f.Type == nil {
- f.Broke = 1
+ f.Broke = true
}
lineno = int32(lno)
@@ -1034,9 +1034,9 @@
}
}
- for f := t.Type; f != nil && t.Broke == 0; f = f.Down {
- if f.Broke != 0 {
- t.Broke = 1
+ for f := t.Type; f != nil && !t.Broke; f = f.Down {
+ if f.Broke {
+ t.Broke = true
}
}
@@ -1224,8 +1224,8 @@
checkdupfields(t.Type.Down.Type, "argument")
checkdupfields(t.Type.Down.Down.Type, "argument")
- if t.Type.Broke != 0 || t.Type.Down.Broke != 0 || t.Type.Down.Down.Broke != 0 {
- t.Broke = 1
+ if t.Type.Broke || t.Type.Down.Broke || t.Type.Down.Down.Broke {
+ t.Broke = true
}
if this != nil {
@@ -1233,11 +1233,11 @@
}
t.Outtuple = count(out)
t.Intuple = count(in)
- t.Outnamed = 0
+ t.Outnamed = false
if t.Outtuple > 0 && out.N.Left != nil && out.N.Left.Orig != nil {
s := out.N.Left.Orig.Sym
if s != nil && (s.Name[0] != '~' || s.Name[1] != 'r') { // ~r%d is the name invented for an unnamed result
- t.Outnamed = 1
+ t.Outnamed = true
}
}
@@ -1356,7 +1356,7 @@
func addmethod(sf *Sym, t *Type, local bool, nointerface bool) {
// get field sym
if sf == nil {
- Fatal("no method symbol")
+ Fatalf("no method symbol")
}
// get parent type sym
@@ -1383,7 +1383,7 @@
t = t.Type
}
- if t.Broke != 0 { // rely on typecheck having complained before
+ if t.Broke { // rely on typecheck having complained before
return
}
if t.Sym == nil {
@@ -1433,7 +1433,7 @@
for f := pa.Method; f != nil; f = f.Down {
d = f
if f.Etype != TFIELD {
- Fatal("addmethod: not TFIELD: %v", Tconv(f, obj.FmtLong))
+ Fatalf("addmethod: not TFIELD: %v", Tconv(f, obj.FmtLong))
}
if sf.Name != f.Sym.Name {
continue
@@ -1449,7 +1449,7 @@
// during import unexported method names should be in the type's package
if importpkg != nil && f.Sym != nil && !exportname(f.Sym.Name) && f.Sym.Pkg != structpkg {
- Fatal("imported method name %v in wrong package %s\n", Sconv(f.Sym, obj.FmtSign), structpkg.Name)
+ Fatalf("imported method name %v in wrong package %s\n", Sconv(f.Sym, obj.FmtSign), structpkg.Name)
}
if d == nil {
@@ -1466,7 +1466,7 @@
if n.Type == nil {
if nerrors == 0 {
- Fatal("funccompile missing type")
+ Fatalf("funccompile missing type")
}
return
}
@@ -1475,7 +1475,7 @@
checkwidth(n.Type)
if Curfn != nil {
- Fatal("funccompile %v inside %v", n.Func.Nname.Sym, Curfn.Func.Nname.Sym)
+ Fatalf("funccompile %v inside %v", n.Func.Nname.Sym, Curfn.Func.Nname.Sym)
}
Stksize = 0
diff --git a/src/cmd/compile/internal/gc/esc.go b/src/cmd/compile/internal/gc/esc.go
index 4c4455f..585b327 100644
--- a/src/cmd/compile/internal/gc/esc.go
+++ b/src/cmd/compile/internal/gc/esc.go
@@ -34,10 +34,10 @@
// when analyzing a set of mutually recursive functions.
type bottomUpVisitor struct {
- analyze func(*NodeList, bool)
+ analyze func([]*Node, bool)
visitgen uint32
nodeID map[*Node]uint32
- stack *NodeList
+ stack []*Node
}
// visitBottomUp invokes analyze on the ODCLFUNC nodes listed in list.
@@ -53,7 +53,7 @@
// If recursive is false, the list consists of only a single function and its closures.
// If recursive is true, the list may still contain only a single function,
// if that function is itself recursive.
-func visitBottomUp(list *NodeList, analyze func(list *NodeList, recursive bool)) {
+func visitBottomUp(list *NodeList, analyze func(list []*Node, recursive bool)) {
var v bottomUpVisitor
v.analyze = analyze
v.nodeID = make(map[*Node]uint32)
@@ -76,10 +76,7 @@
v.visitgen++
min := v.visitgen
- l := new(NodeList)
- l.Next = v.stack
- l.N = n
- v.stack = l
+ v.stack = append(v.stack, n)
min = v.visitcodelist(n.Nbody, min)
if (min == id || min == id+1) && n.Func.FCurfn == nil {
// This node is the root of a strongly connected component.
@@ -93,17 +90,19 @@
// Remove connected component from stack.
// Mark walkgen so that future visits return a large number
// so as not to affect the caller's min.
- block := v.stack
- var l *NodeList
- for l = v.stack; l.N != n; l = l.Next {
- v.nodeID[l.N] = ^uint32(0)
+ var i int
+ for i = len(v.stack) - 1; i >= 0; i-- {
+ x := v.stack[i]
+ if x == n {
+ break
+ }
+ v.nodeID[x] = ^uint32(0)
}
v.nodeID[n] = ^uint32(0)
- v.stack = l.Next
- l.Next = nil
-
+ block := v.stack[i:]
// Run escape analysis on this set of functions.
+ v.stack = v.stack[:i]
v.analyze(block, recursive)
}
@@ -323,7 +322,7 @@
return nE
}
if n.Opt() != nil {
- Fatal("nodeEscState: opt in use (%T)", n.Opt())
+ Fatalf("nodeEscState: opt in use (%T)", n.Opt())
}
nE := new(NodeEscState)
nE.Curfn = Curfn
@@ -334,7 +333,7 @@
func (e *EscState) track(n *Node) {
if Curfn == nil {
- Fatal("EscState.track: Curfn nil")
+ Fatalf("EscState.track: Curfn nil")
}
n.Esc = EscNone // until proven otherwise
nE := e.nodeEscState(n)
@@ -368,7 +367,7 @@
if e&EscMask >= EscScope {
// normalize
if e&^EscMask != 0 {
- Fatal("Escape information had unexpected return encoding bits (w/ EscScope, EscHeap, EscNever), e&EscMask=%v", e&EscMask)
+ Fatalf("Escape information had unexpected return encoding bits (w/ EscScope, EscHeap, EscNever), e&EscMask=%v", e&EscMask)
}
}
if e&EscMask > etype {
@@ -425,7 +424,7 @@
return funcSym(nE.Curfn)
}
-func escAnalyze(all *NodeList, recursive bool) {
+func escAnalyze(all []*Node, recursive bool) {
var es EscState
e := &es
e.theSink.Op = ONAME
@@ -435,16 +434,16 @@
e.nodeEscState(&e.theSink).Escloopdepth = -1
e.recursive = recursive
- for l := all; l != nil; l = l.Next {
- if l.N.Op == ODCLFUNC {
- l.N.Esc = EscFuncPlanned
+ for i := len(all) - 1; i >= 0; i-- {
+ if n := all[i]; n.Op == ODCLFUNC {
+ n.Esc = EscFuncPlanned
}
}
// flow-analyze functions
- for l := all; l != nil; l = l.Next {
- if l.N.Op == ODCLFUNC {
- escfunc(e, l.N)
+ for i := len(all) - 1; i >= 0; i-- {
+ if n := all[i]; n.Op == ODCLFUNC {
+ escfunc(e, n)
}
}
@@ -457,9 +456,9 @@
}
// for all top level functions, tag the typenodes corresponding to the param nodes
- for l := all; l != nil; l = l.Next {
- if l.N.Op == ODCLFUNC {
- esctag(e, l.N)
+ for i := len(all) - 1; i >= 0; i-- {
+ if n := all[i]; n.Op == ODCLFUNC {
+ esctag(e, n)
}
}
@@ -478,7 +477,7 @@
func escfunc(e *EscState, func_ *Node) {
// print("escfunc %N %s\n", func->nname, e->recursive?"(recursive)":"");
if func_.Esc != 1 {
- Fatal("repeat escfunc %v", func_.Func.Nname)
+ Fatalf("repeat escfunc %v", func_.Func.Nname)
}
func_.Esc = EscFuncStarted
@@ -549,7 +548,7 @@
switch n.Op {
case OLABEL:
if n.Left == nil || n.Left.Sym == nil {
- Fatal("esc:label without label: %v", Nconv(n, obj.FmtSign))
+ Fatalf("esc:label without label: %v", Nconv(n, obj.FmtSign))
}
// Walk will complain about this label being already defined, but that's not until
@@ -560,7 +559,7 @@
case OGOTO:
if n.Left == nil || n.Left.Sym == nil {
- Fatal("esc:goto without label: %v", Nconv(n, obj.FmtSign))
+ Fatalf("esc:goto without label: %v", Nconv(n, obj.FmtSign))
}
// If we come past one that's uninitialized, this must be a (harmless) forward jump
@@ -766,7 +765,7 @@
escassign(e, ll.N, lr.N)
}
if lr != nil || ll != nil {
- Fatal("esc oas2func")
+ Fatalf("esc oas2func")
}
case ORETURN:
@@ -787,7 +786,7 @@
}
if ll != nil {
- Fatal("esc return list")
+ Fatalf("esc return list")
}
// Argument could leak through recover.
@@ -949,7 +948,7 @@
switch dst.Op {
default:
Dump("dst", dst)
- Fatal("escassign: unexpected dst")
+ Fatalf("escassign: unexpected dst")
case OARRAYLIT,
OCLOSURE,
@@ -1112,7 +1111,7 @@
break
default:
- Fatal("escape mktag")
+ Fatalf("escape mktag")
}
if mask < len(tags) && tags[mask] != "" {
@@ -1239,7 +1238,7 @@
// so there is no need to check here.
if em != 0 && dsts == nil {
- Fatal("corrupt esc tag %q or messed up escretval list\n", note)
+ Fatalf("corrupt esc tag %q or messed up escretval list\n", note)
}
return em0
}
@@ -1334,7 +1333,7 @@
var fn *Node
switch n.Op {
default:
- Fatal("esccall")
+ Fatalf("esccall")
case OCALLFUNC:
fn = n.Left
@@ -1357,7 +1356,7 @@
ll := n.List
if n.List != nil && n.List.Next == nil {
a := n.List.N
- if a.Type.Etype == TSTRUCT && a.Type.Funarg != 0 { // f(g()).
+ if a.Type.Etype == TSTRUCT && a.Type.Funarg { // f(g()).
ll = e.nodeEscState(a).Escretval
}
}
@@ -1394,7 +1393,7 @@
// function in same mutually recursive group. Incorporate into flow graph.
// print("esc local fn: %N\n", fn->ntype);
if fn.Name.Defn.Esc == EscFuncUnknown || nE.Escretval != nil {
- Fatal("graph inconsistency")
+ Fatalf("graph inconsistency")
}
// set up out list on this call node
@@ -1443,7 +1442,7 @@
// Imported or completely analyzed function. Use the escape tags.
if nE.Escretval != nil {
- Fatal("esc already decorated call %v\n", Nconv(n, obj.FmtSign))
+ Fatalf("esc already decorated call %v\n", Nconv(n, obj.FmtSign))
}
if Debug['m'] > 2 {
diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go
index 66ae881..de3edfe 100644
--- a/src/cmd/compile/internal/gc/export.go
+++ b/src/cmd/compile/internal/gc/export.go
@@ -31,7 +31,7 @@
if Debug['E'] != 0 {
fmt.Printf("export symbol %v\n", n.Sym)
}
- exportlist = list(exportlist, n)
+ exportlist = append(exportlist, n)
}
func exportname(s string) bool {
@@ -79,12 +79,12 @@
}
func dumppkg(p *Pkg) {
- if p == nil || p == localpkg || p.Exported != 0 || p == builtinpkg {
+ if p == nil || p == localpkg || p.Exported || p == builtinpkg {
return
}
- p.Exported = 1
+ p.Exported = true
suffix := ""
- if p.Direct == 0 {
+ if !p.Direct {
suffix = " // indirect"
}
fmt.Fprintf(bout, "\timport %s %q%s\n", p.Name, p.Path, suffix)
@@ -124,7 +124,7 @@
if Debug['E'] != 0 {
fmt.Printf("reexport name %v\n", n.Sym)
}
- exportlist = list(exportlist, n)
+ exportlist = append(exportlist, n)
}
}
@@ -140,7 +140,7 @@
if Debug['E'] != 0 {
fmt.Printf("reexport type %v from declaration\n", t.Sym)
}
- exportlist = list(exportlist, t.Sym.Def)
+ exportlist = append(exportlist, t.Sym.Def)
}
}
@@ -154,7 +154,7 @@
if Debug['E'] != 0 {
fmt.Printf("reexport literal type %v\n", t.Sym)
}
- exportlist = list(exportlist, t.Sym.Def)
+ exportlist = append(exportlist, t.Sym.Def)
}
}
fallthrough
@@ -164,7 +164,7 @@
if Debug['E'] != 0 {
fmt.Printf("reexport literal/type %v\n", n.Sym)
}
- exportlist = list(exportlist, n)
+ exportlist = append(exportlist, n)
}
// for operations that need a type when rendered, put the type on the export list.
@@ -193,7 +193,7 @@
if Debug['E'] != 0 {
fmt.Printf("reexport type for expression %v\n", t.Sym)
}
- exportlist = list(exportlist, t.Sym.Def)
+ exportlist = append(exportlist, t.Sym.Def)
}
}
@@ -209,7 +209,7 @@
n := s.Def
typecheck(&n, Erv)
if n == nil || n.Op != OLITERAL {
- Fatal("dumpexportconst: oconst nil: %v", s)
+ Fatalf("dumpexportconst: oconst nil: %v", s)
}
t := n.Type // may or may not be specified
@@ -273,10 +273,10 @@
if t == nil {
return
}
- if t.Printed != 0 || t == Types[t.Etype] || t == bytetype || t == runetype || t == errortype {
+ if t.Printed || t == Types[t.Etype] || t == bytetype || t == runetype || t == errortype {
return
}
- t.Printed = 1
+ t.Printed = true
if t.Sym != nil && t.Etype != TFIELD {
dumppkg(t.Sym.Pkg)
@@ -371,14 +371,17 @@
fmt.Fprintf(bout, "\n")
for _, p := range pkgs {
- if p.Direct != 0 {
+ if p.Direct {
dumppkg(p)
}
}
- for l := exportlist; l != nil; l = l.Next {
- lineno = l.N.Lineno
- dumpsym(l.N.Sym)
+ // exportlist grows during iteration - cannot use range
+ for len(exportlist) > 0 {
+ n := exportlist[0]
+ exportlist = exportlist[1:]
+ lineno = n.Lineno
+ dumpsym(n.Sym)
}
fmt.Fprintf(bout, "\n$$\n")
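Note: the comment in the hunk above is the key point: dumpsym can append new symbols to exportlist while the loop runs, and a range statement evaluates the slice operand once at loop entry, so appended entries would be missed. The generic worklist shape (work and process are hypothetical names):

    for len(work) > 0 {
        n := work[0]
        work = work[1:]
        process(n) // may append more entries to work
    }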
@@ -529,7 +532,7 @@
b, err := obj.Bopenw(asmhdr)
if err != nil {
- Fatal("%v", err)
+ Fatalf("%v", err)
}
fmt.Fprintf(b, "// generated by %cg -asmhdr from package %s\n\n", Thearch.Thechar, localpkg.Name)
var n *Node
@@ -545,7 +548,7 @@
case OTYPE:
t = n.Type
- if t.Etype != TSTRUCT || t.Map != nil || t.Funarg != 0 {
+ if t.Etype != TSTRUCT || t.Map != nil || t.Funarg {
break
}
fmt.Fprintf(b, "#define %s__size %d\n", t.Sym.Name, int(t.Width))
diff --git a/src/cmd/compile/internal/gc/fmt.go b/src/cmd/compile/internal/gc/fmt.go
index c505799..08994aa 100644
--- a/src/cmd/compile/internal/gc/fmt.go
+++ b/src/cmd/compile/internal/gc/fmt.go
@@ -356,7 +356,7 @@
return "nil"
}
- return fmt.Sprintf("<ctype=%d>", v.Ctype)
+ return fmt.Sprintf("<ctype=%d>", v.Ctype())
}
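Note: this hunk tracks Ctype changing from a struct field to a method (see the Val.Ctype definition in go.go below). Without the parentheses, v.Ctype is now a method value, so %d would be handed a func instead of the constant kind, exactly the mismatch go vet flags:

    fmt.Sprintf("<ctype=%d>", v.Ctype)   // method value: prints %!d(...)
    fmt.Sprintf("<ctype=%d>", v.Ctype()) // calls it: prints the int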
/*
@@ -434,7 +434,7 @@
case FExp:
if s.Name != "" && s.Name[0] == '.' {
- Fatal("exporting synthetic symbol %s", s.Name)
+ Fatalf("exporting synthetic symbol %s", s.Name)
}
if s.Pkg != builtinpkg {
return fmt.Sprintf("@%q.%s", s.Pkg.Path, s.Name)
@@ -648,7 +648,7 @@
}
var buf bytes.Buffer
- if t.Funarg != 0 {
+ if t.Funarg {
buf.WriteString("(")
if fmtmode == FTypeId || fmtmode == FErr { // no argument names on function signature, and no "noescape"/"nosplit" tags
for t1 := t.Type; t1 != nil; t1 = t1.Down {
@@ -705,7 +705,7 @@
}
if s != nil && t.Embedded == 0 {
- if t.Funarg != 0 {
+ if t.Funarg {
name = Nconv(t.Nname, 0)
} else if flag&obj.FmtLong != 0 {
name = Sconv(s, obj.FmtShort|obj.FmtByte) // qualify non-exported names (used on structs, not on funarg)
@@ -756,7 +756,7 @@
}
if fmtmode == FExp {
- Fatal("missing %v case during export", Econv(int(t.Etype), 0))
+ Fatalf("missing %v case during export", Econv(int(t.Etype), 0))
}
// Don't know how to handle - fall back to detailed prints.
@@ -1673,7 +1673,7 @@
dumpdepth--
default:
- Fatal("unhandled %%N mode")
+ Fatalf("unhandled %%N mode")
}
flag = sf
diff --git a/src/cmd/compile/internal/gc/gen.go b/src/cmd/compile/internal/gc/gen.go
index 4ff4f7a..5d24515 100644
--- a/src/cmd/compile/internal/gc/gen.go
+++ b/src/cmd/compile/internal/gc/gen.go
@@ -62,7 +62,7 @@
n.Name.Param.Stackparam.Type = n.Type
n.Name.Param.Stackparam.Addable = true
if n.Xoffset == BADWIDTH {
- Fatal("addrescapes before param assignment")
+ Fatalf("addrescapes before param assignment")
}
n.Name.Param.Stackparam.Xoffset = n.Xoffset
fallthrough
@@ -135,7 +135,7 @@
lab.Def = n
}
} else {
- lab.Use = list(lab.Use, n)
+ lab.Use = append(lab.Use, n)
}
return lab
@@ -228,7 +228,7 @@
func cgen_proc(n *Node, proc int) {
switch n.Left.Op {
default:
- Fatal("cgen_proc: unknown call %v", Oconv(int(n.Left.Op), 0))
+ Fatalf("cgen_proc: unknown call %v", Oconv(int(n.Left.Op), 0))
case OCALLMETH:
cgen_callmeth(n.Left, proc)
@@ -252,7 +252,7 @@
}
if n.Op != ONAME {
Dump("cgen_dcl", n)
- Fatal("cgen_dcl")
+ Fatalf("cgen_dcl")
}
if n.Class&PHEAP == 0 {
@@ -362,7 +362,7 @@
Mpmovecfix(z.Val().U.(*Mpint), 0)
default:
- Fatal("clearslim called on type %v", n.Type)
+ Fatalf("clearslim called on type %v", n.Type)
}
ullmancalc(&z)
@@ -563,7 +563,7 @@
case ODOT:
if n.Xoffset == BADWIDTH {
Dump("bad width in dotoffset", n)
- Fatal("bad width in dotoffset")
+ Fatalf("bad width in dotoffset")
}
i = Dotoffset(n.Left, oary, nn)
@@ -584,7 +584,7 @@
case ODOTPTR:
if n.Xoffset == BADWIDTH {
Dump("bad width in dotoffset", n)
- Fatal("bad width in dotoffset")
+ Fatalf("bad width in dotoffset")
}
i = Dotoffset(n.Left, oary, nn)
@@ -609,7 +609,7 @@
*/
func Tempname(nn *Node, t *Type) {
if Curfn == nil {
- Fatal("no curfn for tempname")
+ Fatalf("no curfn for tempname")
}
if t == nil {
@@ -663,7 +663,7 @@
switch n.Op {
default:
- Fatal("gen: unknown op %v", Nconv(n, obj.FmtShort|obj.FmtSign))
+ Fatalf("gen: unknown op %v", Nconv(n, obj.FmtShort|obj.FmtSign))
case OCASE,
OFALL,
@@ -731,7 +731,7 @@
break
}
- lab.Used = 1
+ lab.Used = true
if lab.Breakpc == nil {
Yyerror("invalid break label %v", n.Left.Sym)
break
@@ -756,7 +756,7 @@
break
}
- lab.Used = 1
+ lab.Used = true
if lab.Continpc == nil {
Yyerror("invalid continue label %v", n.Left.Sym)
break
@@ -899,7 +899,7 @@
ret:
if Anyregalloc() != wasregalloc {
Dump("node", n)
- Fatal("registers left allocated")
+ Fatalf("registers left allocated")
}
lineno = lno
@@ -965,7 +965,7 @@
l := n.Left
if l.Op != ODOTMETH {
- Fatal("cgen_callmeth: not dotmethod: %v", l)
+ Fatalf("cgen_callmeth: not dotmethod: %v", l)
}
n2 := *n
@@ -988,26 +988,24 @@
}
func checklabels() {
- var l *NodeList
-
for lab := labellist; lab != nil; lab = lab.Link {
if lab.Def == nil {
- for l = lab.Use; l != nil; l = l.Next {
- yyerrorl(int(l.N.Lineno), "label %v not defined", lab.Sym)
+ for _, n := range lab.Use {
+ yyerrorl(int(n.Lineno), "label %v not defined", lab.Sym)
}
continue
}
- if lab.Use == nil && lab.Used == 0 {
+ if lab.Use == nil && !lab.Used {
yyerrorl(int(lab.Def.Lineno), "label %v defined and not used", lab.Sym)
continue
}
if lab.Gotopc != nil {
- Fatal("label %v never resolved", lab.Sym)
+ Fatalf("label %v never resolved", lab.Sym)
}
- for l = lab.Use; l != nil; l = l.Next {
- checkgoto(l.N, lab.Def)
+ for _, n := range lab.Use {
+ checkgoto(n, lab.Def)
}
}
}
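Note: with Label.Use now a []*Node instead of a hand-rolled NodeList, the walks above collapse from manual pointer-chasing into range loops. The shape of the conversion (use is a hypothetical stand-in for the loop body):

    // before: for l := lab.Use; l != nil; l = l.Next { use(l.N) }
    for _, n := range lab.Use {
        use(n)
    }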
@@ -1138,7 +1136,7 @@
nodr = *nr
if !cadable(nr) {
if nr.Ullman >= UINF && nodl.Op == OINDREG {
- Fatal("miscompile")
+ Fatalf("miscompile")
}
Igen(nr, &nodr, nil)
defer Regfree(&nodr)
@@ -1159,7 +1157,7 @@
visitComponents(nl.Type, 0, func(t *Type, offset int64) bool {
if wb && int(Simtype[t.Etype]) == Tptr && t != itable {
if ptrType != nil {
- Fatal("componentgen_wb %v", Tconv(nl.Type, 0))
+ Fatalf("componentgen_wb %v", Tconv(nl.Type, 0))
}
ptrType = t
ptrOffset = offset
@@ -1199,7 +1197,7 @@
// NOTE: Assuming little endian (signed top half at offset 4).
// We don't have any 32-bit big-endian systems.
if Thearch.Thechar != '5' && Thearch.Thechar != '8' {
- Fatal("unknown 32-bit architecture")
+ Fatalf("unknown 32-bit architecture")
}
return f(Types[TUINT32], startOffset) &&
f(Types[TINT32], startOffset+4)
@@ -1222,7 +1220,6 @@
case TINTER:
return f(itable, startOffset) &&
f(Ptrto(Types[TUINT8]), startOffset+int64(Widthptr))
- return true
case TSTRING:
return f(Ptrto(Types[TUINT8]), startOffset) &&
@@ -1256,12 +1253,12 @@
// in code introduced in CL 6932045 to fix issue #4518.
// But the test case in issue 4518 does not trigger this anymore,
// so maybe this complication is no longer needed.
- Fatal("struct not at offset 0")
+ Fatalf("struct not at offset 0")
}
for field := t.Type; field != nil; field = field.Down {
if field.Etype != TFIELD {
- Fatal("bad struct")
+ Fatalf("bad struct")
}
if !visitComponents(field.Type, startOffset+field.Width, f) {
return false
diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go
index af8be29..424e647 100644
--- a/src/cmd/compile/internal/gc/go.go
+++ b/src/cmd/compile/internal/gc/go.go
@@ -98,7 +98,7 @@
func (v Val) Ctype() int {
switch x := v.U.(type) {
default:
- Fatal("unexpected Ctype for %T", v.U)
+ Fatalf("unexpected Ctype for %T", v.U)
panic("not reached")
case nil:
return 0
@@ -125,9 +125,9 @@
Path string // string literal used in import statement
Pathsym *Sym
Prefix string // escaped path for use in symbol table
- Imported uint8 // export data of this package was parsed
- Exported int8 // import line written in export data
- Direct int8 // imported directly
+ Imported bool // export data of this package was parsed
+ Exported bool // import line written in export data
+ Direct bool // imported directly
Safe bool // whether the package is marked as safe
Syms map[string]*Sym
}
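Note: converting these numeric flags to bool drops the != 0 tests at every use site and lets the compiler reject accidental arithmetic on them. A before/after sketch:

    // before: if p.Exported != 0 { return }; p.Exported = 1
    if p.Exported {
        return
    }
    p.Exported = true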
@@ -155,18 +155,17 @@
type Type struct {
Etype uint8
Nointerface bool
- Noalg uint8
+ Noalg bool
Chan uint8
Trecur uint8 // to detect loops
- Printed uint8
+ Printed bool
Embedded uint8 // TFIELD embedded type
- Siggen uint8
- Funarg uint8 // on TSTRUCT and TFIELD
- Copyany uint8
+ Funarg bool // on TSTRUCT and TFIELD
+ Copyany bool
Local bool // created in this file
- Deferwidth uint8
- Broke uint8 // broken type definition.
- Isddd bool // TFIELD is ... argument
+ Deferwidth bool
+ Broke bool // broken type definition.
+ Isddd bool // TFIELD is ... argument
Align uint8
Haspointers uint8 // 0 unknown, 1 no, 2 yes
@@ -178,7 +177,7 @@
Thistuple int
Outtuple int
Intuple int
- Outnamed uint8
+ Outnamed bool
Method *Type
Xmethod *Type
@@ -217,10 +216,9 @@
}
type Label struct {
- Used uint8
Sym *Sym
Def *Node
- Use *NodeList
+ Use []*Node
Link *Label
// for use during gen
@@ -228,6 +226,8 @@
Labelpc *obj.Prog // pointer to code
Breakpc *obj.Prog // pointer to code
Continpc *obj.Prog // pointer to code
+
+ Used bool
}
type InitEntry struct {
@@ -586,9 +586,9 @@
var externdcl *NodeList
-var exportlist *NodeList
+var exportlist []*Node
-var importlist *NodeList // imported functions and methods with inlinable bodies
+var importlist []*Node // imported functions and methods with inlinable bodies
var funcsyms *NodeList
@@ -616,7 +616,7 @@
var block int32 // current block number
-var Hasdefer int // flag that curfn has defer statetment
+var hasdefer bool // flag that curfn has defer statement
var Curfn *Node
@@ -638,7 +638,7 @@
var Funcdepth int32
-var typecheckok int
+var typecheckok bool
var compiling_runtime int
@@ -685,8 +685,6 @@
var Disable_checknil int
-var zerosize int64
-
type Flow struct {
Prog *obj.Prog // actual instruction
P1 *Flow // predecessors of this instruction: p1,
@@ -701,7 +699,7 @@
Id int32 // sequence number in flow graph
Rpo int32 // reverse post ordering
Loop uint16 // x5 for every loop
- Refset uint8 // diagnostic generated
+ Refset bool // diagnostic generated
Data interface{} // for use by client
}
diff --git a/src/cmd/compile/internal/gc/go.y b/src/cmd/compile/internal/gc/go.y
index c6d1607..6d148e4 100644
--- a/src/cmd/compile/internal/gc/go.y
+++ b/src/cmd/compile/internal/gc/go.y
@@ -275,7 +275,7 @@
// no package statement. This allows us to test more
// than one invalid import statement in a single file.
if nerrors == 0 {
- Fatal("phase error in import");
+ Fatalf("phase error in import");
}
}
@@ -315,7 +315,7 @@
} else if importpkg.Name != $2.Name {
Yyerror("conflicting names %s and %s for package %q", importpkg.Name, $2.Name, importpkg.Path);
}
- importpkg.Direct = 1;
+ importpkg.Direct = true;
importpkg.Safe = curio.importsafe
if safemode != 0 && !curio.importsafe {
@@ -2038,7 +2038,7 @@
$2.Func.Inl = $3;
funcbody($2);
- importlist = list(importlist, $2);
+ importlist = append(importlist, $2);
if Debug['E'] > 0 {
fmt.Printf("import [%q] func %v \n", importpkg.Path, $2)
diff --git a/src/cmd/compile/internal/gc/gsubr.go b/src/cmd/compile/internal/gc/gsubr.go
index 2c575f3..7e085d9 100644
--- a/src/cmd/compile/internal/gc/gsubr.go
+++ b/src/cmd/compile/internal/gc/gsubr.go
@@ -102,7 +102,7 @@
if as == obj.ADATA || as == obj.AGLOBL {
if ddumped != 0 {
- Fatal("already dumped data")
+ Fatalf("already dumped data")
}
if dpc == nil {
dpc = Ctxt.NewProg()
@@ -132,7 +132,7 @@
func Nodreg(n *Node, t *Type, r int) {
if t == nil {
- Fatal("nodreg: t nil")
+ Fatalf("nodreg: t nil")
}
*n = Node{}
@@ -310,7 +310,7 @@
a := a // copy to let escape into Ctxt.Dconv
Debug['h'] = 1
Dump("naddr", n)
- Fatal("naddr: bad %v %v", Oconv(int(n.Op), 0), Ctxt.Dconv(a))
+ Fatalf("naddr: bad %v %v", Oconv(int(n.Op), 0), Ctxt.Dconv(a))
case OREGISTER:
a.Type = obj.TYPE_REG
@@ -346,7 +346,7 @@
case OCLOSUREVAR:
if !Curfn.Func.Needctxt {
- Fatal("closurevar without needctxt")
+ Fatalf("closurevar without needctxt")
}
a.Type = obj.TYPE_MEM
a.Reg = int16(Thearch.REGCTXT)
@@ -384,7 +384,7 @@
a.Type = obj.TYPE_MEM
switch n.Class {
default:
- Fatal("naddr: ONAME class %v %d\n", n.Sym, n.Class)
+ Fatalf("naddr: ONAME class %v %d\n", n.Sym, n.Class)
case PEXTERN:
a.Name = obj.NAME_EXTERN
@@ -410,7 +410,7 @@
}
switch n.Val().Ctype() {
default:
- Fatal("naddr: const %v", Tconv(n.Type, obj.FmtLong))
+ Fatalf("naddr: const %v", Tconv(n.Type, obj.FmtLong))
case CTFLT:
a.Type = obj.TYPE_FCONST
@@ -443,7 +443,7 @@
}
if a.Type != obj.TYPE_MEM {
a := a // copy to let escape into Ctxt.Dconv
- Fatal("naddr: OADDR %v (from %v)", Ctxt.Dconv(a), Oconv(int(n.Left.Op), 0))
+ Fatalf("naddr: OADDR %v (from %v)", Ctxt.Dconv(a), Oconv(int(n.Left.Op), 0))
}
a.Type = obj.TYPE_ADDR
@@ -511,17 +511,17 @@
var n *Node
// entire argument struct, not just one arg
- if t.Etype == TSTRUCT && t.Funarg != 0 {
+ if t.Etype == TSTRUCT && t.Funarg {
n = Nod(ONAME, nil, nil)
n.Sym = Lookup(".args")
n.Type = t
var savet Iter
first := Structfirst(&savet, &t)
if first == nil {
- Fatal("nodarg: bad struct")
+ Fatalf("nodarg: bad struct")
}
if first.Width == BADWIDTH {
- Fatal("nodarg: offset not computed for %v", t)
+ Fatalf("nodarg: offset not computed for %v", t)
}
n.Xoffset = first.Width
n.Addable = true
@@ -529,7 +529,7 @@
}
if t.Etype != TFIELD {
- Fatal("nodarg: not field %v", t)
+ Fatalf("nodarg: not field %v", t)
}
if fp == 1 {
@@ -547,7 +547,7 @@
n.Sym = t.Sym
if t.Width == BADWIDTH {
- Fatal("nodarg: offset not computed for %v", t)
+ Fatalf("nodarg: offset not computed for %v", t)
}
n.Xoffset = t.Width
n.Addable = true
@@ -574,7 +574,7 @@
n.Class = PPARAM
case 2: // offset output arg
- Fatal("shouldn't be used")
+ Fatalf("shouldn't be used")
}
n.Typecheck = 1
@@ -583,7 +583,7 @@
func Patch(p *obj.Prog, to *obj.Prog) {
if p.To.Type != obj.TYPE_BRANCH {
- Fatal("patch: not a branch")
+ Fatalf("patch: not a branch")
}
p.To.Val = to
p.To.Offset = to.Pc
@@ -591,7 +591,7 @@
func unpatch(p *obj.Prog) *obj.Prog {
if p.To.Type != obj.TYPE_BRANCH {
- Fatal("unpatch: not a branch")
+ Fatalf("unpatch: not a branch")
}
q, _ := p.To.Val.(*obj.Prog)
p.To.Val = nil
@@ -669,18 +669,18 @@
*/
func Regalloc(n *Node, t *Type, o *Node) {
if t == nil {
- Fatal("regalloc: t nil")
+ Fatalf("regalloc: t nil")
}
et := int(Simtype[t.Etype])
if Ctxt.Arch.Regsize == 4 && (et == TINT64 || et == TUINT64) {
- Fatal("regalloc 64bit")
+ Fatalf("regalloc 64bit")
}
var i int
Switch:
switch et {
default:
- Fatal("regalloc: unknown type %v", t)
+ Fatalf("regalloc: unknown type %v", t)
case TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, TINT64, TUINT64, TPTR32, TPTR64, TBOOL:
if o != nil && o.Op == OREGISTER {
@@ -696,7 +696,7 @@
}
Flusherrors()
Regdump()
- Fatal("out of fixed registers")
+ Fatalf("out of fixed registers")
case TFLOAT32, TFLOAT64:
if Thearch.Use387 {
@@ -716,7 +716,7 @@
}
Flusherrors()
Regdump()
- Fatal("out of floating registers")
+ Fatalf("out of floating registers")
case TCOMPLEX64, TCOMPLEX128:
Tempname(n, t)
@@ -741,7 +741,7 @@
return
}
if n.Op != OREGISTER && n.Op != OINDREG {
- Fatal("regfree: not a register")
+ Fatalf("regfree: not a register")
}
i := int(n.Reg)
if i == Thearch.REGSP {
@@ -752,12 +752,12 @@
Thearch.FREGMIN <= i && i <= Thearch.FREGMAX:
// ok
default:
- Fatal("regfree: reg out of range")
+ Fatalf("regfree: reg out of range")
}
i -= Thearch.REGMIN
if reg[i] <= 0 {
- Fatal("regfree: reg not allocated")
+ Fatalf("regfree: reg not allocated")
}
reg[i]--
if reg[i] == 0 {
@@ -772,7 +772,7 @@
Thearch.FREGMIN <= r && r <= Thearch.FREGMAX:
// ok
default:
- Fatal("reginuse: reg out of range")
+ Fatalf("reginuse: reg out of range")
}
return reg[r-Thearch.REGMIN] > 0
@@ -782,7 +782,7 @@
// so that a register can be given up but then reclaimed.
func Regrealloc(n *Node) {
if n.Op != OREGISTER && n.Op != OINDREG {
- Fatal("regrealloc: not a register")
+ Fatalf("regrealloc: not a register")
}
i := int(n.Reg)
if i == Thearch.REGSP {
@@ -793,7 +793,7 @@
Thearch.FREGMIN <= i && i <= Thearch.FREGMAX:
// ok
default:
- Fatal("regrealloc: reg out of range")
+ Fatalf("regrealloc: reg out of range")
}
i -= Thearch.REGMIN
diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go
index b2eeeed..1f9b473 100644
--- a/src/cmd/compile/internal/gc/inl.go
+++ b/src/cmd/compile/internal/gc/inl.go
@@ -54,7 +54,7 @@
rcvr = rcvr.Type
}
if rcvr.Sym == nil {
- Fatal("receiver with no sym: [%v] %v (%v)", fn.Sym, Nconv(fn, obj.FmtLong), rcvr)
+ Fatalf("receiver with no sym: [%v] %v (%v)", fn.Sym, Nconv(fn, obj.FmtLong), rcvr)
}
return rcvr.Sym.Pkg
}
@@ -100,10 +100,10 @@
// fn and ->nbody will already have been typechecked.
func caninl(fn *Node) {
if fn.Op != ODCLFUNC {
- Fatal("caninl %v", fn)
+ Fatalf("caninl %v", fn)
}
if fn.Func.Nname == nil {
- Fatal("caninl no nname %v", Nconv(fn, obj.FmtSign))
+ Fatalf("caninl no nname %v", Nconv(fn, obj.FmtSign))
}
// If fn has no body (is defined outside of Go), cannot inline it.
@@ -112,7 +112,7 @@
}
if fn.Typecheck == 0 {
- Fatal("caninl on non-typechecked function %v", fn)
+ Fatalf("caninl on non-typechecked function %v", fn)
}
// can't handle ... args yet
@@ -196,10 +196,10 @@
// Call is okay if inlinable and we have the budget for the body.
case OCALLMETH:
if n.Left.Type == nil {
- Fatal("no function type for [%p] %v\n", n.Left, Nconv(n.Left, obj.FmtSign))
+ Fatalf("no function type for [%p] %v\n", n.Left, Nconv(n.Left, obj.FmtSign))
}
if n.Left.Type.Nname == nil {
- Fatal("no function definition for [%p] %v\n", n.Left.Type, Tconv(n.Left.Type, obj.FmtSign))
+ Fatalf("no function definition for [%p] %v\n", n.Left.Type, Tconv(n.Left.Type, obj.FmtSign))
}
if n.Left.Type.Nname.Func.Inl != nil {
*budget -= int(n.Left.Type.Nname.Func.InlCost)
@@ -277,7 +277,7 @@
Curfn = fn
inlnode(&fn)
if fn != Curfn {
- Fatal("inlnode replaced curfn")
+ Fatalf("inlnode replaced curfn")
}
Curfn = savefn
}
@@ -308,7 +308,7 @@
// statements.
func inlconv2list(n *Node) *NodeList {
if n.Op != OINLCALL || n.Rlist == nil {
- Fatal("inlconv2list %v\n", Nconv(n, obj.FmtSign))
+ Fatalf("inlconv2list %v\n", Nconv(n, obj.FmtSign))
}
l := n.Rlist
@@ -470,11 +470,11 @@
// typecheck should have resolved ODOTMETH->type, whose nname points to the actual function.
if n.Left.Type == nil {
- Fatal("no function type for [%p] %v\n", n.Left, Nconv(n.Left, obj.FmtSign))
+ Fatalf("no function type for [%p] %v\n", n.Left, Nconv(n.Left, obj.FmtSign))
}
if n.Left.Type.Nname == nil {
- Fatal("no function definition for [%p] %v\n", n.Left.Type, Tconv(n.Left.Type, obj.FmtSign))
+ Fatalf("no function definition for [%p] %v\n", n.Left.Type, Tconv(n.Left.Type, obj.FmtSign))
}
mkinlcall(np, n.Left.Type.Nname, n.Isddd)
@@ -500,7 +500,7 @@
func tinlvar(t *Type) *Node {
if t.Nname != nil && !isblank(t.Nname) {
if t.Nname.Name.Inlvar == nil {
- Fatal("missing inlvar for %v\n", t.Nname)
+ Fatalf("missing inlvar for %v\n", t.Nname)
}
return t.Nname.Name.Inlvar
}
@@ -600,13 +600,13 @@
t := getthisx(fn.Type).Type
if t != nil && t.Nname != nil && !isblank(t.Nname) && t.Nname.Name.Inlvar == nil {
- Fatal("missing inlvar for %v\n", t.Nname)
+ Fatalf("missing inlvar for %v\n", t.Nname)
}
if n.Left.Left == nil {
- Fatal("method call without receiver: %v", Nconv(n, obj.FmtSign))
+ Fatalf("method call without receiver: %v", Nconv(n, obj.FmtSign))
}
if t == nil {
- Fatal("method call unknown receiver type: %v", Nconv(n, obj.FmtSign))
+ Fatalf("method call unknown receiver type: %v", Nconv(n, obj.FmtSign))
}
as = Nod(OAS, tinlvar(t), n.Left.Left)
if as != nil {
@@ -662,17 +662,17 @@
if fn.Type.Thistuple != 0 && n.Left.Op != ODOTMETH {
// non-method call to method
if n.List == nil {
- Fatal("non-method call to method without first arg: %v", Nconv(n, obj.FmtSign))
+ Fatalf("non-method call to method without first arg: %v", Nconv(n, obj.FmtSign))
}
// append receiver inlvar to LHS.
t := getthisx(fn.Type).Type
if t != nil && t.Nname != nil && !isblank(t.Nname) && t.Nname.Name.Inlvar == nil {
- Fatal("missing inlvar for %v\n", t.Nname)
+ Fatalf("missing inlvar for %v\n", t.Nname)
}
if t == nil {
- Fatal("method call unknown receiver type: %v", Nconv(n, obj.FmtSign))
+ Fatalf("method call unknown receiver type: %v", Nconv(n, obj.FmtSign))
}
as.List = list(as.List, tinlvar(t))
ll = ll.Next // track argument count.
@@ -732,7 +732,7 @@
}
if ll != nil || t != nil {
- Fatal("arg count mismatch: %v vs %v\n", Tconv(getinargx(fn.Type), obj.FmtSharp), Hconv(n.List, obj.FmtComma))
+ Fatalf("arg count mismatch: %v vs %v\n", Tconv(getinargx(fn.Type), obj.FmtSharp), Hconv(n.List, obj.FmtComma))
}
}
@@ -956,7 +956,7 @@
m.Ninit = nil
if n.Op == OCLOSURE {
- Fatal("cannot inline function containing closure: %v", Nconv(n, obj.FmtSign))
+ Fatalf("cannot inline function containing closure: %v", Nconv(n, obj.FmtSign))
}
m.Left = inlsubst(n.Left)
diff --git a/src/cmd/compile/internal/gc/lex.go b/src/cmd/compile/internal/gc/lex.go
index d6019b3..5150e2b 100644
--- a/src/cmd/compile/internal/gc/lex.go
+++ b/src/cmd/compile/internal/gc/lex.go
@@ -237,9 +237,11 @@
obj.Flagcount("y", "debug declarations in canned imports (with -d)", &Debug['y'])
var flag_shared int
var flag_dynlink bool
+ if Thearch.Thechar == '6' || Thearch.Thechar == '5' {
+ obj.Flagcount("shared", "generate code that can be linked into a shared library", &flag_shared)
+ }
if Thearch.Thechar == '6' {
obj.Flagcount("largemodel", "generate code that assumes a large memory model", &flag_largemodel)
- obj.Flagcount("shared", "generate code that can be linked into a shared library", &flag_shared)
flag.BoolVar(&flag_dynlink, "dynlink", false, "support references to Go symbols defined in other shared libraries")
}
obj.Flagstr("cpuprofile", "write cpu profile to `file`", &cpuprofile)
@@ -305,7 +307,7 @@
Thearch.Betypeinit()
if Widthptr == 0 {
- Fatal("betypeinit failed")
+ Fatalf("betypeinit failed")
}
lexinit()
@@ -317,6 +319,7 @@
dclcontext = PEXTERN
nerrors = 0
lexlineno = 1
+ const BOM = 0xFEFF
for _, infile = range flag.Args() {
linehistpush(infile)
@@ -336,7 +339,7 @@
curio.last = 0
// Skip initial BOM if present.
- if obj.Bgetrune(curio.bin) != obj.BOM {
+ if obj.Bgetrune(curio.bin) != BOM {
obj.Bungetrune(curio.bin)
}
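Note: the lexer now uses a local BOM constant rather than obj.BOM; U+FEFF is the Unicode byte order mark, tolerated only as the very first rune of a file. A sketch of the same skip over a bufio.Reader (rd is a hypothetical name):

    const BOM = 0xFEFF // U+FEFF, Unicode byte order mark
    if r, _, err := rd.ReadRune(); err == nil && r != BOM {
        rd.UnreadRune() // ordinary first rune: push it back
    }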
@@ -360,7 +363,7 @@
mkpackage(localpkg.Name) // final import not used checks
lexfini()
- typecheckok = 1
+ typecheckok = true
if Debug['f'] != 0 {
frame(1)
}
@@ -421,10 +424,10 @@
if Debug['l'] > 1 {
// Typecheck imported function bodies if debug['l'] > 1,
// otherwise lazily when used or re-exported.
- for l := importlist; l != nil; l = l.Next {
- if l.N.Func.Inl != nil {
+ for _, n := range importlist {
+ if n.Func.Inl != nil {
saveerrors()
- typecheckinl(l.N)
+ typecheckinl(n)
}
}
@@ -435,11 +438,13 @@
if Debug['l'] != 0 {
// Find functions that can be inlined and clone them before walk expands them.
- visitBottomUp(xtop, func(list *NodeList, recursive bool) {
- for l := list; l != nil; l = l.Next {
- if l.N.Op == ODCLFUNC {
- caninl(l.N)
- inlcalls(l.N)
+ visitBottomUp(xtop, func(list []*Node, recursive bool) {
+ // TODO: use a range statement here if the order does not matter
+ for i := len(list) - 1; i >= 0; i-- {
+ n := list[i]
+ if n.Op == ODCLFUNC {
+ caninl(n)
+ inlcalls(n)
}
}
})
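Note: the downward index loop mirrors the traversal order of the old NodeList version; a plain range would visit front to back, which the TODO above suggests may be fine once the order question is settled. Both spellings, with visit as a hypothetical stand-in:

    for i := len(list) - 1; i >= 0; i-- { visit(list[i]) } // back to front
    for _, n := range list { visit(n) }                    // front to back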
@@ -597,11 +602,11 @@
// if there is an array.6 in the array.a library,
// want to find all of array.a, not just array.6.
file = fmt.Sprintf("%s.a", name)
- if obj.Access(file, 0) >= 0 {
+ if _, err := os.Stat(file); err == nil {
return file, true
}
file = fmt.Sprintf("%s.o", name)
- if obj.Access(file, 0) >= 0 {
+ if _, err := os.Stat(file); err == nil {
return file, true
}
return "", false
@@ -619,11 +624,11 @@
for p := idirs; p != nil; p = p.link {
file = fmt.Sprintf("%s/%s.a", p.dir, name)
- if obj.Access(file, 0) >= 0 {
+ if _, err := os.Stat(file); err == nil {
return file, true
}
file = fmt.Sprintf("%s/%s.o", p.dir, name)
- if obj.Access(file, 0) >= 0 {
+ if _, err := os.Stat(file); err == nil {
return file, true
}
}
@@ -640,11 +645,11 @@
}
file = fmt.Sprintf("%s/pkg/%s_%s%s%s/%s.a", goroot, goos, goarch, suffixsep, suffix, name)
- if obj.Access(file, 0) >= 0 {
+ if _, err := os.Stat(file); err == nil {
return file, true
}
file = fmt.Sprintf("%s/pkg/%s_%s%s%s/%s.o", goroot, goos, goarch, suffixsep, suffix, name)
- if obj.Access(file, 0) >= 0 {
+ if _, err := os.Stat(file); err == nil {
return file, true
}
}
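Note: the os.Stat probes above replace obj.Access. err == nil confirms the file exists; a non-nil error is not necessarily "missing" (it could be a permission failure), but for a loop that probes several candidate paths that distinction is harmless:

    if _, err := os.Stat(file); err == nil {
        return file, true // the candidate path exists
    }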
@@ -740,7 +745,7 @@
// If we already saw that package, feed a dummy statement
// to the lexer to avoid parsing export data twice.
- if importpkg.Imported != 0 {
+ if importpkg.Imported {
tag := ""
if importpkg.Safe {
tag = "safe"
@@ -751,7 +756,7 @@
return
}
- importpkg.Imported = 1
+ importpkg.Imported = true
var err error
var imp *obj.Biobuf
@@ -799,7 +804,7 @@
curio.peekc1 = 0
curio.infile = file
curio.nlsemi = 0
- typecheckok = 1
+ typecheckok = true
var c int32
for {
@@ -836,7 +841,7 @@
pushedio.bin = nil
incannedimport = 0
- typecheckok = 0
+ typecheckok = false
}
func cannedimports(file string, cp string) {
@@ -852,7 +857,7 @@
curio.nlsemi = 0
curio.importsafe = false
- typecheckok = 1
+ typecheckok = true
incannedimport = 1
}
@@ -1623,6 +1628,9 @@
}
if verb == "go:systemstack" {
+ if compiling_runtime == 0 {
+ Yyerror("//go:systemstack only allowed in runtime")
+ }
systemstack = true
return c
}
@@ -2200,7 +2208,7 @@
etype = syms[i].etype
if etype != Txxx {
if etype < 0 || etype >= len(Types) {
- Fatal("lexinit: %s bad etype", s.Name)
+ Fatalf("lexinit: %s bad etype", s.Name)
}
s1 = Pkglookup(syms[i].name, builtinpkg)
t = Types[etype]
@@ -2281,20 +2289,20 @@
rcvr.Type = typ(TFIELD)
rcvr.Type.Type = Ptrto(typ(TSTRUCT))
- rcvr.Funarg = 1
+ rcvr.Funarg = true
in := typ(TSTRUCT)
- in.Funarg = 1
+ in.Funarg = true
out := typ(TSTRUCT)
out.Type = typ(TFIELD)
out.Type.Type = Types[TSTRING]
- out.Funarg = 1
+ out.Funarg = true
f := typ(TFUNC)
*getthis(f) = rcvr
*Getoutarg(f) = out
*getinarg(f) = in
f.Thistuple = 1
f.Intuple = 0
- f.Outnamed = 0
+ f.Outnamed = false
f.Outtuple = 1
t := typ(TINTER)
t.Type = typ(TFIELD)
diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go
index d2ac813..ec74009 100644
--- a/src/cmd/compile/internal/gc/obj.go
+++ b/src/cmd/compile/internal/gc/obj.go
@@ -89,9 +89,6 @@
dumpglobls()
externdcl = tmp
- zero := Pkglookup("zerovalue", Runtimepkg)
- ggloblsym(zero, int32(zerosize), obj.DUPOK|obj.RODATA)
-
dumpdata()
obj.Writeobjdirect(Ctxt, bout)
@@ -120,7 +117,7 @@
}
if n.Type == nil {
- Fatal("external %v nil type\n", n)
+ Fatalf("external %v nil type\n", n)
}
if n.Class == PFUNC {
continue
@@ -279,7 +276,7 @@
ggloblsym(sym, int32(off), obj.NOPTR|obj.LOCAL)
if nam.Op != ONAME {
- Fatal("slicebytes %v", nam)
+ Fatalf("slicebytes %v", nam)
}
off = int(nam.Xoffset)
off = dsymptr(nam.Sym, off, sym, 0)
diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go
index 799a17e..c783d64 100644
--- a/src/cmd/compile/internal/gc/order.go
+++ b/src/cmd/compile/internal/gc/order.go
@@ -182,7 +182,7 @@
return a
}
- Fatal("ordersafeexpr %v", Oconv(int(n.Op), 0))
+ Fatalf("ordersafeexpr %v", Oconv(int(n.Op), 0))
return nil // not reached
}
@@ -336,8 +336,8 @@
// Copyret emits t1, t2, ... = n, where n is a function call,
// and then returns the list t1, t2, ....
func copyret(n *Node, order *Order) *NodeList {
- if n.Type.Etype != TSTRUCT || n.Type.Funarg == 0 {
- Fatal("copyret %v %d", n.Type, n.Left.Type.Outtuple)
+ if n.Type.Etype != TSTRUCT || !n.Type.Funarg {
+ Fatalf("copyret %v %d", n.Type, n.Left.Type.Outtuple)
}
var l1 *NodeList
@@ -403,7 +403,7 @@
func ordermapassign(n *Node, order *Order) {
switch n.Op {
default:
- Fatal("ordermapassign %v", Oconv(int(n.Op), 0))
+ Fatalf("ordermapassign %v", Oconv(int(n.Op), 0))
case OAS:
order.out = list(order.out, n)
@@ -462,7 +462,7 @@
switch n.Op {
default:
- Fatal("orderstmt %v", Oconv(int(n.Op), 0))
+ Fatalf("orderstmt %v", Oconv(int(n.Op), 0))
case OVARKILL:
order.out = list(order.out, n)
@@ -704,7 +704,7 @@
orderexpr(&n.Right, order, nil)
switch n.Type.Etype {
default:
- Fatal("orderstmt range %v", n.Type)
+ Fatalf("orderstmt range %v", n.Type)
// Mark []byte(str) range expression to reuse string backing storage.
// It is safe because the storage cannot be mutated.
@@ -773,7 +773,7 @@
var r *Node
for l := n.List; l != nil; l = l.Next {
if l.N.Op != OXCASE {
- Fatal("order select case %v", Oconv(int(l.N.Op), 0))
+ Fatalf("order select case %v", Oconv(int(l.N.Op), 0))
}
r = l.N.Left
setlineno(l.N)
@@ -781,7 +781,7 @@
// Append any new body prologue to ninit.
// The next loop will insert ninit into nbody.
if l.N.Ninit != nil {
- Fatal("order select ninit")
+ Fatalf("order select ninit")
}
if r != nil {
switch r.Op {
@@ -927,7 +927,7 @@
orderexpr(&n.Left, order, nil)
for l := n.List; l != nil; l = l.Next {
if l.N.Op != OXCASE {
- Fatal("order switch case %v", Oconv(int(l.N.Op), 0))
+ Fatalf("order switch case %v", Oconv(int(l.N.Op), 0))
}
orderexprlistinplace(l.N.List, order)
orderblock(&l.N.Nbody)
diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go
index 33c600a..67fe8e6 100644
--- a/src/cmd/compile/internal/gc/pgen.go
+++ b/src/cmd/compile/internal/gc/pgen.go
@@ -86,7 +86,7 @@
func gvardefx(n *Node, as int) {
if n == nil {
- Fatal("gvardef nil")
+ Fatalf("gvardef nil")
}
if n.Op != ONAME {
Yyerror("gvardef %v; %v", Oconv(int(n.Op), obj.FmtSharp), n)
@@ -123,7 +123,7 @@
func gcsymdup(s *Sym) {
ls := Linksym(s)
if len(ls.R) > 0 {
- Fatal("cannot rosymdup %s with relocations", ls.Name)
+ Fatalf("cannot rosymdup %s with relocations", ls.Name)
}
ls.Name = fmt.Sprintf("gclocals·%x", md5.Sum(ls.P))
ls.Dupok = 1
@@ -274,7 +274,7 @@
dowidth(n.Type)
w = n.Type.Width
if w >= Thearch.MAXWIDTH || w < 0 {
- Fatal("bad width")
+ Fatalf("bad width")
}
Stksize += w
Stksize = Rnd(Stksize, int64(n.Type.Align))
@@ -315,7 +315,7 @@
// Ideally we wouldn't see any integer types here, but we do.
if n.Type == nil || (!Isptr[n.Type.Etype] && !Isint[n.Type.Etype] && n.Type.Etype != TUNSAFEPTR) {
Dump("checknil", n)
- Fatal("bad checknil")
+ Fatalf("bad checknil")
}
if ((Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9') && n.Op != OREGISTER) || !n.Addable || n.Op == OLITERAL {
@@ -374,7 +374,7 @@
// set up domain for labels
clearlabels()
- if Curfn.Type.Outnamed != 0 {
+ if Curfn.Type.Outnamed {
// add clearing of the output parameters
var save Iter
t := Structfirst(&save, Getoutarg(Curfn.Type))
@@ -395,7 +395,7 @@
goto ret
}
- Hasdefer = 0
+ hasdefer = false
walk(Curfn)
if nerrors != 0 {
goto ret
@@ -498,7 +498,7 @@
// TODO: Determine when the final cgen_ret can be omitted. Perhaps always?
cgen_ret(nil)
- if Hasdefer != 0 {
+ if hasdefer {
// deferreturn pretends to have one uintptr argument.
// Reserve space for it so stack scanner is happy.
if Maxarg < int64(Widthptr) {
diff --git a/src/cmd/compile/internal/gc/plive.go b/src/cmd/compile/internal/gc/plive.go
index efaf69f..fa8bc20 100644
--- a/src/cmd/compile/internal/gc/plive.go
+++ b/src/cmd/compile/internal/gc/plive.go
@@ -95,7 +95,7 @@
func xmalloc(size uint32) interface{} {
result := (interface{})(make([]byte, size))
if result == nil {
- Fatal("malloc failed")
+ Fatalf("malloc failed")
}
return result
}
@@ -103,7 +103,7 @@
// Constructs a new basic block containing a single instruction.
func newblock(prog *obj.Prog) *BasicBlock {
if prog == nil {
- Fatal("newblock: prog cannot be nil")
+ Fatalf("newblock: prog cannot be nil")
}
result := new(BasicBlock)
result.rpo = -1
@@ -118,7 +118,7 @@
// Frees a basic block and all of its leaf data structures.
func freeblock(bb *BasicBlock) {
if bb == nil {
- Fatal("freeblock: cannot free nil")
+ Fatalf("freeblock: cannot free nil")
}
}
@@ -126,10 +126,10 @@
// to a successor of from.
func addedge(from *BasicBlock, to *BasicBlock) {
if from == nil {
- Fatal("addedge: from is nil")
+ Fatalf("addedge: from is nil")
}
if to == nil {
- Fatal("addedge: to is nil")
+ Fatalf("addedge: to is nil")
}
from.succ = append(from.succ, to)
to.pred = append(to.pred, from)
@@ -290,10 +290,10 @@
// is a call to a specific package qualified function name.
func iscall(prog *obj.Prog, name *obj.LSym) bool {
if prog == nil {
- Fatal("iscall: prog is nil")
+ Fatalf("iscall: prog is nil")
}
if name == nil {
- Fatal("iscall: function name is nil")
+ Fatalf("iscall: function name is nil")
}
if prog.As != obj.ACALL {
return false
@@ -363,14 +363,14 @@
pred := selectgo
for {
if len(pred.pred) == 0 {
- Fatal("selectgo does not have a newselect")
+ Fatalf("selectgo does not have a newselect")
}
pred = pred.pred[0]
if blockany(pred, isselectcommcasecall) {
// A select comm case block should have exactly one
// successor.
if len(pred.succ) != 1 {
- Fatal("select comm case has too many successors")
+ Fatalf("select comm case has too many successors")
}
succ = pred.succ[0]
@@ -379,7 +379,7 @@
// and the branch should lead to the select case
// statements block.
if len(succ.succ) != 2 {
- Fatal("select comm case successor has too many successors")
+ Fatalf("select comm case successor has too many successors")
}
// Add the block as a successor of the selectgo block.
@@ -429,7 +429,7 @@
Thearch.Proginfo(p)
if p.To.Type == obj.TYPE_BRANCH {
if p.To.Val == nil {
- Fatal("prog branch to nil")
+ Fatalf("prog branch to nil")
}
if p.To.Val.(*obj.Prog).Opt == nil {
p.To.Val.(*obj.Prog).Opt = newblock(p.To.Val.(*obj.Prog))
@@ -524,7 +524,7 @@
if bb.rpo == -1 {
fmt.Printf("newcfg: unreachable basic block for %v\n", bb.last)
printcfg(cfg)
- Fatal("newcfg: invalid control flow graph")
+ Fatalf("newcfg: invalid control flow graph")
}
return cfg
@@ -626,7 +626,7 @@
goto Next
}
if pos >= int32(len(vars)) || vars[pos] != from.Node {
- Fatal("bad bookkeeping in liveness %v %d", Nconv(from.Node.(*Node), 0), pos)
+ Fatalf("bad bookkeeping in liveness %v %d", Nconv(from.Node.(*Node), 0), pos)
}
if ((from.Node).(*Node)).Addrtaken {
bvset(avarinit, pos)
@@ -655,7 +655,7 @@
return
}
if pos >= int32(len(vars)) || vars[pos] != to.Node {
- Fatal("bad bookkeeping in liveness %v %d", Nconv(to.Node.(*Node), 0), pos)
+ Fatalf("bad bookkeeping in liveness %v %d", Nconv(to.Node.(*Node), 0), pos)
}
if ((to.Node).(*Node)).Addrtaken {
if prog.As != obj.AVARKILL {
@@ -718,7 +718,7 @@
// Frees the liveness structure and all of its leaf data structures.
func freeliveness(lv *Liveness) {
if lv == nil {
- Fatal("freeliveness: cannot free nil")
+ Fatalf("freeliveness: cannot free nil")
}
}
@@ -890,7 +890,7 @@
// accounts for 40% of the 6g execution time.
func onebitwalktype1(t *Type, xoffset *int64, bv Bvec) {
if t.Align > 0 && *xoffset&int64(t.Align-1) != 0 {
- Fatal("onebitwalktype1: invalid initial alignment, %v", t)
+ Fatalf("onebitwalktype1: invalid initial alignment, %v", t)
}
switch t.Etype {
@@ -919,7 +919,7 @@
TCHAN,
TMAP:
if *xoffset&int64(Widthptr-1) != 0 {
- Fatal("onebitwalktype1: invalid alignment, %v", t)
+ Fatalf("onebitwalktype1: invalid alignment, %v", t)
}
bvset(bv, int32(*xoffset/int64(Widthptr))) // pointer
*xoffset += t.Width
@@ -927,7 +927,7 @@
case TSTRING:
// struct { byte *str; intgo len; }
if *xoffset&int64(Widthptr-1) != 0 {
- Fatal("onebitwalktype1: invalid alignment, %v", t)
+ Fatalf("onebitwalktype1: invalid alignment, %v", t)
}
bvset(bv, int32(*xoffset/int64(Widthptr))) // pointer in first slot
*xoffset += t.Width
@@ -937,7 +937,7 @@
// or, when isnilinter(t)==true:
// struct { Type *type; void *data; }
if *xoffset&int64(Widthptr-1) != 0 {
- Fatal("onebitwalktype1: invalid alignment, %v", t)
+ Fatalf("onebitwalktype1: invalid alignment, %v", t)
}
bvset(bv, int32(*xoffset/int64(Widthptr))) // pointer in first slot
bvset(bv, int32(*xoffset/int64(Widthptr)+1)) // pointer in second slot
@@ -947,12 +947,12 @@
// The value of t->bound is -1 for slice types and >=0
// for fixed array types. All other values are invalid.
if t.Bound < -1 {
- Fatal("onebitwalktype1: invalid bound, %v", t)
+ Fatalf("onebitwalktype1: invalid bound, %v", t)
}
if Isslice(t) {
// struct { byte *array; uintgo len; uintgo cap; }
if *xoffset&int64(Widthptr-1) != 0 {
- Fatal("onebitwalktype1: invalid TARRAY alignment, %v", t)
+ Fatalf("onebitwalktype1: invalid TARRAY alignment, %v", t)
}
bvset(bv, int32(*xoffset/int64(Widthptr))) // pointer in first slot (BitsPointer)
*xoffset += t.Width
@@ -975,7 +975,7 @@
*xoffset += t.Width - o
default:
- Fatal("onebitwalktype1: unexpected type, %v", t)
+ Fatalf("onebitwalktype1: unexpected type, %v", t)
}
}
@@ -1346,7 +1346,7 @@
if pos < 0 {
// the first block we encounter should have the ATEXT so
// at no point should pos ever be less than zero.
- Fatal("livenessepilogue")
+ Fatalf("livenessepilogue")
}
bvcopy(livein, bb.liveout)
@@ -1685,15 +1685,13 @@
for j = 0; j < len(lv.vars); j++ {
n = lv.vars[j]
if islive(n, args, locals) {
- tmp9 := printed
- printed++
- if tmp9 != 0 {
+ if printed != 0 {
fmt.Printf(",")
}
fmt.Printf("%v", n)
+ printed++
}
}
-
fmt.Printf("\n")
}
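Note: the rewrite above replaces a simulated post-increment (tmp9) with the usual check-then-increment idiom for comma-separated output. The pattern in isolation (items is a hypothetical slice):

    printed := 0
    for _, n := range items {
        if printed != 0 {
            fmt.Printf(",")
        }
        fmt.Printf("%v", n)
        printed++
    }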
diff --git a/src/cmd/compile/internal/gc/popt.go b/src/cmd/compile/internal/gc/popt.go
index 4fc562c..0b3bde5 100644
--- a/src/cmd/compile/internal/gc/popt.go
+++ b/src/cmd/compile/internal/gc/popt.go
@@ -306,11 +306,11 @@
if p.To.Type == obj.TYPE_BRANCH {
if p.To.Val == nil {
- Fatal("pnil %v", p)
+ Fatalf("pnil %v", p)
}
f1 = p.To.Val.(*obj.Prog).Opt.(*Flow)
if f1 == nil {
- Fatal("fnil %v / %v", p, p.To.Val.(*obj.Prog))
+ Fatalf("fnil %v / %v", p, p.To.Val.(*obj.Prog))
}
if f1 == f {
//fatal("self loop %v", p);
@@ -380,7 +380,7 @@
for rpo1 < rpo2 {
t = idom[rpo2]
if t >= rpo2 {
- Fatal("bad idom")
+ Fatalf("bad idom")
}
rpo2 = t
}
@@ -435,7 +435,7 @@
d := postorder(g.Start, rpo2r, 0)
nr := int32(g.Num)
if d > nr {
- Fatal("too many reg nodes %d %d", d, nr)
+ Fatalf("too many reg nodes %d %d", d, nr)
}
nr = d
var r1 *Flow
@@ -605,7 +605,7 @@
for f := g.Start; f != nil; f = f.Link {
p := f.Prog
if p.From.Node != nil && ((p.From.Node).(*Node)).Opt() != nil && p.To.Node != nil && ((p.To.Node).(*Node)).Opt() != nil {
- Fatal("double node %v", p)
+ Fatalf("double node %v", p)
}
v = nil
n, _ = p.From.Node.(*Node)
@@ -655,7 +655,7 @@
fmt.Printf("drop write-only %v\n", v.node.Sym)
}
} else {
- Fatal("temp used and not set: %v", p)
+ Fatalf("temp used and not set: %v", p)
}
nkill++
continue
diff --git a/src/cmd/compile/internal/gc/racewalk.go b/src/cmd/compile/internal/gc/racewalk.go
index f53e8ec..9301d87 100644
--- a/src/cmd/compile/internal/gc/racewalk.go
+++ b/src/cmd/compile/internal/gc/racewalk.go
@@ -116,7 +116,7 @@
}
setlineno(n)
if init == nil {
- Fatal("racewalk: bad init list")
+ Fatalf("racewalk: bad init list")
}
if init == &n.Ninit {
// If init == &n->ninit and n->ninit is non-nil,
@@ -136,7 +136,7 @@
switch n.Op {
default:
- Fatal("racewalk: unknown node type %v", Oconv(int(n.Op), 0))
+ Fatalf("racewalk: unknown node type %v", Oconv(int(n.Op), 0))
case OAS, OASWB, OAS2FUNC:
racewalknode(&n.Left, init, 1, 0)
diff --git a/src/cmd/compile/internal/gc/range.go b/src/cmd/compile/internal/gc/range.go
index 26f05d9..dbfd674 100644
--- a/src/cmd/compile/internal/gc/range.go
+++ b/src/cmd/compile/internal/gc/range.go
@@ -165,7 +165,7 @@
var init *NodeList
switch t.Etype {
default:
- Fatal("walkrange")
+ Fatalf("walkrange")
// Lower n into runtime·memclr if possible, for
// fast zeroing of slices and arrays (issue 5373).
diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go
index 1ac4a03..f579ef8 100644
--- a/src/cmd/compile/internal/gc/reflect.go
+++ b/src/cmd/compile/internal/gc/reflect.go
@@ -192,7 +192,7 @@
field = append(field, ovf)
// link up fields
- bucket.Noalg = 1
+ bucket.Noalg = true
bucket.Local = t.Local
bucket.Type = field[0]
for n := int32(0); n < int32(len(field)-1); n++ {
@@ -232,7 +232,7 @@
field[7] = makefield("overflow", Types[TUNSAFEPTR])
h := typ(TSTRUCT)
- h.Noalg = 1
+ h.Noalg = true
h.Local = t.Local
h.Type = field[0]
for n := int32(0); n < int32(len(field)-1); n++ {
@@ -284,7 +284,7 @@
// build iterator struct holding the above fields
i := typ(TSTRUCT)
- i.Noalg = 1
+ i.Noalg = true
i.Type = field[0]
for n := int32(0); n < int32(len(field)-1); n++ {
field[n].Down = field[n+1]
@@ -364,13 +364,13 @@
var method *Sym
for f := mt.Xmethod; f != nil; f = f.Down {
if f.Etype != TFIELD {
- Fatal("methods: not field %v", f)
+ Fatalf("methods: not field %v", f)
}
if f.Type.Etype != TFUNC || f.Type.Thistuple == 0 {
- Fatal("non-method on %v method %v %v\n", mt, f.Sym, f)
+ Fatalf("non-method on %v method %v %v\n", mt, f.Sym, f)
}
if getthisx(f.Type).Type == nil {
- Fatal("receiver with no type on %v method %v %v\n", mt, f.Sym, f)
+ Fatalf("receiver with no type on %v method %v %v\n", mt, f.Sym, f)
}
if f.Nointerface {
continue
@@ -401,7 +401,7 @@
a.name = method.Name
if !exportname(method.Name) {
if method.Pkg == nil {
- Fatal("methods: missing package")
+ Fatalf("methods: missing package")
}
a.pkg = method.Pkg
}
@@ -445,7 +445,7 @@
var last *Sig
for f := t.Type; f != nil; f = f.Down {
if f.Etype != TFIELD {
- Fatal("imethods: not field")
+ Fatalf("imethods: not field")
}
if f.Type.Etype != TFUNC || f.Sym == nil {
continue
@@ -455,7 +455,7 @@
a.name = method.Name
if !exportname(method.Name) {
if method.Pkg == nil {
- Fatal("imethods: missing package")
+ Fatalf("imethods: missing package")
}
a.pkg = method.Pkg
}
@@ -465,7 +465,7 @@
a.type_ = methodfunc(f.Type, nil)
if last != nil && sigcmp(last, a) >= 0 {
- Fatal("sigcmp vs sortinter %s %s", last.name, a.name)
+ Fatalf("sigcmp vs sortinter %s %s", last.name, a.name)
}
if last == nil {
all = a
@@ -707,7 +707,7 @@
ret = true
case TFIELD:
- Fatal("haspointers: unexpected type, %v", t)
+ Fatalf("haspointers: unexpected type, %v", t)
}
t.Haspointers = 1 + uint8(obj.Bool2int(ret))
@@ -758,7 +758,7 @@
return lastPtrField.Width + typeptrdata(lastPtrField.Type)
default:
- Fatal("typeptrdata: unexpected type, %v", t)
+ Fatalf("typeptrdata: unexpected type, %v", t)
return 0
}
}
@@ -772,7 +772,7 @@
func dcommontype(s *Sym, ot int, t *Type) int {
if ot != 0 {
- Fatal("dcommontype %d", ot)
+ Fatalf("dcommontype %d", ot)
}
sizeofAlg := 2 * Widthptr
@@ -794,20 +794,8 @@
sptr = weaktypesym(tptr)
}
- // All (non-reflect-allocated) Types share the same zero object.
- // Each place in the compiler where a pointer to the zero object
- // might be returned by a runtime call (map access return value,
- // 2-arg type cast) declares the size of the zerovalue it needs.
- // The linker magically takes the max of all the sizes.
- zero := Pkglookup("zerovalue", Runtimepkg)
-
gcsym, useGCProg, ptrdata := dgcsym(t)
- // We use size 0 here so we get the pointer to the zero value,
- // but don't allocate space for the zero value unless we need it.
- // TODO: how do we get this symbol into bss? We really want
- // a read-only bss, but I don't think such a thing exists.
-
// ../../pkg/reflect/type.go:/^type.commonType
// actual type structure
// type commonType struct {
@@ -823,7 +811,6 @@
// string *string
// *extraType
// ptrToThis *Type
- // zero unsafe.Pointer
// }
ot = duintptr(s, ot, uint64(t.Width))
ot = duintptr(s, ot, uint64(ptrdata))
@@ -838,7 +825,7 @@
i = 1
}
if i&(i-1) != 0 {
- Fatal("invalid alignment %d for %v", t.Align, t)
+ Fatalf("invalid alignment %d for %v", t.Align, t)
}
ot = duint8(s, ot, t.Align) // align
ot = duint8(s, ot, t.Align) // fieldAlign
@@ -876,7 +863,6 @@
ot += Widthptr
ot = dsymptr(s, ot, sptr, 0) // ptrto type
- ot = dsymptr(s, ot, zero, 0) // ptr to zero value
return ot
}
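Note: dropping the trailing zero pointer from commonType is why every xt assignment below moves from ot - 3*Widthptr to ot - 2*Widthptr: xt locates the *extraType slot relative to the end of the record, and the tail shrank by one pointer-sized word:

    // tail of commonType, one word per field:
    //   before: ... | *extraType | ptrToThis | zero |  -> xt = ot - 3*Widthptr
    //   after:  ... | *extraType | ptrToThis |        -> xt = ot - 2*Widthptr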
@@ -918,7 +904,7 @@
func typenamesym(t *Type) *Sym {
if t == nil || (Isptr[t.Etype] && t.Type == nil) || isideal(t) {
- Fatal("typename %v", t)
+ Fatalf("typename %v", t)
}
s := typesym(t)
if s.Def == nil {
@@ -991,7 +977,7 @@
case TARRAY:
if Isslice(t) {
- Fatal("slice can't be a map key: %v", t)
+ Fatalf("slice can't be a map key: %v", t)
}
return isreflexive(t.Type)
@@ -1005,7 +991,7 @@
return true
default:
- Fatal("bad type for map key: %v", t)
+ Fatalf("bad type for map key: %v", t)
return false
}
}
@@ -1019,7 +1005,7 @@
}
if isideal(t) {
- Fatal("dtypesym %v", t)
+ Fatalf("dtypesym %v", t)
}
s := typesym(t)
@@ -1059,7 +1045,7 @@
switch t.Etype {
default:
ot = dcommontype(s, ot, t)
- xt = ot - 3*Widthptr
+ xt = ot - 2*Widthptr
case TARRAY:
if t.Bound >= 0 {
@@ -1071,7 +1057,7 @@
t2.Bound = -1 // slice
s2 := dtypesym(t2)
ot = dcommontype(s, ot, t)
- xt = ot - 3*Widthptr
+ xt = ot - 2*Widthptr
ot = dsymptr(s, ot, s1, 0)
ot = dsymptr(s, ot, s2, 0)
ot = duintptr(s, ot, uint64(t.Bound))
@@ -1080,7 +1066,7 @@
s1 := dtypesym(t.Type)
ot = dcommontype(s, ot, t)
- xt = ot - 3*Widthptr
+ xt = ot - 2*Widthptr
ot = dsymptr(s, ot, s1, 0)
}
@@ -1089,7 +1075,7 @@
s1 := dtypesym(t.Type)
ot = dcommontype(s, ot, t)
- xt = ot - 3*Widthptr
+ xt = ot - 2*Widthptr
ot = dsymptr(s, ot, s1, 0)
ot = duintptr(s, ot, uint64(t.Chan))
@@ -1108,7 +1094,7 @@
}
ot = dcommontype(s, ot, t)
- xt = ot - 3*Widthptr
+ xt = ot - 2*Widthptr
ot = duint8(s, ot, uint8(obj.Bool2int(isddd)))
// two slice headers: in and out.
@@ -1147,7 +1133,7 @@
// ../../runtime/type.go:/InterfaceType
ot = dcommontype(s, ot, t)
- xt = ot - 3*Widthptr
+ xt = ot - 2*Widthptr
ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint)
ot = duintxx(s, ot, uint64(n), Widthint)
ot = duintxx(s, ot, uint64(n), Widthint)
@@ -1167,7 +1153,7 @@
s3 := dtypesym(mapbucket(t))
s4 := dtypesym(hmap(t))
ot = dcommontype(s, ot, t)
- xt = ot - 3*Widthptr
+ xt = ot - 2*Widthptr
ot = dsymptr(s, ot, s1, 0)
ot = dsymptr(s, ot, s2, 0)
ot = dsymptr(s, ot, s3, 0)
@@ -1203,7 +1189,7 @@
s1 := dtypesym(t.Type)
ot = dcommontype(s, ot, t)
- xt = ot - 3*Widthptr
+ xt = ot - 2*Widthptr
ot = dsymptr(s, ot, s1, 0)
// ../../runtime/type.go:/StructType
@@ -1217,7 +1203,7 @@
}
ot = dcommontype(s, ot, t)
- xt = ot - 3*Widthptr
+ xt = ot - 2*Widthptr
ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint)
ot = duintxx(s, ot, uint64(n), Widthint)
ot = duintxx(s, ot, uint64(n), Widthint)
@@ -1307,7 +1293,7 @@
// generate import strings for imported packages
for _, p := range pkgs {
- if p.Direct != 0 {
+ if p.Direct {
dimportpath(p)
}
}
@@ -1505,7 +1491,7 @@
func dgcprog(t *Type) (*Sym, int64) {
dowidth(t)
if t.Width == BADWIDTH {
- Fatal("dgcprog: %v badwidth", t)
+ Fatalf("dgcprog: %v badwidth", t)
}
sym := typesymprefix(".gcprog", t)
var p GCProg
@@ -1514,7 +1500,7 @@
offset := p.w.BitIndex() * int64(Widthptr)
p.end()
if ptrdata := typeptrdata(t); offset < ptrdata || offset > t.Width {
- Fatal("dgcprog: %v: offset=%d but ptrdata=%d size=%d", t, offset, ptrdata, t.Width)
+ Fatalf("dgcprog: %v: offset=%d but ptrdata=%d size=%d", t, offset, ptrdata, t.Width)
}
return sym, offset
}
@@ -1561,7 +1547,7 @@
}
switch t.Etype {
default:
- Fatal("GCProg.emit: unexpected type %v", t)
+ Fatalf("GCProg.emit: unexpected type %v", t)
case TSTRING:
p.w.Ptr(offset / int64(Widthptr))
@@ -1577,7 +1563,7 @@
}
if t.Bound == 0 {
// should have been handled by haspointers check above
- Fatal("GCProg.emit: empty array")
+ Fatalf("GCProg.emit: empty array")
}
// Flatten array-of-array-of-array to just a big array by multiplying counts.
diff --git a/src/cmd/compile/internal/gc/reg.go b/src/cmd/compile/internal/gc/reg.go
index 0fa0535..b3e9621 100644
--- a/src/cmd/compile/internal/gc/reg.go
+++ b/src/cmd/compile/internal/gc/reg.go
@@ -351,7 +351,7 @@
}
node = node.Orig
if node.Orig != node {
- Fatal("%v: bad node", Ctxt.Dconv(a))
+ Fatalf("%v: bad node", Ctxt.Dconv(a))
}
if node.Sym == nil || node.Sym.Name[0] == '.' {
return zbits
@@ -360,7 +360,7 @@
o := a.Offset
w := a.Width
if w < 0 {
- Fatal("bad width %d for %v", w, Ctxt.Dconv(a))
+ Fatalf("bad width %d for %v", w, Ctxt.Dconv(a))
}
flag := 0
@@ -396,7 +396,7 @@
if nvar >= NVAR {
if Debug['w'] > 1 && node != nil {
- Fatal("variable not optimized: %v", Nconv(node, obj.FmtSharp))
+ Fatalf("variable not optimized: %v", Nconv(node, obj.FmtSharp))
}
if Debug['v'] > 0 {
Warn("variable not optimized: %v", Nconv(node, obj.FmtSharp))
@@ -486,7 +486,7 @@
//
// Disable registerization for results if using defer, because the deferred func
// might recover and return, causing the current values to be used.
- if node.Class == PEXTERN || (Hasdefer != 0 && node.Class == PPARAMOUT) {
+ if node.Class == PEXTERN || (hasdefer && node.Class == PPARAMOUT) {
v.addr = 1
}
@@ -655,7 +655,7 @@
r.regno = 0
switch v.etype {
default:
- Fatal("unknown etype %d/%v", Bitno(b), Econv(int(v.etype), 0))
+ Fatalf("unknown etype %d/%v", Bitno(b), Econv(int(v.etype), 0))
case TINT8,
TUINT8,
@@ -1120,7 +1120,7 @@
// Currently we never generate three register forms.
// If we do, this will need to change.
if p.From3Type() != obj.TYPE_NONE {
- Fatal("regopt not implemented for from3")
+ Fatalf("regopt not implemented for from3")
}
bit = mkvar(f, &p.To)
@@ -1289,12 +1289,12 @@
for z := 0; z < BITS; z++ {
bit.b[z] = (r.refahead.b[z] | r.calahead.b[z]) &^ (externs.b[z] | params.b[z] | addrs.b[z] | consts.b[z])
}
- if bany(&bit) && f.Refset == 0 {
+ if bany(&bit) && !f.Refset {
// should never happen - all variables are preset
if Debug['w'] != 0 {
fmt.Printf("%v: used and not set: %v\n", f.Prog.Line(), &bit)
}
- f.Refset = 1
+ f.Refset = true
}
}
@@ -1309,11 +1309,11 @@
for z := 0; z < BITS; z++ {
bit.b[z] = r.set.b[z] &^ (r.refahead.b[z] | r.calahead.b[z] | addrs.b[z])
}
- if bany(&bit) && f.Refset == 0 {
+ if bany(&bit) && !f.Refset {
if Debug['w'] != 0 {
fmt.Printf("%v: set and not used: %v\n", f.Prog.Line(), &bit)
}
- f.Refset = 1
+ f.Refset = true
Thearch.Excise(f)
}
@@ -1472,7 +1472,7 @@
}
}
- Fatal("bad in bnum")
+ Fatalf("bad in bnum")
return 0
}
@@ -1499,10 +1499,10 @@
}
// Bitno reports the lowest index of a 1 bit in b.
-// It calls Fatal if there is no 1 bit.
+// It calls Fatalf if there is no 1 bit.
func Bitno(b uint64) int {
if b == 0 {
- Fatal("bad in bitno")
+ Fatalf("bad in bitno")
}
n := 0
if b&(1<<32-1) == 0 {
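Note: Bitno is a branchy count of trailing zeros. Go had no intrinsic for this in 2015; in current Go (1.9 and later) the same function can be written with math/bits, sketched here under that assumption:

    import "math/bits"

    func bitno(b uint64) int {
        if b == 0 {
            panic("bad in bitno") // the real Bitno calls Fatalf
        }
        return bits.TrailingZeros64(b)
    }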
diff --git a/src/cmd/compile/internal/gc/select.go b/src/cmd/compile/internal/gc/select.go
index db20778..83f53c1 100644
--- a/src/cmd/compile/internal/gc/select.go
+++ b/src/cmd/compile/internal/gc/select.go
@@ -20,7 +20,7 @@
ncase = l.N
setlineno(ncase)
if ncase.Op != OXCASE {
- Fatal("typecheckselect %v", Oconv(int(ncase.Op), 0))
+ Fatalf("typecheckselect %v", Oconv(int(ncase.Op), 0))
}
if ncase.List == nil {
@@ -90,7 +90,7 @@
func walkselect(sel *Node) {
if sel.List == nil && sel.Xoffset != 0 {
- Fatal("double walkselect") // already rewrote
+ Fatalf("double walkselect") // already rewrote
}
lno := int(setlineno(sel))
@@ -122,7 +122,7 @@
var ch *Node
switch n.Op {
default:
- Fatal("select %v", Oconv(int(n.Op), 0))
+ Fatalf("select %v", Oconv(int(n.Op), 0))
// ok already
case OSEND:
@@ -218,7 +218,7 @@
r.Ninit = cas.Ninit
switch n.Op {
default:
- Fatal("select %v", Oconv(int(n.Op), 0))
+ Fatalf("select %v", Oconv(int(n.Op), 0))
// if selectnbsend(c, v) { body } else { default body }
case OSEND:
@@ -284,7 +284,7 @@
} else {
switch n.Op {
default:
- Fatal("select %v", Oconv(int(n.Op), 0))
+ Fatalf("select %v", Oconv(int(n.Op), 0))
// selectsend(sel *byte, hchan *chan any, elem *any) (selected bool);
case OSEND:
@@ -335,7 +335,7 @@
sudog.List = list(sudog.List, Nod(ODCLFIELD, newname(Lookup("nrelease")), typenod(Types[TINT32])))
sudog.List = list(sudog.List, Nod(ODCLFIELD, newname(Lookup("waitlink")), typenod(Ptrto(Types[TUINT8]))))
typecheck(&sudog, Etype)
- sudog.Type.Noalg = 1
+ sudog.Type.Noalg = true
sudog.Type.Local = true
scase := Nod(OTSTRUCT, nil, nil)
@@ -347,7 +347,7 @@
scase.List = list(scase.List, Nod(ODCLFIELD, newname(Lookup("receivedp")), typenod(Ptrto(Types[TUINT8]))))
scase.List = list(scase.List, Nod(ODCLFIELD, newname(Lookup("releasetime")), typenod(Types[TUINT64])))
typecheck(&scase, Etype)
- scase.Type.Noalg = 1
+ scase.Type.Noalg = true
scase.Type.Local = true
sel := Nod(OTSTRUCT, nil, nil)
@@ -362,7 +362,7 @@
arr = Nod(OTARRAY, Nodintconst(int64(size)), typenod(Types[TUINT16]))
sel.List = list(sel.List, Nod(ODCLFIELD, newname(Lookup("pollorderarr")), arr))
typecheck(&sel, Etype)
- sel.Type.Noalg = 1
+ sel.Type.Noalg = true
sel.Type.Local = true
return sel.Type
diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go
index ce95839..a6eeaf0 100644
--- a/src/cmd/compile/internal/gc/sinit.go
+++ b/src/cmd/compile/internal/gc/sinit.go
@@ -19,7 +19,7 @@
)
var (
- initlist *NodeList
+ initlist []*Node
initplans map[*Node]*InitPlan
inittemps = make(map[*Node]*Node)
)
@@ -47,15 +47,12 @@
}
switch n.Class {
case PEXTERN, PFUNC:
- break
-
default:
if isblank(n) && n.Name.Curfn == nil && n.Name.Defn != nil && n.Name.Defn.Initorder == InitNotStarted {
// blank names initialization is part of init() but not
// when they are inside a function.
break
}
-
return
}
@@ -72,90 +69,43 @@
// Conversely, if there exists an initialization cycle involving
// a variable in the program, the tree walk will reach a cycle
// involving that variable.
- var nv *Node
if n.Class != PFUNC {
- nv = n
- goto foundinitloop
+ foundinitloop(n, n)
}
- for l := initlist; l.N != n; l = l.Next {
- if l.N.Class != PFUNC {
- nv = l.N
- goto foundinitloop
+ for i := len(initlist) - 1; i >= 0; i-- {
+ x := initlist[i]
+ if x == n {
+ break
+ }
+ if x.Class != PFUNC {
+ foundinitloop(n, x)
}
}
// The loop involves only functions, ok.
return
-
- // if there have already been errors printed,
- // those errors probably confused us and
- // there might not be a loop. let the user
- // fix those first.
- foundinitloop:
- Flusherrors()
-
- if nerrors > 0 {
- errorexit()
- }
-
- // There is a loop involving nv. We know about
- // n and initlist = n1 <- ... <- nv <- ... <- n <- ...
- fmt.Printf("%v: initialization loop:\n", nv.Line())
-
- // Build back pointers in initlist.
- for l := initlist; l != nil; l = l.Next {
- if l.Next != nil {
- l.Next.End = l
- }
- }
-
- // Print nv -> ... -> n1 -> n.
- var l *NodeList
- for l = initlist; l.N != nv; l = l.Next {
- }
- for ; l != nil; l = l.End {
- fmt.Printf("\t%v %v refers to\n", l.N.Line(), l.N.Sym)
- }
-
- // Print n -> ... -> nv.
- for l = initlist; l.N != n; l = l.Next {
- }
- for ; l.N != nv; l = l.End {
- fmt.Printf("\t%v %v refers to\n", l.N.Line(), l.N.Sym)
- }
- fmt.Printf("\t%v %v\n", nv.Line(), nv.Sym)
- errorexit()
}
// reached a new unvisited node.
n.Initorder = InitPending
-
- l := new(NodeList)
- if l == nil {
- Flusherrors()
- Yyerror("out of memory")
- errorexit()
- }
-
- l.Next = initlist
- l.N = n
- l.End = nil
- initlist = l
+ initlist = append(initlist, n)
// make sure that everything n depends on is initialized.
// n->defn is an assignment to n
if defn := n.Name.Defn; defn != nil {
switch defn.Op {
default:
- goto bad
+ Dump("defn", defn)
+ Fatalf("init1: bad defn")
case ODCLFUNC:
init2list(defn.Nbody, out)
case OAS:
if defn.Left != n {
- goto bad
+ Dump("defn", defn)
+ Fatalf("init1: bad defn")
}
if isblank(defn.Left) && candiscard(defn.Right) {
defn.Op = OEMPTY
@@ -190,18 +140,51 @@
}
}
- l = initlist
- initlist = l.Next
- if l.N != n {
- Fatal("bad initlist")
+ last := len(initlist) - 1
+ if initlist[last] != n {
+ Fatalf("bad initlist %v", initlist)
}
+ initlist[last] = nil // allow GC
+ initlist = initlist[:last]
n.Initorder = InitDone
return
+}
-bad:
- Dump("defn", n.Name.Defn)
- Fatal("init1: bad defn")
+// foundinitloop prints an init loop error and exits.
+func foundinitloop(node, visited *Node) {
+ // If there have already been errors printed,
+ // those errors probably confused us and
+ // there might not be a loop. Let the user
+ // fix those first.
+ Flusherrors()
+ if nerrors > 0 {
+ errorexit()
+ }
+
+ // Find the index of node and visited in the initlist.
+ var nodeindex, visitedindex int
+ for ; initlist[nodeindex] != node; nodeindex++ {
+ }
+ for ; initlist[visitedindex] != visited; visitedindex++ {
+ }
+
+ // There is a loop involving visited. We know about node and
+ // initlist = n1 <- ... <- visited <- ... <- node <- ...
+ fmt.Printf("%v: initialization loop:\n", visited.Line())
+
+ // Print visited -> ... -> n1 -> node.
+ for _, n := range initlist[visitedindex:] {
+ fmt.Printf("\t%v %v refers to\n", n.Line(), n.Sym)
+ }
+
+ // Print node -> ... -> visited.
+ for _, n := range initlist[nodeindex:visitedindex] {
+ fmt.Printf("\t%v %v refers to\n", n.Line(), n.Sym)
+ }
+
+ fmt.Printf("\t%v %v\n", visited.Line(), visited.Sym)
+ errorexit()
}
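Note: two refactors meet in this hunk: the goto-based error path becomes the foundinitloop helper, and initlist becomes a slice used as a stack. The pop nils the vacated slot because the truncated slice still shares its backing array, which would otherwise keep the popped *Node reachable. The idiom in isolation (stack, top, and use are hypothetical names):

    last := len(stack) - 1
    top := stack[last]
    stack[last] = nil // clear the slot so the backing array does not pin top
    stack = stack[:last]
    use(top)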
// recurse over n, doing init1 everywhere.
@@ -211,7 +194,7 @@
}
if n.Op == ONAME && n.Ninit != nil {
- Fatal("name %v with ninit: %v\n", n.Sym, Nconv(n, obj.FmtSign))
+ Fatalf("name %v with ninit: %v\n", n.Sym, Nconv(n, obj.FmtSign))
}
init1(n, out)
@@ -271,7 +254,7 @@
*/
func staticinit(n *Node, out **NodeList) bool {
if n.Op != ONAME || n.Class != PEXTERN || n.Name.Defn == nil || n.Name.Defn.Op != OAS {
- Fatal("staticinit")
+ Fatalf("staticinit")
}
lineno = n.Lineno
@@ -607,7 +590,7 @@
for nl := n.List; nl != nil; nl = nl.Next {
r = nl.N
if r.Op != OKEY {
- Fatal("structlit: rhs not OKEY: %v", r)
+ Fatalf("structlit: rhs not OKEY: %v", r)
}
index = r.Left
value = r.Right
@@ -654,7 +637,7 @@
if pass == 1 {
walkexpr(&a, init) // add any assignments in r to top
if a.Op != OAS {
- Fatal("structlit: not as")
+ Fatalf("structlit: not as")
}
a.Dodata = 2
} else {
@@ -675,7 +658,7 @@
for l := n.List; l != nil; l = l.Next {
r = l.N
if r.Op != OKEY {
- Fatal("arraylit: rhs not OKEY: %v", r)
+ Fatalf("arraylit: rhs not OKEY: %v", r)
}
index = r.Left
value = r.Right
@@ -722,7 +705,7 @@
if pass == 1 {
walkexpr(&a, init)
if a.Op != OAS {
- Fatal("arraylit: not as")
+ Fatalf("arraylit: not as")
}
a.Dodata = 2
} else {
@@ -851,7 +834,7 @@
for l := n.List; l != nil; l = l.Next {
r = l.N
if r.Op != OKEY {
- Fatal("slicelit: rhs not OKEY: %v", r)
+ Fatalf("slicelit: rhs not OKEY: %v", r)
}
index = r.Left
value = r.Right
@@ -909,7 +892,7 @@
r = l.N
if r.Op != OKEY {
- Fatal("maplit: rhs not OKEY: %v", r)
+ Fatalf("maplit: rhs not OKEY: %v", r)
}
index = r.Left
value = r.Right
@@ -960,7 +943,7 @@
r = l.N
if r.Op != OKEY {
- Fatal("maplit: rhs not OKEY: %v", r)
+ Fatalf("maplit: rhs not OKEY: %v", r)
}
index = r.Left
value = r.Right
@@ -1031,7 +1014,7 @@
r = l.N
if r.Op != OKEY {
- Fatal("maplit: rhs not OKEY: %v", r)
+ Fatalf("maplit: rhs not OKEY: %v", r)
}
index = r.Left
value = r.Right
@@ -1083,11 +1066,11 @@
t := n.Type
switch n.Op {
default:
- Fatal("anylit: not lit")
+ Fatalf("anylit: not lit")
case OPTRLIT:
if !Isptr[t.Etype] {
- Fatal("anylit: not ptr")
+ Fatalf("anylit: not ptr")
}
var r *Node
@@ -1113,7 +1096,7 @@
case OSTRUCTLIT:
if t.Etype != TSTRUCT {
- Fatal("anylit: not struct")
+ Fatalf("anylit: not struct")
}
if simplename(var_) && count(n.List) > 4 {
@@ -1153,7 +1136,7 @@
case OARRAYLIT:
if t.Etype != TARRAY {
- Fatal("anylit: not array")
+ Fatalf("anylit: not array")
}
if t.Bound < 0 {
slicelit(ctxt, n, var_, init)
@@ -1197,7 +1180,7 @@
case OMAPLIT:
if t.Etype != TMAP {
- Fatal("anylit: not map")
+ Fatalf("anylit: not map")
}
maplit(ctxt, n, var_, init)
}
@@ -1305,14 +1288,14 @@
initplans[n] = p
switch n.Op {
default:
- Fatal("initplan")
+ Fatalf("initplan")
case OARRAYLIT:
var a *Node
for l := n.List; l != nil; l = l.Next {
a = l.N
if a.Op != OKEY || !Smallintconst(a.Left) {
- Fatal("initplan arraylit")
+ Fatalf("initplan arraylit")
}
addvalue(p, n.Type.Type.Width*Mpgetfix(a.Left.Val().U.(*Mpint)), nil, a.Right)
}
@@ -1322,7 +1305,7 @@
for l := n.List; l != nil; l = l.Next {
a = l.N
if a.Op != OKEY || a.Left.Type == nil {
- Fatal("initplan structlit")
+ Fatalf("initplan structlit")
}
addvalue(p, a.Left.Type.Width, nil, a.Right)
}
@@ -1332,7 +1315,7 @@
for l := n.List; l != nil; l = l.Next {
a = l.N
if a.Op != OKEY {
- Fatal("initplan maplit")
+ Fatalf("initplan maplit")
}
addvalue(p, -1, a.Left, a.Right)
}
@@ -1378,7 +1361,7 @@
switch n.Val().Ctype() {
default:
Dump("unexpected literal", n)
- Fatal("iszero")
+ Fatalf("iszero")
case CTNIL:
return true
@@ -1557,7 +1540,7 @@
no:
if n.Dodata == 2 {
Dump("\ngen_as_init", n)
- Fatal("gen_as_init couldnt make data statement")
+ Fatalf("gen_as_init couldnt make data statement")
}
return false
diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go
index a554a1d..96d6204 100644
--- a/src/cmd/compile/internal/gc/ssa.go
+++ b/src/cmd/compile/internal/gc/ssa.go
@@ -455,7 +455,7 @@
return
}
if compiling_runtime != 0 {
- Fatal("%v escapes to heap, not allowed in runtime.", n)
+ Fatalf("%v escapes to heap, not allowed in runtime.", n)
}
// TODO: the old pass hides the details of PHEAP
@@ -1783,7 +1783,7 @@
// Rewrite to an OCALLFUNC: (p.f)(...) becomes (f)(p, ...)
// Take care not to modify the original AST.
if left.Op != ODOTMETH {
- Fatal("OCALLMETH: n.Left not an ODOTMETH: %v", left)
+ Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", left)
}
newLeft := *left.Right
@@ -2675,7 +2675,7 @@
if f.StaticData != nil {
for _, n := range f.StaticData.([]*Node) {
if !gen_as_init(n, false) {
- Fatal("non-static data marked as static: %v\n\n", n, f)
+ Fatalf("non-static data marked as static: %v\n\n", n, f)
}
}
}
@@ -3468,7 +3468,7 @@
}
case ssa.BlockExit:
case ssa.BlockRet:
- if Hasdefer != 0 {
+ if hasdefer {
s.deferReturn()
}
Prog(obj.ARET)
@@ -3780,7 +3780,7 @@
func (e *ssaExport) Fatalf(msg string, args ...interface{}) {
// If e was marked as unimplemented, anything could happen. Ignore.
if !e.unimplemented {
- Fatal(msg, args...)
+ Fatalf(msg, args...)
}
}
@@ -3788,7 +3788,7 @@
// It will be removed once SSA work is complete.
func (e *ssaExport) Unimplementedf(msg string, args ...interface{}) {
if e.mustImplement {
- Fatal(msg, args...)
+ Fatalf(msg, args...)
}
const alwaysLog = false // enable to calculate top unimplemented features
if !e.unimplemented && (e.log || alwaysLog) {
diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go
index 866d8e1..df5e398 100644
--- a/src/cmd/compile/internal/gc/subr.go
+++ b/src/cmd/compile/internal/gc/subr.go
@@ -181,7 +181,7 @@
}
}
-func Fatal(fmt_ string, args ...interface{}) {
+func Fatalf(fmt_ string, args ...interface{}) {
Flusherrors()
fmt.Printf("%v: internal compiler error: ", Ctxt.Line(int(lineno)))
@@ -339,7 +339,7 @@
s1.Block = s.Block
if s1.Def.Name == nil {
Dump("s1def", s1.Def)
- Fatal("missing Name")
+ Fatalf("missing Name")
}
s1.Def.Name.Pack = pack
s1.Origpkg = opkg
@@ -414,7 +414,7 @@
// the last field, total gives the size of the enclosing struct.
func ispaddedfield(t *Type, total int64) bool {
if t.Etype != TFIELD {
- Fatal("ispaddedfield called non-field %v", t)
+ Fatalf("ispaddedfield called non-field %v", t)
}
if t.Down == nil {
return t.Width+t.Type.Width != total
@@ -426,10 +426,10 @@
if bad != nil {
*bad = nil
}
- if t.Broke != 0 {
+ if t.Broke {
return AMEM
}
- if t.Noalg != 0 {
+ if t.Noalg {
return ANOEQ
}
@@ -530,7 +530,7 @@
return ret
}
- Fatal("algtype1: unexpected type %v", t)
+ Fatalf("algtype1: unexpected type %v", t)
return 0
}
@@ -665,12 +665,7 @@
i++
}
sort.Sort(methcmp(a[:i]))
- for {
- tmp11 := i
- i--
- if tmp11 <= 0 {
- break
- }
+ for i--; i >= 0; i-- {
a[i].Down = f
f = a[i]
}
@@ -709,7 +704,7 @@
n.Type = t
if Isfloat[t.Etype] {
- Fatal("nodconst: bad type %v", t)
+ Fatalf("nodconst: bad type %v", t)
}
}
@@ -775,7 +770,7 @@
}
if m.Name != nil && n.Op != ODCLFIELD {
Dump("treecopy", n)
- Fatal("treecopy Name")
+ Fatalf("treecopy Name")
}
case ONONAME:
@@ -938,7 +933,7 @@
return TFLOAT64
}
- Fatal("cplxsubtype: %v\n", Econv(int(et), 0))
+ Fatalf("cplxsubtype: %v\n", Econv(int(et), 0))
return 0
}
@@ -1010,7 +1005,7 @@
t2 = t2.Type
for ; t1 != nil && t2 != nil; t1, t2 = t1.Down, t2.Down {
if t1.Etype != TFIELD || t2.Etype != TFIELD {
- Fatal("struct/interface missing field: %v %v", t1, t2)
+ Fatalf("struct/interface missing field: %v %v", t1, t2)
}
if t1.Sym != t2.Sym || t1.Embedded != t2.Embedded || !eqtype1(t1.Type, t2.Type, &l) || !eqnote(t1.Note, t2.Note) {
return false
@@ -1028,7 +1023,7 @@
t2 = t2.Type
for ; t1 != nil && t2 != nil; t1, t2 = t1.Down, t2.Down {
if t1.Etype != TSTRUCT || t2.Etype != TSTRUCT {
- Fatal("func missing struct: %v %v", t1, t2)
+ Fatalf("func missing struct: %v %v", t1, t2)
}
// Loop over fields in structs, ignoring argument names.
@@ -1036,7 +1031,7 @@
tb := t2.Type
for ; ta != nil && tb != nil; ta, tb = ta.Down, tb.Down {
if ta.Etype != TFIELD || tb.Etype != TFIELD {
- Fatal("func struct missing field: %v %v", ta, tb)
+ Fatalf("func struct missing field: %v %v", ta, tb)
}
if ta.Isddd != tb.Isddd || !eqtype1(ta.Type, tb.Type, &l) {
return false
@@ -1138,7 +1133,7 @@
}
// we'll have complained about this method anyway, suppress spurious messages.
- if have != nil && have.Sym == missing.Sym && (have.Type.Broke != 0 || missing.Type.Broke != 0) {
+ if have != nil && have.Sym == missing.Sym && (have.Type.Broke || missing.Type.Broke) {
return OCONVIFACE
}
@@ -1322,7 +1317,7 @@
// Convert node n for assignment to type t.
func assignconvfn(n *Node, t *Type, context func() string) *Node {
- if n == nil || n.Type == nil || n.Type.Broke != 0 {
+ if n == nil || n.Type == nil || n.Type.Broke {
return n
}
@@ -1378,7 +1373,7 @@
}
substAny(&n.Type, &types)
if len(types) > 0 {
- Fatal("substArgTypes: too many argument types")
+ Fatalf("substArgTypes: too many argument types")
}
}
@@ -1390,9 +1385,9 @@
if t == nil {
return
}
- if t.Etype == TANY && t.Copyany != 0 {
+ if t.Etype == TANY && t.Copyany {
if len(*types) == 0 {
- Fatal("substArgTypes: not enough argument types")
+ Fatalf("substArgTypes: not enough argument types")
}
*tp = (*types)[0]
*types = (*types)[1:]
@@ -1491,7 +1486,7 @@
case TANY:
nt = shallow(t)
- nt.Copyany = 1
+ nt.Copyany = true
case TPTR32, TPTR64, TCHAN, TARRAY:
nt = shallow(t)
@@ -1526,7 +1521,7 @@
func syslook(name string, copy int) *Node {
s := Pkglookup(name, Runtimepkg)
if s == nil || s.Def == nil {
- Fatal("syslook: can't find runtime.%s", name)
+ Fatalf("syslook: can't find runtime.%s", name)
}
if copy == 0 {
@@ -1600,7 +1595,7 @@
// The returned struct must not be modified.
func Ptrto(t *Type) *Type {
if Tptr == 0 {
- Fatal("ptrto: no tptr")
+ Fatalf("ptrto: no tptr")
}
// Reduce allocations by pre-creating common cases.
if !initPtrtoDone {
@@ -1760,14 +1755,14 @@
}
if t.Etype != TFIELD {
- Fatal("structfirst: not field %v", t)
+ Fatalf("structfirst: not field %v", t)
}
s.T = t
return t
bad:
- Fatal("structfirst: not struct %v", n)
+ Fatalf("structfirst: not struct %v", n)
return nil
}
@@ -1780,7 +1775,7 @@
}
if t.Etype != TFIELD {
- Fatal("structnext: not struct %v", n)
+ Fatalf("structnext: not struct %v", n)
return nil
}
@@ -1814,7 +1809,7 @@
return fp
bad:
- Fatal("funcfirst: not func %v", t)
+ Fatalf("funcfirst: not func %v", t)
return nil
}
@@ -1830,21 +1825,21 @@
func getthis(t *Type) **Type {
if t.Etype != TFUNC {
- Fatal("getthis: not a func %v", t)
+ Fatalf("getthis: not a func %v", t)
}
return &t.Type
}
func Getoutarg(t *Type) **Type {
if t.Etype != TFUNC {
- Fatal("getoutarg: not a func %v", t)
+ Fatalf("getoutarg: not a func %v", t)
}
return &t.Type.Down
}
func getinarg(t *Type) **Type {
if t.Etype != TFUNC {
- Fatal("getinarg: not a func %v", t)
+ Fatalf("getinarg: not a func %v", t)
}
return &t.Type.Down.Down
}
@@ -1878,7 +1873,7 @@
case OGE:
return OLT
}
- Fatal("brcom: no com for %v\n", Oconv(a, 0))
+ Fatalf("brcom: no com for %v\n", Oconv(a, 0))
return a
}
@@ -1899,7 +1894,7 @@
case OGE:
return OLE
}
- Fatal("brrev: no rev for %v\n", Oconv(a, 0))
+ Fatalf("brrev: no rev for %v\n", Oconv(a, 0))
return a
}
@@ -1961,7 +1956,7 @@
// make a copy; must not be used as an lvalue
if islvalue(n) {
- Fatal("missing lvalue case in safeexpr: %v", n)
+ Fatalf("missing lvalue case in safeexpr: %v", n)
}
return cheapexpr(n, init)
}
@@ -2005,11 +2000,11 @@
dowidth(t)
w := t.Argwid
if w >= Thearch.MAXWIDTH {
- Fatal("bad argwid %v", t)
+ Fatalf("bad argwid %v", t)
}
w += int64(extra)
if w >= Thearch.MAXWIDTH {
- Fatal("bad argwid %d + %v", extra, t)
+ Fatalf("bad argwid %d + %v", extra, t)
}
if w > Maxarg {
Maxarg = w
@@ -2499,7 +2494,7 @@
typechecklist(fn.Nbody, Etop)
inlcalls(fn)
- escAnalyze(list1(fn), false)
+ escAnalyze([]*Node{fn}, false)
Curfn = nil
funccompile(fn)
@@ -2526,7 +2521,7 @@
a := algtype1(t, nil)
switch a {
case AMEM:
- Fatal("hashfor with AMEM type")
+ Fatalf("hashfor with AMEM type")
case AINTER:
sym = Pkglookup("interhash", Runtimepkg)
@@ -2601,11 +2596,11 @@
// so t must be either an array or a struct.
switch t.Etype {
default:
- Fatal("genhash %v", t)
+ Fatalf("genhash %v", t)
case TARRAY:
if Isslice(t) {
- Fatal("genhash %v", t)
+ Fatalf("genhash %v", t)
}
// An array of pure memory would be handled by the
@@ -2852,11 +2847,11 @@
// so t must be either an array or a struct.
switch t.Etype {
default:
- Fatal("geneq %v", t)
+ Fatalf("geneq %v", t)
case TARRAY:
if Isslice(t) {
- Fatal("geneq %v", t)
+ Fatalf("geneq %v", t)
}
// An array of pure memory would be handled by the
diff --git a/src/cmd/compile/internal/gc/swt.go b/src/cmd/compile/internal/gc/swt.go
index f34b1c6..a0e0c41 100644
--- a/src/cmd/compile/internal/gc/swt.go
+++ b/src/cmd/compile/internal/gc/swt.go
@@ -153,9 +153,9 @@
// reset to original type
ll.N = n.Left.Right
case ll.N.Type.Etype != TINTER && t.Etype == TINTER && !implements(ll.N.Type, t, &missing, &have, &ptr):
- if have != nil && missing.Broke == 0 && have.Broke == 0 {
+ if have != nil && !missing.Broke && !have.Broke {
Yyerror("impossible type switch case: %v cannot have dynamic type %v"+" (wrong type for %v method)\n\thave %v%v\n\twant %v%v", Nconv(n.Left.Right, obj.FmtLong), ll.N.Type, missing.Sym, have.Sym, Tconv(have.Type, obj.FmtShort), missing.Sym, Tconv(missing.Type, obj.FmtShort))
- } else if missing.Broke == 0 {
+ } else if !missing.Broke {
Yyerror("impossible type switch case: %v cannot have dynamic type %v"+" (missing %v method)", Nconv(n.Left.Right, obj.FmtLong), ll.N.Type, missing.Sym)
}
}
@@ -348,7 +348,7 @@
n := l.N
setlineno(n)
if n.Op != OXCASE {
- Fatal("casebody %v", Oconv(int(n.Op), 0))
+ Fatalf("casebody %v", Oconv(int(n.Op), 0))
}
n.Op = OCASE
needvar := count(n.List) != 1 || n.List.N.Op == OLITERAL
@@ -679,7 +679,7 @@
for _, c := range cc {
n := c.node
if c.typ != caseKindTypeConst {
- Fatal("typeSwitch walkCases")
+ Fatalf("typeSwitch walkCases")
}
a := Nod(OIF, nil, nil)
a.Left = Nod(OEQ, s.hashname, Nodintconst(int64(c.hash)))
diff --git a/src/cmd/compile/internal/gc/syntax.go b/src/cmd/compile/internal/gc/syntax.go
index 7f03a4e..5081ea0 100644
--- a/src/cmd/compile/internal/gc/syntax.go
+++ b/src/cmd/compile/internal/gc/syntax.go
@@ -81,7 +81,7 @@
if n.hasVal == -1 {
Debug['h'] = 1
Dump("have Opt", n)
- Fatal("have Opt")
+ Fatalf("have Opt")
}
n.hasVal = +1
n.E = v.U
@@ -104,7 +104,7 @@
if n.hasVal == +1 {
Debug['h'] = 1
Dump("have Val", n)
- Fatal("have Val")
+ Fatalf("have Val")
}
n.hasVal = -1
n.E = x
diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go
index befe3b2..314c3a9 100644
--- a/src/cmd/compile/internal/gc/typecheck.go
+++ b/src/cmd/compile/internal/gc/typecheck.go
@@ -89,37 +89,30 @@
return fmt.Sprintf("etype=%d", et)
}
-/*
- * sprint_depchain prints a dependency chain
- * of nodes into fmt.
- * It is used by typecheck in the case of OLITERAL nodes
- * to print constant definition loops.
- */
-func sprint_depchain(fmt_ *string, stack *NodeList, cur *Node, first *Node) {
- for l := stack; l != nil; l = l.Next {
- if l.N.Op == cur.Op {
- if l.N != first {
- sprint_depchain(fmt_, l.Next, l.N, first)
+// sprint_depchain prints a dependency chain of nodes into fmt.
+// It is used by typecheck in the case of OLITERAL nodes
+// to print constant definition loops.
+func sprint_depchain(fmt_ *string, stack []*Node, cur *Node, first *Node) {
+ for i := len(stack) - 1; i >= 0; i-- {
+ if n := stack[i]; n.Op == cur.Op {
+ if n != first {
+ sprint_depchain(fmt_, stack[:i], n, first)
}
- *fmt_ += fmt.Sprintf("\n\t%v: %v uses %v", l.N.Line(), l.N, cur)
+ *fmt_ += fmt.Sprintf("\n\t%v: %v uses %v", n.Line(), n, cur)
return
}
}
}
-/*
- * type check node *np.
- * replaces *np with a new pointer in some cases.
- * returns the final value of *np as a convenience.
- */
+var typecheck_tcstack []*Node
-var typecheck_tcstack *NodeList
-var typecheck_tcfree *NodeList
-
+// typecheck type checks node *np.
+// It replaces *np with a new pointer in some cases.
+// It returns the final value of *np as a convenience.
func typecheck(np **Node, top int) *Node {
// cannot type check until all the source has been parsed
- if typecheckok == 0 {
- Fatal("early typecheck")
+ if !typecheckok {
+ Fatalf("early typecheck")
}
n := *np
@@ -168,16 +161,15 @@
Yyerror("%v is not a type", n)
break
}
-
- fmt_ = ""
sprint_depchain(&fmt_, typecheck_tcstack, n, n)
yyerrorl(int(n.Lineno), "constant definition loop%s", fmt_)
}
if nsavederrors+nerrors == 0 {
fmt_ = ""
- for l := typecheck_tcstack; l != nil; l = l.Next {
- fmt_ += fmt.Sprintf("\n\t%v %v", l.N.Line(), l.N)
+ for i := len(typecheck_tcstack) - 1; i >= 0; i-- {
+ x := typecheck_tcstack[i]
+ fmt_ += fmt.Sprintf("\n\t%v %v", x.Line(), x)
}
Yyerror("typechecking loop involving %v%s", n, fmt_)
}
@@ -188,27 +180,15 @@
n.Typecheck = 2
- var l *NodeList
- if typecheck_tcfree != nil {
- l = typecheck_tcfree
- typecheck_tcfree = l.Next
- } else {
- l = new(NodeList)
- }
- l.Next = typecheck_tcstack
- l.N = n
- typecheck_tcstack = l
-
+ typecheck_tcstack = append(typecheck_tcstack, n)
typecheck1(&n, top)
*np = n
+
n.Typecheck = 1
- if typecheck_tcstack != l {
- Fatal("typecheck stack out of sync")
- }
- typecheck_tcstack = l.Next
- l.Next = typecheck_tcfree
- typecheck_tcfree = l
+ last := len(typecheck_tcstack) - 1
+ typecheck_tcstack[last] = nil
+ typecheck_tcstack = typecheck_tcstack[:last]
lineno = int32(lno)
return n
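
The same NodeList-to-slice conversion recurs throughout the CL; the hand-maintained free list (typecheck_tcfree) disappears because append and re-slicing do the bookkeeping. The push/pop pattern in isolation, with a stand-in element type:

	type Node struct{} // stand-in for the compiler's node type

	var tcstack []*Node

	func push(n *Node) {
		tcstack = append(tcstack, n)
	}

	func pop() *Node {
		last := len(tcstack) - 1
		n := tcstack[last]
		tcstack[last] = nil // clear the slot so the GC can reclaim n
		tcstack = tcstack[:last]
		return n
	}
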
@@ -293,7 +273,7 @@
default:
Dump("typecheck", n)
- Fatal("typecheck %v", Oconv(int(n.Op), 0))
+ Fatalf("typecheck %v", Oconv(int(n.Op), 0))
/*
* names
@@ -368,7 +348,7 @@
} else if l.Op == ODDD {
t.Bound = -100 // to be filled in
if top&Ecomplit == 0 && n.Diag == 0 {
- t.Broke = 1
+ t.Broke = true
n.Diag = 1
Yyerror("use of [...] array outside of array literal")
}
@@ -450,7 +430,7 @@
ok |= Etype
n.Op = OTYPE
n.Type = tostruct(n.List)
- if n.Type == nil || n.Type.Broke != 0 {
+ if n.Type == nil || n.Type.Broke {
n.Type = nil
return
}
@@ -820,7 +800,7 @@
}
if l.Orig != l && l.Op == ONAME {
- Fatal("found non-orig name node %v", l)
+ Fatalf("found non-orig name node %v", l)
}
l.Addrtaken = true
if l.Name != nil && l.Name.Param != nil && l.Name.Param.Closure != nil {
@@ -1306,7 +1286,7 @@
l = n.Left
if l.Op == OTYPE {
if n.Isddd || l.Type.Bound == -100 {
- if l.Type.Broke == 0 {
+ if !l.Type.Broke {
Yyerror("invalid use of ... in type conversion to %v", l.Type)
}
n.Diag = 1
@@ -1354,7 +1334,7 @@
tp := getthisx(t).Type.Type
if l.Left == nil || !Eqtype(l.Left.Type, tp) {
- Fatal("method receiver")
+ Fatalf("method receiver")
}
default:
@@ -1641,7 +1621,7 @@
// Unpack multiple-return result before type-checking.
var funarg *Type
- if Istype(t, TSTRUCT) && t.Funarg != 0 {
+ if Istype(t, TSTRUCT) && t.Funarg {
funarg = t
t = t.Type.Type
}
@@ -1773,7 +1753,7 @@
var why string
n.Op = uint8(convertop(t, n.Type, &why))
if (n.Op) == 0 {
- if n.Diag == 0 && n.Type.Broke == 0 {
+ if n.Diag == 0 && !n.Type.Broke {
Yyerror("cannot convert %v to type %v%s", Nconv(n.Left, obj.FmtLong), n.Type, why)
n.Diag = 1
}
@@ -2008,7 +1988,7 @@
return
}
if t.Etype != TINTER {
- Fatal("OITAB of %v", t)
+ Fatalf("OITAB of %v", t)
}
n.Type = Ptrto(Types[TUINTPTR])
break OpSwitch
@@ -2022,7 +2002,7 @@
return
}
if !Isslice(t) && t.Etype != TSTRING {
- Fatal("OSPTR of %v", t)
+ Fatalf("OSPTR of %v", t)
}
if t.Etype == TSTRING {
n.Type = Ptrto(Types[TUINT8])
@@ -2137,7 +2117,7 @@
return
}
- if Curfn.Type.Outnamed != 0 && n.List == nil {
+ if Curfn.Type.Outnamed && n.List == nil {
break OpSwitch
}
typecheckaste(ORETURN, nil, false, getoutargx(Curfn.Type), n.List, func() string { return "return argument" })
@@ -2193,7 +2173,7 @@
}
t := n.Type
- if t != nil && t.Funarg == 0 && n.Op != OTYPE {
+ if t != nil && !t.Funarg && n.Op != OTYPE {
switch t.Etype {
case TFUNC, // might have TANY; wait until its called
TANY,
@@ -2327,7 +2307,7 @@
// type is broken or missing, most likely a method call on a broken type
// we will warn about the broken type elsewhere. no need to emit a potentially confusing error
- if n.Left.Type == nil || n.Left.Type.Broke != 0 {
+ if n.Left.Type == nil || n.Left.Type.Broke {
return
}
@@ -2527,7 +2507,7 @@
Yyerror("%v is both field and method", n.Right.Sym)
}
if f1.Width == BADWIDTH {
- Fatal("lookdot badwidth %v %p", f1, f1)
+ Fatalf("lookdot badwidth %v %p", f1, f1)
}
n.Xoffset = f1.Width
n.Type = f1.Type
@@ -2578,7 +2558,7 @@
tt = tt.Type
}
} else {
- Fatal("method mismatch: %v for %v", rcvr, tt)
+ Fatalf("method mismatch: %v for %v", rcvr, tt)
}
}
@@ -2647,7 +2627,7 @@
lno := int(lineno)
- if tstruct.Broke != 0 {
+ if tstruct.Broke {
goto out
}
@@ -2655,7 +2635,7 @@
if nl != nil && nl.Next == nil {
n = nl.N
if n.Type != nil {
- if n.Type.Etype == TSTRUCT && n.Type.Funarg != 0 {
+ if n.Type.Etype == TSTRUCT && n.Type.Funarg {
if !hasddd(tstruct) {
n1 := downcount(tstruct)
n2 := downcount(n.Type)
@@ -2820,7 +2800,7 @@
*/
func fielddup(n *Node, hash map[string]bool) {
if n.Op != ONAME {
- Fatal("fielddup: not ONAME")
+ Fatalf("fielddup: not ONAME")
}
name := n.Sym.Name
if hash[name] {
@@ -2893,7 +2873,7 @@
func indexdup(n *Node, hash map[int64]*Node) {
if n.Op != OLITERAL {
- Fatal("indexdup: not OLITERAL")
+ Fatalf("indexdup: not OLITERAL")
}
v := Mpgetfix(n.Val().U.(*Mpint))
@@ -3093,11 +3073,10 @@
setlineno(ll.N)
typecheck(&ll.N, Erv)
if f == nil {
- tmp12 := bad
- bad++
- if tmp12 == 0 {
+ if bad == 0 {
Yyerror("too many values in struct initializer")
}
+ bad++
continue
}
@@ -3130,11 +3109,10 @@
l = ll.N
setlineno(l)
if l.Op != OKEY {
- tmp13 := bad
- bad++
- if tmp13 == 0 {
+ if bad == 0 {
Yyerror("mixture of field:value and value initializers")
}
+ bad++
typecheck(&ll.N, Erv)
continue
}
@@ -3397,7 +3375,7 @@
}
switch r.Op {
case OCALLMETH, OCALLINTER, OCALLFUNC:
- if r.Type.Etype != TSTRUCT || r.Type.Funarg == 0 {
+ if r.Type.Etype != TSTRUCT || !r.Type.Funarg {
break
}
cr = structcount(r.Type)
@@ -3497,7 +3475,7 @@
func stringtoarraylit(np **Node) {
n := *np
if n.Left.Op != OLITERAL || n.Left.Val().Ctype() != CTSTR {
- Fatal("stringtoarraylit %v", n)
+ Fatalf("stringtoarraylit %v", n)
}
s := n.Left.Val().U.(string)
@@ -3578,12 +3556,11 @@
if n.Name != nil {
t.Vargen = n.Name.Vargen
}
- t.Siggen = 0
t.Method = nil
t.Xmethod = nil
t.Nod = nil
- t.Printed = 0
- t.Deferwidth = 0
+ t.Printed = false
+ t.Deferwidth = false
t.Copyto = nil
// Update nodes waiting on this type.
@@ -3709,7 +3686,7 @@
fmt.Printf(" %v", l.N.Sym)
}
fmt.Printf("\n")
- Fatal("typecheckdef loop")
+ Fatalf("typecheckdef loop")
}
n.Walkdef = 2
@@ -3720,7 +3697,7 @@
switch n.Op {
default:
- Fatal("typecheckdef %v", Oconv(int(n.Op), 0))
+ Fatalf("typecheckdef %v", Oconv(int(n.Op), 0))
// not really syms
case OGOTO, OLABEL:
@@ -3803,7 +3780,7 @@
break
}
- Fatal("var without type, init: %v", n.Sym)
+ Fatalf("var without type, init: %v", n.Sym)
}
if n.Name.Defn.Op == ONAME {
@@ -3826,7 +3803,7 @@
if n.Type.Etype == TFORW && nerrors > nerrors0 {
// Something went wrong during type-checking,
// but it was reported. Silence future errors.
- n.Type.Broke = 1
+ n.Type.Broke = true
}
if Curfn != nil {
@@ -3840,10 +3817,10 @@
ret:
if n.Op != OLITERAL && n.Type != nil && isideal(n.Type) {
- Fatal("got %v for %v", n.Type, n)
+ Fatalf("got %v for %v", n.Type, n)
}
if typecheckdefstack.N != n {
- Fatal("typecheckdefstack mismatch")
+ Fatalf("typecheckdefstack mismatch")
}
l = typecheckdefstack
typecheckdefstack = l.Next
diff --git a/src/cmd/compile/internal/gc/unsafe.go b/src/cmd/compile/internal/gc/unsafe.go
index a01765b..44a658f 100644
--- a/src/cmd/compile/internal/gc/unsafe.go
+++ b/src/cmd/compile/internal/gc/unsafe.go
@@ -89,7 +89,7 @@
default:
Dump("unsafenmagic", r)
- Fatal("impossible %v node after dot insertion", Oconv(int(r1.Op), obj.FmtSharp))
+ Fatalf("impossible %v node after dot insertion", Oconv(int(r1.Op), obj.FmtSharp))
goto bad
}
}
diff --git a/src/cmd/compile/internal/gc/util.go b/src/cmd/compile/internal/gc/util.go
index c59af06..8620e0b 100644
--- a/src/cmd/compile/internal/gc/util.go
+++ b/src/cmd/compile/internal/gc/util.go
@@ -78,10 +78,10 @@
if cpuprofile != "" {
f, err := os.Create(cpuprofile)
if err != nil {
- Fatal("%v", err)
+ Fatalf("%v", err)
}
if err := pprof.StartCPUProfile(f); err != nil {
- Fatal("%v", err)
+ Fatalf("%v", err)
}
AtExit(pprof.StopCPUProfile)
}
@@ -91,12 +91,12 @@
}
f, err := os.Create(memprofile)
if err != nil {
- Fatal("%v", err)
+ Fatalf("%v", err)
}
AtExit(func() {
runtime.GC() // profile all outstanding allocations
if err := pprof.WriteHeapProfile(f); err != nil {
- Fatal("%v", err)
+ Fatalf("%v", err)
}
})
}
diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go
index af3e1cc..ae19e6f 100644
--- a/src/cmd/compile/internal/gc/walk.go
+++ b/src/cmd/compile/internal/gc/walk.go
@@ -182,7 +182,7 @@
ORECOVER,
OGETG:
if n.Typecheck == 0 {
- Fatal("missing typecheck: %v", Nconv(n, obj.FmtSign))
+ Fatalf("missing typecheck: %v", Nconv(n, obj.FmtSign))
}
init := n.Ninit
n.Ninit = nil
@@ -196,7 +196,7 @@
// the value received.
case ORECV:
if n.Typecheck == 0 {
- Fatal("missing typecheck: %v", Nconv(n, obj.FmtSign))
+ Fatalf("missing typecheck: %v", Nconv(n, obj.FmtSign))
}
init := n.Ninit
n.Ninit = nil
@@ -231,7 +231,7 @@
walkstmt(&n.Right)
case ODEFER:
- Hasdefer = 1
+ hasdefer = true
switch n.Left.Op {
case OPRINT, OPRINTN:
walkprintfunc(&n.Left, &n.Ninit)
@@ -283,7 +283,7 @@
if n.List == nil {
break
}
- if (Curfn.Type.Outnamed != 0 && count(n.List) > 1) || paramoutheap(Curfn) {
+ if (Curfn.Type.Outnamed && count(n.List) > 1) || paramoutheap(Curfn) {
// assign to the function out parameters,
// so that reorder3 can fix up conflicts
var rl *NodeList
@@ -311,7 +311,7 @@
f := n.List.N
if f.Op != OCALLFUNC && f.Op != OCALLMETH && f.Op != OCALLINTER {
- Fatal("expected return of call, have %v", f)
+ Fatalf("expected return of call, have %v", f)
}
n.List = concat(list1(f), ascompatet(int(n.Op), rl, &f.Type, 0, &n.Ninit))
break
@@ -346,7 +346,7 @@
}
if n.Op == ONAME {
- Fatal("walkstmt ended up with name: %v", Nconv(n, obj.FmtSign))
+ Fatalf("walkstmt ended up with name: %v", Nconv(n, obj.FmtSign))
}
*np = n
@@ -404,7 +404,7 @@
// not okay to use n->ninit when walking n,
// because we might replace n with some other node
// and would lose the init list.
- Fatal("walkexpr init == &n->ninit")
+ Fatalf("walkexpr init == &n->ninit")
}
if n.Ninit != nil {
@@ -427,13 +427,13 @@
}
if n.Typecheck != 1 {
- Fatal("missed typecheck: %v\n", Nconv(n, obj.FmtSign))
+ Fatalf("missed typecheck: %v\n", Nconv(n, obj.FmtSign))
}
switch n.Op {
default:
Dump("walk", n)
- Fatal("walkexpr: switch 1 unknown op %v", Nconv(n, obj.FmtShort|obj.FmtSign))
+ Fatalf("walkexpr: switch 1 unknown op %v", Nconv(n, obj.FmtShort|obj.FmtSign))
case OTYPE,
ONONAME,
@@ -872,11 +872,6 @@
typecheck(&n, Etop)
walkexpr(&n, init)
- // mapaccess needs a zero value to be at least this big.
- if zerosize < t.Type.Width {
- zerosize = t.Type.Width
- }
-
// TODO: ptr is always non-nil, so disable nil check for this OIND op.
goto ret
@@ -973,7 +968,7 @@
case ODOTTYPE, ODOTTYPE2:
if !isdirectiface(n.Type) || Isfat(n.Type) {
- Fatal("walkexpr ODOTTYPE") // should see inside OAS only
+ Fatalf("walkexpr ODOTTYPE") // should see inside OAS only
}
walkexpr(&n.Left, init)
goto ret
@@ -1285,14 +1280,10 @@
n.Type = t.Type
n.Typecheck = 1
- // mapaccess needs a zero value to be at least this big.
- if zerosize < t.Type.Width {
- zerosize = t.Type.Width
- }
goto ret
case ORECV:
- Fatal("walkexpr ORECV") // should see inside OAS only
+ Fatalf("walkexpr ORECV") // should see inside OAS only
case OSLICE, OSLICEARR, OSLICESTR:
walkexpr(&n.Left, init)
@@ -1336,7 +1327,7 @@
case ONEW:
if n.Esc == EscNone {
if n.Type.Type.Width >= 1<<16 {
- Fatal("large ONEW with EscNone: %v", n)
+ Fatalf("large ONEW with EscNone: %v", n)
}
r := temp(n.Type.Type)
r = Nod(OAS, r, nil) // zero temp
@@ -1406,7 +1397,7 @@
typecheck(&r, Erv)
if n.Type.Etype != TBOOL {
- Fatal("cmp %v", n.Type)
+ Fatalf("cmp %v", n.Type)
}
r.Type = n.Type
n = r
@@ -1418,7 +1409,7 @@
case OAPPEND:
// order should make sure we only see OAS(node, OAPPEND), which we handle above.
- Fatal("append outside assignment")
+ Fatalf("append outside assignment")
case OCOPY:
n = copyany(n, init, flag_race)
@@ -1477,7 +1468,7 @@
t := n.Type
if n.Esc == EscNone {
if !isSmallMakeSlice(n) {
- Fatal("non-small OMAKESLICE with EscNone: %v", n)
+ Fatalf("non-small OMAKESLICE with EscNone: %v", n)
}
// var arr [r]T
// n = arr[:l]
@@ -1585,7 +1576,7 @@
// ifaceeq(i1 any-1, i2 any-2) (ret bool);
case OCMPIFACE:
if !Eqtype(n.Left.Type, n.Right.Type) {
- Fatal("ifaceeq %v %v %v", Oconv(int(n.Op), 0), n.Left.Type, n.Right.Type)
+ Fatalf("ifaceeq %v %v %v", Oconv(int(n.Op), 0), n.Left.Type, n.Right.Type)
}
var fn *Node
if isnilinter(n.Left.Type) {
@@ -1637,7 +1628,7 @@
goto ret
}
- Fatal("missing switch %v", Oconv(int(n.Op), 0))
+ Fatalf("missing switch %v", Oconv(int(n.Op), 0))
// Expressions that are constant at run time but not
// considered const by the language spec are not turned into
@@ -1800,7 +1791,7 @@
}
if ucount != 0 {
- Fatal("ascompatet: too many function calls evaluating parameters")
+ Fatalf("ascompatet: too many function calls evaluating parameters")
}
return concat(nn, mm)
}
@@ -1831,7 +1822,7 @@
n.Esc = esc
typecheck(&n, Erv)
if n.Type == nil {
- Fatal("mkdotargslice: typecheck failed")
+ Fatalf("mkdotargslice: typecheck failed")
}
walkexpr(&n, init)
}
@@ -1909,7 +1900,7 @@
var l2 string
var ll *Type
var l1 string
- if r != nil && lr.Next == nil && r.Type.Etype == TSTRUCT && r.Type.Funarg != 0 {
+ if r != nil && lr.Next == nil && r.Type.Etype == TSTRUCT && r.Type.Funarg {
// optimization - can do block copy
if eqtypenoname(r.Type, *nl) {
a := nodarg(*nl, fp)
@@ -2249,7 +2240,7 @@
func convas(n *Node, init **NodeList) *Node {
if n.Op != OAS {
- Fatal("convas: not OAS %v", Oconv(int(n.Op), 0))
+ Fatalf("convas: not OAS %v", Oconv(int(n.Op), 0))
}
n.Typecheck = 1
@@ -2400,7 +2391,7 @@
switch l.Op {
default:
- Fatal("reorder3 unexpected lvalue %v", Oconv(int(l.Op), obj.FmtSharp))
+ Fatalf("reorder3 unexpected lvalue %v", Oconv(int(l.Op), obj.FmtSharp))
case ONAME:
break
@@ -2450,7 +2441,7 @@
func outervalue(n *Node) *Node {
for {
if n.Op == OXDOT {
- Fatal("OXDOT in walk")
+ Fatalf("OXDOT in walk")
}
if n.Op == ODOT || n.Op == OPAREN || n.Op == OCONVNOP {
n = n.Left
@@ -2748,7 +2739,7 @@
func vmkcall(fn *Node, t *Type, init **NodeList, va []*Node) *Node {
if fn.Type == nil || fn.Type.Etype != TFUNC {
- Fatal("mkcall %v %v", fn, fn.Type)
+ Fatalf("mkcall %v %v", fn, fn.Type)
}
var args *NodeList
@@ -2789,12 +2780,12 @@
func chanfn(name string, n int, t *Type) *Node {
if t.Etype != TCHAN {
- Fatal("chanfn %v", t)
+ Fatalf("chanfn %v", t)
}
fn := syslook(name, 1)
switch n {
default:
- Fatal("chanfn %d", n)
+ Fatalf("chanfn %d", n)
case 1:
substArgTypes(fn, t.Type)
case 2:
@@ -2805,7 +2796,7 @@
func mapfn(name string, t *Type) *Node {
if t.Etype != TMAP {
- Fatal("mapfn %v", t)
+ Fatalf("mapfn %v", t)
}
fn := syslook(name, 1)
substArgTypes(fn, t.Down, t.Type, t.Down, t.Type)
@@ -2814,7 +2805,7 @@
func mapfndel(name string, t *Type) *Node {
if t.Etype != TMAP {
- Fatal("mapfn %v", t)
+ Fatalf("mapfn %v", t)
}
fn := syslook(name, 1)
substArgTypes(fn, t.Down, t.Type, t.Down)
@@ -3165,7 +3156,7 @@
a := algtype1(t, nil)
if a != AMEM && a != -1 {
- Fatal("eqfor %v", t)
+ Fatalf("eqfor %v", t)
}
if a == AMEM {
@@ -3277,7 +3268,7 @@
}
if !islvalue(cmpl) || !islvalue(cmpr) {
- Fatal("arguments of comparison must be lvalues - %v %v", cmpl, cmpr)
+ Fatalf("arguments of comparison must be lvalues - %v %v", cmpl, cmpr)
}
l = temp(Ptrto(t))
@@ -3868,7 +3859,7 @@
switch n.Op {
default:
- Fatal("usefield %v", Oconv(int(n.Op), 0))
+ Fatalf("usefield %v", Oconv(int(n.Op), 0))
case ODOT, ODOTPTR:
break
@@ -3880,7 +3871,7 @@
}
field := dotField[typeSym{t.Orig, n.Right.Sym}]
if field == nil {
- Fatal("usefield %v %v without paramfld", n.Left.Type, n.Right.Sym)
+ Fatalf("usefield %v %v without paramfld", n.Left.Type, n.Right.Sym)
}
if field.Note == nil || !strings.Contains(*field.Note, "go:\"track\"") {
return
diff --git a/src/cmd/compile/internal/gc/y.go b/src/cmd/compile/internal/gc/y.go
index 2b61c07..fafbdf1 100644
--- a/src/cmd/compile/internal/gc/y.go
+++ b/src/cmd/compile/internal/gc/y.go
@@ -1057,6 +1057,7 @@
var yylval yySymType
var yyVAL yySymType
var yyDollar []yySymType
+ _ = yyDollar // silence set and not used
yyS := make([]yySymType, yyMaxDepth)
Nerrs := 0 /* number of errors */
@@ -1313,7 +1314,7 @@
// no package statement. This allows us to test more
// than one invalid import statement in a single file.
if nerrors == 0 {
- Fatal("phase error in import")
+ Fatalf("phase error in import")
}
}
case 15:
@@ -1353,7 +1354,7 @@
} else if importpkg.Name != yyDollar[2].sym.Name {
Yyerror("conflicting names %s and %s for package %q", importpkg.Name, yyDollar[2].sym.Name, importpkg.Path)
}
- importpkg.Direct = 1
+ importpkg.Direct = true
importpkg.Safe = curio.importsafe
if safemode != 0 && !curio.importsafe {
@@ -3240,7 +3241,7 @@
yyDollar[2].node.Func.Inl = yyDollar[3].list
funcbody(yyDollar[2].node)
- importlist = list(importlist, yyDollar[2].node)
+ importlist = append(importlist, yyDollar[2].node)
if Debug['E'] > 0 {
fmt.Printf("import [%q] func %v \n", importpkg.Path, yyDollar[2].node)
diff --git a/src/cmd/compile/internal/mips64/cgen.go b/src/cmd/compile/internal/mips64/cgen.go
new file mode 100644
index 0000000..4f3092c
--- /dev/null
+++ b/src/cmd/compile/internal/mips64/cgen.go
@@ -0,0 +1,149 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ppc64
+
+import (
+ "cmd/compile/internal/gc"
+ "cmd/internal/obj"
+ "cmd/internal/obj/ppc64"
+)
+
+func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
+ // determine alignment.
+ // want to avoid unaligned access, so have to use
+ // smaller operations for less aligned types.
+ // for example moving [4]byte must use 4 MOVB not 1 MOVW.
+ align := int(n.Type.Align)
+
+ var op int
+ switch align {
+ default:
+ gc.Fatalf("sgen: invalid alignment %d for %v", align, n.Type)
+
+ case 1:
+ op = ppc64.AMOVBU
+
+ case 2:
+ op = ppc64.AMOVHU
+
+ case 4:
+ op = ppc64.AMOVWZU // there is no lwau, only lwaux
+
+ case 8:
+ op = ppc64.AMOVDU
+ }
+
+ if w%int64(align) != 0 {
+ gc.Fatalf("sgen: unaligned size %d (align=%d) for %v", w, align, n.Type)
+ }
+ c := int32(w / int64(align))
+
+ // if we are copying forward on the stack and
+ // the src and dst overlap, then reverse direction
+ dir := align
+
+ if osrc < odst && int64(odst) < int64(osrc)+w {
+ dir = -dir
+ }
+
+ var dst gc.Node
+ var src gc.Node
+ if n.Ullman >= res.Ullman {
+ gc.Agenr(n, &dst, res) // temporarily use dst
+ gc.Regalloc(&src, gc.Types[gc.Tptr], nil)
+ gins(ppc64.AMOVD, &dst, &src)
+ if res.Op == gc.ONAME {
+ gc.Gvardef(res)
+ }
+ gc.Agen(res, &dst)
+ } else {
+ if res.Op == gc.ONAME {
+ gc.Gvardef(res)
+ }
+ gc.Agenr(res, &dst, res)
+ gc.Agenr(n, &src, nil)
+ }
+
+ var tmp gc.Node
+ gc.Regalloc(&tmp, gc.Types[gc.Tptr], nil)
+
+ // set up end marker
+ var nend gc.Node
+
+ // move src and dest to the end of block if necessary
+ if dir < 0 {
+ if c >= 4 {
+ gc.Regalloc(&nend, gc.Types[gc.Tptr], nil)
+ gins(ppc64.AMOVD, &src, &nend)
+ }
+
+ p := gins(ppc64.AADD, nil, &src)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = w
+
+ p = gins(ppc64.AADD, nil, &dst)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = w
+ } else {
+ p := gins(ppc64.AADD, nil, &src)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(-dir)
+
+ p = gins(ppc64.AADD, nil, &dst)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(-dir)
+
+ if c >= 4 {
+ gc.Regalloc(&nend, gc.Types[gc.Tptr], nil)
+ p := gins(ppc64.AMOVD, &src, &nend)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = w
+ }
+ }
+
+ // move
+ // TODO: enable duffcopy for larger copies.
+ if c >= 4 {
+ p := gins(op, &src, &tmp)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Offset = int64(dir)
+ ploop := p
+
+ p = gins(op, &tmp, &dst)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Offset = int64(dir)
+
+ p = gins(ppc64.ACMP, &src, &nend)
+
+ gc.Patch(gc.Gbranch(ppc64.ABNE, nil, 0), ploop)
+ gc.Regfree(&nend)
+ } else {
+ // TODO(austin): Instead of generating ADD $-8,R8; ADD
+ // $-8,R7; n*(MOVDU 8(R8),R9; MOVDU R9,8(R7);) just
+ // generate the offsets directly and eliminate the
+ // ADDs. That will produce shorter, more
+ // pipeline-able code.
+ var p *obj.Prog
+ for ; c > 0; c-- {
+
+ p = gins(op, &src, &tmp)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Offset = int64(dir)
+
+ p = gins(op, &tmp, &dst)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Offset = int64(dir)
+ }
+ }
+
+ gc.Regfree(&dst)
+ gc.Regfree(&src)
+ gc.Regfree(&tmp)
+}
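
Before blockcopy emits a single instruction it makes two decisions: how wide each move can be, and which direction to walk. A small sketch of just that logic, with illustrative names:

	// step: bytes moved per instruction; backwards: copy direction.
	func copyPlan(align int, osrc, odst, w int64) (step int64, backwards bool) {
		// One move per align bytes: moving [4]byte uses 4 MOVB, not 1 MOVW.
		step = int64(align)
		if w%step != 0 {
			panic("sgen: unaligned size")
		}
		// A forward copy whose destination starts inside [osrc, osrc+w)
		// would overwrite source bytes before reading them, so go backwards.
		backwards = osrc < odst && odst < osrc+w
		return step, backwards
	}
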
diff --git a/src/cmd/compile/internal/mips64/galign.go b/src/cmd/compile/internal/mips64/galign.go
new file mode 100644
index 0000000..16509da
--- /dev/null
+++ b/src/cmd/compile/internal/mips64/galign.go
@@ -0,0 +1,100 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ppc64
+
+import (
+ "cmd/compile/internal/gc"
+ "cmd/internal/obj"
+ "cmd/internal/obj/ppc64"
+)
+
+var thechar int = '9'
+
+var thestring string = "ppc64"
+
+var thelinkarch *obj.LinkArch
+
+func linkarchinit() {
+ thestring = obj.Getgoarch()
+ gc.Thearch.Thestring = thestring
+ if thestring == "ppc64le" {
+ thelinkarch = &ppc64.Linkppc64le
+ } else {
+ thelinkarch = &ppc64.Linkppc64
+ }
+ gc.Thearch.Thelinkarch = thelinkarch
+}
+
+var MAXWIDTH int64 = 1 << 50
+
+/*
+ * go declares several platform-specific type aliases:
+ * int, uint, and uintptr
+ */
+var typedefs = []gc.Typedef{
+ {"int", gc.TINT, gc.TINT64},
+ {"uint", gc.TUINT, gc.TUINT64},
+ {"uintptr", gc.TUINTPTR, gc.TUINT64},
+}
+
+func betypeinit() {
+ gc.Widthptr = 8
+ gc.Widthint = 8
+ gc.Widthreg = 8
+}
+
+func Main() {
+ gc.Thearch.Thechar = thechar
+ gc.Thearch.Thestring = thestring
+ gc.Thearch.Thelinkarch = thelinkarch
+ gc.Thearch.Typedefs = typedefs
+ gc.Thearch.REGSP = ppc64.REGSP
+ gc.Thearch.REGCTXT = ppc64.REGCTXT
+ gc.Thearch.REGCALLX = ppc64.REG_R3
+ gc.Thearch.REGCALLX2 = ppc64.REG_R4
+ gc.Thearch.REGRETURN = ppc64.REG_R3
+ gc.Thearch.REGMIN = ppc64.REG_R0
+ gc.Thearch.REGMAX = ppc64.REG_R31
+ gc.Thearch.FREGMIN = ppc64.REG_F0
+ gc.Thearch.FREGMAX = ppc64.REG_F31
+ gc.Thearch.MAXWIDTH = MAXWIDTH
+ gc.Thearch.ReservedRegs = resvd
+
+ gc.Thearch.Betypeinit = betypeinit
+ gc.Thearch.Cgen_hmul = cgen_hmul
+ gc.Thearch.Cgen_shift = cgen_shift
+ gc.Thearch.Clearfat = clearfat
+ gc.Thearch.Defframe = defframe
+ gc.Thearch.Dodiv = dodiv
+ gc.Thearch.Excise = excise
+ gc.Thearch.Expandchecks = expandchecks
+ gc.Thearch.Getg = getg
+ gc.Thearch.Gins = gins
+ gc.Thearch.Ginscmp = ginscmp
+ gc.Thearch.Ginscon = ginscon
+ gc.Thearch.Ginsnop = ginsnop
+ gc.Thearch.Gmove = gmove
+ gc.Thearch.Linkarchinit = linkarchinit
+ gc.Thearch.Peep = peep
+ gc.Thearch.Proginfo = proginfo
+ gc.Thearch.Regtyp = regtyp
+ gc.Thearch.Sameaddr = sameaddr
+ gc.Thearch.Smallindir = smallindir
+ gc.Thearch.Stackaddr = stackaddr
+ gc.Thearch.Blockcopy = blockcopy
+ gc.Thearch.Sudoaddable = sudoaddable
+ gc.Thearch.Sudoclean = sudoclean
+ gc.Thearch.Excludedregs = excludedregs
+ gc.Thearch.RtoB = RtoB
+ gc.Thearch.FtoB = RtoB
+ gc.Thearch.BtoR = BtoR
+ gc.Thearch.BtoF = BtoF
+ gc.Thearch.Optoas = optoas
+ gc.Thearch.Doregbits = doregbits
+ gc.Thearch.Regnames = regnames
+
+ gc.Main()
+ gc.Exit(0)
+}
diff --git a/src/cmd/compile/internal/mips64/ggen.go b/src/cmd/compile/internal/mips64/ggen.go
new file mode 100644
index 0000000..2779140
--- /dev/null
+++ b/src/cmd/compile/internal/mips64/ggen.go
@@ -0,0 +1,564 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ppc64
+
+import (
+ "cmd/compile/internal/gc"
+ "cmd/internal/obj"
+ "cmd/internal/obj/ppc64"
+ "fmt"
+)
+
+func defframe(ptxt *obj.Prog) {
+ var n *gc.Node
+
+ // fill in argument size, stack size
+ ptxt.To.Type = obj.TYPE_TEXTSIZE
+
+ ptxt.To.Val = int32(gc.Rnd(gc.Curfn.Type.Argwid, int64(gc.Widthptr)))
+ frame := uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
+ ptxt.To.Offset = int64(frame)
+
+ // insert code to zero ambiguously live variables
+ // so that the garbage collector only sees initialized values
+ // when it looks for pointers.
+ p := ptxt
+
+ hi := int64(0)
+ lo := hi
+
+ // iterate through declarations - they are sorted in decreasing xoffset order.
+ for l := gc.Curfn.Func.Dcl; l != nil; l = l.Next {
+ n = l.N
+ if !n.Name.Needzero {
+ continue
+ }
+ if n.Class != gc.PAUTO {
+ gc.Fatalf("needzero class %d", n.Class)
+ }
+ if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
+ gc.Fatalf("var %v has size %d offset %d", gc.Nconv(n, obj.FmtLong), int(n.Type.Width), int(n.Xoffset))
+ }
+
+ if lo != hi && n.Xoffset+n.Type.Width >= lo-int64(2*gc.Widthreg) {
+ // merge with range we already have
+ lo = n.Xoffset
+
+ continue
+ }
+
+ // zero old range
+ p = zerorange(p, int64(frame), lo, hi)
+
+ // set new range
+ hi = n.Xoffset + n.Type.Width
+
+ lo = n.Xoffset
+ }
+
+ // zero final range
+ zerorange(p, int64(frame), lo, hi)
+}
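
defframe is an interval-coalescing pass: declarations arrive sorted by decreasing offset, and each dirty variable either extends the pending range or flushes it. The same walk extracted into a runnable sketch, with the 2*Widthreg slack as a parameter and illustrative types:

	type decl struct{ off, width int64 }

	// mergeRanges returns the [lo, hi) spans that would each get one
	// zerorange call; decls must be sorted by decreasing off.
	func mergeRanges(decls []decl, slack int64) (spans [][2]int64) {
		lo, hi := int64(0), int64(0)
		for _, n := range decls {
			if lo != hi && n.off+n.width >= lo-slack {
				lo = n.off // merge with the range we already have
				continue
			}
			if lo != hi {
				spans = append(spans, [2]int64{lo, hi}) // flush old range
			}
			hi = n.off + n.width // start a new range
			lo = n.off
		}
		if lo != hi {
			spans = append(spans, [2]int64{lo, hi}) // final range
		}
		return spans
	}
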
+
+func zerorange(p *obj.Prog, frame int64, lo int64, hi int64) *obj.Prog {
+ cnt := hi - lo
+ if cnt == 0 {
+ return p
+ }
+ if cnt < int64(4*gc.Widthptr) {
+ for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
+ p = appendpp(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, 8+frame+lo+i)
+ }
+ // TODO(dfc): https://golang.org/issue/12108
+ // If DUFFZERO is used inside a tail call (see genwrapper) it will
+ // overwrite the link register.
+ } else if false && cnt <= int64(128*gc.Widthptr) {
+ p = appendpp(p, ppc64.AADD, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, ppc64.REGRT1, 0)
+ p.Reg = ppc64.REGSP
+ p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
+ f := gc.Sysfunc("duffzero")
+ gc.Naddr(&p.To, f)
+ gc.Afunclit(&p.To, f)
+ p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
+ } else {
+ p = appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, ppc64.REGTMP, 0)
+ p = appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT1, 0)
+ p.Reg = ppc64.REGSP
+ p = appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, ppc64.REGTMP, 0)
+ p = appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
+ p.Reg = ppc64.REGRT1
+ p = appendpp(p, ppc64.AMOVDU, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGRT1, int64(gc.Widthptr))
+ p1 := p
+ p = appendpp(p, ppc64.ACMP, obj.TYPE_REG, ppc64.REGRT1, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
+ p = appendpp(p, ppc64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
+ gc.Patch(p, p1)
+ }
+
+ return p
+}
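
zerorange picks one of three code shapes purely by size; note the false && guard keeping DUFFZERO disabled until golang.org/issue/12108 is resolved. The decision isolated as a sketch (thresholds copied from the code above):

	func zeroStrategy(cnt, widthptr int64) string {
		switch {
		case cnt == 0:
			return "nothing to do"
		case cnt < 4*widthptr:
			return "unrolled MOVD stores" // one store per pointer-sized word
		case false && cnt <= 128*widthptr:
			return "DUFFZERO" // disabled: clobbers the link register in tail calls
		default:
			return "MOVDU/CMP/BNE loop"
		}
	}
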
+
+func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int64, ttype int, treg int, toffset int64) *obj.Prog {
+ q := gc.Ctxt.NewProg()
+ gc.Clearp(q)
+ q.As = int16(as)
+ q.Lineno = p.Lineno
+ q.From.Type = int16(ftype)
+ q.From.Reg = int16(freg)
+ q.From.Offset = foffset
+ q.To.Type = int16(ttype)
+ q.To.Reg = int16(treg)
+ q.To.Offset = toffset
+ q.Link = p.Link
+ p.Link = q
+ return q
+}
+
+func ginsnop() {
+ var reg gc.Node
+ gc.Nodreg(®, gc.Types[gc.TINT], ppc64.REG_R0)
+ gins(ppc64.AOR, ®, ®)
+}
+
+var panicdiv *gc.Node
+
+/*
+ * generate division.
+ * generates one of:
+ * res = nl / nr
+ * res = nl % nr
+ * according to op.
+ */
+func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+ // Have to be careful about handling
+ // the most negative int divided by -1 correctly.
+ // The hardware will generate an undefined result.
+ // Also need to explicitly trap on division by zero;
+ // the hardware will silently generate an undefined result.
+ // DIVW leaves an unpredictable result in the upper 32 bits,
+ // so always use DIVD/DIVDU.
+ t := nl.Type
+
+ t0 := t
+ check := 0
+ if gc.Issigned[t.Etype] {
+ check = 1
+ if gc.Isconst(nl, gc.CTINT) && nl.Int() != -(1<<uint64(t.Width*8-1)) {
+ check = 0
+ } else if gc.Isconst(nr, gc.CTINT) && nr.Int() != -1 {
+ check = 0
+ }
+ }
+
+ if t.Width < 8 {
+ if gc.Issigned[t.Etype] {
+ t = gc.Types[gc.TINT64]
+ } else {
+ t = gc.Types[gc.TUINT64]
+ }
+ check = 0
+ }
+
+ a := optoas(gc.ODIV, t)
+
+ var tl gc.Node
+ gc.Regalloc(&tl, t0, nil)
+ var tr gc.Node
+ gc.Regalloc(&tr, t0, nil)
+ if nl.Ullman >= nr.Ullman {
+ gc.Cgen(nl, &tl)
+ gc.Cgen(nr, &tr)
+ } else {
+ gc.Cgen(nr, &tr)
+ gc.Cgen(nl, &tl)
+ }
+
+ if t != t0 {
+ // Convert
+ tl2 := tl
+
+ tr2 := tr
+ tl.Type = t
+ tr.Type = t
+ gmove(&tl2, &tl)
+ gmove(&tr2, &tr)
+ }
+
+ // Handle divide-by-zero panic.
+ p1 := gins(optoas(gc.OCMP, t), &tr, nil)
+
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = ppc64.REGZERO
+ p1 = gc.Gbranch(optoas(gc.ONE, t), nil, +1)
+ if panicdiv == nil {
+ panicdiv = gc.Sysfunc("panicdivide")
+ }
+ gc.Ginscall(panicdiv, -1)
+ gc.Patch(p1, gc.Pc)
+
+ var p2 *obj.Prog
+ if check != 0 {
+ var nm1 gc.Node
+ gc.Nodconst(&nm1, t, -1)
+ gins(optoas(gc.OCMP, t), &tr, &nm1)
+ p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
+ if op == gc.ODIV {
+ // a / (-1) is -a.
+ gins(optoas(gc.OMINUS, t), nil, &tl)
+
+ gmove(&tl, res)
+ } else {
+ // a % (-1) is 0.
+ var nz gc.Node
+ gc.Nodconst(&nz, t, 0)
+
+ gmove(&nz, res)
+ }
+
+ p2 = gc.Gbranch(obj.AJMP, nil, 0)
+ gc.Patch(p1, gc.Pc)
+ }
+
+ p1 = gins(a, &tr, &tl)
+ if op == gc.ODIV {
+ gc.Regfree(&tr)
+ gmove(&tl, res)
+ } else {
+ // A%B = A-(A/B*B)
+ var tm gc.Node
+ gc.Regalloc(&tm, t, nil)
+
+ // patch div to use the 3 register form
+ // TODO(minux): add gins3?
+ p1.Reg = p1.To.Reg
+
+ p1.To.Reg = tm.Reg
+ gins(optoas(gc.OMUL, t), &tr, &tm)
+ gc.Regfree(&tr)
+ gins(optoas(gc.OSUB, t), &tm, &tl)
+ gc.Regfree(&tm)
+ gmove(&tl, res)
+ }
+
+ gc.Regfree(&tl)
+ if check != 0 {
+ gc.Patch(p2, gc.Pc)
+ }
+}
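
The guards dodiv emits correspond to behavior the Go spec requires but the hardware leaves undefined: an explicit divide-by-zero panic, and defined wraparound for the most negative value divided by -1. The required semantics, as ordinary Go:

	// divmod shows what the generated code must preserve.
	func divmod(a, b int64) (q, r int64) {
		if b == 0 {
			panic("integer divide by zero") // hardware would silently misbehave
		}
		if b == -1 {
			// Avoid the hardware-undefined MinInt64 / -1 case:
			// a / -1 is -a (wrapping back to MinInt64 when a is MinInt64),
			// and a % -1 is 0.
			return -a, 0
		}
		return a / b, a % b
	}
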
+
+/*
+ * generate high multiply:
+ * res = (nl*nr) >> width
+ */
+func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
+ // largest ullman on left.
+ if nl.Ullman < nr.Ullman {
+ nl, nr = nr, nl
+ }
+
+ t := (*gc.Type)(nl.Type)
+ w := int(t.Width * 8)
+ var n1 gc.Node
+ gc.Cgenr(nl, &n1, res)
+ var n2 gc.Node
+ gc.Cgenr(nr, &n2, nil)
+ switch gc.Simtype[t.Etype] {
+ case gc.TINT8,
+ gc.TINT16,
+ gc.TINT32:
+ gins(optoas(gc.OMUL, t), &n2, &n1)
+ p := (*obj.Prog)(gins(ppc64.ASRAD, nil, &n1))
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(w)
+
+ case gc.TUINT8,
+ gc.TUINT16,
+ gc.TUINT32:
+ gins(optoas(gc.OMUL, t), &n2, &n1)
+ p := (*obj.Prog)(gins(ppc64.ASRD, nil, &n1))
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(w)
+
+ case gc.TINT64,
+ gc.TUINT64:
+ if gc.Issigned[t.Etype] {
+ gins(ppc64.AMULHD, &n2, &n1)
+ } else {
+ gins(ppc64.AMULHDU, &n2, &n1)
+ }
+
+ default:
+ gc.Fatalf("cgen_hmul %v", t)
+ }
+
+ gc.Cgen(&n1, res)
+ gc.Regfree(&n1)
+ gc.Regfree(&n2)
+}
+
+/*
+ * generate shift according to op, one of:
+ * res = nl << nr
+ * res = nl >> nr
+ */
+func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+ a := int(optoas(op, nl.Type))
+
+ if nr.Op == gc.OLITERAL {
+ var n1 gc.Node
+ gc.Regalloc(&n1, nl.Type, res)
+ gc.Cgen(nl, &n1)
+ sc := uint64(nr.Int())
+ if sc >= uint64(nl.Type.Width*8) {
+ // large shift gets 2 shifts by width-1
+ var n3 gc.Node
+ gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
+
+ gins(a, &n3, &n1)
+ gins(a, &n3, &n1)
+ } else {
+ gins(a, nr, &n1)
+ }
+ gmove(&n1, res)
+ gc.Regfree(&n1)
+ return
+ }
+
+ if nl.Ullman >= gc.UINF {
+ var n4 gc.Node
+ gc.Tempname(&n4, nl.Type)
+ gc.Cgen(nl, &n4)
+ nl = &n4
+ }
+
+ if nr.Ullman >= gc.UINF {
+ var n5 gc.Node
+ gc.Tempname(&n5, nr.Type)
+ gc.Cgen(nr, &n5)
+ nr = &n5
+ }
+
+ // Allow either uint32 or uint64 as shift type,
+ // to avoid unnecessary conversion from uint32 to uint64
+ // just to do the comparison.
+ tcount := gc.Types[gc.Simtype[nr.Type.Etype]]
+
+ if tcount.Etype < gc.TUINT32 {
+ tcount = gc.Types[gc.TUINT32]
+ }
+
+ var n1 gc.Node
+ gc.Regalloc(&n1, nr.Type, nil) // to hold the shift type in CX
+ var n3 gc.Node
+ gc.Regalloc(&n3, tcount, &n1) // to clear high bits of CX
+
+ var n2 gc.Node
+ gc.Regalloc(&n2, nl.Type, res)
+
+ if nl.Ullman >= nr.Ullman {
+ gc.Cgen(nl, &n2)
+ gc.Cgen(nr, &n1)
+ gmove(&n1, &n3)
+ } else {
+ gc.Cgen(nr, &n1)
+ gmove(&n1, &n3)
+ gc.Cgen(nl, &n2)
+ }
+
+ gc.Regfree(&n3)
+
+ // test and fix up large shifts
+ if !bounded {
+ gc.Nodconst(&n3, tcount, nl.Type.Width*8)
+ gins(optoas(gc.OCMP, tcount), &n1, &n3)
+ p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, tcount), nil, +1))
+ if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
+ gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
+ gins(a, &n3, &n2)
+ } else {
+ gc.Nodconst(&n3, nl.Type, 0)
+ gmove(&n3, &n2)
+ }
+
+ gc.Patch(p1, gc.Pc)
+ }
+
+ gins(a, &n1, &n2)
+
+ gmove(&n2, res)
+
+ gc.Regfree(&n1)
+ gc.Regfree(&n2)
+}
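
The test-and-fix-up block exists because PPC64 shift instructions honor only the low bits of the count, while Go defines shifts by counts of at least the operand width. What the fixup must compute, in plain Go:

	func shiftRightSigned(x int64, s uint64) int64 {
		if s >= 64 {
			return x >> 63 // sign-fill: same as shifting by width-1
		}
		return x >> s
	}

	func shiftLeft(x uint64, s uint64) uint64 {
		if s >= 64 {
			return 0 // any over-wide shift yields zero
		}
		return x << s
	}
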
+
+func clearfat(nl *gc.Node) {
+ /* clear a fat object */
+ if gc.Debug['g'] != 0 {
+ fmt.Printf("clearfat %v (%v, size: %d)\n", nl, nl.Type, nl.Type.Width)
+ }
+
+ w := uint64(nl.Type.Width)
+
+ // Avoid taking the address for simple enough types.
+ if gc.Componentgen(nil, nl) {
+ return
+ }
+
+ c := uint64(w % 8) // bytes
+ q := uint64(w / 8) // dwords
+
+ if gc.Reginuse(ppc64.REGRT1) {
+ gc.Fatalf("%v in use during clearfat", obj.Rconv(ppc64.REGRT1))
+ }
+
+ var r0 gc.Node
+ gc.Nodreg(&r0, gc.Types[gc.TUINT64], ppc64.REGZERO)
+ var dst gc.Node
+ gc.Nodreg(&dst, gc.Types[gc.Tptr], ppc64.REGRT1)
+ gc.Regrealloc(&dst)
+ gc.Agen(nl, &dst)
+
+ var boff uint64
+ if q > 128 {
+ p := gins(ppc64.ASUB, nil, &dst)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 8
+
+ var end gc.Node
+ gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
+ p = gins(ppc64.AMOVD, &dst, &end)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = int64(q * 8)
+
+ p = gins(ppc64.AMOVDU, &r0, &dst)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Offset = 8
+ pl := (*obj.Prog)(p)
+
+ p = gins(ppc64.ACMP, &dst, &end)
+ gc.Patch(gc.Gbranch(ppc64.ABNE, nil, 0), pl)
+
+ gc.Regfree(&end)
+
+ // The loop leaves R3 on the last zeroed dword
+ boff = 8
+ // TODO(dfc): https://golang.org/issue/12108
+ // If DUFFZERO is used inside a tail call (see genwrapper) it will
+ // overwrite the link register.
+ } else if false && q >= 4 {
+ p := gins(ppc64.ASUB, nil, &dst)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 8
+ f := (*gc.Node)(gc.Sysfunc("duffzero"))
+ p = gins(obj.ADUFFZERO, nil, f)
+ gc.Afunclit(&p.To, f)
+
+ // 4 and 128 = magic constants: see ../../runtime/asm_ppc64x.s
+ p.To.Offset = int64(4 * (128 - q))
+
+ // duffzero leaves R3 on the last zeroed dword
+ boff = 8
+ } else {
+ var p *obj.Prog
+ for t := uint64(0); t < q; t++ {
+ p = gins(ppc64.AMOVD, &r0, &dst)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Offset = int64(8 * t)
+ }
+
+ boff = 8 * q
+ }
+
+ var p *obj.Prog
+ for t := uint64(0); t < c; t++ {
+ p = gins(ppc64.AMOVB, &r0, &dst)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Offset = int64(t + boff)
+ }
+
+ gc.Regfree(&dst)
+}
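
clearfat splits the width into doublewords plus a byte remainder, and boff records where the byte stores begin; in the loop and DUFFZERO paths the pointer is left on the last zeroed dword instead, so boff stays 8. The unrolled-path arithmetic as a sketch:

	func clearfatPlan(w uint64) (q, c, boff uint64) {
		q = w / 8    // dwords: unrolled MOVD $0 stores, or a loop when q > 128
		c = w % 8    // remainder: one MOVB $0 per byte
		boff = q * 8 // offset of the first trailing byte after unrolled stores
		return q, c, boff
	}
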
+
+// Called after regopt and peep have run.
+// Expand CHECKNIL pseudo-op into actual nil pointer check.
+func expandchecks(firstp *obj.Prog) {
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+
+ for p := (*obj.Prog)(firstp); p != nil; p = p.Link {
+ if gc.Debug_checknil != 0 && gc.Ctxt.Debugvlog != 0 {
+ fmt.Printf("expandchecks: %v\n", p)
+ }
+ if p.As != obj.ACHECKNIL {
+ continue
+ }
+ if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
+ gc.Warnl(int(p.Lineno), "generated nil check")
+ }
+ if p.From.Type != obj.TYPE_REG {
+ gc.Fatalf("invalid nil check %v\n", p)
+ }
+
+ /*
+ // check is
+ // TD $4, R0, arg (R0 is always zero)
+ // eqv. to:
+ // tdeq r0, arg
+ // NOTE: this needs special runtime support to make SIGTRAP recoverable.
+ reg = p->from.reg;
+ p->as = ATD;
+ p->from = p->to = p->from3 = zprog.from;
+ p->from.type = TYPE_CONST;
+ p->from.offset = 4;
+ p->from.reg = 0;
+ p->reg = REGZERO;
+ p->to.type = TYPE_REG;
+ p->to.reg = reg;
+ */
+ // check is
+ // CMP arg, R0
+ // BNE 2(PC) [likely]
+ // MOVD R0, 0(R0)
+ p1 = gc.Ctxt.NewProg()
+
+ p2 = gc.Ctxt.NewProg()
+ gc.Clearp(p1)
+ gc.Clearp(p2)
+ p1.Link = p2
+ p2.Link = p.Link
+ p.Link = p1
+ p1.Lineno = p.Lineno
+ p2.Lineno = p.Lineno
+ p1.Pc = 9999
+ p2.Pc = 9999
+ p.As = ppc64.ACMP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGZERO
+ p1.As = ppc64.ABNE
+
+ //p1->from.type = TYPE_CONST;
+ //p1->from.offset = 1; // likely
+ p1.To.Type = obj.TYPE_BRANCH
+
+ p1.To.Val = p2.Link
+
+ // crash by write to memory address 0.
+ p2.As = ppc64.AMOVD
+
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = ppc64.REGZERO
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = ppc64.REGZERO
+ p2.To.Offset = 0
+ }
+}
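
The delicate part of expandchecks is the list surgery: two fresh Progs must be threaded in after p without losing p.Link. Reduced to its shape, with a stand-in Prog type:

	type Prog struct {
		As   string // instruction mnemonic, e.g. "CMP", "BNE", "MOVD"
		Link *Prog
	}

	// spliceAfter inserts two new instructions immediately after p,
	// mirroring the p -> p1 -> p2 -> old p.Link rewiring in expandchecks.
	func spliceAfter(p *Prog, as1, as2 string) (p1, p2 *Prog) {
		p1 = &Prog{As: as1}
		p2 = &Prog{As: as2}
		p1.Link = p2
		p2.Link = p.Link
		p.Link = p1
		return p1, p2
	}
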
+
+// res = runtime.getg()
+func getg(res *gc.Node) {
+ var n1 gc.Node
+ gc.Nodreg(&n1, res.Type, ppc64.REGG)
+ gmove(&n1, res)
+}
diff --git a/src/cmd/compile/internal/mips64/gsubr.go b/src/cmd/compile/internal/mips64/gsubr.go
new file mode 100644
index 0000000..4ef928c
--- /dev/null
+++ b/src/cmd/compile/internal/mips64/gsubr.go
@@ -0,0 +1,1031 @@
+// Derived from Inferno utils/6c/txt.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/txt.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package ppc64
+
+import (
+ "cmd/compile/internal/big"
+ "cmd/compile/internal/gc"
+ "cmd/internal/obj"
+ "cmd/internal/obj/ppc64"
+ "fmt"
+)
+
+var resvd = []int{
+ ppc64.REGZERO,
+ ppc64.REGSP, // reserved for SP
+ // We need to preserve the C ABI TLS pointer because sigtramp
+ // may happen during C code and needs to access the g. C
+ // clobbers REGG, so if Go were to clobber REGTLS, sigtramp
+ // won't know which convention to use. By preserving REGTLS,
+ // we can just retrieve g from TLS when we aren't sure.
+ ppc64.REGTLS,
+
+ // TODO(austin): Consolidate REGTLS and REGG?
+ ppc64.REGG,
+ ppc64.REGTMP, // REGTMP
+ ppc64.FREGCVI,
+ ppc64.FREGZERO,
+ ppc64.FREGHALF,
+ ppc64.FREGONE,
+ ppc64.FREGTWO,
+}
+
+/*
+ * generate
+ * as $c, n
+ */
+func ginscon(as int, c int64, n2 *gc.Node) {
+ var n1 gc.Node
+
+ gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
+
+ if as != ppc64.AMOVD && (c < -ppc64.BIG || c > ppc64.BIG) || n2.Op != gc.OREGISTER || as == ppc64.AMULLD {
+ // cannot have more than 16-bit of immediate in ADD, etc.
+ // instead, MOV into register first.
+ var ntmp gc.Node
+ gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)
+
+ rawgins(ppc64.AMOVD, &n1, &ntmp)
+ rawgins(as, &ntmp, n2)
+ gc.Regfree(&ntmp)
+ return
+ }
+
+ rawgins(as, &n1, n2)
+}
+
+/*
+ * generate
+ * as n, $c (CMP/CMPU)
+ */
+func ginscon2(as int, n2 *gc.Node, c int64) {
+ var n1 gc.Node
+
+ gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
+
+ switch as {
+ default:
+ gc.Fatalf("ginscon2")
+
+ case ppc64.ACMP:
+ if -ppc64.BIG <= c && c <= ppc64.BIG {
+ rawgins(as, n2, &n1)
+ return
+ }
+
+ case ppc64.ACMPU:
+ if 0 <= c && c <= 2*ppc64.BIG {
+ rawgins(as, n2, &n1)
+ return
+ }
+ }
+
+ // MOV n1 into register first
+ var ntmp gc.Node
+ gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)
+
+ rawgins(ppc64.AMOVD, &n1, &ntmp)
+ rawgins(as, n2, &ntmp)
+ gc.Regfree(&ntmp)
+}
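
ginscon and ginscon2 both hinge on the same constraint: PPC64 arithmetic and compare instructions carry only a 16-bit immediate, sign-extended for CMP and zero-extended for CMPU. A sketch of the range test, where big stands in for the assembler's ppc64.BIG constant:

	const big = 1<<15 - 1 // illustrative; the real bound is ppc64.BIG

	func fitsImmediate(unsigned bool, c int64) bool {
		if unsigned {
			return 0 <= c && c <= 2*big // CMPU: 16-bit, zero-extended
		}
		return -big <= c && c <= big // CMP: 16-bit, sign-extended
	}
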
+
+func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
+ if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && n2.Op != gc.OLITERAL {
+ // Reverse comparison to place constant last.
+ op = gc.Brrev(op)
+ n1, n2 = n2, n1
+ }
+
+ var r1, r2, g1, g2 gc.Node
+ gc.Regalloc(&r1, t, n1)
+ gc.Regalloc(&g1, n1.Type, &r1)
+ gc.Cgen(n1, &g1)
+ gmove(&g1, &r1)
+ if gc.Isint[t.Etype] && gc.Isconst(n2, gc.CTINT) {
+ ginscon2(optoas(gc.OCMP, t), &r1, n2.Int())
+ } else {
+ gc.Regalloc(&r2, t, n2)
+ gc.Regalloc(&g2, n1.Type, &r2)
+ gc.Cgen(n2, &g2)
+ gmove(&g2, &r2)
+ rawgins(optoas(gc.OCMP, t), &r1, &r2)
+ gc.Regfree(&g2)
+ gc.Regfree(&r2)
+ }
+ gc.Regfree(&g1)
+ gc.Regfree(&r1)
+ return gc.Gbranch(optoas(op, t), nil, likely)
+}
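
ginscmp normalizes constant-first comparisons into register-first ones so the constant side can take the immediate path; the operator must be reversed (operands swapped), not negated. gc.Brrev's mapping, restated with strings for readability:

	func brrev(op string) string {
		switch op {
		case "<":
			return ">"
		case "<=":
			return ">="
		case ">":
			return "<"
		case ">=":
			return "<="
		}
		return op // == and != are their own reversals
	}
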
+
+// set up nodes representing 2^63
+var (
+ bigi gc.Node
+ bigf gc.Node
+ bignodes_did bool
+)
+
+func bignodes() {
+ if bignodes_did {
+ return
+ }
+ bignodes_did = true
+
+ var i big.Int
+ i.SetInt64(1)
+ i.Lsh(&i, 63)
+
+ gc.Nodconst(&bigi, gc.Types[gc.TUINT64], 0)
+ bigi.SetBigInt(&i)
+
+ bigi.Convconst(&bigf, gc.Types[gc.TFLOAT64])
+}
+
+/*
+ * generate move:
+ * t = f
+ * hard part is conversions.
+ */
+func gmove(f *gc.Node, t *gc.Node) {
+ if gc.Debug['M'] != 0 {
+ fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, obj.FmtLong), gc.Nconv(t, obj.FmtLong))
+ }
+
+ ft := int(gc.Simsimtype(f.Type))
+ tt := int(gc.Simsimtype(t.Type))
+	cvt := t.Type
+
+ if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
+ gc.Complexmove(f, t)
+ return
+ }
+
+ // cannot have two memory operands
+ var r2 gc.Node
+ var r1 gc.Node
+ var a int
+ if gc.Ismem(f) && gc.Ismem(t) {
+ goto hard
+ }
+
+ // convert constant to desired type
+ if f.Op == gc.OLITERAL {
+ var con gc.Node
+ switch tt {
+ default:
+ f.Convconst(&con, t.Type)
+
+ case gc.TINT32,
+ gc.TINT16,
+ gc.TINT8:
+ var con gc.Node
+ f.Convconst(&con, gc.Types[gc.TINT64])
+ var r1 gc.Node
+ gc.Regalloc(&r1, con.Type, t)
+ gins(ppc64.AMOVD, &con, &r1)
+ gmove(&r1, t)
+ gc.Regfree(&r1)
+ return
+
+ case gc.TUINT32,
+ gc.TUINT16,
+ gc.TUINT8:
+ var con gc.Node
+ f.Convconst(&con, gc.Types[gc.TUINT64])
+ var r1 gc.Node
+ gc.Regalloc(&r1, con.Type, t)
+ gins(ppc64.AMOVD, &con, &r1)
+ gmove(&r1, t)
+ gc.Regfree(&r1)
+ return
+ }
+
+ f = &con
+ ft = tt // so big switch will choose a simple mov
+
+ // constants can't move directly to memory.
+ if gc.Ismem(t) {
+ goto hard
+ }
+ }
+
+ // float constants come from memory.
+ //if(isfloat[tt])
+ // goto hard;
+
+ // 64-bit immediates are also from memory.
+ //if(isint[tt])
+ // goto hard;
+ //// 64-bit immediates are really 32-bit sign-extended
+ //// unless moving into a register.
+ //if(isint[tt]) {
+ // if(mpcmpfixfix(con.val.u.xval, minintval[TINT32]) < 0)
+ // goto hard;
+ // if(mpcmpfixfix(con.val.u.xval, maxintval[TINT32]) > 0)
+ // goto hard;
+ //}
+
+ // value -> value copy, only one memory operand.
+ // figure out the instruction to use.
+ // break out of switch for one-instruction gins.
+ // goto rdst for "destination must be register".
+ // goto hard for "convert to cvt type first".
+ // otherwise handle and return.
+
+ switch uint32(ft)<<16 | uint32(tt) {
+ default:
+ gc.Fatalf("gmove %v -> %v", gc.Tconv(f.Type, obj.FmtLong), gc.Tconv(t.Type, obj.FmtLong))
+
+ /*
+ * integer copy and truncate
+ */
+ case gc.TINT8<<16 | gc.TINT8, // same size
+ gc.TUINT8<<16 | gc.TINT8,
+ gc.TINT16<<16 | gc.TINT8,
+ // truncate
+ gc.TUINT16<<16 | gc.TINT8,
+ gc.TINT32<<16 | gc.TINT8,
+ gc.TUINT32<<16 | gc.TINT8,
+ gc.TINT64<<16 | gc.TINT8,
+ gc.TUINT64<<16 | gc.TINT8:
+ a = ppc64.AMOVB
+
+ case gc.TINT8<<16 | gc.TUINT8, // same size
+ gc.TUINT8<<16 | gc.TUINT8,
+ gc.TINT16<<16 | gc.TUINT8,
+ // truncate
+ gc.TUINT16<<16 | gc.TUINT8,
+ gc.TINT32<<16 | gc.TUINT8,
+ gc.TUINT32<<16 | gc.TUINT8,
+ gc.TINT64<<16 | gc.TUINT8,
+ gc.TUINT64<<16 | gc.TUINT8:
+ a = ppc64.AMOVBZ
+
+ case gc.TINT16<<16 | gc.TINT16, // same size
+ gc.TUINT16<<16 | gc.TINT16,
+ gc.TINT32<<16 | gc.TINT16,
+ // truncate
+ gc.TUINT32<<16 | gc.TINT16,
+ gc.TINT64<<16 | gc.TINT16,
+ gc.TUINT64<<16 | gc.TINT16:
+ a = ppc64.AMOVH
+
+ case gc.TINT16<<16 | gc.TUINT16, // same size
+ gc.TUINT16<<16 | gc.TUINT16,
+ gc.TINT32<<16 | gc.TUINT16,
+ // truncate
+ gc.TUINT32<<16 | gc.TUINT16,
+ gc.TINT64<<16 | gc.TUINT16,
+ gc.TUINT64<<16 | gc.TUINT16:
+ a = ppc64.AMOVHZ
+
+ case gc.TINT32<<16 | gc.TINT32, // same size
+ gc.TUINT32<<16 | gc.TINT32,
+ gc.TINT64<<16 | gc.TINT32,
+ // truncate
+ gc.TUINT64<<16 | gc.TINT32:
+ a = ppc64.AMOVW
+
+ case gc.TINT32<<16 | gc.TUINT32, // same size
+ gc.TUINT32<<16 | gc.TUINT32,
+ gc.TINT64<<16 | gc.TUINT32,
+ gc.TUINT64<<16 | gc.TUINT32:
+ a = ppc64.AMOVWZ
+
+ case gc.TINT64<<16 | gc.TINT64, // same size
+ gc.TINT64<<16 | gc.TUINT64,
+ gc.TUINT64<<16 | gc.TINT64,
+ gc.TUINT64<<16 | gc.TUINT64:
+ a = ppc64.AMOVD
+
+ /*
+ * integer up-conversions
+ */
+ case gc.TINT8<<16 | gc.TINT16, // sign extend int8
+ gc.TINT8<<16 | gc.TUINT16,
+ gc.TINT8<<16 | gc.TINT32,
+ gc.TINT8<<16 | gc.TUINT32,
+ gc.TINT8<<16 | gc.TINT64,
+ gc.TINT8<<16 | gc.TUINT64:
+ a = ppc64.AMOVB
+
+ goto rdst
+
+ case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8
+ gc.TUINT8<<16 | gc.TUINT16,
+ gc.TUINT8<<16 | gc.TINT32,
+ gc.TUINT8<<16 | gc.TUINT32,
+ gc.TUINT8<<16 | gc.TINT64,
+ gc.TUINT8<<16 | gc.TUINT64:
+ a = ppc64.AMOVBZ
+
+ goto rdst
+
+ case gc.TINT16<<16 | gc.TINT32, // sign extend int16
+ gc.TINT16<<16 | gc.TUINT32,
+ gc.TINT16<<16 | gc.TINT64,
+ gc.TINT16<<16 | gc.TUINT64:
+ a = ppc64.AMOVH
+
+ goto rdst
+
+ case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16
+ gc.TUINT16<<16 | gc.TUINT32,
+ gc.TUINT16<<16 | gc.TINT64,
+ gc.TUINT16<<16 | gc.TUINT64:
+ a = ppc64.AMOVHZ
+
+ goto rdst
+
+ case gc.TINT32<<16 | gc.TINT64, // sign extend int32
+ gc.TINT32<<16 | gc.TUINT64:
+ a = ppc64.AMOVW
+
+ goto rdst
+
+ case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
+ gc.TUINT32<<16 | gc.TUINT64:
+ a = ppc64.AMOVWZ
+
+ goto rdst
+
+ //warn("gmove: convert float to int not implemented: %N -> %N\n", f, t);
+ //return;
+ // algorithm is:
+ // if small enough, use native float64 -> int64 conversion.
+ // otherwise, subtract 2^63, convert, and add it back.
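+	// In Go terms, the unsigned path below is roughly (a sketch, not
+	// the emitted code):
+	//	if x < 2^63 { return uint64(int64(x)) }
+	//	return uint64(int64(x - 2^63)) + 2^63
+	// because FCTIDZ only produces signed 64-bit results.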
+ /*
+ * float to integer
+ */
+ case gc.TFLOAT32<<16 | gc.TINT32,
+ gc.TFLOAT64<<16 | gc.TINT32,
+ gc.TFLOAT32<<16 | gc.TINT64,
+ gc.TFLOAT64<<16 | gc.TINT64,
+ gc.TFLOAT32<<16 | gc.TINT16,
+ gc.TFLOAT32<<16 | gc.TINT8,
+ gc.TFLOAT32<<16 | gc.TUINT16,
+ gc.TFLOAT32<<16 | gc.TUINT8,
+ gc.TFLOAT64<<16 | gc.TINT16,
+ gc.TFLOAT64<<16 | gc.TINT8,
+ gc.TFLOAT64<<16 | gc.TUINT16,
+ gc.TFLOAT64<<16 | gc.TUINT8,
+ gc.TFLOAT32<<16 | gc.TUINT32,
+ gc.TFLOAT64<<16 | gc.TUINT32,
+ gc.TFLOAT32<<16 | gc.TUINT64,
+ gc.TFLOAT64<<16 | gc.TUINT64:
+ bignodes()
+
+ var r1 gc.Node
+ gc.Regalloc(&r1, gc.Types[ft], f)
+ gmove(f, &r1)
+ if tt == gc.TUINT64 {
+ gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], nil)
+ gmove(&bigf, &r2)
+ gins(ppc64.AFCMPU, &r1, &r2)
+		p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1)
+ gins(ppc64.AFSUB, &r2, &r1)
+ gc.Patch(p1, gc.Pc)
+ gc.Regfree(&r2)
+ }
+
+ gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], nil)
+ var r3 gc.Node
+ gc.Regalloc(&r3, gc.Types[gc.TINT64], t)
+ gins(ppc64.AFCTIDZ, &r1, &r2)
+	p1 := gins(ppc64.AFMOVD, &r2, nil)
+ p1.To.Type = obj.TYPE_MEM
+ p1.To.Reg = ppc64.REGSP
+ p1.To.Offset = -8
+ p1 = gins(ppc64.AMOVD, nil, &r3)
+ p1.From.Type = obj.TYPE_MEM
+ p1.From.Reg = ppc64.REGSP
+ p1.From.Offset = -8
+ gc.Regfree(&r2)
+ gc.Regfree(&r1)
+ if tt == gc.TUINT64 {
+		p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1) // use CR0 here again
+ gc.Nodreg(&r1, gc.Types[gc.TINT64], ppc64.REGTMP)
+ gins(ppc64.AMOVD, &bigi, &r1)
+ gins(ppc64.AADD, &r1, &r3)
+ gc.Patch(p1, gc.Pc)
+ }
+
+ gmove(&r3, t)
+ gc.Regfree(&r3)
+ return
+
+ //warn("gmove: convert int to float not implemented: %N -> %N\n", f, t);
+ //return;
+ // algorithm is:
+	// if small enough, use native int64 -> float64 conversion.
+ // otherwise, halve (rounding to odd?), convert, and double.
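+	// In Go terms, the unsigned path below is roughly (a sketch; the
+	// halving drops the low bit, hence the rounding question above):
+	//	if x < 2^63 { return float64(int64(x)) }
+	//	return float64(int64(x>>1)) * 2
+	// because FCFID treats its operand as signed.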
+ /*
+ * integer to float
+ */
+ case gc.TINT32<<16 | gc.TFLOAT32,
+ gc.TINT32<<16 | gc.TFLOAT64,
+ gc.TINT64<<16 | gc.TFLOAT32,
+ gc.TINT64<<16 | gc.TFLOAT64,
+ gc.TINT16<<16 | gc.TFLOAT32,
+ gc.TINT16<<16 | gc.TFLOAT64,
+ gc.TINT8<<16 | gc.TFLOAT32,
+ gc.TINT8<<16 | gc.TFLOAT64,
+ gc.TUINT16<<16 | gc.TFLOAT32,
+ gc.TUINT16<<16 | gc.TFLOAT64,
+ gc.TUINT8<<16 | gc.TFLOAT32,
+ gc.TUINT8<<16 | gc.TFLOAT64,
+ gc.TUINT32<<16 | gc.TFLOAT32,
+ gc.TUINT32<<16 | gc.TFLOAT64,
+ gc.TUINT64<<16 | gc.TFLOAT32,
+ gc.TUINT64<<16 | gc.TFLOAT64:
+ bignodes()
+
+ var r1 gc.Node
+ gc.Regalloc(&r1, gc.Types[gc.TINT64], nil)
+ gmove(f, &r1)
+ if ft == gc.TUINT64 {
+ gc.Nodreg(&r2, gc.Types[gc.TUINT64], ppc64.REGTMP)
+ gmove(&bigi, &r2)
+ gins(ppc64.ACMPU, &r1, &r2)
+		p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1)
+		p2 := gins(ppc64.ASRD, nil, &r1)
+ p2.From.Type = obj.TYPE_CONST
+ p2.From.Offset = 1
+ gc.Patch(p1, gc.Pc)
+ }
+
+ gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], t)
+	p1 := gins(ppc64.AMOVD, &r1, nil)
+ p1.To.Type = obj.TYPE_MEM
+ p1.To.Reg = ppc64.REGSP
+ p1.To.Offset = -8
+ p1 = gins(ppc64.AFMOVD, nil, &r2)
+ p1.From.Type = obj.TYPE_MEM
+ p1.From.Reg = ppc64.REGSP
+ p1.From.Offset = -8
+ gins(ppc64.AFCFID, &r2, &r2)
+ gc.Regfree(&r1)
+ if ft == gc.TUINT64 {
+		p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1) // use CR0 here again
+ gc.Nodreg(&r1, gc.Types[gc.TFLOAT64], ppc64.FREGTWO)
+ gins(ppc64.AFMUL, &r1, &r2)
+ gc.Patch(p1, gc.Pc)
+ }
+
+ gmove(&r2, t)
+ gc.Regfree(&r2)
+ return
+
+ /*
+ * float to float
+ */
+ case gc.TFLOAT32<<16 | gc.TFLOAT32:
+ a = ppc64.AFMOVS
+
+ case gc.TFLOAT64<<16 | gc.TFLOAT64:
+ a = ppc64.AFMOVD
+
+ case gc.TFLOAT32<<16 | gc.TFLOAT64:
+ a = ppc64.AFMOVS
+ goto rdst
+
+ case gc.TFLOAT64<<16 | gc.TFLOAT32:
+ a = ppc64.AFRSP
+ goto rdst
+ }
+
+ gins(a, f, t)
+ return
+
+ // requires register destination
+rdst:
+ {
+ gc.Regalloc(&r1, t.Type, t)
+
+ gins(a, f, &r1)
+ gmove(&r1, t)
+ gc.Regfree(&r1)
+ return
+ }
+
+ // requires register intermediate
+hard:
+ gc.Regalloc(&r1, cvt, t)
+
+ gmove(f, &r1)
+ gmove(&r1, t)
+ gc.Regfree(&r1)
+ return
+}
+
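+// intLiteral returns the value of n when n is an integer or boolean
+// constant, with ok reporting whether it was such a constant.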
+func intLiteral(n *gc.Node) (x int64, ok bool) {
+ switch {
+ case n == nil:
+ return
+ case gc.Isconst(n, gc.CTINT):
+ return n.Int(), true
+ case gc.Isconst(n, gc.CTBOOL):
+ return int64(obj.Bool2int(n.Bool())), true
+ }
+ return
+}
+
+// gins is called by the front end.
+// It synthesizes some multiple-instruction sequences
+// so the front end can stay simpler.
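+// For example, an arch-specific op with a constant source is routed
+// through ginscon, which may first materialize the constant in a
+// scratch register with MOVD.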
+func gins(as int, f, t *gc.Node) *obj.Prog {
+ if as >= obj.A_ARCHSPECIFIC {
+ if x, ok := intLiteral(f); ok {
+ ginscon(as, x, t)
+ return nil // caller must not use
+ }
+ }
+ if as == ppc64.ACMP || as == ppc64.ACMPU {
+ if x, ok := intLiteral(t); ok {
+ ginscon2(as, f, x)
+ return nil // caller must not use
+ }
+ }
+ return rawgins(as, f, t)
+}
+
+/*
+ * generate one instruction:
+ * as f, t
+ */
+func rawgins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
+ // TODO(austin): Add self-move test like in 6g (but be careful
+ // of truncation moves)
+
+ p := gc.Prog(as)
+ gc.Naddr(&p.From, f)
+ gc.Naddr(&p.To, t)
+
+ switch as {
+ case obj.ACALL:
+ if p.To.Type == obj.TYPE_REG && p.To.Reg != ppc64.REG_CTR {
+ // Allow front end to emit CALL REG, and rewrite into MOV REG, CTR; CALL CTR.
+ pp := gc.Prog(as)
+ pp.From = p.From
+ pp.To.Type = obj.TYPE_REG
+ pp.To.Reg = ppc64.REG_CTR
+
+ p.As = ppc64.AMOVD
+ p.From = p.To
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_CTR
+
+ if gc.Debug['g'] != 0 {
+ fmt.Printf("%v\n", p)
+ fmt.Printf("%v\n", pp)
+ }
+
+ return pp
+ }
+
+ // Bad things the front end has done to us. Crash to find call stack.
+ case ppc64.AAND, ppc64.AMULLD:
+ if p.From.Type == obj.TYPE_CONST {
+ gc.Debug['h'] = 1
+ gc.Fatalf("bad inst: %v", p)
+ }
+ case ppc64.ACMP, ppc64.ACMPU:
+ if p.From.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_MEM {
+ gc.Debug['h'] = 1
+ gc.Fatalf("bad inst: %v", p)
+ }
+ }
+
+ if gc.Debug['g'] != 0 {
+ fmt.Printf("%v\n", p)
+ }
+
+ w := int32(0)
+ switch as {
+ case ppc64.AMOVB,
+ ppc64.AMOVBU,
+ ppc64.AMOVBZ,
+ ppc64.AMOVBZU:
+ w = 1
+
+ case ppc64.AMOVH,
+ ppc64.AMOVHU,
+ ppc64.AMOVHZ,
+ ppc64.AMOVHZU:
+ w = 2
+
+ case ppc64.AMOVW,
+ ppc64.AMOVWU,
+ ppc64.AMOVWZ,
+ ppc64.AMOVWZU:
+ w = 4
+
+ case ppc64.AMOVD,
+ ppc64.AMOVDU:
+ if p.From.Type == obj.TYPE_CONST || p.From.Type == obj.TYPE_ADDR {
+ break
+ }
+ w = 8
+ }
+
+ if w != 0 && ((f != nil && p.From.Width < int64(w)) || (t != nil && p.To.Type != obj.TYPE_REG && p.To.Width > int64(w))) {
+ gc.Dump("f", f)
+ gc.Dump("t", t)
+ gc.Fatalf("bad width: %v (%d, %d)\n", p, p.From.Width, p.To.Width)
+ }
+
+ return p
+}
+
+/*
+ * return Axxx for Oxxx on type t.
+ */
+func optoas(op int, t *gc.Type) int {
+ if t == nil {
+ gc.Fatalf("optoas: t is nil")
+ }
+
+ a := int(obj.AXXX)
+ switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
+ default:
+ gc.Fatalf("optoas: no entry for op=%v type=%v", gc.Oconv(int(op), 0), t)
+
+ case gc.OEQ<<16 | gc.TBOOL,
+ gc.OEQ<<16 | gc.TINT8,
+ gc.OEQ<<16 | gc.TUINT8,
+ gc.OEQ<<16 | gc.TINT16,
+ gc.OEQ<<16 | gc.TUINT16,
+ gc.OEQ<<16 | gc.TINT32,
+ gc.OEQ<<16 | gc.TUINT32,
+ gc.OEQ<<16 | gc.TINT64,
+ gc.OEQ<<16 | gc.TUINT64,
+ gc.OEQ<<16 | gc.TPTR32,
+ gc.OEQ<<16 | gc.TPTR64,
+ gc.OEQ<<16 | gc.TFLOAT32,
+ gc.OEQ<<16 | gc.TFLOAT64:
+ a = ppc64.ABEQ
+
+ case gc.ONE<<16 | gc.TBOOL,
+ gc.ONE<<16 | gc.TINT8,
+ gc.ONE<<16 | gc.TUINT8,
+ gc.ONE<<16 | gc.TINT16,
+ gc.ONE<<16 | gc.TUINT16,
+ gc.ONE<<16 | gc.TINT32,
+ gc.ONE<<16 | gc.TUINT32,
+ gc.ONE<<16 | gc.TINT64,
+ gc.ONE<<16 | gc.TUINT64,
+ gc.ONE<<16 | gc.TPTR32,
+ gc.ONE<<16 | gc.TPTR64,
+ gc.ONE<<16 | gc.TFLOAT32,
+ gc.ONE<<16 | gc.TFLOAT64:
+ a = ppc64.ABNE
+
+ case gc.OLT<<16 | gc.TINT8, // ACMP
+ gc.OLT<<16 | gc.TINT16,
+ gc.OLT<<16 | gc.TINT32,
+ gc.OLT<<16 | gc.TINT64,
+ gc.OLT<<16 | gc.TUINT8,
+ // ACMPU
+ gc.OLT<<16 | gc.TUINT16,
+ gc.OLT<<16 | gc.TUINT32,
+ gc.OLT<<16 | gc.TUINT64,
+ gc.OLT<<16 | gc.TFLOAT32,
+ // AFCMPU
+ gc.OLT<<16 | gc.TFLOAT64:
+ a = ppc64.ABLT
+
+ case gc.OLE<<16 | gc.TINT8, // ACMP
+ gc.OLE<<16 | gc.TINT16,
+ gc.OLE<<16 | gc.TINT32,
+ gc.OLE<<16 | gc.TINT64,
+ gc.OLE<<16 | gc.TUINT8,
+ // ACMPU
+ gc.OLE<<16 | gc.TUINT16,
+ gc.OLE<<16 | gc.TUINT32,
+ gc.OLE<<16 | gc.TUINT64:
+ // No OLE for floats, because it mishandles NaN.
+ // Front end must reverse comparison or use OLT and OEQ together.
+ a = ppc64.ABLE
+
+ case gc.OGT<<16 | gc.TINT8,
+ gc.OGT<<16 | gc.TINT16,
+ gc.OGT<<16 | gc.TINT32,
+ gc.OGT<<16 | gc.TINT64,
+ gc.OGT<<16 | gc.TUINT8,
+ gc.OGT<<16 | gc.TUINT16,
+ gc.OGT<<16 | gc.TUINT32,
+ gc.OGT<<16 | gc.TUINT64,
+ gc.OGT<<16 | gc.TFLOAT32,
+ gc.OGT<<16 | gc.TFLOAT64:
+ a = ppc64.ABGT
+
+ case gc.OGE<<16 | gc.TINT8,
+ gc.OGE<<16 | gc.TINT16,
+ gc.OGE<<16 | gc.TINT32,
+ gc.OGE<<16 | gc.TINT64,
+ gc.OGE<<16 | gc.TUINT8,
+ gc.OGE<<16 | gc.TUINT16,
+ gc.OGE<<16 | gc.TUINT32,
+ gc.OGE<<16 | gc.TUINT64:
+ // No OGE for floats, because it mishandles NaN.
+ // Front end must reverse comparison or use OLT and OEQ together.
+ a = ppc64.ABGE
+
+ case gc.OCMP<<16 | gc.TBOOL,
+ gc.OCMP<<16 | gc.TINT8,
+ gc.OCMP<<16 | gc.TINT16,
+ gc.OCMP<<16 | gc.TINT32,
+ gc.OCMP<<16 | gc.TPTR32,
+ gc.OCMP<<16 | gc.TINT64:
+ a = ppc64.ACMP
+
+ case gc.OCMP<<16 | gc.TUINT8,
+ gc.OCMP<<16 | gc.TUINT16,
+ gc.OCMP<<16 | gc.TUINT32,
+ gc.OCMP<<16 | gc.TUINT64,
+ gc.OCMP<<16 | gc.TPTR64:
+ a = ppc64.ACMPU
+
+ case gc.OCMP<<16 | gc.TFLOAT32,
+ gc.OCMP<<16 | gc.TFLOAT64:
+ a = ppc64.AFCMPU
+
+ case gc.OAS<<16 | gc.TBOOL,
+ gc.OAS<<16 | gc.TINT8:
+ a = ppc64.AMOVB
+
+ case gc.OAS<<16 | gc.TUINT8:
+ a = ppc64.AMOVBZ
+
+ case gc.OAS<<16 | gc.TINT16:
+ a = ppc64.AMOVH
+
+ case gc.OAS<<16 | gc.TUINT16:
+ a = ppc64.AMOVHZ
+
+ case gc.OAS<<16 | gc.TINT32:
+ a = ppc64.AMOVW
+
+ case gc.OAS<<16 | gc.TUINT32,
+ gc.OAS<<16 | gc.TPTR32:
+ a = ppc64.AMOVWZ
+
+ case gc.OAS<<16 | gc.TINT64,
+ gc.OAS<<16 | gc.TUINT64,
+ gc.OAS<<16 | gc.TPTR64:
+ a = ppc64.AMOVD
+
+ case gc.OAS<<16 | gc.TFLOAT32:
+ a = ppc64.AFMOVS
+
+ case gc.OAS<<16 | gc.TFLOAT64:
+ a = ppc64.AFMOVD
+
+ case gc.OADD<<16 | gc.TINT8,
+ gc.OADD<<16 | gc.TUINT8,
+ gc.OADD<<16 | gc.TINT16,
+ gc.OADD<<16 | gc.TUINT16,
+ gc.OADD<<16 | gc.TINT32,
+ gc.OADD<<16 | gc.TUINT32,
+ gc.OADD<<16 | gc.TPTR32,
+ gc.OADD<<16 | gc.TINT64,
+ gc.OADD<<16 | gc.TUINT64,
+ gc.OADD<<16 | gc.TPTR64:
+ a = ppc64.AADD
+
+ case gc.OADD<<16 | gc.TFLOAT32:
+ a = ppc64.AFADDS
+
+ case gc.OADD<<16 | gc.TFLOAT64:
+ a = ppc64.AFADD
+
+ case gc.OSUB<<16 | gc.TINT8,
+ gc.OSUB<<16 | gc.TUINT8,
+ gc.OSUB<<16 | gc.TINT16,
+ gc.OSUB<<16 | gc.TUINT16,
+ gc.OSUB<<16 | gc.TINT32,
+ gc.OSUB<<16 | gc.TUINT32,
+ gc.OSUB<<16 | gc.TPTR32,
+ gc.OSUB<<16 | gc.TINT64,
+ gc.OSUB<<16 | gc.TUINT64,
+ gc.OSUB<<16 | gc.TPTR64:
+ a = ppc64.ASUB
+
+ case gc.OSUB<<16 | gc.TFLOAT32:
+ a = ppc64.AFSUBS
+
+ case gc.OSUB<<16 | gc.TFLOAT64:
+ a = ppc64.AFSUB
+
+ case gc.OMINUS<<16 | gc.TINT8,
+ gc.OMINUS<<16 | gc.TUINT8,
+ gc.OMINUS<<16 | gc.TINT16,
+ gc.OMINUS<<16 | gc.TUINT16,
+ gc.OMINUS<<16 | gc.TINT32,
+ gc.OMINUS<<16 | gc.TUINT32,
+ gc.OMINUS<<16 | gc.TPTR32,
+ gc.OMINUS<<16 | gc.TINT64,
+ gc.OMINUS<<16 | gc.TUINT64,
+ gc.OMINUS<<16 | gc.TPTR64:
+ a = ppc64.ANEG
+
+ case gc.OAND<<16 | gc.TINT8,
+ gc.OAND<<16 | gc.TUINT8,
+ gc.OAND<<16 | gc.TINT16,
+ gc.OAND<<16 | gc.TUINT16,
+ gc.OAND<<16 | gc.TINT32,
+ gc.OAND<<16 | gc.TUINT32,
+ gc.OAND<<16 | gc.TPTR32,
+ gc.OAND<<16 | gc.TINT64,
+ gc.OAND<<16 | gc.TUINT64,
+ gc.OAND<<16 | gc.TPTR64:
+ a = ppc64.AAND
+
+ case gc.OOR<<16 | gc.TINT8,
+ gc.OOR<<16 | gc.TUINT8,
+ gc.OOR<<16 | gc.TINT16,
+ gc.OOR<<16 | gc.TUINT16,
+ gc.OOR<<16 | gc.TINT32,
+ gc.OOR<<16 | gc.TUINT32,
+ gc.OOR<<16 | gc.TPTR32,
+ gc.OOR<<16 | gc.TINT64,
+ gc.OOR<<16 | gc.TUINT64,
+ gc.OOR<<16 | gc.TPTR64:
+ a = ppc64.AOR
+
+ case gc.OXOR<<16 | gc.TINT8,
+ gc.OXOR<<16 | gc.TUINT8,
+ gc.OXOR<<16 | gc.TINT16,
+ gc.OXOR<<16 | gc.TUINT16,
+ gc.OXOR<<16 | gc.TINT32,
+ gc.OXOR<<16 | gc.TUINT32,
+ gc.OXOR<<16 | gc.TPTR32,
+ gc.OXOR<<16 | gc.TINT64,
+ gc.OXOR<<16 | gc.TUINT64,
+ gc.OXOR<<16 | gc.TPTR64:
+ a = ppc64.AXOR
+
+ // TODO(minux): handle rotates
+ //case CASE(OLROT, TINT8):
+ //case CASE(OLROT, TUINT8):
+ //case CASE(OLROT, TINT16):
+ //case CASE(OLROT, TUINT16):
+ //case CASE(OLROT, TINT32):
+ //case CASE(OLROT, TUINT32):
+ //case CASE(OLROT, TPTR32):
+ //case CASE(OLROT, TINT64):
+ //case CASE(OLROT, TUINT64):
+ //case CASE(OLROT, TPTR64):
+ // a = 0//???; RLDC?
+ // break;
+
+ case gc.OLSH<<16 | gc.TINT8,
+ gc.OLSH<<16 | gc.TUINT8,
+ gc.OLSH<<16 | gc.TINT16,
+ gc.OLSH<<16 | gc.TUINT16,
+ gc.OLSH<<16 | gc.TINT32,
+ gc.OLSH<<16 | gc.TUINT32,
+ gc.OLSH<<16 | gc.TPTR32,
+ gc.OLSH<<16 | gc.TINT64,
+ gc.OLSH<<16 | gc.TUINT64,
+ gc.OLSH<<16 | gc.TPTR64:
+ a = ppc64.ASLD
+
+ case gc.ORSH<<16 | gc.TUINT8,
+ gc.ORSH<<16 | gc.TUINT16,
+ gc.ORSH<<16 | gc.TUINT32,
+ gc.ORSH<<16 | gc.TPTR32,
+ gc.ORSH<<16 | gc.TUINT64,
+ gc.ORSH<<16 | gc.TPTR64:
+ a = ppc64.ASRD
+
+ case gc.ORSH<<16 | gc.TINT8,
+ gc.ORSH<<16 | gc.TINT16,
+ gc.ORSH<<16 | gc.TINT32,
+ gc.ORSH<<16 | gc.TINT64:
+ a = ppc64.ASRAD
+
+ // TODO(minux): handle rotates
+ //case CASE(ORROTC, TINT8):
+ //case CASE(ORROTC, TUINT8):
+ //case CASE(ORROTC, TINT16):
+ //case CASE(ORROTC, TUINT16):
+ //case CASE(ORROTC, TINT32):
+ //case CASE(ORROTC, TUINT32):
+ //case CASE(ORROTC, TINT64):
+ //case CASE(ORROTC, TUINT64):
+ // a = 0//??? RLDC??
+ // break;
+
+ case gc.OHMUL<<16 | gc.TINT64:
+ a = ppc64.AMULHD
+
+ case gc.OHMUL<<16 | gc.TUINT64,
+ gc.OHMUL<<16 | gc.TPTR64:
+ a = ppc64.AMULHDU
+
+ case gc.OMUL<<16 | gc.TINT8,
+ gc.OMUL<<16 | gc.TINT16,
+ gc.OMUL<<16 | gc.TINT32,
+ gc.OMUL<<16 | gc.TINT64:
+ a = ppc64.AMULLD
+
+ case gc.OMUL<<16 | gc.TUINT8,
+ gc.OMUL<<16 | gc.TUINT16,
+ gc.OMUL<<16 | gc.TUINT32,
+ gc.OMUL<<16 | gc.TPTR32,
+		// don't use word multiply, the high 32 bits are undefined.
+ gc.OMUL<<16 | gc.TUINT64,
+ gc.OMUL<<16 | gc.TPTR64:
+ // for 64-bit multiplies, signedness doesn't matter.
+ a = ppc64.AMULLD
+
+ case gc.OMUL<<16 | gc.TFLOAT32:
+ a = ppc64.AFMULS
+
+ case gc.OMUL<<16 | gc.TFLOAT64:
+ a = ppc64.AFMUL
+
+ case gc.ODIV<<16 | gc.TINT8,
+ gc.ODIV<<16 | gc.TINT16,
+ gc.ODIV<<16 | gc.TINT32,
+ gc.ODIV<<16 | gc.TINT64:
+ a = ppc64.ADIVD
+
+ case gc.ODIV<<16 | gc.TUINT8,
+ gc.ODIV<<16 | gc.TUINT16,
+ gc.ODIV<<16 | gc.TUINT32,
+ gc.ODIV<<16 | gc.TPTR32,
+ gc.ODIV<<16 | gc.TUINT64,
+ gc.ODIV<<16 | gc.TPTR64:
+ a = ppc64.ADIVDU
+
+ case gc.ODIV<<16 | gc.TFLOAT32:
+ a = ppc64.AFDIVS
+
+ case gc.ODIV<<16 | gc.TFLOAT64:
+ a = ppc64.AFDIV
+ }
+
+ return a
+}
+
+const (
+ ODynam = 1 << 0
+ OAddable = 1 << 1
+)
+
+func xgen(n *gc.Node, a *gc.Node, o int) bool {
+ // TODO(minux)
+
+	return true
+}
+
+func sudoclean() {
+}
+
+/*
+ * generate code to compute address of n,
+ * a reference to a (perhaps nested) field inside
+ * an array or struct.
 * return false on failure, true on success.
+ * on success, leaves usable address in a.
+ *
+ * caller is responsible for calling sudoclean
+ * after successful sudoaddable,
+ * to release the register used for a.
+ */
+func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
+ // TODO(minux)
+
+ *a = obj.Addr{}
+ return false
+}
diff --git a/src/cmd/compile/internal/mips64/opt.go b/src/cmd/compile/internal/mips64/opt.go
new file mode 100644
index 0000000..1704f63
--- /dev/null
+++ b/src/cmd/compile/internal/mips64/opt.go
@@ -0,0 +1,12 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ppc64
+
+// Many Power ISA arithmetic and logical instructions come in four
+// standard variants. These bits let us map between variants.
+const (
+ V_CC = 1 << 0 // xCC (affect CR field 0 flags)
+ V_V = 1 << 1 // xV (affect SO and OV flags)
+)
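+
+// For example, AADDCC is AADD with V_CC set, AADDV is AADD with V_V
+// set, and AADDVCC is AADD with both (see varianttable in prog.go).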
diff --git a/src/cmd/compile/internal/mips64/peep.go b/src/cmd/compile/internal/mips64/peep.go
new file mode 100644
index 0000000..9c3f1ed
--- /dev/null
+++ b/src/cmd/compile/internal/mips64/peep.go
@@ -0,0 +1,1051 @@
+// Derived from Inferno utils/6c/peep.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/peep.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package ppc64
+
+import (
+ "cmd/compile/internal/gc"
+ "cmd/internal/obj"
+ "cmd/internal/obj/ppc64"
+ "fmt"
+)
+
+var gactive uint32
+
+func peep(firstp *obj.Prog) {
+	g := gc.Flowstart(firstp, nil)
+ if g == nil {
+ return
+ }
+ gactive = 0
+
+ var p *obj.Prog
+ var r *gc.Flow
+ var t int
+loop1:
+ if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+ gc.Dumpit("loop1", g.Start, 0)
+ }
+
+ t = 0
+ for r = g.Start; r != nil; r = r.Link {
+ p = r.Prog
+
+ // TODO(austin) Handle smaller moves. arm and amd64
+		// distinguish between moves that *must*
+ // sign/zero extend and moves that don't care so they
+ // can eliminate moves that don't care without
+ // breaking moves that do care. This might let us
+ // simplify or remove the next peep loop, too.
+ if p.As == ppc64.AMOVD || p.As == ppc64.AFMOVD {
+ if regtyp(&p.To) {
+ // Try to eliminate reg->reg moves
+ if regtyp(&p.From) {
+ if p.From.Type == p.To.Type {
+ if copyprop(r) {
+ excise(r)
+ t++
+ } else if subprop(r) && copyprop(r) {
+ excise(r)
+ t++
+ }
+ }
+ }
+
+			// Convert uses of $0 to uses of R0 and
+ // propagate R0
+ if regzer(&p.From) != 0 {
+ if p.To.Type == obj.TYPE_REG {
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REGZERO
+ if copyprop(r) {
+ excise(r)
+ t++
+ } else if subprop(r) && copyprop(r) {
+ excise(r)
+ t++
+ }
+ }
+ }
+ }
+ }
+ }
+
+ if t != 0 {
+ goto loop1
+ }
+
+ /*
+ * look for MOVB x,R; MOVB R,R (for small MOVs not handled above)
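	 * e.g. MOVB x, R3; MOVB R3, R3: the second move only re-extends
	 * the value the first already produced, so it is excised.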
+ */
+ var p1 *obj.Prog
+ var r1 *gc.Flow
+	for r := g.Start; r != nil; r = r.Link {
+ p = r.Prog
+ switch p.As {
+ default:
+ continue
+
+ case ppc64.AMOVH,
+ ppc64.AMOVHZ,
+ ppc64.AMOVB,
+ ppc64.AMOVBZ,
+ ppc64.AMOVW,
+ ppc64.AMOVWZ:
+ if p.To.Type != obj.TYPE_REG {
+ continue
+ }
+ }
+
+ r1 = r.Link
+ if r1 == nil {
+ continue
+ }
+ p1 = r1.Prog
+ if p1.As != p.As {
+ continue
+ }
+ if p1.From.Type != obj.TYPE_REG || p1.From.Reg != p.To.Reg {
+ continue
+ }
+ if p1.To.Type != obj.TYPE_REG || p1.To.Reg != p.To.Reg {
+ continue
+ }
+ excise(r1)
+ }
+
+ if gc.Debug['D'] > 1 {
+ goto ret /* allow following code improvement to be suppressed */
+ }
+
+ /*
+ * look for OP x,y,R; CMP R, $0 -> OPCC x,y,R
+ * when OP can set condition codes correctly
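	 * e.g. ADD R4, R5, R6; CMP R6, $0; BEQ ... becomes
	 * ADDCC R4, R5, R6; BEQ ..., saving the compare.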
+ */
+	for r := g.Start; r != nil; r = r.Link {
+ p = r.Prog
+ switch p.As {
+ case ppc64.ACMP,
+ ppc64.ACMPW: /* always safe? */
+ if regzer(&p.To) == 0 {
+ continue
+ }
+ r1 = r.S1
+ if r1 == nil {
+ continue
+ }
+ switch r1.Prog.As {
+ default:
+ continue
+
+ /* the conditions can be complex and these are currently little used */
+ case ppc64.ABCL,
+ ppc64.ABC:
+ continue
+
+ case ppc64.ABEQ,
+ ppc64.ABGE,
+ ppc64.ABGT,
+ ppc64.ABLE,
+ ppc64.ABLT,
+ ppc64.ABNE,
+ ppc64.ABVC,
+ ppc64.ABVS:
+ break
+ }
+
+ r1 = r
+ for {
+ r1 = gc.Uniqp(r1)
+ if r1 == nil || r1.Prog.As != obj.ANOP {
+ break
+ }
+ }
+
+ if r1 == nil {
+ continue
+ }
+ p1 = r1.Prog
+ if p1.To.Type != obj.TYPE_REG || p1.To.Reg != p.From.Reg {
+ continue
+ }
+ switch p1.As {
+ /* irregular instructions */
+ case ppc64.ASUB,
+ ppc64.AADD,
+ ppc64.AXOR,
+ ppc64.AOR:
+ if p1.From.Type == obj.TYPE_CONST || p1.From.Type == obj.TYPE_ADDR {
+ continue
+ }
+ }
+
+ switch p1.As {
+ default:
+ continue
+
+			case ppc64.AMOVW,
+				ppc64.AMOVD:
+				// Plain MOVs have no CC-setting variants
+				// (see varianttable), so they cannot absorb
+				// the comparison.
+				continue
+
+ case ppc64.AANDCC,
+ ppc64.AANDNCC,
+ ppc64.AORCC,
+ ppc64.AORNCC,
+ ppc64.AXORCC,
+ ppc64.ASUBCC,
+ ppc64.ASUBECC,
+ ppc64.ASUBMECC,
+ ppc64.ASUBZECC,
+ ppc64.AADDCC,
+ ppc64.AADDCCC,
+ ppc64.AADDECC,
+ ppc64.AADDMECC,
+ ppc64.AADDZECC,
+ ppc64.ARLWMICC,
+ ppc64.ARLWNMCC,
+ /* don't deal with floating point instructions for now */
+ /*
+ case AFABS:
+ case AFADD:
+ case AFADDS:
+ case AFCTIW:
+ case AFCTIWZ:
+ case AFDIV:
+ case AFDIVS:
+ case AFMADD:
+ case AFMADDS:
+ case AFMOVD:
+ case AFMSUB:
+ case AFMSUBS:
+ case AFMUL:
+ case AFMULS:
+ case AFNABS:
+ case AFNEG:
+ case AFNMADD:
+ case AFNMADDS:
+ case AFNMSUB:
+ case AFNMSUBS:
+ case AFRSP:
+ case AFSUB:
+ case AFSUBS:
+ case ACNTLZW:
+ case AMTFSB0:
+ case AMTFSB1:
+ */
+ ppc64.AADD,
+ ppc64.AADDV,
+ ppc64.AADDC,
+ ppc64.AADDCV,
+ ppc64.AADDME,
+ ppc64.AADDMEV,
+ ppc64.AADDE,
+ ppc64.AADDEV,
+ ppc64.AADDZE,
+ ppc64.AADDZEV,
+ ppc64.AAND,
+ ppc64.AANDN,
+ ppc64.ADIVW,
+ ppc64.ADIVWV,
+ ppc64.ADIVWU,
+ ppc64.ADIVWUV,
+ ppc64.ADIVD,
+ ppc64.ADIVDV,
+ ppc64.ADIVDU,
+ ppc64.ADIVDUV,
+ ppc64.AEQV,
+ ppc64.AEXTSB,
+ ppc64.AEXTSH,
+ ppc64.AEXTSW,
+ ppc64.AMULHW,
+ ppc64.AMULHWU,
+ ppc64.AMULLW,
+ ppc64.AMULLWV,
+ ppc64.AMULHD,
+ ppc64.AMULHDU,
+ ppc64.AMULLD,
+ ppc64.AMULLDV,
+ ppc64.ANAND,
+ ppc64.ANEG,
+ ppc64.ANEGV,
+ ppc64.ANOR,
+ ppc64.AOR,
+ ppc64.AORN,
+ ppc64.AREM,
+ ppc64.AREMV,
+ ppc64.AREMU,
+ ppc64.AREMUV,
+ ppc64.AREMD,
+ ppc64.AREMDV,
+ ppc64.AREMDU,
+ ppc64.AREMDUV,
+ ppc64.ARLWMI,
+ ppc64.ARLWNM,
+ ppc64.ASLW,
+ ppc64.ASRAW,
+ ppc64.ASRW,
+ ppc64.ASLD,
+ ppc64.ASRAD,
+ ppc64.ASRD,
+ ppc64.ASUB,
+ ppc64.ASUBV,
+ ppc64.ASUBC,
+ ppc64.ASUBCV,
+ ppc64.ASUBME,
+ ppc64.ASUBMEV,
+ ppc64.ASUBE,
+ ppc64.ASUBEV,
+ ppc64.ASUBZE,
+ ppc64.ASUBZEV,
+ ppc64.AXOR:
+ t = variant2as(int(p1.As), as2variant(int(p1.As))|V_CC)
+ }
+
+ if gc.Debug['D'] != 0 {
+ fmt.Printf("cmp %v; %v -> ", p1, p)
+ }
+ p1.As = int16(t)
+ if gc.Debug['D'] != 0 {
+ fmt.Printf("%v\n", p1)
+ }
+ excise(r)
+ continue
+ }
+ }
+
+ret:
+ gc.Flowend(g)
+}
+
+func excise(r *gc.Flow) {
+	p := r.Prog
+ if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+ fmt.Printf("%v ===delete===\n", p)
+ }
+ obj.Nopout(p)
+ gc.Ostats.Ndelmov++
+}
+
+/*
+ * regzer returns 1 if a's value is 0 (a is R0 or $0)
+ */
+func regzer(a *obj.Addr) int {
+ if a.Type == obj.TYPE_CONST || a.Type == obj.TYPE_ADDR {
+ if a.Sym == nil && a.Reg == 0 {
+ if a.Offset == 0 {
+ return 1
+ }
+ }
+ }
+ if a.Type == obj.TYPE_REG {
+ if a.Reg == ppc64.REGZERO {
+ return 1
+ }
+ }
+ return 0
+}
+
+func regtyp(a *obj.Addr) bool {
+ // TODO(rsc): Floating point register exclusions?
+ return a.Type == obj.TYPE_REG && ppc64.REG_R0 <= a.Reg && a.Reg <= ppc64.REG_F31 && a.Reg != ppc64.REGZERO
+}
+
+/*
+ * the idea is to substitute
+ * one register for another
+ * from one MOV to another
+ * MOV a, R1
+ * ADD b, R1 / no use of R2
+ * MOV R1, R2
+ * would be converted to
+ * MOV a, R2
+ * ADD b, R2
+ * MOV R2, R1
+ * hopefully, then the former or latter MOV
+ * will be eliminated by copy propagation.
+ *
+ * r0 (the argument, not the register) is the MOV at the end of the
 * above sequences. It reports whether any instructions were modified.
+ */
+func subprop(r0 *gc.Flow) bool {
+	p := r0.Prog
+	v1 := &p.From
+	if !regtyp(v1) {
+		return false
+	}
+	v2 := &p.To
+ if !regtyp(v2) {
+ return false
+ }
+ for r := gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
+ if gc.Uniqs(r) == nil {
+ break
+ }
+ p = r.Prog
+ if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
+ continue
+ }
+ if p.Info.Flags&gc.Call != 0 {
+ return false
+ }
+
+ if p.Info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightWrite {
+ if p.To.Type == v1.Type {
+ if p.To.Reg == v1.Reg {
+ copysub(&p.To, v1, v2, 1)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
+ if p.From.Type == v2.Type {
+ fmt.Printf(" excise")
+ }
+ fmt.Printf("\n")
+ }
+
+ for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
+ p = r.Prog
+ copysub(&p.From, v1, v2, 1)
+ copysub1(p, v1, v2, 1)
+ copysub(&p.To, v1, v2, 1)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("%v\n", r.Prog)
+ }
+ }
+
+					v1.Reg, v2.Reg = v2.Reg, v1.Reg
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("%v last\n", r.Prog)
+ }
+ return true
+ }
+ }
+ }
+
+ if copyau(&p.From, v2) || copyau1(p, v2) || copyau(&p.To, v2) {
+ break
+ }
+ if copysub(&p.From, v1, v2, 0) != 0 || copysub1(p, v1, v2, 0) != 0 || copysub(&p.To, v1, v2, 0) != 0 {
+ break
+ }
+ }
+
+ return false
+}
+
+/*
+ * The idea is to remove redundant copies.
+ * v1->v2 F=0
+ * (use v2 s/v2/v1/)*
+ * set v1 F=1
+ * use v2 return fail (v1->v2 move must remain)
+ * -----------------
+ * v1->v2 F=0
+ * (use v2 s/v2/v1/)*
+ * set v1 F=1
+ * set v2 return success (caller can remove v1->v2 move)
+ */
+func copyprop(r0 *gc.Flow) bool {
+	p := r0.Prog
+	v1 := &p.From
+	v2 := &p.To
+ if copyas(v1, v2) {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("eliminating self-move: %v\n", r0.Prog)
+ }
+ return true
+ }
+
+ gactive++
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("trying to eliminate %v->%v move from:\n%v\n", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r0.Prog)
+ }
+ return copy1(v1, v2, r0.S1, 0)
+}
+
+// copy1 replaces uses of v2 with v1 starting at r and reports
+// whether all uses were rewritten.
+func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) bool {
+ if uint32(r.Active) == gactive {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("act set; return 1\n")
+ }
+ return true
+ }
+
+ r.Active = int32(gactive)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("copy1 replace %v with %v f=%d\n", gc.Ctxt.Dconv(v2), gc.Ctxt.Dconv(v1), f)
+ }
+ var t int
+ var p *obj.Prog
+ for ; r != nil; r = r.S1 {
+ p = r.Prog
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("%v", p)
+ }
+ if f == 0 && gc.Uniqp(r) == nil {
+ // Multiple predecessors; conservatively
+ // assume v1 was set on other path
+ f = 1
+
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; merge; f=%d", f)
+ }
+ }
+
+ t = copyu(p, v2, nil)
+ switch t {
+ case 2: /* rar, can't split */
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; %v rar; return 0\n", gc.Ctxt.Dconv(v2))
+ }
+ return false
+
+ case 3: /* set */
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; %v set; return 1\n", gc.Ctxt.Dconv(v2))
+ }
+ return true
+
+ case 1, /* used, substitute */
+ 4: /* use and set */
+ if f != 0 {
+ if gc.Debug['P'] == 0 {
+ return false
+ }
+ if t == 4 {
+ fmt.Printf("; %v used+set and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
+ } else {
+ fmt.Printf("; %v used and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
+ }
+ return false
+ }
+
+ if copyu(p, v2, v1) != 0 {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; sub fail; return 0\n")
+ }
+ return false
+ }
+
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; sub %v->%v\n => %v", gc.Ctxt.Dconv(v2), gc.Ctxt.Dconv(v1), p)
+ }
+ if t == 4 {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; %v used+set; return 1\n", gc.Ctxt.Dconv(v2))
+ }
+ return true
+ }
+ }
+
+ if f == 0 {
+ t = copyu(p, v1, nil)
+			if t == 2 || t == 3 || t == 4 {
+ f = 1
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; %v set and !f; f=%d", gc.Ctxt.Dconv(v1), f)
+ }
+ }
+ }
+
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("\n")
+ }
+ if r.S2 != nil {
+ if !copy1(v1, v2, r.S2, f) {
+ return false
+ }
+ }
+ }
+
+ return true
+}
+
+// If s==nil, copyu returns the set/use of v in p; otherwise, it
+// modifies p to replace reads of v with reads of s and returns 0 for
+// success or non-zero for failure.
+//
+// If s==nil, copyu returns one of the following values:
+// 1 if v only used
+// 2 if v is set and used in one address (read-alter-rewrite;
+// can't substitute)
+// 3 if v is only set
+// 4 if v is set in one address and used in another (so addresses
+// can be rewritten independently)
+// 0 otherwise (not touched)
+func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
+ if p.From3Type() != obj.TYPE_NONE {
+ // 9g never generates a from3
+ fmt.Printf("copyu: from3 (%v) not implemented\n", gc.Ctxt.Dconv(p.From3))
+ }
+
+ switch p.As {
+ default:
+ fmt.Printf("copyu: can't find %v\n", obj.Aconv(int(p.As)))
+ return 2
+
+ case obj.ANOP, /* read p->from, write p->to */
+ ppc64.AMOVH,
+ ppc64.AMOVHZ,
+ ppc64.AMOVB,
+ ppc64.AMOVBZ,
+ ppc64.AMOVW,
+ ppc64.AMOVWZ,
+ ppc64.AMOVD,
+ ppc64.ANEG,
+ ppc64.ANEGCC,
+ ppc64.AADDME,
+ ppc64.AADDMECC,
+ ppc64.AADDZE,
+ ppc64.AADDZECC,
+ ppc64.ASUBME,
+ ppc64.ASUBMECC,
+ ppc64.ASUBZE,
+ ppc64.ASUBZECC,
+ ppc64.AFCTIW,
+ ppc64.AFCTIWZ,
+ ppc64.AFCTID,
+ ppc64.AFCTIDZ,
+ ppc64.AFCFID,
+ ppc64.AFCFIDCC,
+ ppc64.AFMOVS,
+ ppc64.AFMOVD,
+ ppc64.AFRSP,
+ ppc64.AFNEG,
+ ppc64.AFNEGCC:
+ if s != nil {
+ if copysub(&p.From, v, s, 1) != 0 {
+ return 1
+ }
+
+ // Update only indirect uses of v in p->to
+ if !copyas(&p.To, v) {
+ if copysub(&p.To, v, s, 1) != 0 {
+ return 1
+ }
+ }
+ return 0
+ }
+
+ if copyas(&p.To, v) {
+ // Fix up implicit from
+ if p.From.Type == obj.TYPE_NONE {
+ p.From = p.To
+ }
+ if copyau(&p.From, v) {
+ return 4
+ }
+ return 3
+ }
+
+ if copyau(&p.From, v) {
+ return 1
+ }
+ if copyau(&p.To, v) {
+ // p->to only indirectly uses v
+ return 1
+ }
+
+ return 0
+
+ case ppc64.AMOVBU, /* rar p->from, write p->to or read p->from, rar p->to */
+ ppc64.AMOVBZU,
+ ppc64.AMOVHU,
+ ppc64.AMOVHZU,
+ ppc64.AMOVWZU,
+ ppc64.AMOVDU:
+ if p.From.Type == obj.TYPE_MEM {
+ if copyas(&p.From, v) {
+ // No s!=nil check; need to fail
+ // anyway in that case
+ return 2
+ }
+
+ if s != nil {
+ if copysub(&p.To, v, s, 1) != 0 {
+ return 1
+ }
+ return 0
+ }
+
+ if copyas(&p.To, v) {
+ return 3
+ }
+ } else if p.To.Type == obj.TYPE_MEM {
+ if copyas(&p.To, v) {
+ return 2
+ }
+ if s != nil {
+ if copysub(&p.From, v, s, 1) != 0 {
+ return 1
+ }
+ return 0
+ }
+
+ if copyau(&p.From, v) {
+ return 1
+ }
+ } else {
+ fmt.Printf("copyu: bad %v\n", p)
+ }
+
+ return 0
+
+ case ppc64.ARLWMI, /* read p->from, read p->reg, rar p->to */
+ ppc64.ARLWMICC:
+ if copyas(&p.To, v) {
+ return 2
+ }
+ fallthrough
+
+ /* fall through */
+ case ppc64.AADD,
+ /* read p->from, read p->reg, write p->to */
+ ppc64.AADDC,
+ ppc64.AADDE,
+ ppc64.ASUB,
+ ppc64.ASLW,
+ ppc64.ASRW,
+ ppc64.ASRAW,
+ ppc64.ASLD,
+ ppc64.ASRD,
+ ppc64.ASRAD,
+ ppc64.AOR,
+ ppc64.AORCC,
+ ppc64.AORN,
+ ppc64.AORNCC,
+ ppc64.AAND,
+ ppc64.AANDCC,
+ ppc64.AANDN,
+ ppc64.AANDNCC,
+ ppc64.ANAND,
+ ppc64.ANANDCC,
+ ppc64.ANOR,
+ ppc64.ANORCC,
+ ppc64.AXOR,
+ ppc64.AMULHW,
+ ppc64.AMULHWU,
+ ppc64.AMULLW,
+ ppc64.AMULLD,
+ ppc64.ADIVW,
+ ppc64.ADIVD,
+ ppc64.ADIVWU,
+ ppc64.ADIVDU,
+ ppc64.AREM,
+ ppc64.AREMU,
+ ppc64.AREMD,
+ ppc64.AREMDU,
+ ppc64.ARLWNM,
+ ppc64.ARLWNMCC,
+ ppc64.AFADDS,
+ ppc64.AFADD,
+ ppc64.AFSUBS,
+ ppc64.AFSUB,
+ ppc64.AFMULS,
+ ppc64.AFMUL,
+ ppc64.AFDIVS,
+ ppc64.AFDIV:
+ if s != nil {
+ if copysub(&p.From, v, s, 1) != 0 {
+ return 1
+ }
+ if copysub1(p, v, s, 1) != 0 {
+ return 1
+ }
+
+ // Update only indirect uses of v in p->to
+ if !copyas(&p.To, v) {
+ if copysub(&p.To, v, s, 1) != 0 {
+ return 1
+ }
+ }
+ return 0
+ }
+
+ if copyas(&p.To, v) {
+ if p.Reg == 0 {
+ // Fix up implicit reg (e.g., ADD
+ // R3,R4 -> ADD R3,R4,R4) so we can
+ // update reg and to separately.
+ p.Reg = p.To.Reg
+ }
+
+ if copyau(&p.From, v) {
+ return 4
+ }
+ if copyau1(p, v) {
+ return 4
+ }
+ return 3
+ }
+
+ if copyau(&p.From, v) {
+ return 1
+ }
+ if copyau1(p, v) {
+ return 1
+ }
+ if copyau(&p.To, v) {
+ return 1
+ }
+ return 0
+
+ case ppc64.ABEQ,
+ ppc64.ABGT,
+ ppc64.ABGE,
+ ppc64.ABLT,
+ ppc64.ABLE,
+ ppc64.ABNE,
+ ppc64.ABVC,
+ ppc64.ABVS:
+ return 0
+
+ case obj.ACHECKNIL, /* read p->from */
+ ppc64.ACMP, /* read p->from, read p->to */
+ ppc64.ACMPU,
+ ppc64.ACMPW,
+ ppc64.ACMPWU,
+ ppc64.AFCMPO,
+ ppc64.AFCMPU:
+ if s != nil {
+ if copysub(&p.From, v, s, 1) != 0 {
+ return 1
+ }
+ return copysub(&p.To, v, s, 1)
+ }
+
+ if copyau(&p.From, v) {
+ return 1
+ }
+ if copyau(&p.To, v) {
+ return 1
+ }
+ return 0
+
+ // 9g never generates a branch to a GPR (this isn't
+	// even a normal instruction; liblink turns it into a
+	// MOV and a branch).
+ case ppc64.ABR: /* read p->to */
+ if s != nil {
+ if copysub(&p.To, v, s, 1) != 0 {
+ return 1
+ }
+ return 0
+ }
+
+ if copyau(&p.To, v) {
+ return 1
+ }
+ return 0
+
+ case obj.ARET: /* funny */
+ if s != nil {
+ return 0
+ }
+
+ // All registers die at this point, so claim
+ // everything is set (and not used).
+ return 3
+
+ case ppc64.ABL: /* funny */
+ if v.Type == obj.TYPE_REG {
+ // TODO(rsc): REG_R0 and REG_F0 used to be
+ // (when register numbers started at 0) exregoffset and exfregoffset,
+ // which are unset entirely.
+ // It's strange that this handles R0 and F0 differently from the other
+ // registers. Possible failure to optimize?
+ if ppc64.REG_R0 < v.Reg && v.Reg <= ppc64.REGEXT {
+ return 2
+ }
+ if v.Reg == ppc64.REGARG {
+ return 2
+ }
+ if ppc64.REG_F0 < v.Reg && v.Reg <= ppc64.FREGEXT {
+ return 2
+ }
+ }
+
+ if p.From.Type == obj.TYPE_REG && v.Type == obj.TYPE_REG && p.From.Reg == v.Reg {
+ return 2
+ }
+
+ if s != nil {
+ if copysub(&p.To, v, s, 1) != 0 {
+ return 1
+ }
+ return 0
+ }
+
+ if copyau(&p.To, v) {
+ return 4
+ }
+ return 3
+
+ // R0 is zero, used by DUFFZERO, cannot be substituted.
+ // R3 is ptr to memory, used and set, cannot be substituted.
+ case obj.ADUFFZERO:
+ if v.Type == obj.TYPE_REG {
+			if v.Reg == ppc64.REGZERO {
+				return 1
+			}
+			if v.Reg == ppc64.REG_R3 {
+				return 2
+			}
+ }
+
+ return 0
+
+ // R3, R4 are ptr to src, dst, used and set, cannot be substituted.
+ // R5 is scratch, set by DUFFCOPY, cannot be substituted.
+ case obj.ADUFFCOPY:
+ if v.Type == obj.TYPE_REG {
+			if v.Reg == ppc64.REG_R3 || v.Reg == ppc64.REG_R4 {
+				return 2
+			}
+			if v.Reg == ppc64.REG_R5 {
+ return 3
+ }
+ }
+
+ return 0
+
+ case obj.ATEXT: /* funny */
+ if v.Type == obj.TYPE_REG {
+ if v.Reg == ppc64.REGARG {
+ return 3
+ }
+ }
+ return 0
+
+ case obj.APCDATA,
+ obj.AFUNCDATA,
+ obj.AVARDEF,
+ obj.AVARKILL:
+ return 0
+ }
+}
+
+// copyas reports whether a and v address the same register.
+//
+// If a is the from operand, this means this operation reads the
+// register in v. If a is the to operand, this means this operation
+// writes the register in v.
+func copyas(a *obj.Addr, v *obj.Addr) bool {
+ if regtyp(v) {
+ if a.Type == v.Type {
+ if a.Reg == v.Reg {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// copyau reports whether a either directly or indirectly addresses
+// the same register as v.
+//
+// If a is the from operand, this means this operation reads the
+// register in v. If a is the to operand, this means the operation
+// either reads or writes the register in v (if !copyas(a, v), then
+// the operation reads the register in v).
+func copyau(a *obj.Addr, v *obj.Addr) bool {
+ if copyas(a, v) {
+ return true
+ }
+ if v.Type == obj.TYPE_REG {
+ if a.Type == obj.TYPE_MEM || (a.Type == obj.TYPE_ADDR && a.Reg != 0) {
+ if v.Reg == a.Reg {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// copyau1 reports whether p->reg references the same register as v and
+// v is a direct reference.
+func copyau1(p *obj.Prog, v *obj.Addr) bool {
+ if regtyp(v) && v.Reg != 0 {
+ if p.Reg == v.Reg {
+ return true
+ }
+ }
+ return false
+}
+
+// copysub replaces v with s in a if f!=0; if f==0 it only reports
+// whether the substitution would fail (1 on failure). On ppc64 it
+// always succeeds and returns 0.
+func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f int) int {
+ if f != 0 {
+ if copyau(a, v) {
+ a.Reg = s.Reg
+ }
+ }
+ return 0
+}
+
+// copysub1 replaces v with s in p1->reg if f!=0; if f==0 it only
+// reports whether the substitution would fail (1 on failure). On
+// ppc64 it always succeeds and returns 0.
+func copysub1(p1 *obj.Prog, v *obj.Addr, s *obj.Addr, f int) int {
+ if f != 0 {
+ if copyau1(p1, v) {
+ p1.Reg = s.Reg
+ }
+ }
+ return 0
+}
+
+func sameaddr(a *obj.Addr, v *obj.Addr) bool {
+ if a.Type != v.Type {
+ return false
+ }
+ if regtyp(v) && a.Reg == v.Reg {
+ return true
+ }
+	if v.Name == obj.NAME_AUTO || v.Name == obj.NAME_PARAM {
+ if v.Offset == a.Offset {
+ return true
+ }
+ }
+ return false
+}
+
+func smallindir(a *obj.Addr, reg *obj.Addr) bool {
+ return reg.Type == obj.TYPE_REG && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && 0 <= a.Offset && a.Offset < 4096
+}
+
+func stackaddr(a *obj.Addr) bool {
+ return a.Type == obj.TYPE_REG && a.Reg == ppc64.REGSP
+}
diff --git a/src/cmd/compile/internal/mips64/prog.go b/src/cmd/compile/internal/mips64/prog.go
new file mode 100644
index 0000000..9b8719b
--- /dev/null
+++ b/src/cmd/compile/internal/mips64/prog.go
@@ -0,0 +1,314 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ppc64
+
+import (
+ "cmd/compile/internal/gc"
+ "cmd/internal/obj"
+ "cmd/internal/obj/ppc64"
+)
+
+const (
+ LeftRdwr uint32 = gc.LeftRead | gc.LeftWrite
+ RightRdwr uint32 = gc.RightRead | gc.RightWrite
+)
+
+// This table gives the basic information about instruction
+// generated by the compiler and processed in the optimizer.
+// See opt.h for bit definitions.
+//
+// Instructions not generated need not be listed.
+// As an exception to that rule, we typically write down all the
+// size variants of an operation even if we just use a subset.
+//
+// The table is formatted for 8-space tabs.
+var progtable = [ppc64.ALAST]obj.ProgInfo{
+ obj.ATYPE: {Flags: gc.Pseudo | gc.Skip},
+ obj.ATEXT: {Flags: gc.Pseudo},
+ obj.AFUNCDATA: {Flags: gc.Pseudo},
+ obj.APCDATA: {Flags: gc.Pseudo},
+ obj.AUNDEF: {Flags: gc.Break},
+ obj.AUSEFIELD: {Flags: gc.OK},
+ obj.ACHECKNIL: {Flags: gc.LeftRead},
+ obj.AVARDEF: {Flags: gc.Pseudo | gc.RightWrite},
+ obj.AVARKILL: {Flags: gc.Pseudo | gc.RightWrite},
+
+ // NOP is an internal no-op that also stands
+ // for USED and SET annotations, not the Power opcode.
+ obj.ANOP: {Flags: gc.LeftRead | gc.RightWrite},
+
+ // Integer
+ ppc64.AADD: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ ppc64.ASUB: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ ppc64.ANEG: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ ppc64.AAND: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ ppc64.AOR: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ ppc64.AXOR: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ ppc64.AMULLD: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ ppc64.AMULLW: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ ppc64.AMULHD: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ ppc64.AMULHDU: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ ppc64.ADIVD: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ ppc64.ADIVDU: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ ppc64.ASLD: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ ppc64.ASRD: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ ppc64.ASRAD: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ ppc64.ACMP: {Flags: gc.SizeQ | gc.LeftRead | gc.RightRead},
+ ppc64.ACMPU: {Flags: gc.SizeQ | gc.LeftRead | gc.RightRead},
+ ppc64.ATD: {Flags: gc.SizeQ | gc.RightRead},
+
+ // Floating point.
+ ppc64.AFADD: {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ ppc64.AFADDS: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ ppc64.AFSUB: {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ ppc64.AFSUBS: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ ppc64.AFMUL: {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ ppc64.AFMULS: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ ppc64.AFDIV: {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ ppc64.AFDIVS: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ ppc64.AFCTIDZ: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ ppc64.AFCFID: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ ppc64.AFCMPU: {Flags: gc.SizeD | gc.LeftRead | gc.RightRead},
+ ppc64.AFRSP: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
+
+ // Moves
+ ppc64.AMOVB: {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
+ ppc64.AMOVBU: {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv | gc.PostInc},
+ ppc64.AMOVBZ: {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
+ ppc64.AMOVH: {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
+ ppc64.AMOVHU: {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv | gc.PostInc},
+ ppc64.AMOVHZ: {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
+ ppc64.AMOVW: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
+
+ // there is no AMOVWU.
+ ppc64.AMOVWZU: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv | gc.PostInc},
+ ppc64.AMOVWZ: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
+ ppc64.AMOVD: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move},
+ ppc64.AMOVDU: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move | gc.PostInc},
+ ppc64.AFMOVS: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
+ ppc64.AFMOVD: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move},
+
+ // Jumps
+ ppc64.ABR: {Flags: gc.Jump | gc.Break},
+ ppc64.ABL: {Flags: gc.Call},
+ ppc64.ABEQ: {Flags: gc.Cjmp},
+ ppc64.ABNE: {Flags: gc.Cjmp},
+ ppc64.ABGE: {Flags: gc.Cjmp},
+ ppc64.ABLT: {Flags: gc.Cjmp},
+ ppc64.ABGT: {Flags: gc.Cjmp},
+ ppc64.ABLE: {Flags: gc.Cjmp},
+ obj.ARET: {Flags: gc.Break},
+ obj.ADUFFZERO: {Flags: gc.Call},
+ obj.ADUFFCOPY: {Flags: gc.Call},
+}
+
+var initproginfo_initialized bool
+
+func initproginfo() {
+	var addvariant = []int{V_CC, V_V, V_CC | V_V}
+
+	if initproginfo_initialized {
+		return
+	}
+	initproginfo_initialized = true
+
+ // Perform one-time expansion of instructions in progtable to
+ // their CC, V, and VCC variants
+ var as2 int
+ var i int
+ var variant int
+	for as := 0; as < len(progtable); as++ {
+ if progtable[as].Flags == 0 {
+ continue
+ }
+ variant = as2variant(as)
+ for i = 0; i < len(addvariant); i++ {
+ as2 = variant2as(as, variant|addvariant[i])
+ if as2 != 0 && progtable[as2].Flags == 0 {
+ progtable[as2] = progtable[as]
+ }
+ }
+ }
+}
+
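+// proginfo fills in p.Info with a description of how p uses and sets
+// its operands, starting from progtable and refined by the actual
+// shape of p's operands.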
+func proginfo(p *obj.Prog) {
+ initproginfo()
+
+ info := &p.Info
+ *info = progtable[p.As]
+ if info.Flags == 0 {
+ gc.Fatalf("proginfo: unknown instruction %v", p)
+ }
+
+ if (info.Flags&gc.RegRead != 0) && p.Reg == 0 {
+ info.Flags &^= gc.RegRead
+ info.Flags |= gc.RightRead /*CanRegRead |*/
+ }
+
+ if (p.From.Type == obj.TYPE_MEM || p.From.Type == obj.TYPE_ADDR) && p.From.Reg != 0 {
+ info.Regindex |= RtoB(int(p.From.Reg))
+ if info.Flags&gc.PostInc != 0 {
+ info.Regset |= RtoB(int(p.From.Reg))
+ }
+ }
+
+ if (p.To.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_ADDR) && p.To.Reg != 0 {
+ info.Regindex |= RtoB(int(p.To.Reg))
+ if info.Flags&gc.PostInc != 0 {
+ info.Regset |= RtoB(int(p.To.Reg))
+ }
+ }
+
+ if p.From.Type == obj.TYPE_ADDR && p.From.Sym != nil && (info.Flags&gc.LeftRead != 0) {
+ info.Flags &^= gc.LeftRead
+ info.Flags |= gc.LeftAddr
+ }
+
+ if p.As == obj.ADUFFZERO {
+ info.Reguse |= 1<<0 | RtoB(ppc64.REG_R3)
+ info.Regset |= RtoB(ppc64.REG_R3)
+ }
+
+ if p.As == obj.ADUFFCOPY {
+ // TODO(austin) Revisit when duffcopy is implemented
+ info.Reguse |= RtoB(ppc64.REG_R3) | RtoB(ppc64.REG_R4) | RtoB(ppc64.REG_R5)
+
+ info.Regset |= RtoB(ppc64.REG_R3) | RtoB(ppc64.REG_R4)
+ }
+}
+
+// Instruction variants table. Initially this contains entries only
+// for the "base" form of each instruction. On the first call to
+// as2variant or variant2as, we'll add the variants to the table.
+var varianttable = [ppc64.ALAST][4]int{
+ ppc64.AADD: [4]int{ppc64.AADD, ppc64.AADDCC, ppc64.AADDV, ppc64.AADDVCC},
+ ppc64.AADDC: [4]int{ppc64.AADDC, ppc64.AADDCCC, ppc64.AADDCV, ppc64.AADDCVCC},
+ ppc64.AADDE: [4]int{ppc64.AADDE, ppc64.AADDECC, ppc64.AADDEV, ppc64.AADDEVCC},
+ ppc64.AADDME: [4]int{ppc64.AADDME, ppc64.AADDMECC, ppc64.AADDMEV, ppc64.AADDMEVCC},
+ ppc64.AADDZE: [4]int{ppc64.AADDZE, ppc64.AADDZECC, ppc64.AADDZEV, ppc64.AADDZEVCC},
+ ppc64.AAND: [4]int{ppc64.AAND, ppc64.AANDCC, 0, 0},
+ ppc64.AANDN: [4]int{ppc64.AANDN, ppc64.AANDNCC, 0, 0},
+ ppc64.ACNTLZD: [4]int{ppc64.ACNTLZD, ppc64.ACNTLZDCC, 0, 0},
+ ppc64.ACNTLZW: [4]int{ppc64.ACNTLZW, ppc64.ACNTLZWCC, 0, 0},
+ ppc64.ADIVD: [4]int{ppc64.ADIVD, ppc64.ADIVDCC, ppc64.ADIVDV, ppc64.ADIVDVCC},
+ ppc64.ADIVDU: [4]int{ppc64.ADIVDU, ppc64.ADIVDUCC, ppc64.ADIVDUV, ppc64.ADIVDUVCC},
+ ppc64.ADIVW: [4]int{ppc64.ADIVW, ppc64.ADIVWCC, ppc64.ADIVWV, ppc64.ADIVWVCC},
+ ppc64.ADIVWU: [4]int{ppc64.ADIVWU, ppc64.ADIVWUCC, ppc64.ADIVWUV, ppc64.ADIVWUVCC},
+ ppc64.AEQV: [4]int{ppc64.AEQV, ppc64.AEQVCC, 0, 0},
+ ppc64.AEXTSB: [4]int{ppc64.AEXTSB, ppc64.AEXTSBCC, 0, 0},
+ ppc64.AEXTSH: [4]int{ppc64.AEXTSH, ppc64.AEXTSHCC, 0, 0},
+ ppc64.AEXTSW: [4]int{ppc64.AEXTSW, ppc64.AEXTSWCC, 0, 0},
+ ppc64.AFABS: [4]int{ppc64.AFABS, ppc64.AFABSCC, 0, 0},
+ ppc64.AFADD: [4]int{ppc64.AFADD, ppc64.AFADDCC, 0, 0},
+ ppc64.AFADDS: [4]int{ppc64.AFADDS, ppc64.AFADDSCC, 0, 0},
+ ppc64.AFCFID: [4]int{ppc64.AFCFID, ppc64.AFCFIDCC, 0, 0},
+ ppc64.AFCTID: [4]int{ppc64.AFCTID, ppc64.AFCTIDCC, 0, 0},
+ ppc64.AFCTIDZ: [4]int{ppc64.AFCTIDZ, ppc64.AFCTIDZCC, 0, 0},
+ ppc64.AFCTIW: [4]int{ppc64.AFCTIW, ppc64.AFCTIWCC, 0, 0},
+ ppc64.AFCTIWZ: [4]int{ppc64.AFCTIWZ, ppc64.AFCTIWZCC, 0, 0},
+ ppc64.AFDIV: [4]int{ppc64.AFDIV, ppc64.AFDIVCC, 0, 0},
+ ppc64.AFDIVS: [4]int{ppc64.AFDIVS, ppc64.AFDIVSCC, 0, 0},
+ ppc64.AFMADD: [4]int{ppc64.AFMADD, ppc64.AFMADDCC, 0, 0},
+ ppc64.AFMADDS: [4]int{ppc64.AFMADDS, ppc64.AFMADDSCC, 0, 0},
+ ppc64.AFMOVD: [4]int{ppc64.AFMOVD, ppc64.AFMOVDCC, 0, 0},
+ ppc64.AFMSUB: [4]int{ppc64.AFMSUB, ppc64.AFMSUBCC, 0, 0},
+ ppc64.AFMSUBS: [4]int{ppc64.AFMSUBS, ppc64.AFMSUBSCC, 0, 0},
+ ppc64.AFMUL: [4]int{ppc64.AFMUL, ppc64.AFMULCC, 0, 0},
+ ppc64.AFMULS: [4]int{ppc64.AFMULS, ppc64.AFMULSCC, 0, 0},
+ ppc64.AFNABS: [4]int{ppc64.AFNABS, ppc64.AFNABSCC, 0, 0},
+ ppc64.AFNEG: [4]int{ppc64.AFNEG, ppc64.AFNEGCC, 0, 0},
+ ppc64.AFNMADD: [4]int{ppc64.AFNMADD, ppc64.AFNMADDCC, 0, 0},
+ ppc64.AFNMADDS: [4]int{ppc64.AFNMADDS, ppc64.AFNMADDSCC, 0, 0},
+ ppc64.AFNMSUB: [4]int{ppc64.AFNMSUB, ppc64.AFNMSUBCC, 0, 0},
+ ppc64.AFNMSUBS: [4]int{ppc64.AFNMSUBS, ppc64.AFNMSUBSCC, 0, 0},
+ ppc64.AFRES: [4]int{ppc64.AFRES, ppc64.AFRESCC, 0, 0},
+ ppc64.AFRSP: [4]int{ppc64.AFRSP, ppc64.AFRSPCC, 0, 0},
+ ppc64.AFRSQRTE: [4]int{ppc64.AFRSQRTE, ppc64.AFRSQRTECC, 0, 0},
+ ppc64.AFSEL: [4]int{ppc64.AFSEL, ppc64.AFSELCC, 0, 0},
+ ppc64.AFSQRT: [4]int{ppc64.AFSQRT, ppc64.AFSQRTCC, 0, 0},
+ ppc64.AFSQRTS: [4]int{ppc64.AFSQRTS, ppc64.AFSQRTSCC, 0, 0},
+ ppc64.AFSUB: [4]int{ppc64.AFSUB, ppc64.AFSUBCC, 0, 0},
+ ppc64.AFSUBS: [4]int{ppc64.AFSUBS, ppc64.AFSUBSCC, 0, 0},
+ ppc64.AMTFSB0: [4]int{ppc64.AMTFSB0, ppc64.AMTFSB0CC, 0, 0},
+ ppc64.AMTFSB1: [4]int{ppc64.AMTFSB1, ppc64.AMTFSB1CC, 0, 0},
+ ppc64.AMULHD: [4]int{ppc64.AMULHD, ppc64.AMULHDCC, 0, 0},
+ ppc64.AMULHDU: [4]int{ppc64.AMULHDU, ppc64.AMULHDUCC, 0, 0},
+ ppc64.AMULHW: [4]int{ppc64.AMULHW, ppc64.AMULHWCC, 0, 0},
+ ppc64.AMULHWU: [4]int{ppc64.AMULHWU, ppc64.AMULHWUCC, 0, 0},
+ ppc64.AMULLD: [4]int{ppc64.AMULLD, ppc64.AMULLDCC, ppc64.AMULLDV, ppc64.AMULLDVCC},
+ ppc64.AMULLW: [4]int{ppc64.AMULLW, ppc64.AMULLWCC, ppc64.AMULLWV, ppc64.AMULLWVCC},
+ ppc64.ANAND: [4]int{ppc64.ANAND, ppc64.ANANDCC, 0, 0},
+ ppc64.ANEG: [4]int{ppc64.ANEG, ppc64.ANEGCC, ppc64.ANEGV, ppc64.ANEGVCC},
+ ppc64.ANOR: [4]int{ppc64.ANOR, ppc64.ANORCC, 0, 0},
+ ppc64.AOR: [4]int{ppc64.AOR, ppc64.AORCC, 0, 0},
+ ppc64.AORN: [4]int{ppc64.AORN, ppc64.AORNCC, 0, 0},
+ ppc64.AREM: [4]int{ppc64.AREM, ppc64.AREMCC, ppc64.AREMV, ppc64.AREMVCC},
+ ppc64.AREMD: [4]int{ppc64.AREMD, ppc64.AREMDCC, ppc64.AREMDV, ppc64.AREMDVCC},
+ ppc64.AREMDU: [4]int{ppc64.AREMDU, ppc64.AREMDUCC, ppc64.AREMDUV, ppc64.AREMDUVCC},
+ ppc64.AREMU: [4]int{ppc64.AREMU, ppc64.AREMUCC, ppc64.AREMUV, ppc64.AREMUVCC},
+ ppc64.ARLDC: [4]int{ppc64.ARLDC, ppc64.ARLDCCC, 0, 0},
+ ppc64.ARLDCL: [4]int{ppc64.ARLDCL, ppc64.ARLDCLCC, 0, 0},
+ ppc64.ARLDCR: [4]int{ppc64.ARLDCR, ppc64.ARLDCRCC, 0, 0},
+ ppc64.ARLDMI: [4]int{ppc64.ARLDMI, ppc64.ARLDMICC, 0, 0},
+ ppc64.ARLWMI: [4]int{ppc64.ARLWMI, ppc64.ARLWMICC, 0, 0},
+ ppc64.ARLWNM: [4]int{ppc64.ARLWNM, ppc64.ARLWNMCC, 0, 0},
+ ppc64.ASLD: [4]int{ppc64.ASLD, ppc64.ASLDCC, 0, 0},
+ ppc64.ASLW: [4]int{ppc64.ASLW, ppc64.ASLWCC, 0, 0},
+ ppc64.ASRAD: [4]int{ppc64.ASRAD, ppc64.ASRADCC, 0, 0},
+ ppc64.ASRAW: [4]int{ppc64.ASRAW, ppc64.ASRAWCC, 0, 0},
+ ppc64.ASRD: [4]int{ppc64.ASRD, ppc64.ASRDCC, 0, 0},
+ ppc64.ASRW: [4]int{ppc64.ASRW, ppc64.ASRWCC, 0, 0},
+ ppc64.ASUB: [4]int{ppc64.ASUB, ppc64.ASUBCC, ppc64.ASUBV, ppc64.ASUBVCC},
+ ppc64.ASUBC: [4]int{ppc64.ASUBC, ppc64.ASUBCCC, ppc64.ASUBCV, ppc64.ASUBCVCC},
+ ppc64.ASUBE: [4]int{ppc64.ASUBE, ppc64.ASUBECC, ppc64.ASUBEV, ppc64.ASUBEVCC},
+ ppc64.ASUBME: [4]int{ppc64.ASUBME, ppc64.ASUBMECC, ppc64.ASUBMEV, ppc64.ASUBMEVCC},
+ ppc64.ASUBZE: [4]int{ppc64.ASUBZE, ppc64.ASUBZECC, ppc64.ASUBZEV, ppc64.ASUBZEVCC},
+ ppc64.AXOR: [4]int{ppc64.AXOR, ppc64.AXORCC, 0, 0},
+}
+
+var initvariants_initialized bool
+
+func initvariants() {
+	if initvariants_initialized {
+		return
+	}
+	initvariants_initialized = true
+
+ var j int
+	for i := 0; i < len(varianttable); i++ {
+ if varianttable[i][0] == 0 {
+ // Instruction has no variants
+ varianttable[i][0] = i
+
+ continue
+ }
+
+ // Copy base form to other variants
+ if varianttable[i][0] == i {
+ for j = 0; j < len(varianttable[i]); j++ {
+ varianttable[varianttable[i][j]] = varianttable[i]
+ }
+ }
+ }
+}
+
+// as2variant returns the variant (V_*) flags of instruction as.
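+// For example, as2variant(ppc64.AADDCC) is V_CC.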
+func as2variant(as int) int {
+ initvariants()
+	for i := 0; i < len(varianttable[as]); i++ {
+ if varianttable[as][i] == as {
+ return i
+ }
+ }
+ gc.Fatalf("as2variant: instruction %v is not a variant of itself", obj.Aconv(as))
+ return 0
+}
+
+// variant2as returns the instruction as with the given variant (V_*) flags.
+// If no such variant exists, this returns 0.
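+// For example, variant2as(ppc64.AADD, V_CC|V_V) is ppc64.AADDVCC.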
+func variant2as(as int, flags int) int {
+ initvariants()
+ return varianttable[as][flags]
+}
diff --git a/src/cmd/compile/internal/mips64/reg.go b/src/cmd/compile/internal/mips64/reg.go
new file mode 100644
index 0000000..fa1cb71
--- /dev/null
+++ b/src/cmd/compile/internal/mips64/reg.go
@@ -0,0 +1,162 @@
+// Derived from Inferno utils/6c/reg.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/reg.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package ppc64
+
+import "cmd/internal/obj/ppc64"
+import "cmd/compile/internal/gc"
+
+const (
+ NREGVAR = 64 /* 32 general + 32 floating */
+)
+
+var regname = []string{
+ ".R0",
+ ".R1",
+ ".R2",
+ ".R3",
+ ".R4",
+ ".R5",
+ ".R6",
+ ".R7",
+ ".R8",
+ ".R9",
+ ".R10",
+ ".R11",
+ ".R12",
+ ".R13",
+ ".R14",
+ ".R15",
+ ".R16",
+ ".R17",
+ ".R18",
+ ".R19",
+ ".R20",
+ ".R21",
+ ".R22",
+ ".R23",
+ ".R24",
+ ".R25",
+ ".R26",
+ ".R27",
+ ".R28",
+ ".R29",
+ ".R30",
+ ".R31",
+ ".F0",
+ ".F1",
+ ".F2",
+ ".F3",
+ ".F4",
+ ".F5",
+ ".F6",
+ ".F7",
+ ".F8",
+ ".F9",
+ ".F10",
+ ".F11",
+ ".F12",
+ ".F13",
+ ".F14",
+ ".F15",
+ ".F16",
+ ".F17",
+ ".F18",
+ ".F19",
+ ".F20",
+ ".F21",
+ ".F22",
+ ".F23",
+ ".F24",
+ ".F25",
+ ".F26",
+ ".F27",
+ ".F28",
+ ".F29",
+ ".F30",
+ ".F31",
+}
+
+func regnames(n *int) []string {
+ *n = NREGVAR
+ return regname
+}
+
+func excludedregs() uint64 {
+ // Exclude registers with fixed functions
+ regbits := uint64(1<<0 | RtoB(ppc64.REGSP) | RtoB(ppc64.REGG) | RtoB(ppc64.REGTLS))
+
+ // Also exclude floating point registers with fixed constants
+ regbits |= RtoB(ppc64.REG_F27) | RtoB(ppc64.REG_F28) | RtoB(ppc64.REG_F29) | RtoB(ppc64.REG_F30) | RtoB(ppc64.REG_F31)
+
+ return regbits
+}
+
+func doregbits(r int) uint64 {
+ return 0
+}
+
+/*
+ * track register variables including external registers:
+ * bit reg
+ * 0 R0
+ * 1 R1
+ * ... ...
+ * 31 R31
+ * 32+0 F0
+ * 32+1 F1
+ * ... ...
+ * 32+31 F31
+ */
+func RtoB(r int) uint64 {
+ if r > ppc64.REG_R0 && r <= ppc64.REG_R31 {
+ return 1 << uint(r-ppc64.REG_R0)
+ }
+ if r >= ppc64.REG_F0 && r <= ppc64.REG_F31 {
+ return 1 << uint(32+r-ppc64.REG_F0)
+ }
+ return 0
+}
+
+func BtoR(b uint64) int {
+ b &= 0xffffffff
+ if b == 0 {
+ return 0
+ }
+ return gc.Bitno(b) + ppc64.REG_R0
+}
+
+func BtoF(b uint64) int {
+ b >>= 32
+ if b == 0 {
+ return 0
+ }
+ return gc.Bitno(b) + ppc64.REG_F0
+}
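
RtoB packs each allocatable register into one bit of a uint64 — integer registers in the low word, floats in the high word, with R0 deliberately unmapped because it is excluded from allocation — and BtoR/BtoF invert the mapping from the lowest set bit (gc.Bitno). A round-trip sketch with stand-in register numbers in place of the real obj constants:

package main

import (
	"fmt"
	"math/bits"
)

// Stand-in register bases (the real code uses ppc64.REG_R0 and REG_F0).
const (
	REG_R0 = 100
	REG_F0 = 200
)

// RtoB: R1-R31 map to bits 1-31, F0-F31 to bits 32-63; R0 maps to 0.
func RtoB(r int) uint64 {
	if r > REG_R0 && r <= REG_R0+31 {
		return 1 << uint(r-REG_R0)
	}
	if r >= REG_F0 && r <= REG_F0+31 {
		return 1 << uint(32+r-REG_F0)
	}
	return 0
}

// BtoR recovers an integer register from the low word of the mask;
// TrailingZeros64 plays the role of gc.Bitno on a single-bit mask.
func BtoR(b uint64) int {
	b &= 0xffffffff
	if b == 0 {
		return 0
	}
	return bits.TrailingZeros64(b) + REG_R0
}

func main() {
	r := REG_R0 + 5 // "R5"
	fmt.Println(BtoR(RtoB(r)) == r) // true
}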
diff --git a/src/cmd/compile/internal/ppc64/cgen.go b/src/cmd/compile/internal/ppc64/cgen.go
index 37dd6ce..740e64c 100644
--- a/src/cmd/compile/internal/ppc64/cgen.go
+++ b/src/cmd/compile/internal/ppc64/cgen.go
@@ -20,7 +20,7 @@
var op int
switch align {
default:
- gc.Fatal("sgen: invalid alignment %d for %v", align, n.Type)
+ gc.Fatalf("sgen: invalid alignment %d for %v", align, n.Type)
case 1:
op = ppc64.AMOVBU
@@ -36,7 +36,7 @@
}
if w%int64(align) != 0 {
- gc.Fatal("sgen: unaligned size %d (align=%d) for %v", w, align, n.Type)
+ gc.Fatalf("sgen: unaligned size %d (align=%d) for %v", w, align, n.Type)
}
c := int32(w / int64(align))
@@ -126,13 +126,7 @@
// ADDs. That will produce shorter, more
// pipeline-able code.
var p *obj.Prog
- for {
- tmp14 := c
- c--
- if tmp14 <= 0 {
- break
- }
-
+ for ; c > 0; c-- {
p = gins(op, &src, &tmp)
p.From.Type = obj.TYPE_MEM
p.From.Offset = int64(dir)
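
This hunk (and the matching rewrites in the other files below) replaces c2go's mechanical translation of C's while(c--) with an idiomatic counting loop. The two forms execute the body the same number of times for any starting c; only the dead final value of c differs (-1 versus 0), so the rewrite is safe wherever c is not read after the loop. A quick equivalence check:

package main

import "fmt"

// countOld mirrors the removed form: run while the pre-decrement
// value was positive.
func countOld(c int32) (n int) {
	for {
		tmp := c
		c--
		if tmp <= 0 {
			break
		}
		n++ // loop body
	}
	return n
}

// countNew mirrors the replacement: for ; c > 0; c-- { ... }.
func countNew(c int32) (n int) {
	for ; c > 0; c-- {
		n++ // loop body
	}
	return n
}

func main() {
	for c := int32(-2); c <= 3; c++ {
		fmt.Println(c, countOld(c) == countNew(c)) // always true
	}
}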
diff --git a/src/cmd/compile/internal/ppc64/galign.go b/src/cmd/compile/internal/ppc64/galign.go
index 73aef6f..16509da 100644
--- a/src/cmd/compile/internal/ppc64/galign.go
+++ b/src/cmd/compile/internal/ppc64/galign.go
@@ -34,9 +34,9 @@
* int, uint, and uintptr
*/
var typedefs = []gc.Typedef{
- gc.Typedef{"int", gc.TINT, gc.TINT64},
- gc.Typedef{"uint", gc.TUINT, gc.TUINT64},
- gc.Typedef{"uintptr", gc.TUINTPTR, gc.TUINT64},
+ {"int", gc.TINT, gc.TINT64},
+ {"uint", gc.TUINT, gc.TUINT64},
+ {"uintptr", gc.TUINTPTR, gc.TUINT64},
}
func betypeinit() {
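
The typedefs hunks in this and the other galign.go files apply gofmt -s's composite-literal simplification: inside a slice, array, or map literal the element type may be elided. Illustrated with a hypothetical stand-in for gc.Typedef:

package main

import "fmt"

// Typedef is a stand-in for gc.Typedef.
type Typedef struct {
	Name   string
	Etype  int
	Sameas int
}

var typedefs = []Typedef{
	// Writing Typedef{...} for each element is redundant; the type is
	// implied by the slice literal, and gofmt -s drops it.
	{"int", 1, 2},
	{"uint", 3, 4},
}

func main() {
	fmt.Println(typedefs[1].Name) // uint
}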
diff --git a/src/cmd/compile/internal/ppc64/ggen.go b/src/cmd/compile/internal/ppc64/ggen.go
index 5b282eb..173e2f0 100644
--- a/src/cmd/compile/internal/ppc64/ggen.go
+++ b/src/cmd/compile/internal/ppc64/ggen.go
@@ -36,10 +36,10 @@
continue
}
if n.Class != gc.PAUTO {
- gc.Fatal("needzero class %d", n.Class)
+ gc.Fatalf("needzero class %d", n.Class)
}
if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
- gc.Fatal("var %v has size %d offset %d", gc.Nconv(n, obj.FmtLong), int(n.Type.Width), int(n.Xoffset))
+ gc.Fatalf("var %v has size %d offset %d", gc.Nconv(n, obj.FmtLong), int(n.Type.Width), int(n.Xoffset))
}
if lo != hi && n.Xoffset+n.Type.Width >= lo-int64(2*gc.Widthreg) {
@@ -141,13 +141,13 @@
t := nl.Type
t0 := t
- check := 0
+ check := false
if gc.Issigned[t.Etype] {
- check = 1
+ check = true
if gc.Isconst(nl, gc.CTINT) && nl.Int() != -(1<<uint64(t.Width*8-1)) {
- check = 0
+ check = false
} else if gc.Isconst(nr, gc.CTINT) && nr.Int() != -1 {
- check = 0
+ check = false
}
}
@@ -157,7 +157,7 @@
} else {
t = gc.Types[gc.TUINT64]
}
- check = 0
+ check = false
}
a := optoas(gc.ODIV, t)
@@ -198,7 +198,7 @@
gc.Patch(p1, gc.Pc)
var p2 *obj.Prog
- if check != 0 {
+ if check {
var nm1 gc.Node
gc.Nodconst(&nm1, t, -1)
gins(optoas(gc.OCMP, t), &tr, &nm1)
@@ -242,7 +242,7 @@
}
gc.Regfree(&tl)
- if check != 0 {
+ if check {
gc.Patch(p2, gc.Pc)
}
}
@@ -254,9 +254,7 @@
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
// largest ullman on left.
if nl.Ullman < nr.Ullman {
- tmp := (*gc.Node)(nl)
- nl = nr
- nr = tmp
+ nl, nr = nr, nl
}
t := (*gc.Type)(nl.Type)
@@ -291,7 +289,7 @@
}
default:
- gc.Fatal("cgen_hmul %v", t)
+ gc.Fatalf("cgen_hmul %v", t)
}
gc.Cgen(&n1, res)
@@ -411,7 +409,7 @@
q := uint64(w / 8) // dwords
if gc.Reginuse(ppc64.REGRT1) {
- gc.Fatal("%v in use during clearfat", obj.Rconv(ppc64.REGRT1))
+ gc.Fatalf("%v in use during clearfat", obj.Rconv(ppc64.REGRT1))
}
var r0 gc.Node
@@ -499,7 +497,7 @@
gc.Warnl(int(p.Lineno), "generated nil check")
}
if p.From.Type != obj.TYPE_REG {
- gc.Fatal("invalid nil check %v\n", p)
+ gc.Fatalf("invalid nil check %v\n", p)
}
/*
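
The check flag made boolean in this file guards the one signed-division case that needs a runtime test: dividing the most negative value by -1 overflows, and the Go spec defines the result as the dividend itself (remainder 0) rather than a hardware trap. check is cleared again when either operand is a constant that rules the case out. A sketch of the test the generated code performs, assuming int64 operands:

package main

import (
	"fmt"
	"math"
)

// div64 mirrors what the backend emits when check is true: the divisor
// is tested against -1 so the divide instruction never sees
// MinInt64 / -1.
func div64(x, y int64) (q, r int64) {
	if y == -1 {
		// -x wraps for x == math.MinInt64, which is exactly the
		// result the spec requires: q == x, r == 0.
		return -x, 0
	}
	return x / y, x % y
}

func main() {
	q, r := div64(math.MinInt64, -1)
	fmt.Println(q == math.MinInt64, r == 0) // true true
}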
diff --git a/src/cmd/compile/internal/ppc64/gsubr.go b/src/cmd/compile/internal/ppc64/gsubr.go
index 2501972..4ef928c 100644
--- a/src/cmd/compile/internal/ppc64/gsubr.go
+++ b/src/cmd/compile/internal/ppc64/gsubr.go
@@ -93,7 +93,7 @@
switch as {
default:
- gc.Fatal("ginscon2")
+ gc.Fatalf("ginscon2")
case ppc64.ACMP:
if -ppc64.BIG <= c && c <= ppc64.BIG {
@@ -261,7 +261,7 @@
switch uint32(ft)<<16 | uint32(tt) {
default:
- gc.Fatal("gmove %v -> %v", gc.Tconv(f.Type, obj.FmtLong), gc.Tconv(t.Type, obj.FmtLong))
+ gc.Fatalf("gmove %v -> %v", gc.Tconv(f.Type, obj.FmtLong), gc.Tconv(t.Type, obj.FmtLong))
/*
* integer copy and truncate
@@ -614,12 +614,12 @@
case ppc64.AAND, ppc64.AMULLD:
if p.From.Type == obj.TYPE_CONST {
gc.Debug['h'] = 1
- gc.Fatal("bad inst: %v", p)
+ gc.Fatalf("bad inst: %v", p)
}
case ppc64.ACMP, ppc64.ACMPU:
if p.From.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_MEM {
gc.Debug['h'] = 1
- gc.Fatal("bad inst: %v", p)
+ gc.Fatalf("bad inst: %v", p)
}
}
@@ -658,7 +658,7 @@
if w != 0 && ((f != nil && p.From.Width < int64(w)) || (t != nil && p.To.Type != obj.TYPE_REG && p.To.Width > int64(w))) {
gc.Dump("f", f)
gc.Dump("t", t)
- gc.Fatal("bad width: %v (%d, %d)\n", p, p.From.Width, p.To.Width)
+ gc.Fatalf("bad width: %v (%d, %d)\n", p, p.From.Width, p.To.Width)
}
return p
@@ -669,13 +669,13 @@
*/
func optoas(op int, t *gc.Type) int {
if t == nil {
- gc.Fatal("optoas: t is nil")
+ gc.Fatalf("optoas: t is nil")
}
a := int(obj.AXXX)
switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
default:
- gc.Fatal("optoas: no entry for op=%v type=%v", gc.Oconv(int(op), 0), t)
+ gc.Fatalf("optoas: no entry for op=%v type=%v", gc.Oconv(int(op), 0), t)
case gc.OEQ<<16 | gc.TBOOL,
gc.OEQ<<16 | gc.TINT8,
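
optoas, whose error paths are updated above, dispatches on an (op, type) pair by packing the two small enums into a single uint32 switch key, one case per cell of the op x type matrix. A runnable sketch with hypothetical codes in place of gc.OADD, gc.TINT64, and friends:

package main

import "fmt"

// Hypothetical op and type codes standing in for gc.OADD, gc.TINT64,
// etc. Both must fit in 16 bits for the packing to be collision-free.
const (
	OADD   = 1
	OSUB   = 2
	TINT32 = 10
	TINT64 = 11
)

func optoas(op, etype int) string {
	switch uint32(op)<<16 | uint32(etype) {
	case OADD<<16 | TINT32:
		return "ADD"
	case OADD<<16 | TINT64:
		return "ADDV"
	case OSUB<<16 | TINT64:
		return "SUBV"
	}
	return "XXX" // no entry; the real code calls gc.Fatalf here
}

func main() {
	fmt.Println(optoas(OADD, TINT64)) // ADDV
}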
diff --git a/src/cmd/compile/internal/ppc64/prog.go b/src/cmd/compile/internal/ppc64/prog.go
index 406f235..9b8719b 100644
--- a/src/cmd/compile/internal/ppc64/prog.go
+++ b/src/cmd/compile/internal/ppc64/prog.go
@@ -139,7 +139,7 @@
info := &p.Info
*info = progtable[p.As]
if info.Flags == 0 {
- gc.Fatal("proginfo: unknown instruction %v", p)
+ gc.Fatalf("proginfo: unknown instruction %v", p)
}
if (info.Flags&gc.RegRead != 0) && p.Reg == 0 {
@@ -302,7 +302,7 @@
return i
}
}
- gc.Fatal("as2variant: instruction %v is not a variant of itself", obj.Aconv(as))
+ gc.Fatalf("as2variant: instruction %v is not a variant of itself", obj.Aconv(as))
return 0
}
diff --git a/src/cmd/compile/internal/x86/cgen64.go b/src/cmd/compile/internal/x86/cgen64.go
index 0b061ff..f1e570d 100644
--- a/src/cmd/compile/internal/x86/cgen64.go
+++ b/src/cmd/compile/internal/x86/cgen64.go
@@ -19,12 +19,12 @@
if res.Op != gc.OINDREG && res.Op != gc.ONAME {
gc.Dump("n", n)
gc.Dump("res", res)
- gc.Fatal("cgen64 %v of %v", gc.Oconv(int(n.Op), 0), gc.Oconv(int(res.Op), 0))
+ gc.Fatalf("cgen64 %v of %v", gc.Oconv(int(n.Op), 0), gc.Oconv(int(res.Op), 0))
}
switch n.Op {
default:
- gc.Fatal("cgen64 %v", gc.Oconv(int(n.Op), 0))
+ gc.Fatalf("cgen64 %v", gc.Oconv(int(n.Op), 0))
case gc.OMINUS:
gc.Cgen(n.Left, res)
@@ -531,7 +531,7 @@
var br *obj.Prog
switch op {
default:
- gc.Fatal("cmp64 %v %v", gc.Oconv(int(op), 0), t)
+ gc.Fatalf("cmp64 %v %v", gc.Oconv(int(op), 0), t)
// cmp hi
// jne L
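
The cmp-hi / jne-L comment is the heart of cmp64: on a 32-bit machine a 64-bit comparison compares high words first, and only if they are equal do the low words — compared unsigned even for signed types — decide. A value-level sketch:

package main

import "fmt"

// less64 compares two signed 64-bit values given as 32-bit halves,
// the way cmp64 arranges the generated compares and branches.
func less64(xhi int32, xlo uint32, yhi int32, ylo uint32) bool {
	if xhi != yhi {
		return xhi < yhi // high words: signed compare
	}
	return xlo < ylo // low words: always unsigned
}

func main() {
	x, y := int64(-5), int64(3)
	got := less64(int32(x>>32), uint32(x), int32(y>>32), uint32(y))
	fmt.Println(got == (x < y)) // true
}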
diff --git a/src/cmd/compile/internal/x86/galign.go b/src/cmd/compile/internal/x86/galign.go
index 2b602e1..2535e3e 100644
--- a/src/cmd/compile/internal/x86/galign.go
+++ b/src/cmd/compile/internal/x86/galign.go
@@ -28,9 +28,9 @@
* int, uint, and uintptr
*/
var typedefs = []gc.Typedef{
- gc.Typedef{"int", gc.TINT, gc.TINT32},
- gc.Typedef{"uint", gc.TUINT, gc.TUINT32},
- gc.Typedef{"uintptr", gc.TUINTPTR, gc.TUINT32},
+ {"int", gc.TINT, gc.TINT32},
+ {"uint", gc.TUINT, gc.TUINT32},
+ {"uintptr", gc.TUINTPTR, gc.TUINT32},
}
func betypeinit() {
diff --git a/src/cmd/compile/internal/x86/ggen.go b/src/cmd/compile/internal/x86/ggen.go
index ae9881d..85ae808 100644
--- a/src/cmd/compile/internal/x86/ggen.go
+++ b/src/cmd/compile/internal/x86/ggen.go
@@ -34,10 +34,10 @@
continue
}
if n.Class != gc.PAUTO {
- gc.Fatal("needzero class %d", n.Class)
+ gc.Fatalf("needzero class %d", n.Class)
}
if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
- gc.Fatal("var %v has size %d offset %d", gc.Nconv(n, obj.FmtLong), int(n.Type.Width), int(n.Xoffset))
+ gc.Fatalf("var %v has size %d offset %d", gc.Nconv(n, obj.FmtLong), int(n.Type.Width), int(n.Xoffset))
}
if lo != hi && n.Xoffset+n.Type.Width == lo-int64(2*gc.Widthptr) {
// merge with range we already have
@@ -133,24 +133,14 @@
n1.Op = gc.OINDREG
var z gc.Node
gc.Nodconst(&z, gc.Types[gc.TUINT64], 0)
- for {
- tmp14 := q
- q--
- if tmp14 <= 0 {
- break
- }
+ for ; q > 0; q-- {
n1.Type = z.Type
gins(x86.AMOVL, &z, &n1)
n1.Xoffset += 4
}
gc.Nodconst(&z, gc.Types[gc.TUINT8], 0)
- for {
- tmp15 := c
- c--
- if tmp15 <= 0 {
- break
- }
+ for ; c > 0; c-- {
n1.Type = z.Type
gins(x86.AMOVB, &z, &n1)
n1.Xoffset++
@@ -213,13 +203,13 @@
t := nl.Type
t0 := t
- check := 0
+ check := false
if gc.Issigned[t.Etype] {
- check = 1
+ check = true
if gc.Isconst(nl, gc.CTINT) && nl.Int() != -1<<uint64(t.Width*8-1) {
- check = 0
+ check = false
} else if gc.Isconst(nr, gc.CTINT) && nr.Int() != -1 {
- check = 0
+ check = false
}
}
@@ -229,7 +219,7 @@
} else {
t = gc.Types[gc.TUINT32]
}
- check = 0
+ check = false
}
var t1 gc.Node
@@ -278,7 +268,7 @@
gc.Patch(p1, gc.Pc)
}
- if check != 0 {
+ if check {
gc.Nodconst(&n4, t, -1)
gins(optoas(gc.OCMP, t), &n1, &n4)
p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
@@ -313,7 +303,7 @@
} else {
gmove(dx, res)
}
- if check != 0 {
+ if check {
gc.Patch(p2, gc.Pc)
}
}
@@ -350,7 +340,7 @@
*/
func cgen_div(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
if gc.Is64(nl.Type) {
- gc.Fatal("cgen_div %v", nl.Type)
+ gc.Fatalf("cgen_div %v", nl.Type)
}
var t *gc.Type
@@ -377,7 +367,7 @@
*/
func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
if nl.Type.Width > 4 {
- gc.Fatal("cgen_shift %v", nl.Type)
+ gc.Fatalf("cgen_shift %v", nl.Type)
}
w := int(nl.Type.Width * 8)
@@ -513,9 +503,7 @@
// largest ullman on left.
if nl.Ullman < nr.Ullman {
- tmp := nl
- nl = nr
- nr = tmp
+ nl, nr = nr, nl
}
var nt gc.Node
@@ -677,7 +665,7 @@
switch n.Op {
default:
gc.Dump("cgen_floatsse", n)
- gc.Fatal("cgen_floatsse %v", gc.Oconv(int(n.Op), 0))
+ gc.Fatalf("cgen_floatsse %v", gc.Oconv(int(n.Op), 0))
return
case gc.OMINUS,
@@ -705,9 +693,7 @@
sbop: // symmetric binary
if nl.Ullman < nr.Ullman || nl.Op == gc.OLITERAL {
- r := nl
- nl = nr
- nr = r
+ nl, nr = nr, nl
}
abop: // asymmetric binary
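
The clearing loops rewritten earlier in this file zero q 4-byte words and then c trailing bytes, the usual memset split on 386 (MOVL for the words, MOVB for the remainder). A portable sketch of the same decomposition:

package main

import "fmt"

// clear zeroes b the way the generated code does: word-sized stores
// first, then byte stores for the remainder.
func clear(b []byte) {
	q, c := len(b)/4, len(b)%4
	i := 0
	for ; q > 0; q-- {
		b[i], b[i+1], b[i+2], b[i+3] = 0, 0, 0, 0
		i += 4
	}
	for ; c > 0; c-- {
		b[i] = 0
		i++
	}
}

func main() {
	b := []byte{1, 2, 3, 4, 5, 6, 7}
	clear(b)
	fmt.Println(b) // [0 0 0 0 0 0 0]
}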
diff --git a/src/cmd/compile/internal/x86/gsubr.go b/src/cmd/compile/internal/x86/gsubr.go
index 7593d04..f57bbcb 100644
--- a/src/cmd/compile/internal/x86/gsubr.go
+++ b/src/cmd/compile/internal/x86/gsubr.go
@@ -55,13 +55,13 @@
*/
func optoas(op int, t *gc.Type) int {
if t == nil {
- gc.Fatal("optoas: t is nil")
+ gc.Fatalf("optoas: t is nil")
}
a := obj.AXXX
switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
default:
- gc.Fatal("optoas: no entry %v-%v", gc.Oconv(int(op), 0), t)
+ gc.Fatalf("optoas: no entry %v-%v", gc.Oconv(int(op), 0), t)
case gc.OADDR<<16 | gc.TPTR32:
a = x86.ALEAL
@@ -413,7 +413,7 @@
if !gc.Thearch.Use387 {
switch uint32(op)<<16 | uint32(et) {
default:
- gc.Fatal("foptoas-sse: no entry %v-%v", gc.Oconv(int(op), 0), t)
+ gc.Fatalf("foptoas-sse: no entry %v-%v", gc.Oconv(int(op), 0), t)
case gc.OCMP<<16 | gc.TFLOAT32:
a = x86.AUCOMISS
@@ -546,7 +546,7 @@
return x86.AFCHS
}
- gc.Fatal("foptoas %v %v %#x", gc.Oconv(int(op), 0), t, flg)
+ gc.Fatalf("foptoas %v %v %#x", gc.Oconv(int(op), 0), t, flg)
return 0
}
@@ -655,11 +655,11 @@
*/
func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
if !gc.Is64(n.Type) {
- gc.Fatal("split64 %v", n.Type)
+ gc.Fatalf("split64 %v", n.Type)
}
if nsclean >= len(sclean) {
- gc.Fatal("split64 clean")
+ gc.Fatalf("split64 clean")
}
sclean[nsclean].Op = gc.OEMPTY
nsclean++
@@ -714,7 +714,7 @@
func splitclean() {
if nsclean <= 0 {
- gc.Fatal("splitclean")
+ gc.Fatalf("splitclean")
}
nsclean--
if sclean[nsclean].Op != gc.OEMPTY {
@@ -805,7 +805,7 @@
switch uint32(ft)<<16 | uint32(tt) {
default:
// should not happen
- gc.Fatal("gmove %v -> %v", f, t)
+ gc.Fatalf("gmove %v -> %v", f, t)
return
/*
@@ -1372,7 +1372,7 @@
gmove(f, &t1)
switch tt {
default:
- gc.Fatal("gmove %v", t)
+ gc.Fatalf("gmove %v", t)
case gc.TINT8:
gins(x86.ACMPL, &t1, ncon(-0x80&(1<<32-1)))
@@ -1483,7 +1483,7 @@
}
if gc.Ismem(t) {
if f.Op != gc.OREGISTER || f.Reg != x86.REG_F0 {
- gc.Fatal("gmove %v", f)
+ gc.Fatalf("gmove %v", f)
}
a = x86.AFMOVFP
if ft == gc.TFLOAT64 {
@@ -1551,7 +1551,7 @@
// should not happen
fatal:
- gc.Fatal("gmove %v -> %v", gc.Nconv(f, obj.FmtLong), gc.Nconv(t, obj.FmtLong))
+ gc.Fatalf("gmove %v -> %v", gc.Nconv(f, obj.FmtLong), gc.Nconv(t, obj.FmtLong))
return
}
@@ -1567,7 +1567,7 @@
switch uint32(ft)<<16 | uint32(tt) {
// should not happen
default:
- gc.Fatal("gmove %v -> %v", f, t)
+ gc.Fatalf("gmove %v -> %v", f, t)
return
@@ -1703,13 +1703,13 @@
*/
func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
if as == x86.AFMOVF && f != nil && f.Op == gc.OREGISTER && t != nil && t.Op == gc.OREGISTER {
- gc.Fatal("gins MOVF reg, reg")
+ gc.Fatalf("gins MOVF reg, reg")
}
if as == x86.ACVTSD2SS && f != nil && f.Op == gc.OLITERAL {
- gc.Fatal("gins CVTSD2SS const")
+ gc.Fatalf("gins CVTSD2SS const")
}
if as == x86.AMOVSD && t != nil && t.Op == gc.OREGISTER && t.Reg == x86.REG_F0 {
- gc.Fatal("gins MOVSD into F0")
+ gc.Fatalf("gins MOVSD into F0")
}
if as == x86.AMOVL && f != nil && f.Op == gc.OADDR && f.Left.Op == gc.ONAME && f.Left.Class != gc.PEXTERN && f.Left.Class != gc.PFUNC {
@@ -1731,7 +1731,7 @@
case x86.ALEAL:
if f != nil && gc.Isconst(f, gc.CTNIL) {
- gc.Fatal("gins LEAL nil %v", f.Type)
+ gc.Fatalf("gins LEAL nil %v", f.Type)
}
}
@@ -1758,11 +1758,11 @@
if true && w != 0 && f != nil && (p.From.Width > int64(w) || p.To.Width > int64(w)) {
gc.Dump("bad width from:", f)
gc.Dump("bad width to:", t)
- gc.Fatal("bad width: %v (%d, %d)\n", p, p.From.Width, p.To.Width)
+ gc.Fatalf("bad width: %v (%d, %d)\n", p, p.From.Width, p.To.Width)
}
if p.To.Type == obj.TYPE_ADDR && w > 0 {
- gc.Fatal("bad use of addr: %v", p)
+ gc.Fatalf("bad use of addr: %v", p)
}
return p
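
split64, whose error paths are touched above, hands the 386 backend two separately addressable 32-bit halves of a 64-bit operand, with sclean/splitclean tracking the temporaries. At the value level the split and its inverse are simply:

package main

import "fmt"

// split64 at the value level: low word first, as on a little-endian
// 386. The real function splits a compiler Node, not an integer.
func split64(v uint64) (lo, hi uint32) {
	return uint32(v), uint32(v >> 32)
}

func join64(lo, hi uint32) uint64 {
	return uint64(hi)<<32 | uint64(lo)
}

func main() {
	v := uint64(0x0123456789abcdef)
	lo, hi := split64(v)
	fmt.Printf("%#x %#x\n", lo, hi)  // 0x89abcdef 0x1234567
	fmt.Println(join64(lo, hi) == v) // true
}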
diff --git a/src/cmd/compile/internal/x86/peep.go b/src/cmd/compile/internal/x86/peep.go
index 8b50eab..63e64cb 100644
--- a/src/cmd/compile/internal/x86/peep.go
+++ b/src/cmd/compile/internal/x86/peep.go
@@ -660,10 +660,10 @@
*/
func copyas(a *obj.Addr, v *obj.Addr) bool {
if x86.REG_AL <= a.Reg && a.Reg <= x86.REG_BL {
- gc.Fatal("use of byte register")
+ gc.Fatalf("use of byte register")
}
if x86.REG_AL <= v.Reg && v.Reg <= x86.REG_BL {
- gc.Fatal("use of byte register")
+ gc.Fatalf("use of byte register")
}
if a.Type != v.Type || a.Name != v.Name || a.Reg != v.Reg {
diff --git a/src/cmd/compile/internal/x86/prog.go b/src/cmd/compile/internal/x86/prog.go
index ef38ad1..465a21f 100644
--- a/src/cmd/compile/internal/x86/prog.go
+++ b/src/cmd/compile/internal/x86/prog.go
@@ -262,7 +262,7 @@
info := &p.Info
*info = progtable[p.As]
if info.Flags == 0 {
- gc.Fatal("unknown instruction %v", p)
+ gc.Fatalf("unknown instruction %v", p)
}
if (info.Flags&gc.ShiftCX != 0) && p.From.Type != obj.TYPE_CONST {