[dev.cc] cmd/5g etc: code cleanup: delay var decls and eliminate dead code
Ran rsc.io/grind rev 6f0e601 on the source files.
The cleanups move var declarations as close to their first use
as possible, splitting disjoint uses of a single var into
separate variables. They also remove dead code (especially in
func sudoaddable), which in turn helps with the var moving.
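
As an illustration, here is a minimal hand-written sketch of
both rewrites; the package and functions below are invented
for this message, not code from the patch:

	package example

	// Before grind: t is declared up front and reused for two
	// unrelated values, and the tail after return is dead code.
	func before(a, b int) (int, int) {
		var t int
		t = a + b // first use: a sum
		sum := t
		t = a * b // disjoint second use: a product
		return sum, t
		t = 0 // unreachable; grind deletes code like this
		_ = t
	}

	// After grind: each disjoint use becomes its own variable,
	// declared at its first use, and the dead tail is removed.
	func after(a, b int) (int, int) {
		sum := a + b  // declared at first use
		prod := a * b // disjoint use split into its own variable
		return sum, prod
	}

A variable declared inside the branch that uses it is only
zeroed when that branch runs, and the shorter live ranges are
easier to keep in registers, which is presumably where the
compile-time win below comes from.
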
There's more cleanup to come, but this alone cuts the
time spent compiling html/template on my 2013 MacBook Pro
from 3.1 seconds to 2.3 seconds.
Change-Id: I4de499f47b1dd47a560c310bbcde6b08d425cfd6
Reviewed-on: https://go-review.googlesource.com/5637
Reviewed-by: Rob Pike <r@golang.org>
diff --git a/src/cmd/5g/cgen.go b/src/cmd/5g/cgen.go
index bdee52a..638c5a6 100644
--- a/src/cmd/5g/cgen.go
+++ b/src/cmd/5g/cgen.go
@@ -20,26 +20,18 @@
* simplifies and calls gmove.
*/
func cgen(n *gc.Node, res *gc.Node) {
- var nl *gc.Node
- var nr *gc.Node
- var r *gc.Node
- var n1 gc.Node
- var n2 gc.Node
- var f0 gc.Node
- var f1 gc.Node
- var a int
- var w int
- var rg int
- var p1 *obj.Prog
- var p2 *obj.Prog
- var p3 *obj.Prog
- var addr obj.Addr
-
if gc.Debug['g'] != 0 {
gc.Dump("\ncgen-n", n)
gc.Dump("cgen-res", res)
}
+ var n1 gc.Node
+ var nr *gc.Node
+ var nl *gc.Node
+ var a int
+ var f1 gc.Node
+ var f0 gc.Node
+ var n2 gc.Node
if n == nil || n.Type == nil {
goto ret
}
@@ -55,6 +47,7 @@
gc.OSLICE3,
gc.OSLICE3ARR:
if res.Op != gc.ONAME || res.Addable == 0 {
+ var n1 gc.Node
gc.Tempname(&n1, n.Type)
gc.Cgen_slice(n, &n1)
cgen(&n1, res)
@@ -65,6 +58,7 @@
case gc.OEFACE:
if res.Op != gc.ONAME || res.Addable == 0 {
+ var n1 gc.Node
gc.Tempname(&n1, n.Type)
gc.Cgen_eface(n, &n1)
cgen(&n1, res)
@@ -83,6 +77,7 @@
gc.Fatal("cgen: this is going to misscompile")
}
if res.Ullman >= gc.UINF {
+ var n1 gc.Node
gc.Tempname(&n1, n.Type)
cgen(n, &n1)
cgen(&n1, res)
@@ -122,6 +117,7 @@
if gc.Is64(n.Type) || gc.Is64(res.Type) || n.Op == gc.OREGISTER || res.Op == gc.OREGISTER || gc.Iscomplex[n.Type.Etype] != 0 || gc.Iscomplex[res.Type.Etype] != 0 {
gmove(n, res)
} else {
+ var n1 gc.Node
regalloc(&n1, n.Type, nil)
gmove(n, &n1)
cgen(&n1, res)
@@ -135,6 +131,7 @@
if n.Addable == 0 && res.Addable == 0 {
// could use regalloc here sometimes,
// but have to check for ullman >= UINF.
+ var n1 gc.Node
gc.Tempname(&n1, n.Type)
cgen(n, &n1)
@@ -145,6 +142,7 @@
// if result is not addressable directly but n is,
// compute its address and then store via the address.
if res.Addable == 0 {
+ var n1 gc.Node
igen(res, &n1, nil)
cgen(n, &n1)
regfree(&n1)
@@ -158,11 +156,14 @@
// if n is sudoaddable generate addr and move
if !gc.Is64(n.Type) && !gc.Is64(res.Type) && gc.Iscomplex[n.Type.Etype] == 0 && gc.Iscomplex[res.Type.Etype] == 0 {
- a = optoas(gc.OAS, n.Type)
+ a := optoas(gc.OAS, n.Type)
+ var w int
+ var addr obj.Addr
if sudoaddable(a, n, &addr, &w) {
if res.Op != gc.OREGISTER {
+ var n2 gc.Node
regalloc(&n2, res.Type, nil)
- p1 = gins(a, nil, &n2)
+ p1 := gins(a, nil, &n2)
p1.From = addr
if gc.Debug['g'] != 0 {
fmt.Printf("%v [ignore previous line]\n", p1)
@@ -170,7 +171,7 @@
gmove(&n2, res)
regfree(&n2)
} else {
- p1 = gins(a, nil, res)
+ p1 := gins(a, nil, res)
p1.From = addr
if gc.Debug['g'] != 0 {
fmt.Printf("%v [ignore previous line]\n", p1)
@@ -191,9 +192,10 @@
if nl != nil && nl.Ullman >= gc.UINF {
if nr != nil && nr.Ullman >= gc.UINF {
+ var n1 gc.Node
gc.Tempname(&n1, nl.Type)
cgen(nl, &n1)
- n2 = *n
+ n2 := *n
n2.Left = &n1
cgen(&n2, res)
goto ret
@@ -244,11 +246,11 @@
gc.OGE,
gc.OGT,
gc.ONOT:
- p1 = gc.Gbranch(arm.AB, nil, 0)
+ p1 := gc.Gbranch(arm.AB, nil, 0)
- p2 = gc.Pc
+ p2 := gc.Pc
gmove(gc.Nodbool(true), res)
- p3 = gc.Gbranch(arm.AB, nil, 0)
+ p3 := gc.Gbranch(arm.AB, nil, 0)
gc.Patch(p1, gc.Pc)
bgen(n, true, 0, p2)
gmove(gc.Nodbool(false), res)
@@ -261,7 +263,7 @@
// unary
case gc.OCOM:
- a = optoas(gc.OXOR, nl.Type)
+ a := optoas(gc.OXOR, nl.Type)
regalloc(&n1, nl.Type, nil)
cgen(nl, &n1)
@@ -306,6 +308,7 @@
break
}
+ var n1 gc.Node
if nl.Addable != 0 && !gc.Is64(nl.Type) {
regalloc(&n1, nl.Type, res)
gmove(nl, &n1)
@@ -318,6 +321,7 @@
cgen(nl, &n1)
}
+ var n2 gc.Node
if n.Type.Width > int64(gc.Widthptr) || gc.Is64(n.Type) || gc.Isfloat[n.Type.Etype] != 0 {
gc.Tempname(&n2, n.Type)
} else {
@@ -337,6 +341,7 @@
gc.OINDEX,
gc.OIND,
gc.ONAME: // PHEAP or PPARAMREF var
+ var n1 gc.Node
igen(n, &n1, res)
gmove(&n1, res)
@@ -344,6 +349,7 @@
// interface table is first word of interface value
case gc.OITAB:
+ var n1 gc.Node
igen(nl, &n1, res)
n1.Type = n.Type
@@ -353,14 +359,16 @@
// pointer is the first word of string or slice.
case gc.OSPTR:
if gc.Isconst(nl, gc.CTSTR) {
+ var n1 gc.Node
regalloc(&n1, gc.Types[gc.Tptr], res)
- p1 = gins(arm.AMOVW, nil, &n1)
+ p1 := gins(arm.AMOVW, nil, &n1)
gc.Datastring(nl.Val.U.Sval.S, &p1.From)
gmove(&n1, res)
regfree(&n1)
break
}
+ var n1 gc.Node
igen(nl, &n1, res)
n1.Type = n.Type
gmove(&n1, res)
@@ -370,13 +378,15 @@
if gc.Istype(nl.Type, gc.TMAP) || gc.Istype(nl.Type, gc.TCHAN) {
// map has len in the first 32-bit word.
// a zero pointer means zero length
+ var n1 gc.Node
regalloc(&n1, gc.Types[gc.Tptr], res)
cgen(nl, &n1)
+ var n2 gc.Node
gc.Nodconst(&n2, gc.Types[gc.Tptr], 0)
gcmp(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2)
- p1 = gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, -1)
+ p1 := gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, -1)
n2 = n1
n2.Op = gc.OINDREG
@@ -392,6 +402,7 @@
if gc.Istype(nl.Type, gc.TSTRING) || gc.Isslice(nl.Type) {
// both slice and string have len one pointer into the struct.
+ var n1 gc.Node
igen(nl, &n1, res)
n1.Type = gc.Types[gc.TUINT32]
@@ -407,13 +418,15 @@
if gc.Istype(nl.Type, gc.TCHAN) {
// chan has cap in the second 32-bit word.
// a zero pointer means zero length
+ var n1 gc.Node
regalloc(&n1, gc.Types[gc.Tptr], res)
cgen(nl, &n1)
+ var n2 gc.Node
gc.Nodconst(&n2, gc.Types[gc.Tptr], 0)
gcmp(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2)
- p1 = gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, -1)
+ p1 := gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, -1)
n2 = n1
n2.Op = gc.OINDREG
@@ -429,6 +442,7 @@
}
if gc.Isslice(nl.Type) {
+ var n1 gc.Node
igen(nl, &n1, res)
n1.Type = gc.Types[gc.TUINT32]
n1.Xoffset += int64(gc.Array_cap)
@@ -446,7 +460,7 @@
// Pick it up again after the call.
case gc.OCALLMETH,
gc.OCALLFUNC:
- rg = -1
+ rg := -1
if n.Ullman >= gc.UINF {
if res != nil && (res.Op == gc.OREGISTER || res.Op == gc.OINDREG) {
@@ -479,7 +493,7 @@
sbop: // symmetric binary
if nl.Ullman < nr.Ullman {
- r = nl
+ r := nl
nl = nr
nr = r
}
@@ -601,20 +615,16 @@
* returns Prog* to patch to panic call.
*/
func cgenindex(n *gc.Node, res *gc.Node, bounded bool) *obj.Prog {
- var tmp gc.Node
- var lo gc.Node
- var hi gc.Node
- var zero gc.Node
- var n1 gc.Node
- var n2 gc.Node
-
if !gc.Is64(n.Type) {
cgen(n, res)
return nil
}
+ var tmp gc.Node
gc.Tempname(&tmp, gc.Types[gc.TINT64])
cgen(n, &tmp)
+ var lo gc.Node
+ var hi gc.Node
split64(&tmp, &lo, &hi)
gmove(&lo, res)
if bounded {
@@ -622,8 +632,11 @@
return nil
}
+ var n1 gc.Node
regalloc(&n1, gc.Types[gc.TINT32], nil)
+ var n2 gc.Node
regalloc(&n2, gc.Types[gc.TINT32], nil)
+ var zero gc.Node
gc.Nodconst(&zero, gc.Types[gc.TINT32], 0)
gmove(&hi, &n1)
gmove(&zero, &n2)
@@ -640,12 +653,6 @@
* The generated code checks that the result is not nil.
*/
func agen(n *gc.Node, res *gc.Node) {
- var nl *gc.Node
- var n1 gc.Node
- var n2 gc.Node
- var n3 gc.Node
- var r int
-
if gc.Debug['g'] != 0 {
gc.Dump("\nagen-res", res)
gc.Dump("agen-r", n)
@@ -659,15 +666,18 @@
n = n.Left
}
+ var nl *gc.Node
if gc.Isconst(n, gc.CTNIL) && n.Type.Width > int64(gc.Widthptr) {
// Use of a nil interface or nil slice.
// Create a temporary we can take the address of and read.
// The generated code is just going to panic, so it need not
// be terribly efficient. See issue 3670.
+ var n1 gc.Node
gc.Tempname(&n1, n.Type)
gc.Gvardef(&n1)
clearfat(&n1)
+ var n2 gc.Node
regalloc(&n2, gc.Types[gc.Tptr], res)
gins(arm.AMOVW, &n1, &n2)
gmove(&n2, res)
@@ -676,9 +686,10 @@
}
if n.Addable != 0 {
- n1 = gc.Node{}
+ n1 := gc.Node{}
n1.Op = gc.OADDR
n1.Left = n
+ var n2 gc.Node
regalloc(&n2, gc.Types[gc.Tptr], res)
gins(arm.AMOVW, &n1, &n2)
gmove(&n2, res)
@@ -696,7 +707,7 @@
// Pick it up again after the call.
case gc.OCALLMETH,
gc.OCALLFUNC:
- r = -1
+ r := -1
if n.Ullman >= gc.UINF {
if res.Op == gc.OREGISTER || res.Op == gc.OINDREG {
@@ -724,16 +735,19 @@
gc.OSLICESTR,
gc.OSLICE3,
gc.OSLICE3ARR:
+ var n1 gc.Node
gc.Tempname(&n1, n.Type)
gc.Cgen_slice(n, &n1)
agen(&n1, res)
case gc.OEFACE:
+ var n1 gc.Node
gc.Tempname(&n1, n.Type)
gc.Cgen_eface(n, &n1)
agen(&n1, res)
case gc.OINDEX:
+ var n1 gc.Node
agenr(n, &n1, res)
gmove(&n1, res)
regfree(&n1)
@@ -753,8 +767,11 @@
cgen(n.Heapaddr, res)
if n.Xoffset != 0 {
+ var n1 gc.Node
gc.Nodconst(&n1, gc.Types[gc.TINT32], n.Xoffset)
+ var n2 gc.Node
regalloc(&n2, n1.Type, nil)
+ var n3 gc.Node
regalloc(&n3, gc.Types[gc.TINT32], nil)
gmove(&n1, &n2)
gmove(res, &n3)
@@ -771,8 +788,11 @@
case gc.ODOT:
agen(nl, res)
if n.Xoffset != 0 {
+ var n1 gc.Node
gc.Nodconst(&n1, gc.Types[gc.TINT32], n.Xoffset)
+ var n2 gc.Node
regalloc(&n2, n1.Type, nil)
+ var n3 gc.Node
regalloc(&n3, gc.Types[gc.TINT32], nil)
gmove(&n1, &n2)
gmove(res, &n3)
@@ -786,8 +806,11 @@
cgen(nl, res)
gc.Cgen_checknil(res)
if n.Xoffset != 0 {
+ var n1 gc.Node
gc.Nodconst(&n1, gc.Types[gc.TINT32], n.Xoffset)
+ var n2 gc.Node
regalloc(&n2, n1.Type, nil)
+ var n3 gc.Node
regalloc(&n3, gc.Types[gc.Tptr], nil)
gmove(&n1, &n2)
gmove(res, &n3)
@@ -811,9 +834,6 @@
* The generated code checks that the result is not *nil.
*/
func igen(n *gc.Node, a *gc.Node, res *gc.Node) {
- var n1 gc.Node
- var r int
-
if gc.Debug['g'] != 0 {
gc.Dump("\nigen-n", n)
}
@@ -844,6 +864,7 @@
case gc.ODOTPTR:
if n.Left.Addable != 0 || n.Left.Op == gc.OCALLFUNC || n.Left.Op == gc.OCALLMETH || n.Left.Op == gc.OCALLINTER {
// igen-able nodes.
+ var n1 gc.Node
igen(n.Left, &n1, res)
regalloc(a, gc.Types[gc.Tptr], &n1)
@@ -865,7 +886,7 @@
case gc.OCALLMETH,
gc.OCALLFUNC,
gc.OCALLINTER:
- r = -1
+ r := -1
if n.Ullman >= gc.UINF {
if res != nil && (res.Op == gc.OREGISTER || res.Op == gc.OINDREG) {
@@ -906,8 +927,6 @@
* The caller must call regfree(a).
*/
func cgenr(n *gc.Node, a *gc.Node, res *gc.Node) {
- var n1 gc.Node
-
if gc.Debug['g'] != 0 {
gc.Dump("cgenr-n", n)
}
@@ -930,6 +949,7 @@
gc.OCALLFUNC,
gc.OCALLMETH,
gc.OCALLINTER:
+ var n1 gc.Node
igen(n, &n1, res)
regalloc(a, gc.Types[gc.Tptr], &n1)
gmove(&n1, a)
@@ -949,25 +969,12 @@
* The generated code checks that the result is not nil.
*/
func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
- var nl *gc.Node
- var nr *gc.Node
- var n1 gc.Node
- var n2 gc.Node
- var n3 gc.Node
- var n4 gc.Node
- var tmp gc.Node
- var p1 *obj.Prog
- var p2 *obj.Prog
- var w uint32
- var v uint64
- var bounded bool
-
if gc.Debug['g'] != 0 {
gc.Dump("agenr-n", n)
}
- nl = n.Left
- nr = n.Right
+ nl := n.Left
+ nr := n.Right
switch n.Op {
case gc.ODOT,
@@ -975,6 +982,7 @@
gc.OCALLFUNC,
gc.OCALLMETH,
gc.OCALLINTER:
+ var n1 gc.Node
igen(n, &n1, res)
regalloc(a, gc.Types[gc.Tptr], &n1)
agen(&n1, a)
@@ -985,10 +993,13 @@
gc.Cgen_checknil(a)
case gc.OINDEX:
- p2 = nil // to be patched to panicindex.
- w = uint32(n.Type.Width)
- bounded = gc.Debug['B'] != 0 || n.Bounded
+ p2 := (*obj.Prog)(nil) // to be patched to panicindex.
+ w := uint32(n.Type.Width)
+ bounded := gc.Debug['B'] != 0 || n.Bounded
+ var n1 gc.Node
+ var n3 gc.Node
if nr.Addable != 0 {
+ var tmp gc.Node
if !gc.Isconst(nr, gc.CTINT) {
gc.Tempname(&tmp, gc.Types[gc.TINT32])
}
@@ -1002,6 +1013,7 @@
}
} else if nl.Addable != 0 {
if !gc.Isconst(nr, gc.CTINT) {
+ var tmp gc.Node
gc.Tempname(&tmp, gc.Types[gc.TINT32])
p2 = cgenindex(nr, &tmp, bounded)
regalloc(&n1, tmp.Type, nil)
@@ -1012,6 +1024,7 @@
agenr(nl, &n3, res)
}
} else {
+ var tmp gc.Node
gc.Tempname(&tmp, gc.Types[gc.TINT32])
p2 = cgenindex(nr, &tmp, bounded)
nr = &tmp
@@ -1031,19 +1044,21 @@
if gc.Isconst(nl, gc.CTSTR) {
gc.Fatal("constant string constant index")
}
- v = uint64(gc.Mpgetfix(nr.Val.U.Xval))
+ v := uint64(gc.Mpgetfix(nr.Val.U.Xval))
+ var n2 gc.Node
if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
if gc.Debug['B'] == 0 && !n.Bounded {
n1 = n3
n1.Op = gc.OINDREG
n1.Type = gc.Types[gc.Tptr]
n1.Xoffset = int64(gc.Array_nel)
+ var n4 gc.Node
regalloc(&n4, n1.Type, nil)
gmove(&n1, &n4)
gc.Nodconst(&n2, gc.Types[gc.TUINT32], int64(v))
gcmp(optoas(gc.OCMP, gc.Types[gc.TUINT32]), &n4, &n2)
regfree(&n4)
- p1 = gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TUINT32]), nil, +1)
+ p1 := gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TUINT32]), nil, +1)
ginscall(gc.Panicindex, 0)
gc.Patch(p1, gc.Pc)
}
@@ -1061,10 +1076,12 @@
break
}
+ var n2 gc.Node
regalloc(&n2, gc.Types[gc.TINT32], &n1) // i
gmove(&n1, &n2)
regfree(&n1)
+ var n4 gc.Node
if gc.Debug['B'] == 0 && !n.Bounded {
// check bounds
if gc.Isconst(nl, gc.CTSTR) {
@@ -1084,7 +1101,7 @@
if n4.Op == gc.OREGISTER {
regfree(&n4)
}
- p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
+ p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
if p2 != nil {
gc.Patch(p2, gc.Pc)
}
@@ -1094,7 +1111,7 @@
if gc.Isconst(nl, gc.CTSTR) {
regalloc(&n3, gc.Types[gc.Tptr], res)
- p1 = gins(arm.AMOVW, nil, &n3)
+ p1 := gins(arm.AMOVW, nil, &n3)
gc.Datastring(nl.Val.U.Sval.S, &p1.From)
p1.From.Type = obj.TYPE_ADDR
} else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
@@ -1141,15 +1158,14 @@
func gencmp0(n *gc.Node, t *gc.Type, o int, likely int, to *obj.Prog) {
var n1 gc.Node
- var n2 gc.Node
- var n3 gc.Node
- var a int
regalloc(&n1, t, nil)
cgen(n, &n1)
- a = optoas(gc.OCMP, t)
+ a := optoas(gc.OCMP, t)
if a != arm.ACMP {
+ var n2 gc.Node
gc.Nodconst(&n2, t, 0)
+ var n3 gc.Node
regalloc(&n3, t, nil)
gmove(&n2, &n3)
gcmp(a, &n1, &n3)
@@ -1167,19 +1183,6 @@
* if(n == true) goto to;
*/
func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
- var et int
- var a int
- var nl *gc.Node
- var nr *gc.Node
- var r *gc.Node
- var n1 gc.Node
- var n2 gc.Node
- var n3 gc.Node
- var tmp gc.Node
- var ll *gc.NodeList
- var p1 *obj.Prog
- var p2 *obj.Prog
-
if gc.Debug['g'] != 0 {
gc.Dump("\nbgen", n)
}
@@ -1192,6 +1195,9 @@
gc.Genlist(n.Ninit)
}
+ var et int
+ var nl *gc.Node
+ var nr *gc.Node
if n.Type == nil {
gc.Convlit(&n, gc.Types[gc.TBOOL])
if n.Type == nil {
@@ -1210,7 +1216,7 @@
switch n.Op {
default:
- a = gc.ONE
+ a := gc.ONE
if !true_ {
a = gc.OEQ
}
@@ -1227,8 +1233,8 @@
case gc.OANDAND,
gc.OOROR:
if (n.Op == gc.OANDAND) == true_ {
- p1 = gc.Gbranch(obj.AJMP, nil, 0)
- p2 = gc.Gbranch(obj.AJMP, nil, 0)
+ p1 := gc.Gbranch(obj.AJMP, nil, 0)
+ p2 := gc.Gbranch(obj.AJMP, nil, 0)
gc.Patch(p1, gc.Pc)
bgen(n.Left, !true_, -likely, p2)
bgen(n.Right, !true_, -likely, p2)
@@ -1273,15 +1279,15 @@
gc.OGT,
gc.OLE,
gc.OGE:
- a = int(n.Op)
+ a := int(n.Op)
if !true_ {
if gc.Isfloat[nl.Type.Etype] != 0 {
// brcom is not valid on floats when NaN is involved.
- p1 = gc.Gbranch(arm.AB, nil, 0)
+ p1 := gc.Gbranch(arm.AB, nil, 0)
- p2 = gc.Gbranch(arm.AB, nil, 0)
+ p2 := gc.Gbranch(arm.AB, nil, 0)
gc.Patch(p1, gc.Pc)
- ll = n.Ninit
+ ll := n.Ninit
n.Ninit = nil
bgen(n, true, -likely, p2)
n.Ninit = ll
@@ -1297,7 +1303,7 @@
// make simplest on right
if nl.Op == gc.OLITERAL || (nl.Ullman < gc.UINF && nl.Ullman < nr.Ullman) {
a = gc.Brrev(a)
- r = nl
+ r := nl
nl = nr
nr = r
}
@@ -1309,6 +1315,7 @@
break
}
+ var n1 gc.Node
igen(nl, &n1, nil)
n1.Xoffset += int64(gc.Array_array)
n1.Type = gc.Types[gc.Tptr]
@@ -1324,6 +1331,7 @@
break
}
+ var n1 gc.Node
igen(nl, &n1, nil)
n1.Type = gc.Types[gc.Tptr]
n1.Xoffset += 0
@@ -1339,12 +1347,14 @@
if gc.Is64(nr.Type) {
if nl.Addable == 0 {
+ var n1 gc.Node
gc.Tempname(&n1, nl.Type)
cgen(nl, &n1)
nl = &n1
}
if nr.Addable == 0 {
+ var n2 gc.Node
gc.Tempname(&n2, nr.Type)
cgen(nr, &n2)
nr = &n2
@@ -1369,13 +1379,16 @@
a = optoas(a, nr.Type)
if nr.Ullman >= gc.UINF {
+ var n1 gc.Node
regalloc(&n1, nl.Type, nil)
cgen(nl, &n1)
+ var tmp gc.Node
gc.Tempname(&tmp, nl.Type)
gmove(&n1, &tmp)
regfree(&n1)
+ var n2 gc.Node
regalloc(&n2, nr.Type, nil)
cgen(nr, &n2)
@@ -1390,26 +1403,30 @@
break
}
+ var n3 gc.Node
gc.Tempname(&n3, nl.Type)
cgen(nl, &n3)
+ var tmp gc.Node
gc.Tempname(&tmp, nr.Type)
cgen(nr, &tmp)
+ var n1 gc.Node
regalloc(&n1, nl.Type, nil)
gmove(&n3, &n1)
+ var n2 gc.Node
regalloc(&n2, nr.Type, nil)
gmove(&tmp, &n2)
gcmp(optoas(gc.OCMP, nr.Type), &n1, &n2)
if gc.Isfloat[nl.Type.Etype] != 0 {
if n.Op == gc.ONE {
- p1 = gc.Gbranch(arm.ABVS, nr.Type, likely)
+ p1 := gc.Gbranch(arm.ABVS, nr.Type, likely)
gc.Patch(gc.Gbranch(a, nr.Type, likely), to)
gc.Patch(p1, to)
} else {
- p1 = gc.Gbranch(arm.ABVS, nr.Type, -likely)
+ p1 := gc.Gbranch(arm.ABVS, nr.Type, -likely)
gc.Patch(gc.Gbranch(a, nr.Type, likely), to)
gc.Patch(p1, gc.Pc)
}
@@ -1432,31 +1449,27 @@
* return n's offset from SP.
*/
func stkof(n *gc.Node) int32 {
- var t *gc.Type
- var flist gc.Iter
- var off int32
-
switch n.Op {
case gc.OINDREG:
return int32(n.Xoffset)
case gc.ODOT:
- t = n.Left.Type
+ t := n.Left.Type
if gc.Isptr[t.Etype] != 0 {
break
}
- off = stkof(n.Left)
+ off := stkof(n.Left)
if off == -1000 || off == 1000 {
return off
}
return int32(int64(off) + n.Xoffset)
case gc.OINDEX:
- t = n.Left.Type
+ t := n.Left.Type
if !gc.Isfixedarray(t) {
break
}
- off = stkof(n.Left)
+ off := stkof(n.Left)
if off == -1000 || off == 1000 {
return off
}
@@ -1468,11 +1481,12 @@
case gc.OCALLMETH,
gc.OCALLINTER,
gc.OCALLFUNC:
- t = n.Left.Type
+ t := n.Left.Type
if gc.Isptr[t.Etype] != 0 {
t = t.Type
}
+ var flist gc.Iter
t = gc.Structfirst(&flist, gc.Getoutarg(t))
if t != nil {
return int32(t.Width + 4) // correct for LR
@@ -1490,24 +1504,6 @@
* NB: character copy assumed little endian architecture
*/
func sgen(n *gc.Node, res *gc.Node, w int64) {
- var dst gc.Node
- var src gc.Node
- var tmp gc.Node
- var nend gc.Node
- var r0 gc.Node
- var r1 gc.Node
- var r2 gc.Node
- var f *gc.Node
- var c int32
- var odst int32
- var osrc int32
- var dir int
- var align int
- var op int
- var p *obj.Prog
- var ploop *obj.Prog
- var l *gc.NodeList
-
if gc.Debug['g'] != 0 {
fmt.Printf("\nsgen w=%d\n", w)
gc.Dump("r", n)
@@ -1528,6 +1524,7 @@
if w == 0 {
// evaluate side effects only.
+ var dst gc.Node
regalloc(&dst, gc.Types[gc.Tptr], nil)
agen(res, &dst)
@@ -1539,7 +1536,7 @@
// If copying .args, that's all the results, so record definition sites
// for them for the liveness analysis.
if res.Op == gc.ONAME && res.Sym.Name == ".args" {
- for l = gc.Curfn.Dcl; l != nil; l = l.Next {
+ for l := gc.Curfn.Dcl; l != nil; l = l.Next {
if l.N.Class == gc.PPARAMOUT {
gc.Gvardef(l.N)
}
@@ -1555,8 +1552,9 @@
// want to avoid unaligned access, so have to use
// smaller operations for less aligned types.
// for example moving [4]byte must use 4 MOVB not 1 MOVW.
- align = int(n.Type.Align)
+ align := int(n.Type.Align)
+ var op int
switch align {
default:
gc.Fatal("sgen: invalid alignment %d for %v", align, gc.Tconv(n.Type, 0))
@@ -1574,17 +1572,18 @@
if w%int64(align) != 0 {
gc.Fatal("sgen: unaligned size %d (align=%d) for %v", w, align, gc.Tconv(n.Type, 0))
}
- c = int32(w / int64(align))
+ c := int32(w / int64(align))
// offset on the stack
- osrc = stkof(n)
+ osrc := stkof(n)
- odst = stkof(res)
+ odst := stkof(res)
if osrc != -1000 && odst != -1000 && (osrc == 1000 || odst == 1000) {
// osrc and odst both on stack, and at least one is in
// an unknown position. Could generate code to test
// for forward/backward copy, but instead just copy
// to a temporary location first.
+ var tmp gc.Node
gc.Tempname(&tmp, n.Type)
sgen(n, &tmp, w)
@@ -1598,21 +1597,26 @@
// if we are copying forward on the stack and
// the src and dst overlap, then reverse direction
- dir = align
+ dir := align
if osrc < odst && int64(odst) < int64(osrc)+w {
dir = -dir
}
if op == arm.AMOVW && !gc.Nacl && dir > 0 && c >= 4 && c <= 128 {
+ var r0 gc.Node
r0.Op = gc.OREGISTER
r0.Val.U.Reg = REGALLOC_R0
+ var r1 gc.Node
r1.Op = gc.OREGISTER
r1.Val.U.Reg = REGALLOC_R0 + 1
+ var r2 gc.Node
r2.Op = gc.OREGISTER
r2.Val.U.Reg = REGALLOC_R0 + 2
+ var src gc.Node
regalloc(&src, gc.Types[gc.Tptr], &r1)
+ var dst gc.Node
regalloc(&dst, gc.Types[gc.Tptr], &r2)
if n.Ullman >= res.Ullman {
// eval n first
@@ -1631,9 +1635,10 @@
agen(n, &src)
}
+ var tmp gc.Node
regalloc(&tmp, gc.Types[gc.Tptr], &r0)
- f = gc.Sysfunc("duffcopy")
- p = gins(obj.ADUFFCOPY, nil, f)
+ f := gc.Sysfunc("duffcopy")
+ p := gins(obj.ADUFFCOPY, nil, f)
gc.Afunclit(&p.To, f)
// 8 and 128 = magic constants: see ../../runtime/asm_arm.s
@@ -1645,6 +1650,8 @@
return
}
+ var dst gc.Node
+ var src gc.Node
if n.Ullman >= res.Ullman {
agenr(n, &dst, res) // temporarily use dst
regalloc(&src, gc.Types[gc.Tptr], nil)
@@ -1661,15 +1668,16 @@
agenr(n, &src, nil)
}
+ var tmp gc.Node
regalloc(&tmp, gc.Types[gc.TUINT32], nil)
// set up end marker
- nend = gc.Node{}
+ nend := gc.Node{}
if c >= 4 {
regalloc(&nend, gc.Types[gc.TUINT32], nil)
- p = gins(arm.AMOVW, &src, &nend)
+ p := gins(arm.AMOVW, &src, &nend)
p.From.Type = obj.TYPE_ADDR
if dir < 0 {
p.From.Offset = int64(dir)
@@ -1680,7 +1688,7 @@
// move src and dest to the end of block if necessary
if dir < 0 {
- p = gins(arm.AMOVW, &src, &src)
+ p := gins(arm.AMOVW, &src, &src)
p.From.Type = obj.TYPE_ADDR
p.From.Offset = w + int64(dir)
@@ -1691,11 +1699,11 @@
// move
if c >= 4 {
- p = gins(op, &src, &tmp)
+ p := gins(op, &src, &tmp)
p.From.Type = obj.TYPE_MEM
p.From.Offset = int64(dir)
p.Scond |= arm.C_PBIT
- ploop = p
+ ploop := p
p = gins(op, &tmp, &dst)
p.To.Type = obj.TYPE_MEM
@@ -1708,6 +1716,7 @@
gc.Patch(gc.Gbranch(arm.ABNE, nil, 0), ploop)
regfree(&nend)
} else {
+ var p *obj.Prog
for {
tmp14 := c
c--
@@ -1757,23 +1766,16 @@
func componentgen(nr *gc.Node, nl *gc.Node) bool {
var nodl gc.Node
var nodr gc.Node
- var tmp gc.Node
- var t *gc.Type
- var freel int
- var freer int
- var fldcount int64
- var loffset int64
- var roffset int64
- freel = 0
- freer = 0
+ freel := 0
+ freer := 0
switch nl.Type.Etype {
default:
goto no
case gc.TARRAY:
- t = nl.Type
+ t := nl.Type
// Slices are ok.
if gc.Isslice(t) {
@@ -1790,9 +1792,9 @@
// Small structs with non-fat types are ok.
// Zero-sized structs are treated separately elsewhere.
case gc.TSTRUCT:
- fldcount = 0
+ fldcount := int64(0)
- for t = nl.Type.Type; t != nil; t = t.Down {
+ for t := nl.Type.Type; t != nil; t = t.Down {
if gc.Isfat(t.Type) {
goto no
}
@@ -1828,6 +1830,7 @@
}
} else {
// When zeroing, prepare a register containing zero.
+ var tmp gc.Node
gc.Nodconst(&tmp, nl.Type, 0)
regalloc(&nodr, gc.Types[gc.TUINT], nil)
@@ -1849,11 +1852,11 @@
if nl.Op == gc.ONAME {
gc.Gvardef(nl)
}
- t = nl.Type
+ t := nl.Type
if !gc.Isslice(t) {
nodl.Type = t.Type
nodr.Type = nodl.Type
- for fldcount = 0; fldcount < t.Bound; fldcount++ {
+ for fldcount := int64(0); fldcount < t.Bound; fldcount++ {
if nr == nil {
gc.Clearslim(&nodl)
} else {
@@ -1956,8 +1959,8 @@
if nl.Op == gc.ONAME {
gc.Gvardef(nl)
}
- loffset = nodl.Xoffset
- roffset = nodr.Xoffset
+ loffset := nodl.Xoffset
+ roffset := nodr.Xoffset
// funarg structs may not begin at offset zero.
if nl.Type.Etype == gc.TSTRUCT && nl.Type.Funarg != 0 && nl.Type.Type != nil {
@@ -1967,7 +1970,7 @@
roffset -= nr.Type.Type.Width
}
- for t = nl.Type.Type; t != nil; t = t.Down {
+ for t := nl.Type.Type; t != nil; t = t.Down {
nodl.Xoffset = loffset + t.Width
nodl.Type = t.Type
diff --git a/src/cmd/5g/cgen64.go b/src/cmd/5g/cgen64.go
index f89c21c..b9e5b7c 100644
--- a/src/cmd/5g/cgen64.go
+++ b/src/cmd/5g/cgen64.go
@@ -16,61 +16,43 @@
* return 1 on success, 0 if op not handled.
*/
func cgen64(n *gc.Node, res *gc.Node) {
- var t1 gc.Node
- var t2 gc.Node
- var l *gc.Node
- var r *gc.Node
- var lo1 gc.Node
- var lo2 gc.Node
- var hi1 gc.Node
- var hi2 gc.Node
- var al gc.Node
- var ah gc.Node
- var bl gc.Node
- var bh gc.Node
- var cl gc.Node
- var ch gc.Node
- var s gc.Node
- var n1 gc.Node
- var creg gc.Node
- var p1 *obj.Prog
- var p2 *obj.Prog
- var p3 *obj.Prog
- var p4 *obj.Prog
- var p5 *obj.Prog
- var p6 *obj.Prog
- var v uint64
-
if res.Op != gc.OINDREG && res.Op != gc.ONAME {
gc.Dump("n", n)
gc.Dump("res", res)
gc.Fatal("cgen64 %v of %v", gc.Oconv(int(n.Op), 0), gc.Oconv(int(res.Op), 0))
}
- l = n.Left
+ l := n.Left
+ var t1 gc.Node
if l.Addable == 0 {
gc.Tempname(&t1, l.Type)
cgen(l, &t1)
l = &t1
}
+ var hi1 gc.Node
+ var lo1 gc.Node
split64(l, &lo1, &hi1)
switch n.Op {
default:
gc.Fatal("cgen64 %v", gc.Oconv(int(n.Op), 0))
case gc.OMINUS:
+ var lo2 gc.Node
+ var hi2 gc.Node
split64(res, &lo2, &hi2)
regalloc(&t1, lo1.Type, nil)
+ var al gc.Node
regalloc(&al, lo1.Type, nil)
+ var ah gc.Node
regalloc(&ah, hi1.Type, nil)
gins(arm.AMOVW, &lo1, &al)
gins(arm.AMOVW, &hi1, &ah)
gmove(ncon(0), &t1)
- p1 = gins(arm.ASUB, &al, &t1)
+ p1 := gins(arm.ASUB, &al, &t1)
p1.Scond |= arm.C_SBIT
gins(arm.AMOVW, &t1, &lo2)
@@ -89,7 +71,10 @@
regalloc(&t1, lo1.Type, nil)
gmove(ncon(^uint32(0)), &t1)
+ var lo2 gc.Node
+ var hi2 gc.Node
split64(res, &lo2, &hi2)
+ var n1 gc.Node
regalloc(&n1, lo1.Type, nil)
gins(arm.AMOVW, &lo1, &n1)
@@ -121,19 +106,24 @@
}
// setup for binary operators
- r = n.Right
+ r := n.Right
if r != nil && r.Addable == 0 {
+ var t2 gc.Node
gc.Tempname(&t2, r.Type)
cgen(r, &t2)
r = &t2
}
+ var hi2 gc.Node
+ var lo2 gc.Node
if gc.Is64(r.Type) {
split64(r, &lo2, &hi2)
}
+ var al gc.Node
regalloc(&al, lo1.Type, nil)
+ var ah gc.Node
regalloc(&ah, hi1.Type, nil)
// Do op. Leave result in ah:al.
@@ -143,14 +133,16 @@
// TODO: Constants
case gc.OADD:
+ var bl gc.Node
regalloc(&bl, gc.Types[gc.TPTR32], nil)
+ var bh gc.Node
regalloc(&bh, gc.Types[gc.TPTR32], nil)
gins(arm.AMOVW, &hi1, &ah)
gins(arm.AMOVW, &lo1, &al)
gins(arm.AMOVW, &hi2, &bh)
gins(arm.AMOVW, &lo2, &bl)
- p1 = gins(arm.AADD, &bl, &al)
+ p1 := gins(arm.AADD, &bl, &al)
p1.Scond |= arm.C_SBIT
gins(arm.AADC, &bh, &ah)
regfree(&bl)
@@ -158,14 +150,16 @@
// TODO: Constants.
case gc.OSUB:
+ var bl gc.Node
regalloc(&bl, gc.Types[gc.TPTR32], nil)
+ var bh gc.Node
regalloc(&bh, gc.Types[gc.TPTR32], nil)
gins(arm.AMOVW, &lo1, &al)
gins(arm.AMOVW, &hi1, &ah)
gins(arm.AMOVW, &lo2, &bl)
gins(arm.AMOVW, &hi2, &bh)
- p1 = gins(arm.ASUB, &bl, &al)
+ p1 := gins(arm.ASUB, &bl, &al)
p1.Scond |= arm.C_SBIT
gins(arm.ASBC, &bh, &ah)
regfree(&bl)
@@ -173,10 +167,14 @@
// TODO(kaib): this can be done with 4 regs and does not need 6
case gc.OMUL:
+ var bl gc.Node
regalloc(&bl, gc.Types[gc.TPTR32], nil)
+ var bh gc.Node
regalloc(&bh, gc.Types[gc.TPTR32], nil)
+ var cl gc.Node
regalloc(&cl, gc.Types[gc.TPTR32], nil)
+ var ch gc.Node
regalloc(&ch, gc.Types[gc.TPTR32], nil)
// load args into bh:bl and bh:bl.
@@ -187,7 +185,7 @@
gins(arm.AMOVW, &lo2, &cl)
// bl * cl -> ah al
- p1 = gins(arm.AMULLU, nil, nil)
+ p1 := gins(arm.AMULLU, nil, nil)
p1.From.Type = obj.TYPE_REG
p1.From.Reg = bl.Val.U.Reg
@@ -239,9 +237,11 @@
// shld hi:lo, c
// shld lo:t, c
case gc.OLROT:
- v = uint64(gc.Mpgetfix(r.Val.U.Xval))
+ v := uint64(gc.Mpgetfix(r.Val.U.Xval))
+ var bl gc.Node
regalloc(&bl, lo1.Type, nil)
+ var bh gc.Node
regalloc(&bh, hi1.Type, nil)
if v >= 32 {
// reverse during load to do the first 32 bits of rotate
@@ -274,13 +274,24 @@
regfree(&bh)
case gc.OLSH:
+ var bl gc.Node
regalloc(&bl, lo1.Type, nil)
+ var bh gc.Node
regalloc(&bh, hi1.Type, nil)
gins(arm.AMOVW, &hi1, &bh)
gins(arm.AMOVW, &lo1, &bl)
+ var p6 *obj.Prog
+ var s gc.Node
+ var n1 gc.Node
+ var creg gc.Node
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+ var p3 *obj.Prog
+ var p4 *obj.Prog
+ var p5 *obj.Prog
if r.Op == gc.OLITERAL {
- v = uint64(gc.Mpgetfix(r.Val.U.Xval))
+ v := uint64(gc.Mpgetfix(r.Val.U.Xval))
if v >= 64 {
// TODO(kaib): replace with gins(AMOVW, nodintconst(0), &al)
// here and below (verify it optimizes to EOR)
@@ -316,6 +327,8 @@
regalloc(&creg, gc.Types[gc.TUINT32], nil)
if gc.Is64(r.Type) {
// shift is >= 1<<32
+ var cl gc.Node
+ var ch gc.Node
split64(r, &cl, &ch)
gmove(&ch, &s)
@@ -422,13 +435,24 @@
regfree(&bh)
case gc.ORSH:
+ var bl gc.Node
regalloc(&bl, lo1.Type, nil)
+ var bh gc.Node
regalloc(&bh, hi1.Type, nil)
gins(arm.AMOVW, &hi1, &bh)
gins(arm.AMOVW, &lo1, &bl)
+ var p4 *obj.Prog
+ var p5 *obj.Prog
+ var n1 gc.Node
+ var p6 *obj.Prog
+ var s gc.Node
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+ var creg gc.Node
+ var p3 *obj.Prog
if r.Op == gc.OLITERAL {
- v = uint64(gc.Mpgetfix(r.Val.U.Xval))
+ v := uint64(gc.Mpgetfix(r.Val.U.Xval))
if v >= 64 {
if bh.Type.Etype == gc.TINT32 {
// MOVW bh->31, al
@@ -487,10 +511,13 @@
regalloc(&creg, gc.Types[gc.TUINT32], nil)
if gc.Is64(r.Type) {
// shift is >= 1<<32
+ var ch gc.Node
+ var cl gc.Node
split64(r, &cl, &ch)
gmove(&ch, &s)
gins(arm.ATST, &s, nil)
+ var p1 *obj.Prog
if bh.Type.Etype == gc.TINT32 {
p1 = gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &ah)
} else {
@@ -578,12 +605,12 @@
if bh.Type.Etype == gc.TINT32 {
// MOVW bh->(s-32), al
- p1 = gregshift(arm.AMOVW, &bh, arm.SHIFT_AR, &s, &al)
+ p1 := gregshift(arm.AMOVW, &bh, arm.SHIFT_AR, &s, &al)
p1.Scond = arm.C_SCOND_LO
} else {
// MOVW bh>>(v-32), al
- p1 = gregshift(arm.AMOVW, &bh, arm.SHIFT_LR, &s, &al)
+ p1 := gregshift(arm.AMOVW, &bh, arm.SHIFT_LR, &s, &al)
p1.Scond = arm.C_SCOND_LO
}
@@ -708,6 +735,7 @@
case gc.OXOR,
gc.OAND,
gc.OOR:
+ var n1 gc.Node
regalloc(&n1, lo1.Type, nil)
gins(arm.AMOVW, &lo1, &al)
@@ -746,15 +774,13 @@
var hi2 gc.Node
var r1 gc.Node
var r2 gc.Node
- var br *obj.Prog
- var t *gc.Type
split64(nl, &lo1, &hi1)
split64(nr, &lo2, &hi2)
// compare most significant word;
// if they differ, we're done.
- t = hi1.Type
+ t := hi1.Type
regalloc(&r1, gc.Types[gc.TINT32], nil)
regalloc(&r2, gc.Types[gc.TINT32], nil)
@@ -764,7 +790,7 @@
regfree(&r1)
regfree(&r2)
- br = nil
+ br := (*obj.Prog)(nil)
switch op {
default:
gc.Fatal("cmp64 %v %v", gc.Oconv(int(op), 0), gc.Tconv(t, 0))
diff --git a/src/cmd/5g/ggen.go b/src/cmd/5g/ggen.go
index 3b007d8..8b7010f 100644
--- a/src/cmd/5g/ggen.go
+++ b/src/cmd/5g/ggen.go
@@ -11,30 +11,24 @@
import "cmd/internal/gc"
func defframe(ptxt *obj.Prog) {
- var frame uint32
- var r0 uint32
- var p *obj.Prog
- var hi int64
- var lo int64
- var l *gc.NodeList
var n *gc.Node
// fill in argument size, stack size
ptxt.To.Type = obj.TYPE_TEXTSIZE
ptxt.To.U.Argsize = int32(gc.Rnd(gc.Curfn.Type.Argwid, int64(gc.Widthptr)))
- frame = uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
+ frame := uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
ptxt.To.Offset = int64(frame)
// insert code to contain ambiguously live variables
// so that garbage collector only sees initialized values
// when it looks for pointers.
- p = ptxt
+ p := ptxt
- hi = 0
- lo = hi
- r0 = 0
- for l = gc.Curfn.Dcl; l != nil; l = l.Next {
+ hi := int64(0)
+ lo := hi
+ r0 := uint32(0)
+ for l := gc.Curfn.Dcl; l != nil; l = l.Next {
n = l.N
if n.Needzero == 0 {
continue
@@ -66,12 +60,7 @@
}
func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, r0 *uint32) *obj.Prog {
- var cnt int64
- var i int64
- var p1 *obj.Prog
- var f *gc.Node
-
- cnt = hi - lo
+ cnt := hi - lo
if cnt == 0 {
return p
}
@@ -81,14 +70,14 @@
}
if cnt < int64(4*gc.Widthptr) {
- for i = 0; i < cnt; i += int64(gc.Widthptr) {
+ for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
p = appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REGSP, int32(4+frame+lo+i))
}
} else if !gc.Nacl && (cnt <= int64(128*gc.Widthptr)) {
p = appendpp(p, arm.AADD, obj.TYPE_CONST, 0, int32(4+frame+lo), obj.TYPE_REG, arm.REG_R1, 0)
p.Reg = arm.REGSP
p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
- f = gc.Sysfunc("duffzero")
+ f := gc.Sysfunc("duffzero")
gc.Naddr(f, &p.To, 1)
gc.Afunclit(&p.To, f)
p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
@@ -98,7 +87,7 @@
p = appendpp(p, arm.AADD, obj.TYPE_CONST, 0, int32(cnt), obj.TYPE_REG, arm.REG_R2, 0)
p.Reg = arm.REG_R1
p = appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REG_R1, 4)
- p1 = p
+ p1 := p
p.Scond |= arm.C_PBIT
p = appendpp(p, arm.ACMP, obj.TYPE_REG, arm.REG_R1, 0, obj.TYPE_NONE, 0, 0)
p.Reg = arm.REG_R2
@@ -110,9 +99,7 @@
}
func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int32, ttype int, treg int, toffset int32) *obj.Prog {
- var q *obj.Prog
-
- q = gc.Ctxt.NewProg()
+ q := gc.Ctxt.NewProg()
gc.Clearp(q)
q.As = int16(as)
q.Lineno = p.Lineno
@@ -137,14 +124,8 @@
* proc=3 normal call to C pointer (not Go func value)
*/
func ginscall(f *gc.Node, proc int) {
- var p *obj.Prog
- var r gc.Node
- var r1 gc.Node
- var con gc.Node
- var extra int32
-
if f.Type != nil {
- extra = 0
+ extra := int32(0)
if proc == 1 || proc == 2 {
extra = 2 * int32(gc.Widthptr)
}
@@ -168,13 +149,14 @@
// ARM NOP 0x00000000 is really AND.EQ R0, R0, R0.
// Use the latter form because the NOP pseudo-instruction
// would be removed by the linker.
+ var r gc.Node
gc.Nodreg(&r, gc.Types[gc.TINT], arm.REG_R0)
- p = gins(arm.AAND, &r, &r)
+ p := gins(arm.AAND, &r, &r)
p.Scond = arm.C_SCOND_EQ
}
- p = gins(arm.ABL, nil, f)
+ p := gins(arm.ABL, nil, f)
gc.Afunclit(&p.To, f)
if proc == -1 || gc.Noreturn(p) {
gins(obj.AUNDEF, nil, nil)
@@ -182,7 +164,9 @@
break
}
+ var r gc.Node
gc.Nodreg(&r, gc.Types[gc.Tptr], arm.REG_R7)
+ var r1 gc.Node
gc.Nodreg(&r1, gc.Types[gc.Tptr], arm.REG_R1)
gmove(f, &r)
r.Op = gc.OINDREG
@@ -196,11 +180,13 @@
case 1, // call in new proc (go)
2: // deferred call (defer)
+ var r gc.Node
regalloc(&r, gc.Types[gc.Tptr], nil)
+ var con gc.Node
gc.Nodconst(&con, gc.Types[gc.TINT32], int64(gc.Argsize(f.Type)))
gins(arm.AMOVW, &con, &r)
- p = gins(arm.AMOVW, &r, nil)
+ p := gins(arm.AMOVW, &r, nil)
p.To.Type = obj.TYPE_MEM
p.To.Reg = arm.REGSP
p.To.Offset = 4
@@ -221,7 +207,7 @@
if proc == 2 {
gc.Nodconst(&con, gc.Types[gc.TINT32], 0)
- p = gins(arm.ACMP, &con, nil)
+ p := gins(arm.ACMP, &con, nil)
p.Reg = arm.REG_R0
p = gc.Gbranch(arm.ABEQ, nil, +1)
cgen_ret(nil)
@@ -235,21 +221,12 @@
* generate res = n.
*/
func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
- var r int
- var i *gc.Node
- var f *gc.Node
- var tmpi gc.Node
- var nodo gc.Node
- var nodr gc.Node
- var nodsp gc.Node
- var p *obj.Prog
-
- i = n.Left
+ i := n.Left
if i.Op != gc.ODOTINTER {
gc.Fatal("cgen_callinter: not ODOTINTER %v", gc.Oconv(int(i.Op), 0))
}
- f = i.Right // field
+ f := i.Right // field
if f.Op != gc.ONAME {
gc.Fatal("cgen_callinter: not ONAME %v", gc.Oconv(int(f.Op), 0))
}
@@ -258,7 +235,7 @@
// Release res register during genlist and cgen,
// which might have their own function calls.
- r = -1
+ r := -1
if res != nil && (res.Op == gc.OREGISTER || res.Op == gc.OINDREG) {
r = int(res.Val.U.Reg)
@@ -266,6 +243,7 @@
}
if i.Addable == 0 {
+ var tmpi gc.Node
gc.Tempname(&tmpi, i.Type)
cgen(i, &tmpi)
i = &tmpi
@@ -276,12 +254,15 @@
reg[r]++
}
+ var nodr gc.Node
regalloc(&nodr, gc.Types[gc.Tptr], res)
+ var nodo gc.Node
regalloc(&nodo, gc.Types[gc.Tptr], &nodr)
nodo.Op = gc.OINDREG
agen(i, &nodr) // REG = &inter
+ var nodsp gc.Node
gc.Nodindreg(&nodsp, gc.Types[gc.Tptr], arm.REGSP)
nodsp.Xoffset = int64(gc.Widthptr)
@@ -305,7 +286,7 @@
proc = 3
} else {
// go/defer. generate go func value.
- p = gins(arm.AMOVW, &nodo, &nodr)
+ p := gins(arm.AMOVW, &nodo, &nodr)
p.From.Type = obj.TYPE_ADDR // REG = &(20+offset(REG)) -- i.tab->fun[f]
}
@@ -324,14 +305,11 @@
* proc=2 defer call save away stack
*/
func cgen_call(n *gc.Node, proc int) {
- var t *gc.Type
- var nod gc.Node
- var afun gc.Node
-
if n == nil {
return
}
+ var afun gc.Node
if n.Left.Ullman >= gc.UINF {
// if name involves a fn call
// precompute the address of the fn
@@ -341,10 +319,11 @@
}
gc.Genlist(n.List) // assign the args
- t = n.Left.Type
+ t := n.Left.Type
// call tempname pointer
if n.Left.Ullman >= gc.UINF {
+ var nod gc.Node
regalloc(&nod, gc.Types[gc.Tptr], nil)
gc.Cgen_as(&nod, &afun)
nod.Type = t
@@ -355,6 +334,7 @@
// call pointer
if n.Left.Op != gc.ONAME || n.Left.Class != gc.PFUNC {
+ var nod gc.Node
regalloc(&nod, gc.Types[gc.Tptr], nil)
gc.Cgen_as(&nod, n.Left)
nod.Type = t
@@ -377,22 +357,18 @@
* res = return value from call.
*/
func cgen_callret(n *gc.Node, res *gc.Node) {
- var nod gc.Node
- var fp *gc.Type
- var t *gc.Type
- var flist gc.Iter
-
- t = n.Left.Type
+ t := n.Left.Type
if t.Etype == gc.TPTR32 || t.Etype == gc.TPTR64 {
t = t.Type
}
- fp = gc.Structfirst(&flist, gc.Getoutarg(t))
+ var flist gc.Iter
+ fp := gc.Structfirst(&flist, gc.Getoutarg(t))
if fp == nil {
gc.Fatal("cgen_callret: nil")
}
- nod = gc.Node{}
+ nod := gc.Node{}
nod.Op = gc.OINDREG
nod.Val.U.Reg = arm.REGSP
nod.Addable = 1
@@ -408,23 +384,18 @@
* res = &return value from call.
*/
func cgen_aret(n *gc.Node, res *gc.Node) {
- var nod1 gc.Node
- var nod2 gc.Node
- var fp *gc.Type
- var t *gc.Type
- var flist gc.Iter
-
- t = n.Left.Type
+ t := n.Left.Type
if gc.Isptr[t.Etype] != 0 {
t = t.Type
}
- fp = gc.Structfirst(&flist, gc.Getoutarg(t))
+ var flist gc.Iter
+ fp := gc.Structfirst(&flist, gc.Getoutarg(t))
if fp == nil {
gc.Fatal("cgen_aret: nil")
}
- nod1 = gc.Node{}
+ nod1 := gc.Node{}
nod1.Op = gc.OINDREG
nod1.Val.U.Reg = arm.REGSP
nod1.Addable = 1
@@ -433,6 +404,7 @@
nod1.Type = fp.Type
if res.Op != gc.OREGISTER {
+ var nod2 gc.Node
regalloc(&nod2, gc.Types[gc.Tptr], res)
agen(&nod1, &nod2)
gins(arm.AMOVW, &nod2, res)
@@ -447,8 +419,6 @@
* n->left is assignments to return values.
*/
func cgen_ret(n *gc.Node) {
- var p *obj.Prog
-
if n != nil {
gc.Genlist(n.List) // copy out args
}
@@ -456,7 +426,7 @@
ginscall(gc.Deferreturn, 0)
}
gc.Genlist(gc.Curfn.Exit)
- p = gins(obj.ARET, nil, nil)
+ p := gins(obj.ARET, nil, nil)
if n != nil && n.Op == gc.ORETJMP {
p.To.Name = obj.NAME_EXTERN
p.To.Type = obj.TYPE_ADDR
@@ -469,23 +439,18 @@
* res = (nl * nr) >> wordsize
*/
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
- var w int
- var n1 gc.Node
- var n2 gc.Node
- var tmp *gc.Node
- var t *gc.Type
- var p *obj.Prog
-
if nl.Ullman < nr.Ullman {
- tmp = nl
+ tmp := nl
nl = nr
nr = tmp
}
- t = nl.Type
- w = int(t.Width * 8)
+ t := nl.Type
+ w := int(t.Width * 8)
+ var n1 gc.Node
regalloc(&n1, t, res)
cgen(nl, &n1)
+ var n2 gc.Node
regalloc(&n2, t, nil)
cgen(nr, &n2)
switch gc.Simtype[t.Etype] {
@@ -502,6 +467,7 @@
// perform a long multiplication.
case gc.TINT32,
gc.TUINT32:
+ var p *obj.Prog
if gc.Issigned[t.Etype] != 0 {
p = gins(arm.AMULL, &n2, nil)
} else {
@@ -530,34 +496,21 @@
* res = nl >> nr
*/
func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
- var n1 gc.Node
- var n2 gc.Node
- var n3 gc.Node
- var nt gc.Node
- var t gc.Node
- var lo gc.Node
- var hi gc.Node
- var w int
- var v int
- var p1 *obj.Prog
- var p2 *obj.Prog
- var p3 *obj.Prog
- var tr *gc.Type
- var sc uint64
-
if nl.Type.Width > 4 {
gc.Fatal("cgen_shift %v", gc.Tconv(nl.Type, 0))
}
- w = int(nl.Type.Width * 8)
+ w := int(nl.Type.Width * 8)
if op == gc.OLROT {
- v = int(gc.Mpgetfix(nr.Val.U.Xval))
+ v := int(gc.Mpgetfix(nr.Val.U.Xval))
+ var n1 gc.Node
regalloc(&n1, nl.Type, res)
if w == 32 {
cgen(nl, &n1)
gshift(arm.AMOVW, &n1, arm.SHIFT_RR, int32(w)-int32(v), &n1)
} else {
+ var n2 gc.Node
regalloc(&n2, nl.Type, nil)
cgen(nl, &n2)
gshift(arm.AMOVW, &n2, arm.SHIFT_LL, int32(v), &n1)
@@ -574,9 +527,10 @@
}
if nr.Op == gc.OLITERAL {
+ var n1 gc.Node
regalloc(&n1, nl.Type, res)
cgen(nl, &n1)
- sc = uint64(gc.Mpgetfix(nr.Val.U.Xval))
+ sc := uint64(gc.Mpgetfix(nr.Val.U.Xval))
if sc == 0 {
} else // nothing to do
if sc >= uint64(nl.Type.Width*8) {
@@ -603,8 +557,13 @@
return
}
- tr = nr.Type
+ tr := nr.Type
+ var t gc.Node
+ var n1 gc.Node
+ var n2 gc.Node
+ var n3 gc.Node
if tr.Width > 4 {
+ var nt gc.Node
gc.Tempname(&nt, nr.Type)
if nl.Ullman >= nr.Ullman {
regalloc(&n2, nl.Type, res)
@@ -617,6 +576,8 @@
cgen(nl, &n2)
}
+ var hi gc.Node
+ var lo gc.Node
split64(&nt, &lo, &hi)
regalloc(&n1, gc.Types[gc.TUINT32], nil)
regalloc(&n3, gc.Types[gc.TUINT32], nil)
@@ -625,7 +586,7 @@
splitclean()
gins(arm.ATST, &n3, nil)
gc.Nodconst(&t, gc.Types[gc.TUINT32], int64(w))
- p1 = gins(arm.AMOVW, &t, &n1)
+ p1 := gins(arm.AMOVW, &t, &n1)
p1.Scond = arm.C_SCOND_NE
tr = gc.Types[gc.TUINT32]
regfree(&n3)
@@ -646,7 +607,7 @@
// test for shift being 0
gins(arm.ATST, &n1, nil)
- p3 = gc.Gbranch(arm.ABEQ, nil, -1)
+ p3 := gc.Gbranch(arm.ABEQ, nil, -1)
// test and fix up large shifts
// TODO: if(!bounded), don't emit some of this.
@@ -656,6 +617,8 @@
gmove(&t, &n3)
gcmp(arm.ACMP, &n1, &n3)
if op == gc.ORSH {
+ var p1 *obj.Prog
+ var p2 *obj.Prog
if gc.Issigned[nl.Type.Etype] != 0 {
p1 = gshift(arm.AMOVW, &n2, arm.SHIFT_AR, int32(w)-1, &n2)
p2 = gregshift(arm.AMOVW, &n2, arm.SHIFT_AR, &n1, &n2)
@@ -667,8 +630,8 @@
p1.Scond = arm.C_SCOND_HS
p2.Scond = arm.C_SCOND_LO
} else {
- p1 = gins(arm.AEOR, &n2, &n2)
- p2 = gregshift(arm.AMOVW, &n2, arm.SHIFT_LL, &n1, &n2)
+ p1 := gins(arm.AEOR, &n2, &n2)
+ p2 := gregshift(arm.AMOVW, &n2, arm.SHIFT_LL, &n1, &n2)
p1.Scond = arm.C_SCOND_HS
p2.Scond = arm.C_SCOND_LO
}
@@ -688,48 +651,41 @@
}
func clearfat(nl *gc.Node) {
- var w uint32
- var c uint32
- var q uint32
- var dst gc.Node
- var nc gc.Node
- var nz gc.Node
- var end gc.Node
- var r0 gc.Node
- var r1 gc.Node
- var f *gc.Node
- var p *obj.Prog
- var pl *obj.Prog
-
/* clear a fat object */
if gc.Debug['g'] != 0 {
gc.Dump("\nclearfat", nl)
}
- w = uint32(nl.Type.Width)
+ w := uint32(nl.Type.Width)
// Avoid taking the address for simple enough types.
if componentgen(nil, nl) {
return
}
- c = w % 4 // bytes
- q = w / 4 // quads
+ c := w % 4 // bytes
+ q := w / 4 // quads
+ var r0 gc.Node
r0.Op = gc.OREGISTER
r0.Val.U.Reg = REGALLOC_R0
+ var r1 gc.Node
r1.Op = gc.OREGISTER
r1.Val.U.Reg = REGALLOC_R0 + 1
+ var dst gc.Node
regalloc(&dst, gc.Types[gc.Tptr], &r1)
agen(nl, &dst)
+ var nc gc.Node
gc.Nodconst(&nc, gc.Types[gc.TUINT32], 0)
+ var nz gc.Node
regalloc(&nz, gc.Types[gc.TUINT32], &r0)
cgen(&nc, &nz)
if q > 128 {
+ var end gc.Node
regalloc(&end, gc.Types[gc.Tptr], nil)
- p = gins(arm.AMOVW, &dst, &end)
+ p := gins(arm.AMOVW, &dst, &end)
p.From.Type = obj.TYPE_ADDR
p.From.Offset = int64(q) * 4
@@ -737,7 +693,7 @@
p.To.Type = obj.TYPE_MEM
p.To.Offset = 4
p.Scond |= arm.C_PBIT
- pl = p
+ pl := p
p = gins(arm.ACMP, &dst, nil)
raddr(&end, p)
@@ -745,13 +701,14 @@
regfree(&end)
} else if q >= 4 && !gc.Nacl {
- f = gc.Sysfunc("duffzero")
- p = gins(obj.ADUFFZERO, nil, f)
+ f := gc.Sysfunc("duffzero")
+ p := gins(obj.ADUFFZERO, nil, f)
gc.Afunclit(&p.To, f)
// 4 and 128 = magic constants: see ../../runtime/asm_arm.s
p.To.Offset = 4 * (128 - int64(q))
} else {
+ var p *obj.Prog
for q > 0 {
p = gins(arm.AMOVW, &nz, &dst)
p.To.Type = obj.TYPE_MEM
@@ -763,6 +720,7 @@
}
}
+ var p *obj.Prog
for c > 0 {
p = gins(arm.AMOVB, &nz, &dst)
p.To.Type = obj.TYPE_MEM
@@ -781,10 +739,9 @@
// Expand CHECKNIL pseudo-op into actual nil pointer check.
func expandchecks(firstp *obj.Prog) {
var reg int
- var p *obj.Prog
var p1 *obj.Prog
- for p = firstp; p != nil; p = p.Link {
+ for p := firstp; p != nil; p = p.Link {
if p.As != obj.ACHECKNIL {
continue
}
diff --git a/src/cmd/5g/gsubr.go b/src/cmd/5g/gsubr.go
index 857bafa..a8b8ed5 100644
--- a/src/cmd/5g/gsubr.go
+++ b/src/cmd/5g/gsubr.go
@@ -49,24 +49,20 @@
}
func ginit() {
- var i int
-
- for i = 0; i < len(reg); i++ {
+ for i := 0; i < len(reg); i++ {
reg[i] = 0
}
- for i = 0; i < len(resvd); i++ {
+ for i := 0; i < len(resvd); i++ {
reg[resvd[i]]++
}
}
func gclean() {
- var i int
-
- for i = 0; i < len(resvd); i++ {
+ for i := 0; i < len(resvd); i++ {
reg[resvd[i]]--
}
- for i = 0; i < len(reg); i++ {
+ for i := 0; i < len(reg); i++ {
if reg[i] != 0 {
gc.Yyerror("reg %v left allocated\n", gc.Ctxt.Rconv(i))
}
@@ -74,10 +70,9 @@
}
func anyregalloc() bool {
- var i int
var j int
- for i = 0; i < len(reg); i++ {
+ for i := 0; i < len(reg); i++ {
if reg[i] == 0 {
goto ok
}
@@ -101,20 +96,15 @@
* caller must regfree(n).
*/
func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) {
- var i int
- var et int
- var fixfree int
- var floatfree int
-
if false && gc.Debug['r'] != 0 {
- fixfree = 0
- for i = REGALLOC_R0; i <= REGALLOC_RMAX; i++ {
+ fixfree := 0
+ for i := REGALLOC_R0; i <= REGALLOC_RMAX; i++ {
if reg[i] == 0 {
fixfree++
}
}
- floatfree = 0
- for i = REGALLOC_F0; i <= REGALLOC_FMAX; i++ {
+ floatfree := 0
+ for i := REGALLOC_F0; i <= REGALLOC_FMAX; i++ {
if reg[i] == 0 {
floatfree++
}
@@ -125,11 +115,12 @@
if t == nil {
gc.Fatal("regalloc: t nil")
}
- et = int(gc.Simtype[t.Etype])
+ et := int(gc.Simtype[t.Etype])
if gc.Is64(t) {
gc.Fatal("regalloc: 64 bit type %v")
}
+ var i int
switch et {
case gc.TINT8,
gc.TUINT8,
@@ -154,7 +145,7 @@
}
fmt.Printf("registers allocated at\n")
- for i = REGALLOC_R0; i <= REGALLOC_RMAX; i++ {
+ for i := REGALLOC_R0; i <= REGALLOC_RMAX; i++ {
fmt.Printf("%d %p\n", i, regpc[i])
}
gc.Fatal("out of fixed registers")
@@ -195,19 +186,15 @@
}
func regfree(n *gc.Node) {
- var i int
- var fixfree int
- var floatfree int
-
if false && gc.Debug['r'] != 0 {
- fixfree = 0
- for i = REGALLOC_R0; i <= REGALLOC_RMAX; i++ {
+ fixfree := 0
+ for i := REGALLOC_R0; i <= REGALLOC_RMAX; i++ {
if reg[i] == 0 {
fixfree++
}
}
- floatfree = 0
- for i = REGALLOC_F0; i <= REGALLOC_FMAX; i++ {
+ floatfree := 0
+ for i := REGALLOC_F0; i <= REGALLOC_FMAX; i++ {
if reg[i] == 0 {
floatfree++
}
@@ -221,7 +208,7 @@
if n.Op != gc.OREGISTER && n.Op != gc.OINDREG {
gc.Fatal("regfree: not a register")
}
- i = int(n.Val.U.Reg)
+ i := int(n.Val.U.Reg)
if i == arm.REGSP {
return
}
@@ -260,9 +247,6 @@
* n is a 64-bit value. fill in lo and hi to refer to its 32-bit halves.
*/
func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
- var n1 gc.Node
- var i int64
-
if !gc.Is64(n.Type) {
gc.Fatal("split64 %v", gc.Tconv(n.Type, 0))
}
@@ -276,6 +260,7 @@
default:
switch n.Op {
default:
+ var n1 gc.Node
if !dotaddable(n, &n1) {
igen(n, &n1, nil)
sclean[nsclean-1] = n1
@@ -285,6 +270,7 @@
case gc.ONAME:
if n.Class == gc.PPARAMREF {
+ var n1 gc.Node
cgen(n.Heapaddr, &n1)
sclean[nsclean-1] = n1
n = &n1
@@ -306,8 +292,9 @@
hi.Xoffset += 4
case gc.OLITERAL:
+ var n1 gc.Node
gc.Convconst(&n1, n.Type, &n.Val)
- i = gc.Mpgetfix(n1.Val.U.Xval)
+ i := gc.Mpgetfix(n1.Val.U.Xval)
gc.Nodconst(lo, gc.Types[gc.TUINT32], int64(uint32(i)))
i >>= 32
if n.Type.Etype == gc.TINT64 {
@@ -329,28 +316,13 @@
}
func gmove(f *gc.Node, t *gc.Node) {
- var a int
- var ft int
- var tt int
- var fa int
- var ta int
- var cvt *gc.Type
- var r1 gc.Node
- var r2 gc.Node
- var flo gc.Node
- var fhi gc.Node
- var tlo gc.Node
- var thi gc.Node
- var con gc.Node
- var p1 *obj.Prog
-
if gc.Debug['M'] != 0 {
fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, 0), gc.Nconv(t, 0))
}
- ft = gc.Simsimtype(f.Type)
- tt = gc.Simsimtype(t.Type)
- cvt = t.Type
+ ft := gc.Simsimtype(f.Type)
+ tt := gc.Simsimtype(t.Type)
+ cvt := t.Type
if gc.Iscomplex[ft] != 0 || gc.Iscomplex[tt] != 0 {
gc.Complexmove(f, t)
@@ -359,19 +331,26 @@
// cannot have two memory operands;
// except 64-bit, which always copies via registers anyway.
+ var flo gc.Node
+ var a int
+ var r1 gc.Node
+ var fhi gc.Node
if !gc.Is64(f.Type) && !gc.Is64(t.Type) && gc.Ismem(f) && gc.Ismem(t) {
goto hard
}
// convert constant to desired type
if f.Op == gc.OLITERAL {
+ var con gc.Node
switch tt {
default:
gc.Convconst(&con, t.Type, &f.Val)
case gc.TINT16,
gc.TINT8:
+ var con gc.Node
gc.Convconst(&con, gc.Types[gc.TINT32], &f.Val)
+ var r1 gc.Node
regalloc(&r1, con.Type, t)
gins(arm.AMOVW, &con, &r1)
gmove(&r1, t)
@@ -380,7 +359,9 @@
case gc.TUINT16,
gc.TUINT8:
+ var con gc.Node
gc.Convconst(&con, gc.Types[gc.TUINT32], &f.Val)
+ var r1 gc.Node
regalloc(&r1, con.Type, t)
gins(arm.AMOVW, &con, &r1)
gmove(&r1, t)
@@ -495,8 +476,11 @@
gc.TUINT64<<16 | gc.TINT32,
gc.TINT64<<16 | gc.TUINT32,
gc.TUINT64<<16 | gc.TUINT32:
+ var flo gc.Node
+ var fhi gc.Node
split64(f, &flo, &fhi)
+ var r1 gc.Node
regalloc(&r1, t.Type, nil)
gins(arm.AMOVW, &flo, &r1)
gins(arm.AMOVW, &r1, t)
@@ -508,10 +492,16 @@
gc.TINT64<<16 | gc.TUINT64,
gc.TUINT64<<16 | gc.TINT64,
gc.TUINT64<<16 | gc.TUINT64:
+ var fhi gc.Node
+ var flo gc.Node
split64(f, &flo, &fhi)
+ var tlo gc.Node
+ var thi gc.Node
split64(t, &tlo, &thi)
+ var r1 gc.Node
regalloc(&r1, flo.Type, nil)
+ var r2 gc.Node
regalloc(&r2, fhi.Type, nil)
gins(arm.AMOVW, &flo, &r1)
gins(arm.AMOVW, &fhi, &r2)
@@ -580,12 +570,16 @@
case gc.TINT32<<16 | gc.TINT64, // sign extend int32
gc.TINT32<<16 | gc.TUINT64:
+ var tlo gc.Node
+ var thi gc.Node
split64(t, &tlo, &thi)
+ var r1 gc.Node
regalloc(&r1, tlo.Type, nil)
+ var r2 gc.Node
regalloc(&r2, thi.Type, nil)
gmove(f, &r1)
- p1 = gins(arm.AMOVW, &r1, &r2)
+ p1 := gins(arm.AMOVW, &r1, &r2)
p1.From.Type = obj.TYPE_SHIFT
p1.From.Offset = 2<<5 | 31<<7 | int64(r1.Val.U.Reg)&15 // r1->31
p1.From.Reg = 0
@@ -601,9 +595,12 @@
case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
gc.TUINT32<<16 | gc.TUINT64:
+ var thi gc.Node
+ var tlo gc.Node
split64(t, &tlo, &thi)
gmove(f, &tlo)
+ var r1 gc.Node
regalloc(&r1, thi.Type, nil)
gins(arm.AMOVW, ncon(0), &r1)
gins(arm.AMOVW, &r1, &thi)
@@ -630,15 +627,15 @@
gc.TFLOAT64<<16 | gc.TUINT16,
gc.TFLOAT64<<16 | gc.TINT32,
gc.TFLOAT64<<16 | gc.TUINT32:
- fa = arm.AMOVF
+ fa := arm.AMOVF
- a = arm.AMOVFW
+ a := arm.AMOVFW
if ft == gc.TFLOAT64 {
fa = arm.AMOVD
a = arm.AMOVDW
}
- ta = arm.AMOVW
+ ta := arm.AMOVW
switch tt {
case gc.TINT8:
ta = arm.AMOVBS
@@ -653,10 +650,12 @@
ta = arm.AMOVHU
}
+ var r1 gc.Node
regalloc(&r1, gc.Types[ft], f)
+ var r2 gc.Node
regalloc(&r2, gc.Types[tt], t)
- gins(fa, f, &r1) // load to fpu
- p1 = gins(a, &r1, &r1) // convert to w
+ gins(fa, f, &r1) // load to fpu
+ p1 := gins(a, &r1, &r1) // convert to w
switch tt {
case gc.TUINT8,
gc.TUINT16,
@@ -685,7 +684,7 @@
gc.TUINT16<<16 | gc.TFLOAT64,
gc.TINT32<<16 | gc.TFLOAT64,
gc.TUINT32<<16 | gc.TFLOAT64:
- fa = arm.AMOVW
+ fa := arm.AMOVW
switch ft {
case gc.TINT8:
@@ -701,18 +700,20 @@
fa = arm.AMOVHU
}
- a = arm.AMOVWF
- ta = arm.AMOVF
+ a := arm.AMOVWF
+ ta := arm.AMOVF
if tt == gc.TFLOAT64 {
a = arm.AMOVWD
ta = arm.AMOVD
}
+ var r1 gc.Node
regalloc(&r1, gc.Types[ft], f)
+ var r2 gc.Node
regalloc(&r2, gc.Types[tt], t)
gins(fa, f, &r1) // load to cpu
gins(arm.AMOVW, &r1, &r2) // copy to fpu
- p1 = gins(a, &r2, &r2) // convert
+ p1 := gins(a, &r2, &r2) // convert
switch ft {
case gc.TUINT8,
gc.TUINT16,
@@ -740,6 +741,7 @@
a = arm.AMOVD
case gc.TFLOAT32<<16 | gc.TFLOAT64:
+ var r1 gc.Node
regalloc(&r1, gc.Types[gc.TFLOAT64], t)
gins(arm.AMOVF, f, &r1)
gins(arm.AMOVFD, &r1, &r1)
@@ -748,6 +750,7 @@
return
case gc.TFLOAT64<<16 | gc.TFLOAT32:
+ var r1 gc.Node
regalloc(&r1, gc.Types[gc.TFLOAT64], t)
gins(arm.AMOVD, f, &r1)
gins(arm.AMOVDF, &r1, &r1)
@@ -816,13 +819,9 @@
* as f, t
*/
func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
- var p *obj.Prog
- var af obj.Addr
// Node nod;
// int32 v;
- var at obj.Addr
-
if f != nil && f.Op == gc.OINDEX {
gc.Fatal("gins OINDEX not implemented")
}
@@ -843,16 +842,16 @@
// constnode.vconst = v;
// idx.reg = nod.reg;
// regfree(&nod);
- af = obj.Addr{}
+ af := obj.Addr{}
- at = obj.Addr{}
+ at := obj.Addr{}
if f != nil {
gc.Naddr(f, &af, 1)
}
if t != nil {
gc.Naddr(t, &at, 1)
}
- p = gc.Prog(as)
+ p := gc.Prog(as)
if f != nil {
p.From = af
}
@@ -888,13 +887,11 @@
TODO(kaib): one of the args can actually be a small constant. relax the constraint and fix call sites.
*/
func gcmp(as int, lhs *gc.Node, rhs *gc.Node) *obj.Prog {
- var p *obj.Prog
-
if lhs.Op != gc.OREGISTER {
gc.Fatal("bad operands to gcmp: %v %v", gc.Oconv(int(lhs.Op), 0), gc.Oconv(int(rhs.Op), 0))
}
- p = gins(as, rhs, nil)
+ p := gins(as, rhs, nil)
raddr(lhs, p)
return p
}
@@ -903,15 +900,13 @@
* arm encodes a shift by 32 as 0, thus asking for 0 shift is illegal.
*/
func gshift(as int, lhs *gc.Node, stype int32, sval int32, rhs *gc.Node) *obj.Prog {
- var p *obj.Prog
-
if sval <= 0 || sval > 32 {
gc.Fatal("bad shift value: %d", sval)
}
sval = sval & 0x1f
- p = gins(as, nil, rhs)
+ p := gins(as, nil, rhs)
p.From.Type = obj.TYPE_SHIFT
p.From.Offset = int64(stype) | int64(sval)<<7 | int64(lhs.Val.U.Reg)&15
return p
@@ -920,8 +915,7 @@
/* generate a register shift
*/
func gregshift(as int, lhs *gc.Node, stype int32, reg *gc.Node, rhs *gc.Node) *obj.Prog {
- var p *obj.Prog
- p = gins(as, nil, rhs)
+ p := gins(as, nil, rhs)
p.From.Type = obj.TYPE_SHIFT
p.From.Offset = int64(stype) | (int64(reg.Val.U.Reg)&15)<<8 | 1<<4 | int64(lhs.Val.U.Reg)&15
return p
@@ -931,13 +925,11 @@
* return Axxx for Oxxx on type t.
*/
func optoas(op int, t *gc.Type) int {
- var a int
-
if t == nil {
gc.Fatal("optoas: t is nil")
}
- a = obj.AXXX
+ a := obj.AXXX
switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
default:
gc.Fatal("optoas: no entry %v-%v etype %v simtype %v", gc.Oconv(int(op), 0), gc.Tconv(t, 0), gc.Tconv(gc.Types[t.Etype], 0), gc.Tconv(gc.Types[gc.Simtype[t.Etype]], 0))
@@ -1246,15 +1238,13 @@
}
func dotaddable(n *gc.Node, n1 *gc.Node) bool {
- var o int
- var oary [10]int64
- var nn *gc.Node
-
if n.Op != gc.ODOT {
return false
}
- o = gc.Dotoffset(n, oary[:], &nn)
+ var oary [10]int64
+ var nn *gc.Node
+ o := gc.Dotoffset(n, oary[:], &nn)
if nn != nil && nn.Addable != 0 && o == 1 && oary[0] >= 0 {
*n1 = *nn
n1.Type = n.Type
@@ -1277,35 +1267,24 @@
* to release the register used for a.
*/
func sudoaddable(as int, n *gc.Node, a *obj.Addr, w *int) bool {
- var o int
- var i int
- var oary [10]int64
- var v int64
- var n1 gc.Node
- var n2 gc.Node
- var n3 gc.Node
- var n4 gc.Node
- var nn *gc.Node
- var l *gc.Node
- var r *gc.Node
- var reg *gc.Node
- var reg1 *gc.Node
- var p1 *obj.Prog
- var p2 *obj.Prog
- var t *gc.Type
-
if n.Type == nil {
return false
}
*a = obj.Addr{}
+ var oary [10]int64
+ var nn *gc.Node
+ var reg *gc.Node
+ var n1 gc.Node
+ var reg1 *gc.Node
+ var o int
switch n.Op {
case gc.OLITERAL:
if !gc.Isconst(n, gc.CTINT) {
break
}
- v = gc.Mpgetfix(n.Val.U.Xval)
+ v := gc.Mpgetfix(n.Val.U.Xval)
if v >= 32000 || v <= -32000 {
break
}
@@ -1315,25 +1294,13 @@
gc.ODOTPTR:
cleani += 2
reg = &clean[cleani-1]
- reg1 = &clean[cleani-2]
+ reg1 := &clean[cleani-2]
reg.Op = gc.OEMPTY
reg1.Op = gc.OEMPTY
goto odot
case gc.OINDEX:
return false
-
- // disabled: OINDEX case is now covered by agenr
- // for a more suitable register allocation pattern.
- if n.Left.Type.Etype == gc.TSTRING {
- return false
- }
- cleani += 2
- reg = &clean[cleani-1]
- reg1 = &clean[cleani-2]
- reg.Op = gc.OEMPTY
- reg1.Op = gc.OEMPTY
- goto oindex
}
return false
@@ -1374,7 +1341,7 @@
if nn.Addable != 0 && o == 1 && oary[0] >= 0 {
// directly addressable set of DOTs
- n1 = *nn
+ n1 := *nn
n1.Type = n.Type
n1.Xoffset += oary[0]
@@ -1394,7 +1361,7 @@
n1.Xoffset = -(oary[0] + 1)
}
- for i = 1; i < o; i++ {
+ for i := 1; i < o; i++ {
if oary[i] >= 0 {
gc.Fatal("can't happen")
}
@@ -1409,187 +1376,6 @@
gc.Naddr(&n1, a, 1)
goto yes
-oindex:
- l = n.Left
- r = n.Right
- if l.Ullman >= gc.UINF && r.Ullman >= gc.UINF {
- goto no
- }
-
- // set o to type of array
- o = 0
-
- if gc.Isptr[l.Type.Etype] != 0 {
- o += OPtrto
- if l.Type.Type.Etype != gc.TARRAY {
- gc.Fatal("not ptr ary")
- }
- if l.Type.Type.Bound < 0 {
- o += ODynam
- }
- } else {
- if l.Type.Etype != gc.TARRAY {
- gc.Fatal("not ary")
- }
- if l.Type.Bound < 0 {
- o += ODynam
- }
- }
-
- *w = int(n.Type.Width)
- if gc.Isconst(r, gc.CTINT) {
- goto oindex_const
- }
-
- switch *w {
- default:
- goto no
-
- case 1,
- 2,
- 4,
- 8:
- break
- }
-
- // load the array (reg)
- if l.Ullman > r.Ullman {
- regalloc(reg, gc.Types[gc.Tptr], nil)
- if o&OPtrto != 0 {
- cgen(l, reg)
- gc.Cgen_checknil(reg)
- } else {
- agen(l, reg)
- }
- }
-
- // load the index (reg1)
- t = gc.Types[gc.TUINT32]
-
- if gc.Issigned[r.Type.Etype] != 0 {
- t = gc.Types[gc.TINT32]
- }
- regalloc(reg1, t, nil)
- regalloc(&n3, gc.Types[gc.TINT32], reg1)
- p2 = cgenindex(r, &n3, gc.Debug['B'] != 0 || n.Bounded)
- gmove(&n3, reg1)
- regfree(&n3)
-
- // load the array (reg)
- if l.Ullman <= r.Ullman {
- regalloc(reg, gc.Types[gc.Tptr], nil)
- if o&OPtrto != 0 {
- cgen(l, reg)
- gc.Cgen_checknil(reg)
- } else {
- agen(l, reg)
- }
- }
-
- // check bounds
- if gc.Debug['B'] == 0 {
- if o&ODynam != 0 {
- n2 = *reg
- n2.Op = gc.OINDREG
- n2.Type = gc.Types[gc.Tptr]
- n2.Xoffset = int64(gc.Array_nel)
- } else {
- if o&OPtrto != 0 {
- gc.Nodconst(&n2, gc.Types[gc.TUINT32], l.Type.Type.Bound)
- } else {
- gc.Nodconst(&n2, gc.Types[gc.TUINT32], l.Type.Bound)
- }
- }
-
- regalloc(&n3, n2.Type, nil)
- cgen(&n2, &n3)
- gcmp(optoas(gc.OCMP, gc.Types[gc.TUINT32]), reg1, &n3)
- regfree(&n3)
- p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
- if p2 != nil {
- gc.Patch(p2, gc.Pc)
- }
- ginscall(gc.Panicindex, 0)
- gc.Patch(p1, gc.Pc)
- }
-
- if o&ODynam != 0 {
- n2 = *reg
- n2.Op = gc.OINDREG
- n2.Type = gc.Types[gc.Tptr]
- n2.Xoffset = int64(gc.Array_array)
- gmove(&n2, reg)
- }
-
- switch *w {
- case 1:
- gins(arm.AADD, reg1, reg)
-
- case 2:
- gshift(arm.AADD, reg1, arm.SHIFT_LL, 1, reg)
-
- case 4:
- gshift(arm.AADD, reg1, arm.SHIFT_LL, 2, reg)
-
- case 8:
- gshift(arm.AADD, reg1, arm.SHIFT_LL, 3, reg)
- }
-
- gc.Naddr(reg1, a, 1)
- a.Type = obj.TYPE_MEM
- a.Reg = reg.Val.U.Reg
- a.Offset = 0
- goto yes
-
- // index is constant
- // can check statically and
- // can multiply by width statically
-
-oindex_const:
- regalloc(reg, gc.Types[gc.Tptr], nil)
-
- if o&OPtrto != 0 {
- cgen(l, reg)
- gc.Cgen_checknil(reg)
- } else {
- agen(l, reg)
- }
-
- v = gc.Mpgetfix(r.Val.U.Xval)
- if o&ODynam != 0 {
- if gc.Debug['B'] == 0 && !n.Bounded {
- n1 = *reg
- n1.Op = gc.OINDREG
- n1.Type = gc.Types[gc.Tptr]
- n1.Xoffset = int64(gc.Array_nel)
- gc.Nodconst(&n2, gc.Types[gc.TUINT32], v)
- regalloc(&n3, gc.Types[gc.TUINT32], nil)
- cgen(&n2, &n3)
- regalloc(&n4, n1.Type, nil)
- cgen(&n1, &n4)
- gcmp(optoas(gc.OCMP, gc.Types[gc.TUINT32]), &n4, &n3)
- regfree(&n4)
- regfree(&n3)
- p1 = gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TUINT32]), nil, +1)
- ginscall(gc.Panicindex, 0)
- gc.Patch(p1, gc.Pc)
- }
-
- n1 = *reg
- n1.Op = gc.OINDREG
- n1.Type = gc.Types[gc.Tptr]
- n1.Xoffset = int64(gc.Array_array)
- gmove(&n1, reg)
- }
-
- n2 = *reg
- n2.Op = gc.OINDREG
- n2.Xoffset = v * int64(*w)
- a.Type = obj.TYPE_NONE
- a.Name = obj.NAME_NONE
- gc.Naddr(&n2, a, 1)
- goto yes
-
yes:
return true
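
In the hunks above, the OINDEX case of sudoaddable now returns false
immediately — the string-index path it guarded was already disabled in
favor of agenr — so the oindex and oindex_const code behind it is
unreachable and is deleted, taking most of the function's up-front
declarations with it. The rest of the rewrite is declaration narrowing.
A hand-written sketch of that rewrite on a toy function; the names are
invented for illustration, not taken from the compiler:

	package main

	import "fmt"

	// sumBefore mirrors the pre-cleanup style: every local is declared
	// C-style at the top, so each one is in scope (and conservatively
	// live) for the entire body.
	func sumBefore(xs []int) int {
		var i int
		var s int

		s = 0
		for i = 0; i < len(xs); i++ {
			s += xs[i]
		}
		return s
	}

	// sumAfter mirrors the post-cleanup style: each declaration is
	// pushed to its first use and folded into := form, shrinking its
	// scope to the statement or loop that needs it.
	func sumAfter(xs []int) int {
		s := 0
		for i := 0; i < len(xs); i++ {
			s += xs[i]
		}
		return s
	}

	func main() {
		xs := []int{1, 2, 3}
		fmt.Println(sumBefore(xs), sumAfter(xs)) // 6 6
	}
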
diff --git a/src/cmd/5g/peep.go b/src/cmd/5g/peep.go
index 2fbb1e5..e28ec02 100644
--- a/src/cmd/5g/peep.go
+++ b/src/cmd/5g/peep.go
@@ -41,17 +41,15 @@
// UNUSED
func peep(firstp *obj.Prog) {
- var r *gc.Flow
- var g *gc.Graph
- var p *obj.Prog
- var t int
-
- g = gc.Flowstart(firstp, nil)
+ g := (*gc.Graph)(gc.Flowstart(firstp, nil))
if g == nil {
return
}
gactive = 0
+ var r *gc.Flow
+ var p *obj.Prog
+ var t int
loop1:
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
gc.Dumpit("loop1", g.Start, 0)
@@ -121,7 +119,7 @@
goto loop1
}
- for r = g.Start; r != nil; r = r.Link {
+ for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
p = r.Prog
switch p.As {
/*
@@ -141,7 +139,7 @@
}
}
- for r = g.Start; r != nil; r = r.Link {
+ for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
p = r.Prog
switch p.As {
case arm.AMOVW,
@@ -250,22 +248,17 @@
* will be eliminated by copy propagation.
*/
func subprop(r0 *gc.Flow) bool {
- var p *obj.Prog
- var v1 *obj.Addr
- var v2 *obj.Addr
- var r *gc.Flow
- var t int
- var info gc.ProgInfo
-
- p = r0.Prog
- v1 = &p.From
+ p := (*obj.Prog)(r0.Prog)
+ v1 := (*obj.Addr)(&p.From)
if !regtyp(v1) {
return false
}
- v2 = &p.To
+ v2 := (*obj.Addr)(&p.To)
if !regtyp(v2) {
return false
}
+ var r *gc.Flow
+ var info gc.ProgInfo
for r = gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
if gc.Uniqs(r) == nil {
break
@@ -332,7 +325,7 @@
}
}
- t = int(v1.Reg)
+ t := int(int(v1.Reg))
v1.Reg = v2.Reg
v2.Reg = int16(t)
if gc.Debug['P'] != 0 {
@@ -354,13 +347,9 @@
* set v2 return success
*/
func copyprop(g *gc.Graph, r0 *gc.Flow) bool {
- var p *obj.Prog
- var v1 *obj.Addr
- var v2 *obj.Addr
-
- p = r0.Prog
- v1 = &p.From
- v2 = &p.To
+ p := (*obj.Prog)(r0.Prog)
+ v1 := (*obj.Addr)(&p.From)
+ v2 := (*obj.Addr)(&p.To)
if copyas(v1, v2) {
return true
}
@@ -369,9 +358,6 @@
}
func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) bool {
- var t int
- var p *obj.Prog
-
if uint32(r.Active) == gactive {
if gc.Debug['P'] != 0 {
fmt.Printf("act set; return 1\n")
@@ -383,6 +369,8 @@
if gc.Debug['P'] != 0 {
fmt.Printf("copy %v->%v f=%d\n", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), f)
}
+ var t int
+ var p *obj.Prog
for ; r != nil; r = r.S1 {
p = r.Prog
if gc.Debug['P'] != 0 {
@@ -473,11 +461,10 @@
* The v1->v2 should be eliminated by copy propagation.
*/
func constprop(c1 *obj.Addr, v1 *obj.Addr, r *gc.Flow) {
- var p *obj.Prog
-
if gc.Debug['P'] != 0 {
fmt.Printf("constprop %v->%v\n", gc.Ctxt.Dconv(c1), gc.Ctxt.Dconv(v1))
}
+ var p *obj.Prog
for ; r != nil; r = r.S1 {
p = r.Prog
if gc.Debug['P'] != 0 {
@@ -527,17 +514,13 @@
* MOVBS above can be a MOVBS, MOVBU, MOVHS or MOVHU.
*/
func shortprop(r *gc.Flow) bool {
- var p *obj.Prog
- var p1 *obj.Prog
- var r1 *gc.Flow
-
- p = r.Prog
- r1 = findpre(r, &p.From)
+ p := (*obj.Prog)(r.Prog)
+ r1 := (*gc.Flow)(findpre(r, &p.From))
if r1 == nil {
return false
}
- p1 = r1.Prog
+ p1 := (*obj.Prog)(r1.Prog)
if p1.As == p.As {
// Two consecutive extensions.
goto gotit
@@ -583,15 +566,7 @@
* ..
*/
func shiftprop(r *gc.Flow) bool {
- var r1 *gc.Flow
- var p *obj.Prog
- var p1 *obj.Prog
- var p2 *obj.Prog
- var n int
- var o int
- var a obj.Addr
-
- p = r.Prog
+ p := (*obj.Prog)(r.Prog)
if p.To.Type != obj.TYPE_REG {
if gc.Debug['P'] != 0 {
fmt.Printf("\tBOTCH: result not reg; FAILURE\n")
@@ -599,8 +574,8 @@
return false
}
- n = int(p.To.Reg)
- a = obj.Addr{}
+ n := int(int(p.To.Reg))
+ a := obj.Addr(obj.Addr{})
if p.Reg != 0 && p.Reg != p.To.Reg {
a.Type = obj.TYPE_REG
a.Reg = p.Reg
@@ -609,7 +584,8 @@
if gc.Debug['P'] != 0 {
fmt.Printf("shiftprop\n%v", p)
}
- r1 = r
+ r1 := (*gc.Flow)(r)
+ var p1 *obj.Prog
for {
/* find first use of shift result; abort if shift operands or result are changed */
r1 = gc.Uniqs(r1)
@@ -736,9 +712,10 @@
}
/* check whether shift result is used subsequently */
- p2 = p1
+ p2 := (*obj.Prog)(p1)
if int(p1.To.Reg) != n {
+ var p1 *obj.Prog
for {
r1 = gc.Uniqs(r1)
if r1 == nil {
@@ -773,7 +750,7 @@
/* make the substitution */
p2.From.Reg = 0
- o = int(p.Reg)
+ o := int(int(p.Reg))
if o == 0 {
o = int(p.To.Reg)
}
@@ -870,14 +847,11 @@
}
func nochange(r *gc.Flow, r2 *gc.Flow, p *obj.Prog) bool {
- var a [3]obj.Addr
- var i int
- var n int
-
if r == r2 {
return true
}
- n = 0
+ n := int(0)
+ var a [3]obj.Addr
if p.Reg != 0 && p.Reg != p.To.Reg {
a[n].Type = obj.TYPE_REG
a[n].Reg = p.Reg
@@ -900,6 +874,7 @@
if n == 0 {
return true
}
+ var i int
for ; r != nil && r != r2; r = gc.Uniqs(r) {
p = r.Prog
for i = 0; i < n; i++ {
@@ -939,9 +914,7 @@
}
func finduse(g *gc.Graph, r *gc.Flow, v *obj.Addr) bool {
- var r1 *gc.Flow
-
- for r1 = g.Start; r1 != nil; r1 = r1.Link {
+ for r1 := (*gc.Flow)(g.Start); r1 != nil; r1 = r1.Link {
r1.Active = 0
}
return findu1(r, v)
@@ -961,19 +934,12 @@
* MOVBU R0<<0(R1),R0
*/
func xtramodes(g *gc.Graph, r *gc.Flow, a *obj.Addr) bool {
- var r1 *gc.Flow
- var r2 *gc.Flow
- var r3 *gc.Flow
- var p *obj.Prog
- var p1 *obj.Prog
- var v obj.Addr
-
- p = r.Prog
- v = *a
+ p := (*obj.Prog)(r.Prog)
+ v := obj.Addr(*a)
v.Type = obj.TYPE_REG
- r1 = findpre(r, &v)
+ r1 := (*gc.Flow)(findpre(r, &v))
if r1 != nil {
- p1 = r1.Prog
+ p1 := r1.Prog
if p1.To.Type == obj.TYPE_REG && p1.To.Reg == v.Reg {
switch p1.As {
case arm.AADD:
@@ -1030,13 +996,14 @@
case arm.AMOVW:
if p1.From.Type == obj.TYPE_REG {
- r2 = findinc(r1, r, &p1.From)
+ r2 := (*gc.Flow)(findinc(r1, r, &p1.From))
if r2 != nil {
+ var r3 *gc.Flow
for r3 = gc.Uniqs(r2); r3.Prog.As == obj.ANOP; r3 = gc.Uniqs(r3) {
}
if r3 == r {
/* post-indexing */
- p1 = r2.Prog
+ p1 := r2.Prog
a.Reg = p1.To.Reg
a.Offset = p1.From.Offset
@@ -1054,10 +1021,10 @@
}
if a != &p.From || a.Reg != p.To.Reg {
- r1 = findinc(r, nil, &v)
+ r1 := (*gc.Flow)(findinc(r, nil, &v))
if r1 != nil {
/* post-indexing */
- p1 = r1.Prog
+ p1 := r1.Prog
a.Offset = p1.From.Offset
p.Scond |= arm.C_PBIT
@@ -1775,19 +1742,17 @@
}
func applypred(rstart *gc.Flow, j *Joininfo, cond int, branch int) {
- var pred int
- var r *gc.Flow
-
if j.len == 0 {
return
}
+ var pred int
if cond == Truecond {
pred = predinfo[rstart.Prog.As-arm.ABEQ].scond
} else {
pred = predinfo[rstart.Prog.As-arm.ABEQ].notscond
}
- for r = j.start; ; r = successor(r) {
+ for r := (*gc.Flow)(j.start); ; r = successor(r) {
if r.Prog.As == arm.AB {
if r != j.last || branch == Delbranch {
excise(r)
@@ -1813,13 +1778,12 @@
}
func predicate(g *gc.Graph) {
- var r *gc.Flow
var t1 int
var t2 int
var j1 Joininfo
var j2 Joininfo
- for r = g.Start; r != nil; r = r.Link {
+ for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
if isbranch(r.Prog) {
t1 = joinsplit(r.S1, &j1)
t2 = joinsplit(r.S2, &j2)
@@ -1861,8 +1825,6 @@
}
func excise(r *gc.Flow) {
- var p *obj.Prog
-
- p = r.Prog
+ p := (*obj.Prog)(r.Prog)
obj.Nopout(p)
}
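
Several := rewrites in peep.go carry explicit conversions —
g := (*gc.Graph)(gc.Flowstart(firstp, nil)), t := int(int(v1.Reg)),
a := obj.Addr(obj.Addr{}), n := int(0). The outer conversion pins the
variable to the type of the var declaration it replaces, so the
mechanical rewrite cannot silently change a type, at the cost of a
redundant-looking wrapper when the initializer already has that type.
A self-contained sketch of the same pattern, using a toy value rather
than the compiler's types:

	package main

	import "fmt"

	func main() {
		var raw int16 = 300

		// Original form:
		//	var t int
		//	t = int(raw)
		// The mechanical rewrite keeps the declared type as an explicit
		// outer conversion, producing the double conversion seen above:
		t := int(int(raw))

		// Likewise `var n int; n = 0` becomes a typed zero:
		n := int(0)

		fmt.Println(t, n) // 300 0
	}
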
diff --git a/src/cmd/6g/cgen.go b/src/cmd/6g/cgen.go
index 36fa62c..58deffa 100644
--- a/src/cmd/6g/cgen.go
+++ b/src/cmd/6g/cgen.go
@@ -24,23 +24,16 @@
* simplifies and calls gmove.
*/
func cgen(n *gc.Node, res *gc.Node) {
- var nl *gc.Node
- var nr *gc.Node
- var r *gc.Node
- var n1 gc.Node
- var n2 gc.Node
- var a int
- var f int
- var p1 *obj.Prog
- var p2 *obj.Prog
- var p3 *obj.Prog
- var addr obj.Addr
-
if gc.Debug['g'] != 0 {
gc.Dump("\ncgen-n", n)
gc.Dump("cgen-res", res)
}
+ var nl *gc.Node
+ var n1 gc.Node
+ var nr *gc.Node
+ var n2 gc.Node
+ var a int
if n == nil || n.Type == nil {
goto ret
}
@@ -60,6 +53,7 @@
gc.OSLICE3,
gc.OSLICE3ARR:
if res.Op != gc.ONAME || res.Addable == 0 {
+ var n1 gc.Node
gc.Tempname(&n1, n.Type)
gc.Cgen_slice(n, &n1)
cgen(&n1, res)
@@ -70,6 +64,7 @@
case gc.OEFACE:
if res.Op != gc.ONAME || res.Addable == 0 {
+ var n1 gc.Node
gc.Tempname(&n1, n.Type)
gc.Cgen_eface(n, &n1)
cgen(&n1, res)
@@ -84,6 +79,7 @@
gc.Fatal("cgen: this is going to misscompile")
}
if res.Ullman >= gc.UINF {
+ var n1 gc.Node
gc.Tempname(&n1, n.Type)
cgen(n, &n1)
cgen(&n1, res)
@@ -101,6 +97,7 @@
if res.Addable == 0 {
if n.Ullman > res.Ullman {
+ var n1 gc.Node
regalloc(&n1, n.Type, res)
cgen(n, &n1)
if n1.Ullman > res.Ullman {
@@ -114,6 +111,7 @@
goto ret
}
+ var f int
if res.Ullman >= gc.UINF {
goto gen
}
@@ -135,9 +133,12 @@
}
if gc.Iscomplex[n.Type.Etype] == 0 {
- a = optoas(gc.OAS, res.Type)
+ a := optoas(gc.OAS, res.Type)
+ var addr obj.Addr
if sudoaddable(a, res, &addr) {
+ var p1 *obj.Prog
if f != 0 {
+ var n2 gc.Node
regalloc(&n2, res.Type, nil)
cgen(n, &n2)
p1 = gins(a, &n2, nil)
@@ -155,6 +156,7 @@
}
gen:
+ var n1 gc.Node
igen(res, &n1, nil)
cgen(n, &n1)
regfree(&n1)
@@ -195,9 +197,10 @@
if nl != nil && nl.Ullman >= gc.UINF {
if nr != nil && nr.Ullman >= gc.UINF {
+ var n1 gc.Node
gc.Tempname(&n1, nl.Type)
cgen(nl, &n1)
- n2 = *n
+ n2 := *n
n2.Left = &n1
cgen(&n2, res)
goto ret
@@ -205,14 +208,16 @@
}
if gc.Iscomplex[n.Type.Etype] == 0 {
- a = optoas(gc.OAS, n.Type)
+ a := optoas(gc.OAS, n.Type)
+ var addr obj.Addr
if sudoaddable(a, n, &addr) {
if res.Op == gc.OREGISTER {
- p1 = gins(a, nil, res)
+ p1 := gins(a, nil, res)
p1.From = addr
} else {
+ var n2 gc.Node
regalloc(&n2, n.Type, nil)
- p1 = gins(a, nil, &n2)
+ p1 := gins(a, nil, &n2)
p1.From = addr
gins(a, &n2, res)
regfree(&n2)
@@ -238,11 +243,11 @@
gc.OGE,
gc.OGT,
gc.ONOT:
- p1 = gc.Gbranch(obj.AJMP, nil, 0)
+ p1 := gc.Gbranch(obj.AJMP, nil, 0)
- p2 = gc.Pc
+ p2 := gc.Pc
gmove(gc.Nodbool(true), res)
- p3 = gc.Gbranch(obj.AJMP, nil, 0)
+ p3 := gc.Gbranch(obj.AJMP, nil, 0)
gc.Patch(p1, gc.Pc)
bgen(n, true, 0, p2)
gmove(gc.Nodbool(false), res)
@@ -255,10 +260,12 @@
// unary
case gc.OCOM:
- a = optoas(gc.OXOR, nl.Type)
+ a := optoas(gc.OXOR, nl.Type)
+ var n1 gc.Node
regalloc(&n1, nl.Type, nil)
cgen(nl, &n1)
+ var n2 gc.Node
gc.Nodconst(&n2, nl.Type, -1)
gins(a, &n2, &n1)
gmove(&n1, res)
@@ -310,7 +317,9 @@
gc.OINDEX,
gc.OIND,
gc.ONAME:
+ var n1 gc.Node
igen(nl, &n1, res)
+ var n2 gc.Node
regalloc(&n2, n.Type, res)
gmove(&n1, &n2)
gmove(&n2, res)
@@ -320,7 +329,9 @@
}
}
+ var n1 gc.Node
regalloc(&n1, nl.Type, res)
+ var n2 gc.Node
regalloc(&n2, n.Type, &n1)
cgen(nl, &n1)
@@ -338,6 +349,7 @@
gc.OINDEX,
gc.OIND,
gc.ONAME: // PHEAP or PPARAMREF var
+ var n1 gc.Node
igen(n, &n1, res)
gmove(&n1, res)
@@ -345,6 +357,7 @@
// interface table is first word of interface value
case gc.OITAB:
+ var n1 gc.Node
igen(nl, &n1, res)
n1.Type = n.Type
@@ -354,14 +367,16 @@
// pointer is the first word of string or slice.
case gc.OSPTR:
if gc.Isconst(nl, gc.CTSTR) {
+ var n1 gc.Node
regalloc(&n1, gc.Types[gc.Tptr], res)
- p1 = gins(x86.ALEAQ, nil, &n1)
+ p1 := gins(x86.ALEAQ, nil, &n1)
gc.Datastring(nl.Val.U.Sval.S, &p1.From)
gmove(&n1, res)
regfree(&n1)
break
}
+ var n1 gc.Node
igen(nl, &n1, res)
n1.Type = n.Type
gmove(&n1, res)
@@ -371,13 +386,15 @@
if gc.Istype(nl.Type, gc.TMAP) || gc.Istype(nl.Type, gc.TCHAN) {
// map and chan have len in the first int-sized word.
// a zero pointer means zero length
+ var n1 gc.Node
regalloc(&n1, gc.Types[gc.Tptr], res)
cgen(nl, &n1)
+ var n2 gc.Node
gc.Nodconst(&n2, gc.Types[gc.Tptr], 0)
gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2)
- p1 = gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, 0)
+ p1 := gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, 0)
n2 = n1
n2.Op = gc.OINDREG
@@ -394,6 +411,7 @@
if gc.Istype(nl.Type, gc.TSTRING) || gc.Isslice(nl.Type) {
// both slice and string have len one pointer into the struct.
// a zero pointer means zero length
+ var n1 gc.Node
igen(nl, &n1, res)
n1.Type = gc.Types[gc.Simtype[gc.TUINT]]
@@ -409,13 +427,15 @@
if gc.Istype(nl.Type, gc.TCHAN) {
// chan has cap in the second int-sized word.
// a zero pointer means zero length
+ var n1 gc.Node
regalloc(&n1, gc.Types[gc.Tptr], res)
cgen(nl, &n1)
+ var n2 gc.Node
gc.Nodconst(&n2, gc.Types[gc.Tptr], 0)
gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2)
- p1 = gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, 0)
+ p1 := gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, 0)
n2 = n1
n2.Op = gc.OINDREG
@@ -431,6 +451,7 @@
}
if gc.Isslice(nl.Type) {
+ var n1 gc.Node
igen(nl, &n1, res)
n1.Type = gc.Types[gc.Simtype[gc.TUINT]]
n1.Xoffset += int64(gc.Array_cap)
@@ -470,11 +491,13 @@
}
if nl.Ullman >= nr.Ullman {
+ var n1 gc.Node
regalloc(&n1, nl.Type, res)
cgen(nl, &n1)
cgen_div(int(n.Op), &n1, nr, res)
regfree(&n1)
} else {
+ var n2 gc.Node
if !gc.Smallintconst(nr) {
regalloc(&n2, nr.Type, res)
cgen(nr, &n2)
@@ -514,7 +537,7 @@
*/
sbop: // symmetric binary
if nl.Ullman < nr.Ullman || (nl.Ullman == nr.Ullman && (gc.Smallintconst(nl) || (nr.Op == gc.OLITERAL && !gc.Smallintconst(nr)))) {
- r = nl
+ r := nl
nl = nr
nr = r
}
@@ -585,8 +608,6 @@
* The caller must call regfree(a).
*/
func cgenr(n *gc.Node, a *gc.Node, res *gc.Node) {
- var n1 gc.Node
-
if gc.Debug['g'] != 0 {
gc.Dump("cgenr-n", n)
}
@@ -609,6 +630,7 @@
gc.OCALLFUNC,
gc.OCALLMETH,
gc.OCALLINTER:
+ var n1 gc.Node
igen(n, &n1, res)
regalloc(a, gc.Types[gc.Tptr], &n1)
gmove(&n1, a)
@@ -627,27 +649,12 @@
* The generated code checks that the result is not nil.
*/
func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
- var nl *gc.Node
- var nr *gc.Node
- var n1 gc.Node
- var n2 gc.Node
- var n3 gc.Node
- var n5 gc.Node
- var tmp gc.Node
- var tmp2 gc.Node
- var nlen gc.Node
- var p1 *obj.Prog
- var t *gc.Type
- var w uint64
- var v uint64
- var freelen int
-
if gc.Debug['g'] != 0 {
gc.Dump("\nagenr-n", n)
}
- nl = n.Left
- nr = n.Right
+ nl := n.Left
+ nr := n.Right
switch n.Op {
case gc.ODOT,
@@ -655,6 +662,7 @@
gc.OCALLFUNC,
gc.OCALLMETH,
gc.OCALLINTER:
+ var n1 gc.Node
igen(n, &n1, res)
regalloc(a, gc.Types[gc.Tptr], &n1)
agen(&n1, a)
@@ -665,10 +673,14 @@
gc.Cgen_checknil(a)
case gc.OINDEX:
- freelen = 0
- w = uint64(n.Type.Width)
+ freelen := 0
+ w := uint64(n.Type.Width)
// Generate the non-addressable child first.
+ var n3 gc.Node
+ var nlen gc.Node
+ var tmp gc.Node
+ var n1 gc.Node
if nr.Addable != 0 {
goto irad
}
@@ -703,6 +715,7 @@
} else {
if nl.Addable == 0 {
// igen will need an addressable node.
+ var tmp2 gc.Node
gc.Tempname(&tmp2, nl.Type)
cgen(nl, &tmp2)
@@ -737,9 +750,10 @@
if gc.Isconst(nl, gc.CTSTR) {
gc.Fatal("constant string constant index") // front end should handle
}
- v = uint64(gc.Mpgetfix(nr.Val.U.Xval))
+ v := uint64(gc.Mpgetfix(nr.Val.U.Xval))
if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
if gc.Debug['B'] == 0 && !n.Bounded {
+ var n2 gc.Node
gc.Nodconst(&n2, gc.Types[gc.Simtype[gc.TUINT]], int64(v))
if gc.Smallintconst(nr) {
gins(optoas(gc.OCMP, gc.Types[gc.Simtype[gc.TUINT]]), &nlen, &n2)
@@ -750,7 +764,7 @@
regfree(&tmp)
}
- p1 = gc.Gbranch(optoas(gc.OGT, gc.Types[gc.Simtype[gc.TUINT]]), nil, +1)
+ p1 := gc.Gbranch(optoas(gc.OGT, gc.Types[gc.Simtype[gc.TUINT]]), nil, +1)
ginscall(gc.Panicindex, -1)
gc.Patch(p1, gc.Pc)
}
@@ -766,12 +780,13 @@
}
// type of the index
- t = gc.Types[gc.TUINT64]
+ t := gc.Types[gc.TUINT64]
if gc.Issigned[n1.Type.Etype] != 0 {
t = gc.Types[gc.TINT64]
}
+ var n2 gc.Node
regalloc(&n2, t, &n1) // i
gmove(&n1, &n2)
regfree(&n1)
@@ -787,6 +802,7 @@
gc.Nodconst(&nlen, t, int64(len(nl.Val.U.Sval.S)))
} else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
if gc.Is64(nr.Type) {
+ var n5 gc.Node
regalloc(&n5, t, nil)
gmove(&nlen, &n5)
regfree(&nlen)
@@ -795,6 +811,7 @@
} else {
gc.Nodconst(&nlen, t, nl.Type.Bound)
if !gc.Smallintconst(&nlen) {
+ var n5 gc.Node
regalloc(&n5, t, nil)
gmove(&nlen, &n5)
nlen = n5
@@ -803,14 +820,14 @@
}
gins(optoas(gc.OCMP, t), &n2, &nlen)
- p1 = gc.Gbranch(optoas(gc.OLT, t), nil, +1)
+ p1 := gc.Gbranch(optoas(gc.OLT, t), nil, +1)
ginscall(gc.Panicindex, -1)
gc.Patch(p1, gc.Pc)
}
if gc.Isconst(nl, gc.CTSTR) {
regalloc(&n3, gc.Types[gc.Tptr], res)
- p1 = gins(x86.ALEAQ, nil, &n3)
+ p1 := gins(x86.ALEAQ, nil, &n3)
gc.Datastring(nl.Val.U.Sval.S, &p1.From)
gins(x86.AADDQ, &n2, &n3)
goto indexdone
@@ -819,7 +836,7 @@
if w == 0 {
} else // nothing to do
if w == 1 || w == 2 || w == 4 || w == 8 {
- p1 = gins(x86.ALEAQ, &n2, &n3)
+ p1 := gins(x86.ALEAQ, &n2, &n3)
p1.From.Type = obj.TYPE_MEM
p1.From.Scale = int8(w)
p1.From.Index = p1.From.Reg
@@ -848,10 +865,6 @@
* The generated code checks that the result is not nil.
*/
func agen(n *gc.Node, res *gc.Node) {
- var nl *gc.Node
- var n1 gc.Node
- var n2 gc.Node
-
if gc.Debug['g'] != 0 {
gc.Dump("\nagen-res", res)
gc.Dump("agen-r", n)
@@ -865,15 +878,18 @@
n = n.Left
}
+ var nl *gc.Node
if gc.Isconst(n, gc.CTNIL) && n.Type.Width > int64(gc.Widthptr) {
// Use of a nil interface or nil slice.
// Create a temporary we can take the address of and read.
// The generated code is just going to panic, so it need not
// be terribly efficient. See issue 3670.
+ var n1 gc.Node
gc.Tempname(&n1, n.Type)
gc.Gvardef(&n1)
clearfat(&n1)
+ var n2 gc.Node
regalloc(&n2, gc.Types[gc.Tptr], res)
gins(x86.ALEAQ, &n1, &n2)
gmove(&n2, res)
@@ -882,6 +898,7 @@
}
if n.Addable != 0 {
+ var n1 gc.Node
regalloc(&n1, gc.Types[gc.Tptr], res)
gins(x86.ALEAQ, n, &n1)
gmove(&n1, res)
@@ -912,16 +929,19 @@
gc.OSLICESTR,
gc.OSLICE3,
gc.OSLICE3ARR:
+ var n1 gc.Node
gc.Tempname(&n1, n.Type)
gc.Cgen_slice(n, &n1)
agen(&n1, res)
case gc.OEFACE:
+ var n1 gc.Node
gc.Tempname(&n1, n.Type)
gc.Cgen_eface(n, &n1)
agen(&n1, res)
case gc.OINDEX:
+ var n1 gc.Node
agenr(n, &n1, res)
gmove(&n1, res)
regfree(&n1)
@@ -975,10 +995,6 @@
* The generated code checks that the result is not *nil.
*/
func igen(n *gc.Node, a *gc.Node, res *gc.Node) {
- var fp *gc.Type
- var flist gc.Iter
- var n1 gc.Node
-
if gc.Debug['g'] != 0 {
gc.Dump("\nigen-n", n)
}
@@ -1030,7 +1046,8 @@
cgen_callinter(n, nil, 0)
}
- fp = gc.Structfirst(&flist, gc.Getoutarg(n.Left.Type))
+ var flist gc.Iter
+ fp := gc.Structfirst(&flist, gc.Getoutarg(n.Left.Type))
*a = gc.Node{}
a.Op = gc.OINDREG
a.Val.U.Reg = x86.REG_SP
@@ -1050,6 +1067,7 @@
if gc.Isptr[n.Left.Type.Etype] == 0 {
igen(n.Left, a, res)
} else {
+ var n1 gc.Node
igen(n.Left, &n1, res)
gc.Cgen_checknil(&n1)
regalloc(a, gc.Types[gc.Tptr], res)
@@ -1078,19 +1096,6 @@
* if(n == true) goto to;
*/
func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
- var et int
- var a int
- var nl *gc.Node
- var nr *gc.Node
- var l *gc.Node
- var r *gc.Node
- var n1 gc.Node
- var n2 gc.Node
- var tmp gc.Node
- var ll *gc.NodeList
- var p1 *obj.Prog
- var p2 *obj.Prog
-
if gc.Debug['g'] != 0 {
gc.Dump("\nbgen", n)
}
@@ -1103,6 +1108,12 @@
gc.Genlist(n.Ninit)
}
+ var a int
+ var et int
+ var nl *gc.Node
+ var n1 gc.Node
+ var nr *gc.Node
+ var n2 gc.Node
if n.Type == nil {
gc.Convlit(&n, gc.Types[gc.TBOOL])
if n.Type == nil {
@@ -1141,9 +1152,10 @@
if n.Addable == 0 {
goto def
}
+ var n1 gc.Node
gc.Nodconst(&n1, n.Type, 0)
gins(optoas(gc.OCMP, n.Type), n, &n1)
- a = x86.AJNE
+ a := x86.AJNE
if !true_ {
a = x86.AJEQ
}
@@ -1153,8 +1165,8 @@
case gc.OANDAND,
gc.OOROR:
if (n.Op == gc.OANDAND) == true_ {
- p1 = gc.Gbranch(obj.AJMP, nil, 0)
- p2 = gc.Gbranch(obj.AJMP, nil, 0)
+ p1 := gc.Gbranch(obj.AJMP, nil, 0)
+ p2 := gc.Gbranch(obj.AJMP, nil, 0)
gc.Patch(p1, gc.Pc)
bgen(n.Left, !true_, -likely, p2)
bgen(n.Right, !true_, -likely, p2)
@@ -1199,15 +1211,15 @@
gc.OGT,
gc.OLE,
gc.OGE:
- a = int(n.Op)
+ a := int(n.Op)
if !true_ {
if gc.Isfloat[nr.Type.Etype] != 0 {
// brcom is not valid on floats when NaN is involved.
- p1 = gc.Gbranch(obj.AJMP, nil, 0)
+ p1 := gc.Gbranch(obj.AJMP, nil, 0)
- p2 = gc.Gbranch(obj.AJMP, nil, 0)
+ p2 := gc.Gbranch(obj.AJMP, nil, 0)
gc.Patch(p1, gc.Pc)
- ll = n.Ninit // avoid re-genning ninit
+ ll := n.Ninit // avoid re-genning ninit
n.Ninit = nil
bgen(n, true, -likely, p2)
n.Ninit = ll
@@ -1223,7 +1235,7 @@
// make simplest on right
if nl.Op == gc.OLITERAL || (nl.Ullman < nr.Ullman && nl.Ullman < gc.UINF) {
a = gc.Brrev(a)
- r = nl
+ r := nl
nl = nr
nr = r
}
@@ -1236,9 +1248,11 @@
}
a = optoas(a, gc.Types[gc.Tptr])
+ var n1 gc.Node
igen(nl, &n1, nil)
n1.Xoffset += int64(gc.Array_array)
n1.Type = gc.Types[gc.Tptr]
+ var tmp gc.Node
gc.Nodconst(&tmp, gc.Types[gc.Tptr], 0)
gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &tmp)
gc.Patch(gc.Gbranch(a, gc.Types[gc.Tptr], likely), to)
@@ -1254,8 +1268,10 @@
}
a = optoas(a, gc.Types[gc.Tptr])
+ var n1 gc.Node
igen(nl, &n1, nil)
n1.Type = gc.Types[gc.Tptr]
+ var tmp gc.Node
gc.Nodconst(&tmp, gc.Types[gc.Tptr], 0)
gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &tmp)
gc.Patch(gc.Gbranch(a, gc.Types[gc.Tptr], likely), to)
@@ -1268,10 +1284,13 @@
break
}
+ var n2 gc.Node
+ var n1 gc.Node
if nr.Ullman >= gc.UINF {
regalloc(&n1, nl.Type, nil)
cgen(nl, &n1)
+ var tmp gc.Node
gc.Tempname(&tmp, nl.Type)
gmove(&n1, &tmp)
regfree(&n1)
@@ -1300,9 +1319,9 @@
// only < and <= work right with NaN; reverse if needed
cmp:
- l = &n1
+ l := &n1
- r = &n2
+ r := &n2
if gc.Isfloat[nl.Type.Etype] != 0 && (a == gc.OGT || a == gc.OGE) {
l = &n2
r = &n1
@@ -1314,9 +1333,9 @@
if gc.Isfloat[nr.Type.Etype] != 0 && (n.Op == gc.OEQ || n.Op == gc.ONE) {
if n.Op == gc.OEQ {
// neither NE nor P
- p1 = gc.Gbranch(x86.AJNE, nil, -likely)
+ p1 := gc.Gbranch(x86.AJNE, nil, -likely)
- p2 = gc.Gbranch(x86.AJPS, nil, -likely)
+ p2 := gc.Gbranch(x86.AJPS, nil, -likely)
gc.Patch(gc.Gbranch(obj.AJMP, nil, 0), to)
gc.Patch(p1, gc.Pc)
gc.Patch(p2, gc.Pc)
@@ -1357,31 +1376,27 @@
* return n's offset from SP.
*/
func stkof(n *gc.Node) int64 {
- var t *gc.Type
- var flist gc.Iter
- var off int64
-
switch n.Op {
case gc.OINDREG:
return n.Xoffset
case gc.ODOT:
- t = n.Left.Type
+ t := n.Left.Type
if gc.Isptr[t.Etype] != 0 {
break
}
- off = stkof(n.Left)
+ off := stkof(n.Left)
if off == -1000 || off == 1000 {
return off
}
return off + n.Xoffset
case gc.OINDEX:
- t = n.Left.Type
+ t := n.Left.Type
if !gc.Isfixedarray(t) {
break
}
- off = stkof(n.Left)
+ off := stkof(n.Left)
if off == -1000 || off == 1000 {
return off
}
@@ -1393,11 +1408,12 @@
case gc.OCALLMETH,
gc.OCALLINTER,
gc.OCALLFUNC:
- t = n.Left.Type
+ t := n.Left.Type
if gc.Isptr[t.Etype] != 0 {
t = t.Type
}
+ var flist gc.Iter
t = gc.Structfirst(&flist, gc.Getoutarg(t))
if t != nil {
return t.Width
@@ -1414,20 +1430,6 @@
* memmove(&ns, &n, w);
*/
func sgen(n *gc.Node, ns *gc.Node, w int64) {
- var nodl gc.Node
- var nodr gc.Node
- var nodsi gc.Node
- var noddi gc.Node
- var cx gc.Node
- var oldcx gc.Node
- var tmp gc.Node
- var c int64
- var q int64
- var odst int64
- var osrc int64
- var l *gc.NodeList
- var p *obj.Prog
-
if gc.Debug['g'] != 0 {
fmt.Printf("\nsgen w=%d\n", w)
gc.Dump("r", n)
@@ -1445,7 +1447,7 @@
// If copying .args, that's all the results, so record definition sites
// for them for the liveness analysis.
if ns.Op == gc.ONAME && ns.Sym.Name == ".args" {
- for l = gc.Curfn.Dcl; l != nil; l = l.Next {
+ for l := gc.Curfn.Dcl; l != nil; l = l.Next {
if l.N.Class == gc.PPARAMOUT {
gc.Gvardef(l.N)
}
@@ -1459,6 +1461,7 @@
if w == 0 {
// evaluate side effects only
+ var nodr gc.Node
regalloc(&nodr, gc.Types[gc.Tptr], nil)
agen(ns, &nodr)
@@ -1468,15 +1471,16 @@
}
// offset on the stack
- osrc = stkof(n)
+ osrc := stkof(n)
- odst = stkof(ns)
+ odst := stkof(ns)
if osrc != -1000 && odst != -1000 && (osrc == 1000 || odst == 1000) {
// osrc and odst both on stack, and at least one is in
// an unknown position. Could generate code to test
// for forward/backward copy, but instead just copy
// to a temporary location first.
+ var tmp gc.Node
gc.Tempname(&tmp, n.Type)
sgen(n, &tmp, w)
@@ -1484,9 +1488,13 @@
return
}
+ var noddi gc.Node
gc.Nodreg(&noddi, gc.Types[gc.Tptr], x86.REG_DI)
+ var nodsi gc.Node
gc.Nodreg(&nodsi, gc.Types[gc.Tptr], x86.REG_SI)
+ var nodl gc.Node
+ var nodr gc.Node
if n.Ullman >= ns.Ullman {
agenr(n, &nodr, &nodsi)
if ns.Op == gc.ONAME {
@@ -1510,9 +1518,11 @@
regfree(&nodl)
regfree(&nodr)
- c = w % 8 // bytes
- q = w / 8 // quads
+ c := w % 8 // bytes
+ q := w / 8 // quads
+ var oldcx gc.Node
+ var cx gc.Node
savex(x86.REG_CX, &cx, &oldcx, nil, gc.Types[gc.TINT64])
// if we are copying forward on the stack and
@@ -1552,7 +1562,7 @@
gins(x86.AREP, nil, nil) // repeat
gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)+,*(DI)+
} else if q >= 4 {
- p = gins(obj.ADUFFCOPY, nil, nil)
+ p := gins(obj.ADUFFCOPY, nil, nil)
p.To.Type = obj.TYPE_ADDR
p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))
@@ -1643,23 +1653,16 @@
func componentgen(nr *gc.Node, nl *gc.Node) bool {
var nodl gc.Node
var nodr gc.Node
- var tmp gc.Node
- var t *gc.Type
- var freel int
- var freer int
- var fldcount int64
- var loffset int64
- var roffset int64
- freel = 0
- freer = 0
+ freel := 0
+ freer := 0
switch nl.Type.Etype {
default:
goto no
case gc.TARRAY:
- t = nl.Type
+ t := nl.Type
// Slices are ok.
if gc.Isslice(t) {
@@ -1676,9 +1679,9 @@
// Small structs with non-fat types are ok.
// Zero-sized structs are treated separately elsewhere.
case gc.TSTRUCT:
- fldcount = 0
+ fldcount := int64(0)
- for t = nl.Type.Type; t != nil; t = t.Down {
+ for t := nl.Type.Type; t != nil; t = t.Down {
if gc.Isfat(t.Type) {
goto no
}
@@ -1714,6 +1717,7 @@
}
} else {
// When zeroing, prepare a register containing zero.
+ var tmp gc.Node
gc.Nodconst(&tmp, nl.Type, 0)
regalloc(&nodr, gc.Types[gc.TUINT], nil)
@@ -1735,11 +1739,11 @@
if nl.Op == gc.ONAME {
gc.Gvardef(nl)
}
- t = nl.Type
+ t := nl.Type
if !gc.Isslice(t) {
nodl.Type = t.Type
nodr.Type = nodl.Type
- for fldcount = 0; fldcount < t.Bound; fldcount++ {
+ for fldcount := int64(0); fldcount < t.Bound; fldcount++ {
if nr == nil {
gc.Clearslim(&nodl)
} else {
@@ -1842,8 +1846,8 @@
if nl.Op == gc.ONAME {
gc.Gvardef(nl)
}
- loffset = nodl.Xoffset
- roffset = nodr.Xoffset
+ loffset := nodl.Xoffset
+ roffset := nodr.Xoffset
// funarg structs may not begin at offset zero.
if nl.Type.Etype == gc.TSTRUCT && nl.Type.Funarg != 0 && nl.Type.Type != nil {
@@ -1853,7 +1857,7 @@
roffset -= nr.Type.Type.Width
}
- for t = nl.Type.Type; t != nil; t = t.Down {
+ for t := nl.Type.Type; t != nil; t = t.Down {
nodl.Xoffset = loffset + t.Width
nodl.Type = t.Type
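
A detail worth noting in cgen and bgen above: a short var block still
survives right after the debug dump (var nl *gc.Node, var n1 gc.Node,
var a int, ...) even though most temporaries moved into their cases.
That split is forced by Go's goto rule — a goto may not jump over a
variable declaration into its scope — and these functions still
goto ret and goto def past the switch, so anything live at those labels
must be declared before the first jump. A sketch of the constraint,
with invented names:

	package main

	import "fmt"

	// classify shows why some declarations cannot move: `label` is used
	// at the goto target, so it must be declared before the jump — Go
	// rejects a goto that jumps over a declaration into its scope.
	func classify(n int) string {
		var label string
		if n == 0 {
			goto done
		}

		switch {
		case n > 0:
			// A case-local temporary: declaring it here limits its
			// scope (and liveness) to this one case.
			var sign string = "+"
			label = sign + fmt.Sprint(n)
		default:
			var sign string = "-"
			label = sign + fmt.Sprint(-n)
		}

	done:
		if label == "" {
			label = "0"
		}
		return label
	}

	func main() {
		fmt.Println(classify(-3), classify(0), classify(7)) // -3 0 +7
	}
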
diff --git a/src/cmd/6g/ggen.go b/src/cmd/6g/ggen.go
index be6ff21..d0c43d6 100644
--- a/src/cmd/6g/ggen.go
+++ b/src/cmd/6g/ggen.go
@@ -11,32 +11,26 @@
import "cmd/internal/gc"
func defframe(ptxt *obj.Prog) {
- var frame uint32
- var ax uint32
- var p *obj.Prog
- var hi int64
- var lo int64
- var l *gc.NodeList
var n *gc.Node
// fill in argument size, stack size
ptxt.To.Type = obj.TYPE_TEXTSIZE
ptxt.To.U.Argsize = int32(gc.Rnd(gc.Curfn.Type.Argwid, int64(gc.Widthptr)))
- frame = uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
+ frame := uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
ptxt.To.Offset = int64(frame)
// insert code to zero ambiguously live variables
// so that the garbage collector only sees initialized values
// when it looks for pointers.
- p = ptxt
+ p := ptxt
- hi = 0
- lo = hi
- ax = 0
+ hi := int64(0)
+ lo := hi
+ ax := uint32(0)
// iterate through declarations - they are sorted in decreasing xoffset order.
- for l = gc.Curfn.Dcl; l != nil; l = l.Next {
+ for l := gc.Curfn.Dcl; l != nil; l = l.Next {
n = l.N
if n.Needzero == 0 {
continue
@@ -69,10 +63,7 @@
}
func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, ax *uint32) *obj.Prog {
- var cnt int64
- var i int64
-
- cnt = hi - lo
+ cnt := hi - lo
if cnt == 0 {
return p
}
@@ -92,7 +83,7 @@
}
if cnt <= int64(4*gc.Widthreg) {
- for i = 0; i < cnt; i += int64(gc.Widthreg) {
+ for i := int64(0); i < cnt; i += int64(gc.Widthreg) {
p = appendpp(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo+i)
}
} else if !gc.Nacl && (cnt <= int64(128*gc.Widthreg)) {
@@ -110,8 +101,7 @@
}
func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int64, ttype int, treg int, toffset int64) *obj.Prog {
- var q *obj.Prog
- q = gc.Ctxt.NewProg()
+ q := gc.Ctxt.NewProg()
gc.Clearp(q)
q.As = int16(as)
q.Lineno = p.Lineno
@@ -136,14 +126,8 @@
* proc=3 normal call to C pointer (not Go func value)
*/
func ginscall(f *gc.Node, proc int) {
- var p *obj.Prog
- var reg gc.Node
- var stk gc.Node
- var r1 gc.Node
- var extra int32
-
if f.Type != nil {
- extra = 0
+ extra := int32(0)
if proc == 1 || proc == 2 {
extra = 2 * int32(gc.Widthptr)
}
@@ -167,12 +151,13 @@
// x86 NOP 0x90 is really XCHG AX, AX; use that description
// because the NOP pseudo-instruction would be removed by
// the linker.
+ var reg gc.Node
gc.Nodreg(®, gc.Types[gc.TINT], x86.REG_AX)
gins(x86.AXCHGL, ®, ®)
}
- p = gins(obj.ACALL, nil, f)
+ p := gins(obj.ACALL, nil, f)
gc.Afunclit(&p.To, f)
if proc == -1 || gc.Noreturn(p) {
gins(obj.AUNDEF, nil, nil)
@@ -180,7 +165,9 @@
break
}
+ var reg gc.Node
gc.Nodreg(®, gc.Types[gc.Tptr], x86.REG_DX)
+ var r1 gc.Node
gc.Nodreg(&r1, gc.Types[gc.Tptr], x86.REG_BX)
gmove(f, ®)
reg.Op = gc.OINDREG
@@ -193,12 +180,13 @@
case 1, // call in new proc (go)
2: // deferred call (defer)
- stk = gc.Node{}
+ stk := gc.Node{}
stk.Op = gc.OINDREG
stk.Val.U.Reg = x86.REG_SP
stk.Xoffset = 0
+ var reg gc.Node
if gc.Widthptr == 8 {
// size of arguments at 0(SP)
ginscon(x86.AMOVQ, int64(gc.Argsize(f.Type)), &stk)
@@ -233,7 +221,7 @@
if proc == 2 {
gc.Nodreg(®, gc.Types[gc.TINT32], x86.REG_AX)
gins(x86.ATESTL, ®, ®)
- p = gc.Gbranch(x86.AJEQ, nil, +1)
+ p := gc.Gbranch(x86.AJEQ, nil, +1)
cgen_ret(nil)
gc.Patch(p, gc.Pc)
}
@@ -245,20 +233,12 @@
* generate res = n.
*/
func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
- var i *gc.Node
- var f *gc.Node
- var tmpi gc.Node
- var nodi gc.Node
- var nodo gc.Node
- var nodr gc.Node
- var nodsp gc.Node
-
- i = n.Left
+ i := n.Left
if i.Op != gc.ODOTINTER {
gc.Fatal("cgen_callinter: not ODOTINTER %v", gc.Oconv(int(i.Op), 0))
}
- f = i.Right // field
+ f := i.Right // field
if f.Op != gc.ONAME {
gc.Fatal("cgen_callinter: not ONAME %v", gc.Oconv(int(f.Op), 0))
}
@@ -266,6 +246,7 @@
i = i.Left // interface
if i.Addable == 0 {
+ var tmpi gc.Node
gc.Tempname(&tmpi, i.Type)
cgen(i, &tmpi)
i = &tmpi
@@ -275,8 +256,10 @@
// i is now addable, prepare an indirected
// register to hold its address.
+ var nodi gc.Node
igen(i, &nodi, res) // REG = &inter
+ var nodsp gc.Node
gc.Nodindreg(&nodsp, gc.Types[gc.Tptr], x86.REG_SP)
nodsp.Xoffset = 0
@@ -287,6 +270,7 @@
nodi.Xoffset += int64(gc.Widthptr)
cgen(&nodi, &nodsp) // {0, 8(nacl), or 16}(SP) = 8(REG) -- i.data
+ var nodo gc.Node
regalloc(&nodo, gc.Types[gc.Tptr], res)
nodi.Type = gc.Types[gc.Tptr]
@@ -294,6 +278,7 @@
cgen(&nodi, &nodo) // REG = 0(REG) -- i.tab
regfree(&nodi)
+ var nodr gc.Node
regalloc(&nodr, gc.Types[gc.Tptr], &nodo)
if n.Left.Xoffset == gc.BADWIDTH {
gc.Fatal("cgen_callinter: badwidth")
@@ -324,14 +309,11 @@
* proc=2 defer call save away stack
*/
func cgen_call(n *gc.Node, proc int) {
- var t *gc.Type
- var nod gc.Node
- var afun gc.Node
-
if n == nil {
return
}
+ var afun gc.Node
if n.Left.Ullman >= gc.UINF {
// if name involves a fn call
// precompute the address of the fn
@@ -341,10 +323,11 @@
}
gc.Genlist(n.List) // assign the args
- t = n.Left.Type
+ t := n.Left.Type
// call tempname pointer
if n.Left.Ullman >= gc.UINF {
+ var nod gc.Node
regalloc(&nod, gc.Types[gc.Tptr], nil)
gc.Cgen_as(&nod, &afun)
nod.Type = t
@@ -355,6 +338,7 @@
// call pointer
if n.Left.Op != gc.ONAME || n.Left.Class != gc.PFUNC {
+ var nod gc.Node
regalloc(&nod, gc.Types[gc.Tptr], nil)
gc.Cgen_as(&nod, n.Left)
nod.Type = t
@@ -375,22 +359,18 @@
* res = return value from call.
*/
func cgen_callret(n *gc.Node, res *gc.Node) {
- var nod gc.Node
- var fp *gc.Type
- var t *gc.Type
- var flist gc.Iter
-
- t = n.Left.Type
+ t := n.Left.Type
if t.Etype == gc.TPTR32 || t.Etype == gc.TPTR64 {
t = t.Type
}
- fp = gc.Structfirst(&flist, gc.Getoutarg(t))
+ var flist gc.Iter
+ fp := gc.Structfirst(&flist, gc.Getoutarg(t))
if fp == nil {
gc.Fatal("cgen_callret: nil")
}
- nod = gc.Node{}
+ nod := gc.Node{}
nod.Op = gc.OINDREG
nod.Val.U.Reg = x86.REG_SP
nod.Addable = 1
@@ -406,23 +386,18 @@
* res = &return value from call.
*/
func cgen_aret(n *gc.Node, res *gc.Node) {
- var nod1 gc.Node
- var nod2 gc.Node
- var fp *gc.Type
- var t *gc.Type
- var flist gc.Iter
-
- t = n.Left.Type
+ t := n.Left.Type
if gc.Isptr[t.Etype] != 0 {
t = t.Type
}
- fp = gc.Structfirst(&flist, gc.Getoutarg(t))
+ var flist gc.Iter
+ fp := gc.Structfirst(&flist, gc.Getoutarg(t))
if fp == nil {
gc.Fatal("cgen_aret: nil")
}
- nod1 = gc.Node{}
+ nod1 := gc.Node{}
nod1.Op = gc.OINDREG
nod1.Val.U.Reg = x86.REG_SP
nod1.Addable = 1
@@ -431,6 +406,7 @@
nod1.Type = fp.Type
if res.Op != gc.OREGISTER {
+ var nod2 gc.Node
regalloc(&nod2, gc.Types[gc.Tptr], res)
gins(leaptr, &nod1, &nod2)
gins(movptr, &nod2, res)
@@ -445,8 +421,6 @@
* n->left is assignments to return values.
*/
func cgen_ret(n *gc.Node) {
- var p *obj.Prog
-
if n != nil {
gc.Genlist(n.List) // copy out args
}
@@ -454,7 +428,7 @@
ginscall(gc.Deferreturn, 0)
}
gc.Genlist(gc.Curfn.Exit)
- p = gins(obj.ARET, nil, nil)
+ p := gins(obj.ARET, nil, nil)
if n != nil && n.Op == gc.ORETJMP {
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
@@ -470,21 +444,6 @@
* according to op.
*/
func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
- var a int
- var check int
- var n3 gc.Node
- var n4 gc.Node
- var t *gc.Type
- var t0 *gc.Type
- var ax gc.Node
- var dx gc.Node
- var ax1 gc.Node
- var n31 gc.Node
- var oldax gc.Node
- var olddx gc.Node
- var p1 *obj.Prog
- var p2 *obj.Prog
-
// Have to be careful about handling
// most negative int divided by -1 correctly.
// The hardware will trap.
@@ -493,10 +452,10 @@
// Easiest way to avoid for int8, int16: use int32.
// For int32 and int64, use explicit test.
// Could use int64 hw for int32.
- t = nl.Type
+ t := nl.Type
- t0 = t
- check = 0
+ t0 := t
+ check := 0
if gc.Issigned[t.Etype] != 0 {
check = 1
if gc.Isconst(nl, gc.CTINT) && gc.Mpgetfix(nl.Val.U.Xval) != -(1<<uint64(t.Width*8-1)) {
@@ -515,9 +474,12 @@
check = 0
}
- a = optoas(op, t)
+ a := optoas(op, t)
+ var n3 gc.Node
regalloc(&n3, t0, nil)
+ var ax gc.Node
+ var oldax gc.Node
if nl.Ullman >= nr.Ullman {
savex(x86.REG_AX, &ax, &oldax, res, t0)
cgen(nl, &ax)
@@ -532,16 +494,17 @@
if t != t0 {
// Convert
- ax1 = ax
+ ax1 := ax
- n31 = n3
+ n31 := n3
ax.Type = t
n3.Type = t
gmove(&ax1, &ax)
gmove(&n31, &n3)
}
- p2 = nil
+ p2 := (*obj.Prog)(nil)
+ var n4 gc.Node
if gc.Nacl {
// Native Client does not relay the divide-by-zero trap
// to the executing program, so we must insert a check
@@ -549,7 +512,7 @@
gc.Nodconst(&n4, t, 0)
gins(optoas(gc.OCMP, t), &n3, &n4)
- p1 = gc.Gbranch(optoas(gc.ONE, t), nil, +1)
+ p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
if panicdiv == nil {
panicdiv = gc.Sysfunc("panicdivide")
}
@@ -560,7 +523,7 @@
if check != 0 {
gc.Nodconst(&n4, t, -1)
gins(optoas(gc.OCMP, t), &n3, &n4)
- p1 = gc.Gbranch(optoas(gc.ONE, t), nil, +1)
+ p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
if op == gc.ODIV {
// a / (-1) is -a.
gins(optoas(gc.OMINUS, t), nil, &ax)
@@ -577,6 +540,8 @@
gc.Patch(p1, gc.Pc)
}
+ var olddx gc.Node
+ var dx gc.Node
savex(x86.REG_DX, &dx, &olddx, res, t)
if gc.Issigned[t.Etype] == 0 {
gc.Nodconst(&n4, t, 0)
@@ -609,9 +574,7 @@
* known to be dead.
*/
func savex(dr int, x *gc.Node, oldx *gc.Node, res *gc.Node, t *gc.Type) {
- var r int
-
- r = int(reg[dr])
+ r := int(reg[dr])
// save current ax and dx if they are live
// and not the destination
@@ -643,12 +606,7 @@
* res = nl % nr
*/
func cgen_div(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
- var n1 gc.Node
- var n2 gc.Node
- var n3 gc.Node
var w int
- var a int
- var m gc.Magic
if nr.Op != gc.OLITERAL {
goto longdiv
@@ -663,6 +621,7 @@
goto longdiv
case gc.TUINT64:
+ var m gc.Magic
m.W = w
m.Ud = uint64(gc.Mpgetfix(nr.Val.U.Xval))
gc.Umagic(&m)
@@ -673,8 +632,11 @@
goto longmod
}
+ var n1 gc.Node
cgenr(nl, &n1, nil)
+ var n2 gc.Node
gc.Nodconst(&n2, nl.Type, int64(m.Um))
+ var n3 gc.Node
regalloc(&n3, nl.Type, res)
cgen_hmul(&n1, &n2, &n3)
@@ -697,6 +659,7 @@
return
case gc.TINT64:
+ var m gc.Magic
m.W = w
m.Sd = gc.Mpgetfix(nr.Val.U.Xval)
gc.Smagic(&m)
@@ -707,8 +670,11 @@
goto longmod
}
+ var n1 gc.Node
cgenr(nl, &n1, res)
+ var n2 gc.Node
gc.Nodconst(&n2, nl.Type, m.Sm)
+ var n3 gc.Node
regalloc(&n3, nl.Type, nil)
cgen_hmul(&n1, &n2, &n3)
@@ -748,12 +714,14 @@
// mod using formula A%B = A-(A/B*B) but
// we know that there is a fast algorithm for A/B
longmod:
+ var n1 gc.Node
regalloc(&n1, nl.Type, res)
cgen(nl, &n1)
+ var n2 gc.Node
regalloc(&n2, nl.Type, nil)
cgen_div(gc.ODIV, &n1, nr, &n2)
- a = optoas(gc.OMUL, nl.Type)
+ a := optoas(gc.OMUL, nl.Type)
if w == 8 {
// use 2-operand 16-bit multiply
// because there is no 2-operand 8-bit multiply
@@ -761,6 +729,7 @@
}
if !gc.Smallintconst(nr) {
+ var n3 gc.Node
regalloc(&n3, nl.Type, nil)
cgen(nr, &n3)
gins(a, &n3, &n2)
@@ -779,30 +748,26 @@
* res = (nl*nr) >> width
*/
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
- var t *gc.Type
- var a int
- var n1 gc.Node
- var n2 gc.Node
- var ax gc.Node
- var dx gc.Node
- var tmp *gc.Node
-
- t = nl.Type
- a = optoas(gc.OHMUL, t)
+ t := nl.Type
+ a := optoas(gc.OHMUL, t)
if nl.Ullman < nr.Ullman {
- tmp = nl
+ tmp := nl
nl = nr
nr = tmp
}
+ var n1 gc.Node
cgenr(nl, &n1, res)
+ var n2 gc.Node
cgenr(nr, &n2, nil)
+ var ax gc.Node
gc.Nodreg(&ax, t, x86.REG_AX)
gmove(&n1, &ax)
gins(a, &n2, nil)
regfree(&n2)
regfree(&n1)
+ var dx gc.Node
if t.Width == 1 {
// byte multiply behaves differently.
gc.Nodreg(&ax, t, x86.REG_AH)
@@ -824,24 +789,21 @@
var n1 gc.Node
var n2 gc.Node
var n3 gc.Node
- var n4 gc.Node
- var n5 gc.Node
var cx gc.Node
var oldcx gc.Node
- var a int
var rcx int
- var p1 *obj.Prog
- var sc uint64
var tcount *gc.Type
- a = optoas(op, nl.Type)
+ a := optoas(op, nl.Type)
if nr.Op == gc.OLITERAL {
+ var n1 gc.Node
regalloc(&n1, nl.Type, res)
cgen(nl, &n1)
- sc = uint64(gc.Mpgetfix(nr.Val.U.Xval))
+ sc := uint64(gc.Mpgetfix(nr.Val.U.Xval))
if sc >= uint64(nl.Type.Width*8) {
// large shift gets 2 shifts by width-1
+ var n3 gc.Node
gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
gins(a, &n3, &n1)
@@ -855,12 +817,14 @@
}
if nl.Ullman >= gc.UINF {
+ var n4 gc.Node
gc.Tempname(&n4, nl.Type)
cgen(nl, &n4)
nl = &n4
}
if nr.Ullman >= gc.UINF {
+ var n5 gc.Node
gc.Tempname(&n5, nr.Type)
cgen(nr, &n5)
nr = &n5
@@ -912,7 +876,7 @@
if !bounded {
gc.Nodconst(&n3, tcount, nl.Type.Width*8)
gins(optoas(gc.OCMP, tcount), &n1, &n3)
- p1 = gc.Gbranch(optoas(gc.OLT, tcount), nil, +1)
+ p1 := gc.Gbranch(optoas(gc.OLT, tcount), nil, +1)
if op == gc.ORSH && gc.Issigned[nl.Type.Etype] != 0 {
gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
gins(a, &n3, &n2)
@@ -947,37 +911,33 @@
* we do a full-width multiplication and truncate afterwards.
*/
func cgen_bmul(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
- var n1 gc.Node
- var n2 gc.Node
- var n1b gc.Node
- var n2b gc.Node
- var tmp *gc.Node
- var t *gc.Type
- var a int
-
// largest ullman on left.
if nl.Ullman < nr.Ullman {
- tmp = nl
+ tmp := nl
nl = nr
nr = tmp
}
// generate operands in "8-bit" registers.
+ var n1b gc.Node
regalloc(&n1b, nl.Type, res)
cgen(nl, &n1b)
+ var n2b gc.Node
regalloc(&n2b, nr.Type, nil)
cgen(nr, &n2b)
// perform full-width multiplication.
- t = gc.Types[gc.TUINT64]
+ t := gc.Types[gc.TUINT64]
if gc.Issigned[nl.Type.Etype] != 0 {
t = gc.Types[gc.TINT64]
}
+ var n1 gc.Node
gc.Nodreg(&n1, t, int(n1b.Val.U.Reg))
+ var n2 gc.Node
gc.Nodreg(&n2, t, int(n2b.Val.U.Reg))
- a = optoas(op, t)
+ a := optoas(op, t)
gins(a, &n2, &n1)
// truncate.
@@ -988,31 +948,20 @@
}
func clearfat(nl *gc.Node) {
- var w int64
- var c int64
- var q int64
- var n1 gc.Node
- var oldn1 gc.Node
- var ax gc.Node
- var oldax gc.Node
- var di gc.Node
- var z gc.Node
- var p *obj.Prog
-
/* clear a fat object */
if gc.Debug['g'] != 0 {
gc.Dump("\nclearfat", nl)
}
- w = nl.Type.Width
+ w := nl.Type.Width
// Avoid taking the address for simple enough types.
if componentgen(nil, nl) {
return
}
- c = w % 8 // bytes
- q = w / 8 // quads
+ c := w % 8 // bytes
+ q := w / 8 // quads
if q < 4 {
// Write sequence of MOV 0, off(base) instead of using STOSQ.
@@ -1021,9 +970,11 @@
// than the unrolled STOSQ loop.
// NOTE: Must use agen, not igen, so that optimizer sees address
// being taken. We are not writing on field boundaries.
+ var n1 gc.Node
agenr(nl, &n1, nil)
n1.Op = gc.OINDREG
+ var z gc.Node
gc.Nodconst(&z, gc.Types[gc.TUINT64], 0)
for {
tmp14 := q
@@ -1060,9 +1011,13 @@
return
}
+ var oldn1 gc.Node
+ var n1 gc.Node
savex(x86.REG_DI, &n1, &oldn1, nil, gc.Types[gc.Tptr])
agen(nl, &n1)
+ var ax gc.Node
+ var oldax gc.Node
savex(x86.REG_AX, &ax, &oldax, nil, gc.Types[gc.Tptr])
gconreg(x86.AMOVL, 0, x86.REG_AX)
@@ -1071,7 +1026,7 @@
gins(x86.AREP, nil, nil) // repeat
gins(x86.ASTOSQ, nil, nil) // STOQ AL,*(DI)+
} else {
- p = gins(obj.ADUFFZERO, nil, nil)
+ p := gins(obj.ADUFFZERO, nil, nil)
p.To.Type = obj.TYPE_ADDR
p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
@@ -1079,22 +1034,22 @@
p.To.Offset = 2 * (128 - q)
}
- z = ax
- di = n1
+ z := ax
+ di := n1
if w >= 8 && c >= 4 {
di.Op = gc.OINDREG
z.Type = gc.Types[gc.TINT64]
di.Type = z.Type
- p = gins(x86.AMOVQ, &z, &di)
+ p := gins(x86.AMOVQ, &z, &di)
p.To.Scale = 1
p.To.Offset = c - 8
} else if c >= 4 {
di.Op = gc.OINDREG
z.Type = gc.Types[gc.TINT32]
di.Type = z.Type
- p = gins(x86.AMOVL, &z, &di)
+ gins(x86.AMOVL, &z, &di)
if c > 4 {
- p = gins(x86.AMOVL, &z, &di)
+ p := gins(x86.AMOVL, &z, &di)
p.To.Scale = 1
p.To.Offset = c - 4
}
@@ -1112,11 +1067,10 @@
// Called after regopt and peep have run.
// Expand CHECKNIL pseudo-op into actual nil pointer check.
func expandchecks(firstp *obj.Prog) {
- var p *obj.Prog
var p1 *obj.Prog
var p2 *obj.Prog
- for p = firstp; p != nil; p = p.Link {
+ for p := firstp; p != nil; p = p.Link {
if p.As != obj.ACHECKNIL {
continue
}
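
dodiv above picks up p2 := (*obj.Prog)(nil). An untyped nil cannot seed
a short variable declaration, so rewriting var p2 *obj.Prog into :=
form has to spell the pointer type out in a conversion. A minimal
sketch with a stand-in type (not the real obj.Prog):

	package main

	import "fmt"

	type prog struct{ as int }

	func main() {
		// `p := nil` does not compile — nil alone has no type — so the
		// mechanical rewrite of `var p *prog; p = nil` writes the type
		// into a conversion instead:
		p := (*prog)(nil)

		fmt.Println(p == nil) // true
		p = &prog{as: 1}
		fmt.Println(p.as) // 1
	}
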
diff --git a/src/cmd/6g/gsubr.go b/src/cmd/6g/gsubr.go
index c440f8c..a8e47d3 100644
--- a/src/cmd/6g/gsubr.go
+++ b/src/cmd/6g/gsubr.go
@@ -53,19 +53,17 @@
}
func ginit() {
- var i int
-
- for i = 0; i < len(reg); i++ {
+ for i := 0; i < len(reg); i++ {
reg[i] = 1
}
- for i = x86.REG_AX; i <= x86.REG_R15; i++ {
+ for i := x86.REG_AX; i <= x86.REG_R15; i++ {
reg[i] = 0
}
- for i = x86.REG_X0; i <= x86.REG_X15; i++ {
+ for i := x86.REG_X0; i <= x86.REG_X15; i++ {
reg[i] = 0
}
- for i = 0; i < len(resvd); i++ {
+ for i := 0; i < len(resvd); i++ {
reg[resvd[i]]++
}
@@ -79,9 +77,7 @@
}
func gclean() {
- var i int
-
- for i = 0; i < len(resvd); i++ {
+ for i := 0; i < len(resvd); i++ {
reg[resvd[i]]--
}
if gc.Nacl {
@@ -91,12 +87,12 @@
reg[x86.REG_BP]--
}
- for i = x86.REG_AX; i <= x86.REG_R15; i++ {
+ for i := x86.REG_AX; i <= x86.REG_R15; i++ {
if reg[i] != 0 {
gc.Yyerror("reg %v left allocated\n", gc.Ctxt.Rconv(i))
}
}
- for i = x86.REG_X0; i <= x86.REG_X15; i++ {
+ for i := x86.REG_X0; i <= x86.REG_X15; i++ {
if reg[i] != 0 {
gc.Yyerror("reg %v left allocated\n", gc.Ctxt.Rconv(i))
}
@@ -104,10 +100,9 @@
}
func anyregalloc() bool {
- var i int
var j int
- for i = x86.REG_AX; i <= x86.REG_R15; i++ {
+ for i := x86.REG_AX; i <= x86.REG_R15; i++ {
if reg[i] == 0 {
goto ok
}
@@ -131,14 +126,12 @@
* caller must regfree(n).
*/
func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) {
- var i int
- var et int
-
if t == nil {
gc.Fatal("regalloc: t nil")
}
- et = int(gc.Simtype[t.Etype])
+ et := int(gc.Simtype[t.Etype])
+ var i int
switch et {
case gc.TINT8,
gc.TUINT8,
@@ -166,7 +159,7 @@
}
gc.Flusherrors()
- for i = 0; i+x86.REG_AX <= x86.REG_R15; i++ {
+ for i := 0; i+x86.REG_AX <= x86.REG_R15; i++ {
fmt.Printf("%d %p\n", i, regpc[i])
}
gc.Fatal("out of fixed registers")
@@ -202,15 +195,13 @@
}
func regfree(n *gc.Node) {
- var i int
-
if n.Op == gc.ONAME {
return
}
if n.Op != gc.OREGISTER && n.Op != gc.OINDREG {
gc.Fatal("regfree: not a register")
}
- i = int(n.Val.U.Reg)
+ i := int(n.Val.U.Reg)
if i == x86.REG_SP {
return
}
@@ -252,7 +243,6 @@
*/
func ginscon(as int, c int64, n2 *gc.Node) {
var n1 gc.Node
- var ntmp gc.Node
switch as {
case x86.AADDL,
@@ -267,6 +257,7 @@
if as != x86.AMOVQ && (c < -(1<<31) || c >= 1<<31) {
// cannot have 64-bit immediate in ADD, etc.
// instead, MOV into register first.
+ var ntmp gc.Node
regalloc(&ntmp, gc.Types[gc.TINT64], nil)
gins(x86.AMOVQ, &n1, &ntmp)
@@ -309,27 +300,13 @@
* hard part is conversions.
*/
func gmove(f *gc.Node, t *gc.Node) {
- var a int
- var ft int
- var tt int
- var cvt *gc.Type
- var r1 gc.Node
- var r2 gc.Node
- var r3 gc.Node
- var r4 gc.Node
- var zero gc.Node
- var one gc.Node
- var con gc.Node
- var p1 *obj.Prog
- var p2 *obj.Prog
-
if gc.Debug['M'] != 0 {
fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, obj.FmtLong), gc.Nconv(t, obj.FmtLong))
}
- ft = gc.Simsimtype(f.Type)
- tt = gc.Simsimtype(t.Type)
- cvt = t.Type
+ ft := gc.Simsimtype(f.Type)
+ tt := gc.Simsimtype(t.Type)
+ cvt := t.Type
if gc.Iscomplex[ft] != 0 || gc.Iscomplex[tt] != 0 {
gc.Complexmove(f, t)
@@ -337,12 +314,15 @@
}
// cannot have two memory operands
+ var r1 gc.Node
+ var a int
if gc.Ismem(f) && gc.Ismem(t) {
goto hard
}
// convert constant to desired type
if f.Op == gc.OLITERAL {
+ var con gc.Node
gc.Convconst(&con, t.Type, &f.Val)
f = &con
ft = tt // so big switch will choose a simple mov
@@ -550,21 +530,25 @@
// otherwise, subtract 2^63, convert, and add it back.
case gc.TFLOAT32<<16 | gc.TUINT64,
gc.TFLOAT64<<16 | gc.TUINT64:
- a = x86.ACVTTSS2SQ
+ a := x86.ACVTTSS2SQ
if ft == gc.TFLOAT64 {
a = x86.ACVTTSD2SQ
}
bignodes()
+ var r1 gc.Node
regalloc(&r1, gc.Types[ft], nil)
+ var r2 gc.Node
regalloc(&r2, gc.Types[tt], t)
+ var r3 gc.Node
regalloc(&r3, gc.Types[ft], nil)
+ var r4 gc.Node
regalloc(&r4, gc.Types[tt], nil)
gins(optoas(gc.OAS, f.Type), f, &r1)
gins(optoas(gc.OCMP, f.Type), &bigf, &r1)
- p1 = gc.Gbranch(optoas(gc.OLE, f.Type), nil, +1)
+ p1 := gc.Gbranch(optoas(gc.OLE, f.Type), nil, +1)
gins(a, &r1, &r2)
- p2 = gc.Gbranch(obj.AJMP, nil, 0)
+ p2 := gc.Gbranch(obj.AJMP, nil, 0)
gc.Patch(p1, gc.Pc)
gins(optoas(gc.OAS, f.Type), &bigf, &r3)
gins(optoas(gc.OSUB, f.Type), &r3, &r1)
@@ -624,22 +608,28 @@
// otherwise, halve (rounding to odd?), convert, and double.
case gc.TUINT64<<16 | gc.TFLOAT32,
gc.TUINT64<<16 | gc.TFLOAT64:
- a = x86.ACVTSQ2SS
+ a := x86.ACVTSQ2SS
if tt == gc.TFLOAT64 {
a = x86.ACVTSQ2SD
}
+ var zero gc.Node
gc.Nodconst(&zero, gc.Types[gc.TUINT64], 0)
+ var one gc.Node
gc.Nodconst(&one, gc.Types[gc.TUINT64], 1)
+ var r1 gc.Node
regalloc(&r1, f.Type, f)
+ var r2 gc.Node
regalloc(&r2, t.Type, t)
+ var r3 gc.Node
regalloc(&r3, f.Type, nil)
+ var r4 gc.Node
regalloc(&r4, f.Type, nil)
gmove(f, &r1)
gins(x86.ACMPQ, &r1, &zero)
- p1 = gc.Gbranch(x86.AJLT, nil, +1)
+ p1 := gc.Gbranch(x86.AJLT, nil, +1)
gins(a, &r1, &r2)
- p2 = gc.Gbranch(obj.AJMP, nil, 0)
+ p2 := gc.Gbranch(obj.AJMP, nil, 0)
gc.Patch(p1, gc.Pc)
gmove(&r1, &r3)
gins(x86.ASHRQ, &one, &r3)
@@ -717,13 +707,8 @@
* as f, t
*/
func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
- var w int32
- var p *obj.Prog
// Node nod;
- var af obj.Addr
- var at obj.Addr
-
// if(f != N && f->op == OINDEX) {
// regalloc(&nod, ®node, Z);
// v = constnode.vconst;
@@ -758,15 +743,15 @@
}
}
- af = obj.Addr{}
- at = obj.Addr{}
+ af := obj.Addr{}
+ at := obj.Addr{}
if f != nil {
gc.Naddr(f, &af, 1)
}
if t != nil {
gc.Naddr(t, &at, 1)
}
- p = gc.Prog(as)
+ p := gc.Prog(as)
if f != nil {
p.From = af
}
@@ -777,7 +762,7 @@
fmt.Printf("%v\n", p)
}
- w = 0
+ w := int32(0)
switch as {
case x86.AMOVB:
w = 1
@@ -806,8 +791,6 @@
}
func fixlargeoffset(n *gc.Node) {
- var a gc.Node
-
if n == nil {
return
}
@@ -819,7 +802,7 @@
}
if n.Xoffset != int64(int32(n.Xoffset)) {
// offset too large, add to register instead.
- a = *n
+ a := *n
a.Op = gc.OREGISTER
a.Type = gc.Types[gc.Tptr]
@@ -834,13 +817,11 @@
* return Axxx for Oxxx on type t.
*/
func optoas(op int, t *gc.Type) int {
- var a int
-
if t == nil {
gc.Fatal("optoas: t is nil")
}
- a = obj.AXXX
+ a := obj.AXXX
switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
default:
gc.Fatal("optoas: no entry %v-%v", gc.Oconv(int(op), 0), gc.Tconv(t, 0))
@@ -1362,35 +1343,24 @@
* to release the register used for a.
*/
func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
- var o int
- var i int
- var oary [10]int64
- var v int64
- var w int64
- var n1 gc.Node
- var n2 gc.Node
- var n3 gc.Node
- var n4 gc.Node
- var nn *gc.Node
- var l *gc.Node
- var r *gc.Node
- var reg *gc.Node
- var reg1 *gc.Node
- var p1 *obj.Prog
- var t *gc.Type
-
if n.Type == nil {
return false
}
*a = obj.Addr{}
+ var o int
+ var n1 gc.Node
+ var oary [10]int64
+ var nn *gc.Node
+ var reg *gc.Node
+ var reg1 *gc.Node
switch n.Op {
case gc.OLITERAL:
if !gc.Isconst(n, gc.CTINT) {
break
}
- v = gc.Mpgetfix(n.Val.U.Xval)
+ v := gc.Mpgetfix(n.Val.U.Xval)
if v >= 32000 || v <= -32000 {
break
}
@@ -1400,20 +1370,13 @@
gc.ODOTPTR:
cleani += 2
reg = &clean[cleani-1]
- reg1 = &clean[cleani-2]
+ reg1 := &clean[cleani-2]
reg.Op = gc.OEMPTY
reg1.Op = gc.OEMPTY
goto odot
case gc.OINDEX:
return false
-
- // disabled: OINDEX case is now covered by agenr
- // for a more suitable register allocation pattern.
- if n.Left.Type.Etype == gc.TSTRING {
- return false
- }
- goto oindex
}
return false
@@ -1474,7 +1437,7 @@
if nn.Addable != 0 && o == 1 && oary[0] >= 0 {
// directly addressable set of DOTs
- n1 = *nn
+ n1 := *nn
n1.Type = n.Type
n1.Xoffset += oary[0]
@@ -1494,7 +1457,7 @@
n1.Xoffset = -(oary[0] + 1)
}
- for i = 1; i < o; i++ {
+ for i := 1; i < o; i++ {
if oary[i] >= 0 {
gc.Fatal("can't happen")
}
@@ -1509,240 +1472,6 @@
gc.Naddr(&n1, a, 1)
goto yes
-oindex:
- l = n.Left
- r = n.Right
- if l.Ullman >= gc.UINF && r.Ullman >= gc.UINF {
- return false
- }
-
- // set o to type of array
- o = 0
-
- if gc.Isptr[l.Type.Etype] != 0 {
- gc.Fatal("ptr ary")
- }
- if l.Type.Etype != gc.TARRAY {
- gc.Fatal("not ary")
- }
- if l.Type.Bound < 0 {
- o |= ODynam
- }
-
- w = n.Type.Width
- if gc.Isconst(r, gc.CTINT) {
- goto oindex_const
- }
-
- switch w {
- default:
- return false
-
- case 1,
- 2,
- 4,
- 8:
- break
- }
-
- cleani += 2
- reg = &clean[cleani-1]
- reg1 = &clean[cleani-2]
- reg.Op = gc.OEMPTY
- reg1.Op = gc.OEMPTY
-
- // load the array (reg)
- if l.Ullman > r.Ullman {
- if xgen(l, reg, o) {
- o |= OAddable
- }
- }
-
- // load the index (reg1)
- t = gc.Types[gc.TUINT64]
-
- if gc.Issigned[r.Type.Etype] != 0 {
- t = gc.Types[gc.TINT64]
- }
- regalloc(reg1, t, nil)
- regalloc(&n3, r.Type, reg1)
- cgen(r, &n3)
- gmove(&n3, reg1)
- regfree(&n3)
-
- // load the array (reg)
- if l.Ullman <= r.Ullman {
- if xgen(l, reg, o) {
- o |= OAddable
- }
- }
-
- // check bounds
- if gc.Debug['B'] == 0 && !n.Bounded {
- // check bounds
- n4.Op = gc.OXXX
-
- t = gc.Types[gc.Simtype[gc.TUINT]]
- if o&ODynam != 0 {
- if o&OAddable != 0 {
- n2 = *l
- n2.Xoffset += int64(gc.Array_nel)
- n2.Type = gc.Types[gc.Simtype[gc.TUINT]]
- } else {
- n2 = *reg
- n2.Xoffset = int64(gc.Array_nel)
- n2.Op = gc.OINDREG
- n2.Type = gc.Types[gc.Simtype[gc.TUINT]]
- }
- } else {
- if gc.Is64(r.Type) {
- t = gc.Types[gc.TUINT64]
- }
- gc.Nodconst(&n2, gc.Types[gc.TUINT64], l.Type.Bound)
- }
-
- gins(optoas(gc.OCMP, t), reg1, &n2)
- p1 = gc.Gbranch(optoas(gc.OLT, t), nil, +1)
- if n4.Op != gc.OXXX {
- regfree(&n4)
- }
- ginscall(gc.Panicindex, -1)
- gc.Patch(p1, gc.Pc)
- }
-
- if o&ODynam != 0 {
- if o&OAddable != 0 {
- n2 = *l
- n2.Xoffset += int64(gc.Array_array)
- n2.Type = gc.Types[gc.Tptr]
- gmove(&n2, reg)
- } else {
- n2 = *reg
- n2.Op = gc.OINDREG
- n2.Xoffset = int64(gc.Array_array)
- n2.Type = gc.Types[gc.Tptr]
- gmove(&n2, reg)
- }
- }
-
- if o&OAddable != 0 {
- gc.Naddr(reg1, a, 1)
- a.Offset = 0
- a.Scale = int8(w)
- a.Index = a.Reg
- a.Type = obj.TYPE_MEM
- a.Reg = reg.Val.U.Reg
- } else {
- gc.Naddr(reg1, a, 1)
- a.Offset = 0
- a.Scale = int8(w)
- a.Index = a.Reg
- a.Type = obj.TYPE_MEM
- a.Reg = reg.Val.U.Reg
- }
-
- goto yes
-
- // index is constant
- // can check statically and
- // can multiply by width statically
-
-oindex_const:
- v = gc.Mpgetfix(r.Val.U.Xval)
-
- if sudoaddable(as, l, a) {
- goto oindex_const_sudo
- }
-
- cleani += 2
- reg = &clean[cleani-1]
- reg1 = &clean[cleani-2]
- reg.Op = gc.OEMPTY
- reg1.Op = gc.OEMPTY
-
- if o&ODynam != 0 {
- regalloc(reg, gc.Types[gc.Tptr], nil)
- agen(l, reg)
-
- if gc.Debug['B'] == 0 && !n.Bounded {
- n1 = *reg
- n1.Op = gc.OINDREG
- n1.Type = gc.Types[gc.Tptr]
- n1.Xoffset = int64(gc.Array_nel)
- gc.Nodconst(&n2, gc.Types[gc.TUINT64], v)
- gins(optoas(gc.OCMP, gc.Types[gc.Simtype[gc.TUINT]]), &n1, &n2)
- p1 = gc.Gbranch(optoas(gc.OGT, gc.Types[gc.Simtype[gc.TUINT]]), nil, +1)
- ginscall(gc.Panicindex, -1)
- gc.Patch(p1, gc.Pc)
- }
-
- n1 = *reg
- n1.Op = gc.OINDREG
- n1.Type = gc.Types[gc.Tptr]
- n1.Xoffset = int64(gc.Array_array)
- gmove(&n1, reg)
-
- n2 = *reg
- n2.Op = gc.OINDREG
- n2.Xoffset = v * w
- fixlargeoffset(&n2)
- a.Type = obj.TYPE_NONE
- a.Index = obj.TYPE_NONE
- gc.Naddr(&n2, a, 1)
- goto yes
- }
-
- igen(l, &n1, nil)
- if n1.Op == gc.OINDREG {
- *reg = n1
- reg.Op = gc.OREGISTER
- }
-
- n1.Xoffset += v * w
- fixlargeoffset(&n1)
- a.Type = obj.TYPE_NONE
- a.Index = obj.TYPE_NONE
- gc.Naddr(&n1, a, 1)
- goto yes
-
-oindex_const_sudo:
- if o&ODynam == 0 {
- // array indexed by a constant
- a.Offset += v * w
-
- goto yes
- }
-
- // slice indexed by a constant
- if gc.Debug['B'] == 0 && !n.Bounded {
- a.Offset += int64(gc.Array_nel)
- gc.Nodconst(&n2, gc.Types[gc.TUINT64], v)
- p1 = gins(optoas(gc.OCMP, gc.Types[gc.Simtype[gc.TUINT]]), nil, &n2)
- p1.From = *a
- p1 = gc.Gbranch(optoas(gc.OGT, gc.Types[gc.Simtype[gc.TUINT]]), nil, +1)
- ginscall(gc.Panicindex, -1)
- gc.Patch(p1, gc.Pc)
- a.Offset -= int64(gc.Array_nel)
- }
-
- a.Offset += int64(gc.Array_array)
- reg = &clean[cleani-1]
- if reg.Op == gc.OEMPTY {
- regalloc(reg, gc.Types[gc.Tptr], nil)
- }
-
- p1 = gins(movptr, nil, reg)
- p1.From = *a
-
- n2 = *reg
- n2.Op = gc.OINDREG
- n2.Xoffset = v * w
- fixlargeoffset(&n2)
- a.Type = obj.TYPE_NONE
- a.Index = obj.TYPE_NONE
- gc.Naddr(&n2, a, 1)
- goto yes
-
yes:
return true
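
The 6g copy of sudoaddable loses 240 lines in one hunk
(@@ -1509,240 +1472,6 @@), all of it unreachable once the OINDEX case
returns false up front; Go accepts unreachable statements, so the dead
labels lingered until now. One sign of how stale the code had become is
visible in the deleted text: the OAddable and non-OAddable arms of the
final addressing step were byte-for-byte identical. A toy illustration
of that identical-branches collapse — invented function, not from the
CL:

	package main

	import "fmt"

	// describeBefore has the shape the deleted code had drifted into:
	// both arms of the conditional do exactly the same thing.
	func describeBefore(addable bool, reg int) string {
		if addable {
			return fmt.Sprintf("mem via R%d", reg)
		}
		return fmt.Sprintf("mem via R%d", reg)
	}

	// describeAfter is the behavior-preserving collapse.
	func describeAfter(addable bool, reg int) string {
		_ = addable // the flag no longer affects the result
		return fmt.Sprintf("mem via R%d", reg)
	}

	func main() {
		fmt.Println(describeBefore(true, 3) == describeAfter(true, 3))   // true
		fmt.Println(describeBefore(false, 3) == describeAfter(false, 3)) // true
	}
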
diff --git a/src/cmd/6g/peep.go b/src/cmd/6g/peep.go
index 9870ca5..ed582d7 100644
--- a/src/cmd/6g/peep.go
+++ b/src/cmd/6g/peep.go
@@ -62,10 +62,9 @@
}
func rnops(r *gc.Flow) *gc.Flow {
- var p *obj.Prog
- var r1 *gc.Flow
-
if r != nil {
+ var p *obj.Prog
+ var r1 *gc.Flow
for {
p = r.Prog
if p.As != obj.ANOP || p.From.Type != obj.TYPE_NONE || p.To.Type != obj.TYPE_NONE {
@@ -83,14 +82,7 @@
}
func peep(firstp *obj.Prog) {
- var r *gc.Flow
- var r1 *gc.Flow
- var g *gc.Graph
- var p *obj.Prog
- var p1 *obj.Prog
- var t int
-
- g = gc.Flowstart(firstp, nil)
+ g := (*gc.Graph)(gc.Flowstart(firstp, nil))
if g == nil {
return
}
@@ -103,7 +95,8 @@
// find MOV $con,R followed by
// another MOV $con,R without
// setting R in the interim
- for r = g.Start; r != nil; r = r.Link {
+ var p *obj.Prog
+ for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
p = r.Prog
switch p.As {
case x86.ALEAL,
@@ -130,6 +123,10 @@
}
}
+ var r *gc.Flow
+ var r1 *gc.Flow
+ var p1 *obj.Prog
+ var t int
loop1:
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
gc.Dumpit("loop1", g.Start, 0)
@@ -266,7 +263,7 @@
// can be replaced by MOVAPD, which moves the pair of float64s
// instead of just the lower one. We only use the lower one, but
// the processor can do better if we do moves using both.
- for r = g.Start; r != nil; r = r.Link {
+ for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
p = r.Prog
if p.As == x86.AMOVLQZX {
if regtyp(&p.From) {
@@ -290,7 +287,7 @@
// load pipelining
// push any load from memory as early as possible
// to give it time to complete before use.
- for r = g.Start; r != nil; r = r.Link {
+ for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
p = r.Prog
switch p.As {
case x86.AMOVB,
@@ -309,13 +306,10 @@
func pushback(r0 *gc.Flow) {
var r *gc.Flow
- var b *gc.Flow
- var p0 *obj.Prog
var p *obj.Prog
- var t obj.Prog
- b = nil
- p0 = r0.Prog
+ b := (*gc.Flow)(nil)
+ p0 := (*obj.Prog)(r0.Prog)
for r = gc.Uniqp(r0); r != nil && gc.Uniqs(r) != nil; r = gc.Uniqp(r) {
p = r.Prog
if p.As != obj.ANOP {
@@ -346,7 +340,7 @@
if gc.Debug['v'] != 0 {
fmt.Printf("pushback\n")
- for r = b; ; r = r.Link {
+ for r := (*gc.Flow)(b); ; r = r.Link {
fmt.Printf("\t%v\n", r.Prog)
if r == r0 {
break
@@ -354,7 +348,7 @@
}
}
- t = *r0.Prog
+ t := obj.Prog(*r0.Prog)
for r = gc.Uniqp(r0); ; r = gc.Uniqp(r) {
p0 = r.Link.Prog
p = r.Prog
@@ -376,7 +370,7 @@
if gc.Debug['v'] != 0 {
fmt.Printf("\tafter\n")
- for r = b; ; r = r.Link {
+ for r := (*gc.Flow)(b); ; r = r.Link {
fmt.Printf("\t%v\n", r.Prog)
if r == r0 {
break
@@ -386,9 +380,7 @@
}
func excise(r *gc.Flow) {
- var p *obj.Prog
-
- p = r.Prog
+ p := (*obj.Prog)(r.Prog)
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("%v ===delete===\n", p)
}
@@ -414,9 +406,8 @@
// seems unnecessary, and it makes the instructions longer.
func elimshortmov(g *gc.Graph) {
var p *obj.Prog
- var r *gc.Flow
- for r = g.Start; r != nil; r = r.Link {
+ for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
p = r.Prog
if regtyp(&p.To) {
switch p.As {
@@ -518,10 +509,9 @@
// is reg guaranteed to be truncated by a previous L instruction?
func prevl(r0 *gc.Flow, reg int) bool {
var p *obj.Prog
- var r *gc.Flow
var info gc.ProgInfo
- for r = gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
+ for r := (*gc.Flow)(gc.Uniqp(r0)); r != nil; r = gc.Uniqp(r) {
p = r.Prog
if p.To.Type == obj.TYPE_REG && int(p.To.Reg) == reg {
proginfo(&info, p)
@@ -552,18 +542,11 @@
* will be eliminated by copy propagation.
*/
func subprop(r0 *gc.Flow) bool {
- var p *obj.Prog
- var info gc.ProgInfo
- var v1 *obj.Addr
- var v2 *obj.Addr
- var r *gc.Flow
- var t int
-
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("subprop %v\n", r0.Prog)
}
- p = r0.Prog
- v1 = &p.From
+ p := (*obj.Prog)(r0.Prog)
+ v1 := (*obj.Addr)(&p.From)
if !regtyp(v1) {
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("\tnot regtype %v; return 0\n", gc.Ctxt.Dconv(v1))
@@ -571,7 +554,7 @@
return false
}
- v2 = &p.To
+ v2 := (*obj.Addr)(&p.To)
if !regtyp(v2) {
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("\tnot regtype %v; return 0\n", gc.Ctxt.Dconv(v2))
@@ -579,6 +562,8 @@
return false
}
+ var info gc.ProgInfo
+ var r *gc.Flow
for r = gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("\t? %v\n", r.Prog)
@@ -652,7 +637,7 @@
}
}
- t = int(v1.Reg)
+ t := int(int(v1.Reg))
v1.Reg = v2.Reg
v2.Reg = int16(t)
if gc.Debug['P'] != 0 {
@@ -674,16 +659,12 @@
* set v2 return success
*/
func copyprop(g *gc.Graph, r0 *gc.Flow) bool {
- var p *obj.Prog
- var v1 *obj.Addr
- var v2 *obj.Addr
-
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("copyprop %v\n", r0.Prog)
}
- p = r0.Prog
- v1 = &p.From
- v2 = &p.To
+ p := (*obj.Prog)(r0.Prog)
+ v1 := (*obj.Addr)(&p.From)
+ v2 := (*obj.Addr)(&p.To)
if copyas(v1, v2) {
return true
}
@@ -692,9 +673,6 @@
}
func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) bool {
- var t int
- var p *obj.Prog
-
if uint32(r.Active) == gactive {
if gc.Debug['P'] != 0 {
fmt.Printf("act set; return 1\n")
@@ -706,6 +684,8 @@
if gc.Debug['P'] != 0 {
fmt.Printf("copy %v->%v f=%d\n", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), f)
}
+ var t int
+ var p *obj.Prog
for ; r != nil; r = r.S1 {
p = r.Prog
if gc.Debug['P'] != 0 {
@@ -796,8 +776,6 @@
* 0 otherwise (not touched)
*/
func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
- var info gc.ProgInfo
-
switch p.As {
case obj.AJMP:
if s != nil {
@@ -851,6 +829,7 @@
if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
return 0
}
+ var info gc.ProgInfo
proginfo(&info, p)
if (info.Reguse|info.Regset)&RtoB(int(v.Reg)) != 0 {
@@ -977,10 +956,8 @@
* return failure to substitute
*/
func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f int) int {
- var reg int
-
if copyas(a, v) {
- reg = int(s.Reg)
+ reg := int(int(s.Reg))
if reg >= x86.REG_AX && reg <= x86.REG_R15 || reg >= x86.REG_X0 && reg <= x86.REG_X0+15 {
if f != 0 {
a.Reg = int16(reg)
@@ -991,7 +968,7 @@
}
if regtyp(v) {
- reg = int(v.Reg)
+ reg := int(int(v.Reg))
if a.Type == obj.TYPE_MEM && int(a.Reg) == reg {
if (s.Reg == x86.REG_BP || s.Reg == x86.REG_R13) && a.Index != x86.REG_NONE {
return 1 /* can't use BP-base with index */
@@ -1016,15 +993,12 @@
}
func conprop(r0 *gc.Flow) {
- var r *gc.Flow
var p *obj.Prog
- var p0 *obj.Prog
var t int
- var v0 *obj.Addr
- p0 = r0.Prog
- v0 = &p0.To
- r = r0
+ p0 := (*obj.Prog)(r0.Prog)
+ v0 := (*obj.Addr)(&p0.To)
+ r := (*gc.Flow)(r0)
loop:
r = gc.Uniqs(r)
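
The first loop over g.Start in peep above hunts for a MOV $con,R followed by another MOV $con,R with no intervening write to R. A toy version of that scan over a flat instruction list, simplified to drop the redundant load outright — the real pass is far more careful about calls, flags, and partial writes, so treat this only as the core idea:

	package main

	import "fmt"

	// Instr is a toy instruction: operation, source, destination.
	type Instr struct {
		Op, Src, Dst string
	}

	// dedupConstLoads drops a MOV $c,R whose destination register is
	// already known to hold $c from an earlier MOV $c,R with no
	// intervening write to R.
	func dedupConstLoads(prog []Instr) []Instr {
		known := map[string]string{} // register -> constant it holds
		var out []Instr
		for _, in := range prog {
			if in.Op == "MOV" && len(in.Src) > 0 && in.Src[0] == '$' {
				if known[in.Dst] == in.Src {
					continue // redundant reload of the same constant
				}
				known[in.Dst] = in.Src
			} else {
				delete(known, in.Dst) // any other write invalidates Dst
			}
			out = append(out, in)
		}
		return out
	}

	func main() {
		prog := []Instr{
			{"MOV", "$0", "AX"},
			{"MOV", "AX", "x"},
			{"MOV", "$0", "AX"}, // AX still holds $0: dropped
			{"MOV", "AX", "y"},
		}
		for _, in := range dedupConstLoads(prog) {
			fmt.Printf("%s %s, %s\n", in.Op, in.Src, in.Dst)
		}
	}
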
diff --git a/src/cmd/6g/reg.go b/src/cmd/6g/reg.go
index 0629a62..3c5a699 100644
--- a/src/cmd/6g/reg.go
+++ b/src/cmd/6g/reg.go
@@ -85,9 +85,7 @@
}
func doregbits(r int) uint64 {
- var b uint64
-
- b = 0
+ b := uint64(0)
if r >= x86.REG_AX && r <= x86.REG_R15 {
b |= RtoB(r)
} else if r >= x86.REG_AL && r <= x86.REG_R15B {
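
doregbits, like RtoB, represents a register set as bits in a uint64 so that union, intersection, and membership are single machine operations. A tiny sketch of the encoding, with made-up register indices standing in for the x86 constants:

	package main

	import "fmt"

	// Register i occupies bit i of a uint64, so union is |,
	// intersection is &, and membership is a single AND.
	const (
		regAX = iota
		regCX
		regDX
		regBX
	)

	func rtoB(r int) uint64 { return 1 << uint(r) }

	func main() {
		used := rtoB(regAX) | rtoB(regDX)
		fmt.Printf("used=%04b AX? %v CX? %v\n",
			used, used&rtoB(regAX) != 0, used&rtoB(regCX) != 0)
	}
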
diff --git a/src/cmd/8g/cgen.go b/src/cmd/8g/cgen.go
index 9f736b1..d36bef7 100644
--- a/src/cmd/8g/cgen.go
+++ b/src/cmd/8g/cgen.go
@@ -19,8 +19,6 @@
* peep.c
*/
func mgen(n *gc.Node, n1 *gc.Node, rg *gc.Node) {
- var n2 gc.Node
-
n1.Op = gc.OEMPTY
if n.Addable != 0 {
@@ -34,7 +32,7 @@
gc.Tempname(n1, n.Type)
cgen(n, n1)
if n.Type.Width <= int64(gc.Widthptr) || gc.Isfloat[n.Type.Etype] != 0 {
- n2 = *n1
+ n2 := *n1
regalloc(n1, n.Type, rg)
gmove(&n2, n1)
}
@@ -55,17 +53,6 @@
* sudoaddable
*/
func cgen(n *gc.Node, res *gc.Node) {
- var nl *gc.Node
- var nr *gc.Node
- var r *gc.Node
- var n1 gc.Node
- var n2 gc.Node
- var nt gc.Node
- var p1 *obj.Prog
- var p2 *obj.Prog
- var p3 *obj.Prog
- var a int
-
if gc.Debug['g'] != 0 {
gc.Dump("\ncgen-n", n)
gc.Dump("cgen-res", res)
@@ -85,6 +72,7 @@
gc.OSLICE3,
gc.OSLICE3ARR:
if res.Op != gc.ONAME || res.Addable == 0 {
+ var n1 gc.Node
gc.Tempname(&n1, n.Type)
gc.Cgen_slice(n, &n1)
cgen(&n1, res)
@@ -95,6 +83,7 @@
case gc.OEFACE:
if res.Op != gc.ONAME || res.Addable == 0 {
+ var n1 gc.Node
gc.Tempname(&n1, n.Type)
gc.Cgen_eface(n, &n1)
cgen(&n1, res)
@@ -110,6 +99,7 @@
// function calls on both sides? introduce temporary
if n.Ullman >= gc.UINF && res.Ullman >= gc.UINF {
+ var n1 gc.Node
gc.Tempname(&n1, n.Type)
cgen(n, &n1)
cgen(&n1, res)
@@ -154,6 +144,7 @@
if n.Addable == 0 && res.Addable == 0 {
// could use regalloc here sometimes,
// but have to check for ullman >= UINF.
+ var n1 gc.Node
gc.Tempname(&n1, n.Type)
cgen(n, &n1)
@@ -164,6 +155,7 @@
// if result is not addressable directly but n is,
// compute its address and then store via the address.
if res.Addable == 0 {
+ var n1 gc.Node
igen(res, &n1, nil)
cgen(n, &n1)
regfree(&n1)
@@ -180,16 +172,17 @@
// let's do some computation.
// use ullman to pick operand to eval first.
- nl = n.Left
+ nl := n.Left
- nr = n.Right
+ nr := n.Right
if nl != nil && nl.Ullman >= gc.UINF {
if nr != nil && nr.Ullman >= gc.UINF {
// both are hard
+ var n1 gc.Node
gc.Tempname(&n1, nl.Type)
cgen(nl, &n1)
- n2 = *n
+ n2 := *n
n2.Left = &n1
cgen(&n2, res)
return
@@ -222,6 +215,7 @@
return
}
+ var a int
switch n.Op {
default:
gc.Dump("cgen", n)
@@ -243,11 +237,11 @@
gc.OGE,
gc.OGT,
gc.ONOT:
- p1 = gc.Gbranch(obj.AJMP, nil, 0)
+ p1 := gc.Gbranch(obj.AJMP, nil, 0)
- p2 = gc.Pc
+ p2 := gc.Pc
gmove(gc.Nodbool(true), res)
- p3 = gc.Gbranch(obj.AJMP, nil, 0)
+ p3 := gc.Gbranch(obj.AJMP, nil, 0)
gc.Patch(p1, gc.Pc)
bgen(n, true, 0, p2)
gmove(gc.Nodbool(false), res)
@@ -293,7 +287,9 @@
break
}
+ var n2 gc.Node
gc.Tempname(&n2, n.Type)
+ var n1 gc.Node
mgen(nl, &n1, res)
gmove(&n1, &n2)
gmove(&n2, res)
@@ -304,12 +300,14 @@
gc.OINDEX,
gc.OIND,
gc.ONAME: // PHEAP or PPARAMREF var
+ var n1 gc.Node
igen(n, &n1, res)
gmove(&n1, res)
regfree(&n1)
case gc.OITAB:
+ var n1 gc.Node
igen(nl, &n1, res)
n1.Type = gc.Ptrto(gc.Types[gc.TUINTPTR])
gmove(&n1, res)
@@ -318,14 +316,16 @@
// pointer is the first word of string or slice.
case gc.OSPTR:
if gc.Isconst(nl, gc.CTSTR) {
+ var n1 gc.Node
regalloc(&n1, gc.Types[gc.Tptr], res)
- p1 = gins(i386.ALEAL, nil, &n1)
+ p1 := gins(i386.ALEAL, nil, &n1)
gc.Datastring(nl.Val.U.Sval.S, &p1.From)
gmove(&n1, res)
regfree(&n1)
break
}
+ var n1 gc.Node
igen(nl, &n1, res)
n1.Type = n.Type
gmove(&n1, res)
@@ -335,16 +335,18 @@
if gc.Istype(nl.Type, gc.TMAP) || gc.Istype(nl.Type, gc.TCHAN) {
// map has len in the first 32-bit word.
// a zero pointer means zero length
+ var n1 gc.Node
gc.Tempname(&n1, gc.Types[gc.Tptr])
cgen(nl, &n1)
+ var n2 gc.Node
regalloc(&n2, gc.Types[gc.Tptr], nil)
gmove(&n1, &n2)
n1 = n2
gc.Nodconst(&n2, gc.Types[gc.Tptr], 0)
gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2)
- p1 = gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, -1)
+ p1 := gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, -1)
n2 = n1
n2.Op = gc.OINDREG
@@ -360,6 +362,7 @@
if gc.Istype(nl.Type, gc.TSTRING) || gc.Isslice(nl.Type) {
// both slice and string have len one pointer into the struct.
+ var n1 gc.Node
igen(nl, &n1, res)
n1.Type = gc.Types[gc.TUINT32]
@@ -375,16 +378,18 @@
if gc.Istype(nl.Type, gc.TCHAN) {
// chan has cap in the second 32-bit word.
// a zero pointer means zero length
+ var n1 gc.Node
gc.Tempname(&n1, gc.Types[gc.Tptr])
cgen(nl, &n1)
+ var n2 gc.Node
regalloc(&n2, gc.Types[gc.Tptr], nil)
gmove(&n1, &n2)
n1 = n2
gc.Nodconst(&n2, gc.Types[gc.Tptr], 0)
gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2)
- p1 = gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, -1)
+ p1 := gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, -1)
n2 = n1
n2.Op = gc.OINDREG
@@ -400,6 +405,7 @@
}
if gc.Isslice(nl.Type) {
+ var n1 gc.Node
igen(nl, &n1, res)
n1.Type = gc.Types[gc.TUINT32]
n1.Xoffset += int64(gc.Array_cap)
@@ -439,14 +445,16 @@
sbop: // symmetric binary
if nl.Ullman < nr.Ullman || nl.Op == gc.OLITERAL {
- r = nl
+ r := nl
nl = nr
nr = r
}
abop: // asymmetric binary
if gc.Smallintconst(nr) {
+ var n1 gc.Node
mgen(nl, &n1, res)
+ var n2 gc.Node
regalloc(&n2, nl.Type, &n1)
gmove(&n1, &n2)
gins(a, nr, &n2)
@@ -454,9 +462,12 @@
regfree(&n2)
mfree(&n1)
} else if nl.Ullman >= nr.Ullman {
+ var nt gc.Node
gc.Tempname(&nt, nl.Type)
cgen(nl, &nt)
+ var n2 gc.Node
mgen(nr, &n2, nil)
+ var n1 gc.Node
regalloc(&n1, nl.Type, res)
gmove(&nt, &n1)
gins(a, &n2, &n1)
@@ -464,8 +475,10 @@
regfree(&n1)
mfree(&n2)
} else {
+ var n2 gc.Node
regalloc(&n2, nr.Type, res)
cgen(nr, &n2)
+ var n1 gc.Node
regalloc(&n1, nl.Type, nil)
cgen(nl, &n1)
gins(a, &n2, &n1)
@@ -477,6 +490,7 @@
return
uop: // unary
+ var n1 gc.Node
gc.Tempname(&n1, nl.Type)
cgen(nl, &n1)
@@ -491,11 +505,6 @@
* returns Prog* to patch to panic call.
*/
func igenindex(n *gc.Node, res *gc.Node, bounded int) *obj.Prog {
- var tmp gc.Node
- var lo gc.Node
- var hi gc.Node
- var zero gc.Node
-
if !gc.Is64(n.Type) {
if n.Addable != 0 {
// nothing to do.
@@ -508,8 +517,11 @@
return nil
}
+ var tmp gc.Node
gc.Tempname(&tmp, gc.Types[gc.TINT64])
cgen(n, &tmp)
+ var lo gc.Node
+ var hi gc.Node
split64(&tmp, &lo, &hi)
gc.Tempname(res, gc.Types[gc.TUINT32])
gmove(&lo, res)
@@ -518,6 +530,7 @@
return nil
}
+ var zero gc.Node
gc.Nodconst(&zero, gc.Types[gc.TINT32], 0)
gins(i386.ACMPL, &hi, &zero)
splitclean()
@@ -530,20 +543,6 @@
* The generated code checks that the result is not nil.
*/
func agen(n *gc.Node, res *gc.Node) {
- var nl *gc.Node
- var nr *gc.Node
- var n1 gc.Node
- var n2 gc.Node
- var n3 gc.Node
- var tmp gc.Node
- var nlen gc.Node
- var t *gc.Type
- var w uint32
- var v uint64
- var p1 *obj.Prog
- var p2 *obj.Prog
- var bounded bool
-
if gc.Debug['g'] != 0 {
gc.Dump("\nagen-res", res)
gc.Dump("agen-r", n)
@@ -562,10 +561,12 @@
// Create a temporary we can take the address of and read.
// The generated code is just going to panic, so it need not
// be terribly efficient. See issue 3670.
+ var n1 gc.Node
gc.Tempname(&n1, n.Type)
gc.Gvardef(&n1)
clearfat(&n1)
+ var n2 gc.Node
regalloc(&n2, gc.Types[gc.Tptr], res)
gins(i386.ALEAL, &n1, &n2)
gmove(&n2, res)
@@ -578,6 +579,7 @@
if n.Op == gc.OREGISTER {
gc.Fatal("agen OREGISTER")
}
+ var n1 gc.Node
regalloc(&n1, gc.Types[gc.Tptr], res)
gins(i386.ALEAL, n, &n1)
gmove(&n1, res)
@@ -586,9 +588,9 @@
}
// let's compute
- nl = n.Left
+ nl := n.Left
- nr = n.Right
+ nr := n.Right
switch n.Op {
default:
@@ -611,19 +613,24 @@
gc.OSLICESTR,
gc.OSLICE3,
gc.OSLICE3ARR:
+ var n1 gc.Node
gc.Tempname(&n1, n.Type)
gc.Cgen_slice(n, &n1)
agen(&n1, res)
case gc.OEFACE:
+ var n1 gc.Node
gc.Tempname(&n1, n.Type)
gc.Cgen_eface(n, &n1)
agen(&n1, res)
case gc.OINDEX:
- p2 = nil // to be patched to panicindex.
- w = uint32(n.Type.Width)
- bounded = gc.Debug['B'] != 0 || n.Bounded
+ p2 := (*obj.Prog)(nil) // to be patched to panicindex.
+ w := uint32(n.Type.Width)
+ bounded := gc.Debug['B'] != 0 || n.Bounded
+ var n3 gc.Node
+ var tmp gc.Node
+ var n1 gc.Node
if nr.Addable != 0 {
// Generate &nl first, and move nr into register.
if !gc.Isconst(nl, gc.CTSTR) {
@@ -656,6 +663,7 @@
}
// For fixed array we really want the pointer in n3.
+ var n2 gc.Node
if gc.Isfixedarray(nl.Type) {
regalloc(&n2, gc.Types[gc.Tptr], &n3)
agen(&n3, &n2)
@@ -673,15 +681,15 @@
if gc.Isconst(nl, gc.CTSTR) {
gc.Fatal("constant string constant index") // front end should handle
}
- v = uint64(gc.Mpgetfix(nr.Val.U.Xval))
+ v := uint64(gc.Mpgetfix(nr.Val.U.Xval))
if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
if gc.Debug['B'] == 0 && !n.Bounded {
- nlen = n3
+ nlen := n3
nlen.Type = gc.Types[gc.TUINT32]
nlen.Xoffset += int64(gc.Array_nel)
gc.Nodconst(&n2, gc.Types[gc.TUINT32], int64(v))
gins(optoas(gc.OCMP, gc.Types[gc.TUINT32]), &nlen, &n2)
- p1 = gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TUINT32]), nil, +1)
+ p1 := gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TUINT32]), nil, +1)
ginscall(gc.Panicindex, -1)
gc.Patch(p1, gc.Pc)
}
@@ -705,7 +713,7 @@
}
// i is in register n1, extend to 32 bits.
- t = gc.Types[gc.TUINT32]
+ t := gc.Types[gc.TUINT32]
if gc.Issigned[n1.Type.Etype] != 0 {
t = gc.Types[gc.TINT32]
@@ -717,8 +725,9 @@
if gc.Debug['B'] == 0 && !n.Bounded {
// check bounds
- t = gc.Types[gc.TUINT32]
+ t := gc.Types[gc.TUINT32]
+ var nlen gc.Node
if gc.Isconst(nl, gc.CTSTR) {
gc.Nodconst(&nlen, t, int64(len(nl.Val.U.Sval.S)))
} else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
@@ -730,7 +739,7 @@
}
gins(optoas(gc.OCMP, t), &n2, &nlen)
- p1 = gc.Gbranch(optoas(gc.OLT, t), nil, +1)
+ p1 := gc.Gbranch(optoas(gc.OLT, t), nil, +1)
if p2 != nil {
gc.Patch(p2, gc.Pc)
}
@@ -740,7 +749,7 @@
if gc.Isconst(nl, gc.CTSTR) {
regalloc(&n3, gc.Types[gc.Tptr], res)
- p1 = gins(i386.ALEAL, nil, &n3)
+ p1 := gins(i386.ALEAL, nil, &n3)
gc.Datastring(nl.Val.U.Sval.S, &p1.From)
p1.From.Scale = 1
p1.From.Index = n2.Val.U.Reg
@@ -763,7 +772,7 @@
} else // nothing to do
if w == 1 || w == 2 || w == 4 || w == 8 {
// LEAL (n3)(n2*w), n3
- p1 = gins(i386.ALEAL, &n2, &n3)
+ p1 := gins(i386.ALEAL, &n2, &n3)
p1.From.Scale = int8(w)
p1.From.Type = obj.TYPE_MEM
@@ -795,6 +804,7 @@
cgen(n.Heapaddr, res)
if n.Xoffset != 0 {
+ var n1 gc.Node
gc.Nodconst(&n1, gc.Types[gc.Tptr], n.Xoffset)
gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n1, res)
}
@@ -806,18 +816,20 @@
case gc.ODOT:
agen(nl, res)
if n.Xoffset != 0 {
+ var n1 gc.Node
gc.Nodconst(&n1, gc.Types[gc.Tptr], n.Xoffset)
gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n1, res)
}
case gc.ODOTPTR:
- t = nl.Type
+ t := nl.Type
if gc.Isptr[t.Etype] == 0 {
gc.Fatal("agen: not ptr %v", gc.Nconv(n, 0))
}
cgen(nl, res)
gc.Cgen_checknil(res)
if n.Xoffset != 0 {
+ var n1 gc.Node
gc.Nodconst(&n1, gc.Types[gc.Tptr], n.Xoffset)
gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n1, res)
}
@@ -834,10 +846,6 @@
* The generated code checks that the result is not *nil.
*/
func igen(n *gc.Node, a *gc.Node, res *gc.Node) {
- var fp *gc.Type
- var flist gc.Iter
- var n1 gc.Node
-
if gc.Debug['g'] != 0 {
gc.Dump("\nigen-n", n)
}
@@ -873,6 +881,7 @@
gc.OCALLFUNC,
gc.OCALLMETH,
gc.OCALLINTER:
+ var n1 gc.Node
igen(n.Left, &n1, res)
regalloc(a, gc.Types[gc.Tptr], &n1)
@@ -904,7 +913,8 @@
cgen_callinter(n, nil, 0)
}
- fp = gc.Structfirst(&flist, gc.Getoutarg(n.Left.Type))
+ var flist gc.Iter
+ fp := gc.Structfirst(&flist, gc.Getoutarg(n.Left.Type))
*a = gc.Node{}
a.Op = gc.OINDREG
a.Val.U.Reg = i386.REG_SP
@@ -924,6 +934,7 @@
if gc.Isptr[n.Left.Type.Etype] == 0 {
igen(n.Left, a, res)
} else {
+ var n1 gc.Node
igen(n.Left, &n1, res)
gc.Cgen_checknil(&n1)
regalloc(a, gc.Types[gc.Tptr], res)
@@ -946,6 +957,7 @@
if res != nil && res.Op == gc.OREGISTER {
reg[res.Val.U.Reg]--
}
+ var n1 gc.Node
gc.Tempname(&n1, gc.Types[gc.Tptr])
agen(n, &n1)
if res != nil && res.Op == gc.OREGISTER {
@@ -962,17 +974,6 @@
* if(n == true) goto to;
*/
func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
- var et int
- var a int
- var nl *gc.Node
- var nr *gc.Node
- var r *gc.Node
- var n1 gc.Node
- var n2 gc.Node
- var tmp gc.Node
- var p1 *obj.Prog
- var p2 *obj.Prog
-
if gc.Debug['g'] != 0 {
gc.Dump("\nbgen", n)
}
@@ -992,7 +993,7 @@
}
}
- et = int(n.Type.Etype)
+ et := int(n.Type.Etype)
if et != gc.TBOOL {
gc.Yyerror("cgen: bad type %v for %v", gc.Tconv(n.Type, 0), gc.Oconv(int(n.Op), 0))
gc.Patch(gins(obj.AEND, nil, nil), to)
@@ -1006,8 +1007,8 @@
}
}
- nl = n.Left
- nr = nil
+ nl := n.Left
+ nr := (*gc.Node)(nil)
if nl != nil && gc.Isfloat[nl.Type.Etype] != 0 {
bgen_float(n, bool2int(true_), likely, to)
@@ -1029,9 +1030,10 @@
if n.Addable == 0 {
goto def
}
+ var n1 gc.Node
gc.Nodconst(&n1, n.Type, 0)
gins(optoas(gc.OCMP, n.Type), n, &n1)
- a = i386.AJNE
+ a := i386.AJNE
if !true_ {
a = i386.AJEQ
}
@@ -1041,8 +1043,8 @@
case gc.OANDAND,
gc.OOROR:
if (n.Op == gc.OANDAND) == true_ {
- p1 = gc.Gbranch(obj.AJMP, nil, 0)
- p2 = gc.Gbranch(obj.AJMP, nil, 0)
+ p1 := gc.Gbranch(obj.AJMP, nil, 0)
+ p2 := gc.Gbranch(obj.AJMP, nil, 0)
gc.Patch(p1, gc.Pc)
bgen(n.Left, !true_, -likely, p2)
bgen(n.Right, !true_, -likely, p2)
@@ -1086,7 +1088,7 @@
gc.OGT,
gc.OLE,
gc.OGE:
- a = int(n.Op)
+ a := int(n.Op)
if !true_ {
a = gc.Brcom(a)
true_ = !true_
@@ -1095,7 +1097,7 @@
// make simplest on right
if nl.Op == gc.OLITERAL || (nl.Ullman < nr.Ullman && nl.Ullman < gc.UINF) {
a = gc.Brrev(a)
- r = nl
+ r := nl
nl = nr
nr = r
}
@@ -1108,9 +1110,11 @@
}
a = optoas(a, gc.Types[gc.Tptr])
+ var n1 gc.Node
igen(nl, &n1, nil)
n1.Xoffset += int64(gc.Array_array)
n1.Type = gc.Types[gc.Tptr]
+ var tmp gc.Node
gc.Nodconst(&tmp, gc.Types[gc.Tptr], 0)
gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &tmp)
gc.Patch(gc.Gbranch(a, gc.Types[gc.Tptr], likely), to)
@@ -1126,8 +1130,10 @@
}
a = optoas(a, gc.Types[gc.Tptr])
+ var n1 gc.Node
igen(nl, &n1, nil)
n1.Type = gc.Types[gc.Tptr]
+ var tmp gc.Node
gc.Nodconst(&tmp, gc.Types[gc.Tptr], 0)
gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &tmp)
gc.Patch(gc.Gbranch(a, gc.Types[gc.Tptr], likely), to)
@@ -1142,12 +1148,14 @@
if gc.Is64(nr.Type) {
if nl.Addable == 0 || gc.Isconst(nl, gc.CTINT) {
+ var n1 gc.Node
gc.Tempname(&n1, nl.Type)
cgen(nl, &n1)
nl = &n1
}
if nr.Addable == 0 {
+ var n2 gc.Node
gc.Tempname(&n2, nr.Type)
cgen(nr, &n2)
nr = &n2
@@ -1157,19 +1165,23 @@
break
}
+ var n2 gc.Node
if nr.Ullman >= gc.UINF {
if nl.Addable == 0 {
+ var n1 gc.Node
gc.Tempname(&n1, nl.Type)
cgen(nl, &n1)
nl = &n1
}
if nr.Addable == 0 {
+ var tmp gc.Node
gc.Tempname(&tmp, nr.Type)
cgen(nr, &tmp)
nr = &tmp
}
+ var n2 gc.Node
regalloc(&n2, nr.Type, nil)
cgen(nr, &n2)
nr = &n2
@@ -1177,6 +1189,7 @@
}
if nl.Addable == 0 {
+ var n1 gc.Node
gc.Tempname(&n1, nl.Type)
cgen(nl, &n1)
nl = &n1
@@ -1189,6 +1202,7 @@
}
if nr.Addable == 0 {
+ var tmp gc.Node
gc.Tempname(&tmp, nr.Type)
cgen(nr, &tmp)
nr = &tmp
@@ -1211,11 +1225,13 @@
return
def:
+ var n1 gc.Node
regalloc(&n1, n.Type, nil)
cgen(n, &n1)
+ var n2 gc.Node
gc.Nodconst(&n2, n.Type, 0)
gins(optoas(gc.OCMP, n.Type), &n1, &n2)
- a = i386.AJNE
+ a := i386.AJNE
if !true_ {
a = i386.AJEQ
}
@@ -1230,31 +1246,27 @@
* return n's offset from SP.
*/
func stkof(n *gc.Node) int32 {
- var t *gc.Type
- var flist gc.Iter
- var off int32
-
switch n.Op {
case gc.OINDREG:
return int32(n.Xoffset)
case gc.ODOT:
- t = n.Left.Type
+ t := n.Left.Type
if gc.Isptr[t.Etype] != 0 {
break
}
- off = stkof(n.Left)
+ off := stkof(n.Left)
if off == -1000 || off == 1000 {
return off
}
return int32(int64(off) + n.Xoffset)
case gc.OINDEX:
- t = n.Left.Type
+ t := n.Left.Type
if !gc.Isfixedarray(t) {
break
}
- off = stkof(n.Left)
+ off := stkof(n.Left)
if off == -1000 || off == 1000 {
return off
}
@@ -1266,11 +1278,12 @@
case gc.OCALLMETH,
gc.OCALLINTER,
gc.OCALLFUNC:
- t = n.Left.Type
+ t := n.Left.Type
if gc.Isptr[t.Etype] != 0 {
t = t.Type
}
+ var flist gc.Iter
t = gc.Structfirst(&flist, gc.Getoutarg(t))
if t != nil {
return int32(t.Width)
@@ -1287,18 +1300,6 @@
* memmove(&res, &n, w);
*/
func sgen(n *gc.Node, res *gc.Node, w int64) {
- var dst gc.Node
- var src gc.Node
- var tdst gc.Node
- var tsrc gc.Node
- var cx gc.Node
- var c int32
- var q int32
- var odst int32
- var osrc int32
- var l *gc.NodeList
- var p *obj.Prog
-
if gc.Debug['g'] != 0 {
fmt.Printf("\nsgen w=%d\n", w)
gc.Dump("r", n)
@@ -1315,6 +1316,7 @@
if w == 0 {
// evaluate side effects only.
+ var tdst gc.Node
gc.Tempname(&tdst, gc.Types[gc.Tptr])
agen(res, &tdst)
@@ -1325,7 +1327,7 @@
// If copying .args, that's all the results, so record definition sites
// for them for the liveness analysis.
if res.Op == gc.ONAME && res.Sym.Name == ".args" {
- for l = gc.Curfn.Dcl; l != nil; l = l.Next {
+ for l := gc.Curfn.Dcl; l != nil; l = l.Next {
if l.N.Class == gc.PPARAMOUT {
gc.Gvardef(l.N)
}
@@ -1338,15 +1340,16 @@
}
// offset on the stack
- osrc = stkof(n)
+ osrc := stkof(n)
- odst = stkof(res)
+ odst := stkof(res)
if osrc != -1000 && odst != -1000 && (osrc == 1000 || odst == 1000) {
// osrc and odst both on stack, and at least one is in
// an unknown position. Could generate code to test
// for forward/backward copy, but instead just copy
// to a temporary location first.
+ var tsrc gc.Node
gc.Tempname(&tsrc, n.Type)
sgen(n, &tsrc, w)
@@ -1354,10 +1357,14 @@
return
}
+ var dst gc.Node
gc.Nodreg(&dst, gc.Types[gc.Tptr], i386.REG_DI)
+ var src gc.Node
gc.Nodreg(&src, gc.Types[gc.Tptr], i386.REG_SI)
+ var tsrc gc.Node
gc.Tempname(&tsrc, gc.Types[gc.Tptr])
+ var tdst gc.Node
gc.Tempname(&tdst, gc.Types[gc.Tptr])
if n.Addable == 0 {
agen(n, &tsrc)
@@ -1381,8 +1388,8 @@
gmove(&tdst, &dst)
}
- c = int32(w % 4) // bytes
- q = int32(w / 4) // doublewords
+ c := int32(w % 4) // bytes
+ q := int32(w / 4) // doublewords
// if we are copying forward on the stack and
// the src and dst overlap, then reverse direction
@@ -1423,13 +1430,14 @@
gins(i386.AREP, nil, nil) // repeat
gins(i386.AMOVSL, nil, nil) // MOVL *(SI)+,*(DI)+
} else if q >= 4 {
- p = gins(obj.ADUFFCOPY, nil, nil)
+ p := gins(obj.ADUFFCOPY, nil, nil)
p.To.Type = obj.TYPE_ADDR
p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))
// 10 and 128 = magic constants: see ../../runtime/asm_386.s
p.To.Offset = 10 * (128 - int64(q))
} else if !gc.Nacl && c == 0 {
+ var cx gc.Node
gc.Nodreg(&cx, gc.Types[gc.TINT32], i386.REG_CX)
// We don't need the MOVSL side-effect of updating SI and DI,
@@ -1484,23 +1492,16 @@
func componentgen(nr *gc.Node, nl *gc.Node) bool {
var nodl gc.Node
var nodr gc.Node
- var tmp gc.Node
- var t *gc.Type
- var freel int
- var freer int
- var fldcount int64
- var loffset int64
- var roffset int64
- freel = 0
- freer = 0
+ freel := 0
+ freer := 0
switch nl.Type.Etype {
default:
goto no
case gc.TARRAY:
- t = nl.Type
+ t := nl.Type
// Slices are ok.
if gc.Isslice(t) {
@@ -1517,9 +1518,9 @@
// Small structs with non-fat types are ok.
// Zero-sized structs are treated separately elsewhere.
case gc.TSTRUCT:
- fldcount = 0
+ fldcount := int64(0)
- for t = nl.Type.Type; t != nil; t = t.Down {
+ for t := nl.Type.Type; t != nil; t = t.Down {
if gc.Isfat(t.Type) {
goto no
}
@@ -1555,6 +1556,7 @@
}
} else {
// When zeroing, prepare a register containing zero.
+ var tmp gc.Node
gc.Nodconst(&tmp, nl.Type, 0)
regalloc(&nodr, gc.Types[gc.TUINT], nil)
@@ -1576,11 +1578,11 @@
if nl.Op == gc.ONAME {
gc.Gvardef(nl)
}
- t = nl.Type
+ t := nl.Type
if !gc.Isslice(t) {
nodl.Type = t.Type
nodr.Type = nodl.Type
- for fldcount = 0; fldcount < t.Bound; fldcount++ {
+ for fldcount := int64(0); fldcount < t.Bound; fldcount++ {
if nr == nil {
gc.Clearslim(&nodl)
} else {
@@ -1683,8 +1685,8 @@
if nl.Op == gc.ONAME {
gc.Gvardef(nl)
}
- loffset = nodl.Xoffset
- roffset = nodr.Xoffset
+ loffset := nodl.Xoffset
+ roffset := nodr.Xoffset
// funarg structs may not begin at offset zero.
if nl.Type.Etype == gc.TSTRUCT && nl.Type.Funarg != 0 && nl.Type.Type != nil {
@@ -1694,7 +1696,7 @@
roffset -= nr.Type.Type.Width
}
- for t = nl.Type.Type; t != nil; t = t.Down {
+ for t := nl.Type.Type; t != nil; t = t.Down {
nodl.Xoffset = loffset + t.Width
nodl.Type = t.Type
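
The sgen code above decides between forward and backward copies: when source and destination overlap on the stack and a forward copy would run into its own source, the direction is reversed so bytes are not clobbered before they are read. The same decision in plain Go, with a byte slice standing in for the stack frame:

	package main

	import "fmt"

	// overlapCopy copies n bytes from src to dst within one buffer,
	// choosing direction the way sgen does: backward when the ranges
	// overlap and dst lies above src.
	func overlapCopy(buf []byte, dst, src, n int) {
		if dst > src && dst < src+n {
			for i := n - 1; i >= 0; i-- { // backward: the reversed copy
				buf[dst+i] = buf[src+i]
			}
			return
		}
		for i := 0; i < n; i++ { // forward: the common case
			buf[dst+i] = buf[src+i]
		}
	}

	func main() {
		b := []byte("abcdef")
		overlapCopy(b, 2, 0, 4) // overlapping, dst above src
		fmt.Println(string(b))  // ababcd, same as memmove
	}
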
diff --git a/src/cmd/8g/cgen64.go b/src/cmd/8g/cgen64.go
index 1937ae0..0755f0e 100644
--- a/src/cmd/8g/cgen64.go
+++ b/src/cmd/8g/cgen64.go
@@ -16,25 +16,6 @@
* return 1 on success, 0 if op not handled.
*/
func cgen64(n *gc.Node, res *gc.Node) {
- var t1 gc.Node
- var t2 gc.Node
- var ax gc.Node
- var dx gc.Node
- var cx gc.Node
- var ex gc.Node
- var fx gc.Node
- var l *gc.Node
- var r *gc.Node
- var lo1 gc.Node
- var lo2 gc.Node
- var hi1 gc.Node
- var hi2 gc.Node
- var p1 *obj.Prog
- var p2 *obj.Prog
- var v uint64
- var lv uint32
- var hv uint32
-
if res.Op != gc.OINDREG && res.Op != gc.ONAME {
gc.Dump("n", n)
gc.Dump("res", res)
@@ -47,6 +28,8 @@
case gc.OMINUS:
cgen(n.Left, res)
+ var hi1 gc.Node
+ var lo1 gc.Node
split64(res, &lo1, &hi1)
gins(i386.ANEGL, nil, &lo1)
gins(i386.AADCL, ncon(0), &hi1)
@@ -56,6 +39,8 @@
case gc.OCOM:
cgen(n.Left, res)
+ var lo1 gc.Node
+ var hi1 gc.Node
split64(res, &lo1, &hi1)
gins(i386.ANOTL, nil, &lo1)
gins(i386.ANOTL, nil, &hi1)
@@ -76,27 +61,36 @@
break
}
- l = n.Left
- r = n.Right
+ l := n.Left
+ r := n.Right
if l.Addable == 0 {
+ var t1 gc.Node
gc.Tempname(&t1, l.Type)
cgen(l, &t1)
l = &t1
}
if r != nil && r.Addable == 0 {
+ var t2 gc.Node
gc.Tempname(&t2, r.Type)
cgen(r, &t2)
r = &t2
}
+ var ax gc.Node
gc.Nodreg(&ax, gc.Types[gc.TINT32], i386.REG_AX)
+ var cx gc.Node
gc.Nodreg(&cx, gc.Types[gc.TINT32], i386.REG_CX)
+ var dx gc.Node
gc.Nodreg(&dx, gc.Types[gc.TINT32], i386.REG_DX)
// Setup for binary operation.
+ var hi1 gc.Node
+ var lo1 gc.Node
split64(l, &lo1, &hi1)
+ var lo2 gc.Node
+ var hi2 gc.Node
if gc.Is64(r.Type) {
split64(r, &lo2, &hi2)
}
@@ -121,8 +115,10 @@
// let's call the next two EX and FX.
case gc.OMUL:
+ var ex gc.Node
regalloc(&ex, gc.Types[gc.TPTR32], nil)
+ var fx gc.Node
regalloc(&fx, gc.Types[gc.TPTR32], nil)
// load args into DX:AX and EX:CX.
@@ -136,9 +132,9 @@
gins(i386.AMOVL, &dx, &fx)
gins(i386.AORL, &ex, &fx)
- p1 = gc.Gbranch(i386.AJNE, nil, 0)
+ p1 := gc.Gbranch(i386.AJNE, nil, 0)
gins(i386.AMULL, &cx, nil) // implicit &ax
- p2 = gc.Gbranch(obj.AJMP, nil, 0)
+ p2 := gc.Gbranch(obj.AJMP, nil, 0)
gc.Patch(p1, gc.Pc)
// full 64x64 -> 64, from 32x32 -> 64.
@@ -166,7 +162,7 @@
// shld hi:lo, c
// shld lo:t, c
case gc.OLROT:
- v = uint64(gc.Mpgetfix(r.Val.U.Xval))
+ v := uint64(gc.Mpgetfix(r.Val.U.Xval))
if v >= 32 {
// reverse during load to do the first 32 bits of rotate
@@ -183,7 +179,7 @@
} else // done
{
gins(i386.AMOVL, &dx, &cx)
- p1 = gins(i386.ASHLL, ncon(uint32(v)), &dx)
+ p1 := gins(i386.ASHLL, ncon(uint32(v)), &dx)
p1.From.Index = i386.REG_AX // double-width shift
p1.From.Scale = 0
p1 = gins(i386.ASHLL, ncon(uint32(v)), &ax)
@@ -193,7 +189,7 @@
case gc.OLSH:
if r.Op == gc.OLITERAL {
- v = uint64(gc.Mpgetfix(r.Val.U.Xval))
+ v := uint64(gc.Mpgetfix(r.Val.U.Xval))
if v >= 64 {
if gc.Is64(r.Type) {
splitclean()
@@ -226,7 +222,7 @@
gins(i386.AMOVL, &lo1, &ax)
gins(i386.AMOVL, &hi1, &dx)
- p1 = gins(i386.ASHLL, ncon(uint32(v)), &dx)
+ p1 := gins(i386.ASHLL, ncon(uint32(v)), &dx)
p1.From.Index = i386.REG_AX // double-width shift
p1.From.Scale = 0
gins(i386.ASHLL, ncon(uint32(v)), &ax)
@@ -240,7 +236,7 @@
// load shift value into register.
// if high bits are set, zero value.
- p1 = nil
+ p1 := (*obj.Prog)(nil)
if gc.Is64(r.Type) {
gins(i386.ACMPL, &hi2, ncon(0))
@@ -254,7 +250,7 @@
// if shift count is >=64, zero value
gins(i386.ACMPL, &cx, ncon(64))
- p2 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
+ p2 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
if p1 != nil {
gc.Patch(p1, gc.Pc)
}
@@ -282,7 +278,7 @@
case gc.ORSH:
if r.Op == gc.OLITERAL {
- v = uint64(gc.Mpgetfix(r.Val.U.Xval))
+ v := uint64(gc.Mpgetfix(r.Val.U.Xval))
if v >= 64 {
if gc.Is64(r.Type) {
splitclean()
@@ -327,7 +323,7 @@
gins(i386.AMOVL, &lo1, &ax)
gins(i386.AMOVL, &hi1, &dx)
- p1 = gins(i386.ASHRL, ncon(uint32(v)), &ax)
+ p1 := gins(i386.ASHRL, ncon(uint32(v)), &ax)
p1.From.Index = i386.REG_DX // double-width shift
p1.From.Scale = 0
gins(optoas(gc.ORSH, hi1.Type), ncon(uint32(v)), &dx)
@@ -341,7 +337,7 @@
// load shift value into register.
// if high bits are set, zero value.
- p1 = nil
+ p1 := (*obj.Prog)(nil)
if gc.Is64(r.Type) {
gins(i386.ACMPL, &hi2, ncon(0))
@@ -355,7 +351,7 @@
// if shift count is >=64, zero or sign-extend value
gins(i386.ACMPL, &cx, ncon(64))
- p2 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
+ p2 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
if p1 != nil {
gc.Patch(p1, gc.Pc)
}
@@ -404,9 +400,9 @@
if lo2.Op == gc.OLITERAL {
// special cases for constants.
- lv = uint32(gc.Mpgetfix(lo2.Val.U.Xval))
+ lv := uint32(gc.Mpgetfix(lo2.Val.U.Xval))
- hv = uint32(gc.Mpgetfix(hi2.Val.U.Xval))
+ hv := uint32(gc.Mpgetfix(hi2.Val.U.Xval))
splitclean() // right side
split64(res, &lo2, &hi2)
switch n.Op {
@@ -518,15 +514,13 @@
var lo2 gc.Node
var hi2 gc.Node
var rr gc.Node
- var br *obj.Prog
- var t *gc.Type
split64(nl, &lo1, &hi1)
split64(nr, &lo2, &hi2)
// compare most significant word;
// if they differ, we're done.
- t = hi1.Type
+ t := hi1.Type
if nl.Op == gc.OLITERAL || nr.Op == gc.OLITERAL {
gins(i386.ACMPL, &hi1, &hi2)
@@ -537,7 +531,7 @@
regfree(&rr)
}
- br = nil
+ br := (*obj.Prog)(nil)
switch op {
default:
gc.Fatal("cmp64 %v %v", gc.Oconv(int(op), 0), gc.Tconv(t, 0))
diff --git a/src/cmd/8g/ggen.go b/src/cmd/8g/ggen.go
index f72beda..ca2a79f 100644
--- a/src/cmd/8g/ggen.go
+++ b/src/cmd/8g/ggen.go
@@ -11,30 +11,24 @@
import "cmd/internal/gc"
func defframe(ptxt *obj.Prog) {
- var frame uint32
- var ax uint32
- var p *obj.Prog
- var lo int64
- var hi int64
- var l *gc.NodeList
var n *gc.Node
// fill in argument size, stack size
ptxt.To.Type = obj.TYPE_TEXTSIZE
ptxt.To.U.Argsize = int32(gc.Rnd(gc.Curfn.Type.Argwid, int64(gc.Widthptr)))
- frame = uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
+ frame := uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
ptxt.To.Offset = int64(frame)
// insert code to zero ambiguously live variables
// so that the garbage collector only sees initialized values
// when it looks for pointers.
- p = ptxt
+ p := ptxt
- hi = 0
- lo = hi
- ax = 0
- for l = gc.Curfn.Dcl; l != nil; l = l.Next {
+ hi := int64(0)
+ lo := hi
+ ax := uint32(0)
+ for l := gc.Curfn.Dcl; l != nil; l = l.Next {
n = l.N
if n.Needzero == 0 {
continue
@@ -66,10 +60,7 @@
}
func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, ax *uint32) *obj.Prog {
- var cnt int64
- var i int64
-
- cnt = hi - lo
+ cnt := hi - lo
if cnt == 0 {
return p
}
@@ -79,7 +70,7 @@
}
if cnt <= int64(4*gc.Widthreg) {
- for i = 0; i < cnt; i += int64(gc.Widthreg) {
+ for i := int64(0); i < cnt; i += int64(gc.Widthreg) {
p = appendpp(p, i386.AMOVL, obj.TYPE_REG, i386.REG_AX, 0, obj.TYPE_MEM, i386.REG_SP, frame+lo+i)
}
} else if !gc.Nacl && cnt <= int64(128*gc.Widthreg) {
@@ -97,8 +88,7 @@
}
func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int64, ttype int, treg int, toffset int64) *obj.Prog {
- var q *obj.Prog
- q = gc.Ctxt.NewProg()
+ q := gc.Ctxt.NewProg()
gc.Clearp(q)
q.As = int16(as)
q.Lineno = p.Lineno
@@ -114,27 +104,20 @@
}
func clearfat(nl *gc.Node) {
- var w uint32
- var c uint32
- var q uint32
- var n1 gc.Node
- var z gc.Node
- var p *obj.Prog
-
/* clear a fat object */
if gc.Debug['g'] != 0 {
gc.Dump("\nclearfat", nl)
}
- w = uint32(nl.Type.Width)
+ w := uint32(nl.Type.Width)
// Avoid taking the address for simple enough types.
if componentgen(nil, nl) {
return
}
- c = w % 4 // bytes
- q = w / 4 // quads
+ c := w % 4 // bytes
+ q := w / 4 // quads
if q < 4 {
// Write sequence of MOV 0, off(base) instead of using STOSL.
@@ -143,10 +126,12 @@
// than the unrolled STOSL loop.
// NOTE: Must use agen, not igen, so that optimizer sees address
// being taken. We are not writing on field boundaries.
+ var n1 gc.Node
regalloc(&n1, gc.Types[gc.Tptr], nil)
agen(nl, &n1)
n1.Op = gc.OINDREG
+ var z gc.Node
gc.Nodconst(&z, gc.Types[gc.TUINT64], 0)
for {
tmp14 := q
@@ -175,6 +160,7 @@
return
}
+ var n1 gc.Node
gc.Nodreg(&n1, gc.Types[gc.Tptr], i386.REG_DI)
agen(nl, &n1)
gconreg(i386.AMOVL, 0, i386.REG_AX)
@@ -184,7 +170,7 @@
gins(i386.AREP, nil, nil) // repeat
gins(i386.ASTOSL, nil, nil) // STOL AL,*(DI)+
} else if q >= 4 {
- p = gins(obj.ADUFFZERO, nil, nil)
+ p := gins(obj.ADUFFZERO, nil, nil)
p.To.Type = obj.TYPE_ADDR
p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
@@ -213,15 +199,8 @@
* proc=3 normal call to C pointer (not Go func value)
*/
func ginscall(f *gc.Node, proc int) {
- var p *obj.Prog
- var reg gc.Node
- var r1 gc.Node
- var con gc.Node
- var stk gc.Node
- var extra int32
-
if f.Type != nil {
- extra = 0
+ extra := int32(0)
if proc == 1 || proc == 2 {
extra = 2 * int32(gc.Widthptr)
}
@@ -245,12 +224,13 @@
// x86 NOP 0x90 is really XCHG AX, AX; use that description
// because the NOP pseudo-instruction will be removed by
// the linker.
+ var reg gc.Node
gc.Nodreg(&reg, gc.Types[gc.TINT], i386.REG_AX)
gins(i386.AXCHGL, &reg, &reg)
}
- p = gins(obj.ACALL, nil, f)
+ p := gins(obj.ACALL, nil, f)
gc.Afunclit(&p.To, f)
if proc == -1 || gc.Noreturn(p) {
gins(obj.AUNDEF, nil, nil)
@@ -258,7 +238,9 @@
break
}
+ var reg gc.Node
gc.Nodreg(&reg, gc.Types[gc.Tptr], i386.REG_DX)
+ var r1 gc.Node
gc.Nodreg(&r1, gc.Types[gc.Tptr], i386.REG_BX)
gmove(f, &reg)
reg.Op = gc.OINDREG
@@ -271,13 +253,14 @@
case 1, // call in new proc (go)
2: // deferred call (defer)
- stk = gc.Node{}
+ stk := gc.Node{}
stk.Op = gc.OINDREG
stk.Val.U.Reg = i386.REG_SP
stk.Xoffset = 0
// size of arguments at 0(SP)
+ var con gc.Node
gc.Nodconst(&con, gc.Types[gc.TINT32], int64(gc.Argsize(f.Type)))
gins(i386.AMOVL, &con, &stk)
@@ -293,9 +276,10 @@
ginscall(gc.Deferproc, 0)
}
if proc == 2 {
+ var reg gc.Node
gc.Nodreg(&reg, gc.Types[gc.TINT32], i386.REG_AX)
gins(i386.ATESTL, &reg, &reg)
- p = gc.Gbranch(i386.AJEQ, nil, +1)
+ p := gc.Gbranch(i386.AJEQ, nil, +1)
cgen_ret(nil)
gc.Patch(p, gc.Pc)
}
@@ -307,20 +291,12 @@
* generate res = n.
*/
func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
- var i *gc.Node
- var f *gc.Node
- var tmpi gc.Node
- var nodi gc.Node
- var nodo gc.Node
- var nodr gc.Node
- var nodsp gc.Node
-
- i = n.Left
+ i := n.Left
if i.Op != gc.ODOTINTER {
gc.Fatal("cgen_callinter: not ODOTINTER %v", gc.Oconv(int(i.Op), 0))
}
- f = i.Right // field
+ f := i.Right // field
if f.Op != gc.ONAME {
gc.Fatal("cgen_callinter: not ONAME %v", gc.Oconv(int(f.Op), 0))
}
@@ -328,6 +304,7 @@
i = i.Left // interface
if i.Addable == 0 {
+ var tmpi gc.Node
gc.Tempname(&tmpi, i.Type)
cgen(i, &tmpi)
i = &tmpi
@@ -337,8 +314,10 @@
// i is now addable, prepare an indirected
// register to hold its address.
+ var nodi gc.Node
igen(i, &nodi, res) // REG = &inter
+ var nodsp gc.Node
gc.Nodindreg(&nodsp, gc.Types[gc.Tptr], i386.REG_SP)
nodsp.Xoffset = 0
@@ -349,6 +328,7 @@
nodi.Xoffset += int64(gc.Widthptr)
cgen(&nodi, &nodsp) // {0 or 8}(SP) = 4(REG) -- i.data
+ var nodo gc.Node
regalloc(&nodo, gc.Types[gc.Tptr], res)
nodi.Type = gc.Types[gc.Tptr]
@@ -356,6 +336,7 @@
cgen(&nodi, &nodo) // REG = 0(REG) -- i.tab
regfree(&nodi)
+ var nodr gc.Node
regalloc(&nodr, gc.Types[gc.Tptr], &nodo)
if n.Left.Xoffset == gc.BADWIDTH {
gc.Fatal("cgen_callinter: badwidth")
@@ -387,14 +368,11 @@
* proc=2 defer call save away stack
*/
func cgen_call(n *gc.Node, proc int) {
- var t *gc.Type
- var nod gc.Node
- var afun gc.Node
-
if n == nil {
return
}
+ var afun gc.Node
if n.Left.Ullman >= gc.UINF {
// if name involves a fn call
// precompute the address of the fn
@@ -404,10 +382,11 @@
}
gc.Genlist(n.List) // assign the args
- t = n.Left.Type
+ t := n.Left.Type
// call tempname pointer
if n.Left.Ullman >= gc.UINF {
+ var nod gc.Node
regalloc(&nod, gc.Types[gc.Tptr], nil)
gc.Cgen_as(&nod, &afun)
nod.Type = t
@@ -418,6 +397,7 @@
// call pointer
if n.Left.Op != gc.ONAME || n.Left.Class != gc.PFUNC {
+ var nod gc.Node
regalloc(&nod, gc.Types[gc.Tptr], nil)
gc.Cgen_as(&nod, n.Left)
nod.Type = t
@@ -438,22 +418,18 @@
* res = return value from call.
*/
func cgen_callret(n *gc.Node, res *gc.Node) {
- var nod gc.Node
- var fp *gc.Type
- var t *gc.Type
- var flist gc.Iter
-
- t = n.Left.Type
+ t := n.Left.Type
if t.Etype == gc.TPTR32 || t.Etype == gc.TPTR64 {
t = t.Type
}
- fp = gc.Structfirst(&flist, gc.Getoutarg(t))
+ var flist gc.Iter
+ fp := gc.Structfirst(&flist, gc.Getoutarg(t))
if fp == nil {
gc.Fatal("cgen_callret: nil")
}
- nod = gc.Node{}
+ nod := gc.Node{}
nod.Op = gc.OINDREG
nod.Val.U.Reg = i386.REG_SP
nod.Addable = 1
@@ -469,23 +445,18 @@
* res = &return value from call.
*/
func cgen_aret(n *gc.Node, res *gc.Node) {
- var nod1 gc.Node
- var nod2 gc.Node
- var fp *gc.Type
- var t *gc.Type
- var flist gc.Iter
-
- t = n.Left.Type
+ t := n.Left.Type
if gc.Isptr[t.Etype] != 0 {
t = t.Type
}
- fp = gc.Structfirst(&flist, gc.Getoutarg(t))
+ var flist gc.Iter
+ fp := gc.Structfirst(&flist, gc.Getoutarg(t))
if fp == nil {
gc.Fatal("cgen_aret: nil")
}
- nod1 = gc.Node{}
+ nod1 := gc.Node{}
nod1.Op = gc.OINDREG
nod1.Val.U.Reg = i386.REG_SP
nod1.Addable = 1
@@ -494,6 +465,7 @@
nod1.Type = fp.Type
if res.Op != gc.OREGISTER {
+ var nod2 gc.Node
regalloc(&nod2, gc.Types[gc.Tptr], res)
gins(i386.ALEAL, &nod1, &nod2)
gins(i386.AMOVL, &nod2, res)
@@ -508,8 +480,6 @@
* n->left is assignments to return values.
*/
func cgen_ret(n *gc.Node) {
- var p *obj.Prog
-
if n != nil {
gc.Genlist(n.List) // copy out args
}
@@ -517,7 +487,7 @@
ginscall(gc.Deferreturn, 0)
}
gc.Genlist(gc.Curfn.Exit)
- p = gins(obj.ARET, nil, nil)
+ p := gins(obj.ARET, nil, nil)
if n != nil && n.Op == gc.ORETJMP {
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
@@ -536,19 +506,6 @@
* according to op.
*/
func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node, ax *gc.Node, dx *gc.Node) {
- var check int
- var n1 gc.Node
- var t1 gc.Node
- var t2 gc.Node
- var t3 gc.Node
- var t4 gc.Node
- var n4 gc.Node
- var nz gc.Node
- var t *gc.Type
- var t0 *gc.Type
- var p1 *obj.Prog
- var p2 *obj.Prog
-
// Have to be careful about handling
// most negative int divided by -1 correctly.
// The hardware will trap.
@@ -557,10 +514,10 @@
// Easiest way to avoid for int8, int16: use int32.
// For int32 and int64, use explicit test.
// Could use int64 hw for int32.
- t = nl.Type
+ t := nl.Type
- t0 = t
- check = 0
+ t0 := t
+ check := 0
if gc.Issigned[t.Etype] != 0 {
check = 1
if gc.Isconst(nl, gc.CTINT) && gc.Mpgetfix(nl.Val.U.Xval) != -1<<uint64(t.Width*8-1) {
@@ -579,10 +536,14 @@
check = 0
}
+ var t1 gc.Node
gc.Tempname(&t1, t)
+ var t2 gc.Node
gc.Tempname(&t2, t)
if t0 != t {
+ var t3 gc.Node
gc.Tempname(&t3, t0)
+ var t4 gc.Node
gc.Tempname(&t4, t0)
cgen(nl, &t3)
cgen(nr, &t4)
@@ -596,6 +557,7 @@
cgen(nr, &t2)
}
+ var n1 gc.Node
if !gc.Samereg(ax, res) && !gc.Samereg(dx, res) {
regalloc(&n1, t, res)
} else {
@@ -603,7 +565,8 @@
}
gmove(&t2, &n1)
gmove(&t1, ax)
- p2 = nil
+ p2 := (*obj.Prog)(nil)
+ var n4 gc.Node
if gc.Nacl {
// Native Client does not relay the divide-by-zero trap
// to the executing program, so we must insert a check
@@ -611,7 +574,7 @@
gc.Nodconst(&n4, t, 0)
gins(optoas(gc.OCMP, t), &n1, &n4)
- p1 = gc.Gbranch(optoas(gc.ONE, t), nil, +1)
+ p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
if panicdiv == nil {
panicdiv = gc.Sysfunc("panicdivide")
}
@@ -622,7 +585,7 @@
if check != 0 {
gc.Nodconst(&n4, t, -1)
gins(optoas(gc.OCMP, t), &n1, &n4)
- p1 = gc.Gbranch(optoas(gc.ONE, t), nil, +1)
+ p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
if op == gc.ODIV {
// a / (-1) is -a.
gins(optoas(gc.OMINUS, t), nil, ax)
@@ -640,6 +603,7 @@
}
if gc.Issigned[t.Etype] == 0 {
+ var nz gc.Node
gc.Nodconst(&nz, t, 0)
gmove(&nz, dx)
} else {
@@ -659,9 +623,7 @@
}
func savex(dr int, x *gc.Node, oldx *gc.Node, res *gc.Node, t *gc.Type) {
- var r int
-
- r = int(reg[dr])
+ r := int(reg[dr])
gc.Nodreg(x, gc.Types[gc.TINT32], dr)
// save current ax and dx if they are live
@@ -691,22 +653,21 @@
* res = nl % nr
*/
func cgen_div(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
- var ax gc.Node
- var dx gc.Node
- var oldax gc.Node
- var olddx gc.Node
- var t *gc.Type
-
if gc.Is64(nl.Type) {
gc.Fatal("cgen_div %v", gc.Tconv(nl.Type, 0))
}
+ var t *gc.Type
if gc.Issigned[nl.Type.Etype] != 0 {
t = gc.Types[gc.TINT32]
} else {
t = gc.Types[gc.TUINT32]
}
+ var ax gc.Node
+ var oldax gc.Node
savex(i386.REG_AX, &ax, &oldax, res, t)
+ var olddx gc.Node
+ var dx gc.Node
savex(i386.REG_DX, &dx, &olddx, res, t)
dodiv(op, nl, nr, res, &ax, &dx)
restx(&dx, &olddx)
@@ -719,33 +680,22 @@
* res = nl >> nr
*/
func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
- var n1 gc.Node
- var n2 gc.Node
- var nt gc.Node
- var cx gc.Node
- var oldcx gc.Node
- var hi gc.Node
- var lo gc.Node
- var a int
- var w int
- var p1 *obj.Prog
- var p2 *obj.Prog
- var sc uint64
-
if nl.Type.Width > 4 {
gc.Fatal("cgen_shift %v", gc.Tconv(nl.Type, 0))
}
- w = int(nl.Type.Width * 8)
+ w := int(nl.Type.Width * 8)
- a = optoas(op, nl.Type)
+ a := optoas(op, nl.Type)
if nr.Op == gc.OLITERAL {
+ var n2 gc.Node
gc.Tempname(&n2, nl.Type)
cgen(nl, &n2)
+ var n1 gc.Node
regalloc(&n1, nl.Type, res)
gmove(&n2, &n1)
- sc = uint64(gc.Mpgetfix(nr.Val.U.Xval))
+ sc := uint64(gc.Mpgetfix(nr.Val.U.Xval))
if sc >= uint64(nl.Type.Width*8) {
// large shift gets 2 shifts by width-1
gins(a, ncon(uint32(w)-1), &n1)
@@ -759,13 +709,16 @@
return
}
- oldcx = gc.Node{}
+ oldcx := gc.Node{}
+ var cx gc.Node
gc.Nodreg(&cx, gc.Types[gc.TUINT32], i386.REG_CX)
if reg[i386.REG_CX] > 1 && !gc.Samereg(&cx, res) {
gc.Tempname(&oldcx, gc.Types[gc.TUINT32])
gmove(&cx, &oldcx)
}
+ var n1 gc.Node
+ var nt gc.Node
if nr.Type.Width > 4 {
gc.Tempname(&nt, nr.Type)
n1 = nt
@@ -774,6 +727,7 @@
regalloc(&n1, nr.Type, &n1) // to hold the shift type in CX
}
+ var n2 gc.Node
if gc.Samereg(&cx, res) {
regalloc(&n2, nl.Type, nil)
} else {
@@ -794,20 +748,25 @@
gc.Nodreg(&n1, gc.Types[gc.TUINT32], i386.REG_CX)
regalloc(&n1, gc.Types[gc.TUINT32], &n1) // to hold the shift type in CX
+ var lo gc.Node
+ var hi gc.Node
split64(&nt, &lo, &hi)
gmove(&lo, &n1)
splitclean()
}
} else {
+ var p1 *obj.Prog
if nr.Type.Width > 4 {
// delayed reg alloc
gc.Nodreg(&n1, gc.Types[gc.TUINT32], i386.REG_CX)
regalloc(&n1, gc.Types[gc.TUINT32], &n1) // to hold the shift type in CX
+ var lo gc.Node
+ var hi gc.Node
split64(&nt, &lo, &hi)
gmove(&lo, &n1)
gins(optoas(gc.OCMP, gc.Types[gc.TUINT32]), &hi, ncon(0))
- p2 = gc.Gbranch(optoas(gc.ONE, gc.Types[gc.TUINT32]), nil, +1)
+ p2 := gc.Gbranch(optoas(gc.ONE, gc.Types[gc.TUINT32]), nil, +1)
gins(optoas(gc.OCMP, gc.Types[gc.TUINT32]), &n1, ncon(uint32(w)))
p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
splitclean()
@@ -845,15 +804,8 @@
* we do a full-width multiplication and truncate afterwards.
*/
func cgen_bmul(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
- var n1 gc.Node
- var n2 gc.Node
- var nt gc.Node
- var tmp *gc.Node
- var t *gc.Type
- var a int
-
// copy from byte to full registers
- t = gc.Types[gc.TUINT32]
+ t := gc.Types[gc.TUINT32]
if gc.Issigned[nl.Type.Etype] != 0 {
t = gc.Types[gc.TINT32]
@@ -861,18 +813,21 @@
// largest ullman on left.
if nl.Ullman < nr.Ullman {
- tmp = nl
+ tmp := nl
nl = nr
nr = tmp
}
+ var nt gc.Node
gc.Tempname(&nt, nl.Type)
cgen(nl, &nt)
+ var n1 gc.Node
regalloc(&n1, t, res)
cgen(nr, &n1)
+ var n2 gc.Node
regalloc(&n2, t, nil)
gmove(&nt, &n2)
- a = optoas(op, t)
+ a := optoas(op, t)
gins(a, &n2, &n1)
regfree(&n2)
gmove(&n1, res)
@@ -884,15 +839,13 @@
* res = (nl*nr) >> width
*/
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
- var t *gc.Type
- var a int
var n1 gc.Node
var n2 gc.Node
var ax gc.Node
var dx gc.Node
- t = nl.Type
- a = optoas(gc.OHMUL, t)
+ t := nl.Type
+ a := optoas(gc.OHMUL, t)
// gen nl in n1.
gc.Tempname(&n1, t)
@@ -927,24 +880,17 @@
* generate floating-point operation.
*/
func cgen_float(n *gc.Node, res *gc.Node) {
- var nl *gc.Node
- var n1 gc.Node
- var n2 gc.Node
- var p1 *obj.Prog
- var p2 *obj.Prog
- var p3 *obj.Prog
-
- nl = n.Left
+ nl := n.Left
switch n.Op {
case gc.OEQ,
gc.ONE,
gc.OLT,
gc.OLE,
gc.OGE:
- p1 = gc.Gbranch(obj.AJMP, nil, 0)
- p2 = gc.Pc
+ p1 := gc.Gbranch(obj.AJMP, nil, 0)
+ p2 := gc.Pc
gmove(gc.Nodbool(true), res)
- p3 = gc.Gbranch(obj.AJMP, nil, 0)
+ p3 := gc.Gbranch(obj.AJMP, nil, 0)
gc.Patch(p1, gc.Pc)
bgen(n, true, 0, p2)
gmove(gc.Nodbool(false), res)
@@ -961,7 +907,9 @@
return
}
+ var n2 gc.Node
gc.Tempname(&n2, n.Type)
+ var n1 gc.Node
mgen(nl, &n1, res)
gmove(&n1, &n2)
gmove(&n2, res)
@@ -980,11 +928,9 @@
func cgen_float387(n *gc.Node, res *gc.Node) {
var f0 gc.Node
var f1 gc.Node
- var nl *gc.Node
- var nr *gc.Node
- nl = n.Left
- nr = n.Right
+ nl := n.Left
+ nr := n.Right
gc.Nodreg(&f0, nl.Type, i386.REG_F0)
gc.Nodreg(&f1, n.Type, i386.REG_F0+1)
if nr != nil {
@@ -1024,16 +970,10 @@
}
func cgen_floatsse(n *gc.Node, res *gc.Node) {
- var nl *gc.Node
- var nr *gc.Node
- var r *gc.Node
- var n1 gc.Node
- var n2 gc.Node
- var nt gc.Node
var a int
- nl = n.Left
- nr = n.Right
+ nl := n.Left
+ nr := n.Right
switch n.Op {
default:
gc.Dump("cgen_floatsse", n)
@@ -1065,16 +1005,19 @@
sbop: // symmetric binary
if nl.Ullman < nr.Ullman || nl.Op == gc.OLITERAL {
- r = nl
+ r := nl
nl = nr
nr = r
}
abop: // asymmetric binary
if nl.Ullman >= nr.Ullman {
+ var nt gc.Node
gc.Tempname(&nt, nl.Type)
cgen(nl, &nt)
+ var n2 gc.Node
mgen(nr, &n2, nil)
+ var n1 gc.Node
regalloc(&n1, nl.Type, res)
gmove(&nt, &n1)
gins(a, &n2, &n1)
@@ -1082,8 +1025,10 @@
regfree(&n1)
mfree(&n2)
} else {
+ var n2 gc.Node
regalloc(&n2, nr.Type, res)
cgen(nr, &n2)
+ var n1 gc.Node
regalloc(&n1, nl.Type, nil)
cgen(nl, &n1)
gins(a, &n2, &n1)
@@ -1096,29 +1041,14 @@
}
func bgen_float(n *gc.Node, true_ int, likely int, to *obj.Prog) {
- var et int
- var a int
- var nl *gc.Node
- var nr *gc.Node
- var r *gc.Node
- var n1 gc.Node
- var n2 gc.Node
- var n3 gc.Node
- var tmp gc.Node
- var t1 gc.Node
- var t2 gc.Node
- var ax gc.Node
- var p1 *obj.Prog
- var p2 *obj.Prog
-
- nl = n.Left
- nr = n.Right
- a = int(n.Op)
+ nl := n.Left
+ nr := n.Right
+ a := int(n.Op)
if true_ == 0 {
// brcom is not valid on floats when NaN is involved.
- p1 = gc.Gbranch(obj.AJMP, nil, 0)
+ p1 := gc.Gbranch(obj.AJMP, nil, 0)
- p2 = gc.Gbranch(obj.AJMP, nil, 0)
+ p2 := gc.Gbranch(obj.AJMP, nil, 0)
gc.Patch(p1, gc.Pc)
// No need to avoid re-genning ninit.
@@ -1129,6 +1059,10 @@
return
}
+ var tmp gc.Node
+ var et int
+ var n2 gc.Node
+ var ax gc.Node
if gc.Use_sse != 0 {
goto sse
} else {
@@ -1139,7 +1073,7 @@
a = gc.Brrev(a) // because the args are stacked
if a == gc.OGE || a == gc.OGT {
// only < and <= work right with NaN; reverse if needed
- r = nr
+ r := nr
nr = nl
nl = r
@@ -1169,8 +1103,10 @@
// all the other ops have the same problem.
// We need to figure out what the right general
// solution is, besides telling people to use float64.
+ var t1 gc.Node
gc.Tempname(&t1, gc.Types[gc.TFLOAT32])
+ var t2 gc.Node
gc.Tempname(&t2, gc.Types[gc.TFLOAT32])
cgen(nr, &t1)
cgen(nl, &t2)
@@ -1184,12 +1120,14 @@
sse:
if nl.Addable == 0 {
+ var n1 gc.Node
gc.Tempname(&n1, nl.Type)
cgen(nl, &n1)
nl = &n1
}
if nr.Addable == 0 {
+ var tmp gc.Node
gc.Tempname(&tmp, nr.Type)
cgen(nr, &tmp)
nr = &tmp
@@ -1200,6 +1138,7 @@
nr = &n2
if nl.Op != gc.OREGISTER {
+ var n3 gc.Node
regalloc(&n3, nl.Type, nil)
gmove(nl, &n3)
nl = &n3
@@ -1207,7 +1146,7 @@
if a == gc.OGE || a == gc.OGT {
// only < and <= work right with NaN; reverse if needed
- r = nr
+ r := nr
nr = nl
nl = r
@@ -1223,9 +1162,9 @@
ret:
if a == gc.OEQ {
// neither NE nor P
- p1 = gc.Gbranch(i386.AJNE, nil, -likely)
+ p1 := gc.Gbranch(i386.AJNE, nil, -likely)
- p2 = gc.Gbranch(i386.AJPS, nil, -likely)
+ p2 := gc.Gbranch(i386.AJPS, nil, -likely)
gc.Patch(gc.Gbranch(obj.AJMP, nil, 0), to)
gc.Patch(p1, gc.Pc)
gc.Patch(p2, gc.Pc)
@@ -1242,11 +1181,10 @@
// Called after regopt and peep have run.
// Expand CHECKNIL pseudo-op into actual nil pointer check.
func expandchecks(firstp *obj.Prog) {
- var p *obj.Prog
var p1 *obj.Prog
var p2 *obj.Prog
- for p = firstp; p != nil; p = p.Link {
+ for p := firstp; p != nil; p = p.Link {
if p.As != obj.ACHECKNIL {
continue
}
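
dodiv above is careful about the most negative int divided by -1 because IDIV traps on that overflow, so the emitted code tests for a -1 divisor and substitutes negation. A hedged sketch of the equivalent check in Go — Go's own / operator already defines this case to wrap, so the explicit guard here is purely illustrative:

	package main

	import (
		"fmt"
		"math"
	)

	// safeDiv32 shows the guard dodiv emits: IDIV of the most
	// negative value by -1 overflows and traps on x86, so a -1
	// divisor is special-cased as negation. For minInt32, -a wraps
	// back to minInt32, which is the defined answer.
	func safeDiv32(a, b int32) int32 {
		if b == 0 {
			panic("divide by zero") // the panicdivide path
		}
		if b == -1 {
			return -a // avoids the hardware trap
		}
		return a / b
	}

	func main() {
		fmt.Println(safeDiv32(math.MinInt32, -1)) // -2147483648, no trap
		fmt.Println(safeDiv32(7, -1))             // -7
	}
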
diff --git a/src/cmd/8g/gsubr.go b/src/cmd/8g/gsubr.go
index 2728c2a..ac3ad41 100644
--- a/src/cmd/8g/gsubr.go
+++ b/src/cmd/8g/gsubr.go
@@ -46,13 +46,11 @@
* return Axxx for Oxxx on type t.
*/
func optoas(op int, t *gc.Type) int {
- var a int
-
if t == nil {
gc.Fatal("optoas: t is nil")
}
- a = obj.AXXX
+ a := obj.AXXX
switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
default:
gc.Fatal("optoas: no entry %v-%v", gc.Oconv(int(op), 0), gc.Tconv(t, 0))
@@ -401,11 +399,8 @@
}
func foptoas(op int, t *gc.Type, flg int) int {
- var et int
- var a int
-
- a = obj.AXXX
- et = int(gc.Simtype[t.Etype])
+ a := obj.AXXX
+ et := int(gc.Simtype[t.Etype])
if gc.Use_sse != 0 {
goto sse
@@ -564,18 +559,16 @@
}
func ginit() {
- var i int
-
- for i = 0; i < len(reg); i++ {
+ for i := 0; i < len(reg); i++ {
reg[i] = 1
}
- for i = i386.REG_AX; i <= i386.REG_DI; i++ {
+ for i := i386.REG_AX; i <= i386.REG_DI; i++ {
reg[i] = 0
}
- for i = i386.REG_X0; i <= i386.REG_X7; i++ {
+ for i := i386.REG_X0; i <= i386.REG_X7; i++ {
reg[i] = 0
}
- for i = 0; i < len(resvd); i++ {
+ for i := 0; i < len(resvd); i++ {
reg[resvd[i]]++
}
}
@@ -583,18 +576,16 @@
var regpc [i386.MAXREG]uint32
func gclean() {
- var i int
-
- for i = 0; i < len(resvd); i++ {
+ for i := 0; i < len(resvd); i++ {
reg[resvd[i]]--
}
- for i = i386.REG_AX; i <= i386.REG_DI; i++ {
+ for i := i386.REG_AX; i <= i386.REG_DI; i++ {
if reg[i] != 0 {
gc.Yyerror("reg %v left allocated at %x", gc.Ctxt.Rconv(i), regpc[i])
}
}
- for i = i386.REG_X0; i <= i386.REG_X7; i++ {
+ for i := i386.REG_X0; i <= i386.REG_X7; i++ {
if reg[i] != 0 {
gc.Yyerror("reg %v left allocated\n", gc.Ctxt.Rconv(i))
}
@@ -602,10 +593,9 @@
}
func anyregalloc() bool {
- var i int
var j int
- for i = i386.REG_AX; i <= i386.REG_DI; i++ {
+ for i := i386.REG_AX; i <= i386.REG_DI; i++ {
if reg[i] == 0 {
goto ok
}
@@ -618,7 +608,7 @@
ok:
}
- for i = i386.REG_X0; i <= i386.REG_X7; i++ {
+ for i := i386.REG_X0; i <= i386.REG_X7; i++ {
if reg[i] != 0 {
return true
}
@@ -632,14 +622,12 @@
* caller must regfree(n).
*/
func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) {
- var i int
- var et int
-
if t == nil {
gc.Fatal("regalloc: t nil")
}
- et = int(gc.Simtype[t.Etype])
+ et := int(gc.Simtype[t.Etype])
+ var i int
switch et {
case gc.TINT64,
gc.TUINT64:
@@ -668,7 +656,7 @@
}
fmt.Printf("registers allocated at\n")
- for i = i386.REG_AX; i <= i386.REG_DI; i++ {
+ for i := i386.REG_AX; i <= i386.REG_DI; i++ {
fmt.Printf("\t%v\t%#x\n", gc.Ctxt.Rconv(i), regpc[i])
}
gc.Fatal("out of fixed registers")
@@ -694,7 +682,7 @@
}
}
fmt.Printf("registers allocated at\n")
- for i = i386.REG_X0; i <= i386.REG_X7; i++ {
+ for i := i386.REG_X0; i <= i386.REG_X7; i++ {
fmt.Printf("\t%v\t%#x\n", gc.Ctxt.Rconv(i), regpc[i])
}
gc.Fatal("out of floating registers")
@@ -723,15 +711,13 @@
}
func regfree(n *gc.Node) {
- var i int
-
if n.Op == gc.ONAME {
return
}
if n.Op != gc.OREGISTER && n.Op != gc.OINDREG {
gc.Fatal("regfree: not a register")
}
- i = int(n.Val.U.Reg)
+ i := int(n.Val.U.Reg)
if i == i386.REG_SP {
return
}
@@ -764,9 +750,7 @@
* swap node contents
*/
func nswap(a *gc.Node, b *gc.Node) {
- var t gc.Node
-
- t = *a
+ t := *a
*a = *b
*b = t
}
@@ -794,9 +778,6 @@
* n is a 64-bit value. fill in lo and hi to refer to its 32-bit halves.
*/
func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
- var n1 gc.Node
- var i int64
-
if !gc.Is64(n.Type) {
gc.Fatal("split64 %v", gc.Tconv(n.Type, 0))
}
@@ -810,6 +791,7 @@
default:
switch n.Op {
default:
+ var n1 gc.Node
if !dotaddable(n, &n1) {
igen(n, &n1, nil)
sclean[nsclean-1] = n1
@@ -819,6 +801,7 @@
case gc.ONAME:
if n.Class == gc.PPARAMREF {
+ var n1 gc.Node
cgen(n.Heapaddr, &n1)
sclean[nsclean-1] = n1
n = &n1
@@ -840,8 +823,9 @@
hi.Xoffset += 4
case gc.OLITERAL:
+ var n1 gc.Node
gc.Convconst(&n1, n.Type, &n.Val)
- i = gc.Mpgetfix(n1.Val.U.Xval)
+ i := gc.Mpgetfix(n1.Val.U.Xval)
gc.Nodconst(lo, gc.Types[gc.TUINT32], int64(uint32(i)))
i >>= 32
if n.Type.Etype == gc.TINT64 {
@@ -901,25 +885,13 @@
}
func gmove(f *gc.Node, t *gc.Node) {
- var a int
- var ft int
- var tt int
- var cvt *gc.Type
- var r1 gc.Node
- var r2 gc.Node
- var flo gc.Node
- var fhi gc.Node
- var tlo gc.Node
- var thi gc.Node
- var con gc.Node
-
if gc.Debug['M'] != 0 {
fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, 0), gc.Nconv(t, 0))
}
- ft = gc.Simsimtype(f.Type)
- tt = gc.Simsimtype(t.Type)
- cvt = t.Type
+ ft := gc.Simsimtype(f.Type)
+ tt := gc.Simsimtype(t.Type)
+ cvt := t.Type
if gc.Iscomplex[ft] != 0 || gc.Iscomplex[tt] != 0 {
gc.Complexmove(f, t)
@@ -933,12 +905,15 @@
// cannot have two integer memory operands;
// except 64-bit, which always copies via registers anyway.
+ var r1 gc.Node
+ var a int
if gc.Isint[ft] != 0 && gc.Isint[tt] != 0 && !gc.Is64(f.Type) && !gc.Is64(t.Type) && gc.Ismem(f) && gc.Ismem(t) {
goto hard
}
// convert constant to desired type
if f.Op == gc.OLITERAL {
+ var con gc.Node
gc.Convconst(&con, t.Type, &f.Val)
f = &con
ft = gc.Simsimtype(con.Type)
@@ -980,8 +955,11 @@
gc.TUINT64<<16 | gc.TINT8,
gc.TINT64<<16 | gc.TUINT8,
gc.TUINT64<<16 | gc.TUINT8:
+ var flo gc.Node
+ var fhi gc.Node
split64(f, &flo, &fhi)
+ var r1 gc.Node
gc.Nodreg(&r1, t.Type, i386.REG_AX)
gmove(&flo, &r1)
gins(i386.AMOVB, &r1, t)
@@ -1006,8 +984,11 @@
gc.TUINT64<<16 | gc.TINT16,
gc.TINT64<<16 | gc.TUINT16,
gc.TUINT64<<16 | gc.TUINT16:
+ var flo gc.Node
+ var fhi gc.Node
split64(f, &flo, &fhi)
+ var r1 gc.Node
gc.Nodreg(&r1, t.Type, i386.REG_AX)
gmove(&flo, &r1)
gins(i386.AMOVW, &r1, t)
@@ -1024,8 +1005,11 @@
gc.TUINT64<<16 | gc.TINT32,
gc.TINT64<<16 | gc.TUINT32,
gc.TUINT64<<16 | gc.TUINT32:
+ var fhi gc.Node
+ var flo gc.Node
split64(f, &flo, &fhi)
+ var r1 gc.Node
gc.Nodreg(&r1, t.Type, i386.REG_AX)
gmove(&flo, &r1)
gins(i386.AMOVL, &r1, t)
@@ -1036,14 +1020,20 @@
gc.TINT64<<16 | gc.TUINT64,
gc.TUINT64<<16 | gc.TINT64,
gc.TUINT64<<16 | gc.TUINT64:
+ var fhi gc.Node
+ var flo gc.Node
split64(f, &flo, &fhi)
+ var tlo gc.Node
+ var thi gc.Node
split64(t, &tlo, &thi)
if f.Op == gc.OLITERAL {
gins(i386.AMOVL, &flo, &tlo)
gins(i386.AMOVL, &fhi, &thi)
} else {
+ var r1 gc.Node
gc.Nodreg(&r1, gc.Types[gc.TUINT32], i386.REG_AX)
+ var r2 gc.Node
gc.Nodreg(&r2, gc.Types[gc.TUINT32], i386.REG_DX)
gins(i386.AMOVL, &flo, &r1)
gins(i386.AMOVL, &fhi, &r2)
@@ -1118,9 +1108,13 @@
case gc.TINT32<<16 | gc.TINT64, // sign extend int32
gc.TINT32<<16 | gc.TUINT64:
+ var thi gc.Node
+ var tlo gc.Node
split64(t, &tlo, &thi)
+ var flo gc.Node
gc.Nodreg(&flo, tlo.Type, i386.REG_AX)
+ var fhi gc.Node
gc.Nodreg(&fhi, thi.Type, i386.REG_DX)
gmove(f, &flo)
gins(i386.ACDQ, nil, nil)
@@ -1131,6 +1125,8 @@
case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
gc.TUINT32<<16 | gc.TUINT64:
+ var tlo gc.Node
+ var thi gc.Node
split64(t, &tlo, &thi)
gmove(f, &tlo)
@@ -1176,27 +1172,10 @@
func floatmove(f *gc.Node, t *gc.Node) {
var r1 gc.Node
- var r2 gc.Node
- var t1 gc.Node
- var t2 gc.Node
- var tlo gc.Node
- var thi gc.Node
- var con gc.Node
- var f0 gc.Node
- var f1 gc.Node
- var ax gc.Node
- var dx gc.Node
- var cx gc.Node
- var cvt *gc.Type
- var ft int
- var tt int
- var p1 *obj.Prog
- var p2 *obj.Prog
- var p3 *obj.Prog
- ft = gc.Simsimtype(f.Type)
- tt = gc.Simsimtype(t.Type)
- cvt = t.Type
+ ft := gc.Simsimtype(f.Type)
+ tt := gc.Simsimtype(t.Type)
+ cvt := t.Type
// cannot have two floating point memory operands.
if gc.Isfloat[ft] != 0 && gc.Isfloat[tt] != 0 && gc.Ismem(f) && gc.Ismem(t) {
@@ -1205,6 +1184,7 @@
// convert constant to desired type
if f.Op == gc.OLITERAL {
+ var con gc.Node
gc.Convconst(&con, t.Type, &f.Val)
f = &con
ft = gc.Simsimtype(con.Type)
@@ -1242,6 +1222,7 @@
goto hardmem
}
+ var r1 gc.Node
gc.Nodreg(&r1, gc.Types[ft], i386.REG_F0)
if ft == gc.TFLOAT32 {
gins(i386.AFMOVF, f, &r1)
@@ -1250,8 +1231,10 @@
}
// set round to zero mode during conversion
+ var t1 gc.Node
memname(&t1, gc.Types[gc.TUINT16])
+ var t2 gc.Node
memname(&t2, gc.Types[gc.TUINT16])
gins(i386.AFSTCW, nil, &t1)
gins(i386.AMOVW, ncon(0xf7f), &t2)
@@ -1274,8 +1257,11 @@
}
bignodes()
+ var f0 gc.Node
gc.Nodreg(&f0, gc.Types[ft], i386.REG_F0)
+ var f1 gc.Node
gc.Nodreg(&f1, gc.Types[ft], i386.REG_F0+1)
+ var ax gc.Node
gc.Nodreg(&ax, gc.Types[gc.TUINT16], i386.REG_AX)
if ft == gc.TFLOAT32 {
@@ -1288,15 +1274,17 @@
gins(i386.AFMOVD, &zerof, &f0)
gins(i386.AFUCOMIP, &f0, &f1)
- p1 = gc.Gbranch(optoas(gc.OGT, gc.Types[tt]), nil, 0)
+ p1 := gc.Gbranch(optoas(gc.OGT, gc.Types[tt]), nil, 0)
// if 1<<64 <= v { answer = 0 too }
gins(i386.AFMOVD, &two64f, &f0)
gins(i386.AFUCOMIP, &f0, &f1)
- p2 = gc.Gbranch(optoas(gc.OGT, gc.Types[tt]), nil, 0)
+ p2 := gc.Gbranch(optoas(gc.OGT, gc.Types[tt]), nil, 0)
gc.Patch(p1, gc.Pc)
gins(i386.AFMOVVP, &f0, t) // don't care about t, but will pop the stack
+ var thi gc.Node
+ var tlo gc.Node
split64(t, &tlo, &thi)
gins(i386.AMOVL, ncon(0), &tlo)
gins(i386.AMOVL, ncon(0), &thi)
@@ -1309,8 +1297,10 @@
// otherwise, subtract 2^63, convert, and add it back.
// set round to zero mode during conversion
+ var t1 gc.Node
memname(&t1, gc.Types[gc.TUINT16])
+ var t2 gc.Node
memname(&t2, gc.Types[gc.TUINT16])
gins(i386.AFSTCW, nil, &t1)
gins(i386.AMOVW, ncon(0xf7f), &t2)
@@ -1322,7 +1312,7 @@
gins(i386.AFUCOMIP, &f0, &f1)
p2 = gc.Gbranch(optoas(gc.OLE, gc.Types[tt]), nil, 0)
gins(i386.AFMOVVP, &f0, t)
- p3 = gc.Gbranch(obj.AJMP, nil, 0)
+ p3 := gc.Gbranch(obj.AJMP, nil, 0)
gc.Patch(p2, gc.Pc)
gins(i386.AFMOVD, &two63f, &f0)
gins(i386.AFSUBDP, &f0, &f1)
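
This case implements float→uint64 on hardware that only converts to signed integers: compare against 2^63 (two63f), convert small values directly, and bias large ones down by 2^63 before converting, adding the bit back afterwards. The same trick in plain Go (a sketch of the technique, not the emitted instruction sequence):

package main

import "fmt"

const two63 = 9223372036854775808.0 // 2^63 as a float64

// toUint64 converts using only a signed conversion, the way the
// generated code does once it has compared against 2^63.
func toUint64(f float64) uint64 {
	if f < two63 {
		return uint64(int64(f))
	}
	return uint64(int64(f-two63)) + 1<<63
}

func main() {
	fmt.Println(toUint64(12345)) // 12345
	fmt.Println(toUint64(1e19))  // 10000000000000000000
}
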
@@ -1346,6 +1336,7 @@
if t.Op == gc.OREGISTER {
goto hardmem
}
+ var f0 gc.Node
gc.Nodreg(&f0, t.Type, i386.REG_F0)
gins(i386.AFMOVV, f, &f0)
if tt == gc.TFLOAT32 {
@@ -1360,17 +1351,24 @@
// otherwise, halve (rounding to odd?), convert, and double.
case gc.TUINT64<<16 | gc.TFLOAT32,
gc.TUINT64<<16 | gc.TFLOAT64:
+ var ax gc.Node
gc.Nodreg(&ax, gc.Types[gc.TUINT32], i386.REG_AX)
+ var dx gc.Node
gc.Nodreg(&dx, gc.Types[gc.TUINT32], i386.REG_DX)
+ var cx gc.Node
gc.Nodreg(&cx, gc.Types[gc.TUINT32], i386.REG_CX)
+ var t1 gc.Node
gc.Tempname(&t1, f.Type)
+ var tlo gc.Node
+ var thi gc.Node
split64(&t1, &tlo, &thi)
gmove(f, &t1)
gins(i386.ACMPL, &thi, ncon(0))
- p1 = gc.Gbranch(i386.AJLT, nil, 0)
+ p1 := gc.Gbranch(i386.AJLT, nil, 0)
// native
+ var r1 gc.Node
gc.Nodreg(&r1, gc.Types[tt], i386.REG_F0)
gins(i386.AFMOVV, &t1, &r1)
@@ -1379,7 +1377,7 @@
} else {
gins(i386.AFMOVDP, &r1, t)
}
- p2 = gc.Gbranch(obj.AJMP, nil, 0)
+ p2 := gc.Gbranch(obj.AJMP, nil, 0)
// simulated
gc.Patch(p1, gc.Pc)
@@ -1396,6 +1394,7 @@
gmove(&dx, &thi)
gmove(&ax, &tlo)
gc.Nodreg(&r1, gc.Types[tt], i386.REG_F0)
+ var r2 gc.Node
gc.Nodreg(&r2, gc.Types[tt], i386.REG_F0+1)
gins(i386.AFMOVV, &t1, &r1)
gins(i386.AFMOVD, &r1, &r1)
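
For the reverse direction the comment above says it all: a uint64 with the sign bit set cannot go through the signed convert, so the value is halved with the low bit ORed back in (rounding to odd, which avoids a double-rounding error), converted, and doubled. In plain Go:

package main

import "fmt"

// toFloat64 converts a uint64 on hardware that only converts signed
// 64-bit values: when the top bit is set, halve with the low bit ORed
// back in (round to odd), convert, then double.
func toFloat64(v uint64) float64 {
	if int64(v) >= 0 {
		return float64(int64(v)) // fits in a signed conversion
	}
	half := v>>1 | v&1 // round to odd so the final double rounds correctly
	return float64(int64(half)) * 2
}

func main() {
	v := uint64(1)<<63 + 1
	fmt.Println(toFloat64(v) == float64(v)) // true
}
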
@@ -1430,19 +1429,11 @@
func floatmove_387(f *gc.Node, t *gc.Node) {
var r1 gc.Node
- var t1 gc.Node
- var t2 gc.Node
- var cvt *gc.Type
- var p1 *obj.Prog
- var p2 *obj.Prog
- var p3 *obj.Prog
var a int
- var ft int
- var tt int
- ft = gc.Simsimtype(f.Type)
- tt = gc.Simsimtype(t.Type)
- cvt = t.Type
+ ft := gc.Simsimtype(f.Type)
+ tt := gc.Simsimtype(t.Type)
+ cvt := t.Type
switch uint32(ft)<<16 | uint32(tt) {
default:
@@ -1460,6 +1451,7 @@
if t.Op == gc.OREGISTER {
goto hardmem
}
+ var r1 gc.Node
gc.Nodreg(&r1, gc.Types[ft], i386.REG_F0)
if f.Op != gc.OREGISTER {
if ft == gc.TFLOAT32 {
@@ -1470,8 +1462,10 @@
}
// set round to zero mode during conversion
+ var t1 gc.Node
memname(&t1, gc.Types[gc.TUINT16])
+ var t2 gc.Node
memname(&t2, gc.Types[gc.TUINT16])
gins(i386.AFSTCW, nil, &t1)
gins(i386.AMOVW, ncon(0xf7f), &t2)
@@ -1493,6 +1487,7 @@
gc.TFLOAT64<<16 | gc.TINT8,
gc.TFLOAT64<<16 | gc.TUINT16,
gc.TFLOAT64<<16 | gc.TUINT8:
+ var t1 gc.Node
gc.Tempname(&t1, gc.Types[gc.TINT32])
gmove(f, &t1)
@@ -1502,10 +1497,10 @@
case gc.TINT8:
gins(i386.ACMPL, &t1, ncon(-0x80&(1<<32-1)))
- p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TINT32]), nil, -1)
+ p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TINT32]), nil, -1)
gins(i386.ACMPL, &t1, ncon(0x7f))
- p2 = gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TINT32]), nil, -1)
- p3 = gc.Gbranch(obj.AJMP, nil, 0)
+ p2 := gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TINT32]), nil, -1)
+ p3 := gc.Gbranch(obj.AJMP, nil, 0)
gc.Patch(p1, gc.Pc)
gc.Patch(p2, gc.Pc)
gmove(ncon(-0x80&(1<<32-1)), &t1)
@@ -1514,14 +1509,14 @@
case gc.TUINT8:
gins(i386.ATESTL, ncon(0xffffff00), &t1)
- p1 = gc.Gbranch(i386.AJEQ, nil, +1)
+ p1 := gc.Gbranch(i386.AJEQ, nil, +1)
gins(i386.AMOVL, ncon(0), &t1)
gc.Patch(p1, gc.Pc)
gmove(&t1, t)
case gc.TUINT16:
gins(i386.ATESTL, ncon(0xffff0000), &t1)
- p1 = gc.Gbranch(i386.AJEQ, nil, +1)
+ p1 := gc.Gbranch(i386.AJEQ, nil, +1)
gins(i386.AMOVL, ncon(0), &t1)
gc.Patch(p1, gc.Pc)
gmove(&t1, t)
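
The TESTL masks above answer "does this 32-bit value fit the small unsigned type" in one instruction: any bit outside the low 8 (or 16) forces the canonical zero result. The predicates, spelled out (a sketch, not generated code):

package main

import "fmt"

// fitsUint8/fitsUint16 express the TESTL masks used above: the value
// fits exactly when no bit outside the low 8 (or 16) bits is set.
func fitsUint8(x uint32) bool  { return x&0xffffff00 == 0 }
func fitsUint16(x uint32) bool { return x&0xffff0000 == 0 }

func main() {
	fmt.Println(fitsUint8(0xff), fitsUint8(0x100))       // true false
	fmt.Println(fitsUint16(0xffff), fitsUint16(0x10000)) // true false
}
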
@@ -1640,6 +1635,7 @@
goto hard
}
if f.Op == gc.OREGISTER && t.Op == gc.OREGISTER {
+ var r1 gc.Node
gc.Tempname(&r1, gc.Types[gc.TFLOAT32])
gins(i386.AFMOVFP, f, &r1)
gins(i386.AFMOVF, &r1, t)
@@ -1685,11 +1681,9 @@
var r1 gc.Node
var cvt *gc.Type
var a int
- var ft int
- var tt int
- ft = gc.Simsimtype(f.Type)
- tt = gc.Simsimtype(t.Type)
+ ft := gc.Simsimtype(f.Type)
+ tt := gc.Simsimtype(t.Type)
switch uint32(ft)<<16 | uint32(tt) {
// should not happen
@@ -1829,11 +1823,6 @@
* as f, t
*/
func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
- var p *obj.Prog
- var af obj.Addr
- var at obj.Addr
- var w int
-
if as == i386.AFMOVF && f != nil && f.Op == gc.OREGISTER && t != nil && t.Op == gc.OREGISTER {
gc.Fatal("gins MOVF reg, reg")
}
@@ -1858,15 +1847,15 @@
}
}
- af = obj.Addr{}
- at = obj.Addr{}
+ af := obj.Addr{}
+ at := obj.Addr{}
if f != nil {
gc.Naddr(f, &af, 1)
}
if t != nil {
gc.Naddr(t, &at, 1)
}
- p = gc.Prog(as)
+ p := gc.Prog(as)
if f != nil {
p.From = af
}
@@ -1877,7 +1866,7 @@
fmt.Printf("%v\n", p)
}
- w = 0
+ w := 0
switch as {
case i386.AMOVB:
w = 1
@@ -1903,15 +1892,13 @@
}
func dotaddable(n *gc.Node, n1 *gc.Node) bool {
- var o int
- var oary [10]int64
- var nn *gc.Node
-
if n.Op != gc.ODOT {
return false
}
- o = gc.Dotoffset(n, oary[:], &nn)
+ var oary [10]int64
+ var nn *gc.Node
+ o := gc.Dotoffset(n, oary[:], &nn)
if nn != nil && nn.Addable != 0 && o == 1 && oary[0] >= 0 {
*n1 = *nn
n1.Type = n.Type
diff --git a/src/cmd/8g/peep.go b/src/cmd/8g/peep.go
index 0838882..8aa6e94 100644
--- a/src/cmd/8g/peep.go
+++ b/src/cmd/8g/peep.go
@@ -63,10 +63,9 @@
}
func rnops(r *gc.Flow) *gc.Flow {
- var p *obj.Prog
- var r1 *gc.Flow
-
if r != nil {
+ var p *obj.Prog
+ var r1 *gc.Flow
for {
p = r.Prog
if p.As != obj.ANOP || p.From.Type != obj.TYPE_NONE || p.To.Type != obj.TYPE_NONE {
@@ -84,14 +83,7 @@
}
func peep(firstp *obj.Prog) {
- var r *gc.Flow
- var r1 *gc.Flow
- var g *gc.Graph
- var p *obj.Prog
- var p1 *obj.Prog
- var t int
-
- g = gc.Flowstart(firstp, nil)
+ g := gc.Flowstart(firstp, nil)
if g == nil {
return
}
@@ -104,7 +96,8 @@
// find MOV $con,R followed by
// another MOV $con,R without
// setting R in the interim
- for r = g.Start; r != nil; r = r.Link {
+ var p *obj.Prog
+ for r := g.Start; r != nil; r = r.Link {
p = r.Prog
switch p.As {
case i386.ALEAL:
@@ -129,6 +122,10 @@
}
}
+ var r1 *gc.Flow
+ var p1 *obj.Prog
+ var r *gc.Flow
+ var t int
loop1:
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
gc.Dumpit("loop1", g.Start, 0)
@@ -229,7 +226,7 @@
// can be replaced by MOVAPD, which moves the pair of float64s
// instead of just the lower one. We only use the lower one, but
// the processor can do better if we do moves using both.
- for r = g.Start; r != nil; r = r.Link {
+ for r := g.Start; r != nil; r = r.Link {
p = r.Prog
if p.As == i386.AMOVSD {
if regtyp(&p.From) {
@@ -244,9 +241,7 @@
}
func excise(r *gc.Flow) {
- var p *obj.Prog
-
- p = r.Prog
+ p := r.Prog
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("%v ===delete===\n", p)
}
@@ -269,9 +264,8 @@
// causing any trouble.
func elimshortmov(g *gc.Graph) {
var p *obj.Prog
- var r *gc.Flow
- for r = g.Start; r != nil; r = r.Link {
+ for r := g.Start; r != nil; r = r.Link {
p = r.Prog
if regtyp(&p.To) {
switch p.As {
@@ -367,22 +361,17 @@
* will be eliminated by copy propagation.
*/
func subprop(r0 *gc.Flow) bool {
- var p *obj.Prog
- var v1 *obj.Addr
- var v2 *obj.Addr
- var r *gc.Flow
- var t int
- var info gc.ProgInfo
-
- p = r0.Prog
- v1 = &p.From
+ p := r0.Prog
+ v1 := &p.From
if !regtyp(v1) {
return false
}
- v2 = &p.To
+ v2 := &p.To
if !regtyp(v2) {
return false
}
+ var info gc.ProgInfo
+ var r *gc.Flow
for r = gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("\t? %v\n", r.Prog)
@@ -436,7 +425,7 @@
}
}
- t = int(v1.Reg)
+ t := int(v1.Reg)
v1.Reg = v2.Reg
v2.Reg = int16(t)
if gc.Debug['P'] != 0 {
@@ -458,13 +447,9 @@
* set v2 return success
*/
func copyprop(g *gc.Graph, r0 *gc.Flow) bool {
- var p *obj.Prog
- var v1 *obj.Addr
- var v2 *obj.Addr
-
- p = r0.Prog
- v1 = &p.From
- v2 = &p.To
+ p := r0.Prog
+ v1 := &p.From
+ v2 := &p.To
if copyas(v1, v2) {
return true
}
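
copyprop and subprop both chase the pattern MOV v1->v2 followed by uses of v2: if every use of v2 can be rewritten to v1 before either operand is redefined, the MOV becomes dead and excise removes it. A toy version of that rewrite on a flat instruction list (the real pass walks the flow graph and handles many more cases):

package main

import "fmt"

type instr struct {
	op, src, dst string
}

// propagate rewrites later uses of a copy's destination back to its
// source until either register is redefined; the MOV is then dead.
func propagate(prog []instr) []instr {
	for i := range prog {
		if prog[i].op != "MOV" {
			continue
		}
		v1, v2 := prog[i].src, prog[i].dst
		for j := i + 1; j < len(prog); j++ {
			if prog[j].src == v2 {
				prog[j].src = v1
			}
			if prog[j].dst == v1 || prog[j].dst == v2 {
				break // clobbered; stop substituting
			}
		}
	}
	return prog
}

func main() {
	out := propagate([]instr{
		{"MOV", "R1", "R2"},
		{"ADD", "R2", "R3"}, // becomes ADD R1, R3
	})
	fmt.Println(out)
}
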
@@ -473,9 +458,6 @@
}
func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) bool {
- var t int
- var p *obj.Prog
-
if uint32(r.Active) == gactive {
if gc.Debug['P'] != 0 {
fmt.Printf("act set; return 1\n")
@@ -487,6 +469,8 @@
if gc.Debug['P'] != 0 {
fmt.Printf("copy %v->%v f=%d\n", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), f)
}
+ var t int
+ var p *obj.Prog
for ; r != nil; r = r.S1 {
p = r.Prog
if gc.Debug['P'] != 0 {
@@ -577,8 +561,6 @@
* 0 otherwise (not touched)
*/
func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
- var info gc.ProgInfo
-
switch p.As {
case obj.AJMP:
if s != nil {
@@ -632,6 +614,7 @@
if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
return 0
}
+ var info gc.ProgInfo
proginfo(&info, p)
if (info.Reguse|info.Regset)&RtoB(int(v.Reg)) != 0 {
@@ -747,10 +730,8 @@
* return failure to substitute
*/
func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f int) int {
- var reg int
-
if copyas(a, v) {
- reg = int(s.Reg)
+ reg := int(s.Reg)
if reg >= i386.REG_AX && reg <= i386.REG_DI || reg >= i386.REG_X0 && reg <= i386.REG_X7 {
if f != 0 {
a.Reg = int16(reg)
@@ -761,7 +742,7 @@
}
if regtyp(v) {
- reg = int(v.Reg)
+ reg := int(v.Reg)
if a.Type == obj.TYPE_MEM && int(a.Reg) == reg {
if (s.Reg == i386.REG_BP) && a.Index != obj.TYPE_NONE {
return 1 /* can't use BP-base with index */
@@ -786,15 +767,12 @@
}
func conprop(r0 *gc.Flow) {
- var r *gc.Flow
var p *obj.Prog
- var p0 *obj.Prog
var t int
- var v0 *obj.Addr
- p0 = r0.Prog
- v0 = &p0.To
- r = r0
+ p0 := r0.Prog
+ v0 := &p0.To
+ r := r0
loop:
r = gc.Uniqs(r)
diff --git a/src/cmd/8g/reg.go b/src/cmd/8g/reg.go
index 76bd260..4d4d9a5 100644
--- a/src/cmd/8g/reg.go
+++ b/src/cmd/8g/reg.go
@@ -66,9 +66,7 @@
}
func doregbits(r int) uint64 {
- var b uint64
-
- b = 0
+ b := uint64(0)
if r >= i386.REG_AX && r <= i386.REG_DI {
b |= RtoB(r)
} else if r >= i386.REG_AL && r <= i386.REG_BL {
diff --git a/src/cmd/9g/cgen.go b/src/cmd/9g/cgen.go
index 7a1e967..74accf2 100644
--- a/src/cmd/9g/cgen.go
+++ b/src/cmd/9g/cgen.go
@@ -20,24 +20,17 @@
* simplifies and calls gmove.
*/
func cgen(n *gc.Node, res *gc.Node) {
- var nl *gc.Node
- var nr *gc.Node
- var r *gc.Node
- var n1 gc.Node
- var n2 gc.Node
- var a int
- var f int
- var p1 *obj.Prog
- var p2 *obj.Prog
- var p3 *obj.Prog
- var addr obj.Addr
-
//print("cgen %N(%d) -> %N(%d)\n", n, n->addable, res, res->addable);
if gc.Debug['g'] != 0 {
gc.Dump("\ncgen-n", n)
gc.Dump("cgen-res", res)
}
+ var a int
+ var nr *gc.Node
+ var nl *gc.Node
+ var n1 gc.Node
+ var n2 gc.Node
if n == nil || n.Type == nil {
goto ret
}
@@ -57,6 +50,7 @@
gc.OSLICE3,
gc.OSLICE3ARR:
if res.Op != gc.ONAME || res.Addable == 0 {
+ var n1 gc.Node
gc.Tempname(&n1, n.Type)
gc.Cgen_slice(n, &n1)
cgen(&n1, res)
@@ -67,6 +61,7 @@
case gc.OEFACE:
if res.Op != gc.ONAME || res.Addable == 0 {
+ var n1 gc.Node
gc.Tempname(&n1, n.Type)
gc.Cgen_eface(n, &n1)
cgen(&n1, res)
@@ -81,6 +76,7 @@
gc.Fatal("cgen: this is going to misscompile")
}
if res.Ullman >= gc.UINF {
+ var n1 gc.Node
gc.Tempname(&n1, n.Type)
cgen(n, &n1)
cgen(&n1, res)
@@ -98,6 +94,7 @@
if res.Addable == 0 {
if n.Ullman > res.Ullman {
+ var n1 gc.Node
regalloc(&n1, n.Type, res)
cgen(n, &n1)
if n1.Ullman > res.Ullman {
@@ -111,6 +108,7 @@
goto ret
}
+ var f int
if res.Ullman >= gc.UINF {
goto gen
}
@@ -132,9 +130,12 @@
}
if gc.Iscomplex[n.Type.Etype] == 0 {
- a = optoas(gc.OAS, res.Type)
+ a := optoas(gc.OAS, res.Type)
+ var addr obj.Addr
if sudoaddable(a, res, &addr) {
+ var p1 *obj.Prog
if f != 0 {
+ var n2 gc.Node
regalloc(&n2, res.Type, nil)
cgen(n, &n2)
p1 = gins(a, &n2, nil)
@@ -152,6 +153,7 @@
}
gen:
+ var n1 gc.Node
igen(res, &n1, nil)
cgen(n, &n1)
regfree(&n1)
@@ -187,6 +189,7 @@
if n.Op == gc.OREGISTER || res.Op == gc.OREGISTER {
gmove(n, res)
} else {
+ var n1 gc.Node
regalloc(&n1, n.Type, nil)
gmove(n, &n1)
cgen(&n1, res)
@@ -201,9 +204,10 @@
if nl != nil && nl.Ullman >= gc.UINF {
if nr != nil && nr.Ullman >= gc.UINF {
+ var n1 gc.Node
gc.Tempname(&n1, nl.Type)
cgen(nl, &n1)
- n2 = *n
+ n2 := *n
n2.Left = &n1
cgen(&n2, res)
goto ret
@@ -211,14 +215,16 @@
}
if gc.Iscomplex[n.Type.Etype] == 0 {
- a = optoas(gc.OAS, n.Type)
+ a := optoas(gc.OAS, n.Type)
+ var addr obj.Addr
if sudoaddable(a, n, &addr) {
if res.Op == gc.OREGISTER {
- p1 = gins(a, nil, res)
+ p1 := gins(a, nil, res)
p1.From = addr
} else {
+ var n2 gc.Node
regalloc(&n2, n.Type, nil)
- p1 = gins(a, nil, &n2)
+ p1 := gins(a, nil, &n2)
p1.From = addr
gins(a, &n2, res)
regfree(&n2)
@@ -248,11 +254,11 @@
gc.OGE,
gc.OGT,
gc.ONOT:
- p1 = gc.Gbranch(ppc64.ABR, nil, 0)
+ p1 := gc.Gbranch(ppc64.ABR, nil, 0)
- p2 = gc.Pc
+ p2 := gc.Pc
gmove(gc.Nodbool(true), res)
- p3 = gc.Gbranch(ppc64.ABR, nil, 0)
+ p3 := gc.Gbranch(ppc64.ABR, nil, 0)
gc.Patch(p1, gc.Pc)
bgen(n, true, 0, p2)
gmove(gc.Nodbool(false), res)
@@ -265,10 +271,12 @@
// unary
case gc.OCOM:
- a = optoas(gc.OXOR, nl.Type)
+ a := optoas(gc.OXOR, nl.Type)
+ var n1 gc.Node
regalloc(&n1, nl.Type, nil)
cgen(nl, &n1)
+ var n2 gc.Node
gc.Nodconst(&n2, nl.Type, -1)
gins(a, &n2, &n1)
gmove(&n1, res)
@@ -315,7 +323,9 @@
gc.OINDEX,
gc.OIND,
gc.ONAME:
+ var n1 gc.Node
igen(nl, &n1, res)
+ var n2 gc.Node
regalloc(&n2, n.Type, res)
gmove(&n1, &n2)
gmove(&n2, res)
@@ -325,7 +335,9 @@
}
}
+ var n1 gc.Node
regalloc(&n1, nl.Type, res)
+ var n2 gc.Node
regalloc(&n2, n.Type, &n1)
cgen(nl, &n1)
@@ -343,6 +355,7 @@
gc.OINDEX,
gc.OIND,
gc.ONAME: // PHEAP or PPARAMREF var
+ var n1 gc.Node
igen(n, &n1, res)
gmove(&n1, res)
@@ -350,6 +363,7 @@
// interface table is first word of interface value
case gc.OITAB:
+ var n1 gc.Node
igen(nl, &n1, res)
n1.Type = n.Type
@@ -359,14 +373,16 @@
// pointer is the first word of string or slice.
case gc.OSPTR:
if gc.Isconst(nl, gc.CTSTR) {
+ var n1 gc.Node
regalloc(&n1, gc.Types[gc.Tptr], res)
- p1 = gins(ppc64.AMOVD, nil, &n1)
+ p1 := gins(ppc64.AMOVD, nil, &n1)
gc.Datastring(nl.Val.U.Sval.S, &p1.From)
gmove(&n1, res)
regfree(&n1)
break
}
+ var n1 gc.Node
igen(nl, &n1, res)
n1.Type = n.Type
gmove(&n1, res)
@@ -376,13 +392,15 @@
if gc.Istype(nl.Type, gc.TMAP) || gc.Istype(nl.Type, gc.TCHAN) {
// map and chan have len in the first int-sized word.
// a zero pointer means zero length
+ var n1 gc.Node
regalloc(&n1, gc.Types[gc.Tptr], res)
cgen(nl, &n1)
+ var n2 gc.Node
gc.Nodconst(&n2, gc.Types[gc.Tptr], 0)
gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2)
- p1 = gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, 0)
+ p1 := gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, 0)
n2 = n1
n2.Op = gc.OINDREG
@@ -399,6 +417,7 @@
if gc.Istype(nl.Type, gc.TSTRING) || gc.Isslice(nl.Type) {
// both slice and string have len one pointer into the struct.
// a zero pointer means zero length
+ var n1 gc.Node
igen(nl, &n1, res)
n1.Type = gc.Types[gc.Simtype[gc.TUINT]]
@@ -414,13 +433,15 @@
if gc.Istype(nl.Type, gc.TCHAN) {
// chan has cap in the second int-sized word.
// a zero pointer means zero length
+ var n1 gc.Node
regalloc(&n1, gc.Types[gc.Tptr], res)
cgen(nl, &n1)
+ var n2 gc.Node
gc.Nodconst(&n2, gc.Types[gc.Tptr], 0)
gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2)
- p1 = gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, 0)
+ p1 := gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, 0)
n2 = n1
n2.Op = gc.OINDREG
@@ -436,6 +457,7 @@
}
if gc.Isslice(nl.Type) {
+ var n1 gc.Node
igen(nl, &n1, res)
n1.Type = gc.Types[gc.Simtype[gc.TUINT]]
n1.Xoffset += int64(gc.Array_cap)
@@ -475,11 +497,13 @@
}
if nl.Ullman >= nr.Ullman {
+ var n1 gc.Node
regalloc(&n1, nl.Type, res)
cgen(nl, &n1)
cgen_div(int(n.Op), &n1, nr, res)
regfree(&n1)
} else {
+ var n2 gc.Node
if !gc.Smallintconst(nr) {
regalloc(&n2, nr.Type, res)
cgen(nr, &n2)
@@ -519,7 +543,7 @@
*/
sbop: // symmetric binary
if nl.Ullman < nr.Ullman || (nl.Ullman == nr.Ullman && (gc.Smallintconst(nl) || (nr.Op == gc.OLITERAL && !gc.Smallintconst(nr)))) {
- r = nl
+ r := nl
nl = nr
nr = r
}
@@ -606,8 +630,6 @@
* The caller must call regfree(a).
*/
func cgenr(n *gc.Node, a *gc.Node, res *gc.Node) {
- var n1 gc.Node
-
if gc.Debug['g'] != 0 {
gc.Dump("cgenr-n", n)
}
@@ -630,6 +652,7 @@
gc.OCALLFUNC,
gc.OCALLMETH,
gc.OCALLINTER:
+ var n1 gc.Node
igen(n, &n1, res)
regalloc(a, gc.Types[gc.Tptr], &n1)
gmove(&n1, a)
@@ -648,24 +671,12 @@
* The generated code checks that the result is not nil.
*/
func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
- var nl *gc.Node
- var nr *gc.Node
- var n1 gc.Node
- var n2 gc.Node
- var n3 gc.Node
- var n4 gc.Node
- var tmp gc.Node
- var p1 *obj.Prog
- var p2 *obj.Prog
- var w uint32
- var v uint64
-
if gc.Debug['g'] != 0 {
gc.Dump("agenr-n", n)
}
- nl = n.Left
- nr = n.Right
+ nl := n.Left
+ nr := n.Right
switch n.Op {
case gc.ODOT,
@@ -673,6 +684,7 @@
gc.OCALLFUNC,
gc.OCALLMETH,
gc.OCALLINTER:
+ var n1 gc.Node
igen(n, &n1, res)
regalloc(a, gc.Types[gc.Tptr], &n1)
agen(&n1, a)
@@ -683,11 +695,14 @@
gc.Cgen_checknil(a)
case gc.OINDEX:
- p2 = nil // to be patched to panicindex.
- w = uint32(n.Type.Width)
+ p2 := (*obj.Prog)(nil) // to be patched to panicindex.
+ w := uint32(n.Type.Width)
//bounded = debug['B'] || n->bounded;
+ var n3 gc.Node
+ var n1 gc.Node
if nr.Addable != 0 {
+ var tmp gc.Node
if !gc.Isconst(nr, gc.CTINT) {
gc.Tempname(&tmp, gc.Types[gc.TINT64])
}
@@ -701,6 +716,7 @@
}
} else if nl.Addable != 0 {
if !gc.Isconst(nr, gc.CTINT) {
+ var tmp gc.Node
gc.Tempname(&tmp, gc.Types[gc.TINT64])
cgen(nr, &tmp)
regalloc(&n1, tmp.Type, nil)
@@ -711,6 +727,7 @@
agenr(nl, &n3, res)
}
} else {
+ var tmp gc.Node
gc.Tempname(&tmp, gc.Types[gc.TINT64])
cgen(nr, &tmp)
nr = &tmp
@@ -730,18 +747,19 @@
if gc.Isconst(nl, gc.CTSTR) {
gc.Fatal("constant string constant index")
}
- v = uint64(gc.Mpgetfix(nr.Val.U.Xval))
+ v := uint64(gc.Mpgetfix(nr.Val.U.Xval))
if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
if gc.Debug['B'] == 0 && !n.Bounded {
n1 = n3
n1.Op = gc.OINDREG
n1.Type = gc.Types[gc.Tptr]
n1.Xoffset = int64(gc.Array_nel)
+ var n4 gc.Node
regalloc(&n4, n1.Type, nil)
gmove(&n1, &n4)
ginscon2(optoas(gc.OCMP, gc.Types[gc.TUINT64]), &n4, int64(v))
regfree(&n4)
- p1 = gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TUINT64]), nil, +1)
+ p1 := gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TUINT64]), nil, +1)
ginscall(gc.Panicindex, 0)
gc.Patch(p1, gc.Pc)
}
@@ -761,10 +779,12 @@
break
}
+ var n2 gc.Node
regalloc(&n2, gc.Types[gc.TINT64], &n1) // i
gmove(&n1, &n2)
regfree(&n1)
+ var n4 gc.Node
if gc.Debug['B'] == 0 && !n.Bounded {
// check bounds
if gc.Isconst(nl, gc.CTSTR) {
@@ -781,7 +801,7 @@
gc.Nodconst(&n4, gc.Types[gc.TUINT64], nl.Type.Bound)
} else {
regalloc(&n4, gc.Types[gc.TUINT64], nil)
- p1 = gins(ppc64.AMOVD, nil, &n4)
+ p1 := gins(ppc64.AMOVD, nil, &n4)
p1.From.Type = obj.TYPE_CONST
p1.From.Offset = nl.Type.Bound
}
@@ -791,7 +811,7 @@
if n4.Op == gc.OREGISTER {
regfree(&n4)
}
- p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1)
+ p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1)
if p2 != nil {
gc.Patch(p2, gc.Pc)
}
@@ -801,7 +821,7 @@
if gc.Isconst(nl, gc.CTSTR) {
regalloc(&n3, gc.Types[gc.Tptr], res)
- p1 = gins(ppc64.AMOVD, nil, &n3)
+ p1 := gins(ppc64.AMOVD, nil, &n3)
gc.Datastring(nl.Val.U.Sval.S, &p1.From)
p1.From.Type = obj.TYPE_ADDR
} else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
@@ -854,11 +874,6 @@
* The generated code checks that the result is not nil.
*/
func agen(n *gc.Node, res *gc.Node) {
- var nl *gc.Node
- var n1 gc.Node
- var n2 gc.Node
- var n3 gc.Node
-
if gc.Debug['g'] != 0 {
gc.Dump("\nagen-res", res)
gc.Dump("agen-r", n)
@@ -872,17 +887,20 @@
n = n.Left
}
+ var nl *gc.Node
if gc.Isconst(n, gc.CTNIL) && n.Type.Width > int64(gc.Widthptr) {
// Use of a nil interface or nil slice.
// Create a temporary we can take the address of and read.
// The generated code is just going to panic, so it need not
// be terribly efficient. See issue 3670.
+ var n1 gc.Node
gc.Tempname(&n1, n.Type)
gc.Gvardef(&n1)
clearfat(&n1)
+ var n2 gc.Node
regalloc(&n2, gc.Types[gc.Tptr], res)
- n3 = gc.Node{}
+ n3 := gc.Node{}
n3.Op = gc.OADDR
n3.Left = &n1
gins(ppc64.AMOVD, &n3, &n2)
@@ -892,9 +910,10 @@
}
if n.Addable != 0 {
- n1 = gc.Node{}
+ n1 := gc.Node{}
n1.Op = gc.OADDR
n1.Left = n
+ var n2 gc.Node
regalloc(&n2, gc.Types[gc.Tptr], res)
gins(ppc64.AMOVD, &n1, &n2)
gmove(&n2, res)
@@ -928,16 +947,19 @@
gc.OSLICESTR,
gc.OSLICE3,
gc.OSLICE3ARR:
+ var n1 gc.Node
gc.Tempname(&n1, n.Type)
gc.Cgen_slice(n, &n1)
agen(&n1, res)
case gc.OEFACE:
+ var n1 gc.Node
gc.Tempname(&n1, n.Type)
gc.Cgen_eface(n, &n1)
agen(&n1, res)
case gc.OINDEX:
+ var n1 gc.Node
agenr(n, &n1, res)
gmove(&n1, res)
regfree(&n1)
@@ -991,10 +1013,6 @@
* The generated code checks that the result is not *nil.
*/
func igen(n *gc.Node, a *gc.Node, res *gc.Node) {
- var fp *gc.Type
- var flist gc.Iter
- var n1 gc.Node
-
if gc.Debug['g'] != 0 {
gc.Dump("\nigen-n", n)
}
@@ -1046,7 +1064,8 @@
cgen_callinter(n, nil, 0)
}
- fp = gc.Structfirst(&flist, gc.Getoutarg(n.Left.Type))
+ var flist gc.Iter
+ fp := gc.Structfirst(&flist, gc.Getoutarg(n.Left.Type))
*a = gc.Node{}
a.Op = gc.OINDREG
a.Val.U.Reg = ppc64.REGSP
@@ -1066,6 +1085,7 @@
if gc.Isptr[n.Left.Type.Etype] == 0 {
igen(n.Left, a, res)
} else {
+ var n1 gc.Node
igen(n.Left, &n1, res)
gc.Cgen_checknil(&n1)
regalloc(a, gc.Types[gc.Tptr], res)
@@ -1094,19 +1114,6 @@
* if(n == true) goto to;
*/
func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
- var et int
- var a int
- var nl *gc.Node
- var nr *gc.Node
- var l *gc.Node
- var r *gc.Node
- var n1 gc.Node
- var n2 gc.Node
- var tmp gc.Node
- var ll *gc.NodeList
- var p1 *obj.Prog
- var p2 *obj.Prog
-
if gc.Debug['g'] != 0 {
gc.Dump("\nbgen", n)
}
@@ -1119,6 +1126,9 @@
gc.Genlist(n.Ninit)
}
+ var et int
+ var nl *gc.Node
+ var nr *gc.Node
if n.Type == nil {
gc.Convlit(&n, gc.Types[gc.TBOOL])
if n.Type == nil {
@@ -1144,11 +1154,13 @@
switch n.Op {
default:
+ var n1 gc.Node
regalloc(&n1, n.Type, nil)
cgen(n, &n1)
+ var n2 gc.Node
gc.Nodconst(&n2, n.Type, 0)
gins(optoas(gc.OCMP, n.Type), &n1, &n2)
- a = ppc64.ABNE
+ a := ppc64.ABNE
if !true_ {
a = ppc64.ABEQ
}
@@ -1166,8 +1178,8 @@
case gc.OANDAND,
gc.OOROR:
if (n.Op == gc.OANDAND) == true_ {
- p1 = gc.Gbranch(obj.AJMP, nil, 0)
- p2 = gc.Gbranch(obj.AJMP, nil, 0)
+ p1 := gc.Gbranch(obj.AJMP, nil, 0)
+ p2 := gc.Gbranch(obj.AJMP, nil, 0)
gc.Patch(p1, gc.Pc)
bgen(n.Left, !true_, -likely, p2)
bgen(n.Right, !true_, -likely, p2)
@@ -1212,15 +1224,15 @@
gc.OGT,
gc.OLE,
gc.OGE:
- a = int(n.Op)
+ a := int(n.Op)
if !true_ {
if gc.Isfloat[nr.Type.Etype] != 0 {
// brcom is not valid on floats when NaN is involved.
- p1 = gc.Gbranch(ppc64.ABR, nil, 0)
+ p1 := gc.Gbranch(ppc64.ABR, nil, 0)
- p2 = gc.Gbranch(ppc64.ABR, nil, 0)
+ p2 := gc.Gbranch(ppc64.ABR, nil, 0)
gc.Patch(p1, gc.Pc)
- ll = n.Ninit // avoid re-genning ninit
+ ll := n.Ninit // avoid re-genning ninit
n.Ninit = nil
bgen(n, true, -likely, p2)
n.Ninit = ll
@@ -1236,7 +1248,7 @@
// make simplest on right
if nl.Op == gc.OLITERAL || (nl.Ullman < nr.Ullman && nl.Ullman < gc.UINF) {
a = gc.Brrev(a)
- r = nl
+ r := nl
nl = nr
nr = r
}
@@ -1249,10 +1261,13 @@
}
a = optoas(a, gc.Types[gc.Tptr])
+ var n1 gc.Node
igen(nl, &n1, nil)
n1.Xoffset += int64(gc.Array_array)
n1.Type = gc.Types[gc.Tptr]
+ var tmp gc.Node
gc.Nodconst(&tmp, gc.Types[gc.Tptr], 0)
+ var n2 gc.Node
regalloc(&n2, gc.Types[gc.Tptr], &n1)
gmove(&n1, &n2)
gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n2, &tmp)
@@ -1270,9 +1285,12 @@
}
a = optoas(a, gc.Types[gc.Tptr])
+ var n1 gc.Node
igen(nl, &n1, nil)
n1.Type = gc.Types[gc.Tptr]
+ var tmp gc.Node
gc.Nodconst(&tmp, gc.Types[gc.Tptr], 0)
+ var n2 gc.Node
regalloc(&n2, gc.Types[gc.Tptr], &n1)
gmove(&n1, &n2)
gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n2, &tmp)
@@ -1287,10 +1305,13 @@
break
}
+ var n1 gc.Node
+ var n2 gc.Node
if nr.Ullman >= gc.UINF {
regalloc(&n1, nl.Type, nil)
cgen(nl, &n1)
+ var tmp gc.Node
gc.Tempname(&tmp, nl.Type)
gmove(&n1, &tmp)
regfree(&n1)
@@ -1321,8 +1342,8 @@
cgen(nr, &n2)
cmp:
- l = &n1
- r = &n2
+ l := &n1
+ r := &n2
gins(optoas(gc.OCMP, nr.Type), l, r)
if gc.Isfloat[nr.Type.Etype] != 0 && (a == gc.OLE || a == gc.OGE) {
// To get NaN right, must rewrite x <= y into separate x < y or x = y.
@@ -1355,31 +1376,27 @@
* return n's offset from SP.
*/
func stkof(n *gc.Node) int64 {
- var t *gc.Type
- var flist gc.Iter
- var off int64
-
switch n.Op {
case gc.OINDREG:
return n.Xoffset
case gc.ODOT:
- t = n.Left.Type
+ t := n.Left.Type
if gc.Isptr[t.Etype] != 0 {
break
}
- off = stkof(n.Left)
+ off := stkof(n.Left)
if off == -1000 || off == 1000 {
return off
}
return off + n.Xoffset
case gc.OINDEX:
- t = n.Left.Type
+ t := n.Left.Type
if !gc.Isfixedarray(t) {
break
}
- off = stkof(n.Left)
+ off := stkof(n.Left)
if off == -1000 || off == 1000 {
return off
}
@@ -1391,11 +1408,12 @@
case gc.OCALLMETH,
gc.OCALLINTER,
gc.OCALLFUNC:
- t = n.Left.Type
+ t := n.Left.Type
if gc.Isptr[t.Etype] != 0 {
t = t.Type
}
+ var flist gc.Iter
t = gc.Structfirst(&flist, gc.Getoutarg(t))
if t != nil {
return t.Width + int64(gc.Widthptr) // +widthptr: correct for saved LR
@@ -1412,19 +1430,6 @@
* memmove(&ns, &n, w);
*/
func sgen(n *gc.Node, ns *gc.Node, w int64) {
- var dst gc.Node
- var src gc.Node
- var tmp gc.Node
- var nend gc.Node
- var c int32
- var odst int32
- var osrc int32
- var dir int
- var align int
- var op int
- var p *obj.Prog
- var ploop *obj.Prog
- var l *gc.NodeList
var res *gc.Node = ns
if gc.Debug['g'] != 0 {
@@ -1444,7 +1449,7 @@
// If copying .args, that's all the results, so record definition sites
// for them for the liveness analysis.
if ns.Op == gc.ONAME && ns.Sym.Name == ".args" {
- for l = gc.Curfn.Dcl; l != nil; l = l.Next {
+ for l := gc.Curfn.Dcl; l != nil; l = l.Next {
if l.N.Class == gc.PPARAMOUT {
gc.Gvardef(l.N)
}
@@ -1456,6 +1461,7 @@
// return;
if w == 0 {
// evaluate side effects only.
+ var dst gc.Node
regalloc(&dst, gc.Types[gc.Tptr], nil)
agen(res, &dst)
@@ -1468,8 +1474,9 @@
// want to avoid unaligned access, so have to use
// smaller operations for less aligned types.
// for example moving [4]byte must use 4 MOVB not 1 MOVW.
- align = int(n.Type.Align)
+ align := int(n.Type.Align)
+ var op int
switch align {
default:
gc.Fatal("sgen: invalid alignment %d for %v", align, gc.Tconv(n.Type, 0))
@@ -1490,17 +1497,18 @@
if w%int64(align) != 0 {
gc.Fatal("sgen: unaligned size %d (align=%d) for %v", w, align, gc.Tconv(n.Type, 0))
}
- c = int32(w / int64(align))
+ c := int32(w / int64(align))
// offset on the stack
- osrc = int32(stkof(n))
+ osrc := int32(stkof(n))
- odst = int32(stkof(res))
+ odst := int32(stkof(res))
if osrc != -1000 && odst != -1000 && (osrc == 1000 || odst == 1000) {
// osrc and odst both on stack, and at least one is in
// an unknown position. Could generate code to test
// for forward/backward copy, but instead just copy
// to a temporary location first.
+ var tmp gc.Node
gc.Tempname(&tmp, n.Type)
sgen(n, &tmp, w)
@@ -1514,12 +1522,14 @@
// if we are copying forward on the stack and
// the src and dst overlap, then reverse direction
- dir = align
+ dir := align
if osrc < odst && int64(odst) < int64(osrc)+w {
dir = -dir
}
+ var dst gc.Node
+ var src gc.Node
if n.Ullman >= res.Ullman {
agenr(n, &dst, res) // temporarily use dst
regalloc(&src, gc.Types[gc.Tptr], nil)
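
The dir flip above is the classic memmove direction test: a forward copy is unsafe exactly when the destination starts inside the source's live range, so sgen reverses direction in that case. The test in isolation:

package main

import "fmt"

// backward reports whether a block copy of w bytes must run from the
// end, exactly the osrc < odst && odst < osrc+w test in sgen.
func backward(osrc, odst, w int64) bool {
	return osrc < odst && odst < osrc+w
}

func main() {
	fmt.Println(backward(0, 8, 16)) // true: dst overlaps the source's tail
	fmt.Println(backward(8, 0, 16)) // false: a forward copy is safe
}
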
@@ -1536,19 +1546,20 @@
agenr(n, &src, nil)
}
+ var tmp gc.Node
regalloc(&tmp, gc.Types[gc.Tptr], nil)
// set up end marker
- nend = gc.Node{}
+ nend := gc.Node{}
// move src and dest to the end of block if necessary
if dir < 0 {
if c >= 4 {
regalloc(&nend, gc.Types[gc.Tptr], nil)
- p = gins(ppc64.AMOVD, &src, &nend)
+ gins(ppc64.AMOVD, &src, &nend)
}
- p = gins(ppc64.AADD, nil, &src)
+ p := gins(ppc64.AADD, nil, &src)
p.From.Type = obj.TYPE_CONST
p.From.Offset = w
@@ -1556,7 +1567,7 @@
p.From.Type = obj.TYPE_CONST
p.From.Offset = w
} else {
- p = gins(ppc64.AADD, nil, &src)
+ p := gins(ppc64.AADD, nil, &src)
p.From.Type = obj.TYPE_CONST
p.From.Offset = int64(-dir)
@@ -1566,7 +1577,7 @@
if c >= 4 {
regalloc(&nend, gc.Types[gc.Tptr], nil)
- p = gins(ppc64.AMOVD, &src, &nend)
+ p := gins(ppc64.AMOVD, &src, &nend)
p.From.Type = obj.TYPE_ADDR
p.From.Offset = w
}
@@ -1575,10 +1586,10 @@
// move
// TODO: enable duffcopy for larger copies.
if c >= 4 {
- p = gins(op, &src, &tmp)
+ p := gins(op, &src, &tmp)
p.From.Type = obj.TYPE_MEM
p.From.Offset = int64(dir)
- ploop = p
+ ploop := p
p = gins(op, &tmp, &dst)
p.To.Type = obj.TYPE_MEM
@@ -1594,6 +1605,7 @@
// generate the offsets directly and eliminate the
// ADDs. That will produce shorter, more
// pipeline-able code.
+ var p *obj.Prog
for {
tmp14 := c
c--
@@ -1642,23 +1654,16 @@
func componentgen(nr *gc.Node, nl *gc.Node) bool {
var nodl gc.Node
var nodr gc.Node
- var tmp gc.Node
- var t *gc.Type
- var freel int
- var freer int
- var fldcount int64
- var loffset int64
- var roffset int64
- freel = 0
- freer = 0
+ freel := 0
+ freer := 0
switch nl.Type.Etype {
default:
goto no
case gc.TARRAY:
- t = nl.Type
+ t := nl.Type
// Slices are ok.
if gc.Isslice(t) {
@@ -1675,9 +1680,9 @@
// Small structs with non-fat types are ok.
// Zero-sized structs are treated separately elsewhere.
case gc.TSTRUCT:
- fldcount = 0
+ fldcount := int64(0)
- for t = nl.Type.Type; t != nil; t = t.Down {
+ for t := nl.Type.Type; t != nil; t = t.Down {
if gc.Isfat(t.Type) {
goto no
}
@@ -1713,6 +1718,7 @@
}
} else {
// When zeroing, prepare a register containing zero.
+ var tmp gc.Node
gc.Nodconst(&tmp, nl.Type, 0)
regalloc(&nodr, gc.Types[gc.TUINT], nil)
@@ -1734,11 +1740,11 @@
if nl.Op == gc.ONAME {
gc.Gvardef(nl)
}
- t = nl.Type
+ t := nl.Type
if !gc.Isslice(t) {
nodl.Type = t.Type
nodr.Type = nodl.Type
- for fldcount = 0; fldcount < t.Bound; fldcount++ {
+ for fldcount := int64(0); fldcount < t.Bound; fldcount++ {
if nr == nil {
gc.Clearslim(&nodl)
} else {
@@ -1841,8 +1847,8 @@
if nl.Op == gc.ONAME {
gc.Gvardef(nl)
}
- loffset = nodl.Xoffset
- roffset = nodr.Xoffset
+ loffset := nodl.Xoffset
+ roffset := nodr.Xoffset
// funarg structs may not begin at offset zero.
if nl.Type.Etype == gc.TSTRUCT && nl.Type.Funarg != 0 && nl.Type.Type != nil {
@@ -1852,7 +1858,7 @@
roffset -= nr.Type.Type.Width
}
- for t = nl.Type.Type; t != nil; t = t.Down {
+ for t := nl.Type.Type; t != nil; t = t.Down {
nodl.Xoffset = loffset + t.Width
nodl.Type = t.Type
diff --git a/src/cmd/9g/ggen.go b/src/cmd/9g/ggen.go
index 54bebdd..3197e46 100644
--- a/src/cmd/9g/ggen.go
+++ b/src/cmd/9g/ggen.go
@@ -12,30 +12,25 @@
import "cmd/internal/gc"
func defframe(ptxt *obj.Prog) {
- var frame uint32
- var p *obj.Prog
- var hi int64
- var lo int64
- var l *gc.NodeList
var n *gc.Node
// fill in argument size, stack size
ptxt.To.Type = obj.TYPE_TEXTSIZE
ptxt.To.U.Argsize = int32(gc.Rnd(gc.Curfn.Type.Argwid, int64(gc.Widthptr)))
- frame = uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
+ frame := uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
ptxt.To.Offset = int64(frame)
// insert code to zero ambiguously live variables
// so that the garbage collector only sees initialized values
// when it looks for pointers.
- p = ptxt
+ p := ptxt
- hi = 0
- lo = hi
+ hi := int64(0)
+ lo := hi
// iterate through declarations - they are sorted in decreasing xoffset order.
- for l = gc.Curfn.Dcl; l != nil; l = l.Next {
+ for l := gc.Curfn.Dcl; l != nil; l = l.Next {
n = l.N
if n.Needzero == 0 {
continue
@@ -68,24 +63,19 @@
}
func zerorange(p *obj.Prog, frame int64, lo int64, hi int64) *obj.Prog {
- var cnt int64
- var i int64
- var p1 *obj.Prog
- var f *gc.Node
-
- cnt = hi - lo
+ cnt := hi - lo
if cnt == 0 {
return p
}
if cnt < int64(4*gc.Widthptr) {
- for i = 0; i < cnt; i += int64(gc.Widthptr) {
+ for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
p = appendpp(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, 8+frame+lo+i)
}
} else if cnt <= int64(128*gc.Widthptr) {
p = appendpp(p, ppc64.AADD, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, ppc64.REGRT1, 0)
p.Reg = ppc64.REGSP
p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
- f = gc.Sysfunc("duffzero")
+ f := gc.Sysfunc("duffzero")
gc.Naddr(f, &p.To, 1)
gc.Afunclit(&p.To, f)
p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
@@ -97,7 +87,7 @@
p = appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
p.Reg = ppc64.REGRT1
p = appendpp(p, ppc64.AMOVDU, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGRT1, int64(gc.Widthptr))
- p1 = p
+ p1 := p
p = appendpp(p, ppc64.ACMP, obj.TYPE_REG, ppc64.REGRT1, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
p = appendpp(p, ppc64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
gc.Patch(p, p1)
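
zerorange picks one of three strategies by size: a handful of inline stores from REGZERO for small ranges, a duffzero tail for medium ones, and an AMOVDU loop past 128 pointer-words. The cutoffs, restated as a sketch (this mirrors the branches above, not a compiler API):

package main

import "fmt"

// strategy mirrors zerorange's size cutoffs for zeroing cnt bytes
// with widthptr-sized stores.
func strategy(cnt, widthptr int64) string {
	switch {
	case cnt == 0:
		return "nothing"
	case cnt < 4*widthptr:
		return "unrolled stores"
	case cnt <= 128*widthptr:
		return "duffzero"
	default:
		return "store loop"
	}
}

func main() {
	for _, c := range []int64{0, 16, 512, 8192} {
		fmt.Println(c, strategy(c, 8))
	}
}
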
@@ -107,8 +97,7 @@
}
func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int64, ttype int, treg int, toffset int64) *obj.Prog {
- var q *obj.Prog
- q = gc.Ctxt.NewProg()
+ q := gc.Ctxt.NewProg()
gc.Clearp(q)
q.As = int16(as)
q.Lineno = p.Lineno
@@ -129,8 +118,7 @@
* On power, f must be moved to CTR first.
*/
func ginsBL(reg *gc.Node, f *gc.Node) {
- var p *obj.Prog
- p = gins(ppc64.AMOVD, f, nil)
+ p := gins(ppc64.AMOVD, f, nil)
p.To.Type = obj.TYPE_REG
p.To.Reg = ppc64.REG_CTR
p = gins(ppc64.ABL, reg, nil)
@@ -148,15 +136,8 @@
* proc=3 normal call to C pointer (not Go func value)
*/
func ginscall(f *gc.Node, proc int) {
- var p *obj.Prog
- var reg gc.Node
- var con gc.Node
- var reg2 gc.Node
- var r1 gc.Node
- var extra int32
-
if f.Type != nil {
- extra = 0
+ extra := int32(0)
if proc == 1 || proc == 2 {
extra = 2 * int32(gc.Widthptr)
}
@@ -180,12 +161,13 @@
// The ppc64 NOP is really or r0, r0, r0; use that description
// because the NOP pseudo-instruction would be removed by
// the linker.
+ var reg gc.Node
gc.Nodreg(&reg, gc.Types[gc.TINT], ppc64.REG_R0)
gins(ppc64.AOR, &reg, &reg)
}
- p = gins(ppc64.ABL, nil, f)
+ p := gins(ppc64.ABL, nil, f)
gc.Afunclit(&p.To, f)
if proc == -1 || gc.Noreturn(p) {
gins(obj.AUNDEF, nil, nil)
@@ -193,7 +175,9 @@
break
}
+ var reg gc.Node
gc.Nodreg(&reg, gc.Types[gc.Tptr], ppc64.REGCTXT)
+ var r1 gc.Node
gc.Nodreg(&r1, gc.Types[gc.Tptr], ppc64.REG_R3)
gmove(f, &reg)
reg.Op = gc.OINDREG
@@ -206,14 +190,17 @@
case 1, // call in new proc (go)
2: // deferred call (defer)
+ var con gc.Node
gc.Nodconst(&con, gc.Types[gc.TINT64], int64(gc.Argsize(f.Type)))
+ var reg gc.Node
gc.Nodreg(&reg, gc.Types[gc.TINT64], ppc64.REG_R3)
+ var reg2 gc.Node
gc.Nodreg(&reg2, gc.Types[gc.TINT64], ppc64.REG_R4)
gmove(f, &reg)
gmove(&con, &reg2)
- p = gins(ppc64.AMOVW, &reg2, nil)
+ p := gins(ppc64.AMOVW, &reg2, nil)
p.To.Type = obj.TYPE_MEM
p.To.Reg = ppc64.REGSP
p.To.Offset = 8
@@ -234,7 +221,7 @@
if proc == 2 {
gc.Nodreg(&reg, gc.Types[gc.TINT64], ppc64.REG_R3)
- p = gins(ppc64.ACMP, &reg, nil)
+ p := gins(ppc64.ACMP, &reg, nil)
p.To.Type = obj.TYPE_REG
p.To.Reg = ppc64.REG_R0
p = gc.Gbranch(ppc64.ABEQ, nil, +1)
@@ -249,21 +236,12 @@
* generate res = n.
*/
func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
- var i *gc.Node
- var f *gc.Node
- var tmpi gc.Node
- var nodi gc.Node
- var nodo gc.Node
- var nodr gc.Node
- var nodsp gc.Node
- var p *obj.Prog
-
- i = n.Left
+ i := n.Left
if i.Op != gc.ODOTINTER {
gc.Fatal("cgen_callinter: not ODOTINTER %v", gc.Oconv(int(i.Op), 0))
}
- f = i.Right // field
+ f := i.Right // field
if f.Op != gc.ONAME {
gc.Fatal("cgen_callinter: not ONAME %v", gc.Oconv(int(f.Op), 0))
}
@@ -271,6 +249,7 @@
i = i.Left // interface
if i.Addable == 0 {
+ var tmpi gc.Node
gc.Tempname(&tmpi, i.Type)
cgen(i, &tmpi)
i = &tmpi
@@ -280,8 +259,10 @@
// i is now addable, prepare an indirected
// register to hold its address.
+ var nodi gc.Node
igen(i, &nodi, res) // REG = &inter
+ var nodsp gc.Node
gc.Nodindreg(&nodsp, gc.Types[gc.Tptr], ppc64.REGSP)
nodsp.Xoffset = int64(gc.Widthptr)
@@ -292,6 +273,7 @@
nodi.Xoffset += int64(gc.Widthptr)
cgen(&nodi, &nodsp) // {8 or 24}(SP) = 8(REG) -- i.data
+ var nodo gc.Node
regalloc(&nodo, gc.Types[gc.Tptr], res)
nodi.Type = gc.Types[gc.Tptr]
@@ -299,6 +281,7 @@
cgen(&nodi, &nodo) // REG = 0(REG) -- i.tab
regfree(&nodi)
+ var nodr gc.Node
regalloc(&nodr, gc.Types[gc.Tptr], &nodo)
if n.Left.Xoffset == gc.BADWIDTH {
gc.Fatal("cgen_callinter: badwidth")
@@ -312,7 +295,7 @@
proc = 3
} else {
// go/defer. generate go func value.
- p = gins(ppc64.AMOVD, &nodo, &nodr) // REG = &(32+offset(REG)) -- i.tab->fun[f]
+ p := gins(ppc64.AMOVD, &nodo, &nodr) // REG = &(32+offset(REG)) -- i.tab->fun[f]
p.From.Type = obj.TYPE_ADDR
}
@@ -330,14 +313,11 @@
* proc=2 defer call save away stack
*/
func cgen_call(n *gc.Node, proc int) {
- var t *gc.Type
- var nod gc.Node
- var afun gc.Node
-
if n == nil {
return
}
+ var afun gc.Node
if n.Left.Ullman >= gc.UINF {
// if name involves a fn call
// precompute the address of the fn
@@ -347,10 +327,11 @@
}
gc.Genlist(n.List) // assign the args
- t = n.Left.Type
+ t := n.Left.Type
// call tempname pointer
if n.Left.Ullman >= gc.UINF {
+ var nod gc.Node
regalloc(&nod, gc.Types[gc.Tptr], nil)
gc.Cgen_as(&nod, &afun)
nod.Type = t
@@ -361,6 +342,7 @@
// call pointer
if n.Left.Op != gc.ONAME || n.Left.Class != gc.PFUNC {
+ var nod gc.Node
regalloc(&nod, gc.Types[gc.Tptr], nil)
gc.Cgen_as(&nod, n.Left)
nod.Type = t
@@ -381,22 +363,18 @@
* res = return value from call.
*/
func cgen_callret(n *gc.Node, res *gc.Node) {
- var nod gc.Node
- var fp *gc.Type
- var t *gc.Type
- var flist gc.Iter
-
- t = n.Left.Type
+ t := n.Left.Type
if t.Etype == gc.TPTR32 || t.Etype == gc.TPTR64 {
t = t.Type
}
- fp = gc.Structfirst(&flist, gc.Getoutarg(t))
+ var flist gc.Iter
+ fp := gc.Structfirst(&flist, gc.Getoutarg(t))
if fp == nil {
gc.Fatal("cgen_callret: nil")
}
- nod = gc.Node{}
+ nod := gc.Node{}
nod.Op = gc.OINDREG
nod.Val.U.Reg = ppc64.REGSP
nod.Addable = 1
@@ -412,23 +390,18 @@
* res = &return value from call.
*/
func cgen_aret(n *gc.Node, res *gc.Node) {
- var nod1 gc.Node
- var nod2 gc.Node
- var fp *gc.Type
- var t *gc.Type
- var flist gc.Iter
-
- t = n.Left.Type
+ t := n.Left.Type
if gc.Isptr[t.Etype] != 0 {
t = t.Type
}
- fp = gc.Structfirst(&flist, gc.Getoutarg(t))
+ var flist gc.Iter
+ fp := gc.Structfirst(&flist, gc.Getoutarg(t))
if fp == nil {
gc.Fatal("cgen_aret: nil")
}
- nod1 = gc.Node{}
+ nod1 := gc.Node{}
nod1.Op = gc.OINDREG
nod1.Val.U.Reg = ppc64.REGSP
nod1.Addable = 1
@@ -437,6 +410,7 @@
nod1.Type = fp.Type
if res.Op != gc.OREGISTER {
+ var nod2 gc.Node
regalloc(&nod2, gc.Types[gc.Tptr], res)
agen(&nod1, &nod2)
gins(ppc64.AMOVD, &nod2, res)
@@ -451,8 +425,6 @@
* n->left is assignments to return values.
*/
func cgen_ret(n *gc.Node) {
- var p *obj.Prog
-
if n != nil {
gc.Genlist(n.List) // copy out args
}
@@ -460,7 +432,7 @@
ginscall(gc.Deferreturn, 0)
}
gc.Genlist(gc.Curfn.Exit)
- p = gins(obj.ARET, nil, nil)
+ p := gins(obj.ARET, nil, nil)
if n != nil && n.Op == gc.ORETJMP {
p.To.Name = obj.NAME_EXTERN
p.To.Type = obj.TYPE_ADDR
@@ -476,20 +448,6 @@
* according to op.
*/
func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
- var a int
- var check int
- var t *gc.Type
- var t0 *gc.Type
- var tl gc.Node
- var tr gc.Node
- var tl2 gc.Node
- var tr2 gc.Node
- var nm1 gc.Node
- var nz gc.Node
- var tm gc.Node
- var p1 *obj.Prog
- var p2 *obj.Prog
-
// Have to be careful about handling
// most negative int divided by -1 correctly.
// The hardware will generate an undefined result.
// Also need to explicitly trap on division by zero;
// the hardware will silently generate an undefined result.
// DIVW will leave an unpredictable result in the higher 32 bits,
// so always use DIVD/DIVDU.
- t = nl.Type
+ t := nl.Type
- t0 = t
- check = 0
+ t0 := t
+ check := 0
if gc.Issigned[t.Etype] != 0 {
check = 1
if gc.Isconst(nl, gc.CTINT) && gc.Mpgetfix(nl.Val.U.Xval) != -(1<<uint64(t.Width*8-1)) {
@@ -519,9 +477,11 @@
check = 0
}
- a = optoas(gc.ODIV, t)
+ a := optoas(gc.ODIV, t)
+ var tl gc.Node
regalloc(&tl, t0, nil)
+ var tr gc.Node
regalloc(&tr, t0, nil)
if nl.Ullman >= nr.Ullman {
cgen(nl, &tl)
@@ -533,9 +493,9 @@
if t != t0 {
// Convert
- tl2 = tl
+ tl2 := tl
- tr2 = tr
+ tr2 := tr
tl.Type = t
tr.Type = t
gmove(&tl2, &tl)
@@ -543,7 +503,7 @@
}
// Handle divide-by-zero panic.
- p1 = gins(optoas(gc.OCMP, t), &tr, nil)
+ p1 := gins(optoas(gc.OCMP, t), &tr, nil)
p1.To.Type = obj.TYPE_REG
p1.To.Reg = ppc64.REGZERO
@@ -554,10 +514,12 @@
ginscall(panicdiv, -1)
gc.Patch(p1, gc.Pc)
+ var p2 *obj.Prog
if check != 0 {
+ var nm1 gc.Node
gc.Nodconst(&nm1, t, -1)
gins(optoas(gc.OCMP, t), &tr, &nm1)
- p1 = gc.Gbranch(optoas(gc.ONE, t), nil, +1)
+ p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
if op == gc.ODIV {
// a / (-1) is -a.
gins(optoas(gc.OMINUS, t), nil, &tl)
@@ -565,6 +527,7 @@
gmove(&tl, res)
} else {
// a % (-1) is 0.
+ var nz gc.Node
gc.Nodconst(&nz, t, 0)
gmove(&nz, res)
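
The check flag above guards the one case two's-complement hardware cannot divide: the most negative value by -1, whose true quotient does not fit. The generated fix-up turns a/-1 into -a and a%-1 into 0, which is exactly what Go defines; in plain Go:

package main

import (
	"fmt"
	"math"
)

func main() {
	a := int64(math.MinInt64)
	// Go defines these results; the compiler's -1 check is what keeps
	// the hardware from producing an undefined value or trapping.
	fmt.Println(a / -1) // -9223372036854775808 (wraps back to a)
	fmt.Println(a % -1) // 0
}
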
@@ -580,6 +543,7 @@
gmove(&tl, res)
} else {
// A%B = A-(A/B*B)
+ var tm gc.Node
regalloc(&tm, t, nil)
// patch div to use the 3 register form
@@ -606,135 +570,15 @@
* res = nl % nr
*/
func cgen_div(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
- var n1 gc.Node
- var n2 gc.Node
- var n3 gc.Node
- var w int
- var a int
- var m gc.Magic
-
// TODO(minux): enable division by magic multiply (also need to fix longmod below)
//if(nr->op != OLITERAL)
goto longdiv
- w = int(nl.Type.Width * 8)
-
- // Front end handled 32-bit division. We only need to handle 64-bit.
- // try to do division by multiply by (2^w)/d
- // see hacker's delight chapter 10
- switch gc.Simtype[nl.Type.Etype] {
- default:
- goto longdiv
-
- case gc.TUINT64:
- m.W = w
- m.Ud = uint64(gc.Mpgetfix(nr.Val.U.Xval))
- gc.Umagic(&m)
- if m.Bad != 0 {
- break
- }
- if op == gc.OMOD {
- goto longmod
- }
-
- cgenr(nl, &n1, nil)
- gc.Nodconst(&n2, nl.Type, int64(m.Um))
- regalloc(&n3, nl.Type, res)
- cgen_hmul(&n1, &n2, &n3)
-
- if m.Ua != 0 {
- // need to add numerator accounting for overflow
- gins(optoas(gc.OADD, nl.Type), &n1, &n3)
-
- gc.Nodconst(&n2, nl.Type, 1)
- gins(optoas(gc.ORROTC, nl.Type), &n2, &n3)
- gc.Nodconst(&n2, nl.Type, int64(m.S)-1)
- gins(optoas(gc.ORSH, nl.Type), &n2, &n3)
- } else {
- gc.Nodconst(&n2, nl.Type, int64(m.S))
- gins(optoas(gc.ORSH, nl.Type), &n2, &n3) // shift dx
- }
-
- gmove(&n3, res)
- regfree(&n1)
- regfree(&n3)
- return
-
- case gc.TINT64:
- m.W = w
- m.Sd = gc.Mpgetfix(nr.Val.U.Xval)
- gc.Smagic(&m)
- if m.Bad != 0 {
- break
- }
- if op == gc.OMOD {
- goto longmod
- }
-
- cgenr(nl, &n1, res)
- gc.Nodconst(&n2, nl.Type, m.Sm)
- regalloc(&n3, nl.Type, nil)
- cgen_hmul(&n1, &n2, &n3)
-
- if m.Sm < 0 {
- // need to add numerator
- gins(optoas(gc.OADD, nl.Type), &n1, &n3)
- }
-
- gc.Nodconst(&n2, nl.Type, int64(m.S))
- gins(optoas(gc.ORSH, nl.Type), &n2, &n3) // shift n3
-
- gc.Nodconst(&n2, nl.Type, int64(w)-1)
-
- gins(optoas(gc.ORSH, nl.Type), &n2, &n1) // -1 iff num is neg
- gins(optoas(gc.OSUB, nl.Type), &n1, &n3) // added
-
- if m.Sd < 0 {
- // this could probably be removed
- // by factoring it into the multiplier
- gins(optoas(gc.OMINUS, nl.Type), nil, &n3)
- }
-
- gmove(&n3, res)
- regfree(&n1)
- regfree(&n3)
- return
- }
-
- goto longdiv
-
// division and mod using (slow) hardware instruction
longdiv:
dodiv(op, nl, nr, res)
return
-
- // mod using formula A%B = A-(A/B*B) but
- // we know that there is a fast algorithm for A/B
-longmod:
- regalloc(&n1, nl.Type, res)
-
- cgen(nl, &n1)
- regalloc(&n2, nl.Type, nil)
- cgen_div(gc.ODIV, &n1, nr, &n2)
- a = optoas(gc.OMUL, nl.Type)
- if w == 8 {
- }
- // use 2-operand 16-bit multiply
- // because there is no 2-operand 8-bit multiply
- //a = AIMULW;
- if !gc.Smallintconst(nr) {
- regalloc(&n3, nl.Type, nil)
- cgen(nr, &n3)
- gins(a, &n3, &n2)
- regfree(&n3)
- } else {
- gins(a, nr, &n2)
- }
- gins(optoas(gc.OSUB, nl.Type), &n2, &n1)
- gmove(&n1, res)
- regfree(&n1)
- regfree(&n2)
}
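
The block deleted above was the unused 64-bit "division by magic multiply" path (Hacker's Delight, chapter 10): division by a constant d becomes a high multiply by an approximation of 2^w/d plus shifts. The idea for a single divisor, runnable (the constant is (2^33+1)/3; this only illustrates the technique, not the deleted code):

package main

import "fmt"

// div3 divides a uint32 by 3 without a divide instruction:
// multiply by (2^33+1)/3 = 0xAAAAAAAB and keep bits 33 and up.
func div3(x uint32) uint32 {
	return uint32(uint64(x) * 0xAAAAAAAB >> 33)
}

func main() {
	for _, x := range []uint32{0, 1, 2, 3, 100, 1<<32 - 1} {
		if div3(x) != x/3 {
			fmt.Println("mismatch at", x)
		}
	}
	fmt.Println(div3(100)) // 33
}
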
/*
@@ -742,30 +586,25 @@
* res = (nl*nr) >> width
*/
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
- var w int
- var n1 gc.Node
- var n2 gc.Node
- var tmp *gc.Node
- var t *gc.Type
- var p *obj.Prog
-
// largest ullman on left.
if nl.Ullman < nr.Ullman {
- tmp = nl
+ tmp := (*gc.Node)(nl)
nl = nr
nr = tmp
}
- t = nl.Type
- w = int(t.Width * 8)
+ t := (*gc.Type)(nl.Type)
+ w := int(int(t.Width * 8))
+ var n1 gc.Node
cgenr(nl, &n1, res)
+ var n2 gc.Node
cgenr(nr, &n2, nil)
switch gc.Simtype[t.Etype] {
case gc.TINT8,
gc.TINT16,
gc.TINT32:
gins(optoas(gc.OMUL, t), &n2, &n1)
- p = gins(ppc64.ASRAD, nil, &n1)
+ p := (*obj.Prog)(gins(ppc64.ASRAD, nil, &n1))
p.From.Type = obj.TYPE_CONST
p.From.Offset = int64(w)
@@ -773,16 +612,16 @@
gc.TUINT16,
gc.TUINT32:
gins(optoas(gc.OMUL, t), &n2, &n1)
- p = gins(ppc64.ASRD, nil, &n1)
+ p := (*obj.Prog)(gins(ppc64.ASRD, nil, &n1))
p.From.Type = obj.TYPE_CONST
p.From.Offset = int64(w)
case gc.TINT64,
gc.TUINT64:
if gc.Issigned[t.Etype] != 0 {
- p = gins(ppc64.AMULHD, &n2, &n1)
+ gins(ppc64.AMULHD, &n2, &n1)
} else {
- p = gins(ppc64.AMULHDU, &n2, &n1)
+ gins(ppc64.AMULHDU, &n2, &n1)
}
default:
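
cgen_hmul wants only the high half of a product: 64-bit operands get a single AMULHD/AMULHDU, while narrower types multiply in full and shift the top half down by the operand width. math/bits exposes the same primitive:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// hi is what AMULHDU leaves behind: the upper 64 bits of the
	// 128-bit product, with the low half discarded.
	hi, _ := bits.Mul64(0xdeadbeefcafebabe, 16)
	fmt.Printf("%#x\n", hi) // 0xd

	// For narrow types the same value comes from a full multiply
	// plus a shift by the width, as in the TINT8..TUINT32 cases.
	a, b := uint32(0x80000000), uint32(4)
	fmt.Println(uint64(a) * uint64(b) >> 32) // 2
}
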
@@ -803,21 +642,18 @@
var n1 gc.Node
var n2 gc.Node
var n3 gc.Node
- var n4 gc.Node
- var n5 gc.Node
- var a int
- var p1 *obj.Prog
- var sc uint64
var tcount *gc.Type
- a = optoas(op, nl.Type)
+ a := int(optoas(op, nl.Type))
if nr.Op == gc.OLITERAL {
+ var n1 gc.Node
regalloc(&n1, nl.Type, res)
cgen(nl, &n1)
- sc = uint64(gc.Mpgetfix(nr.Val.U.Xval))
+ sc := uint64(uint64(gc.Mpgetfix(nr.Val.U.Xval)))
if sc >= uint64(nl.Type.Width*8) {
// large shift gets 2 shifts by width-1
+ var n3 gc.Node
gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
gins(a, &n3, &n1)
@@ -831,12 +667,14 @@
}
if nl.Ullman >= gc.UINF {
+ var n4 gc.Node
gc.Tempname(&n4, nl.Type)
cgen(nl, &n4)
nl = &n4
}
if nr.Ullman >= gc.UINF {
+ var n5 gc.Node
gc.Tempname(&n5, nr.Type)
cgen(nr, &n5)
nr = &n5
@@ -872,7 +710,7 @@
if !bounded {
gc.Nodconst(&n3, tcount, nl.Type.Width*8)
gins(optoas(gc.OCMP, tcount), &n1, &n3)
- p1 = gc.Gbranch(optoas(gc.OLT, tcount), nil, +1)
+ p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, tcount), nil, +1))
if op == gc.ORSH && gc.Issigned[nl.Type.Etype] != 0 {
gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
gins(a, &n3, &n2)
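
The comparison against the operand width above enforces Go's shift semantics: a count at or beyond the width must produce 0, or all sign bits for a signed right shift, which the oversized path gets by shifting twice by width-1. What the check preserves:

package main

import "fmt"

func main() {
	x := int64(-8)
	n := uint(100) // hardware typically masks or truncates oversized counts;
	// the emitted comparison is what preserves Go's semantics.
	fmt.Println(x >> n)         // -1: oversized signed right shift fills with sign bits
	fmt.Println(uint64(8) << n) // 0: oversized left shift drains to zero
}
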
@@ -895,46 +733,39 @@
}
func clearfat(nl *gc.Node) {
- var w uint64
- var c uint64
- var q uint64
- var t uint64
- var boff uint64
- var dst gc.Node
- var end gc.Node
- var r0 gc.Node
- var f *gc.Node
- var p *obj.Prog
- var pl *obj.Prog
-
/* clear a fat object */
if gc.Debug['g'] != 0 {
fmt.Printf("clearfat %v (%v, size: %d)\n", gc.Nconv(nl, 0), gc.Tconv(nl.Type, 0), nl.Type.Width)
}
- w = uint64(nl.Type.Width)
+ w := uint64(uint64(nl.Type.Width))
// Avoid taking the address for simple enough types.
//if(componentgen(N, nl))
// return;
- c = w % 8 // bytes
- q = w / 8 // dwords
+ c := uint64(w % 8) // bytes
+ q := uint64(w / 8) // dwords
if reg[ppc64.REGRT1] > 0 {
gc.Fatal("R%d in use during clearfat", ppc64.REGRT1)
}
+ var r0 gc.Node
gc.Nodreg(&r0, gc.Types[gc.TUINT64], ppc64.REG_R0) // r0 is always zero
+ var dst gc.Node
gc.Nodreg(&dst, gc.Types[gc.Tptr], ppc64.REGRT1)
reg[ppc64.REGRT1]++
agen(nl, &dst)
+ var boff uint64
+ var p *obj.Prog
if q > 128 {
p = gins(ppc64.ASUB, nil, &dst)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 8
+ var end gc.Node
regalloc(&end, gc.Types[gc.Tptr], nil)
p = gins(ppc64.AMOVD, &dst, &end)
p.From.Type = obj.TYPE_ADDR
@@ -943,7 +774,7 @@
p = gins(ppc64.AMOVDU, &r0, &dst)
p.To.Type = obj.TYPE_MEM
p.To.Offset = 8
- pl = p
+ pl := (*obj.Prog)(p)
p = gins(ppc64.ACMP, &dst, &end)
gc.Patch(gc.Gbranch(ppc64.ABNE, nil, 0), pl)
@@ -956,7 +787,7 @@
p = gins(ppc64.ASUB, nil, &dst)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 8
- f = gc.Sysfunc("duffzero")
+ f := (*gc.Node)(gc.Sysfunc("duffzero"))
p = gins(obj.ADUFFZERO, nil, f)
gc.Afunclit(&p.To, f)
@@ -966,7 +797,7 @@
// duffzero leaves R3 on the last zeroed dword
boff = 8
} else {
- for t = 0; t < q; t++ {
+ for t := uint64(0); t < q; t++ {
p = gins(ppc64.AMOVD, &r0, &dst)
p.To.Type = obj.TYPE_MEM
p.To.Offset = int64(8 * t)
@@ -975,7 +806,7 @@
boff = 8 * q
}
- for t = 0; t < c; t++ {
+ for t := uint64(0); t < c; t++ {
p = gins(ppc64.AMOVB, &r0, &dst)
p.To.Type = obj.TYPE_MEM
p.To.Offset = int64(t + boff)
@@ -987,11 +818,10 @@
// Called after regopt and peep have run.
// Expand CHECKNIL pseudo-op into actual nil pointer check.
func expandchecks(firstp *obj.Prog) {
- var p *obj.Prog
var p1 *obj.Prog
var p2 *obj.Prog
- for p = firstp; p != nil; p = p.Link {
+ for p := (*obj.Prog)(firstp); p != nil; p = p.Link {
if gc.Debug_checknil != 0 && gc.Ctxt.Debugvlog != 0 {
fmt.Printf("expandchecks: %v\n", p)
}
diff --git a/src/cmd/9g/gsubr.go b/src/cmd/9g/gsubr.go
index 91e87ff..9a00434 100644
--- a/src/cmd/9g/gsubr.go
+++ b/src/cmd/9g/gsubr.go
@@ -63,16 +63,14 @@
}
func ginit() {
- var i int
-
- for i = 0; i < len(reg); i++ {
+ for i := int(0); i < len(reg); i++ {
reg[i] = 1
}
- for i = 0; i < ppc64.NREG+ppc64.NFREG; i++ {
+ for i := int(0); i < ppc64.NREG+ppc64.NFREG; i++ {
reg[i] = 0
}
- for i = 0; i < len(resvd); i++ {
+ for i := int(0); i < len(resvd); i++ {
reg[resvd[i]-ppc64.REG_R0]++
}
}
@@ -80,13 +78,11 @@
var regpc [len(reg)]uint32
func gclean() {
- var i int
-
- for i = 0; i < len(resvd); i++ {
+ for i := int(0); i < len(resvd); i++ {
reg[resvd[i]-ppc64.REG_R0]--
}
- for i = 0; i < len(reg); i++ {
+ for i := int(0); i < len(reg); i++ {
if reg[i] != 0 {
gc.Yyerror("reg %v left allocated, %p\n", gc.Ctxt.Rconv(i+ppc64.REG_R0), regpc[i])
}
@@ -94,10 +90,9 @@
}
func anyregalloc() bool {
- var i int
var j int
- for i = 0; i < len(reg); i++ {
+ for i := int(0); i < len(reg); i++ {
if reg[i] == 0 {
goto ok
}
@@ -119,20 +114,15 @@
* caller must regfree(n).
*/
func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) {
- var i int
- var et int
- var fixfree int
- var fltfree int
-
if t == nil {
gc.Fatal("regalloc: t nil")
}
- et = int(gc.Simtype[t.Etype])
+ et := int(int(gc.Simtype[t.Etype]))
if gc.Debug['r'] != 0 {
- fixfree = 0
- fltfree = 0
- for i = ppc64.REG_R0; i < ppc64.REG_F31; i++ {
+ fixfree := int(0)
+ fltfree := int(0)
+ for i := int(ppc64.REG_R0); i < ppc64.REG_F31; i++ {
if reg[i-ppc64.REG_R0] == 0 {
if i < ppc64.REG_F0 {
fixfree++
@@ -145,6 +135,7 @@
fmt.Printf("regalloc fix %d flt %d free\n", fixfree, fltfree)
}
+ var i int
switch et {
case gc.TINT8,
gc.TUINT8,
@@ -172,7 +163,7 @@
}
gc.Flusherrors()
- for i = ppc64.REG_R0; i < ppc64.REG_R0+ppc64.NREG; i++ {
+ for i := int(ppc64.REG_R0); i < ppc64.REG_R0+ppc64.NREG; i++ {
fmt.Printf("R%d %p\n", i, regpc[i-ppc64.REG_R0])
}
gc.Fatal("out of fixed registers")
@@ -194,7 +185,7 @@
}
gc.Flusherrors()
- for i = ppc64.REG_F0; i < ppc64.REG_F0+ppc64.NREG; i++ {
+ for i := int(ppc64.REG_F0); i < ppc64.REG_F0+ppc64.NREG; i++ {
fmt.Printf("F%d %p\n", i, regpc[i-ppc64.REG_R0])
}
gc.Fatal("out of floating registers")
@@ -214,15 +205,13 @@
}
func regfree(n *gc.Node) {
- var i int
-
if n.Op == gc.ONAME {
return
}
if n.Op != gc.OREGISTER && n.Op != gc.OINDREG {
gc.Fatal("regfree: not a register")
}
- i = int(n.Val.U.Reg) - ppc64.REG_R0
+ i := int(int(n.Val.U.Reg) - ppc64.REG_R0)
if i == ppc64.REGSP-ppc64.REG_R0 {
return
}
@@ -244,13 +233,13 @@
*/
func ginscon(as int, c int64, n2 *gc.Node) {
var n1 gc.Node
- var ntmp gc.Node
gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
if as != ppc64.AMOVD && (c < -ppc64.BIG || c > ppc64.BIG) {
// cannot have more than 16-bit of immediate in ADD, etc.
// instead, MOV into register first.
+ var ntmp gc.Node
regalloc(&ntmp, gc.Types[gc.TINT64], nil)
gins(ppc64.AMOVD, &n1, &ntmp)
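
ginscon's BIG test exists because ppc64 arithmetic immediates are 16-bit signed fields; anything larger is first materialized with AMOVD. A constant just outside that range can also be synthesized from two 16-bit halves, where the sign extension of the low half must be compensated (a sketch; names below are illustrative):

package main

import "fmt"

// splitImm breaks a 32-bit constant into the (high, low) halves an
// addis/addi pair would use; low is sign-extended, so high must
// compensate when bit 15 of the constant is set.
func splitImm(c int32) (hi, lo int16) {
	lo = int16(c)
	hi = int16((c - int32(lo)) >> 16)
	return
}

func main() {
	c := int32(0x12348765)
	hi, lo := splitImm(c)
	fmt.Println(int32(hi)<<16+int32(lo) == c) // true
}
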
@@ -268,7 +257,6 @@
*/
func ginscon2(as int, n2 *gc.Node, c int64) {
var n1 gc.Node
- var ntmp gc.Node
gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
@@ -290,6 +278,7 @@
}
// MOV n1 into register first
+ var ntmp gc.Node
regalloc(&ntmp, gc.Types[gc.TINT64], nil)
gins(ppc64.AMOVD, &n1, &ntmp)
@@ -328,24 +317,13 @@
* hard part is conversions.
*/
func gmove(f *gc.Node, t *gc.Node) {
- var a int
- var ft int
- var tt int
- var cvt *gc.Type
- var r1 gc.Node
- var r2 gc.Node
- var r3 gc.Node
- var con gc.Node
- var p1 *obj.Prog
- var p2 *obj.Prog
-
if gc.Debug['M'] != 0 {
fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, obj.FmtLong), gc.Nconv(t, obj.FmtLong))
}
- ft = gc.Simsimtype(f.Type)
- tt = gc.Simsimtype(t.Type)
- cvt = t.Type
+ ft := int(gc.Simsimtype(f.Type))
+ tt := int(gc.Simsimtype(t.Type))
+ cvt := (*gc.Type)(t.Type)
if gc.Iscomplex[ft] != 0 || gc.Iscomplex[tt] != 0 {
gc.Complexmove(f, t)
@@ -353,12 +331,16 @@
}
// cannot have two memory operands
+ var r2 gc.Node
+ var r1 gc.Node
+ var a int
if gc.Ismem(f) && gc.Ismem(t) {
goto hard
}
// convert constant to desired type
if f.Op == gc.OLITERAL {
+ var con gc.Node
switch tt {
default:
gc.Convconst(&con, t.Type, &f.Val)
@@ -366,7 +348,9 @@
case gc.TINT32,
gc.TINT16,
gc.TINT8:
+ var con gc.Node
gc.Convconst(&con, gc.Types[gc.TINT64], &f.Val)
+ var r1 gc.Node
regalloc(&r1, con.Type, t)
gins(ppc64.AMOVD, &con, &r1)
gmove(&r1, t)
@@ -376,7 +360,9 @@
case gc.TUINT32,
gc.TUINT16,
gc.TUINT8:
+ var con gc.Node
gc.Convconst(&con, gc.Types[gc.TUINT64], &f.Val)
+ var r1 gc.Node
regalloc(&r1, con.Type, t)
gins(ppc64.AMOVD, &con, &r1)
gmove(&r1, t)
@@ -559,22 +545,24 @@
gc.TFLOAT64<<16 | gc.TUINT64:
bignodes()
+ var r1 gc.Node
regalloc(&r1, gc.Types[ft], f)
gmove(f, &r1)
if tt == gc.TUINT64 {
regalloc(&r2, gc.Types[gc.TFLOAT64], nil)
gmove(&bigf, &r2)
gins(ppc64.AFCMPU, &r1, &r2)
- p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1)
+ p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1))
gins(ppc64.AFSUB, &r2, &r1)
gc.Patch(p1, gc.Pc)
regfree(&r2)
}
regalloc(&r2, gc.Types[gc.TFLOAT64], nil)
+ var r3 gc.Node
regalloc(&r3, gc.Types[gc.TINT64], t)
gins(ppc64.AFCTIDZ, &r1, &r2)
- p1 = gins(ppc64.AFMOVD, &r2, nil)
+ p1 := (*obj.Prog)(gins(ppc64.AFMOVD, &r2, nil))
p1.To.Type = obj.TYPE_MEM
p1.To.Reg = ppc64.REGSP
p1.To.Offset = -8
@@ -585,7 +573,7 @@
regfree(&r2)
regfree(&r1)
if tt == gc.TUINT64 {
- p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1) // use CR0 here again
+ p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1)) // use CR0 here again
gc.Nodreg(&r1, gc.Types[gc.TINT64], ppc64.REGTMP)
gins(ppc64.AMOVD, &bigi, &r1)
gins(ppc64.AADD, &r1, &r3)
@@ -622,21 +610,22 @@
gc.TUINT64<<16 | gc.TFLOAT64:
bignodes()
+ var r1 gc.Node
regalloc(&r1, gc.Types[gc.TINT64], nil)
gmove(f, &r1)
if ft == gc.TUINT64 {
gc.Nodreg(&r2, gc.Types[gc.TUINT64], ppc64.REGTMP)
gmove(&bigi, &r2)
gins(ppc64.ACMPU, &r1, &r2)
- p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1)
- p2 = gins(ppc64.ASRD, nil, &r1)
+ p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1))
+ p2 := (*obj.Prog)(gins(ppc64.ASRD, nil, &r1))
p2.From.Type = obj.TYPE_CONST
p2.From.Offset = 1
gc.Patch(p1, gc.Pc)
}
regalloc(&r2, gc.Types[gc.TFLOAT64], t)
- p1 = gins(ppc64.AMOVD, &r1, nil)
+ p1 := (*obj.Prog)(gins(ppc64.AMOVD, &r1, nil))
p1.To.Type = obj.TYPE_MEM
p1.To.Reg = ppc64.REGSP
p1.To.Offset = -8
@@ -647,7 +636,7 @@
gins(ppc64.AFCFID, &r2, &r2)
regfree(&r1)
if ft == gc.TUINT64 {
- p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1) // use CR0 here again
+ p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1)) // use CR0 here again
gc.Nodreg(&r1, gc.Types[gc.TFLOAT64], ppc64.FREGTWO)
gins(ppc64.AFMUL, &r1, &r2)
gc.Patch(p1, gc.Pc)
@@ -702,24 +691,19 @@
* as f, t
*/
func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
- var w int32
- var p *obj.Prog
- var af obj.Addr
- var at obj.Addr
-
// TODO(austin): Add self-move test like in 6g (but be careful
// of truncation moves)
- af = obj.Addr{}
+ af := obj.Addr(obj.Addr{})
- at = obj.Addr{}
+ at := obj.Addr(obj.Addr{})
if f != nil {
gc.Naddr(f, &af, 1)
}
if t != nil {
gc.Naddr(t, &at, 1)
}
- p = gc.Prog(as)
+ p := (*obj.Prog)(gc.Prog(as))
if f != nil {
p.From = af
}
@@ -730,7 +714,7 @@
fmt.Printf("%v\n", p)
}
- w = 0
+ w := int32(0)
switch as {
case ppc64.AMOVB,
ppc64.AMOVBU,
@@ -768,8 +752,6 @@
}
func fixlargeoffset(n *gc.Node) {
- var a gc.Node
-
if n == nil {
return
}
@@ -784,7 +766,7 @@
// this is used only in test/fixedbugs/issue6036.go.
gc.Fatal("offset too large: %v", gc.Nconv(n, 0))
- a = *n
+ a := gc.Node(*n)
a.Op = gc.OREGISTER
a.Type = gc.Types[gc.Tptr]
a.Xoffset = 0
@@ -798,13 +780,11 @@
* return Axxx for Oxxx on type t.
*/
func optoas(op int, t *gc.Type) int {
- var a int
-
if t == nil {
gc.Fatal("optoas: t is nil")
}
- a = obj.AXXX
+ a := int(obj.AXXX)
switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
default:
gc.Fatal("optoas: no entry for op=%v type=%v", gc.Oconv(int(op), 0), gc.Tconv(t, 0))
diff --git a/src/cmd/9g/peep.go b/src/cmd/9g/peep.go
index 486b316..f7c0a95 100644
--- a/src/cmd/9g/peep.go
+++ b/src/cmd/9g/peep.go
@@ -40,19 +40,15 @@
var gactive uint32
func peep(firstp *obj.Prog) {
- var g *gc.Graph
- var r *gc.Flow
- var r1 *gc.Flow
- var p *obj.Prog
- var p1 *obj.Prog
- var t int
-
- g = gc.Flowstart(firstp, nil)
+ g := (*gc.Graph)(gc.Flowstart(firstp, nil))
if g == nil {
return
}
gactive = 0
+ var p *obj.Prog
+ var r *gc.Flow
+ var t int
loop1:
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
gc.Dumpit("loop1", g.Start, 0)
@@ -109,7 +105,9 @@
/*
* look for MOVB x,R; MOVB R,R (for small MOVs not handled above)
*/
- for r = g.Start; r != nil; r = r.Link {
+ var p1 *obj.Prog
+ var r1 *gc.Flow
+ for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
p = r.Prog
switch p.As {
default:
@@ -151,7 +149,7 @@
* look for OP x,y,R; CMP R, $0 -> OPCC x,y,R
* when OP can set condition codes correctly
*/
- for r = g.Start; r != nil; r = r.Link {
+ for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
p = r.Prog
switch p.As {
case ppc64.ACMP,
@@ -350,9 +348,7 @@
}
func excise(r *gc.Flow) {
- var p *obj.Prog
-
- p = r.Prog
+ p := (*obj.Prog)(r.Prog)
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("%v ===delete===\n", p)
}
@@ -402,22 +398,17 @@
* above sequences. This returns 1 if it modified any instructions.
*/
func subprop(r0 *gc.Flow) bool {
- var p *obj.Prog
- var v1 *obj.Addr
- var v2 *obj.Addr
- var r *gc.Flow
- var t int
- var info gc.ProgInfo
-
- p = r0.Prog
- v1 = &p.From
+ p := (*obj.Prog)(r0.Prog)
+ v1 := (*obj.Addr)(&p.From)
if !regtyp(v1) {
return false
}
- v2 = &p.To
+ v2 := (*obj.Addr)(&p.To)
if !regtyp(v2) {
return false
}
+ var r *gc.Flow
+ var info gc.ProgInfo
for r = gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
if gc.Uniqs(r) == nil {
break
@@ -469,7 +460,7 @@
}
}
- t = int(v1.Reg)
+ t := int(int(v1.Reg))
v1.Reg = v2.Reg
v2.Reg = int16(t)
if gc.Debug['P'] != 0 {
@@ -491,13 +482,9 @@
* set v2 return success (caller can remove v1->v2 move)
*/
func copyprop(r0 *gc.Flow) bool {
- var p *obj.Prog
- var v1 *obj.Addr
- var v2 *obj.Addr
-
- p = r0.Prog
- v1 = &p.From
- v2 = &p.To
+ p := (*obj.Prog)(r0.Prog)
+ v1 := (*obj.Addr)(&p.From)
+ v2 := (*obj.Addr)(&p.To)
if copyas(v1, v2) {
if gc.Debug['P'] != 0 {
fmt.Printf("eliminating self-move\n", r0.Prog)
@@ -515,9 +502,6 @@
// copy1 replaces uses of v2 with v1 starting at r and returns 1 if
// all uses were rewritten.
func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) bool {
- var t int
- var p *obj.Prog
-
if uint32(r.Active) == gactive {
if gc.Debug['P'] != 0 {
fmt.Printf("act set; return 1\n")
@@ -529,6 +513,8 @@
if gc.Debug['P'] != 0 {
fmt.Printf("copy1 replace %v with %v f=%d\n", gc.Ctxt.Dconv(v2), gc.Ctxt.Dconv(v1), f)
}
+ var t int
+ var p *obj.Prog
for ; r != nil; r = r.S1 {
p = r.Prog
if gc.Debug['P'] != 0 {
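
The two rewritten loops in peep above are independent walks over the same flow graph, so each gets its own r declared in the for clause instead of sharing one function-wide *gc.Flow. A self-contained sketch of the shape (hypothetical list type, not the real gc.Flow):

	package main

	import "fmt"

	type flow struct {
		n    int
		next *flow
	}

	func main() {
		start := &flow{n: 1, next: &flow{n: 2}}

		// First pass: r is scoped to this loop only.
		for r := start; r != nil; r = r.next {
			fmt.Println("pass 1:", r.n)
		}

		// Second pass: a fresh, unrelated r.
		for r := start; r != nil; r = r.next {
			fmt.Println("pass 2:", r.n)
		}
	}
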
diff --git a/src/cmd/9g/prog.go b/src/cmd/9g/prog.go
index e188f0d..b971256 100644
--- a/src/cmd/9g/prog.go
+++ b/src/cmd/9g/prog.go
@@ -108,10 +108,6 @@
func initproginfo() {
var addvariant = []int{V_CC, V_V, V_CC | V_V}
- var as int
- var as2 int
- var i int
- var variant int
if initproginfo_initialized != 0 {
return
@@ -120,7 +116,10 @@
// Perform one-time expansion of instructions in progtable to
// their CC, V, and VCC variants
- for as = 0; as < len(progtable); as++ {
+ var as2 int
+ var i int
+ var variant int
+ for as := int(0); as < len(progtable); as++ {
if progtable[as].Flags == 0 {
continue
}
@@ -272,15 +271,13 @@
var initvariants_initialized int
func initvariants() {
- var i int
- var j int
-
if initvariants_initialized != 0 {
return
}
initvariants_initialized = 1
- for i = 0; i < len(varianttable); i++ {
+ var j int
+ for i := int(0); i < len(varianttable); i++ {
if varianttable[i][0] == 0 {
// Instruction has no variants
varianttable[i][0] = i
@@ -299,9 +296,8 @@
// as2variant returns the variant (V_*) flags of instruction as.
func as2variant(as int) int {
- var i int
initvariants()
- for i = 0; i < len(varianttable[as]); i++ {
+ for i := int(0); i < len(varianttable[as]); i++ {
if varianttable[as][i] == as {
return i
}
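
In initproginfo and initvariants the index moves into the for clause, while helpers like as2 and variant that are reassigned on every iteration keep a single declaration immediately above the loop — the innermost block that still covers all their uses. Sketch with hypothetical names (a human might write sq := i*i inside the body; the mechanical rewrite keeps one declaration per variable):

	package main

	import "fmt"

	func main() {
		// One declaration just above the loop rather than at the
		// top of the function; the index lives in the for clause.
		var sq int
		for i := 0; i < 3; i++ {
			sq = i * i
			fmt.Println(sq)
		}
	}
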
diff --git a/src/cmd/9g/reg.go b/src/cmd/9g/reg.go
index faed60d..b1b681a 100644
--- a/src/cmd/9g/reg.go
+++ b/src/cmd/9g/reg.go
@@ -110,10 +110,8 @@
}
func excludedregs() uint64 {
- var regbits uint64
-
// Exclude registers with fixed functions
- regbits = 1<<0 | RtoB(ppc64.REGSP) | RtoB(ppc64.REGG) | RtoB(ppc64.REGTLS)
+ regbits := uint64(1<<0 | RtoB(ppc64.REGSP) | RtoB(ppc64.REGG) | RtoB(ppc64.REGTLS))
// Also exclude floating point registers with fixed constants
regbits |= RtoB(ppc64.REG_F27) | RtoB(ppc64.REG_F28) | RtoB(ppc64.REG_F29) | RtoB(ppc64.REG_F30) | RtoB(ppc64.REG_F31)
diff --git a/src/cmd/internal/gc/align.go b/src/cmd/internal/gc/align.go
index 994b7a2..a588ca3 100644
--- a/src/cmd/internal/gc/align.go
+++ b/src/cmd/internal/gc/align.go
@@ -22,11 +22,8 @@
}
func offmod(t *Type) {
- var f *Type
- var o int32
-
- o = 0
- for f = t.Type; f != nil; f = f.Down {
+ o := int32(0)
+ for f := t.Type; f != nil; f = f.Down {
if f.Etype != TFIELD {
Fatal("offmod: not TFIELD: %v", Tconv(f, obj.FmtLong))
}
@@ -40,19 +37,14 @@
}
func widstruct(errtype *Type, t *Type, o int64, flag int) int64 {
- var f *Type
- var w int64
- var maxalign int32
- var starto int64
- var lastzero int64
-
- starto = o
- maxalign = int32(flag)
+ starto := o
+ maxalign := int32(flag)
if maxalign < 1 {
maxalign = 1
}
- lastzero = 0
- for f = t.Type; f != nil; f = f.Down {
+ lastzero := int64(0)
+ var w int64
+ for f := t.Type; f != nil; f = f.Down {
if f.Etype != TFIELD {
Fatal("widstruct: not TFIELD: %v", Tconv(f, obj.FmtLong))
}
@@ -118,11 +110,6 @@
}
func dowidth(t *Type) {
- var et int32
- var w int64
- var lno int
- var t1 *Type
-
if Widthptr == 0 {
Fatal("dowidth without betypeinit")
}
@@ -136,7 +123,7 @@
}
if t.Width == -2 {
- lno = int(lineno)
+ lno := int(lineno)
lineno = int32(t.Lineno)
if t.Broke == 0 {
t.Broke = 1
@@ -157,12 +144,12 @@
// defer checkwidth calls until after we're done
defercalc++
- lno = int(lineno)
+ lno := int(lineno)
lineno = int32(t.Lineno)
t.Width = -2
t.Align = 0
- et = int32(t.Etype)
+ et := int32(t.Etype)
switch et {
case TFUNC,
TCHAN,
@@ -177,7 +164,7 @@
}
}
- w = 0
+ w := int64(0)
switch et {
default:
Fatal("dowidth: unknown type: %v", Tconv(t, 0))
@@ -233,13 +220,13 @@
// make fake type to check later to
// trigger channel argument check.
- t1 = typ(TCHANARGS)
+ t1 := typ(TCHANARGS)
t1.Type = t
checkwidth(t1)
case TCHANARGS:
- t1 = t.Type
+ t1 := t.Type
dowidth(t.Type) // just in case
if t1.Type.Width >= 1<<16 {
Yyerror("channel element type too large (>64kB)")
@@ -277,11 +264,9 @@
break
}
if t.Bound >= 0 {
- var cap uint64
-
dowidth(t.Type)
if t.Type.Width != 0 {
- cap = (uint64(Thearch.MAXWIDTH) - 1) / uint64(t.Type.Width)
+ cap := (uint64(Thearch.MAXWIDTH) - 1) / uint64(t.Type.Width)
if uint64(t.Bound) > cap {
Yyerror("type %v larger than address space", Tconv(t, obj.FmtLong))
}
@@ -311,7 +296,7 @@
// make fake type to check later to
// trigger function argument computation.
case TFUNC:
- t1 = typ(TFUNCARGS)
+ t1 := typ(TFUNCARGS)
t1.Type = t
checkwidth(t1)
@@ -322,7 +307,7 @@
// function is 3 cated structures;
// compute their widths as side-effect.
case TFUNCARGS:
- t1 = t.Type
+ t1 := t.Type
w = widstruct(t.Type, *getthis(t1), 0, 0)
w = widstruct(t.Type, *getinarg(t1), w, Widthreg)
@@ -382,8 +367,6 @@
var tlq *TypeList
func checkwidth(t *Type) {
- var l *TypeList
-
if t == nil {
return
}
@@ -404,7 +387,7 @@
}
t.Deferwidth = 1
- l = tlfree
+ l := tlfree
if l != nil {
tlfree = l.next
} else {
@@ -425,12 +408,10 @@
}
func resumecheckwidth() {
- var l *TypeList
-
if defercalc == 0 {
Fatal("resumecheckwidth")
}
- for l = tlq; l != nil; l = tlq {
+ for l := tlq; l != nil; l = tlq {
l.t.Deferwidth = 0
tlq = l.next
dowidth(l.t)
@@ -442,18 +423,11 @@
}
func typeinit() {
- var i int
- var etype int
- var sameas int
- var t *Type
- var s *Sym
- var s1 *Sym
-
if Widthptr == 0 {
Fatal("typeinit before betypeinit")
}
- for i = 0; i < NTYPE; i++ {
+ for i := 0; i < NTYPE; i++ {
Simtype[i] = uint8(i)
}
@@ -463,7 +437,7 @@
Types[TPTR64] = typ(TPTR64)
dowidth(Types[TPTR64])
- t = typ(TUNSAFEPTR)
+ t := typ(TUNSAFEPTR)
Types[TUNSAFEPTR] = t
t.Sym = Pkglookup("Pointer", unsafepkg)
t.Sym.Def = typenod(t)
@@ -475,7 +449,7 @@
Tptr = TPTR64
}
- for i = TINT8; i <= TUINT64; i++ {
+ for i := TINT8; i <= TUINT64; i++ {
Isint[i] = 1
}
Isint[TINT] = 1
@@ -502,7 +476,7 @@
/*
* initialize okfor
*/
- for i = 0; i < NTYPE; i++ {
+ for i := 0; i < NTYPE; i++ {
if Isint[i] != 0 || i == TIDEAL {
okforeq[i] = 1
okforcmp[i] = 1
@@ -566,6 +540,7 @@
okforcmp[TSTRING] = 1
+ var i int
for i = 0; i < len(okfor); i++ {
okfor[i] = okfornone[:]
}
@@ -655,6 +630,10 @@
Simtype[TUNSAFEPTR] = uint8(Tptr)
/* pick up the backend thearch.typedefs */
+ var s1 *Sym
+ var etype int
+ var sameas int
+ var s *Sym
for i = range Thearch.Typedefs {
s = Lookup(Thearch.Typedefs[i].Name)
s1 = Pkglookup(Thearch.Typedefs[i].Name, builtinpkg)
@@ -703,13 +682,11 @@
*/
func Argsize(t *Type) int {
var save Iter
- var fp *Type
- var w int64
var x int64
- w = 0
+ w := int64(0)
- fp = Structfirst(&save, Getoutarg(t))
+ fp := Structfirst(&save, Getoutarg(t))
for fp != nil {
x = fp.Width + fp.Type.Width
if x > w {
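
dowidth above shows the splitting case: the single function-wide t1 (and the twice-assigned lno) becomes an independent, case-scoped variable in each switch arm, since the arms never run together and the old shared variable carried no state between them. A standalone sketch:

	package main

	import "fmt"

	func describe(kind int) {
		switch kind {
		case 0:
			// Each arm declares its own t1.
			t1 := "chan args"
			fmt.Println(t1)
		case 1:
			t1 := "func args"
			fmt.Println(t1)
		}
	}

	func main() {
		describe(0)
		describe(1)
	}
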
diff --git a/src/cmd/internal/gc/bits.go b/src/cmd/internal/gc/bits.go
index 23da356..95421e5 100644
--- a/src/cmd/internal/gc/bits.go
+++ b/src/cmd/internal/gc/bits.go
@@ -67,9 +67,7 @@
}
*/
func bany(a *Bits) bool {
- var i int
-
- for i = 0; i < BITS; i++ {
+ for i := 0; i < BITS; i++ {
if a.b[i] != 0 {
return true
}
@@ -90,10 +88,9 @@
}
*/
func bnum(a Bits) int {
- var i int
var b uint64
- for i = 0; i < BITS; i++ {
+ for i := 0; i < BITS; i++ {
b = a.b[i]
if b != 0 {
return 64*i + Bitno(b)
@@ -105,9 +102,7 @@
}
func blsh(n uint) Bits {
- var c Bits
-
- c = zbits
+ c := zbits
c.b[n/64] = 1 << (n % 64)
return c
}
@@ -125,9 +120,7 @@
}
func Bitno(b uint64) int {
- var i int
-
- for i = 0; i < 64; i++ {
+ for i := 0; i < 64; i++ {
if b&(1<<uint(i)) != 0 {
return i
}
@@ -140,9 +133,8 @@
var fp string
var i int
- var first int
- first = 1
+ first := 1
for bany(&bits) {
i = bnum(bits)
diff --git a/src/cmd/internal/gc/bv.go b/src/cmd/internal/gc/bv.go
index 002b5a4..e7fdd70 100644
--- a/src/cmd/internal/gc/bv.go
+++ b/src/cmd/internal/gc/bv.go
@@ -63,16 +63,13 @@
}
func bvconcat(src1 *Bvec, src2 *Bvec) *Bvec {
- var dst *Bvec
- var i int32
-
- dst = bvalloc(src1.n + src2.n)
- for i = 0; i < src1.n; i++ {
+ dst := bvalloc(src1.n + src2.n)
+ for i := int32(0); i < src1.n; i++ {
if bvget(src1, i) != 0 {
bvset(dst, i)
}
}
- for i = 0; i < src2.n; i++ {
+ for i := int32(0); i < src2.n; i++ {
if bvget(src2, i) != 0 {
bvset(dst, i+src1.n)
}
@@ -90,8 +87,6 @@
// bvnext returns the smallest index >= i for which bvget(bv, i) == 1.
// If there is no such index, bvnext returns -1.
func bvnext(bv *Bvec, i int32) int {
- var w uint32
-
if i >= bv.n {
return -1
}
@@ -110,7 +105,7 @@
}
// Find 1 bit.
- w = bv.b[i>>WORDSHIFT] >> uint(i&WORDMASK)
+ w := bv.b[i>>WORDSHIFT] >> uint(i&WORDMASK)
for w&1 == 0 {
w >>= 1
@@ -121,9 +116,7 @@
}
func bvisempty(bv *Bvec) bool {
- var i int32
-
- for i = 0; i < bv.n; i += WORDBITS {
+ for i := int32(0); i < bv.n; i += WORDBITS {
if bv.b[i>>WORDSHIFT] != 0 {
return false
}
@@ -173,21 +166,17 @@
}
func bvprint(bv *Bvec) {
- var i int32
-
fmt.Printf("#*")
- for i = 0; i < bv.n; i++ {
+ for i := int32(0); i < bv.n; i++ {
fmt.Printf("%d", bvget(bv, i))
}
}
func bvreset(bv *Bvec, i int32) {
- var mask uint32
-
if i < 0 || i >= bv.n {
Fatal("bvreset: index %d is out of bounds with length %d\n", i, bv.n)
}
- mask = ^(1 << uint(i%WORDBITS))
+ mask := uint32(^(1 << uint(i%WORDBITS)))
bv.b[i/WORDBITS] &= mask
}
@@ -198,11 +187,9 @@
}
func bvset(bv *Bvec, i int32) {
- var mask uint32
-
if i < 0 || i >= bv.n {
Fatal("bvset: index %d is out of bounds with length %d\n", i, bv.n)
}
- mask = 1 << uint(i%WORDBITS)
+ mask := uint32(1 << uint(i%WORDBITS))
bv.b[i/WORDBITS] |= mask
}
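
In bvset and bvreset the uint32 conversion has to wrap the whole expression, not just a literal: an untyped 1 shifted by a non-constant amount takes its type from context, and with := that context is gone, so without the conversion mask would default to int and no longer match the uint32 word it masks. Illustration:

	package main

	import "fmt"

	func main() {
		s := uint(3)
		m1 := ^(1 << s)         // untyped 1 defaults to int here
		m2 := uint32(^(1 << s)) // conversion types the whole expression
		fmt.Printf("%T %T\n", m1, m2) // prints: int uint32
	}
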
diff --git a/src/cmd/internal/gc/closure.go b/src/cmd/internal/gc/closure.go
index c2c802e..20a0349 100644
--- a/src/cmd/internal/gc/closure.go
+++ b/src/cmd/internal/gc/closure.go
@@ -13,12 +13,10 @@
* function literals aka closures
*/
func closurehdr(ntype *Node) {
- var n *Node
var name *Node
var a *Node
- var l *NodeList
- n = Nod(OCLOSURE, nil, nil)
+ n := Nod(OCLOSURE, nil, nil)
n.Ntype = ntype
n.Funcdepth = Funcdepth
n.Outerfunc = Curfn
@@ -35,7 +33,7 @@
n.Rlist = ntype.Rlist
ntype.List = nil
ntype.Rlist = nil
- for l = n.List; l != nil; l = l.Next {
+ for l := n.List; l != nil; l = l.Next {
name = l.N.Left
if name != nil {
name = newname(name.Sym)
@@ -48,7 +46,7 @@
ntype.List = list(ntype.List, a)
}
- for l = n.Rlist; l != nil; l = l.Next {
+ for l := n.Rlist; l != nil; l = l.Next {
name = l.N.Left
if name != nil {
name = newname(name.Sym)
@@ -58,15 +56,11 @@
}
func closurebody(body *NodeList) *Node {
- var func_ *Node
- var v *Node
- var l *NodeList
-
if body == nil {
body = list1(Nod(OEMPTY, nil, nil))
}
- func_ = Curfn
+ func_ := Curfn
func_.Nbody = body
func_.Endlineno = lineno
funcbody(func_)
@@ -75,7 +69,8 @@
// ordinary ones in the symbol table; see oldname.
// unhook them.
// make the list of pointers for the closure call.
- for l = func_.Cvars; l != nil; l = l.Next {
+ var v *Node
+ for l := func_.Cvars; l != nil; l = l.Next {
v = l.N
v.Closure.Closure = v.Outer
v.Outerexpr = oldname(v.Sym)
@@ -85,12 +80,9 @@
}
func typecheckclosure(func_ *Node, top int) {
- var oldfn *Node
var n *Node
- var l *NodeList
- var olddd int
- for l = func_.Cvars; l != nil; l = l.Next {
+ for l := func_.Cvars; l != nil; l = l.Next {
n = l.N.Closure
if n.Captured == 0 {
n.Captured = 1
@@ -106,13 +98,13 @@
}
}
- for l = func_.Dcl; l != nil; l = l.Next {
+ for l := func_.Dcl; l != nil; l = l.Next {
if l.N.Op == ONAME && (l.N.Class == PPARAM || l.N.Class == PPARAMOUT) {
l.N.Decldepth = 1
}
}
- oldfn = Curfn
+ oldfn := Curfn
typecheck(&func_.Ntype, Etype)
func_.Type = func_.Ntype.Type
func_.Top = top
@@ -123,7 +115,7 @@
// underlying closure function we create is added to xtop.
if Curfn != nil && func_.Type != nil {
Curfn = func_
- olddd = decldepth
+ olddd := decldepth
decldepth = 1
typechecklist(func_.Nbody, Etop)
decldepth = olddd
@@ -143,16 +135,12 @@
var closurename_closgen int
func closurename(n *Node) *Sym {
- var outer string
- var prefix string
- var gen int
-
if n.Sym != nil {
return n.Sym
}
- gen = 0
- outer = ""
- prefix = ""
+ gen := 0
+ outer := ""
+ prefix := ""
if n.Outerfunc == nil {
// Global closure.
outer = "glob"
@@ -192,20 +180,17 @@
}
func makeclosure(func_ *Node) *Node {
- var xtype *Node
- var xfunc *Node
-
/*
* wrap body in external function
* that begins by reading closure parameters.
*/
- xtype = Nod(OTFUNC, nil, nil)
+ xtype := Nod(OTFUNC, nil, nil)
xtype.List = func_.List
xtype.Rlist = func_.Rlist
// create the function
- xfunc = Nod(ODCLFUNC, nil, nil)
+ xfunc := Nod(ODCLFUNC, nil, nil)
xfunc.Nname = newname(closurename(func_))
xfunc.Nname.Sym.Flags |= SymExported // disable export
@@ -239,18 +224,15 @@
// We use value capturing for values <= 128 bytes that are never reassigned
// after capturing (effectively constant).
func capturevars(xfunc *Node) {
- var func_ *Node
var v *Node
var outer *Node
- var l *NodeList
- var lno int
- lno = int(lineno)
+ lno := int(lineno)
lineno = xfunc.Lineno
- func_ = xfunc.Closure
+ func_ := xfunc.Closure
func_.Enter = nil
- for l = func_.Cvars; l != nil; l = l.Next {
+ for l := func_.Cvars; l != nil; l = l.Next {
v = l.N
if v.Type == nil {
// if v->type is nil, it means v looked like it was
@@ -280,13 +262,11 @@
}
if Debug['m'] > 1 {
- var name *Sym
- var how string
- name = nil
+ name := (*Sym)(nil)
if v.Curfn != nil && v.Curfn.Nname != nil {
name = v.Curfn.Nname.Sym
}
- how = "ref"
+ how := "ref"
if v.Byval != 0 {
how = "value"
}
@@ -303,22 +283,9 @@
// transformclosure is called in a separate phase after escape analysis.
// It transform closure bodies to properly reference captured variables.
func transformclosure(xfunc *Node) {
- var func_ *Node
- var cv *Node
- var addr *Node
- var v *Node
- var f *Node
- var l *NodeList
- var body *NodeList
- var param **Type
- var fld *Type
- var offset int64
- var lno int
- var nvar int
-
- lno = int(lineno)
+ lno := int(lineno)
lineno = xfunc.Lineno
- func_ = xfunc.Closure
+ func_ := xfunc.Closure
if func_.Top&Ecall != 0 {
// If the closure is directly called, we transform it to a plain function call
@@ -337,15 +304,18 @@
// }(42, byval, &byref)
// f is ONAME of the actual function.
- f = xfunc.Nname
+ f := xfunc.Nname
// Get pointer to input arguments and rewind to the end.
// We are going to append captured variables to input args.
- param = &getinargx(f.Type).Type
+ param := &getinargx(f.Type).Type
for ; *param != nil; param = &(*param).Down {
}
- for l = func_.Cvars; l != nil; l = l.Next {
+ var v *Node
+ var addr *Node
+ var fld *Type
+ for l := func_.Cvars; l != nil; l = l.Next {
v = l.N
if v.Op == OXXX {
continue
@@ -390,11 +360,14 @@
xfunc.Type = f.Type // update type of ODCLFUNC
} else {
// The closure is not called, so it is going to stay as closure.
- nvar = 0
+ nvar := 0
- body = nil
- offset = int64(Widthptr)
- for l = func_.Cvars; l != nil; l = l.Next {
+ body := (*NodeList)(nil)
+ offset := int64(Widthptr)
+ var addr *Node
+ var v *Node
+ var cv *Node
+ for l := func_.Cvars; l != nil; l = l.Next {
v = l.N
if v.Op == OXXX {
continue
@@ -450,12 +423,6 @@
}
func walkclosure(func_ *Node, init **NodeList) *Node {
- var clos *Node
- var typ *Node
- var typ1 *Node
- var v *Node
- var l *NodeList
-
// If no closure vars, don't bother wrapping.
if func_.Cvars == nil {
return func_.Closure.Nname
@@ -475,10 +442,12 @@
// the struct is unnamed so that closures in multiple packages with the
// same struct type can share the descriptor.
- typ = Nod(OTSTRUCT, nil, nil)
+ typ := Nod(OTSTRUCT, nil, nil)
typ.List = list1(Nod(ODCLFIELD, newname(Lookup(".F")), typenod(Types[TUINTPTR])))
- for l = func_.Cvars; l != nil; l = l.Next {
+ var typ1 *Node
+ var v *Node
+ for l := func_.Cvars; l != nil; l = l.Next {
v = l.N
if v.Op == OXXX {
continue
@@ -490,7 +459,7 @@
typ.List = list(typ.List, Nod(ODCLFIELD, newname(v.Sym), typ1))
}
- clos = Nod(OCOMPLIT, nil, Nod(OIND, typ, nil))
+ clos := Nod(OCOMPLIT, nil, Nod(OIND, typ, nil))
clos.Esc = func_.Esc
clos.Right.Implicit = 1
clos.List = concat(list1(Nod(OCFUNC, func_.Closure.Nname, nil)), func_.Enter)
@@ -541,34 +510,15 @@
var makepartialcall_gopkg *Pkg
func makepartialcall(fn *Node, t0 *Type, meth *Node) *Node {
- var ptr *Node
- var n *Node
- var fld *Node
- var call *Node
- var xtype *Node
- var xfunc *Node
- var cv *Node
- var savecurfn *Node
- var rcvrtype *Type
- var basetype *Type
- var t *Type
- var body *NodeList
- var l *NodeList
- var callargs *NodeList
- var retargs *NodeList
var p string
- var sym *Sym
- var spkg *Pkg
- var i int
- var ddd int
- rcvrtype = fn.Left.Type
+ rcvrtype := fn.Left.Type
if exportname(meth.Sym.Name) {
p = fmt.Sprintf("(%v).%s-fm", Tconv(rcvrtype, obj.FmtLeft|obj.FmtShort), meth.Sym.Name)
} else {
p = fmt.Sprintf("(%v).(%v)-fm", Tconv(rcvrtype, obj.FmtLeft|obj.FmtShort), Sconv(meth.Sym, obj.FmtLeft))
}
- basetype = rcvrtype
+ basetype := rcvrtype
if Isptr[rcvrtype.Etype] != 0 {
basetype = basetype.Type
}
@@ -576,7 +526,7 @@
Fatal("missing base type for %v", Tconv(rcvrtype, 0))
}
- spkg = nil
+ spkg := (*Pkg)(nil)
if basetype.Sym != nil {
spkg = basetype.Sym.Pkg
}
@@ -587,24 +537,26 @@
spkg = makepartialcall_gopkg
}
- sym = Pkglookup(p, spkg)
+ sym := Pkglookup(p, spkg)
if sym.Flags&SymUniq != 0 {
return sym.Def
}
sym.Flags |= SymUniq
- savecurfn = Curfn
+ savecurfn := Curfn
Curfn = nil
- xtype = Nod(OTFUNC, nil, nil)
- i = 0
- l = nil
- callargs = nil
- ddd = 0
- xfunc = Nod(ODCLFUNC, nil, nil)
+ xtype := Nod(OTFUNC, nil, nil)
+ i := 0
+ l := (*NodeList)(nil)
+ callargs := (*NodeList)(nil)
+ ddd := 0
+ xfunc := Nod(ODCLFUNC, nil, nil)
Curfn = xfunc
- for t = getinargx(t0).Type; t != nil; t = t.Down {
+ var fld *Node
+ var n *Node
+ for t := getinargx(t0).Type; t != nil; t = t.Down {
namebuf = fmt.Sprintf("a%d", i)
i++
n = newname(Lookup(namebuf))
@@ -623,8 +575,8 @@
xtype.List = l
i = 0
l = nil
- retargs = nil
- for t = getoutargx(t0).Type; t != nil; t = t.Down {
+ retargs := (*NodeList)(nil)
+ for t := getoutargx(t0).Type; t != nil; t = t.Down {
namebuf = fmt.Sprintf("r%d", i)
i++
n = newname(Lookup(namebuf))
@@ -644,16 +596,16 @@
declare(xfunc.Nname, PFUNC)
// Declare and initialize variable holding receiver.
- body = nil
+ body := (*NodeList)(nil)
xfunc.Needctxt = true
- cv = Nod(OCLOSUREVAR, nil, nil)
+ cv := Nod(OCLOSUREVAR, nil, nil)
cv.Xoffset = int64(Widthptr)
cv.Type = rcvrtype
if int(cv.Type.Align) > Widthptr {
cv.Xoffset = int64(cv.Type.Align)
}
- ptr = Nod(ONAME, nil, nil)
+ ptr := Nod(ONAME, nil, nil)
ptr.Sym = Lookup("rcvr")
ptr.Class = PAUTO
ptr.Addable = 1
@@ -669,13 +621,13 @@
body = list(body, Nod(OAS, ptr, Nod(OADDR, cv, nil)))
}
- call = Nod(OCALL, Nod(OXDOT, ptr, meth), nil)
+ call := Nod(OCALL, Nod(OXDOT, ptr, meth), nil)
call.List = callargs
call.Isddd = uint8(ddd)
if t0.Outtuple == 0 {
body = list(body, call)
} else {
- n = Nod(OAS2, nil, nil)
+ n := Nod(OAS2, nil, nil)
n.List = retargs
n.Rlist = list1(call)
body = list(body, n)
@@ -694,9 +646,6 @@
}
func walkpartialcall(n *Node, init **NodeList) *Node {
- var clos *Node
- var typ *Node
-
// Create closure in the form of a composite literal.
// For x.M with receiver (x) type T, the generated code looks like:
//
@@ -712,11 +661,11 @@
checknil(n.Left, init)
}
- typ = Nod(OTSTRUCT, nil, nil)
+ typ := Nod(OTSTRUCT, nil, nil)
typ.List = list1(Nod(ODCLFIELD, newname(Lookup("F")), typenod(Types[TUINTPTR])))
typ.List = list(typ.List, Nod(ODCLFIELD, newname(Lookup("R")), typenod(n.Left.Type)))
- clos = Nod(OCOMPLIT, nil, Nod(OIND, typ, nil))
+ clos := Nod(OCOMPLIT, nil, Nod(OIND, typ, nil))
clos.Esc = n.Esc
clos.Right.Implicit = 1
clos.List = list1(Nod(OCFUNC, n.Nname.Nname, nil))
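
The (*Sym)(nil), (*Pkg)(nil), and (*NodeList)(nil) initializers in closure.go exist because name := nil is not valid Go — untyped nil has no default type — so the conversion is what carries the pointer type into the short declaration. Sketch (hypothetical type):

	package main

	import "fmt"

	type sym struct{ name string }

	func main() {
		// name := nil       // does not compile: use of untyped nil
		name := (*sym)(nil) // the conversion supplies the type
		fmt.Println(name == nil) // prints: true
	}
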
diff --git a/src/cmd/internal/gc/const.go b/src/cmd/internal/gc/const.go
index 49d3bcc..43c8809 100644
--- a/src/cmd/internal/gc/const.go
+++ b/src/cmd/internal/gc/const.go
@@ -11,31 +11,27 @@
* according to type; return truncated value.
*/
func truncfltlit(oldv *Mpflt, t *Type) *Mpflt {
- var d float64
- var fv *Mpflt
- var v Val
-
if t == nil {
return oldv
}
- v = Val{}
+ v := Val{}
v.Ctype = CTFLT
v.U.Fval = oldv
overflow(v, t)
- fv = new(Mpflt)
+ fv := new(Mpflt)
*fv = *oldv
// convert large precision literal floating
// into limited precision (float64 or float32)
switch t.Etype {
case TFLOAT64:
- d = mpgetflt(fv)
+ d := mpgetflt(fv)
Mpmovecflt(fv, d)
case TFLOAT32:
- d = mpgetflt32(fv)
+ d := mpgetflt32(fv)
Mpmovecflt(fv, d)
}
@@ -56,12 +52,7 @@
* (if n is a named constant, can't edit n->type directly).
*/
func convlit1(np **Node, t *Type, explicit bool) {
- var ct int
- var et int
- var n *Node
- var nn *Node
-
- n = *np
+ n := *np
if n == nil || t == nil || n.Type == nil || isideal(t) || n.Type == t {
return
}
@@ -70,7 +61,7 @@
}
if n.Op == OLITERAL {
- nn = Nod(OXXX, nil, nil)
+ nn := Nod(OXXX, nil, nil)
*nn = *n
n = nn
*np = n
@@ -148,7 +139,8 @@
return
}
- ct = consttype(n)
+ ct := consttype(n)
+ var et int
if ct < 0 {
goto bad
}
@@ -214,7 +206,7 @@
CTRUNE,
CTFLT,
CTCPLX:
- ct = int(n.Val.Ctype)
+ ct := int(n.Val.Ctype)
if Isint[et] != 0 {
switch ct {
default:
@@ -285,24 +277,20 @@
}
func copyval(v Val) Val {
- var i *Mpint
- var f *Mpflt
- var c *Mpcplx
-
switch v.Ctype {
case CTINT,
CTRUNE:
- i = new(Mpint)
+ i := new(Mpint)
mpmovefixfix(i, v.U.Xval)
v.U.Xval = i
case CTFLT:
- f = new(Mpflt)
+ f := new(Mpflt)
mpmovefltflt(f, v.U.Fval)
v.U.Fval = f
case CTCPLX:
- c = new(Mpcplx)
+ c := new(Mpcplx)
mpmovefltflt(&c.Real, &v.U.Cval.Real)
mpmovefltflt(&c.Imag, &v.U.Cval.Imag)
v.U.Cval = c
@@ -312,19 +300,17 @@
}
func tocplx(v Val) Val {
- var c *Mpcplx
-
switch v.Ctype {
case CTINT,
CTRUNE:
- c = new(Mpcplx)
+ c := new(Mpcplx)
Mpmovefixflt(&c.Real, v.U.Xval)
Mpmovecflt(&c.Imag, 0.0)
v.Ctype = CTCPLX
v.U.Cval = c
case CTFLT:
- c = new(Mpcplx)
+ c := new(Mpcplx)
mpmovefltflt(&c.Real, v.U.Fval)
Mpmovecflt(&c.Imag, 0.0)
v.Ctype = CTCPLX
@@ -335,18 +321,16 @@
}
func toflt(v Val) Val {
- var f *Mpflt
-
switch v.Ctype {
case CTINT,
CTRUNE:
- f = new(Mpflt)
+ f := new(Mpflt)
Mpmovefixflt(f, v.U.Xval)
v.Ctype = CTFLT
v.U.Fval = f
case CTCPLX:
- f = new(Mpflt)
+ f := new(Mpflt)
mpmovefltflt(f, &v.U.Cval.Real)
if mpcmpfltc(&v.U.Cval.Imag, 0) != 0 {
Yyerror("constant %v%vi truncated to real", Fconv(&v.U.Cval.Real, obj.FmtSharp), Fconv(&v.U.Cval.Imag, obj.FmtSharp|obj.FmtSign))
@@ -359,14 +343,12 @@
}
func toint(v Val) Val {
- var i *Mpint
-
switch v.Ctype {
case CTRUNE:
v.Ctype = CTINT
case CTFLT:
- i = new(Mpint)
+ i := new(Mpint)
if mpmovefltfix(i, v.U.Fval) < 0 {
Yyerror("constant %v truncated to integer", Fconv(v.U.Fval, obj.FmtSharp))
}
@@ -374,7 +356,7 @@
v.U.Xval = i
case CTCPLX:
- i = new(Mpint)
+ i := new(Mpint)
if mpmovefltfix(i, &v.U.Cval.Real) < 0 {
Yyerror("constant %v%vi truncated to integer", Fconv(&v.U.Cval.Real, obj.FmtSharp), Fconv(&v.U.Cval.Imag, obj.FmtSharp|obj.FmtSign))
}
@@ -444,17 +426,14 @@
}
func tostr(v Val) Val {
- var rune_ uint
- var s *Strlit
-
switch v.Ctype {
case CTINT,
CTRUNE:
if Mpcmpfixfix(v.U.Xval, Minintval[TINT]) < 0 || Mpcmpfixfix(v.U.Xval, Maxintval[TINT]) > 0 {
Yyerror("overflow in int -> string")
}
- rune_ = uint(Mpgetfix(v.U.Xval))
- s = &Strlit{S: string(rune_)}
+ rune_ := uint(Mpgetfix(v.U.Xval))
+ s := &Strlit{S: string(rune_)}
v = Val{}
v.Ctype = CTSTR
v.U.Sval = s
@@ -480,9 +459,7 @@
}
func Isconst(n *Node, ct int) bool {
- var t int
-
- t = consttype(n)
+ t := consttype(n)
// If the caller is asking for CTINT, allow CTRUNE too.
// Makes life easier for back ends.
@@ -490,11 +467,9 @@
}
func saveorig(n *Node) *Node {
- var n1 *Node
-
if n == n.Orig {
// duplicate node for n->orig.
- n1 = Nod(OLITERAL, nil, nil)
+ n1 := Nod(OLITERAL, nil, nil)
n.Orig = n1
*n1 = *n
@@ -507,20 +482,6 @@
* if n is constant, rewrite as OLITERAL node.
*/
func evconst(n *Node) {
- var nl *Node
- var nr *Node
- var norig *Node
- var str *Strlit
- var wl int
- var wr int
- var lno int
- var et int
- var v Val
- var rv Val
- var b Mpint
- var l1 *NodeList
- var l2 *NodeList
-
// pick off just the opcodes that can be
// constant evaluated.
switch n.Op {
@@ -563,7 +524,11 @@
// merge adjacent constants in the argument list.
case OADDSTR:
- for l1 = n.List; l1 != nil; l1 = l1.Next {
+ var nr *Node
+ var nl *Node
+ var str *Strlit
+ var l2 *NodeList
+ for l1 := n.List; l1 != nil; l1 = l1.Next {
if Isconst(l1.N, CTSTR) && l1.Next != nil && Isconst(l1.Next.N, CTSTR) {
// merge from l1 up to but not including l2
str = new(Strlit)
@@ -585,7 +550,7 @@
}
// fix list end pointer.
- for l2 = n.List; l2 != nil; l2 = l2.Next {
+ for l2 := n.List; l2 != nil; l2 = l2.Next {
n.List.End = l2
}
@@ -598,19 +563,24 @@
return
}
- nl = n.Left
+ nl := n.Left
if nl == nil || nl.Type == nil {
return
}
if consttype(nl) < 0 {
return
}
- wl = int(nl.Type.Etype)
+ wl := int(nl.Type.Etype)
if Isint[wl] != 0 || Isfloat[wl] != 0 || Iscomplex[wl] != 0 {
wl = TIDEAL
}
- nr = n.Right
+ nr := n.Right
+ var rv Val
+ var lno int
+ var wr int
+ var v Val
+ var norig *Node
if nr == nil {
goto unary
}
@@ -1020,13 +990,14 @@
case OCOM<<16 | CTINT,
OCOM<<16 | CTRUNE:
- et = Txxx
+ et := Txxx
if nl.Type != nil {
et = int(nl.Type.Etype)
}
// calculate the mask in b
// result will be (a ^ mask)
+ var b Mpint
switch et {
// signed guys change sign
default:
@@ -1107,9 +1078,7 @@
}
func nodlit(v Val) *Node {
- var n *Node
-
- n = Nod(OLITERAL, nil, nil)
+ n := Nod(OLITERAL, nil, nil)
n.Val = v
switch v.Ctype {
default:
@@ -1135,14 +1104,11 @@
}
func nodcplxlit(r Val, i Val) *Node {
- var n *Node
- var c *Mpcplx
-
r = toflt(r)
i = toflt(i)
- c = new(Mpcplx)
- n = Nod(OLITERAL, nil, nil)
+ c := new(Mpcplx)
+ n := Nod(OLITERAL, nil, nil)
n.Type = Types[TIDEAL]
n.Val.U.Cval = c
n.Val.Ctype = CTCPLX
@@ -1159,9 +1125,6 @@
// idealkind returns a constant kind like consttype
// but for an arbitrary "ideal" (untyped constant) expression.
func idealkind(n *Node) int {
- var k1 int
- var k2 int
-
if n == nil || !isideal(n.Type) {
return CTxxx
}
@@ -1186,9 +1149,9 @@
OXOR,
OOR,
OPLUS:
- k1 = idealkind(n.Left)
+ k1 := idealkind(n.Left)
- k2 = idealkind(n.Right)
+ k2 := idealkind(n.Right)
if k1 > k2 {
return k1
} else {
@@ -1227,26 +1190,21 @@
}
func defaultlit(np **Node, t *Type) {
- var lno int
- var ctype int
- var n *Node
- var nn *Node
- var t1 *Type
-
- n = *np
+ n := *np
if n == nil || !isideal(n.Type) {
return
}
if n.Op == OLITERAL {
- nn = Nod(OXXX, nil, nil)
+ nn := Nod(OXXX, nil, nil)
*nn = *n
n = nn
*np = n
}
- lno = int(setlineno(n))
- ctype = idealkind(n)
+ lno := int(setlineno(n))
+ ctype := idealkind(n)
+ var t1 *Type
switch ctype {
default:
if t != nil {
@@ -1266,7 +1224,7 @@
}
if n.Val.Ctype == CTSTR {
- t1 = Types[TSTRING]
+ t1 := Types[TSTRING]
Convlit(np, t1)
break
}
@@ -1277,7 +1235,7 @@
Fatal("defaultlit: idealkind is CTxxx: %v", Nconv(n, obj.FmtSign))
case CTBOOL:
- t1 = Types[TBOOL]
+ t1 := Types[TBOOL]
if t != nil && t.Etype == TBOOL {
t1 = t
}
@@ -1330,13 +1288,8 @@
* force means must assign concrete (non-ideal) type.
*/
func defaultlit2(lp **Node, rp **Node, force int) {
- var l *Node
- var r *Node
- var lkind int
- var rkind int
-
- l = *lp
- r = *rp
+ l := *lp
+ r := *rp
if l.Type == nil || r.Type == nil {
return
}
@@ -1358,8 +1311,8 @@
Convlit(rp, Types[TBOOL])
}
- lkind = idealkind(l)
- rkind = idealkind(r)
+ lkind := idealkind(l)
+ rkind := idealkind(r)
if lkind == CTCPLX || rkind == CTCPLX {
Convlit(lp, Types[TCOMPLEX128])
Convlit(rp, Types[TCOMPLEX128])
@@ -1473,10 +1426,7 @@
* for back end.
*/
func Convconst(con *Node, t *Type, val *Val) {
- var i int64
- var tt int
-
- tt = Simsimtype(t)
+ tt := Simsimtype(t)
// copy the constant for conversion
Nodconst(con, Types[TINT8], 0)
@@ -1487,6 +1437,7 @@
if Isint[tt] != 0 {
con.Val.Ctype = CTINT
con.Val.U.Xval = new(Mpint)
+ var i int64
switch val.Ctype {
default:
Fatal("convconst ctype=%d %v", val.Ctype, Tconv(t, obj.FmtLong))
@@ -1614,9 +1565,6 @@
// Only called for expressions known to evaluated to compile-time
// constants.
func isgoconst(n *Node) bool {
- var l *Node
- var t *Type
-
if n.Orig != nil {
n = n.Orig
}
@@ -1661,7 +1609,7 @@
case OLEN,
OCAP:
- l = n.Left
+ l := n.Left
if isgoconst(l) {
return true
}
@@ -1669,7 +1617,7 @@
// Special case: len/cap is constant when applied to array or
// pointer to array when the expression does not contain
// function calls or channel receive operations.
- t = l.Type
+ t := l.Type
if t != nil && Isptr[t.Etype] != 0 {
t = t.Type
@@ -1684,7 +1632,7 @@
}
case ONAME:
- l = n.Sym.Def
+ l := n.Sym.Def
if l != nil && l.Op == OLITERAL && n.Val.Ctype != CTNIL {
return true
}
@@ -1696,7 +1644,7 @@
// Only constant calls are unsafe.Alignof, Offsetof, and Sizeof.
case OCALL:
- l = n.Left
+ l := n.Left
for l.Op == OPAREN {
l = l.Left
@@ -1714,8 +1662,6 @@
}
func hascallchan(n *Node) bool {
- var l *NodeList
-
if n == nil {
return false
}
@@ -1747,12 +1693,12 @@
return true
}
- for l = n.List; l != nil; l = l.Next {
+ for l := n.List; l != nil; l = l.Next {
if hascallchan(l.N) {
return true
}
}
- for l = n.Rlist; l != nil; l = l.Next {
+ for l := n.Rlist; l != nil; l = l.Next {
if hascallchan(l.N) {
return true
}
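
evconst above also shows the limit of the transformation: rv, lno, wr, v, and norig stay declared before the nr == nil check, evidently because the goto unary just after it may not jump over a declaration that is still in scope at the label. A minimal legal/illegal pair as a sketch:

	package main

	import "fmt"

	func eval(unaryOnly bool) {
		// v must be declared above the goto. Writing v := 2 between
		// the goto and the label would be rejected:
		// "goto unary jumps over declaration of v".
		var v int
		if unaryOnly {
			goto unary
		}
		v = 2
	unary:
		v++
		fmt.Println(v)
	}

	func main() {
		eval(true)  // prints: 1
		eval(false) // prints: 3
	}
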
diff --git a/src/cmd/internal/gc/cplx.go b/src/cmd/internal/gc/cplx.go
index 34decd1..c07ba34 100644
--- a/src/cmd/internal/gc/cplx.go
+++ b/src/cmd/internal/gc/cplx.go
@@ -20,14 +20,6 @@
func Complexbool(op int, nl *Node, nr *Node, true_ bool, likely int, to *obj.Prog) {
var tnl Node
- var tnr Node
- var n1 Node
- var n2 Node
- var n3 Node
- var n4 Node
- var na Node
- var nb Node
- var nc Node
// make both sides addable in ullman order
if nr != nil {
@@ -38,6 +30,7 @@
}
if nr.Addable == 0 {
+ var tnr Node
Tempname(&tnr, nr.Type)
Thearch.Cgen(nr, &tnr)
nr = &tnr
@@ -53,13 +46,19 @@
// build tree
// real(l) == real(r) && imag(l) == imag(r)
+ var n2 Node
+ var n1 Node
subnode(&n1, &n2, nl)
+ var n3 Node
+ var n4 Node
subnode(&n3, &n4, nr)
- na = Node{}
+ na := Node{}
na.Op = OANDAND
+ var nb Node
na.Left = &nb
+ var nc Node
na.Right = &nc
na.Type = Types[TBOOL]
@@ -84,16 +83,13 @@
// break addable nc-complex into nr-real and ni-imaginary
func subnode(nr *Node, ni *Node, nc *Node) {
- var tc int
- var t *Type
-
if nc.Addable == 0 {
Fatal("subnode not addable")
}
- tc = Simsimtype(nc.Type)
+ tc := Simsimtype(nc.Type)
tc = cplxsubtype(tc)
- t = Types[tc]
+ t := Types[tc]
if nc.Op == OLITERAL {
nodfconst(nr, t, &nc.Val.U.Cval.Real)
@@ -111,9 +107,7 @@
// generate code res = -nl
func minus(nl *Node, res *Node) {
- var ra Node
-
- ra = Node{}
+ ra := Node{}
ra.Op = OMINUS
ra.Left = nl
ra.Type = nl.Type
@@ -146,13 +140,12 @@
var n4 Node
var n5 Node
var n6 Node
- var ra Node
subnode(&n1, &n2, nl)
subnode(&n3, &n4, nr)
subnode(&n5, &n6, res)
- ra = Node{}
+ ra := Node{}
ra.Op = uint8(op)
ra.Left = &n1
ra.Right = &n3
@@ -178,9 +171,6 @@
var n4 Node
var n5 Node
var n6 Node
- var rm1 Node
- var rm2 Node
- var ra Node
var tmp Node
subnode(&n1, &n2, nl)
@@ -189,20 +179,20 @@
Tempname(&tmp, n5.Type)
// real part -> tmp
- rm1 = Node{}
+ rm1 := Node{}
rm1.Op = OMUL
rm1.Left = &n1
rm1.Right = &n3
rm1.Type = n1.Type
- rm2 = Node{}
+ rm2 := Node{}
rm2.Op = OMUL
rm2.Left = &n2
rm2.Right = &n4
rm2.Type = n2.Type
- ra = Node{}
+ ra := Node{}
ra.Op = OSUB
ra.Left = &rm1
ra.Right = &rm2
@@ -300,14 +290,6 @@
}
func Complexmove(f *Node, t *Node) {
- var ft int
- var tt int
- var n1 Node
- var n2 Node
- var n3 Node
- var n4 Node
- var tmp Node
-
if Debug['g'] != 0 {
Dump("\ncomplexmove-f", f)
Dump("complexmove-t", t)
@@ -317,8 +299,8 @@
Fatal("complexmove: to not addable")
}
- ft = Simsimtype(f.Type)
- tt = Simsimtype(t.Type)
+ ft := Simsimtype(f.Type)
+ tt := Simsimtype(t.Type)
switch uint32(ft)<<16 | uint32(tt) {
default:
Fatal("complexmove: unknown conversion: %v -> %v\n", Tconv(f.Type, 0), Tconv(t.Type, 0))
@@ -331,12 +313,17 @@
TCOMPLEX128<<16 | TCOMPLEX64,
TCOMPLEX128<<16 | TCOMPLEX128:
if f.Addable == 0 || overlap_cplx(f, t) {
+ var tmp Node
Tempname(&tmp, f.Type)
Complexmove(f, &tmp)
f = &tmp
}
+ var n1 Node
+ var n2 Node
subnode(&n1, &n2, f)
+ var n4 Node
+ var n3 Node
subnode(&n3, &n4, t)
Thearch.Cgen(&n1, &n3)
@@ -345,16 +332,6 @@
}
func Complexgen(n *Node, res *Node) {
- var nl *Node
- var nr *Node
- var tnl Node
- var tnr Node
- var n1 Node
- var n2 Node
- var tmp Node
- var tl int
- var tr int
-
if Debug['g'] != 0 {
Dump("\ncomplexgen-n", n)
Dump("complexgen-res", res)
@@ -368,7 +345,10 @@
switch n.Op {
case OCOMPLEX:
if res.Addable != 0 {
+ var n1 Node
+ var n2 Node
subnode(&n1, &n2, res)
+ var tmp Node
Tempname(&tmp, n1.Type)
Thearch.Cgen(n.Left, &tmp)
Thearch.Cgen(n.Right, &n2)
@@ -378,13 +358,16 @@
case OREAL,
OIMAG:
- nl = n.Left
+ nl := n.Left
if nl.Addable == 0 {
+ var tmp Node
Tempname(&tmp, nl.Type)
Complexgen(nl, &tmp)
nl = &tmp
}
+ var n1 Node
+ var n2 Node
subnode(&n1, &n2, nl)
if n.Op == OREAL {
Thearch.Cgen(&n1, res)
@@ -396,13 +379,14 @@
}
// perform conversion from n to res
- tl = Simsimtype(res.Type)
+ tl := Simsimtype(res.Type)
tl = cplxsubtype(tl)
- tr = Simsimtype(n.Type)
+ tr := Simsimtype(n.Type)
tr = cplxsubtype(tr)
if tl != tr {
if n.Addable == 0 {
+ var n1 Node
Tempname(&n1, n.Type)
Complexmove(n, &n1)
n = &n1
@@ -413,6 +397,7 @@
}
if res.Addable == 0 {
+ var n1 Node
Thearch.Igen(res, &n1, nil)
Thearch.Cgen(n, &n1)
Thearch.Regfree(&n1)
@@ -437,6 +422,7 @@
OCALLFUNC,
OCALLMETH,
OCALLINTER:
+ var n1 Node
Thearch.Igen(n, &n1, res)
Complexmove(&n1, res)
@@ -454,13 +440,14 @@
break
}
- nl = n.Left
+ nl := n.Left
if nl == nil {
return
}
- nr = n.Right
+ nr := n.Right
// make both sides addable in ullman order
+ var tnl Node
if nr != nil {
if nl.Ullman > nr.Ullman && nl.Addable == 0 {
Tempname(&tnl, nl.Type)
@@ -469,6 +456,7 @@
}
if nr.Addable == 0 {
+ var tnr Node
Tempname(&tnr, nr.Type)
Thearch.Cgen(nr, &tnr)
nr = &tnr
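
The Node temporaries in Complexmove and Complexgen now appear only on the paths that spill through them. A var of struct type is zeroed where it is declared, so declaring all of them unconditionally at the top of the function meant initializing them even on paths that never used them. The shape, as a sketch with a hypothetical struct:

	package main

	import "fmt"

	type node struct {
		op  int
		val [4]float64 // stand-in for a large struct
	}

	func gen(addable bool) {
		if !addable {
			// The temporary exists, and is zeroed, only on the
			// path that actually needs it.
			var tmp node
			tmp.val[0] = 1.5
			fmt.Println("via temp:", tmp.val[0])
			return
		}
		fmt.Println("direct")
	}

	func main() {
		gen(true)
		gen(false)
	}
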
diff --git a/src/cmd/internal/gc/dcl.go b/src/cmd/internal/gc/dcl.go
index 577f7ec..ceececd 100644
--- a/src/cmd/internal/gc/dcl.go
+++ b/src/cmd/internal/gc/dcl.go
@@ -35,9 +35,7 @@
}
func push() *Sym {
- var d *Sym
-
- d = new(Sym)
+ d := new(Sym)
d.Lastlineno = lineno
d.Link = dclstack
dclstack = d
@@ -45,9 +43,7 @@
}
func pushdcl(s *Sym) *Sym {
- var d *Sym
-
- d = push()
+ d := push()
dcopy(d, s)
if dflag() {
fmt.Printf("\t%v push %v %p\n", Ctxt.Line(int(lineno)), Sconv(s, 0), s.Def)
@@ -94,9 +90,7 @@
}
func markdcl() {
- var d *Sym
-
- d = push()
+ d := push()
d.Name = "" // used as a mark in fifo
d.Block = block
@@ -108,11 +102,9 @@
// print("markdcl\n");
func dumpdcl(st string) {
var s *Sym
- var d *Sym
- var i int
- i = 0
- for d = dclstack; d != nil; d = d.Link {
+ i := 0
+ for d := dclstack; d != nil; d = d.Link {
i++
fmt.Printf(" %.2d %p", i, d)
if d.Name == "" {
@@ -127,9 +119,7 @@
}
func testdclstack() {
- var d *Sym
-
- for d = dclstack; d != nil; d = d.Link {
+ for d := dclstack; d != nil; d = d.Link {
if d.Name == "" {
if nerrors != 0 {
errorexit()
@@ -141,10 +131,6 @@
}
func redeclare(s *Sym, where string) {
- var pkgstr *Strlit
- var line1 int
- var line2 int
-
if s.Lastlineno == 0 {
var tmp *Strlit
if s.Origpkg != nil {
@@ -152,11 +138,11 @@
} else {
tmp = s.Pkg.Path
}
- pkgstr = tmp
+ pkgstr := tmp
Yyerror("%v redeclared %s\n"+"\tprevious declaration during import \"%v\"", Sconv(s, 0), where, Zconv(pkgstr, 0))
} else {
- line1 = parserline()
- line2 = int(s.Lastlineno)
+ line1 := parserline()
+ line2 := int(s.Lastlineno)
// When an import and a declaration collide in separate files,
// present the import as the "redeclared", because the declaration
@@ -180,9 +166,6 @@
var declare_typegen int
func declare(n *Node, ctxt int) {
- var s *Sym
- var gen int
-
if ctxt == PDISCARD {
return
}
@@ -192,7 +175,7 @@
}
n.Lineno = int32(parserline())
- s = n.Sym
+ s := n.Sym
// kludgy: typecheckok means we're past parsing. Eg genwrapper may declare out of package names later.
if importpkg == nil && typecheckok == 0 && s.Pkg != localpkg {
@@ -203,7 +186,7 @@
Yyerror("cannot declare init - must be func", s)
}
- gen = 0
+ gen := 0
if ctxt == PEXTERN {
externdcl = list(externdcl, n)
if dflag() {
@@ -264,20 +247,15 @@
* new_name_list (type | [type] = expr_list)
*/
func variter(vl *NodeList, t *Node, el *NodeList) *NodeList {
- var doexpr bool
- var v *Node
- var e *Node
- var as2 *Node
- var init *NodeList
-
- init = nil
- doexpr = el != nil
+ init := (*NodeList)(nil)
+ doexpr := el != nil
if count(el) == 1 && count(vl) > 1 {
- e = el.N
- as2 = Nod(OAS2, nil, nil)
+ e := el.N
+ as2 := Nod(OAS2, nil, nil)
as2.List = vl
as2.Rlist = list1(e)
+ var v *Node
for ; vl != nil; vl = vl.Next {
v = vl.N
v.Op = ONAME
@@ -292,6 +270,8 @@
return list(init, as2)
}
+ var v *Node
+ var e *Node
for ; vl != nil; vl = vl.Next {
if doexpr {
if el == nil {
@@ -333,11 +313,7 @@
* new_name_list [[type] = expr_list]
*/
func constiter(vl *NodeList, t *Node, cl *NodeList) *NodeList {
- var v *Node
- var c *Node
- var vv *NodeList
-
- vv = nil
+ vv := (*NodeList)(nil)
if cl == nil {
if t != nil {
Yyerror("const declaration cannot have type without expression")
@@ -351,6 +327,8 @@
cl = listtreecopy(cl)
+ var v *Node
+ var c *Node
for ; vl != nil; vl = vl.Next {
if cl == nil {
Yyerror("missing value in const declaration")
@@ -382,13 +360,11 @@
* typically for labels or other one-off names.
*/
func newname(s *Sym) *Node {
- var n *Node
-
if s == nil {
Fatal("newname nil")
}
- n = Nod(ONAME, nil, nil)
+ n := Nod(ONAME, nil, nil)
n.Sym = s
n.Type = nil
n.Addable = 1
@@ -402,9 +378,7 @@
* being declared.
*/
func dclname(s *Sym) *Node {
- var n *Node
-
- n = newname(s)
+ n := newname(s)
n.Op = ONONAME // caller will correct it
return n
}
@@ -429,10 +403,7 @@
* generated if no name has been defined.
*/
func oldname(s *Sym) *Node {
- var n *Node
- var c *Node
-
- n = s.Def
+ n := s.Def
if n == nil {
// maybe a top-level name will come along
// to give this a definition later.
@@ -453,7 +424,7 @@
// make x a closure variable unnecessarily.
if n.Closure == nil || n.Closure.Funcdepth != Funcdepth {
// create new closure var.
- c = Nod(ONAME, nil, nil)
+ c := Nod(ONAME, nil, nil)
c.Sym = s
c.Class = PPARAMREF
@@ -493,20 +464,16 @@
}
func colasdefn(left *NodeList, defn *Node) {
- var nnew int
- var nerr int
- var l *NodeList
- var n *Node
-
- for l = left; l != nil; l = l.Next {
+ for l := left; l != nil; l = l.Next {
if l.N.Sym != nil {
l.N.Sym.Flags |= SymUniq
}
}
- nnew = 0
- nerr = 0
- for l = left; l != nil; l = l.Next {
+ nnew := 0
+ nerr := 0
+ var n *Node
+ for l := left; l != nil; l = l.Next {
n = l.N
if isblank(n) {
continue
@@ -543,9 +510,7 @@
}
func colas(left *NodeList, right *NodeList, lno int32) *Node {
- var as *Node
-
- as = Nod(OAS2, nil, nil)
+ as := Nod(OAS2, nil, nil)
as.List = left
as.Rlist = right
as.Colas = 1
@@ -622,11 +587,6 @@
}
func funcargs(nt *Node) {
- var n *Node
- var nn *Node
- var l *NodeList
- var gen int
-
if nt.Op != OTFUNC {
Fatal("funcargs %v", Oconv(int(nt.Op), 0))
}
@@ -640,7 +600,7 @@
// no n->defn because type checking of func header
// will not fill in the types until later
if nt.Left != nil {
- n = nt.Left
+ n := nt.Left
if n.Op != ODCLFIELD {
Fatal("funcargs receiver %v", Oconv(int(n.Op), 0))
}
@@ -655,7 +615,8 @@
}
}
- for l = nt.List; l != nil; l = l.Next {
+ var n *Node
+ for l := nt.List; l != nil; l = l.Next {
n = l.N
if n.Op != ODCLFIELD {
Fatal("funcargs in %v", Oconv(int(n.Op), 0))
@@ -672,9 +633,10 @@
}
// declare the out arguments.
- gen = count(nt.List)
+ gen := count(nt.List)
var i int = 0
- for l = nt.Rlist; l != nil; l = l.Next {
+ var nn *Node
+ for l := nt.Rlist; l != nil; l = l.Next {
n = l.N
if n.Op != ODCLFIELD {
@@ -726,15 +688,13 @@
* used functype directly to parse the function's type.
*/
func funcargs2(t *Type) {
- var ft *Type
- var n *Node
-
if t.Etype != TFUNC {
Fatal("funcargs2 %v", Tconv(t, 0))
}
if t.Thistuple != 0 {
- for ft = getthisx(t).Type; ft != nil; ft = ft.Down {
+ var n *Node
+ for ft := getthisx(t).Type; ft != nil; ft = ft.Down {
if ft.Nname == nil || ft.Nname.Sym == nil {
continue
}
@@ -745,7 +705,8 @@
}
if t.Intuple != 0 {
- for ft = getinargx(t).Type; ft != nil; ft = ft.Down {
+ var n *Node
+ for ft := getinargx(t).Type; ft != nil; ft = ft.Down {
if ft.Nname == nil || ft.Nname.Sym == nil {
continue
}
@@ -756,7 +717,8 @@
}
if t.Outtuple != 0 {
- for ft = getoutargx(t).Type; ft != nil; ft = ft.Down {
+ var n *Node
+ for ft := getoutargx(t).Type; ft != nil; ft = ft.Down {
if ft.Nname == nil || ft.Nname.Sym == nil {
continue
}
@@ -790,9 +752,7 @@
* new type being defined with name s.
*/
func typedcl0(s *Sym) *Node {
- var n *Node
-
- n = newname(s)
+ n := newname(s)
n.Op = OTYPE
declare(n, dclcontext)
return n
@@ -833,17 +793,14 @@
}
func structfield(n *Node) *Type {
- var f *Type
- var lno int
-
- lno = int(lineno)
+ lno := int(lineno)
lineno = n.Lineno
if n.Op != ODCLFIELD {
Fatal("structfield: oops %v\n", Nconv(n, 0))
}
- f = typ(TFIELD)
+ f := typ(TFIELD)
f.Isddd = n.Isddd
if n.Right != nil {
@@ -890,9 +847,7 @@
var uniqgen uint32
func checkdupfields(t *Type, what string) {
- var lno int
-
- lno = int(lineno)
+ lno := int(lineno)
for ; t != nil; t = t.Down {
if t.Sym != nil && t.Nname != nil && !isblank(t.Nname) {
@@ -913,19 +868,17 @@
* a type for struct/interface/arglist
*/
func tostruct(l *NodeList) *Type {
- var t *Type
var f *Type
- var tp **Type
- t = typ(TSTRUCT)
+ t := typ(TSTRUCT)
- for tp = &t.Type; l != nil; l = l.Next {
+ for tp := &t.Type; l != nil; l = l.Next {
f = structfield(l.N)
*tp = f
tp = &f.Down
}
- for f = t.Type; f != nil && t.Broke == 0; f = f.Down {
+ for f := t.Type; f != nil && t.Broke == 0; f = f.Down {
if f.Broke != 0 {
t.Broke = 1
}
@@ -942,14 +895,12 @@
}
func tofunargs(l *NodeList) *Type {
- var t *Type
var f *Type
- var tp **Type
- t = typ(TSTRUCT)
+ t := typ(TSTRUCT)
t.Funarg = 1
- for tp = &t.Type; l != nil; l = l.Next {
+ for tp := &t.Type; l != nil; l = l.Next {
f = structfield(l.N)
f.Funarg = 1
@@ -962,7 +913,7 @@
tp = &f.Down
}
- for f = t.Type; f != nil && t.Broke == 0; f = f.Down {
+ for f := t.Type; f != nil && t.Broke == 0; f = f.Down {
if f.Broke != 0 {
t.Broke = 1
}
@@ -972,10 +923,7 @@
}
func interfacefield(n *Node) *Type {
- var f *Type
- var lno int
-
- lno = int(lineno)
+ lno := int(lineno)
lineno = n.Lineno
if n.Op != ODCLFIELD {
@@ -986,7 +934,7 @@
Yyerror("interface method cannot have annotation")
}
- f = typ(TFIELD)
+ f := typ(TFIELD)
f.Isddd = n.Isddd
if n.Right != nil {
@@ -1042,14 +990,12 @@
}
func tointerface(l *NodeList) *Type {
- var t *Type
var f *Type
- var tp **Type
var t1 *Type
- t = typ(TINTER)
+ t := typ(TINTER)
- tp = &t.Type
+ tp := &t.Type
for ; l != nil; l = l.Next {
f = interfacefield(l.N)
@@ -1072,7 +1018,7 @@
}
}
- for f = t.Type; f != nil && t.Broke == 0; f = f.Down {
+ for f := t.Type; f != nil && t.Broke == 0; f = f.Down {
if f.Broke != 0 {
t.Broke = 1
}
@@ -1087,20 +1033,19 @@
}
func embedded(s *Sym, pkg *Pkg) *Node {
- var n *Node
- var name string
const (
CenterDot = 0xB7
)
// Names sometimes have disambiguation junk
// appended after a center dot. Discard it when
// making the name for the embedded struct field.
- name = s.Name
+ name := s.Name
if i := strings.Index(s.Name, string(CenterDot)); i >= 0 {
name = s.Name[:i]
}
+ var n *Node
if exportname(name) {
n = newname(Lookup(name))
} else if s.Pkg == builtinpkg {
@@ -1127,14 +1072,8 @@
}
func checkarglist(all *NodeList, input int) *NodeList {
- var named int
- var n *Node
- var t *Node
- var nextt *Node
- var l *NodeList
-
- named = 0
- for l = all; l != nil; l = l.Next {
+ named := 0
+ for l := all; l != nil; l = l.Next {
if l.N.Op == OKEY {
named = 1
break
@@ -1142,7 +1081,8 @@
}
if named != 0 {
- n = nil
+ n := (*Node)(nil)
+ var l *NodeList
for l = all; l != nil; l = l.Next {
n = l.N
if n.Op != OKEY && n.Sym == nil {
@@ -1156,8 +1096,10 @@
}
}
- nextt = nil
- for l = all; l != nil; l = l.Next {
+ nextt := (*Node)(nil)
+ var t *Node
+ var n *Node
+ for l := all; l != nil; l = l.Next {
// can cache result from findtype to avoid
// quadratic behavior here, but unlikely to matter.
n = l.N
@@ -1220,9 +1162,7 @@
}
func fakethis() *Node {
- var n *Node
-
- n = Nod(ODCLFIELD, nil, typenod(Ptrto(typ(TSTRUCT))))
+ n := Nod(ODCLFIELD, nil, typenod(Ptrto(typ(TSTRUCT))))
return n
}
@@ -1233,14 +1173,11 @@
* (See fakethis above.)
*/
func isifacemethod(f *Type) bool {
- var rcvr *Type
- var t *Type
-
- rcvr = getthisx(f).Type
+ rcvr := getthisx(f).Type
if rcvr.Sym != nil {
return false
}
- t = rcvr.Type
+ t := rcvr.Type
if Isptr[t.Etype] == 0 {
return false
}
@@ -1256,13 +1193,9 @@
* into a type
*/
func functype(this *Node, in *NodeList, out *NodeList) *Type {
- var t *Type
- var rcvr *NodeList
- var s *Sym
+ t := typ(TFUNC)
- t = typ(TFUNC)
-
- rcvr = nil
+ rcvr := (*NodeList)(nil)
if this != nil {
rcvr = list1(this)
}
@@ -1286,7 +1219,7 @@
t.Intuple = count(in)
t.Outnamed = 0
if t.Outtuple > 0 && out.N.Left != nil && out.N.Left.Orig != nil {
- s = out.N.Left.Orig.Sym
+ s := out.N.Left.Orig.Sym
if s != nil && (s.Name[0] != '~' || s.Name[1] != 'r') { // ~r%d is the name invented for an unnamed result
t.Outnamed = 1
}
@@ -1300,11 +1233,10 @@
func methodsym(nsym *Sym, t0 *Type, iface int) *Sym {
var s *Sym
var p string
- var t *Type
var suffix string
var spkg *Pkg
- t = t0
+ t := t0
if t == nil {
goto bad
}
@@ -1367,9 +1299,7 @@
}
func methodname(n *Node, t *Type) *Node {
- var s *Sym
-
- s = methodsym(n.Sym, t, 0)
+ s := methodsym(n.Sym, t, 0)
if s == nil {
return n
}
@@ -1377,10 +1307,7 @@
}
func methodname1(n *Node, t *Node) *Node {
- var star string
- var p string
-
- star = ""
+ star := ""
if t.Op == OIND {
star = "*"
t = t.Left
@@ -1390,6 +1317,7 @@
return newname(n.Sym)
}
+ var p string
if star != "" {
p = fmt.Sprintf("(%s%v).%v", star, Sconv(t.Sym, 0), Sconv(n.Sym, 0))
} else {
@@ -1410,25 +1338,20 @@
* n is fieldname, pa is base type, t is function type
*/
func addmethod(sf *Sym, t *Type, local bool, nointerface bool) {
- var f *Type
- var d *Type
- var pa *Type
- var n *Node
-
// get field sym
if sf == nil {
Fatal("no method symbol")
}
// get parent type sym
- pa = getthisx(t).Type // ptr to this structure
+ pa := getthisx(t).Type // ptr to this structure
if pa == nil {
Yyerror("missing receiver")
return
}
pa = pa.Type
- f = methtype(pa, 1)
+ f := methtype(pa, 1)
if f == nil {
t = pa
if t == nil { // rely on typecheck having complained before
@@ -1472,7 +1395,7 @@
pa = f
if pa.Etype == TSTRUCT {
- for f = pa.Type; f != nil; f = f.Down {
+ for f := pa.Type; f != nil; f = f.Down {
if f.Sym == sf {
Yyerror("type %v has both field and method named %v", Tconv(pa, 0), Sconv(sf, 0))
return
@@ -1487,11 +1410,11 @@
return
}
- n = Nod(ODCLFIELD, newname(sf), nil)
+ n := Nod(ODCLFIELD, newname(sf), nil)
n.Type = t
- d = nil // last found
- for f = pa.Method; f != nil; f = f.Down {
+ d := (*Type)(nil) // last found
+ for f := pa.Method; f != nil; f = f.Down {
d = f
if f.Etype != TFIELD {
Fatal("addmethod: not TFIELD: %v", Tconv(f, obj.FmtLong))
@@ -1549,11 +1472,8 @@
}
func funcsym(s *Sym) *Sym {
- var p string
- var s1 *Sym
-
- p = fmt.Sprintf("%s·f", s.Name)
- s1 = Pkglookup(p, s.Pkg)
+ p := fmt.Sprintf("%s·f", s.Name)
+ s1 := Pkglookup(p, s.Pkg)
if s1.Def == nil {
s1.Def = newname(s1)
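
funcargs2 above is the repeated-block variant: one old function-level var n *Node turns into a separate declaration in each of the three if-blocks, because the blocks never share n's value. Sketch (hypothetical names):

	package main

	import "fmt"

	func walk(hasIn, hasOut bool) {
		if hasIn {
			var n int // scoped to this block's loop
			for _, v := range []int{1, 2} {
				n = v
				fmt.Println("in:", n)
			}
		}
		if hasOut {
			var n int // an independent n for this block
			for _, v := range []int{3, 4} {
				n = v
				fmt.Println("out:", n)
			}
		}
	}

	func main() {
		walk(true, true)
	}
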
diff --git a/src/cmd/internal/gc/esc.go b/src/cmd/internal/gc/esc.go
index f4d5b43..697ca677 100644
--- a/src/cmd/internal/gc/esc.go
+++ b/src/cmd/internal/gc/esc.go
@@ -49,30 +49,23 @@
)
func escapes(all *NodeList) {
- var l *NodeList
-
- for l = all; l != nil; l = l.Next {
+ for l := all; l != nil; l = l.Next {
l.N.Walkgen = 0
}
visitgen = 0
- for l = all; l != nil; l = l.Next {
+ for l := all; l != nil; l = l.Next {
if l.N.Op == ODCLFUNC && l.N.Curfn == nil {
visit(l.N)
}
}
- for l = all; l != nil; l = l.Next {
+ for l := all; l != nil; l = l.Next {
l.N.Walkgen = 0
}
}
func visit(n *Node) uint32 {
- var min uint32
- var recursive bool
- var l *NodeList
- var block *NodeList
-
if n.Walkgen > 0 {
// already visited
return n.Walkgen
@@ -81,9 +74,9 @@
visitgen++
n.Walkgen = visitgen
visitgen++
- min = visitgen
+ min := visitgen
- l = new(NodeList)
+ l := new(NodeList)
l.Next = stack
l.N = n
stack = l
@@ -95,13 +88,14 @@
// If visitcodelist found its way back to n->walkgen, then this
// block is a set of mutually recursive functions.
// Otherwise it's just a lone function that does not recurse.
- recursive = min == n.Walkgen
+ recursive := min == n.Walkgen
// Remove connected component from stack.
// Mark walkgen so that future visits return a large number
// so as not to affect the caller's min.
- block = stack
+ block := stack
+ var l *NodeList
for l = stack; l.N != n; l = l.Next {
l.N.Walkgen = ^uint32(0)
}
@@ -124,9 +118,6 @@
}
func visitcode(n *Node, min uint32) uint32 {
- var fn *Node
- var m uint32
-
if n == nil {
return min
}
@@ -142,12 +133,12 @@
min = visitcodelist(n.Rlist, min)
if n.Op == OCALLFUNC || n.Op == OCALLMETH {
- fn = n.Left
+ fn := n.Left
if n.Op == OCALLMETH {
fn = n.Left.Right.Sym.Def
}
if fn != nil && fn.Op == ONAME && fn.Class == PFUNC && fn.Defn != nil {
- m = visit(fn.Defn)
+ m := visit(fn.Defn)
if m < min {
min = m
}
@@ -155,7 +146,7 @@
}
if n.Op == OCLOSURE {
- m = visit(n.Closure)
+ m := visit(n.Closure)
if m < min {
min = m
}
@@ -205,9 +196,6 @@
var tags [16]*Strlit
func mktag(mask int) *Strlit {
- var s *Strlit
- var buf string
-
switch mask & EscMask {
case EscNone,
EscReturn:
@@ -223,8 +211,8 @@
return tags[mask]
}
- buf = fmt.Sprintf("esc:0x%x", mask)
- s = newstrlit(buf)
+ buf := fmt.Sprintf("esc:0x%x", mask)
+ s := newstrlit(buf)
if mask < len(tags) {
tags[mask] = s
}
@@ -232,15 +220,13 @@
}
func parsetag(note *Strlit) int {
- var em int
-
if note == nil {
return EscUnknown
}
if !strings.HasPrefix(note.S, "esc:") {
return EscUnknown
}
- em = atoi(note.S[4:])
+ em := atoi(note.S[4:])
if em == 0 {
return EscNone
}
@@ -248,12 +234,8 @@
}
func analyze(all *NodeList, recursive bool) {
- var l *NodeList
- var es EscState
- var e *EscState
-
- es = EscState{}
- e = &es
+ es := EscState{}
+ e := &es
e.theSink.Op = ONAME
e.theSink.Orig = &e.theSink
e.theSink.Class = PEXTERN
@@ -267,14 +249,14 @@
e.funcParam.Sym = Lookup(".param")
e.funcParam.Escloopdepth = 10000000
- for l = all; l != nil; l = l.Next {
+ for l := all; l != nil; l = l.Next {
if l.N.Op == ODCLFUNC {
l.N.Esc = EscFuncPlanned
}
}
// flow-analyze functions
- for l = all; l != nil; l = l.Next {
+ for l := all; l != nil; l = l.Next {
if l.N.Op == ODCLFUNC {
escfunc(e, l.N)
}
@@ -284,19 +266,19 @@
// visit the upstream of each dst, mark address nodes with
// addrescapes, mark parameters unsafe
- for l = e.dsts; l != nil; l = l.Next {
+ for l := e.dsts; l != nil; l = l.Next {
escflood(e, l.N)
}
// for all top level functions, tag the typenodes corresponding to the param nodes
- for l = all; l != nil; l = l.Next {
+ for l := all; l != nil; l = l.Next {
if l.N.Op == ODCLFUNC {
esctag(e, l.N)
}
}
if Debug['m'] != 0 {
- for l = e.noesc; l != nil; l = l.Next {
+ for l := e.noesc; l != nil; l = l.Next {
if l.N.Esc == EscNone {
var tmp *Sym
if l.N.Curfn != nil && l.N.Curfn.Nname != nil {
@@ -311,10 +293,6 @@
}
func escfunc(e *EscState, func_ *Node) {
- var savefn *Node
- var ll *NodeList
- var saveld int
-
// print("escfunc %N %s\n", func->nname, e->recursive?"(recursive)":"");
if func_.Esc != 1 {
@@ -322,12 +300,12 @@
}
func_.Esc = EscFuncStarted
- saveld = e.loopdepth
+ saveld := e.loopdepth
e.loopdepth = 1
- savefn = Curfn
+ savefn := Curfn
Curfn = func_
- for ll = Curfn.Dcl; ll != nil; ll = ll.Next {
+ for ll := Curfn.Dcl; ll != nil; ll = ll.Next {
if ll.N.Op != ONAME {
continue
}
@@ -352,7 +330,7 @@
// in a mutually recursive group we lose track of the return values
if e.recursive {
- for ll = Curfn.Dcl; ll != nil; ll = ll.Next {
+ for ll := Curfn.Dcl; ll != nil; ll = ll.Next {
if ll.N.Op == ONAME && ll.N.Class == PPARAMOUT {
escflows(e, &e.theSink, ll.N)
}
@@ -426,17 +404,14 @@
}
func esc(e *EscState, n *Node, up *Node) {
- var lno int
var ll *NodeList
var lr *NodeList
- var a *Node
- var v *Node
if n == nil {
return
}
- lno = int(setlineno(n))
+ lno := int(setlineno(n))
// ninit logically runs at a different loopdepth than the rest of the for loop.
esclist(e, n.Ninit, n)
@@ -702,6 +677,8 @@
// Link addresses of captured variables to closure.
case OCLOSURE:
+ var a *Node
+ var v *Node
for ll = n.Cvars; ll != nil; ll = ll.Next {
v = ll.N
if v.Op == OXXX { // unnamed out argument; see dcl.c:/^funcargs
@@ -780,9 +757,6 @@
// evaluated in curfn. For expr==nil, dst must still be examined for
// evaluations inside it (e.g *f(x) = y)
func escassign(e *EscState, dst *Node, src *Node) {
- var lno int
- var ll *NodeList
-
if isblank(dst) || dst == nil || src == nil || src.Op == ONONAME || src.Op == OXXX {
return
}
@@ -846,7 +820,7 @@
dst = &e.theSink
}
- lno = int(setlineno(src))
+ lno := int(setlineno(src))
e.pdepth++
switch src.Op {
@@ -879,7 +853,7 @@
case OCALLMETH,
OCALLFUNC,
OCALLINTER:
- for ll = src.Escretval; ll != nil; ll = ll.Next {
+ for ll := src.Escretval; ll != nil; ll = ll.Next {
escflows(e, dst, ll.N)
}
@@ -945,7 +919,6 @@
func escassignfromtag(e *EscState, note *Strlit, dsts *NodeList, src *Node) int {
var em int
- var em0 int
em = parsetag(note)
@@ -964,7 +937,7 @@
escassign(e, &e.funcParam, src)
}
- em0 = em
+ em0 := em
for em >>= EscReturnBits; em != 0 && dsts != nil; (func() { em >>= 1; dsts = dsts.Next })() {
if em&1 != 0 {
escassign(e, dsts.N, src)
@@ -986,15 +959,9 @@
func esccall(e *EscState, n *Node, up *Node) {
var ll *NodeList
var lr *NodeList
- var a *Node
- var fn *Node
- var src *Node
- var t *Type
var fntype *Type
- var buf string
- var i int
- fn = nil
+ fn := (*Node)(nil)
switch n.Op {
default:
Fatal("esccall")
@@ -1017,7 +984,7 @@
ll = n.List
if n.List != nil && n.List.Next == nil {
- a = n.List.N
+ a := n.List.N
if a.Type.Etype == TSTRUCT && a.Type.Funarg != 0 { // f(g()).
ll = a.Escretval
}
@@ -1040,6 +1007,7 @@
escassign(e, fn.Ntype.Left.Left, n.Left.Left)
}
+ var src *Node
for lr = fn.Ntype.List; ll != nil && lr != nil; (func() { ll = ll.Next; lr = lr.Next })() {
src = ll.N
if lr.N.Isddd != 0 && n.Isddd == 0 {
@@ -1079,9 +1047,11 @@
}
// set up out list on this call node with dummy auto ONAMES in the current (calling) function.
- i = 0
+ i := 0
- for t = getoutargx(fntype).Type; t != nil; t = t.Down {
+ var src *Node
+ var buf string
+ for t := getoutargx(fntype).Type; t != nil; t = t.Down {
src = Nod(ONAME, nil, nil)
buf = fmt.Sprintf(".dum%d", i)
i++
@@ -1099,14 +1069,15 @@
// Receiver.
if n.Op != OCALLFUNC {
- t = getthisx(fntype).Type
- src = n.Left.Left
+ t := getthisx(fntype).Type
+ src := n.Left.Left
if haspointers(t.Type) {
escassignfromtag(e, t.Note, n.Escretval, src)
}
}
- for t = getinargx(fntype).Type; ll != nil; ll = ll.Next {
+ var a *Node
+ for t := getinargx(fntype).Type; ll != nil; ll = ll.Next {
src = ll.N
if t.Isddd != 0 && n.Isddd == 0 {
// Introduce ODDDARG node to represent ... allocation.
@@ -1197,8 +1168,6 @@
// Once an object has been moved to the heap, all of it's upstream should be considered
// escaping to the global scope.
func escflood(e *EscState, dst *Node) {
- var l *NodeList
-
switch dst.Op {
case ONAME,
OCLOSURE:
@@ -1218,7 +1187,7 @@
fmt.Printf("\nescflood:%d: dst %v scope:%v[%d]\n", walkgen, Nconv(dst, obj.FmtShort), Sconv(tmp, 0), dst.Escloopdepth)
}
- for l = dst.Escflowsrc; l != nil; l = l.Next {
+ for l := dst.Escflowsrc; l != nil; l = l.Next {
walkgen++
escwalk(e, 0, dst, l.N)
}
@@ -1240,10 +1209,6 @@
)
func escwalk(e *EscState, level int, dst *Node, src *Node) {
- var ll *NodeList
- var leaks bool
- var newlevel int
-
if src.Walkgen == walkgen && src.Esclevel <= int32(level) {
return
}
@@ -1263,6 +1228,7 @@
e.pdepth++
// Input parameter flowing to output parameter?
+ var leaks bool
if dst.Op == ONAME && dst.Class == PPARAMOUT && dst.Vargen <= 20 {
if src.Op == ONAME && src.Class == PPARAM && src.Curfn == dst.Curfn && src.Esc != EscScope && src.Esc != EscHeap {
if level == 0 {
@@ -1320,7 +1286,7 @@
}
}
- newlevel = level
+ newlevel := level
if level > MinLevel {
newlevel--
}
@@ -1373,7 +1339,7 @@
case ODOTPTR,
OINDEXMAP,
OIND:
- newlevel = level
+ newlevel := level
if level > MinLevel {
newlevel++
@@ -1382,7 +1348,7 @@
}
recurse:
- for ll = src.Escflowsrc; ll != nil; ll = ll.Next {
+ for ll := src.Escflowsrc; ll != nil; ll = ll.Next {
escwalk(e, level, dst, ll.N)
}
@@ -1390,17 +1356,13 @@
}
func esctag(e *EscState, func_ *Node) {
- var savefn *Node
- var ll *NodeList
- var t *Type
-
func_.Esc = EscFuncTagged
// External functions are assumed unsafe,
// unless //go:noescape is given before the declaration.
if func_.Nbody == nil {
if func_.Noescape {
- for t = getinargx(func_.Type).Type; t != nil; t = t.Down {
+ for t := getinargx(func_.Type).Type; t != nil; t = t.Down {
if haspointers(t.Type) {
t.Note = mktag(EscNone)
}
@@ -1410,10 +1372,10 @@
return
}
- savefn = Curfn
+ savefn := Curfn
Curfn = func_
- for ll = Curfn.Dcl; ll != nil; ll = ll.Next {
+ for ll := Curfn.Dcl; ll != nil; ll = ll.Next {
if ll.N.Op != ONAME || ll.N.Class != PPARAM {
continue
}
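(Illustration, not part of the CL: the esc.go hunks above repeatedly replace a function-scoped var plus a later assignment with := at first use, falling back to an explicit typed nil such as fn := (*Node)(nil) when the zero value must exist before any assignment. A minimal standalone sketch of both forms; the node and find names are invented for this example.)

package main

import "fmt"

type node struct {
	name string
	next *node
}

// find shows the two declaration styles grind prefers: a loop variable
// declared in the for statement, and an accumulator that needs a typed
// nil conversion so := still works at the point of first use.
func find(list *node, name string) *node {
	match := (*node)(nil) // was: var match *node, at the top of the function
	for l := list; l != nil; l = l.next {
		if l.name == name {
			match = l
		}
	}
	return match
}

func main() {
	l := &node{name: "a", next: &node{name: "b"}}
	fmt.Println(find(l, "b").name)
}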
diff --git a/src/cmd/internal/gc/export.go b/src/cmd/internal/gc/export.go
index b0c5931..2467086 100644
--- a/src/cmd/internal/gc/export.go
+++ b/src/cmd/internal/gc/export.go
@@ -79,13 +79,11 @@
}
func dumppkg(p *Pkg) {
- var suffix string
-
if p == nil || p == localpkg || p.Exported != 0 || p == builtinpkg {
return
}
p.Exported = 1
- suffix = ""
+ suffix := ""
if p.Direct == 0 {
suffix = " // indirect"
}
@@ -100,8 +98,6 @@
}
func reexportdep(n *Node) {
- var t *Type
-
if n == nil {
return
}
@@ -135,7 +131,7 @@
// Local variables in the bodies need their type.
case ODCL:
- t = n.Left.Type
+ t := n.Left.Type
if t != Types[t.Etype] && t != idealbool && t != idealstring {
if Isptr[t.Etype] != 0 {
@@ -150,7 +146,7 @@
}
case OLITERAL:
- t = n.Type
+ t := n.Type
if t != Types[n.Type.Etype] && t != idealbool && t != idealstring {
if Isptr[t.Etype] != 0 {
t = t.Type
@@ -190,7 +186,7 @@
OMAKEMAP,
OMAKESLICE,
OMAKECHAN:
- t = n.Type
+ t := n.Type
if t.Sym == nil && t.Type != nil {
t = t.Type
@@ -215,16 +211,13 @@
}
func dumpexportconst(s *Sym) {
- var n *Node
- var t *Type
-
- n = s.Def
+ n := s.Def
typecheck(&n, Erv)
if n == nil || n.Op != OLITERAL {
Fatal("dumpexportconst: oconst nil: %v", Sconv(s, 0))
}
- t = n.Type // may or may not be specified
+ t := n.Type // may or may not be specified
dumpexporttype(t)
if t != nil && !isideal(t) {
@@ -235,17 +228,14 @@
}
func dumpexportvar(s *Sym) {
- var n *Node
- var t *Type
-
- n = s.Def
+ n := s.Def
typecheck(&n, Erv|Ecall)
if n == nil || n.Type == nil {
Yyerror("variable exported but not defined: %v", Sconv(s, 0))
return
}
- t = n.Type
+ t := n.Type
dumpexporttype(t)
if t.Etype == TFUNC && n.Class == PFUNC {
@@ -279,20 +269,12 @@
}
func (x methodbyname) Less(i, j int) bool {
- var a *Type
- var b *Type
-
- a = x[i]
- b = x[j]
+ a := x[i]
+ b := x[j]
return stringsCompare(a.Sym.Name, b.Sym.Name) < 0
}
func dumpexporttype(t *Type) {
- var f *Type
- var m []*Type
- var i int
- var n int
-
if t == nil {
return
}
@@ -312,22 +294,23 @@
return
}
- n = 0
- for f = t.Method; f != nil; f = f.Down {
+ n := 0
+ for f := t.Method; f != nil; f = f.Down {
dumpexporttype(f)
n++
}
- m = make([]*Type, n)
- i = 0
- for f = t.Method; f != nil; f = f.Down {
+ m := make([]*Type, n)
+ i := 0
+ for f := t.Method; f != nil; f = f.Down {
m[i] = f
i++
}
sort.Sort(methodbyname(m[:n]))
fmt.Fprintf(bout, "\ttype %v %v\n", Sconv(t.Sym, obj.FmtSharp), Tconv(t, obj.FmtSharp|obj.FmtLong))
- for i = 0; i < n; i++ {
+ var f *Type
+ for i := 0; i < n; i++ {
f = m[i]
if f.Nointerface {
fmt.Fprintf(bout, "\t//go:nointerface\n")
@@ -381,12 +364,7 @@
}
func dumpexport() {
- var l *NodeList
- var i int32
- var lno int32
- var p *Pkg
-
- lno = lineno
+ lno := lineno
fmt.Fprintf(bout, "\n$$\npackage %s", localpkg.Name)
if safemode != 0 {
@@ -394,7 +372,8 @@
}
fmt.Fprintf(bout, "\n")
- for i = 0; i < int32(len(phash)); i++ {
+ var p *Pkg
+ for i := int32(0); i < int32(len(phash)); i++ {
for p = phash[i]; p != nil; p = p.Link {
if p.Direct != 0 {
dumppkg(p)
@@ -402,7 +381,7 @@
}
}
- for l = exportlist; l != nil; l = l.Next {
+ for l := exportlist; l != nil; l = l.Next {
lineno = l.N.Lineno
dumpsym(l.N.Sym)
}
@@ -419,10 +398,8 @@
* return the sym for ss, which should match lexical
*/
func importsym(s *Sym, op int) *Sym {
- var pkgstr string
-
if s.Def != nil && int(s.Def.Op) != op {
- pkgstr = fmt.Sprintf("during import \"%v\"", Zconv(importpkg.Path, 0))
+ pkgstr := fmt.Sprintf("during import \"%v\"", Zconv(importpkg.Path, 0))
redeclare(s, pkgstr)
}
@@ -442,11 +419,9 @@
* return the type pkg.name, forward declaring if needed
*/
func pkgtype(s *Sym) *Type {
- var t *Type
-
importsym(s, OTYPE)
if s.Def == nil || s.Def.Op != OTYPE {
- t = typ(TFORW)
+ t := typ(TFORW)
t.Sym = s
s.Def = typenod(t)
}
@@ -461,12 +436,11 @@
// Informational: record package name
// associated with import path, for use in
// human-readable messages.
- var p *Pkg
if isbadimport(z) {
errorexit()
}
- p = mkpkg(z)
+ p := mkpkg(z)
if p.Name == "" {
p.Name = s.Name
Pkglookup(s.Name, nil).Npkg++
@@ -481,8 +455,6 @@
}
func importconst(s *Sym, t *Type, n *Node) {
- var n1 *Node
-
importsym(s, OLITERAL)
Convlit(&n, t)
@@ -496,7 +468,7 @@
}
if n.Sym != nil {
- n1 = Nod(OXXX, nil, nil)
+ n1 := Nod(OXXX, nil, nil)
*n1 = *n
n = n1
}
@@ -511,8 +483,6 @@
}
func importvar(s *Sym, t *Type) {
- var n *Node
-
importsym(s, ONAME)
if s.Def != nil && s.Def.Op == ONAME {
if Eqtype(t, s.Def.Type) {
@@ -521,7 +491,7 @@
Yyerror("inconsistent definition for var %v during import\n\t%v (in \"%v\")\n\t%v (in \"%v\")", Sconv(s, 0), Tconv(s.Def.Type, 0), Zconv(s.Importdef.Path, 0), Tconv(t, 0), Zconv(importpkg.Path, 0))
}
- n = newname(s)
+ n := newname(s)
s.Importdef = importpkg
n.Type = t
declare(n, PEXTERN)
@@ -532,8 +502,6 @@
}
func importtype(pt *Type, t *Type) {
- var n *Node
-
// override declaration in unsafe.go for Pointer.
// there is no way in Go code to define unsafe.Pointer
// so we have to supply it.
@@ -542,7 +510,7 @@
}
if pt.Etype == TFORW {
- n = pt.Nod
+ n := pt.Nod
copytype(pt.Nod, t)
pt.Nod = n // unzero nod
pt.Sym.Importdef = importpkg
@@ -560,16 +528,15 @@
func dumpasmhdr() {
var b *obj.Biobuf
- var l *NodeList
- var n *Node
- var t *Type
b, err := obj.Bopenw(asmhdr)
if err != nil {
Fatal("%v", err)
}
fmt.Fprintf(b, "// generated by %cg -asmhdr from package %s\n\n", Thearch.Thechar, localpkg.Name)
- for l = asmlist; l != nil; l = l.Next {
+ var n *Node
+ var t *Type
+ for l := asmlist; l != nil; l = l.Next {
n = l.N
if isblanksym(n.Sym) {
continue
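(Illustration, not part of the CL: in dumpexporttype above, the two counting loops get their own for f := ... declarations, while the final loop keeps a single var f *Type just above it because its body assigns rather than declares. A rough compilable sketch of that split, with invented names.)

package main

import (
	"fmt"
	"sort"
)

func main() {
	names := []string{"Write", "Close", "Read"}
	sort.Strings(names)

	// A loop that only needs its own iterator declares it inline.
	n := 0
	for _, name := range names {
		if name != "" {
			n++
		}
	}

	// A variable assigned (not declared) inside the loop keeps one
	// declaration immediately above it, like var f *Type above.
	var last string
	for i := 0; i < n; i++ {
		last = names[i]
	}
	fmt.Println(n, last)
}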
diff --git a/src/cmd/internal/gc/fmt.go b/src/cmd/internal/gc/fmt.go
index b155f78..ce73676 100644
--- a/src/cmd/internal/gc/fmt.go
+++ b/src/cmd/internal/gc/fmt.go
@@ -88,9 +88,7 @@
//
func setfmode(flags *int) int {
- var fm int
-
- fm = fmtmode
+ fm := fmtmode
if *flags&obj.FmtSign != 0 {
fmtmode = FDbg
} else if *flags&obj.FmtSharp != 0 {
@@ -170,20 +168,21 @@
// Fmt "%O": Node opcodes
func Oconv(o int, flag int) string {
- var fp string
-
if (flag&obj.FmtSharp != 0 /*untyped*/) || fmtmode != FDbg {
if o >= 0 && o < len(goopnames) && goopnames[o] != "" {
+ var fp string
fp += goopnames[o]
return fp
}
}
if o >= 0 && o < len(opnames) && opnames[o] != "" {
+ var fp string
fp += opnames[o]
return fp
}
+ var fp string
fp += fmt.Sprintf("O-%d", o)
return fp
}
@@ -202,10 +201,7 @@
func Jconv(n *Node, flag int) string {
var fp string
- var s string
- var c int
-
- c = flag & obj.FmtShort
+ c := flag & obj.FmtShort
if c == 0 && n.Ullman != 0 {
fp += fmt.Sprintf(" u(%d)", n.Ullman)
@@ -228,7 +224,7 @@
}
if n.Class != 0 {
- s = ""
+ s := ""
if n.Class&PHEAP != 0 {
s = ",heap"
}
@@ -309,98 +305,111 @@
// Fmt "%V": Values
func Vconv(v *Val, flag int) string {
- var fp string
-
- var x int64
-
switch v.Ctype {
case CTINT:
if (flag&obj.FmtSharp != 0 /*untyped*/) || fmtmode == FExp {
+ var fp string
fp += fmt.Sprintf("%v", Bconv(v.U.Xval, obj.FmtSharp))
return fp
}
+ var fp string
fp += fmt.Sprintf("%v", Bconv(v.U.Xval, 0))
return fp
case CTRUNE:
- x = Mpgetfix(v.U.Xval)
+ x := Mpgetfix(v.U.Xval)
if ' ' <= x && x < 0x80 && x != '\\' && x != '\'' {
+ var fp string
fp += fmt.Sprintf("'%c'", int(x))
return fp
}
if 0 <= x && x < 1<<16 {
+ var fp string
fp += fmt.Sprintf("'\\u%04x'", uint(int(x)))
return fp
}
if 0 <= x && x <= utf8.MaxRune {
+ var fp string
fp += fmt.Sprintf("'\\U%08x'", uint64(x))
return fp
}
+ var fp string
fp += fmt.Sprintf("('\\x00' + %v)", Bconv(v.U.Xval, 0))
return fp
case CTFLT:
if (flag&obj.FmtSharp != 0 /*untyped*/) || fmtmode == FExp {
+ var fp string
fp += fmt.Sprintf("%v", Fconv(v.U.Fval, 0))
return fp
}
+ var fp string
fp += fmt.Sprintf("%v", Fconv(v.U.Fval, obj.FmtSharp))
return fp
case CTCPLX:
if (flag&obj.FmtSharp != 0 /*untyped*/) || fmtmode == FExp {
+ var fp string
fp += fmt.Sprintf("(%v+%vi)", Fconv(&v.U.Cval.Real, 0), Fconv(&v.U.Cval.Imag, 0))
return fp
}
if mpcmpfltc(&v.U.Cval.Real, 0) == 0 {
+ var fp string
fp += fmt.Sprintf("%vi", Fconv(&v.U.Cval.Imag, obj.FmtSharp))
return fp
}
if mpcmpfltc(&v.U.Cval.Imag, 0) == 0 {
+ var fp string
fp += fmt.Sprintf("%v", Fconv(&v.U.Cval.Real, obj.FmtSharp))
return fp
}
if mpcmpfltc(&v.U.Cval.Imag, 0) < 0 {
+ var fp string
fp += fmt.Sprintf("(%v%vi)", Fconv(&v.U.Cval.Real, obj.FmtSharp), Fconv(&v.U.Cval.Imag, obj.FmtSharp))
return fp
}
+ var fp string
fp += fmt.Sprintf("(%v+%vi)", Fconv(&v.U.Cval.Real, obj.FmtSharp), Fconv(&v.U.Cval.Imag, obj.FmtSharp))
return fp
case CTSTR:
+ var fp string
fp += fmt.Sprintf("\"%v\"", Zconv(v.U.Sval, 0))
return fp
case CTBOOL:
if v.U.Bval != 0 {
+ var fp string
fp += "true"
return fp
}
+ var fp string
fp += "false"
return fp
case CTNIL:
+ var fp string
fp += "nil"
return fp
}
+ var fp string
fp += fmt.Sprintf("<ctype=%d>", v.Ctype)
return fp
}
// Fmt "%Z": escaped string literals
func Zconv(sp *Strlit, flag int) string {
- var fp string
- var s string
- var n int
-
if sp == nil {
+ var fp string
fp += "<nil>"
return fp
}
// NOTE: Keep in sync with ../ld/go.c:/^Zconv.
- s = sp.S
+ s := sp.S
+ var n int
+ var fp string
for i := 0; i < len(s); i += n {
var r rune
r, n = utf8.DecodeRuneInString(s[i:])
@@ -481,47 +490,49 @@
// Fmt "%E": etype
func Econv(et int, flag int) string {
- var fp string
-
if et >= 0 && et < len(etnames) && etnames[et] != "" {
+ var fp string
fp += etnames[et]
return fp
}
+ var fp string
fp += fmt.Sprintf("E-%d", et)
return fp
}
// Fmt "%S": syms
func symfmt(s *Sym, flag int) string {
- var fp string
-
- var p string
-
if s.Pkg != nil && flag&obj.FmtShort == 0 /*untyped*/ {
switch fmtmode {
case FErr: // This is for the user
if s.Pkg == localpkg {
+ var fp string
fp += s.Name
return fp
}
// If the name was used by multiple packages, display the full path,
if s.Pkg.Name != "" && Pkglookup(s.Pkg.Name, nil).Npkg > 1 {
+ var fp string
fp += fmt.Sprintf("\"%v\".%s", Zconv(s.Pkg.Path, 0), s.Name)
return fp
}
+ var fp string
fp += fmt.Sprintf("%s.%s", s.Pkg.Name, s.Name)
return fp
case FDbg:
+ var fp string
fp += fmt.Sprintf("%s.%s", s.Pkg.Name, s.Name)
return fp
case FTypeId:
if flag&obj.FmtUnsigned != 0 /*untyped*/ {
+ var fp string
fp += fmt.Sprintf("%s.%s", s.Pkg.Name, s.Name)
return fp // dcommontype, typehash
}
+ var fp string
fp += fmt.Sprintf("%s.%s", s.Pkg.Prefix, s.Name)
return fp // (methodsym), typesym, weaksym
@@ -530,6 +541,7 @@
Fatal("exporting synthetic symbol %s", s.Name)
}
if s.Pkg != builtinpkg {
+ var fp string
fp += fmt.Sprintf("@\"%v\".%s", Zconv(s.Pkg.Path, 0), s.Name)
return fp
}
@@ -539,21 +551,24 @@
if flag&obj.FmtByte != 0 /*untyped*/ { // FmtByte (hh) implies FmtShort (h)
// skip leading "type." in method name
- p = s.Name
+ p := s.Name
if i := strings.LastIndex(s.Name, "."); i >= 0 {
p = s.Name[i+1:]
}
// exportname needs to see the name without the prefix too.
if (fmtmode == FExp && !exportname(p)) || fmtmode == FDbg {
+ var fp string
fp += fmt.Sprintf("@\"%v\".%s", Zconv(s.Pkg.Path, 0), p)
return fp
}
+ var fp string
fp += p
return fp
}
+ var fp string
fp += s.Name
return fp
}
@@ -583,12 +598,8 @@
}
func typefmt(t *Type, flag int) string {
- var fp string
-
- var t1 *Type
- var s *Sym
-
if t == nil {
+ var fp string
fp += "<T>"
return fp
}
@@ -596,6 +607,7 @@
if t == bytetype || t == runetype {
// in %-T mode collapse rune and byte with their originals.
if fmtmode != FTypeId {
+ var fp string
fp += fmt.Sprintf("%v", Sconv(t.Sym, obj.FmtShort))
return fp
}
@@ -603,6 +615,7 @@
}
if t == errortype {
+ var fp string
fp += "error"
return fp
}
@@ -613,14 +626,17 @@
case FTypeId:
if flag&obj.FmtShort != 0 /*untyped*/ {
if t.Vargen != 0 {
+ var fp string
fp += fmt.Sprintf("%v·%d", Sconv(t.Sym, obj.FmtShort), t.Vargen)
return fp
}
+ var fp string
fp += fmt.Sprintf("%v", Sconv(t.Sym, obj.FmtShort))
return fp
}
if flag&obj.FmtUnsigned != 0 /*untyped*/ {
+ var fp string
fp += fmt.Sprintf("%v", Sconv(t.Sym, obj.FmtUnsigned))
return fp
}
@@ -629,15 +645,18 @@
// fallthrough
case FExp:
if t.Sym.Pkg == localpkg && t.Vargen != 0 {
+ var fp string
fp += fmt.Sprintf("%v·%d", Sconv(t.Sym, 0), t.Vargen)
return fp
}
}
+ var fp string
fp += fmt.Sprintf("%v", Sconv(t.Sym, 0))
return fp
}
+ var fp string
if int(t.Etype) < len(basicnames) && basicnames[t.Etype] != "" {
if fmtmode == FErr && (t == idealbool || t == idealstring) {
fp += "untyped "
@@ -696,7 +715,7 @@
case TINTER:
fp += "interface {"
- for t1 = t.Type; t1 != nil; t1 = t1.Down {
+ for t1 := t.Type; t1 != nil; t1 = t1.Down {
if exportname(t1.Sym.Name) {
if t1.Down != nil {
fp += fmt.Sprintf(" %v%v;", Sconv(t1.Sym, obj.FmtShort), Tconv(t1.Type, obj.FmtShort))
@@ -769,7 +788,7 @@
if t.Funarg != 0 {
fp += "("
if fmtmode == FTypeId || fmtmode == FErr { // no argument names on function signature, and no "noescape"/"nosplit" tags
- for t1 = t.Type; t1 != nil; t1 = t1.Down {
+ for t1 := t.Type; t1 != nil; t1 = t1.Down {
if t1.Down != nil {
fp += fmt.Sprintf("%v, ", Tconv(t1, obj.FmtShort))
} else {
@@ -777,7 +796,7 @@
}
}
} else {
- for t1 = t.Type; t1 != nil; t1 = t1.Down {
+ for t1 := t.Type; t1 != nil; t1 = t1.Down {
if t1.Down != nil {
fp += fmt.Sprintf("%v, ", Tconv(t1, 0))
} else {
@@ -789,7 +808,7 @@
fp += ")"
} else {
fp += "struct {"
- for t1 = t.Type; t1 != nil; t1 = t1.Down {
+ for t1 := t.Type; t1 != nil; t1 = t1.Down {
if t1.Down != nil {
fp += fmt.Sprintf(" %v;", Tconv(t1, obj.FmtLong))
} else {
@@ -803,7 +822,7 @@
case TFIELD:
if flag&obj.FmtShort == 0 /*untyped*/ {
- s = t.Sym
+ s := t.Sym
// Take the name from the original, lest we substitute it with ~r%d or ~b%d.
// ~r%d is a (formerly) unnamed result.
@@ -896,23 +915,19 @@
func stmtfmt(n *Node) string {
var f string
- var complexinit bool
- var simpleinit bool
- var extrablock bool
-
// some statements allow for an init, but at most one,
// but we may have an arbitrary number added, eg by typecheck
// and inlining. If it doesn't fit the syntax, emit an enclosing
// block starting with the init statements.
// if we can just say "for" n->ninit; ... then do so
- simpleinit = n.Ninit != nil && n.Ninit.Next == nil && n.Ninit.N.Ninit == nil && stmtwithinit(int(n.Op))
+ simpleinit := n.Ninit != nil && n.Ninit.Next == nil && n.Ninit.N.Ninit == nil && stmtwithinit(int(n.Op))
// otherwise, print the inits as separate statements
- complexinit = n.Ninit != nil && !simpleinit && (fmtmode != FErr)
+ complexinit := n.Ninit != nil && !simpleinit && (fmtmode != FErr)
// but if it was for if/for/switch, put in an extra surrounding block to limit the scope
- extrablock = complexinit && stmtwithinit(int(n.Op))
+ extrablock := complexinit && stmtwithinit(int(n.Op))
if extrablock {
f += "{"
@@ -1208,50 +1223,51 @@
}
func exprfmt(n *Node, prec int) string {
- var f string
-
- var nprec int
- var ptrlit bool
- var l *NodeList
-
for n != nil && n.Implicit != 0 && (n.Op == OIND || n.Op == OADDR) {
n = n.Left
}
if n == nil {
+ var f string
f += "<N>"
return f
}
- nprec = opprec[n.Op]
+ nprec := opprec[n.Op]
if n.Op == OTYPE && n.Sym != nil {
nprec = 8
}
if prec > nprec {
+ var f string
f += fmt.Sprintf("(%v)", Nconv(n, 0))
return f
}
switch n.Op {
case OPAREN:
+ var f string
f += fmt.Sprintf("(%v)", Nconv(n.Left, 0))
return f
case ODDDARG:
+ var f string
f += fmt.Sprintf("... argument")
return f
case OREGISTER:
+ var f string
f += fmt.Sprintf("%v", Ctxt.Rconv(int(n.Val.U.Reg)))
return f
case OLITERAL: // this is a bit of a mess
if fmtmode == FErr && n.Sym != nil {
+ var f string
f += fmt.Sprintf("%v", Sconv(n.Sym, 0))
return f
}
if n.Val.Ctype == CTNIL && n.Orig != nil && n.Orig != n {
+ var f string
f += exprfmt(n.Orig, prec)
return f
}
@@ -1259,14 +1275,17 @@
// Need parens when type begins with what might
// be misinterpreted as a unary operator: * or <-.
if Isptr[n.Type.Etype] != 0 || (n.Type.Etype == TCHAN && n.Type.Chan == Crecv) {
+ var f string
f += fmt.Sprintf("(%v)(%v)", Tconv(n.Type, 0), Vconv(&n.Val, 0))
return f
} else {
+ var f string
f += fmt.Sprintf("%v(%v)", Tconv(n.Type, 0), Vconv(&n.Val, 0))
return f
}
}
+ var f string
f += fmt.Sprintf("%v", Vconv(&n.Val, 0))
return f
@@ -1274,10 +1293,12 @@
// _ becomes ~b%d internally; print as _ for export
case ONAME:
if fmtmode == FExp && n.Sym != nil && n.Sym.Name[0] == '~' && n.Sym.Name[1] == 'b' {
+ var f string
f += fmt.Sprintf("_")
return f
}
if fmtmode == FExp && n.Sym != nil && !isblank(n) && n.Vargen > 0 {
+ var f string
f += fmt.Sprintf("%v·%d", Sconv(n.Sym, 0), n.Vargen)
return f
}
@@ -1287,9 +1308,11 @@
// These nodes have the special property that they are names with a left OTYPE and a right ONAME.
if fmtmode == FExp && n.Left != nil && n.Left.Op == OTYPE && n.Right != nil && n.Right.Op == ONAME {
if Isptr[n.Left.Type.Etype] != 0 {
+ var f string
f += fmt.Sprintf("(%v).%v", Tconv(n.Left.Type, 0), Sconv(n.Right.Sym, obj.FmtShort|obj.FmtByte))
return f
} else {
+ var f string
f += fmt.Sprintf("%v.%v", Tconv(n.Left.Type, 0), Sconv(n.Right.Sym, obj.FmtShort|obj.FmtByte))
return f
}
@@ -1299,44 +1322,54 @@
//fallthrough
case OPACK,
ONONAME:
+ var f string
f += fmt.Sprintf("%v", Sconv(n.Sym, 0))
return f
case OTYPE:
if n.Type == nil && n.Sym != nil {
+ var f string
f += fmt.Sprintf("%v", Sconv(n.Sym, 0))
return f
}
+ var f string
f += fmt.Sprintf("%v", Tconv(n.Type, 0))
return f
case OTARRAY:
if n.Left != nil {
+ var f string
f += fmt.Sprintf("[]%v", Nconv(n.Left, 0))
return f
}
+ var f string
f += fmt.Sprintf("[]%v", Nconv(n.Right, 0))
return f // happens before typecheck
case OTMAP:
+ var f string
f += fmt.Sprintf("map[%v]%v", Nconv(n.Left, 0), Nconv(n.Right, 0))
return f
case OTCHAN:
switch n.Etype {
case Crecv:
+ var f string
f += fmt.Sprintf("<-chan %v", Nconv(n.Left, 0))
return f
case Csend:
+ var f string
f += fmt.Sprintf("chan<- %v", Nconv(n.Left, 0))
return f
default:
if n.Left != nil && n.Left.Op == OTCHAN && n.Left.Sym == nil && n.Left.Etype == Crecv {
+ var f string
f += fmt.Sprintf("chan (%v)", Nconv(n.Left, 0))
return f
} else {
+ var f string
f += fmt.Sprintf("chan %v", Nconv(n.Left, 0))
return f
}
@@ -1344,71 +1377,85 @@
fallthrough
case OTSTRUCT:
+ var f string
f += fmt.Sprintf("<struct>")
return f
case OTINTER:
+ var f string
f += fmt.Sprintf("<inter>")
return f
case OTFUNC:
+ var f string
f += fmt.Sprintf("<func>")
return f
case OCLOSURE:
if fmtmode == FErr {
+ var f string
f += "func literal"
return f
}
if n.Nbody != nil {
+ var f string
f += fmt.Sprintf("%v { %v }", Tconv(n.Type, 0), Hconv(n.Nbody, 0))
return f
}
+ var f string
f += fmt.Sprintf("%v { %v }", Tconv(n.Type, 0), Hconv(n.Closure.Nbody, 0))
return f
case OCOMPLIT:
- ptrlit = n.Right != nil && n.Right.Implicit != 0 && n.Right.Type != nil && Isptr[n.Right.Type.Etype] != 0
+ ptrlit := n.Right != nil && n.Right.Implicit != 0 && n.Right.Type != nil && Isptr[n.Right.Type.Etype] != 0
if fmtmode == FErr {
if n.Right != nil && n.Right.Type != nil && n.Implicit == 0 {
if ptrlit {
+ var f string
f += fmt.Sprintf("&%v literal", Tconv(n.Right.Type.Type, 0))
return f
} else {
+ var f string
f += fmt.Sprintf("%v literal", Tconv(n.Right.Type, 0))
return f
}
}
+ var f string
f += "composite literal"
return f
}
if fmtmode == FExp && ptrlit {
// typecheck has overwritten OIND by OTYPE with pointer type.
+ var f string
f += fmt.Sprintf("(&%v{ %v })", Tconv(n.Right.Type.Type, 0), Hconv(n.List, obj.FmtComma))
return f
}
+ var f string
f += fmt.Sprintf("(%v{ %v })", Nconv(n.Right, 0), Hconv(n.List, obj.FmtComma))
return f
case OPTRLIT:
if fmtmode == FExp && n.Left.Implicit != 0 {
+ var f string
f += fmt.Sprintf("%v", Nconv(n.Left, 0))
return f
}
+ var f string
f += fmt.Sprintf("&%v", Nconv(n.Left, 0))
return f
case OSTRUCTLIT:
if fmtmode == FExp { // requires special handling of field names
+ var f string
if n.Implicit != 0 {
f += "{"
} else {
f += fmt.Sprintf("(%v{", Tconv(n.Type, 0))
}
- for l = n.List; l != nil; l = l.Next {
+ for l := n.List; l != nil; l = l.Next {
f += fmt.Sprintf(" %v:%v", Sconv(l.N.Left.Sym, obj.FmtShort|obj.FmtByte), Nconv(l.N.Right, 0))
if l.Next != nil {
@@ -1432,13 +1479,16 @@
case OARRAYLIT,
OMAPLIT:
if fmtmode == FErr {
+ var f string
f += fmt.Sprintf("%v literal", Tconv(n.Type, 0))
return f
}
if fmtmode == FExp && n.Implicit != 0 {
+ var f string
f += fmt.Sprintf("{ %v }", Hconv(n.List, obj.FmtComma))
return f
}
+ var f string
f += fmt.Sprintf("(%v{ %v })", Tconv(n.Type, 0), Hconv(n.List, obj.FmtComma))
return f
@@ -1446,22 +1496,27 @@
if n.Left != nil && n.Right != nil {
if fmtmode == FExp && n.Left.Type != nil && n.Left.Type.Etype == TFIELD {
// requires special handling of field names
+ var f string
f += fmt.Sprintf("%v:%v", Sconv(n.Left.Sym, obj.FmtShort|obj.FmtByte), Nconv(n.Right, 0))
return f
} else {
+ var f string
f += fmt.Sprintf("%v:%v", Nconv(n.Left, 0), Nconv(n.Right, 0))
return f
}
}
if n.Left == nil && n.Right != nil {
+ var f string
f += fmt.Sprintf(":%v", Nconv(n.Right, 0))
return f
}
if n.Left != nil && n.Right == nil {
+ var f string
f += fmt.Sprintf("%v:", Nconv(n.Left, 0))
return f
}
+ var f string
f += ":"
return f
@@ -1471,6 +1526,7 @@
ODOTINTER,
ODOTMETH,
OCALLPART:
+ var f string
f += exprfmt(n.Left, nprec)
if n.Right == nil || n.Right.Sym == nil {
f += ".<nil>"
@@ -1481,6 +1537,7 @@
case ODOTTYPE,
ODOTTYPE2:
+ var f string
f += exprfmt(n.Left, nprec)
if n.Right != nil {
f += fmt.Sprintf(".(%v)", Nconv(n.Right, 0))
@@ -1496,12 +1553,14 @@
OSLICEARR,
OSLICE3,
OSLICE3ARR:
+ var f string
f += exprfmt(n.Left, nprec)
f += fmt.Sprintf("[%v]", Nconv(n.Right, 0))
return f
case OCOPY,
OCOMPLEX:
+ var f string
f += fmt.Sprintf("%v(%v, %v)", Oconv(int(n.Op), obj.FmtSharp), Nconv(n.Left, 0), Nconv(n.Right, 0))
return f
@@ -1514,13 +1573,16 @@
OSTRARRAYRUNE,
ORUNESTR:
if n.Type == nil || n.Type.Sym == nil {
+ var f string
f += fmt.Sprintf("(%v)(%v)", Tconv(n.Type, 0), Nconv(n.Left, 0))
return f
}
if n.Left != nil {
+ var f string
f += fmt.Sprintf("%v(%v)", Tconv(n.Type, 0), Nconv(n.Left, 0))
return f
}
+ var f string
f += fmt.Sprintf("%v(%v)", Tconv(n.Type, 0), Hconv(n.List, obj.FmtComma))
return f
@@ -1538,13 +1600,16 @@
OPRINT,
OPRINTN:
if n.Left != nil {
+ var f string
f += fmt.Sprintf("%v(%v)", Oconv(int(n.Op), obj.FmtSharp), Nconv(n.Left, 0))
return f
}
if n.Isddd != 0 {
+ var f string
f += fmt.Sprintf("%v(%v...)", Oconv(int(n.Op), obj.FmtSharp), Hconv(n.List, obj.FmtComma))
return f
}
+ var f string
f += fmt.Sprintf("%v(%v)", Oconv(int(n.Op), obj.FmtSharp), Hconv(n.List, obj.FmtComma))
return f
@@ -1552,6 +1617,7 @@
OCALLFUNC,
OCALLINTER,
OCALLMETH:
+ var f string
f += exprfmt(n.Left, nprec)
if n.Isddd != 0 {
f += fmt.Sprintf("(%v...)", Hconv(n.List, obj.FmtComma))
@@ -1564,17 +1630,21 @@
OMAKECHAN,
OMAKESLICE:
if n.List != nil { // pre-typecheck
+ var f string
f += fmt.Sprintf("make(%v, %v)", Tconv(n.Type, 0), Hconv(n.List, obj.FmtComma))
return f
}
if n.Right != nil {
+ var f string
f += fmt.Sprintf("make(%v, %v, %v)", Tconv(n.Type, 0), Nconv(n.Left, 0), Nconv(n.Right, 0))
return f
}
if n.Left != nil {
+ var f string
f += fmt.Sprintf("make(%v, %v)", Tconv(n.Type, 0), Nconv(n.Left, 0))
return f
}
+ var f string
f += fmt.Sprintf("make(%v)", Tconv(n.Type, 0))
return f
@@ -1586,6 +1656,7 @@
OIND,
ONOT,
ORECV:
+ var f string
if n.Left.Op == n.Op {
f += fmt.Sprintf("%v ", Oconv(int(n.Op), obj.FmtSharp))
} else {
@@ -1615,6 +1686,7 @@
OSEND,
OSUB,
OXOR:
+ var f string
f += exprfmt(n.Left, nprec)
f += fmt.Sprintf(" %v ", Oconv(int(n.Op), obj.FmtSharp))
@@ -1622,7 +1694,8 @@
return f
case OADDSTR:
- for l = n.List; l != nil; l = l.Next {
+ var f string
+ for l := n.List; l != nil; l = l.Next {
if l != n.List {
f += fmt.Sprintf(" + ")
}
@@ -1633,22 +1706,20 @@
case OCMPSTR,
OCMPIFACE:
+ var f string
f += exprfmt(n.Left, nprec)
f += fmt.Sprintf(" %v ", Oconv(int(n.Etype), obj.FmtSharp))
f += exprfmt(n.Right, nprec+1)
return f
}
+ var f string
f += fmt.Sprintf("<node %v>", Oconv(int(n.Op), 0))
return f
}
func nodefmt(n *Node, flag int) string {
- var f string
-
- var t *Type
-
- t = n.Type
+ t := n.Type
// we almost always want the original, except in export mode for literals
// this saves the importer some work, and avoids us having to redo some
@@ -1659,9 +1730,11 @@
if flag&obj.FmtLong != 0 /*untyped*/ && t != nil {
if t.Etype == TNIL {
+ var f string
f += fmt.Sprintf("nil")
return f
} else {
+ var f string
f += fmt.Sprintf("%v (type %v)", Nconv(n, 0), Tconv(t, 0))
return f
}
@@ -1673,6 +1746,7 @@
return stmtfmt(n)
}
+ var f string
f += exprfmt(n, 0)
return f
}
@@ -1684,16 +1758,14 @@
}
func nodedump(n *Node, flag int) string {
- var fp string
-
- var recur bool
-
if n == nil {
+ var fp string
return fp
}
- recur = flag&obj.FmtShort == 0 /*untyped*/
+ recur := flag&obj.FmtShort == 0 /*untyped*/
+ var fp string
if recur {
fp = indent(fp)
if dumpdepth > 10 {
@@ -1795,28 +1867,25 @@
// Fmt "%S": syms
// Flags: "%hS" suppresses qualifying with package
func Sconv(s *Sym, flag int) string {
- var fp string
-
- var r int
- var sm int
- var sf int
-
if flag&obj.FmtLong != 0 /*untyped*/ {
panic("linksymfmt")
}
if s == nil {
+ var fp string
fp += "<S>"
return fp
}
if s.Name == "_" {
+ var fp string
fp += "_"
return fp
}
- sf = flag
- sm = setfmode(&flag)
+ sf := flag
+ sm := setfmode(&flag)
+ var r int
_ = r
str := symfmt(s, flag)
flag = sf
@@ -1829,25 +1898,21 @@
// 'h' omit 'func' and receiver from function types, short type names
// 'u' package name, not prefix (FTypeId mode, sticky)
func Tconv(t *Type, flag int) string {
- var fp string
-
- var r int
- var sm int
- var sf int
-
if t == nil {
+ var fp string
fp += "<T>"
return fp
}
if t.Trecur > 4 {
+ var fp string
fp += "<...>"
return fp
}
t.Trecur++
- sf = flag
- sm = setfmode(&flag)
+ sf := flag
+ sm := setfmode(&flag)
if fmtmode == FTypeId && (sf&obj.FmtUnsigned != 0) {
fmtpkgpfx++
@@ -1856,6 +1921,7 @@
flag |= obj.FmtUnsigned
}
+ var r int
_ = r
str := typefmt(t, flag)
@@ -1873,19 +1939,15 @@
// Flags: 'l' suffix with "(type %T)" where possible
// '+h' in debug mode, don't recurse, no multiline output
func Nconv(n *Node, flag int) string {
- var fp string
-
- var r int
- var sm int
- var sf int
-
if n == nil {
+ var fp string
fp += "<N>"
return fp
}
- sf = flag
- sm = setfmode(&flag)
+ sf := flag
+ sm := setfmode(&flag)
+ var r int
_ = r
var str string
switch fmtmode {
@@ -1910,28 +1972,24 @@
// Fmt '%H': NodeList.
// Flags: all those of %N plus ',': separate with comma's instead of semicolons.
func Hconv(l *NodeList, flag int) string {
- var fp string
-
- var r int
- var sm int
- var sf int
- var sep string
-
if l == nil && fmtmode == FDbg {
+ var fp string
fp += "<nil>"
return fp
}
- sf = flag
- sm = setfmode(&flag)
+ sf := flag
+ sm := setfmode(&flag)
+ var r int
_ = r
- sep = "; "
+ sep := "; "
if fmtmode == FDbg {
sep = "\n"
} else if flag&obj.FmtComma != 0 /*untyped*/ {
sep = ", "
}
+ var fp string
for ; l != nil; l = l.Next {
fp += fmt.Sprintf("%v", Nconv(l.N, 0))
if l.Next != nil {
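(Illustration, not part of the CL: the fmt.go rewrites look verbose because grind gives every return path its own var fp string rather than sharing one declared at the top, so each fp lives only as long as the path that builds it. A compilable miniature of the Oconv shape; opnames here is a stand-in slice.)

package main

import "fmt"

var opnames = []string{"ADD", "SUB"}

func oconv(o int) string {
	if o >= 0 && o < len(opnames) && opnames[o] != "" {
		var fp string // declared on this path only
		fp += opnames[o]
		return fp
	}
	var fp string // a second, independent fp for the fallback path
	fp += fmt.Sprintf("O-%d", o)
	return fp
}

func main() {
	fmt.Println(oconv(0), oconv(7))
}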
diff --git a/src/cmd/internal/gc/gen.go b/src/cmd/internal/gc/gen.go
index 9d41b6f..079158a 100644
--- a/src/cmd/internal/gc/gen.go
+++ b/src/cmd/internal/gc/gen.go
@@ -18,9 +18,7 @@
var lastlabel *Label
func Sysfunc(name string) *Node {
- var n *Node
-
- n = newname(Pkglookup(name, Runtimepkg))
+ n := newname(Pkglookup(name, Runtimepkg))
n.Class = PFUNC
return n
}
@@ -31,9 +29,6 @@
* as needing to move to the heap.
*/
func addrescapes(n *Node) {
- var buf string
- var oldfn *Node
-
switch n.Op {
// probably a type error already.
// dump("addrescapes", n);
@@ -84,11 +79,11 @@
n.Xoffset = 0
// create stack variable to hold pointer to heap
- oldfn = Curfn
+ oldfn := Curfn
Curfn = n.Curfn
n.Heapaddr = temp(Ptrto(n.Type))
- buf = fmt.Sprintf("&%v", Sconv(n.Sym, 0))
+ buf := fmt.Sprintf("&%v", Sconv(n.Sym, 0))
n.Heapaddr.Sym = Lookup(buf)
n.Heapaddr.Orig.Sym = n.Heapaddr.Sym
n.Esc = EscHeap
@@ -116,9 +111,7 @@
}
func clearlabels() {
- var l *Label
-
- for l = labellist; l != nil; l = l.Link {
+ for l := labellist; l != nil; l = l.Link {
l.Sym.Label = nil
}
@@ -127,11 +120,8 @@
}
func newlab(n *Node) *Label {
- var s *Sym
- var lab *Label
-
- s = n.Left.Sym
- lab = s.Label
+ s := n.Left.Sym
+ lab := s.Label
if lab == nil {
lab = new(Label)
if lastlabel == nil {
@@ -158,41 +148,33 @@
}
func checkgoto(from *Node, to *Node) {
- var nf int
- var nt int
- var block *Sym
- var dcl *Sym
- var fs *Sym
- var ts *Sym
- var lno int
-
if from.Sym == to.Sym {
return
}
- nf = 0
- for fs = from.Sym; fs != nil; fs = fs.Link {
+ nf := 0
+ for fs := from.Sym; fs != nil; fs = fs.Link {
nf++
}
- nt = 0
- for fs = to.Sym; fs != nil; fs = fs.Link {
+ nt := 0
+ for fs := to.Sym; fs != nil; fs = fs.Link {
nt++
}
- fs = from.Sym
+ fs := from.Sym
for ; nf > nt; nf-- {
fs = fs.Link
}
if fs != to.Sym {
- lno = int(lineno)
+ lno := int(lineno)
setlineno(from)
// decide what to complain about.
// prefer to complain about 'into block' over declarations,
// so scan backward to find most recent block or else dcl.
- block = nil
+ block := (*Sym)(nil)
- dcl = nil
- ts = to.Sym
+ dcl := (*Sym)(nil)
+ ts := to.Sym
for ; nt > nf; nt-- {
if ts.Pkg == nil {
block = ts
@@ -222,10 +204,8 @@
}
func stmtlabel(n *Node) *Label {
- var lab *Label
-
if n.Sym != nil {
- lab = n.Sym.Label
+ lab := n.Sym.Label
if lab != nil {
if lab.Def != nil {
if lab.Def.Defn == n {
@@ -295,8 +275,6 @@
* generate discard of value
*/
func cgen_discard(nr *Node) {
- var tmp Node
-
if nr == nil {
return
}
@@ -342,6 +320,7 @@
// special enough to just evaluate
default:
+ var tmp Node
Tempname(&tmp, nr.Type)
Cgen_as(&tmp, nr)
@@ -353,10 +332,7 @@
* clearslim generates code to zero a slim node.
*/
func Clearslim(n *Node) {
- var z Node
- var zero Mpflt
-
- z = Node{}
+ z := Node{}
z.Op = OLITERAL
z.Type = n.Type
z.Addable = 1
@@ -370,6 +346,7 @@
case TFLOAT32,
TFLOAT64:
+ var zero Mpflt
Mpmovecflt(&zero, 0.0)
z.Val.Ctype = CTFLT
z.Val.U.Fval = &zero
@@ -410,20 +387,17 @@
* n->right is data
*/
func Cgen_eface(n *Node, res *Node) {
- var dst Node
/*
* the right node of an eface may contain function calls that use res as an argument,
* so it's important that it is done first
*/
- var tmp *Node
-
- tmp = temp(Types[Tptr])
+ tmp := temp(Types[Tptr])
Thearch.Cgen(n.Right, tmp)
Gvardef(res)
- dst = *res
+ dst := *res
dst.Type = Types[Tptr]
dst.Xoffset += int64(Widthptr)
Thearch.Cgen(tmp, &dst)
@@ -442,23 +416,9 @@
* called for OSLICE, OSLICE3, OSLICEARR, OSLICE3ARR, OSLICESTR.
*/
func Cgen_slice(n *Node, res *Node) {
- var src Node
- var dst Node
- var cap *Node
- var len *Node
- var offs *Node
- var add *Node
- var base *Node
- var tmpcap *Node
- var tmplen *Node
- var cmp *Node
- var con Node
- var p1 *obj.Prog
- var p2 *obj.Prog
-
- cap = n.List.N
- len = n.List.Next.N
- offs = nil
+ cap := n.List.N
+ len := n.List.Next.N
+ offs := (*Node)(nil)
if n.List.Next.Next != nil {
offs = n.List.Next.Next.N
}
@@ -470,15 +430,17 @@
// might cause preemption or garbage collection.
// this makes the whole slice update atomic as far as the
// garbage collector can see.
- base = temp(Types[TUINTPTR])
+ base := temp(Types[TUINTPTR])
- tmplen = temp(Types[TINT])
+ tmplen := temp(Types[TINT])
+ var tmpcap *Node
if n.Op != OSLICESTR {
tmpcap = temp(Types[TINT])
} else {
tmpcap = tmplen
}
+ var src Node
if isnil(n.Left) {
Tempname(&src, n.Left.Type)
Thearch.Cgen(n.Left, &src)
@@ -519,16 +481,17 @@
// In essence we are replacing x[i:j:k] where i == j == k
// or x[i:j] where i == j == cap(x) with x[0:0:0].
if offs != nil {
- p1 = gjmp(nil)
- p2 = gjmp(nil)
+ p1 := gjmp(nil)
+ p2 := gjmp(nil)
Patch(p1, Pc)
+ var con Node
Nodconst(&con, tmpcap.Type, 0)
- cmp = Nod(OEQ, tmpcap, &con)
+ cmp := Nod(OEQ, tmpcap, &con)
typecheck(&cmp, Erv)
Thearch.Bgen(cmp, true, -1, p2)
- add = Nod(OADD, base, offs)
+ add := Nod(OADD, base, offs)
typecheck(&add, Erv)
Thearch.Cgen(add, base)
@@ -536,7 +499,7 @@
}
// dst.array = src.array [ + lo *width ]
- dst = *res
+ dst := *res
dst.Xoffset += int64(Array_array)
dst.Type = Types[Tptr]
@@ -616,9 +579,6 @@
* make a new off the books
*/
func Tempname(nn *Node, t *Type) {
- var n *Node
- var s *Sym
-
if Curfn == nil {
Fatal("no curfn for tempname")
}
@@ -633,8 +593,8 @@
namebuf = fmt.Sprintf("autotmp_%.4d", statuniqgen)
statuniqgen++
- s = Lookup(namebuf)
- n = Nod(ONAME, nil, nil)
+ s := Lookup(namebuf)
+ n := Nod(ONAME, nil, nil)
n.Sym = s
s.Def = n
n.Type = t
@@ -651,26 +611,16 @@
}
func temp(t *Type) *Node {
- var n *Node
-
- n = Nod(OXXX, nil, nil)
+ n := Nod(OXXX, nil, nil)
Tempname(n, t)
n.Sym.Def.Used = 1
return n.Orig
}
func gen(n *Node) {
- var lno int32
- var scontin *obj.Prog
- var sbreak *obj.Prog
- var p1 *obj.Prog
- var p2 *obj.Prog
- var p3 *obj.Prog
- var lab *Label
-
//dump("gen", n);
- lno = setlineno(n)
+ lno := setlineno(n)
wasregalloc := Thearch.Anyregalloc()
@@ -708,10 +658,11 @@
break
}
- lab = newlab(n)
+ lab := newlab(n)
// if there are pending gotos, resolve them all to the current pc.
- for p1 = lab.Gotopc; p1 != nil; p1 = p2 {
+ var p2 *obj.Prog
+ for p1 := lab.Gotopc; p1 != nil; p1 = p2 {
p2 = unpatch(p1)
Patch(p1, Pc)
}
@@ -739,7 +690,7 @@
// to the same label. we'll unwind it when we learn the pc
// of the label in the OLABEL case above.)
case OGOTO:
- lab = newlab(n)
+ lab := newlab(n)
if lab.Labelpc != nil {
gjmp(lab.Labelpc)
@@ -749,7 +700,7 @@
case OBREAK:
if n.Left != nil {
- lab = n.Left.Sym.Label
+ lab := n.Left.Sym.Label
if lab == nil {
Yyerror("break label not defined: %v", Sconv(n.Left.Sym, 0))
break
@@ -774,7 +725,7 @@
case OCONTINUE:
if n.Left != nil {
- lab = n.Left.Sym.Label
+ lab := n.Left.Sym.Label
if lab == nil {
Yyerror("continue label not defined: %v", Sconv(n.Left.Sym, 0))
break
@@ -798,14 +749,14 @@
gjmp(continpc)
case OFOR:
- sbreak = breakpc
- p1 = gjmp(nil) // goto test
+ sbreak := breakpc
+ p1 := gjmp(nil) // goto test
breakpc = gjmp(nil) // break: goto done
- scontin = continpc
+ scontin := continpc
continpc = Pc
// define break and continue labels
- lab = stmtlabel(n)
+ lab := stmtlabel(n)
if lab != nil {
lab.Breakpc = breakpc
lab.Continpc = continpc
@@ -825,23 +776,23 @@
}
case OIF:
- p1 = gjmp(nil) // goto test
- p2 = gjmp(nil) // p2: goto else
+ p1 := gjmp(nil) // goto test
+ p2 := gjmp(nil) // p2: goto else
Patch(p1, Pc) // test:
Thearch.Bgen(n.Ntest, false, int(-n.Likely), p2) // if(!test) goto p2
Genlist(n.Nbody) // then
- p3 = gjmp(nil) // goto done
+ p3 := gjmp(nil) // goto done
Patch(p2, Pc) // else:
Genlist(n.Nelse) // else
Patch(p3, Pc) // done:
case OSWITCH:
- sbreak = breakpc
- p1 = gjmp(nil) // goto test
+ sbreak := breakpc
+ p1 := gjmp(nil) // goto test
breakpc = gjmp(nil) // break: goto done
// define break label
- lab = stmtlabel(n)
+ lab := stmtlabel(n)
if lab != nil {
lab.Breakpc = breakpc
}
@@ -855,12 +806,12 @@
}
case OSELECT:
- sbreak = breakpc
- p1 = gjmp(nil) // goto test
+ sbreak := breakpc
+ p1 := gjmp(nil) // goto test
breakpc = gjmp(nil) // break: goto done
// define break label
- lab = stmtlabel(n)
+ lab := stmtlabel(n)
if lab != nil {
lab.Breakpc = breakpc
}
@@ -918,8 +869,6 @@
}
func Cgen_as(nl *Node, nr *Node) {
- var tl *Type
-
if Debug['g'] != 0 {
Dump("cgen_as", nl)
Dump("cgen_as = ", nr)
@@ -940,7 +889,7 @@
return
}
- tl = nl.Type
+ tl := nl.Type
if tl == nil {
return
}
@@ -956,7 +905,7 @@
return
}
- tl = nl.Type
+ tl := nl.Type
if tl == nil {
return
}
@@ -965,19 +914,16 @@
}
func Cgen_callmeth(n *Node, proc int) {
- var n2 Node
- var l *Node
-
// generate a rewrite in n2 for the method call
// (p.f)(...) goes to (f)(p,...)
- l = n.Left
+ l := n.Left
if l.Op != ODOTMETH {
Fatal("cgen_callmeth: not dotmethod: %v")
}
- n2 = *n
+ n2 := *n
n2.Op = OCALLFUNC
n2.Left = l.Right
n2.Left.Type = l.Type
@@ -989,10 +935,9 @@
}
func checklabels() {
- var lab *Label
var l *NodeList
- for lab = labellist; lab != nil; lab = lab.Link {
+ for lab := labellist; lab != nil; lab = lab.Link {
if lab.Def == nil {
for l = lab.Use; l != nil; l = l.Next {
yyerrorl(int(l.N.Lineno), "label %v not defined", Sconv(lab.Sym, 0))
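(Illustration, not part of the CL: gen's switch now declares lab, p1, sbreak and friends separately inside each case instead of once for the whole function, since the uses are disjoint. The same idea in miniature; describe is an invented function.)

package main

import "fmt"

func describe(n int) string {
	switch {
	case n < 0:
		s := "negative" // each case declares its own s,
		return s        // like the per-case lab := stmtlabel(n) above
	case n == 0:
		s := "zero"
		return s
	}
	s := fmt.Sprintf("positive %d", n)
	return s
}

func main() {
	fmt.Println(describe(-1), describe(0), describe(3))
}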
diff --git a/src/cmd/internal/gc/gsubr.go b/src/cmd/internal/gc/gsubr.go
index 6fd6057..ad5e494 100644
--- a/src/cmd/internal/gc/gsubr.go
+++ b/src/cmd/internal/gc/gsubr.go
@@ -80,9 +80,7 @@
* gsubr.c
*/
func Gbranch(as int, t *Type, likely int) *obj.Prog {
- var p *obj.Prog
-
- p = Prog(as)
+ p := Prog(as)
p.To.Type = obj.TYPE_BRANCH
p.To.U.Branch = nil
if as != obj.AJMP && likely != 0 && Thearch.Thechar != '9' {
@@ -170,9 +168,7 @@
}
func fixautoused(p *obj.Prog) {
- var lp **obj.Prog
-
- for lp = &p; ; {
+ for lp := &p; ; {
p = *lp
if p == nil {
break
@@ -205,9 +201,7 @@
}
func ggloblnod(nam *Node) {
- var p *obj.Prog
-
- p = Thearch.Gins(obj.AGLOBL, nam, nil)
+ p := Thearch.Gins(obj.AGLOBL, nam, nil)
p.Lineno = nam.Lineno
p.From.Sym.Gotype = Linksym(ngotype(nam))
p.To.Sym = nil
@@ -222,9 +216,7 @@
}
func ggloblsym(s *Sym, width int32, flags int8) {
- var p *obj.Prog
-
- p = Thearch.Gins(obj.AGLOBL, nil, nil)
+ p := Thearch.Gins(obj.AGLOBL, nil, nil)
p.From.Type = obj.TYPE_MEM
p.From.Name = obj.NAME_EXTERN
p.From.Sym = Linksym(s)
@@ -234,9 +226,7 @@
}
func gjmp(to *obj.Prog) *obj.Prog {
- var p *obj.Prog
-
- p = Gbranch(obj.AJMP, nil, 0)
+ p := Gbranch(obj.AJMP, nil, 0)
if to != nil {
Patch(p, to)
}
@@ -244,9 +234,7 @@
}
func gtrack(s *Sym) {
- var p *obj.Prog
-
- p = Thearch.Gins(obj.AUSEFIELD, nil, nil)
+ p := Thearch.Gins(obj.AUSEFIELD, nil, nil)
p.From.Type = obj.TYPE_MEM
p.From.Name = obj.NAME_EXTERN
p.From.Sym = Linksym(s)
@@ -287,8 +275,6 @@
}
func Naddr(n *Node, a *obj.Addr, canemitcode int) {
- var s *Sym
-
*a = obj.Addr{}
if n == nil {
return
@@ -361,7 +347,7 @@
a.Etype = Simtype[n.Type.Etype]
}
a.Offset = n.Xoffset
- s = n.Sym
+ s := n.Sym
a.Node = n.Orig
//if(a->node >= (Node*)&n)
@@ -502,9 +488,7 @@
}
func newplist() *obj.Plist {
- var pl *obj.Plist
-
- pl = obj.Linknewplist(Ctxt)
+ pl := obj.Linknewplist(Ctxt)
Pc = Ctxt.NewProg()
Clearp(Pc)
@@ -515,16 +499,14 @@
func nodarg(t *Type, fp int) *Node {
var n *Node
- var l *NodeList
- var first *Type
- var savet Iter
// entire argument struct, not just one arg
if t.Etype == TSTRUCT && t.Funarg != 0 {
n = Nod(ONAME, nil, nil)
n.Sym = Lookup(".args")
n.Type = t
- first = Structfirst(&savet, &t)
+ var savet Iter
+ first := Structfirst(&savet, &t)
if first == nil {
Fatal("nodarg: bad struct")
}
@@ -541,7 +523,8 @@
}
if fp == 1 {
- for l = Curfn.Dcl; l != nil; l = l.Next {
+ var n *Node
+ for l := Curfn.Dcl; l != nil; l = l.Next {
n = l.N
if (n.Class == PPARAM || n.Class == PPARAMOUT) && !isblanksym(t.Sym) && n.Sym == t.Sym {
return n
@@ -604,12 +587,10 @@
}
func unpatch(p *obj.Prog) *obj.Prog {
- var q *obj.Prog
-
if p.To.Type != obj.TYPE_BRANCH {
Fatal("unpatch: not a branch")
}
- q = p.To.U.Branch
+ q := p.To.U.Branch
p.To.U.Branch = nil
p.To.Offset = 0
return q
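(Illustration, not part of the CL: most gsubr.go hunks are the simplest case, where a declaration immediately followed by its only assignment collapses into one short variable declaration. Schematically, with prog and newProg as placeholders.)

package main

import "fmt"

type prog struct{ as int }

func newProg(as int) *prog { return &prog{as: as} }

func main() {
	// was:
	//   var p *prog
	//   p = newProg(42)
	p := newProg(42) // declaration and assignment fused
	fmt.Println(p.as)
}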
diff --git a/src/cmd/internal/gc/init.go b/src/cmd/internal/gc/init.go
index 9202ac5..a7d4fbd 100644
--- a/src/cmd/internal/gc/init.go
+++ b/src/cmd/internal/gc/init.go
@@ -54,12 +54,8 @@
* }
*/
func anyinit(n *NodeList) bool {
- var h uint32
- var s *Sym
- var l *NodeList
-
// are there any interesting init statements
- for l = n; l != nil; l = l.Next {
+ for l := n; l != nil; l = l.Next {
switch l.N.Op {
case ODCLFUNC,
ODCLCONST,
@@ -85,14 +81,14 @@
}
// is there an explicit init function
- s = Lookup("init.1")
+ s := Lookup("init.1")
if s.Def != nil {
return true
}
// are there any imported init functions
- for h = 0; h < NHASH; h++ {
+ for h := uint32(0); h < NHASH; h++ {
for s = hash[h]; s != nil; s = s.Link {
if s.Name[0] != 'i' || s.Name != "init" {
continue
@@ -109,16 +105,6 @@
}
func fninit(n *NodeList) {
- var i int
- var gatevar *Node
- var a *Node
- var b *Node
- var fn *Node
- var r *NodeList
- var h uint32
- var s *Sym
- var initsym *Sym
-
if Debug['A'] != 0 {
// sys.go or unsafe.go during compiler build
return
@@ -129,12 +115,12 @@
return
}
- r = nil
+ r := (*NodeList)(nil)
// (1)
namebuf = fmt.Sprintf("initdone·")
- gatevar = newname(Lookup(namebuf))
+ gatevar := newname(Lookup(namebuf))
addvar(gatevar, Types[TUINT8], PEXTERN)
// (2)
@@ -142,8 +128,8 @@
namebuf = fmt.Sprintf("init")
- fn = Nod(ODCLFUNC, nil, nil)
- initsym = Lookup(namebuf)
+ fn := Nod(ODCLFUNC, nil, nil)
+ initsym := Lookup(namebuf)
fn.Nname = newname(initsym)
fn.Nname.Defn = fn
fn.Nname.Ntype = Nod(OTFUNC, nil, nil)
@@ -151,13 +137,13 @@
funchdr(fn)
// (3)
- a = Nod(OIF, nil, nil)
+ a := Nod(OIF, nil, nil)
a.Ntest = Nod(ONE, gatevar, Nodintconst(0))
r = list(r, a)
// (4)
- b = Nod(OIF, nil, nil)
+ b := Nod(OIF, nil, nil)
b.Ntest = Nod(OEQ, gatevar, Nodintconst(2))
b.Nbody = list1(Nod(ORETURN, nil, nil))
@@ -175,7 +161,8 @@
r = list(r, a)
// (7)
- for h = 0; h < NHASH; h++ {
+ var s *Sym
+ for h := uint32(0); h < NHASH; h++ {
for s = hash[h]; s != nil; s = s.Link {
if s.Name[0] != 'i' || s.Name != "init" {
continue
@@ -199,7 +186,7 @@
// (9)
// could check that it is fn of no args/returns
- for i = 1; ; i++ {
+ for i := 1; ; i++ {
namebuf = fmt.Sprintf("init.%d", i)
s = Lookup(namebuf)
if s.Def == nil {
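(Illustration, not part of the CL: counters that need a specific type pick it up from a conversion in the for statement, as in h := uint32(0) above, instead of a separate var h uint32 at the top of the function. A minimal sketch; nbuckets is invented.)

package main

import "fmt"

const nbuckets = 4

func main() {
	// was: var h uint32 ... for h = 0; h < NHASH; h++
	for h := uint32(0); h < nbuckets; h++ {
		fmt.Printf("bucket %d\n", h)
	}
}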
diff --git a/src/cmd/internal/gc/inl.go b/src/cmd/internal/gc/inl.go
index 73d6481..8b088a7 100644
--- a/src/cmd/internal/gc/inl.go
+++ b/src/cmd/internal/gc/inl.go
@@ -47,11 +47,9 @@
// Get the function's package. For ordinary functions it's on the ->sym, but for imported methods
// the ->sym can be re-used in the local package, so peel it off the receiver's type.
func fnpkg(fn *Node) *Pkg {
- var rcvr *Type
-
if fn.Type.Thistuple != 0 {
// method
- rcvr = getthisx(fn.Type).Type.Type
+ rcvr := getthisx(fn.Type).Type.Type
if Isptr[rcvr.Etype] != 0 {
rcvr = rcvr.Type
@@ -69,18 +67,13 @@
// Lazy typechecking of imported bodies. For local functions, caninl will set ->typecheck
// because they're a copy of an already checked body.
func typecheckinl(fn *Node) {
- var savefn *Node
- var pkg *Pkg
- var save_safemode int
- var lno int
-
- lno = int(setlineno(fn))
+ lno := int(setlineno(fn))
// typecheckinl is only for imported functions;
// their bodies may refer to unsafe as long as the package
// was marked safe during import (which was checked then).
// the ->inl of a local function has been typechecked before caninl copied it.
- pkg = fnpkg(fn)
+ pkg := fnpkg(fn)
if pkg == localpkg || pkg == nil {
return // typecheckinl on local function
@@ -90,10 +83,10 @@
fmt.Printf("typecheck import [%v] %v { %v }\n", Sconv(fn.Sym, 0), Nconv(fn, obj.FmtLong), Hconv(fn.Inl, obj.FmtSharp))
}
- save_safemode = safemode
+ save_safemode := safemode
safemode = 0
- savefn = Curfn
+ savefn := Curfn
Curfn = fn
typechecklist(fn.Inl, Etop)
Curfn = savefn
@@ -107,10 +100,6 @@
// If so, caninl saves fn->nbody in fn->inl and substitutes it with a copy.
// fn and ->nbody will already have been typechecked.
func caninl(fn *Node) {
- var savefn *Node
- var t *Type
- var budget int
-
if fn.Op != ODCLFUNC {
Fatal("caninl %v", Nconv(fn, 0))
}
@@ -129,19 +118,19 @@
// can't handle ... args yet
if Debug['l'] < 3 {
- for t = fn.Type.Type.Down.Down.Type; t != nil; t = t.Down {
+ for t := fn.Type.Type.Down.Down.Type; t != nil; t = t.Down {
if t.Isddd != 0 {
return
}
}
}
- budget = 40 // allowed hairyness
+ budget := 40 // allowed hairyness
if ishairylist(fn.Nbody, &budget) {
return
}
- savefn = Curfn
+ savefn := Curfn
Curfn = fn
fn.Nname.Inl = fn.Nbody
@@ -211,9 +200,7 @@
// Any name-like node of non-local class is marked for re-export by adding it to
// the exportlist.
func inlcopylist(ll *NodeList) *NodeList {
- var l *NodeList
-
- l = nil
+ l := (*NodeList)(nil)
for ; ll != nil; ll = ll.Next {
l = list(l, inlcopy(ll.N))
}
@@ -221,8 +208,6 @@
}
func inlcopy(n *Node) *Node {
- var m *Node
-
if n == nil {
return nil
}
@@ -234,7 +219,7 @@
return n
}
- m = Nod(OXXX, nil, nil)
+ m := Nod(OXXX, nil, nil)
*m = *n
m.Inl = nil
m.Left = inlcopy(n.Left)
@@ -253,9 +238,7 @@
// Inlcalls/nodelist/node walks fn's statements and expressions and substitutes any
// calls made to inlineable functions. This is the external entry point.
func inlcalls(fn *Node) {
- var savefn *Node
-
- savefn = Curfn
+ savefn := Curfn
Curfn = fn
inlnode(&fn)
if fn != Curfn {
@@ -277,10 +260,8 @@
// Turn an OINLCALL into a single valued expression.
func inlconv2expr(np **Node) {
- var n *Node
- var r *Node
- n = *np
- r = n.Rlist.N
+ n := *np
+ r := n.Rlist.N
addinit(&r, concat(n.Ninit, n.Nbody))
*np = r
}
@@ -291,13 +272,11 @@
// order will be preserved Used in return, oas2func and call
// statements.
func inlconv2list(n *Node) *NodeList {
- var l *NodeList
-
if n.Op != OINLCALL || n.Rlist == nil {
Fatal("inlconv2list %v\n", Nconv(n, obj.FmtSign))
}
- l = n.Rlist
+ l := n.Rlist
addinit(&l.N, concat(n.Ninit, n.Nbody))
return l
}
@@ -320,15 +299,11 @@
// but then you may as well do it here. so this is cleaner and
// shorter and less complicated.
func inlnode(np **Node) {
- var n *Node
- var l *NodeList
- var lno int
-
if *np == nil {
return
}
- n = *np
+ n := *np
switch n.Op {
// inhibit inlining of their argument
@@ -347,10 +322,10 @@
return
}
- lno = int(setlineno(n))
+ lno := int(setlineno(n))
inlnodelist(n.Ninit)
- for l = n.Ninit; l != nil; l = l.Next {
+ for l := n.Ninit; l != nil; l = l.Next {
if l.N.Op == OINLCALL {
inlconv2stmt(l.N)
}
@@ -369,7 +344,7 @@
inlnodelist(n.List)
switch n.Op {
case OBLOCK:
- for l = n.List; l != nil; l = l.Next {
+ for l := n.List; l != nil; l = l.Next {
if l.N.Op == OINLCALL {
inlconv2stmt(l.N)
}
@@ -391,7 +366,7 @@
// fallthrough
default:
- for l = n.List; l != nil; l = l.Next {
+ for l := n.List; l != nil; l = l.Next {
if l.N.Op == OINLCALL {
inlconv2expr(&l.N)
}
@@ -412,7 +387,7 @@
// fallthrough
default:
- for l = n.Rlist; l != nil; l = l.Next {
+ for l := n.Rlist; l != nil; l = l.Next {
if l.N.Op == OINLCALL {
inlconv2expr(&l.N)
}
@@ -430,14 +405,14 @@
}
inlnodelist(n.Nbody)
- for l = n.Nbody; l != nil; l = l.Next {
+ for l := n.Nbody; l != nil; l = l.Next {
if l.N.Op == OINLCALL {
inlconv2stmt(l.N)
}
}
inlnodelist(n.Nelse)
- for l = n.Nelse; l != nil; l = l.Next {
+ for l := n.Nelse; l != nil; l = l.Next {
if l.N.Op == OINLCALL {
inlconv2stmt(l.N)
}
@@ -488,14 +463,11 @@
}
func mkinlcall(np **Node, fn *Node, isddd int) {
- var save_safemode int
- var pkg *Pkg
-
- save_safemode = safemode
+ save_safemode := safemode
// imported functions may refer to unsafe as long as the
// package was marked safe during import (already checked).
- pkg = fnpkg(fn)
+ pkg := fnpkg(fn)
if pkg != localpkg && pkg != nil {
safemode = 0
@@ -523,26 +495,6 @@
// inlined function body and list, rlist contain the input, output
// parameters.
func mkinlcall1(np **Node, fn *Node, isddd int) {
- var i int
- var chkargcount bool
- var n *Node
- var call *Node
- var saveinlfn *Node
- var as *Node
- var m *Node
- var dcl *NodeList
- var ll *NodeList
- var ninit *NodeList
- var body *NodeList
- var t *Type
- var variadic bool
- var varargcount int
- var multiret int
- var vararg *Node
- var varargs *NodeList
- var varargtype *Type
- var vararrtype *Type
-
// For variadic fn.
if fn.Inl == nil {
return
@@ -556,7 +508,7 @@
typecheckinl(fn)
}
- n = *np
+ n := *np
// Bingo, we have a function node, and it has an inlineable body
if Debug['m'] > 1 {
@@ -569,13 +521,14 @@
fmt.Printf("%v: Before inlining: %v\n", n.Line(), Nconv(n, obj.FmtSign))
}
- saveinlfn = inlfn
+ saveinlfn := inlfn
inlfn = fn
- ninit = n.Ninit
+ ninit := n.Ninit
//dumplist("ninit pre", ninit);
+ var dcl *NodeList
if fn.Defn != nil { // local function
dcl = fn.Inldcl // imported function
} else {
@@ -583,10 +536,10 @@
}
inlretvars = nil
- i = 0
+ i := 0
// Make temp names to use instead of the originals
- for ll = dcl; ll != nil; ll = ll.Next {
+ for ll := dcl; ll != nil; ll = ll.Next {
if ll.N.Class == PPARAMOUT { // return values handled below.
continue
}
@@ -603,7 +556,8 @@
}
// temporaries for return values.
- for t = getoutargx(fn.Type).Type; t != nil; t = t.Down {
+ var m *Node
+ for t := getoutargx(fn.Type).Type; t != nil; t = t.Down {
if t != nil && t.Nname != nil && !isblank(t.Nname) {
m = inlvar(t.Nname)
typecheck(&m, Erv)
@@ -619,9 +573,10 @@
}
// assign receiver.
+ var as *Node
if fn.Type.Thistuple != 0 && n.Left.Op == ODOTMETH {
// method call with a receiver.
- t = getthisx(fn.Type).Type
+ t := getthisx(fn.Type).Type
if t != nil && t.Nname != nil && !isblank(t.Nname) && t.Nname.Inlvar == nil {
Fatal("missing inlvar for %v\n", Nconv(t.Nname, 0))
@@ -640,11 +595,11 @@
}
// check if inlined function is variadic.
- variadic = false
+ variadic := false
- varargtype = nil
- varargcount = 0
- for t = fn.Type.Type.Down.Down.Type; t != nil; t = t.Down {
+ varargtype := (*Type)(nil)
+ varargcount := 0
+ for t := fn.Type.Type.Down.Down.Type; t != nil; t = t.Down {
if t.Isddd != 0 {
variadic = true
varargtype = t.Type
@@ -657,7 +612,7 @@
}
// check if argument is actually a returned tuple from call.
- multiret = 0
+ multiret := 0
if n.List != nil && n.List.Next == nil {
switch n.List.N.Op {
@@ -683,7 +638,7 @@
as = Nod(OAS2, nil, nil)
as.Rlist = n.List
- ll = n.List
+ ll := n.List
// TODO: if len(nlist) == 1 but multiple args, check that n->list->n is a call?
if fn.Type.Thistuple != 0 && n.Left.Op != ODOTMETH {
@@ -693,7 +648,7 @@
}
// append receiver inlvar to LHS.
- t = getthisx(fn.Type).Type
+ t := getthisx(fn.Type).Type
if t != nil && t.Nname != nil && !isblank(t.Nname) && t.Nname.Inlvar == nil {
Fatal("missing inlvar for %v\n", Nconv(t.Nname, 0))
@@ -706,13 +661,14 @@
}
// append ordinary arguments to LHS.
- chkargcount = n.List != nil && n.List.Next != nil
+ chkargcount := n.List != nil && n.List.Next != nil
- vararg = nil // the slice argument to a variadic call
- varargs = nil // the list of LHS names to put in vararg.
+ vararg := (*Node)(nil) // the slice argument to a variadic call
+ varargs := (*NodeList)(nil) // the list of LHS names to put in vararg.
if !chkargcount {
// 0 or 1 expression on RHS.
- for t = getinargx(fn.Type).Type; t != nil; t = t.Down {
+ var i int
+ for t := getinargx(fn.Type).Type; t != nil; t = t.Down {
if variadic && t.Isddd != 0 {
vararg = tinlvar(t)
for i = 0; i < varargcount && ll != nil; i++ {
@@ -728,6 +684,7 @@
}
} else {
// match arguments except final variadic (unless the call is dotted itself)
+ var t *Type
for t = getinargx(fn.Type).Type; t != nil; {
if ll == nil {
break
@@ -743,6 +700,7 @@
// match varargcount arguments with variadic parameters.
if variadic && t != nil && t.Isddd != 0 {
vararg = tinlvar(t)
+ var i int
for i = 0; i < varargcount && ll != nil; i++ {
m = argvar(varargtype, i)
varargs = list(varargs, m)
@@ -772,7 +730,7 @@
as.Right = nodnil()
as.Right.Type = varargtype
} else {
- vararrtype = typ(TARRAY)
+ vararrtype := typ(TARRAY)
vararrtype.Type = varargtype.Type
vararrtype.Bound = int64(varargcount)
@@ -786,7 +744,7 @@
}
// zero the outparams
- for ll = inlretvars; ll != nil; ll = ll.Next {
+ for ll := inlretvars; ll != nil; ll = ll.Next {
as = Nod(OAS, ll.N, nil)
typecheck(&as, Etop)
ninit = list(ninit, as)
@@ -794,7 +752,7 @@
inlretlabel = newlabel_inl()
inlgen++
- body = inlsubstlist(fn.Inl)
+ body := inlsubstlist(fn.Inl)
body = list(body, Nod(OGOTO, inlretlabel, nil)) // avoid 'not used' when function doesn't have return
body = list(body, Nod(OLABEL, inlretlabel, nil))
@@ -803,7 +761,7 @@
//dumplist("ninit post", ninit);
- call = Nod(OINLCALL, nil, nil)
+ call := Nod(OINLCALL, nil, nil)
call.Ninit = ninit
call.Nbody = body
@@ -824,10 +782,10 @@
// either supporting exporting statements with complex ninits
// or saving inl and making inlinl
if Debug['l'] >= 5 {
- body = fn.Inl
+ body := fn.Inl
fn.Inl = nil // prevent infinite recursion
inlnodelist(call.Nbody)
- for ll = call.Nbody; ll != nil; ll = ll.Next {
+ for ll := call.Nbody; ll != nil; ll = ll.Next {
if ll.N.Op == OINLCALL {
inlconv2stmt(ll.N)
}
@@ -844,13 +802,11 @@
// PAUTO's in the calling functions, and link them off of the
// PPARAM's, PAUTOS and PPARAMOUTs of the called function.
func inlvar(var_ *Node) *Node {
- var n *Node
-
if Debug['m'] > 3 {
fmt.Printf("inlvar %v\n", Nconv(var_, obj.FmtSign))
}
- n = newname(var_.Sym)
+ n := newname(var_.Sym)
n.Type = var_.Type
n.Class = PAUTO
n.Used = 1
@@ -872,10 +828,8 @@
// Synthesize a variable to store the inlined function's results in.
func retvar(t *Type, i int) *Node {
- var n *Node
-
namebuf = fmt.Sprintf("~r%d", i)
- n = newname(Lookup(namebuf))
+ n := newname(Lookup(namebuf))
n.Type = t.Type
n.Class = PAUTO
n.Used = 1
@@ -887,10 +841,8 @@
// Synthesize a variable to store the inlined function's arguments
// when they come from a multiple return call.
func argvar(t *Type, i int) *Node {
- var n *Node
-
namebuf = fmt.Sprintf("~arg%d", i)
- n = newname(Lookup(namebuf))
+ n := newname(Lookup(namebuf))
n.Type = t.Type
n.Class = PAUTO
n.Used = 1
@@ -902,11 +854,9 @@
var newlabel_inl_label int
func newlabel_inl() *Node {
- var n *Node
-
newlabel_inl_label++
namebuf = fmt.Sprintf(".inlret%.6d", newlabel_inl_label)
- n = newname(Lookup(namebuf))
+ n := newname(Lookup(namebuf))
n.Etype = 1 // flag 'safe' for escape analysis (no backjumps)
return n
}
@@ -916,9 +866,7 @@
// to input/output parameters with ones to the tmpnames, and
// substituting returns with assignments to the output.
func inlsubstlist(ll *NodeList) *NodeList {
- var l *NodeList
-
- l = nil
+ l := (*NodeList)(nil)
for ; ll != nil; ll = ll.Next {
l = list(l, inlsubst(ll.N))
}
@@ -926,11 +874,6 @@
}
func inlsubst(n *Node) *Node {
- var p string
- var m *Node
- var as *Node
- var ll *NodeList
-
if n == nil {
return nil
}
@@ -957,15 +900,15 @@
// dump("Return before substitution", n);
case ORETURN:
- m = Nod(OGOTO, inlretlabel, nil)
+ m := Nod(OGOTO, inlretlabel, nil)
m.Ninit = inlsubstlist(n.Ninit)
if inlretvars != nil && n.List != nil {
- as = Nod(OAS2, nil, nil)
+ as := Nod(OAS2, nil, nil)
// shallow copy or OINLCALL->rlist will be the same list, and later walk and typecheck may clobber that.
- for ll = inlretvars; ll != nil; ll = ll.Next {
+ for ll := inlretvars; ll != nil; ll = ll.Next {
as.List = list(as.List, ll.N)
}
as.Rlist = inlsubstlist(n.List)
@@ -981,16 +924,16 @@
case OGOTO,
OLABEL:
- m = Nod(OXXX, nil, nil)
+ m := Nod(OXXX, nil, nil)
*m = *n
m.Ninit = nil
- p = fmt.Sprintf("%s·%d", n.Left.Sym.Name, inlgen)
+ p := fmt.Sprintf("%s·%d", n.Left.Sym.Name, inlgen)
m.Left = newname(Lookup(p))
return m
}
- m = Nod(OXXX, nil, nil)
+ m := Nod(OXXX, nil, nil)
*m = *n
m.Ninit = nil
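(Illustration, not part of the CL: inlcopylist and inlsubstlist share one shape, an accumulator that starts as an explicit typed nil and is rebuilt by appending per element. A standalone sketch with an invented linked list.)

package main

import "fmt"

type list struct {
	n    int
	next *list
}

func push(l *list, n int) *list { return &list{n: n, next: l} }

// copyAll mirrors inlcopylist: the accumulator starts as a typed nil
// so its declaration can sit at first use rather than atop the function.
func copyAll(ll *list) *list {
	out := (*list)(nil)
	for ; ll != nil; ll = ll.next {
		out = push(out, ll.n)
	}
	return out
}

func main() {
	l := push(push(nil, 1), 2) // list: 2 -> 1
	fmt.Println(copyAll(l).n)  // copied head: 1 (prepending reverses order)
}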
diff --git a/src/cmd/internal/gc/lex.go b/src/cmd/internal/gc/lex.go
index 26f2eff..9627337 100644
--- a/src/cmd/internal/gc/lex.go
+++ b/src/cmd/internal/gc/lex.go
@@ -88,14 +88,11 @@
}
func doversion() {
- var p string
- var sep string
-
- p = obj.Expstring()
+ p := obj.Expstring()
if p == "X:none" {
p = ""
}
- sep = ""
+ sep := ""
if p != "" {
sep = " "
}
@@ -105,12 +102,10 @@
func Main() {
defer hidePanic()
- var l *NodeList
- var p string
// Allow GOARCH=thearch.thestring or GOARCH=thearch.thestringsuffix,
// but not other values.
- p = obj.Getgoarch()
+ p := obj.Getgoarch()
if !strings.HasPrefix(p, Thearch.Thestring) {
log.Fatalf("cannot use %cg with GOARCH=%s", Thearch.Thechar, p)
@@ -276,7 +271,7 @@
}
if Thearch.Thechar == '8' {
- p = obj.Getgo386()
+ p := obj.Getgo386()
if p == "387" {
Use_sse = 0
} else if p == "sse2" {
@@ -355,7 +350,7 @@
// and methods but doesn't depend on any of it.
defercheckwidth()
- for l = xtop; l != nil; l = l.Next {
+ for l := xtop; l != nil; l = l.Next {
if l.N.Op != ODCL && l.N.Op != OAS {
typecheck(&l.N, Etop)
}
@@ -363,7 +358,7 @@
// Phase 2: Variable assignments.
// To check interface assignments, depends on phase 1.
- for l = xtop; l != nil; l = l.Next {
+ for l := xtop; l != nil; l = l.Next {
if l.N.Op == ODCL || l.N.Op == OAS {
typecheck(&l.N, Etop)
}
@@ -371,7 +366,7 @@
resumecheckwidth()
// Phase 3: Type check function bodies.
- for l = xtop; l != nil; l = l.Next {
+ for l := xtop; l != nil; l = l.Next {
if l.N.Op == ODCLFUNC || l.N.Op == OCLOSURE {
Curfn = l.N
decldepth = 1
@@ -387,7 +382,7 @@
// Phase 4: Decide how to capture closed variables.
// This needs to run before escape analysis,
// because variables captured by value do not escape.
- for l = xtop; l != nil; l = l.Next {
+ for l := xtop; l != nil; l = l.Next {
if l.N.Op == ODCLFUNC && l.N.Closure != nil {
Curfn = l.N
capturevars(l.N)
@@ -404,7 +399,7 @@
if Debug['l'] > 1 {
// Typecheck imported function bodies if debug['l'] > 1,
// otherwise lazily when used or re-exported.
- for l = importlist; l != nil; l = l.Next {
+ for l := importlist; l != nil; l = l.Next {
if l.N.Inl != nil {
saveerrors()
typecheckinl(l.N)
@@ -418,14 +413,14 @@
if Debug['l'] != 0 {
// Find functions that can be inlined and clone them before walk expands them.
- for l = xtop; l != nil; l = l.Next {
+ for l := xtop; l != nil; l = l.Next {
if l.N.Op == ODCLFUNC {
caninl(l.N)
}
}
// Expand inlineable calls in all functions
- for l = xtop; l != nil; l = l.Next {
+ for l := xtop; l != nil; l = l.Next {
if l.N.Op == ODCLFUNC {
inlcalls(l.N)
}
@@ -447,7 +442,7 @@
// Phase 7: Transform closure bodies to properly reference captured variables.
// This needs to happen before walk, because closures must be transformed
// before walk reaches a call of a closure.
- for l = xtop; l != nil; l = l.Next {
+ for l := xtop; l != nil; l = l.Next {
if l.N.Op == ODCLFUNC && l.N.Closure != nil {
Curfn = l.N
transformclosure(l.N)
@@ -457,7 +452,7 @@
Curfn = nil
// Phase 8: Compile top level functions.
- for l = xtop; l != nil; l = l.Next {
+ for l := xtop; l != nil; l = l.Next {
if l.N.Op == ODCLFUNC {
funccompile(l.N)
}
@@ -468,7 +463,7 @@
}
// Phase 9: Check external declarations.
- for l = externdcl; l != nil; l = l.Next {
+ for l := externdcl; l != nil; l = l.Next {
if l.N.Op == ONAME {
typecheck(&l.N, Erv)
}
@@ -511,11 +506,8 @@
}
func skiptopkgdef(b *obj.Biobuf) bool {
- var p string
- var sz int
-
/* archive header */
- p = obj.Brdline(b, '\n')
+ p := obj.Brdline(b, '\n')
if p == "" {
return false
}
@@ -527,7 +519,7 @@
}
/* symbol table may be first; skip it */
- sz = arsize(b, "__.GOSYMDEF")
+ sz := arsize(b, "__.GOSYMDEF")
if sz >= 0 {
obj.Bseek(b, int64(sz), 1)
@@ -545,12 +537,11 @@
}
func addidir(dir string) {
- var pp **Idir
-
if dir == "" {
return
}
+ var pp **Idir
for pp = &idirs; *pp != nil; pp = &(*pp).link {
}
*pp = new(Idir)
@@ -567,11 +558,6 @@
}
func findpkg(name *Strlit) bool {
- var p *Idir
- var q string
- var suffix string
- var suffixsep string
-
if islocalname(name) {
if safemode != 0 || nolocalimports != 0 {
return false
@@ -595,13 +581,14 @@
// local imports should be canonicalized already.
// don't want to see "encoding/../encoding/base64"
// as different from "encoding/base64".
+ var q string
_ = q
if path.Clean(name.S) != name.S {
Yyerror("non-canonical import path %v (should be %s)", Zconv(name, 0), q)
return false
}
- for p = idirs; p != nil; p = p.link {
+ for p := idirs; p != nil; p = p.link {
namebuf = fmt.Sprintf("%s/%v.a", p.dir, Zconv(name, 0))
if obj.Access(namebuf, 0) >= 0 {
return true
@@ -613,8 +600,8 @@
}
if goroot != "" {
- suffix = ""
- suffixsep = ""
+ suffix := ""
+ suffixsep := ""
if flag_installsuffix != "" {
suffixsep = "_"
suffix = flag_installsuffix
@@ -642,17 +629,6 @@
}
func importfile(f *Val, line int) {
- var imp *obj.Biobuf
- var file string
- var p string
- var q string
- var tag string
- var c int32
- var n int
- var path_ *Strlit
- var cleanbuf string
- var prefix string
-
if f.Ctype != CTSTR {
Yyerror("import statement not a string")
fakeimport()
@@ -696,7 +672,7 @@
return
}
- path_ = f.U.Sval
+ path_ := f.U.Sval
if islocalname(path_) {
if path_.S[0] == '/' {
Yyerror("import path cannot be absolute path")
@@ -704,11 +680,11 @@
return
}
- prefix = Ctxt.Pathname
+ prefix := Ctxt.Pathname
if localimport != "" {
prefix = localimport
}
- cleanbuf = prefix
+ cleanbuf := prefix
cleanbuf += "/"
cleanbuf += path_.S
cleanbuf = path.Clean(cleanbuf)
@@ -730,13 +706,13 @@
// If we already saw that package, feed a dummy statement
// to the lexer to avoid parsing export data twice.
if importpkg.Imported != 0 {
- file = namebuf
- tag = ""
+ file := namebuf
+ tag := ""
if importpkg.Safe {
tag = "safe"
}
- p = fmt.Sprintf("package %s %s\n$$\n", importpkg.Name, tag)
+ p := fmt.Sprintf("package %s %s\n$$\n", importpkg.Name, tag)
cannedimports(file, p)
return
}
@@ -744,15 +720,16 @@
importpkg.Imported = 1
var err error
+ var imp *obj.Biobuf
imp, err = obj.Bopenr(namebuf)
if err != nil {
Yyerror("can't open import: \"%v\": %v", Zconv(f.U.Sval, 0), err)
errorexit()
}
- file = namebuf
+ file := namebuf
- n = len(namebuf)
+ n := len(namebuf)
if n > 2 && namebuf[n-2] == '.' && namebuf[n-1] == 'a' {
if !skiptopkgdef(imp) {
Yyerror("import %s: not a package file", file)
@@ -761,7 +738,7 @@
}
// check object header
- p = obj.Brdstr(imp, '\n', 1)
+ p := obj.Brdstr(imp, '\n', 1)
if p != "empty archive" {
if !strings.HasPrefix(p, "go object ") {
@@ -769,7 +746,7 @@
errorexit()
}
- q = fmt.Sprintf("%s %s %s %s", obj.Getgoos(), obj.Getgoarch(), obj.Getgoversion(), obj.Expstring())
+ q := fmt.Sprintf("%s %s %s %s", obj.Getgoos(), obj.Getgoarch(), obj.Getgoversion(), obj.Expstring())
if p[10:] != q {
Yyerror("import %s: object is [%s] expected [%s]", file, p[10:], q)
errorexit()
@@ -793,6 +770,7 @@
curio.nlsemi = 0
typecheckok = 1
+ var c int32
for {
c = int32(getc())
if c == EOF {
@@ -951,7 +929,6 @@
cp.Reset()
for {
-
if escchar('"', &escflag, &v) {
break
}
@@ -974,7 +951,6 @@
cp.Reset()
for {
-
c = int(getr())
if c == '\r' {
continue
@@ -1014,9 +990,7 @@
case '/':
c1 = getc()
if c1 == '*' {
- var nl int
-
- nl = 0
+ nl := 0
for {
c = int(getr())
if c == '\n' {
@@ -1297,7 +1271,6 @@
*/
talph:
for {
-
if c >= utf8.RuneSelf {
ungetc(c)
rune_ = uint(getr())
@@ -1339,7 +1312,6 @@
cp.Reset()
if c != '0' {
for {
-
cp.WriteByte(byte(c))
c = getc()
if yy_isdigit(c) {
@@ -1353,7 +1325,6 @@
c = getc()
if c == 'x' || c == 'X' {
for {
-
cp.WriteByte(byte(c))
c = getc()
if yy_isdigit(c) {
@@ -1381,7 +1352,6 @@
c1 = 0
for {
-
if !yy_isdigit(c) {
break
}
@@ -1436,7 +1406,6 @@
casedot:
for {
-
cp.WriteByte(byte(c))
c = getc()
if !yy_isdigit(c) {
@@ -1463,7 +1432,6 @@
Yyerror("malformed fp constant exponent")
}
for yy_isdigit(c) {
-
cp.WriteByte(byte(c))
c = getc()
}
@@ -1533,21 +1501,18 @@
*/
func getlinepragma() int {
var cmd, verb, name string
- var i int
- var c int
var n int
var cp *bytes.Buffer
var linep int
- var h *obj.Hist
- c = int(getr())
+ c := int(getr())
if c == 'g' {
goto go_
}
if c != 'l' {
goto out
}
- for i = 1; i < 5; i++ {
+ for i := 1; i < 5; i++ {
c = int(getr())
if c != int("line "[i]) {
goto out
@@ -1597,7 +1562,7 @@
// try to avoid allocating file name over and over
name = lexbuf.String()[:linep-1]
- for h = Ctxt.Hist; h != nil; h = h.Link {
+ for h := Ctxt.Hist; h != nil; h = h.Link {
if h.Name != "" && h.Name == name {
linehist(h.Name, int32(n), 0)
goto out
@@ -1708,20 +1673,17 @@
// Copied nearly verbatim from the C compiler's #pragma parser.
// TODO: Rewrite more cleanly once the compiler is written in Go.
func pragcgo(text string) {
- var local string
- var remote string
- var p string
var q string
- var verb string
if i := strings.Index(text, " "); i >= 0 {
text, q = text[:i], text[i:]
}
- verb = text[3:] // skip "go:"
+ verb := text[3:] // skip "go:"
if verb == "cgo_dynamic_linker" || verb == "dynlinker" {
var ok bool
+ var p string
p, ok = getquoted(&q)
if !ok {
goto err1
@@ -1738,7 +1700,8 @@
verb = "cgo_export_dynamic"
}
if verb == "cgo_export_static" || verb == "cgo_export_dynamic" {
- local = getimpsym(&q)
+ local := getimpsym(&q)
+ var remote string
if local == "" {
goto err2
}
@@ -1761,7 +1724,9 @@
if verb == "cgo_import_dynamic" || verb == "dynimport" {
var ok bool
- local = getimpsym(&q)
+ local := getimpsym(&q)
+ var p string
+ var remote string
if local == "" {
goto err3
}
@@ -1792,7 +1757,7 @@
}
if verb == "cgo_import_static" {
- local = getimpsym(&q)
+ local := getimpsym(&q)
if local == "" || more(&q) {
goto err4
}
@@ -1806,6 +1771,7 @@
if verb == "cgo_ldflag" {
var ok bool
+ var p string
p, ok = getquoted(&q)
if !ok {
goto err5
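
The var ok / var p pairs kept inside these pragma branches sit next to gotos,
and Go forbids a goto that would jump over a declaration and into its scope,
so := rewrites near goto-heavy code have to stay conservative. The rule
itself, sketched outside the compiler:

	package main

	import "fmt"

	func main() {
		n := 2
		if n > 1 {
			goto big // legal: no declaration between here and big:
		}
		fmt.Println("small")
	big:
		fmt.Println("big")

		// The form the compiler rejects, for contrast:
		//
		//	goto l
		//	v := 3 // error: goto jumps over declaration of v
		// l:
		//	_ = v
	}
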
@@ -1847,9 +1813,7 @@
}
func yylex(yylval *yySymType) int32 {
- var lx int
-
- lx = int(_yylex(yylval))
+ lx := int(_yylex(yylval))
if curio.nlsemi != 0 && lx == EOF {
// Treat EOF as "end of line" for the purposes
@@ -1883,11 +1847,7 @@
}
func getc() int {
- var c int
- var c1 int
- var c2 int
-
- c = curio.peekc
+ c := curio.peekc
if c != 0 {
curio.peekc = curio.peekc1
curio.peekc1 = 0
@@ -1902,6 +1862,8 @@
curio.cp = curio.cp[1:]
}
} else {
+ var c1 int
+ var c2 int
loop:
c = obj.Bgetc(curio.bin)
if c == 0xef {
@@ -1974,14 +1936,9 @@
}
func escchar(e int, escflg *int, val *int64) bool {
- var i int
- var u int
- var c int
- var l int64
-
*escflg = 0
- c = int(getr())
+ c := int(getr())
switch c {
case EOF:
Yyerror("eof in string")
@@ -2002,8 +1959,10 @@
return false
}
- u = 0
+ u := 0
c = int(getr())
+ var l int64
+ var i int
switch c {
case 'x':
*escflg = 1 // it's a byte
@@ -2091,7 +2050,7 @@
oct:
l = int64(c) - '0'
- for i = 2; i > 0; i-- {
+ for i := 2; i > 0; i-- {
c = getc()
if c >= '0' && c <= '7' {
l = l*8 + int64(c) - '0'
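
The oct: loop above folds up to two more octal digits into a value seeded
from the first digit. The same folding in isolation (reading from a string
instead of the lexer; assumes the input starts with an octal digit):

	package main

	import "fmt"

	// octEsc mirrors the l = l*8 + c - '0' loop in escchar's oct: case.
	func octEsc(s string) int64 {
		l := int64(s[0]) - '0' // caller guarantees an octal first digit
		s = s[1:]
		for i := 2; i > 0 && len(s) > 0; i-- {
			c := s[0]
			if c < '0' || c > '7' {
				break
			}
			l = l*8 + int64(c) - '0'
			s = s[1:]
		}
		return l
	}

	func main() {
		fmt.Println(octEsc("101")) // 65, the byte 'A'
	}
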
@@ -2482,19 +2441,17 @@
}
func lexinit() {
- var i int
var lex int
var s *Sym
var s1 *Sym
var t *Type
var etype int
- var v Val
/*
* initialize basic types array
* initialize known symbols
*/
- for i = 0; i < len(syms); i++ {
+ for i := 0; i < len(syms); i++ {
lex = syms[i].lexical
s = Lookup(syms[i].name)
s.Lexical = uint16(lex)
@@ -2568,33 +2525,26 @@
Types[TNIL] = typ(TNIL)
s = Pkglookup("nil", builtinpkg)
+ var v Val
v.Ctype = CTNIL
s.Def = nodlit(v)
s.Def.Sym = s
}
func lexinit1() {
- var s *Sym
- var s1 *Sym
- var t *Type
- var f *Type
- var rcvr *Type
- var in *Type
- var out *Type
-
// t = interface { Error() string }
- rcvr = typ(TSTRUCT)
+ rcvr := typ(TSTRUCT)
rcvr.Type = typ(TFIELD)
rcvr.Type.Type = Ptrto(typ(TSTRUCT))
rcvr.Funarg = 1
- in = typ(TSTRUCT)
+ in := typ(TSTRUCT)
in.Funarg = 1
- out = typ(TSTRUCT)
+ out := typ(TSTRUCT)
out.Type = typ(TFIELD)
out.Type.Type = Types[TSTRING]
out.Funarg = 1
- f = typ(TFUNC)
+ f := typ(TFUNC)
*getthis(f) = rcvr
*Getoutarg(f) = out
*getinarg(f) = in
@@ -2602,16 +2552,16 @@
f.Intuple = 0
f.Outnamed = 0
f.Outtuple = 1
- t = typ(TINTER)
+ t := typ(TINTER)
t.Type = typ(TFIELD)
t.Type.Sym = Lookup("Error")
t.Type.Type = f
// error type
- s = Lookup("error")
+ s := Lookup("error")
s.Lexical = LNAME
- s1 = Pkglookup("error", builtinpkg)
+ s1 := Pkglookup("error", builtinpkg)
errortype = t
errortype.Sym = s1
s1.Lexical = LNAME
@@ -2643,7 +2593,6 @@
var lex int
var etype int
var i int
- var v Val
for i = 0; i < len(syms); i++ {
lex = syms[i].lexical
@@ -2701,6 +2650,7 @@
s = Lookup("nil")
if s.Def == nil {
+ var v Val
v.Ctype = CTNIL
s.Def = nodlit(v)
s.Def.Sym = s
@@ -2920,9 +2870,7 @@
var lexname_buf string
func lexname(lex int) string {
- var i int
-
- for i = 0; i < len(lexn); i++ {
+ for i := 0; i < len(lexn); i++ {
if lexn[i].lex == lex {
return lexn[i].name
}
@@ -3131,15 +3079,13 @@
}
func pkgnotused(lineno int, path_ *Strlit, name string) {
- var elem string
-
// If the package was imported with a name other than the final
// import path element, show it explicitly in the error message.
// Note that this handles both renamed imports and imports of
// packages containing unconventional package declarations.
// Note that this uses / always, even on Windows, because Go import
// paths always use forward slashes.
- elem = path_.S
+ elem := path_.S
if i := strings.LastIndex(elem, "/"); i >= 0 {
elem = elem[i+1:]
}
@@ -3151,10 +3097,6 @@
}
func mkpackage(pkgname string) {
- var s *Sym
- var h int32
- var p string
-
if localpkg.Name == "" {
if pkgname == "_" {
Yyerror("invalid package name _")
@@ -3164,7 +3106,8 @@
if pkgname != localpkg.Name {
Yyerror("package %s; expected %s", pkgname, localpkg.Name)
}
- for h = 0; h < NHASH; h++ {
+ var s *Sym
+ for h := int32(0); h < NHASH; h++ {
for s = hash[h]; s != nil; s = s.Link {
if s.Def == nil || s.Pkg != localpkg {
continue
@@ -3198,7 +3141,7 @@
}
if outfile == "" {
- p = infile
+ p := infile
if i := strings.LastIndex(p, "/"); i >= 0 {
p = p[i+1:]
}
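
Throughout Main above, one function-wide var l *NodeList became a fresh l per
phase loop, so each l now dies with the loop that uses it and phases cannot
leak state through it. The scoping rule in miniature:

	package main

	import "fmt"

	func main() {
		xs := []string{"phase 1", "phase 2"}
		for i := 0; i < len(xs); i++ { // this i belongs to this loop
			fmt.Println(xs[i])
		}
		for i := len(xs) - 1; i >= 0; i-- { // a fresh, unrelated i
			fmt.Println(xs[i])
		}
		// fmt.Println(i) // does not compile: i is out of scope here
	}
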
diff --git a/src/cmd/internal/gc/md5.go b/src/cmd/internal/gc/md5.go
index 862fdd5..3b51900 100644
--- a/src/cmd/internal/gc/md5.go
+++ b/src/cmd/internal/gc/md5.go
@@ -39,16 +39,13 @@
}
func md5write(d *MD5, p []byte, nn int) {
- var i int
- var n int
-
d.len += uint64(nn)
if d.nx > 0 {
- n = nn
+ n := nn
if n > _Chunk-d.nx {
n = _Chunk - d.nx
}
- for i = 0; i < n; i++ {
+ for i := 0; i < n; i++ {
d.x[d.nx+i] = uint8(p[i])
}
d.nx += n
@@ -61,11 +58,11 @@
nn -= n
}
- n = md5block(d, p, nn)
+ n := md5block(d, p, nn)
p = p[n:]
nn -= n
if nn > 0 {
- for i = 0; i < nn; i++ {
+ for i := 0; i < nn; i++ {
d.x[i] = uint8(p[i])
}
d.nx = nn
@@ -73,14 +70,10 @@
}
func md5sum(d *MD5, hi *uint64) uint64 {
- var tmp [64]uint8
- var i int
- var len uint64
-
// Padding. Add a 1 bit and 0 bits until 56 bytes mod 64.
- len = d.len
+ len := d.len
- tmp = [64]uint8{}
+ tmp := [64]uint8{}
tmp[0] = 0x80
if len%64 < 56 {
md5write(d, tmp[:], int(56-len%64))
@@ -91,7 +84,7 @@
// Length in bits.
len <<= 3
- for i = 0; i < 8; i++ {
+ for i := 0; i < 8; i++ {
tmp[i] = uint8(len >> uint(8*i))
}
md5write(d, tmp[:], 8)
@@ -194,24 +187,19 @@
var shift4 = []uint32{6, 10, 15, 21}
func md5block(dig *MD5, p []byte, nn int) int {
- var a uint32
- var b uint32
- var c uint32
- var d uint32
var aa uint32
var bb uint32
var cc uint32
var dd uint32
var i int
var j int
- var n int
var X [16]uint32
- a = dig.s[0]
- b = dig.s[1]
- c = dig.s[2]
- d = dig.s[3]
- n = 0
+ a := dig.s[0]
+ b := dig.s[1]
+ c := dig.s[2]
+ d := dig.s[3]
+ n := 0
for nn >= _Chunk {
aa = a
@@ -226,14 +214,10 @@
// Round 1.
for i = 0; i < 16; i++ {
- var x uint32
- var t uint32
- var s uint32
- var f uint32
- x = uint32(i)
- t = uint32(i)
- s = shift1[i%4]
- f = ((c ^ d) & b) ^ d
+ x := uint32(i)
+ t := uint32(i)
+ s := shift1[i%4]
+ f := ((c ^ d) & b) ^ d
a += f + X[x] + table[t]
a = a<<s | a>>(32-s)
a += b
@@ -247,15 +231,10 @@
// Round 2.
for i = 0; i < 16; i++ {
- var x uint32
- var t uint32
- var s uint32
- var g uint32
-
- x = (1 + 5*uint32(i)) % 16
- t = 16 + uint32(i)
- s = shift2[i%4]
- g = ((b ^ c) & d) ^ c
+ x := (1 + 5*uint32(i)) % 16
+ t := 16 + uint32(i)
+ s := shift2[i%4]
+ g := ((b ^ c) & d) ^ c
a += g + X[x] + table[t]
a = a<<s | a>>(32-s)
a += b
@@ -269,15 +248,10 @@
// Round 3.
for i = 0; i < 16; i++ {
- var x uint32
- var t uint32
- var s uint32
- var h uint32
-
- x = (5 + 3*uint32(i)) % 16
- t = 32 + uint32(i)
- s = shift3[i%4]
- h = b ^ c ^ d
+ x := (5 + 3*uint32(i)) % 16
+ t := 32 + uint32(i)
+ s := shift3[i%4]
+ h := b ^ c ^ d
a += h + X[x] + table[t]
a = a<<s | a>>(32-s)
a += b
@@ -291,15 +265,10 @@
// Round 4.
for i = 0; i < 16; i++ {
- var x uint32
- var s uint32
- var t uint32
- var ii uint32
-
- x = (7 * uint32(i)) % 16
- s = shift4[i%4]
- t = 48 + uint32(i)
- ii = c ^ (b | ^d)
+ x := (7 * uint32(i)) % 16
+ s := shift4[i%4]
+ t := 48 + uint32(i)
+ ii := c ^ (b | ^d)
a += ii + X[x] + table[t]
a = a<<s | a>>(32-s)
a += b
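
Every MD5 round above ends with the pair a = a<<s | a>>(32-s), a 32-bit left
rotate. math/bits postdates this code, but as a sanity check the pair agrees
with the library rotate at every shift count:

	package main

	import (
		"fmt"
		"math/bits"
	)

	// rotl32 is the shift pair from the round bodies above.
	func rotl32(a uint32, s uint) uint32 {
		return a<<s | a>>(32-s)
	}

	func main() {
		a := uint32(0xdeadbeef)
		for s := uint(1); s < 32; s++ {
			if rotl32(a, s) != bits.RotateLeft32(a, int(s)) {
				fmt.Println("mismatch at shift", s)
			}
		}
		fmt.Println("rotl32 agrees with math/bits.RotateLeft32")
	}
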
diff --git a/src/cmd/internal/gc/mparith1.go b/src/cmd/internal/gc/mparith1.go
index 14b8620..454b688 100644
--- a/src/cmd/internal/gc/mparith1.go
+++ b/src/cmd/internal/gc/mparith1.go
@@ -13,19 +13,17 @@
/// uses arithmetic
func mpcmpfixflt(a *Mpint, b *Mpflt) int {
- var buf string
var c Mpflt
- buf = fmt.Sprintf("%v", Bconv(a, 0))
+ buf := fmt.Sprintf("%v", Bconv(a, 0))
mpatoflt(&c, buf)
return mpcmpfltflt(&c, b)
}
func mpcmpfltfix(a *Mpflt, b *Mpint) int {
- var buf string
var c Mpflt
- buf = fmt.Sprintf("%v", Bconv(b, 0))
+ buf := fmt.Sprintf("%v", Bconv(b, 0))
mpatoflt(&c, buf)
return mpcmpfltflt(a, &c)
}
@@ -133,11 +131,10 @@
// convert (truncate) b to a.
// return -1 (but still convert) if b was non-integer.
func mpexactfltfix(a *Mpint, b *Mpflt) int {
- var f Mpflt
-
*a = b.Val
Mpshiftfix(a, int(b.Exp))
if b.Exp < 0 {
+ var f Mpflt
f.Val = *a
f.Exp = 0
mpnorm(&f)
@@ -150,15 +147,12 @@
}
func mpmovefltfix(a *Mpint, b *Mpflt) int {
- var f Mpflt
- var i int
-
if mpexactfltfix(a, b) == 0 {
return 0
}
// try rounding down a little
- f = *b
+ f := *b
f.Val.A[0] = 0
if mpexactfltfix(a, &f) == 0 {
@@ -166,7 +160,7 @@
}
// try rounding up a little
- for i = 1; i < Mpprec; i++ {
+ for i := 1; i < Mpprec; i++ {
f.Val.A[i]++
if f.Val.A[i] != Mpbase {
break
@@ -209,12 +203,6 @@
}
func mphextofix(a *Mpint, s string) {
- var c int8
- var d int
- var bit int
- var hexdigitp int
- var end int
-
for s != "" && s[0] == '0' {
s = s[1:]
}
@@ -225,8 +213,11 @@
return
}
- end = len(s) - 1
- for hexdigitp = end; hexdigitp >= 0; hexdigitp-- {
+ end := len(s) - 1
+ var c int8
+ var d int
+ var bit int
+ for hexdigitp := end; hexdigitp >= 0; hexdigitp-- {
c = int8(s[hexdigitp])
if c >= '0' && c <= '9' {
d = int(c) - '0'
@@ -252,25 +243,14 @@
// required syntax is [+-]d*[.]d*[e[+-]d*] or [+-]0xH*[e[+-]d*]
//
func mpatoflt(a *Mpflt, as string) {
- var b Mpflt
- var dp int
- var c int
- var f int
- var ef int
- var ex int
- var eb int
- var base int
- var s string
- var start string
-
for as[0] == ' ' || as[0] == '\t' {
as = as[1:]
}
/* determine base */
- s = as
+ s := as
- base = -1
+ base := -1
for base == -1 {
if s == "" {
base = 10
@@ -296,14 +276,17 @@
}
s = as
- dp = 0 /* digits after decimal point */
- f = 0 /* sign */
- ex = 0 /* exponent */
- eb = 0 /* binary point */
+ dp := 0 /* digits after decimal point */
+ f := 0 /* sign */
+ ex := 0 /* exponent */
+ eb := 0 /* binary point */
Mpmovecflt(a, 0.0)
+ var ef int
+ var c int
if base == 16 {
- start = ""
+ start := ""
+ var c int
for {
c, _ = intstarstringplusplus(s)
if c == '-' {
@@ -437,6 +420,7 @@
}
if mpcmpfltc(a, 0.0) != 0 {
if ex >= dp {
+ var b Mpflt
mppow10flt(&b, ex-dp)
mpmulfltflt(a, &b)
} else {
@@ -444,6 +428,7 @@
if dp-ex >= 1<<(32-3) || int(int16(4*(dp-ex))) != 4*(dp-ex) {
Mpmovecflt(a, 0.0)
} else {
+ var b Mpflt
mppow10flt(&b, dp-ex)
mpdivfltflt(a, &b)
}
@@ -466,12 +451,10 @@
//
func mpatofix(a *Mpint, as string) {
var c int
- var f int
- var s string
var s0 string
- s = as
- f = 0
+ s := as
+ f := 0
Mpmovecfix(a, 0)
c, s = intstarstringplusplus(s)
@@ -555,29 +538,24 @@
}
func Bconv(xval *Mpint, flag int) string {
- var buf [500]byte
- var p int
- var fp string
-
var q Mpint
- var r Mpint
- var ten Mpint
- var sixteen Mpint
- var f int
- var digit int
mpmovefixfix(&q, xval)
- f = 0
+ f := 0
if mptestfix(&q) < 0 {
f = 1
mpnegfix(&q)
}
- p = len(buf)
+ var buf [500]byte
+ p := len(buf)
+ var r Mpint
if flag&obj.FmtSharp != 0 /*untyped*/ {
// Hexadecimal
+ var sixteen Mpint
Mpmovecfix(&sixteen, 16)
+ var digit int
for {
mpdivmodfixfix(&q, &r, &q, &sixteen)
digit = int(Mpgetfix(&r))
@@ -599,6 +577,7 @@
buf[p] = '0'
} else {
// Decimal
+ var ten Mpint
Mpmovecfix(&ten, 10)
for {
@@ -615,26 +594,20 @@
p--
buf[p] = '-'
}
+ var fp string
fp += string(buf[p:])
return fp
}
func Fconv(fvp *Mpflt, flag int) string {
- var buf string
- var fp string
-
- var fv Mpflt
- var d float64
- var dexp float64
- var exp int
-
if flag&obj.FmtSharp != 0 /*untyped*/ {
// alternate form - decimal for error messages.
// for well in range, convert to double and use print's %g
- exp = int(fvp.Exp) + sigfig(fvp)*Mpscale
+ exp := int(fvp.Exp) + sigfig(fvp)*Mpscale
+ var fp string
if -900 < exp && exp < 900 {
- d = mpgetflt(fvp)
+ d := mpgetflt(fvp)
if d >= 0 && (flag&obj.FmtSign != 0 /*untyped*/) {
fp += fmt.Sprintf("+")
}
@@ -644,15 +617,15 @@
// very out of range. compute decimal approximation by hand.
// decimal exponent
- dexp = float64(fvp.Exp) * 0.301029995663981195 // log_10(2)
+ dexp := float64(fvp.Exp) * 0.301029995663981195 // log_10(2)
exp = int(dexp)
// decimal mantissa
- fv = *fvp
+ fv := *fvp
fv.Val.Neg = 0
fv.Exp = 0
- d = mpgetflt(&fv)
+ d := mpgetflt(&fv)
d *= math.Pow(10, dexp-float64(exp))
for d >= 9.99995 {
d /= 10
@@ -668,6 +641,8 @@
return fp
}
+ var fv Mpflt
+ var buf string
if sigfig(fvp) == 0 {
buf = fmt.Sprintf("0p+0")
goto out
@@ -693,6 +668,7 @@
buf = fmt.Sprintf("%vp-%d", Bconv(&fv.Val, obj.FmtSharp), -fv.Exp)
out:
+ var fp string
fp += buf
return fp
}
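
Bconv above writes digits into buf from the end: p starts at len(buf) and
steps back one slot per divmod, so the result needs no reversal pass. The
same shape on a plain uint64:

	package main

	import "fmt"

	// itoa fills the buffer back to front, like Bconv's p--/buf[p] loop.
	func itoa(x uint64) string {
		var buf [20]byte // enough for the largest uint64
		p := len(buf)
		for {
			p--
			buf[p] = byte('0' + x%10)
			x /= 10
			if x == 0 {
				break
			}
		}
		return string(buf[p:])
	}

	func main() {
		fmt.Println(itoa(0), itoa(12345)) // 0 12345
	}
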
diff --git a/src/cmd/internal/gc/mparith2.go b/src/cmd/internal/gc/mparith2.go
index 057585c..c9c9230 100644
--- a/src/cmd/internal/gc/mparith2.go
+++ b/src/cmd/internal/gc/mparith2.go
@@ -9,11 +9,8 @@
// words of the argument
//
func mplen(a *Mpint) int {
- var i int
- var n int
-
- n = -1
- for i = 0; i < Mpprec; i++ {
+ n := -1
+ for i := 0; i < Mpprec; i++ {
if a.A[i] != 0 {
n = i
}
@@ -28,11 +25,9 @@
//
func mplsh(a *Mpint, quiet int) {
var x int
- var i int
- var c int
- c = 0
- for i = 0; i < Mpprec; i++ {
+ c := 0
+ for i := 0; i < Mpprec; i++ {
x = (a.A[i] << 1) + c
c = 0
if x >= Mpbase {
@@ -54,9 +49,7 @@
// ignores sign
//
func mplshw(a *Mpint, quiet int) {
- var i int
-
- i = Mpprec - 1
+ i := Mpprec - 1
if a.A[i] != 0 {
a.Ovf = 1
if quiet == 0 {
@@ -76,13 +69,10 @@
//
func mprsh(a *Mpint) {
var x int
- var lo int
- var i int
- var c int
- c = 0
- lo = a.A[0] & 1
- for i = Mpprec - 1; i >= 0; i-- {
+ c := 0
+ lo := a.A[0] & 1
+ for i := Mpprec - 1; i >= 0; i-- {
x = a.A[i]
a.A[i] = (x + c) >> 1
c = 0
@@ -101,10 +91,9 @@
// ignores sign and overflow
//
func mprshw(a *Mpint) {
- var lo int
var i int
- lo = a.A[0]
+ lo := a.A[0]
for i = 0; i < Mpprec-1; i++ {
a.A[i] = a.A[i+1]
}
@@ -119,9 +108,6 @@
// return the sign of (abs(a)-abs(b))
//
func mpcmp(a *Mpint, b *Mpint) int {
- var x int
- var i int
-
if a.Ovf != 0 || b.Ovf != 0 {
if nsavederrors+nerrors == 0 {
Yyerror("ovf in cmp")
@@ -129,7 +115,8 @@
return 0
}
- for i = Mpprec - 1; i >= 0; i-- {
+ var x int
+ for i := Mpprec - 1; i >= 0; i-- {
x = a.A[i] - b.A[i]
if x > 0 {
return +1
@@ -148,11 +135,9 @@
//
func mpneg(a *Mpint) {
var x int
- var i int
- var c int
- c = 0
- for i = 0; i < Mpprec; i++ {
+ c := 0
+ for i := 0; i < Mpprec; i++ {
x = -a.A[i] - c
c = 0
if x < 0 {
@@ -193,10 +178,6 @@
/// implements fix arihmetic
func mpaddfixfix(a *Mpint, b *Mpint, quiet int) {
- var i int
- var c int
- var x int
-
if a.Ovf != 0 || b.Ovf != 0 {
if nsavederrors+nerrors == 0 {
Yyerror("ovf in mpaddxx")
@@ -205,13 +186,14 @@
return
}
- c = 0
+ c := 0
+ var x int
if a.Neg != b.Neg {
goto sub
}
// perform a+b
- for i = 0; i < Mpprec; i++ {
+ for i := 0; i < Mpprec; i++ {
x = a.A[i] + b.A[i] + c
c = 0
if x >= Mpbase {
@@ -236,7 +218,8 @@
Mpmovecfix(a, 0)
case 1:
- for i = 0; i < Mpprec; i++ {
+ var x int
+ for i := 0; i < Mpprec; i++ {
x = a.A[i] - b.A[i] - c
c = 0
if x < 0 {
@@ -249,7 +232,8 @@
case -1:
a.Neg ^= 1
- for i = 0; i < Mpprec; i++ {
+ var x int
+ for i := 0; i < Mpprec; i++ {
x = b.A[i] - a.A[i] - c
c = 0
if x < 0 {
@@ -263,15 +247,6 @@
}
func mpmulfixfix(a *Mpint, b *Mpint) {
- var i int
- var j int
- var na int
- var nb int
- var x int
- var s Mpint
- var q Mpint
- var c *Mpint
-
if a.Ovf != 0 || b.Ovf != 0 {
if nsavederrors+nerrors == 0 {
Yyerror("ovf in mpmulfixfix")
@@ -282,9 +257,11 @@
// pick the smaller
// to test for bits
- na = mplen(a)
+ na := mplen(a)
- nb = mplen(b)
+ nb := mplen(b)
+ var s Mpint
+ var c *Mpint
if na > nb {
mpmovefixfix(&s, a)
c = b
@@ -296,8 +273,11 @@
s.Neg = 0
+ var q Mpint
Mpmovecfix(&q, 0)
- for i = 0; i < na; i++ {
+ var j int
+ var x int
+ for i := 0; i < na; i++ {
x = c.A[i]
for j = 0; j < Mpscale; j++ {
if x&1 != 0 {
@@ -326,12 +306,6 @@
}
func mpmulfract(a *Mpint, b *Mpint) {
- var i int
- var j int
- var x int
- var s Mpint
- var q Mpint
-
if a.Ovf != 0 || b.Ovf != 0 {
if nsavederrors+nerrors == 0 {
Yyerror("ovf in mpmulflt")
@@ -340,16 +314,19 @@
return
}
+ var s Mpint
mpmovefixfix(&s, b)
s.Neg = 0
+ var q Mpint
Mpmovecfix(&q, 0)
- i = Mpprec - 1
- x = a.A[i]
+ i := Mpprec - 1
+ x := a.A[i]
if x != 0 {
Yyerror("mpmulfract not normal")
}
+ var j int
for i--; i >= 0; i-- {
x = a.A[i]
if x == 0 {
@@ -374,10 +351,7 @@
}
func mporfixfix(a *Mpint, b *Mpint) {
- var i int
- var x int
-
- x = 0
+ x := 0
if a.Ovf != 0 || b.Ovf != 0 {
if nsavederrors+nerrors == 0 {
Yyerror("ovf in mporfixfix")
@@ -396,7 +370,7 @@
mpneg(b)
}
- for i = 0; i < Mpprec; i++ {
+ for i := 0; i < Mpprec; i++ {
x = a.A[i] | b.A[i]
a.A[i] = x
}
@@ -411,10 +385,7 @@
}
func mpandfixfix(a *Mpint, b *Mpint) {
- var i int
- var x int
-
- x = 0
+ x := 0
if a.Ovf != 0 || b.Ovf != 0 {
if nsavederrors+nerrors == 0 {
Yyerror("ovf in mpandfixfix")
@@ -433,7 +404,7 @@
mpneg(b)
}
- for i = 0; i < Mpprec; i++ {
+ for i := 0; i < Mpprec; i++ {
x = a.A[i] & b.A[i]
a.A[i] = x
}
@@ -448,10 +419,7 @@
}
func mpandnotfixfix(a *Mpint, b *Mpint) {
- var i int
- var x int
-
- x = 0
+ x := 0
if a.Ovf != 0 || b.Ovf != 0 {
if nsavederrors+nerrors == 0 {
Yyerror("ovf in mpandnotfixfix")
@@ -470,7 +438,7 @@
mpneg(b)
}
- for i = 0; i < Mpprec; i++ {
+ for i := 0; i < Mpprec; i++ {
x = a.A[i] &^ b.A[i]
a.A[i] = x
}
@@ -485,10 +453,7 @@
}
func mpxorfixfix(a *Mpint, b *Mpint) {
- var i int
- var x int
-
- x = 0
+ x := 0
if a.Ovf != 0 || b.Ovf != 0 {
if nsavederrors+nerrors == 0 {
Yyerror("ovf in mporfixfix")
@@ -507,7 +472,7 @@
mpneg(b)
}
- for i = 0; i < Mpprec; i++ {
+ for i := 0; i < Mpprec; i++ {
x = a.A[i] ^ b.A[i]
a.A[i] = x
}
@@ -522,8 +487,6 @@
}
func mplshfixfix(a *Mpint, b *Mpint) {
- var s int64
-
if a.Ovf != 0 || b.Ovf != 0 {
if nsavederrors+nerrors == 0 {
Yyerror("ovf in mporfixfix")
@@ -533,7 +496,7 @@
return
}
- s = Mpgetfix(b)
+ s := Mpgetfix(b)
if s < 0 || s >= Mpprec*Mpscale {
Yyerror("stupid shift: %d", s)
Mpmovecfix(a, 0)
@@ -544,8 +507,6 @@
}
func mprshfixfix(a *Mpint, b *Mpint) {
- var s int64
-
if a.Ovf != 0 || b.Ovf != 0 {
if nsavederrors+nerrors == 0 {
Yyerror("ovf in mprshfixfix")
@@ -555,7 +516,7 @@
return
}
- s = Mpgetfix(b)
+ s := Mpgetfix(b)
if s < 0 || s >= Mpprec*Mpscale {
Yyerror("stupid shift: %d", s)
if a.Neg != 0 {
@@ -574,8 +535,6 @@
}
func Mpgetfix(a *Mpint) int64 {
- var v int64
-
if a.Ovf != 0 {
if nsavederrors+nerrors == 0 {
Yyerror("constant overflow")
@@ -583,7 +542,7 @@
return 0
}
- v = int64(uint64(a.A[0]))
+ v := int64(uint64(a.A[0]))
v |= int64(uint64(a.A[1]) << Mpscale)
v |= int64(uint64(a.A[2]) << (Mpscale + Mpscale))
if a.Neg != 0 {
@@ -593,19 +552,16 @@
}
func Mpmovecfix(a *Mpint, c int64) {
- var i int
- var x int64
-
a.Neg = 0
a.Ovf = 0
- x = c
+ x := c
if x < 0 {
a.Neg = 1
x = int64(-uint64(x))
}
- for i = 0; i < Mpprec; i++ {
+ for i := 0; i < Mpprec; i++ {
a.A[i] = int(x & Mpmask)
x >>= Mpscale
}
@@ -613,11 +569,9 @@
func mpdivmodfixfix(q *Mpint, r *Mpint, n *Mpint, d *Mpint) {
var i int
- var ns int
- var ds int
- ns = int(n.Neg)
- ds = int(d.Neg)
+ ns := int(n.Neg)
+ ds := int(d.Neg)
n.Neg = 0
d.Neg = 0
@@ -664,9 +618,7 @@
}
func mpiszero(a *Mpint) bool {
- var i int
-
- for i = Mpprec - 1; i >= 0; i-- {
+ for i := Mpprec - 1; i >= 0; i-- {
if a.A[i] != 0 {
return false
}
@@ -677,19 +629,17 @@
func mpdivfract(a *Mpint, b *Mpint) {
var n Mpint
var d Mpint
- var i int
var j int
- var neg int
var x int
mpmovefixfix(&n, a) // numerator
mpmovefixfix(&d, b) // denominator
- neg = int(n.Neg) ^ int(d.Neg)
+ neg := int(n.Neg) ^ int(d.Neg)
n.Neg = 0
d.Neg = 0
- for i = Mpprec - 1; i >= 0; i-- {
+ for i := Mpprec - 1; i >= 0; i-- {
x = 0
for j = 0; j < Mpscale; j++ {
x <<= 1
@@ -711,10 +661,9 @@
func mptestfix(a *Mpint) int {
var b Mpint
- var r int
Mpmovecfix(&b, 0)
- r = mpcmp(a, &b)
+ r := mpcmp(a, &b)
if a.Neg != 0 {
if r > 0 {
return -1
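
The add loop in mpaddfixfix above is plain limb arithmetic: sum the paired
limbs plus the carry, and when the sum reaches the radix, subtract it and
carry one into the next limb. A standalone sketch (the Mpscale/Mpbase values
are assumptions chosen to match the shape, not taken from the compiler):

	package main

	import "fmt"

	const Mpscale = 29          // assumed bits per limb
	const Mpbase = 1 << Mpscale // limb radix

	// add sums two little-endian limb slices the way mpaddfixfix does.
	func add(a, b []int) []int {
		out := make([]int, len(a))
		c := 0
		for i := range a {
			x := a[i] + b[i] + c
			c = 0
			if x >= Mpbase {
				x -= Mpbase
				c = 1
			}
			out[i] = x
		}
		return out
	}

	func main() {
		a := []int{Mpbase - 1, 0} // the value Mpbase-1
		b := []int{1, 0}          // the value 1
		fmt.Println(add(a, b))    // [0 1]: the carry moved up a limb
	}
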
diff --git a/src/cmd/internal/gc/mparith3.go b/src/cmd/internal/gc/mparith3.go
index 61bf9e9..103c53d 100644
--- a/src/cmd/internal/gc/mparith3.go
+++ b/src/cmd/internal/gc/mparith3.go
@@ -49,11 +49,7 @@
* word of the number to Mpnorm
*/
func mpnorm(a *Mpflt) {
- var s int
- var os int
- var x int
-
- os = sigfig(a)
+ os := sigfig(a)
if os == 0 {
// zero
a.Exp = 0
@@ -63,9 +59,9 @@
}
// this will normalize to the nearest word
- x = a.Val.A[os-1]
+ x := a.Val.A[os-1]
- s = (Mpnorm - os) * Mpscale
+ s := (Mpnorm - os) * Mpscale
// further normalize to the nearest bit
for {
@@ -91,16 +87,13 @@
/// implements float arihmetic
func mpaddfltflt(a *Mpflt, b *Mpflt) {
- var sa int
- var sb int
- var s int
- var c Mpflt
-
if Mpdebug != 0 /*TypeKind(100016)*/ {
fmt.Printf("\n%v + %v", Fconv(a, 0), Fconv(b, 0))
}
- sa = sigfig(a)
+ sa := sigfig(a)
+ var s int
+ var sb int
if sa == 0 {
mpmovefltflt(a, b)
goto out
@@ -114,6 +107,7 @@
s = int(a.Exp) - int(b.Exp)
if s > 0 {
// a is larger, shift b right
+ var c Mpflt
mpmovefltflt(&c, b)
Mpshiftfix(&c.Val, -s)
@@ -140,14 +134,11 @@
}
func mpmulfltflt(a *Mpflt, b *Mpflt) {
- var sa int
- var sb int
-
if Mpdebug != 0 /*TypeKind(100016)*/ {
fmt.Printf("%v\n * %v\n", Fconv(a, 0), Fconv(b, 0))
}
- sa = sigfig(a)
+ sa := sigfig(a)
if sa == 0 {
// zero
a.Exp = 0
@@ -156,7 +147,7 @@
return
}
- sb = sigfig(b)
+ sb := sigfig(b)
if sb == 0 {
// zero
mpmovefltflt(a, b)
@@ -174,15 +165,11 @@
}
func mpdivfltflt(a *Mpflt, b *Mpflt) {
- var sa int
- var sb int
- var c Mpflt
-
if Mpdebug != 0 /*TypeKind(100016)*/ {
fmt.Printf("%v\n / %v\n", Fconv(a, 0), Fconv(b, 0))
}
- sb = sigfig(b)
+ sb := sigfig(b)
if sb == 0 {
// zero and ovfl
a.Exp = 0
@@ -193,7 +180,7 @@
return
}
- sa = sigfig(a)
+ sa := sigfig(a)
if sa == 0 {
// zero
a.Exp = 0
@@ -203,6 +190,7 @@
}
// adjust b to top
+ var c Mpflt
mpmovefltflt(&c, b)
Mpshiftfix(&c.Val, Mpscale)
@@ -219,18 +207,11 @@
}
func mpgetfltN(a *Mpflt, prec int, bias int) float64 {
- var s int
- var i int
- var e int
- var minexp int
- var v uint64
- var f float64
-
if a.Val.Ovf != 0 && nsavederrors+nerrors == 0 {
Yyerror("mpgetflt ovf")
}
- s = sigfig(a)
+ s := sigfig(a)
if s == 0 {
return 0
}
@@ -252,7 +233,8 @@
// pick up the mantissa, a rounding bit, and a tie-breaking bit in a uvlong
s = prec + 2
- v = 0
+ v := uint64(0)
+ var i int
for i = Mpnorm - 1; s >= Mpscale; i-- {
v = v<<Mpscale | uint64(a.Val.A[i])
s -= Mpscale
@@ -273,11 +255,11 @@
}
// gradual underflow
- e = Mpnorm*Mpscale + int(a.Exp) - prec
+ e := Mpnorm*Mpscale + int(a.Exp) - prec
- minexp = bias + 1 - prec + 1
+ minexp := bias + 1 - prec + 1
if e < minexp {
- s = minexp - e
+ s := minexp - e
if s > prec+1 {
s = prec + 1
}
@@ -294,7 +276,7 @@
v += v & 1
v >>= 2
- f = float64(v)
+ f := float64(v)
f = math.Ldexp(f, e)
if a.Val.Neg != 0 {
@@ -313,15 +295,14 @@
}
func Mpmovecflt(a *Mpflt, c float64) {
- var i int
- var f float64
- var l int
-
if Mpdebug != 0 /*TypeKind(100016)*/ {
fmt.Printf("\nconst %g", c)
}
Mpmovecfix(&a.Val, 0)
a.Exp = 0
+ var f float64
+ var l int
+ var i int
if c == 0 {
goto out
}
@@ -333,7 +314,7 @@
f, i = math.Frexp(c)
a.Exp = int16(i)
- for i = 0; i < 10; i++ {
+ for i := 0; i < 10; i++ {
f = f * Mpbase
l = int(math.Floor(f))
f = f - float64(l)
@@ -357,12 +338,10 @@
}
func mptestflt(a *Mpflt) int {
- var s int
-
if Mpdebug != 0 /*TypeKind(100016)*/ {
fmt.Printf("\n%v?", Fconv(a, 0))
}
- s = sigfig(a)
+ s := sigfig(a)
if s != 0 {
s = +1
if a.Val.Neg != 0 {
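
Mpmovecflt and mpgetfltN above lean on the same identity from opposite ends:
math.Frexp splits c into f * 2**e with f in [0.5, 1), and math.Ldexp
multiplies the pieces back together. In miniature:

	package main

	import (
		"fmt"
		"math"
	)

	func main() {
		c := 6.75
		f, e := math.Frexp(c)              // the Mpmovecflt direction
		fmt.Println(f, e)                  // 0.84375 3
		fmt.Println(math.Ldexp(f, e) == c) // true: the mpgetfltN direction
	}
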
diff --git a/src/cmd/internal/gc/obj.go b/src/cmd/internal/gc/obj.go
index afaf87c..27a1811 100644
--- a/src/cmd/internal/gc/obj.go
+++ b/src/cmd/internal/gc/obj.go
@@ -21,13 +21,6 @@
}
func dumpobj() {
- var externs *NodeList
- var tmp *NodeList
- var arhdr [ArhdrSize]byte
- var startobj int64
- var size int64
- var zero *Sym
-
var err error
bout, err = obj.Bopenw(outfile)
if err != nil {
@@ -36,7 +29,8 @@
errorexit()
}
- startobj = 0
+ startobj := int64(0)
+ var arhdr [ArhdrSize]byte
if writearchive != 0 {
obj.Bwritestring(bout, "!<arch>\n")
arhdr = [ArhdrSize]byte{}
@@ -49,7 +43,7 @@
if writearchive != 0 {
obj.Bflush(bout)
- size = obj.Boffset(bout) - startobj
+ size := obj.Boffset(bout) - startobj
if size&1 != 0 {
obj.Bputc(bout, 0)
}
@@ -77,7 +71,7 @@
fmt.Fprintf(bout, "\n!\n")
- externs = nil
+ externs := (*NodeList)(nil)
if externdcl != nil {
externs = externdcl.End
}
@@ -86,7 +80,7 @@
dumptypestructs()
// Dump extra globals.
- tmp = externdcl
+ tmp := externdcl
if externs != nil {
externdcl = externs.Next
@@ -94,7 +88,7 @@
dumpglobls()
externdcl = tmp
- zero = Pkglookup("zerovalue", Runtimepkg)
+ zero := Pkglookup("zerovalue", Runtimepkg)
ggloblsym(zero, int32(zerosize), obj.DUPOK|obj.RODATA)
dumpdata()
@@ -102,7 +96,7 @@
if writearchive != 0 {
obj.Bflush(bout)
- size = obj.Boffset(bout) - startobj
+ size := obj.Boffset(bout) - startobj
if size&1 != 0 {
obj.Bputc(bout, 0)
}
@@ -117,10 +111,9 @@
func dumpglobls() {
var n *Node
- var l *NodeList
// add globals
- for l = externdcl; l != nil; l = l.Next {
+ for l := externdcl; l != nil; l = l.Next {
n = l.N
if n.Op != ONAME {
continue
@@ -140,7 +133,7 @@
ggloblnod(n)
}
- for l = funcsyms; l != nil; l = l.Next {
+ for l := funcsyms; l != nil; l = l.Next {
n = l.N
dsymptr(n.Sym, 0, n.Sym.Def.Shortname.Sym, 0)
ggloblsym(n.Sym, int32(Widthptr), obj.DUPOK|obj.RODATA)
@@ -156,8 +149,6 @@
}
func Linksym(s *Sym) *obj.LSym {
- var p string
-
if s == nil {
return nil
}
@@ -169,7 +160,7 @@
} else if s.Linkname != "" {
s.Lsym = obj.Linklookup(Ctxt, s.Linkname, 0)
} else {
- p = fmt.Sprintf("%s.%s", s.Pkg.Prefix, s.Name)
+ p := fmt.Sprintf("%s.%s", s.Pkg.Prefix, s.Name)
s.Lsym = obj.Linklookup(Ctxt, p, 0)
}
@@ -208,10 +199,6 @@
var stringsym_gen int
func stringsym(s string) *Sym {
- var sym *Sym
- var off int
- var n int
- var m int
var tmp struct {
lit Strlit
buf string
@@ -233,7 +220,7 @@
pkg = gostringpkg
}
- sym = Pkglookup(namebuf, pkg)
+ sym := Pkglookup(namebuf, pkg)
// SymUniq flag indicates that data is generated already
if sym.Flags&SymUniq != 0 {
@@ -242,14 +229,15 @@
sym.Flags |= SymUniq
sym.Def = newname(sym)
- off = 0
+ off := 0
// string header
off = dsymptr(sym, off, sym, Widthptr+Widthint)
off = duintxx(sym, off, uint64(len(s)), Widthint)
// string data
- for n = 0; n < len(s); n += m {
+ var m int
+ for n := 0; n < len(s); n += m {
m = 8
if m > len(s)-n {
m = len(s) - n
@@ -267,18 +255,15 @@
var slicebytes_gen int
func slicebytes(nam *Node, s string, len int) {
- var off int
- var n int
var m int
- var sym *Sym
slicebytes_gen++
namebuf = fmt.Sprintf(".gobytes.%d", slicebytes_gen)
- sym = Pkglookup(namebuf, localpkg)
+ sym := Pkglookup(namebuf, localpkg)
sym.Def = newname(sym)
- off = 0
- for n = 0; n < len; n += m {
+ off := 0
+ for n := 0; n < len; n += m {
m = 8
if m > len-n {
m = len - n
@@ -298,10 +283,8 @@
}
func dstringptr(s *Sym, off int, str string) int {
- var p *obj.Prog
-
off = int(Rnd(int64(off), int64(Widthptr)))
- p = Thearch.Gins(obj.ADATA, nil, nil)
+ p := Thearch.Gins(obj.ADATA, nil, nil)
p.From.Type = obj.TYPE_MEM
p.From.Name = obj.NAME_EXTERN
p.From.Sym = Linksym(s)
@@ -321,9 +304,7 @@
* gobj.c
*/
func Datastring(s string, a *obj.Addr) {
- var sym *Sym
-
- sym = stringsym(s)
+ sym := stringsym(s)
a.Type = obj.TYPE_MEM
a.Name = obj.NAME_EXTERN
a.Sym = Linksym(sym)
@@ -333,9 +314,7 @@
}
func datagostring(sval *Strlit, a *obj.Addr) {
- var sym *Sym
-
- sym = stringsym(sval.S)
+ sym := stringsym(sval.S)
a.Type = obj.TYPE_MEM
a.Name = obj.NAME_EXTERN
a.Sym = Linksym(sym)
@@ -345,29 +324,24 @@
}
func dgostringptr(s *Sym, off int, str string) int {
- var n int
- var lit *Strlit
-
if str == "" {
return duintptr(s, off, 0)
}
- n = len(str)
- lit = new(Strlit)
+ n := len(str)
+ lit := new(Strlit)
lit.S = str
lit.S = lit.S[:n]
return dgostrlitptr(s, off, lit)
}
func dgostrlitptr(s *Sym, off int, lit *Strlit) int {
- var p *obj.Prog
-
if lit == nil {
return duintptr(s, off, 0)
}
off = int(Rnd(int64(off), int64(Widthptr)))
- p = Thearch.Gins(obj.ADATA, nil, nil)
+ p := Thearch.Gins(obj.ADATA, nil, nil)
p.From.Type = obj.TYPE_MEM
p.From.Name = obj.NAME_EXTERN
p.From.Sym = Linksym(s)
@@ -383,9 +357,7 @@
}
func dsname(s *Sym, off int, t string) int {
- var p *obj.Prog
-
- p = Thearch.Gins(obj.ADATA, nil, nil)
+ p := Thearch.Gins(obj.ADATA, nil, nil)
p.From.Type = obj.TYPE_MEM
p.From.Name = obj.NAME_EXTERN
p.From.Offset = int64(off)
@@ -399,11 +371,9 @@
}
func dsymptr(s *Sym, off int, x *Sym, xoff int) int {
- var p *obj.Prog
-
off = int(Rnd(int64(off), int64(Widthptr)))
- p = Thearch.Gins(obj.ADATA, nil, nil)
+ p := Thearch.Gins(obj.ADATA, nil, nil)
p.From.Type = obj.TYPE_MEM
p.From.Name = obj.NAME_EXTERN
p.From.Sym = Linksym(s)
@@ -420,8 +390,6 @@
}
func gdata(nam *Node, nr *Node, wid int) {
- var p *obj.Prog
-
if nr.Op == OLITERAL {
switch nr.Val.Ctype {
case CTCPLX:
@@ -434,19 +402,16 @@
}
}
- p = Thearch.Gins(obj.ADATA, nam, nr)
+ p := Thearch.Gins(obj.ADATA, nam, nr)
p.From3.Type = obj.TYPE_CONST
p.From3.Offset = int64(wid)
}
func gdatacomplex(nam *Node, cval *Mpcplx) {
- var p *obj.Prog
- var w int
-
- w = cplxsubtype(int(nam.Type.Etype))
+ w := cplxsubtype(int(nam.Type.Etype))
w = int(Types[w].Width)
- p = Thearch.Gins(obj.ADATA, nam, nil)
+ p := Thearch.Gins(obj.ADATA, nam, nil)
p.From3.Type = obj.TYPE_CONST
p.From3.Offset = int64(w)
p.To.Type = obj.TYPE_FCONST
@@ -461,10 +426,9 @@
}
func gdatastring(nam *Node, sval *Strlit) {
- var p *obj.Prog
var nod1 Node
- p = Thearch.Gins(obj.ADATA, nam, nil)
+ p := Thearch.Gins(obj.ADATA, nam, nil)
Datastring(sval.S, &p.To)
p.From3.Type = obj.TYPE_CONST
p.From3.Offset = Types[Tptr].Width
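
Two small patterns recur in this file. startobj := int64(0) spells out the
type because a bare := 0 would infer int, and stringsym/slicebytes keep
var m int ahead of their loops because the post statement n += m reads it.
That chunking loop in isolation:

	package main

	import "fmt"

	// chunks walks the data in 8-byte pieces with a short final piece,
	// mirroring the data-emission loops above.
	func chunks(s string) []string {
		var out []string
		var m int // declared outside: the post statement below uses it
		for n := 0; n < len(s); n += m {
			m = 8
			if m > len(s)-n {
				m = len(s) - n
			}
			out = append(out, s[n:n+m])
		}
		return out
	}

	func main() {
		fmt.Println(chunks("hello, multiprecision")) // [hello, m ultiprec ision]
	}
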
diff --git a/src/cmd/internal/gc/order.go b/src/cmd/internal/gc/order.go
index 743ca80..e8744d7 100644
--- a/src/cmd/internal/gc/order.go
+++ b/src/cmd/internal/gc/order.go
@@ -49,10 +49,8 @@
// Order rewrites fn->nbody to apply the ordering constraints
// described in the comment at the top of the file.
func order(fn *Node) {
- var s string
-
if Debug['W'] > 1 {
- s = fmt.Sprintf("\nbefore order %v", Sconv(fn.Nname.Sym, 0))
+ s := fmt.Sprintf("\nbefore order %v", Sconv(fn.Nname.Sym, 0))
dumplist(s, fn.Nbody)
}
@@ -63,18 +61,14 @@
// pushes it onto the temp stack, and returns it.
// If clear is true, ordertemp emits code to zero the temporary.
func ordertemp(t *Type, order *Order, clear bool) *Node {
- var var_ *Node
- var a *Node
- var l *NodeList
-
- var_ = temp(t)
+ var_ := temp(t)
if clear {
- a = Nod(OAS, var_, nil)
+ a := Nod(OAS, var_, nil)
typecheck(&a, Etop)
order.out = list(order.out, a)
}
- l = order.free
+ l := order.free
if l == nil {
l = new(NodeList)
}
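
ordertemp above takes a cell from order.free before falling back to
new(NodeList). Only the take-or-allocate half is visible in the hunk; a
sketch of the complete free-list idea it implies, with the put side assumed:

	package main

	import "fmt"

	type node struct {
		val  int
		next *node
	}

	type pool struct{ free *node }

	// get reuses a cell from the free list when one is available,
	// allocating only on a miss.
	func (p *pool) get() *node {
		l := p.free
		if l == nil {
			l = new(node)
		} else {
			p.free = l.next
		}
		l.next = nil
		return l
	}

	// put returns a cell for later reuse (the assumed half).
	func (p *pool) put(l *node) {
		l.next = p.free
		p.free = l
	}

	func main() {
		var p pool
		a := p.get()
		p.put(a)
		fmt.Println(p.get() == a) // true: the cell was recycled
	}
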
@@ -98,11 +92,8 @@
// returns a pointer to the result data instead of taking a pointer