cmd/compile/internal: some janitoring
Nicer swaps and loops (removed temporary variables). Use bool instead of int for flag variables.
This change passes go build -toolexec 'toolstash -cmp' -a std.
Change-Id: I541904c74b57297848decc51a8a4913a8eca4af3
Reviewed-on: https://go-review.googlesource.com/14316
Run-TryBot: Brad Fitzpatrick <bradfitz@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
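For reviewers skimming the diff, the standalone sketch below (not compiler code; all names and values are made up for illustration) contrasts the three idioms this CL replaces: a swap through a temporary vs. parallel assignment, the `for {}`-with-saved-counter loop vs. a counted `for`, and an int flag vs. a bool.

```go
package main

import "fmt"

func main() {
	// Swap: parallel assignment replaces the temporary variable.
	nl, nr := 1, 2
	nl, nr = nr, nl // previously: tmp := nl; nl = nr; nr = tmp
	fmt.Println(nl, nr) // 2 1

	// Counted loop: the old form ran the body q times by testing a saved
	// copy of the counter before decrementing it. The rewritten loop runs
	// the body the same number of times; in the compiler the counter is
	// never read inside the body, so the transformation is behavior-preserving.
	q := 3
	for ; q > 0; q-- { // previously: for { tmp := q; q--; if tmp <= 0 { break }; ... }
		fmt.Println("body runs")
	}

	// Flag: a variable that only ever holds 0 or 1 reads better as a bool.
	signed := true
	check := false // previously: check := 0
	if signed {
		check = true // previously: check = 1
	}
	if check { // previously: if check != 0
		fmt.Println("overflow check emitted")
	}
}
```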
diff --git a/src/cmd/compile/internal/amd64/ggen.go b/src/cmd/compile/internal/amd64/ggen.go
index 5aae563..f1f4955 100644
--- a/src/cmd/compile/internal/amd64/ggen.go
+++ b/src/cmd/compile/internal/amd64/ggen.go
@@ -187,13 +187,13 @@
t := nl.Type
t0 := t
- check := 0
+ check := false
if gc.Issigned[t.Etype] {
- check = 1
+ check = true
if gc.Isconst(nl, gc.CTINT) && nl.Int() != -(1<<uint64(t.Width*8-1)) {
- check = 0
+ check = false
} else if gc.Isconst(nr, gc.CTINT) && nr.Int() != -1 {
- check = 0
+ check = false
}
}
@@ -203,7 +203,7 @@
} else {
t = gc.Types[gc.TUINT32]
}
- check = 0
+ check = false
}
a := optoas(op, t)
@@ -252,7 +252,7 @@
}
var p2 *obj.Prog
- if check != 0 {
+ if check {
gc.Nodconst(&n4, t, -1)
gins(optoas(gc.OCMP, t), &n3, &n4)
p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
@@ -289,7 +289,7 @@
gmove(&dx, res)
}
restx(&dx, &olddx)
- if check != 0 {
+ if check {
gc.Patch(p2, gc.Pc)
}
restx(&ax, &oldax)
@@ -340,9 +340,7 @@
t := nl.Type
a := optoas(gc.OHMUL, t)
if nl.Ullman < nr.Ullman {
- tmp := nl
- nl = nr
- nr = tmp
+ nl, nr = nr, nl
}
var n1 gc.Node
@@ -500,9 +498,7 @@
// largest ullman on left.
if nl.Ullman < nr.Ullman {
- tmp := nl
- nl = nr
- nr = tmp
+ nl, nr = nr, nl
}
// generate operands in "8-bit" registers.
@@ -564,12 +560,7 @@
n1.Op = gc.OINDREG
var z gc.Node
gc.Nodconst(&z, gc.Types[gc.TUINT64], 0)
- for {
- tmp14 := q
- q--
- if tmp14 <= 0 {
- break
- }
+ for ; q > 0; q-- {
n1.Type = z.Type
gins(x86.AMOVQ, &z, &n1)
n1.Xoffset += 8
@@ -584,12 +575,7 @@
}
gc.Nodconst(&z, gc.Types[gc.TUINT8], 0)
- for {
- tmp15 := c
- c--
- if tmp15 <= 0 {
- break
- }
+ for ; c > 0; c-- {
n1.Type = z.Type
gins(x86.AMOVB, &z, &n1)
n1.Xoffset++
diff --git a/src/cmd/compile/internal/arm/cgen.go b/src/cmd/compile/internal/arm/cgen.go
index 6c1a84c..289da5d 100644
--- a/src/cmd/compile/internal/arm/cgen.go
+++ b/src/cmd/compile/internal/arm/cgen.go
@@ -205,12 +205,7 @@
gc.Regfree(&nend)
} else {
var p *obj.Prog
- for {
- tmp14 := c
- c--
- if tmp14 <= 0 {
- break
- }
+ for ; c > 0; c-- {
p = gins(op, &src, &tmp)
p.From.Type = obj.TYPE_MEM
p.From.Offset = int64(dir)
diff --git a/src/cmd/compile/internal/arm/ggen.go b/src/cmd/compile/internal/arm/ggen.go
index 8ab384e..193d4af 100644
--- a/src/cmd/compile/internal/arm/ggen.go
+++ b/src/cmd/compile/internal/arm/ggen.go
@@ -120,9 +120,7 @@
*/
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
if nl.Ullman < nr.Ullman {
- tmp := nl
- nl = nr
- nr = tmp
+ nl, nr = nr, nl
}
t := nl.Type
diff --git a/src/cmd/compile/internal/arm64/cgen.go b/src/cmd/compile/internal/arm64/cgen.go
index 9a7a8f9..a7f1c18 100644
--- a/src/cmd/compile/internal/arm64/cgen.go
+++ b/src/cmd/compile/internal/arm64/cgen.go
@@ -132,13 +132,7 @@
// ADDs. That will produce shorter, more
// pipeline-able code.
var p *obj.Prog
- for {
- tmp14 := c
- c--
- if tmp14 <= 0 {
- break
- }
-
+ for ; c > 0; c-- {
p = gins(op, &src, &tmp)
p.From.Type = obj.TYPE_MEM
p.From.Offset = int64(dir)
diff --git a/src/cmd/compile/internal/arm64/ggen.go b/src/cmd/compile/internal/arm64/ggen.go
index b647fce..2cbd663 100644
--- a/src/cmd/compile/internal/arm64/ggen.go
+++ b/src/cmd/compile/internal/arm64/ggen.go
@@ -151,13 +151,13 @@
t := nl.Type
t0 := t
- check := 0
+ check := false
if gc.Issigned[t.Etype] {
- check = 1
+ check = true
if gc.Isconst(nl, gc.CTINT) && nl.Int() != -(1<<uint64(t.Width*8-1)) {
- check = 0
+ check = false
} else if gc.Isconst(nr, gc.CTINT) && nr.Int() != -1 {
- check = 0
+ check = false
}
}
@@ -167,7 +167,7 @@
} else {
t = gc.Types[gc.TUINT64]
}
- check = 0
+ check = false
}
a := optoas(gc.ODIV, t)
@@ -206,7 +206,7 @@
gc.Patch(p1, gc.Pc)
var p2 *obj.Prog
- if check != 0 {
+ if check {
var nm1 gc.Node
gc.Nodconst(&nm1, t, -1)
gcmp(optoas(gc.OCMP, t), &tr, &nm1)
@@ -250,7 +250,7 @@
}
gc.Regfree(&tl)
- if check != 0 {
+ if check {
gc.Patch(p2, gc.Pc)
}
}
@@ -262,9 +262,7 @@
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
// largest ullman on left.
if nl.Ullman < nr.Ullman {
- tmp := (*gc.Node)(nl)
- nl = nr
- nr = tmp
+ nl, nr = nr, nl
}
t := (*gc.Type)(nl.Type)
diff --git a/src/cmd/compile/internal/gc/cgen.go b/src/cmd/compile/internal/gc/cgen.go
index b979340..860db34 100644
--- a/src/cmd/compile/internal/gc/cgen.go
+++ b/src/cmd/compile/internal/gc/cgen.go
@@ -705,9 +705,7 @@
*/
sbop: // symmetric binary
if nl.Ullman < nr.Ullman || (nl.Ullman == nr.Ullman && (Smallintconst(nl) || (nr.Op == OLITERAL && !Smallintconst(nr)))) {
- r := nl
- nl = nr
- nr = r
+ nl, nr = nr, nl
}
abop: // asymmetric binary
diff --git a/src/cmd/compile/internal/gc/plive.go b/src/cmd/compile/internal/gc/plive.go
index 2d8a0ba..fa8bc20 100644
--- a/src/cmd/compile/internal/gc/plive.go
+++ b/src/cmd/compile/internal/gc/plive.go
@@ -1685,15 +1685,13 @@
for j = 0; j < len(lv.vars); j++ {
n = lv.vars[j]
if islive(n, args, locals) {
- tmp9 := printed
- printed++
- if tmp9 != 0 {
+ if printed != 0 {
fmt.Printf(",")
}
fmt.Printf("%v", n)
+ printed++
}
}
-
fmt.Printf("\n")
}
diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go
index ba960a8..605597f 100644
--- a/src/cmd/compile/internal/gc/subr.go
+++ b/src/cmd/compile/internal/gc/subr.go
@@ -665,12 +665,7 @@
i++
}
sort.Sort(methcmp(a[:i]))
- for {
- tmp11 := i
- i--
- if tmp11 <= 0 {
- break
- }
+ for i--; i >= 0; i-- {
a[i].Down = f
f = a[i]
}
diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go
index 7c9d85f..fdeaa5b 100644
--- a/src/cmd/compile/internal/gc/typecheck.go
+++ b/src/cmd/compile/internal/gc/typecheck.go
@@ -3073,11 +3073,10 @@
setlineno(ll.N)
typecheck(&ll.N, Erv)
if f == nil {
- tmp12 := bad
- bad++
- if tmp12 == 0 {
+ if bad == 0 {
Yyerror("too many values in struct initializer")
}
+ bad++
continue
}
@@ -3110,11 +3109,10 @@
l = ll.N
setlineno(l)
if l.Op != OKEY {
- tmp13 := bad
- bad++
- if tmp13 == 0 {
+ if bad == 0 {
Yyerror("mixture of field:value and value initializers")
}
+ bad++
typecheck(&ll.N, Erv)
continue
}
diff --git a/src/cmd/compile/internal/ppc64/cgen.go b/src/cmd/compile/internal/ppc64/cgen.go
index 4f3092c..740e64c 100644
--- a/src/cmd/compile/internal/ppc64/cgen.go
+++ b/src/cmd/compile/internal/ppc64/cgen.go
@@ -126,13 +126,7 @@
// ADDs. That will produce shorter, more
// pipeline-able code.
var p *obj.Prog
- for {
- tmp14 := c
- c--
- if tmp14 <= 0 {
- break
- }
-
+ for ; c > 0; c-- {
p = gins(op, &src, &tmp)
p.From.Type = obj.TYPE_MEM
p.From.Offset = int64(dir)
diff --git a/src/cmd/compile/internal/ppc64/ggen.go b/src/cmd/compile/internal/ppc64/ggen.go
index 2779140..173e2f0 100644
--- a/src/cmd/compile/internal/ppc64/ggen.go
+++ b/src/cmd/compile/internal/ppc64/ggen.go
@@ -141,13 +141,13 @@
t := nl.Type
t0 := t
- check := 0
+ check := false
if gc.Issigned[t.Etype] {
- check = 1
+ check = true
if gc.Isconst(nl, gc.CTINT) && nl.Int() != -(1<<uint64(t.Width*8-1)) {
- check = 0
+ check = false
} else if gc.Isconst(nr, gc.CTINT) && nr.Int() != -1 {
- check = 0
+ check = false
}
}
@@ -157,7 +157,7 @@
} else {
t = gc.Types[gc.TUINT64]
}
- check = 0
+ check = false
}
a := optoas(gc.ODIV, t)
@@ -198,7 +198,7 @@
gc.Patch(p1, gc.Pc)
var p2 *obj.Prog
- if check != 0 {
+ if check {
var nm1 gc.Node
gc.Nodconst(&nm1, t, -1)
gins(optoas(gc.OCMP, t), &tr, &nm1)
@@ -242,7 +242,7 @@
}
gc.Regfree(&tl)
- if check != 0 {
+ if check {
gc.Patch(p2, gc.Pc)
}
}
@@ -254,9 +254,7 @@
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
// largest ullman on left.
if nl.Ullman < nr.Ullman {
- tmp := (*gc.Node)(nl)
- nl = nr
- nr = tmp
+ nl, nr = nr, nl
}
t := (*gc.Type)(nl.Type)
diff --git a/src/cmd/compile/internal/x86/ggen.go b/src/cmd/compile/internal/x86/ggen.go
index be865e5..85ae808 100644
--- a/src/cmd/compile/internal/x86/ggen.go
+++ b/src/cmd/compile/internal/x86/ggen.go
@@ -133,24 +133,14 @@
n1.Op = gc.OINDREG
var z gc.Node
gc.Nodconst(&z, gc.Types[gc.TUINT64], 0)
- for {
- tmp14 := q
- q--
- if tmp14 <= 0 {
- break
- }
+ for ; q > 0; q-- {
n1.Type = z.Type
gins(x86.AMOVL, &z, &n1)
n1.Xoffset += 4
}
gc.Nodconst(&z, gc.Types[gc.TUINT8], 0)
- for {
- tmp15 := c
- c--
- if tmp15 <= 0 {
- break
- }
+ for ; c > 0; c-- {
n1.Type = z.Type
gins(x86.AMOVB, &z, &n1)
n1.Xoffset++
@@ -213,13 +203,13 @@
t := nl.Type
t0 := t
- check := 0
+ check := false
if gc.Issigned[t.Etype] {
- check = 1
+ check = true
if gc.Isconst(nl, gc.CTINT) && nl.Int() != -1<<uint64(t.Width*8-1) {
- check = 0
+ check = false
} else if gc.Isconst(nr, gc.CTINT) && nr.Int() != -1 {
- check = 0
+ check = false
}
}
@@ -229,7 +219,7 @@
} else {
t = gc.Types[gc.TUINT32]
}
- check = 0
+ check = false
}
var t1 gc.Node
@@ -278,7 +268,7 @@
gc.Patch(p1, gc.Pc)
}
- if check != 0 {
+ if check {
gc.Nodconst(&n4, t, -1)
gins(optoas(gc.OCMP, t), &n1, &n4)
p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
@@ -313,7 +303,7 @@
} else {
gmove(dx, res)
}
- if check != 0 {
+ if check {
gc.Patch(p2, gc.Pc)
}
}
@@ -513,9 +503,7 @@
// largest ullman on left.
if nl.Ullman < nr.Ullman {
- tmp := nl
- nl = nr
- nr = tmp
+ nl, nr = nr, nl
}
var nt gc.Node
@@ -705,9 +693,7 @@
sbop: // symmetric binary
if nl.Ullman < nr.Ullman || nl.Op == gc.OLITERAL {
- r := nl
- nl = nr
- nr = r
+ nl, nr = nr, nl
}
abop: // asymmetric binary