cmd/5g etc: mechanical cleanup
Run rsc.io/grind rev a26569f on C->Go conversions.
The new change in grind is the inlining of goto targets.
If code says 'goto x' and the block starting at label x is unreachable
except through that goto and the code can be moved to where
the goto is without changing the meaning of its variable names,
grind does that move. Simlarly, a goto to a plain return statement
turns into that return statement (even if there are other paths to
the return statement).
Combined, these remove many long-distance gotos, which in turn
makes it possible to reduce the scope of more variable declarations.
(Because gotos can't jump across declarations, the gotos were
keeping the declarations from moving.)
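
To illustrate both effects together (a made-up snippet, not code from
this CL, and not grind's literal output), converted code of the form

	func f(n int) int {
		var tmp int
		if n == 0 {
			goto ret
		}
		tmp = n * 2
		if tmp > 10 {
			goto ret
		}
		return tmp
	ret:
		return 0
	}

can be rewritten to

	func f(n int) int {
		if n == 0 {
			return 0
		}
		tmp := n * 2
		if tmp > 10 {
			return 0
		}
		return tmp
	}

Each goto to the plain return becomes that return, and once no goto
jumps past it, the declaration of tmp can move down to its first use.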
Checked bit-for-bit compatibility with toolstash + buildall.
Reduces compiler runtime in html/template by about 12%.
Change-Id: Id727c0bd7763a61aa22f3daa00aeb8fccbc057a3
Reviewed-on: https://go-review.googlesource.com/6472
Reviewed-by: Aram Hăvărneanu <aram@mgk.ro>
Reviewed-by: Dmitry Vyukov <dvyukov@google.com>
diff --git a/src/cmd/5g/cgen.go b/src/cmd/5g/cgen.go
index 638c5a6..d9d4892 100644
--- a/src/cmd/5g/cgen.go
+++ b/src/cmd/5g/cgen.go
@@ -25,15 +25,8 @@
gc.Dump("cgen-res", res)
}
- var n1 gc.Node
- var nr *gc.Node
- var nl *gc.Node
- var a int
- var f1 gc.Node
- var f0 gc.Node
- var n2 gc.Node
if n == nil || n.Type == nil {
- goto ret
+ return
}
if res == nil || res.Type == nil {
@@ -81,7 +74,7 @@
gc.Tempname(&n1, n.Type)
cgen(n, &n1)
cgen(&n1, res)
- goto ret
+ return
}
}
@@ -90,7 +83,7 @@
gc.Fatal("forgot to compute width for %v", gc.Tconv(n.Type, 0))
}
sgen(n, res, n.Type.Width)
- goto ret
+ return
}
// update addressability for string, slice
@@ -124,7 +117,7 @@
regfree(&n1)
}
- goto ret
+ return
}
// if both are not addressable, use a temporary.
@@ -179,16 +172,16 @@
}
sudoclean()
- goto ret
+ return
}
}
// otherwise, the result is addressable but n is not.
// let's do some computation.
- nl = n.Left
+ nl := n.Left
- nr = n.Right
+ nr := n.Right
if nl != nil && nl.Ullman >= gc.UINF {
if nr != nil && nr.Ullman >= gc.UINF {
@@ -198,7 +191,7 @@
n2 := *n
n2.Left = &n1
cgen(&n2, res)
- goto ret
+ return
}
}
@@ -223,8 +216,34 @@
}
}
+ var a int
+ var f0 gc.Node
+ var n1 gc.Node
+ var n2 gc.Node
if nl != nil && gc.Isfloat[n.Type.Etype] != 0 && gc.Isfloat[nl.Type.Etype] != 0 {
- goto flt
+ // floating-point.
+ regalloc(&f0, nl.Type, res)
+
+ if nr != nil {
+ goto flt2
+ }
+
+ if n.Op == gc.OMINUS {
+ nr = gc.Nodintconst(-1)
+ gc.Convlit(&nr, n.Type)
+ n.Op = gc.OMUL
+ goto flt2
+ }
+
+ // unary
+ cgen(nl, &f0)
+
+ if n.Op != gc.OCONV && n.Op != gc.OPLUS {
+ gins(optoas(int(n.Op), n.Type), &f0, &f0)
+ }
+ gmove(&f0, res)
+ regfree(&f0)
+ return
}
switch n.Op {
default:
@@ -255,11 +274,11 @@
bgen(n, true, 0, p2)
gmove(gc.Nodbool(false), res)
gc.Patch(p3, gc.Pc)
- goto ret
+ return
case gc.OPLUS:
cgen(nl, res)
- goto ret
+ return
// unary
case gc.OCOM:
@@ -286,7 +305,13 @@
gc.OMUL:
a = optoas(int(n.Op), nl.Type)
- goto sbop
+ // symmetric binary
+ if nl.Ullman < nr.Ullman {
+ r := nl
+ nl = nr
+ nr = r
+ }
+ goto abop
// asymmetric binary
case gc.OSUB:
@@ -489,14 +514,7 @@
goto abop
}
- goto ret
-
-sbop: // symmetric binary
- if nl.Ullman < nr.Ullman {
- r := nl
- nl = nr
- nr = r
- }
+ return
// TODO(kaib): use fewer registers here.
abop: // asymmetric binary
@@ -561,33 +579,10 @@
if n2.Op != gc.OLITERAL {
regfree(&n2)
}
- goto ret
-
-flt: // floating-point.
- regalloc(&f0, nl.Type, res)
-
- if nr != nil {
- goto flt2
- }
-
- if n.Op == gc.OMINUS {
- nr = gc.Nodintconst(-1)
- gc.Convlit(&nr, n.Type)
- n.Op = gc.OMUL
- goto flt2
- }
-
- // unary
- cgen(nl, &f0)
-
- if n.Op != gc.OCONV && n.Op != gc.OPLUS {
- gins(optoas(int(n.Op), n.Type), &f0, &f0)
- }
- gmove(&f0, res)
- regfree(&f0)
- goto ret
+ return
flt2: // binary
+ var f1 gc.Node
if nl.Ullman >= nr.Ullman {
cgen(nl, &f0)
regalloc(&f1, n.Type, nil)
@@ -604,9 +599,7 @@
gmove(&f1, res)
regfree(&f0)
regfree(&f1)
- goto ret
-
-ret:
+ return
}
/*
@@ -666,7 +659,6 @@
n = n.Left
}
- var nl *gc.Node
if gc.Isconst(n, gc.CTNIL) && n.Type.Width > int64(gc.Widthptr) {
// Use of a nil interface or nil slice.
// Create a temporary we can take the address of and read.
@@ -682,7 +674,7 @@
gins(arm.AMOVW, &n1, &n2)
gmove(&n2, res)
regfree(&n2)
- goto ret
+ return
}
if n.Addable != 0 {
@@ -694,10 +686,10 @@
gins(arm.AMOVW, &n1, &n2)
gmove(&n2, res)
regfree(&n2)
- goto ret
+ return
}
- nl = n.Left
+ nl := n.Left
switch n.Op {
default:
@@ -820,8 +812,6 @@
regfree(&n3)
}
}
-
-ret:
}
/*
@@ -1195,25 +1185,23 @@
gc.Genlist(n.Ninit)
}
- var et int
- var nl *gc.Node
- var nr *gc.Node
if n.Type == nil {
gc.Convlit(&n, gc.Types[gc.TBOOL])
if n.Type == nil {
- goto ret
+ return
}
}
- et = int(n.Type.Etype)
+ et := int(n.Type.Etype)
if et != gc.TBOOL {
gc.Yyerror("cgen: bad type %v for %v", gc.Tconv(n.Type, 0), gc.Oconv(int(n.Op), 0))
gc.Patch(gins(obj.AEND, nil, nil), to)
- goto ret
+ return
}
- nr = nil
+ nr := (*gc.Node)(nil)
+ var nl *gc.Node
switch n.Op {
default:
a := gc.ONE
@@ -1221,14 +1209,14 @@
a = gc.OEQ
}
gencmp0(n, n.Type, a, likely, to)
- goto ret
+ return
// need to ask if it is bool?
case gc.OLITERAL:
if !true_ == (n.Val.U.Bval == 0) {
gc.Patch(gc.Gbranch(arm.AB, nil, 0), to)
}
- goto ret
+ return
case gc.OANDAND,
gc.OOROR:
@@ -1246,7 +1234,7 @@
bgen(n.Right, true_, likely, to)
}
- goto ret
+ return
case gc.OEQ,
gc.ONE,
@@ -1256,7 +1244,7 @@
gc.OGE:
nr = n.Right
if nr == nil || nr.Type == nil {
- goto ret
+ return
}
fallthrough
@@ -1264,14 +1252,14 @@
nl = n.Left
if nl == nil || nl.Type == nil {
- goto ret
+ return
}
}
switch n.Op {
case gc.ONOT:
bgen(nl, !true_, likely, to)
- goto ret
+ return
case gc.OEQ,
gc.ONE,
@@ -1293,7 +1281,7 @@
n.Ninit = ll
gc.Patch(gc.Gbranch(arm.AB, nil, 0), to)
gc.Patch(p2, gc.Pc)
- goto ret
+ return
}
a = gc.Brcom(a)
@@ -1438,9 +1426,7 @@
regfree(&n2)
}
- goto ret
-
-ret:
+ return
}
/*
diff --git a/src/cmd/5g/ggen.go b/src/cmd/5g/ggen.go
index 8b7010f..2879805 100644
--- a/src/cmd/5g/ggen.go
+++ b/src/cmd/5g/ggen.go
@@ -329,7 +329,7 @@
nod.Type = t
ginscall(&nod, proc)
regfree(&nod)
- goto ret
+ return
}
// call pointer
@@ -340,15 +340,13 @@
nod.Type = t
ginscall(&nod, proc)
regfree(&nod)
- goto ret
+ return
}
// call direct
n.Left.Method = 1
ginscall(n.Left, proc)
-
-ret:
}
/*
diff --git a/src/cmd/5g/gsubr.go b/src/cmd/5g/gsubr.go
index ce0d5e8..424e164 100644
--- a/src/cmd/5g/gsubr.go
+++ b/src/cmd/5g/gsubr.go
@@ -331,10 +331,8 @@
// cannot have two memory operands;
// except 64-bit, which always copies via registers anyway.
- var flo gc.Node
var a int
var r1 gc.Node
- var fhi gc.Node
if !gc.Is64(f.Type) && !gc.Is64(t.Type) && gc.Ismem(f) && gc.Ismem(t) {
goto hard
}
@@ -387,7 +385,9 @@
switch uint32(ft)<<16 | uint32(tt) {
default:
- goto fatal
+ // should not happen
+ gc.Fatal("gmove %v -> %v", gc.Nconv(f, 0), gc.Nconv(t, 0))
+ return
/*
* integer copy and truncate
@@ -784,6 +784,8 @@
// truncate 64 bit integer
trunc64:
+ var fhi gc.Node
+ var flo gc.Node
split64(f, &flo, &fhi)
regalloc(&r1, t.Type, nil)
@@ -792,10 +794,6 @@
regfree(&r1)
splitclean()
return
-
- // should not happen
-fatal:
- gc.Fatal("gmove %v -> %v", gc.Nconv(f, 0), gc.Nconv(t, 0))
}
func samaddr(f *gc.Node, t *gc.Node) bool {
@@ -1273,12 +1271,6 @@
*a = obj.Addr{}
- var oary [10]int64
- var nn *gc.Node
- var reg *gc.Node
- var n1 gc.Node
- var reg1 *gc.Node
- var o int
switch n.Op {
case gc.OLITERAL:
if !gc.Isconst(n, gc.CTINT) {
@@ -1288,98 +1280,88 @@
if v >= 32000 || v <= -32000 {
break
}
- goto lit
+ switch as {
+ default:
+ return false
+
+ case arm.AADD,
+ arm.ASUB,
+ arm.AAND,
+ arm.AORR,
+ arm.AEOR,
+ arm.AMOVB,
+ arm.AMOVBS,
+ arm.AMOVBU,
+ arm.AMOVH,
+ arm.AMOVHS,
+ arm.AMOVHU,
+ arm.AMOVW:
+ break
+ }
+
+ cleani += 2
+ reg := &clean[cleani-1]
+ reg1 := &clean[cleani-2]
+ reg.Op = gc.OEMPTY
+ reg1.Op = gc.OEMPTY
+ gc.Naddr(n, a, 1)
+ return true
case gc.ODOT,
gc.ODOTPTR:
cleani += 2
- reg = &clean[cleani-1]
+ reg := &clean[cleani-1]
reg1 := &clean[cleani-2]
reg.Op = gc.OEMPTY
reg1.Op = gc.OEMPTY
- goto odot
+ var nn *gc.Node
+ var oary [10]int64
+ o := gc.Dotoffset(n, oary[:], &nn)
+ if nn == nil {
+ sudoclean()
+ return false
+ }
+
+ if nn.Addable != 0 && o == 1 && oary[0] >= 0 {
+ // directly addressable set of DOTs
+ n1 := *nn
+
+ n1.Type = n.Type
+ n1.Xoffset += oary[0]
+ gc.Naddr(&n1, a, 1)
+ return true
+ }
+
+ regalloc(reg, gc.Types[gc.Tptr], nil)
+ n1 := *reg
+ n1.Op = gc.OINDREG
+ if oary[0] >= 0 {
+ agen(nn, reg)
+ n1.Xoffset = oary[0]
+ } else {
+ cgen(nn, reg)
+ gc.Cgen_checknil(reg)
+ n1.Xoffset = -(oary[0] + 1)
+ }
+
+ for i := 1; i < o; i++ {
+ if oary[i] >= 0 {
+ gc.Fatal("can't happen")
+ }
+ gins(arm.AMOVW, &n1, reg)
+ gc.Cgen_checknil(reg)
+ n1.Xoffset = -(oary[i] + 1)
+ }
+
+ a.Type = obj.TYPE_NONE
+ a.Name = obj.NAME_NONE
+ n1.Type = n.Type
+ gc.Naddr(&n1, a, 1)
+ return true
case gc.OINDEX:
return false
}
return false
-
-lit:
- switch as {
- default:
- return false
-
- case arm.AADD,
- arm.ASUB,
- arm.AAND,
- arm.AORR,
- arm.AEOR,
- arm.AMOVB,
- arm.AMOVBS,
- arm.AMOVBU,
- arm.AMOVH,
- arm.AMOVHS,
- arm.AMOVHU,
- arm.AMOVW:
- break
- }
-
- cleani += 2
- reg = &clean[cleani-1]
- reg1 = &clean[cleani-2]
- reg.Op = gc.OEMPTY
- reg1.Op = gc.OEMPTY
- gc.Naddr(n, a, 1)
- goto yes
-
-odot:
- o = gc.Dotoffset(n, oary[:], &nn)
- if nn == nil {
- goto no
- }
-
- if nn.Addable != 0 && o == 1 && oary[0] >= 0 {
- // directly addressable set of DOTs
- n1 := *nn
-
- n1.Type = n.Type
- n1.Xoffset += oary[0]
- gc.Naddr(&n1, a, 1)
- goto yes
- }
-
- regalloc(reg, gc.Types[gc.Tptr], nil)
- n1 = *reg
- n1.Op = gc.OINDREG
- if oary[0] >= 0 {
- agen(nn, reg)
- n1.Xoffset = oary[0]
- } else {
- cgen(nn, reg)
- gc.Cgen_checknil(reg)
- n1.Xoffset = -(oary[0] + 1)
- }
-
- for i := 1; i < o; i++ {
- if oary[i] >= 0 {
- gc.Fatal("can't happen")
- }
- gins(arm.AMOVW, &n1, reg)
- gc.Cgen_checknil(reg)
- n1.Xoffset = -(oary[i] + 1)
- }
-
- a.Type = obj.TYPE_NONE
- a.Name = obj.NAME_NONE
- n1.Type = n.Type
- gc.Naddr(&n1, a, 1)
- goto yes
-
-yes:
- return true
-
-no:
- sudoclean()
- return false
}
diff --git a/src/cmd/5g/peep.go b/src/cmd/5g/peep.go
index e28ec02..280cfb7 100644
--- a/src/cmd/5g/peep.go
+++ b/src/cmd/5g/peep.go
@@ -257,9 +257,8 @@
if !regtyp(v2) {
return false
}
- var r *gc.Flow
var info gc.ProgInfo
- for r = gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
+ for r := gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
if gc.Uniqs(r) == nil {
break
}
@@ -289,7 +288,32 @@
if p.To.Type == v1.Type {
if p.To.Reg == v1.Reg {
if p.Scond == arm.C_SCOND_NONE {
- goto gotit
+ copysub(&p.To, v1, v2, 1)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
+ if p.From.Type == v2.Type {
+ fmt.Printf(" excise")
+ }
+ fmt.Printf("\n")
+ }
+
+ for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
+ p = r.Prog
+ copysub(&p.From, v1, v2, 1)
+ copysub1(p, v1, v2, 1)
+ copysub(&p.To, v1, v2, 1)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("%v\n", r.Prog)
+ }
+ }
+
+ t := int(int(v1.Reg))
+ v1.Reg = v2.Reg
+ v2.Reg = int16(t)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("%v last\n", r.Prog)
+ }
+ return true
}
}
}
@@ -304,34 +328,6 @@
}
return false
-
-gotit:
- copysub(&p.To, v1, v2, 1)
- if gc.Debug['P'] != 0 {
- fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
- if p.From.Type == v2.Type {
- fmt.Printf(" excise")
- }
- fmt.Printf("\n")
- }
-
- for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
- p = r.Prog
- copysub(&p.From, v1, v2, 1)
- copysub1(p, v1, v2, 1)
- copysub(&p.To, v1, v2, 1)
- if gc.Debug['P'] != 0 {
- fmt.Printf("%v\n", r.Prog)
- }
- }
-
- t := int(int(v1.Reg))
- v1.Reg = v2.Reg
- v2.Reg = int16(t)
- if gc.Debug['P'] != 0 {
- fmt.Printf("%v last\n", r.Prog)
- }
- return true
}
/*
diff --git a/src/cmd/5l/asm.go b/src/cmd/5l/asm.go
index 909f682..0cebb6b 100644
--- a/src/cmd/5l/asm.go
+++ b/src/cmd/5l/asm.go
@@ -38,17 +38,14 @@
import "cmd/internal/ld"
func needlib(name string) int {
- var p string
- var s *ld.LSym
-
if name[0] == '\x00' {
return 0
}
/* reuse hash code in symbol table */
- p = fmt.Sprintf(".dynlib.%s", name)
+ p := fmt.Sprintf(".dynlib.%s", name)
- s = ld.Linklookup(ld.Ctxt, p, 0)
+ s := ld.Linklookup(ld.Ctxt, p, 0)
if s.Type == 0 {
s.Type = 100 // avoid SDATA, etc.
@@ -73,10 +70,7 @@
}
func adddynrel(s *ld.LSym, r *ld.Reloc) {
- var targ *ld.LSym
- var rel *ld.LSym
-
- targ = r.Sym
+ targ := r.Sym
ld.Ctxt.Cursym = s
switch r.Type {
@@ -203,7 +197,7 @@
}
if ld.Iself {
adddynsym(ld.Ctxt, targ)
- rel = ld.Linklookup(ld.Ctxt, ".rel", 0)
+ rel := ld.Linklookup(ld.Ctxt, ".rel", 0)
ld.Addaddrplus(ld.Ctxt, rel, s, int64(r.Off))
ld.Adduint32(ld.Ctxt, rel, ld.ELF32_R_INFO(uint32(targ.Dynid), ld.R_ARM_GLOB_DAT)) // we need a nil + A dynmic reloc
r.Type = ld.R_CONST // write r->add during relocsym
@@ -217,11 +211,9 @@
}
func elfreloc1(r *ld.Reloc, sectoff int64) int {
- var elfsym int32
-
ld.Thearch.Lput(uint32(sectoff))
- elfsym = r.Xsym.Elfsym
+ elfsym := r.Xsym.Elfsym
switch r.Type {
default:
return -1
@@ -267,11 +259,8 @@
}
func elfsetupplt() {
- var plt *ld.LSym
- var got *ld.LSym
-
- plt = ld.Linklookup(ld.Ctxt, ".plt", 0)
- got = ld.Linklookup(ld.Ctxt, ".got.plt", 0)
+ plt := ld.Linklookup(ld.Ctxt, ".plt", 0)
+ got := ld.Linklookup(ld.Ctxt, ".got.plt", 0)
if plt.Size == 0 {
// str lr, [sp, #-4]!
ld.Adduint32(ld.Ctxt, plt, 0xe52de004)
@@ -298,9 +287,8 @@
func machoreloc1(r *ld.Reloc, sectoff int64) int {
var v uint32
- var rs *ld.LSym
- rs = r.Xsym
+ rs := r.Xsym
if rs.Type == ld.SHOSTOBJ || r.Type == ld.R_CALLARM {
if rs.Dynid < 0 {
@@ -353,15 +341,13 @@
}
func archreloc(r *ld.Reloc, s *ld.LSym, val *int64) int {
- var rs *ld.LSym
-
if ld.Linkmode == ld.LinkExternal {
switch r.Type {
case ld.R_CALLARM:
r.Done = 0
// set up addend for eventual relocation via outer symbol.
- rs = r.Sym
+ rs := r.Sym
r.Xadd = r.Add
if r.Xadd&0x800000 != 0 {
@@ -437,9 +423,7 @@
}
func addpltreloc(ctxt *ld.Link, plt *ld.LSym, got *ld.LSym, sym *ld.LSym, typ int) *ld.Reloc {
- var r *ld.Reloc
-
- r = ld.Addrel(plt)
+ r := ld.Addrel(plt)
r.Sym = got
r.Off = int32(plt.Size)
r.Siz = 4
@@ -454,10 +438,6 @@
}
func addpltsym(ctxt *ld.Link, s *ld.LSym) {
- var plt *ld.LSym
- var got *ld.LSym
- var rel *ld.LSym
-
if s.Plt >= 0 {
return
}
@@ -465,9 +445,9 @@
adddynsym(ctxt, s)
if ld.Iself {
- plt = ld.Linklookup(ctxt, ".plt", 0)
- got = ld.Linklookup(ctxt, ".got.plt", 0)
- rel = ld.Linklookup(ctxt, ".rel.plt", 0)
+ plt := ld.Linklookup(ctxt, ".plt", 0)
+ got := ld.Linklookup(ctxt, ".got.plt", 0)
+ rel := ld.Linklookup(ctxt, ".rel.plt", 0)
if plt.Size == 0 {
elfsetupplt()
}
@@ -497,13 +477,11 @@
}
func addgotsyminternal(ctxt *ld.Link, s *ld.LSym) {
- var got *ld.LSym
-
if s.Got >= 0 {
return
}
- got = ld.Linklookup(ctxt, ".got", 0)
+ got := ld.Linklookup(ctxt, ".got", 0)
s.Got = int32(got.Size)
ld.Addaddrplus(ctxt, got, s, 0)
@@ -515,20 +493,17 @@
}
func addgotsym(ctxt *ld.Link, s *ld.LSym) {
- var got *ld.LSym
- var rel *ld.LSym
-
if s.Got >= 0 {
return
}
adddynsym(ctxt, s)
- got = ld.Linklookup(ctxt, ".got", 0)
+ got := ld.Linklookup(ctxt, ".got", 0)
s.Got = int32(got.Size)
ld.Adduint32(ctxt, got, 0)
if ld.Iself {
- rel = ld.Linklookup(ctxt, ".rel", 0)
+ rel := ld.Linklookup(ctxt, ".rel", 0)
ld.Addaddrplus(ctxt, rel, got, int64(s.Got))
ld.Adduint32(ctxt, rel, ld.ELF32_R_INFO(uint32(s.Dynid), ld.R_ARM_GLOB_DAT))
} else {
@@ -537,10 +512,6 @@
}
func adddynsym(ctxt *ld.Link, s *ld.LSym) {
- var d *ld.LSym
- var t int
- var name string
-
if s.Dynid >= 0 {
return
}
@@ -549,10 +520,10 @@
s.Dynid = int32(ld.Nelfsym)
ld.Nelfsym++
- d = ld.Linklookup(ctxt, ".dynsym", 0)
+ d := ld.Linklookup(ctxt, ".dynsym", 0)
/* name */
- name = s.Extname
+ name := s.Extname
ld.Adduint32(ctxt, d, uint32(ld.Addstring(ld.Linklookup(ctxt, ".dynstr", 0), name)))
@@ -567,7 +538,7 @@
ld.Adduint32(ctxt, d, 0)
/* type */
- t = ld.STB_GLOBAL << 4
+ t := ld.STB_GLOBAL << 4
if (s.Cgoexport&ld.CgoExportDynamic != 0) && s.Type&ld.SMASK == ld.STEXT {
t |= ld.STT_FUNC
@@ -589,14 +560,12 @@
}
func adddynlib(lib string) {
- var s *ld.LSym
-
if needlib(lib) == 0 {
return
}
if ld.Iself {
- s = ld.Linklookup(ld.Ctxt, ".dynstr", 0)
+ s := ld.Linklookup(ld.Ctxt, ".dynstr", 0)
if s.Size == 0 {
ld.Addstring(s, "")
}
@@ -609,13 +578,6 @@
}
func asmb() {
- var symo uint32
- var dwarfoff uint32
- var machlink uint32
- var sect *ld.Section
- var sym *ld.LSym
- var i int
-
if ld.Debug['v'] != 0 {
fmt.Fprintf(&ld.Bso, "%5.2f asmb\n", obj.Cputime())
}
@@ -625,7 +587,7 @@
ld.Asmbelfsetup()
}
- sect = ld.Segtext.Sect
+ sect := ld.Segtext.Sect
ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff))
ld.Codeblk(int64(sect.Vaddr), int64(sect.Length))
for sect = sect.Next; sect != nil; sect = sect.Next {
@@ -651,14 +613,14 @@
ld.Cseek(int64(ld.Segdata.Fileoff))
ld.Datblk(int64(ld.Segdata.Vaddr), int64(ld.Segdata.Filelen))
- machlink = 0
+ machlink := uint32(0)
if ld.HEADTYPE == ld.Hdarwin {
if ld.Debug['v'] != 0 {
fmt.Fprintf(&ld.Bso, "%5.2f dwarf\n", obj.Cputime())
}
if ld.Debug['w'] == 0 {
- dwarfoff = uint32(ld.Rnd(int64(uint64(ld.HEADR)+ld.Segtext.Length), int64(ld.INITRND)) + ld.Rnd(int64(ld.Segdata.Filelen), int64(ld.INITRND)))
+ dwarfoff := uint32(ld.Rnd(int64(uint64(ld.HEADR)+ld.Segtext.Length), int64(ld.INITRND)) + ld.Rnd(int64(ld.Segdata.Filelen), int64(ld.INITRND)))
ld.Cseek(int64(dwarfoff))
ld.Segdwarf.Fileoff = uint64(ld.Cpos())
@@ -673,7 +635,7 @@
ld.Symsize = 0
ld.Lcsize = 0
- symo = 0
+ symo := uint32(0)
if ld.Debug['s'] == 0 {
// TODO: rationalize
if ld.Debug['v'] != 0 {
@@ -719,10 +681,10 @@
ld.Asmplan9sym()
ld.Cflush()
- sym = ld.Linklookup(ld.Ctxt, "pclntab", 0)
+ sym := ld.Linklookup(ld.Ctxt, "pclntab", 0)
if sym != nil {
ld.Lcsize = int32(len(sym.P))
- for i = 0; int32(i) < ld.Lcsize; i++ {
+ for i := 0; int32(i) < ld.Lcsize; i++ {
ld.Cput(uint8(sym.P[i]))
}
diff --git a/src/cmd/5l/obj.go b/src/cmd/5l/obj.go
index 98ebbc2..7eb3b0e 100644
--- a/src/cmd/5l/obj.go
+++ b/src/cmd/5l/obj.go
@@ -81,8 +81,6 @@
}
func archinit() {
- var s *ld.LSym
-
// getgoextlinkenabled is based on GO_EXTLINK_ENABLED when
// Go was built; see ../../make.bash.
if ld.Linkmode == ld.LinkAuto && obj.Getgoextlinkenabled() == "0" {
@@ -175,7 +173,7 @@
}
// embed goarm to runtime.goarm
- s = ld.Linklookup(ld.Ctxt, "runtime.goarm", 0)
+ s := ld.Linklookup(ld.Ctxt, "runtime.goarm", 0)
s.Type = ld.SRODATA
ld.Adduint8(ld.Ctxt, s, uint8(ld.Ctxt.Goarm))
diff --git a/src/cmd/6g/cgen.go b/src/cmd/6g/cgen.go
index cb16039..05bc55b 100644
--- a/src/cmd/6g/cgen.go
+++ b/src/cmd/6g/cgen.go
@@ -29,13 +29,8 @@
gc.Dump("cgen-res", res)
}
- var nl *gc.Node
- var n1 gc.Node
- var nr *gc.Node
- var n2 gc.Node
- var a int
if n == nil || n.Type == nil {
- goto ret
+ return
}
if res == nil || res.Type == nil {
@@ -60,7 +55,7 @@
} else {
gc.Cgen_slice(n, res)
}
- goto ret
+ return
case gc.OEFACE:
if res.Op != gc.ONAME || res.Addable == 0 {
@@ -71,7 +66,7 @@
} else {
gc.Cgen_eface(n, res)
}
- goto ret
+ return
}
if n.Ullman >= gc.UINF {
@@ -83,7 +78,7 @@
gc.Tempname(&n1, n.Type)
cgen(n, &n1)
cgen(&n1, res)
- goto ret
+ return
}
}
@@ -92,7 +87,7 @@
gc.Fatal("forgot to compute width for %v", gc.Tconv(n.Type, 0))
}
sgen(n, res, n.Type.Width)
- goto ret
+ return
}
if res.Addable == 0 {
@@ -108,7 +103,7 @@
cgen(&n1, res)
regfree(&n1)
- goto ret
+ return
}
var f int
@@ -118,7 +113,7 @@
if gc.Complexop(n, res) {
gc.Complexgen(n, res)
- goto ret
+ return
}
f = 1 // gen thru register
@@ -151,7 +146,7 @@
fmt.Printf("%v [ignore previous line]\n", p1)
}
sudoclean()
- goto ret
+ return
}
}
@@ -160,7 +155,7 @@
igen(res, &n1, nil)
cgen(n, &n1)
regfree(&n1)
- goto ret
+ return
}
// update addressability for string, slice
@@ -184,16 +179,16 @@
if gc.Complexop(n, res) {
gc.Complexgen(n, res)
- goto ret
+ return
}
if n.Addable != 0 {
gmove(n, res)
- goto ret
+ return
}
- nl = n.Left
- nr = n.Right
+ nl := n.Left
+ nr := n.Right
if nl != nil && nl.Ullman >= gc.UINF {
if nr != nil && nr.Ullman >= gc.UINF {
@@ -203,7 +198,7 @@
n2 := *n
n2.Left = &n1
cgen(&n2, res)
- goto ret
+ return
}
}
@@ -224,10 +219,11 @@
}
sudoclean()
- goto ret
+ return
}
}
+ var a int
switch n.Op {
default:
gc.Dump("cgen", n)
@@ -252,11 +248,11 @@
bgen(n, true, 0, p2)
gmove(gc.Nodbool(false), res)
gc.Patch(p3, gc.Pc)
- goto ret
+ return
case gc.OPLUS:
cgen(nl, res)
- goto ret
+ return
// unary
case gc.OCOM:
@@ -270,7 +266,7 @@
gins(a, &n2, &n1)
gmove(&n1, res)
regfree(&n1)
- goto ret
+ return
case gc.OMINUS:
if gc.Isfloat[nl.Type.Etype] != 0 {
@@ -280,8 +276,16 @@
goto sbop
}
- a = optoas(int(n.Op), nl.Type)
- goto uop
+ a := optoas(int(n.Op), nl.Type)
+ // unary
+ var n1 gc.Node
+ regalloc(&n1, nl.Type, res)
+
+ cgen(nl, &n1)
+ gins(a, nil, &n1)
+ gmove(&n1, res)
+ regfree(&n1)
+ return
// symmetric binary
case gc.OAND,
@@ -325,7 +329,7 @@
gmove(&n2, res)
regfree(&n2)
regfree(&n1)
- goto ret
+ return
}
}
@@ -517,7 +521,7 @@
cgen_shift(int(n.Op), n.Bounded, nl, nr, res)
}
- goto ret
+ return
/*
* put simplest on right - we'll generate into left
@@ -543,6 +547,8 @@
}
abop: // asymmetric binary
+ var n1 gc.Node
+ var n2 gc.Node
if nl.Ullman >= nr.Ullman {
regalloc(&n1, nl.Type, res)
cgen(nl, &n1)
@@ -588,18 +594,7 @@
if n2.Op != gc.OLITERAL {
regfree(&n2)
}
- goto ret
-
-uop: // unary
- regalloc(&n1, nl.Type, res)
-
- cgen(nl, &n1)
- gins(a, nil, &n1)
- gmove(&n1, res)
- regfree(&n1)
- goto ret
-
-ret:
+ return
}
/*
@@ -878,7 +873,6 @@
n = n.Left
}
- var nl *gc.Node
if gc.Isconst(n, gc.CTNIL) && n.Type.Width > int64(gc.Widthptr) {
// Use of a nil interface or nil slice.
// Create a temporary we can take the address of and read.
@@ -894,7 +888,7 @@
gins(x86.ALEAQ, &n1, &n2)
gmove(&n2, res)
regfree(&n2)
- goto ret
+ return
}
if n.Addable != 0 {
@@ -903,10 +897,10 @@
gins(x86.ALEAQ, n, &n1)
gmove(&n1, res)
regfree(&n1)
- goto ret
+ return
}
- nl = n.Left
+ nl := n.Left
switch n.Op {
default:
@@ -981,8 +975,6 @@
ginscon(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, res)
}
}
-
-ret:
}
/*
@@ -1108,27 +1100,21 @@
gc.Genlist(n.Ninit)
}
- var a int
- var et int
- var nl *gc.Node
- var n1 gc.Node
- var nr *gc.Node
- var n2 gc.Node
if n.Type == nil {
gc.Convlit(&n, gc.Types[gc.TBOOL])
if n.Type == nil {
- goto ret
+ return
}
}
- et = int(n.Type.Etype)
+ et := int(n.Type.Etype)
if et != gc.TBOOL {
gc.Yyerror("cgen: bad type %v for %v", gc.Tconv(n.Type, 0), gc.Oconv(int(n.Op), 0))
gc.Patch(gins(obj.AEND, nil, nil), to)
- goto ret
+ return
}
- nr = nil
+ nr := (*gc.Node)(nil)
for n.Op == gc.OCONVNOP {
n = n.Left
@@ -1137,6 +1123,7 @@
}
}
+ var nl *gc.Node
switch n.Op {
default:
goto def
@@ -1146,7 +1133,7 @@
if !true_ == (n.Val.U.Bval == 0) {
gc.Patch(gc.Gbranch(obj.AJMP, nil, likely), to)
}
- goto ret
+ return
case gc.ONAME:
if n.Addable == 0 {
@@ -1160,7 +1147,7 @@
a = x86.AJEQ
}
gc.Patch(gc.Gbranch(a, n.Type, likely), to)
- goto ret
+ return
case gc.OANDAND,
gc.OOROR:
@@ -1178,7 +1165,7 @@
bgen(n.Right, true_, likely, to)
}
- goto ret
+ return
case gc.OEQ,
gc.ONE,
@@ -1188,7 +1175,7 @@
gc.OGE:
nr = n.Right
if nr == nil || nr.Type == nil {
- goto ret
+ return
}
fallthrough
@@ -1196,14 +1183,14 @@
nl = n.Left
if nl == nil || nl.Type == nil {
- goto ret
+ return
}
}
switch n.Op {
case gc.ONOT:
bgen(nl, !true_, likely, to)
- goto ret
+ return
case gc.OEQ,
gc.ONE,
@@ -1225,7 +1212,7 @@
n.Ninit = ll
gc.Patch(gc.Gbranch(obj.AJMP, nil, 0), to)
gc.Patch(p2, gc.Pc)
- goto ret
+ return
}
a = gc.Brcom(a)
@@ -1352,22 +1339,22 @@
regfree(&n2)
}
- goto ret
+ return
def:
+ var n1 gc.Node
regalloc(&n1, n.Type, nil)
cgen(n, &n1)
+ var n2 gc.Node
gc.Nodconst(&n2, n.Type, 0)
gins(optoas(gc.OCMP, n.Type), &n1, &n2)
- a = x86.AJNE
+ a := x86.AJNE
if !true_ {
a = x86.AJEQ
}
gc.Patch(gc.Gbranch(a, n.Type, likely), to)
regfree(&n1)
- goto ret
-
-ret:
+ return
}
/*
diff --git a/src/cmd/6g/ggen.go b/src/cmd/6g/ggen.go
index d0c43d6..c278f02 100644
--- a/src/cmd/6g/ggen.go
+++ b/src/cmd/6g/ggen.go
@@ -786,14 +786,6 @@
* res = nl >> nr
*/
func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
- var n1 gc.Node
- var n2 gc.Node
- var n3 gc.Node
- var cx gc.Node
- var oldcx gc.Node
- var rcx int
- var tcount *gc.Type
-
a := optoas(op, nl.Type)
if nr.Op == gc.OLITERAL {
@@ -813,7 +805,7 @@
}
gmove(&n1, res)
regfree(&n1)
- goto ret
+ return
}
if nl.Ullman >= gc.UINF {
@@ -830,24 +822,27 @@
nr = &n5
}
- rcx = int(reg[x86.REG_CX])
+ rcx := int(reg[x86.REG_CX])
+ var n1 gc.Node
gc.Nodreg(&n1, gc.Types[gc.TUINT32], x86.REG_CX)
// Allow either uint32 or uint64 as shift type,
// to avoid unnecessary conversion from uint32 to uint64
// just to do the comparison.
- tcount = gc.Types[gc.Simtype[nr.Type.Etype]]
+ tcount := gc.Types[gc.Simtype[nr.Type.Etype]]
if tcount.Etype < gc.TUINT32 {
tcount = gc.Types[gc.TUINT32]
}
regalloc(&n1, nr.Type, &n1) // to hold the shift type in CX
- regalloc(&n3, tcount, &n1) // to clear high bits of CX
+ var n3 gc.Node
+ regalloc(&n3, tcount, &n1) // to clear high bits of CX
+ var cx gc.Node
gc.Nodreg(&cx, gc.Types[gc.TUINT64], x86.REG_CX)
- oldcx = gc.Node{}
+ oldcx := gc.Node{}
if rcx > 0 && !gc.Samereg(&cx, res) {
regalloc(&oldcx, gc.Types[gc.TUINT64], nil)
gmove(&cx, &oldcx)
@@ -855,6 +850,7 @@
cx.Type = tcount
+ var n2 gc.Node
if gc.Samereg(&cx, res) {
regalloc(&n2, nl.Type, nil)
} else {
@@ -900,8 +896,6 @@
regfree(&n1)
regfree(&n2)
-
-ret:
}
/*
diff --git a/src/cmd/6g/gsubr.go b/src/cmd/6g/gsubr.go
index 4a1edf7..807b6b2 100644
--- a/src/cmd/6g/gsubr.go
+++ b/src/cmd/6g/gsubr.go
@@ -1349,12 +1349,6 @@
*a = obj.Addr{}
- var o int
- var n1 gc.Node
- var oary [10]int64
- var nn *gc.Node
- var reg *gc.Node
- var reg1 *gc.Node
switch n.Op {
case gc.OLITERAL:
if !gc.Isconst(n, gc.CTINT) {
@@ -1364,118 +1358,108 @@
if v >= 32000 || v <= -32000 {
break
}
- goto lit
+ switch as {
+ default:
+ return false
+
+ case x86.AADDB,
+ x86.AADDW,
+ x86.AADDL,
+ x86.AADDQ,
+ x86.ASUBB,
+ x86.ASUBW,
+ x86.ASUBL,
+ x86.ASUBQ,
+ x86.AANDB,
+ x86.AANDW,
+ x86.AANDL,
+ x86.AANDQ,
+ x86.AORB,
+ x86.AORW,
+ x86.AORL,
+ x86.AORQ,
+ x86.AXORB,
+ x86.AXORW,
+ x86.AXORL,
+ x86.AXORQ,
+ x86.AINCB,
+ x86.AINCW,
+ x86.AINCL,
+ x86.AINCQ,
+ x86.ADECB,
+ x86.ADECW,
+ x86.ADECL,
+ x86.ADECQ,
+ x86.AMOVB,
+ x86.AMOVW,
+ x86.AMOVL,
+ x86.AMOVQ:
+ break
+ }
+
+ cleani += 2
+ reg := &clean[cleani-1]
+ reg1 := &clean[cleani-2]
+ reg.Op = gc.OEMPTY
+ reg1.Op = gc.OEMPTY
+ gc.Naddr(n, a, 1)
+ return true
case gc.ODOT,
gc.ODOTPTR:
cleani += 2
- reg = &clean[cleani-1]
+ reg := &clean[cleani-1]
reg1 := &clean[cleani-2]
reg.Op = gc.OEMPTY
reg1.Op = gc.OEMPTY
- goto odot
+ var nn *gc.Node
+ var oary [10]int64
+ o := gc.Dotoffset(n, oary[:], &nn)
+ if nn == nil {
+ sudoclean()
+ return false
+ }
+
+ if nn.Addable != 0 && o == 1 && oary[0] >= 0 {
+ // directly addressable set of DOTs
+ n1 := *nn
+
+ n1.Type = n.Type
+ n1.Xoffset += oary[0]
+ gc.Naddr(&n1, a, 1)
+ return true
+ }
+
+ regalloc(reg, gc.Types[gc.Tptr], nil)
+ n1 := *reg
+ n1.Op = gc.OINDREG
+ if oary[0] >= 0 {
+ agen(nn, reg)
+ n1.Xoffset = oary[0]
+ } else {
+ cgen(nn, reg)
+ gc.Cgen_checknil(reg)
+ n1.Xoffset = -(oary[0] + 1)
+ }
+
+ for i := 1; i < o; i++ {
+ if oary[i] >= 0 {
+ gc.Fatal("can't happen")
+ }
+ gins(movptr, &n1, reg)
+ gc.Cgen_checknil(reg)
+ n1.Xoffset = -(oary[i] + 1)
+ }
+
+ a.Type = obj.TYPE_NONE
+ a.Index = obj.TYPE_NONE
+ fixlargeoffset(&n1)
+ gc.Naddr(&n1, a, 1)
+ return true
case gc.OINDEX:
return false
}
return false
-
-lit:
- switch as {
- default:
- return false
-
- case x86.AADDB,
- x86.AADDW,
- x86.AADDL,
- x86.AADDQ,
- x86.ASUBB,
- x86.ASUBW,
- x86.ASUBL,
- x86.ASUBQ,
- x86.AANDB,
- x86.AANDW,
- x86.AANDL,
- x86.AANDQ,
- x86.AORB,
- x86.AORW,
- x86.AORL,
- x86.AORQ,
- x86.AXORB,
- x86.AXORW,
- x86.AXORL,
- x86.AXORQ,
- x86.AINCB,
- x86.AINCW,
- x86.AINCL,
- x86.AINCQ,
- x86.ADECB,
- x86.ADECW,
- x86.ADECL,
- x86.ADECQ,
- x86.AMOVB,
- x86.AMOVW,
- x86.AMOVL,
- x86.AMOVQ:
- break
- }
-
- cleani += 2
- reg = &clean[cleani-1]
- reg1 = &clean[cleani-2]
- reg.Op = gc.OEMPTY
- reg1.Op = gc.OEMPTY
- gc.Naddr(n, a, 1)
- goto yes
-
-odot:
- o = gc.Dotoffset(n, oary[:], &nn)
- if nn == nil {
- goto no
- }
-
- if nn.Addable != 0 && o == 1 && oary[0] >= 0 {
- // directly addressable set of DOTs
- n1 := *nn
-
- n1.Type = n.Type
- n1.Xoffset += oary[0]
- gc.Naddr(&n1, a, 1)
- goto yes
- }
-
- regalloc(reg, gc.Types[gc.Tptr], nil)
- n1 = *reg
- n1.Op = gc.OINDREG
- if oary[0] >= 0 {
- agen(nn, reg)
- n1.Xoffset = oary[0]
- } else {
- cgen(nn, reg)
- gc.Cgen_checknil(reg)
- n1.Xoffset = -(oary[0] + 1)
- }
-
- for i := 1; i < o; i++ {
- if oary[i] >= 0 {
- gc.Fatal("can't happen")
- }
- gins(movptr, &n1, reg)
- gc.Cgen_checknil(reg)
- n1.Xoffset = -(oary[i] + 1)
- }
-
- a.Type = obj.TYPE_NONE
- a.Index = obj.TYPE_NONE
- fixlargeoffset(&n1)
- gc.Naddr(&n1, a, 1)
- goto yes
-
-yes:
- return true
-
-no:
- sudoclean()
- return false
}
diff --git a/src/cmd/6g/peep.go b/src/cmd/6g/peep.go
index ed582d7..f81be87 100644
--- a/src/cmd/6g/peep.go
+++ b/src/cmd/6g/peep.go
@@ -563,8 +563,7 @@
}
var info gc.ProgInfo
- var r *gc.Flow
- for r = gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
+ for r := gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("\t? %v\n", r.Prog)
}
@@ -595,7 +594,31 @@
}
if (info.Flags&gc.Move != 0) && (info.Flags&(gc.SizeL|gc.SizeQ|gc.SizeF|gc.SizeD) != 0) && p.To.Type == v1.Type && p.To.Reg == v1.Reg {
- goto gotit
+ copysub(&p.To, v1, v2, 1)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
+ if p.From.Type == v2.Type && p.From.Reg == v2.Reg {
+ fmt.Printf(" excise")
+ }
+ fmt.Printf("\n")
+ }
+
+ for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
+ p = r.Prog
+ copysub(&p.From, v1, v2, 1)
+ copysub(&p.To, v1, v2, 1)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("%v\n", r.Prog)
+ }
+ }
+
+ t := int(int(v1.Reg))
+ v1.Reg = v2.Reg
+ v2.Reg = int16(t)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("%v last\n", r.Prog)
+ }
+ return true
}
if copyau(&p.From, v2) || copyau(&p.To, v2) {
@@ -617,33 +640,6 @@
fmt.Printf("\tran off end; return 0\n")
}
return false
-
-gotit:
- copysub(&p.To, v1, v2, 1)
- if gc.Debug['P'] != 0 {
- fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
- if p.From.Type == v2.Type && p.From.Reg == v2.Reg {
- fmt.Printf(" excise")
- }
- fmt.Printf("\n")
- }
-
- for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
- p = r.Prog
- copysub(&p.From, v1, v2, 1)
- copysub(&p.To, v1, v2, 1)
- if gc.Debug['P'] != 0 {
- fmt.Printf("%v\n", r.Prog)
- }
- }
-
- t := int(int(v1.Reg))
- v1.Reg = v2.Reg
- v2.Reg = int16(t)
- if gc.Debug['P'] != 0 {
- fmt.Printf("%v last\n", r.Prog)
- }
- return true
}
/*
diff --git a/src/cmd/6l/asm.go b/src/cmd/6l/asm.go
index 72e9dbf..22776b9 100644
--- a/src/cmd/6l/asm.go
+++ b/src/cmd/6l/asm.go
@@ -44,17 +44,14 @@
var zeroes string
func needlib(name string) int {
- var p string
- var s *ld.LSym
-
if name[0] == '\x00' {
return 0
}
/* reuse hash code in symbol table */
- p = fmt.Sprintf(".elfload.%s", name)
+ p := fmt.Sprintf(".elfload.%s", name)
- s = ld.Linklookup(ld.Ctxt, p, 0)
+ s := ld.Linklookup(ld.Ctxt, p, 0)
if s.Type == 0 {
s.Type = 100 // avoid SDATA, etc.
@@ -74,11 +71,7 @@
}
func adddynrel(s *ld.LSym, r *ld.Reloc) {
- var targ *ld.LSym
- var rela *ld.LSym
- var got *ld.LSym
-
- targ = r.Sym
+ targ := r.Sym
ld.Ctxt.Cursym = s
switch r.Type {
@@ -233,7 +226,7 @@
}
if ld.Iself {
adddynsym(ld.Ctxt, targ)
- rela = ld.Linklookup(ld.Ctxt, ".rela", 0)
+ rela := ld.Linklookup(ld.Ctxt, ".rela", 0)
ld.Addaddrplus(ld.Ctxt, rela, s, int64(r.Off))
if r.Siz == 8 {
ld.Adduint64(ld.Ctxt, rela, ld.ELF64_R_INFO(uint32(targ.Dynid), ld.R_X86_64_64))
@@ -258,7 +251,7 @@
// but we only need to support cgo and that's all it needs.
adddynsym(ld.Ctxt, targ)
- got = ld.Linklookup(ld.Ctxt, ".got", 0)
+ got := ld.Linklookup(ld.Ctxt, ".got", 0)
s.Type = got.Type | ld.SSUB
s.Outer = got
s.Sub = got.Sub
@@ -276,11 +269,9 @@
}
func elfreloc1(r *ld.Reloc, sectoff int64) int {
- var elfsym int32
-
ld.Thearch.Vput(uint64(sectoff))
- elfsym = r.Xsym.Elfsym
+ elfsym := r.Xsym.Elfsym
switch r.Type {
default:
return -1
@@ -337,9 +328,8 @@
func machoreloc1(r *ld.Reloc, sectoff int64) int {
var v uint32
- var rs *ld.LSym
- rs = r.Xsym
+ rs := r.Xsym
if rs.Type == ld.SHOSTOBJ || r.Type == ld.R_PCREL {
if rs.Dynid < 0 {
@@ -406,11 +396,8 @@
}
func elfsetupplt() {
- var plt *ld.LSym
- var got *ld.LSym
-
- plt = ld.Linklookup(ld.Ctxt, ".plt", 0)
- got = ld.Linklookup(ld.Ctxt, ".got.plt", 0)
+ plt := ld.Linklookup(ld.Ctxt, ".plt", 0)
+ got := ld.Linklookup(ld.Ctxt, ".got.plt", 0)
if plt.Size == 0 {
// pushq got+8(IP)
ld.Adduint8(ld.Ctxt, plt, 0xff)
@@ -443,13 +430,9 @@
adddynsym(ld.Ctxt, s)
if ld.Iself {
- var plt *ld.LSym
- var got *ld.LSym
- var rela *ld.LSym
-
- plt = ld.Linklookup(ld.Ctxt, ".plt", 0)
- got = ld.Linklookup(ld.Ctxt, ".got.plt", 0)
- rela = ld.Linklookup(ld.Ctxt, ".rela.plt", 0)
+ plt := ld.Linklookup(ld.Ctxt, ".plt", 0)
+ got := ld.Linklookup(ld.Ctxt, ".got.plt", 0)
+ rela := ld.Linklookup(ld.Ctxt, ".rela.plt", 0)
if plt.Size == 0 {
elfsetupplt()
}
@@ -491,10 +474,8 @@
// http://networkpx.blogspot.com/2009/09/about-lcdyldinfoonly-command.html
// has details about what we're avoiding.
- var plt *ld.LSym
-
addgotsym(s)
- plt = ld.Linklookup(ld.Ctxt, ".plt", 0)
+ plt := ld.Linklookup(ld.Ctxt, ".plt", 0)
ld.Adduint32(ld.Ctxt, ld.Linklookup(ld.Ctxt, ".linkedit.plt", 0), uint32(s.Dynid))
@@ -510,20 +491,17 @@
}
func addgotsym(s *ld.LSym) {
- var got *ld.LSym
- var rela *ld.LSym
-
if s.Got >= 0 {
return
}
adddynsym(ld.Ctxt, s)
- got = ld.Linklookup(ld.Ctxt, ".got", 0)
+ got := ld.Linklookup(ld.Ctxt, ".got", 0)
s.Got = int32(got.Size)
ld.Adduint64(ld.Ctxt, got, 0)
if ld.Iself {
- rela = ld.Linklookup(ld.Ctxt, ".rela", 0)
+ rela := ld.Linklookup(ld.Ctxt, ".rela", 0)
ld.Addaddrplus(ld.Ctxt, rela, got, int64(s.Got))
ld.Adduint64(ld.Ctxt, rela, ld.ELF64_R_INFO(uint32(s.Dynid), ld.R_X86_64_GLOB_DAT))
ld.Adduint64(ld.Ctxt, rela, 0)
@@ -535,10 +513,6 @@
}
func adddynsym(ctxt *ld.Link, s *ld.LSym) {
- var d *ld.LSym
- var t int
- var name string
-
if s.Dynid >= 0 {
return
}
@@ -547,13 +521,13 @@
s.Dynid = int32(ld.Nelfsym)
ld.Nelfsym++
- d = ld.Linklookup(ctxt, ".dynsym", 0)
+ d := ld.Linklookup(ctxt, ".dynsym", 0)
- name = s.Extname
+ name := s.Extname
ld.Adduint32(ctxt, d, uint32(ld.Addstring(ld.Linklookup(ctxt, ".dynstr", 0), name)))
/* type */
- t = ld.STB_GLOBAL << 4
+ t := ld.STB_GLOBAL << 4
if s.Cgoexport != 0 && s.Type&ld.SMASK == ld.STEXT {
t |= ld.STT_FUNC
@@ -595,14 +569,12 @@
}
func adddynlib(lib string) {
- var s *ld.LSym
-
if needlib(lib) == 0 {
return
}
if ld.Iself {
- s = ld.Linklookup(ld.Ctxt, ".dynstr", 0)
+ s := ld.Linklookup(ld.Ctxt, ".dynstr", 0)
if s.Size == 0 {
ld.Addstring(s, "")
}
@@ -615,15 +587,6 @@
}
func asmb() {
- var magic int32
- var i int
- var vl int64
- var symo int64
- var dwarfoff int64
- var machlink int64
- var sect *ld.Section
- var sym *ld.LSym
-
if ld.Debug['v'] != 0 {
fmt.Fprintf(&ld.Bso, "%5.2f asmb\n", obj.Cputime())
}
@@ -638,7 +601,7 @@
ld.Asmbelfsetup()
}
- sect = ld.Segtext.Sect
+ sect := ld.Segtext.Sect
ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff))
ld.Codeblk(int64(sect.Vaddr), int64(sect.Length))
for sect = sect.Next; sect != nil; sect = sect.Next {
@@ -664,13 +627,13 @@
ld.Cseek(int64(ld.Segdata.Fileoff))
ld.Datblk(int64(ld.Segdata.Vaddr), int64(ld.Segdata.Filelen))
- machlink = 0
+ machlink := int64(0)
if ld.HEADTYPE == ld.Hdarwin {
if ld.Debug['v'] != 0 {
fmt.Fprintf(&ld.Bso, "%5.2f dwarf\n", obj.Cputime())
}
- dwarfoff = ld.Rnd(int64(uint64(ld.HEADR)+ld.Segtext.Length), int64(ld.INITRND)) + ld.Rnd(int64(ld.Segdata.Filelen), int64(ld.INITRND))
+ dwarfoff := ld.Rnd(int64(uint64(ld.HEADR)+ld.Segtext.Length), int64(ld.INITRND)) + ld.Rnd(int64(ld.Segdata.Filelen), int64(ld.INITRND))
ld.Cseek(dwarfoff)
ld.Segdwarf.Fileoff = uint64(ld.Cpos())
@@ -708,7 +671,7 @@
ld.Symsize = 0
ld.Spsize = 0
ld.Lcsize = 0
- symo = 0
+ symo := int64(0)
if ld.Debug['s'] == 0 {
if ld.Debug['v'] != 0 {
fmt.Fprintf(&ld.Bso, "%5.2f sym\n", obj.Cputime())
@@ -763,10 +726,10 @@
ld.Asmplan9sym()
ld.Cflush()
- sym = ld.Linklookup(ld.Ctxt, "pclntab", 0)
+ sym := ld.Linklookup(ld.Ctxt, "pclntab", 0)
if sym != nil {
ld.Lcsize = int32(len(sym.P))
- for i = 0; int32(i) < ld.Lcsize; i++ {
+ for i := 0; int32(i) < ld.Lcsize; i++ {
ld.Cput(uint8(sym.P[i]))
}
@@ -795,7 +758,7 @@
switch ld.HEADTYPE {
default:
case ld.Hplan9: /* plan9 */
- magic = 4*26*26 + 7
+ magic := int32(4*26*26 + 7)
magic |= 0x00008000 /* fat header */
ld.Lputb(uint32(magic)) /* magic */
@@ -803,7 +766,7 @@
ld.Lputb(uint32(ld.Segdata.Filelen))
ld.Lputb(uint32(ld.Segdata.Length - ld.Segdata.Filelen))
ld.Lputb(uint32(ld.Symsize)) /* nsyms */
- vl = ld.Entryvalue()
+ vl := ld.Entryvalue()
ld.Lputb(PADDR(uint32(vl))) /* va of entry */
ld.Lputb(uint32(ld.Spsize)) /* sp offsets */
ld.Lputb(uint32(ld.Lcsize)) /* line offsets */
diff --git a/src/cmd/8g/cgen.go b/src/cmd/8g/cgen.go
index ba8953a..1be0928 100644
--- a/src/cmd/8g/cgen.go
+++ b/src/cmd/8g/cgen.go
@@ -254,8 +254,15 @@
case gc.OMINUS,
gc.OCOM:
- a = optoas(int(n.Op), nl.Type)
- goto uop
+ a := optoas(int(n.Op), nl.Type)
+ // unary
+ var n1 gc.Node
+ gc.Tempname(&n1, nl.Type)
+
+ cgen(nl, &n1)
+ gins(a, nil, &n1)
+ gmove(&n1, res)
+ return
// symmetric binary
case gc.OAND,
@@ -270,7 +277,13 @@
break
}
- goto sbop
+ // symmetric binary
+ if nl.Ullman < nr.Ullman || nl.Op == gc.OLITERAL {
+ r := nl
+ nl = nr
+ nr = r
+ }
+ goto abop
// asymmetric binary
case gc.OSUB:
@@ -443,13 +456,6 @@
return
-sbop: // symmetric binary
- if nl.Ullman < nr.Ullman || nl.Op == gc.OLITERAL {
- r := nl
- nl = nr
- nr = r
- }
-
abop: // asymmetric binary
if gc.Smallintconst(nr) {
var n1 gc.Node
@@ -488,15 +494,6 @@
}
return
-
-uop: // unary
- var n1 gc.Node
- gc.Tempname(&n1, nl.Type)
-
- cgen(nl, &n1)
- gins(a, nil, &n1)
- gmove(&n1, res)
- return
}
/*
diff --git a/src/cmd/8g/cgen64.go b/src/cmd/8g/cgen64.go
index 0755f0e..f0b9e49 100644
--- a/src/cmd/8g/cgen64.go
+++ b/src/cmd/8g/cgen64.go
@@ -199,7 +199,7 @@
gins(i386.AMOVL, ncon(0), &lo2)
gins(i386.AMOVL, ncon(0), &hi2)
splitclean()
- goto out
+ return
}
if v >= 32 {
@@ -215,7 +215,7 @@
gins(i386.AMOVL, ncon(0), &lo2)
splitclean()
splitclean()
- goto out
+ return
}
// general shift
@@ -296,7 +296,7 @@
}
splitclean()
- goto out
+ return
}
if v >= 32 {
@@ -316,7 +316,7 @@
}
splitclean()
splitclean()
- goto out
+ return
}
// general shift
@@ -482,7 +482,7 @@
splitclean()
splitclean()
- goto out
+ return
}
gins(i386.AMOVL, &lo1, &ax)
@@ -500,8 +500,6 @@
gins(i386.AMOVL, &ax, &lo1)
gins(i386.AMOVL, &dx, &hi1)
splitclean()
-
-out:
}
/*
diff --git a/src/cmd/8g/ggen.go b/src/cmd/8g/ggen.go
index ca2a79f..7aba85e 100644
--- a/src/cmd/8g/ggen.go
+++ b/src/cmd/8g/ggen.go
@@ -934,7 +934,27 @@
gc.Nodreg(&f0, nl.Type, i386.REG_F0)
gc.Nodreg(&f1, n.Type, i386.REG_F0+1)
if nr != nil {
- goto flt2
+ // binary
+ if nl.Ullman >= nr.Ullman {
+ cgen(nl, &f0)
+ if nr.Addable != 0 {
+ gins(foptoas(int(n.Op), n.Type, 0), nr, &f0)
+ } else {
+ cgen(nr, &f0)
+ gins(foptoas(int(n.Op), n.Type, Fpop), &f0, &f1)
+ }
+ } else {
+ cgen(nr, &f0)
+ if nl.Addable != 0 {
+ gins(foptoas(int(n.Op), n.Type, Frev), nl, &f0)
+ } else {
+ cgen(nl, &f0)
+ gins(foptoas(int(n.Op), n.Type, Frev|Fpop), &f0, &f1)
+ }
+ }
+
+ gmove(&f0, res)
+ return
}
// unary
@@ -945,28 +965,6 @@
}
gmove(&f0, res)
return
-
-flt2: // binary
- if nl.Ullman >= nr.Ullman {
- cgen(nl, &f0)
- if nr.Addable != 0 {
- gins(foptoas(int(n.Op), n.Type, 0), nr, &f0)
- } else {
- cgen(nr, &f0)
- gins(foptoas(int(n.Op), n.Type, Fpop), &f0, &f1)
- }
- } else {
- cgen(nr, &f0)
- if nl.Addable != 0 {
- gins(foptoas(int(n.Op), n.Type, Frev), nl, &f0)
- } else {
- cgen(nl, &f0)
- gins(foptoas(int(n.Op), n.Type, Frev|Fpop), &f0, &f1)
- }
- }
-
- gmove(&f0, res)
- return
}
func cgen_floatsse(n *gc.Node, res *gc.Node) {
@@ -1064,7 +1062,47 @@
var n2 gc.Node
var ax gc.Node
if gc.Use_sse != 0 {
- goto sse
+ if nl.Addable == 0 {
+ var n1 gc.Node
+ gc.Tempname(&n1, nl.Type)
+ cgen(nl, &n1)
+ nl = &n1
+ }
+
+ if nr.Addable == 0 {
+ var tmp gc.Node
+ gc.Tempname(&tmp, nr.Type)
+ cgen(nr, &tmp)
+ nr = &tmp
+ }
+
+ var n2 gc.Node
+ regalloc(&n2, nr.Type, nil)
+ gmove(nr, &n2)
+ nr = &n2
+
+ if nl.Op != gc.OREGISTER {
+ var n3 gc.Node
+ regalloc(&n3, nl.Type, nil)
+ gmove(nl, &n3)
+ nl = &n3
+ }
+
+ if a == gc.OGE || a == gc.OGT {
+ // only < and <= work right with NaN; reverse if needed
+ r := nr
+
+ nr = nl
+ nl = r
+ a = gc.Brrev(a)
+ }
+
+ gins(foptoas(gc.OCMP, nr.Type, 0), nl, nr)
+ if nl.Op == gc.OREGISTER {
+ regfree(nl)
+ }
+ regfree(nr)
+ goto ret
} else {
goto x87
}
@@ -1118,47 +1156,6 @@
goto ret
-sse:
- if nl.Addable == 0 {
- var n1 gc.Node
- gc.Tempname(&n1, nl.Type)
- cgen(nl, &n1)
- nl = &n1
- }
-
- if nr.Addable == 0 {
- var tmp gc.Node
- gc.Tempname(&tmp, nr.Type)
- cgen(nr, &tmp)
- nr = &tmp
- }
-
- regalloc(&n2, nr.Type, nil)
- gmove(nr, &n2)
- nr = &n2
-
- if nl.Op != gc.OREGISTER {
- var n3 gc.Node
- regalloc(&n3, nl.Type, nil)
- gmove(nl, &n3)
- nl = &n3
- }
-
- if a == gc.OGE || a == gc.OGT {
- // only < and <= work right with NaN; reverse if needed
- r := nr
-
- nr = nl
- nl = r
- a = gc.Brrev(a)
- }
-
- gins(foptoas(gc.OCMP, nr.Type, 0), nl, nr)
- if nl.Op == gc.OREGISTER {
- regfree(nl)
- }
- regfree(nr)
-
ret:
if a == gc.OEQ {
// neither NE nor P
diff --git a/src/cmd/8g/gsubr.go b/src/cmd/8g/gsubr.go
index 841f4dc..ec00779 100644
--- a/src/cmd/8g/gsubr.go
+++ b/src/cmd/8g/gsubr.go
@@ -403,7 +403,48 @@
et := int(gc.Simtype[t.Etype])
if gc.Use_sse != 0 {
- goto sse
+ switch uint32(op)<<16 | uint32(et) {
+ default:
+ gc.Fatal("foptoas-sse: no entry %v-%v", gc.Oconv(int(op), 0), gc.Tconv(t, 0))
+
+ case gc.OCMP<<16 | gc.TFLOAT32:
+ a = i386.AUCOMISS
+
+ case gc.OCMP<<16 | gc.TFLOAT64:
+ a = i386.AUCOMISD
+
+ case gc.OAS<<16 | gc.TFLOAT32:
+ a = i386.AMOVSS
+
+ case gc.OAS<<16 | gc.TFLOAT64:
+ a = i386.AMOVSD
+
+ case gc.OADD<<16 | gc.TFLOAT32:
+ a = i386.AADDSS
+
+ case gc.OADD<<16 | gc.TFLOAT64:
+ a = i386.AADDSD
+
+ case gc.OSUB<<16 | gc.TFLOAT32:
+ a = i386.ASUBSS
+
+ case gc.OSUB<<16 | gc.TFLOAT64:
+ a = i386.ASUBSD
+
+ case gc.OMUL<<16 | gc.TFLOAT32:
+ a = i386.AMULSS
+
+ case gc.OMUL<<16 | gc.TFLOAT64:
+ a = i386.AMULSD
+
+ case gc.ODIV<<16 | gc.TFLOAT32:
+ a = i386.ADIVSS
+
+ case gc.ODIV<<16 | gc.TFLOAT64:
+ a = i386.ADIVSD
+ }
+
+ return a
}
// If we need Fpop, it means we're working on
@@ -499,50 +540,6 @@
gc.Fatal("foptoas %v %v %#x", gc.Oconv(int(op), 0), gc.Tconv(t, 0), flg)
return 0
-
-sse:
- switch uint32(op)<<16 | uint32(et) {
- default:
- gc.Fatal("foptoas-sse: no entry %v-%v", gc.Oconv(int(op), 0), gc.Tconv(t, 0))
-
- case gc.OCMP<<16 | gc.TFLOAT32:
- a = i386.AUCOMISS
-
- case gc.OCMP<<16 | gc.TFLOAT64:
- a = i386.AUCOMISD
-
- case gc.OAS<<16 | gc.TFLOAT32:
- a = i386.AMOVSS
-
- case gc.OAS<<16 | gc.TFLOAT64:
- a = i386.AMOVSD
-
- case gc.OADD<<16 | gc.TFLOAT32:
- a = i386.AADDSS
-
- case gc.OADD<<16 | gc.TFLOAT64:
- a = i386.AADDSD
-
- case gc.OSUB<<16 | gc.TFLOAT32:
- a = i386.ASUBSS
-
- case gc.OSUB<<16 | gc.TFLOAT64:
- a = i386.ASUBSD
-
- case gc.OMUL<<16 | gc.TFLOAT32:
- a = i386.AMULSS
-
- case gc.OMUL<<16 | gc.TFLOAT64:
- a = i386.AMULSD
-
- case gc.ODIV<<16 | gc.TFLOAT32:
- a = i386.ADIVSS
-
- case gc.ODIV<<16 | gc.TFLOAT64:
- a = i386.ADIVSD
- }
-
- return a
}
var resvd = []int{
@@ -928,7 +925,9 @@
switch uint32(ft)<<16 | uint32(tt) {
default:
- goto fatal
+ // should not happen
+ gc.Fatal("gmove %v -> %v", gc.Nconv(f, 0), gc.Nconv(t, 0))
+ return
/*
* integer copy and truncate
@@ -1164,10 +1163,6 @@
gmove(&r1, t)
regfree(&r1)
return
-
- // should not happen
-fatal:
- gc.Fatal("gmove %v -> %v", gc.Nconv(f, 0), gc.Nconv(t, 0))
}
func floatmove(f *gc.Node, t *gc.Node) {
diff --git a/src/cmd/8g/peep.go b/src/cmd/8g/peep.go
index 8aa6e94..deb3405 100644
--- a/src/cmd/8g/peep.go
+++ b/src/cmd/8g/peep.go
@@ -371,8 +371,7 @@
return false
}
var info gc.ProgInfo
- var r *gc.Flow
- for r = gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
+ for r := gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("\t? %v\n", r.Prog)
}
@@ -393,7 +392,31 @@
}
if (info.Flags&gc.Move != 0) && (info.Flags&(gc.SizeL|gc.SizeQ|gc.SizeF|gc.SizeD) != 0) && p.To.Type == v1.Type && p.To.Reg == v1.Reg {
- goto gotit
+ copysub(&p.To, v1, v2, 1)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
+ if p.From.Type == v2.Type && p.From.Reg == v2.Reg {
+ fmt.Printf(" excise")
+ }
+ fmt.Printf("\n")
+ }
+
+ for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
+ p = r.Prog
+ copysub(&p.From, v1, v2, 1)
+ copysub(&p.To, v1, v2, 1)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("%v\n", r.Prog)
+ }
+ }
+
+ t := int(v1.Reg)
+ v1.Reg = v2.Reg
+ v2.Reg = int16(t)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("%v last\n", r.Prog)
+ }
+ return true
}
if copyau(&p.From, v2) || copyau(&p.To, v2) {
@@ -405,33 +428,6 @@
}
return false
-
-gotit:
- copysub(&p.To, v1, v2, 1)
- if gc.Debug['P'] != 0 {
- fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
- if p.From.Type == v2.Type && p.From.Reg == v2.Reg {
- fmt.Printf(" excise")
- }
- fmt.Printf("\n")
- }
-
- for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
- p = r.Prog
- copysub(&p.From, v1, v2, 1)
- copysub(&p.To, v1, v2, 1)
- if gc.Debug['P'] != 0 {
- fmt.Printf("%v\n", r.Prog)
- }
- }
-
- t := int(v1.Reg)
- v1.Reg = v2.Reg
- v2.Reg = int16(t)
- if gc.Debug['P'] != 0 {
- fmt.Printf("%v last\n", r.Prog)
- }
- return true
}
/*
diff --git a/src/cmd/8l/asm.go b/src/cmd/8l/asm.go
index 295c62e..d040c03 100644
--- a/src/cmd/8l/asm.go
+++ b/src/cmd/8l/asm.go
@@ -38,17 +38,14 @@
import "cmd/internal/ld"
func needlib(name string) int {
- var p string
- var s *ld.LSym
-
if name[0] == '\x00' {
return 0
}
/* reuse hash code in symbol table */
- p = fmt.Sprintf(".dynlib.%s", name)
+ p := fmt.Sprintf(".dynlib.%s", name)
- s = ld.Linklookup(ld.Ctxt, p, 0)
+ s := ld.Linklookup(ld.Ctxt, p, 0)
if s.Type == 0 {
s.Type = 100 // avoid SDATA, etc.
@@ -66,11 +63,7 @@
}
func adddynrel(s *ld.LSym, r *ld.Reloc) {
- var targ *ld.LSym
- var rel *ld.LSym
- var got *ld.LSym
-
- targ = r.Sym
+ targ := r.Sym
ld.Ctxt.Cursym = s
switch r.Type {
@@ -210,7 +203,7 @@
}
if ld.Iself {
adddynsym(ld.Ctxt, targ)
- rel = ld.Linklookup(ld.Ctxt, ".rel", 0)
+ rel := ld.Linklookup(ld.Ctxt, ".rel", 0)
ld.Addaddrplus(ld.Ctxt, rel, s, int64(r.Off))
ld.Adduint32(ld.Ctxt, rel, ld.ELF32_R_INFO(uint32(targ.Dynid), ld.R_386_32))
r.Type = ld.R_CONST // write r->add during relocsym
@@ -231,7 +224,7 @@
// but we only need to support cgo and that's all it needs.
adddynsym(ld.Ctxt, targ)
- got = ld.Linklookup(ld.Ctxt, ".got", 0)
+ got := ld.Linklookup(ld.Ctxt, ".got", 0)
s.Type = got.Type | ld.SSUB
s.Outer = got
s.Sub = got.Sub
@@ -249,11 +242,9 @@
}
func elfreloc1(r *ld.Reloc, sectoff int64) int {
- var elfsym int32
-
ld.Thearch.Lput(uint32(sectoff))
- elfsym = r.Xsym.Elfsym
+ elfsym := r.Xsym.Elfsym
switch r.Type {
default:
return -1
@@ -287,9 +278,8 @@
func machoreloc1(r *ld.Reloc, sectoff int64) int {
var v uint32
- var rs *ld.LSym
- rs = r.Xsym
+ rs := r.Xsym
if rs.Type == ld.SHOSTOBJ {
if rs.Dynid < 0 {
@@ -365,11 +355,8 @@
}
func elfsetupplt() {
- var plt *ld.LSym
- var got *ld.LSym
-
- plt = ld.Linklookup(ld.Ctxt, ".plt", 0)
- got = ld.Linklookup(ld.Ctxt, ".got.plt", 0)
+ plt := ld.Linklookup(ld.Ctxt, ".plt", 0)
+ got := ld.Linklookup(ld.Ctxt, ".got.plt", 0)
if plt.Size == 0 {
// pushl got+4
ld.Adduint8(ld.Ctxt, plt, 0xff)
@@ -395,10 +382,6 @@
}
func addpltsym(ctxt *ld.Link, s *ld.LSym) {
- var plt *ld.LSym
- var got *ld.LSym
- var rel *ld.LSym
-
if s.Plt >= 0 {
return
}
@@ -406,9 +389,9 @@
adddynsym(ctxt, s)
if ld.Iself {
- plt = ld.Linklookup(ctxt, ".plt", 0)
- got = ld.Linklookup(ctxt, ".got.plt", 0)
- rel = ld.Linklookup(ctxt, ".rel.plt", 0)
+ plt := ld.Linklookup(ctxt, ".plt", 0)
+ got := ld.Linklookup(ctxt, ".got.plt", 0)
+ rel := ld.Linklookup(ctxt, ".rel.plt", 0)
if plt.Size == 0 {
elfsetupplt()
}
@@ -441,9 +424,7 @@
} else if ld.HEADTYPE == ld.Hdarwin {
// Same laziness as in 6l.
- var plt *ld.LSym
-
- plt = ld.Linklookup(ctxt, ".plt", 0)
+ plt := ld.Linklookup(ctxt, ".plt", 0)
addgotsym(ctxt, s)
@@ -461,20 +442,17 @@
}
func addgotsym(ctxt *ld.Link, s *ld.LSym) {
- var got *ld.LSym
- var rel *ld.LSym
-
if s.Got >= 0 {
return
}
adddynsym(ctxt, s)
- got = ld.Linklookup(ctxt, ".got", 0)
+ got := ld.Linklookup(ctxt, ".got", 0)
s.Got = int32(got.Size)
ld.Adduint32(ctxt, got, 0)
if ld.Iself {
- rel = ld.Linklookup(ctxt, ".rel", 0)
+ rel := ld.Linklookup(ctxt, ".rel", 0)
ld.Addaddrplus(ctxt, rel, got, int64(s.Got))
ld.Adduint32(ctxt, rel, ld.ELF32_R_INFO(uint32(s.Dynid), ld.R_386_GLOB_DAT))
} else if ld.HEADTYPE == ld.Hdarwin {
@@ -485,10 +463,6 @@
}
func adddynsym(ctxt *ld.Link, s *ld.LSym) {
- var d *ld.LSym
- var t int
- var name string
-
if s.Dynid >= 0 {
return
}
@@ -497,10 +471,10 @@
s.Dynid = int32(ld.Nelfsym)
ld.Nelfsym++
- d = ld.Linklookup(ctxt, ".dynsym", 0)
+ d := ld.Linklookup(ctxt, ".dynsym", 0)
/* name */
- name = s.Extname
+ name := s.Extname
ld.Adduint32(ctxt, d, uint32(ld.Addstring(ld.Linklookup(ctxt, ".dynstr", 0), name)))
@@ -515,7 +489,7 @@
ld.Adduint32(ctxt, d, 0)
/* type */
- t = ld.STB_GLOBAL << 4
+ t := ld.STB_GLOBAL << 4
if s.Cgoexport != 0 && s.Type&ld.SMASK == ld.STEXT {
t |= ld.STT_FUNC
@@ -541,14 +515,12 @@
}
func adddynlib(lib string) {
- var s *ld.LSym
-
if needlib(lib) == 0 {
return
}
if ld.Iself {
- s = ld.Linklookup(ld.Ctxt, ".dynstr", 0)
+ s := ld.Linklookup(ld.Ctxt, ".dynstr", 0)
if s.Size == 0 {
ld.Addstring(s, "")
}
@@ -561,14 +533,6 @@
}
func asmb() {
- var magic int32
- var symo uint32
- var dwarfoff uint32
- var machlink uint32
- var sect *ld.Section
- var sym *ld.LSym
- var i int
-
if ld.Debug['v'] != 0 {
fmt.Fprintf(&ld.Bso, "%5.2f asmb\n", obj.Cputime())
}
@@ -578,7 +542,7 @@
ld.Asmbelfsetup()
}
- sect = ld.Segtext.Sect
+ sect := ld.Segtext.Sect
ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff))
ld.Codeblk(int64(sect.Vaddr), int64(sect.Length))
for sect = sect.Next; sect != nil; sect = sect.Next {
@@ -604,13 +568,13 @@
ld.Cseek(int64(ld.Segdata.Fileoff))
ld.Datblk(int64(ld.Segdata.Vaddr), int64(ld.Segdata.Filelen))
- machlink = 0
+ machlink := uint32(0)
if ld.HEADTYPE == ld.Hdarwin {
if ld.Debug['v'] != 0 {
fmt.Fprintf(&ld.Bso, "%5.2f dwarf\n", obj.Cputime())
}
- dwarfoff = uint32(ld.Rnd(int64(uint64(ld.HEADR)+ld.Segtext.Length), int64(ld.INITRND)) + ld.Rnd(int64(ld.Segdata.Filelen), int64(ld.INITRND)))
+ dwarfoff := uint32(ld.Rnd(int64(uint64(ld.HEADR)+ld.Segtext.Length), int64(ld.INITRND)) + ld.Rnd(int64(ld.Segdata.Filelen), int64(ld.INITRND)))
ld.Cseek(int64(dwarfoff))
ld.Segdwarf.Fileoff = uint64(ld.Cpos())
@@ -623,7 +587,7 @@
ld.Symsize = 0
ld.Spsize = 0
ld.Lcsize = 0
- symo = 0
+ symo := uint32(0)
if ld.Debug['s'] == 0 {
// TODO: rationalize
if ld.Debug['v'] != 0 {
@@ -673,10 +637,10 @@
ld.Asmplan9sym()
ld.Cflush()
- sym = ld.Linklookup(ld.Ctxt, "pclntab", 0)
+ sym := ld.Linklookup(ld.Ctxt, "pclntab", 0)
if sym != nil {
ld.Lcsize = int32(len(sym.P))
- for i = 0; int32(i) < ld.Lcsize; i++ {
+ for i := 0; int32(i) < ld.Lcsize; i++ {
ld.Cput(uint8(sym.P[i]))
}
@@ -704,7 +668,7 @@
switch ld.HEADTYPE {
default:
case ld.Hplan9: /* plan9 */
- magic = 4*11*11 + 7
+ magic := int32(4*11*11 + 7)
ld.Lputb(uint32(magic)) /* magic */
ld.Lputb(uint32(ld.Segtext.Filelen)) /* sizes */
diff --git a/src/cmd/9g/cgen.go b/src/cmd/9g/cgen.go
index 74accf2..61a93da 100644
--- a/src/cmd/9g/cgen.go
+++ b/src/cmd/9g/cgen.go
@@ -26,13 +26,8 @@
gc.Dump("cgen-res", res)
}
- var a int
- var nr *gc.Node
- var nl *gc.Node
- var n1 gc.Node
- var n2 gc.Node
if n == nil || n.Type == nil {
- goto ret
+ return
}
if res == nil || res.Type == nil {
@@ -57,7 +52,7 @@
} else {
gc.Cgen_slice(n, res)
}
- goto ret
+ return
case gc.OEFACE:
if res.Op != gc.ONAME || res.Addable == 0 {
@@ -68,7 +63,7 @@
} else {
gc.Cgen_eface(n, res)
}
- goto ret
+ return
}
if n.Ullman >= gc.UINF {
@@ -80,7 +75,7 @@
gc.Tempname(&n1, n.Type)
cgen(n, &n1)
cgen(&n1, res)
- goto ret
+ return
}
}
@@ -89,7 +84,7 @@
gc.Fatal("forgot to compute width for %v", gc.Tconv(n.Type, 0))
}
sgen(n, res, n.Type.Width)
- goto ret
+ return
}
if res.Addable == 0 {
@@ -105,7 +100,7 @@
cgen(&n1, res)
regfree(&n1)
- goto ret
+ return
}
var f int
@@ -115,7 +110,7 @@
if gc.Complexop(n, res) {
gc.Complexgen(n, res)
- goto ret
+ return
}
f = 1 // gen thru register
@@ -148,7 +143,7 @@
fmt.Printf("%v [ignore previous line]\n", p1)
}
sudoclean()
- goto ret
+ return
}
}
@@ -157,7 +152,7 @@
igen(res, &n1, nil)
cgen(n, &n1)
regfree(&n1)
- goto ret
+ return
}
// update addressability for string, slice
@@ -181,7 +176,7 @@
if gc.Complexop(n, res) {
gc.Complexgen(n, res)
- goto ret
+ return
}
// if both are addressable, move
@@ -196,11 +191,11 @@
regfree(&n1)
}
- goto ret
+ return
}
- nl = n.Left
- nr = n.Right
+ nl := n.Left
+ nr := n.Right
if nl != nil && nl.Ullman >= gc.UINF {
if nr != nil && nr.Ullman >= gc.UINF {
@@ -210,7 +205,7 @@
n2 := *n
n2.Left = &n1
cgen(&n2, res)
- goto ret
+ return
}
}
@@ -231,7 +226,7 @@
}
sudoclean()
- goto ret
+ return
}
}
@@ -239,6 +234,7 @@
// OGE, OLE, and ONE ourselves.
// if(nl != N && isfloat[n->type->etype] && isfloat[nl->type->etype]) goto flt;
+ var a int
switch n.Op {
default:
gc.Dump("cgen", n)
@@ -263,11 +259,11 @@
bgen(n, true, 0, p2)
gmove(gc.Nodbool(false), res)
gc.Patch(p3, gc.Pc)
- goto ret
+ return
case gc.OPLUS:
cgen(nl, res)
- goto ret
+ return
// unary
case gc.OCOM:
@@ -281,7 +277,7 @@
gins(a, &n2, &n1)
gmove(&n1, res)
regfree(&n1)
- goto ret
+ return
case gc.OMINUS:
if gc.Isfloat[nl.Type.Etype] != 0 {
@@ -291,8 +287,16 @@
goto sbop
}
- a = optoas(int(n.Op), nl.Type)
- goto uop
+ a := optoas(int(n.Op), nl.Type)
+ // unary
+ var n1 gc.Node
+ regalloc(&n1, nl.Type, res)
+
+ cgen(nl, &n1)
+ gins(a, nil, &n1)
+ gmove(&n1, res)
+ regfree(&n1)
+ return
// symmetric binary
case gc.OAND,
@@ -331,7 +335,7 @@
gmove(&n2, res)
regfree(&n2)
regfree(&n1)
- goto ret
+ return
}
}
@@ -523,7 +527,7 @@
cgen_shift(int(n.Op), n.Bounded, nl, nr, res)
}
- goto ret
+ return
/*
* put simplest on right - we'll generate into left
@@ -549,6 +553,8 @@
}
abop: // asymmetric binary
+ var n1 gc.Node
+ var n2 gc.Node
if nl.Ullman >= nr.Ullman {
regalloc(&n1, nl.Type, res)
cgen(nl, &n1)
@@ -610,18 +616,7 @@
if n2.Op != gc.OLITERAL {
regfree(&n2)
}
- goto ret
-
-uop: // unary
- regalloc(&n1, nl.Type, res)
-
- cgen(nl, &n1)
- gins(a, nil, &n1)
- gmove(&n1, res)
- regfree(&n1)
- goto ret
-
-ret:
+ return
}
/*
@@ -887,7 +882,6 @@
n = n.Left
}
- var nl *gc.Node
if gc.Isconst(n, gc.CTNIL) && n.Type.Width > int64(gc.Widthptr) {
// Use of a nil interface or nil slice.
// Create a temporary we can take the address of and read.
@@ -906,7 +900,7 @@
gins(ppc64.AMOVD, &n3, &n2)
gmove(&n2, res)
regfree(&n2)
- goto ret
+ return
}
if n.Addable != 0 {
@@ -918,10 +912,10 @@
gins(ppc64.AMOVD, &n1, &n2)
gmove(&n2, res)
regfree(&n2)
- goto ret
+ return
}
- nl = n.Left
+ nl := n.Left
switch n.Op {
default:
@@ -999,8 +993,6 @@
ginsadd(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, res)
}
}
-
-ret:
}
/*
@@ -1126,24 +1118,21 @@
gc.Genlist(n.Ninit)
}
- var et int
- var nl *gc.Node
- var nr *gc.Node
if n.Type == nil {
gc.Convlit(&n, gc.Types[gc.TBOOL])
if n.Type == nil {
- goto ret
+ return
}
}
- et = int(n.Type.Etype)
+ et := int(n.Type.Etype)
if et != gc.TBOOL {
gc.Yyerror("cgen: bad type %v for %v", gc.Tconv(n.Type, 0), gc.Oconv(int(n.Op), 0))
gc.Patch(gins(obj.AEND, nil, nil), to)
- goto ret
+ return
}
- nr = nil
+ nr := (*gc.Node)(nil)
for n.Op == gc.OCONVNOP {
n = n.Left
@@ -1152,6 +1141,7 @@
}
}
+ var nl *gc.Node
switch n.Op {
default:
var n1 gc.Node
@@ -1166,14 +1156,14 @@
}
gc.Patch(gc.Gbranch(a, n.Type, likely), to)
regfree(&n1)
- goto ret
+ return
// need to ask if it is bool?
case gc.OLITERAL:
if !true_ == (n.Val.U.Bval == 0) {
gc.Patch(gc.Gbranch(ppc64.ABR, nil, likely), to)
}
- goto ret
+ return
case gc.OANDAND,
gc.OOROR:
@@ -1191,7 +1181,7 @@
bgen(n.Right, true_, likely, to)
}
- goto ret
+ return
case gc.OEQ,
gc.ONE,
@@ -1201,7 +1191,7 @@
gc.OGE:
nr = n.Right
if nr == nil || nr.Type == nil {
- goto ret
+ return
}
fallthrough
@@ -1209,14 +1199,14 @@
nl = n.Left
if nl == nil || nl.Type == nil {
- goto ret
+ return
}
}
switch n.Op {
case gc.ONOT:
bgen(nl, !true_, likely, to)
- goto ret
+ return
case gc.OEQ,
gc.ONE,
@@ -1238,7 +1228,7 @@
n.Ninit = ll
gc.Patch(gc.Gbranch(ppc64.ABR, nil, 0), to)
gc.Patch(p2, gc.Pc)
- goto ret
+ return
}
a = gc.Brcom(a)
@@ -1365,9 +1355,7 @@
regfree(&n2)
}
- goto ret
-
-ret:
+ return
}
/*
diff --git a/src/cmd/9g/ggen.go b/src/cmd/9g/ggen.go
index 2a3dbcb..bd22771 100644
--- a/src/cmd/9g/ggen.go
+++ b/src/cmd/9g/ggen.go
@@ -572,10 +572,7 @@
func cgen_div(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
// TODO(minux): enable division by magic multiply (also need to fix longmod below)
//if(nr->op != OLITERAL)
- goto longdiv
-
// division and mod using (slow) hardware instruction
-longdiv:
dodiv(op, nl, nr, res)
return
@@ -639,11 +636,6 @@
* res = nl >> nr
*/
func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
- var n1 gc.Node
- var n2 gc.Node
- var n3 gc.Node
- var tcount *gc.Type
-
a := int(optoas(op, nl.Type))
if nr.Op == gc.OLITERAL {
@@ -663,7 +655,7 @@
}
gmove(&n1, res)
regfree(&n1)
- goto ret
+ return
}
if nl.Ullman >= gc.UINF {
@@ -683,15 +675,18 @@
// Allow either uint32 or uint64 as shift type,
// to avoid unnecessary conversion from uint32 to uint64
// just to do the comparison.
- tcount = gc.Types[gc.Simtype[nr.Type.Etype]]
+ tcount := gc.Types[gc.Simtype[nr.Type.Etype]]
if tcount.Etype < gc.TUINT32 {
tcount = gc.Types[gc.TUINT32]
}
+ var n1 gc.Node
regalloc(&n1, nr.Type, nil) // to hold the shift type in CX
- regalloc(&n3, tcount, &n1) // to clear high bits of CX
+ var n3 gc.Node
+ regalloc(&n3, tcount, &n1) // to clear high bits of CX
+ var n2 gc.Node
regalloc(&n2, nl.Type, res)
if nl.Ullman >= nr.Ullman {
@@ -728,8 +723,6 @@
regfree(&n1)
regfree(&n2)
-
-ret:
}
func clearfat(nl *gc.Node) {
@@ -759,9 +752,8 @@
agen(nl, &dst)
var boff uint64
- var p *obj.Prog
if q > 128 {
- p = gins(ppc64.ASUB, nil, &dst)
+ p := gins(ppc64.ASUB, nil, &dst)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 8
@@ -784,7 +776,7 @@
// The loop leaves R3 on the last zeroed dword
boff = 8
} else if q >= 4 {
- p = gins(ppc64.ASUB, nil, &dst)
+ p := gins(ppc64.ASUB, nil, &dst)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 8
f := (*gc.Node)(gc.Sysfunc("duffzero"))
@@ -797,6 +789,7 @@
// duffzero leaves R3 on the last zeroed dword
boff = 8
} else {
+ var p *obj.Prog
for t := uint64(0); t < q; t++ {
p = gins(ppc64.AMOVD, &r0, &dst)
p.To.Type = obj.TYPE_MEM
@@ -806,6 +799,7 @@
boff = 8 * q
}
+ var p *obj.Prog
for t := uint64(0); t < c; t++ {
p = gins(ppc64.AMOVB, &r0, &dst)
p.To.Type = obj.TYPE_MEM
diff --git a/src/cmd/9g/peep.go b/src/cmd/9g/peep.go
index f7c0a95..ecb8fb5 100644
--- a/src/cmd/9g/peep.go
+++ b/src/cmd/9g/peep.go
@@ -407,9 +407,8 @@
if !regtyp(v2) {
return false
}
- var r *gc.Flow
var info gc.ProgInfo
- for r = gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
+ for r := gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
if gc.Uniqs(r) == nil {
break
}
@@ -425,7 +424,32 @@
if info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightWrite {
if p.To.Type == v1.Type {
if p.To.Reg == v1.Reg {
- goto gotit
+ copysub(&p.To, v1, v2, 1)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
+ if p.From.Type == v2.Type {
+ fmt.Printf(" excise")
+ }
+ fmt.Printf("\n")
+ }
+
+ for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
+ p = r.Prog
+ copysub(&p.From, v1, v2, 1)
+ copysub1(p, v1, v2, 1)
+ copysub(&p.To, v1, v2, 1)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("%v\n", r.Prog)
+ }
+ }
+
+ t := int(int(v1.Reg))
+ v1.Reg = v2.Reg
+ v2.Reg = int16(t)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("%v last\n", r.Prog)
+ }
+ return true
}
}
}
@@ -439,34 +463,6 @@
}
return false
-
-gotit:
- copysub(&p.To, v1, v2, 1)
- if gc.Debug['P'] != 0 {
- fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
- if p.From.Type == v2.Type {
- fmt.Printf(" excise")
- }
- fmt.Printf("\n")
- }
-
- for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
- p = r.Prog
- copysub(&p.From, v1, v2, 1)
- copysub1(p, v1, v2, 1)
- copysub(&p.To, v1, v2, 1)
- if gc.Debug['P'] != 0 {
- fmt.Printf("%v\n", r.Prog)
- }
- }
-
- t := int(int(v1.Reg))
- v1.Reg = v2.Reg
- v2.Reg = int16(t)
- if gc.Debug['P'] != 0 {
- fmt.Printf("%v last\n", r.Prog)
- }
- return true
}
/*
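The peep.go hunk above moves the body of a label that is reachable through exactly one goto (the old 'gotit:' block) up to the goto site and deletes the label. A hypothetical before/after sketch of that inlining; the lookup logic is invented, only the control-flow shape matters.

package sketch // hypothetical package, for illustration only

import "fmt"

// lookupBefore: the interesting work lives under a label that only one
// goto can reach.
func lookupBefore(xs []int, want int) bool {
	for i := 0; i < len(xs); i++ {
		if xs[i] == want {
			goto found
		}
	}
	return false
found:
	fmt.Printf("found %d\n", want)
	return true
}

// lookupAfter: the label body is moved to its single goto site and the
// label is deleted.
func lookupAfter(xs []int, want int) bool {
	for i := 0; i < len(xs); i++ {
		if xs[i] == want {
			fmt.Printf("found %d\n", want)
			return true
		}
	}
	return false
}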
diff --git a/src/cmd/9l/asm.go b/src/cmd/9l/asm.go
index 176de6f..f0f5b56 100644
--- a/src/cmd/9l/asm.go
+++ b/src/cmd/9l/asm.go
@@ -39,17 +39,14 @@
import "cmd/internal/ld"
func needlib(name string) int {
- var p string
- var s *ld.LSym
-
if name[0] == '\x00' {
return 0
}
/* reuse hash code in symbol table */
- p = fmt.Sprintf(".dynlib.%s", name)
+ p := fmt.Sprintf(".dynlib.%s", name)
- s = ld.Linklookup(ld.Ctxt, p, 0)
+ s := ld.Linklookup(ld.Ctxt, p, 0)
if s.Type == 0 {
s.Type = 100 // avoid SDATA, etc.
@@ -163,16 +160,13 @@
// Construct a call stub in stub that calls symbol targ via its PLT
// entry.
func gencallstub(abicase int, stub *ld.LSym, targ *ld.LSym) {
- var plt *ld.LSym
- var r *ld.Reloc
-
if abicase != 1 {
// If we see R_PPC64_TOCSAVE or R_PPC64_REL24_NOTOC
// relocations, we'll need to implement cases 2 and 3.
log.Fatalf("gencallstub only implements case 1 calls")
}
- plt = ld.Linklookup(ld.Ctxt, ".plt", 0)
+ plt := ld.Linklookup(ld.Ctxt, ".plt", 0)
stub.Type = ld.STEXT
@@ -180,7 +174,7 @@
ld.Adduint32(ld.Ctxt, stub, 0xf8410018) // std r2,24(r1)
// Load the function pointer from the PLT.
- r = ld.Addrel(stub)
+ r := ld.Addrel(stub)
r.Off = int32(stub.Size)
r.Sym = plt
@@ -214,10 +208,7 @@
}
func adddynrel(s *ld.LSym, r *ld.Reloc) {
- var targ *ld.LSym
- var rela *ld.LSym
-
- targ = r.Sym
+ targ := r.Sym
ld.Ctxt.Cursym = s
switch r.Type {
@@ -251,7 +242,7 @@
// These happen in .toc sections
adddynsym(ld.Ctxt, targ)
- rela = ld.Linklookup(ld.Ctxt, ".rela", 0)
+ rela := ld.Linklookup(ld.Ctxt, ".rela", 0)
ld.Addaddrplus(ld.Ctxt, rela, s, int64(r.Off))
ld.Adduint64(ld.Ctxt, rela, ld.ELF64_R_INFO(uint32(targ.Dynid), ld.R_PPC64_ADDR64))
ld.Adduint64(ld.Ctxt, rela, uint64(r.Add))
@@ -325,9 +316,7 @@
}
func elfsetupplt() {
- var plt *ld.LSym
-
- plt = ld.Linklookup(ld.Ctxt, ".plt", 0)
+ plt := ld.Linklookup(ld.Ctxt, ".plt", 0)
if plt.Size == 0 {
// The dynamic linker stores the address of the
// dynamic resolver and the DSO identifier in the two
@@ -360,10 +349,6 @@
}
func archreloc(r *ld.Reloc, s *ld.LSym, val *int64) int {
- var o1 uint32
- var o2 uint32
- var t int64
-
if ld.Linkmode == ld.LinkExternal {
// TODO(minux): translate R_ADDRPOWER and R_CALLPOWER into standard ELF relocations.
// R_ADDRPOWER corresponds to R_PPC_ADDR16_HA and R_PPC_ADDR16_LO.
@@ -386,9 +371,9 @@
// The encoding of the immediate x<<16 + y,
// where x is the low 16 bits of the first instruction and y is the low 16
// bits of the second. Both x and y are signed (int16, not uint16).
- o1 = uint32(r.Add >> 32)
- o2 = uint32(r.Add)
- t = ld.Symaddr(r.Sym)
+ o1 := uint32(r.Add >> 32)
+ o2 := uint32(r.Add)
+ t := ld.Symaddr(r.Sym)
if t < 0 {
ld.Ctxt.Diag("relocation for %s is too big (>=2G): %d", s.Name, ld.Symaddr(r.Sym))
}
@@ -410,13 +395,14 @@
case ld.R_CALLPOWER:
// Bits 6 through 29 = (S + A - P) >> 2
+ var o1 uint32
if ld.Ctxt.Arch.ByteOrder == binary.BigEndian {
o1 = ld.Be32(s.P[r.Off:])
} else {
o1 = ld.Le32(s.P[r.Off:])
}
- t = ld.Symaddr(r.Sym) + r.Add - (s.Value + int64(r.Off))
+ t := ld.Symaddr(r.Sym) + r.Add - (s.Value + int64(r.Off))
if t&3 != 0 {
ld.Ctxt.Diag("relocation for %s+%d is not aligned: %d", r.Sym.Name, r.Off, t)
}
@@ -439,7 +425,6 @@
}
func archrelocvariant(r *ld.Reloc, s *ld.LSym, t int64) int64 {
- var o1 uint32
switch r.Variant & ld.RV_TYPE_MASK {
default:
ld.Diag("unexpected relocation variant %d", r.Variant)
@@ -452,6 +437,7 @@
if r.Variant&ld.RV_CHECK_OVERFLOW != 0 {
// Whether to check for signed or unsigned
// overflow depends on the instruction
+ var o1 uint32
if ld.Ctxt.Arch.ByteOrder == binary.BigEndian {
o1 = ld.Be32(s.P[r.Off-2:])
} else {
@@ -485,6 +471,7 @@
if r.Variant&ld.RV_CHECK_OVERFLOW != 0 {
// Whether to check for signed or unsigned
// overflow depends on the instruction
+ var o1 uint32
if ld.Ctxt.Arch.ByteOrder == binary.BigEndian {
o1 = ld.Be32(s.P[r.Off-2:])
} else {
@@ -508,6 +495,7 @@
return int64(int16(t))
case ld.RV_POWER_DS:
+ var o1 uint32
if ld.Ctxt.Arch.ByteOrder == binary.BigEndian {
o1 = uint32(ld.Be16(s.P[r.Off:]))
} else {
@@ -535,23 +523,18 @@
adddynsym(ctxt, s)
if ld.Iself {
- var plt *ld.LSym
- var rela *ld.LSym
- var glink *ld.LSym
- var r *ld.Reloc
-
- plt = ld.Linklookup(ctxt, ".plt", 0)
- rela = ld.Linklookup(ctxt, ".rela.plt", 0)
+ plt := ld.Linklookup(ctxt, ".plt", 0)
+ rela := ld.Linklookup(ctxt, ".rela.plt", 0)
if plt.Size == 0 {
elfsetupplt()
}
// Create the glink resolver if necessary
- glink = ensureglinkresolver()
+ glink := ensureglinkresolver()
// Write symbol resolver stub (just a branch to the
// glink resolver stub)
- r = ld.Addrel(glink)
+ r := ld.Addrel(glink)
r.Sym = glink
r.Off = int32(glink.Size)
@@ -579,11 +562,7 @@
// Generate the glink resolver stub if necessary and return the .glink section
func ensureglinkresolver() *ld.LSym {
- var glink *ld.LSym
- var s *ld.LSym
- var r *ld.Reloc
-
- glink = ld.Linklookup(ld.Ctxt, ".glink", 0)
+ glink := ld.Linklookup(ld.Ctxt, ".glink", 0)
if glink.Size != 0 {
return glink
}
@@ -610,7 +589,7 @@
ld.Adduint32(ld.Ctxt, glink, 0x7800f082) // srdi r0,r0,2
// r11 = address of the first byte of the PLT
- r = ld.Addrel(glink)
+ r := ld.Addrel(glink)
r.Off = int32(glink.Size)
r.Sym = ld.Linklookup(ld.Ctxt, ".plt", 0)
@@ -636,7 +615,7 @@
// Add DT_PPC64_GLINK .dynamic entry, which points to 32 bytes
// before the first symbol resolver stub.
- s = ld.Linklookup(ld.Ctxt, ".dynamic", 0)
+ s := ld.Linklookup(ld.Ctxt, ".dynamic", 0)
ld.Elfwritedynentsymplus(s, ld.DT_PPC64_GLINK, glink, glink.Size-32)
@@ -644,10 +623,6 @@
}
func adddynsym(ctxt *ld.Link, s *ld.LSym) {
- var d *ld.LSym
- var t int
- var name string
-
if s.Dynid >= 0 {
return
}
@@ -656,13 +631,13 @@
s.Dynid = int32(ld.Nelfsym)
ld.Nelfsym++
- d = ld.Linklookup(ctxt, ".dynsym", 0)
+ d := ld.Linklookup(ctxt, ".dynsym", 0)
- name = s.Extname
+ name := s.Extname
ld.Adduint32(ctxt, d, uint32(ld.Addstring(ld.Linklookup(ctxt, ".dynstr", 0), name)))
/* type */
- t = ld.STB_GLOBAL << 4
+ t := ld.STB_GLOBAL << 4
if s.Cgoexport != 0 && s.Type&ld.SMASK == ld.STEXT {
t |= ld.STT_FUNC
@@ -696,14 +671,12 @@
}
func adddynlib(lib string) {
- var s *ld.LSym
-
if needlib(lib) == 0 {
return
}
if ld.Iself {
- s = ld.Linklookup(ld.Ctxt, ".dynstr", 0)
+ s := ld.Linklookup(ld.Ctxt, ".dynstr", 0)
if s.Size == 0 {
ld.Addstring(s, "")
}
@@ -714,11 +687,6 @@
}
func asmb() {
- var symo uint32
- var sect *ld.Section
- var sym *ld.LSym
- var i int
-
if ld.Debug['v'] != 0 {
fmt.Fprintf(&ld.Bso, "%5.2f asmb\n", obj.Cputime())
}
@@ -728,7 +696,7 @@
ld.Asmbelfsetup()
}
- sect = ld.Segtext.Sect
+ sect := ld.Segtext.Sect
ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff))
ld.Codeblk(int64(sect.Vaddr), int64(sect.Length))
for sect = sect.Next; sect != nil; sect = sect.Next {
@@ -758,7 +726,7 @@
ld.Symsize = 0
ld.Lcsize = 0
- symo = 0
+ symo := uint32(0)
if ld.Debug['s'] == 0 {
// TODO: rationalize
if ld.Debug['v'] != 0 {
@@ -801,10 +769,10 @@
ld.Asmplan9sym()
ld.Cflush()
- sym = ld.Linklookup(ld.Ctxt, "pclntab", 0)
+ sym := ld.Linklookup(ld.Ctxt, "pclntab", 0)
if sym != nil {
ld.Lcsize = int32(len(sym.P))
- for i = 0; int32(i) < ld.Lcsize; i++ {
+ for i := 0; int32(i) < ld.Lcsize; i++ {
ld.Cput(uint8(sym.P[i]))
}
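Most of the 9l/asm.go hunks above are the declaration-tightening half of the cleanup: a block of 'var' declarations at the top of a function is dropped and each variable is declared with ':=' at its first assignment. A hypothetical sketch of the pattern; the naming logic is invented, though the ".dynlib." prefix mirrors needlib above.

package sketch // hypothetical package, for illustration only

import "fmt"

// describeBefore: C-style up-front declarations, assigned later.
func describeBefore(name string) string {
	var p string
	var n int

	p = fmt.Sprintf(".dynlib.%s", name)
	n = len(p)
	return fmt.Sprintf("%s (%d bytes)", p, n)
}

// describeAfter: each variable is declared where it gets its value.
func describeAfter(name string) string {
	p := fmt.Sprintf(".dynlib.%s", name)
	n := len(p)
	return fmt.Sprintf("%s (%d bytes)", p, n)
}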
diff --git a/src/cmd/internal/asm/asm.go b/src/cmd/internal/asm/asm.go
index 71c6b1a..151a8a5 100644
--- a/src/cmd/internal/asm/asm.go
+++ b/src/cmd/internal/asm/asm.go
@@ -163,11 +163,9 @@
}
func Main() {
- var p string
-
// Allow GOARCH=Thestring or GOARCH=Thestringsuffix,
// but not other values.
- p = obj.Getgoarch()
+ p := obj.Getgoarch()
if !strings.HasPrefix(p, Thestring) {
log.Fatalf("cannot use %cc with GOARCH=%s", Thechar, p)
@@ -219,8 +217,6 @@
}
func assemble(file string) int {
- var i int
-
if outfile == "" {
outfile = strings.TrimSuffix(filepath.Base(file), ".s") + "." + string(Thechar)
}
@@ -235,6 +231,7 @@
fmt.Fprintf(&obuf, "go object %s %s %s\n", obj.Getgoos(), obj.Getgoarch(), obj.Getgoversion())
fmt.Fprintf(&obuf, "!\n")
+ var i int
for Pass = 1; Pass <= 2; Pass++ {
pinit(file)
for i = 0; i < len(Dlist); i++ {
diff --git a/src/cmd/internal/asm/lexbody.go b/src/cmd/internal/asm/lexbody.go
index 14a82f8..7943cba 100644
--- a/src/cmd/internal/asm/lexbody.go
+++ b/src/cmd/internal/asm/lexbody.go
@@ -45,61 +45,50 @@
* common code for all the assemblers
*/
func pragpack() {
-
for getnsc() != '\n' {
-
}
}
func pragvararg() {
for getnsc() != '\n' {
-
}
}
func pragcgo(name string) {
for getnsc() != '\n' {
-
}
}
func pragfpround() {
for getnsc() != '\n' {
-
}
}
func pragtextflag() {
for getnsc() != '\n' {
-
}
}
func pragdataflag() {
for getnsc() != '\n' {
-
}
}
func pragprofile() {
for getnsc() != '\n' {
-
}
}
func pragincomplete() {
for getnsc() != '\n' {
-
}
}
func setinclude(p string) {
- var i int
-
if p == "" {
return
}
- for i = 1; i < len(include); i++ {
+ for i := 1; i < len(include); i++ {
if p == include[i] {
return
}
@@ -117,9 +106,7 @@
}
func pushio() {
- var i *Io
-
- i = iostack
+ i := iostack
if i == nil {
Yyerror("botch in pushio")
errorexit()
@@ -129,10 +116,9 @@
}
func newio() {
- var i *Io
var pushdepth int = 0
- i = iofree
+ i := iofree
if i == nil {
pushdepth++
if pushdepth > 1000 {
@@ -149,9 +135,7 @@
}
func newfile(s string, f *os.File) {
- var i *Io
-
- i = ionext
+ i := ionext
i.Link = iostack
iostack = i
i.F = f
@@ -175,16 +159,13 @@
}
func LabelLookup(s *Sym) *Sym {
- var p string
- var lab *Sym
-
if thetext == nil {
s.Labelname = s.Name
return s
}
- p = string(fmt.Sprintf("%s.%s", thetext.Name, s.Name))
- lab = Lookup(p)
+ p := string(fmt.Sprintf("%s.%s", thetext.Name, s.Name))
+ lab := Lookup(p)
lab.Labelname = s.Name
return lab
@@ -249,11 +230,10 @@
}
func Yylex(yylval *Yylval) int {
- var c int
var c1 int
var s *Sym
- c = peekc
+ c := peekc
if c != IGN {
peekc = IGN
goto l1
@@ -282,7 +262,48 @@
goto aloop
}
if isdigit(c) {
- goto tnum
+ yybuf.Reset()
+ if c != '0' {
+ goto dc
+ }
+ yybuf.WriteByte(byte(c))
+ c = GETC()
+ c1 = 3
+ if c == 'x' || c == 'X' {
+ c1 = 4
+ c = GETC()
+ } else if c < '0' || c > '7' {
+ goto dc
+ }
+ yylval.Lval = 0
+ for {
+ if c >= '0' && c <= '9' {
+ if c > '7' && c1 == 3 {
+ break
+ }
+ yylval.Lval = int64(uint64(yylval.Lval) << uint(c1))
+ yylval.Lval += int64(c) - '0'
+ c = GETC()
+ continue
+ }
+
+ if c1 == 3 {
+ break
+ }
+ if c >= 'A' && c <= 'F' {
+ c += 'a' - 'A'
+ }
+ if c >= 'a' && c <= 'f' {
+ yylval.Lval = int64(uint64(yylval.Lval) << uint(c1))
+ yylval.Lval += int64(c) - 'a' + 10
+ c = GETC()
+ continue
+ }
+
+ break
+ }
+
+ goto ncu
}
switch c {
case '\n':
@@ -458,50 +479,6 @@
yylval.Sval = last
return int(s.Type)
-tnum:
- yybuf.Reset()
- if c != '0' {
- goto dc
- }
- yybuf.WriteByte(byte(c))
- c = GETC()
- c1 = 3
- if c == 'x' || c == 'X' {
- c1 = 4
- c = GETC()
- } else if c < '0' || c > '7' {
- goto dc
- }
- yylval.Lval = 0
- for {
- if c >= '0' && c <= '9' {
- if c > '7' && c1 == 3 {
- break
- }
- yylval.Lval = int64(uint64(yylval.Lval) << uint(c1))
- yylval.Lval += int64(c) - '0'
- c = GETC()
- continue
- }
-
- if c1 == 3 {
- break
- }
- if c >= 'A' && c <= 'F' {
- c += 'a' - 'A'
- }
- if c >= 'a' && c <= 'f' {
- yylval.Lval = int64(uint64(yylval.Lval) << uint(c1))
- yylval.Lval += int64(c) - 'a' + 10
- c = GETC()
- continue
- }
-
- break
- }
-
- goto ncu
-
dc:
for {
if !(isdigit(c)) {
@@ -529,9 +506,7 @@
}
func getc() int {
- var c int
-
- c = peekc
+ c := peekc
if c != IGN {
peekc = IGN
if c == '\n' {
@@ -571,11 +546,10 @@
}
func escchar(e int) int {
- var c int
var l int
loop:
- c = getc()
+ c := getc()
if c == '\n' {
Yyerror("newline in string")
return EOF
@@ -643,11 +617,10 @@
}
func filbuf() int {
- var i *Io
var n int
loop:
- i = iostack
+ i := iostack
if i == nil {
return EOF
}
@@ -705,11 +678,10 @@
}
func GETC() int {
- var c int
if len(fi.P) == 0 {
return filbuf()
}
- c = int(fi.P[0])
+ c := int(fi.P[0])
fi.P = fi.P[1:]
return c
}
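A second declaration pattern appears in the assembler hunks: when a variable is needed only inside one branch or loop, its 'var' moves into that block instead of sitting at the top of the function. A hypothetical sketch; the squaring work stands in for whatever the branch actually computes.

package sketch // hypothetical package, for illustration only

// sumSquaresBefore: tmp is visible to the whole function even though
// only the even branch uses it.
func sumSquaresBefore(xs []int) int {
	var total int
	var tmp int
	for _, x := range xs {
		if x%2 == 0 {
			tmp = x * x
			total += tmp
		}
	}
	return total
}

// sumSquaresAfter: tmp is declared inside the only branch that needs it.
func sumSquaresAfter(xs []int) int {
	total := 0
	for _, x := range xs {
		if x%2 == 0 {
			tmp := x * x
			total += tmp
		}
	}
	return total
}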
diff --git a/src/cmd/internal/asm/macbody.go b/src/cmd/internal/asm/macbody.go
index 337692a..c488ea1 100644
--- a/src/cmd/internal/asm/macbody.go
+++ b/src/cmd/internal/asm/macbody.go
@@ -43,14 +43,11 @@
)
func getnsn() int32 {
- var n int32
- var c int
-
- c = getnsc()
+ c := getnsc()
if c < '0' || c > '9' {
return -1
}
- n = 0
+ n := int32(0)
for c >= '0' && c <= '9' {
n = n*10 + int32(c) - '0'
c = getc()
@@ -61,9 +58,7 @@
}
func getsym() *Sym {
- var c int
-
- c = getnsc()
+ c := getnsc()
if !isalpha(c) && c != '_' && c < 0x80 {
unget(c)
return nil
@@ -84,15 +79,12 @@
}
func getsymdots(dots *int) *Sym {
- var c int
- var s *Sym
-
- s = getsym()
+ s := getsym()
if s != nil {
return s
}
- c = getnsc()
+ c := getnsc()
if c != '.' {
unget(c)
return nil
@@ -153,10 +145,9 @@
func dodefine(cp string) {
var s *Sym
- var p string
if i := strings.Index(cp, "="); i >= 0 {
- p = cp[i+1:]
+ p := cp[i+1:]
cp = cp[:i]
s = Lookup(cp)
s.Macro = &Macro{Text: p}
@@ -186,14 +177,11 @@
}
func domacro() {
- var i int
- var s *Sym
-
- s = getsym()
+ s := getsym()
if s == nil {
s = Lookup("endif")
}
- for i = 0; i < len(mactab); i++ {
+ for i := 0; i < len(mactab); i++ {
if s.Name == mactab[i].Macname {
if mactab[i].Macf != nil {
mactab[i].Macf()
@@ -209,9 +197,7 @@
}
func macund() {
- var s *Sym
-
- s = getsym()
+ s := getsym()
macend()
if s == nil {
Yyerror("syntax in #undef")
@@ -226,8 +212,6 @@
)
func macdef() {
- var s *Sym
- var a *Sym
var args [NARG]string
var n int
var i int
@@ -236,7 +220,7 @@
var ischr int
var base bytes.Buffer
- s = getsym()
+ s := getsym()
if s == nil {
goto bad
}
@@ -251,6 +235,8 @@
c = getnsc()
if c != ')' {
unget(c)
+ var a *Sym
+ var c int
for {
a = getsymdots(&dots)
if a == nil {
@@ -316,7 +302,6 @@
ischr = 0
}
} else {
-
if c == '"' || c == '\'' {
base.WriteByte(byte(c))
ischr = c
@@ -417,20 +402,12 @@
if s == nil {
Yyerror("syntax in #define")
} else {
-
Yyerror("syntax in #define: %s", s.Name)
}
macend()
}
func macexpand(s *Sym) []byte {
- var l int
- var c int
- var arg []string
- var out bytes.Buffer
- var buf bytes.Buffer
- var cp string
-
if s.Macro.Narg == 0 {
if debug['m'] != 0 {
fmt.Printf("#expand %s %s\n", s.Name, s.Macro.Text)
@@ -441,14 +418,19 @@
nargs := s.Macro.Narg - 1
dots := s.Macro.Dots
- c = getnsc()
+ c := getnsc()
+ var arg []string
+ var cp string
+ var out bytes.Buffer
if c != '(' {
goto bad
}
c = getc()
if c != ')' {
unget(c)
- l = 0
+ l := 0
+ var buf bytes.Buffer
+ var c int
for {
c = getc()
if c == '"' {
@@ -595,16 +577,14 @@
}
func macinc() {
- var c0 int
var c int
- var i int
var buf bytes.Buffer
var f *os.File
var hp string
var str string
var symb string
- c0 = getnsc()
+ c0 := getnsc()
if c0 != '"' {
c = c0
if c0 != '<' {
@@ -630,7 +610,7 @@
goto bad
}
- for i = 0; i < len(include); i++ {
+ for i := 0; i < len(include); i++ {
if i == 0 && c0 == '>' {
continue
}
@@ -663,13 +643,11 @@
}
func maclin() {
- var c int
- var n int32
var buf bytes.Buffer
var symb string
- n = getnsn()
- c = getc()
+ n := getnsn()
+ c := getc()
if n < 0 {
goto bad
}
@@ -783,16 +761,43 @@
}
func macprag() {
- var s *Sym
- var c0 int
var c int
- var buf bytes.Buffer
- var symb string
- s = getsym()
+ s := getsym()
if s != nil && s.Name == "lib" {
- goto praglib
+ c0 := getnsc()
+ if c0 != '"' {
+ c = c0
+ if c0 != '<' {
+ goto bad
+ }
+ c0 = '>'
+ }
+
+ var buf bytes.Buffer
+ for {
+ c = getc()
+ if c == c0 {
+ break
+ }
+ if c == '\n' {
+ goto bad
+ }
+ buf.WriteByte(byte(c))
+ }
+ symb := buf.String()
+
+ c = getcom()
+ if c != '\n' {
+ goto bad
+ }
+
+ /*
+ * put pragma-line in as a funny history
+ */
+ obj.Linklinehist(Ctxt, int(Lineno), symb, -1)
+ return
}
if s != nil && s.Name == "pack" {
pragpack()
@@ -830,43 +835,9 @@
}
for getnsc() != '\n' {
-
}
return
-praglib:
- c0 = getnsc()
- if c0 != '"' {
- c = c0
- if c0 != '<' {
- goto bad
- }
- c0 = '>'
- }
-
- for {
- c = getc()
- if c == c0 {
- break
- }
- if c == '\n' {
- goto bad
- }
- buf.WriteByte(byte(c))
- }
- symb = buf.String()
-
- c = getcom()
- if c != '\n' {
- goto bad
- }
-
- /*
- * put pragma-line in as a funny history
- */
- obj.Linklinehist(Ctxt, int(Lineno), symb, -1)
- return
-
bad:
unget(c)
Yyerror("syntax in #pragma lib")
diff --git a/src/cmd/internal/gc/const.go b/src/cmd/internal/gc/const.go
index 43c8809..4730d7d 100644
--- a/src/cmd/internal/gc/const.go
+++ b/src/cmd/internal/gc/const.go
@@ -582,7 +582,96 @@
var v Val
var norig *Node
if nr == nil {
- goto unary
+ // copy numeric value to avoid modifying
+ // nl, in case someone still refers to it (e.g. iota).
+ v = nl.Val
+
+ if wl == TIDEAL {
+ v = copyval(v)
+ }
+
+ switch uint32(n.Op)<<16 | uint32(v.Ctype) {
+ default:
+ if n.Diag == 0 {
+ Yyerror("illegal constant expression %v %v", Oconv(int(n.Op), 0), Tconv(nl.Type, 0))
+ n.Diag = 1
+ }
+
+ return
+
+ case OCONV<<16 | CTNIL,
+ OARRAYBYTESTR<<16 | CTNIL:
+ if n.Type.Etype == TSTRING {
+ v = tostr(v)
+ nl.Type = n.Type
+ break
+ }
+ fallthrough
+
+ // fall through
+ case OCONV<<16 | CTINT,
+ OCONV<<16 | CTRUNE,
+ OCONV<<16 | CTFLT,
+ OCONV<<16 | CTSTR:
+ convlit1(&nl, n.Type, true)
+
+ v = nl.Val
+
+ case OPLUS<<16 | CTINT,
+ OPLUS<<16 | CTRUNE:
+ break
+
+ case OMINUS<<16 | CTINT,
+ OMINUS<<16 | CTRUNE:
+ mpnegfix(v.U.Xval)
+
+ case OCOM<<16 | CTINT,
+ OCOM<<16 | CTRUNE:
+ et := Txxx
+ if nl.Type != nil {
+ et = int(nl.Type.Etype)
+ }
+
+ // calculate the mask in b
+ // result will be (a ^ mask)
+ var b Mpint
+ switch et {
+ // signed guys change sign
+ default:
+ Mpmovecfix(&b, -1)
+
+ // unsigned guys invert their bits
+ case TUINT8,
+ TUINT16,
+ TUINT32,
+ TUINT64,
+ TUINT,
+ TUINTPTR:
+ mpmovefixfix(&b, Maxintval[et])
+ }
+
+ mpxorfixfix(v.U.Xval, &b)
+
+ case OPLUS<<16 | CTFLT:
+ break
+
+ case OMINUS<<16 | CTFLT:
+ mpnegflt(v.U.Fval)
+
+ case OPLUS<<16 | CTCPLX:
+ break
+
+ case OMINUS<<16 | CTCPLX:
+ mpnegflt(&v.U.Cval.Real)
+ mpnegflt(&v.U.Cval.Imag)
+
+ case ONOT<<16 | CTBOOL:
+ if v.U.Bval == 0 {
+ goto settrue
+ }
+ goto setfalse
+ }
+ goto ret
}
if nr.Type == nil {
return
@@ -944,97 +1033,6 @@
goto ret
- // copy numeric value to avoid modifying
- // nl, in case someone still refers to it (e.g. iota).
-unary:
- v = nl.Val
-
- if wl == TIDEAL {
- v = copyval(v)
- }
-
- switch uint32(n.Op)<<16 | uint32(v.Ctype) {
- default:
- if n.Diag == 0 {
- Yyerror("illegal constant expression %v %v", Oconv(int(n.Op), 0), Tconv(nl.Type, 0))
- n.Diag = 1
- }
-
- return
-
- case OCONV<<16 | CTNIL,
- OARRAYBYTESTR<<16 | CTNIL:
- if n.Type.Etype == TSTRING {
- v = tostr(v)
- nl.Type = n.Type
- break
- }
- fallthrough
-
- // fall through
- case OCONV<<16 | CTINT,
- OCONV<<16 | CTRUNE,
- OCONV<<16 | CTFLT,
- OCONV<<16 | CTSTR:
- convlit1(&nl, n.Type, true)
-
- v = nl.Val
-
- case OPLUS<<16 | CTINT,
- OPLUS<<16 | CTRUNE:
- break
-
- case OMINUS<<16 | CTINT,
- OMINUS<<16 | CTRUNE:
- mpnegfix(v.U.Xval)
-
- case OCOM<<16 | CTINT,
- OCOM<<16 | CTRUNE:
- et := Txxx
- if nl.Type != nil {
- et = int(nl.Type.Etype)
- }
-
- // calculate the mask in b
- // result will be (a ^ mask)
- var b Mpint
- switch et {
- // signed guys change sign
- default:
- Mpmovecfix(&b, -1)
-
- // unsigned guys invert their bits
- case TUINT8,
- TUINT16,
- TUINT32,
- TUINT64,
- TUINT,
- TUINTPTR:
- mpmovefixfix(&b, Maxintval[et])
- }
-
- mpxorfixfix(v.U.Xval, &b)
-
- case OPLUS<<16 | CTFLT:
- break
-
- case OMINUS<<16 | CTFLT:
- mpnegflt(v.U.Fval)
-
- case OPLUS<<16 | CTCPLX:
- break
-
- case OMINUS<<16 | CTCPLX:
- mpnegflt(&v.U.Cval.Real)
- mpnegflt(&v.U.Cval.Imag)
-
- case ONOT<<16 | CTBOOL:
- if v.U.Bval == 0 {
- goto settrue
- }
- goto setfalse
- }
-
ret:
norig = saveorig(n)
*n = *nl
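The inlined unary block above dispatches on uint32(n.Op)<<16 | uint32(v.Ctype), packing the operator and the constant kind into a single switch key. A small, hypothetical sketch of that packing; the operator and kind constants are invented.

package sketch // hypothetical package, for illustration only

// Invented enumerations standing in for Op and Ctype.
const (
	opMinus = 1 + iota
	opNot
)

const (
	ctInt = 1 + iota
	ctBool
)

// evalUnary dispatches on the (op, kind) pair with one switch by packing
// both small values into a uint32, as the compiler hunk above does.
func evalUnary(op, kind int, i int64, b bool) (int64, bool) {
	switch uint32(op)<<16 | uint32(kind) {
	case opMinus<<16 | ctInt:
		return -i, b
	case opNot<<16 | ctBool:
		return i, !b
	default:
		return i, b
	}
}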
diff --git a/src/cmd/internal/gc/cplx.go b/src/cmd/internal/gc/cplx.go
index c07ba34..02b6c79 100644
--- a/src/cmd/internal/gc/cplx.go
+++ b/src/cmd/internal/gc/cplx.go
@@ -255,10 +255,12 @@
}
if n.Op == OREAL || n.Op == OIMAG {
- goto yes
+ //dump("\ncomplex-yes", n);
+ return true
}
- goto no
+ //dump("\ncomplex-no", n);
+ return false
maybe:
switch n.Op {
@@ -270,23 +272,20 @@
OCOMPLEX,
OREAL,
OIMAG:
- goto yes
+ //dump("\ncomplex-yes", n);
+ return true
case ODOT,
ODOTPTR,
OINDEX,
OIND,
ONAME:
- goto yes
+ //dump("\ncomplex-yes", n);
+ return true
}
//dump("\ncomplex-no", n);
-no:
return false
-
- //dump("\ncomplex-yes", n);
-yes:
- return true
}
func Complexmove(f *Node, t *Node) {
diff --git a/src/cmd/internal/gc/lex.go b/src/cmd/internal/gc/lex.go
index 4e5f011..c03222b 100644
--- a/src/cmd/internal/gc/lex.go
+++ b/src/cmd/internal/gc/lex.go
@@ -844,7 +844,6 @@
var _yylex_lstk *Loophack
func _yylex(yylval *yySymType) int32 {
- var c int
var c1 int
var escflag int
var v int64
@@ -857,7 +856,7 @@
prevlineno = lineno
l0:
- c = getc()
+ c := getc()
if yy_isspace(c) {
if c == '\n' && curio.nlsemi != 0 {
ungetc(c)
@@ -887,7 +886,82 @@
}
if yy_isdigit(c) {
- goto tnum
+ cp = &lexbuf
+ cp.Reset()
+ if c != '0' {
+ for {
+ cp.WriteByte(byte(c))
+ c = getc()
+ if yy_isdigit(c) {
+ continue
+ }
+ if c == '.' {
+ goto casedot
+ }
+ if c == 'e' || c == 'E' || c == 'p' || c == 'P' {
+ goto caseep
+ }
+ if c == 'i' {
+ goto casei
+ }
+ goto ncu
+ }
+ }
+
+ cp.WriteByte(byte(c))
+ c = getc()
+ if c == 'x' || c == 'X' {
+ for {
+ cp.WriteByte(byte(c))
+ c = getc()
+ if yy_isdigit(c) {
+ continue
+ }
+ if c >= 'a' && c <= 'f' {
+ continue
+ }
+ if c >= 'A' && c <= 'F' {
+ continue
+ }
+ if lexbuf.Len() == 2 {
+ Yyerror("malformed hex constant")
+ }
+ if c == 'p' {
+ goto caseep
+ }
+ goto ncu
+ }
+ }
+
+ if c == 'p' { // 0p begins floating point zero
+ goto caseep
+ }
+
+ c1 = 0
+ for {
+ if !yy_isdigit(c) {
+ break
+ }
+ if c < '0' || c > '7' {
+ c1 = 1 // not octal
+ }
+ cp.WriteByte(byte(c))
+ c = getc()
+ }
+
+ if c == '.' {
+ goto casedot
+ }
+ if c == 'e' || c == 'E' {
+ goto caseep
+ }
+ if c == 'i' {
+ goto casei
+ }
+ if c1 != 0 {
+ Yyerror("malformed octal constant")
+ }
+ goto ncu
}
switch c {
@@ -1321,86 +1395,6 @@
yylval.sym = s
return int32(s.Lexical)
-tnum:
- cp = &lexbuf
- cp.Reset()
- if c != '0' {
- for {
- cp.WriteByte(byte(c))
- c = getc()
- if yy_isdigit(c) {
- continue
- }
- goto dc
- }
- }
-
- cp.WriteByte(byte(c))
- c = getc()
- if c == 'x' || c == 'X' {
- for {
- cp.WriteByte(byte(c))
- c = getc()
- if yy_isdigit(c) {
- continue
- }
- if c >= 'a' && c <= 'f' {
- continue
- }
- if c >= 'A' && c <= 'F' {
- continue
- }
- if lexbuf.Len() == 2 {
- Yyerror("malformed hex constant")
- }
- if c == 'p' {
- goto caseep
- }
- goto ncu
- }
- }
-
- if c == 'p' { // 0p begins floating point zero
- goto caseep
- }
-
- c1 = 0
- for {
- if !yy_isdigit(c) {
- break
- }
- if c < '0' || c > '7' {
- c1 = 1 // not octal
- }
- cp.WriteByte(byte(c))
- c = getc()
- }
-
- if c == '.' {
- goto casedot
- }
- if c == 'e' || c == 'E' {
- goto caseep
- }
- if c == 'i' {
- goto casei
- }
- if c1 != 0 {
- Yyerror("malformed octal constant")
- }
- goto ncu
-
-dc:
- if c == '.' {
- goto casedot
- }
- if c == 'e' || c == 'E' || c == 'p' || c == 'P' {
- goto caseep
- }
- if c == 'i' {
- goto casei
- }
-
ncu:
cp = nil
ungetc(c)
@@ -1523,31 +1517,90 @@
*/
func getlinepragma() int {
var cmd, verb, name string
- var n int
- var cp *bytes.Buffer
- var linep int
c := int(getr())
if c == 'g' {
- goto go_
+ cp := &lexbuf
+ cp.Reset()
+ cp.WriteByte('g') // already read
+ for {
+ c = int(getr())
+ if c == EOF || c >= utf8.RuneSelf {
+ return c
+ }
+ if c == '\n' {
+ break
+ }
+ cp.WriteByte(byte(c))
+ }
+
+ cp = nil
+
+ if strings.HasPrefix(lexbuf.String(), "go:cgo_") {
+ pragcgo(lexbuf.String())
+ }
+
+ cmd = lexbuf.String()
+ verb = cmd
+ if i := strings.Index(verb, " "); i >= 0 {
+ verb = verb[:i]
+ }
+
+ if verb == "go:linkname" {
+ if imported_unsafe == 0 {
+ Yyerror("//go:linkname only allowed in Go files that import \"unsafe\"")
+ }
+ f := strings.Fields(cmd)
+ if len(f) != 3 {
+ Yyerror("usage: //go:linkname localname linkname")
+ return c
+ }
+
+ Lookup(f[1]).Linkname = f[2]
+ return c
+ }
+
+ if verb == "go:nointerface" && obj.Fieldtrack_enabled != 0 {
+ nointerface = true
+ return c
+ }
+
+ if verb == "go:noescape" {
+ noescape = true
+ return c
+ }
+
+ if verb == "go:nosplit" {
+ nosplit = true
+ return c
+ }
+
+ if verb == "go:nowritebarrier" {
+ if compiling_runtime == 0 {
+ Yyerror("//go:nowritebarrier only allowed in runtime")
+ }
+ nowritebarrier = true
+ return c
+ }
+ return c
}
if c != 'l' {
- goto out
+ return c
}
for i := 1; i < 5; i++ {
c = int(getr())
if c != int("line "[i]) {
- goto out
+ return c
}
}
- cp = &lexbuf
+ cp := &lexbuf
cp.Reset()
- linep = 0
+ linep := 0
for {
c = int(getr())
if c == EOF {
- goto out
+ return c
}
if c == '\n' {
break
@@ -1564,9 +1617,9 @@
cp = nil
if linep == 0 {
- goto out
+ return c
}
- n = 0
+ n := 0
for _, c := range lexbuf.String()[linep:] {
if c < '0' || c > '9' {
goto out
@@ -1579,7 +1632,7 @@
}
if n <= 0 {
- goto out
+ return c
}
// try to avoid allocating file name over and over
@@ -1587,76 +1640,12 @@
for h := Ctxt.Hist; h != nil; h = h.Link {
if h.Name != "" && h.Name == name {
linehist(h.Name, int32(n), 0)
- goto out
+ return c
}
}
linehist(name, int32(n), 0)
- goto out
-
-go_:
- cp = &lexbuf
- cp.Reset()
- cp.WriteByte('g') // already read
- for {
- c = int(getr())
- if c == EOF || c >= utf8.RuneSelf {
- goto out
- }
- if c == '\n' {
- break
- }
- cp.WriteByte(byte(c))
- }
-
- cp = nil
-
- if strings.HasPrefix(lexbuf.String(), "go:cgo_") {
- pragcgo(lexbuf.String())
- }
-
- cmd = lexbuf.String()
- verb = cmd
- if i := strings.Index(verb, " "); i >= 0 {
- verb = verb[:i]
- }
-
- if verb == "go:linkname" {
- if imported_unsafe == 0 {
- Yyerror("//go:linkname only allowed in Go files that import \"unsafe\"")
- }
- f := strings.Fields(cmd)
- if len(f) != 3 {
- Yyerror("usage: //go:linkname localname linkname")
- goto out
- }
-
- Lookup(f[1]).Linkname = f[2]
- goto out
- }
-
- if verb == "go:nointerface" && obj.Fieldtrack_enabled != 0 {
- nointerface = true
- goto out
- }
-
- if verb == "go:noescape" {
- noescape = true
- goto out
- }
-
- if verb == "go:nosplit" {
- nosplit = true
- goto out
- }
-
- if verb == "go:nowritebarrier" {
- if compiling_runtime == 0 {
- Yyerror("//go:nowritebarrier only allowed in runtime")
- }
- nowritebarrier = true
- goto out
- }
+ return c
out:
return c
@@ -1708,14 +1697,12 @@
var p string
p, ok = getquoted(&q)
if !ok {
- goto err1
+ Yyerror("usage: //go:cgo_dynamic_linker \"path\"")
+ return
}
pragcgobuf += fmt.Sprintf("cgo_dynamic_linker %v\n", plan9quote(p))
- goto out
+ return
- err1:
- Yyerror("usage: //go:cgo_dynamic_linker \"path\"")
- goto out
}
if verb == "dynexport" {
@@ -1729,7 +1716,7 @@
}
if !more(&q) {
pragcgobuf += fmt.Sprintf("%s %v\n", verb, plan9quote(local))
- goto out
+ return
}
remote = getimpsym(&q)
@@ -1737,11 +1724,11 @@
goto err2
}
pragcgobuf += fmt.Sprintf("%s %v %v\n", verb, plan9quote(local), plan9quote(remote))
- goto out
+ return
err2:
Yyerror("usage: //go:%s local [remote]", verb)
- goto out
+ return
}
if verb == "cgo_import_dynamic" || verb == "dynimport" {
@@ -1754,7 +1741,7 @@
}
if !more(&q) {
pragcgobuf += fmt.Sprintf("cgo_import_dynamic %v\n", plan9quote(local))
- goto out
+ return
}
remote = getimpsym(&q)
@@ -1763,7 +1750,7 @@
}
if !more(&q) {
pragcgobuf += fmt.Sprintf("cgo_import_dynamic %v %v\n", plan9quote(local), plan9quote(remote))
- goto out
+ return
}
p, ok = getquoted(&q)
@@ -1771,24 +1758,22 @@
goto err3
}
pragcgobuf += fmt.Sprintf("cgo_import_dynamic %v %v %v\n", plan9quote(local), plan9quote(remote), plan9quote(p))
- goto out
+ return
err3:
Yyerror("usage: //go:cgo_import_dynamic local [remote [\"library\"]]")
- goto out
+ return
}
if verb == "cgo_import_static" {
local := getimpsym(&q)
if local == "" || more(&q) {
- goto err4
+ Yyerror("usage: //go:cgo_import_static local")
+ return
}
pragcgobuf += fmt.Sprintf("cgo_import_static %v\n", plan9quote(local))
- goto out
+ return
- err4:
- Yyerror("usage: //go:cgo_import_static local")
- goto out
}
if verb == "cgo_ldflag" {
@@ -1796,17 +1781,13 @@
var p string
p, ok = getquoted(&q)
if !ok {
- goto err5
+ Yyerror("usage: //go:cgo_ldflag \"arg\"")
+ return
}
pragcgobuf += fmt.Sprintf("cgo_ldflag %v\n", plan9quote(p))
- goto out
+ return
- err5:
- Yyerror("usage: //go:cgo_ldflag \"arg\"")
- goto out
}
-
-out:
}
type yy struct{}
@@ -1983,7 +1964,6 @@
u := 0
c = int(getr())
- var l int64
var i int
switch c {
case 'x':
@@ -2010,7 +1990,24 @@
'6',
'7':
*escflg = 1 // it's a byte
- goto oct
+ l := int64(c) - '0'
+ for i := 2; i > 0; i-- {
+ c = getc()
+ if c >= '0' && c <= '7' {
+ l = l*8 + int64(c) - '0'
+ continue
+ }
+
+ Yyerror("non-octal character in escape sequence: %c", c)
+ ungetc(c)
+ }
+
+ if l > 255 {
+ Yyerror("octal escape value > 255: %d", l)
+ }
+
+ *val = l
+ return false
case 'a':
c = '\a'
@@ -2039,7 +2036,7 @@
return false
hex:
- l = 0
+ l := int64(0)
for ; i > 0; i-- {
c = getc()
if c >= '0' && c <= '9' {
@@ -2069,26 +2066,6 @@
*val = l
return false
-
-oct:
- l = int64(c) - '0'
- for i := 2; i > 0; i-- {
- c = getc()
- if c >= '0' && c <= '7' {
- l = l*8 + int64(c) - '0'
- continue
- }
-
- Yyerror("non-octal character in escape sequence: %c", c)
- ungetc(c)
- }
-
- if l > 255 {
- Yyerror("octal escape value > 255: %d", l)
- }
-
- *val = l
- return false
}
var syms = []struct {
diff --git a/src/cmd/internal/gc/mparith1.go b/src/cmd/internal/gc/mparith1.go
index 104992f..d728d6a 100644
--- a/src/cmd/internal/gc/mparith1.go
+++ b/src/cmd/internal/gc/mparith1.go
@@ -451,7 +451,6 @@
//
func mpatofix(a *Mpint, as string) {
var c int
- var s0 string
s := as
f := 0
@@ -471,7 +470,43 @@
fallthrough
case '0':
- goto oct
+ var c int
+ c, s = intstarstringplusplus(s)
+ if c == 'x' || c == 'X' {
+ s0 := s
+ var c int
+ c, _ = intstarstringplusplus(s)
+ for c != 0 {
+ if (c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F') {
+ s = s[1:]
+ c, _ = intstarstringplusplus(s)
+ continue
+ }
+
+ Yyerror("malformed hex constant: %s", as)
+ goto bad
+ }
+
+ mphextofix(a, s0)
+ if a.Ovf != 0 {
+ Yyerror("constant too large: %s", as)
+ goto bad
+ }
+ goto out
+ }
+ for c != 0 {
+ if c >= '0' && c <= '7' {
+ mpmulcfix(a, 8)
+ mpaddcfix(a, int64(c)-'0')
+ c, s = intstarstringplusplus(s)
+ continue
+ }
+
+ Yyerror("malformed octal constant: %s", as)
+ goto bad
+ }
+
+ goto out
}
for c != 0 {
@@ -488,45 +523,6 @@
goto out
-oct:
- c, s = intstarstringplusplus(s)
- if c == 'x' || c == 'X' {
- goto hex
- }
- for c != 0 {
- if c >= '0' && c <= '7' {
- mpmulcfix(a, 8)
- mpaddcfix(a, int64(c)-'0')
- c, s = intstarstringplusplus(s)
- continue
- }
-
- Yyerror("malformed octal constant: %s", as)
- goto bad
- }
-
- goto out
-
-hex:
- s0 = s
- c, _ = intstarstringplusplus(s)
- for c != 0 {
- if (c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F') {
- s = s[1:]
- c, _ = intstarstringplusplus(s)
- continue
- }
-
- Yyerror("malformed hex constant: %s", as)
- goto bad
- }
-
- mphextofix(a, s0)
- if a.Ovf != 0 {
- Yyerror("constant too large: %s", as)
- goto bad
- }
-
out:
if f != 0 {
mpnegfix(a)
diff --git a/src/cmd/internal/gc/mparith2.go b/src/cmd/internal/gc/mparith2.go
index c9c9230..7e12913 100644
--- a/src/cmd/internal/gc/mparith2.go
+++ b/src/cmd/internal/gc/mparith2.go
@@ -187,12 +187,44 @@
}
c := 0
- var x int
if a.Neg != b.Neg {
- goto sub
+ // perform a-b
+ switch mpcmp(a, b) {
+ case 0:
+ Mpmovecfix(a, 0)
+
+ case 1:
+ var x int
+ for i := 0; i < Mpprec; i++ {
+ x = a.A[i] - b.A[i] - c
+ c = 0
+ if x < 0 {
+ x += Mpbase
+ c = 1
+ }
+
+ a.A[i] = x
+ }
+
+ case -1:
+ a.Neg ^= 1
+ var x int
+ for i := 0; i < Mpprec; i++ {
+ x = b.A[i] - a.A[i] - c
+ c = 0
+ if x < 0 {
+ x += Mpbase
+ c = 1
+ }
+
+ a.A[i] = x
+ }
+ }
+ return
}
// perform a+b
+ var x int
for i := 0; i < Mpprec; i++ {
x = a.A[i] + b.A[i] + c
c = 0
@@ -210,40 +242,6 @@
}
return
-
- // perform a-b
-sub:
- switch mpcmp(a, b) {
- case 0:
- Mpmovecfix(a, 0)
-
- case 1:
- var x int
- for i := 0; i < Mpprec; i++ {
- x = a.A[i] - b.A[i] - c
- c = 0
- if x < 0 {
- x += Mpbase
- c = 1
- }
-
- a.A[i] = x
- }
-
- case -1:
- a.Neg ^= 1
- var x int
- for i := 0; i < Mpprec; i++ {
- x = b.A[i] - a.A[i] - c
- c = 0
- if x < 0 {
- x += Mpbase
- c = 1
- }
-
- a.A[i] = x
- }
- }
}
func mpmulfixfix(a *Mpint, b *Mpint) {
diff --git a/src/cmd/internal/gc/plive.go b/src/cmd/internal/gc/plive.go
index 86e7ea0..e6211e1 100644
--- a/src/cmd/internal/gc/plive.go
+++ b/src/cmd/internal/gc/plive.go
@@ -657,7 +657,7 @@
PPARAMOUT:
pos, ok := to.Node.(*Node).Opt.(int32) // index in vars
if !ok {
- goto Next1
+ return
}
if pos >= int32(len(vars)) || vars[pos] != to.Node {
Fatal("bad bookkeeping in liveness %v %d", Nconv(to.Node.(*Node), 0), pos)
@@ -690,8 +690,6 @@
}
}
}
-
-Next1:
}
// Constructs a new liveness structure used to hold the global state of the
diff --git a/src/cmd/internal/gc/reg.go b/src/cmd/internal/gc/reg.go
index 5f9586b..e051c25 100644
--- a/src/cmd/internal/gc/reg.go
+++ b/src/cmd/internal/gc/reg.go
@@ -185,32 +185,22 @@
}
func mkvar(f *Flow, a *obj.Addr) Bits {
- var v *Var
- var i int
- var n int
- var et int
- var flag int
- var w int64
- var o int64
- var bit Bits
- var node *Node
- var r *Reg
-
/*
* mark registers used
*/
if a.Type == obj.TYPE_NONE {
- goto none
+ return zbits
}
- r = f.Data.(*Reg)
+ r := f.Data.(*Reg)
r.use1.b[0] |= Thearch.Doregbits(int(a.Index)) // TODO: Use RtoB
+ var n int
switch a.Type {
default:
regu := Thearch.Doregbits(int(a.Reg)) | Thearch.RtoB(int(a.Reg)) // TODO: Use RtoB
if regu == 0 {
- goto none
+ return zbits
}
bit := zbits
bit.b[0] = regu
@@ -227,7 +217,7 @@
setaddrs(bit)
a.Type = obj.TYPE_ADDR
Ostats.Naddr++
- goto none
+ return zbits
memcase:
fallthrough
@@ -243,7 +233,7 @@
*/
switch a.Name {
default:
- goto none
+ return zbits
case obj.NAME_EXTERN,
obj.NAME_STATIC,
@@ -253,25 +243,27 @@
}
}
+ var node *Node
node, _ = a.Node.(*Node)
if node == nil || node.Op != ONAME || node.Orig == nil {
- goto none
+ return zbits
}
node = node.Orig
if node.Orig != node {
Fatal("%v: bad node", Ctxt.Dconv(a))
}
if node.Sym == nil || node.Sym.Name[0] == '.' {
- goto none
+ return zbits
}
- et = int(a.Etype)
- o = a.Offset
- w = a.Width
+ et := int(a.Etype)
+ o := a.Offset
+ w := a.Width
if w < 0 {
Fatal("bad width %d for %v", w, Ctxt.Dconv(a))
}
- flag = 0
+ flag := 0
+ var v *Var
for i := 0; i < nvar; i++ {
v = &var_[i:][0]
if v.node == node && int(v.name) == n {
@@ -299,7 +291,7 @@
switch et {
case 0,
TFUNC:
- goto none
+ return zbits
}
if nvar >= NVAR {
@@ -319,10 +311,10 @@
}
}
- goto none
+ return zbits
}
- i = nvar
+ i := nvar
nvar++
v = &var_[i:][0]
v.id = i
@@ -341,7 +333,7 @@
node.Opt = v
- bit = blsh(uint(i))
+ bit := blsh(uint(i))
if n == obj.NAME_EXTERN || n == obj.NAME_STATIC {
for z := 0; z < BITS; z++ {
externs.b[z] |= bit.b[z]
@@ -401,9 +393,6 @@
Ostats.Nvar++
return bit
-
-none:
- return zbits
}
func prop(f *Flow, ref Bits, cal Bits) {
diff --git a/src/cmd/internal/gc/sinit.go b/src/cmd/internal/gc/sinit.go
index ca8db418..4cb2dd6 100644
--- a/src/cmd/internal/gc/sinit.go
+++ b/src/cmd/internal/gc/sinit.go
@@ -533,21 +533,18 @@
func simplename(n *Node) bool {
if n.Op != ONAME {
- goto no
+ return false
}
if n.Addable == 0 {
- goto no
+ return false
}
if n.Class&PHEAP != 0 {
- goto no
+ return false
}
if n.Class == PPARAMREF {
- goto no
+ return false
}
return true
-
-no:
- return false
}
func litas(l *Node, r *Node, init **NodeList) {
@@ -1191,48 +1188,48 @@
}
func oaslit(n *Node, init **NodeList) bool {
- var ctxt int
-
if n.Left == nil || n.Right == nil {
- goto no
+ // not a special composit literal assignment
+ return false
}
if n.Left.Type == nil || n.Right.Type == nil {
- goto no
+ // not a special composit literal assignment
+ return false
}
if !simplename(n.Left) {
- goto no
+ // not a special composit literal assignment
+ return false
}
if !Eqtype(n.Left.Type, n.Right.Type) {
- goto no
+ // not a special composit literal assignment
+ return false
}
// context is init() function.
// implies generated data executed
// exactly once and not subject to races.
- ctxt = 0
+ ctxt := 0
// if(n->dodata == 1)
// ctxt = 1;
switch n.Right.Op {
default:
- goto no
+ // not a special composit literal assignment
+ return false
case OSTRUCTLIT,
OARRAYLIT,
OMAPLIT:
if vmatch1(n.Left, n.Right) {
- goto no
+ // not a special composit literal assignment
+ return false
}
anylit(ctxt, n.Right, n.Left, init)
}
n.Op = OEMPTY
return true
-
- // not a special composit literal assignment
-no:
- return false
}
func getlit(lit *Node) int {
@@ -1244,7 +1241,7 @@
func stataddr(nam *Node, n *Node) bool {
if n == nil {
- goto no
+ return false
}
switch n.Op {
@@ -1281,7 +1278,6 @@
return true
}
-no:
return false
}
@@ -1420,7 +1416,6 @@
var nr *Node
var nl *Node
var nam Node
- var nod1 Node
if n.Dodata == 0 {
goto no
@@ -1436,7 +1431,7 @@
if nam.Class != PEXTERN {
goto no
}
- goto yes
+ return true
}
if nr.Type == nil || !Eqtype(nl.Type, nr.Type) {
@@ -1466,7 +1461,33 @@
case OSLICEARR:
if nr.Right.Op == OKEY && nr.Right.Left == nil && nr.Right.Right == nil {
nr = nr.Left
- goto slice
+ gused(nil) // in case the data is the dest of a goto
+ nl := nr
+ if nr == nil || nr.Op != OADDR {
+ goto no
+ }
+ nr = nr.Left
+ if nr == nil || nr.Op != ONAME {
+ goto no
+ }
+
+ // nr is the array being converted to a slice
+ if nr.Type == nil || nr.Type.Etype != TARRAY || nr.Type.Bound < 0 {
+ goto no
+ }
+
+ nam.Xoffset += int64(Array_array)
+ gdata(&nam, nl, int(Types[Tptr].Width))
+
+ nam.Xoffset += int64(Array_nel) - int64(Array_array)
+ var nod1 Node
+ Nodconst(&nod1, Types[TINT], nr.Type.Bound)
+ gdata(&nam, &nod1, Widthint)
+
+ nam.Xoffset += int64(Array_cap) - int64(Array_nel)
+ gdata(&nam, &nod1, Widthint)
+
+ return true
}
goto no
@@ -1505,37 +1526,8 @@
gdatastring(&nam, nr.Val.U.Sval)
}
-yes:
return true
-slice:
- gused(nil) // in case the data is the dest of a goto
- nl = nr
- if nr == nil || nr.Op != OADDR {
- goto no
- }
- nr = nr.Left
- if nr == nil || nr.Op != ONAME {
- goto no
- }
-
- // nr is the array being converted to a slice
- if nr.Type == nil || nr.Type.Etype != TARRAY || nr.Type.Bound < 0 {
- goto no
- }
-
- nam.Xoffset += int64(Array_array)
- gdata(&nam, nl, int(Types[Tptr].Width))
-
- nam.Xoffset += int64(Array_nel) - int64(Array_array)
- Nodconst(&nod1, Types[TINT], nr.Type.Bound)
- gdata(&nam, &nod1, Widthint)
-
- nam.Xoffset += int64(Array_cap) - int64(Array_nel)
- gdata(&nam, &nod1, Widthint)
-
- goto yes
-
no:
if n.Dodata == 2 {
Dump("\ngen_as_init", n)
diff --git a/src/cmd/internal/gc/subr.go b/src/cmd/internal/gc/subr.go
index 1c59e59..c8eb1d6 100644
--- a/src/cmd/internal/gc/subr.go
+++ b/src/cmd/internal/gc/subr.go
@@ -1014,14 +1014,14 @@
Fatal("struct/interface missing field: %v %v", Tconv(t1, 0), Tconv(t2, 0))
}
if t1.Sym != t2.Sym || t1.Embedded != t2.Embedded || !eqtype1(t1.Type, t2.Type, &l) || !eqnote(t1.Note, t2.Note) {
- goto no
+ return false
}
}
if t1 == nil && t2 == nil {
- goto yes
+ return true
}
- goto no
+ return false
// Loop over structs: receiver, in, out.
case TFUNC:
@@ -1043,40 +1043,34 @@
Fatal("func struct missing field: %v %v", Tconv(ta, 0), Tconv(tb, 0))
}
if ta.Isddd != tb.Isddd || !eqtype1(ta.Type, tb.Type, &l) {
- goto no
+ return false
}
}
if ta != nil || tb != nil {
- goto no
+ return false
}
}
if t1 == nil && t2 == nil {
- goto yes
+ return true
}
- goto no
+ return false
case TARRAY:
if t1.Bound != t2.Bound {
- goto no
+ return false
}
case TCHAN:
if t1.Chan != t2.Chan {
- goto no
+ return false
}
}
if eqtype1(t1.Down, t2.Down, &l) && eqtype1(t1.Type, t2.Type, &l) {
- goto yes
+ return true
}
- goto no
-
-yes:
- return true
-
-no:
return false
}
@@ -1376,10 +1370,8 @@
}
func subtype(stp **Type, t *Type, d int) bool {
- var st *Type
-
loop:
- st = *stp
+ st := *stp
if st == nil {
return false
}
@@ -1762,7 +1754,7 @@
t = n.Type
if t == nil {
- goto rnil
+ return nil
}
if t.Etype != TFIELD {
@@ -1775,7 +1767,6 @@
bad:
Fatal("structfirst: not struct %v", Tconv(n, 0))
-rnil:
return nil
}
@@ -1783,21 +1774,17 @@
n := s.T
t := n.Down
if t == nil {
- goto rnil
+ return nil
}
if t.Etype != TFIELD {
- goto bad
+ Fatal("structnext: not struct %v", Tconv(n, 0))
+
+ return nil
}
s.T = t
return t
-
-bad:
- Fatal("structnext: not struct %v", Tconv(n, 0))
-
-rnil:
- return nil
}
/*
@@ -2135,54 +2122,47 @@
// will give shortest unique addressing.
// modify the tree with missing type names.
func adddot(n *Node) *Node {
- var s *Sym
- var c int
- var d int
-
typecheck(&n.Left, Etype|Erv)
n.Diag |= n.Left.Diag
t := n.Left.Type
if t == nil {
- goto ret
- }
-
- if n.Left.Op == OTYPE {
- goto ret
- }
-
- if n.Right.Op != ONAME {
- goto ret
- }
- s = n.Right.Sym
- if s == nil {
- goto ret
- }
-
- for d = 0; d < len(dotlist); d++ {
- c = adddot1(s, t, d, nil, 0)
- if c > 0 {
- goto out
- }
- }
-
- goto ret
-
-out:
- if c > 1 {
- Yyerror("ambiguous selector %v", Nconv(n, 0))
- n.Left = nil
return n
}
- // rebuild elided dots
- for c := d - 1; c >= 0; c-- {
- if n.Left.Type != nil && Isptr[n.Left.Type.Etype] != 0 {
- n.Left.Implicit = 1
- }
- n.Left = Nod(ODOT, n.Left, newname(dotlist[c].field.Sym))
+ if n.Left.Op == OTYPE {
+ return n
}
-ret:
+ if n.Right.Op != ONAME {
+ return n
+ }
+ s := n.Right.Sym
+ if s == nil {
+ return n
+ }
+
+ var c int
+ for d := 0; d < len(dotlist); d++ {
+ c = adddot1(s, t, d, nil, 0)
+ if c > 0 {
+ if c > 1 {
+ Yyerror("ambiguous selector %v", Nconv(n, 0))
+ n.Left = nil
+ return n
+ }
+
+ // rebuild elided dots
+ for c := d - 1; c >= 0; c-- {
+ if n.Left.Type != nil && Isptr[n.Left.Type.Etype] != 0 {
+ n.Left.Implicit = 1
+ }
+ n.Left = Nod(ODOT, n.Left, newname(dotlist[c].field.Sym))
+ }
+
+ return n
+ }
+ }
+
return n
}
@@ -3301,18 +3281,15 @@
* 1000+ if it is a -(power of 2)
*/
func powtwo(n *Node) int {
- var v uint64
- var b uint64
-
if n == nil || n.Op != OLITERAL || n.Type == nil {
- goto no
+ return -1
}
if Isint[n.Type.Etype] == 0 {
- goto no
+ return -1
}
- v = uint64(Mpgetfix(n.Val.U.Xval))
- b = 1
+ v := uint64(Mpgetfix(n.Val.U.Xval))
+ b := uint64(1)
for i := 0; i < 64; i++ {
if b == v {
return i
@@ -3321,7 +3298,7 @@
}
if Issigned[n.Type.Etype] == 0 {
- goto no
+ return -1
}
v = -v
@@ -3333,7 +3310,6 @@
b = b << 1
}
-no:
return -1
}
@@ -3592,22 +3568,19 @@
for i := 0; i < len(s); i++ {
c := s[i]
if c <= ' ' || i >= slash && c == '.' || c == '%' || c == '"' || c >= 0x7F {
- goto escape
+ var buf bytes.Buffer
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if c <= ' ' || i >= slash && c == '.' || c == '%' || c == '"' || c >= 0x7F {
+ fmt.Fprintf(&buf, "%%%02x", c)
+ continue
+ }
+ buf.WriteByte(c)
+ }
+ return buf.String()
}
}
return s
-
-escape:
- var buf bytes.Buffer
- for i := 0; i < len(s); i++ {
- c := s[i]
- if c <= ' ' || i >= slash && c == '.' || c == '%' || c == '"' || c >= 0x7F {
- fmt.Fprintf(&buf, "%%%02x", c)
- continue
- }
- buf.WriteByte(c)
- }
- return buf.String()
}
func mkpkg(path_ *Strlit) *Pkg {
diff --git a/src/cmd/internal/gc/typecheck.go b/src/cmd/internal/gc/typecheck.go
index 1468d5f..cb0111e 100644
--- a/src/cmd/internal/gc/typecheck.go
+++ b/src/cmd/internal/gc/typecheck.go
@@ -1805,7 +1805,25 @@
arith:
if op == OLSH || op == ORSH {
- goto shift
+ defaultlit(&r, Types[TUINT])
+ n.Right = r
+ t := r.Type
+ if Isint[t.Etype] == 0 || Issigned[t.Etype] != 0 {
+ Yyerror("invalid operation: %v (shift count type %v, must be unsigned integer)", Nconv(n, 0), Tconv(r.Type, 0))
+ goto error
+ }
+
+ t = l.Type
+ if t != nil && t.Etype != TIDEAL && Isint[t.Etype] == 0 {
+ Yyerror("invalid operation: %v (shift of type %v)", Nconv(n, 0), Tconv(t, 0))
+ goto error
+ }
+
+ // no defaultlit for left
+ // the outer context gives the type
+ n.Type = l.Type
+
+ goto ret
}
// ideal mixed with non-ideal
@@ -1993,27 +2011,6 @@
n.Type = t
goto ret
-shift:
- defaultlit(&r, Types[TUINT])
- n.Right = r
- t = r.Type
- if Isint[t.Etype] == 0 || Issigned[t.Etype] != 0 {
- Yyerror("invalid operation: %v (shift count type %v, must be unsigned integer)", Nconv(n, 0), Tconv(r.Type, 0))
- goto error
- }
-
- t = l.Type
- if t != nil && t.Etype != TIDEAL && Isint[t.Etype] == 0 {
- Yyerror("invalid operation: %v (shift of type %v)", Nconv(n, 0), Tconv(t, 0))
- goto error
- }
-
- // no defaultlit for left
- // the outer context gives the type
- n.Type = l.Type
-
- goto ret
-
doconv:
ok |= Erv
saveorignode(n)
diff --git a/src/cmd/internal/gc/unsafe.go b/src/cmd/internal/gc/unsafe.go
index 7f0a33f..6faed49 100644
--- a/src/cmd/internal/gc/unsafe.go
+++ b/src/cmd/internal/gc/unsafe.go
@@ -14,31 +14,28 @@
* rewrite with a constant
*/
func unsafenmagic(nn *Node) *Node {
- var r *Node
- var s *Sym
- var v int64
-
fn := nn.Left
args := nn.List
if safemode != 0 || fn == nil || fn.Op != ONAME {
- goto no
+ return nil
}
- s = fn.Sym
+ s := fn.Sym
if s == nil {
- goto no
+ return nil
}
if s.Pkg != unsafepkg {
- goto no
+ return nil
}
if args == nil {
Yyerror("missing argument for %v", Sconv(s, 0))
- goto no
+ return nil
}
- r = args.N
+ r := args.N
+ var v int64
if s.Name == "Sizeof" {
typecheck(&r, Erv)
defaultlit(&r, nil)
@@ -127,7 +124,6 @@
goto yes
}
-no:
return nil
bad:
diff --git a/src/cmd/internal/gc/util.go b/src/cmd/internal/gc/util.go
index df68d50..c3f7db2 100644
--- a/src/cmd/internal/gc/util.go
+++ b/src/cmd/internal/gc/util.go
@@ -39,17 +39,14 @@
func plan9quote(s string) string {
if s == "" {
- goto needquote
+ return "'" + strings.Replace(s, "'", "''", -1) + "'"
}
for i := 0; i < len(s); i++ {
if s[i] <= ' ' || s[i] == '\'' {
- goto needquote
+ return "'" + strings.Replace(s, "'", "''", -1) + "'"
}
}
return s
-
-needquote:
- return "'" + strings.Replace(s, "'", "''", -1) + "'"
}
// simulation of int(*s++) in C
diff --git a/src/cmd/internal/gc/walk.go b/src/cmd/internal/gc/walk.go
index c57bdd6..4667494 100644
--- a/src/cmd/internal/gc/walk.go
+++ b/src/cmd/internal/gc/walk.go
@@ -3653,7 +3653,23 @@
if sl >= 0 {
sr := int(Mpgetfix(r.Right.Val.U.Xval))
if sr >= 0 && sl+sr == w {
- goto yes
+ // Rewrite left shift half to left rotate.
+ if l.Op == OLSH {
+ n = l
+ } else {
+ n = r
+ }
+ n.Op = OLROT
+
+ // Remove rotate 0 and rotate w.
+ s := int(Mpgetfix(n.Right.Val.U.Xval))
+
+ if s == 0 || s == w {
+ n = n.Left
+ }
+
+ *np = n
+ return
}
}
return
@@ -3661,25 +3677,6 @@
// TODO: Could allow s and 32-s if s is bounded (maybe s&31 and 32-s&31).
return
-
- // Rewrite left shift half to left rotate.
-yes:
- if l.Op == OLSH {
- n = l
- } else {
- n = r
- }
- n.Op = OLROT
-
- // Remove rotate 0 and rotate w.
- s := int(Mpgetfix(n.Right.Val.U.Xval))
-
- if s == 0 || s == w {
- n = n.Left
- }
-
- *np = n
- return
}
/*
@@ -3793,11 +3790,124 @@
return
}
- var n1 *Node
- var m Magic
- var n2 *Node
if pow < 0 {
- goto divbymul
+ // try to do division by multiply by (2^w)/d
+ // see hacker's delight chapter 10
+ // TODO: support 64-bit magic multiply here.
+ var m Magic
+ m.W = w
+
+ if Issigned[nl.Type.Etype] != 0 {
+ m.Sd = Mpgetfix(nr.Val.U.Xval)
+ Smagic(&m)
+ } else {
+ m.Ud = uint64(Mpgetfix(nr.Val.U.Xval))
+ Umagic(&m)
+ }
+
+ if m.Bad != 0 {
+ return
+ }
+
+ // We have a quick division method so use it
+ // for modulo too.
+ if n.Op == OMOD {
+ // rewrite as A%B = A - (A/B*B).
+ n1 := Nod(ODIV, nl, nr)
+
+ n2 := Nod(OMUL, n1, nr)
+ n = Nod(OSUB, nl, n2)
+ goto ret
+ }
+
+ switch Simtype[nl.Type.Etype] {
+ default:
+ return
+
+ // n1 = nl * magic >> w (HMUL)
+ case TUINT8,
+ TUINT16,
+ TUINT32:
+ nc := Nod(OXXX, nil, nil)
+
+ Nodconst(nc, nl.Type, int64(m.Um))
+ n1 := Nod(OMUL, nl, nc)
+ typecheck(&n1, Erv)
+ n1.Op = OHMUL
+ if m.Ua != 0 {
+ // Select a Go type with (at least) twice the width.
+ var twide *Type
+ switch Simtype[nl.Type.Etype] {
+ default:
+ return
+
+ case TUINT8,
+ TUINT16:
+ twide = Types[TUINT32]
+
+ case TUINT32:
+ twide = Types[TUINT64]
+
+ case TINT8,
+ TINT16:
+ twide = Types[TINT32]
+
+ case TINT32:
+ twide = Types[TINT64]
+ }
+
+ // add numerator (might overflow).
+ // n2 = (n1 + nl)
+ n2 := Nod(OADD, conv(n1, twide), conv(nl, twide))
+
+ // shift by m.s
+ nc := Nod(OXXX, nil, nil)
+
+ Nodconst(nc, Types[TUINT], int64(m.S))
+ n = conv(Nod(ORSH, n2, nc), nl.Type)
+ } else {
+ // n = n1 >> m.s
+ nc := Nod(OXXX, nil, nil)
+
+ Nodconst(nc, Types[TUINT], int64(m.S))
+ n = Nod(ORSH, n1, nc)
+ }
+
+ // n1 = nl * magic >> w
+ case TINT8,
+ TINT16,
+ TINT32:
+ nc := Nod(OXXX, nil, nil)
+
+ Nodconst(nc, nl.Type, m.Sm)
+ n1 := Nod(OMUL, nl, nc)
+ typecheck(&n1, Erv)
+ n1.Op = OHMUL
+ if m.Sm < 0 {
+ // add the numerator.
+ n1 = Nod(OADD, n1, nl)
+ }
+
+ // shift by m.s
+ nc = Nod(OXXX, nil, nil)
+
+ Nodconst(nc, Types[TUINT], int64(m.S))
+ n2 := conv(Nod(ORSH, n1, nc), nl.Type)
+
+ // add 1 iff n1 is negative.
+ nc = Nod(OXXX, nil, nil)
+
+ Nodconst(nc, Types[TUINT], int64(w)-1)
+ n3 := Nod(ORSH, nl, nc) // n4 = -1 iff n1 is negative.
+ n = Nod(OSUB, n2, n3)
+
+ // apply sign.
+ if m.Sd < 0 {
+ n = Nod(OMINUS, n, nil)
+ }
+ }
+
+ goto ret
}
switch pow {
@@ -3905,127 +4015,6 @@
goto ret
- // try to do division by multiply by (2^w)/d
- // see hacker's delight chapter 10
- // TODO: support 64-bit magic multiply here.
-divbymul:
- m.W = w
-
- if Issigned[nl.Type.Etype] != 0 {
- m.Sd = Mpgetfix(nr.Val.U.Xval)
- Smagic(&m)
- } else {
- m.Ud = uint64(Mpgetfix(nr.Val.U.Xval))
- Umagic(&m)
- }
-
- if m.Bad != 0 {
- return
- }
-
- // We have a quick division method so use it
- // for modulo too.
- if n.Op == OMOD {
- goto longmod
- }
-
- switch Simtype[nl.Type.Etype] {
- default:
- return
-
- // n1 = nl * magic >> w (HMUL)
- case TUINT8,
- TUINT16,
- TUINT32:
- nc := Nod(OXXX, nil, nil)
-
- Nodconst(nc, nl.Type, int64(m.Um))
- n1 := Nod(OMUL, nl, nc)
- typecheck(&n1, Erv)
- n1.Op = OHMUL
- if m.Ua != 0 {
- // Select a Go type with (at least) twice the width.
- var twide *Type
- switch Simtype[nl.Type.Etype] {
- default:
- return
-
- case TUINT8,
- TUINT16:
- twide = Types[TUINT32]
-
- case TUINT32:
- twide = Types[TUINT64]
-
- case TINT8,
- TINT16:
- twide = Types[TINT32]
-
- case TINT32:
- twide = Types[TINT64]
- }
-
- // add numerator (might overflow).
- // n2 = (n1 + nl)
- n2 := Nod(OADD, conv(n1, twide), conv(nl, twide))
-
- // shift by m.s
- nc := Nod(OXXX, nil, nil)
-
- Nodconst(nc, Types[TUINT], int64(m.S))
- n = conv(Nod(ORSH, n2, nc), nl.Type)
- } else {
- // n = n1 >> m.s
- nc := Nod(OXXX, nil, nil)
-
- Nodconst(nc, Types[TUINT], int64(m.S))
- n = Nod(ORSH, n1, nc)
- }
-
- // n1 = nl * magic >> w
- case TINT8,
- TINT16,
- TINT32:
- nc := Nod(OXXX, nil, nil)
-
- Nodconst(nc, nl.Type, m.Sm)
- n1 := Nod(OMUL, nl, nc)
- typecheck(&n1, Erv)
- n1.Op = OHMUL
- if m.Sm < 0 {
- // add the numerator.
- n1 = Nod(OADD, n1, nl)
- }
-
- // shift by m.s
- nc = Nod(OXXX, nil, nil)
-
- Nodconst(nc, Types[TUINT], int64(m.S))
- n2 := conv(Nod(ORSH, n1, nc), nl.Type)
-
- // add 1 iff n1 is negative.
- nc = Nod(OXXX, nil, nil)
-
- Nodconst(nc, Types[TUINT], int64(w)-1)
- n3 := Nod(ORSH, nl, nc) // n4 = -1 iff n1 is negative.
- n = Nod(OSUB, n2, n3)
-
- // apply sign.
- if m.Sd < 0 {
- n = Nod(OMINUS, n, nil)
- }
- }
-
- goto ret
-
- // rewrite as A%B = A - (A/B*B).
-longmod:
- n1 = Nod(ODIV, nl, nr)
-
- n2 = Nod(OMUL, n1, nr)
- n = Nod(OSUB, nl, n2)
- goto ret
-
ret:
typecheck(&n, Erv)
walkexpr(&n, init)
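The modulo rewrite folded in above relies on the identity A%B == A - (A/B)*B, which holds under Go's truncating integer division. A minimal sketch; the function name is invented.

package sketch // hypothetical package, for illustration only

// modViaDiv computes a%b using only division, multiplication and
// subtraction, mirroring the OMOD -> OSUB(OMUL(ODIV, ...), ...) rewrite
// above. It matches a%b for every b != 0.
func modViaDiv(a, b int64) int64 {
	q := a / b
	return a - q*b
}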
diff --git a/src/cmd/internal/ld/data.go b/src/cmd/internal/ld/data.go
index 381030d..369d3a4 100644
--- a/src/cmd/internal/ld/data.go
+++ b/src/cmd/internal/ld/data.go
@@ -81,9 +81,7 @@
}
func adduintxx(ctxt *Link, s *LSym, v uint64, wid int) int64 {
- var off int64
-
- off = s.Size
+ off := s.Size
setuintxx(ctxt, s, off, v, int64(wid))
return off
}
@@ -113,17 +111,14 @@
}
func Addaddrplus(ctxt *Link, s *LSym, t *LSym, add int64) int64 {
- var i int64
- var r *Reloc
-
if s.Type == 0 {
s.Type = SDATA
}
s.Reachable = true
- i = s.Size
+ i := s.Size
s.Size += int64(ctxt.Arch.Ptrsize)
Symgrow(ctxt, s, s.Size)
- r = Addrel(s)
+ r := Addrel(s)
r.Sym = t
r.Off = int32(i)
r.Siz = uint8(ctxt.Arch.Ptrsize)
@@ -133,17 +128,14 @@
}
func Addpcrelplus(ctxt *Link, s *LSym, t *LSym, add int64) int64 {
- var i int64
- var r *Reloc
-
if s.Type == 0 {
s.Type = SDATA
}
s.Reachable = true
- i = s.Size
+ i := s.Size
s.Size += 4
Symgrow(ctxt, s, s.Size)
- r = Addrel(s)
+ r := Addrel(s)
r.Sym = t
r.Off = int32(i)
r.Add = add
@@ -157,8 +149,6 @@
}
func setaddrplus(ctxt *Link, s *LSym, off int64, t *LSym, add int64) int64 {
- var r *Reloc
-
if s.Type == 0 {
s.Type = SDATA
}
@@ -168,7 +158,7 @@
Symgrow(ctxt, s, s.Size)
}
- r = Addrel(s)
+ r := Addrel(s)
r.Sym = t
r.Off = int32(off)
r.Siz = uint8(ctxt.Arch.Ptrsize)
@@ -182,17 +172,14 @@
}
func addsize(ctxt *Link, s *LSym, t *LSym) int64 {
- var i int64
- var r *Reloc
-
if s.Type == 0 {
s.Type = SDATA
}
s.Reachable = true
- i = s.Size
+ i := s.Size
s.Size += int64(ctxt.Arch.Ptrsize)
Symgrow(ctxt, s, s.Size)
- r = Addrel(s)
+ r := Addrel(s)
r.Sym = t
r.Off = int32(i)
r.Siz = uint8(ctxt.Arch.Ptrsize)
@@ -201,17 +188,14 @@
}
func addaddrplus4(ctxt *Link, s *LSym, t *LSym, add int64) int64 {
- var i int64
- var r *Reloc
-
if s.Type == 0 {
s.Type = SDATA
}
s.Reachable = true
- i = s.Size
+ i := s.Size
s.Size += 4
Symgrow(ctxt, s, s.Size)
- r = Addrel(s)
+ r := Addrel(s)
r.Sym = t
r.Off = int32(i)
r.Siz = 4
@@ -253,16 +237,12 @@
}
func listsort(l *LSym, cmp func(*LSym, *LSym) int, nextp func(*LSym) **LSym) *LSym {
- var l1 *LSym
- var l2 *LSym
- var le *LSym
-
if l == nil || *nextp(l) == nil {
return l
}
- l1 = l
- l2 = l
+ l1 := l
+ l2 := l
for {
l2 = *nextp(l2)
if l2 == nil {
@@ -289,7 +269,7 @@
l2 = *nextp(l2)
}
- le = l
+ le := l
for {
if l1 == nil {
@@ -332,14 +312,13 @@
var r *Reloc
var rs *LSym
var i16 int16
- var ri int32
var off int32
var siz int32
var fl int32
var o int64
Ctxt.Cursym = s
- for ri = 0; ri < int32(len(s.R)); ri++ {
+ for ri := int32(0); ri < int32(len(s.R)); ri++ {
r = &s.R[ri]
r.Done = 1
off = r.Off
@@ -591,34 +570,28 @@
}
func reloc() {
- var s *LSym
-
if Debug['v'] != 0 {
fmt.Fprintf(&Bso, "%5.2f reloc\n", obj.Cputime())
}
Bflush(&Bso)
- for s = Ctxt.Textp; s != nil; s = s.Next {
+ for s := Ctxt.Textp; s != nil; s = s.Next {
relocsym(s)
}
- for s = datap; s != nil; s = s.Next {
+ for s := datap; s != nil; s = s.Next {
relocsym(s)
}
}
func dynrelocsym(s *LSym) {
- var ri int
- var r *Reloc
-
if HEADTYPE == Hwindows {
- var rel *LSym
- var targ *LSym
-
- rel = Linklookup(Ctxt, ".rel", 0)
+ rel := Linklookup(Ctxt, ".rel", 0)
if s == rel {
return
}
- for ri = 0; ri < len(s.R); ri++ {
+ var r *Reloc
+ var targ *LSym
+ for ri := 0; ri < len(s.R); ri++ {
r = &s.R[ri]
targ = r.Sym
if targ == nil {
@@ -655,7 +628,8 @@
return
}
- for ri = 0; ri < len(s.R); ri++ {
+ var r *Reloc
+ for ri := 0; ri < len(s.R); ri++ {
r = &s.R[ri]
if r.Sym != nil && r.Sym.Type == SDYNIMPORT || r.Type >= 256 {
if r.Sym != nil && !r.Sym.Reachable {
@@ -667,8 +641,6 @@
}
func dynreloc() {
- var s *LSym
-
// -d suppresses dynamic loader format, so we may as well not
// compute these sections or mark their symbols as reachable.
if Debug['d'] != 0 && HEADTYPE != Hwindows {
@@ -679,10 +651,10 @@
}
Bflush(&Bso)
- for s = Ctxt.Textp; s != nil; s = s.Next {
+ for s := Ctxt.Textp; s != nil; s = s.Next {
dynrelocsym(s)
}
- for s = datap; s != nil; s = s.Next {
+ for s := datap; s != nil; s = s.Next {
dynrelocsym(s)
}
if Iself {
@@ -692,9 +664,6 @@
func blk(start *LSym, addr int64, size int64) {
var sym *LSym
- var eaddr int64
- var p []byte
- var ep []byte
for sym = start; sym != nil; sym = sym.Next {
if sym.Type&SSUB == 0 && sym.Value >= addr {
@@ -702,7 +671,9 @@
}
}
- eaddr = addr + size
+ eaddr := addr + size
+ var ep []byte
+ var p []byte
for ; sym != nil; sym = sym.Next {
if sym.Type&SSUB != 0 {
continue
@@ -746,11 +717,6 @@
}
func Codeblk(addr int64, size int64) {
- var sym *LSym
- var eaddr int64
- var n int64
- var q []byte
-
if Debug['a'] != 0 {
fmt.Fprintf(&Bso, "codeblk [%#x,%#x) at offset %#x\n", addr, addr+size, Cpos())
}
@@ -762,6 +728,7 @@
return
}
+ var sym *LSym
for sym = Ctxt.Textp; sym != nil; sym = sym.Next {
if !sym.Reachable {
continue
@@ -771,7 +738,9 @@
}
}
- eaddr = addr + size
+ eaddr := addr + size
+ var n int64
+ var q []byte
for ; sym != nil; sym = sym.Next {
if !sym.Reachable {
continue
@@ -816,15 +785,6 @@
}
func Datblk(addr int64, size int64) {
- var sym *LSym
- var i int64
- var eaddr int64
- var p []byte
- var ep []byte
- var typ string
- var rsname string
- var r *Reloc
-
if Debug['a'] != 0 {
fmt.Fprintf(&Bso, "datblk [%#x,%#x) at offset %#x\n", addr, addr+size, Cpos())
}
@@ -836,13 +796,20 @@
return
}
+ var sym *LSym
for sym = datap; sym != nil; sym = sym.Next {
if sym.Value >= addr {
break
}
}
- eaddr = addr + size
+ eaddr := addr + size
+ var ep []byte
+ var i int64
+ var p []byte
+ var r *Reloc
+ var rsname string
+ var typ string
for ; sym != nil; sym = sym.Next {
if sym.Value >= eaddr {
break
@@ -922,21 +889,16 @@
}
func addstrdata(name string, value string) {
- var s *LSym
- var sp *LSym
- var p string
- var reachable bool
-
- p = fmt.Sprintf("%s.str", name)
- sp = Linklookup(Ctxt, p, 0)
+ p := fmt.Sprintf("%s.str", name)
+ sp := Linklookup(Ctxt, p, 0)
Addstring(sp, value)
sp.Type = SRODATA
- s = Linklookup(Ctxt, name, 0)
+ s := Linklookup(Ctxt, name, 0)
s.Size = 0
s.Dupok = 1
- reachable = s.Reachable
+ reachable := s.Reachable
Addaddr(Ctxt, s, sp)
adduintxx(Ctxt, s, uint64(len(value)), Thearch.Ptrsize)
@@ -949,15 +911,12 @@
}
func Addstring(s *LSym, str string) int64 {
- var n int
- var r int32
-
if s.Type == 0 {
s.Type = SNOPTRDATA
}
s.Reachable = true
- r = int32(s.Size)
- n = len(str) + 1
+ r := int32(s.Size)
+ n := len(str) + 1
if s.Name == ".shstrtab" {
elfsetstring(str, int(r))
}
@@ -969,9 +928,7 @@
}
func dosymtype() {
- var s *LSym
-
- for s = Ctxt.Allsym; s != nil; s = s.Allsym {
+ for s := Ctxt.Allsym; s != nil; s = s.Allsym {
if len(s.P) > 0 {
if s.Type == SBSS {
s.Type = SDATA
@@ -984,13 +941,11 @@
}
func symalign(s *LSym) int32 {
- var align int32
-
if s.Align != 0 {
return s.Align
}
- align = int32(Thearch.Maxalign)
+ align := int32(Thearch.Maxalign)
for int64(align) > s.Size && align > 1 {
align >>= 1
}
@@ -1008,9 +963,8 @@
// the list of symbols s; the list stops when s->type exceeds type.
func maxalign(s *LSym, type_ int) int32 {
var align int32
- var max int32
- max = 0
+ max := int32(0)
for ; s != nil && int(s.Type) <= type_; s = s.Next {
align = symalign(s)
if max < align {
@@ -1042,16 +996,13 @@
// Writes insData block from g->data.
func proggendataflush(g *ProgGen) {
- var i int32
- var s int32
-
if g.datasize == 0 {
return
}
proggenemit(g, obj.InsData)
proggenemit(g, uint8(g.datasize))
- s = (g.datasize + obj.PointersPerByte - 1) / obj.PointersPerByte
- for i = 0; i < s; i++ {
+ s := (g.datasize + obj.PointersPerByte - 1) / obj.PointersPerByte
+ for i := int32(0); i < s; i++ {
proggenemit(g, g.data[i])
}
g.datasize = 0
@@ -1068,9 +1019,7 @@
// Skip v bytes due to alignment, etc.
func proggenskip(g *ProgGen, off int64, v int64) {
- var i int64
-
- for i = off; i < off+v; i++ {
+ for i := off; i < off+v; i++ {
if (i % int64(Thearch.Ptrsize)) == 0 {
proggendata(g, obj.BitsScalar)
}
@@ -1101,11 +1050,6 @@
// This function generates GC pointer info for global variables.
func proggenaddsym(g *ProgGen, s *LSym) {
- var gcprog *LSym
- var mask []byte
- var i int64
- var size int64
-
if s.Size == 0 {
return
}
@@ -1128,10 +1072,10 @@
if (s.Size%int64(Thearch.Ptrsize) != 0) || (g.pos%int64(Thearch.Ptrsize) != 0) {
Diag("proggenaddsym: unaligned conservative symbol %s: size=%d pos=%d", s.Name, s.Size, g.pos)
}
- size = (s.Size + int64(Thearch.Ptrsize) - 1) / int64(Thearch.Ptrsize) * int64(Thearch.Ptrsize)
+ size := (s.Size + int64(Thearch.Ptrsize) - 1) / int64(Thearch.Ptrsize) * int64(Thearch.Ptrsize)
if size < int64(32*Thearch.Ptrsize) {
// Emit small symbols as data.
- for i = 0; i < size/int64(Thearch.Ptrsize); i++ {
+ for i := int64(0); i < size/int64(Thearch.Ptrsize); i++ {
proggendata(g, obj.BitsPointer)
}
} else {
@@ -1148,7 +1092,7 @@
if s.Size < int64(32*Thearch.Ptrsize) {
// Emit small symbols as data.
// This case also handles unaligned and tiny symbols, so tread carefully.
- for i = s.Value; i < s.Value+s.Size; i++ {
+ for i := s.Value; i < s.Value+s.Size; i++ {
if (i % int64(Thearch.Ptrsize)) == 0 {
proggendata(g, obj.BitsScalar)
}
@@ -1168,24 +1112,24 @@
// gc program, copy directly
proggendataflush(g)
- gcprog = decodetype_gcprog(s.Gotype)
- size = decodetype_size(s.Gotype)
+ gcprog := decodetype_gcprog(s.Gotype)
+ size := decodetype_size(s.Gotype)
if (size%int64(Thearch.Ptrsize) != 0) || (g.pos%int64(Thearch.Ptrsize) != 0) {
Diag("proggenaddsym: unaligned gcprog symbol %s: size=%d pos=%d", s.Name, s.Size, g.pos)
}
- for i = 0; i < int64(len(gcprog.P)-1); i++ {
+ for i := int64(0); i < int64(len(gcprog.P)-1); i++ {
proggenemit(g, uint8(gcprog.P[i]))
}
g.pos = s.Value + size
} else {
// gc mask, it's small so emit as data
- mask = decodetype_gcmask(s.Gotype)
+ mask := decodetype_gcmask(s.Gotype)
- size = decodetype_size(s.Gotype)
+ size := decodetype_size(s.Gotype)
if (size%int64(Thearch.Ptrsize) != 0) || (g.pos%int64(Thearch.Ptrsize) != 0) {
Diag("proggenaddsym: unaligned gcmask symbol %s: size=%d pos=%d", s.Name, s.Size, g.pos)
}
- for i = 0; i < size; i += int64(Thearch.Ptrsize) {
+ for i := int64(0); i < size; i += int64(Thearch.Ptrsize) {
proggendata(g, uint8((mask[i/int64(Thearch.Ptrsize)/2]>>uint64((i/int64(Thearch.Ptrsize)%2)*4+2))&obj.BitsMask))
}
g.pos = s.Value + size
@@ -1193,9 +1137,7 @@
}
func growdatsize(datsizep *int64, s *LSym) {
- var datsize int64
-
- datsize = *datsizep
+ datsize := *datsizep
if s.Size < 0 {
Diag("negative size (datsize = %d, s->size = %d)", datsize, s.Size)
}
@@ -1206,27 +1148,15 @@
}
func dodata() {
- var n int32
- var datsize int64
- var sect *Section
- var segro *Segment
- var s *LSym
- var last *LSym
- var l **LSym
- var toc *LSym
- var gcdata *LSym
- var gcbss *LSym
- var gen ProgGen
-
if Debug['v'] != 0 {
fmt.Fprintf(&Bso, "%5.2f dodata\n", obj.Cputime())
}
Bflush(&Bso)
- last = nil
+ last := (*LSym)(nil)
datap = nil
- for s = Ctxt.Allsym; s != nil; s = s.Allsym {
+ for s := Ctxt.Allsym; s != nil; s = s.Allsym {
if !s.Reachable || s.Special != 0 {
continue
}
@@ -1245,7 +1175,7 @@
}
}
- for s = datap; s != nil; s = s.Next {
+ for s := datap; s != nil; s = s.Next {
if int64(len(s.P)) > s.Size {
Diag("%s: initialize bounds (%d < %d)", s.Name, int64(s.Size), len(s.P))
}
@@ -1265,6 +1195,8 @@
dynreloc()
/* some symbols may no longer belong in datap (Mach-O) */
+ var l **LSym
+ var s *LSym
for l = &datap; ; {
s = *l
if s == nil {
@@ -1299,8 +1231,9 @@
}
/* writable ELF sections */
- datsize = 0
+ datsize := int64(0)
+ var sect *Section
for ; s != nil && s.Type < SELFGOT; s = s.Next {
sect = addsection(&Segdata, s.Name, 06)
sect.Align = symalign(s)
@@ -1315,10 +1248,11 @@
/* .got (and .toc on ppc64) */
if s.Type == SELFGOT {
- sect = addsection(&Segdata, ".got", 06)
+ sect := addsection(&Segdata, ".got", 06)
sect.Align = maxalign(s, SELFGOT)
datsize = Rnd(datsize, int64(sect.Align))
sect.Vaddr = uint64(datsize)
+ var toc *LSym
for ; s != nil && s.Type == SELFGOT; s = s.Next {
datsize = aligndatsize(datsize, s)
s.Sect = sect
@@ -1363,7 +1297,7 @@
/* shared library initializer */
if Flag_shared != 0 {
- sect = addsection(&Segdata, ".init_array", 06)
+ sect := addsection(&Segdata, ".init_array", 06)
sect.Align = maxalign(s, SINITARR)
datsize = Rnd(datsize, int64(sect.Align))
sect.Vaddr = uint64(datsize)
@@ -1385,7 +1319,8 @@
sect.Vaddr = uint64(datsize)
Linklookup(Ctxt, "runtime.data", 0).Sect = sect
Linklookup(Ctxt, "runtime.edata", 0).Sect = sect
- gcdata = Linklookup(Ctxt, "runtime.gcdata", 0)
+ gcdata := Linklookup(Ctxt, "runtime.gcdata", 0)
+ var gen ProgGen
proggeninit(&gen, gcdata)
for ; s != nil && s.Type < SBSS; s = s.Next {
if s.Type == SINITARR {
@@ -1412,7 +1347,7 @@
sect.Vaddr = uint64(datsize)
Linklookup(Ctxt, "runtime.bss", 0).Sect = sect
Linklookup(Ctxt, "runtime.ebss", 0).Sect = sect
- gcbss = Linklookup(Ctxt, "runtime.gcbss", 0)
+ gcbss := Linklookup(Ctxt, "runtime.gcbss", 0)
proggeninit(&gen, gcbss)
for ; s != nil && s.Type < SNOPTRBSS; s = s.Next {
s.Sect = sect
@@ -1449,7 +1384,7 @@
}
if Iself && Linkmode == LinkExternal && s != nil && s.Type == STLSBSS && HEADTYPE != Hopenbsd {
- sect = addsection(&Segdata, ".tbss", 06)
+ sect := addsection(&Segdata, ".tbss", 06)
sect.Align = int32(Thearch.Ptrsize)
sect.Vaddr = 0
datsize = 0
@@ -1486,6 +1421,7 @@
* since it's not our decision; that code expects the sections in
* segtext.
*/
+ var segro *Segment
if Iself && Linkmode == LinkInternal {
segro = &Segrodata
} else {
@@ -1600,17 +1536,17 @@
}
/* number the sections */
- n = 1
+ n := int32(1)
- for sect = Segtext.Sect; sect != nil; sect = sect.Next {
+ for sect := Segtext.Sect; sect != nil; sect = sect.Next {
sect.Extnum = int16(n)
n++
}
- for sect = Segrodata.Sect; sect != nil; sect = sect.Next {
+ for sect := Segrodata.Sect; sect != nil; sect = sect.Next {
sect.Extnum = int16(n)
n++
}
- for sect = Segdata.Sect; sect != nil; sect = sect.Next {
+ for sect := Segdata.Sect; sect != nil; sect = sect.Next {
sect.Extnum = int16(n)
n++
}
@@ -1618,9 +1554,6 @@
// assign addresses to text
func textaddress() {
- var va uint64
- var sect *Section
- var sym *LSym
var sub *LSym
addsection(&Segtext, ".text", 05)
@@ -1628,14 +1561,14 @@
// Assign PCs in text segment.
// Could parallelize, by assigning to text
// and then letting threads copy down, but probably not worth it.
- sect = Segtext.Sect
+ sect := Segtext.Sect
sect.Align = int32(Funcalign)
Linklookup(Ctxt, "runtime.text", 0).Sect = sect
Linklookup(Ctxt, "runtime.etext", 0).Sect = sect
- va = uint64(INITTEXT)
+ va := uint64(INITTEXT)
sect.Vaddr = va
- for sym = Ctxt.Textp; sym != nil; sym = sym.Next {
+ for sym := Ctxt.Textp; sym != nil; sym = sym.Next {
sym.Sect = sect
if sym.Type&SSUB != 0 {
continue
@@ -1664,26 +1597,11 @@
// assign addresses
func address() {
- var s *Section
- var text *Section
- var data *Section
- var rodata *Section
- var symtab *Section
- var pclntab *Section
- var noptr *Section
- var bss *Section
- var noptrbss *Section
- var typelink *Section
- var sym *LSym
- var sub *LSym
- var va uint64
- var vlen int64
-
- va = uint64(INITTEXT)
+ va := uint64(INITTEXT)
Segtext.Rwx = 05
Segtext.Vaddr = va
Segtext.Fileoff = uint64(HEADR)
- for s = Segtext.Sect; s != nil; s = s.Next {
+ for s := Segtext.Sect; s != nil; s = s.Next {
va = uint64(Rnd(int64(va), int64(s.Align)))
s.Vaddr = va
va += s.Length
@@ -1704,7 +1622,7 @@
Segrodata.Vaddr = va
Segrodata.Fileoff = va - Segtext.Vaddr + Segtext.Fileoff
Segrodata.Filelen = 0
- for s = Segrodata.Sect; s != nil; s = s.Next {
+ for s := Segrodata.Sect; s != nil; s = s.Next {
va = uint64(Rnd(int64(va), int64(s.Align)))
s.Vaddr = va
va += s.Length
@@ -1725,11 +1643,12 @@
if HEADTYPE == Hplan9 {
Segdata.Fileoff = Segtext.Fileoff + Segtext.Filelen
}
- data = nil
- noptr = nil
- bss = nil
- noptrbss = nil
- for s = Segdata.Sect; s != nil; s = s.Next {
+ data := (*Section)(nil)
+ noptr := (*Section)(nil)
+ bss := (*Section)(nil)
+ noptrbss := (*Section)(nil)
+ var vlen int64
+ for s := Segdata.Sect; s != nil; s = s.Next {
vlen = int64(s.Length)
if s.Next != nil {
vlen = int64(s.Next.Vaddr - s.Vaddr)
@@ -1753,17 +1672,19 @@
Segdata.Filelen = bss.Vaddr - Segdata.Vaddr
- text = Segtext.Sect
+ text := Segtext.Sect
+ var rodata *Section
if Segrodata.Sect != nil {
rodata = Segrodata.Sect
} else {
rodata = text.Next
}
- typelink = rodata.Next
- symtab = typelink.Next
- pclntab = symtab.Next
+ typelink := rodata.Next
+ symtab := typelink.Next
+ pclntab := symtab.Next
- for sym = datap; sym != nil; sym = sym.Next {
+ var sub *LSym
+ for sym := datap; sym != nil; sym = sym.Next {
Ctxt.Cursym = sym
if sym.Sect != nil {
sym.Value += int64((sym.Sect.(*Section)).Vaddr)
@@ -1780,7 +1701,7 @@
xdefine("runtime.typelink", SRODATA, int64(typelink.Vaddr))
xdefine("runtime.etypelink", SRODATA, int64(typelink.Vaddr+typelink.Length))
- sym = Linklookup(Ctxt, "runtime.gcdata", 0)
+ sym := Linklookup(Ctxt, "runtime.gcdata", 0)
xdefine("runtime.egcdata", SRODATA, Symaddr(sym)+sym.Size)
Linklookup(Ctxt, "runtime.egcdata", 0).Sect = sym.Sect
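
(Aside, not part of this CL: every data.go hunk above has the same shape — the
leading block of var declarations is dropped and each local is declared at
first use with :=, or its loop index moves into the for clause. A miniature
before/after sketch of Addaddrplus-style code, using hypothetical stand-in
types rather than the linker's LSym/Reloc:)

	package main

	import "fmt"

	type reloc struct{ off, add int64 }

	type sym struct {
		size int64
		r    []reloc
	}

	func addrel(s *sym) *reloc {
		s.r = append(s.r, reloc{})
		return &s.r[len(s.r)-1]
	}

	// addPtrOld: the pre-cleanup shape, with locals declared up front.
	func addPtrOld(s *sym, add int64) int64 {
		var i int64
		var r *reloc

		i = s.size
		s.size += 8
		r = addrel(s)
		r.off = i
		r.add = add
		return i
	}

	// addPtrNew: the post-cleanup shape, := at first use and nothing declared early.
	func addPtrNew(s *sym, add int64) int64 {
		i := s.size
		s.size += 8
		r := addrel(s)
		r.off = i
		r.add = add
		return i
	}

	func main() {
		s := &sym{}
		fmt.Println(addPtrOld(s, 1), addPtrNew(s, 2), s.size) // 0 8 16
	}
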
diff --git a/src/cmd/internal/ld/decodesym.go b/src/cmd/internal/ld/decodesym.go
index c7b1a2f..74fa4b5 100644
--- a/src/cmd/internal/ld/decodesym.go
+++ b/src/cmd/internal/ld/decodesym.go
@@ -11,9 +11,7 @@
// ../gc/reflect.c stuffs in these.
func decode_reloc(s *LSym, off int32) *Reloc {
- var i int
-
- for i = 0; i < len(s.R); i++ {
+ for i := 0; i < len(s.R); i++ {
if s.R[i].Off == off {
return &s.R[i:][0]
}
@@ -22,9 +20,7 @@
}
func decode_reloc_sym(s *LSym, off int32) *LSym {
- var r *Reloc
-
- r = decode_reloc(s, off)
+ r := decode_reloc(s, off)
if r == nil {
return nil
}
@@ -75,9 +71,7 @@
}
func decodetype_gcmask(s *LSym) []byte {
- var mask *LSym
-
- mask = decode_reloc_sym(s, 1*int32(Thearch.Ptrsize)+8+1*int32(Thearch.Ptrsize))
+ mask := decode_reloc_sym(s, 1*int32(Thearch.Ptrsize)+8+1*int32(Thearch.Ptrsize))
return mask.P
}
@@ -124,9 +118,7 @@
}
func decodetype_funcintype(s *LSym, i int) *LSym {
- var r *Reloc
-
- r = decode_reloc(s, int32(commonsize())+int32(Thearch.Ptrsize))
+ r := decode_reloc(s, int32(commonsize())+int32(Thearch.Ptrsize))
if r == nil {
return nil
}
@@ -134,9 +126,7 @@
}
func decodetype_funcouttype(s *LSym, i int) *LSym {
- var r *Reloc
-
- r = decode_reloc(s, int32(commonsize())+2*int32(Thearch.Ptrsize)+2*int32(Thearch.Intsize))
+ r := decode_reloc(s, int32(commonsize())+2*int32(Thearch.Ptrsize)+2*int32(Thearch.Intsize))
if r == nil {
return nil
}
@@ -154,16 +144,14 @@
// Type.StructType.fields[]-> name, typ and offset.
func decodetype_structfieldname(s *LSym, i int) string {
- var r *Reloc
-
// go.string."foo" 0x28 / 0x40
s = decode_reloc_sym(s, int32(commonsize())+int32(Thearch.Ptrsize)+2*int32(Thearch.Intsize)+int32(i)*int32(structfieldsize()))
if s == nil { // embedded structs have a nil name.
return ""
}
- r = decode_reloc(s, 0) // s has a pointer to the string data at offset 0
- if r == nil { // shouldn't happen.
+ r := decode_reloc(s, 0) // s has a pointer to the string data at offset 0
+ if r == nil { // shouldn't happen.
return ""
}
return cstring(r.Sym.P[r.Add:])
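
(Aside, not part of this CL: decode_reloc above is a linear scan of s.R for a
relocation at a given offset; &s.R[i:][0] from the conversion is just &s.R[i].
A minimal sketch of the lookup with the loop index scoped to the for clause,
using hypothetical stand-in types:)

	package main

	import "fmt"

	type reloc struct {
		off int32
		sym string
	}

	type lsym struct{ r []reloc }

	// decodeReloc: scan for a matching offset, return a pointer into the slice
	// or nil if no relocation starts there.
	func decodeReloc(s *lsym, off int32) *reloc {
		for i := 0; i < len(s.r); i++ {
			if s.r[i].off == off {
				return &s.r[i]
			}
		}
		return nil
	}

	func main() {
		s := &lsym{r: []reloc{{off: 0, sym: "a"}, {off: 8, sym: "b"}}}
		if r := decodeReloc(s, 8); r != nil {
			fmt.Println(r.sym) // b
		}
		fmt.Println(decodeReloc(s, 4) == nil) // true
	}
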
diff --git a/src/cmd/internal/ld/dwarf.go b/src/cmd/internal/ld/dwarf.go
index 490795c..4d3715b 100644
--- a/src/cmd/internal/ld/dwarf.go
+++ b/src/cmd/internal/ld/dwarf.go
@@ -102,9 +102,8 @@
func uleb128enc(v uint64, dst []byte) int {
var c uint8
- var length uint8
- length = 0
+ length := uint8(0)
for {
c = uint8(v & 0x7f)
v >>= 7
@@ -127,9 +126,8 @@
func sleb128enc(v int64, dst []byte) int {
var c uint8
var s uint8
- var length uint8
- length = 0
+ length := uint8(0)
for {
c = uint8(v & 0x7f)
s = uint8(v & 0x40)
@@ -587,12 +585,11 @@
}
func writeabbrev() {
- var i int
var j int
var f *DWAttrForm
abbrevo = Cpos()
- for i = 1; i < DW_NABRV; i++ {
+ for i := 1; i < DW_NABRV; i++ {
// See section 7.5.3
uleb128put(int64(i))
@@ -620,9 +617,7 @@
)
func dwarfhashstr(s string) uint32 {
- var h uint32
-
- h = 0
+ h := uint32(0)
for s != "" {
h = h + h + h + uint32(s[0])
s = s[1:]
@@ -663,9 +658,7 @@
var dwglobals DWDie
func newattr(die *DWDie, attr uint16, cls int, value int64, data interface{}) *DWAttr {
- var a *DWAttr
-
- a = new(DWAttr)
+ a := new(DWAttr)
a.link = die.attr
die.attr = a
a.atr = attr
@@ -679,15 +672,12 @@
// name. getattr moves the desired one to the front so
// frequently searched ones are found faster.
func getattr(die *DWDie, attr uint16) *DWAttr {
- var a *DWAttr
- var b *DWAttr
-
if die.attr.atr == attr {
return die.attr
}
- a = die.attr
- b = a.link
+ a := die.attr
+ b := a.link
for b != nil {
if b.atr == attr {
a.link = b.link
@@ -707,10 +697,7 @@
// written out if it is listed in the abbrev). If its parent is
// keeping an index, the new DIE will be inserted there.
func newdie(parent *DWDie, abbrev int, name string) *DWDie {
- var die *DWDie
- var h int
-
- die = new(DWDie)
+ die := new(DWDie)
die.abbrev = abbrev
die.link = parent.child
parent.child = die
@@ -718,7 +705,7 @@
newattr(die, DW_AT_name, DW_CLS_STRING, int64(len(name)), name)
if parent.hash != nil {
- h = int(dwarfhashstr(name))
+ h := int(dwarfhashstr(name))
die.hlink = parent.hash[h]
parent.hash[h] = die
}
@@ -731,11 +718,9 @@
}
func walktypedef(die *DWDie) *DWDie {
- var attr *DWAttr
-
// Resolve typedef if present.
if die.abbrev == DW_ABRV_TYPEDECL {
- for attr = die.attr; attr != nil; attr = attr.link {
+ for attr := die.attr; attr != nil; attr = attr.link {
if attr.atr == DW_AT_type && attr.cls == DW_CLS_REFERENCE && attr.data != nil {
return attr.data.(*DWDie)
}
@@ -800,8 +785,7 @@
}
func find_or_diag(die *DWDie, name string) *DWDie {
- var r *DWDie
- r = find(die, name)
+ r := find(die, name)
if r == nil {
Diag("dwarf find: %s %p has no %s", getattr(die, DW_AT_name).data, die, name)
Errorexit()
@@ -811,9 +795,7 @@
}
func adddwarfrel(sec *LSym, sym *LSym, offsetbase int64, siz int, addend int64) {
- var r *Reloc
-
- r = Addrel(sec)
+ r := Addrel(sec)
r.Sym = sym
r.Xsym = sym
r.Off = int32(Cpos() - offsetbase)
@@ -846,10 +828,6 @@
var fwdcount int
func putattr(abbrev int, form int, cls int, value int64, data interface{}) {
- var off int64
- var p []byte
- var i int
-
switch form {
case DW_FORM_addr: // address
if Linkmode == LinkExternal {
@@ -876,8 +854,8 @@
value &= 0xff
Cput(uint8(value))
- p = data.([]byte)
- for i = 0; int64(i) < value; i++ {
+ p := data.([]byte)
+ for i := 0; int64(i) < value; i++ {
Cput(uint8(p[i]))
}
@@ -885,8 +863,8 @@
value &= 0xffff
Thearch.Wput(uint16(value))
- p = data.([]byte)
- for i = 0; int64(i) < value; i++ {
+ p := data.([]byte)
+ for i := 0; int64(i) < value; i++ {
Cput(uint8(p[i]))
}
@@ -894,16 +872,16 @@
value &= 0xffffffff
Thearch.Lput(uint32(value))
- p = data.([]byte)
- for i = 0; int64(i) < value; i++ {
+ p := data.([]byte)
+ for i := 0; int64(i) < value; i++ {
Cput(uint8(p[i]))
}
case DW_FORM_block: // block
uleb128put(value)
- p = data.([]byte)
- for i = 0; int64(i) < value; i++ {
+ p := data.([]byte)
+ for i := 0; int64(i) < value; i++ {
Cput(uint8(p[i]))
}
@@ -953,7 +931,7 @@
Thearch.Lput(0) // invalid dwarf, gdb will complain.
}
} else {
- off = (data.(*DWDie)).offs
+ off := (data.(*DWDie)).offs
if off == 0 {
fwdcount++
}
@@ -984,10 +962,9 @@
// Note that we can (and do) add arbitrary attributes to a DIE, but
// only the ones actually listed in the Abbrev will be written out.
func putattrs(abbrev int, attr *DWAttr) {
- var af []DWAttrForm
var ap *DWAttr
- for af = abbrevs[abbrev].attr[:]; af[0].attr != 0; af = af[1:] {
+ for af := abbrevs[abbrev].attr[:]; af[0].attr != 0; af = af[1:] {
for ap = attr; ap != nil; ap = ap.link {
if ap.atr == af[0].attr {
putattr(abbrev, int(af[0].form), int(ap.cls), ap.value, ap.data)
@@ -1017,11 +994,8 @@
}
func reverselist(list **DWDie) {
- var curr *DWDie
- var prev *DWDie
-
- curr = *list
- prev = nil
+ curr := *list
+ prev := (*DWDie)(nil)
for curr != nil {
var next *DWDie = curr.link
curr.link = prev
@@ -1033,10 +1007,8 @@
}
func reversetree(list **DWDie) {
- var die *DWDie
-
reverselist(list)
- for die = *list; die != nil; die = die.link {
+ for die := *list; die != nil; die = die.link {
if abbrevs[die.abbrev].children != 0 {
reversetree(&die.child)
}
@@ -1045,9 +1017,8 @@
func newmemberoffsetattr(die *DWDie, offs int32) {
var block [20]byte
- var i int
- i = 0
+ i := 0
block[i] = DW_OP_plus_uconst
i++
i += uleb128enc(uint64(offs), block[i:])
@@ -1063,9 +1034,7 @@
// Lookup predefined types
func lookup_or_diag(n string) *LSym {
- var s *LSym
-
- s = Linkrlookup(Ctxt, n, 0)
+ s := Linkrlookup(Ctxt, n, 0)
if s == nil || s.Size == 0 {
Diag("dwarf: missing type: %s", n)
Errorexit()
@@ -1075,8 +1044,6 @@
}
func dotypedef(parent *DWDie, name string, def *DWDie) {
- var die *DWDie
-
// Only emit typedefs for real names.
if strings.HasPrefix(name, "map[") {
return
@@ -1098,23 +1065,13 @@
// so that future lookups will find the typedef instead
// of the real definition. This hooks the typedef into any
// circular definition loops, so that gdb can understand them.
- die = newdie(parent, DW_ABRV_TYPEDECL, name)
+ die := newdie(parent, DW_ABRV_TYPEDECL, name)
newrefattr(die, DW_AT_type, def)
}
// Define gotype, for composite ones recurse into constituents.
func defgotype(gotype *LSym) *DWDie {
- var die *DWDie
- var fld *DWDie
- var s *LSym
- var name string
- var f string
- var kind uint8
- var bytesize int64
- var i int
- var nfields int
-
if gotype == nil {
return find_or_diag(&dwtypes, "<unspecified>")
}
@@ -1124,9 +1081,9 @@
return find_or_diag(&dwtypes, "<unspecified>")
}
- name = gotype.Name[5:] // could also decode from Type.string
+ name := gotype.Name[5:] // could also decode from Type.string
- die = find(&dwtypes, name)
+ die := find(&dwtypes, name)
if die != nil {
return die
@@ -1136,8 +1093,8 @@
fmt.Printf("new type: %%Y\n", gotype)
}
- kind = decodetype_kind(gotype)
- bytesize = decodetype_size(gotype)
+ kind := decodetype_kind(gotype)
+ bytesize := decodetype_size(gotype)
switch kind {
case obj.KindBool:
@@ -1180,9 +1137,9 @@
die = newdie(&dwtypes, DW_ABRV_ARRAYTYPE, name)
dotypedef(&dwtypes, name, die)
newattr(die, DW_AT_byte_size, DW_CLS_CONSTANT, bytesize, 0)
- s = decodetype_arrayelem(gotype)
+ s := decodetype_arrayelem(gotype)
newrefattr(die, DW_AT_type, defgotype(s))
- fld = newdie(die, DW_ABRV_ARRAYRANGE, "range")
+ fld := newdie(die, DW_ABRV_ARRAYRANGE, "range")
// use actual length not upper bound; correct for 0-length arrays.
newattr(fld, DW_AT_count, DW_CLS_CONSTANT, decodetype_arraylen(gotype), 0)
@@ -1192,15 +1149,17 @@
case obj.KindChan:
die = newdie(&dwtypes, DW_ABRV_CHANTYPE, name)
newattr(die, DW_AT_byte_size, DW_CLS_CONSTANT, bytesize, 0)
- s = decodetype_chanelem(gotype)
+ s := decodetype_chanelem(gotype)
newrefattr(die, DW_AT_go_elem, defgotype(s))
case obj.KindFunc:
die = newdie(&dwtypes, DW_ABRV_FUNCTYPE, name)
dotypedef(&dwtypes, name, die)
newrefattr(die, DW_AT_type, find_or_diag(&dwtypes, "void"))
- nfields = decodetype_funcincount(gotype)
- for i = 0; i < nfields; i++ {
+ nfields := decodetype_funcincount(gotype)
+ var fld *DWDie
+ var s *LSym
+ for i := 0; i < nfields; i++ {
s = decodetype_funcintype(gotype, i)
fld = newdie(die, DW_ABRV_FUNCTYPEPARAM, s.Name[5:])
newrefattr(fld, DW_AT_type, defgotype(s))
@@ -1210,7 +1169,7 @@
newdie(die, DW_ABRV_DOTDOTDOT, "...")
}
nfields = decodetype_funcoutcount(gotype)
- for i = 0; i < nfields; i++ {
+ for i := 0; i < nfields; i++ {
s = decodetype_funcouttype(gotype, i)
fld = newdie(die, DW_ABRV_FUNCTYPEPARAM, s.Name[5:])
newrefattr(fld, DW_AT_type, defptrto(defgotype(s)))
@@ -1220,7 +1179,8 @@
die = newdie(&dwtypes, DW_ABRV_IFACETYPE, name)
dotypedef(&dwtypes, name, die)
newattr(die, DW_AT_byte_size, DW_CLS_CONSTANT, bytesize, 0)
- nfields = int(decodetype_ifacemethodcount(gotype))
+ nfields := int(decodetype_ifacemethodcount(gotype))
+ var s *LSym
if nfields == 0 {
s = lookup_or_diag("type.runtime.eface")
} else {
@@ -1230,7 +1190,7 @@
case obj.KindMap:
die = newdie(&dwtypes, DW_ABRV_MAPTYPE, name)
- s = decodetype_mapkey(gotype)
+ s := decodetype_mapkey(gotype)
newrefattr(die, DW_AT_go_key, defgotype(s))
s = decodetype_mapvalue(gotype)
newrefattr(die, DW_AT_go_elem, defgotype(s))
@@ -1238,14 +1198,14 @@
case obj.KindPtr:
die = newdie(&dwtypes, DW_ABRV_PTRTYPE, name)
dotypedef(&dwtypes, name, die)
- s = decodetype_ptrelem(gotype)
+ s := decodetype_ptrelem(gotype)
newrefattr(die, DW_AT_type, defgotype(s))
case obj.KindSlice:
die = newdie(&dwtypes, DW_ABRV_SLICETYPE, name)
dotypedef(&dwtypes, name, die)
newattr(die, DW_AT_byte_size, DW_CLS_CONSTANT, bytesize, 0)
- s = decodetype_arrayelem(gotype)
+ s := decodetype_arrayelem(gotype)
newrefattr(die, DW_AT_go_elem, defgotype(s))
case obj.KindString:
@@ -1256,8 +1216,11 @@
die = newdie(&dwtypes, DW_ABRV_STRUCTTYPE, name)
dotypedef(&dwtypes, name, die)
newattr(die, DW_AT_byte_size, DW_CLS_CONSTANT, bytesize, 0)
- nfields = decodetype_structfieldcount(gotype)
- for i = 0; i < nfields; i++ {
+ nfields := decodetype_structfieldcount(gotype)
+ var f string
+ var fld *DWDie
+ var s *LSym
+ for i := 0; i < nfields; i++ {
f = decodetype_structfieldname(gotype, i)
s = decodetype_structfieldtype(gotype, i)
if f == "" {
@@ -1284,11 +1247,8 @@
// Find or construct *T given T.
func defptrto(dwtype *DWDie) *DWDie {
- var ptrname string
- var die *DWDie
-
- ptrname = fmt.Sprintf("*%s", getattr(dwtype, DW_AT_name).data)
- die = find(&dwtypes, ptrname)
+ ptrname := fmt.Sprintf("*%s", getattr(dwtype, DW_AT_name).data)
+ die := find(&dwtypes, ptrname)
if die == nil {
die = newdie(&dwtypes, DW_ABRV_PTRTYPE, ptrname)
newrefattr(die, DW_AT_type, dwtype)
@@ -1325,15 +1285,12 @@
// Search children (assumed to have DW_TAG_member) for the one named
// field and set its DW_AT_type to dwtype
func substitutetype(structdie *DWDie, field string, dwtype *DWDie) {
- var child *DWDie
- var a *DWAttr
-
- child = find_or_diag(structdie, field)
+ child := find_or_diag(structdie, field)
if child == nil {
return
}
- a = getattr(child, DW_AT_type)
+ a := getattr(child, DW_AT_type)
if a != nil {
a.data = dwtype
} else {
@@ -1342,9 +1299,7 @@
}
func synthesizestringtypes(die *DWDie) {
- var prototype *DWDie
-
- prototype = walktypedef(defgotype(lookup_or_diag("type.runtime._string")))
+ prototype := walktypedef(defgotype(lookup_or_diag("type.runtime._string")))
if prototype == nil {
return
}
@@ -1358,14 +1313,12 @@
}
func synthesizeslicetypes(die *DWDie) {
- var prototype *DWDie
- var elem *DWDie
-
- prototype = walktypedef(defgotype(lookup_or_diag("type.runtime.slice")))
+ prototype := walktypedef(defgotype(lookup_or_diag("type.runtime.slice")))
if prototype == nil {
return
}
+ var elem *DWDie
for ; die != nil; die = die.link {
if die.abbrev != DW_ABRV_SLICETYPE {
continue
@@ -1378,14 +1331,13 @@
func mkinternaltypename(base string, arg1 string, arg2 string) string {
var buf string
- var n string
if arg2 == "" {
buf = fmt.Sprintf("%s<%s>", base, arg1)
} else {
buf = fmt.Sprintf("%s<%s,%s>", base, arg1, arg2)
}
- n = buf
+ n := buf
return n
}
@@ -1397,29 +1349,26 @@
)
func synthesizemaptypes(die *DWDie) {
- var hash *DWDie
- var bucket *DWDie
- var dwh *DWDie
- var dwhk *DWDie
- var dwhv *DWDie
- var dwhb *DWDie
- var keytype *DWDie
- var valtype *DWDie
- var fld *DWDie
- var t *DWDie
- var indirect_key int
- var indirect_val int
- var keysize int
- var valsize int
- var a *DWAttr
-
- hash = walktypedef(defgotype(lookup_or_diag("type.runtime.hmap")))
- bucket = walktypedef(defgotype(lookup_or_diag("type.runtime.bmap")))
+ hash := walktypedef(defgotype(lookup_or_diag("type.runtime.hmap")))
+ bucket := walktypedef(defgotype(lookup_or_diag("type.runtime.bmap")))
if hash == nil {
return
}
+ var a *DWAttr
+ var dwh *DWDie
+ var dwhb *DWDie
+ var dwhk *DWDie
+ var dwhv *DWDie
+ var fld *DWDie
+ var indirect_key int
+ var indirect_val int
+ var keysize int
+ var keytype *DWDie
+ var t *DWDie
+ var valsize int
+ var valtype *DWDie
for ; die != nil; die = die.link {
if die.abbrev != DW_ABRV_MAPTYPE {
continue
@@ -1518,26 +1467,21 @@
}
func synthesizechantypes(die *DWDie) {
- var sudog *DWDie
- var waitq *DWDie
- var hchan *DWDie
- var dws *DWDie
- var dww *DWDie
- var dwh *DWDie
- var elemtype *DWDie
- var a *DWAttr
- var elemsize int
- var sudogsize int
-
- sudog = walktypedef(defgotype(lookup_or_diag("type.runtime.sudog")))
- waitq = walktypedef(defgotype(lookup_or_diag("type.runtime.waitq")))
- hchan = walktypedef(defgotype(lookup_or_diag("type.runtime.hchan")))
+ sudog := walktypedef(defgotype(lookup_or_diag("type.runtime.sudog")))
+ waitq := walktypedef(defgotype(lookup_or_diag("type.runtime.waitq")))
+ hchan := walktypedef(defgotype(lookup_or_diag("type.runtime.hchan")))
if sudog == nil || waitq == nil || hchan == nil {
return
}
- sudogsize = int(getattr(sudog, DW_AT_byte_size).value)
+ sudogsize := int(getattr(sudog, DW_AT_byte_size).value)
+ var a *DWAttr
+ var dwh *DWDie
+ var dws *DWDie
+ var dww *DWDie
+ var elemsize int
+ var elemtype *DWDie
for ; die != nil; die = die.link {
if die.abbrev != DW_ABRV_CHANTYPE {
continue
@@ -1584,9 +1528,6 @@
// For use with pass.c::genasmsym
func defdwsymb(sym *LSym, s string, t int, v int64, size int64, ver int, gotype *LSym) {
- var dv *DWDie
- var dt *DWDie
-
if strings.HasPrefix(s, "go.string.") {
return
}
@@ -1596,8 +1537,9 @@
return
}
- dv = nil
+ dv := (*DWDie)(nil)
+ var dt *DWDie
switch t {
default:
return
@@ -1625,9 +1567,7 @@
}
func movetomodule(parent *DWDie) {
- var die *DWDie
-
- die = dwroot.child.child
+ die := dwroot.child.child
for die.link != nil {
die = die.link
}
@@ -1636,15 +1576,13 @@
// If the pcln table contains runtime/runtime.go, use that to set gdbscript path.
func finddebugruntimepath(s *LSym) {
- var i int
- var p string
- var f *LSym
-
if gdbscript != "" {
return
}
- for i = 0; i < s.Pcln.Nfile; i++ {
+ var f *LSym
+ var p string
+ for i := 0; i < s.Pcln.Nfile; i++ {
f = s.Pcln.File[i]
_ = p
if i := strings.Index(f.Name, "runtime/runtime.go"); i >= 0 {
@@ -1685,9 +1623,8 @@
func newcfaoffsetattr(die *DWDie, offs int32) {
var block [20]byte
- var i int
- i = 0
+ i := 0
block[i] = DW_OP_call_frame_cfa
i++
@@ -1703,11 +1640,8 @@
}
func mkvarname(name string, da int) string {
- var buf string
- var n string
-
- buf = fmt.Sprintf("%s#%d", name, da)
- n = buf
+ buf := fmt.Sprintf("%s#%d", name, da)
+ n := buf
return n
}
@@ -1717,8 +1651,6 @@
// flush previous compilation unit.
func flushunit(dwinfo *DWDie, pc int64, pcsym *LSym, unitstart int64, header_length int32) {
- var here int64
-
if dwinfo != nil && pc != 0 {
newattr(dwinfo, DW_AT_high_pc, DW_CLS_ADDRESS, pc+1, pcsym)
}
@@ -1728,7 +1660,7 @@
uleb128put(1)
Cput(DW_LNE_end_sequence)
- here = Cpos()
+ here := Cpos()
Cseek(unitstart)
Thearch.Lput(uint32(here - unitstart - 4)) // unit_length
Thearch.Wput(2) // dwarf version
@@ -1738,50 +1670,24 @@
}
func writelines() {
- var s *LSym
- var epcs *LSym
- var a *Auto
- var unitstart int64
- var headerend int64
- var offs int64
- var pc int64
- var epc int64
- var i int
- var lang int
- var da int
- var dt int
- var line int
- var file int
- var dwinfo *DWDie
- var dwfunc *DWDie
- var dwvar *DWDie
- var dws **DWDie
- var varhash [HASHSIZE]*DWDie
- var n string
- var nn string
- var pcfile Pciter
- var pcline Pciter
- var files []*LSym
- var f *LSym
-
if linesec == nil {
linesec = Linklookup(Ctxt, ".dwarfline", 0)
}
linesec.R = linesec.R[:0]
- unitstart = -1
- headerend = -1
- epc = 0
- epcs = nil
+ unitstart := int64(-1)
+ headerend := int64(-1)
+ epc := int64(0)
+ epcs := (*LSym)(nil)
lineo = Cpos()
- dwinfo = nil
+ dwinfo := (*DWDie)(nil)
flushunit(dwinfo, epc, epcs, unitstart, int32(headerend-unitstart-10))
unitstart = Cpos()
- lang = DW_LANG_Go
+ lang := DW_LANG_Go
- s = Ctxt.Textp
+ s := Ctxt.Textp
dwinfo = newdie(&dwroot, DW_ABRV_COMPUNIT, "go")
newattr(dwinfo, DW_AT_language, DW_CLS_CONSTANT, int64(lang), 0)
@@ -1811,13 +1717,13 @@
Cput(1) // standard_opcode_lengths[9]
Cput(0) // include_directories (empty)
- files = make([]*LSym, Ctxt.Nhistfile)
+ files := make([]*LSym, Ctxt.Nhistfile)
- for f = Ctxt.Filesyms; f != nil; f = f.Next {
+ for f := Ctxt.Filesyms; f != nil; f = f.Next {
files[f.Value-1] = f
}
- for i = 0; int32(i) < Ctxt.Nhistfile; i++ {
+ for i := 0; int32(i) < Ctxt.Nhistfile; i++ {
strnput(files[i].Name, len(files[i].Name)+4)
}
@@ -1830,15 +1736,27 @@
uleb128put(1 + int64(Thearch.Ptrsize))
Cput(DW_LNE_set_address)
- pc = s.Value
- line = 1
- file = 1
+ pc := s.Value
+ line := 1
+ file := 1
if Linkmode == LinkExternal {
adddwarfrel(linesec, s, lineo, Thearch.Ptrsize, 0)
} else {
addrput(pc)
}
+ var a *Auto
+ var da int
+ var dt int
+ var dwfunc *DWDie
+ var dws **DWDie
+ var dwvar *DWDie
+ var n string
+ var nn string
+ var offs int64
+ var pcfile Pciter
+ var pcline Pciter
+ var varhash [HASHSIZE]*DWDie
for Ctxt.Cursym = Ctxt.Textp; Ctxt.Cursym != nil; Ctxt.Cursym = Ctxt.Cursym.Next {
s = Ctxt.Cursym
@@ -1975,13 +1893,6 @@
}
func writeframes() {
- var s *LSym
- var fdeo int64
- var fdesize int64
- var pad int64
- var pcsp Pciter
- var nextpc uint32
-
if framesec == nil {
framesec = Linklookup(Ctxt, ".dwarfframe", 0)
}
@@ -2006,7 +1917,7 @@
uleb128put(int64(-Thearch.Ptrsize) / DATAALIGNMENTFACTOR) // at cfa - x*4
// 4 is to exclude the length field.
- pad = CIERESERVE + frameo + 4 - Cpos()
+ pad := CIERESERVE + frameo + 4 - Cpos()
if pad < 0 {
Diag("dwarf: CIERESERVE too small by %d bytes.", -pad)
@@ -2015,6 +1926,11 @@
strnput("", int(pad))
+ var fdeo int64
+ var fdesize int64
+ var nextpc uint32
+ var pcsp Pciter
+ var s *LSym
for Ctxt.Cursym = Ctxt.Textp; Ctxt.Cursym != nil; Ctxt.Cursym = Ctxt.Cursym.Next {
s = Ctxt.Cursym
if s.Pcln == nil {
@@ -2077,10 +1993,6 @@
)
func writeinfo() {
- var compunit *DWDie
- var unitstart int64
- var here int64
-
fwdcount = 0
if infosec == nil {
infosec = Linklookup(Ctxt, ".dwarfinfo", 0)
@@ -2092,7 +2004,9 @@
}
arangessec.R = arangessec.R[:0]
- for compunit = dwroot.child; compunit != nil; compunit = compunit.link {
+ var here int64
+ var unitstart int64
+ for compunit := dwroot.child; compunit != nil; compunit = compunit.link {
unitstart = Cpos()
// Write .debug_info Compilation Unit Header (sec 7.5.1)
@@ -2126,12 +2040,10 @@
* because we need die->offs and infoo/infosize;
*/
func ispubname(die *DWDie) bool {
- var a *DWAttr
-
switch die.abbrev {
case DW_ABRV_FUNCTION,
DW_ABRV_VARIABLE:
- a = getattr(die, DW_AT_external)
+ a := getattr(die, DW_AT_external)
return a != nil && a.value != 0
}
@@ -2143,17 +2055,15 @@
}
func writepub(ispub func(*DWDie) bool) int64 {
- var compunit *DWDie
var die *DWDie
var dwa *DWAttr
var unitstart int64
var unitend int64
- var sectionstart int64
var here int64
- sectionstart = Cpos()
+ sectionstart := Cpos()
- for compunit = dwroot.child; compunit != nil; compunit = compunit.link {
+ for compunit := dwroot.child; compunit != nil; compunit = compunit.link {
unitstart = compunit.offs - COMPUNITHEADERSIZE
if compunit.link != nil {
unitend = compunit.link.offs - COMPUNITHEADERSIZE
@@ -2192,17 +2102,14 @@
* because we need die->offs of dw_globals.
*/
func writearanges() int64 {
- var compunit *DWDie
var b *DWAttr
var e *DWAttr
- var headersize int
- var sectionstart int64
var value int64
- sectionstart = Cpos()
- headersize = int(Rnd(4+2+4+1+1, int64(Thearch.Ptrsize))) // don't count unit_length field itself
+ sectionstart := Cpos()
+ headersize := int(Rnd(4+2+4+1+1, int64(Thearch.Ptrsize))) // don't count unit_length field itself
- for compunit = dwroot.child; compunit != nil; compunit = compunit.link {
+ for compunit := dwroot.child; compunit != nil; compunit = compunit.link {
b = getattr(compunit, DW_AT_low_pc)
if b == nil {
continue
@@ -2243,9 +2150,7 @@
}
func writegdbscript() int64 {
- var sectionstart int64
-
- sectionstart = Cpos()
+ sectionstart := Cpos()
if gdbscript != "" {
Cput(1) // magic 1 byte?
@@ -2264,12 +2169,10 @@
func writedwarfreloc(s *LSym) int64 {
var i int
- var ri int
- var start int64
var r *Reloc
- start = Cpos()
- for ri = 0; ri < len(s.R); ri++ {
+ start := Cpos()
+ for ri := 0; ri < len(s.R); ri++ {
r = &s.R[ri]
if Iself {
i = Thearch.Elfreloc1(r, int64(r.Off))
@@ -2296,9 +2199,6 @@
*
*/
func Dwarfemitdebugsections() {
- var infoe int64
- var die *DWDie
-
if Debug['w'] != 0 { // disable dwarf
return
}
@@ -2320,7 +2220,7 @@
newdie(&dwtypes, DW_ABRV_NULLTYPE, "void")
newdie(&dwtypes, DW_ABRV_BARE_PTRTYPE, "unsafe.Pointer")
- die = newdie(&dwtypes, DW_ABRV_BASETYPE, "uintptr") // needed for array size
+ die := newdie(&dwtypes, DW_ABRV_BASETYPE, "uintptr") // needed for array size
newattr(die, DW_AT_encoding, DW_CLS_CONSTANT, DW_ATE_unsigned, 0)
newattr(die, DW_AT_byte_size, DW_CLS_CONSTANT, int64(Thearch.Ptrsize), 0)
newattr(die, DW_AT_go_kind, DW_CLS_CONSTANT, obj.KindUintptr, 0)
@@ -2354,7 +2254,7 @@
infoo = Cpos()
writeinfo()
- infoe = Cpos()
+ infoe := Cpos()
pubnameso = infoe
pubtypeso = infoe
arangeso = infoe
@@ -2510,9 +2410,7 @@
}
func dwarfaddelfrelocheader(elfstr int, shdata *ElfShdr, off int64, size int64) {
- var sh *ElfShdr
-
- sh = newElfShdr(elfstrdbg[elfstr])
+ sh := newElfShdr(elfstrdbg[elfstr])
if Thearch.Thechar == '6' || Thearch.Thechar == '9' {
sh.type_ = SHT_RELA
} else {
@@ -2531,17 +2429,11 @@
}
func dwarfaddelfheaders() {
- var sh *ElfShdr
- var shinfo *ElfShdr
- var sharanges *ElfShdr
- var shline *ElfShdr
- var shframe *ElfShdr
-
if Debug['w'] != 0 { // disable dwarf
return
}
- sh = newElfShdr(elfstrdbg[ElfStrDebugAbbrev])
+ sh := newElfShdr(elfstrdbg[ElfStrDebugAbbrev])
sh.type_ = SHT_PROGBITS
sh.off = uint64(abbrevo)
sh.size = uint64(abbrevsize)
@@ -2558,7 +2450,7 @@
if linesympos > 0 {
putelfsymshndx(linesympos, sh.shnum)
}
- shline = sh
+ shline := sh
sh = newElfShdr(elfstrdbg[ElfStrDebugFrame])
sh.type_ = SHT_PROGBITS
@@ -2568,7 +2460,7 @@
if framesympos > 0 {
putelfsymshndx(framesympos, sh.shnum)
}
- shframe = sh
+ shframe := sh
sh = newElfShdr(elfstrdbg[ElfStrDebugInfo])
sh.type_ = SHT_PROGBITS
@@ -2578,10 +2470,10 @@
if infosympos > 0 {
putelfsymshndx(infosympos, sh.shnum)
}
- shinfo = sh
+ shinfo := sh
if pubnamessize > 0 {
- sh = newElfShdr(elfstrdbg[ElfStrDebugPubNames])
+ sh := newElfShdr(elfstrdbg[ElfStrDebugPubNames])
sh.type_ = SHT_PROGBITS
sh.off = uint64(pubnameso)
sh.size = uint64(pubnamessize)
@@ -2589,16 +2481,16 @@
}
if pubtypessize > 0 {
- sh = newElfShdr(elfstrdbg[ElfStrDebugPubTypes])
+ sh := newElfShdr(elfstrdbg[ElfStrDebugPubTypes])
sh.type_ = SHT_PROGBITS
sh.off = uint64(pubtypeso)
sh.size = uint64(pubtypessize)
sh.addralign = 1
}
- sharanges = nil
+ sharanges := (*ElfShdr)(nil)
if arangessize != 0 {
- sh = newElfShdr(elfstrdbg[ElfStrDebugAranges])
+ sh := newElfShdr(elfstrdbg[ElfStrDebugAranges])
sh.type_ = SHT_PROGBITS
sh.off = uint64(arangeso)
sh.size = uint64(arangessize)
@@ -2607,7 +2499,7 @@
}
if gdbscriptsize != 0 {
- sh = newElfShdr(elfstrdbg[ElfStrGDBScripts])
+ sh := newElfShdr(elfstrdbg[ElfStrGDBScripts])
sh.type_ = SHT_PROGBITS
sh.off = uint64(gdbscripto)
sh.size = uint64(gdbscriptsize)
@@ -2635,20 +2527,15 @@
* Macho
*/
func dwarfaddmachoheaders() {
- var msect *MachoSect
- var ms *MachoSeg
- var fakestart int64
- var nsect int
-
if Debug['w'] != 0 { // disable dwarf
return
}
// Zero vsize segments won't be loaded in memory, even so they
// have to be page aligned in the file.
- fakestart = abbrevo &^ 0xfff
+ fakestart := abbrevo &^ 0xfff
- nsect = 4
+ nsect := 4
if pubnamessize > 0 {
nsect++
}
@@ -2662,12 +2549,12 @@
nsect++
}
- ms = newMachoSeg("__DWARF", nsect)
+ ms := newMachoSeg("__DWARF", nsect)
ms.fileoffset = uint64(fakestart)
ms.filesize = uint64(abbrevo) - uint64(fakestart)
ms.vaddr = ms.fileoffset + Segdata.Vaddr - Segdata.Fileoff
- msect = newMachoSect(ms, "__debug_abbrev", "__DWARF")
+ msect := newMachoSect(ms, "__debug_abbrev", "__DWARF")
msect.off = uint32(abbrevo)
msect.size = uint64(abbrevsize)
msect.addr = uint64(msect.off) + Segdata.Vaddr - Segdata.Fileoff
@@ -2692,7 +2579,7 @@
ms.filesize += msect.size
if pubnamessize > 0 {
- msect = newMachoSect(ms, "__debug_pubnames", "__DWARF")
+ msect := newMachoSect(ms, "__debug_pubnames", "__DWARF")
msect.off = uint32(pubnameso)
msect.size = uint64(pubnamessize)
msect.addr = uint64(msect.off) + Segdata.Vaddr - Segdata.Fileoff
@@ -2700,7 +2587,7 @@
}
if pubtypessize > 0 {
- msect = newMachoSect(ms, "__debug_pubtypes", "__DWARF")
+ msect := newMachoSect(ms, "__debug_pubtypes", "__DWARF")
msect.off = uint32(pubtypeso)
msect.size = uint64(pubtypessize)
msect.addr = uint64(msect.off) + Segdata.Vaddr - Segdata.Fileoff
@@ -2708,7 +2595,7 @@
}
if arangessize > 0 {
- msect = newMachoSect(ms, "__debug_aranges", "__DWARF")
+ msect := newMachoSect(ms, "__debug_aranges", "__DWARF")
msect.off = uint32(arangeso)
msect.size = uint64(arangessize)
msect.addr = uint64(msect.off) + Segdata.Vaddr - Segdata.Fileoff
@@ -2717,7 +2604,7 @@
// TODO(lvd) fix gdb/python to load MachO (16 char section name limit)
if gdbscriptsize > 0 {
- msect = newMachoSect(ms, "__debug_gdb_scripts", "__DWARF")
+ msect := newMachoSect(ms, "__debug_gdb_scripts", "__DWARF")
msect.off = uint32(gdbscripto)
msect.size = uint64(gdbscriptsize)
msect.addr = uint64(msect.off) + Segdata.Vaddr - Segdata.Fileoff
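
(Aside, not part of this CL: uleb128enc above emits DWARF's unsigned LEB128 —
7 bits per byte, low bits first, high bit set on every byte but the last —
which is what the visible loop opening (v & 0x7f, v >>= 7) is doing. A
standalone sketch that appends to a slice instead of filling a fixed buffer:)

	package main

	import "fmt"

	// uleb128 appends the unsigned LEB128 encoding of v to dst.
	func uleb128(dst []byte, v uint64) []byte {
		for {
			c := uint8(v & 0x7f)
			v >>= 7
			if v != 0 {
				c |= 0x80 // more bytes follow
			}
			dst = append(dst, c)
			if v == 0 {
				return dst
			}
		}
	}

	func main() {
		fmt.Printf("% x\n", uleb128(nil, 624485)) // e5 8e 26 (the DWARF spec's example value)
	}
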
diff --git a/src/cmd/internal/ld/elf.go b/src/cmd/internal/ld/elf.go
index f6d1043..be66fab 100644
--- a/src/cmd/internal/ld/elf.go
+++ b/src/cmd/internal/ld/elf.go
@@ -804,13 +804,11 @@
}
func elf32phdr(e *ElfPhdr) {
- var frag int
-
if e.type_ == PT_LOAD {
// Correct ELF loaders will do this implicitly,
// but buggy ELF loaders like the one in some
// versions of QEMU won't.
- frag = int(e.vaddr & (e.align - 1))
+ frag := int(e.vaddr & (e.align - 1))
e.off -= uint64(frag)
e.vaddr -= uint64(frag)
@@ -856,16 +854,14 @@
}
func elfwriteshdrs() uint32 {
- var i int
-
if elf64 != 0 {
- for i = 0; i < int(ehdr.shnum); i++ {
+ for i := 0; i < int(ehdr.shnum); i++ {
elf64shdr(shdr[i])
}
return uint32(ehdr.shnum) * ELF64SHDRSIZE
}
- for i = 0; i < int(ehdr.shnum); i++ {
+ for i := 0; i < int(ehdr.shnum); i++ {
elf32shdr(shdr[i])
}
return uint32(ehdr.shnum) * ELF32SHDRSIZE
@@ -883,25 +879,21 @@
}
func elfwritephdrs() uint32 {
- var i int
-
if elf64 != 0 {
- for i = 0; i < int(ehdr.phnum); i++ {
+ for i := 0; i < int(ehdr.phnum); i++ {
elf64phdr(phdr[i])
}
return uint32(ehdr.phnum) * ELF64PHDRSIZE
}
- for i = 0; i < int(ehdr.phnum); i++ {
+ for i := 0; i < int(ehdr.phnum); i++ {
elf32phdr(phdr[i])
}
return uint32(ehdr.phnum) * ELF32PHDRSIZE
}
func newElfPhdr() *ElfPhdr {
- var e *ElfPhdr
-
- e = new(ElfPhdr)
+ e := new(ElfPhdr)
if ehdr.phnum >= NSECT {
Diag("too many phdrs")
} else {
@@ -917,9 +909,7 @@
}
func newElfShdr(name int64) *ElfShdr {
- var e *ElfShdr
-
- e = new(ElfShdr)
+ e := new(ElfShdr)
e.name = uint32(name)
e.shnum = int(ehdr.shnum)
if ehdr.shnum >= NSECT {
@@ -937,9 +927,7 @@
}
func elf64writehdr() uint32 {
- var i int
-
- for i = 0; i < EI_NIDENT; i++ {
+ for i := 0; i < EI_NIDENT; i++ {
Cput(ehdr.ident[i])
}
Thearch.Wput(ehdr.type_)
@@ -959,9 +947,7 @@
}
func elf32writehdr() uint32 {
- var i int
-
- for i = 0; i < EI_NIDENT; i++ {
+ for i := 0; i < EI_NIDENT; i++ {
Cput(ehdr.ident[i])
}
Thearch.Wput(ehdr.type_)
@@ -1037,10 +1023,8 @@
}
func elfinterp(sh *ElfShdr, startva uint64, resoff uint64, p string) int {
- var n int
-
interp = p
- n = len(interp) + 1
+ n := len(interp) + 1
sh.addr = startva + resoff - uint64(n)
sh.off = resoff - uint64(n)
sh.size = uint64(n)
@@ -1049,9 +1033,7 @@
}
func elfwriteinterp() int {
- var sh *ElfShdr
-
- sh = elfshname(".interp")
+ sh := elfshname(".interp")
Cseek(int64(sh.off))
coutbuf.w.WriteString(interp)
Cput(0)
@@ -1059,9 +1041,7 @@
}
func elfnote(sh *ElfShdr, startva uint64, resoff uint64, sz int) int {
- var n uint64
-
- n = 3*4 + uint64(sz) + resoff%4
+ n := 3*4 + uint64(sz) + resoff%4
sh.type_ = SHT_NOTE
sh.flags = SHF_ALLOC
@@ -1074,9 +1054,7 @@
}
func elfwritenotehdr(str string, namesz uint32, descsz uint32, tag uint32) *ElfShdr {
- var sh *ElfShdr
-
- sh = elfshname(str)
+ sh := elfshname(str)
// Write Elf_Note header.
Cseek(int64(sh.off))
@@ -1099,17 +1077,13 @@
var ELF_NOTE_NETBSD_NAME = []byte("NetBSD\x00")
func elfnetbsdsig(sh *ElfShdr, startva uint64, resoff uint64) int {
- var n int
-
- n = int(Rnd(ELF_NOTE_NETBSD_NAMESZ, 4) + Rnd(ELF_NOTE_NETBSD_DESCSZ, 4))
+ n := int(Rnd(ELF_NOTE_NETBSD_NAMESZ, 4) + Rnd(ELF_NOTE_NETBSD_DESCSZ, 4))
return elfnote(sh, startva, resoff, n)
}
func elfwritenetbsdsig() int {
- var sh *ElfShdr
-
// Write Elf_Note header.
- sh = elfwritenotehdr(".note.netbsd.ident", ELF_NOTE_NETBSD_NAMESZ, ELF_NOTE_NETBSD_DESCSZ, ELF_NOTE_NETBSD_TAG)
+ sh := elfwritenotehdr(".note.netbsd.ident", ELF_NOTE_NETBSD_NAMESZ, ELF_NOTE_NETBSD_DESCSZ, ELF_NOTE_NETBSD_TAG)
if sh == nil {
return 0
@@ -1135,17 +1109,13 @@
var ELF_NOTE_OPENBSD_NAME = []byte("OpenBSD\x00")
func elfopenbsdsig(sh *ElfShdr, startva uint64, resoff uint64) int {
- var n int
-
- n = ELF_NOTE_OPENBSD_NAMESZ + ELF_NOTE_OPENBSD_DESCSZ
+ n := ELF_NOTE_OPENBSD_NAMESZ + ELF_NOTE_OPENBSD_DESCSZ
return elfnote(sh, startva, resoff, n)
}
func elfwriteopenbsdsig() int {
- var sh *ElfShdr
-
// Write Elf_Note header.
- sh = elfwritenotehdr(".note.openbsd.ident", ELF_NOTE_OPENBSD_NAMESZ, ELF_NOTE_OPENBSD_DESCSZ, ELF_NOTE_OPENBSD_TAG)
+ sh := elfwritenotehdr(".note.openbsd.ident", ELF_NOTE_OPENBSD_NAMESZ, ELF_NOTE_OPENBSD_DESCSZ, ELF_NOTE_OPENBSD_TAG)
if sh == nil {
return 0
@@ -1160,9 +1130,6 @@
}
func addbuildinfo(val string) {
- var ov string
- var i int
- var b int
var j int
if val[0] != '0' || val[1] != 'x' {
@@ -1170,9 +1137,10 @@
Exit(2)
}
- ov = val
+ ov := val
val = val[2:]
- i = 0
+ i := 0
+ var b int
for val != "" {
if len(val) == 1 {
fmt.Fprintf(os.Stderr, "%s: -B argument must have even number of digits: %s\n", os.Args[0], ov)
@@ -1216,16 +1184,12 @@
var ELF_NOTE_BUILDINFO_NAME = []byte("GNU\x00")
func elfbuildinfo(sh *ElfShdr, startva uint64, resoff uint64) int {
- var n int
-
- n = int(ELF_NOTE_BUILDINFO_NAMESZ + Rnd(int64(len(buildinfo)), 4))
+ n := int(ELF_NOTE_BUILDINFO_NAMESZ + Rnd(int64(len(buildinfo)), 4))
return elfnote(sh, startva, resoff, n)
}
func elfwritebuildinfo() int {
- var sh *ElfShdr
-
- sh = elfwritenotehdr(".note.gnu.build-id", ELF_NOTE_BUILDINFO_NAMESZ, uint32(len(buildinfo)), ELF_NOTE_BUILDINFO_TAG)
+ sh := elfwritenotehdr(".note.gnu.build-id", ELF_NOTE_BUILDINFO_NAMESZ, uint32(len(buildinfo)), ELF_NOTE_BUILDINFO_TAG)
if sh == nil {
return 0
}
@@ -1254,7 +1218,6 @@
func addelflib(list **Elflib, file string, vers string) *Elfaux {
var lib *Elflib
- var aux *Elfaux
for lib = *list; lib != nil; lib = lib.next {
if lib.file == file {
@@ -1267,75 +1230,59 @@
*list = lib
havelib:
- for aux = lib.aux; aux != nil; aux = aux.next {
+ for aux := lib.aux; aux != nil; aux = aux.next {
if aux.vers == vers {
- goto haveaux
+ return aux
}
}
- aux = new(Elfaux)
+ aux := new(Elfaux)
aux.next = lib.aux
aux.vers = vers
lib.aux = aux
-haveaux:
return aux
}
func elfdynhash() {
- var s *LSym
- var sy *LSym
- var dynstr *LSym
- var i int
- var j int
- var nbucket int
- var b int
- var nfile int
- var hc uint32
- var chain []uint32
- var buckets []uint32
- var nsym int
- var name string
- var need []*Elfaux
- var needlib *Elflib
- var l *Elflib
- var x *Elfaux
-
if !Iself {
return
}
- nsym = Nelfsym
- s = Linklookup(Ctxt, ".hash", 0)
+ nsym := Nelfsym
+ s := Linklookup(Ctxt, ".hash", 0)
s.Type = SELFROSECT
s.Reachable = true
- i = nsym
- nbucket = 1
+ i := nsym
+ nbucket := 1
for i > 0 {
nbucket++
i >>= 1
}
- needlib = nil
- need = make([]*Elfaux, nsym)
- chain = make([]uint32, nsym)
- buckets = make([]uint32, nbucket)
+ needlib := (*Elflib)(nil)
+ need := make([]*Elfaux, nsym)
+ chain := make([]uint32, nsym)
+ buckets := make([]uint32, nbucket)
if need == nil || chain == nil || buckets == nil {
Ctxt.Cursym = nil
Diag("out of memory")
Errorexit()
}
- for i = 0; i < nsym; i++ {
+ for i := 0; i < nsym; i++ {
need[i] = nil
}
- for i = 0; i < nsym; i++ {
+ for i := 0; i < nsym; i++ {
chain[i] = 0
}
- for i = 0; i < nbucket; i++ {
+ for i := 0; i < nbucket; i++ {
buckets[i] = 0
}
- for sy = Ctxt.Allsym; sy != nil; sy = sy.Allsym {
+ var b int
+ var hc uint32
+ var name string
+ for sy := Ctxt.Allsym; sy != nil; sy = sy.Allsym {
if sy.Dynid <= 0 {
continue
}
@@ -1354,20 +1301,22 @@
Adduint32(Ctxt, s, uint32(nbucket))
Adduint32(Ctxt, s, uint32(nsym))
- for i = 0; i < nbucket; i++ {
+ for i := 0; i < nbucket; i++ {
Adduint32(Ctxt, s, buckets[i])
}
- for i = 0; i < nsym; i++ {
+ for i := 0; i < nsym; i++ {
Adduint32(Ctxt, s, chain[i])
}
// version symbols
- dynstr = Linklookup(Ctxt, ".dynstr", 0)
+ dynstr := Linklookup(Ctxt, ".dynstr", 0)
s = Linklookup(Ctxt, ".gnu.version_r", 0)
i = 2
- nfile = 0
- for l = needlib; l != nil; l = l.next {
+ nfile := 0
+ var j int
+ var x *Elfaux
+ for l := needlib; l != nil; l = l.next {
nfile++
// header
@@ -1405,7 +1354,7 @@
// version references
s = Linklookup(Ctxt, ".gnu.version", 0)
- for i = 0; i < nsym; i++ {
+ for i := 0; i < nsym; i++ {
if i == 0 {
Adduint16(Ctxt, s, 0) // first entry - no symbol
} else if need[i] == nil {
@@ -1424,14 +1373,14 @@
}
if Thearch.Thechar == '6' || Thearch.Thechar == '9' {
- sy = Linklookup(Ctxt, ".rela.plt", 0)
+ sy := Linklookup(Ctxt, ".rela.plt", 0)
if sy.Size > 0 {
Elfwritedynent(s, DT_PLTREL, DT_RELA)
elfwritedynentsymsize(s, DT_PLTRELSZ, sy)
elfwritedynentsym(s, DT_JMPREL, sy)
}
} else {
- sy = Linklookup(Ctxt, ".rel.plt", 0)
+ sy := Linklookup(Ctxt, ".rel.plt", 0)
if sy.Size > 0 {
Elfwritedynent(s, DT_PLTREL, DT_REL)
elfwritedynentsymsize(s, DT_PLTRELSZ, sy)
@@ -1443,9 +1392,7 @@
}
func elfphload(seg *Segment) *ElfPhdr {
- var ph *ElfPhdr
-
- ph = newElfPhdr()
+ ph := newElfPhdr()
ph.type_ = PT_LOAD
if seg.Rwx&4 != 0 {
ph.flags |= PF_R
@@ -1467,45 +1414,37 @@
}
func elfshname(name string) *ElfShdr {
- var i int
var off int
var sh *ElfShdr
- for i = 0; i < nelfstr; i++ {
+ for i := 0; i < nelfstr; i++ {
if name == elfstr[i].s {
off = elfstr[i].off
- goto found
+ for i = 0; i < int(ehdr.shnum); i++ {
+ sh = shdr[i]
+ if sh.name == uint32(off) {
+ return sh
+ }
+ }
+
+ sh = newElfShdr(int64(off))
+ return sh
}
}
Diag("cannot find elf name %s", name)
Errorexit()
return nil
-
-found:
- for i = 0; i < int(ehdr.shnum); i++ {
- sh = shdr[i]
- if sh.name == uint32(off) {
- return sh
- }
- }
-
- sh = newElfShdr(int64(off))
- return sh
}
func elfshalloc(sect *Section) *ElfShdr {
- var sh *ElfShdr
-
- sh = elfshname(sect.Name)
+ sh := elfshname(sect.Name)
sect.Elfsect = sh
return sh
}
func elfshbits(sect *Section) *ElfShdr {
- var sh *ElfShdr
-
- sh = elfshalloc(sect)
+ sh := elfshalloc(sect)
if sh.type_ > 0 {
return sh
}
@@ -1540,11 +1479,6 @@
}
func elfshreloc(sect *Section) *ElfShdr {
- var typ int
- var sh *ElfShdr
- var prefix string
- var buf string
-
// If main section is SHT_NOBITS, nothing to relocate.
// Also nothing to relocate in .shstrtab.
if sect.Vaddr >= sect.Seg.Vaddr+sect.Seg.Filelen {
@@ -1554,6 +1488,8 @@
return nil
}
+ var prefix string
+ var typ int
if Thearch.Thechar == '6' || Thearch.Thechar == '9' {
prefix = ".rela"
typ = SHT_RELA
@@ -1562,8 +1498,8 @@
typ = SHT_REL
}
- buf = fmt.Sprintf("%s%s", prefix, sect.Name)
- sh = elfshname(buf)
+ buf := fmt.Sprintf("%s%s", prefix, sect.Name)
+ sh := elfshname(buf)
sh.type_ = uint32(typ)
sh.entsize = uint64(Thearch.Regsize) * 2
if typ == SHT_RELA {
@@ -1578,11 +1514,6 @@
}
func elfrelocsect(sect *Section, first *LSym) {
- var ri int
- var sym *LSym
- var eaddr int32
- var r *Reloc
-
// If main section is SHT_NOBITS, nothing to relocate.
// Also nothing to relocate in .shstrtab.
if sect.Vaddr >= sect.Seg.Vaddr+sect.Seg.Filelen {
@@ -1593,6 +1524,7 @@
}
sect.Reloff = uint64(Cpos())
+ var sym *LSym
for sym = first; sym != nil; sym = sym.Next {
if !sym.Reachable {
continue
@@ -1602,7 +1534,9 @@
}
}
- eaddr = int32(sect.Vaddr + sect.Length)
+ eaddr := int32(sect.Vaddr + sect.Length)
+ var r *Reloc
+ var ri int
for ; sym != nil; sym = sym.Next {
if !sym.Reachable {
continue
@@ -1635,35 +1569,29 @@
}
func Elfemitreloc() {
- var sect *Section
-
for Cpos()&7 != 0 {
Cput(0)
}
elfrelocsect(Segtext.Sect, Ctxt.Textp)
- for sect = Segtext.Sect.Next; sect != nil; sect = sect.Next {
+ for sect := Segtext.Sect.Next; sect != nil; sect = sect.Next {
elfrelocsect(sect, datap)
}
- for sect = Segrodata.Sect; sect != nil; sect = sect.Next {
+ for sect := Segrodata.Sect; sect != nil; sect = sect.Next {
elfrelocsect(sect, datap)
}
- for sect = Segdata.Sect; sect != nil; sect = sect.Next {
+ for sect := Segdata.Sect; sect != nil; sect = sect.Next {
elfrelocsect(sect, datap)
}
}
func doelf() {
- var s *LSym
- var shstrtab *LSym
- var dynstr *LSym
-
if !Iself {
return
}
/* predefine strings we need for section headers */
- shstrtab = Linklookup(Ctxt, ".shstrtab", 0)
+ shstrtab := Linklookup(Ctxt, ".shstrtab", 0)
shstrtab.Type = SELFROSECT
shstrtab.Reachable = true
@@ -1767,7 +1695,7 @@
Addstring(shstrtab, ".gnu.version_r")
/* dynamic symbol table - first entry all zeros */
- s = Linklookup(Ctxt, ".dynsym", 0)
+ s := Linklookup(Ctxt, ".dynsym", 0)
s.Type = SELFROSECT
s.Reachable = true
@@ -1785,7 +1713,7 @@
if s.Size == 0 {
Addstring(s, "")
}
- dynstr = s
+ dynstr := s
/* relocation table */
if Thearch.Thechar == '6' || Thearch.Thechar == '9' {
@@ -1804,7 +1732,7 @@
/* ppc64 glink resolver */
if Thearch.Thechar == '9' {
- s = Linklookup(Ctxt, ".glink", 0)
+ s := Linklookup(Ctxt, ".glink", 0)
s.Reachable = true
s.Type = SELFRXSECT
}
@@ -1901,8 +1829,7 @@
// Do not write DT_NULL. elfdynhash will finish it.
func shsym(sh *ElfShdr, s *LSym) {
- var addr int64
- addr = Symaddr(s)
+ addr := Symaddr(s)
if sh.flags&SHF_ALLOC != 0 {
sh.addr = uint64(addr)
}
@@ -1920,35 +1847,22 @@
}
func Asmbelfsetup() {
- var sect *Section
-
/* This null SHdr must appear before all others */
elfshname("")
- for sect = Segtext.Sect; sect != nil; sect = sect.Next {
+ for sect := Segtext.Sect; sect != nil; sect = sect.Next {
elfshalloc(sect)
}
- for sect = Segrodata.Sect; sect != nil; sect = sect.Next {
+ for sect := Segrodata.Sect; sect != nil; sect = sect.Next {
elfshalloc(sect)
}
- for sect = Segdata.Sect; sect != nil; sect = sect.Next {
+ for sect := Segdata.Sect; sect != nil; sect = sect.Next {
elfshalloc(sect)
}
}
func Asmbelf(symo int64) {
- var a int64
- var o int64
- var startva int64
- var resoff int64
- var eh *ElfEhdr
- var ph *ElfPhdr
- var pph *ElfPhdr
- var pnote *ElfPhdr
- var sh *ElfShdr
- var sect *Section
-
- eh = getElfEhdr()
+ eh := getElfEhdr()
switch Thearch.Thechar {
default:
Diag("unknown architecture in asmbelf")
@@ -1968,10 +1882,11 @@
eh.machine = EM_PPC64
}
- startva = INITTEXT - int64(HEADR)
- resoff = ELFRESERVE
+ startva := INITTEXT - int64(HEADR)
+ resoff := int64(ELFRESERVE)
- pph = nil
+ pph := (*ElfPhdr)(nil)
+ var pnote *ElfPhdr
if Linkmode == LinkExternal {
/* skip program headers */
eh.phoff = 0
@@ -1996,7 +1911,7 @@
* Except on NaCl where it must not be loaded.
*/
if HEADTYPE != Hnacl {
- o = int64(Segtext.Vaddr - pph.vaddr)
+ o := int64(Segtext.Vaddr - pph.vaddr)
Segtext.Vaddr -= uint64(o)
Segtext.Length += uint64(o)
o = int64(Segtext.Fileoff - pph.off)
@@ -2006,7 +1921,7 @@
if Debug['d'] == 0 {
/* interpreter */
- sh = elfshname(".interp")
+ sh := elfshname(".interp")
sh.type_ = SHT_PROGBITS
sh.flags = SHF_ALLOC
@@ -2035,7 +1950,7 @@
resoff -= int64(elfinterp(sh, uint64(startva), uint64(resoff), interpreter))
- ph = newElfPhdr()
+ ph := newElfPhdr()
ph.type_ = PT_INTERP
ph.flags = PF_R
phsh(ph, sh)
@@ -2043,7 +1958,7 @@
pnote = nil
if HEADTYPE == Hnetbsd || HEADTYPE == Hopenbsd {
- sh = nil
+ sh := (*ElfShdr)(nil)
switch HEADTYPE {
case Hnetbsd:
sh = elfshname(".note.netbsd.ident")
@@ -2061,7 +1976,7 @@
}
if len(buildinfo) > 0 {
- sh = elfshname(".note.gnu.build-id")
+ sh := elfshname(".note.gnu.build-id")
resoff -= int64(elfbuildinfo(sh, uint64(startva), uint64(resoff)))
if pnote == nil {
@@ -2083,7 +1998,7 @@
/* Dynamic linking sections */
if Debug['d'] == 0 {
- sh = elfshname(".dynsym")
+ sh := elfshname(".dynsym")
sh.type_ = SHT_DYNSYM
sh.flags = SHF_ALLOC
if elf64 != 0 {
@@ -2104,7 +2019,7 @@
shsym(sh, Linklookup(Ctxt, ".dynstr", 0))
if elfverneed != 0 {
- sh = elfshname(".gnu.version")
+ sh := elfshname(".gnu.version")
sh.type_ = SHT_GNU_VERSYM
sh.flags = SHF_ALLOC
sh.addralign = 2
@@ -2124,7 +2039,7 @@
switch eh.machine {
case EM_X86_64,
EM_PPC64:
- sh = elfshname(".rela.plt")
+ sh := elfshname(".rela.plt")
sh.type_ = SHT_RELA
sh.flags = SHF_ALLOC
sh.entsize = ELF64RELASIZE
@@ -2142,7 +2057,7 @@
shsym(sh, Linklookup(Ctxt, ".rela", 0))
default:
- sh = elfshname(".rel.plt")
+ sh := elfshname(".rel.plt")
sh.type_ = SHT_REL
sh.flags = SHF_ALLOC
sh.entsize = ELF32RELSIZE
@@ -2160,7 +2075,7 @@
}
if eh.machine == EM_PPC64 {
- sh = elfshname(".glink")
+ sh := elfshname(".glink")
sh.type_ = SHT_PROGBITS
sh.flags = SHF_ALLOC + SHF_EXECINSTR
sh.addralign = 4
@@ -2188,7 +2103,7 @@
// On ppc64, .got comes from the input files, so don't
// create it here, and .got.plt is not used.
if eh.machine != EM_PPC64 {
- sh = elfshname(".got")
+ sh := elfshname(".got")
sh.type_ = SHT_PROGBITS
sh.flags = SHF_ALLOC + SHF_WRITE
sh.entsize = uint64(Thearch.Regsize)
@@ -2220,7 +2135,7 @@
sh.addralign = uint64(Thearch.Regsize)
sh.link = uint32(elfshname(".dynstr").shnum)
shsym(sh, Linklookup(Ctxt, ".dynamic", 0))
- ph = newElfPhdr()
+ ph := newElfPhdr()
ph.type_ = PT_DYNAMIC
ph.flags = PF_R + PF_W
phsh(ph, sh)
@@ -2232,7 +2147,7 @@
// not currently support it. This is handled
// appropriately in runtime/cgo.
if Ctxt.Tlsoffset != 0 && HEADTYPE != Hopenbsd {
- ph = newElfPhdr()
+ ph := newElfPhdr()
ph.type_ = PT_TLS
ph.flags = PF_R
ph.memsz = uint64(-Ctxt.Tlsoffset)
@@ -2241,7 +2156,7 @@
}
if HEADTYPE == Hlinux {
- ph = newElfPhdr()
+ ph := newElfPhdr()
ph.type_ = PT_GNU_STACK
ph.flags = PF_W + PF_R
ph.align = uint64(Thearch.Regsize)
@@ -2253,7 +2168,7 @@
}
elfobj:
- sh = elfshname(".shstrtab")
+ sh := elfshname(".shstrtab")
sh.type_ = SHT_STRTAB
sh.addralign = 1
shsym(sh, Linklookup(Ctxt, ".shstrtab", 0))
@@ -2265,29 +2180,29 @@
elfshname(".strtab")
}
- for sect = Segtext.Sect; sect != nil; sect = sect.Next {
+ for sect := Segtext.Sect; sect != nil; sect = sect.Next {
elfshbits(sect)
}
- for sect = Segrodata.Sect; sect != nil; sect = sect.Next {
+ for sect := Segrodata.Sect; sect != nil; sect = sect.Next {
elfshbits(sect)
}
- for sect = Segdata.Sect; sect != nil; sect = sect.Next {
+ for sect := Segdata.Sect; sect != nil; sect = sect.Next {
elfshbits(sect)
}
if Linkmode == LinkExternal {
- for sect = Segtext.Sect; sect != nil; sect = sect.Next {
+ for sect := Segtext.Sect; sect != nil; sect = sect.Next {
elfshreloc(sect)
}
- for sect = Segrodata.Sect; sect != nil; sect = sect.Next {
+ for sect := Segrodata.Sect; sect != nil; sect = sect.Next {
elfshreloc(sect)
}
- for sect = Segdata.Sect; sect != nil; sect = sect.Next {
+ for sect := Segdata.Sect; sect != nil; sect = sect.Next {
elfshreloc(sect)
}
// add a .note.GNU-stack section to mark the stack as non-executable
- sh = elfshname(".note.GNU-stack")
+ sh := elfshname(".note.GNU-stack")
sh.type_ = SHT_PROGBITS
sh.addralign = 1
@@ -2297,7 +2212,7 @@
// generate .tbss section for dynamic internal linking (except for OpenBSD)
// external linking generates .tbss in data.c
if Linkmode == LinkInternal && Debug['d'] == 0 && HEADTYPE != Hopenbsd {
- sh = elfshname(".tbss")
+ sh := elfshname(".tbss")
sh.type_ = SHT_NOBITS
sh.addralign = uint64(Thearch.Regsize)
sh.size = uint64(-Ctxt.Tlsoffset)
@@ -2305,7 +2220,7 @@
}
if Debug['s'] == 0 {
- sh = elfshname(".symtab")
+ sh := elfshname(".symtab")
sh.type_ = SHT_SYMTAB
sh.off = uint64(symo)
sh.size = uint64(Symsize)
@@ -2368,7 +2283,7 @@
}
Cseek(0)
- a = 0
+ a := int64(0)
a += int64(elfwritehdr())
a += int64(elfwritephdrs())
a += int64(elfwriteshdrs())
diff --git a/src/cmd/internal/ld/go.go b/src/cmd/internal/ld/go.go
index cd85c8b..f529cea 100644
--- a/src/cmd/internal/ld/go.go
+++ b/src/cmd/internal/ld/go.go
@@ -54,11 +54,8 @@
var nimport int
func hashstr(name string) int {
- var h uint32
- var cp string
-
- h = 0
- for cp = name; cp != ""; cp = cp[1:] {
+ h := uint32(0)
+ for cp := name; cp != ""; cp = cp[1:] {
h = h*1119 + uint32(cp[0])
}
h &= 0xffffff
@@ -66,16 +63,13 @@
}
func ilookup(name string) *Import {
- var h int
- var x *Import
-
- h = hashstr(name) % NIHASH
- for x = ihash[h]; x != nil; x = x.hash {
+ h := hashstr(name) % NIHASH
+ for x := ihash[h]; x != nil; x = x.hash {
if x.name[0] == name[0] && x.name == name {
return x
}
}
- x = new(Import)
+ x := new(Import)
x.name = name
x.hash = ihash[h]
ihash[h] = x
@@ -84,10 +78,7 @@
}
func ldpkg(f *Biobuf, pkg string, length int64, filename string, whence int) {
- var bdata []byte
- var data string
var p0, p1 int
- var name string
if Debug['g'] != 0 {
return
@@ -101,7 +92,7 @@
return
}
- bdata = make([]byte, length)
+ bdata := make([]byte, length)
if int64(Bread(f, bdata)) != length {
fmt.Fprintf(os.Stderr, "%s: short pkg read %s\n", os.Args[0], filename)
if Debug['u'] != 0 {
@@ -109,7 +100,7 @@
}
return
}
- data = string(bdata)
+ data := string(bdata)
// first \n$$ marks beginning of exports - skip rest of line
p0 = strings.Index(data, "\n$$")
@@ -153,7 +144,7 @@
for p0 < p1 && (data[p0] == ' ' || data[p0] == '\t' || data[p0] == '\n') {
p0++
}
- name = data[p0:]
+ name := data[p0:]
for p0 < p1 && data[p0] != ' ' && data[p0] != '\t' && data[p0] != '\n' {
p0++
}
@@ -221,14 +212,13 @@
}
func loadpkgdata(file string, pkg string, data string) {
- var p string
var prefix string
var name string
var def string
var x *Import
file = file
- p = data
+ p := data
for parsepkgdata(file, pkg, &p, &prefix, &name, &def) > 0 {
x = ilookup(name)
if x.prefix == "" {
@@ -250,15 +240,10 @@
}
func parsepkgdata(file string, pkg string, pp *string, prefixp *string, namep *string, defp *string) int {
- var p string
var prefix string
- var name string
- var def string
- var meth string
- var inquote bool
// skip white space
- p = *pp
+ p := *pp
loop:
for len(p) > 0 && (p[0] == ' ' || p[0] == '\t' || p[0] == '\n') {
@@ -310,9 +295,9 @@
prefix = prefix[:len(prefix)-len(p)-1]
// name: a.b followed by space
- name = p
+ name := p
- inquote = false
+ inquote := false
for len(p) > 0 {
if p[0] == ' ' && !inquote {
break
@@ -334,7 +319,7 @@
p = p[1:]
// def: free form to new line
- def = p
+ def := p
for len(p) > 0 && p[0] != '\n' {
p = p[1:]
@@ -347,6 +332,7 @@
p = p[1:]
// include methods on successive lines in def of named type
+ var meth string
for parsemethod(&p, &meth) > 0 {
if defbuf == nil {
defbuf = new(bytes.Buffer)
@@ -372,10 +358,8 @@
}
func parsemethod(pp *string, methp *string) int {
- var p string
-
// skip white space
- p = *pp
+ p := *pp
for len(p) > 0 && (p[0] == ' ' || p[0] == '\t') {
p = p[1:]
@@ -415,7 +399,6 @@
func loadcgo(file string, pkg string, p string) {
var next string
- var p0 string
var q string
var f []string
var local string
@@ -423,7 +406,7 @@
var lib string
var s *LSym
- p0 = ""
+ p0 := ""
for ; p != ""; p = next {
if i := strings.Index(p, "\n"); i >= 0 {
p, next = p[:i], p[i+1:]
@@ -610,10 +593,9 @@
func markflood() {
var a *Auto
- var s *LSym
var i int
- for s = markq; s != nil; s = s.Queue {
+ for s := markq; s != nil; s = s.Queue {
if s.Type == STEXT {
if Debug['v'] > 1 {
fmt.Fprintf(&Bso, "marktext %s\n", s.Name)
@@ -659,38 +641,32 @@
}
func deadcode() {
- var i int
- var s *LSym
- var last *LSym
- var p *LSym
- var fmt_ string
-
if Debug['v'] != 0 {
fmt.Fprintf(&Bso, "%5.2f deadcode\n", obj.Cputime())
}
mark(Linklookup(Ctxt, INITENTRY, 0))
- for i = 0; i < len(markextra); i++ {
+ for i := 0; i < len(markextra); i++ {
mark(Linklookup(Ctxt, markextra[i], 0))
}
- for i = 0; i < len(dynexp); i++ {
+ for i := 0; i < len(dynexp); i++ {
mark(dynexp[i])
}
markflood()
// keep each beginning with 'typelink.' if the symbol it points at is being kept.
- for s = Ctxt.Allsym; s != nil; s = s.Allsym {
+ for s := Ctxt.Allsym; s != nil; s = s.Allsym {
if strings.HasPrefix(s.Name, "go.typelink.") {
s.Reachable = len(s.R) == 1 && s.R[0].Sym.Reachable
}
}
// remove dead text but keep file information (z symbols).
- last = nil
+ last := (*LSym)(nil)
- for s = Ctxt.Textp; s != nil; s = s.Next {
+ for s := Ctxt.Textp; s != nil; s = s.Next {
if !s.Reachable {
continue
}
@@ -710,7 +686,7 @@
last.Next = nil
}
- for s = Ctxt.Allsym; s != nil; s = s.Allsym {
+ for s := Ctxt.Allsym; s != nil; s = s.Allsym {
if strings.HasPrefix(s.Name, "go.weak.") {
s.Special = 1 // do not lay out in data segment
s.Reachable = true
@@ -719,9 +695,10 @@
}
// record field tracking references
- fmt_ = ""
+ fmt_ := ""
- for s = Ctxt.Allsym; s != nil; s = s.Allsym {
+ var p *LSym
+ for s := Ctxt.Allsym; s != nil; s = s.Allsym {
if strings.HasPrefix(s.Name, "go.track.") {
s.Special = 1 // do not lay out in data segment
s.Hide = 1
@@ -741,7 +718,7 @@
if tracksym == "" {
return
}
- s = Linklookup(Ctxt, tracksym, 0)
+ s := Linklookup(Ctxt, tracksym, 0)
if !s.Reachable {
return
}
@@ -749,12 +726,11 @@
}
func doweak() {
- var s *LSym
var t *LSym
// resolve weak references only if
// target symbol will be in binary anyway.
- for s = Ctxt.Allsym; s != nil; s = s.Allsym {
+ for s := Ctxt.Allsym; s != nil; s = s.Allsym {
if strings.HasPrefix(s.Name, "go.weak.") {
t = Linkrlookup(Ctxt, s.Name[8:], int(s.Version))
if t != nil && t.Type != 0 && t.Reachable {
@@ -772,13 +748,11 @@
}
func addexport() {
- var i int
-
if HEADTYPE == Hdarwin {
return
}
- for i = 0; i < len(dynexp); i++ {
+ for i := 0; i < len(dynexp); i++ {
Thearch.Adddynsym(Ctxt, dynexp[i])
}
}
@@ -840,16 +814,13 @@
var pkgall *Pkg
func getpkg(path_ string) *Pkg {
- var p *Pkg
- var h int
-
- h = hashstr(path_) % len(phash)
- for p = phash[h]; p != nil; p = p.next {
+ h := hashstr(path_) % len(phash)
+ for p := phash[h]; p != nil; p = p.next {
if p.path_ == path_ {
return p
}
}
- p = new(Pkg)
+ p := new(Pkg)
p.path_ = path_
p.next = phash[h]
phash[h] = p
@@ -859,24 +830,18 @@
}
func imported(pkg string, import_ string) {
- var p *Pkg
- var i *Pkg
-
// everyone imports runtime, even runtime.
if import_ == "\"runtime\"" {
return
}
pkg = fmt.Sprintf("\"%v\"", Zconv(pkg, 0)) // turn pkg path into quoted form, freed below
- p = getpkg(pkg)
- i = getpkg(import_)
+ p := getpkg(pkg)
+ i := getpkg(import_)
i.impby = append(i.impby, p)
}
func cycle(p *Pkg) *Pkg {
- var i int
- var bad *Pkg
-
if p.checked != 0 {
return nil
}
@@ -889,7 +854,8 @@
}
p.mark = 1
- for i = 0; i < len(p.impby); i++ {
+ var bad *Pkg
+ for i := 0; i < len(p.impby); i++ {
bad = cycle(p.impby[i])
if bad != nil {
p.mark = 0
@@ -908,9 +874,7 @@
}
func importcycles() {
- var p *Pkg
-
- for p = pkgall; p != nil; p = p.all {
+ for p := pkgall; p != nil; p = p.all {
cycle(p)
}
}
diff --git a/src/cmd/internal/ld/ld.go b/src/cmd/internal/ld/ld.go
index 99f2fab..3397dd2 100644
--- a/src/cmd/internal/ld/ld.go
+++ b/src/cmd/internal/ld/ld.go
@@ -86,10 +86,7 @@
* pkg: package import path, e.g. container/vector
*/
func addlibpath(ctxt *Link, srcref string, objref string, file string, pkg string) {
- var i int
- var l *Library
-
- for i = 0; i < len(ctxt.Library); i++ {
+ for i := 0; i < len(ctxt.Library); i++ {
if file == ctxt.Library[i].File {
return
}
@@ -100,7 +97,7 @@
}
ctxt.Library = append(ctxt.Library, Library{})
- l = &ctxt.Library[len(ctxt.Library)-1]
+ l := &ctxt.Library[len(ctxt.Library)-1]
l.Objref = objref
l.Srcref = srcref
l.File = file
diff --git a/src/cmd/internal/ld/ldelf.go b/src/cmd/internal/ld/ldelf.go
index c50e995..372dc18 100644
--- a/src/cmd/internal/ld/ldelf.go
+++ b/src/cmd/internal/ld/ldelf.go
@@ -285,39 +285,35 @@
}
func ldelf(f *Biobuf, pkg string, length int64, pn string) {
- var err error
- var base int32
- var add uint64
- var info uint64
- var name string
- var i int
- var j int
- var rela int
- var is64 int
- var n int
- var flag int
- var hdrbuf [64]uint8
- var p []byte
- var hdr *ElfHdrBytes
- var elfobj *ElfObj
- var sect *ElfSect
- var rsect *ElfSect
- var sym ElfSym
- var e binary.ByteOrder
- var r []Reloc
- var rp *Reloc
- var s *LSym
- var symbols []*LSym
-
- symbols = nil
+ symbols := []*LSym(nil)
if Debug['v'] != 0 {
fmt.Fprintf(&Bso, "%5.2f ldelf %s\n", obj.Cputime(), pn)
}
Ctxt.Version++
- base = int32(Boffset(f))
+ base := int32(Boffset(f))
+ var add uint64
+ var e binary.ByteOrder
+ var elfobj *ElfObj
+ var err error
+ var flag int
+ var hdr *ElfHdrBytes
+ var hdrbuf [64]uint8
+ var info uint64
+ var is64 int
+ var j int
+ var n int
+ var name string
+ var p []byte
+ var r []Reloc
+ var rela int
+ var rp *Reloc
+ var rsect *ElfSect
+ var s *LSym
+ var sect *ElfSect
+ var sym ElfSym
if Bread(f, hdrbuf[:]) != len(hdrbuf) {
goto bad
}
@@ -348,10 +344,8 @@
is64 = 0
if hdr.Ident[4] == ElfClass64 {
- var hdr *ElfHdrBytes64
-
is64 = 1
- hdr = new(ElfHdrBytes64)
+ hdr := new(ElfHdrBytes64)
binary.Read(bytes.NewReader(hdrbuf[:]), binary.BigEndian, hdr) // only byte arrays; byte order doesn't matter
elfobj.type_ = uint32(e.Uint16(hdr.Type[:]))
elfobj.machine = uint32(e.Uint16(hdr.Machine[:]))
@@ -426,7 +420,7 @@
elfobj.sect = make([]ElfSect, elfobj.shnum)
elfobj.nsect = uint(elfobj.shnum)
- for i = 0; uint(i) < elfobj.nsect; i++ {
+ for i := 0; uint(i) < elfobj.nsect; i++ {
if Bseek(f, int64(uint64(base)+elfobj.shoff+uint64(int64(i)*int64(elfobj.shentsize))), 0) < 0 {
goto bad
}
@@ -478,7 +472,7 @@
if err = elfmap(elfobj, sect); err != nil {
goto bad
}
- for i = 0; uint(i) < elfobj.nsect; i++ {
+ for i := 0; uint(i) < elfobj.nsect; i++ {
if elfobj.sect[i].nameoff != 0 {
elfobj.sect[i].name = cstring(sect.base[elfobj.sect[i].nameoff:])
}
@@ -517,7 +511,7 @@
// as well use one large chunk.
// create symbols for elfmapped sections
- for i = 0; uint(i) < elfobj.nsect; i++ {
+ for i := 0; uint(i) < elfobj.nsect; i++ {
sect = &elfobj.sect[i]
if (sect.type_ != ElfSectProgbits && sect.type_ != ElfSectNobits) || sect.flags&ElfSectFlagAlloc == 0 {
continue
@@ -572,7 +566,7 @@
Errorexit()
}
- for i = 1; i < elfobj.nsymtab; i++ {
+ for i := 1; i < elfobj.nsymtab; i++ {
if err = readelfsym(elfobj, i, &sym, 1); err != nil {
goto bad
}
@@ -645,7 +639,7 @@
// Sort outer lists by address, adding to textp.
// This keeps textp in increasing address order.
- for i = 0; uint(i) < elfobj.nsect; i++ {
+ for i := 0; uint(i) < elfobj.nsect; i++ {
s = elfobj.sect[i].sym
if s == nil {
continue
@@ -676,7 +670,7 @@
}
// load relocations
- for i = 0; uint(i) < elfobj.nsect; i++ {
+ for i := 0; uint(i) < elfobj.nsect; i++ {
rsect = &elfobj.sect[i]
if rsect.type_ != ElfSectRela && rsect.type_ != ElfSectRel {
continue
@@ -782,9 +776,7 @@
}
func section(elfobj *ElfObj, name string) *ElfSect {
- var i int
-
- for i = 0; uint(i) < elfobj.nsect; i++ {
+ for i := 0; uint(i) < elfobj.nsect; i++ {
if elfobj.sect[i].name != "" && name != "" && elfobj.sect[i].name == name {
return &elfobj.sect[i]
}
@@ -812,8 +804,6 @@
}
func readelfsym(elfobj *ElfObj, i int, sym *ElfSym, needSym int) (err error) {
- var s *LSym
-
if i >= elfobj.nsymtab || i < 0 {
err = fmt.Errorf("invalid elf symbol index")
return err
@@ -845,7 +835,7 @@
sym.other = b.Other
}
- s = nil
+ s := (*LSym)(nil)
if sym.name == "_GLOBAL_OFFSET_TABLE_" {
sym.name = ".got"
}
@@ -940,11 +930,8 @@
}
func (x rbyoff) Less(i, j int) bool {
- var a *Reloc
- var b *Reloc
-
- a = &x[i]
- b = &x[j]
+ a := &x[i]
+ b := &x[j]
if a.Off < b.Off {
return true
}
diff --git a/src/cmd/internal/ld/ldmacho.go b/src/cmd/internal/ld/ldmacho.go
index e762318..58f065a 100644
--- a/src/cmd/internal/ld/ldmacho.go
+++ b/src/cmd/internal/ld/ldmacho.go
@@ -172,13 +172,8 @@
)
func unpackcmd(p []byte, m *LdMachoObj, c *LdMachoCmd, type_ uint, sz uint) int {
- var e4 func([]byte) uint32
- var e8 func([]byte) uint64
- var s *LdMachoSect
- var i int
-
- e4 = m.e.Uint32
- e8 = m.e.Uint64
+ e4 := m.e.Uint32
+ e8 := m.e.Uint64
c.type_ = int(type_)
c.size = uint32(sz)
@@ -204,7 +199,8 @@
return -1
}
p = p[56:]
- for i = 0; uint32(i) < c.seg.nsect; i++ {
+ var s *LdMachoSect
+ for i := 0; uint32(i) < c.seg.nsect; i++ {
s = &c.seg.sect[i]
s.name = cstring(p[0:16])
s.segname = cstring(p[16:32])
@@ -238,7 +234,8 @@
return -1
}
p = p[72:]
- for i = 0; uint32(i) < c.seg.nsect; i++ {
+ var s *LdMachoSect
+ for i := 0; uint32(i) < c.seg.nsect; i++ {
s = &c.seg.sect[i]
s.name = cstring(p[0:16])
s.segname = cstring(p[16:32])
@@ -293,24 +290,19 @@
}
func macholoadrel(m *LdMachoObj, sect *LdMachoSect) int {
- var rel []LdMachoRel
- var r *LdMachoRel
- var buf []byte
- var p []byte
- var i int
- var n int
- var v uint32
-
if sect.rel != nil || sect.nreloc == 0 {
return 0
}
- rel = make([]LdMachoRel, sect.nreloc)
- n = int(sect.nreloc * 8)
- buf = make([]byte, n)
+ rel := make([]LdMachoRel, sect.nreloc)
+ n := int(sect.nreloc * 8)
+ buf := make([]byte, n)
if Bseek(m.f, m.base+int64(sect.reloff), 0) < 0 || Bread(m.f, buf) != n {
return -1
}
- for i = 0; uint32(i) < sect.nreloc; i++ {
+ var p []byte
+ var r *LdMachoRel
+ var v uint32
+ for i := 0; uint32(i) < sect.nreloc; i++ {
r = &rel[i]
p = buf[i*8:]
r.addr = m.e.Uint32(p)
@@ -347,56 +339,44 @@
}
func macholoaddsym(m *LdMachoObj, d *LdMachoDysymtab) int {
- var p []byte
- var i int
- var n int
+ n := int(d.nindirectsyms)
- n = int(d.nindirectsyms)
-
- p = make([]byte, n*4)
+ p := make([]byte, n*4)
if Bseek(m.f, m.base+int64(d.indirectsymoff), 0) < 0 || Bread(m.f, p) != len(p) {
return -1
}
d.indir = make([]uint32, n)
- for i = 0; i < n; i++ {
+ for i := 0; i < n; i++ {
d.indir[i] = m.e.Uint32(p[4*i:])
}
return 0
}
func macholoadsym(m *LdMachoObj, symtab *LdMachoSymtab) int {
- var strbuf []byte
- var symbuf []byte
- var p []byte
- var i int
- var n int
- var symsize int
- var sym []LdMachoSym
- var s *LdMachoSym
- var v uint32
-
if symtab.sym != nil {
return 0
}
- strbuf = make([]byte, symtab.strsize)
+ strbuf := make([]byte, symtab.strsize)
if Bseek(m.f, m.base+int64(symtab.stroff), 0) < 0 || Bread(m.f, strbuf) != len(strbuf) {
return -1
}
- symsize = 12
+ symsize := 12
if m.is64 {
symsize = 16
}
- n = int(symtab.nsym * uint32(symsize))
- symbuf = make([]byte, n)
+ n := int(symtab.nsym * uint32(symsize))
+ symbuf := make([]byte, n)
if Bseek(m.f, m.base+int64(symtab.symoff), 0) < 0 || Bread(m.f, symbuf) != len(symbuf) {
return -1
}
- sym = make([]LdMachoSym, symtab.nsym)
- p = symbuf
- for i = 0; uint32(i) < symtab.nsym; i++ {
+ sym := make([]LdMachoSym, symtab.nsym)
+ p := symbuf
+ var s *LdMachoSym
+ var v uint32
+ for i := 0; uint32(i) < symtab.nsym; i++ {
s = &sym[i]
v = m.e.Uint32(p)
if v >= symtab.strsize {
@@ -421,13 +401,11 @@
func ldmacho(f *Biobuf, pkg string, length int64, pn string) {
var err error
- var i int
var j int
var is64 bool
var secaddr uint64
var hdr [7 * 4]uint8
var cmdp []byte
- var tmp [4]uint8
var dat []byte
var ncmd uint32
var cmdsz uint32
@@ -436,7 +414,6 @@
var off uint32
var m *LdMachoObj
var e binary.ByteOrder
- var base int64
var sect *LdMachoSect
var rel *LdMachoRel
var rpi int
@@ -452,7 +429,7 @@
var name string
Ctxt.Version++
- base = Boffset(f)
+ base := Boffset(f)
if Bread(f, hdr[:]) != len(hdr) {
goto bad
}
@@ -475,6 +452,7 @@
}
if is64 {
+ var tmp [4]uint8
Bread(f, tmp[:4]) // skip reserved word in header
}
@@ -524,7 +502,7 @@
symtab = nil
dsymtab = nil
- for i = 0; uint32(i) < ncmd; i++ {
+ for i := 0; uint32(i) < ncmd; i++ {
ty = e.Uint32(cmdp)
sz = e.Uint32(cmdp[4:])
m.cmd[i].off = off
@@ -581,7 +559,7 @@
goto bad
}
- for i = 0; uint32(i) < c.seg.nsect; i++ {
+ for i := 0; uint32(i) < c.seg.nsect; i++ {
sect = &c.seg.sect[i]
if sect.segname != "__TEXT" && sect.segname != "__DATA" {
continue
@@ -623,8 +601,7 @@
// enter sub-symbols into symbol table.
// have to guess sizes from next symbol.
- for i = 0; uint32(i) < symtab.nsym; i++ {
- var v int
+ for i := 0; uint32(i) < symtab.nsym; i++ {
sym = &symtab.sym[i]
if sym.type_&N_STAB != 0 {
continue
@@ -636,7 +613,7 @@
if name[0] == '_' && name[1] != '\x00' {
name = name[1:]
}
- v = 0
+ v := 0
if sym.type_&N_EXT == 0 {
v = Ctxt.Version
}
@@ -688,7 +665,7 @@
// Sort outer lists by address, adding to textp.
// This keeps textp in increasing address order.
- for i = 0; uint32(i) < c.seg.nsect; i++ {
+ for i := 0; uint32(i) < c.seg.nsect; i++ {
sect = &c.seg.sect[i]
s = sect.sym
if s == nil {
@@ -730,7 +707,7 @@
}
// load relocations
- for i = 0; uint32(i) < c.seg.nsect; i++ {
+ for i := 0; uint32(i) < c.seg.nsect; i++ {
sect = &c.seg.sect[i]
s = sect.sym
if s == nil {
@@ -746,9 +723,6 @@
rp = &r[rpi]
rel = &sect.rel[j]
if rel.scattered != 0 {
- var k int
- var ks *LdMachoSect
-
if Thearch.Thechar != '8' {
// mach-o only uses scattered relocation on 32-bit platforms
Diag("unexpected scattered relocation")
@@ -792,54 +766,53 @@
// now consider the desired symbol.
// find the section where it lives.
- for k = 0; uint32(k) < c.seg.nsect; k++ {
+ var ks *LdMachoSect
+ for k := 0; uint32(k) < c.seg.nsect; k++ {
ks = &c.seg.sect[k]
if ks.addr <= uint64(rel.value) && uint64(rel.value) < ks.addr+ks.size {
- goto foundk
+ if ks.sym != nil {
+ rp.Sym = ks.sym
+ rp.Add += int64(uint64(rel.value) - ks.addr)
+ } else if ks.segname == "__IMPORT" && ks.name == "__pointers" {
+ // handle reference to __IMPORT/__pointers.
+ // how much worse can this get?
+ // why are we supporting 386 on the mac anyway?
+ rp.Type = 512 + MACHO_FAKE_GOTPCREL
+
+ // figure out which pointer this is a reference to.
+ k = int(uint64(ks.res1) + (uint64(rel.value)-ks.addr)/4)
+
+ // load indirect table for __pointers
+ // fetch symbol number
+ if dsymtab == nil || k < 0 || uint32(k) >= dsymtab.nindirectsyms || dsymtab.indir == nil {
+ err = fmt.Errorf("invalid scattered relocation: indirect symbol reference out of range")
+ goto bad
+ }
+
+ k = int(dsymtab.indir[k])
+ if k < 0 || uint32(k) >= symtab.nsym {
+ err = fmt.Errorf("invalid scattered relocation: symbol reference out of range")
+ goto bad
+ }
+
+ rp.Sym = symtab.sym[k].sym
+ } else {
+ err = fmt.Errorf("unsupported scattered relocation: reference to %s/%s", ks.segname, ks.name)
+ goto bad
+ }
+
+ rpi++
+
+ // skip #1 of 2 rel; continue skips #2 of 2.
+ j++
+
+ continue
}
}
err = fmt.Errorf("unsupported scattered relocation: invalid address %#x", rel.addr)
goto bad
- foundk:
- if ks.sym != nil {
- rp.Sym = ks.sym
- rp.Add += int64(uint64(rel.value) - ks.addr)
- } else if ks.segname == "__IMPORT" && ks.name == "__pointers" {
- // handle reference to __IMPORT/__pointers.
- // how much worse can this get?
- // why are we supporting 386 on the mac anyway?
- rp.Type = 512 + MACHO_FAKE_GOTPCREL
-
- // figure out which pointer this is a reference to.
- k = int(uint64(ks.res1) + (uint64(rel.value)-ks.addr)/4)
-
- // load indirect table for __pointers
- // fetch symbol number
- if dsymtab == nil || k < 0 || uint32(k) >= dsymtab.nindirectsyms || dsymtab.indir == nil {
- err = fmt.Errorf("invalid scattered relocation: indirect symbol reference out of range")
- goto bad
- }
-
- k = int(dsymtab.indir[k])
- if k < 0 || uint32(k) >= symtab.nsym {
- err = fmt.Errorf("invalid scattered relocation: symbol reference out of range")
- goto bad
- }
-
- rp.Sym = symtab.sym[k].sym
- } else {
- err = fmt.Errorf("unsupported scattered relocation: reference to %s/%s", ks.segname, ks.name)
- goto bad
- }
-
- rpi++
-
- // skip #1 of 2 rel; continue skips #2 of 2.
- j++
-
- continue
}
rp.Siz = rel.length
diff --git a/src/cmd/internal/ld/ldpe.go b/src/cmd/internal/ld/ldpe.go
index 247e829..0daf887 100644
--- a/src/cmd/internal/ld/ldpe.go
+++ b/src/cmd/internal/ld/ldpe.go
@@ -127,36 +127,31 @@
}
func ldpe(f *Biobuf, pkg string, length int64, pn string) {
- var err error
- var name string
- var base int32
- var l uint32
- var i int
- var j int
- var numaux int
- var peobj *PeObj
- var sect *PeSect
- var rsect *PeSect
- var symbuf [18]uint8
- var s *LSym
- var r []Reloc
- var rp *Reloc
- var sym *PeSym
-
if Debug['v'] != 0 {
fmt.Fprintf(&Bso, "%5.2f ldpe %s\n", obj.Cputime(), pn)
}
- sect = nil
+ sect := (*PeSect)(nil)
Ctxt.Version++
- base = int32(Boffset(f))
+ base := int32(Boffset(f))
- peobj = new(PeObj)
+ peobj := new(PeObj)
peobj.f = f
peobj.base = uint32(base)
peobj.name = pn
// read header
+ var err error
+ var j int
+ var l uint32
+ var name string
+ var numaux int
+ var r []Reloc
+ var rp *Reloc
+ var rsect *PeSect
+ var s *LSym
+ var sym *PeSym
+ var symbuf [18]uint8
if err = binary.Read(f, binary.LittleEndian, &peobj.fh); err != nil {
goto bad
}
@@ -165,7 +160,7 @@
peobj.sect = make([]PeSect, peobj.fh.NumberOfSections)
peobj.nsect = uint(peobj.fh.NumberOfSections)
- for i = 0; i < int(peobj.fh.NumberOfSections); i++ {
+ for i := 0; i < int(peobj.fh.NumberOfSections); i++ {
if err = binary.Read(f, binary.LittleEndian, &peobj.sect[i].sh); err != nil {
goto bad
}
@@ -189,7 +184,7 @@
}
// rewrite section names if they start with /
- for i = 0; i < int(peobj.fh.NumberOfSections); i++ {
+ for i := 0; i < int(peobj.fh.NumberOfSections); i++ {
if peobj.sect[i].name == "" {
continue
}
@@ -205,7 +200,7 @@
peobj.npesym = uint(peobj.fh.NumberOfSymbols)
Bseek(f, int64(base)+int64(peobj.fh.PointerToSymbolTable), 0)
- for i = 0; uint32(i) < peobj.fh.NumberOfSymbols; i += numaux + 1 {
+ for i := 0; uint32(i) < peobj.fh.NumberOfSymbols; i += numaux + 1 {
Bseek(f, int64(base)+int64(peobj.fh.PointerToSymbolTable)+int64(len(symbuf))*int64(i), 0)
if Bread(f, symbuf[:]) != len(symbuf) {
goto bad
@@ -230,7 +225,7 @@
}
// create symbols for mapped sections
- for i = 0; uint(i) < peobj.nsect; i++ {
+ for i := 0; uint(i) < peobj.nsect; i++ {
sect = &peobj.sect[i]
if sect.sh.Characteristics&IMAGE_SCN_MEM_DISCARDABLE != 0 {
continue
@@ -277,7 +272,7 @@
}
// load relocations
- for i = 0; uint(i) < peobj.nsect; i++ {
+ for i := 0; uint(i) < peobj.nsect; i++ {
rsect = &peobj.sect[i]
if rsect.sym == nil || rsect.sh.NumberOfRelocations == 0 {
continue
@@ -298,12 +293,9 @@
if Bread(f, symbuf[:10]) != 10 {
goto bad
}
- var rva uint32
- var symindex uint32
- var type_ uint16
- rva = Le32(symbuf[0:])
- symindex = Le32(symbuf[4:])
- type_ = Le16(symbuf[8:])
+ rva := Le32(symbuf[0:])
+ symindex := Le32(symbuf[4:])
+ type_ := Le16(symbuf[8:])
if err = readpesym(peobj, int(symindex), &sym); err != nil {
goto bad
}
@@ -360,7 +352,7 @@
}
// enter sub-symbols into symbol table.
- for i = 0; uint(i) < peobj.npesym; i++ {
+ for i := 0; uint(i) < peobj.npesym; i++ {
if peobj.pesym[i].name == "" {
continue
}
@@ -429,7 +421,7 @@
// Sort outer lists by address, adding to textp.
// This keeps textp in increasing address order.
- for i = 0; uint(i) < peobj.nsect; i++ {
+ for i := 0; uint(i) < peobj.nsect; i++ {
s = peobj.sect[i].sym
if s == nil {
continue
@@ -486,18 +478,15 @@
}
func readpesym(peobj *PeObj, i int, y **PeSym) (err error) {
- var s *LSym
- var sym *PeSym
- var name string
-
if uint(i) >= peobj.npesym || i < 0 {
err = fmt.Errorf("invalid pe symbol index")
return err
}
- sym = &peobj.pesym[i]
+ sym := &peobj.pesym[i]
*y = sym
+ var name string
if issect(sym) {
name = peobj.sect[sym.sectnum-1].sym.Name
} else {
@@ -515,6 +504,7 @@
name = name[:i]
}
+ var s *LSym
switch sym.type_ {
default:
err = fmt.Errorf("%s: invalid symbol type %d", sym.name, sym.type_)
diff --git a/src/cmd/internal/ld/lib.go b/src/cmd/internal/ld/lib.go
index bc58fff..030ddbf 100644
--- a/src/cmd/internal/ld/lib.go
+++ b/src/cmd/internal/ld/lib.go
@@ -294,16 +294,13 @@
}
func libinit() {
- var suffix string
- var suffixsep string
-
Funcalign = Thearch.Funcalign
mywhatsys() // get goroot, goarch, goos
// add goroot to the end of the libdir list.
- suffix = ""
+ suffix := ""
- suffixsep = ""
+ suffixsep := ""
if flag_installsuffix != "" {
suffixsep = "_"
suffix = flag_installsuffix
@@ -353,11 +350,9 @@
func loadinternal(name string) {
var pname string
- var i int
- var found int
- found = 0
- for i = 0; i < len(Ctxt.Libdir); i++ {
+ found := 0
+ for i := 0; i < len(Ctxt.Libdir); i++ {
pname = fmt.Sprintf("%s/%s.a", Ctxt.Libdir[i], name)
if Debug['v'] != 0 {
fmt.Fprintf(&Bso, "searching for %s.a in %s\n", name, pname)
@@ -375,15 +370,8 @@
}
func loadlib() {
- var i int
- var w int
- var x int
- var s *LSym
- var tlsg *LSym
- var cgostrsym string
-
if Flag_shared != 0 {
- s = Linklookup(Ctxt, "runtime.islibrary", 0)
+ s := Linklookup(Ctxt, "runtime.islibrary", 0)
s.Dupok = 1
Adduint8(Ctxt, s, 1)
}
@@ -396,6 +384,7 @@
loadinternal("runtime/race")
}
+ var i int
for i = 0; i < len(Ctxt.Library); i++ {
if Debug['v'] > 1 {
fmt.Fprintf(&Bso, "%5.2f autolib: %s (from %s)\n", obj.Cputime(), Ctxt.Library[i].File, Ctxt.Library[i].Objref)
@@ -438,7 +427,7 @@
}
// Pretend that we really imported the package.
- s = Linklookup(Ctxt, "go.importpath.runtime/cgo.", 0)
+ s := Linklookup(Ctxt, "go.importpath.runtime/cgo.", 0)
s.Type = SDATA
s.Dupok = 1
@@ -446,10 +435,10 @@
// Provided by the code that imports the package.
// Since we are simulating the import, we have to provide this string.
- cgostrsym = "go.string.\"runtime/cgo\""
+ cgostrsym := "go.string.\"runtime/cgo\""
if Linkrlookup(Ctxt, cgostrsym, 0) == nil {
- s = Linklookup(Ctxt, cgostrsym, 0)
+ s := Linklookup(Ctxt, cgostrsym, 0)
s.Type = SRODATA
s.Reachable = true
addstrdata(cgostrsym, "runtime/cgo")
@@ -459,7 +448,7 @@
if Linkmode == LinkInternal {
// Drop all the cgo_import_static declarations.
// Turns out we won't be needing them.
- for s = Ctxt.Allsym; s != nil; s = s.Allsym {
+ for s := Ctxt.Allsym; s != nil; s = s.Allsym {
if s.Type == SHOSTOBJ {
// If a symbol was marked both
// cgo_import_static and cgo_import_dynamic,
@@ -474,7 +463,7 @@
}
}
- tlsg = Linklookup(Ctxt, "runtime.tlsg", 0)
+ tlsg := Linklookup(Ctxt, "runtime.tlsg", 0)
// For most ports, runtime.tlsg is a placeholder symbol for TLS
// relocation. However, the Android and Darwin arm ports need it
@@ -492,13 +481,13 @@
Ctxt.Tlsg = tlsg
// Now that we know the link mode, trim the dynexp list.
- x = CgoExportDynamic
+ x := CgoExportDynamic
if Linkmode == LinkExternal {
x = CgoExportStatic
}
- w = 0
- for i = 0; i < len(dynexp); i++ {
+ w := 0
+ for i := 0; i < len(dynexp); i++ {
if int(dynexp[i].Cgoexport)&x != 0 {
dynexp[w] = dynexp[i]
w++
@@ -564,12 +553,6 @@
}
func objfile(file string, pkg string) {
- var off int64
- var l int64
- var f *Biobuf
- var pname string
- var arhdr ArHdr
-
pkg = pathtoprefix(pkg)
if Debug['v'] > 1 {
@@ -577,6 +560,7 @@
}
Bflush(&Bso)
var err error
+ var f *Biobuf
f, err = Bopenr(file)
if err != nil {
Diag("cannot open file %s: %v", file, err)
@@ -586,7 +570,7 @@
magbuf := make([]byte, len(ARMAG))
if Bread(f, magbuf) != len(magbuf) || !strings.HasPrefix(string(magbuf), ARMAG) {
/* load it as a regular file */
- l = Bseek(f, 0, 2)
+ l := Bseek(f, 0, 2)
Bseek(f, 0, 0)
ldobj(f, pkg, l, file, file, FileObj)
@@ -596,9 +580,11 @@
}
/* skip over optional __.GOSYMDEF and process __.PKGDEF */
- off = Boffset(f)
+ off := Boffset(f)
- l = nextar(f, off, &arhdr)
+ var arhdr ArHdr
+ l := nextar(f, off, &arhdr)
+ var pname string
if l <= 0 {
Diag("%s: short read on archive file symbol header", file)
goto out
@@ -684,12 +670,8 @@
}
func ldhostobj(ld func(*Biobuf, string, int64, string), f *Biobuf, pkg string, length int64, pn string, file string) {
- var i int
- var isinternal int
- var h *Hostobj
-
- isinternal = 0
- for i = 0; i < len(internalpkg); i++ {
+ isinternal := 0
+ for i := 0; i < len(internalpkg); i++ {
if pkg == internalpkg[i] {
isinternal = 1
break
@@ -713,7 +695,7 @@
}
hostobj = append(hostobj, Hostobj{})
- h = &hostobj[len(hostobj)-1]
+ h := &hostobj[len(hostobj)-1]
h.ld = ld
h.pkg = pkg
h.pn = pn
@@ -723,11 +705,10 @@
}
func hostobjs() {
- var i int
var f *Biobuf
var h *Hostobj
- for i = 0; i < len(hostobj); i++ {
+ for i := 0; i < len(hostobj); i++ {
h = &hostobj[i]
var err error
f, err = Bopenr(h.file)
@@ -750,8 +731,6 @@
}
func hostlinksetup() {
- var p string
-
if Linkmode != LinkExternal {
return
}
@@ -769,7 +748,7 @@
// change our output to temporary object file
cout.Close()
- p = fmt.Sprintf("%s/go.o", tmpdir)
+ p := fmt.Sprintf("%s/go.o", tmpdir)
var err error
cout, err = os.OpenFile(p, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0775)
if err != nil {
@@ -783,14 +762,6 @@
var hostlink_buf = make([]byte, 64*1024)
func hostlink() {
- var p string
- var argv []string
- var i int
- var n int
- var length int
- var h *Hostobj
- var f *Biobuf
-
if Linkmode != LinkExternal || nerrors > 0 {
return
}
@@ -798,6 +769,7 @@
if extld == "" {
extld = "gcc"
}
+ var argv []string
argv = append(argv, extld)
switch Thearch.Thechar {
case '8':
@@ -851,7 +823,12 @@
// already wrote main object file
// copy host objects to temporary directory
- for i = 0; i < len(hostobj); i++ {
+ var f *Biobuf
+ var h *Hostobj
+ var length int
+ var n int
+ var p string
+ for i := 0; i < len(hostobj); i++ {
h = &hostobj[i]
var err error
f, err = Bopenr(h.file)
@@ -886,7 +863,7 @@
length -= n
}
- if err = w.Close(); err != nil {
+ if err := w.Close(); err != nil {
Ctxt.Cursym = nil
Diag("cannot write %s: %v", p, err)
Errorexit()
@@ -896,6 +873,7 @@
}
argv = append(argv, fmt.Sprintf("%s/go.o", tmpdir))
+ var i int
for i = 0; i < len(ldflag); i++ {
argv = append(argv, ldflag[i])
}
@@ -935,30 +913,18 @@
}
func ldobj(f *Biobuf, pkg string, length int64, pn string, file string, whence int) {
- var line string
- var c1 int
- var c2 int
- var c3 int
- var c4 int
- var magic uint32
- var import0 int64
- var import1 int64
- var eof int64
- var start int64
- var t string
-
- eof = Boffset(f) + length
+ eof := Boffset(f) + length
pn = pn
- start = Boffset(f)
- c1 = Bgetc(f)
- c2 = Bgetc(f)
- c3 = Bgetc(f)
- c4 = Bgetc(f)
+ start := Boffset(f)
+ c1 := Bgetc(f)
+ c2 := Bgetc(f)
+ c3 := Bgetc(f)
+ c4 := Bgetc(f)
Bseek(f, start, 0)
- magic = uint32(c1)<<24 | uint32(c2)<<16 | uint32(c3)<<8 | uint32(c4)
+ magic := uint32(c1)<<24 | uint32(c2)<<16 | uint32(c3)<<8 | uint32(c4)
if magic == 0x7f454c46 { // \x7F E L F
ldhostobj(ldelf, f, pkg, length, pn, file)
return
@@ -975,8 +941,11 @@
}
/* check the header */
- line = Brdline(f, '\n')
+ line := Brdline(f, '\n')
+ var import0 int64
+ var import1 int64
+ var t string
if line == "" {
if Blinelen(f) > 0 {
Diag("%s: not an object file", pn)
@@ -1055,9 +1024,7 @@
}
func zerosig(sp string) {
- var s *LSym
-
- s = Linklookup(Ctxt, sp, 0)
+ s := Linklookup(Ctxt, sp, 0)
s.Sig = 0
}
@@ -1097,44 +1064,40 @@
for i := 0; i < len(s); i++ {
c := s[i]
if c <= ' ' || i >= slash && c == '.' || c == '%' || c == '"' || c >= 0x7F {
- goto escape
+ var buf bytes.Buffer
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if c <= ' ' || i >= slash && c == '.' || c == '%' || c == '"' || c >= 0x7F {
+ fmt.Fprintf(&buf, "%%%02x", c)
+ continue
+ }
+ buf.WriteByte(c)
+ }
+ return buf.String()
}
}
return s
-
-escape:
- var buf bytes.Buffer
- for i := 0; i < len(s); i++ {
- c := s[i]
- if c <= ' ' || i >= slash && c == '.' || c == '%' || c == '"' || c >= 0x7F {
- fmt.Fprintf(&buf, "%%%02x", c)
- continue
- }
- buf.WriteByte(c)
- }
- return buf.String()
}
func iconv(p string) string {
- var fp string
-
if p == "" {
+ var fp string
fp += "<nil>"
return fp
}
p = pathtoprefix(p)
+ var fp string
fp += p
return fp
}
func addsection(seg *Segment, name string, rwx int) *Section {
var l **Section
- var sect *Section
for l = &seg.Sect; *l != nil; l = &(*l).Next {
}
- sect = new(Section)
+ sect := new(Section)
sect.Rwx = uint8(rwx)
sect.Name = name
sect.Seg = seg
@@ -1197,7 +1160,6 @@
func dostkcheck() {
var ch Chain
- var s *LSym
morestack = Linklookup(Ctxt, "runtime.morestack", 0)
newstack = Linklookup(Ctxt, "runtime.newstack", 0)
@@ -1215,7 +1177,7 @@
// Check every function, but do the nosplit functions in a first pass,
// to make the printed failure chains as short as possible.
- for s = Ctxt.Textp; s != nil; s = s.Next {
+ for s := Ctxt.Textp; s != nil; s = s.Next {
// runtime.racesymbolizethunk is called from gcc-compiled C
// code running on the operating system thread stack.
// It uses more than the usual amount of stack but that's okay.
@@ -1230,7 +1192,7 @@
}
}
- for s = Ctxt.Textp; s != nil; s = s.Next {
+ for s := Ctxt.Textp; s != nil; s = s.Next {
if s.Nosplit == 0 {
Ctxt.Cursym = s
ch.sym = s
@@ -1240,17 +1202,8 @@
}
func stkcheck(up *Chain, depth int) int {
- var ch Chain
- var ch1 Chain
- var s *LSym
- var limit int
- var r *Reloc
- var ri int
- var endr int
- var pcsp Pciter
-
- limit = up.limit
- s = up.sym
+ limit := up.limit
+ s := up.sym
// Don't duplicate work: only need to consider each
// function at top of safe zone once.
@@ -1288,12 +1241,16 @@
return 0
}
+ var ch Chain
ch.up = up
// Walk through sp adjustments in function, consuming relocs.
- ri = 0
+ ri := 0
- endr = len(s.R)
+ endr := len(s.R)
+ var ch1 Chain
+ var pcsp Pciter
+ var r *Reloc
for pciterinit(Ctxt, &pcsp, &s.Pcln.Pcsp); pcsp.done == 0; pciternext(&pcsp) {
// pcsp.value is in effect for [pcsp.pc, pcsp.nextpc).
@@ -1384,16 +1341,12 @@
func Yconv(s *LSym) string {
var fp string
- var fmt_ string
- var i int
- var str string
-
if s == nil {
fp += fmt.Sprintf("<nil>")
} else {
- fmt_ = ""
+ fmt_ := ""
fmt_ += fmt.Sprintf("%s @0x%08x [%d]", s.Name, int64(s.Value), int64(s.Size))
- for i = 0; int64(i) < s.Size; i++ {
+ for i := 0; int64(i) < s.Size; i++ {
if i%8 == 0 {
fmt_ += fmt.Sprintf("\n\t0x%04x ", i)
}
@@ -1401,11 +1354,11 @@
}
fmt_ += fmt.Sprintf("\n")
- for i = 0; i < len(s.R); i++ {
+ for i := 0; i < len(s.R); i++ {
fmt_ += fmt.Sprintf("\t0x%04x[%x] %d %s[%x]\n", s.R[i].Off, s.R[i].Siz, s.R[i].Type, s.R[i].Sym.Name, int64(s.R[i].Add))
}
- str = fmt_
+ str := fmt_
fp += str
}
@@ -1439,9 +1392,7 @@
}
func setheadtype(s string) {
- var h int
-
- h = headtype(s)
+ h := headtype(s)
if h < 0 {
fmt.Fprintf(os.Stderr, "unknown header type -H %s\n", s)
Errorexit()
@@ -1462,13 +1413,9 @@
}
func genasmsym(put func(*LSym, string, int, int64, int64, int, *LSym)) {
- var a *Auto
- var s *LSym
- var off int32
-
// These symbols won't show up in the first loop below because we
// skip STEXT symbols. Normal STEXT symbols are emitted by walking textp.
- s = Linklookup(Ctxt, "runtime.text", 0)
+ s := Linklookup(Ctxt, "runtime.text", 0)
if s.Type == STEXT {
put(s, s.Name, 'T', s.Value, s.Size, int(s.Version), nil)
@@ -1478,7 +1425,7 @@
put(s, s.Name, 'T', s.Value, s.Size, int(s.Version), nil)
}
- for s = Ctxt.Allsym; s != nil; s = s.Allsym {
+ for s := Ctxt.Allsym; s != nil; s = s.Allsym {
if s.Hide != 0 || (s.Name[0] == '.' && s.Version == 0 && s.Name != ".rathole") {
continue
}
@@ -1518,7 +1465,9 @@
}
}
- for s = Ctxt.Textp; s != nil; s = s.Next {
+ var a *Auto
+ var off int32
+ for s := Ctxt.Textp; s != nil; s = s.Next {
put(s, s.Name, 'T', s.Value, s.Size, int(s.Version), s.Gotype)
// NOTE(ality): acid can't produce a stack trace without .frame symbols
@@ -1568,9 +1517,7 @@
}
func xdefine(p string, t int, v int64) {
- var s *LSym
-
- s = Linklookup(Ctxt, p, 0)
+ s := Linklookup(Ctxt, p, 0)
s.Type = int16(t)
s.Value = v
s.Reachable = true
@@ -1589,14 +1536,11 @@
}
func Entryvalue() int64 {
- var a string
- var s *LSym
-
- a = INITENTRY
+ a := INITENTRY
if a[0] >= '0' && a[0] <= '9' {
return atolwhex(a)
}
- s = Linklookup(Ctxt, a, 0)
+ s := Linklookup(Ctxt, a, 0)
if s.Type == 0 {
return INITTEXT
}
@@ -1607,11 +1551,10 @@
}
func undefsym(s *LSym) {
- var i int
var r *Reloc
Ctxt.Cursym = s
- for i = 0; i < len(s.R); i++ {
+ for i := 0; i < len(s.R); i++ {
r = &s.R[i]
if r.Sym == nil { // happens for some external ARM relocs
continue
@@ -1626,12 +1569,10 @@
}
func undef() {
- var s *LSym
-
- for s = Ctxt.Textp; s != nil; s = s.Next {
+ for s := Ctxt.Textp; s != nil; s = s.Next {
undefsym(s)
}
- for s = datap; s != nil; s = s.Next {
+ for s := datap; s != nil; s = s.Next {
undefsym(s)
}
if nerrors > 0 {
@@ -1640,15 +1581,13 @@
}
func callgraph() {
- var s *LSym
- var r *Reloc
- var i int
-
if Debug['c'] == 0 {
return
}
- for s = Ctxt.Textp; s != nil; s = s.Next {
+ var i int
+ var r *Reloc
+ for s := Ctxt.Textp; s != nil; s = s.Next {
for i = 0; i < len(s.R); i++ {
r = &s.R[i]
if r.Sym == nil {
@@ -1678,11 +1617,6 @@
}
func checkgo() {
- var s *LSym
- var r *Reloc
- var i int
- var changed int
-
if Debug['C'] == 0 {
return
}
@@ -1691,6 +1625,10 @@
// which would simplify this logic quite a bit.
// Mark every Go-called C function with cfunc=2, recursively.
+ var changed int
+ var i int
+ var r *Reloc
+ var s *LSym
for {
changed = 0
for s = Ctxt.Textp; s != nil; s = s.Next {
@@ -1716,7 +1654,7 @@
// Complain about Go-called C functions that can split the stack
// (that can be preempted for garbage collection or trigger a stack copy).
- for s = Ctxt.Textp; s != nil; s = s.Next {
+ for s := Ctxt.Textp; s != nil; s = s.Next {
if s.Cfunc == 0 || (s.Cfunc == 2 && s.Nosplit != 0) {
for i = 0; i < len(s.R); i++ {
r = &s.R[i]
@@ -1736,13 +1674,11 @@
}
func Rnd(v int64, r int64) int64 {
- var c int64
-
if r <= 0 {
return v
}
v += r - 1
- c = v % r
+ c := v % r
if c < 0 {
c += r
}
diff --git a/src/cmd/internal/ld/macho.go b/src/cmd/internal/ld/macho.go
index 6349642..9fe63f3 100644
--- a/src/cmd/internal/ld/macho.go
+++ b/src/cmd/internal/ld/macho.go
@@ -156,14 +156,12 @@
}
func newMachoSeg(name string, msect int) *MachoSeg {
- var s *MachoSeg
-
if nseg >= len(seg) {
Diag("too many segs")
Errorexit()
}
- s = &seg[nseg]
+ s := &seg[nseg]
nseg++
s.name = name
s.msect = uint32(msect)
@@ -172,14 +170,12 @@
}
func newMachoSect(seg *MachoSeg, name string, segname string) *MachoSect {
- var s *MachoSect
-
if seg.nsect >= seg.msect {
Diag("too many sects in segment %s", seg.name)
Errorexit()
}
- s = &seg.sect[seg.nsect]
+ s := &seg.sect[seg.nsect]
seg.nsect++
s.name = name
s.segname = segname
@@ -196,18 +192,10 @@
var linkoff int64
func machowrite() int {
- var o1 int64
- var loadsize int
- var i int
- var j int
- var s *MachoSeg
- var t *MachoSect
- var l *MachoLoad
+ o1 := Cpos()
- o1 = Cpos()
-
- loadsize = 4 * 4 * ndebug
- for i = 0; i < len(load); i++ {
+ loadsize := 4 * 4 * ndebug
+ for i := 0; i < len(load); i++ {
loadsize += 4 * (len(load[i].data) + 2)
}
if macho64 {
@@ -237,7 +225,10 @@
Thearch.Lput(0) /* reserved */
}
- for i = 0; i < nseg; i++ {
+ var j int
+ var s *MachoSeg
+ var t *MachoSect
+ for i := 0; i < nseg; i++ {
s = &seg[i]
if macho64 {
Thearch.Lput(25) /* segment 64 */
@@ -296,7 +287,8 @@
}
}
- for i = 0; i < len(load); i++ {
+ var l *MachoLoad
+ for i := 0; i < len(load); i++ {
l = &load[i]
Thearch.Lput(l.type_)
Thearch.Lput(4 * (uint32(len(l.data)) + 2))
@@ -309,14 +301,12 @@
}
func domacho() {
- var s *LSym
-
if Debug['d'] != 0 {
return
}
// empirically, string table must begin with " \x00".
- s = Linklookup(Ctxt, ".machosymstr", 0)
+ s := Linklookup(Ctxt, ".machosymstr", 0)
s.Type = SMACHOSYMSTR
s.Reachable = true
@@ -328,7 +318,7 @@
s.Reachable = true
if Linkmode != LinkExternal {
- s = Linklookup(Ctxt, ".plt", 0) // will be __symbol_stub
+ s := Linklookup(Ctxt, ".plt", 0) // will be __symbol_stub
s.Type = SMACHOPLT
s.Reachable = true
@@ -364,12 +354,9 @@
}
func machoshbits(mseg *MachoSeg, sect *Section, segname string) {
- var msect *MachoSect
- var buf string
+ buf := "__" + strings.Replace(sect.Name[1:], ".", "_", -1)
- buf = "__" + strings.Replace(sect.Name[1:], ".", "_", -1)
-
- msect = newMachoSect(mseg, buf, segname)
+ msect := newMachoSect(mseg, buf, segname)
if sect.Rellen > 0 {
msect.reloc = uint32(sect.Reloff)
msect.nreloc = uint32(sect.Rellen / 8)
@@ -413,20 +400,10 @@
}
func Asmbmacho() {
- var v int64
- var w int64
- var va int64
- var a int
- var i int
- var mh *MachoHdr
- var ms *MachoSeg
- var ml *MachoLoad
- var sect *Section
-
/* apple MACH */
- va = INITTEXT - int64(HEADR)
+ va := INITTEXT - int64(HEADR)
- mh = getMachoHdr()
+ mh := getMachoHdr()
switch Thearch.Thechar {
default:
Diag("unknown mach architecture")
@@ -446,7 +423,7 @@
mh.subcpu = MACHO_SUBCPU_X86
}
- ms = nil
+ ms := (*MachoSeg)(nil)
if Linkmode == LinkExternal {
/* segment for entire file */
ms = newMachoSeg("", 40)
@@ -462,7 +439,7 @@
}
/* text */
- v = Rnd(int64(uint64(HEADR)+Segtext.Length), int64(INITRND))
+ v := Rnd(int64(uint64(HEADR)+Segtext.Length), int64(INITRND))
if Linkmode != LinkExternal {
ms = newMachoSeg("__TEXT", 20)
@@ -474,13 +451,13 @@
ms.prot2 = 5
}
- for sect = Segtext.Sect; sect != nil; sect = sect.Next {
+ for sect := Segtext.Sect; sect != nil; sect = sect.Next {
machoshbits(ms, sect, "__TEXT")
}
/* data */
if Linkmode != LinkExternal {
- w = int64(Segdata.Length)
+ w := int64(Segdata.Length)
ms = newMachoSeg("__DATA", 20)
ms.vaddr = uint64(va) + uint64(v)
ms.vsize = uint64(w)
@@ -490,7 +467,7 @@
ms.prot2 = 3
}
- for sect = Segdata.Sect; sect != nil; sect = sect.Next {
+ for sect := Segdata.Sect; sect != nil; sect = sect.Next {
machoshbits(ms, sect, "__DATA")
}
@@ -502,20 +479,20 @@
fallthrough
case '5':
- ml = newMachoLoad(5, 17+2) /* unix thread */
+ ml := newMachoLoad(5, 17+2) /* unix thread */
ml.data[0] = 1 /* thread type */
ml.data[1] = 17 /* word count */
ml.data[2+15] = uint32(Entryvalue()) /* start pc */
case '6':
- ml = newMachoLoad(5, 42+2) /* unix thread */
+ ml := newMachoLoad(5, 42+2) /* unix thread */
ml.data[0] = 4 /* thread type */
ml.data[1] = 42 /* word count */
ml.data[2+32] = uint32(Entryvalue()) /* start pc */
ml.data[2+32+1] = uint32(Entryvalue() >> 16 >> 16) // hide >>32 for 8l
case '8':
- ml = newMachoLoad(5, 16+2) /* unix thread */
+ ml := newMachoLoad(5, 16+2) /* unix thread */
ml.data[0] = 1 /* thread type */
ml.data[1] = 16 /* word count */
ml.data[2+10] = uint32(Entryvalue()) /* start pc */
@@ -523,20 +500,15 @@
}
if Debug['d'] == 0 {
- var s1 *LSym
- var s2 *LSym
- var s3 *LSym
- var s4 *LSym
-
// must match domacholink below
- s1 = Linklookup(Ctxt, ".machosymtab", 0)
+ s1 := Linklookup(Ctxt, ".machosymtab", 0)
- s2 = Linklookup(Ctxt, ".linkedit.plt", 0)
- s3 = Linklookup(Ctxt, ".linkedit.got", 0)
- s4 = Linklookup(Ctxt, ".machosymstr", 0)
+ s2 := Linklookup(Ctxt, ".linkedit.plt", 0)
+ s3 := Linklookup(Ctxt, ".linkedit.got", 0)
+ s4 := Linklookup(Ctxt, ".machosymstr", 0)
if Linkmode != LinkExternal {
- ms = newMachoSeg("__LINKEDIT", 0)
+ ms := newMachoSeg("__LINKEDIT", 0)
ms.vaddr = uint64(va) + uint64(v) + uint64(Rnd(int64(Segdata.Length), int64(INITRND)))
ms.vsize = uint64(s1.Size) + uint64(s2.Size) + uint64(s3.Size) + uint64(s4.Size)
ms.fileoffset = uint64(linkoff)
@@ -545,7 +517,7 @@
ms.prot2 = 3
}
- ml = newMachoLoad(2, 4) /* LC_SYMTAB */
+ ml := newMachoLoad(2, 4) /* LC_SYMTAB */
ml.data[0] = uint32(linkoff) /* symoff */
ml.data[1] = uint32(nsortsym) /* nsyms */
ml.data[2] = uint32(linkoff + s1.Size + s2.Size + s3.Size) /* stroff */
@@ -554,11 +526,11 @@
machodysymtab()
if Linkmode != LinkExternal {
- ml = newMachoLoad(14, 6) /* LC_LOAD_DYLINKER */
- ml.data[0] = 12 /* offset to string */
+ ml := newMachoLoad(14, 6) /* LC_LOAD_DYLINKER */
+ ml.data[0] = 12 /* offset to string */
stringtouint32(ml.data[1:], "/usr/lib/dyld")
- for i = 0; i < len(dylib); i++ {
+ for i := 0; i < len(dylib); i++ {
ml = newMachoLoad(12, 4+(uint32(len(dylib[i]))+1+7)/8*2) /* LC_LOAD_DYLIB */
ml.data[0] = 24 /* offset of string from beginning of load */
ml.data[1] = 0 /* time stamp */
@@ -574,7 +546,7 @@
dwarfaddmachoheaders()
}
- a = machowrite()
+ a := machowrite()
if int32(a) > HEADR {
Diag("HEADR too small: %d > %d", a, HEADR)
}
@@ -624,16 +596,11 @@
}
func (x machoscmp) Less(i, j int) bool {
- var s1 *LSym
- var s2 *LSym
- var k1 int
- var k2 int
+ s1 := x[i]
+ s2 := x[j]
- s1 = x[i]
- s2 = x[j]
-
- k1 = symkind(s1)
- k2 = symkind(s2)
+ k1 := symkind(s1)
+ k2 := symkind(s2)
if k1 != k2 {
return k1-k2 < 0
}
@@ -642,10 +609,8 @@
}
func machogenasmsym(put func(*LSym, string, int, int64, int64, int, *LSym)) {
- var s *LSym
-
genasmsym(put)
- for s = Ctxt.Allsym; s != nil; s = s.Allsym {
+ for s := Ctxt.Allsym; s != nil; s = s.Allsym {
if s.Type == SDYNIMPORT || s.Type == SHOSTOBJ {
if s.Reachable {
put(s, "", 'D', 0, 0, 0, nil)
@@ -655,12 +620,10 @@
}
func machosymorder() {
- var i int
-
// On Mac OS X Mountain Lion, we must sort exported symbols
// So we sort them here and pre-allocate dynid for them
// See http://golang.org/issue/4029
- for i = 0; i < len(dynexp); i++ {
+ for i := 0; i < len(dynexp); i++ {
dynexp[i].Reachable = true
}
machogenasmsym(addsym)
@@ -668,23 +631,20 @@
nsortsym = 0
machogenasmsym(addsym)
sort.Sort(machoscmp(sortsym[:nsortsym]))
- for i = 0; i < nsortsym; i++ {
+ for i := 0; i < nsortsym; i++ {
sortsym[i].Dynid = int32(i)
}
}
func machosymtab() {
- var i int
- var symtab *LSym
- var symstr *LSym
var s *LSym
var o *LSym
var p string
- symtab = Linklookup(Ctxt, ".machosymtab", 0)
- symstr = Linklookup(Ctxt, ".machosymstr", 0)
+ symtab := Linklookup(Ctxt, ".machosymtab", 0)
+ symstr := Linklookup(Ctxt, ".machosymstr", 0)
- for i = 0; i < nsortsym; i++ {
+ for i := 0; i < nsortsym; i++ {
s = sortsym[i]
Adduint32(Ctxt, symtab, uint32(symstr.Size))
@@ -737,15 +697,9 @@
}
func machodysymtab() {
- var n int
- var ml *MachoLoad
- var s1 *LSym
- var s2 *LSym
- var s3 *LSym
+ ml := newMachoLoad(11, 18) /* LC_DYSYMTAB */
- ml = newMachoLoad(11, 18) /* LC_DYSYMTAB */
-
- n = 0
+ n := 0
ml.data[0] = uint32(n) /* ilocalsym */
ml.data[1] = uint32(nkind[SymKindLocal]) /* nlocalsym */
n += nkind[SymKindLocal]
@@ -765,10 +719,10 @@
ml.data[11] = 0 /* nextrefsyms */
// must match domacholink below
- s1 = Linklookup(Ctxt, ".machosymtab", 0)
+ s1 := Linklookup(Ctxt, ".machosymtab", 0)
- s2 = Linklookup(Ctxt, ".linkedit.plt", 0)
- s3 = Linklookup(Ctxt, ".linkedit.got", 0)
+ s2 := Linklookup(Ctxt, ".linkedit.plt", 0)
+ s3 := Linklookup(Ctxt, ".linkedit.got", 0)
ml.data[12] = uint32(linkoff + s1.Size) /* indirectsymoff */
ml.data[13] = uint32((s2.Size + s3.Size) / 4) /* nindirectsyms */
@@ -779,20 +733,14 @@
}
func Domacholink() int64 {
- var size int
- var s1 *LSym
- var s2 *LSym
- var s3 *LSym
- var s4 *LSym
-
machosymtab()
// write data that will be linkedit section
- s1 = Linklookup(Ctxt, ".machosymtab", 0)
+ s1 := Linklookup(Ctxt, ".machosymtab", 0)
- s2 = Linklookup(Ctxt, ".linkedit.plt", 0)
- s3 = Linklookup(Ctxt, ".linkedit.got", 0)
- s4 = Linklookup(Ctxt, ".machosymstr", 0)
+ s2 := Linklookup(Ctxt, ".linkedit.plt", 0)
+ s3 := Linklookup(Ctxt, ".linkedit.got", 0)
+ s4 := Linklookup(Ctxt, ".machosymstr", 0)
// Force the linkedit section to end on a 16-byte
// boundary. This allows pure (non-cgo) Go binaries
@@ -815,7 +763,7 @@
Adduint8(Ctxt, s4, 0)
}
- size = int(s1.Size + s2.Size + s3.Size + s4.Size)
+ size := int(s1.Size + s2.Size + s3.Size + s4.Size)
if size > 0 {
linkoff = Rnd(int64(uint64(HEADR)+Segtext.Length), int64(INITRND)) + Rnd(int64(Segdata.Filelen), int64(INITRND)) + Rnd(int64(Segdwarf.Filelen), int64(INITRND))
@@ -831,17 +779,13 @@
}
func machorelocsect(sect *Section, first *LSym) {
- var sym *LSym
- var eaddr int32
- var ri int
- var r *Reloc
-
// If main section has no bits, nothing to relocate.
if sect.Vaddr >= sect.Seg.Vaddr+sect.Seg.Filelen {
return
}
sect.Reloff = uint64(Cpos())
+ var sym *LSym
for sym = first; sym != nil; sym = sym.Next {
if !sym.Reachable {
continue
@@ -851,7 +795,9 @@
}
}
- eaddr = int32(sect.Vaddr + sect.Length)
+ eaddr := int32(sect.Vaddr + sect.Length)
+ var r *Reloc
+ var ri int
for ; sym != nil; sym = sym.Next {
if !sym.Reachable {
continue
@@ -876,17 +822,15 @@
}
func Machoemitreloc() {
- var sect *Section
-
for Cpos()&7 != 0 {
Cput(0)
}
machorelocsect(Segtext.Sect, Ctxt.Textp)
- for sect = Segtext.Sect.Next; sect != nil; sect = sect.Next {
+ for sect := Segtext.Sect.Next; sect != nil; sect = sect.Next {
machorelocsect(sect, datap)
}
- for sect = Segdata.Sect; sect != nil; sect = sect.Next {
+ for sect := Segdata.Sect; sect != nil; sect = sect.Next {
machorelocsect(sect, datap)
}
}
diff --git a/src/cmd/internal/ld/objfile.go b/src/cmd/internal/ld/objfile.go
index 98b7ba6..003c106 100644
--- a/src/cmd/internal/ld/objfile.go
+++ b/src/cmd/internal/ld/objfile.go
@@ -17,23 +17,19 @@
var endmagic string = "\xff\xffgo13ld"
func ldobjfile(ctxt *Link, f *Biobuf, pkg string, length int64, pn string) {
- var c int
- var buf [8]uint8
- var start int64
- var lib string
-
- start = Boffset(f)
+ start := Boffset(f)
ctxt.Version++
- buf = [8]uint8{}
+ buf := [8]uint8{}
Bread(f, buf[:])
if string(buf[:]) != startmagic {
log.Fatalf("%s: invalid file start %x %x %x %x %x %x %x %x", pn, buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7])
}
- c = Bgetc(f)
+ c := Bgetc(f)
if c != 1 {
log.Fatalf("%s: invalid file version number %d", pn, c)
}
+ var lib string
for {
lib = rdstring(f)
if lib == "" {
@@ -65,45 +61,28 @@
var readsym_ndup int
func readsym(ctxt *Link, f *Biobuf, pkg string, pn string) {
- var i int
- var j int
- var c int
- var t int
- var v int
- var n int
- var nreloc int
- var size int
- var dupok int
- var name string
- var data []byte
- var r *Reloc
- var s *LSym
- var dup *LSym
- var typ *LSym
- var pc *Pcln
- var a *Auto
-
if Bgetc(f) != 0xfe {
log.Fatalf("readsym out of sync")
}
- t = int(rdint(f))
- name = expandpkg(rdstring(f), pkg)
- v = int(rdint(f))
+ t := int(rdint(f))
+ name := expandpkg(rdstring(f), pkg)
+ v := int(rdint(f))
if v != 0 && v != 1 {
log.Fatalf("invalid symbol version %d", v)
}
- dupok = int(rdint(f))
+ dupok := int(rdint(f))
dupok &= 1
- size = int(rdint(f))
- typ = rdsym(ctxt, f, pkg)
+ size := int(rdint(f))
+ typ := rdsym(ctxt, f, pkg)
+ var data []byte
rddata(f, &data)
- nreloc = int(rdint(f))
+ nreloc := int(rdint(f))
if v != 0 {
v = ctxt.Version
}
- s = Linklookup(ctxt, name, v)
- dup = nil
+ s := Linklookup(ctxt, name, v)
+ dup := (*LSym)(nil)
if s.Type != 0 && s.Type != SXREF {
if (t == SDATA || t == SBSS || t == SNOPTRBSS) && len(data) == 0 && nreloc == 0 {
if s.Size < int64(size) {
@@ -155,7 +134,8 @@
if nreloc > 0 {
s.R = make([]Reloc, nreloc)
s.R = s.R[:nreloc]
- for i = 0; i < nreloc; i++ {
+ var r *Reloc
+ for i := 0; i < nreloc; i++ {
r = &s.R[i]
r.Off = int32(rdint(f))
r.Siz = uint8(rdint(f))
@@ -179,11 +159,12 @@
s.Args = int32(rdint(f))
s.Locals = int32(rdint(f))
s.Nosplit = uint8(rdint(f))
- v = int(rdint(f))
+ v := int(rdint(f))
s.Leaf = uint8(v & 1)
s.Cfunc = uint8(v & 2)
- n = int(rdint(f))
- for i = 0; i < n; i++ {
+ n := int(rdint(f))
+ var a *Auto
+ for i := 0; i < n; i++ {
a = new(Auto)
a.Asym = rdsym(ctxt, f, pkg)
a.Aoffset = int32(rdint(f))
@@ -194,30 +175,30 @@
}
s.Pcln = new(Pcln)
- pc = s.Pcln
+ pc := s.Pcln
rddata(f, &pc.Pcsp.P)
rddata(f, &pc.Pcfile.P)
rddata(f, &pc.Pcline.P)
n = int(rdint(f))
pc.Pcdata = make([]Pcdata, n)
pc.Npcdata = n
- for i = 0; i < n; i++ {
+ for i := 0; i < n; i++ {
rddata(f, &pc.Pcdata[i].P)
}
n = int(rdint(f))
pc.Funcdata = make([]*LSym, n)
pc.Funcdataoff = make([]int64, n)
pc.Nfuncdata = n
- for i = 0; i < n; i++ {
+ for i := 0; i < n; i++ {
pc.Funcdata[i] = rdsym(ctxt, f, pkg)
}
- for i = 0; i < n; i++ {
+ for i := 0; i < n; i++ {
pc.Funcdataoff[i] = rdint(f)
}
n = int(rdint(f))
pc.File = make([]*LSym, n)
pc.Nfile = n
- for i = 0; i < n; i++ {
+ for i := 0; i < n; i++ {
pc.File[i] = rdsym(ctxt, f, pkg)
}
@@ -257,7 +238,9 @@
fmt.Fprintf(ctxt.Bso, " args=%#x locals=%#x", uint64(s.Args), uint64(s.Locals))
}
fmt.Fprintf(ctxt.Bso, "\n")
- for i = 0; i < len(s.P); {
+ var c int
+ var j int
+ for i := 0; i < len(s.P); {
fmt.Fprintf(ctxt.Bso, "\t%#04x", uint(i))
for j = i; j < i+16 && j < len(s.P); j++ {
fmt.Fprintf(ctxt.Bso, " %02x", s.P[j])
@@ -279,7 +262,8 @@
i += 16
}
- for i = 0; i < len(s.R); i++ {
+ var r *Reloc
+ for i := 0; i < len(s.R); i++ {
r = &s.R[i]
fmt.Fprintf(ctxt.Bso, "\trel %d+%d t=%d %s+%d\n", int(r.Off), r.Siz, r.Type, r.Sym.Name, int64(r.Add))
}
@@ -288,11 +272,9 @@
func rdint(f *Biobuf) int64 {
var c int
- var uv uint64
- var shift int
- uv = 0
- for shift = 0; ; shift += 7 {
+ uv := uint64(0)
+ for shift := 0; ; shift += 7 {
if shift >= 64 {
log.Fatalf("corrupt input")
}
@@ -322,12 +304,7 @@
var symbuf []byte
func rdsym(ctxt *Link, f *Biobuf, pkg string) *LSym {
- var n int
- var v int
- var p string
- var s *LSym
-
- n = int(rdint(f))
+ n := int(rdint(f))
if n == 0 {
rdint(f)
return nil
@@ -337,25 +314,23 @@
symbuf = make([]byte, n)
}
Bread(f, symbuf[:n])
- p = string(symbuf[:n])
- v = int(rdint(f))
+ p := string(symbuf[:n])
+ v := int(rdint(f))
if v != 0 {
v = ctxt.Version
}
- s = Linklookup(ctxt, expandpkg(p, pkg), v)
+ s := Linklookup(ctxt, expandpkg(p, pkg), v)
if v == 0 && s.Name[0] == '$' && s.Type == 0 {
if strings.HasPrefix(s.Name, "$f32.") {
- var i32 int32
x, _ := strconv.ParseUint(s.Name[5:], 16, 32)
- i32 = int32(x)
+ i32 := int32(x)
s.Type = SRODATA
Adduint32(ctxt, s, uint32(i32))
s.Reachable = false
} else if strings.HasPrefix(s.Name, "$f64.") || strings.HasPrefix(s.Name, "$i64.") {
- var i64 int64
x, _ := strconv.ParseUint(s.Name[5:], 16, 64)
- i64 = int64(x)
+ i64 := int64(x)
s.Type = SRODATA
Adduint64(ctxt, s, uint64(i64))
s.Reachable = false
diff --git a/src/cmd/internal/ld/pcln.go b/src/cmd/internal/ld/pcln.go
index 2900664..e8ef252 100644
--- a/src/cmd/internal/ld/pcln.go
+++ b/src/cmd/internal/ld/pcln.go
@@ -40,13 +40,9 @@
// iteration over encoded pcdata tables.
func getvarint(pp *[]byte) uint32 {
- var p []byte
- var shift int
- var v uint32
-
- v = 0
- p = *pp
- for shift = 0; ; shift += 7 {
+ v := uint32(0)
+ p := *pp
+ for shift := 0; ; shift += 7 {
v |= uint32(p[0]&0x7F) << uint(shift)
tmp4 := p
p = p[1:]
@@ -60,9 +56,6 @@
}
func pciternext(it *Pciter) {
- var v uint32
- var dv int32
-
it.pc = it.nextpc
if it.done != 0 {
return
@@ -73,7 +66,7 @@
}
// value delta
- v = getvarint(&it.p)
+ v := getvarint(&it.p)
if v == 0 && it.start == 0 {
it.done = 1
@@ -81,7 +74,7 @@
}
it.start = 0
- dv = int32(v>>1) ^ (int32(v<<31) >> 31)
+ dv := int32(v>>1) ^ (int32(v<<31) >> 31)
it.value += dv
// pc delta
@@ -107,12 +100,8 @@
// license that can be found in the LICENSE file.
func addvarint(d *Pcdata, val uint32) {
- var n int32
- var v uint32
- var p []byte
-
- n = 0
- for v = val; v >= 0x80; v >>= 7 {
+ n := int32(0)
+ for v := val; v >= 0x80; v >>= 7 {
n++
}
n++
@@ -123,7 +112,8 @@
}
d.P = d.P[:old+int(n)]
- p = d.P[old:]
+ p := d.P[old:]
+ var v uint32
for v = val; v >= 0x80; v >>= 7 {
p[0] = byte(v | 0x80)
p = p[1:]
@@ -132,9 +122,7 @@
}
func addpctab(ftab *LSym, off int32, d *Pcdata) int32 {
- var start int32
-
- start = int32(len(ftab.P))
+ start := int32(len(ftab.P))
Symgrow(Ctxt, ftab, int64(start)+int64(len(d.P)))
copy(ftab.P[start:], d.P)
@@ -142,29 +130,18 @@
}
func ftabaddstring(ftab *LSym, s string) int32 {
- var n int32
- var start int32
-
- n = int32(len(s)) + 1
- start = int32(len(ftab.P))
+ n := int32(len(s)) + 1
+ start := int32(len(ftab.P))
Symgrow(Ctxt, ftab, int64(start)+int64(n)+1)
copy(ftab.P[start:], s)
return start
}
func renumberfiles(ctxt *Link, files []*LSym, d *Pcdata) {
- var i int
var f *LSym
- var out Pcdata
- var it Pciter
- var v uint32
- var oldval int32
- var newval int32
- var val int32
- var dv int32
// Give files numbers.
- for i = 0; i < len(files); i++ {
+ for i := 0; i < len(files); i++ {
f = files[i]
if f.Type != SFILEPATH {
ctxt.Nhistfile++
@@ -175,9 +152,14 @@
}
}
- newval = -1
- out = Pcdata{}
+ newval := int32(-1)
+ out := Pcdata{}
+ var dv int32
+ var it Pciter
+ var oldval int32
+ var v uint32
+ var val int32
for pciterinit(ctxt, &it, d); it.done == 0; pciternext(&it) {
// value delta
oldval = it.value
@@ -221,22 +203,8 @@
var pclntab_zpcln Pcln
func pclntab() {
- var i int32
- var nfunc int32
- var start int32
- var funcstart int32
- var ftab *LSym
- var s *LSym
- var last *LSym
- var off int32
- var end int32
- var frameptrsize int32
- var funcdata_bytes int64
- var pcln *Pcln
- var it Pciter
-
- funcdata_bytes = 0
- ftab = Linklookup(Ctxt, "runtime.pclntab", 0)
+ funcdata_bytes := int64(0)
+ ftab := Linklookup(Ctxt, "runtime.pclntab", 0)
ftab.Type = SPCLNTAB
ftab.Reachable = true
@@ -246,7 +214,7 @@
// function table, alternating PC and offset to func struct [each entry thearch.ptrsize bytes]
// end PC [thearch.ptrsize bytes]
// offset to file table [4 bytes]
- nfunc = 0
+ nfunc := int32(0)
for Ctxt.Cursym = Ctxt.Textp; Ctxt.Cursym != nil; Ctxt.Cursym = Ctxt.Cursym.Next {
if container(Ctxt.Cursym) == 0 {
@@ -261,7 +229,14 @@
setuintxx(Ctxt, ftab, 8, uint64(nfunc), int64(Thearch.Ptrsize))
nfunc = 0
- last = nil
+ last := (*LSym)(nil)
+ var end int32
+ var frameptrsize int32
+ var funcstart int32
+ var i int32
+ var it Pciter
+ var off int32
+ var pcln *Pcln
for Ctxt.Cursym = Ctxt.Textp; Ctxt.Cursym != nil; Ctxt.Cursym = Ctxt.Cursym.Next {
last = Ctxt.Cursym
if container(Ctxt.Cursym) != 0 {
@@ -366,14 +341,14 @@
setaddrplus(Ctxt, ftab, 8+int64(Thearch.Ptrsize)+int64(nfunc)*2*int64(Thearch.Ptrsize), last, last.Size)
// Start file table.
- start = int32(len(ftab.P))
+ start := int32(len(ftab.P))
start += int32(-len(ftab.P)) & (int32(Thearch.Ptrsize) - 1)
setuint32(Ctxt, ftab, 8+int64(Thearch.Ptrsize)+int64(nfunc)*2*int64(Thearch.Ptrsize)+int64(Thearch.Ptrsize), uint32(start))
Symgrow(Ctxt, ftab, int64(start)+(int64(Ctxt.Nhistfile)+1)*4)
setuint32(Ctxt, ftab, int64(start), uint32(Ctxt.Nhistfile))
- for s = Ctxt.Filesyms; s != nil; s = s.Next {
+ for s := Ctxt.Filesyms; s != nil; s = s.Next {
setuint32(Ctxt, ftab, int64(start)+s.Value*4, uint32(ftabaddstring(ftab, s.Name)))
}
@@ -394,43 +369,32 @@
// findfunctab generates a lookup table to quickly find the containing
// function for a pc. See src/runtime/symtab.go:findfunc for details.
func findfunctab() {
- var t *LSym
- var s *LSym
- var e *LSym
- var idx int32
- var i int32
- var j int32
- var nbuckets int32
- var n int32
- var base int32
- var min int64
- var max int64
- var p int64
- var q int64
- var indexes []int32
-
- t = Linklookup(Ctxt, "runtime.findfunctab", 0)
+ t := Linklookup(Ctxt, "runtime.findfunctab", 0)
t.Type = SRODATA
t.Reachable = true
// find min and max address
- min = Ctxt.Textp.Value
+ min := Ctxt.Textp.Value
- max = 0
- for s = Ctxt.Textp; s != nil; s = s.Next {
+ max := int64(0)
+ for s := Ctxt.Textp; s != nil; s = s.Next {
max = s.Value + s.Size
}
// for each subbucket, compute the minimum of all symbol indexes
// that map to that subbucket.
- n = int32((max - min + SUBBUCKETSIZE - 1) / SUBBUCKETSIZE)
+ n := int32((max - min + SUBBUCKETSIZE - 1) / SUBBUCKETSIZE)
- indexes = make([]int32, n)
- for i = 0; i < n; i++ {
+ indexes := make([]int32, n)
+ for i := int32(0); i < n; i++ {
indexes[i] = NOIDX
}
- idx = 0
- for s = Ctxt.Textp; s != nil; s = s.Next {
+ idx := int32(0)
+ var e *LSym
+ var i int32
+ var p int64
+ var q int64
+ for s := Ctxt.Textp; s != nil; s = s.Next {
if container(s) != 0 {
continue
}
@@ -461,12 +425,14 @@
}
// allocate table
- nbuckets = int32((max - min + BUCKETSIZE - 1) / BUCKETSIZE)
+ nbuckets := int32((max - min + BUCKETSIZE - 1) / BUCKETSIZE)
Symgrow(Ctxt, t, 4*int64(nbuckets)+int64(n))
// fill in table
- for i = 0; i < nbuckets; i++ {
+ var base int32
+ var j int32
+ for i := int32(0); i < nbuckets; i++ {
base = indexes[i*SUBBUCKETS]
if base == NOIDX {
Diag("hole in findfunctab")
diff --git a/src/cmd/internal/ld/pe.go b/src/cmd/internal/ld/pe.go
index c2ef49f..30fe400 100644
--- a/src/cmd/internal/ld/pe.go
+++ b/src/cmd/internal/ld/pe.go
@@ -371,14 +371,12 @@
var ncoffsym int
func addpesection(name string, sectsize int, filesize int) *IMAGE_SECTION_HEADER {
- var h *IMAGE_SECTION_HEADER
-
if pensect == 16 {
Diag("too many sections")
Errorexit()
}
- h = &sh[pensect]
+ h := &sh[pensect]
pensect++
copy(h.Name[:], name)
h.VirtualSize = uint32(sectsize)
@@ -466,14 +464,11 @@
}
func initdynimport() *Dll {
- var m *Imp
var d *Dll
- var s *LSym
- var dynamic *LSym
dr = nil
- m = nil
- for s = Ctxt.Allsym; s != nil; s = s.Allsym {
+ m := (*Imp)(nil)
+ for s := Ctxt.Allsym; s != nil; s = s.Allsym {
if !s.Reachable || s.Type != SDYNIMPORT {
continue
}
@@ -497,10 +492,10 @@
d.ms = m
}
- dynamic = Linklookup(Ctxt, ".windynamic", 0)
+ dynamic := Linklookup(Ctxt, ".windynamic", 0)
dynamic.Reachable = true
dynamic.Type = SWINDOWS
- for d = dr; d != nil; d = d.next {
+ for d := dr; d != nil; d = d.next {
for m = d.ms; m != nil; m = m.next {
m.s.Type = SWINDOWS | SSUB
m.s.Sub = dynamic.Sub
@@ -516,35 +511,26 @@
}
func addimports(datsect *IMAGE_SECTION_HEADER) {
- var isect *IMAGE_SECTION_HEADER
- var n uint64
- var oftbase uint64
- var ftbase uint64
- var startoff int64
- var endoff int64
- var m *Imp
- var d *Dll
- var dynamic *LSym
-
- startoff = Cpos()
- dynamic = Linklookup(Ctxt, ".windynamic", 0)
+ startoff := Cpos()
+ dynamic := Linklookup(Ctxt, ".windynamic", 0)
// skip import descriptor table (will write it later)
- n = 0
+ n := uint64(0)
- for d = dr; d != nil; d = d.next {
+ for d := dr; d != nil; d = d.next {
n++
}
Cseek(startoff + int64(binary.Size(&IMAGE_IMPORT_DESCRIPTOR{}))*int64(n+1))
// write dll names
- for d = dr; d != nil; d = d.next {
+ for d := dr; d != nil; d = d.next {
d.nameoff = uint64(Cpos()) - uint64(startoff)
strput(d.name)
}
// write function names
- for d = dr; d != nil; d = d.next {
+ var m *Imp
+ for d := dr; d != nil; d = d.next {
for m = d.ms; m != nil; m = m.next {
m.off = uint64(nextsectoff) + uint64(Cpos()) - uint64(startoff)
Wputl(0) // hint
@@ -553,10 +539,10 @@
}
// write OriginalFirstThunks
- oftbase = uint64(Cpos()) - uint64(startoff)
+ oftbase := uint64(Cpos()) - uint64(startoff)
n = uint64(Cpos())
- for d = dr; d != nil; d = d.next {
+ for d := dr; d != nil; d = d.next {
d.thunkoff = uint64(Cpos()) - n
for m = d.ms; m != nil; m = m.next {
if pe64 != 0 {
@@ -576,17 +562,17 @@
// add pe section and pad it at the end
n = uint64(Cpos()) - uint64(startoff)
- isect = addpesection(".idata", int(n), int(n))
+ isect := addpesection(".idata", int(n), int(n))
isect.Characteristics = IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_WRITE
chksectoff(isect, startoff)
strnput("", int(uint64(isect.SizeOfRawData)-n))
- endoff = Cpos()
+ endoff := Cpos()
// write FirstThunks (allocated in .data section)
- ftbase = uint64(dynamic.Value) - uint64(datsect.VirtualAddress) - PEBASE
+ ftbase := uint64(dynamic.Value) - uint64(datsect.VirtualAddress) - PEBASE
Cseek(int64(uint64(datsect.PointerToRawData) + ftbase))
- for d = dr; d != nil; d = d.next {
+ for d := dr; d != nil; d = d.next {
for m = d.ms; m != nil; m = m.next {
if pe64 != 0 {
Vputl(m.off)
@@ -605,7 +591,7 @@
// finally write import descriptor table
Cseek(startoff)
- for d = dr; d != nil; d = d.next {
+ for d := dr; d != nil; d = d.next {
Lputl(uint32(uint64(isect.VirtualAddress) + oftbase + d.thunkoff))
Lputl(0)
Lputl(0)
@@ -640,19 +626,14 @@
}
func (x pescmp) Less(i, j int) bool {
- var s1 *LSym
- var s2 *LSym
-
- s1 = x[i]
- s2 = x[j]
+ s1 := x[i]
+ s2 := x[j]
return stringsCompare(s1.Extname, s2.Extname) < 0
}
func initdynexport() {
- var s *LSym
-
nexport = 0
- for s = Ctxt.Allsym; s != nil; s = s.Allsym {
+ for s := Ctxt.Allsym; s != nil; s = s.Allsym {
if !s.Reachable || s.Cgoexport&CgoExportDynamic == 0 {
continue
}
@@ -669,18 +650,10 @@
}
func addexports() {
- var sect *IMAGE_SECTION_HEADER
var e IMAGE_EXPORT_DIRECTORY
- var size int
- var i int
- var va int
- var va_name int
- var va_addr int
- var va_na int
- var v int
- size = binary.Size(&e) + 10*nexport + len(outfile) + 1
- for i = 0; i < nexport; i++ {
+ size := binary.Size(&e) + 10*nexport + len(outfile) + 1
+ for i := 0; i < nexport; i++ {
size += len(dexport[i].Extname) + 1
}
@@ -688,16 +661,16 @@
return
}
- sect = addpesection(".edata", size, size)
+ sect := addpesection(".edata", size, size)
sect.Characteristics = IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ
chksectoff(sect, Cpos())
- va = int(sect.VirtualAddress)
+ va := int(sect.VirtualAddress)
dd[IMAGE_DIRECTORY_ENTRY_EXPORT].VirtualAddress = uint32(va)
dd[IMAGE_DIRECTORY_ENTRY_EXPORT].Size = sect.VirtualSize
- va_name = va + binary.Size(&e) + nexport*4
- va_addr = va + binary.Size(&e)
- va_na = va + binary.Size(&e) + nexport*8
+ va_name := va + binary.Size(&e) + nexport*4
+ va_addr := va + binary.Size(&e)
+ va_na := va + binary.Size(&e) + nexport*8
e.Characteristics = 0
e.MajorVersion = 0
@@ -714,37 +687,35 @@
binary.Write(&coutbuf, binary.LittleEndian, &e)
// put EXPORT Address Table
- for i = 0; i < nexport; i++ {
+ for i := 0; i < nexport; i++ {
Lputl(uint32(dexport[i].Value - PEBASE))
}
// put EXPORT Name Pointer Table
- v = int(e.Name + uint32(len(outfile)) + 1)
+ v := int(e.Name + uint32(len(outfile)) + 1)
- for i = 0; i < nexport; i++ {
+ for i := 0; i < nexport; i++ {
Lputl(uint32(v))
v += len(dexport[i].Extname) + 1
}
// put EXPORT Ordinal Table
- for i = 0; i < nexport; i++ {
+ for i := 0; i < nexport; i++ {
Wputl(uint16(i))
}
// put Names
strnput(outfile, len(outfile)+1)
- for i = 0; i < nexport; i++ {
+ for i := 0; i < nexport; i++ {
strnput(dexport[i].Extname, len(dexport[i].Extname)+1)
}
strnput("", int(sect.SizeOfRawData-uint32(size)))
}
func dope() {
- var rel *LSym
-
/* relocation table */
- rel = Linklookup(Ctxt, ".rel", 0)
+ rel := Linklookup(Ctxt, ".rel", 0)
rel.Reachable = true
rel.Type = SELFROSECT
@@ -768,25 +739,19 @@
* <http://www.microsoft.com/whdc/system/platform/firmware/PECOFFdwn.mspx>
*/
func newPEDWARFSection(name string, size int64) *IMAGE_SECTION_HEADER {
- var h *IMAGE_SECTION_HEADER
- var s string
- var off int
-
if size == 0 {
return nil
}
- off = strtbladd(name)
- s = fmt.Sprintf("/%d", off)
- h = addpesection(s, int(size), int(size))
+ off := strtbladd(name)
+ s := fmt.Sprintf("/%d", off)
+ h := addpesection(s, int(size), int(size))
h.Characteristics = IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_DISCARDABLE
return h
}
func addpesym(s *LSym, name string, type_ int, addr int64, size int64, ver int, gotype *LSym) {
- var cs *COFFSym
-
if s == nil {
return
}
@@ -806,7 +771,7 @@
}
if coffsym != nil {
- cs = &coffsym[ncoffsym]
+ cs := &coffsym[ncoffsym]
cs.sym = s
if len(s.Name) > 8 {
cs.strtbloff = strtbladd(s.Name)
@@ -828,11 +793,6 @@
}
func addpesymtable() {
- var h *IMAGE_SECTION_HEADER
- var i int
- var size int
- var s *COFFSym
-
if Debug['s'] == 0 {
genasmsym(addpesym)
coffsym = make([]COFFSym, ncoffsym)
@@ -840,15 +800,16 @@
genasmsym(addpesym)
}
- size = len(strtbl) + 4 + 18*ncoffsym
- h = addpesection(".symtab", size, size)
+ size := len(strtbl) + 4 + 18*ncoffsym
+ h := addpesection(".symtab", size, size)
h.Characteristics = IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_DISCARDABLE
chksectoff(h, Cpos())
fh.PointerToSymbolTable = uint32(Cpos())
fh.NumberOfSymbols = uint32(ncoffsym)
// put COFF symbol table
- for i = 0; i < ncoffsym; i++ {
+ var s *COFFSym
+ for i := 0; i < ncoffsym; i++ {
s = &coffsym[i]
if s.strtbloff == 0 {
strnput(s.sym.Name, 8)
@@ -867,7 +828,7 @@
// put COFF string table
Lputl(uint32(len(strtbl)) + 4)
- for i = 0; i < len(strtbl); i++ {
+ for i := 0; i < len(strtbl); i++ {
Cput(uint8(strtbl[i]))
}
strnput("", int(h.SizeOfRawData-uint32(size)))
@@ -882,22 +843,19 @@
}
func addpersrc() {
- var h *IMAGE_SECTION_HEADER
- var p []byte
- var val uint32
- var r *Reloc
- var ri int
-
if rsrcsym == nil {
return
}
- h = addpesection(".rsrc", int(rsrcsym.Size), int(rsrcsym.Size))
+ h := addpesection(".rsrc", int(rsrcsym.Size), int(rsrcsym.Size))
h.Characteristics = IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_WRITE | IMAGE_SCN_CNT_INITIALIZED_DATA
chksectoff(h, Cpos())
// relocation
- for ri = 0; ri < len(rsrcsym.R); ri++ {
+ var p []byte
+ var r *Reloc
+ var val uint32
+ for ri := 0; ri < len(rsrcsym.R); ri++ {
r = &rsrcsym.R[ri]
p = rsrcsym.P[r.Off:]
val = uint32(int64(h.VirtualAddress) + r.Add)
@@ -920,9 +878,6 @@
}
func Asmbpe() {
- var t *IMAGE_SECTION_HEADER
- var d *IMAGE_SECTION_HEADER
-
switch Thearch.Thechar {
default:
Diag("unknown PE architecture")
@@ -936,12 +891,12 @@
fh.Machine = IMAGE_FILE_MACHINE_I386
}
- t = addpesection(".text", int(Segtext.Length), int(Segtext.Length))
+ t := addpesection(".text", int(Segtext.Length), int(Segtext.Length))
t.Characteristics = IMAGE_SCN_CNT_CODE | IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_EXECUTE | IMAGE_SCN_MEM_READ
chksectseg(t, &Segtext)
textsect = pensect
- d = addpesection(".data", int(Segdata.Length), int(Segdata.Filelen))
+ d := addpesection(".data", int(Segdata.Length), int(Segdata.Filelen))
d.Characteristics = IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_WRITE
chksectseg(d, &Segdata)
datasect = pensect
diff --git a/src/cmd/internal/ld/sym.go b/src/cmd/internal/ld/sym.go
index 7e275a6..cbaa364a 100644
--- a/src/cmd/internal/ld/sym.go
+++ b/src/cmd/internal/ld/sym.go
@@ -63,21 +63,18 @@
}
func linknew(arch *LinkArch) *Link {
- var ctxt *Link
- var p string
- var buf string
-
- ctxt = new(Link)
+ ctxt := new(Link)
ctxt.Hash = make(map[symVer]*LSym)
ctxt.Arch = arch
ctxt.Version = HistVersion
ctxt.Goroot = obj.Getgoroot()
- p = obj.Getgoarch()
+ p := obj.Getgoarch()
if p != arch.Name {
log.Fatalf("invalid goarch %s (want %s)", p, arch.Name)
}
+ var buf string
buf, _ = os.Getwd()
if buf == "" {
buf = "/???"
@@ -149,7 +146,7 @@
// On arm, record goarm.
if ctxt.Arch.Thechar == '5' {
- p = obj.Getgoarm()
+ p := obj.Getgoarm()
if p != "" {
ctxt.Goarm = int32(obj.Atoi(p))
} else {
@@ -161,9 +158,7 @@
}
func linknewsym(ctxt *Link, symb string, v int) *LSym {
- var s *LSym
-
- s = new(LSym)
+ s := new(LSym)
*s = LSym{}
s.Dynid = -1
@@ -215,9 +210,7 @@
var headstr_buf string
func Headstr(v int) string {
- var i int
-
- for i = 0; i < len(headers); i++ {
+ for i := 0; i < len(headers); i++ {
if v == headers[i].val {
return headers[i].name
}
@@ -227,9 +220,7 @@
}
func headtype(name string) int {
- var i int
-
- for i = 0; i < len(headers); i++ {
+ for i := 0; i < len(headers); i++ {
if name == headers[i].name {
return headers[i].val
}
diff --git a/src/cmd/internal/ld/symtab.go b/src/cmd/internal/ld/symtab.go
index 997300c..79c1bf6 100644
--- a/src/cmd/internal/ld/symtab.go
+++ b/src/cmd/internal/ld/symtab.go
@@ -37,9 +37,6 @@
var maxelfstr int
func putelfstr(s string) int {
- var off int
- var n int
-
if len(Elfstrdat) == 0 && s != "" {
// first entry must be empty string
putelfstr("")
@@ -48,12 +45,12 @@
// Rewrite · to . for ASCII-only tools like DTrace (sigh)
s = strings.Replace(s, "·", ".", -1)
- n = len(s) + 1
+ n := len(s) + 1
for len(Elfstrdat)+n > cap(Elfstrdat) {
Elfstrdat = append(Elfstrdat[:cap(Elfstrdat)], 0)[:len(Elfstrdat)]
}
- off = len(Elfstrdat)
+ off := len(Elfstrdat)
Elfstrdat = Elfstrdat[:off+n]
copy(Elfstrdat[off:], s)
@@ -88,11 +85,7 @@
var elfbind int
func putelfsym(x *LSym, s string, t int, addr int64, size int64, ver int, go_ *LSym) {
- var bind int
var type_ int
- var off int
- var other int
- var xo *LSym
switch t {
default:
@@ -108,7 +101,7 @@
type_ = STT_OBJECT
}
- xo = x
+ xo := x
for xo.Outer != nil {
xo = xo.Outer
}
@@ -126,7 +119,7 @@
// One pass for each binding: STB_LOCAL, STB_GLOBAL,
// maybe one day STB_WEAK.
- bind = STB_GLOBAL
+ bind := STB_GLOBAL
if ver != 0 || (x.Type&SHIDDEN != 0) {
bind = STB_LOCAL
@@ -144,11 +137,11 @@
return
}
- off = putelfstr(s)
+ off := putelfstr(s)
if Linkmode == LinkExternal {
addr -= int64((xo.Sect.(*Section)).Vaddr)
}
- other = 2
+ other := 2
if x.Type&SHIDDEN != 0 {
other = 0
}
@@ -164,9 +157,7 @@
}
func putelfsymshndx(sympos int64, shndx int) {
- var here int64
-
- here = Cpos()
+ here := Cpos()
switch Thearch.Thechar {
case '6':
Cseek(sympos + 6)
@@ -180,9 +171,6 @@
}
func Asmelfsym() {
- var s *LSym
- var name string
-
// the first symbol entry is reserved
putelfsyment(0, 0, 0, STB_LOCAL<<4|STT_NOTYPE, 0, 0)
@@ -192,7 +180,7 @@
genasmsym(putelfsym)
if Linkmode == LinkExternal && HEADTYPE != Hopenbsd {
- s = Linklookup(Ctxt, "runtime.tlsg", 0)
+ s := Linklookup(Ctxt, "runtime.tlsg", 0)
if s.Sect == nil {
Ctxt.Cursym = nil
Diag("missing section for %s", s.Name)
@@ -214,7 +202,8 @@
elfglobalsymndx = numelfsym
genasmsym(putelfsym)
- for s = Ctxt.Allsym; s != nil; s = s.Allsym {
+ var name string
+ for s := Ctxt.Allsym; s != nil; s = s.Allsym {
if s.Type != SHOSTOBJ && (s.Type != SDYNIMPORT || !s.Reachable) {
continue
}
@@ -230,9 +219,6 @@
}
func putplan9sym(x *LSym, s string, t int, addr int64, size int64, ver int, go_ *LSym) {
- var i int
- var l int
-
switch t {
case 'T',
'L',
@@ -249,7 +235,7 @@
'z',
'Z',
'm':
- l = 4
+ l := 4
if HEADTYPE == Hplan9 && Thearch.Thechar == '6' && Debug['8'] == 0 {
Lputb(uint32(addr >> 32))
l = 8
@@ -258,6 +244,7 @@
Lputb(uint32(addr))
Cput(uint8(t + 0x80)) /* 0x80 is variable length */
+ var i int
if t == 'z' || t == 'Z' {
Cput(uint8(s[0]))
for i = 1; s[i] != 0 || s[i+1] != 0; i += 2 {
@@ -327,12 +314,6 @@
}
func symtab() {
- var s *LSym
- var symtype *LSym
- var symtypelink *LSym
- var symgostring *LSym
- var symgofunc *LSym
-
dosymtype()
// Define these so that they'll get put into the symbol table.
@@ -357,7 +338,7 @@
xdefine("runtime.esymtab", SRODATA, 0)
// garbage collection symbols
- s = Linklookup(Ctxt, "runtime.gcdata", 0)
+ s := Linklookup(Ctxt, "runtime.gcdata", 0)
s.Type = SRODATA
s.Size = 0
@@ -376,21 +357,21 @@
s.Type = STYPE
s.Size = 0
s.Reachable = true
- symtype = s
+ symtype := s
s = Linklookup(Ctxt, "go.string.*", 0)
s.Type = SGOSTRING
s.Size = 0
s.Reachable = true
- symgostring = s
+ symgostring := s
s = Linklookup(Ctxt, "go.func.*", 0)
s.Type = SGOFUNC
s.Size = 0
s.Reachable = true
- symgofunc = s
+ symgofunc := s
- symtypelink = Linklookup(Ctxt, "runtime.typelink", 0)
+ symtypelink := Linklookup(Ctxt, "runtime.typelink", 0)
symt = Linklookup(Ctxt, "runtime.symtab", 0)
symt.Type = SSYMTAB
@@ -401,7 +382,7 @@
// within a type they sort by size, so the .* symbols
// just defined above will be first.
// hide the specific symbols.
- for s = Ctxt.Allsym; s != nil; s = s.Allsym {
+ for s := Ctxt.Allsym; s != nil; s = s.Allsym {
if !s.Reachable || s.Special != 0 || s.Type != SRODATA {
continue
}
diff --git a/src/cmd/internal/ld/util.go b/src/cmd/internal/ld/util.go
index 9a56f09..8c37cab 100644
--- a/src/cmd/internal/ld/util.go
+++ b/src/cmd/internal/ld/util.go
@@ -26,17 +26,14 @@
func plan9quote(s string) string {
if s == "" {
- goto needquote
+ return "'" + strings.Replace(s, "'", "''", -1) + "'"
}
for i := 0; i < len(s); i++ {
if s[i] <= ' ' || s[i] == '\'' {
- goto needquote
+ return "'" + strings.Replace(s, "'", "''", -1) + "'"
}
}
return s
-
-needquote:
- return "'" + strings.Replace(s, "'", "''", -1) + "'"
}
func tokenize(s string) []string {
diff --git a/src/cmd/internal/obj/arm/asm5.go b/src/cmd/internal/obj/arm/asm5.go
index f82b737..064cbe2 100644
--- a/src/cmd/internal/obj/arm/asm5.go
+++ b/src/cmd/internal/obj/arm/asm5.go
@@ -304,13 +304,7 @@
// In rare cases, asmoutnacl might split p into two instructions.
// origPC is the PC for this Prog (no padding is taken into account).
func asmoutnacl(ctxt *obj.Link, origPC int32, p *obj.Prog, o *Optab, out []uint32) int {
- var size int
- var reg int
- var q *obj.Prog
- var a *obj.Addr
- var a2 *obj.Addr
-
- size = int(o.size)
+ size := int(o.size)
// instruction specific
switch p.As {
@@ -437,11 +431,12 @@
// split it into two instructions:
// ADD $-100004, R13
// MOVW R14, 0(R13)
- q = ctxt.NewProg()
+ q := ctxt.NewProg()
p.Scond &^= C_WBIT
*q = *p
- a = &p.To
+ a := &p.To
+ var a2 *obj.Addr
if p.To.Type == obj.TYPE_MEM {
a2 = &q.To
} else {
@@ -479,12 +474,13 @@
}
if (p.To.Type == obj.TYPE_MEM && p.To.Reg != REG_R13 && p.To.Reg != REG_R9) || (p.From.Type == obj.TYPE_MEM && p.From.Reg != REG_R13 && p.From.Reg != REG_R9) { // MOVW Rx, X(Ry), y != 13 && y != 9 // MOVW X(Rx), Ry, x != 13 && x != 9
+ var a *obj.Addr
if p.To.Type == obj.TYPE_MEM {
a = &p.To
} else {
a = &p.From
}
- reg = int(a.Reg)
+ reg := int(a.Reg)
if size == 4 {
// if addr.reg == 0, then it is probably load from x(FP) with small x, no need to modify.
if reg == 0 {
@@ -514,8 +510,9 @@
if p.Scond&(C_PBIT|C_WBIT) != 0 {
ctxt.Diag("unsupported instruction (.P/.W): %v", p)
}
- q = ctxt.NewProg()
+ q := ctxt.NewProg()
*q = *p
+ var a2 *obj.Addr
if p.To.Type == obj.TYPE_MEM {
a2 = &q.To
} else {
@@ -577,16 +574,6 @@
func span5(ctxt *obj.Link, cursym *obj.LSym) {
var p *obj.Prog
var op *obj.Prog
- var o *Optab
- var m int
- var bflag int
- var i int
- var v int
- var times int
- var c int32
- var opc int32
- var out [6 + 3]uint32
- var bp []byte
p = cursym.Text
if p == nil || p.Link == nil { // handle external functions and ELF section symbols
@@ -600,10 +587,13 @@
ctxt.Cursym = cursym
ctxt.Autosize = int32(p.To.Offset + 4)
- c = 0
+ c := int32(0)
op = p
p = p.Link
+ var i int
+ var m int
+ var o *Optab
for ; p != nil || ctxt.Blitrl != nil; (func() { op = p; p = p.Link })() {
if p == nil {
if checkpool(ctxt, op, 0) {
@@ -676,8 +666,11 @@
* generate extra passes putting branches
* around jmps to fix. this is rare.
*/
- times = 0
+ times := 0
+ var bflag int
+ var opc int32
+ var out [6 + 3]uint32
for {
if ctxt.Debugvlog != 0 {
fmt.Fprintf(ctxt.Bso, "%5.2f span1\n", obj.Cputime())
@@ -774,8 +767,9 @@
ctxt.Autosize = int32(p.To.Offset + 4)
obj.Symgrow(ctxt, cursym, cursym.Size)
- bp = cursym.P
+ bp := cursym.P
c = int32(p.Pc) // even p->link might need extra padding
+ var v int
for p = p.Link; p != nil; p = p.Link {
ctxt.Pc = p.Pc
ctxt.Curp = p
@@ -844,14 +838,12 @@
}
func flushpool(ctxt *obj.Link, p *obj.Prog, skip int, force int) bool {
- var q *obj.Prog
-
if ctxt.Blitrl != nil {
if skip != 0 {
if false && skip == 1 {
fmt.Printf("note: flush literal pool at %x: len=%d ref=%x\n", uint64(p.Pc+4), pool.size, pool.start)
}
- q = ctxt.NewProg()
+ q := ctxt.NewProg()
q.As = AB
q.To.Type = obj.TYPE_BRANCH
q.Pcond = p.Link
@@ -863,7 +855,7 @@
}
if ctxt.Headtype == obj.Hnacl && pool.size%16 != 0 {
// if pool is not multiple of 16 bytes, add an alignment marker
- q = ctxt.NewProg()
+ q := ctxt.NewProg()
q.As = ADATABUNDLEEND
ctxt.Elitrl.Link = q
@@ -893,11 +885,9 @@
}
func addpool(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
- var q *obj.Prog
var t obj.Prog
- var c int
- c = aclass(ctxt, a)
+ c := aclass(ctxt, a)
t.Ctxt = ctxt
t.As = AWORD
@@ -928,7 +918,7 @@
}
if t.Pcrel == nil {
- for q = ctxt.Blitrl; q != nil; q = q.Link { /* could hash on t.t0.offset */
+ for q := ctxt.Blitrl; q != nil; q = q.Link { /* could hash on t.t0.offset */
if q.Pcrel == nil && q.To == t.To {
p.Pcond = q
return
@@ -938,7 +928,7 @@
if ctxt.Headtype == obj.Hnacl && pool.size%16 == 0 {
// start a new data bundle
- q = ctxt.NewProg()
+ q := ctxt.NewProg()
q.As = ADATABUNDLE
q.Pc = int64(pool.size)
pool.size += 4
@@ -952,7 +942,7 @@
ctxt.Elitrl = q
}
- q = ctxt.NewProg()
+ q := ctxt.NewProg()
*q = t
q.Pc = int64(pool.size)
@@ -975,9 +965,7 @@
}
func immrot(v uint32) int32 {
- var i int
-
- for i = 0; i < 16; i++ {
+ for i := 0; i < 16; i++ {
if v&^0xff == 0 {
return int32(uint32(int32(i)<<8) | v | 1<<25)
}
@@ -1012,9 +1000,6 @@
}
func aclass(ctxt *obj.Link, a *obj.Addr) int {
- var s *obj.LSym
- var t int
-
switch a.Type {
case obj.TYPE_NONE:
return C_NONE
@@ -1060,7 +1045,7 @@
case obj.NAME_AUTO:
ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset
- t = int(immaddr(int32(ctxt.Instoffset)))
+ t := int(immaddr(int32(ctxt.Instoffset)))
if t != 0 {
if immhalf(int32(ctxt.Instoffset)) {
if immfloat(int32(t)) {
@@ -1079,7 +1064,7 @@
case obj.NAME_PARAM:
ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset + 4
- t = int(immaddr(int32(ctxt.Instoffset)))
+ t := int(immaddr(int32(ctxt.Instoffset)))
if t != 0 {
if immhalf(int32(ctxt.Instoffset)) {
if immfloat(int32(t)) {
@@ -1098,7 +1083,7 @@
case obj.TYPE_NONE:
ctxt.Instoffset = a.Offset
- t = int(immaddr(int32(ctxt.Instoffset)))
+ t := int(immaddr(int32(ctxt.Instoffset)))
if t != 0 {
if immhalf(int32(ctxt.Instoffset)) { /* n.b. that it will also satisfy immrot */
if immfloat(int32(t)) {
@@ -1110,7 +1095,7 @@
if immfloat(int32(t)) {
return C_FOREG /* n.b. that it will also satisfy immrot */
}
- t = int(immrot(uint32(ctxt.Instoffset)))
+ t := int(immrot(uint32(ctxt.Instoffset)))
if t != 0 {
return C_SROREG
}
@@ -1150,7 +1135,7 @@
return aconsize(ctxt)
}
- t = int(immrot(uint32(ctxt.Instoffset)))
+ t := int(immrot(uint32(ctxt.Instoffset)))
if t != 0 {
return C_RCON
}
@@ -1162,7 +1147,7 @@
case obj.NAME_EXTERN,
obj.NAME_STATIC:
- s = a.Sym
+ s := a.Sym
if s == nil {
break
}
@@ -1188,9 +1173,7 @@
}
func aconsize(ctxt *obj.Link) int {
- var t int
-
- t = int(immrot(uint32(ctxt.Instoffset)))
+ t := int(immrot(uint32(ctxt.Instoffset)))
if t != 0 {
return C_RACON
}
@@ -1202,16 +1185,7 @@
}
func oplook(ctxt *obj.Link, p *obj.Prog) *Optab {
- var a1 int
- var a2 int
- var a3 int
- var r int
- var c1 []byte
- var c3 []byte
- var o []Optab
- var e []Optab
-
- a1 = int(p.Optab)
+ a1 := int(p.Optab)
if a1 != 0 {
return &optab[a1-1:][0]
}
@@ -1222,19 +1196,19 @@
}
a1--
- a3 = int(p.To.Class)
+ a3 := int(p.To.Class)
if a3 == 0 {
a3 = aclass(ctxt, &p.To) + 1
p.To.Class = int8(a3)
}
a3--
- a2 = C_NONE
+ a2 := C_NONE
if p.Reg != 0 {
a2 = C_REG
}
- r = int(p.As)
- o = oprange[r].start
+ r := int(p.As)
+ o := oprange[r].start
if o == nil {
o = oprange[r].stop /* just generate an error */
}
@@ -1244,9 +1218,9 @@
fmt.Printf("\t\t%d %d\n", p.From.Type, p.To.Type)
}
- e = oprange[r].stop
- c1 = xcmp[a1][:]
- c3 = xcmp[a3][:]
+ e := oprange[r].stop
+ c1 := xcmp[a1][:]
+ c3 := xcmp[a3][:]
for ; -cap(o) < -cap(e); o = o[1:] {
if int(o[0].a2) == a2 {
if c1[o[0].a1] != 0 {
@@ -1340,13 +1314,9 @@
}
func (x ocmp) Less(i, j int) bool {
- var p1 *Optab
- var p2 *Optab
- var n int
-
- p1 = &x[i]
- p2 = &x[j]
- n = int(p1.as) - int(p2.as)
+ p1 := &x[i]
+ p2 := &x[j]
+ n := int(p1.as) - int(p2.as)
if n != 0 {
return n < 0
}
@@ -1366,11 +1336,9 @@
}
func buildop(ctxt *obj.Link) {
- var i int
var n int
- var r int
- for i = 0; i < C_GOK; i++ {
+ for i := 0; i < C_GOK; i++ {
for n = 0; n < C_GOK; n++ {
if cmp(n, i) {
xcmp[i][n] = 1
@@ -1388,7 +1356,8 @@
}
sort.Sort(ocmp(optab[:n]))
- for i = 0; i < n; i++ {
+ var r int
+ for i := 0; i < n; i++ {
r = int(optab[i].as)
oprange[r].start = optab[i:]
for int(optab[i].as) == r {
@@ -1536,26 +1505,13 @@
}
func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
- var o1 uint32
- var o2 uint32
- var o3 uint32
- var o4 uint32
- var o5 uint32
- var o6 uint32
- var v int32
- var r int
- var rf int
- var rt int
- var rt2 int
- var rel *obj.Reloc
-
ctxt.Printp = p
- o1 = 0
- o2 = 0
- o3 = 0
- o4 = 0
- o5 = 0
- o6 = 0
+ o1 := uint32(0)
+ o2 := uint32(0)
+ o3 := uint32(0)
+ o4 := uint32(0)
+ o5 := uint32(0)
+ o6 := uint32(0)
ctxt.Armsize += int32(o.size)
if false { /*debug['P']*/
fmt.Printf("%x: %v\ttype %d\n", uint32(p.Pc), p, o.type_)
@@ -1573,9 +1529,9 @@
case 1: /* op R,[R],R */
o1 = oprrr(ctxt, int(p.As), int(p.Scond))
- rf = int(p.From.Reg)
- rt = int(p.To.Reg)
- r = int(p.Reg)
+ rf := int(p.From.Reg)
+ rt := int(p.To.Reg)
+ r := int(p.Reg)
if p.To.Type == obj.TYPE_NONE {
rt = 0
}
@@ -1591,8 +1547,8 @@
o1 = oprrr(ctxt, int(p.As), int(p.Scond))
o1 |= uint32(immrot(uint32(ctxt.Instoffset)))
- rt = int(p.To.Reg)
- r = int(p.Reg)
+ rt := int(p.To.Reg)
+ r := int(p.Reg)
if p.To.Type == obj.TYPE_NONE {
rt = 0
}
@@ -1611,7 +1567,7 @@
o1 = oprrr(ctxt, AADD, int(p.Scond))
o1 |= uint32(immrot(uint32(ctxt.Instoffset)))
- r = int(p.From.Reg)
+ r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
@@ -1621,9 +1577,9 @@
case 5: /* bra s */
o1 = opbra(ctxt, int(p.As), int(p.Scond))
- v = -8
+ v := int32(-8)
if p.To.Sym != nil {
- rel = obj.Addrel(ctxt.Cursym)
+ rel := obj.Addrel(ctxt.Cursym)
rel.Off = int32(ctxt.Pc)
rel.Siz = 4
rel.Sym = p.To.Sym
@@ -1654,7 +1610,7 @@
}
o1 = oprrr(ctxt, ABL, int(p.Scond))
o1 |= (uint32(p.To.Reg) & 15) << 0
- rel = obj.Addrel(ctxt.Cursym)
+ rel := obj.Addrel(ctxt.Cursym)
rel.Off = int32(ctxt.Pc)
rel.Siz = 0
rel.Type = obj.R_CALLIND
@@ -1663,7 +1619,7 @@
aclass(ctxt, &p.From)
o1 = oprrr(ctxt, int(p.As), int(p.Scond))
- r = int(p.Reg)
+ r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
@@ -1674,7 +1630,7 @@
case 9: /* sll R,[R],R -> mov (R<<R),R */
o1 = oprrr(ctxt, int(p.As), int(p.Scond))
- r = int(p.Reg)
+ r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
@@ -1697,7 +1653,7 @@
if p.To.Sym != nil {
// This case happens with words generated
// in the PC stream as part of the literal pool.
- rel = obj.Addrel(ctxt.Cursym)
+ rel := obj.Addrel(ctxt.Cursym)
rel.Off = int32(ctxt.Pc)
rel.Siz = 4
@@ -1742,7 +1698,7 @@
}
o2 = oprrr(ctxt, int(p.As), int(p.Scond))
o2 |= REGTMP & 15
- r = int(p.Reg)
+ r := int(p.Reg)
if p.As == AMOVW || p.As == AMVN {
r = 0
} else if r == 0 {
@@ -1762,7 +1718,7 @@
o2 = oprrr(ctxt, ASRA, int(p.Scond))
}
- r = int(p.To.Reg)
+ r := int(p.To.Reg)
o1 |= (uint32(p.From.Reg)&15)<<0 | (uint32(r)&15)<<12
o2 |= uint32(r)&15 | (uint32(r)&15)<<12
if p.As == AMOVB || p.As == AMOVBS || p.As == AMOVBU {
@@ -1776,9 +1732,9 @@
case 15: /* mul r,[r,]r */
o1 = oprrr(ctxt, int(p.As), int(p.Scond))
- rf = int(p.From.Reg)
- rt = int(p.To.Reg)
- r = int(p.Reg)
+ rf := int(p.From.Reg)
+ rt := int(p.To.Reg)
+ r := int(p.Reg)
if r == 0 {
r = rt
}
@@ -1803,16 +1759,16 @@
case 17:
o1 = oprrr(ctxt, int(p.As), int(p.Scond))
- rf = int(p.From.Reg)
- rt = int(p.To.Reg)
- rt2 = int(p.To.Offset)
- r = int(p.Reg)
+ rf := int(p.From.Reg)
+ rt := int(p.To.Reg)
+ rt2 := int(p.To.Offset)
+ r := int(p.Reg)
o1 |= (uint32(rf)&15)<<8 | (uint32(r)&15)<<0 | (uint32(rt)&15)<<16 | (uint32(rt2)&15)<<12
case 20: /* mov/movb/movbu R,O(R) */
aclass(ctxt, &p.To)
- r = int(p.To.Reg)
+ r := int(p.To.Reg)
if r == 0 {
r = int(o.param)
}
@@ -1821,7 +1777,7 @@
case 21: /* mov/movbu O(R),R -> lr */
aclass(ctxt, &p.From)
- r = int(p.From.Reg)
+ r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
@@ -1836,7 +1792,7 @@
if o1 == 0 {
break
}
- r = int(p.To.Reg)
+ r := int(p.To.Reg)
if r == 0 {
r = int(o.param)
}
@@ -1851,7 +1807,7 @@
if o1 == 0 {
break
}
- r = int(p.From.Reg)
+ r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
@@ -1869,7 +1825,7 @@
o2 = oprrr(ctxt, AADD, int(p.Scond))
o2 |= REGTMP & 15
- r = int(p.From.Reg)
+ r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
@@ -1961,18 +1917,18 @@
o1 = 0xe8fd8000
case 50: /* floating point store */
- v = regoff(ctxt, &p.To)
+ v := regoff(ctxt, &p.To)
- r = int(p.To.Reg)
+ r := int(p.To.Reg)
if r == 0 {
r = int(o.param)
}
o1 = ofsr(ctxt, int(p.As), int(p.From.Reg), v, r, int(p.Scond), p)
case 51: /* floating point load */
- v = regoff(ctxt, &p.From)
+ v := regoff(ctxt, &p.From)
- r = int(p.From.Reg)
+ r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
@@ -1984,7 +1940,7 @@
if o1 == 0 {
break
}
- r = int(p.To.Reg)
+ r := int(p.To.Reg)
if r == 0 {
r = int(o.param)
}
@@ -1997,7 +1953,7 @@
if o1 == 0 {
break
}
- r = int(p.From.Reg)
+ r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
@@ -2007,9 +1963,9 @@
case 54: /* floating point arith */
o1 = oprrr(ctxt, int(p.As), int(p.Scond))
- rf = int(p.From.Reg)
- rt = int(p.To.Reg)
- r = int(p.Reg)
+ rf := int(p.From.Reg)
+ rt := int(p.To.Reg)
+ r := int(p.Reg)
if r == 0 {
r = rt
if p.As == AMOVF || p.As == AMOVD || p.As == AMOVFD || p.As == AMOVDF || p.As == ASQRTF || p.As == ASQRTD || p.As == AABSF || p.As == AABSD {
@@ -2033,8 +1989,8 @@
o1 = oprrr(ctxt, AAND, int(p.Scond))
o1 |= uint32(immrot(0xff))
- rt = int(p.To.Reg)
- r = int(p.From.Reg)
+ rt := int(p.To.Reg)
+ r := int(p.From.Reg)
if p.To.Type == obj.TYPE_NONE {
rt = 0
}
@@ -2095,7 +2051,7 @@
case 63: /* bcase */
if p.Pcond != nil {
- rel = obj.Addrel(ctxt.Cursym)
+ rel := obj.Addrel(ctxt.Cursym)
rel.Off = int32(ctxt.Pc)
rel.Siz = 4
if p.To.Sym != nil && p.To.Sym.Type != 0 {
@@ -2171,7 +2127,7 @@
case 70: /* movh/movhu R,O(R) -> strh */
aclass(ctxt, &p.To)
- r = int(p.To.Reg)
+ r := int(p.To.Reg)
if r == 0 {
r = int(o.param)
}
@@ -2180,7 +2136,7 @@
case 71: /* movb/movh/movhu O(R),R -> ldrsb/ldrsh/ldrh */
aclass(ctxt, &p.From)
- r = int(p.From.Reg)
+ r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
@@ -2197,7 +2153,7 @@
if o1 == 0 {
break
}
- r = int(p.To.Reg)
+ r := int(p.To.Reg)
if r == 0 {
r = int(o.param)
}
@@ -2209,7 +2165,7 @@
if o1 == 0 {
break
}
- r = int(p.From.Reg)
+ r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
@@ -2278,8 +2234,8 @@
o2 = oprrr(ctxt, ASUBF, int(p.Scond))
}
- v = 0x70 // 1.0
- r = (int(p.To.Reg) & 15) << 0
+ v := int32(0x70) // 1.0
+ r := (int(p.To.Reg) & 15) << 0
// movf $1.0, r
o1 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28
@@ -2298,7 +2254,7 @@
}
o1 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28
o1 |= (uint32(p.To.Reg) & 15) << 12
- v = int32(chipfloat5(ctxt, p.From.U.Dval))
+ v := int32(chipfloat5(ctxt, p.From.U.Dval))
o1 |= (uint32(v) & 0xf) << 0
o1 |= (uint32(v) & 0xf0) << 12
@@ -2475,18 +2431,14 @@
}
func mov(ctxt *obj.Link, p *obj.Prog) uint32 {
- var o1 uint32
- var rt int
- var r int
-
aclass(ctxt, &p.From)
- o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+ o1 := oprrr(ctxt, int(p.As), int(p.Scond))
o1 |= uint32(p.From.Offset)
- rt = int(p.To.Reg)
+ rt := int(p.To.Reg)
if p.To.Type == obj.TYPE_NONE {
rt = 0
}
- r = int(p.Reg)
+ r := int(p.Reg)
if p.As == AMOVW || p.As == AMVN {
r = 0
} else if r == 0 {
@@ -2497,9 +2449,7 @@
}
func oprrr(ctxt *obj.Link, a int, sc int) uint32 {
- var o uint32
-
- o = ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28
+ o := ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28
if sc&C_SBIT != 0 {
o |= 1 << 20
}
@@ -2716,12 +2666,10 @@
}
func olr(ctxt *obj.Link, v int32, b int, r int, sc int) uint32 {
- var o uint32
-
if sc&C_SBIT != 0 {
ctxt.Diag(".nil on LDR/STR instruction")
}
- o = ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28
+ o := ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28
if sc&C_PBIT == 0 {
o |= 1 << 24
}
@@ -2750,12 +2698,10 @@
}
func olhr(ctxt *obj.Link, v int32, b int, r int, sc int) uint32 {
- var o uint32
-
if sc&C_SBIT != 0 {
ctxt.Diag(".nil on LDRH/STRH instruction")
}
- o = ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28
+ o := ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28
if sc&C_PBIT == 0 {
o |= 1 << 24
}
@@ -2778,9 +2724,7 @@
}
func osr(ctxt *obj.Link, a int, r int, v int32, b int, sc int) uint32 {
- var o uint32
-
- o = olr(ctxt, v, b, r, sc) ^ (1 << 20)
+ o := olr(ctxt, v, b, r, sc) ^ (1 << 20)
if a != AMOVW {
o |= 1 << 22
}
@@ -2788,9 +2732,7 @@
}
func oshr(ctxt *obj.Link, r int, v int32, b int, sc int) uint32 {
- var o uint32
-
- o = olhr(ctxt, v, b, r, sc) ^ (1 << 20)
+ o := olhr(ctxt, v, b, r, sc) ^ (1 << 20)
return o
}
@@ -2811,12 +2753,10 @@
}
func ofsr(ctxt *obj.Link, a int, r int, v int32, b int, sc int, p *obj.Prog) uint32 {
- var o uint32
-
if sc&C_SBIT != 0 {
ctxt.Diag(".nil on FLDR/FSTR instruction")
}
- o = ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28
+ o := ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28
if sc&C_PBIT == 0 {
o |= 1 << 24
}
@@ -2855,11 +2795,10 @@
}
func omvl(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, dr int) uint32 {
- var v int32
var o1 uint32
if p.Pcond == nil {
aclass(ctxt, a)
- v = immrot(^uint32(ctxt.Instoffset))
+ v := immrot(^uint32(ctxt.Instoffset))
if v == 0 {
ctxt.Diag("missing literal")
prasm(p)
@@ -2870,7 +2809,7 @@
o1 |= uint32(v)
o1 |= (uint32(dr) & 15) << 12
} else {
- v = int32(p.Pcond.Pc - p.Pc - 8)
+ v := int32(p.Pcond.Pc - p.Pc - 8)
o1 = olr(ctxt, v, REGPC, dr, int(p.Scond)&C_SCOND)
}
@@ -2886,29 +2825,23 @@
}
func chipfloat5(ctxt *obj.Link, e float64) int {
- var n int
- var h1 uint32
- var l uint32
- var h uint32
- var ei uint64
-
// We use GOARM=7 to gate the use of VFPv3 vmov (imm) instructions.
if ctxt.Goarm < 7 {
- goto no
+ return -1
}
- ei = math.Float64bits(e)
- l = uint32(ei)
- h = uint32(ei >> 32)
+ ei := math.Float64bits(e)
+ l := uint32(ei)
+ h := uint32(ei >> 32)
if l != 0 || h&0xffff != 0 {
- goto no
+ return -1
}
- h1 = h & 0x7fc00000
+ h1 := h & 0x7fc00000
if h1 != 0x40000000 && h1 != 0x3fc00000 {
- goto no
+ return -1
}
- n = 0
+ n := 0
// sign bit (a)
if h&0x80000000 != 0 {
@@ -2925,7 +2858,4 @@
//print("match %.8lux %.8lux %d\n", l, h, n);
return n
-
-no:
- return -1
}
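For reference, the immrot hunk earlier in this file tests whether a constant fits ARM's rotated-immediate form, i.e. an 8-bit value rotated right by an even amount. A standalone sketch of the same representability check, written with math/bits instead of immrot's explicit rotation loop (canARMImm is an illustrative name, not code from this change):

	package main

	import (
		"fmt"
		"math/bits"
	)

	// canARMImm reports whether v can be expressed as an ARM
	// data-processing immediate: an 8-bit value rotated right by an
	// even amount, which is the condition immrot checks one rotation
	// at a time.
	func canARMImm(v uint32) bool {
		for rot := 0; rot < 32; rot += 2 {
			// Rotating v left by rot undoes a right rotation of the
			// 8-bit payload by rot, so test whether the result fits
			// in the low 8 bits.
			if bits.RotateLeft32(v, rot)&^0xff == 0 {
				return true
			}
		}
		return false
	}

	func main() {
		fmt.Println(canARMImm(0xff))       // true: no rotation needed
		fmt.Println(canARMImm(0xff000000)) // true: 0xff rotated right by 8
		fmt.Println(canARMImm(0x101))      // false: set bits 8 apart never fit one byte
	}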
diff --git a/src/cmd/internal/obj/arm/list5.go b/src/cmd/internal/obj/arm/list5.go
index 70280f3..60caede 100644
--- a/src/cmd/internal/obj/arm/list5.go
+++ b/src/cmd/internal/obj/arm/list5.go
@@ -61,16 +61,9 @@
var bigP *obj.Prog
func Pconv(p *obj.Prog) string {
- var str string
- var sc string
- var fp string
-
- var a int
- var s int
-
- a = int(p.As)
- s = int(p.Scond)
- sc = extra[(s&C_SCOND)^C_SCOND_XOR]
+ a := int(p.As)
+ s := int(p.Scond)
+ sc := extra[(s&C_SCOND)^C_SCOND_XOR]
if s&C_SBIT != 0 {
sc += ".S"
}
@@ -83,6 +76,7 @@
if s&C_UBIT != 0 { /* ambiguous with FBIT */
sc += ".U"
}
+ var str string
if a == obj.ADATA {
str = fmt.Sprintf("%.5d (%v)\t%v\t%v/%d,%v",
p.Pc, p.Line(), Aconv(a), obj.Dconv(p, &p.From), p.From3.Offset, obj.Dconv(p, &p.To))
@@ -97,30 +91,23 @@
p.Pc, p.Line(), Aconv(a), sc, obj.Dconv(p, &p.From), Rconv(int(p.Reg)), obj.Dconv(p, &p.To))
}
+ var fp string
fp += str
return fp
}
func Aconv(a int) string {
- var s string
- var fp string
-
- s = "???"
+ s := "???"
if a >= obj.AXXX && a < ALAST {
s = Anames[a]
}
+ var fp string
fp += s
return fp
}
func RAconv(a *obj.Addr) string {
- var str string
- var fp string
-
- var i int
- var v int
-
- str = fmt.Sprintf("GOK-reglist")
+ str := fmt.Sprintf("GOK-reglist")
switch a.Type {
case obj.TYPE_CONST:
if a.Reg != 0 {
@@ -129,9 +116,9 @@
if a.Sym != nil {
break
}
- v = int(a.Offset)
+ v := int(a.Offset)
str = ""
- for i = 0; i < NREG; i++ {
+ for i := 0; i < NREG; i++ {
if v&(1<<uint(i)) != 0 {
if str == "" {
str += "[R"
@@ -145,6 +132,7 @@
str += "]"
}
+ var fp string
fp += str
return fp
}
@@ -182,13 +170,11 @@
}
func DRconv(a int) string {
- var s string
- var fp string
-
- s = "C_??"
+ s := "C_??"
if a >= C_NONE && a <= C_NCLASS {
s = cnames5[a]
}
+ var fp string
fp += s
return fp
}
diff --git a/src/cmd/internal/obj/arm/obj5.go b/src/cmd/internal/obj/arm/obj5.go
index 9c1baca..0b7e1f7 100644
--- a/src/cmd/internal/obj/arm/obj5.go
+++ b/src/cmd/internal/obj/arm/obj5.go
@@ -41,9 +41,6 @@
var progedit_tlsfallback *obj.LSym
func progedit(ctxt *obj.Link, p *obj.Prog) {
- var literal string
- var s *obj.LSym
-
p.From.Class = 0
p.To.Class = 0
@@ -111,12 +108,10 @@
switch p.As {
case AMOVF:
if p.From.Type == obj.TYPE_FCONST && chipfloat5(ctxt, p.From.U.Dval) < 0 && (chipzero5(ctxt, p.From.U.Dval) < 0 || p.Scond&C_SCOND != C_SCOND_NONE) {
- var i32 uint32
- var f32 float32
- f32 = float32(p.From.U.Dval)
- i32 = math.Float32bits(f32)
- literal = fmt.Sprintf("$f32.%08x", i32)
- s = obj.Linklookup(ctxt, literal, 0)
+ f32 := float32(p.From.U.Dval)
+ i32 := math.Float32bits(f32)
+ literal := fmt.Sprintf("$f32.%08x", i32)
+ s := obj.Linklookup(ctxt, literal, 0)
if s.Type == 0 {
s.Type = obj.SRODATA
obj.Adduint32(ctxt, s, i32)
@@ -131,10 +126,9 @@
case AMOVD:
if p.From.Type == obj.TYPE_FCONST && chipfloat5(ctxt, p.From.U.Dval) < 0 && (chipzero5(ctxt, p.From.U.Dval) < 0 || p.Scond&C_SCOND != C_SCOND_NONE) {
- var i64 uint64
- i64 = math.Float64bits(p.From.U.Dval)
- literal = fmt.Sprintf("$f64.%016x", i64)
- s = obj.Linklookup(ctxt, literal, 0)
+ i64 := math.Float64bits(p.From.U.Dval)
+ literal := fmt.Sprintf("$f64.%016x", i64)
+ s := obj.Linklookup(ctxt, literal, 0)
if s.Type == 0 {
s.Type = obj.SRODATA
obj.Adduint64(ctxt, s, i64)
@@ -175,9 +169,7 @@
)
func linkcase(casep *obj.Prog) {
- var p *obj.Prog
-
- for p = casep; p != nil; p = p.Link {
+ for p := casep; p != nil; p = p.Link {
if p.As == ABCASE {
for ; p != nil && p.As == ABCASE; p = p.Link {
p.Pcrel = casep
@@ -188,25 +180,14 @@
}
func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
- var p *obj.Prog
- var pl *obj.Prog
- var p1 *obj.Prog
- var p2 *obj.Prog
- var q *obj.Prog
- var q1 *obj.Prog
- var q2 *obj.Prog
- var o int
- var autosize int32
- var autoffset int32
-
- autosize = 0
+ autosize := int32(0)
if ctxt.Symmorestack[0] == nil {
ctxt.Symmorestack[0] = obj.Linklookup(ctxt, "runtime.morestack", 0)
ctxt.Symmorestack[1] = obj.Linklookup(ctxt, "runtime.morestack_noctxt", 0)
}
- q = nil
+ q := (*obj.Prog)(nil)
ctxt.Cursym = cursym
@@ -216,8 +197,8 @@
softfloat(ctxt, cursym)
- p = cursym.Text
- autoffset = int32(p.To.Offset)
+ p := cursym.Text
+ autoffset := int32(p.To.Offset)
if autoffset < 0 {
autoffset = 0
}
@@ -259,8 +240,8 @@
// MOVW.nil R3, 0(R1) +4
// CMP R1, R2
// BNE L
- pl = obj.Appendp(ctxt, p)
- p = pl
+ pl := obj.Appendp(ctxt, p)
+ p := pl
p.As = AMOVW
p.From.Type = obj.TYPE_REG
@@ -289,7 +270,8 @@
* expand RET
* expand BECOME pseudo
*/
- for p = cursym.Text; p != nil; p = p.Link {
+ var q1 *obj.Prog
+ for p := cursym.Text; p != nil; p = p.Link {
switch p.As {
case ACASE:
if ctxt.Flag_shared != 0 {
@@ -358,7 +340,11 @@
q = p
}
- for p = cursym.Text; p != nil; p = p.Link {
+ var o int
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+ var q2 *obj.Prog
+ for p := cursym.Text; p != nil; p = p.Link {
o = int(p.As)
switch o {
case obj.ATEXT:
@@ -667,24 +653,20 @@
}
func softfloat(ctxt *obj.Link, cursym *obj.LSym) {
- var p *obj.Prog
- var next *obj.Prog
- var symsfloat *obj.LSym
- var wasfloat int
-
if ctxt.Goarm > 5 {
return
}
- symsfloat = obj.Linklookup(ctxt, "_sfloat", 0)
+ symsfloat := obj.Linklookup(ctxt, "_sfloat", 0)
- wasfloat = 0
- for p = cursym.Text; p != nil; p = p.Link {
+ wasfloat := 0
+ for p := cursym.Text; p != nil; p = p.Link {
if p.Pcond != nil {
p.Pcond.Mark |= LABEL
}
}
- for p = cursym.Text; p != nil; p = p.Link {
+ var next *obj.Prog
+ for p := cursym.Text; p != nil; p = p.Link {
switch p.As {
case AMOVW:
if isfloatreg(&p.To) || isfloatreg(&p.From) {
@@ -880,13 +862,10 @@
}
func follow(ctxt *obj.Link, s *obj.LSym) {
- var firstp *obj.Prog
- var lastp *obj.Prog
-
ctxt.Cursym = s
- firstp = ctxt.NewProg()
- lastp = firstp
+ firstp := ctxt.NewProg()
+ lastp := firstp
xfol(ctxt, s.Text, &lastp)
lastp.Link = nil
s.Text = firstp.Link
diff --git a/src/cmd/internal/obj/i386/asm8.go b/src/cmd/internal/obj/i386/asm8.go
index 4284586..112e839 100644
--- a/src/cmd/internal/obj/i386/asm8.go
+++ b/src/cmd/internal/obj/i386/asm8.go
@@ -1132,16 +1132,6 @@
}
func span8(ctxt *obj.Link, s *obj.LSym) {
- var p *obj.Prog
- var q *obj.Prog
- var c int32
- var v int32
- var loop int32
- var bp []byte
- var n int
- var m int
- var i int
-
ctxt.Cursym = s
if s.Text == nil || s.Text.Link == nil {
@@ -1152,7 +1142,8 @@
instinit()
}
- for p = s.Text; p != nil; p = p.Link {
+ var v int32
+ for p := s.Text; p != nil; p = p.Link {
if p.To.Type == obj.TYPE_BRANCH {
if p.Pcond == nil {
p.Pcond = p
@@ -1176,7 +1167,8 @@
}
}
- for p = s.Text; p != nil; p = p.Link {
+ var q *obj.Prog
+ for p := s.Text; p != nil; p = p.Link {
p.Back = 2 // use short branches first time through
q = p.Pcond
if q != nil && (q.Back&2 != 0) {
@@ -1201,7 +1193,13 @@
}
}
- n = 0
+ n := 0
+ var bp []byte
+ var c int32
+ var i int
+ var loop int32
+ var m int
+ var p *obj.Prog
for {
loop = 0
for i = 0; i < len(s.R); i++ {
@@ -1313,6 +1311,7 @@
if false { /* debug['a'] > 1 */
fmt.Printf("span1 %s %d (%d tries)\n %.6x", s.Name, s.Size, n, 0)
+ var i int
for i = 0; i < len(s.P); i++ {
fmt.Printf(" %.2x", s.P[i])
if i%16 == 15 {
@@ -1324,20 +1323,17 @@
fmt.Printf("\n")
}
- for i = 0; i < len(s.R); i++ {
- var r *obj.Reloc
-
- r = &s.R[i]
+ for i := 0; i < len(s.R); i++ {
+ r := &s.R[i]
fmt.Printf(" rel %#.4x/%d %s%+d\n", uint32(r.Off), r.Siz, r.Sym.Name, r.Add)
}
}
}
func instinit() {
- var i int
var c int
- for i = 1; optab[i].as != 0; i++ {
+ for i := 1; optab[i].as != 0; i++ {
c = int(optab[i].as)
if opindex[c] != nil {
log.Fatalf("phase error in optab: %d (%v)", i, Aconv(c))
@@ -1345,7 +1341,7 @@
opindex[c] = &optab[i]
}
- for i = 0; i < Ymax; i++ {
+ for i := 0; i < Ymax; i++ {
ycover[i*Ymax+i] = 1
}
@@ -1395,7 +1391,7 @@
ycover[Ym*Ymax+Yxm] = 1
ycover[Yxr*Ymax+Yxm] = 1
- for i = 0; i < MAXREG; i++ {
+ for i := 0; i < MAXREG; i++ {
reg[i] = -1
if i >= REG_AL && i <= REG_BH {
reg[i] = (i - REG_AL) & 7
@@ -1455,8 +1451,6 @@
}
func oclass(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) int {
- var v int32
-
// TODO(rsc): This special case is for SHRQ $3, AX:DX,
// which encodes as SHRQ $32(DX*0), AX.
// Similarly SHRQ CX, AX:DX is really SHRQ CX(DX*0), AX.
@@ -1512,7 +1506,7 @@
ctxt.Diag("TYPE_CONST with symbol: %v", obj.Dconv(p, a))
}
- v = int32(a.Offset)
+ v := int32(a.Offset)
if v == 0 {
return Yi0
}
@@ -1742,16 +1736,14 @@
}
func relput4(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
- var v int64
var rel obj.Reloc
- var r *obj.Reloc
- v = int64(vaddr(ctxt, p, a, &rel))
+ v := int64(vaddr(ctxt, p, a, &rel))
if rel.Siz != 0 {
if rel.Siz != 4 {
ctxt.Diag("bad reloc")
}
- r = obj.Addrel(ctxt.Cursym)
+ r := obj.Addrel(ctxt.Cursym)
*r = rel
r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
}
@@ -1760,8 +1752,6 @@
}
func vaddr(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, r *obj.Reloc) int32 {
- var s *obj.LSym
-
if r != nil {
*r = obj.Reloc{}
}
@@ -1769,7 +1759,7 @@
switch a.Name {
case obj.NAME_STATIC,
obj.NAME_EXTERN:
- s = a.Sym
+ s := a.Sym
if s != nil {
if r == nil {
ctxt.Diag("need reloc for %v", obj.Dconv(p, a))
@@ -1804,11 +1794,10 @@
}
func asmand(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, r int) {
- var v int32
var base int
var rel obj.Reloc
- v = int32(a.Offset)
+ v := int32(a.Offset)
rel.Siz = 0
switch a.Type {
@@ -1838,7 +1827,7 @@
}
if a.Index != REG_NONE && a.Index != REG_TLS {
- base = int(a.Reg)
+ base := int(a.Reg)
switch a.Name {
case obj.NAME_EXTERN,
obj.NAME_STATIC:
@@ -1956,14 +1945,12 @@
putrelv:
if rel.Siz != 0 {
- var r *obj.Reloc
-
if rel.Siz != 4 {
ctxt.Diag("bad rel")
goto bad
}
- r = obj.Addrel(ctxt.Cursym)
+ r := obj.Addrel(ctxt.Cursym)
*r = rel
r.Off = int32(ctxt.Curp.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
}
@@ -2568,15 +2555,10 @@
// If a is empty, it returns BX to account for MULB-like instructions
// that might use DX and AX.
func byteswapreg(ctxt *obj.Link, a *obj.Addr) int {
- var cana int
- var canb int
- var canc int
- var cand int
-
- cand = 1
- canc = cand
- canb = canc
- cana = canb
+ cand := 1
+ canc := cand
+ canb := canc
+ cana := canb
if a.Type == obj.TYPE_NONE {
cand = 0
@@ -2707,25 +2689,9 @@
}
func doasm(ctxt *obj.Link, p *obj.Prog) {
- var o *Optab
- var q *obj.Prog
- var pp obj.Prog
- var t []byte
- var z int
- var op int
- var ft int
- var tt int
- var breg int
- var v int32
- var pre int32
- var rel obj.Reloc
- var r *obj.Reloc
- var a *obj.Addr
- var yt ytab
-
ctxt.Curp = p // TODO
- pre = int32(prefixof(ctxt, &p.From))
+ pre := int32(prefixof(ctxt, &p.From))
if pre != 0 {
ctxt.Andptr[0] = byte(pre)
@@ -2744,458 +2710,637 @@
p.Tt = uint8(oclass(ctxt, p, &p.To))
}
- ft = int(p.Ft) * Ymax
- tt = int(p.Tt) * Ymax
- o = opindex[p.As]
+ ft := int(p.Ft) * Ymax
+ tt := int(p.Tt) * Ymax
+ o := opindex[p.As]
- z = 0
+ z := 0
+ var a *obj.Addr
+ var op int
+ var q *obj.Prog
+ var r *obj.Reloc
+ var rel obj.Reloc
+ var v int32
+ var yt ytab
for _, yt = range o.ytab {
if ycover[ft+int(yt.from)] != 0 && ycover[tt+int(yt.to)] != 0 {
- goto found
+ switch o.prefix {
+ case Pq: /* 16 bit escape and opcode escape */
+ ctxt.Andptr[0] = Pe
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ ctxt.Andptr[0] = Pm
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case Pf2, /* xmm opcode escape */
+ Pf3:
+ ctxt.Andptr[0] = byte(o.prefix)
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ ctxt.Andptr[0] = Pm
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case Pm: /* opcode escape */
+ ctxt.Andptr[0] = Pm
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case Pe: /* 16 bit escape */
+ ctxt.Andptr[0] = Pe
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case Pb: /* botch */
+ break
+ }
+
+ op = int(o.op[z])
+ switch yt.zcase {
+ default:
+ ctxt.Diag("asmins: unknown z %d %v", yt.zcase, p)
+ return
+
+ case Zpseudo:
+ break
+
+ case Zlit:
+ for ; ; z++ {
+ op = int(o.op[z])
+ if op == 0 {
+ break
+ }
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+
+ case Zlitm_r:
+ for ; ; z++ {
+ op = int(o.op[z])
+ if op == 0 {
+ break
+ }
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+ asmand(ctxt, p, &p.From, reg[p.To.Reg])
+
+ case Zm_r:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.From, reg[p.To.Reg])
+
+ case Zm2_r:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(o.op[z+1])
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.From, reg[p.To.Reg])
+
+ case Zm_r_xm:
+ mediaop(ctxt, o, op, int(yt.zoffset), z)
+ asmand(ctxt, p, &p.From, reg[p.To.Reg])
+
+ case Zm_r_i_xm:
+ mediaop(ctxt, o, op, int(yt.zoffset), z)
+ asmand(ctxt, p, &p.From, reg[p.To.Reg])
+ ctxt.Andptr[0] = byte(p.To.Offset)
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case Zibm_r:
+ for {
+ tmp2 := z
+ z++
+ op = int(o.op[tmp2])
+ if op == 0 {
+ break
+ }
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+ asmand(ctxt, p, &p.From, reg[p.To.Reg])
+ ctxt.Andptr[0] = byte(p.To.Offset)
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case Zaut_r:
+ ctxt.Andptr[0] = 0x8d
+ ctxt.Andptr = ctxt.Andptr[1:] /* leal */
+ if p.From.Type != obj.TYPE_ADDR {
+ ctxt.Diag("asmins: Zaut sb type ADDR")
+ }
+ p.From.Type = obj.TYPE_MEM
+ p.Ft = 0
+ asmand(ctxt, p, &p.From, reg[p.To.Reg])
+ p.From.Type = obj.TYPE_ADDR
+ p.Ft = 0
+
+ case Zm_o:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.From, int(o.op[z+1]))
+
+ case Zr_m:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.To, reg[p.From.Reg])
+
+ case Zr_m_xm:
+ mediaop(ctxt, o, op, int(yt.zoffset), z)
+ asmand(ctxt, p, &p.To, reg[p.From.Reg])
+
+ case Zr_m_i_xm:
+ mediaop(ctxt, o, op, int(yt.zoffset), z)
+ asmand(ctxt, p, &p.To, reg[p.From.Reg])
+ ctxt.Andptr[0] = byte(p.From.Offset)
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case Zcallindreg:
+ r = obj.Addrel(ctxt.Cursym)
+ r.Off = int32(p.Pc)
+ r.Type = obj.R_CALLIND
+ r.Siz = 0
+ fallthrough
+
+ // fallthrough
+ case Zo_m:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ asmand(ctxt, p, &p.To, int(o.op[z+1]))
+
+ case Zm_ibo:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.From, int(o.op[z+1]))
+ ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.To, nil))
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case Zibo_m:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.To, int(o.op[z+1]))
+ ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.From, nil))
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case Z_ib,
+ Zib_:
+ if yt.zcase == Zib_ {
+ a = &p.From
+ } else {
+ a = &p.To
+ }
+ v = vaddr(ctxt, p, a, nil)
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v)
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case Zib_rp:
+ ctxt.Andptr[0] = byte(op + reg[p.To.Reg])
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.From, nil))
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case Zil_rp:
+ ctxt.Andptr[0] = byte(op + reg[p.To.Reg])
+ ctxt.Andptr = ctxt.Andptr[1:]
+ if o.prefix == Pe {
+ v = vaddr(ctxt, p, &p.From, nil)
+ ctxt.Andptr[0] = byte(v)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v >> 8)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ } else {
+ relput4(ctxt, p, &p.From)
+ }
+
+ case Zib_rr:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.To, reg[p.To.Reg])
+ ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.From, nil))
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case Z_il,
+ Zil_:
+ if yt.zcase == Zil_ {
+ a = &p.From
+ } else {
+ a = &p.To
+ }
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ if o.prefix == Pe {
+ v = vaddr(ctxt, p, a, nil)
+ ctxt.Andptr[0] = byte(v)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v >> 8)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ } else {
+ relput4(ctxt, p, a)
+ }
+
+ case Zm_ilo,
+ Zilo_m:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ if yt.zcase == Zilo_m {
+ a = &p.From
+ asmand(ctxt, p, &p.To, int(o.op[z+1]))
+ } else {
+ a = &p.To
+ asmand(ctxt, p, &p.From, int(o.op[z+1]))
+ }
+
+ if o.prefix == Pe {
+ v = vaddr(ctxt, p, a, nil)
+ ctxt.Andptr[0] = byte(v)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v >> 8)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ } else {
+ relput4(ctxt, p, a)
+ }
+
+ case Zil_rr:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.To, reg[p.To.Reg])
+ if o.prefix == Pe {
+ v = vaddr(ctxt, p, &p.From, nil)
+ ctxt.Andptr[0] = byte(v)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v >> 8)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ } else {
+ relput4(ctxt, p, &p.From)
+ }
+
+ case Z_rp:
+ ctxt.Andptr[0] = byte(op + reg[p.To.Reg])
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case Zrp_:
+ ctxt.Andptr[0] = byte(op + reg[p.From.Reg])
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case Zclr:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.To, reg[p.To.Reg])
+
+ case Zcall:
+ if p.To.Sym == nil {
+ ctxt.Diag("call without target")
+ log.Fatalf("bad code")
+ }
+
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ r = obj.Addrel(ctxt.Cursym)
+ r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
+ r.Type = obj.R_CALL
+ r.Siz = 4
+ r.Sym = p.To.Sym
+ r.Add = p.To.Offset
+ put4(ctxt, 0)
+
+ case Zbr,
+ Zjmp,
+ Zloop:
+ if p.To.Sym != nil {
+ if yt.zcase != Zjmp {
+ ctxt.Diag("branch to ATEXT")
+ log.Fatalf("bad code")
+ }
+
+ ctxt.Andptr[0] = byte(o.op[z+1])
+ ctxt.Andptr = ctxt.Andptr[1:]
+ r = obj.Addrel(ctxt.Cursym)
+ r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
+ r.Sym = p.To.Sym
+ r.Type = obj.R_PCREL
+ r.Siz = 4
+ put4(ctxt, 0)
+ break
+ }
+
+ // Assumes q is in this function.
+ // Fill in backward jump now.
+ q = p.Pcond
+
+ if q == nil {
+ ctxt.Diag("jmp/branch/loop without target")
+ log.Fatalf("bad code")
+ }
+
+ if p.Back&1 != 0 {
+ v = int32(q.Pc - (p.Pc + 2))
+ if v >= -128 {
+ if p.As == AJCXZW {
+ ctxt.Andptr[0] = 0x67
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ } else if yt.zcase == Zloop {
+ ctxt.Diag("loop too far: %v", p)
+ } else {
+ v -= 5 - 2
+ if yt.zcase == Zbr {
+ ctxt.Andptr[0] = 0x0f
+ ctxt.Andptr = ctxt.Andptr[1:]
+ v--
+ }
+
+ ctxt.Andptr[0] = byte(o.op[z+1])
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v >> 8)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v >> 16)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v >> 24)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+
+ break
+ }
+
+ // Annotate target; will fill in later.
+ p.Forwd = q.Comefrom
+
+ q.Comefrom = p
+ if p.Back&2 != 0 { // short
+ if p.As == AJCXZW {
+ ctxt.Andptr[0] = 0x67
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = 0
+ ctxt.Andptr = ctxt.Andptr[1:]
+ } else if yt.zcase == Zloop {
+ ctxt.Diag("loop too far: %v", p)
+ } else {
+ if yt.zcase == Zbr {
+ ctxt.Andptr[0] = 0x0f
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+ ctxt.Andptr[0] = byte(o.op[z+1])
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = 0
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = 0
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = 0
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = 0
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+
+ case Zcallcon,
+ Zjmpcon:
+ if yt.zcase == Zcallcon {
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ } else {
+ ctxt.Andptr[0] = byte(o.op[z+1])
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+ r = obj.Addrel(ctxt.Cursym)
+ r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
+ r.Type = obj.R_PCREL
+ r.Siz = 4
+ r.Add = p.To.Offset
+ put4(ctxt, 0)
+
+ case Zcallind:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(o.op[z+1])
+ ctxt.Andptr = ctxt.Andptr[1:]
+ r = obj.Addrel(ctxt.Cursym)
+ r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
+ r.Type = obj.R_ADDR
+ r.Siz = 4
+ r.Add = p.To.Offset
+ r.Sym = p.To.Sym
+ put4(ctxt, 0)
+
+ case Zbyte:
+ v = vaddr(ctxt, p, &p.From, &rel)
+ if rel.Siz != 0 {
+ rel.Siz = uint8(op)
+ r = obj.Addrel(ctxt.Cursym)
+ *r = rel
+ r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
+ }
+
+ ctxt.Andptr[0] = byte(v)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ if op > 1 {
+ ctxt.Andptr[0] = byte(v >> 8)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ if op > 2 {
+ ctxt.Andptr[0] = byte(v >> 16)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v >> 24)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+ }
+
+ case Zmov:
+ goto domov
+ }
+
+ return
}
z += int(yt.zoffset)
}
goto domov
-found:
- switch o.prefix {
- case Pq: /* 16 bit escape and opcode escape */
- ctxt.Andptr[0] = Pe
- ctxt.Andptr = ctxt.Andptr[1:]
-
- ctxt.Andptr[0] = Pm
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case Pf2, /* xmm opcode escape */
- Pf3:
- ctxt.Andptr[0] = byte(o.prefix)
- ctxt.Andptr = ctxt.Andptr[1:]
-
- ctxt.Andptr[0] = Pm
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case Pm: /* opcode escape */
- ctxt.Andptr[0] = Pm
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case Pe: /* 16 bit escape */
- ctxt.Andptr[0] = Pe
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case Pb: /* botch */
- break
- }
-
- op = int(o.op[z])
- switch yt.zcase {
- default:
- ctxt.Diag("asmins: unknown z %d %v", yt.zcase, p)
- return
-
- case Zpseudo:
- break
-
- case Zlit:
- for ; ; z++ {
- op = int(o.op[z])
- if op == 0 {
- break
- }
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- }
-
- case Zlitm_r:
- for ; ; z++ {
- op = int(o.op[z])
- if op == 0 {
- break
- }
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- }
- asmand(ctxt, p, &p.From, reg[p.To.Reg])
-
- case Zm_r:
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &p.From, reg[p.To.Reg])
-
- case Zm2_r:
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(o.op[z+1])
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &p.From, reg[p.To.Reg])
-
- case Zm_r_xm:
- mediaop(ctxt, o, op, int(yt.zoffset), z)
- asmand(ctxt, p, &p.From, reg[p.To.Reg])
-
- case Zm_r_i_xm:
- mediaop(ctxt, o, op, int(yt.zoffset), z)
- asmand(ctxt, p, &p.From, reg[p.To.Reg])
- ctxt.Andptr[0] = byte(p.To.Offset)
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case Zibm_r:
- for {
- tmp2 := z
- z++
- op = int(o.op[tmp2])
- if op == 0 {
- break
- }
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- }
- asmand(ctxt, p, &p.From, reg[p.To.Reg])
- ctxt.Andptr[0] = byte(p.To.Offset)
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case Zaut_r:
- ctxt.Andptr[0] = 0x8d
- ctxt.Andptr = ctxt.Andptr[1:] /* leal */
- if p.From.Type != obj.TYPE_ADDR {
- ctxt.Diag("asmins: Zaut sb type ADDR")
- }
- p.From.Type = obj.TYPE_MEM
- p.Ft = 0
- asmand(ctxt, p, &p.From, reg[p.To.Reg])
- p.From.Type = obj.TYPE_ADDR
- p.Ft = 0
-
- case Zm_o:
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &p.From, int(o.op[z+1]))
-
- case Zr_m:
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &p.To, reg[p.From.Reg])
-
- case Zr_m_xm:
- mediaop(ctxt, o, op, int(yt.zoffset), z)
- asmand(ctxt, p, &p.To, reg[p.From.Reg])
-
- case Zr_m_i_xm:
- mediaop(ctxt, o, op, int(yt.zoffset), z)
- asmand(ctxt, p, &p.To, reg[p.From.Reg])
- ctxt.Andptr[0] = byte(p.From.Offset)
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case Zcallindreg:
- r = obj.Addrel(ctxt.Cursym)
- r.Off = int32(p.Pc)
- r.Type = obj.R_CALLIND
- r.Siz = 0
- fallthrough
-
- // fallthrough
- case Zo_m:
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
-
- asmand(ctxt, p, &p.To, int(o.op[z+1]))
-
- case Zm_ibo:
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &p.From, int(o.op[z+1]))
- ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.To, nil))
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case Zibo_m:
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &p.To, int(o.op[z+1]))
- ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.From, nil))
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case Z_ib,
- Zib_:
- if yt.zcase == Zib_ {
- a = &p.From
- } else {
- a = &p.To
- }
- v = vaddr(ctxt, p, a, nil)
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v)
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case Zib_rp:
- ctxt.Andptr[0] = byte(op + reg[p.To.Reg])
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.From, nil))
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case Zil_rp:
- ctxt.Andptr[0] = byte(op + reg[p.To.Reg])
- ctxt.Andptr = ctxt.Andptr[1:]
- if o.prefix == Pe {
- v = vaddr(ctxt, p, &p.From, nil)
- ctxt.Andptr[0] = byte(v)
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v >> 8)
- ctxt.Andptr = ctxt.Andptr[1:]
- } else {
- relput4(ctxt, p, &p.From)
- }
-
- case Zib_rr:
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &p.To, reg[p.To.Reg])
- ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.From, nil))
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case Z_il,
- Zil_:
- if yt.zcase == Zil_ {
- a = &p.From
- } else {
- a = &p.To
- }
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- if o.prefix == Pe {
- v = vaddr(ctxt, p, a, nil)
- ctxt.Andptr[0] = byte(v)
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v >> 8)
- ctxt.Andptr = ctxt.Andptr[1:]
- } else {
- relput4(ctxt, p, a)
- }
-
- case Zm_ilo,
- Zilo_m:
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- if yt.zcase == Zilo_m {
- a = &p.From
- asmand(ctxt, p, &p.To, int(o.op[z+1]))
- } else {
- a = &p.To
- asmand(ctxt, p, &p.From, int(o.op[z+1]))
- }
-
- if o.prefix == Pe {
- v = vaddr(ctxt, p, a, nil)
- ctxt.Andptr[0] = byte(v)
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v >> 8)
- ctxt.Andptr = ctxt.Andptr[1:]
- } else {
- relput4(ctxt, p, a)
- }
-
- case Zil_rr:
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &p.To, reg[p.To.Reg])
- if o.prefix == Pe {
- v = vaddr(ctxt, p, &p.From, nil)
- ctxt.Andptr[0] = byte(v)
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v >> 8)
- ctxt.Andptr = ctxt.Andptr[1:]
- } else {
- relput4(ctxt, p, &p.From)
- }
-
- case Z_rp:
- ctxt.Andptr[0] = byte(op + reg[p.To.Reg])
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case Zrp_:
- ctxt.Andptr[0] = byte(op + reg[p.From.Reg])
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case Zclr:
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &p.To, reg[p.To.Reg])
-
- case Zcall:
- if p.To.Sym == nil {
- ctxt.Diag("call without target")
- log.Fatalf("bad code")
- }
-
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- r = obj.Addrel(ctxt.Cursym)
- r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
- r.Type = obj.R_CALL
- r.Siz = 4
- r.Sym = p.To.Sym
- r.Add = p.To.Offset
- put4(ctxt, 0)
-
- case Zbr,
- Zjmp,
- Zloop:
- if p.To.Sym != nil {
- if yt.zcase != Zjmp {
- ctxt.Diag("branch to ATEXT")
- log.Fatalf("bad code")
- }
-
- ctxt.Andptr[0] = byte(o.op[z+1])
- ctxt.Andptr = ctxt.Andptr[1:]
- r = obj.Addrel(ctxt.Cursym)
- r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
- r.Sym = p.To.Sym
- r.Type = obj.R_PCREL
- r.Siz = 4
- put4(ctxt, 0)
- break
- }
-
- // Assumes q is in this function.
- // Fill in backward jump now.
- q = p.Pcond
-
- if q == nil {
- ctxt.Diag("jmp/branch/loop without target")
- log.Fatalf("bad code")
- }
-
- if p.Back&1 != 0 {
- v = int32(q.Pc - (p.Pc + 2))
- if v >= -128 {
- if p.As == AJCXZW {
- ctxt.Andptr[0] = 0x67
- ctxt.Andptr = ctxt.Andptr[1:]
- }
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v)
- ctxt.Andptr = ctxt.Andptr[1:]
- } else if yt.zcase == Zloop {
- ctxt.Diag("loop too far: %v", p)
- } else {
- v -= 5 - 2
- if yt.zcase == Zbr {
- ctxt.Andptr[0] = 0x0f
- ctxt.Andptr = ctxt.Andptr[1:]
- v--
- }
-
- ctxt.Andptr[0] = byte(o.op[z+1])
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v)
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v >> 8)
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v >> 16)
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v >> 24)
- ctxt.Andptr = ctxt.Andptr[1:]
- }
-
- break
- }
-
- // Annotate target; will fill in later.
- p.Forwd = q.Comefrom
-
- q.Comefrom = p
- if p.Back&2 != 0 { // short
- if p.As == AJCXZW {
- ctxt.Andptr[0] = 0x67
- ctxt.Andptr = ctxt.Andptr[1:]
- }
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = 0
- ctxt.Andptr = ctxt.Andptr[1:]
- } else if yt.zcase == Zloop {
- ctxt.Diag("loop too far: %v", p)
- } else {
- if yt.zcase == Zbr {
- ctxt.Andptr[0] = 0x0f
- ctxt.Andptr = ctxt.Andptr[1:]
- }
- ctxt.Andptr[0] = byte(o.op[z+1])
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = 0
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = 0
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = 0
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = 0
- ctxt.Andptr = ctxt.Andptr[1:]
- }
-
- case Zcallcon,
- Zjmpcon:
- if yt.zcase == Zcallcon {
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- } else {
- ctxt.Andptr[0] = byte(o.op[z+1])
- ctxt.Andptr = ctxt.Andptr[1:]
- }
- r = obj.Addrel(ctxt.Cursym)
- r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
- r.Type = obj.R_PCREL
- r.Siz = 4
- r.Add = p.To.Offset
- put4(ctxt, 0)
-
- case Zcallind:
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(o.op[z+1])
- ctxt.Andptr = ctxt.Andptr[1:]
- r = obj.Addrel(ctxt.Cursym)
- r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
- r.Type = obj.R_ADDR
- r.Siz = 4
- r.Add = p.To.Offset
- r.Sym = p.To.Sym
- put4(ctxt, 0)
-
- case Zbyte:
- v = vaddr(ctxt, p, &p.From, &rel)
- if rel.Siz != 0 {
- rel.Siz = uint8(op)
- r = obj.Addrel(ctxt.Cursym)
- *r = rel
- r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
- }
-
- ctxt.Andptr[0] = byte(v)
- ctxt.Andptr = ctxt.Andptr[1:]
- if op > 1 {
- ctxt.Andptr[0] = byte(v >> 8)
- ctxt.Andptr = ctxt.Andptr[1:]
- if op > 2 {
- ctxt.Andptr[0] = byte(v >> 16)
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v >> 24)
- ctxt.Andptr = ctxt.Andptr[1:]
- }
- }
-
- case Zmov:
- goto domov
- }
-
- return
-
domov:
- for t = []byte(ymovtab); t[0] != 0; t = t[8:] {
+ var pp obj.Prog
+ for t := []byte(ymovtab); t[0] != 0; t = t[8:] {
if p.As == int16(t[0]) {
if ycover[ft+int(t[1])] != 0 {
if ycover[tt+int(t[2])] != 0 {
- goto mfound
+ switch t[3] {
+ default:
+ ctxt.Diag("asmins: unknown mov %d %v", t[3], p)
+
+ case 0: /* lit */
+ for z = 4; t[z] != E; z++ {
+ ctxt.Andptr[0] = t[z]
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+
+ case 1: /* r,m */
+ ctxt.Andptr[0] = t[4]
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ asmand(ctxt, p, &p.To, int(t[5]))
+
+ case 2: /* m,r */
+ ctxt.Andptr[0] = t[4]
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ asmand(ctxt, p, &p.From, int(t[5]))
+
+ case 3: /* r,m - 2op */
+ ctxt.Andptr[0] = t[4]
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ ctxt.Andptr[0] = t[5]
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.To, int(t[6]))
+
+ case 4: /* m,r - 2op */
+ ctxt.Andptr[0] = t[4]
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ ctxt.Andptr[0] = t[5]
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.From, int(t[6]))
+
+ case 5: /* load full pointer, trash heap */
+ if t[4] != 0 {
+ ctxt.Andptr[0] = t[4]
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+ switch p.To.Index {
+ default:
+ goto bad
+
+ case REG_DS:
+ ctxt.Andptr[0] = 0xc5
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case REG_SS:
+ ctxt.Andptr[0] = 0x0f
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = 0xb2
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case REG_ES:
+ ctxt.Andptr[0] = 0xc4
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case REG_FS:
+ ctxt.Andptr[0] = 0x0f
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = 0xb4
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case REG_GS:
+ ctxt.Andptr[0] = 0x0f
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = 0xb5
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+
+ asmand(ctxt, p, &p.From, reg[p.To.Reg])
+
+ case 6: /* double shift */
+ switch p.From.Type {
+ default:
+ goto bad
+
+ case obj.TYPE_CONST:
+ ctxt.Andptr[0] = 0x0f
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = t[4]
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.To, reg[p.From.Index])
+ ctxt.Andptr[0] = byte(p.From.Offset)
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case obj.TYPE_REG:
+ switch p.From.Reg {
+ default:
+ goto bad
+
+ case REG_CL,
+ REG_CX:
+ ctxt.Andptr[0] = 0x0f
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = t[5]
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.To, reg[p.From.Index])
+ }
+ }
+
+ case 7: /* imul rm,r */
+ if t[4] == Pq {
+ ctxt.Andptr[0] = Pe
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = Pm
+ ctxt.Andptr = ctxt.Andptr[1:]
+ } else {
+ ctxt.Andptr[0] = t[4]
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+ ctxt.Andptr[0] = t[5]
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.From, reg[p.To.Reg])
+
+ // NOTE: The systems listed here are the ones that use the "TLS initial exec" model,
+ // where you load the TLS base register into a register and then index off that
+ // register to access the actual TLS variables. Systems that allow direct TLS access
+ // are handled in prefixof above and should not be listed here.
+ case 8: /* mov tls, r */
+ switch ctxt.Headtype {
+ default:
+ log.Fatalf("unknown TLS base location for %s", obj.Headstr(ctxt.Headtype))
+
+ // ELF TLS base is 0(GS).
+ case obj.Hlinux,
+ obj.Hnacl:
+ pp.From = p.From
+
+ pp.From.Type = obj.TYPE_MEM
+ pp.From.Reg = REG_GS
+ pp.From.Offset = 0
+ pp.From.Index = REG_NONE
+ pp.From.Scale = 0
+ ctxt.Andptr[0] = 0x65
+ ctxt.Andptr = ctxt.Andptr[1:] // GS
+ ctxt.Andptr[0] = 0x8B
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &pp.From, reg[p.To.Reg])
+
+ case obj.Hplan9:
+ if ctxt.Plan9privates == nil {
+ ctxt.Plan9privates = obj.Linklookup(ctxt, "_privates", 0)
+ }
+ pp.From = obj.Addr{}
+ pp.From.Type = obj.TYPE_MEM
+ pp.From.Name = obj.NAME_EXTERN
+ pp.From.Sym = ctxt.Plan9privates
+ pp.From.Offset = 0
+ pp.From.Index = REG_NONE
+ ctxt.Andptr[0] = 0x8B
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &pp.From, reg[p.To.Reg])
+
+ // Windows TLS base is always 0x14(FS).
+ case obj.Hwindows:
+ pp.From = p.From
+
+ pp.From.Type = obj.TYPE_MEM
+ pp.From.Reg = REG_FS
+ pp.From.Offset = 0x14
+ pp.From.Index = REG_NONE
+ pp.From.Scale = 0
+ ctxt.Andptr[0] = 0x64
+ ctxt.Andptr = ctxt.Andptr[1:] // FS
+ ctxt.Andptr[0] = 0x8B
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &pp.From, reg[p.To.Reg])
+ }
+ }
+ return
}
}
}
@@ -3213,7 +3358,7 @@
z = int(p.From.Reg)
if p.From.Type == obj.TYPE_REG && z >= REG_BP && z <= REG_DI {
- breg = byteswapreg(ctxt, &p.To)
+ breg := byteswapreg(ctxt, &p.To)
if breg != REG_AX {
ctxt.Andptr[0] = 0x87
ctxt.Andptr = ctxt.Andptr[1:] /* xchg lhs,bx */
@@ -3237,7 +3382,7 @@
z = int(p.To.Reg)
if p.To.Type == obj.TYPE_REG && z >= REG_BP && z <= REG_DI {
- breg = byteswapreg(ctxt, &p.From)
+ breg := byteswapreg(ctxt, &p.From)
if breg != REG_AX {
ctxt.Andptr[0] = 0x87
ctxt.Andptr = ctxt.Andptr[1:] /* xchg rhs,bx */
@@ -3261,182 +3406,6 @@
ctxt.Diag("doasm: notfound t2=%d from=%d to=%d %v", yt.zcase, p.Ft, p.Tt, p)
return
-
-mfound:
- switch t[3] {
- default:
- ctxt.Diag("asmins: unknown mov %d %v", t[3], p)
-
- case 0: /* lit */
- for z = 4; t[z] != E; z++ {
- ctxt.Andptr[0] = t[z]
- ctxt.Andptr = ctxt.Andptr[1:]
- }
-
- case 1: /* r,m */
- ctxt.Andptr[0] = t[4]
- ctxt.Andptr = ctxt.Andptr[1:]
-
- asmand(ctxt, p, &p.To, int(t[5]))
-
- case 2: /* m,r */
- ctxt.Andptr[0] = t[4]
- ctxt.Andptr = ctxt.Andptr[1:]
-
- asmand(ctxt, p, &p.From, int(t[5]))
-
- case 3: /* r,m - 2op */
- ctxt.Andptr[0] = t[4]
- ctxt.Andptr = ctxt.Andptr[1:]
-
- ctxt.Andptr[0] = t[5]
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &p.To, int(t[6]))
-
- case 4: /* m,r - 2op */
- ctxt.Andptr[0] = t[4]
- ctxt.Andptr = ctxt.Andptr[1:]
-
- ctxt.Andptr[0] = t[5]
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &p.From, int(t[6]))
-
- case 5: /* load full pointer, trash heap */
- if t[4] != 0 {
- ctxt.Andptr[0] = t[4]
- ctxt.Andptr = ctxt.Andptr[1:]
- }
- switch p.To.Index {
- default:
- goto bad
-
- case REG_DS:
- ctxt.Andptr[0] = 0xc5
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case REG_SS:
- ctxt.Andptr[0] = 0x0f
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = 0xb2
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case REG_ES:
- ctxt.Andptr[0] = 0xc4
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case REG_FS:
- ctxt.Andptr[0] = 0x0f
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = 0xb4
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case REG_GS:
- ctxt.Andptr[0] = 0x0f
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = 0xb5
- ctxt.Andptr = ctxt.Andptr[1:]
- }
-
- asmand(ctxt, p, &p.From, reg[p.To.Reg])
-
- case 6: /* double shift */
- switch p.From.Type {
- default:
- goto bad
-
- case obj.TYPE_CONST:
- ctxt.Andptr[0] = 0x0f
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = t[4]
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &p.To, reg[p.From.Index])
- ctxt.Andptr[0] = byte(p.From.Offset)
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case obj.TYPE_REG:
- switch p.From.Reg {
- default:
- goto bad
-
- case REG_CL,
- REG_CX:
- ctxt.Andptr[0] = 0x0f
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = t[5]
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &p.To, reg[p.From.Index])
- }
- }
-
- case 7: /* imul rm,r */
- if t[4] == Pq {
- ctxt.Andptr[0] = Pe
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = Pm
- ctxt.Andptr = ctxt.Andptr[1:]
- } else {
- ctxt.Andptr[0] = t[4]
- ctxt.Andptr = ctxt.Andptr[1:]
- }
- ctxt.Andptr[0] = t[5]
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &p.From, reg[p.To.Reg])
-
- // NOTE: The systems listed here are the ones that use the "TLS initial exec" model,
- // where you load the TLS base register into a register and then index off that
- // register to access the actual TLS variables. Systems that allow direct TLS access
- // are handled in prefixof above and should not be listed here.
- case 8: /* mov tls, r */
- switch ctxt.Headtype {
- default:
- log.Fatalf("unknown TLS base location for %s", obj.Headstr(ctxt.Headtype))
-
- // ELF TLS base is 0(GS).
- case obj.Hlinux,
- obj.Hnacl:
- pp.From = p.From
-
- pp.From.Type = obj.TYPE_MEM
- pp.From.Reg = REG_GS
- pp.From.Offset = 0
- pp.From.Index = REG_NONE
- pp.From.Scale = 0
- ctxt.Andptr[0] = 0x65
- ctxt.Andptr = ctxt.Andptr[1:] // GS
- ctxt.Andptr[0] = 0x8B
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &pp.From, reg[p.To.Reg])
-
- case obj.Hplan9:
- if ctxt.Plan9privates == nil {
- ctxt.Plan9privates = obj.Linklookup(ctxt, "_privates", 0)
- }
- pp.From = obj.Addr{}
- pp.From.Type = obj.TYPE_MEM
- pp.From.Name = obj.NAME_EXTERN
- pp.From.Sym = ctxt.Plan9privates
- pp.From.Offset = 0
- pp.From.Index = REG_NONE
- ctxt.Andptr[0] = 0x8B
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &pp.From, reg[p.To.Reg])
-
- // Windows TLS base is always 0x14(FS).
- case obj.Hwindows:
- pp.From = p.From
-
- pp.From.Type = obj.TYPE_MEM
- pp.From.Reg = REG_FS
- pp.From.Offset = 0x14
- pp.From.Index = REG_NONE
- pp.From.Scale = 0
- ctxt.Andptr[0] = 0x64
- ctxt.Andptr = ctxt.Andptr[1:] // FS
- ctxt.Andptr[0] = 0x8B
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &pp.From, reg[p.To.Reg])
- }
- }
}
var naclret = []uint8{
@@ -3450,12 +3419,10 @@
}
func asmins(ctxt *obj.Link, p *obj.Prog) {
- var r *obj.Reloc
-
ctxt.Andptr = ctxt.And[:]
if p.As == obj.AUSEFIELD {
- r = obj.Addrel(ctxt.Cursym)
+ r := obj.Addrel(ctxt.Cursym)
r.Off = 0
r.Sym = p.From.Sym
r.Type = obj.R_USEFIELD
diff --git a/src/cmd/internal/obj/i386/list8.go b/src/cmd/internal/obj/i386/list8.go
index 3aef35b..f843006 100644
--- a/src/cmd/internal/obj/i386/list8.go
+++ b/src/cmd/internal/obj/i386/list8.go
@@ -43,7 +43,6 @@
func Pconv(p *obj.Prog) string {
var str string
- var fp string
switch p.As {
case obj.ADATA:
@@ -72,6 +71,7 @@
}
}
+ var fp string
fp += str
return fp
}
diff --git a/src/cmd/internal/obj/i386/obj8.go b/src/cmd/internal/obj/i386/obj8.go
index 88adba0..63cb7f2 100644
--- a/src/cmd/internal/obj/i386/obj8.go
+++ b/src/cmd/internal/obj/i386/obj8.go
@@ -51,10 +51,6 @@
}
func progedit(ctxt *obj.Link, p *obj.Prog) {
- var literal string
- var s *obj.LSym
- var q *obj.Prog
-
// See obj6.c for discussion of TLS.
if canuselocaltls(ctxt) {
// Reduce TLS initial exec model to TLS local exec model.
@@ -92,7 +88,7 @@
// MOVL off(BX)(TLS*1), BX
// This allows the C compilers to emit references to m and g using the direct off(TLS) form.
if p.As == AMOVL && p.From.Type == obj.TYPE_MEM && p.From.Reg == REG_TLS && p.To.Type == obj.TYPE_REG && REG_AX <= p.To.Reg && p.To.Reg <= REG_DI {
- q = obj.Appendp(ctxt, p)
+ q := obj.Appendp(ctxt, p)
q.As = p.As
q.From.Type = obj.TYPE_MEM
q.From.Reg = p.To.Reg
@@ -159,12 +155,10 @@
ACOMISS,
AUCOMISS:
if p.From.Type == obj.TYPE_FCONST {
- var i32 uint32
- var f32 float32
- f32 = float32(p.From.U.Dval)
- i32 = math.Float32bits(f32)
- literal = fmt.Sprintf("$f32.%08x", i32)
- s = obj.Linklookup(ctxt, literal, 0)
+ f32 := float32(p.From.U.Dval)
+ i32 := math.Float32bits(f32)
+ literal := fmt.Sprintf("$f32.%08x", i32)
+ s := obj.Linklookup(ctxt, literal, 0)
if s.Type == 0 {
s.Type = obj.SRODATA
obj.Adduint32(ctxt, s, i32)
@@ -208,10 +202,9 @@
ACOMISD,
AUCOMISD:
if p.From.Type == obj.TYPE_FCONST {
- var i64 uint64
- i64 = math.Float64bits(p.From.U.Dval)
- literal = fmt.Sprintf("$f64.%016x", i64)
- s = obj.Linklookup(ctxt, literal, 0)
+ i64 := math.Float64bits(p.From.U.Dval)
+ literal := fmt.Sprintf("$f64.%016x", i64)
+ s := obj.Linklookup(ctxt, literal, 0)
if s.Type == 0 {
s.Type = obj.SRODATA
obj.Adduint64(ctxt, s, i64)
@@ -227,14 +220,6 @@
}
func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
- var p *obj.Prog
- var q *obj.Prog
- var p1 *obj.Prog
- var p2 *obj.Prog
- var autoffset int32
- var deltasp int32
- var a int
-
if ctxt.Symmorestack[0] == nil {
ctxt.Symmorestack[0] = obj.Linklookup(ctxt, "runtime.morestack", 0)
ctxt.Symmorestack[1] = obj.Linklookup(ctxt, "runtime.morestack_noctxt", 0)
@@ -250,8 +235,8 @@
return
}
- p = cursym.Text
- autoffset = int32(p.To.Offset)
+ p := cursym.Text
+ autoffset := int32(p.To.Offset)
if autoffset < 0 {
autoffset = 0
}
@@ -259,7 +244,7 @@
cursym.Locals = autoffset
cursym.Args = p.To.U.Argsize
- q = nil
+ q := (*obj.Prog)(nil)
if p.From3.Offset&obj.NOSPLIT == 0 || (p.From3.Offset&obj.WRAPPER != 0) {
p = obj.Appendp(ctxt, p)
@@ -292,7 +277,7 @@
if q != nil {
q.Pcond = p
}
- deltasp = autoffset
+ deltasp := autoffset
if cursym.Text.From3.Offset&obj.WRAPPER != 0 {
// if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
@@ -329,7 +314,7 @@
p = obj.Appendp(ctxt, p)
p.As = AJEQ
p.To.Type = obj.TYPE_BRANCH
- p1 = p
+ p1 := p
p = obj.Appendp(ctxt, p)
p.As = ALEAL
@@ -350,7 +335,7 @@
p = obj.Appendp(ctxt, p)
p.As = AJNE
p.To.Type = obj.TYPE_BRANCH
- p2 = p
+ p2 := p
p = obj.Appendp(ctxt, p)
p.As = AMOVL
@@ -400,6 +385,7 @@
p.As = ASTOSL
}
+ var a int
for ; p != nil; p = p.Link {
a = int(p.From.Name)
if a == obj.NAME_AUTO {
@@ -479,8 +465,6 @@
// prologue (caller must call appendp first) and in the epilogue.
// Returns last new instruction.
func load_g_cx(ctxt *obj.Link, p *obj.Prog) *obj.Prog {
- var next *obj.Prog
-
p.As = AMOVL
p.From.Type = obj.TYPE_MEM
p.From.Reg = REG_TLS
@@ -488,7 +472,7 @@
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_CX
- next = p.Link
+ next := p.Link
progedit(ctxt, p)
for p.Link != next {
p = p.Link
@@ -508,9 +492,6 @@
// On return, *jmpok is the instruction that should jump
// to the stack frame allocation if no split is needed.
func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, noctxt bool, jmpok **obj.Prog) *obj.Prog {
- var q *obj.Prog
- var q1 *obj.Prog
-
if ctxt.Debugstack != 0 {
// 8l -K means check not only for stack
// overflow but stack underflow.
@@ -530,7 +511,7 @@
p.As = AJCC
p.To.Type = obj.TYPE_BRANCH
p.To.Offset = 4
- q1 = p
+ q1 := p
p = obj.Appendp(ctxt, p)
p.As = AINT
@@ -542,7 +523,7 @@
q1.Pcond = p
}
- q1 = nil
+ q1 := (*obj.Prog)(nil)
if framesize <= obj.StackSmall {
// small stack: SP <= stackguard
@@ -651,7 +632,7 @@
p.As = AJHI
p.To.Type = obj.TYPE_BRANCH
p.To.Offset = 4
- q = p
+ q := p
p = obj.Appendp(ctxt, p)
p.As = obj.ACALL
@@ -679,13 +660,10 @@
}
func follow(ctxt *obj.Link, s *obj.LSym) {
- var firstp *obj.Prog
- var lastp *obj.Prog
-
ctxt.Cursym = s
- firstp = ctxt.NewProg()
- lastp = firstp
+ firstp := ctxt.NewProg()
+ lastp := firstp
xfol(ctxt, s.Text, &lastp)
lastp.Link = nil
s.Text = firstp.Link
diff --git a/src/cmd/internal/obj/ppc64/asm9.go b/src/cmd/internal/obj/ppc64/asm9.go
index e665229..16d1626 100644
--- a/src/cmd/internal/obj/ppc64/asm9.go
+++ b/src/cmd/internal/obj/ppc64/asm9.go
@@ -407,18 +407,7 @@
var xcmp [C_NCLASS][C_NCLASS]uint8
func span9(ctxt *obj.Link, cursym *obj.LSym) {
- var p *obj.Prog
- var q *obj.Prog
- var o *Optab
- var m int
- var bflag int
- var c int64
- var otxt int64
- var out [6]uint32
- var i int32
- var bp []byte
-
- p = cursym.Text
+ p := cursym.Text
if p == nil || p.Link == nil { // handle external functions and ELF section symbols
return
}
@@ -429,9 +418,11 @@
buildop(ctxt)
}
- c = 0
+ c := int64(0)
p.Pc = c
+ var m int
+ var o *Optab
for p = p.Link; p != nil; p = p.Link {
ctxt.Curp = p
p.Pc = c
@@ -455,8 +446,10 @@
* generate extra passes putting branches
* around jmps to fix. this is rare.
*/
- bflag = 1
+ bflag := 1
+ var otxt int64
+ var q *obj.Prog
for bflag != 0 {
if ctxt.Debugvlog != 0 {
fmt.Fprintf(ctxt.Bso, "%5.2f span1\n", obj.Cputime())
@@ -517,8 +510,10 @@
obj.Symgrow(ctxt, cursym, cursym.Size)
- bp = cursym.P
- for p = cursym.Text.Link; p != nil; p = p.Link {
+ bp := cursym.P
+ var i int32
+ var out [6]uint32
+ for p := cursym.Text.Link; p != nil; p = p.Link {
ctxt.Pc = p.Pc
ctxt.Curp = p
o = oplook(ctxt, p)
@@ -542,8 +537,6 @@
}
func aclass(ctxt *obj.Link, a *obj.Addr) int {
- var s *obj.LSym
-
switch a.Type {
case obj.TYPE_NONE:
return C_NONE
@@ -646,7 +639,7 @@
case obj.NAME_EXTERN,
obj.NAME_STATIC:
- s = a.Sym
+ s := a.Sym
if s == nil {
break
}
@@ -720,18 +713,7 @@
}
func oplook(ctxt *obj.Link, p *obj.Prog) *Optab {
- var a1 int
- var a2 int
- var a3 int
- var a4 int
- var r int
- var c1 []byte
- var c3 []byte
- var c4 []byte
- var o []Optab
- var e []Optab
-
- a1 = int(p.Optab)
+ a1 := int(p.Optab)
if a1 != 0 {
return &optab[a1-1:][0]
}
@@ -742,36 +724,36 @@
}
a1--
- a3 = int(p.From3.Class)
+ a3 := int(p.From3.Class)
if a3 == 0 {
a3 = aclass(ctxt, &p.From3) + 1
p.From3.Class = int8(a3)
}
a3--
- a4 = int(p.To.Class)
+ a4 := int(p.To.Class)
if a4 == 0 {
a4 = aclass(ctxt, &p.To) + 1
p.To.Class = int8(a4)
}
a4--
- a2 = C_NONE
+ a2 := C_NONE
if p.Reg != 0 {
a2 = C_REG
}
//print("oplook %P %d %d %d %d\n", p, a1, a2, a3, a4);
- r = int(p.As)
+ r := int(p.As)
- o = oprange[r].start
+ o := oprange[r].start
if o == nil {
o = oprange[r].stop /* just generate an error */
}
- e = oprange[r].stop
- c1 = xcmp[a1][:]
- c3 = xcmp[a3][:]
- c4 = xcmp[a4][:]
+ e := oprange[r].stop
+ c1 := xcmp[a1][:]
+ c3 := xcmp[a3][:]
+ c4 := xcmp[a4][:]
for ; -cap(o) < -cap(e); o = o[1:] {
if int(o[0].a2) == a2 {
if c1[o[0].a1] != 0 {
@@ -881,13 +863,9 @@
}
func (x ocmp) Less(i, j int) bool {
- var p1 *Optab
- var p2 *Optab
- var n int
-
- p1 = &x[i]
- p2 = &x[j]
- n = int(p1.as) - int(p2.as)
+ p1 := &x[i]
+ p2 := &x[j]
+ n := int(p1.as) - int(p2.as)
if n != 0 {
return n < 0
}
@@ -911,11 +889,9 @@
}
func buildop(ctxt *obj.Link) {
- var i int
var n int
- var r int
- for i = 0; i < C_NCLASS; i++ {
+ for i := 0; i < C_NCLASS; i++ {
for n = 0; n < C_NCLASS; n++ {
if cmp(n, i) {
xcmp[i][n] = 1
@@ -925,7 +901,8 @@
for n = 0; optab[n].as != obj.AXXX; n++ {
}
sort.Sort(ocmp(optab[:n]))
- for i = 0; i < n; i++ {
+ var r int
+ for i := 0; i < n; i++ {
r = int(optab[i].as)
oprange[r].start = optab[i:]
for int(optab[i].as) == r {
@@ -1393,9 +1370,7 @@
// add R_ADDRPOWER relocation to symbol s for the two instructions o1 and o2.
func addaddrreloc(ctxt *obj.Link, s *obj.LSym, o1 *uint32, o2 *uint32) {
- var rel *obj.Reloc
-
- rel = obj.Addrel(ctxt.Cursym)
+ rel := obj.Addrel(ctxt.Cursym)
rel.Off = int32(ctxt.Pc)
rel.Siz = 8
rel.Sym = s
@@ -1407,13 +1382,11 @@
* 32-bit masks
*/
func getmask(m []byte, v uint32) bool {
- var i int
-
m[1] = 0
m[0] = m[1]
if v != ^uint32(0) && v&(1<<31) != 0 && v&1 != 0 { /* MB > ME */
if getmask(m, ^v) {
- i = int(m[0])
+ i := int(m[0])
m[0] = m[1] + 1
m[1] = byte(i - 1)
return true
@@ -1422,7 +1395,7 @@
return false
}
- for i = 0; i < 32; i++ {
+ for i := 0; i < 32; i++ {
if v&(1<<uint(31-i)) != 0 {
m[0] = byte(i)
for {
@@ -1455,11 +1428,9 @@
* 64-bit masks (rldic etc)
*/
func getmask64(m []byte, v uint64) bool {
- var i int
-
m[1] = 0
m[0] = m[1]
- for i = 0; i < 64; i++ {
+ for i := 0; i < 64; i++ {
if v&(uint64(1)<<uint(63-i)) != 0 {
m[0] = byte(i)
for {
@@ -1489,9 +1460,7 @@
}
func loadu32(r int, d int64) uint32 {
- var v int32
-
- v = int32(d >> 16)
+ v := int32(d >> 16)
if isuint32(uint64(d)) {
return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v))
}
@@ -1506,24 +1475,11 @@
}
func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
- var o1 uint32
- var o2 uint32
- var o3 uint32
- var o4 uint32
- var o5 uint32
- var v int32
- var t int32
- var d int64
- var r int
- var a int
- var mask [2]uint8
- var rel *obj.Reloc
-
- o1 = 0
- o2 = 0
- o3 = 0
- o4 = 0
- o5 = 0
+ o1 := uint32(0)
+ o2 := uint32(0)
+ o3 := uint32(0)
+ o4 := uint32(0)
+ o5 := uint32(0)
//print("%P => case %d\n", p, o->type);
switch o.type_ {
@@ -1536,7 +1492,7 @@
case 1: /* mov r1,r2 ==> OR Rs,Rs,Ra */
if p.To.Reg == REGZERO && p.From.Type == obj.TYPE_CONST {
- v = regoff(ctxt, &p.From)
+ v := regoff(ctxt, &p.From)
if r0iszero != 0 /*TypeKind(100016)*/ && v != 0 {
//nerrors--;
ctxt.Diag("literal operation on R0\n%v", p)
@@ -1549,7 +1505,7 @@
o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg))
case 2: /* int/cr/fp op Rb,[Ra],Rd */
- r = int(p.Reg)
+ r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
@@ -1557,17 +1513,17 @@
o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
case 3: /* mov $soreg/addcon/ucon, r ==> addis/addi $i,reg',r */
- d = vregoff(ctxt, &p.From)
+ d := vregoff(ctxt, &p.From)
- v = int32(d)
- r = int(p.From.Reg)
+ v := int32(d)
+ r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) {
ctxt.Diag("literal operation on R0\n%v", p)
}
- a = OP_ADDI
+ a := OP_ADDI
if o.a1 == C_UCON {
if d&0xffff != 0 {
log.Fatalf("invalid handling of %v", p)
@@ -1588,9 +1544,9 @@
o1 = AOP_IRR(uint32(a), uint32(p.To.Reg), uint32(r), uint32(v))
case 4: /* add/mul $scon,[r1],r2 */
- v = regoff(ctxt, &p.From)
+ v := regoff(ctxt, &p.From)
- r = int(p.Reg)
+ r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
@@ -1606,7 +1562,7 @@
o1 = uint32(oprrr(ctxt, int(p.As)))
case 6: /* logical op Rb,[Rs,]Ra; no literal */
- r = int(p.Reg)
+ r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
@@ -1614,12 +1570,12 @@
o1 = LOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
case 7: /* mov r, soreg ==> stw o(r) */
- r = int(p.To.Reg)
+ r := int(p.To.Reg)
if r == 0 {
r = int(o.param)
}
- v = regoff(ctxt, &p.To)
+ v := regoff(ctxt, &p.To)
if p.To.Type == obj.TYPE_MEM && p.Reg != 0 {
if v != 0 {
ctxt.Diag("illegal indexed instruction\n%v", p)
@@ -1633,12 +1589,12 @@
}
case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r) */
- r = int(p.From.Reg)
+ r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
- v = regoff(ctxt, &p.From)
+ v := regoff(ctxt, &p.From)
if p.From.Type == obj.TYPE_MEM && p.Reg != 0 {
if v != 0 {
ctxt.Diag("illegal indexed instruction\n%v", p)
@@ -1652,12 +1608,12 @@
}
case 9: /* movb soreg, r ==> lbz o(r),r2; extsb r2,r2 */
- r = int(p.From.Reg)
+ r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
- v = regoff(ctxt, &p.From)
+ v := regoff(ctxt, &p.From)
if p.From.Type == obj.TYPE_MEM && p.Reg != 0 {
if v != 0 {
ctxt.Diag("illegal indexed instruction\n%v", p)
@@ -1669,7 +1625,7 @@
o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */
- r = int(p.Reg)
+ r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
@@ -1677,7 +1633,7 @@
o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r))
case 11: /* br/bl lbra */
- v = 0
+ v := int32(0)
if p.Pcond != nil {
v = int32(p.Pcond.Pc - p.Pc)
@@ -1693,7 +1649,7 @@
o1 = OP_BR(uint32(opirr(ctxt, int(p.As))), uint32(v), 0)
if p.To.Sym != nil {
- rel = obj.Addrel(ctxt.Cursym)
+ rel := obj.Addrel(ctxt.Cursym)
rel.Off = int32(ctxt.Pc)
rel.Siz = 4
rel.Sym = p.To.Sym
@@ -1709,7 +1665,7 @@
case 12: /* movb r,r (extsb); movw r,r (extsw) */
if p.To.Reg == REGZERO && p.From.Type == obj.TYPE_CONST {
- v = regoff(ctxt, &p.From)
+ v := regoff(ctxt, &p.From)
if r0iszero != 0 /*TypeKind(100016)*/ && v != 0 {
ctxt.Diag("literal operation on R0\n%v", p)
}
@@ -1738,13 +1694,15 @@
}
case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */
- r = int(p.Reg)
+ r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
- d = vregoff(ctxt, &p.From3)
+ d := vregoff(ctxt, &p.From3)
+ var mask [2]uint8
maskgen64(ctxt, p, mask[:], uint64(d))
+ var a int
switch p.As {
case ARLDCL,
ARLDCLCC:
@@ -1773,16 +1731,16 @@
case 17, /* bc bo,bi,lbra (same for now) */
16: /* bc bo,bi,sbra */
- a = 0
+ a := 0
if p.From.Type == obj.TYPE_CONST {
a = int(regoff(ctxt, &p.From))
}
- r = int(p.Reg)
+ r := int(p.Reg)
if r == 0 {
r = 0
}
- v = 0
+ v := int32(0)
if p.Pcond != nil {
v = int32(p.Pcond.Pc - p.Pc)
}
@@ -1797,12 +1755,13 @@
o1 = OP_BC(uint32(opirr(ctxt, int(p.As))), uint32(a), uint32(r), uint32(v), 0)
case 15: /* br/bl (r) => mov r,lr; br/bl (lr) */
+ var v int32
if p.As == ABC || p.As == ABCL {
v = regoff(ctxt, &p.To) & 31
} else {
v = 20 /* unconditional */
}
- r = int(p.Reg)
+ r := int(p.Reg)
if r == 0 {
r = 0
}
@@ -1814,12 +1773,13 @@
o2 = OP_BCR(o2, uint32(v), uint32(r))
case 18: /* br/bl (lr/ctr); bc/bcl bo,bi,(lr/ctr) */
+ var v int32
if p.As == ABC || p.As == ABCL {
v = regoff(ctxt, &p.From) & 31
} else {
v = 20 /* unconditional */
}
- r = int(p.Reg)
+ r := int(p.Reg)
if r == 0 {
r = 0
}
@@ -1841,7 +1801,7 @@
o1 = OP_BCR(o1, uint32(v), uint32(r))
case 19: /* mov $lcon,r ==> cau+or */
- d = vregoff(ctxt, &p.From)
+ d := vregoff(ctxt, &p.From)
if p.From.Sym == nil {
o1 = loadu32(int(p.To.Reg), d)
@@ -1855,9 +1815,9 @@
//if(dlm) reloc(&p->from, p->pc, 0);
case 20: /* add $ucon,,r */
- v = regoff(ctxt, &p.From)
+ v := regoff(ctxt, &p.From)
- r = int(p.Reg)
+ r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
@@ -1870,10 +1830,10 @@
if p.To.Reg == REGTMP || p.Reg == REGTMP {
ctxt.Diag("cant synthesize large constant\n%v", p)
}
- d = vregoff(ctxt, &p.From)
+ d := vregoff(ctxt, &p.From)
o1 = loadu32(REGTMP, d)
o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
- r = int(p.Reg)
+ r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
@@ -1888,10 +1848,10 @@
if p.To.Reg == REGTMP || p.Reg == REGTMP {
ctxt.Diag("cant synthesize large constant\n%v", p)
}
- d = vregoff(ctxt, &p.From)
+ d := vregoff(ctxt, &p.From)
o1 = loadu32(REGTMP, d)
o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
- r = int(p.Reg)
+ r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
@@ -1905,17 +1865,18 @@
/*24*/
case 25:
/* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] -> rldicl */
- v = regoff(ctxt, &p.From)
+ v := regoff(ctxt, &p.From)
if v < 0 {
v = 0
} else if v > 63 {
v = 63
}
- r = int(p.Reg)
+ r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
+ var a int
switch p.As {
case ASLD,
ASLDCC:
@@ -1950,8 +1911,8 @@
if p.To.Reg == REGTMP {
ctxt.Diag("can't synthesize large constant\n%v", p)
}
- v = regoff(ctxt, &p.From)
- r = int(p.From.Reg)
+ v := regoff(ctxt, &p.From)
+ r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
@@ -1959,16 +1920,16 @@
o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), REGTMP, uint32(v))
case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */
- v = regoff(ctxt, &p.From3)
+ v := regoff(ctxt, &p.From3)
- r = int(p.From.Reg)
+ r := int(p.From.Reg)
o1 = AOP_IRR(uint32(opirr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), uint32(v))
case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */
if p.To.Reg == REGTMP || p.From.Reg == REGTMP {
ctxt.Diag("can't synthesize large constant\n%v", p)
}
- v = regoff(ctxt, &p.From3)
+ v := regoff(ctxt, &p.From3)
o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16)
o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(v))
o3 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP)
@@ -1979,10 +1940,12 @@
//if(dlm) reloc(&p->from3, p->pc, 0);
case 29: /* rldic[lr]? $sh,s,$mask,a -- left, right, plain give different masks */
- v = regoff(ctxt, &p.From)
+ v := regoff(ctxt, &p.From)
- d = vregoff(ctxt, &p.From3)
+ d := vregoff(ctxt, &p.From3)
+ var mask [2]uint8
maskgen64(ctxt, p, mask[:], uint64(d))
+ var a int
switch p.As {
case ARLDC,
ARLDCCC:
@@ -2020,9 +1983,10 @@
}
case 30: /* rldimi $sh,s,$mask,a */
- v = regoff(ctxt, &p.From)
+ v := regoff(ctxt, &p.From)
- d = vregoff(ctxt, &p.From3)
+ d := vregoff(ctxt, &p.From3)
+ var mask [2]uint8
maskgen64(ctxt, p, mask[:], uint64(d))
if int32(mask[1]) != (63 - v) {
ctxt.Diag("invalid mask for shift: %x (shift %d)\n%v", uint64(d), v, p)
@@ -2037,7 +2001,7 @@
}
case 31: /* dword */
- d = vregoff(ctxt, &p.From)
+ d := vregoff(ctxt, &p.From)
if ctxt.Arch.ByteOrder == binary.BigEndian {
o1 = uint32(d >> 32)
@@ -2048,7 +2012,7 @@
}
if p.From.Sym != nil {
- rel = obj.Addrel(ctxt.Cursym)
+ rel := obj.Addrel(ctxt.Cursym)
rel.Off = int32(ctxt.Pc)
rel.Siz = 8
rel.Sym = p.From.Sym
@@ -2059,7 +2023,7 @@
}
case 32: /* fmul frc,fra,frd */
- r = int(p.Reg)
+ r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
@@ -2067,7 +2031,7 @@
o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6
case 33: /* fabs [frb,]frd; fmr. frb,frd */
- r = int(p.From.Reg)
+ r := int(p.From.Reg)
if oclass(&p.From) == C_NONE {
r = int(p.To.Reg)
@@ -2078,9 +2042,9 @@
o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.From3.Reg)&31)<<6
case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */
- v = regoff(ctxt, &p.To)
+ v := regoff(ctxt, &p.To)
- r = int(p.To.Reg)
+ r := int(p.To.Reg)
if r == 0 {
r = int(o.param)
}
@@ -2088,9 +2052,9 @@
o2 = AOP_IRR(uint32(opstore(ctxt, int(p.As))), uint32(p.From.Reg), REGTMP, uint32(v))
case 36: /* mov bz/h/hz lext/lauto/lreg,r ==> lbz/lha/lhz etc */
- v = regoff(ctxt, &p.From)
+ v := regoff(ctxt, &p.From)
- r = int(p.From.Reg)
+ r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
@@ -2098,9 +2062,9 @@
o2 = AOP_IRR(uint32(opload(ctxt, int(p.As))), uint32(p.To.Reg), REGTMP, uint32(v))
case 37: /* movb lext/lauto/lreg,r ==> lbz o(reg),r; extsb r */
- v = regoff(ctxt, &p.From)
+ v := regoff(ctxt, &p.From)
- r = int(p.From.Reg)
+ r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
@@ -2118,7 +2082,7 @@
o1 = AOP_RRR(uint32(opirr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(regoff(ctxt, &p.From3))&0x7F)<<11
case 43: /* unary indexed source: dcbf (b); dcbf (a+b) */
- r = int(p.Reg)
+ r := int(p.Reg)
if r == 0 {
r = 0
@@ -2126,7 +2090,7 @@
o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), 0, uint32(r), uint32(p.From.Reg))
case 44: /* indexed store */
- r = int(p.Reg)
+ r := int(p.Reg)
if r == 0 {
r = 0
@@ -2134,7 +2098,7 @@
o1 = AOP_RRR(uint32(opstorex(ctxt, int(p.As))), uint32(p.From.Reg), uint32(r), uint32(p.To.Reg))
case 45: /* indexed load */
- r = int(p.Reg)
+ r := int(p.Reg)
if r == 0 {
r = 0
@@ -2145,7 +2109,7 @@
o1 = uint32(oprrr(ctxt, int(p.As)))
case 47: /* op Ra, Rd; also op [Ra,] Rd */
- r = int(p.From.Reg)
+ r := int(p.From.Reg)
if r == 0 {
r = int(p.To.Reg)
@@ -2153,7 +2117,7 @@
o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), 0)
case 48: /* op Rs, Ra */
- r = int(p.From.Reg)
+ r := int(p.From.Reg)
if r == 0 {
r = int(p.To.Reg)
@@ -2162,20 +2126,20 @@
case 49: /* op Rb; op $n, Rb */
if p.From.Type != obj.TYPE_REG { /* tlbie $L, rB */
- v = regoff(ctxt, &p.From) & 1
+ v := regoff(ctxt, &p.From) & 1
o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21
} else {
o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), 0, 0, uint32(p.From.Reg))
}
case 50: /* rem[u] r1[,r2],r3 */
- r = int(p.Reg)
+ r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
- v = oprrr(ctxt, int(p.As))
- t = v & (1<<10 | 1) /* OE|Rc */
+ v := oprrr(ctxt, int(p.As))
+ t := v & (1<<10 | 1) /* OE|Rc */
o1 = AOP_RRR(uint32(v)&^uint32(t), REGTMP, uint32(r), uint32(p.From.Reg))
o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg))
o3 = AOP_RRR(OP_SUBF|uint32(t), uint32(p.To.Reg), REGTMP, uint32(r))
@@ -2187,19 +2151,19 @@
}
case 51: /* remd[u] r1[,r2],r3 */
- r = int(p.Reg)
+ r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
- v = oprrr(ctxt, int(p.As))
- t = v & (1<<10 | 1) /* OE|Rc */
+ v := oprrr(ctxt, int(p.As))
+ t := v & (1<<10 | 1) /* OE|Rc */
o1 = AOP_RRR(uint32(v)&^uint32(t), REGTMP, uint32(r), uint32(p.From.Reg))
o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg))
o3 = AOP_RRR(OP_SUBF|uint32(t), uint32(p.To.Reg), REGTMP, uint32(r))
case 52: /* mtfsbNx cr(n) */
- v = regoff(ctxt, &p.From) & 31
+ v := regoff(ctxt, &p.From) & 31
o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(v), 0, 0)
@@ -2221,9 +2185,9 @@
o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), 0, uint32(p.From.Reg))
case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */
- v = regoff(ctxt, &p.From)
+ v := regoff(ctxt, &p.From)
- r = int(p.Reg)
+ r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
@@ -2233,9 +2197,9 @@
}
case 57: /* slw $sh,[s,]a -> rlwinm ... */
- v = regoff(ctxt, &p.From)
+ v := regoff(ctxt, &p.From)
- r = int(p.Reg)
+ r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
@@ -2252,6 +2216,7 @@
} else if v > 32 {
v = 32
}
+ var mask [2]uint8
if p.As == ASRW || p.As == ASRWCC { /* shift right */
mask[0] = uint8(v)
mask[1] = 31
@@ -2267,48 +2232,51 @@
}
case 58: /* logical $andcon,[s],a */
- v = regoff(ctxt, &p.From)
+ v := regoff(ctxt, &p.From)
- r = int(p.Reg)
+ r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
o1 = LOP_IRR(uint32(opirr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), uint32(v))
case 59: /* or/and $ucon,,r */
- v = regoff(ctxt, &p.From)
+ v := regoff(ctxt, &p.From)
- r = int(p.Reg)
+ r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
o1 = LOP_IRR(uint32(opirr(ctxt, int(p.As)+ALAST)), uint32(p.To.Reg), uint32(r), uint32(v)>>16) /* oris, xoris, andis */
case 60: /* tw to,a,b */
- r = int(regoff(ctxt, &p.From) & 31)
+ r := int(regoff(ctxt, &p.From) & 31)
o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(r), uint32(p.Reg), uint32(p.To.Reg))
case 61: /* tw to,a,$simm */
- r = int(regoff(ctxt, &p.From) & 31)
+ r := int(regoff(ctxt, &p.From) & 31)
- v = regoff(ctxt, &p.To)
+ v := regoff(ctxt, &p.To)
o1 = AOP_IRR(uint32(opirr(ctxt, int(p.As))), uint32(r), uint32(p.Reg), uint32(v))
case 62: /* rlwmi $sh,s,$mask,a */
- v = regoff(ctxt, &p.From)
+ v := regoff(ctxt, &p.From)
+ var mask [2]uint8
maskgen(ctxt, p, mask[:], uint32(regoff(ctxt, &p.From3)))
o1 = AOP_RRR(uint32(opirr(ctxt, int(p.As))), uint32(p.Reg), uint32(p.To.Reg), uint32(v))
o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
case 63: /* rlwmi b,s,$mask,a */
+ var mask [2]uint8
maskgen(ctxt, p, mask[:], uint32(regoff(ctxt, &p.From3)))
o1 = AOP_RRR(uint32(opirr(ctxt, int(p.As))), uint32(p.Reg), uint32(p.To.Reg), uint32(p.From.Reg))
o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
case 64: /* mtfsf fr[, $m] {,fpcsr} */
+ var v int32
if p.From3.Type != obj.TYPE_NONE {
v = regoff(ctxt, &p.From3) & 255
} else {
@@ -2323,6 +2291,8 @@
o1 = OP_MTFSFI | (uint32(p.To.Reg)&15)<<23 | (uint32(regoff(ctxt, &p.From))&31)<<12
case 66: /* mov spr,r1; mov r1,spr, also dcr */
+ var r int
+ var v int32
if REG_R0 <= p.From.Reg && p.From.Reg <= REG_R31 {
r = int(p.From.Reg)
v = int32(p.To.Reg)
@@ -2351,13 +2321,14 @@
case 68: /* mfcr rD; mfocrf CRM,rD */
if p.From.Type == obj.TYPE_REG && REG_CR0 <= p.From.Reg && p.From.Reg <= REG_CR7 {
- v = 1 << uint(7-(p.To.Reg&7)) /* CR(n) */
+ v := int32(1 << uint(7-(p.To.Reg&7))) /* CR(n) */
o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) | 1<<20 | uint32(v)<<12 /* new form, mfocrf */
} else {
o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) /* old form, whole register */
}
case 69: /* mtcrf CRM,rS */
+ var v int32
if p.From3.Type != obj.TYPE_NONE {
if p.To.Reg != 0 {
ctxt.Diag("can't use both mask and CR(n)\n%v", p)
@@ -2374,6 +2345,7 @@
o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12
case 70: /* [f]cmp r,r,cr*/
+ var r int
if p.Reg == 0 {
r = 0
} else {
@@ -2382,6 +2354,7 @@
o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
case 71: /* cmp[l] r,i,cr*/
+ var r int
if p.Reg == 0 {
r = 0
} else {
@@ -2420,7 +2393,7 @@
/* relocation operations */
case 74:
- v = regoff(ctxt, &p.To)
+ v := regoff(ctxt, &p.To)
o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(high16adjusted(v)))
o2 = AOP_IRR(uint32(opstore(ctxt, int(p.As))), uint32(p.From.Reg), REGTMP, uint32(v))
@@ -2429,7 +2402,7 @@
//if(dlm) reloc(&p->to, p->pc, 1);
case 75:
- v = regoff(ctxt, &p.From)
+ v := regoff(ctxt, &p.From)
o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(high16adjusted(v)))
o2 = AOP_IRR(uint32(opload(ctxt, int(p.As))), uint32(p.To.Reg), REGTMP, uint32(v))
addaddrreloc(ctxt, p.From.Sym, &o1, &o2)
@@ -2437,7 +2410,7 @@
//if(dlm) reloc(&p->from, p->pc, 1);
case 76:
- v = regoff(ctxt, &p.From)
+ v := regoff(ctxt, &p.From)
o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(high16adjusted(v)))
o2 = AOP_IRR(uint32(opload(ctxt, int(p.As))), uint32(p.To.Reg), REGTMP, uint32(v))
addaddrreloc(ctxt, p.From.Sym, &o1, &o2)
diff --git a/src/cmd/internal/obj/ppc64/list9.go b/src/cmd/internal/obj/ppc64/list9.go
index 21a8642..6c62fad 100644
--- a/src/cmd/internal/obj/ppc64/list9.go
+++ b/src/cmd/internal/obj/ppc64/list9.go
@@ -54,14 +54,9 @@
var bigP *obj.Prog
func Pconv(p *obj.Prog) string {
- var str string
- var fp string
+ a := int(p.As)
- var a int
-
- a = int(p.As)
-
- str = ""
+ str := ""
if a == obj.ADATA {
str = fmt.Sprintf("%.5d (%v)\t%v\t%v/%d,%v",
p.Pc, p.Line(), Aconv(a), obj.Dconv(p, &p.From), p.From3.Offset, obj.Dconv(p, &p.To))
@@ -107,23 +102,23 @@
}
if p.Spadj != 0 {
+ var fp string
fp += fmt.Sprintf("%s # spadj=%d", str, p.Spadj)
return fp
}
}
+ var fp string
fp += str
return fp
}
func Aconv(a int) string {
- var s string
- var fp string
-
- s = "???"
+ s := "???"
if a >= obj.AXXX && a < ALAST {
s = Anames[a]
}
+ var fp string
fp += s
return fp
}
@@ -177,13 +172,11 @@
}
func DRconv(a int) string {
- var s string
- var fp string
-
- s = "C_??"
+ s := "C_??"
if a >= C_NONE && a <= C_NCLASS {
s = cnames9[a]
}
+ var fp string
fp += s
return fp
}
diff --git a/src/cmd/internal/obj/ppc64/obj9.go b/src/cmd/internal/obj/ppc64/obj9.go
index a507449..f858bb3 100644
--- a/src/cmd/internal/obj/ppc64/obj9.go
+++ b/src/cmd/internal/obj/ppc64/obj9.go
@@ -37,9 +37,6 @@
)
func progedit(ctxt *obj.Link, p *obj.Prog) {
- var literal string
- var s *obj.LSym
-
p.From.Class = 0
p.To.Class = 0
@@ -59,12 +56,10 @@
switch p.As {
case AFMOVS:
if p.From.Type == obj.TYPE_FCONST {
- var i32 uint32
- var f32 float32
- f32 = float32(p.From.U.Dval)
- i32 = math.Float32bits(f32)
- literal = fmt.Sprintf("$f32.%08x", i32)
- s = obj.Linklookup(ctxt, literal, 0)
+ f32 := float32(p.From.U.Dval)
+ i32 := math.Float32bits(f32)
+ literal := fmt.Sprintf("$f32.%08x", i32)
+ s := obj.Linklookup(ctxt, literal, 0)
s.Size = 4
p.From.Type = obj.TYPE_MEM
p.From.Sym = s
@@ -74,10 +69,9 @@
case AFMOVD:
if p.From.Type == obj.TYPE_FCONST {
- var i64 uint64
- i64 = math.Float64bits(p.From.U.Dval)
- literal = fmt.Sprintf("$f64.%016x", i64)
- s = obj.Linklookup(ctxt, literal, 0)
+ i64 := math.Float64bits(p.From.U.Dval)
+ literal := fmt.Sprintf("$f64.%016x", i64)
+ s := obj.Linklookup(ctxt, literal, 0)
s.Size = 8
p.From.Type = obj.TYPE_MEM
p.From.Sym = s
@@ -88,8 +82,8 @@
// Put >32-bit constants in memory and load them
case AMOVD:
if p.From.Type == obj.TYPE_CONST && p.From.Name == obj.NAME_NONE && p.From.Reg == 0 && int64(int32(p.From.Offset)) != p.From.Offset {
- literal = fmt.Sprintf("$i64.%016x", uint64(p.From.Offset))
- s = obj.Linklookup(ctxt, literal, 0)
+ literal := fmt.Sprintf("$i64.%016x", uint64(p.From.Offset))
+ s := obj.Linklookup(ctxt, literal, 0)
s.Size = 8
p.From.Type = obj.TYPE_MEM
p.From.Sym = s
@@ -121,17 +115,6 @@
}
func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
- var p *obj.Prog
- var q *obj.Prog
- var p1 *obj.Prog
- var p2 *obj.Prog
- var q1 *obj.Prog
- var o int
- var mov int
- var aoffset int
- var textstksiz int64
- var autosize int32
-
if ctxt.Symmorestack[0] == nil {
ctxt.Symmorestack[0] = obj.Linklookup(ctxt, "runtime.morestack", 0)
ctxt.Symmorestack[1] = obj.Linklookup(ctxt, "runtime.morestack_noctxt", 0)
@@ -144,8 +127,8 @@
return
}
- p = cursym.Text
- textstksiz = p.To.Offset
+ p := cursym.Text
+ textstksiz := p.To.Offset
cursym.Args = p.To.U.Argsize
cursym.Locals = int32(textstksiz)
@@ -161,8 +144,9 @@
}
obj.Bflush(ctxt.Bso)
- q = nil
- for p = cursym.Text; p != nil; p = p.Link {
+ q := (*obj.Prog)(nil)
+ var q1 *obj.Prog
+ for p := cursym.Text; p != nil; p = p.Link {
switch p.As {
/* too hard, just leave alone */
case obj.ATEXT:
@@ -326,8 +310,13 @@
}
}
- autosize = 0
- for p = cursym.Text; p != nil; p = p.Link {
+ autosize := int32(0)
+ var aoffset int
+ var mov int
+ var o int
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+ for p := cursym.Text; p != nil; p = p.Link {
o = int(p.As)
switch o {
case obj.ATEXT:
@@ -643,9 +632,6 @@
}
*/
func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, noctxt bool) *obj.Prog {
- var q *obj.Prog
- var q1 *obj.Prog
-
// MOVD g_stackguard(g), R3
p = obj.Appendp(ctxt, p)
@@ -659,7 +645,7 @@
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R3
- q = nil
+ q := (*obj.Prog)(nil)
if framesize <= obj.StackSmall {
// small stack: SP < stackguard
// CMP stackguard, SP
@@ -750,7 +736,7 @@
// q1: BLT done
p = obj.Appendp(ctxt, p)
- q1 = p
+ q1 := p
p.As = ABLT
p.To.Type = obj.TYPE_BRANCH
@@ -795,13 +781,10 @@
}
func follow(ctxt *obj.Link, s *obj.LSym) {
- var firstp *obj.Prog
- var lastp *obj.Prog
-
ctxt.Cursym = s
- firstp = ctxt.NewProg()
- lastp = firstp
+ firstp := ctxt.NewProg()
+ lastp := firstp
xfol(ctxt, s.Text, &lastp)
lastp.Link = nil
s.Text = firstp.Link
diff --git a/src/cmd/internal/obj/sym.go b/src/cmd/internal/obj/sym.go
index d22a339..80ef0bf 100644
--- a/src/cmd/internal/obj/sym.go
+++ b/src/cmd/internal/obj/sym.go
@@ -123,8 +123,6 @@
}
func Linknew(arch *LinkArch) *Link {
- var buf string
-
linksetexp()
ctxt := new(Link)
@@ -137,6 +135,7 @@
ctxt.Windows = 1
}
+ var buf string
buf, _ = os.Getwd()
if buf == "" {
buf = "/???"
diff --git a/src/cmd/internal/obj/x86/asm6.go b/src/cmd/internal/obj/x86/asm6.go
index 5af037f..cb7d524 100644
--- a/src/cmd/internal/obj/x86/asm6.go
+++ b/src/cmd/internal/obj/x86/asm6.go
@@ -1492,16 +1492,6 @@
}
func span6(ctxt *obj.Link, s *obj.LSym) {
- var p *obj.Prog
- var q *obj.Prog
- var c int32
- var v int32
- var loop int32
- var bp []byte
- var n int
- var m int
- var i int
-
ctxt.Cursym = s
if s.P != nil {
@@ -1512,7 +1502,8 @@
instinit()
}
- for p = ctxt.Cursym.Text; p != nil; p = p.Link {
+ var v int32
+ for p := ctxt.Cursym.Text; p != nil; p = p.Link {
if p.To.Type == obj.TYPE_BRANCH {
if p.Pcond == nil {
p.Pcond = p
@@ -1536,7 +1527,8 @@
}
}
- for p = s.Text; p != nil; p = p.Link {
+ var q *obj.Prog
+ for p := s.Text; p != nil; p = p.Link {
p.Back = 2 // use short branches first time through
q = p.Pcond
if q != nil && (q.Back&2 != 0) {
@@ -1562,7 +1554,13 @@
}
}
- n = 0
+ n := 0
+ var bp []byte
+ var c int32
+ var i int
+ var loop int32
+ var m int
+ var p *obj.Prog
for {
loop = 0
for i = 0; i < len(s.R); i++ {
@@ -1686,6 +1684,7 @@
if false { /* debug['a'] > 1 */
fmt.Printf("span1 %s %d (%d tries)\n %.6x", s.Name, s.Size, n, 0)
+ var i int
for i = 0; i < len(s.P); i++ {
fmt.Printf(" %.2x", s.P[i])
if i%16 == 15 {
@@ -1697,10 +1696,8 @@
fmt.Printf("\n")
}
- for i = 0; i < len(s.R); i++ {
- var r *obj.Reloc
-
- r = &s.R[i]
+ for i := 0; i < len(s.R); i++ {
+ r := &s.R[i]
fmt.Printf(" rel %#.4x/%d %s%+d\n", uint32(r.Off), r.Siz, r.Sym.Name, r.Add)
}
}
@@ -1708,9 +1705,8 @@
func instinit() {
var c int
- var i int
- for i = 1; optab[i].as != 0; i++ {
+ for i := 1; optab[i].as != 0; i++ {
c = int(optab[i].as)
if opindex[c] != nil {
log.Fatalf("phase error in optab: %d (%v)", i, Aconv(c))
@@ -1718,7 +1714,7 @@
opindex[c] = &optab[i]
}
- for i = 0; i < Ymax; i++ {
+ for i := 0; i < Ymax; i++ {
ycover[i*Ymax+i] = 1
}
@@ -1783,7 +1779,7 @@
ycover[Ym*Ymax+Yxm] = 1
ycover[Yxr*Ymax+Yxm] = 1
- for i = 0; i < MAXREG; i++ {
+ for i := 0; i < MAXREG; i++ {
reg[i] = -1
if i >= REG_AL && i <= REG_R15B {
reg[i] = (i - REG_AL) & 7
@@ -1888,9 +1884,6 @@
}
func oclass(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) int {
- var v int64
- var l int32
-
// TODO(rsc): This special case is for SHRQ $3, AX:DX,
// which encodes as SHRQ $32(DX*0), AX.
// Similarly SHRQ CX, AX:DX is really SHRQ CX(DX*0), AX.
@@ -1942,7 +1935,7 @@
ctxt.Diag("TYPE_CONST with symbol: %v", obj.Dconv(p, a))
}
- v = a.Offset
+ v := a.Offset
if v == 0 {
return Yi0
}
@@ -1952,7 +1945,7 @@
if v >= -128 && v <= 127 {
return Yi8
}
- l = int32(v)
+ l := int32(v)
if int64(l) == v {
return Ys32 /* can sign extend */
}
@@ -2259,16 +2252,14 @@
}
func relput4(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
- var v int64
var rel obj.Reloc
- var r *obj.Reloc
- v = vaddr(ctxt, p, a, &rel)
+ v := vaddr(ctxt, p, a, &rel)
if rel.Siz != 0 {
if rel.Siz != 4 {
ctxt.Diag("bad reloc")
}
- r = obj.Addrel(ctxt.Cursym)
+ r := obj.Addrel(ctxt.Cursym)
*r = rel
r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
}
@@ -2306,8 +2297,6 @@
}
*/
func vaddr(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, r *obj.Reloc) int64 {
- var s *obj.LSym
-
if r != nil {
*r = obj.Reloc{}
}
@@ -2315,7 +2304,7 @@
switch a.Name {
case obj.NAME_STATIC,
obj.NAME_EXTERN:
- s = a.Sym
+ s := a.Sym
if r == nil {
ctxt.Diag("need reloc for %v", obj.Dconv(p, a))
log.Fatalf("reloc")
@@ -2358,12 +2347,11 @@
}
func asmandsz(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, r int, rex int, m64 int) {
- var v int32
var base int
var rel obj.Reloc
rex &= 0x40 | Rxr
- v = int32(a.Offset)
+ v := int32(a.Offset)
rel.Siz = 0
switch a.Type {
@@ -2394,7 +2382,7 @@
}
if a.Index != REG_NONE && a.Index != REG_TLS {
- base = int(a.Reg)
+ base := int(a.Reg)
switch a.Name {
case obj.NAME_EXTERN,
obj.NAME_STATIC:
@@ -2529,14 +2517,12 @@
putrelv:
if rel.Siz != 0 {
- var r *obj.Reloc
-
if rel.Siz != 4 {
ctxt.Diag("bad rel")
goto bad
}
- r = obj.Addrel(ctxt.Cursym)
+ r := obj.Addrel(ctxt.Cursym)
*r = rel
r.Off = int32(ctxt.Curp.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
}
@@ -2770,34 +2756,16 @@
}
func doasm(ctxt *obj.Link, p *obj.Prog) {
- var o *Optab
- var q *obj.Prog
- var pp obj.Prog
- var t []byte
- var mo []Movtab
- var z int
- var op int
- var ft int
- var tt int
- var xo int
- var l int
- var pre int
- var v int64
- var rel obj.Reloc
- var r *obj.Reloc
- var a *obj.Addr
- var yt ytab
-
ctxt.Curp = p // TODO
- o = opindex[p.As]
+ o := opindex[p.As]
if o == nil {
ctxt.Diag("asmins: missing op %v", p)
return
}
- pre = prefixof(ctxt, &p.From)
+ pre := prefixof(ctxt, &p.From)
if pre != 0 {
ctxt.Andptr[0] = byte(pre)
ctxt.Andptr = ctxt.Andptr[1:]
@@ -2815,593 +2783,776 @@
p.Tt = uint8(oclass(ctxt, p, &p.To))
}
- ft = int(p.Ft) * Ymax
- tt = int(p.Tt) * Ymax
+ ft := int(p.Ft) * Ymax
+ tt := int(p.Tt) * Ymax
- xo = bool2int(o.op[0] == 0x0f)
- z = 0
+ xo := bool2int(o.op[0] == 0x0f)
+ z := 0
+ var a *obj.Addr
+ var l int
+ var op int
+ var q *obj.Prog
+ var r *obj.Reloc
+ var rel obj.Reloc
+ var v int64
+ var yt ytab
for _, yt = range o.ytab {
if ycover[ft+int(yt.from)] != 0 && ycover[tt+int(yt.to)] != 0 {
- goto found
- }
- z += int(yt.zoffset) + xo
- }
- goto domov
+ switch o.prefix {
+ case Pq: /* 16 bit escape and opcode escape */
+ ctxt.Andptr[0] = Pe
+ ctxt.Andptr = ctxt.Andptr[1:]
-found:
- switch o.prefix {
- case Pq: /* 16 bit escape and opcode escape */
- ctxt.Andptr[0] = Pe
- ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = Pm
+ ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = Pm
- ctxt.Andptr = ctxt.Andptr[1:]
+ case Pq3: /* 16 bit escape, Rex.w, and opcode escape */
+ ctxt.Andptr[0] = Pe
+ ctxt.Andptr = ctxt.Andptr[1:]
- case Pq3: /* 16 bit escape, Rex.w, and opcode escape */
- ctxt.Andptr[0] = Pe
- ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = Pw
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = Pm
+ ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = Pw
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = Pm
- ctxt.Andptr = ctxt.Andptr[1:]
+ case Pf2, /* xmm opcode escape */
+ Pf3:
+ ctxt.Andptr[0] = byte(o.prefix)
+ ctxt.Andptr = ctxt.Andptr[1:]
- case Pf2, /* xmm opcode escape */
- Pf3:
- ctxt.Andptr[0] = byte(o.prefix)
- ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = Pm
+ ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = Pm
- ctxt.Andptr = ctxt.Andptr[1:]
+ case Pm: /* opcode escape */
+ ctxt.Andptr[0] = Pm
+ ctxt.Andptr = ctxt.Andptr[1:]
- case Pm: /* opcode escape */
- ctxt.Andptr[0] = Pm
- ctxt.Andptr = ctxt.Andptr[1:]
+ case Pe: /* 16 bit escape */
+ ctxt.Andptr[0] = Pe
+ ctxt.Andptr = ctxt.Andptr[1:]
- case Pe: /* 16 bit escape */
- ctxt.Andptr[0] = Pe
- ctxt.Andptr = ctxt.Andptr[1:]
+ case Pw: /* 64-bit escape */
+ if p.Mode != 64 {
+ ctxt.Diag("asmins: illegal 64: %v", p)
+ }
+ ctxt.Rexflag |= Pw
- case Pw: /* 64-bit escape */
- if p.Mode != 64 {
- ctxt.Diag("asmins: illegal 64: %v", p)
- }
- ctxt.Rexflag |= Pw
+ case Pb: /* botch */
+ bytereg(&p.From, &p.Ft)
- case Pb: /* botch */
- bytereg(&p.From, &p.Ft)
+ bytereg(&p.To, &p.Tt)
- bytereg(&p.To, &p.Tt)
+ case P32: /* 32 bit but illegal if 64-bit mode */
+ if p.Mode == 64 {
+ ctxt.Diag("asmins: illegal in 64-bit mode: %v", p)
+ }
- case P32: /* 32 bit but illegal if 64-bit mode */
- if p.Mode == 64 {
- ctxt.Diag("asmins: illegal in 64-bit mode: %v", p)
- }
+ case Py: /* 64-bit only, no prefix */
+ if p.Mode != 64 {
+ ctxt.Diag("asmins: illegal in %d-bit mode: %v", p.Mode, p)
+ }
+ }
- case Py: /* 64-bit only, no prefix */
- if p.Mode != 64 {
- ctxt.Diag("asmins: illegal in %d-bit mode: %v", p.Mode, p)
- }
- }
-
- if z >= len(o.op) {
- log.Fatalf("asmins bad table %v", p)
- }
- op = int(o.op[z])
- if op == 0x0f {
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- z++
- op = int(o.op[z])
- }
-
- switch yt.zcase {
- default:
- ctxt.Diag("asmins: unknown z %d %v", yt.zcase, p)
- return
-
- case Zpseudo:
- break
-
- case Zlit:
- for ; ; z++ {
+ if z >= len(o.op) {
+ log.Fatalf("asmins bad table %v", p)
+ }
op = int(o.op[z])
- if op == 0 {
+ if op == 0x0f {
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ z++
+ op = int(o.op[z])
+ }
+
+ switch yt.zcase {
+ default:
+ ctxt.Diag("asmins: unknown z %d %v", yt.zcase, p)
+ return
+
+ case Zpseudo:
break
- }
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- }
- case Zlitm_r:
- for ; ; z++ {
- op = int(o.op[z])
- if op == 0 {
- break
- }
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- }
- asmand(ctxt, p, &p.From, &p.To)
-
- case Zmb_r:
- bytereg(&p.From, &p.Ft)
- fallthrough
-
- /* fall through */
- case Zm_r:
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
-
- asmand(ctxt, p, &p.From, &p.To)
-
- case Zm2_r:
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(o.op[z+1])
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &p.From, &p.To)
-
- case Zm_r_xm:
- mediaop(ctxt, o, op, int(yt.zoffset), z)
- asmand(ctxt, p, &p.From, &p.To)
-
- case Zm_r_xm_nr:
- ctxt.Rexflag = 0
- mediaop(ctxt, o, op, int(yt.zoffset), z)
- asmand(ctxt, p, &p.From, &p.To)
-
- case Zm_r_i_xm:
- mediaop(ctxt, o, op, int(yt.zoffset), z)
- asmand(ctxt, p, &p.From, &p.To)
- ctxt.Andptr[0] = byte(p.To.Offset)
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case Zm_r_3d:
- ctxt.Andptr[0] = 0x0f
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = 0x0f
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &p.From, &p.To)
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case Zibm_r:
- for {
- tmp1 := z
- z++
- op = int(o.op[tmp1])
- if op == 0 {
- break
- }
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- }
- asmand(ctxt, p, &p.From, &p.To)
- ctxt.Andptr[0] = byte(p.To.Offset)
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case Zaut_r:
- ctxt.Andptr[0] = 0x8d
- ctxt.Andptr = ctxt.Andptr[1:] /* leal */
- if p.From.Type != obj.TYPE_ADDR {
- ctxt.Diag("asmins: Zaut sb type ADDR")
- }
- p.From.Type = obj.TYPE_MEM
- asmand(ctxt, p, &p.From, &p.To)
- p.From.Type = obj.TYPE_ADDR
-
- case Zm_o:
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- asmando(ctxt, p, &p.From, int(o.op[z+1]))
-
- case Zr_m:
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &p.To, &p.From)
-
- case Zr_m_xm:
- mediaop(ctxt, o, op, int(yt.zoffset), z)
- asmand(ctxt, p, &p.To, &p.From)
-
- case Zr_m_xm_nr:
- ctxt.Rexflag = 0
- mediaop(ctxt, o, op, int(yt.zoffset), z)
- asmand(ctxt, p, &p.To, &p.From)
-
- case Zr_m_i_xm:
- mediaop(ctxt, o, op, int(yt.zoffset), z)
- asmand(ctxt, p, &p.To, &p.From)
- ctxt.Andptr[0] = byte(p.From.Offset)
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case Zo_m:
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- asmando(ctxt, p, &p.To, int(o.op[z+1]))
-
- case Zcallindreg:
- r = obj.Addrel(ctxt.Cursym)
- r.Off = int32(p.Pc)
- r.Type = obj.R_CALLIND
- r.Siz = 0
- fallthrough
-
- // fallthrough
- case Zo_m64:
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
-
- asmandsz(ctxt, p, &p.To, int(o.op[z+1]), 0, 1)
-
- case Zm_ibo:
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- asmando(ctxt, p, &p.From, int(o.op[z+1]))
- ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.To, nil))
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case Zibo_m:
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- asmando(ctxt, p, &p.To, int(o.op[z+1]))
- ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.From, nil))
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case Zibo_m_xm:
- z = mediaop(ctxt, o, op, int(yt.zoffset), z)
- asmando(ctxt, p, &p.To, int(o.op[z+1]))
- ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.From, nil))
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case Z_ib,
- Zib_:
- if yt.zcase == Zib_ {
- a = &p.From
- } else {
- a = &p.To
- }
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(vaddr(ctxt, p, a, nil))
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case Zib_rp:
- ctxt.Rexflag |= regrex[p.To.Reg] & (Rxb | 0x40)
- ctxt.Andptr[0] = byte(op + reg[p.To.Reg])
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.From, nil))
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case Zil_rp:
- ctxt.Rexflag |= regrex[p.To.Reg] & Rxb
- ctxt.Andptr[0] = byte(op + reg[p.To.Reg])
- ctxt.Andptr = ctxt.Andptr[1:]
- if o.prefix == Pe {
- v = vaddr(ctxt, p, &p.From, nil)
- ctxt.Andptr[0] = byte(v)
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v >> 8)
- ctxt.Andptr = ctxt.Andptr[1:]
- } else {
- relput4(ctxt, p, &p.From)
- }
-
- case Zo_iw:
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- if p.From.Type != obj.TYPE_NONE {
- v = vaddr(ctxt, p, &p.From, nil)
- ctxt.Andptr[0] = byte(v)
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v >> 8)
- ctxt.Andptr = ctxt.Andptr[1:]
- }
-
- case Ziq_rp:
- v = vaddr(ctxt, p, &p.From, &rel)
- l = int(v >> 32)
- if l == 0 && rel.Siz != 8 {
- //p->mark |= 0100;
- //print("zero: %llux %P\n", v, p);
- ctxt.Rexflag &^= (0x40 | Rxw)
-
- ctxt.Rexflag |= regrex[p.To.Reg] & Rxb
- ctxt.Andptr[0] = byte(0xb8 + reg[p.To.Reg])
- ctxt.Andptr = ctxt.Andptr[1:]
- if rel.Type != 0 {
- r = obj.Addrel(ctxt.Cursym)
- *r = rel
- r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
- }
-
- put4(ctxt, int32(v))
- } else if l == -1 && uint64(v)&(uint64(1)<<31) != 0 { /* sign extend */
-
- //p->mark |= 0100;
- //print("sign: %llux %P\n", v, p);
- ctxt.Andptr[0] = 0xc7
- ctxt.Andptr = ctxt.Andptr[1:]
-
- asmando(ctxt, p, &p.To, 0)
- put4(ctxt, int32(v)) /* need all 8 */
- } else {
- //print("all: %llux %P\n", v, p);
- ctxt.Rexflag |= regrex[p.To.Reg] & Rxb
-
- ctxt.Andptr[0] = byte(op + reg[p.To.Reg])
- ctxt.Andptr = ctxt.Andptr[1:]
- if rel.Type != 0 {
- r = obj.Addrel(ctxt.Cursym)
- *r = rel
- r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
- }
-
- put8(ctxt, v)
- }
-
- case Zib_rr:
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &p.To, &p.To)
- ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.From, nil))
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case Z_il,
- Zil_:
- if yt.zcase == Zil_ {
- a = &p.From
- } else {
- a = &p.To
- }
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- if o.prefix == Pe {
- v = vaddr(ctxt, p, a, nil)
- ctxt.Andptr[0] = byte(v)
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v >> 8)
- ctxt.Andptr = ctxt.Andptr[1:]
- } else {
- relput4(ctxt, p, a)
- }
-
- case Zm_ilo,
- Zilo_m:
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- if yt.zcase == Zilo_m {
- a = &p.From
- asmando(ctxt, p, &p.To, int(o.op[z+1]))
- } else {
- a = &p.To
- asmando(ctxt, p, &p.From, int(o.op[z+1]))
- }
-
- if o.prefix == Pe {
- v = vaddr(ctxt, p, a, nil)
- ctxt.Andptr[0] = byte(v)
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v >> 8)
- ctxt.Andptr = ctxt.Andptr[1:]
- } else {
- relput4(ctxt, p, a)
- }
-
- case Zil_rr:
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &p.To, &p.To)
- if o.prefix == Pe {
- v = vaddr(ctxt, p, &p.From, nil)
- ctxt.Andptr[0] = byte(v)
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v >> 8)
- ctxt.Andptr = ctxt.Andptr[1:]
- } else {
- relput4(ctxt, p, &p.From)
- }
-
- case Z_rp:
- ctxt.Rexflag |= regrex[p.To.Reg] & (Rxb | 0x40)
- ctxt.Andptr[0] = byte(op + reg[p.To.Reg])
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case Zrp_:
- ctxt.Rexflag |= regrex[p.From.Reg] & (Rxb | 0x40)
- ctxt.Andptr[0] = byte(op + reg[p.From.Reg])
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case Zclr:
- ctxt.Rexflag &^= Pw
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &p.To, &p.To)
-
- case Zcall:
- if p.To.Sym == nil {
- ctxt.Diag("call without target")
- log.Fatalf("bad code")
- }
-
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- r = obj.Addrel(ctxt.Cursym)
- r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
- r.Sym = p.To.Sym
- r.Add = p.To.Offset
- r.Type = obj.R_CALL
- r.Siz = 4
- put4(ctxt, 0)
-
- // TODO: jump across functions needs reloc
- case Zbr,
- Zjmp,
- Zloop:
- if p.To.Sym != nil {
- if yt.zcase != Zjmp {
- ctxt.Diag("branch to ATEXT")
- log.Fatalf("bad code")
- }
-
- ctxt.Andptr[0] = byte(o.op[z+1])
- ctxt.Andptr = ctxt.Andptr[1:]
- r = obj.Addrel(ctxt.Cursym)
- r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
- r.Sym = p.To.Sym
- r.Type = obj.R_PCREL
- r.Siz = 4
- put4(ctxt, 0)
- break
- }
-
- // Assumes q is in this function.
- // TODO: Check in input, preserve in brchain.
-
- // Fill in backward jump now.
- q = p.Pcond
-
- if q == nil {
- ctxt.Diag("jmp/branch/loop without target")
- log.Fatalf("bad code")
- }
-
- if p.Back&1 != 0 {
- v = q.Pc - (p.Pc + 2)
- if v >= -128 {
- if p.As == AJCXZL {
- ctxt.Andptr[0] = 0x67
+ case Zlit:
+ for ; ; z++ {
+ op = int(o.op[z])
+ if op == 0 {
+ break
+ }
+ ctxt.Andptr[0] = byte(op)
ctxt.Andptr = ctxt.Andptr[1:]
}
+
+ case Zlitm_r:
+ for ; ; z++ {
+ op = int(o.op[z])
+ if op == 0 {
+ break
+ }
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+ asmand(ctxt, p, &p.From, &p.To)
+
+ case Zmb_r:
+ bytereg(&p.From, &p.Ft)
+ fallthrough
+
+ /* fall through */
+ case Zm_r:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ asmand(ctxt, p, &p.From, &p.To)
+
+ case Zm2_r:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(o.op[z+1])
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.From, &p.To)
+
+ case Zm_r_xm:
+ mediaop(ctxt, o, op, int(yt.zoffset), z)
+ asmand(ctxt, p, &p.From, &p.To)
+
+ case Zm_r_xm_nr:
+ ctxt.Rexflag = 0
+ mediaop(ctxt, o, op, int(yt.zoffset), z)
+ asmand(ctxt, p, &p.From, &p.To)
+
+ case Zm_r_i_xm:
+ mediaop(ctxt, o, op, int(yt.zoffset), z)
+ asmand(ctxt, p, &p.From, &p.To)
+ ctxt.Andptr[0] = byte(p.To.Offset)
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case Zm_r_3d:
+ ctxt.Andptr[0] = 0x0f
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = 0x0f
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.From, &p.To)
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case Zibm_r:
+ for {
+ tmp1 := z
+ z++
+ op = int(o.op[tmp1])
+ if op == 0 {
+ break
+ }
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+ asmand(ctxt, p, &p.From, &p.To)
+ ctxt.Andptr[0] = byte(p.To.Offset)
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case Zaut_r:
+ ctxt.Andptr[0] = 0x8d
+ ctxt.Andptr = ctxt.Andptr[1:] /* leal */
+ if p.From.Type != obj.TYPE_ADDR {
+ ctxt.Diag("asmins: Zaut sb type ADDR")
+ }
+ p.From.Type = obj.TYPE_MEM
+ asmand(ctxt, p, &p.From, &p.To)
+ p.From.Type = obj.TYPE_ADDR
+
+ case Zm_o:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmando(ctxt, p, &p.From, int(o.op[z+1]))
+
+ case Zr_m:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.To, &p.From)
+
+ case Zr_m_xm:
+ mediaop(ctxt, o, op, int(yt.zoffset), z)
+ asmand(ctxt, p, &p.To, &p.From)
+
+ case Zr_m_xm_nr:
+ ctxt.Rexflag = 0
+ mediaop(ctxt, o, op, int(yt.zoffset), z)
+ asmand(ctxt, p, &p.To, &p.From)
+
+ case Zr_m_i_xm:
+ mediaop(ctxt, o, op, int(yt.zoffset), z)
+ asmand(ctxt, p, &p.To, &p.From)
+ ctxt.Andptr[0] = byte(p.From.Offset)
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case Zo_m:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmando(ctxt, p, &p.To, int(o.op[z+1]))
+
+ case Zcallindreg:
+ r = obj.Addrel(ctxt.Cursym)
+ r.Off = int32(p.Pc)
+ r.Type = obj.R_CALLIND
+ r.Siz = 0
+ fallthrough
+
+ // fallthrough
+ case Zo_m64:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ asmandsz(ctxt, p, &p.To, int(o.op[z+1]), 0, 1)
+
+ case Zm_ibo:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmando(ctxt, p, &p.From, int(o.op[z+1]))
+ ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.To, nil))
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case Zibo_m:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmando(ctxt, p, &p.To, int(o.op[z+1]))
+ ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.From, nil))
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case Zibo_m_xm:
+ z = mediaop(ctxt, o, op, int(yt.zoffset), z)
+ asmando(ctxt, p, &p.To, int(o.op[z+1]))
+ ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.From, nil))
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case Z_ib,
+ Zib_:
+ if yt.zcase == Zib_ {
+ a = &p.From
+ } else {
+ a = &p.To
+ }
ctxt.Andptr[0] = byte(op)
ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v)
+ ctxt.Andptr[0] = byte(vaddr(ctxt, p, a, nil))
ctxt.Andptr = ctxt.Andptr[1:]
- } else if yt.zcase == Zloop {
- ctxt.Diag("loop too far: %v", p)
- } else {
- v -= 5 - 2
- if yt.zcase == Zbr {
- ctxt.Andptr[0] = 0x0f
+
+ case Zib_rp:
+ ctxt.Rexflag |= regrex[p.To.Reg] & (Rxb | 0x40)
+ ctxt.Andptr[0] = byte(op + reg[p.To.Reg])
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.From, nil))
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case Zil_rp:
+ ctxt.Rexflag |= regrex[p.To.Reg] & Rxb
+ ctxt.Andptr[0] = byte(op + reg[p.To.Reg])
+ ctxt.Andptr = ctxt.Andptr[1:]
+ if o.prefix == Pe {
+ v = vaddr(ctxt, p, &p.From, nil)
+ ctxt.Andptr[0] = byte(v)
ctxt.Andptr = ctxt.Andptr[1:]
- v--
+ ctxt.Andptr[0] = byte(v >> 8)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ } else {
+ relput4(ctxt, p, &p.From)
}
- ctxt.Andptr[0] = byte(o.op[z+1])
+ case Zo_iw:
+ ctxt.Andptr[0] = byte(op)
ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v)
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v >> 8)
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v >> 16)
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v >> 24)
- ctxt.Andptr = ctxt.Andptr[1:]
- }
-
- break
- }
-
- // Annotate target; will fill in later.
- p.Forwd = q.Comefrom
-
- q.Comefrom = p
- if p.Back&2 != 0 { // short
- if p.As == AJCXZL {
- ctxt.Andptr[0] = 0x67
- ctxt.Andptr = ctxt.Andptr[1:]
- }
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = 0
- ctxt.Andptr = ctxt.Andptr[1:]
- } else if yt.zcase == Zloop {
- ctxt.Diag("loop too far: %v", p)
- } else {
- if yt.zcase == Zbr {
- ctxt.Andptr[0] = 0x0f
- ctxt.Andptr = ctxt.Andptr[1:]
- }
- ctxt.Andptr[0] = byte(o.op[z+1])
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = 0
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = 0
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = 0
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = 0
- ctxt.Andptr = ctxt.Andptr[1:]
- }
-
- break
-
- /*
- v = q->pc - p->pc - 2;
- if((v >= -128 && v <= 127) || p->pc == -1 || q->pc == -1) {
- *ctxt->andptr++ = op;
- *ctxt->andptr++ = v;
- } else {
- v -= 5-2;
- if(yt.zcase == Zbr) {
- *ctxt->andptr++ = 0x0f;
- v--;
- }
- *ctxt->andptr++ = o->op[z+1];
- *ctxt->andptr++ = v;
- *ctxt->andptr++ = v>>8;
- *ctxt->andptr++ = v>>16;
- *ctxt->andptr++ = v>>24;
- }
- */
-
- case Zbyte:
- v = vaddr(ctxt, p, &p.From, &rel)
- if rel.Siz != 0 {
- rel.Siz = uint8(op)
- r = obj.Addrel(ctxt.Cursym)
- *r = rel
- r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
- }
-
- ctxt.Andptr[0] = byte(v)
- ctxt.Andptr = ctxt.Andptr[1:]
- if op > 1 {
- ctxt.Andptr[0] = byte(v >> 8)
- ctxt.Andptr = ctxt.Andptr[1:]
- if op > 2 {
- ctxt.Andptr[0] = byte(v >> 16)
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v >> 24)
- ctxt.Andptr = ctxt.Andptr[1:]
- if op > 4 {
- ctxt.Andptr[0] = byte(v >> 32)
+ if p.From.Type != obj.TYPE_NONE {
+ v = vaddr(ctxt, p, &p.From, nil)
+ ctxt.Andptr[0] = byte(v)
ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v >> 40)
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v >> 48)
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v >> 56)
+ ctxt.Andptr[0] = byte(v >> 8)
ctxt.Andptr = ctxt.Andptr[1:]
}
+
+ case Ziq_rp:
+ v = vaddr(ctxt, p, &p.From, &rel)
+ l = int(v >> 32)
+ if l == 0 && rel.Siz != 8 {
+ //p->mark |= 0100;
+ //print("zero: %llux %P\n", v, p);
+ ctxt.Rexflag &^= (0x40 | Rxw)
+
+ ctxt.Rexflag |= regrex[p.To.Reg] & Rxb
+ ctxt.Andptr[0] = byte(0xb8 + reg[p.To.Reg])
+ ctxt.Andptr = ctxt.Andptr[1:]
+ if rel.Type != 0 {
+ r = obj.Addrel(ctxt.Cursym)
+ *r = rel
+ r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
+ }
+
+ put4(ctxt, int32(v))
+ } else if l == -1 && uint64(v)&(uint64(1)<<31) != 0 { /* sign extend */
+
+ //p->mark |= 0100;
+ //print("sign: %llux %P\n", v, p);
+ ctxt.Andptr[0] = 0xc7
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ asmando(ctxt, p, &p.To, 0)
+ put4(ctxt, int32(v)) /* need all 8 */
+ } else {
+ //print("all: %llux %P\n", v, p);
+ ctxt.Rexflag |= regrex[p.To.Reg] & Rxb
+
+ ctxt.Andptr[0] = byte(op + reg[p.To.Reg])
+ ctxt.Andptr = ctxt.Andptr[1:]
+ if rel.Type != 0 {
+ r = obj.Addrel(ctxt.Cursym)
+ *r = rel
+ r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
+ }
+
+ put8(ctxt, v)
+ }
+
+ case Zib_rr:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.To, &p.To)
+ ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.From, nil))
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case Z_il,
+ Zil_:
+ if yt.zcase == Zil_ {
+ a = &p.From
+ } else {
+ a = &p.To
+ }
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ if o.prefix == Pe {
+ v = vaddr(ctxt, p, a, nil)
+ ctxt.Andptr[0] = byte(v)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v >> 8)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ } else {
+ relput4(ctxt, p, a)
+ }
+
+ case Zm_ilo,
+ Zilo_m:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ if yt.zcase == Zilo_m {
+ a = &p.From
+ asmando(ctxt, p, &p.To, int(o.op[z+1]))
+ } else {
+ a = &p.To
+ asmando(ctxt, p, &p.From, int(o.op[z+1]))
+ }
+
+ if o.prefix == Pe {
+ v = vaddr(ctxt, p, a, nil)
+ ctxt.Andptr[0] = byte(v)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v >> 8)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ } else {
+ relput4(ctxt, p, a)
+ }
+
+ case Zil_rr:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.To, &p.To)
+ if o.prefix == Pe {
+ v = vaddr(ctxt, p, &p.From, nil)
+ ctxt.Andptr[0] = byte(v)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v >> 8)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ } else {
+ relput4(ctxt, p, &p.From)
+ }
+
+ case Z_rp:
+ ctxt.Rexflag |= regrex[p.To.Reg] & (Rxb | 0x40)
+ ctxt.Andptr[0] = byte(op + reg[p.To.Reg])
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case Zrp_:
+ ctxt.Rexflag |= regrex[p.From.Reg] & (Rxb | 0x40)
+ ctxt.Andptr[0] = byte(op + reg[p.From.Reg])
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case Zclr:
+ ctxt.Rexflag &^= Pw
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.To, &p.To)
+
+ case Zcall:
+ if p.To.Sym == nil {
+ ctxt.Diag("call without target")
+ log.Fatalf("bad code")
+ }
+
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ r = obj.Addrel(ctxt.Cursym)
+ r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
+ r.Sym = p.To.Sym
+ r.Add = p.To.Offset
+ r.Type = obj.R_CALL
+ r.Siz = 4
+ put4(ctxt, 0)
+
+ // TODO: jump across functions needs reloc
+ case Zbr,
+ Zjmp,
+ Zloop:
+ if p.To.Sym != nil {
+ if yt.zcase != Zjmp {
+ ctxt.Diag("branch to ATEXT")
+ log.Fatalf("bad code")
+ }
+
+ ctxt.Andptr[0] = byte(o.op[z+1])
+ ctxt.Andptr = ctxt.Andptr[1:]
+ r = obj.Addrel(ctxt.Cursym)
+ r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
+ r.Sym = p.To.Sym
+ r.Type = obj.R_PCREL
+ r.Siz = 4
+ put4(ctxt, 0)
+ break
+ }
+
+ // Assumes q is in this function.
+ // TODO: Check in input, preserve in brchain.
+
+ // Fill in backward jump now.
+ q = p.Pcond
+
+ if q == nil {
+ ctxt.Diag("jmp/branch/loop without target")
+ log.Fatalf("bad code")
+ }
+
+ if p.Back&1 != 0 {
+ v = q.Pc - (p.Pc + 2)
+ if v >= -128 {
+ if p.As == AJCXZL {
+ ctxt.Andptr[0] = 0x67
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ } else if yt.zcase == Zloop {
+ ctxt.Diag("loop too far: %v", p)
+ } else {
+ v -= 5 - 2
+ if yt.zcase == Zbr {
+ ctxt.Andptr[0] = 0x0f
+ ctxt.Andptr = ctxt.Andptr[1:]
+ v--
+ }
+
+ ctxt.Andptr[0] = byte(o.op[z+1])
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v >> 8)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v >> 16)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v >> 24)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+
+ break
+ }
+
+ // Annotate target; will fill in later.
+ p.Forwd = q.Comefrom
+
+ q.Comefrom = p
+ if p.Back&2 != 0 { // short
+ if p.As == AJCXZL {
+ ctxt.Andptr[0] = 0x67
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = 0
+ ctxt.Andptr = ctxt.Andptr[1:]
+ } else if yt.zcase == Zloop {
+ ctxt.Diag("loop too far: %v", p)
+ } else {
+ if yt.zcase == Zbr {
+ ctxt.Andptr[0] = 0x0f
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+ ctxt.Andptr[0] = byte(o.op[z+1])
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = 0
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = 0
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = 0
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = 0
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+
+ break
+
+ /*
+ v = q->pc - p->pc - 2;
+ if((v >= -128 && v <= 127) || p->pc == -1 || q->pc == -1) {
+ *ctxt->andptr++ = op;
+ *ctxt->andptr++ = v;
+ } else {
+ v -= 5-2;
+ if(yt.zcase == Zbr) {
+ *ctxt->andptr++ = 0x0f;
+ v--;
+ }
+ *ctxt->andptr++ = o->op[z+1];
+ *ctxt->andptr++ = v;
+ *ctxt->andptr++ = v>>8;
+ *ctxt->andptr++ = v>>16;
+ *ctxt->andptr++ = v>>24;
+ }
+ */
+
+ case Zbyte:
+ v = vaddr(ctxt, p, &p.From, &rel)
+ if rel.Siz != 0 {
+ rel.Siz = uint8(op)
+ r = obj.Addrel(ctxt.Cursym)
+ *r = rel
+ r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
+ }
+
+ ctxt.Andptr[0] = byte(v)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ if op > 1 {
+ ctxt.Andptr[0] = byte(v >> 8)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ if op > 2 {
+ ctxt.Andptr[0] = byte(v >> 16)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v >> 24)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ if op > 4 {
+ ctxt.Andptr[0] = byte(v >> 32)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v >> 40)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v >> 48)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v >> 56)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+ }
+ }
}
+
+ return
}
+ z += int(yt.zoffset) + xo
}
-
- return
-
-domov:
- for mo = ymovtab; mo[0].as != 0; mo = mo[1:] {
+ var pp obj.Prog
+ var t []byte
+ for mo := ymovtab; mo[0].as != 0; mo = mo[1:] {
if p.As == mo[0].as {
if ycover[ft+int(mo[0].ft)] != 0 {
if ycover[tt+int(mo[0].tt)] != 0 {
t = mo[0].op[:]
- goto mfound
+ switch mo[0].code {
+ default:
+ ctxt.Diag("asmins: unknown mov %d %v", mo[0].code, p)
+
+ case 0: /* lit */
+ for z = 0; t[z] != E; z++ {
+ ctxt.Andptr[0] = t[z]
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+
+ case 1: /* r,m */
+ ctxt.Andptr[0] = t[0]
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ asmando(ctxt, p, &p.To, int(t[1]))
+
+ case 2: /* m,r */
+ ctxt.Andptr[0] = t[0]
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ asmando(ctxt, p, &p.From, int(t[1]))
+
+ case 3: /* r,m - 2op */
+ ctxt.Andptr[0] = t[0]
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ ctxt.Andptr[0] = t[1]
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmando(ctxt, p, &p.To, int(t[2]))
+ ctxt.Rexflag |= regrex[p.From.Reg] & (Rxr | 0x40)
+
+ case 4: /* m,r - 2op */
+ ctxt.Andptr[0] = t[0]
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ ctxt.Andptr[0] = t[1]
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmando(ctxt, p, &p.From, int(t[2]))
+ ctxt.Rexflag |= regrex[p.To.Reg] & (Rxr | 0x40)
+
+ case 5: /* load full pointer, trash heap */
+ if t[0] != 0 {
+ ctxt.Andptr[0] = t[0]
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+ switch p.To.Index {
+ default:
+ goto bad
+
+ case REG_DS:
+ ctxt.Andptr[0] = 0xc5
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case REG_SS:
+ ctxt.Andptr[0] = 0x0f
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = 0xb2
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case REG_ES:
+ ctxt.Andptr[0] = 0xc4
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case REG_FS:
+ ctxt.Andptr[0] = 0x0f
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = 0xb4
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case REG_GS:
+ ctxt.Andptr[0] = 0x0f
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = 0xb5
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+
+ asmand(ctxt, p, &p.From, &p.To)
+
+ case 6: /* double shift */
+ if t[0] == Pw {
+ if p.Mode != 64 {
+ ctxt.Diag("asmins: illegal 64: %v", p)
+ }
+ ctxt.Rexflag |= Pw
+ t = t[1:]
+ } else if t[0] == Pe {
+ ctxt.Andptr[0] = Pe
+ ctxt.Andptr = ctxt.Andptr[1:]
+ t = t[1:]
+ }
+
+ switch p.From.Type {
+ default:
+ goto bad
+
+ case obj.TYPE_CONST:
+ ctxt.Andptr[0] = 0x0f
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = t[0]
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmandsz(ctxt, p, &p.To, reg[int(p.From.Index)], regrex[int(p.From.Index)], 0)
+ ctxt.Andptr[0] = byte(p.From.Offset)
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case obj.TYPE_REG:
+ switch p.From.Reg {
+ default:
+ goto bad
+
+ case REG_CL,
+ REG_CX:
+ ctxt.Andptr[0] = 0x0f
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = t[1]
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmandsz(ctxt, p, &p.To, reg[int(p.From.Index)], regrex[int(p.From.Index)], 0)
+ }
+ }
+
+ // NOTE: The systems listed here are the ones that use the "TLS initial exec" model,
+ // where you load the TLS base register into a register and then index off that
+ // register to access the actual TLS variables. Systems that allow direct TLS access
+ // are handled in prefixof above and should not be listed here.
+ case 7: /* mov tls, r */
+ switch ctxt.Headtype {
+ default:
+ log.Fatalf("unknown TLS base location for %s", obj.Headstr(ctxt.Headtype))
+
+ case obj.Hplan9:
+ if ctxt.Plan9privates == nil {
+ ctxt.Plan9privates = obj.Linklookup(ctxt, "_privates", 0)
+ }
+ pp.From = obj.Addr{}
+ pp.From.Type = obj.TYPE_MEM
+ pp.From.Name = obj.NAME_EXTERN
+ pp.From.Sym = ctxt.Plan9privates
+ pp.From.Offset = 0
+ pp.From.Index = REG_NONE
+ ctxt.Rexflag |= Pw
+ ctxt.Andptr[0] = 0x8B
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &pp.From, &p.To)
+
+ // TLS base is 0(FS).
+ case obj.Hsolaris: // TODO(rsc): Delete Hsolaris from list. Should not use this code. See progedit in obj6.c.
+ pp.From = p.From
+
+ pp.From.Type = obj.TYPE_MEM
+ pp.From.Name = obj.NAME_NONE
+ pp.From.Reg = REG_NONE
+ pp.From.Offset = 0
+ pp.From.Index = REG_NONE
+ pp.From.Scale = 0
+ ctxt.Rexflag |= Pw
+ ctxt.Andptr[0] = 0x64
+ ctxt.Andptr = ctxt.Andptr[1:] // FS
+ ctxt.Andptr[0] = 0x8B
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &pp.From, &p.To)
+
+ // Windows TLS base is always 0x28(GS).
+ case obj.Hwindows:
+ pp.From = p.From
+
+ pp.From.Type = obj.TYPE_MEM
+ pp.From.Name = obj.NAME_NONE
+ pp.From.Reg = REG_GS
+ pp.From.Offset = 0x28
+ pp.From.Index = REG_NONE
+ pp.From.Scale = 0
+ ctxt.Rexflag |= Pw
+ ctxt.Andptr[0] = 0x65
+ ctxt.Andptr = ctxt.Andptr[1:] // GS
+ ctxt.Andptr[0] = 0x8B
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &pp.From, &p.To)
+ }
+ }
+ return
}
}
}
}
+ goto bad
bad:
if p.Mode != 64 {
@@ -3412,9 +3563,9 @@
* exchange registers and reissue the
* instruction with the operands renamed.
*/
- pp = *p
+ pp := *p
- z = int(p.From.Reg)
+ z := int(p.From.Reg)
if p.From.Type == obj.TYPE_REG && z >= REG_BP && z <= REG_DI {
if isax(&p.To) || p.To.Type == obj.TYPE_NONE {
// We certainly don't want to exchange
@@ -3465,186 +3616,6 @@
ctxt.Diag("doasm: notfound ft=%d tt=%d %v %d %d", p.Ft, p.Tt, p, oclass(ctxt, p, &p.From), oclass(ctxt, p, &p.To))
return
-
-mfound:
- switch mo[0].code {
- default:
- ctxt.Diag("asmins: unknown mov %d %v", mo[0].code, p)
-
- case 0: /* lit */
- for z = 0; t[z] != E; z++ {
- ctxt.Andptr[0] = t[z]
- ctxt.Andptr = ctxt.Andptr[1:]
- }
-
- case 1: /* r,m */
- ctxt.Andptr[0] = t[0]
- ctxt.Andptr = ctxt.Andptr[1:]
-
- asmando(ctxt, p, &p.To, int(t[1]))
-
- case 2: /* m,r */
- ctxt.Andptr[0] = t[0]
- ctxt.Andptr = ctxt.Andptr[1:]
-
- asmando(ctxt, p, &p.From, int(t[1]))
-
- case 3: /* r,m - 2op */
- ctxt.Andptr[0] = t[0]
- ctxt.Andptr = ctxt.Andptr[1:]
-
- ctxt.Andptr[0] = t[1]
- ctxt.Andptr = ctxt.Andptr[1:]
- asmando(ctxt, p, &p.To, int(t[2]))
- ctxt.Rexflag |= regrex[p.From.Reg] & (Rxr | 0x40)
-
- case 4: /* m,r - 2op */
- ctxt.Andptr[0] = t[0]
- ctxt.Andptr = ctxt.Andptr[1:]
-
- ctxt.Andptr[0] = t[1]
- ctxt.Andptr = ctxt.Andptr[1:]
- asmando(ctxt, p, &p.From, int(t[2]))
- ctxt.Rexflag |= regrex[p.To.Reg] & (Rxr | 0x40)
-
- case 5: /* load full pointer, trash heap */
- if t[0] != 0 {
- ctxt.Andptr[0] = t[0]
- ctxt.Andptr = ctxt.Andptr[1:]
- }
- switch p.To.Index {
- default:
- goto bad
-
- case REG_DS:
- ctxt.Andptr[0] = 0xc5
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case REG_SS:
- ctxt.Andptr[0] = 0x0f
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = 0xb2
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case REG_ES:
- ctxt.Andptr[0] = 0xc4
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case REG_FS:
- ctxt.Andptr[0] = 0x0f
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = 0xb4
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case REG_GS:
- ctxt.Andptr[0] = 0x0f
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = 0xb5
- ctxt.Andptr = ctxt.Andptr[1:]
- }
-
- asmand(ctxt, p, &p.From, &p.To)
-
- case 6: /* double shift */
- if t[0] == Pw {
- if p.Mode != 64 {
- ctxt.Diag("asmins: illegal 64: %v", p)
- }
- ctxt.Rexflag |= Pw
- t = t[1:]
- } else if t[0] == Pe {
- ctxt.Andptr[0] = Pe
- ctxt.Andptr = ctxt.Andptr[1:]
- t = t[1:]
- }
-
- switch p.From.Type {
- default:
- goto bad
-
- case obj.TYPE_CONST:
- ctxt.Andptr[0] = 0x0f
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = t[0]
- ctxt.Andptr = ctxt.Andptr[1:]
- asmandsz(ctxt, p, &p.To, reg[int(p.From.Index)], regrex[int(p.From.Index)], 0)
- ctxt.Andptr[0] = byte(p.From.Offset)
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case obj.TYPE_REG:
- switch p.From.Reg {
- default:
- goto bad
-
- case REG_CL,
- REG_CX:
- ctxt.Andptr[0] = 0x0f
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = t[1]
- ctxt.Andptr = ctxt.Andptr[1:]
- asmandsz(ctxt, p, &p.To, reg[int(p.From.Index)], regrex[int(p.From.Index)], 0)
- }
- }
-
- // NOTE: The systems listed here are the ones that use the "TLS initial exec" model,
- // where you load the TLS base register into a register and then index off that
- // register to access the actual TLS variables. Systems that allow direct TLS access
- // are handled in prefixof above and should not be listed here.
- case 7: /* mov tls, r */
- switch ctxt.Headtype {
- default:
- log.Fatalf("unknown TLS base location for %s", obj.Headstr(ctxt.Headtype))
-
- case obj.Hplan9:
- if ctxt.Plan9privates == nil {
- ctxt.Plan9privates = obj.Linklookup(ctxt, "_privates", 0)
- }
- pp.From = obj.Addr{}
- pp.From.Type = obj.TYPE_MEM
- pp.From.Name = obj.NAME_EXTERN
- pp.From.Sym = ctxt.Plan9privates
- pp.From.Offset = 0
- pp.From.Index = REG_NONE
- ctxt.Rexflag |= Pw
- ctxt.Andptr[0] = 0x8B
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &pp.From, &p.To)
-
- // TLS base is 0(FS).
- case obj.Hsolaris: // TODO(rsc): Delete Hsolaris from list. Should not use this code. See progedit in obj6.c.
- pp.From = p.From
-
- pp.From.Type = obj.TYPE_MEM
- pp.From.Name = obj.NAME_NONE
- pp.From.Reg = REG_NONE
- pp.From.Offset = 0
- pp.From.Index = REG_NONE
- pp.From.Scale = 0
- ctxt.Rexflag |= Pw
- ctxt.Andptr[0] = 0x64
- ctxt.Andptr = ctxt.Andptr[1:] // FS
- ctxt.Andptr[0] = 0x8B
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &pp.From, &p.To)
-
- // Windows TLS base is always 0x28(GS).
- case obj.Hwindows:
- pp.From = p.From
-
- pp.From.Type = obj.TYPE_MEM
- pp.From.Name = obj.NAME_NONE
- pp.From.Reg = REG_GS
- pp.From.Offset = 0x28
- pp.From.Index = REG_NONE
- pp.From.Scale = 0
- ctxt.Rexflag |= Pw
- ctxt.Andptr[0] = 0x65
- ctxt.Andptr = ctxt.Andptr[1:] // GS
- ctxt.Andptr[0] = 0x8B
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &pp.From, &p.To)
- }
- }
}
var naclret = []uint8{
@@ -3701,18 +3672,11 @@
}
func asmins(ctxt *obj.Link, p *obj.Prog) {
- var i int
- var n int
- var np int
- var c int
- var and0 []byte
- var r *obj.Reloc
-
ctxt.Andptr = ctxt.And[:]
ctxt.Asmode = int(p.Mode)
if p.As == obj.AUSEFIELD {
- r = obj.Addrel(ctxt.Cursym)
+ r := obj.Addrel(ctxt.Cursym)
r.Off = 0
r.Siz = 0
r.Sym = p.From.Sym
@@ -3839,7 +3803,7 @@
}
ctxt.Rexflag = 0
- and0 = ctxt.Andptr
+ and0 := ctxt.Andptr
ctxt.Asmode = int(p.Mode)
doasm(ctxt, p)
if ctxt.Rexflag != 0 {
@@ -3853,7 +3817,9 @@
if p.Mode != 64 {
ctxt.Diag("asmins: illegal in mode %d: %v", p.Mode, p)
}
- n = -cap(ctxt.Andptr) + cap(and0)
+ n := -cap(ctxt.Andptr) + cap(and0)
+ var c int
+ var np int
for np = 0; np < n; np++ {
c = int(and0[np])
if c != 0xf2 && c != 0xf3 && (c < 0x64 || c > 0x67) && c != 0x2e && c != 0x3e && c != 0x26 {
@@ -3866,8 +3832,9 @@
ctxt.Andptr = ctxt.Andptr[1:]
}
- n = -cap(ctxt.Andptr) + cap(ctxt.And[:])
- for i = len(ctxt.Cursym.R) - 1; i >= 0; i-- {
+ n := -cap(ctxt.Andptr) + cap(ctxt.And[:])
+ var r *obj.Reloc
+ for i := len(ctxt.Cursym.R) - 1; i >= 0; i-- {
r = &ctxt.Cursym.R[i:][0]
if int64(r.Off) < p.Pc {
break
diff --git a/src/cmd/internal/obj/x86/list6.go b/src/cmd/internal/obj/x86/list6.go
index 41c69c9..b2a7a7a 100644
--- a/src/cmd/internal/obj/x86/list6.go
+++ b/src/cmd/internal/obj/x86/list6.go
@@ -55,7 +55,6 @@
func Pconv(p *obj.Prog) string {
var str string
- var fp string
switch p.As {
case obj.ADATA:
@@ -84,6 +83,7 @@
}
}
+ var fp string
fp += str
return fp
}
diff --git a/src/cmd/internal/obj/x86/obj6.go b/src/cmd/internal/obj/x86/obj6.go
index 4cb2f45..81e4e0e 100644
--- a/src/cmd/internal/obj/x86/obj6.go
+++ b/src/cmd/internal/obj/x86/obj6.go
@@ -49,10 +49,6 @@
}
func progedit(ctxt *obj.Link, p *obj.Prog) {
- var literal string
- var s *obj.LSym
- var q *obj.Prog
-
// Thread-local storage references use the TLS pseudo-register.
// As a register, TLS refers to the thread-local storage base, and it
// can only be loaded into another register:
@@ -121,7 +117,7 @@
// MOVQ off(BX)(TLS*1), BX
// This allows the C compilers to emit references to m and g using the direct off(TLS) form.
if (p.As == AMOVQ || p.As == AMOVL) && p.From.Type == obj.TYPE_MEM && p.From.Reg == REG_TLS && p.To.Type == obj.TYPE_REG && REG_AX <= p.To.Reg && p.To.Reg <= REG_R15 {
- q = obj.Appendp(ctxt, p)
+ q := obj.Appendp(ctxt, p)
q.As = p.As
q.From = p.From
q.From.Type = obj.TYPE_MEM
@@ -214,12 +210,10 @@
ACOMISS,
AUCOMISS:
if p.From.Type == obj.TYPE_FCONST {
- var i32 uint32
- var f32 float32
- f32 = float32(p.From.U.Dval)
- i32 = math.Float32bits(f32)
- literal = fmt.Sprintf("$f32.%08x", i32)
- s = obj.Linklookup(ctxt, literal, 0)
+ f32 := float32(p.From.U.Dval)
+ i32 := math.Float32bits(f32)
+ literal := fmt.Sprintf("$f32.%08x", i32)
+ s := obj.Linklookup(ctxt, literal, 0)
if s.Type == 0 {
s.Type = obj.SRODATA
obj.Adduint32(ctxt, s, i32)
@@ -262,10 +256,9 @@
ACOMISD,
AUCOMISD:
if p.From.Type == obj.TYPE_FCONST {
- var i64 uint64
- i64 = math.Float64bits(p.From.U.Dval)
- literal = fmt.Sprintf("$f64.%016x", i64)
- s = obj.Linklookup(ctxt, literal, 0)
+ i64 := math.Float64bits(p.From.U.Dval)
+ literal := fmt.Sprintf("$f64.%016x", i64)
+ s := obj.Linklookup(ctxt, literal, 0)
if s.Type == 0 {
s.Type = obj.SRODATA
obj.Adduint64(ctxt, s, i64)
@@ -315,17 +308,6 @@
}
func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
- var p *obj.Prog
- var q *obj.Prog
- var p1 *obj.Prog
- var p2 *obj.Prog
- var autoffset int32
- var deltasp int32
- var a int
- var pcsize int
- var bpsize int
- var textarg int64
-
if ctxt.Tlsg == nil {
ctxt.Tlsg = obj.Linklookup(ctxt, "runtime.tlsg", 0)
}
@@ -344,12 +326,13 @@
return
}
- p = cursym.Text
- autoffset = int32(p.To.Offset)
+ p := cursym.Text
+ autoffset := int32(p.To.Offset)
if autoffset < 0 {
autoffset = 0
}
+ var bpsize int
if obj.Framepointer_enabled != 0 && autoffset > 0 {
// Make room for to save a base pointer. If autoffset == 0,
// this might do something special like a tail jump to
@@ -362,12 +345,12 @@
bpsize = 0
}
- textarg = int64(p.To.U.Argsize)
+ textarg := int64(p.To.U.Argsize)
cursym.Args = int32(textarg)
cursym.Locals = int32(p.To.Offset)
if autoffset < obj.StackSmall && p.From3.Offset&obj.NOSPLIT == 0 {
- for q = p; q != nil; q = q.Link {
+ for q := p; q != nil; q = q.Link {
if q.As == obj.ACALL {
goto noleaf
}
@@ -380,7 +363,7 @@
noleaf:
}
- q = nil
+ q := (*obj.Prog)(nil)
if p.From3.Offset&obj.NOSPLIT == 0 || (p.From3.Offset&obj.WRAPPER != 0) {
p = obj.Appendp(ctxt, p)
p = load_g_cx(ctxt, p) // load g into CX
@@ -415,7 +398,7 @@
if q != nil {
q.Pcond = p
}
- deltasp = autoffset
+ deltasp := autoffset
if bpsize > 0 {
// Save caller's BP
@@ -486,7 +469,7 @@
p = obj.Appendp(ctxt, p)
p.As = AJEQ
p.To.Type = obj.TYPE_BRANCH
- p1 = p
+ p1 := p
p = obj.Appendp(ctxt, p)
p.As = ALEAQ
@@ -517,7 +500,7 @@
p = obj.Appendp(ctxt, p)
p.As = AJNE
p.To.Type = obj.TYPE_BRANCH
- p2 = p
+ p2 := p
p = obj.Appendp(ctxt, p)
p.As = AMOVQ
@@ -573,6 +556,8 @@
p.As = ASTOSQ
}
+ var a int
+ var pcsize int
for ; p != nil; p = p.Link {
pcsize = int(p.Mode) / 8
a = int(p.From.Name)
@@ -691,8 +676,6 @@
// prologue (caller must call appendp first) and in the epilogue.
// Returns last new instruction.
func load_g_cx(ctxt *obj.Link, p *obj.Prog) *obj.Prog {
- var next *obj.Prog
-
p.As = AMOVQ
if ctxt.Arch.Ptrsize == 4 {
p.As = AMOVL
@@ -703,7 +686,7 @@
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_CX
- next = p.Link
+ next := p.Link
progedit(ctxt, p)
for p.Link != next {
p = p.Link
@@ -723,17 +706,10 @@
// On return, *jmpok is the instruction that should jump
// to the stack frame allocation if no split is needed.
func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, textarg int32, noctxt bool, jmpok **obj.Prog) *obj.Prog {
- var q *obj.Prog
- var q1 *obj.Prog
- var cmp int
- var lea int
- var mov int
- var sub int
-
- cmp = ACMPQ
- lea = ALEAQ
- mov = AMOVQ
- sub = ASUBQ
+ cmp := ACMPQ
+ lea := ALEAQ
+ mov := AMOVQ
+ sub := ASUBQ
if ctxt.Headtype == obj.Hnacl {
cmp = ACMPL
@@ -742,7 +718,7 @@
sub = ASUBL
}
- q1 = nil
+ q1 := (*obj.Prog)(nil)
if framesize <= obj.StackSmall {
// small stack: SP <= stackguard
// CMPQ SP, stackguard
@@ -845,7 +821,7 @@
p.As = AJHI
p.To.Type = obj.TYPE_BRANCH
- q = p
+ q := p
p = obj.Appendp(ctxt, p)
p.As = obj.ACALL
@@ -873,13 +849,10 @@
}
func follow(ctxt *obj.Link, s *obj.LSym) {
- var firstp *obj.Prog
- var lastp *obj.Prog
-
ctxt.Cursym = s
- firstp = ctxt.NewProg()
- lastp = firstp
+ firstp := ctxt.NewProg()
+ lastp := firstp
xfol(ctxt, s.Text, &lastp)
lastp.Link = nil
s.Text = firstp.Link