cmd/compile: some SSA cleanup
Do some easy TODOs.
Move a bunch of the other TODOs into bugs on the issue tracker.
Change-Id: Iaba9dad6221a2af11b3cbcc512875f4a85842873
Reviewed-on: https://go-review.googlesource.com/20114
Run-TryBot: Todd Neal <todd@tneal.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Todd Neal <todd@tneal.org>
diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go
index 1033cd9..91c4916 100644
--- a/src/cmd/compile/internal/gc/ssa.go
+++ b/src/cmd/compile/internal/gc/ssa.go
@@ -1935,7 +1935,7 @@
for !data.Type.IsPtr() {
switch {
case data.Type.IsArray():
- data = s.newValue2(ssa.OpArrayIndex, data.Type.Elem(), data, s.constInt(Types[TINT], 0))
+ data = s.newValue1I(ssa.OpArrayIndex, data.Type.Elem(), 0, data)
case data.Type.IsStruct():
for i := data.Type.NumFields() - 1; i >= 0; i-- {
f := data.Type.FieldType(i)
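
The change above switches OpArrayIndex from taking its (always constant) index as a second SSA argument to carrying it in the value's AuxInt field, so the builder call goes from newValue2 with a constInt to newValue1I. A minimal sketch of what that does to the value's shape, using toy stand-in types rather than the real cmd/compile/internal/ssa API:

    package main

    import "fmt"

    // Value is a toy stand-in for an SSA value: an opcode, an integer
    // auxiliary field, and argument values. The real type lives in
    // cmd/compile/internal/ssa and carries far more state.
    type Value struct {
    	Op     string
    	AuxInt int64
    	Args   []*Value
    }

    func main() {
    	array := &Value{Op: "Arg"}

    	// Old shape: the index is a separate constant value argument.
    	idx := &Value{Op: "Const64", AuxInt: 0}
    	before := &Value{Op: "ArrayIndex", Args: []*Value{array, idx}}

    	// New shape: the index lives in AuxInt; there is one argument.
    	after := &Value{Op: "ArrayIndex", AuxInt: 0, Args: []*Value{array}}

    	fmt.Println(len(before.Args), len(after.Args)) // 2 1
    }
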
diff --git a/src/cmd/compile/internal/ssa/TODO b/src/cmd/compile/internal/ssa/TODO
index a457e67..1eb30d6 100644
--- a/src/cmd/compile/internal/ssa/TODO
+++ b/src/cmd/compile/internal/ssa/TODO
@@ -1,9 +1,6 @@
This is a list of things that need to be worked on. It will hopefully
be complete soon.
-Coverage
---------
-
Correctness
-----------
- Debugging info (check & fix as much as we can)
@@ -14,24 +11,12 @@
- More strength reduction: multiply -> shift/add combos (Worth doing?)
- Add a value range propagation pass (for bounds elim & bitwidth reduction)
- Make dead store pass inter-block
-- redundant CMP in sequences like this:
- SUBQ $8, AX
- CMP AX, $0
- JEQ ...
- If there are a lot of MOVQ $0, ..., then load
0 into a register and use the register as the source instead.
- Allow arrays of length 1 (or longer, with all constant indexes?) to be SSAable.
-- Figure out how to make PARAMOUT variables ssa-able.
- They need to get spilled automatically at end-of-function somehow.
- If strings are being passed around without being interpreted (ptr
and len fields being accessed) pass them in xmm registers?
Same for interfaces?
-- OpArrayIndex should take its index in AuxInt, not a full value.
-- remove FLAGS from REP instruction clobbers
-- (x86) Combine loads into other ops
- Note that this is challenging for ops that generate flags
- because flagalloc wants to move those instructions around for
- flag regeneration.
- Non-constant rotate detection.
- Do 0 <= x && x < n with one unsigned compare
- nil-check removal in indexed load/store case:
@@ -44,17 +29,13 @@
Optimizations (better compiler)
-------------------------------
-- Smaller Value.Type (int32 or ptr)? Get rid of types altogether?
- OpStore uses 3 args. Increase the size of Value.argstorage to 3?
- Use a constant cache for OpConstNil, OpConstInterface, OpConstSlice, maybe OpConstString
- Handle signed division overflow and sign extension earlier
-- Implement 64 bit const division with high multiply, maybe in the frontend?
-- Add bit widths to complex ops
Regalloc
--------
- Make less arch-dependent
-- Allow return values to be ssa-able
- Handle 2-address instructions
- Make liveness analysis non-quadratic
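
One of the TODO items kept above, "Do 0 <= x && x < n with one unsigned compare", refers to a standard bounds-check trick. A small illustration in plain Go, independent of the compiler:

    package main

    import "fmt"

    // inBounds reports 0 <= x && x < n with a single unsigned compare.
    // If x is negative, uint(x) wraps around to a huge value, so the one
    // comparison fails exactly when either original condition fails
    // (assuming n >= 0, as array and slice lengths always are).
    func inBounds(x, n int) bool {
    	return uint(x) < uint(n)
    }

    func main() {
    	fmt.Println(inBounds(3, 10))  // true
    	fmt.Println(inBounds(-1, 10)) // false
    	fmt.Println(inBounds(10, 10)) // false
    }
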
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
index 59a94c8..4ec24e1 100644
--- a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
+++ b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
@@ -438,7 +438,7 @@
argLength: 4,
reg: regInfo{
inputs: []regMask{buildReg("DI"), buildReg("CX"), buildReg("AX")},
- clobbers: buildReg("DI CX FLAGS"),
+ clobbers: buildReg("DI CX"),
},
},
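
The FLAGS removal here reflects x86 semantics: REP STOS advances DI and counts CX down but never writes the flags, so listing FLAGS as clobbered was overly conservative. Judging by its DI/CX/AX inputs, this entry is the REPSTOSQ-based zeroing op; roughly what it does, modeled in Go:

    package main

    import "fmt"

    // repStosq models REP STOSQ: store the 8-byte value in ax to
    // dst[di], cx times, advancing di and counting cx down to zero. The
    // "registers" di and cx end up clobbered; no flags are ever written.
    func repStosq(dst []uint64, ax uint64) {
    	di, cx := 0, len(dst)
    	for cx > 0 {
    		dst[di] = ax
    		di++
    		cx--
    	}
    }

    func main() {
    	buf := make([]uint64, 4)
    	repStosq(buf, 0xdeadbeef)
    	fmt.Println(buf)
    }
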
diff --git a/src/cmd/compile/internal/ssa/gen/generic.rules b/src/cmd/compile/internal/ssa/gen/generic.rules
index 5c23fdf..37e2bd6 100644
--- a/src/cmd/compile/internal/ssa/gen/generic.rules
+++ b/src/cmd/compile/internal/ssa/gen/generic.rules
@@ -400,7 +400,7 @@
// indexing operations
// Note: bounds check has already been done
-(ArrayIndex (Load ptr mem) idx) && b == v.Args[0].Block -> (Load (PtrIndex <v.Type.PtrTo()> ptr idx) mem)
+(ArrayIndex <t> [0] (Load ptr mem)) -> @v.Args[0].Block (Load <t> ptr mem)
(PtrIndex <t> ptr idx) && config.PtrSize == 4 -> (AddPtr ptr (Mul32 <config.fe.TypeInt()> idx (Const32 <config.fe.TypeInt()> [t.Elem().Size()])))
(PtrIndex <t> ptr idx) && config.PtrSize == 8 -> (AddPtr ptr (Mul64 <config.fe.TypeInt()> idx (Const64 <config.fe.TypeInt()> [t.Elem().Size()])))
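
The two PtrIndex rules left unchanged here lower an index into plain pointer arithmetic, ptr + idx*sizeof(elem), with a 32- or 64-bit multiply depending on the target's pointer size. The same computation written out in Go, using unsafe purely for illustration:

    package main

    import (
    	"fmt"
    	"unsafe"
    )

    func main() {
    	a := [4]int64{10, 20, 30, 40}
    	ptr := unsafe.Pointer(&a[0])
    	idx := uintptr(2)

    	// PtrIndex <t> ptr idx -> AddPtr ptr (Mul64 idx sizeof(elem)):
    	// the address of a[idx] is &a[0] + idx*8 on a 64-bit target.
    	p := (*int64)(unsafe.Pointer(uintptr(ptr) + idx*unsafe.Sizeof(a[0])))
    	fmt.Println(*p) // 30
    }
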
diff --git a/src/cmd/compile/internal/ssa/gen/genericOps.go b/src/cmd/compile/internal/ssa/gen/genericOps.go
index f1ab468..3b55ebf 100644
--- a/src/cmd/compile/internal/ssa/gen/genericOps.go
+++ b/src/cmd/compile/internal/ssa/gen/genericOps.go
@@ -335,9 +335,9 @@
{name: "GetClosurePtr"}, // get closure pointer from dedicated register
// Indexing operations
- {name: "ArrayIndex", argLength: 2}, // arg0=array, arg1=index. Returns a[i]
- {name: "PtrIndex", argLength: 2}, // arg0=ptr, arg1=index. Computes ptr+sizeof(*v.type)*index, where index is extended to ptrwidth type
- {name: "OffPtr", argLength: 1, aux: "Int64"}, // arg0 + auxint (arg0 and result are pointers)
+ {name: "ArrayIndex", aux: "Int64", argLength: 1}, // arg0=array, auxint=index. Returns a[i]
+ {name: "PtrIndex", argLength: 2}, // arg0=ptr, arg1=index. Computes ptr+sizeof(*v.type)*index, where index is extended to ptrwidth type
+ {name: "OffPtr", argLength: 1, aux: "Int64"}, // arg0 + auxint (arg0 and result are pointers)
// Slices
{name: "SliceMake", argLength: 3}, // arg0=ptr, arg1=len, arg2=cap
diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go
index a48766ff..34809a7 100644
--- a/src/cmd/compile/internal/ssa/opGen.go
+++ b/src/cmd/compile/internal/ssa/opGen.go
@@ -3689,7 +3689,7 @@
{1, 2}, // .CX
{2, 1}, // .AX
},
- clobbers: 8589934722, // .CX .DI .FLAGS
+ clobbers: 130, // .CX .DI
},
},
{
@@ -5110,7 +5110,8 @@
},
{
name: "ArrayIndex",
- argLen: 2,
+ auxType: auxInt64,
+ argLen: 1,
generic: true,
},
{
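
The regenerated clobber values are just bitmasks over the AMD64 register numbering, as the inline comments confirm (.AX is bit 0, .CX bit 1, .DI bit 7, .FLAGS bit 33 in this era's AMD64Ops.go). A quick check that 130 and the old 8589934722 decode as claimed:

    package main

    import "fmt"

    func main() {
    	const (
    		cx    int64 = 1 << 1  // .CX
    		di    int64 = 1 << 7  // .DI
    		flags int64 = 1 << 33 // .FLAGS
    	)
    	fmt.Println(cx | di)         // 130: the new clobber set
    	fmt.Println(cx | di | flags) // 8589934722: the old one, with FLAGS
    }
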
diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go
index ad2abc5..2e15dac 100644
--- a/src/cmd/compile/internal/ssa/rewritegeneric.go
+++ b/src/cmd/compile/internal/ssa/rewritegeneric.go
@@ -1142,25 +1142,25 @@
func rewriteValuegeneric_OpArrayIndex(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (ArrayIndex (Load ptr mem) idx)
- // cond: b == v.Args[0].Block
- // result: (Load (PtrIndex <v.Type.PtrTo()> ptr idx) mem)
+ // match: (ArrayIndex <t> [0] (Load ptr mem))
+ // cond:
+ // result: @v.Args[0].Block (Load <t> ptr mem)
for {
+ t := v.Type
+ if v.AuxInt != 0 {
+ break
+ }
if v.Args[0].Op != OpLoad {
break
}
ptr := v.Args[0].Args[0]
mem := v.Args[0].Args[1]
- idx := v.Args[1]
- if !(b == v.Args[0].Block) {
- break
- }
- v.reset(OpLoad)
- v0 := b.NewValue0(v.Line, OpPtrIndex, v.Type.PtrTo())
- v0.AddArg(ptr)
- v0.AddArg(idx)
+ b = v.Args[0].Block
+ v0 := b.NewValue0(v.Line, OpLoad, t)
+ v.reset(OpCopy)
v.AddArg(v0)
- v.AddArg(mem)
+ v0.AddArg(ptr)
+ v0.AddArg(mem)
return true
}
return false
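
The rewritten matcher also shows how an "@block" rule is implemented: the replacement Load is created in the original Load's block, and the matched value is reset to an OpCopy of it so existing uses still resolve. At the source level, the rule is just the observation that indexing element 0 of a loaded one-element array is the same as loading through the pointer directly; a hypothetical Go function whose SSA form the rule would fire on:

    package main

    import "fmt"

    // first loads a one-element array and indexes element 0; the new rule
    // rewrites the SSA form of this, (ArrayIndex [0] (Load p mem)), into a
    // direct (Load p mem) placed in the Load's own block.
    func first(p *[1]int) int {
    	a := *p     // Load
    	return a[0] // ArrayIndex [0]
    }

    func main() {
    	fmt.Println(first(&[1]int{42})) // 42
    }
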