Commit 62ac107a authored by Keith Randall

cmd/compile: some SSA cleanup

Do some easy TODOs.
Move a bunch of other TODOs into bugs.

Change-Id: Iaba9dad6221a2af11b3cbcc512875f4a85842873
Reviewed-on: https://go-review.googlesource.com/20114
Run-TryBot: Todd Neal <todd@tneal.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Todd Neal <todd@tneal.org>
parent 3afbb690
...@@ -1935,7 +1935,7 @@ func (s *state) expr(n *Node) *ssa.Value { ...@@ -1935,7 +1935,7 @@ func (s *state) expr(n *Node) *ssa.Value {
for !data.Type.IsPtr() { for !data.Type.IsPtr() {
switch { switch {
case data.Type.IsArray(): case data.Type.IsArray():
data = s.newValue2(ssa.OpArrayIndex, data.Type.Elem(), data, s.constInt(Types[TINT], 0)) data = s.newValue1I(ssa.OpArrayIndex, data.Type.Elem(), 0, data)
case data.Type.IsStruct(): case data.Type.IsStruct():
for i := data.Type.NumFields() - 1; i >= 0; i-- { for i := data.Type.NumFields() - 1; i >= 0; i-- {
f := data.Type.FieldType(i) f := data.Type.FieldType(i)
......
This is a list of things that need to be worked on. It will hopefully This is a list of things that need to be worked on. It will hopefully
be complete soon. be complete soon.
Coverage
--------
Correctness Correctness
----------- -----------
- Debugging info (check & fix as much as we can) - Debugging info (check & fix as much as we can)
...@@ -14,24 +11,12 @@ Optimizations (better compiled code) ...@@ -14,24 +11,12 @@ Optimizations (better compiled code)
- More strength reduction: multiply -> shift/add combos (Worth doing?) - More strength reduction: multiply -> shift/add combos (Worth doing?)
- Add a value range propagation pass (for bounds elim & bitwidth reduction) - Add a value range propagation pass (for bounds elim & bitwidth reduction)
- Make dead store pass inter-block - Make dead store pass inter-block
- redundant CMP in sequences like this:
SUBQ $8, AX
CMP AX, $0
JEQ ...
- If there are a lot of MOVQ $0, ..., then load - If there are a lot of MOVQ $0, ..., then load
0 into a register and use the register as the source instead. 0 into a register and use the register as the source instead.
- Allow arrays of length 1 (or longer, with all constant indexes?) to be SSAable. - Allow arrays of length 1 (or longer, with all constant indexes?) to be SSAable.
- Figure out how to make PARAMOUT variables ssa-able.
They need to get spilled automatically at end-of-function somehow.
- If strings are being passed around without being interpreted (ptr - If strings are being passed around without being interpreted (ptr
and len fields being accessed) pass them in xmm registers? and len fields being accessed) pass them in xmm registers?
Same for interfaces? Same for interfaces?
- OpArrayIndex should take its index in AuxInt, not a full value.
- remove FLAGS from REP instruction clobbers
- (x86) Combine loads into other ops
Note that this is challenging for ops that generate flags
because flagalloc wants to move those instructions around for
flag regeneration.
- Non-constant rotate detection. - Non-constant rotate detection.
- Do 0 <= x && x < n with one unsigned compare - Do 0 <= x && x < n with one unsigned compare
- nil-check removal in indexed load/store case: - nil-check removal in indexed load/store case:
...@@ -44,17 +29,13 @@ Optimizations (better compiled code) ...@@ -44,17 +29,13 @@ Optimizations (better compiled code)
Optimizations (better compiler) Optimizations (better compiler)
------------------------------- -------------------------------
- Smaller Value.Type (int32 or ptr)? Get rid of types altogether?
- OpStore uses 3 args. Increase the size of Value.argstorage to 3? - OpStore uses 3 args. Increase the size of Value.argstorage to 3?
- Use a constant cache for OpConstNil, OpConstInterface, OpConstSlice, maybe OpConstString - Use a constant cache for OpConstNil, OpConstInterface, OpConstSlice, maybe OpConstString
- Handle signed division overflow and sign extension earlier - Handle signed division overflow and sign extension earlier
- Implement 64 bit const division with high multiply, maybe in the frontend?
- Add bit widths to complex ops
Regalloc Regalloc
-------- --------
- Make less arch-dependent - Make less arch-dependent
- Allow return values to be ssa-able
- Handle 2-address instructions - Handle 2-address instructions
- Make liveness analysis non-quadratic - Make liveness analysis non-quadratic
......
...@@ -438,7 +438,7 @@ func init() { ...@@ -438,7 +438,7 @@ func init() {
argLength: 4, argLength: 4,
reg: regInfo{ reg: regInfo{
inputs: []regMask{buildReg("DI"), buildReg("CX"), buildReg("AX")}, inputs: []regMask{buildReg("DI"), buildReg("CX"), buildReg("AX")},
clobbers: buildReg("DI CX FLAGS"), clobbers: buildReg("DI CX"),
}, },
}, },
......
...@@ -400,7 +400,7 @@ ...@@ -400,7 +400,7 @@
// indexing operations // indexing operations
// Note: bounds check has already been done // Note: bounds check has already been done
(ArrayIndex (Load ptr mem) idx) && b == v.Args[0].Block -> (Load (PtrIndex <v.Type.PtrTo()> ptr idx) mem) (ArrayIndex <t> [0] (Load ptr mem)) -> @v.Args[0].Block (Load <t> ptr mem)
(PtrIndex <t> ptr idx) && config.PtrSize == 4 -> (AddPtr ptr (Mul32 <config.fe.TypeInt()> idx (Const32 <config.fe.TypeInt()> [t.Elem().Size()]))) (PtrIndex <t> ptr idx) && config.PtrSize == 4 -> (AddPtr ptr (Mul32 <config.fe.TypeInt()> idx (Const32 <config.fe.TypeInt()> [t.Elem().Size()])))
(PtrIndex <t> ptr idx) && config.PtrSize == 8 -> (AddPtr ptr (Mul64 <config.fe.TypeInt()> idx (Const64 <config.fe.TypeInt()> [t.Elem().Size()]))) (PtrIndex <t> ptr idx) && config.PtrSize == 8 -> (AddPtr ptr (Mul64 <config.fe.TypeInt()> idx (Const64 <config.fe.TypeInt()> [t.Elem().Size()])))
......
...@@ -335,9 +335,9 @@ var genericOps = []opData{ ...@@ -335,9 +335,9 @@ var genericOps = []opData{
{name: "GetClosurePtr"}, // get closure pointer from dedicated register {name: "GetClosurePtr"}, // get closure pointer from dedicated register
// Indexing operations // Indexing operations
{name: "ArrayIndex", argLength: 2}, // arg0=array, arg1=index. Returns a[i] {name: "ArrayIndex", aux: "Int64", argLength: 1}, // arg0=array, auxint=index. Returns a[i]
{name: "PtrIndex", argLength: 2}, // arg0=ptr, arg1=index. Computes ptr+sizeof(*v.type)*index, where index is extended to ptrwidth type {name: "PtrIndex", argLength: 2}, // arg0=ptr, arg1=index. Computes ptr+sizeof(*v.type)*index, where index is extended to ptrwidth type
{name: "OffPtr", argLength: 1, aux: "Int64"}, // arg0 + auxint (arg0 and result are pointers) {name: "OffPtr", argLength: 1, aux: "Int64"}, // arg0 + auxint (arg0 and result are pointers)
// Slices // Slices
{name: "SliceMake", argLength: 3}, // arg0=ptr, arg1=len, arg2=cap {name: "SliceMake", argLength: 3}, // arg0=ptr, arg1=len, arg2=cap
......
...@@ -3689,7 +3689,7 @@ var opcodeTable = [...]opInfo{ ...@@ -3689,7 +3689,7 @@ var opcodeTable = [...]opInfo{
{1, 2}, // .CX {1, 2}, // .CX
{2, 1}, // .AX {2, 1}, // .AX
}, },
clobbers: 8589934722, // .CX .DI .FLAGS clobbers: 130, // .CX .DI
}, },
}, },
{ {
...@@ -5110,7 +5110,8 @@ var opcodeTable = [...]opInfo{ ...@@ -5110,7 +5110,8 @@ var opcodeTable = [...]opInfo{
}, },
{ {
name: "ArrayIndex", name: "ArrayIndex",
argLen: 2, auxType: auxInt64,
argLen: 1,
generic: true, generic: true,
}, },
{ {
......
...@@ -1142,25 +1142,25 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool { ...@@ -1142,25 +1142,25 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool {
func rewriteValuegeneric_OpArrayIndex(v *Value, config *Config) bool { func rewriteValuegeneric_OpArrayIndex(v *Value, config *Config) bool {
b := v.Block b := v.Block
_ = b _ = b
// match: (ArrayIndex (Load ptr mem) idx) // match: (ArrayIndex <t> [0] (Load ptr mem))
// cond: b == v.Args[0].Block // cond:
// result: (Load (PtrIndex <v.Type.PtrTo()> ptr idx) mem) // result: @v.Args[0].Block (Load <t> ptr mem)
for { for {
t := v.Type
if v.AuxInt != 0 {
break
}
if v.Args[0].Op != OpLoad { if v.Args[0].Op != OpLoad {
break break
} }
ptr := v.Args[0].Args[0] ptr := v.Args[0].Args[0]
mem := v.Args[0].Args[1] mem := v.Args[0].Args[1]
idx := v.Args[1] b = v.Args[0].Block
if !(b == v.Args[0].Block) { v0 := b.NewValue0(v.Line, OpLoad, t)
break v.reset(OpCopy)
}
v.reset(OpLoad)
v0 := b.NewValue0(v.Line, OpPtrIndex, v.Type.PtrTo())
v0.AddArg(ptr)
v0.AddArg(idx)
v.AddArg(v0) v.AddArg(v0)
v.AddArg(mem) v0.AddArg(ptr)
v0.AddArg(mem)
return true return true
} }
return false return false
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment