Commit 582baae2 authored by Keith Randall

[dev.ssa] cmd/compile: Do pointer arithmetic with int, not uintptr

Be more consistent about this.  There's no reason to do the
pointer arithmetic on a different type, as sizeof(int) >=
sizeof(ptr) on all of our platforms.  It also simplifies our
rewrite rules, except for a few that need duplication.

Add some more constant folding to get constant indexing and
slicing to fold down to nothing.

Change-Id: I3e56cdb14b3dc1a6a0514f0333e883f92c19e3c7
Reviewed-on: https://go-review.googlesource.com/16586
Run-TryBot: Keith Randall <khr@golang.org>
Reviewed-by: David Chase <drchase@google.com>
parent 99187311
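To illustrate the intended effect (a hedged sketch; the example program below is mine, not from the commit): with index arithmetic done in the int type, a constant array index can fold down to a fixed offset through the generic constant-folding rules added here.

```go
package demo

var a [8]int64

// elem3's address arithmetic is entirely constant: index 3 times element
// size 8 yields offset 24, expressible as Const64/Mul64/Add64 and foldable.
func elem3() int64 {
	return a[3]
}
```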
@@ -471,12 +471,6 @@ func (s *state) constFloat32(t ssa.Type, c float64) *ssa.Value {
func (s *state) constFloat64(t ssa.Type, c float64) *ssa.Value {
return s.f.ConstFloat64(s.peekLine(), t, c)
}
func (s *state) constIntPtr(t ssa.Type, c int64) *ssa.Value {
if s.config.PtrSize == 4 && int64(int32(c)) != c {
s.Fatalf("pointer constant too big %d", c)
}
return s.f.ConstIntPtr(s.peekLine(), t, c)
}
func (s *state) constInt(t ssa.Type, c int64) *ssa.Value {
if s.config.IntSize == 8 {
return s.constInt64(t, c)
@@ -1781,7 +1775,7 @@ func (s *state) expr(n *Node) *ssa.Value {
case ODOTPTR:
p := s.expr(n.Left)
s.nilCheck(p)
p = s.newValue2(ssa.OpAddPtr, p.Type, p, s.constIntPtr(Types[TUINTPTR], n.Xoffset))
p = s.newValue2(ssa.OpAddPtr, p.Type, p, s.constInt(Types[TINT], n.Xoffset))
return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())
case OINDEX:
@@ -1978,7 +1972,7 @@ func (s *state) expr(n *Node) *ssa.Value {
c = s.variable(&capVar, Types[TINT]) // generates phi for cap
p2 := s.newValue2(ssa.OpPtrIndex, pt, p, l)
for i, arg := range args {
addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(Types[TUINTPTR], int64(i)))
addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(Types[TINT], int64(i)))
if store[i] {
s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, et.Size(), addr, arg, s.mem())
} else {
@@ -2370,17 +2364,17 @@ func (s *state) addr(n *Node, bounded bool) *ssa.Value {
return p
case ODOT:
p := s.addr(n.Left, bounded)
return s.newValue2(ssa.OpAddPtr, t, p, s.constIntPtr(Types[TUINTPTR], n.Xoffset))
return s.newValue2(ssa.OpAddPtr, t, p, s.constInt(Types[TINT], n.Xoffset))
case ODOTPTR:
p := s.expr(n.Left)
if !bounded {
s.nilCheck(p)
}
return s.newValue2(ssa.OpAddPtr, t, p, s.constIntPtr(Types[TUINTPTR], n.Xoffset))
return s.newValue2(ssa.OpAddPtr, t, p, s.constInt(Types[TINT], n.Xoffset))
case OCLOSUREVAR:
return s.newValue2(ssa.OpAddPtr, t,
s.entryNewValue0(ssa.OpGetClosurePtr, Ptrto(Types[TUINT8])),
s.constIntPtr(Types[TUINTPTR], n.Xoffset))
s.constInt(Types[TINT], n.Xoffset))
case OPARAM:
p := n.Left
if p.Op != ONAME || !(p.Class == PPARAM|PHEAP || p.Class == PPARAMOUT|PHEAP) {
@@ -2682,14 +2676,17 @@ func (s *state) slice(t *Type, v, i, j, k *ssa.Value) (p, l, c *ssa.Value) {
// Generate the following code assuming that indexes are in bounds.
// The conditional is to make sure that we don't generate a slice
// that points to the next object in memory.
// rlen = (SubPtr j i)
// rcap = (SubPtr k i)
// rlen = (Sub64 j i)
// rcap = (Sub64 k i)
// p = ptr
// if rcap != 0 {
// p = (AddPtr ptr (MulPtr low (ConstPtr size)))
// p = (AddPtr ptr (Mul64 low (Const64 size)))
// }
// result = (SliceMake p size)
rlen := s.newValue2(ssa.OpSubPtr, Types[TINT], j, i)
subOp := s.ssaOp(OSUB, Types[TINT])
neqOp := s.ssaOp(ONE, Types[TINT])
mulOp := s.ssaOp(OMUL, Types[TINT])
rlen := s.newValue2(subOp, Types[TINT], j, i)
var rcap *ssa.Value
switch {
case t.IsString():
@@ -2700,18 +2697,13 @@ func (s *state) slice(t *Type, v, i, j, k *ssa.Value) (p, l, c *ssa.Value) {
case j == k:
rcap = rlen
default:
rcap = s.newValue2(ssa.OpSubPtr, Types[TINT], k, i)
rcap = s.newValue2(subOp, Types[TINT], k, i)
}
s.vars[&ptrVar] = ptr
// Generate code to test the resulting slice length.
var cmp *ssa.Value
if s.config.IntSize == 8 {
cmp = s.newValue2(ssa.OpNeq64, Types[TBOOL], rcap, s.constInt(Types[TINT], 0))
} else {
cmp = s.newValue2(ssa.OpNeq32, Types[TBOOL], rcap, s.constInt(Types[TINT], 0))
}
cmp := s.newValue2(neqOp, Types[TBOOL], rcap, s.constInt(Types[TINT], 0))
b := s.endBlock()
b.Kind = ssa.BlockIf
@@ -2726,7 +2718,7 @@ func (s *state) slice(t *Type, v, i, j, k *ssa.Value) (p, l, c *ssa.Value) {
if elemtype.Width == 1 {
inc = i
} else {
inc = s.newValue2(ssa.OpMulPtr, Types[TUINTPTR], i, s.constInt(Types[TINT], elemtype.Width))
inc = s.newValue2(mulOp, Types[TINT], i, s.constInt(Types[TINT], elemtype.Width))
}
s.vars[&ptrVar] = s.newValue2(ssa.OpAddPtr, ptrtype, ptr, inc)
s.endBlock()
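Restated as plain Go (a sketch under assumed semantics; sliceGuard and its signature are invented for illustration), the guard above keeps an empty slice from pointing one past the end of its object:

```go
package demo

import "unsafe"

// sliceGuard mirrors the pseudocode for s[i:j:k] over elements of size esize:
// the base pointer advances only when the resulting cap is non-zero.
func sliceGuard(base unsafe.Pointer, i, j, k, esize int) (p unsafe.Pointer, rlen, rcap int) {
	rlen = j - i
	rcap = k - i
	p = base
	if rcap != 0 {
		p = unsafe.Pointer(uintptr(base) + uintptr(i*esize)) // AddPtr(base, Mul(i, esize))
	}
	return
}
```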
@@ -4338,13 +4330,13 @@ func addAux2(a *obj.Addr, v *ssa.Value, offset int64) {
}
}
// extendIndex extends v to a full pointer width.
// extendIndex extends v to a full int width.
func (s *state) extendIndex(v *ssa.Value) *ssa.Value {
size := v.Type.Size()
if size == s.config.PtrSize {
if size == s.config.IntSize {
return v
}
if size > s.config.PtrSize {
if size > s.config.IntSize {
// TODO: truncate 64-bit indexes on 32-bit pointer archs. We'd need to test
// the high word and branch to out-of-bounds failure if it is not 0.
s.Unimplementedf("64->32 index truncation not implemented")
@@ -4354,7 +4346,7 @@ func (s *state) extendIndex(v *ssa.Value) *ssa.Value {
// Extend value to the required size
var op ssa.Op
if v.Type.IsSigned() {
switch 10*size + s.config.PtrSize {
switch 10*size + s.config.IntSize {
case 14:
op = ssa.OpSignExt8to32
case 18:
@@ -4369,7 +4361,7 @@
s.Fatalf("bad signed index extension %s", v.Type)
}
} else {
switch 10*size + s.config.PtrSize {
switch 10*size + s.config.IntSize {
case 14:
op = ssa.OpZeroExt8to32
case 18:
@@ -4384,7 +4376,7 @@
s.Fatalf("bad unsigned index extension %s", v.Type)
}
}
return s.newValue1(op, Types[TUINTPTR], v)
return s.newValue1(op, Types[TINT], v)
}
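The switch key in extendIndex packs both widths into a single integer; a hedged restatement (extKey is a hypothetical helper, not compiler code):

```go
package main

import "fmt"

// extKey reproduces the 10*size + IntSize encoding: the index's byte size in
// the tens digit and the target int size in the ones digit.
func extKey(indexSize, intSize int64) int64 { return 10*indexSize + intSize }

func main() {
	fmt.Println(extKey(1, 4)) // 14: 8-bit index, 32-bit int -> SignExt8to32 / ZeroExt8to32
	fmt.Println(extKey(1, 8)) // 18: 8-bit index, 64-bit int -> SignExt8to64 / ZeroExt8to64
}
```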
// ssaRegToReg maps ssa register numbers to obj register numbers.
......
@@ -297,10 +297,6 @@ func (f *Func) ConstInt64(line int32, t Type, c int64) *Value {
// TODO: cache?
return f.Entry.NewValue0I(line, OpConst64, t, c)
}
func (f *Func) ConstIntPtr(line int32, t Type, c int64) *Value {
// TODO: cache?
return f.Entry.NewValue0I(line, OpConstPtr, t, c)
}
func (f *Func) ConstFloat32(line int32, t Type, c float64) *Value {
// TODO: cache?
return f.Entry.NewValue0I(line, OpConst32F, t, int64(math.Float64bits(c)))
......
@@ -27,7 +27,6 @@
(Sub64F x y) -> (SUBSD x y)
(Mul64 x y) -> (MULQ x y)
(MulPtr x y) -> (MULQ x y)
(Mul32 x y) -> (MULL x y)
(Mul16 x y) -> (MULW x y)
(Mul8 x y) -> (MULB x y)
@@ -348,7 +347,6 @@
(Const64 [val]) -> (MOVQconst [val])
(Const32F [val]) -> (MOVSSconst [val])
(Const64F [val]) -> (MOVSDconst [val])
(ConstPtr [val]) -> (MOVQconst [val])
(ConstNil) -> (MOVQconst [0])
(ConstBool [b]) -> (MOVBconst [b])
......
@@ -20,14 +20,25 @@
// For now, the generated successors must be a permutation of the matched successors.
// constant folding
(Add8 (Const8 [c]) (Const8 [d])) -> (Const8 [c+d])
(Add16 (Const16 [c]) (Const16 [d])) -> (Const16 [c+d])
(Add32 (Const32 [c]) (Const32 [d])) -> (Const32 [c+d])
(Add64 (Const64 [c]) (Const64 [d])) -> (Const64 [c+d])
(AddPtr (ConstPtr [c]) (ConstPtr [d])) -> (ConstPtr [c+d])
(Sub8 (Const8 [c]) (Const8 [d])) -> (Const8 [c-d])
(Sub16 (Const16 [c]) (Const16 [d])) -> (Const16 [c-d])
(Sub32 (Const32 [c]) (Const32 [d])) -> (Const32 [c-d])
(Sub64 (Const64 [c]) (Const64 [d])) -> (Const64 [c-d])
(Mul8 (Const8 [c]) (Const8 [d])) -> (Const8 [c*d])
(Mul16 (Const16 [c]) (Const16 [d])) -> (Const16 [c*d])
(Mul32 (Const32 [c]) (Const32 [d])) -> (Const32 [c*d])
(Mul64 (Const64 [c]) (Const64 [d])) -> (Const64 [c*d])
(MulPtr (ConstPtr [c]) (ConstPtr [d])) -> (ConstPtr [c*d])
(IsInBounds (Const32 [c]) (Const32 [d])) -> (ConstBool [b2i(inBounds32(c,d))])
(IsInBounds (Const64 [c]) (Const64 [d])) -> (ConstBool [b2i(inBounds64(c,d))])
(IsInBounds (ConstPtr [c]) (ConstPtr [d])) && config.PtrSize == 4 -> (ConstBool [b2i(inBounds32(c,d))])
(IsInBounds (ConstPtr [c]) (ConstPtr [d])) && config.PtrSize == 8 -> (ConstBool [b2i(inBounds64(c,d))])
(IsSliceInBounds (Const32 [c]) (Const32 [d])) -> (ConstBool [b2i(sliceInBounds32(c,d))])
(IsSliceInBounds (Const64 [c]) (Const64 [d])) -> (ConstBool [b2i(sliceInBounds64(c,d))])
(Eq64 x x) -> (ConstBool [1])
(Eq32 x x) -> (ConstBool [1])
(Eq16 x x) -> (ConstBool [1])
@@ -127,7 +138,8 @@
// indexing operations
// Note: bounds check has already been done
(ArrayIndex (Load ptr mem) idx) && b == v.Args[0].Block -> (Load (PtrIndex <v.Type.PtrTo()> ptr idx) mem)
(PtrIndex <t> ptr idx) -> (AddPtr ptr (MulPtr idx (ConstPtr [t.Elem().Size()])))
(PtrIndex <t> ptr idx) && config.PtrSize == 4 -> (AddPtr ptr (Mul32 <config.fe.TypeInt()> idx (Const32 <config.fe.TypeInt()> [t.Elem().Size()])))
(PtrIndex <t> ptr idx) && config.PtrSize == 8 -> (AddPtr ptr (Mul64 <config.fe.TypeInt()> idx (Const64 <config.fe.TypeInt()> [t.Elem().Size()])))
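Both PtrIndex rules compute the same address, just at different int widths; a hedged Go model (helper names are invented):

```go
package demo

import "unsafe"

// ptrIndex32 and ptrIndex64 correspond to the PtrSize == 4 and PtrSize == 8
// rules: the element offset idx*size is computed at int width, then added to
// the pointer, which is what AddPtr expresses.
func ptrIndex32(ptr unsafe.Pointer, idx, size int32) unsafe.Pointer {
	return unsafe.Pointer(uintptr(ptr) + uintptr(idx*size))
}

func ptrIndex64(ptr unsafe.Pointer, idx, size int64) unsafe.Pointer {
	return unsafe.Pointer(uintptr(ptr) + uintptr(idx*size))
}
```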
(StructSelect [idx] (Load ptr mem)) -> @v.Args[0].Block (Load <v.Type> (OffPtr <v.Type.PtrTo()> [idx] ptr) mem)
// complex ops
@@ -163,11 +175,16 @@
// string ops
(StringPtr (StringMake ptr _)) -> ptr
(StringLen (StringMake _ len)) -> len
(ConstString {s}) ->
(ConstString {s}) && config.PtrSize == 4 ->
(StringMake
(Addr <config.fe.TypeBytePtr()> {config.fe.StringData(s.(string))}
(SB))
(Const32 <config.fe.TypeInt()> [int64(len(s.(string)))]))
(ConstString {s}) && config.PtrSize == 8 ->
(StringMake
(Addr <config.fe.TypeBytePtr()> {config.fe.StringData(s.(string))}
(SB))
(ConstPtr [int64(len(s.(string)))]))
(Const64 <config.fe.TypeInt()> [int64(len(s.(string)))]))
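What the two ConstString rules build, pictured as a struct (illustrative only, not the compiler's representation):

```go
package demo

import "unsafe"

// stringParts stands in for the StringMake decomposition: the backing data's
// address plus an int-typed length, Const32 on 4-byte-pointer targets and
// Const64 on 8-byte ones.
type stringParts struct {
	data unsafe.Pointer // (Addr {StringData(s)} (SB))
	len  int            // (Const32/Const64 [len(s)])
}
```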
(Load <t> ptr mem) && t.IsString() ->
(StringMake
(Load <config.fe.TypeBytePtr()> ptr mem)
@@ -184,11 +201,16 @@
(SlicePtr (SliceMake ptr _ _ )) -> ptr
(SliceLen (SliceMake _ len _)) -> len
(SliceCap (SliceMake _ _ cap)) -> cap
(ConstSlice) ->
(ConstSlice) && config.PtrSize == 4 ->
(SliceMake
(ConstNil <config.fe.TypeBytePtr()>)
(Const32 <config.fe.TypeInt()> [0])
(Const32 <config.fe.TypeInt()> [0]))
(ConstSlice) && config.PtrSize == 8 ->
(SliceMake
(ConstNil <config.fe.TypeBytePtr()>)
(ConstPtr [0])
(ConstPtr [0]))
(Const64 <config.fe.TypeInt()> [0])
(Const64 <config.fe.TypeInt()> [0]))
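Likewise for ConstSlice (again an illustrative struct, not compiler code): a nil slice decomposes into a nil byte pointer with zero int-typed len and cap.

```go
package demo

import "unsafe"

// sliceParts stands in for the SliceMake decomposition of a nil slice.
type sliceParts struct {
	data     unsafe.Pointer // (ConstNil <BytePtr>)
	len, cap int            // (Const32/Const64 [0])
}
```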
(Load <t> ptr mem) && t.IsSlice() ->
(SliceMake
......
@@ -12,7 +12,7 @@ var genericOps = []opData{
{name: "Add16"},
{name: "Add32"},
{name: "Add64"},
{name: "AddPtr"},
{name: "AddPtr"}, // For address calculations. arg0 is a pointer and arg1 is an int.
{name: "Add32F"},
{name: "Add64F"},
// TODO: Add64C, Add128C
@@ -29,7 +29,6 @@ var genericOps = []opData{
{name: "Mul16"},
{name: "Mul32"},
{name: "Mul64"},
{name: "MulPtr", typ: "Uintptr"}, // MulPtr is used for address calculations
{name: "Mul32F"},
{name: "Mul64F"},
@@ -256,7 +255,6 @@ var genericOps = []opData{
{name: "Const64"},
{name: "Const32F"},
{name: "Const64F"},
{name: "ConstPtr", typ: "Uintptr"}, // pointer-sized integer constant
{name: "ConstInterface"}, // nil interface
{name: "ConstSlice"}, // nil slice
// TODO: Const32F, ...
@@ -338,7 +336,7 @@ var genericOps = []opData{
// Slices
{name: "SliceMake"}, // arg0=ptr, arg1=len, arg2=cap
{name: "SlicePtr", typ: "Uintptr"}, // ptr(arg0)
{name: "SlicePtr", typ: "BytePtr"}, // ptr(arg0)
{name: "SliceLen"}, // len(arg0)
{name: "SliceCap"}, // cap(arg0)
@@ -354,7 +352,7 @@ var genericOps = []opData{
// Interfaces
{name: "IMake"}, // arg0=itab, arg1=data
{name: "ITab", typ: "Uintptr"}, // arg0=interface, returns itable field
{name: "ITab", typ: "BytePtr"}, // arg0=interface, returns itable field
{name: "IData"}, // arg0=interface, returns data field
// Spill&restore ops for the register allocator. These are
......
@@ -71,7 +71,7 @@ func TestNilcheckSimple(t *testing.T) {
Valu("sb", OpSB, TypeInvalid, 0, nil),
Goto("checkPtr")),
Bloc("checkPtr",
Valu("ptr1", OpConstPtr, ptrType, 0, nil, "sb"),
Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
Valu("bool1", OpIsNonNil, TypeBool, 0, nil, "ptr1"),
If("bool1", "secondCheck", "exit")),
Bloc("secondCheck",
@@ -108,7 +108,7 @@ func TestNilcheckDomOrder(t *testing.T) {
Valu("sb", OpSB, TypeInvalid, 0, nil),
Goto("checkPtr")),
Bloc("checkPtr",
Valu("ptr1", OpConstPtr, ptrType, 0, nil, "sb"),
Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
Valu("bool1", OpIsNonNil, TypeBool, 0, nil, "ptr1"),
If("bool1", "secondCheck", "exit")),
Bloc("exit",
@@ -255,11 +255,11 @@ func TestNilcheckKeepRemove(t *testing.T) {
Valu("sb", OpSB, TypeInvalid, 0, nil),
Goto("checkPtr")),
Bloc("checkPtr",
Valu("ptr1", OpConstPtr, ptrType, 0, nil, "sb"),
Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
Valu("bool1", OpIsNonNil, TypeBool, 0, nil, "ptr1"),
If("bool1", "differentCheck", "exit")),
Bloc("differentCheck",
Valu("ptr2", OpConstPtr, ptrType, 0, nil, "sb"),
Valu("ptr2", OpLoad, ptrType, 0, nil, "sb", "mem"),
Valu("bool2", OpIsNonNil, TypeBool, 0, nil, "ptr2"),
If("bool2", "secondCheck", "exit")),
Bloc("secondCheck",
@@ -303,7 +303,7 @@ func TestNilcheckInFalseBranch(t *testing.T) {
Valu("sb", OpSB, TypeInvalid, 0, nil),
Goto("checkPtr")),
Bloc("checkPtr",
Valu("ptr1", OpConstPtr, ptrType, 0, nil, "sb"),
Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
Valu("bool1", OpIsNonNil, TypeBool, 0, nil, "ptr1"),
If("bool1", "extra", "secondCheck")),
Bloc("secondCheck",
@@ -354,7 +354,7 @@ func TestNilcheckUser(t *testing.T) {
Valu("sb", OpSB, TypeInvalid, 0, nil),
Goto("checkPtr")),
Bloc("checkPtr",
Valu("ptr1", OpConstPtr, ptrType, 0, nil, "sb"),
Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
Valu("nilptr", OpConstNil, ptrType, 0, nil, "sb"),
Valu("bool1", OpNeqPtr, TypeBool, 0, nil, "ptr1", "nilptr"),
If("bool1", "secondCheck", "exit")),
@@ -393,7 +393,7 @@ func TestNilcheckBug(t *testing.T) {
Valu("sb", OpSB, TypeInvalid, 0, nil),
Goto("checkPtr")),
Bloc("checkPtr",
Valu("ptr1", OpConstPtr, ptrType, 0, nil, "sb"),
Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
Valu("nilptr", OpConstNil, ptrType, 0, nil, "sb"),
Valu("bool1", OpNeqPtr, TypeBool, 0, nil, "ptr1", "nilptr"),
If("bool1", "secondCheck", "couldBeNil")),
......
@@ -301,7 +301,6 @@ const (
OpMul16
OpMul32
OpMul64
OpMulPtr
OpMul32F
OpMul64F
OpDiv32F
@@ -474,7 +473,6 @@ const (
OpConst64
OpConst32F
OpConst64F
OpConstPtr
OpConstInterface
OpConstSlice
OpArg
@@ -3293,10 +3291,6 @@ var opcodeTable = [...]opInfo{
name: "Mul64",
generic: true,
},
{
name: "MulPtr",
generic: true,
},
{
name: "Mul32F",
generic: true,
@@ -3985,10 +3979,6 @@ var opcodeTable = [...]opInfo{
name: "Const64F",
generic: true,
},
{
name: "ConstPtr",
generic: true,
},
{
name: "ConstInterface",
generic: true,
......
@@ -145,6 +145,8 @@ func inBounds8(idx, len int64) bool { return int8(idx) >= 0 && int8(idx) < int8(len) }
func inBounds16(idx, len int64) bool { return int16(idx) >= 0 && int16(idx) < int16(len) }
func inBounds32(idx, len int64) bool { return int32(idx) >= 0 && int32(idx) < int32(len) }
func inBounds64(idx, len int64) bool { return idx >= 0 && idx < len }
func sliceInBounds32(idx, len int64) bool { return int32(idx) >= 0 && int32(idx) <= int32(len) }
func sliceInBounds64(idx, len int64) bool { return idx >= 0 && idx <= len }
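The two new helpers differ from inBounds only in allowing idx == len; a hedged usage note:

```go
package demo

// bounds illustrates the distinction: indexing requires idx < len, while
// slicing permits idx == len, so s[len(s):] is legal but s[len(s)] panics.
func bounds() {
	s := make([]int, 3)
	_ = s[3:] // ok: sliceInBounds64(3, 3) -> 3 <= 3
	// _ = s[3] // out of range: inBounds64(3, 3) -> 3 < 3 fails
}
```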
// log2 returns logarithm in base 2 of n.
// expects n to be a power of 2.
......
@@ -97,8 +97,6 @@ func rewriteValueAMD64(v *Value, config *Config) bool {
return rewriteValueAMD64_OpConstBool(v, config)
case OpConstNil:
return rewriteValueAMD64_OpConstNil(v, config)
case OpConstPtr:
return rewriteValueAMD64_OpConstPtr(v, config)
case OpConvert:
return rewriteValueAMD64_OpConvert(v, config)
case OpCvt32Fto32:
@@ -405,8 +403,6 @@ func rewriteValueAMD64(v *Value, config *Config) bool {
return rewriteValueAMD64_OpMul64F(v, config)
case OpMul8:
return rewriteValueAMD64_OpMul8(v, config)
case OpMulPtr:
return rewriteValueAMD64_OpMulPtr(v, config)
case OpAMD64NEGB:
return rewriteValueAMD64_OpAMD64NEGB(v, config)
case OpAMD64NEGL:
@@ -2526,26 +2522,6 @@ endea557d921056c25b945a49649e4b9b91:
;
return false
}
func rewriteValueAMD64_OpConstPtr(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (ConstPtr [val])
// cond:
// result: (MOVQconst [val])
{
val := v.AuxInt
v.Op = OpAMD64MOVQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = val
return true
}
goto endc395c0a53eeccf597e225a07b53047d1
endc395c0a53eeccf597e225a07b53047d1:
;
return false
}
func rewriteValueAMD64_OpConvert(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -8325,28 +8301,6 @@ endd876d6bc42a2285b801f42dadbd8757c:
;
return false
}
func rewriteValueAMD64_OpMulPtr(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (MulPtr x y)
// cond:
// result: (MULQ x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64MULQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto endbbedad106c011a93243e2062afdcc75f
endbbedad106c011a93243e2062afdcc75f:
;
return false
}
func rewriteValueAMD64_OpAMD64NEGB(v *Value, config *Config) bool {
b := v.Block
_ = b
......