Commit 67fdb0de authored by Keith Randall

[dev.ssa] cmd/compile/internal/ssa: use width and sign specific opcodes

Bake the bit width and signedness into opcodes.
Pro: Rewrite rules become easier.  Less chance for confusion.
Con: Lots more opcodes.
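
For example, the AMD64 lowering rule for 64-bit addition no longer
needs a type guard:

    (Add <t> x y) && (is64BitInt(t) || isPtr(t)) -> (ADDQ x y)

becomes simply

    (Add64 x y) -> (ADDQ x y)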

Let me know what you think.  I'm leaning towards this, but I could be
convinced otherwise if people think this is too ugly.

Update #11467

Change-Id: Icf1b894268cdf73515877bb123839800d97b9df9
Reviewed-on: https://go-review.googlesource.com/12362
Reviewed-by: Alan Donovan <adonovan@google.com>
Reviewed-by: Josh Bleecher Snyder <josharian@gmail.com>
parent 8043f450
@@ -446,19 +446,122 @@ func (s *state) stmt(n *Node) {
}
}
var binOpToSSA = [...]ssa.Op{
// Comparisons
OEQ: ssa.OpEq,
ONE: ssa.OpNeq,
OLT: ssa.OpLess,
OLE: ssa.OpLeq,
OGT: ssa.OpGreater,
OGE: ssa.OpGeq,
// Arithmetic
OADD: ssa.OpAdd,
OSUB: ssa.OpSub,
OLSH: ssa.OpLsh,
ORSH: ssa.OpRsh,
type opAndType struct {
op uint8
etype uint8
}
var opToSSA = map[opAndType]ssa.Op{
opAndType{OADD, TINT8}: ssa.OpAdd8,
opAndType{OADD, TUINT8}: ssa.OpAdd8U,
opAndType{OADD, TINT16}: ssa.OpAdd16,
opAndType{OADD, TUINT16}: ssa.OpAdd16U,
opAndType{OADD, TINT32}: ssa.OpAdd32,
opAndType{OADD, TUINT32}: ssa.OpAdd32U,
opAndType{OADD, TINT64}: ssa.OpAdd64,
opAndType{OADD, TUINT64}: ssa.OpAdd64U,
opAndType{OSUB, TINT8}: ssa.OpSub8,
opAndType{OSUB, TUINT8}: ssa.OpSub8U,
opAndType{OSUB, TINT16}: ssa.OpSub16,
opAndType{OSUB, TUINT16}: ssa.OpSub16U,
opAndType{OSUB, TINT32}: ssa.OpSub32,
opAndType{OSUB, TUINT32}: ssa.OpSub32U,
opAndType{OSUB, TINT64}: ssa.OpSub64,
opAndType{OSUB, TUINT64}: ssa.OpSub64U,
opAndType{OLSH, TINT8}: ssa.OpLsh8,
opAndType{OLSH, TUINT8}: ssa.OpLsh8,
opAndType{OLSH, TINT16}: ssa.OpLsh16,
opAndType{OLSH, TUINT16}: ssa.OpLsh16,
opAndType{OLSH, TINT32}: ssa.OpLsh32,
opAndType{OLSH, TUINT32}: ssa.OpLsh32,
opAndType{OLSH, TINT64}: ssa.OpLsh64,
opAndType{OLSH, TUINT64}: ssa.OpLsh64,
opAndType{ORSH, TINT8}: ssa.OpRsh8,
opAndType{ORSH, TUINT8}: ssa.OpRsh8U,
opAndType{ORSH, TINT16}: ssa.OpRsh16,
opAndType{ORSH, TUINT16}: ssa.OpRsh16U,
opAndType{ORSH, TINT32}: ssa.OpRsh32,
opAndType{ORSH, TUINT32}: ssa.OpRsh32U,
opAndType{ORSH, TINT64}: ssa.OpRsh64,
opAndType{ORSH, TUINT64}: ssa.OpRsh64U,
opAndType{OEQ, TINT8}: ssa.OpEq8,
opAndType{OEQ, TUINT8}: ssa.OpEq8,
opAndType{OEQ, TINT16}: ssa.OpEq16,
opAndType{OEQ, TUINT16}: ssa.OpEq16,
opAndType{OEQ, TINT32}: ssa.OpEq32,
opAndType{OEQ, TUINT32}: ssa.OpEq32,
opAndType{OEQ, TINT64}: ssa.OpEq64,
opAndType{OEQ, TUINT64}: ssa.OpEq64,
opAndType{ONE, TINT8}: ssa.OpNeq8,
opAndType{ONE, TUINT8}: ssa.OpNeq8,
opAndType{ONE, TINT16}: ssa.OpNeq16,
opAndType{ONE, TUINT16}: ssa.OpNeq16,
opAndType{ONE, TINT32}: ssa.OpNeq32,
opAndType{ONE, TUINT32}: ssa.OpNeq32,
opAndType{ONE, TINT64}: ssa.OpNeq64,
opAndType{ONE, TUINT64}: ssa.OpNeq64,
opAndType{OLT, TINT8}: ssa.OpLess8,
opAndType{OLT, TUINT8}: ssa.OpLess8U,
opAndType{OLT, TINT16}: ssa.OpLess16,
opAndType{OLT, TUINT16}: ssa.OpLess16U,
opAndType{OLT, TINT32}: ssa.OpLess32,
opAndType{OLT, TUINT32}: ssa.OpLess32U,
opAndType{OLT, TINT64}: ssa.OpLess64,
opAndType{OLT, TUINT64}: ssa.OpLess64U,
opAndType{OGT, TINT8}: ssa.OpGreater8,
opAndType{OGT, TUINT8}: ssa.OpGreater8U,
opAndType{OGT, TINT16}: ssa.OpGreater16,
opAndType{OGT, TUINT16}: ssa.OpGreater16U,
opAndType{OGT, TINT32}: ssa.OpGreater32,
opAndType{OGT, TUINT32}: ssa.OpGreater32U,
opAndType{OGT, TINT64}: ssa.OpGreater64,
opAndType{OGT, TUINT64}: ssa.OpGreater64U,
opAndType{OLE, TINT8}: ssa.OpLeq8,
opAndType{OLE, TUINT8}: ssa.OpLeq8U,
opAndType{OLE, TINT16}: ssa.OpLeq16,
opAndType{OLE, TUINT16}: ssa.OpLeq16U,
opAndType{OLE, TINT32}: ssa.OpLeq32,
opAndType{OLE, TUINT32}: ssa.OpLeq32U,
opAndType{OLE, TINT64}: ssa.OpLeq64,
opAndType{OLE, TUINT64}: ssa.OpLeq64U,
opAndType{OGE, TINT8}: ssa.OpGeq8,
opAndType{OGE, TUINT8}: ssa.OpGeq8U,
opAndType{OGE, TINT16}: ssa.OpGeq16,
opAndType{OGE, TUINT16}: ssa.OpGeq16U,
opAndType{OGE, TINT32}: ssa.OpGeq32,
opAndType{OGE, TUINT32}: ssa.OpGeq32U,
opAndType{OGE, TINT64}: ssa.OpGeq64,
opAndType{OGE, TUINT64}: ssa.OpGeq64U,
}
func (s *state) ssaOp(op uint8, t *Type) ssa.Op {
etype := t.Etype
switch etype {
case TINT:
etype = TINT32
if s.config.PtrSize == 8 {
etype = TINT64
}
case TUINT:
etype = TUINT32
if s.config.PtrSize == 8 {
etype = TUINT64
}
}
x, ok := opToSSA[opAndType{op, etype}]
if !ok {
s.Unimplementedf("unhandled binary op %s etype=%d", opnames[op], etype)
}
return x
}
// expr converts the expression n to ssa, adds it to s and returns the ssa result.
@@ -503,11 +606,11 @@ func (s *state) expr(n *Node) *ssa.Value {
case OLT, OEQ, ONE, OLE, OGE, OGT:
a := s.expr(n.Left)
b := s.expr(n.Right)
return s.newValue2(binOpToSSA[n.Op], ssa.TypeBool, a, b)
return s.newValue2(s.ssaOp(n.Op, n.Left.Type), ssa.TypeBool, a, b)
case OADD, OSUB, OLSH, ORSH:
a := s.expr(n.Left)
b := s.expr(n.Right)
return s.newValue2(binOpToSSA[n.Op], a.Type, a, b)
return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
case OANDAND, OOROR:
// To implement OANDAND (and OOROR), we introduce a
// new temporary variable to hold the result. The
@@ -569,7 +672,7 @@ func (s *state) expr(n *Node) *ssa.Value {
case ODOTPTR:
p := s.expr(n.Left)
s.nilCheck(p)
p = s.newValue2(ssa.OpAdd, p.Type, p, s.constInt(s.config.Uintptr, n.Xoffset))
p = s.newValue2(ssa.OpAddPtr, p.Type, p, s.constInt(s.config.Uintptr, n.Xoffset))
return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())
case OINDEX:
@@ -742,11 +845,11 @@ func (s *state) addr(n *Node) *ssa.Value {
return p
case ODOT:
p := s.addr(n.Left)
return s.newValue2(ssa.OpAdd, p.Type, p, s.constInt(s.config.Uintptr, n.Xoffset))
return s.newValue2(ssa.OpAddPtr, p.Type, p, s.constInt(s.config.Uintptr, n.Xoffset))
case ODOTPTR:
p := s.expr(n.Left)
s.nilCheck(p)
return s.newValue2(ssa.OpAdd, p.Type, p, s.constInt(s.config.Uintptr, n.Xoffset))
return s.newValue2(ssa.OpAddPtr, p.Type, p, s.constInt(s.config.Uintptr, n.Xoffset))
default:
s.Unimplementedf("addr: bad op %v", Oconv(int(n.Op), 0))
return nil
@@ -6,7 +6,7 @@ package ssa
type Config struct {
arch string // "amd64", etc.
ptrSize int64 // 4 or 8
PtrSize int64 // 4 or 8
Uintptr Type // pointer arithmetic type
Int Type
lowerBlock func(*Block) bool // lowering function
@@ -38,11 +38,11 @@ func NewConfig(arch string, fe Frontend) *Config {
c := &Config{arch: arch, fe: fe}
switch arch {
case "amd64":
c.ptrSize = 8
c.PtrSize = 8
c.lowerBlock = rewriteBlockAMD64
c.lowerValue = rewriteValueAMD64
case "386":
c.ptrSize = 4
c.PtrSize = 4
c.lowerBlock = rewriteBlockAMD64
c.lowerValue = rewriteValueAMD64 // TODO(khr): full 32-bit support
default:
@@ -52,7 +52,7 @@ func NewConfig(arch string, fe Frontend) *Config {
// cache the frequently-used types in the config
c.Uintptr = TypeUInt32
c.Int = TypeInt32
if c.ptrSize == 8 {
if c.PtrSize == 8 {
c.Uintptr = TypeUInt64
c.Int = TypeInt64
}
@@ -267,7 +267,7 @@ func TestArgs(t *testing.T) {
Bloc("entry",
Valu("a", OpConst, TypeInt64, 14, nil),
Valu("b", OpConst, TypeInt64, 26, nil),
Valu("sum", OpAdd, TypeInt64, 0, nil, "a", "b"),
Valu("sum", OpAdd64, TypeInt64, 0, nil, "a", "b"),
Valu("mem", OpArg, TypeMem, 0, ".mem"),
Goto("exit")),
Bloc("exit",
@@ -290,7 +290,7 @@ func TestEquiv(t *testing.T) {
Bloc("entry",
Valu("a", OpConst, TypeInt64, 14, nil),
Valu("b", OpConst, TypeInt64, 26, nil),
Valu("sum", OpAdd, TypeInt64, 0, nil, "a", "b"),
Valu("sum", OpAdd64, TypeInt64, 0, nil, "a", "b"),
Valu("mem", OpArg, TypeMem, 0, ".mem"),
Goto("exit")),
Bloc("exit",
@@ -299,7 +299,7 @@ func TestEquiv(t *testing.T) {
Bloc("entry",
Valu("a", OpConst, TypeInt64, 14, nil),
Valu("b", OpConst, TypeInt64, 26, nil),
Valu("sum", OpAdd, TypeInt64, 0, nil, "a", "b"),
Valu("sum", OpAdd64, TypeInt64, 0, nil, "a", "b"),
Valu("mem", OpArg, TypeMem, 0, ".mem"),
Goto("exit")),
Bloc("exit",
@@ -311,7 +311,7 @@ func TestEquiv(t *testing.T) {
Bloc("entry",
Valu("a", OpConst, TypeInt64, 14, nil),
Valu("b", OpConst, TypeInt64, 26, nil),
Valu("sum", OpAdd, TypeInt64, 0, nil, "a", "b"),
Valu("sum", OpAdd64, TypeInt64, 0, nil, "a", "b"),
Valu("mem", OpArg, TypeMem, 0, ".mem"),
Goto("exit")),
Bloc("exit",
@@ -322,7 +322,7 @@ func TestEquiv(t *testing.T) {
Bloc("entry",
Valu("a", OpConst, TypeInt64, 14, nil),
Valu("b", OpConst, TypeInt64, 26, nil),
Valu("sum", OpAdd, TypeInt64, 0, nil, "a", "b"),
Valu("sum", OpAdd64, TypeInt64, 0, nil, "a", "b"),
Valu("mem", OpArg, TypeMem, 0, ".mem"),
Goto("exit"))),
},
@@ -397,14 +397,14 @@ func TestEquiv(t *testing.T) {
Valu("mem", OpArg, TypeMem, 0, ".mem"),
Valu("a", OpConst, TypeInt64, 14, nil),
Valu("b", OpConst, TypeInt64, 26, nil),
Valu("sum", OpAdd, TypeInt64, 0, nil, "a", "b"),
Valu("sum", OpAdd64, TypeInt64, 0, nil, "a", "b"),
Exit("mem"))),
Fun(c, "entry",
Bloc("entry",
Valu("mem", OpArg, TypeMem, 0, ".mem"),
Valu("a", OpConst, TypeInt64, 0, nil),
Valu("b", OpConst, TypeInt64, 14, nil),
Valu("sum", OpAdd, TypeInt64, 0, nil, "b", "a"),
Valu("sum", OpAdd64, TypeInt64, 0, nil, "b", "a"),
Exit("mem"))),
},
}
@@ -13,14 +13,25 @@
// Unused portions are junk.
// Lowering arithmetic
(Add <t> x y) && (is64BitInt(t) || isPtr(t)) -> (ADDQ x y)
(Add <t> x y) && is32BitInt(t) && !isSigned(t) -> (ADDL x y)
(Add <t> x y) && is32BitInt(t) && isSigned(t) -> (MOVLQSX (ADDL <t> x y))
(Add <t> x y) && is16BitInt(t) && !isSigned(t) -> (ADDW x y)
(Add <t> x y) && is16BitInt(t) && isSigned(t) -> (MOVWQSX (ADDW <t> x y))
(Add <t> x y) && is8BitInt(t) && !isSigned(t) -> (ADDB x y)
(Add <t> x y) && is8BitInt(t) && isSigned(t) -> (MOVBQSX (ADDB <t> x y))
(Sub <t> x y) && is64BitInt(t) -> (SUBQ x y)
(Add64 x y) -> (ADDQ x y)
(Add64U x y) -> (ADDQ x y)
(AddPtr x y) -> (ADDQ x y)
(Add32U x y) -> (ADDL x y)
(Add32 x y) -> (MOVLQSX (ADDL <v.Type> x y))
(Add16U x y) -> (ADDW x y)
(Add16 x y) -> (MOVWQSX (ADDW <v.Type> x y))
(Add8U x y) -> (ADDB x y)
(Add8 x y) -> (MOVBQSX (ADDB <v.Type> x y))
(Sub64 x y) -> (SUBQ x y)
(Sub64U x y) -> (SUBQ x y)
(Sub32U x y) -> (SUBL x y)
(Sub32 x y) -> (MOVLQSX (SUBL <v.Type> x y))
(Sub16U x y) -> (SUBW x y)
(Sub16 x y) -> (MOVWQSX (SUBW <v.Type> x y))
(Sub8U x y) -> (SUBB x y)
(Sub8 x y) -> (MOVBQSX (SUBB <v.Type> x y))
(Mul <t> x y) && is64BitInt(t) -> (MULQ x y)
(MOVLstore ptr (MOVLQSX x) mem) -> (MOVLstore ptr x mem)
@@ -34,26 +45,26 @@
// Note: unsigned shifts need to return 0 if shift amount is >= 64.
// mask = shift >= 64 ? 0 : 0xffffffffffffffff
// result = mask & arg << shift
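// For example, y = 3 gives mask = 0xffffffffffffffff, so the result is x << 3;
// y = 70 gives mask = 0, so the result is 0.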
(Lsh <t> x y) && is64BitInt(t) ->
(Lsh64 <t> x y) ->
(ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst <TypeFlags> [64] y)))
(Rsh <t> x y) && is64BitInt(t) && !t.IsSigned() ->
(Rsh64U <t> x y) ->
(ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst <TypeFlags> [64] y)))
// Note: signed right shift needs to return 0/-1 if shift amount is >= 64.
// if shift > 63 { shift = 63 }
// result = arg >> shift
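// For example, y = 70 is clamped to 63, so the result is 0 for non-negative x
// and -1 for negative x.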
(Rsh <t> x y) && is64BitInt(t) && t.IsSigned() ->
(Rsh64 <t> x y) ->
(SARQ <t> x (CMOVQCC <t>
(CMPQconst <TypeFlags> [64] y)
(Const <t> [63])
y))
(Less x y) && is64BitInt(v.Args[0].Type) && isSigned(v.Args[0].Type) -> (SETL (CMPQ <TypeFlags> x y))
(Leq x y) && is64BitInt(v.Args[0].Type) && isSigned(v.Args[0].Type) -> (SETLE (CMPQ <TypeFlags> x y))
(Greater x y) && is64BitInt(v.Args[0].Type) && isSigned(v.Args[0].Type) -> (SETG (CMPQ <TypeFlags> x y))
(Geq x y) && is64BitInt(v.Args[0].Type) && isSigned(v.Args[0].Type) -> (SETGE (CMPQ <TypeFlags> x y))
(Eq x y) && is64BitInt(v.Args[0].Type) && isSigned(v.Args[0].Type) -> (SETEQ (CMPQ <TypeFlags> x y))
(Neq x y) && is64BitInt(v.Args[0].Type) && isSigned(v.Args[0].Type) -> (SETNE (CMPQ <TypeFlags> x y))
(Less64 x y) -> (SETL (CMPQ <TypeFlags> x y))
(Leq64 x y) -> (SETLE (CMPQ <TypeFlags> x y))
(Greater64 x y) -> (SETG (CMPQ <TypeFlags> x y))
(Geq64 x y) -> (SETGE (CMPQ <TypeFlags> x y))
(Eq64 x y) -> (SETEQ (CMPQ <TypeFlags> x y))
(Neq64 x y) -> (SETNE (CMPQ <TypeFlags> x y))
(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) -> (MOVQload ptr mem)
(Load <t> ptr mem) && is32BitInt(t) -> (MOVLload ptr mem)
@@ -174,6 +174,10 @@ func init() {
{name: "ADDW", reg: gp21, asm: "ADDW"}, // arg0+arg1
{name: "ADDB", reg: gp21, asm: "ADDB"}, // arg0+arg1
{name: "SUBL", reg: gp21, asm: "SUBL"}, // arg0-arg1
{name: "SUBW", reg: gp21, asm: "SUBW"}, // arg0-arg1
{name: "SUBB", reg: gp21, asm: "SUBB"}, // arg0-arg1
// (InvertFlags (CMPQ a b)) == (CMPQ b a)
// So if we want (SETL (CMPQ a b)) but we can't do that because a is a constant,
// then we do (SETL (InvertFlags (CMPQ b a))) instead.
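// For example (an illustrative sketch, not an actual rule in this file):
//   want: (SETL (CMPQ (Const [c]) y))   // c < y, with c a constant
//   emit: (SETL (InvertFlags (CMPQconst <TypeFlags> [c] y)))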
@@ -20,20 +20,21 @@
// For now, the generated successors must be a permutation of the matched successors.
// constant folding
(Add <t> (Const [c]) (Const [d])) && is64BitInt(t) -> (Const [c+d])
(Add64 (Const [c]) (Const [d])) -> (Const [c+d])
(Add64U (Const [c]) (Const [d])) -> (Const [c+d])
(Mul <t> (Const [c]) (Const [d])) && is64BitInt(t) -> (Const [c*d])
(IsInBounds (Const [c]) (Const [d])) -> (Const {inBounds(c,d)})
// tear apart slices
// TODO: anything that generates a slice needs to go in here.
(SlicePtr (Load ptr mem)) -> (Load ptr mem)
(SliceLen (Load ptr mem)) -> (Load (Add <ptr.Type> ptr (Const <config.Uintptr> [config.ptrSize])) mem)
(SliceCap (Load ptr mem)) -> (Load (Add <ptr.Type> ptr (Const <config.Uintptr> [config.ptrSize*2])) mem)
(SliceLen (Load ptr mem)) -> (Load (AddPtr <ptr.Type> ptr (Const <config.Uintptr> [config.PtrSize])) mem)
(SliceCap (Load ptr mem)) -> (Load (AddPtr <ptr.Type> ptr (Const <config.Uintptr> [config.PtrSize*2])) mem)
// indexing operations
// Note: bounds check has already been done
(ArrayIndex (Load ptr mem) idx) -> (Load (PtrIndex <v.Type.PtrTo()> ptr idx) mem)
(PtrIndex <t> ptr idx) -> (Add ptr (Mul <config.Uintptr> idx (Const <config.Uintptr> [t.Elem().Size()])))
(PtrIndex <t> ptr idx) -> (AddPtr ptr (Mul <config.Uintptr> idx (Const <config.Uintptr> [t.Elem().Size()])))
(StructSelect [idx] (Load ptr mem)) -> (Load (OffPtr <v.Type.PtrTo()> [idx] ptr) mem)
// big-object moves
@@ -41,11 +42,11 @@
(Store dst (Load <t> src mem) mem) && t.Size() > 8 -> (Move [t.Size()] dst src mem)
// string ops
(Const <t> {s}) && t.IsString() -> (StringMake (OffPtr <TypeBytePtr> [2*config.ptrSize] (Addr <TypeBytePtr> {config.fe.StringSym(s.(string))} (SB <config.Uintptr>))) (Const <config.Uintptr> [int64(len(s.(string)))])) // TODO: ptr
(Load <t> ptr mem) && t.IsString() -> (StringMake (Load <TypeBytePtr> ptr mem) (Load <config.Uintptr> (OffPtr <TypeBytePtr> [config.ptrSize] ptr) mem))
(Const <t> {s}) && t.IsString() -> (StringMake (OffPtr <TypeBytePtr> [2*config.PtrSize] (Addr <TypeBytePtr> {config.fe.StringSym(s.(string))} (SB <config.Uintptr>))) (Const <config.Uintptr> [int64(len(s.(string)))])) // TODO: ptr
(Load <t> ptr mem) && t.IsString() -> (StringMake (Load <TypeBytePtr> ptr mem) (Load <config.Uintptr> (OffPtr <TypeBytePtr> [config.PtrSize] ptr) mem))
(StringPtr (StringMake ptr _)) -> ptr
(StringLen (StringMake _ len)) -> len
(Store dst str mem) && str.Type.IsString() -> (Store (OffPtr <TypeBytePtr> [config.ptrSize] dst) (StringLen <config.Uintptr> str) (Store <TypeMem> dst (StringPtr <TypeBytePtr> str) mem))
(Store dst str mem) && str.Type.IsString() -> (Store (OffPtr <TypeBytePtr> [config.PtrSize] dst) (StringLen <config.Uintptr> str) (Store <TypeMem> dst (StringPtr <TypeBytePtr> str) mem))
(If (Const {c}) yes no) && c.(bool) -> (Plain nil yes)
(If (Const {c}) yes no) && !c.(bool) -> (Plain nil no)
@@ -8,19 +8,89 @@ var genericOps = []opData{
// 2-input arithmetic
// Types must be consistent with Go typing. Add, for example, must take two values
// of the same type and produce that same type.
{name: "Add"}, // arg0 + arg1
{name: "Sub"}, // arg0 - arg1
{name: "Add8"}, // arg0 + arg1
{name: "Add16"},
{name: "Add32"},
{name: "Add64"},
{name: "Add8U"},
{name: "Add16U"},
{name: "Add32U"},
{name: "Add64U"},
{name: "AddPtr"},
// TODO: Add32F, Add64F, Add64C, Add128C
{name: "Sub8"}, // arg0 - arg1
{name: "Sub16"},
{name: "Sub32"},
{name: "Sub64"},
{name: "Sub8U"},
{name: "Sub16U"},
{name: "Sub32U"},
{name: "Sub64U"},
// TODO: Sub32F, Sub64F, Sub64C, Sub128C
{name: "Mul"}, // arg0 * arg1
{name: "Lsh"}, // arg0 << arg1
{name: "Rsh"}, // arg0 >> arg1 (signed/unsigned depending on signedness of type)
{name: "Lsh8"}, // arg0 << arg1
{name: "Lsh16"},
{name: "Lsh32"},
{name: "Lsh64"},
{name: "Rsh8"}, // arg0 >> arg1
{name: "Rsh8U"},
{name: "Rsh16"},
{name: "Rsh16U"},
{name: "Rsh32"},
{name: "Rsh32U"},
{name: "Rsh64"},
{name: "Rsh64U"},
// 2-input comparisons
{name: "Eq"}, // arg0 == arg1
{name: "Neq"}, // arg0 != arg1
{name: "Less"}, // arg0 < arg1
{name: "Leq"}, // arg0 <= arg1
{name: "Greater"}, // arg0 > arg1
{name: "Geq"}, // arg0 <= arg1
{name: "Eq8"}, // arg0 == arg1
{name: "Eq16"},
{name: "Eq32"},
{name: "Eq64"},
{name: "Neq8"}, // arg0 != arg1
{name: "Neq16"},
{name: "Neq32"},
{name: "Neq64"},
{name: "Less8"}, // arg0 < arg1
{name: "Less8U"},
{name: "Less16"},
{name: "Less16U"},
{name: "Less32"},
{name: "Less32U"},
{name: "Less64"},
{name: "Less64U"},
{name: "Leq8"}, // arg0 <= arg1
{name: "Leq8U"},
{name: "Leq16"},
{name: "Leq16U"},
{name: "Leq32"},
{name: "Leq32U"},
{name: "Leq64"},
{name: "Leq64U"},
{name: "Greater8"}, // arg0 > arg1
{name: "Greater8U"},
{name: "Greater16"},
{name: "Greater16U"},
{name: "Greater32"},
{name: "Greater32U"},
{name: "Greater64"},
{name: "Greater64U"},
{name: "Geq8"}, // arg0 <= arg1
{name: "Geq8U"},
{name: "Geq16"},
{name: "Geq16U"},
{name: "Geq32"},
{name: "Geq32U"},
{name: "Geq64"},
{name: "Geq64U"},
// 1-input ops
{name: "Not"}, // !arg0
This diff is collapsed.
@@ -4,23 +4,42 @@ package ssa
func rewriteValuegeneric(v *Value, config *Config) bool {
switch v.Op {
case OpAdd:
// match: (Add <t> (Const [c]) (Const [d]))
// cond: is64BitInt(t)
case OpAdd64:
// match: (Add64 (Const [c]) (Const [d]))
// cond:
// result: (Const [c+d])
{
t := v.Type
if v.Args[0].Op != OpConst {
goto end279f4ea85ed10e5ffc5b53f9e060529b
goto endd2f4bfaaf6c937171a287b73e5c2f73e
}
c := v.Args[0].AuxInt
if v.Args[1].Op != OpConst {
goto end279f4ea85ed10e5ffc5b53f9e060529b
goto endd2f4bfaaf6c937171a287b73e5c2f73e
}
d := v.Args[1].AuxInt
if !(is64BitInt(t)) {
goto end279f4ea85ed10e5ffc5b53f9e060529b
v.Op = OpConst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c + d
return true
}
goto endd2f4bfaaf6c937171a287b73e5c2f73e
endd2f4bfaaf6c937171a287b73e5c2f73e:
;
case OpAdd64U:
// match: (Add64U (Const [c]) (Const [d]))
// cond:
// result: (Const [c+d])
{
if v.Args[0].Op != OpConst {
goto endfedc373d8be0243cb5dbbc948996fe3a
}
c := v.Args[0].AuxInt
if v.Args[1].Op != OpConst {
goto endfedc373d8be0243cb5dbbc948996fe3a
}
d := v.Args[1].AuxInt
v.Op = OpConst
v.AuxInt = 0
v.Aux = nil
@@ -28,8 +47,8 @@ func rewriteValuegeneric(v *Value, config *Config) bool {
v.AuxInt = c + d
return true
}
goto end279f4ea85ed10e5ffc5b53f9e060529b
end279f4ea85ed10e5ffc5b53f9e060529b:
goto endfedc373d8be0243cb5dbbc948996fe3a
endfedc373d8be0243cb5dbbc948996fe3a:
;
case OpArrayIndex:
// match: (ArrayIndex (Load ptr mem) idx)
@@ -60,12 +79,12 @@ func rewriteValuegeneric(v *Value, config *Config) bool {
case OpConst:
// match: (Const <t> {s})
// cond: t.IsString()
// result: (StringMake (OffPtr <TypeBytePtr> [2*config.ptrSize] (Addr <TypeBytePtr> {config.fe.StringSym(s.(string))} (SB <config.Uintptr>))) (Const <config.Uintptr> [int64(len(s.(string)))]))
// result: (StringMake (OffPtr <TypeBytePtr> [2*config.PtrSize] (Addr <TypeBytePtr> {config.fe.StringSym(s.(string))} (SB <config.Uintptr>))) (Const <config.Uintptr> [int64(len(s.(string)))]))
{
t := v.Type
s := v.Aux
if !(t.IsString()) {
goto end55cd8fd3b98a2459d0ee9d6cbb456b01
goto endedcb8bd24122d6a47bdc9b752460c344
}
v.Op = OpStringMake
v.AuxInt = 0
@@ -73,7 +92,7 @@ func rewriteValuegeneric(v *Value, config *Config) bool {
v.resetArgs()
v0 := v.Block.NewValue0(v.Line, OpOffPtr, TypeInvalid)
v0.Type = TypeBytePtr
v0.AuxInt = 2 * config.ptrSize
v0.AuxInt = 2 * config.PtrSize
v1 := v.Block.NewValue0(v.Line, OpAddr, TypeInvalid)
v1.Type = TypeBytePtr
v1.Aux = config.fe.StringSym(s.(string))
@@ -88,8 +107,8 @@ func rewriteValuegeneric(v *Value, config *Config) bool {
v.AddArg(v3)
return true
}
goto end55cd8fd3b98a2459d0ee9d6cbb456b01
end55cd8fd3b98a2459d0ee9d6cbb456b01:
goto endedcb8bd24122d6a47bdc9b752460c344
endedcb8bd24122d6a47bdc9b752460c344:
;
case OpIsInBounds:
// match: (IsInBounds (Const [c]) (Const [d]))
@@ -117,13 +136,13 @@ func rewriteValuegeneric(v *Value, config *Config) bool {
case OpLoad:
// match: (Load <t> ptr mem)
// cond: t.IsString()
// result: (StringMake (Load <TypeBytePtr> ptr mem) (Load <config.Uintptr> (OffPtr <TypeBytePtr> [config.ptrSize] ptr) mem))
// result: (StringMake (Load <TypeBytePtr> ptr mem) (Load <config.Uintptr> (OffPtr <TypeBytePtr> [config.PtrSize] ptr) mem))
{
t := v.Type
ptr := v.Args[0]
mem := v.Args[1]
if !(t.IsString()) {
goto endd0afd003b70d726a1c5bbaf51fe06182
goto endce3ba169a57b8a9f6b12751d49b4e23a
}
v.Op = OpStringMake
v.AuxInt = 0
@@ -138,15 +157,15 @@ func rewriteValuegeneric(v *Value, config *Config) bool {
v1.Type = config.Uintptr
v2 := v.Block.NewValue0(v.Line, OpOffPtr, TypeInvalid)
v2.Type = TypeBytePtr
v2.AuxInt = config.ptrSize
v2.AuxInt = config.PtrSize
v2.AddArg(ptr)
v1.AddArg(v2)
v1.AddArg(mem)
v.AddArg(v1)
return true
}
goto endd0afd003b70d726a1c5bbaf51fe06182
endd0afd003b70d726a1c5bbaf51fe06182:
goto endce3ba169a57b8a9f6b12751d49b4e23a
endce3ba169a57b8a9f6b12751d49b4e23a:
;
case OpMul:
// match: (Mul <t> (Const [c]) (Const [d]))
@@ -178,12 +197,12 @@ func rewriteValuegeneric(v *Value, config *Config) bool {
case OpPtrIndex:
// match: (PtrIndex <t> ptr idx)
// cond:
// result: (Add ptr (Mul <config.Uintptr> idx (Const <config.Uintptr> [t.Elem().Size()])))
// result: (AddPtr ptr (Mul <config.Uintptr> idx (Const <config.Uintptr> [t.Elem().Size()])))
{
t := v.Type
ptr := v.Args[0]
idx := v.Args[1]
v.Op = OpAdd
v.Op = OpAddPtr
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
@@ -198,16 +217,16 @@ func rewriteValuegeneric(v *Value, config *Config) bool {
v.AddArg(v0)
return true
}
goto end88c7c383675420d1581daeb899039fa8
end88c7c383675420d1581daeb899039fa8:
goto endc181347cd3c740e2a1da431a981fdd7e
endc181347cd3c740e2a1da431a981fdd7e:
;
case OpSliceCap:
// match: (SliceCap (Load ptr mem))
// cond:
// result: (Load (Add <ptr.Type> ptr (Const <config.Uintptr> [config.ptrSize*2])) mem)
// result: (Load (AddPtr <ptr.Type> ptr (Const <config.Uintptr> [config.PtrSize*2])) mem)
{
if v.Args[0].Op != OpLoad {
goto end919cfa3d3539eb2e06a435d5f89654b9
goto end83c0ff7760465a4184bad9e4b47f7be8
}
ptr := v.Args[0].Args[0]
mem := v.Args[0].Args[1]
@@ -215,27 +234,27 @@ func rewriteValuegeneric(v *Value, config *Config) bool {
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := v.Block.NewValue0(v.Line, OpAdd, TypeInvalid)
v0 := v.Block.NewValue0(v.Line, OpAddPtr, TypeInvalid)
v0.Type = ptr.Type
v0.AddArg(ptr)
v1 := v.Block.NewValue0(v.Line, OpConst, TypeInvalid)
v1.Type = config.Uintptr
v1.AuxInt = config.ptrSize * 2
v1.AuxInt = config.PtrSize * 2
v0.AddArg(v1)
v.AddArg(v0)
v.AddArg(mem)
return true
}
goto end919cfa3d3539eb2e06a435d5f89654b9
end919cfa3d3539eb2e06a435d5f89654b9:
goto end83c0ff7760465a4184bad9e4b47f7be8
end83c0ff7760465a4184bad9e4b47f7be8:
;
case OpSliceLen:
// match: (SliceLen (Load ptr mem))
// cond:
// result: (Load (Add <ptr.Type> ptr (Const <config.Uintptr> [config.ptrSize])) mem)
// result: (Load (AddPtr <ptr.Type> ptr (Const <config.Uintptr> [config.PtrSize])) mem)
{
if v.Args[0].Op != OpLoad {
goto end3d74a5ef07180a709a91052da88bcd01
goto end20579b262d017d875d579683996f0ef9
}
ptr := v.Args[0].Args[0]
mem := v.Args[0].Args[1]
@@ -243,19 +262,19 @@ func rewriteValuegeneric(v *Value, config *Config) bool {
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := v.Block.NewValue0(v.Line, OpAdd, TypeInvalid)
v0 := v.Block.NewValue0(v.Line, OpAddPtr, TypeInvalid)
v0.Type = ptr.Type
v0.AddArg(ptr)
v1 := v.Block.NewValue0(v.Line, OpConst, TypeInvalid)
v1.Type = config.Uintptr
v1.AuxInt = config.ptrSize
v1.AuxInt = config.PtrSize
v0.AddArg(v1)
v.AddArg(v0)
v.AddArg(mem)
return true
}
goto end3d74a5ef07180a709a91052da88bcd01
end3d74a5ef07180a709a91052da88bcd01:
goto end20579b262d017d875d579683996f0ef9
end20579b262d017d875d579683996f0ef9:
;
case OpSlicePtr:
// match: (SlicePtr (Load ptr mem))
@@ -311,13 +330,13 @@ func rewriteValuegeneric(v *Value, config *Config) bool {
;
// match: (Store dst str mem)
// cond: str.Type.IsString()
// result: (Store (OffPtr <TypeBytePtr> [config.ptrSize] dst) (StringLen <config.Uintptr> str) (Store <TypeMem> dst (StringPtr <TypeBytePtr> str) mem))
// result: (Store (OffPtr <TypeBytePtr> [config.PtrSize] dst) (StringLen <config.Uintptr> str) (Store <TypeMem> dst (StringPtr <TypeBytePtr> str) mem))
{
dst := v.Args[0]
str := v.Args[1]
mem := v.Args[2]
if !(str.Type.IsString()) {
goto end410559d97aed8018f820cd88723de442
goto endb47e037c1e5ac54c3a41d53163d8aef6
}
v.Op = OpStore
v.AuxInt = 0
@@ -325,7 +344,7 @@ func rewriteValuegeneric(v *Value, config *Config) bool {
v.resetArgs()
v0 := v.Block.NewValue0(v.Line, OpOffPtr, TypeInvalid)
v0.Type = TypeBytePtr
v0.AuxInt = config.ptrSize
v0.AuxInt = config.PtrSize
v0.AddArg(dst)
v.AddArg(v0)
v1 := v.Block.NewValue0(v.Line, OpStringLen, TypeInvalid)
@@ -343,8 +362,8 @@ func rewriteValuegeneric(v *Value, config *Config) bool {
v.AddArg(v2)
return true
}
goto end410559d97aed8018f820cd88723de442
end410559d97aed8018f820cd88723de442:
goto endb47e037c1e5ac54c3a41d53163d8aef6
endb47e037c1e5ac54c3a41d53163d8aef6:
;
case OpStringLen:
// match: (StringLen (StringMake _ len))
@@ -19,7 +19,7 @@ func TestSchedule(t *testing.T) {
Valu("mem3", OpStore, TypeInt64, 0, nil, "ptr", "sum", "mem2"),
Valu("l1", OpLoad, TypeInt64, 0, nil, "ptr", "mem1"),
Valu("l2", OpLoad, TypeInt64, 0, nil, "ptr", "mem2"),
Valu("sum", OpAdd, TypeInt64, 0, nil, "l1", "l2"),
Valu("sum", OpAdd64, TypeInt64, 0, nil, "l1", "l2"),
Goto("exit")),
Bloc("exit",
Exit("mem3"))),
@@ -10,17 +10,17 @@ import (
func TestShiftConstAMD64(t *testing.T) {
c := NewConfig("amd64", DummyFrontend{t})
fun := makeConstShiftFunc(c, 18, OpLsh, TypeUInt64)
fun := makeConstShiftFunc(c, 18, OpLsh64, TypeUInt64)
checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SHLQconst: 1, OpAMD64CMPQconst: 0, OpAMD64ANDQconst: 0})
fun = makeConstShiftFunc(c, 66, OpLsh, TypeUInt64)
fun = makeConstShiftFunc(c, 66, OpLsh64, TypeUInt64)
checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SHLQconst: 0, OpAMD64CMPQconst: 0, OpAMD64ANDQconst: 0})
fun = makeConstShiftFunc(c, 18, OpRsh, TypeUInt64)
fun = makeConstShiftFunc(c, 18, OpRsh64U, TypeUInt64)
checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SHRQconst: 1, OpAMD64CMPQconst: 0, OpAMD64ANDQconst: 0})
fun = makeConstShiftFunc(c, 66, OpRsh, TypeUInt64)
fun = makeConstShiftFunc(c, 66, OpRsh64U, TypeUInt64)
checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SHRQconst: 0, OpAMD64CMPQconst: 0, OpAMD64ANDQconst: 0})
fun = makeConstShiftFunc(c, 18, OpRsh, TypeInt64)
fun = makeConstShiftFunc(c, 18, OpRsh64, TypeInt64)
checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SARQconst: 1, OpAMD64CMPQconst: 0})
fun = makeConstShiftFunc(c, 66, OpRsh, TypeInt64)
fun = makeConstShiftFunc(c, 66, OpRsh64, TypeInt64)
checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SARQconst: 1, OpAMD64CMPQconst: 0})
}
@@ -82,8 +82,8 @@ func stackalloc(f *Func) {
}
}
n = align(n, f.Config.ptrSize)
n += f.Config.ptrSize // space for return address. TODO: arch-dependent
n = align(n, f.Config.PtrSize)
n += f.Config.PtrSize // space for return address. TODO: arch-dependent
f.RegAlloc = home
f.FrameSize = n