Commit 1cc5789d authored by Keith Randall

[dev.ssa] cmd/compile: lots of small rewrite optimizations

Small optimizations I noticed while looking at Giovanni's test cases.

More shifts by constants.
Indexed stores for smaller types.
Fold LEA into loads/stores.
More extending loads.
CMP $0 of AND -> TEST

Fix order of TEST ops.

Giovanni's test cases at https://gist.github.com/rasky/62fba94e3a20d1b05b2a

Change-Id: I7077bc0b5319bf05767eeb39f401f4bb4b39f635
Reviewed-on: https://go-review.googlesource.com/19086
Run-TryBot: Keith Randall <khr@golang.org>
Reviewed-by: Todd Neal <todd@tneal.org>
Reviewed-by: David Chase <drchase@google.com>
parent f962f330
......@@ -4003,13 +4003,18 @@ func (s *genState) genValue(v *ssa.Value) {
// Go assembler has swapped operands for UCOMISx relative to CMP,
// must account for that right here.
opregreg(v.Op.Asm(), regnum(v.Args[0]), regnum(v.Args[1]))
case ssa.OpAMD64CMPQconst, ssa.OpAMD64CMPLconst, ssa.OpAMD64CMPWconst, ssa.OpAMD64CMPBconst,
ssa.OpAMD64TESTQconst, ssa.OpAMD64TESTLconst, ssa.OpAMD64TESTWconst, ssa.OpAMD64TESTBconst:
case ssa.OpAMD64CMPQconst, ssa.OpAMD64CMPLconst, ssa.OpAMD64CMPWconst, ssa.OpAMD64CMPBconst:
p := Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = regnum(v.Args[0])
p.To.Type = obj.TYPE_CONST
p.To.Offset = v.AuxInt
case ssa.OpAMD64TESTQconst, ssa.OpAMD64TESTLconst, ssa.OpAMD64TESTWconst, ssa.OpAMD64TESTBconst:
p := Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
p.To.Reg = regnum(v.Args[0])
case ssa.OpAMD64MOVBconst, ssa.OpAMD64MOVWconst, ssa.OpAMD64MOVLconst, ssa.OpAMD64MOVQconst:
x := regnum(v)
p := Prog(v.Op.Asm())
......@@ -4040,7 +4045,7 @@ func (s *genState) genValue(v *ssa.Value) {
p.From.Val = math.Float64frombits(uint64(v.AuxInt))
p.To.Type = obj.TYPE_REG
p.To.Reg = x
case ssa.OpAMD64MOVQload, ssa.OpAMD64MOVSSload, ssa.OpAMD64MOVSDload, ssa.OpAMD64MOVLload, ssa.OpAMD64MOVWload, ssa.OpAMD64MOVBload, ssa.OpAMD64MOVBQSXload, ssa.OpAMD64MOVBQZXload, ssa.OpAMD64MOVOload:
case ssa.OpAMD64MOVQload, ssa.OpAMD64MOVSSload, ssa.OpAMD64MOVSDload, ssa.OpAMD64MOVLload, ssa.OpAMD64MOVWload, ssa.OpAMD64MOVBload, ssa.OpAMD64MOVBQSXload, ssa.OpAMD64MOVBQZXload, ssa.OpAMD64MOVWQSXload, ssa.OpAMD64MOVWQZXload, ssa.OpAMD64MOVLQSXload, ssa.OpAMD64MOVLQZXload, ssa.OpAMD64MOVOload:
p := Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = regnum(v.Args[0])
......@@ -4081,7 +4086,7 @@ func (s *genState) genValue(v *ssa.Value) {
p.To.Scale = 8
p.To.Index = regnum(v.Args[1])
addAux(&p.To, v)
case ssa.OpAMD64MOVSSstoreidx4:
case ssa.OpAMD64MOVSSstoreidx4, ssa.OpAMD64MOVLstoreidx4:
p := Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = regnum(v.Args[2])
......@@ -4090,6 +4095,24 @@ func (s *genState) genValue(v *ssa.Value) {
p.To.Scale = 4
p.To.Index = regnum(v.Args[1])
addAux(&p.To, v)
case ssa.OpAMD64MOVWstoreidx2:
p := Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = regnum(v.Args[2])
p.To.Type = obj.TYPE_MEM
p.To.Reg = regnum(v.Args[0])
p.To.Scale = 2
p.To.Index = regnum(v.Args[1])
addAux(&p.To, v)
case ssa.OpAMD64MOVBstoreidx1:
p := Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = regnum(v.Args[2])
p.To.Type = obj.TYPE_MEM
p.To.Reg = regnum(v.Args[0])
p.To.Scale = 1
p.To.Index = regnum(v.Args[1])
addAux(&p.To, v)
case ssa.OpAMD64MOVQstoreconst, ssa.OpAMD64MOVLstoreconst, ssa.OpAMD64MOVWstoreconst, ssa.OpAMD64MOVBstoreconst:
p := Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
......@@ -4365,7 +4388,9 @@ func (s *genState) genValue(v *ssa.Value) {
}
switch w.Op {
case ssa.OpAMD64MOVQload, ssa.OpAMD64MOVLload, ssa.OpAMD64MOVWload, ssa.OpAMD64MOVBload,
ssa.OpAMD64MOVQstore, ssa.OpAMD64MOVLstore, ssa.OpAMD64MOVWstore, ssa.OpAMD64MOVBstore:
ssa.OpAMD64MOVQstore, ssa.OpAMD64MOVLstore, ssa.OpAMD64MOVWstore, ssa.OpAMD64MOVBstore,
ssa.OpAMD64MOVBQSXload, ssa.OpAMD64MOVBQZXload, ssa.OpAMD64MOVWQSXload,
ssa.OpAMD64MOVWQZXload, ssa.OpAMD64MOVLQSXload, ssa.OpAMD64MOVLQZXload:
if w.Args[0] == v.Args[0] && w.Aux == nil && w.AuxInt >= 0 && w.AuxInt < minZeroPage {
if Debug_checknil != 0 && int(v.Line) > 1 {
Warnl(int(v.Line), "removed nil check")
......
......@@ -51,6 +51,8 @@ Optimizations (better compiled code)
Note that this is challenging for ops that generate flags
because flagalloc wants to move those instructions around for
flag regeneration.
- In forms like if ... { call } else { no call }, mark the call branch as unlikely.
- Non-constant rotate detection.
Optimizations (better compiler)
-------------------------------
......
......@@ -464,18 +464,63 @@
(XORB (MOVBconst [c]) x) -> (XORBconst [c] x)
(SHLQ x (MOVQconst [c])) -> (SHLQconst [c&63] x)
(SHLQ x (MOVLconst [c])) -> (SHLQconst [c&63] x)
(SHLQ x (MOVWconst [c])) -> (SHLQconst [c&63] x)
(SHLQ x (MOVBconst [c])) -> (SHLQconst [c&63] x)
(SHLL x (MOVQconst [c])) -> (SHLLconst [c&31] x)
(SHLL x (MOVLconst [c])) -> (SHLLconst [c&31] x)
(SHLL x (MOVWconst [c])) -> (SHLLconst [c&31] x)
(SHLL x (MOVBconst [c])) -> (SHLLconst [c&31] x)
(SHLW x (MOVQconst [c])) -> (SHLWconst [c&31] x)
(SHLW x (MOVLconst [c])) -> (SHLWconst [c&31] x)
(SHLW x (MOVWconst [c])) -> (SHLWconst [c&31] x)
(SHLW x (MOVBconst [c])) -> (SHLWconst [c&31] x)
(SHLB x (MOVQconst [c])) -> (SHLBconst [c&31] x)
(SHLB x (MOVLconst [c])) -> (SHLBconst [c&31] x)
(SHLB x (MOVWconst [c])) -> (SHLBconst [c&31] x)
(SHLB x (MOVBconst [c])) -> (SHLBconst [c&31] x)
(SHRQ x (MOVQconst [c])) -> (SHRQconst [c&63] x)
(SHRQ x (MOVLconst [c])) -> (SHRQconst [c&63] x)
(SHRQ x (MOVWconst [c])) -> (SHRQconst [c&63] x)
(SHRQ x (MOVBconst [c])) -> (SHRQconst [c&63] x)
(SHRL x (MOVQconst [c])) -> (SHRLconst [c&31] x)
(SHRL x (MOVLconst [c])) -> (SHRLconst [c&31] x)
(SHRL x (MOVWconst [c])) -> (SHRLconst [c&31] x)
(SHRL x (MOVBconst [c])) -> (SHRLconst [c&31] x)
(SHRW x (MOVQconst [c])) -> (SHRWconst [c&31] x)
(SHRW x (MOVLconst [c])) -> (SHRWconst [c&31] x)
(SHRW x (MOVWconst [c])) -> (SHRWconst [c&31] x)
(SHRW x (MOVBconst [c])) -> (SHRWconst [c&31] x)
(SHRB x (MOVQconst [c])) -> (SHRBconst [c&31] x)
(SHRB x (MOVLconst [c])) -> (SHRBconst [c&31] x)
(SHRB x (MOVWconst [c])) -> (SHRBconst [c&31] x)
(SHRB x (MOVBconst [c])) -> (SHRBconst [c&31] x)
(SARQ x (MOVQconst [c])) -> (SARQconst [c&63] x)
(SARQ x (MOVLconst [c])) -> (SARQconst [c&63] x)
(SARQ x (MOVWconst [c])) -> (SARQconst [c&63] x)
(SARQ x (MOVBconst [c])) -> (SARQconst [c&63] x)
(SARL x (MOVQconst [c])) -> (SARLconst [c&31] x)
(SARL x (MOVLconst [c])) -> (SARLconst [c&31] x)
(SARL x (MOVWconst [c])) -> (SARLconst [c&31] x)
(SARL x (MOVBconst [c])) -> (SARLconst [c&31] x)
(SARW x (MOVQconst [c])) -> (SARWconst [c&31] x)
(SARW x (MOVLconst [c])) -> (SARWconst [c&31] x)
(SARW x (MOVWconst [c])) -> (SARWconst [c&31] x)
(SARW x (MOVBconst [c])) -> (SARWconst [c&31] x)
(SARB x (MOVQconst [c])) -> (SARBconst [c&31] x)
(SARB x (MOVLconst [c])) -> (SARBconst [c&31] x)
(SARB x (MOVWconst [c])) -> (SARBconst [c&31] x)
(SARB x (MOVBconst [c])) -> (SARBconst [c&31] x)
// Note: the word and byte shifts keep the low 5 bits (not the low 4 or 3 bits)
......@@ -524,7 +569,18 @@
// multiple memory values alive simultaneously.
(MOVBQSX (MOVBload [off] {sym} ptr mem)) -> @v.Args[0].Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
(MOVBQZX (MOVBload [off] {sym} ptr mem)) -> @v.Args[0].Block (MOVBQZXload <v.Type> [off] {sym} ptr mem)
// TODO: more
(MOVWQSX (MOVWload [off] {sym} ptr mem)) -> @v.Args[0].Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
(MOVWQZX (MOVWload [off] {sym} ptr mem)) -> @v.Args[0].Block (MOVWQZXload <v.Type> [off] {sym} ptr mem)
(MOVLQSX (MOVLload [off] {sym} ptr mem)) -> @v.Args[0].Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
(MOVLQZX (MOVLload [off] {sym} ptr mem)) -> @v.Args[0].Block (MOVLQZXload <v.Type> [off] {sym} ptr mem)
// Fold extensions and ANDs together.
(MOVBQZX (ANDBconst [c] x)) -> (ANDQconst [c & 0xff] x)
(MOVWQZX (ANDWconst [c] x)) -> (ANDQconst [c & 0xffff] x)
(MOVLQZX (ANDLconst [c] x)) -> (ANDQconst [c & 0xffffffff] x)
(MOVBQSX (ANDBconst [c] x)) && c & 0x80 == 0 -> (ANDQconst [c & 0x7f] x)
(MOVWQSX (ANDWconst [c] x)) && c & 0x8000 == 0 -> (ANDQconst [c & 0x7fff] x)
(MOVLQSX (ANDLconst [c] x)) && c & 0x80000000 == 0 -> (ANDQconst [c & 0x7fffffff] x)
// Don't extend before storing
(MOVLstore [off] {sym} ptr (MOVLQSX x) mem) -> (MOVLstore [off] {sym} ptr x mem)
......@@ -623,22 +679,63 @@
(MOVSSstoreidx4 [off1] {sym} (ADDQconst [off2] {sym} ptr) idx val mem) -> (MOVSSstoreidx4 [addOff(off1, off2)] {sym} ptr idx val mem)
(MOVSDloadidx8 [off1] {sym} (ADDQconst [off2] {sym} ptr) idx mem) -> (MOVSDloadidx8 [addOff(off1, off2)] {sym} ptr idx mem)
(MOVSDstoreidx8 [off1] {sym} (ADDQconst [off2] {sym} ptr) idx val mem) -> (MOVSDstoreidx8 [addOff(off1, off2)] {sym} ptr idx val mem)
(MOVLstoreidx4 [off1] {sym} (ADDQconst [off2] ptr) idx val mem) -> (MOVLstoreidx4 [addOff(off1, off2)] {sym} ptr idx val mem)
(MOVWstoreidx2 [off1] {sym} (ADDQconst [off2] ptr) idx val mem) -> (MOVWstoreidx2 [addOff(off1, off2)] {sym} ptr idx val mem)
(MOVBstoreidx1 [off1] {sym} (ADDQconst [off2] ptr) idx val mem) -> (MOVBstoreidx1 [addOff(off1, off2)] {sym} ptr idx val mem)
(MOVQload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
(MOVQloadidx8 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) && canMergeSym(sym1, sym2) ->
(MOVQstoreidx8 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVSSload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
(MOVSSloadidx4 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVSSstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) && canMergeSym(sym1, sym2) ->
(MOVSSstoreidx4 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVSDload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
(MOVSDloadidx8 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) && canMergeSym(sym1, sym2) ->
(MOVBstoreidx1 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVWstore [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) val mem) && canMergeSym(sym1, sym2) ->
(MOVWstoreidx2 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) && canMergeSym(sym1, sym2) ->
(MOVLstoreidx4 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) && canMergeSym(sym1, sym2) ->
(MOVQstoreidx8 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVSSstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) && canMergeSym(sym1, sym2) ->
(MOVSSstoreidx4 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVSDstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) && canMergeSym(sym1, sym2) ->
(MOVSDstoreidx8 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVBstore [off] {sym} (ADDQ ptr idx) val mem) -> (MOVBstoreidx1 [off] {sym} ptr idx val mem)
// fold LEAQs together
(LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x)) && canMergeSym(sym1, sym2) ->
(LEAQ [addOff(off1,off2)] {mergeSym(sym1,sym2)} x)
// LEAQ into LEAQ1
(LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
(LEAQ1 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
(LEAQ1 [off1] {sym1} x (LEAQ [off2] {sym2} y)) && canMergeSym(sym1, sym2) && y.Op != OpSB ->
(LEAQ1 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
// LEAQ1 into LEAQ
(LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y)) && canMergeSym(sym1, sym2) ->
(LEAQ1 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
// LEAQ into LEAQ[248]
(LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
(LEAQ2 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
(LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
(LEAQ4 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
(LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
(LEAQ8 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
// LEAQ[248] into LEAQ
(LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y)) && canMergeSym(sym1, sym2) ->
(LEAQ2 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
(LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y)) && canMergeSym(sym1, sym2) ->
(LEAQ4 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
(LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y)) && canMergeSym(sym1, sym2) ->
(LEAQ8 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
// lower Zero instructions with word sizes
(Zero [0] _ mem) -> mem
(Zero [1] destptr mem) -> (MOVBstoreconst [0] destptr mem)
......@@ -963,3 +1060,12 @@
(XORW x x) -> (MOVWconst [0])
(XORB x x) -> (MOVBconst [0])
// checking AND against 0.
(CMPQconst (ANDQ x y) [0]) -> (TESTQ x y)
(CMPLconst (ANDL x y) [0]) -> (TESTL x y)
(CMPWconst (ANDW x y) [0]) -> (TESTW x y)
(CMPBconst (ANDB x y) [0]) -> (TESTB x y)
(CMPQconst (ANDQconst [c] x) [0]) -> (TESTQconst [c] x)
(CMPLconst (ANDLconst [c] x) [0]) -> (TESTLconst [c] x)
(CMPWconst (ANDWconst [c] x) [0]) -> (TESTWconst [c] x)
(CMPBconst (ANDBconst [c] x) [0]) -> (TESTBconst [c] x)
......@@ -368,14 +368,22 @@ func init() {
{name: "MOVBQSXload", reg: gpload, asm: "MOVBQSX"}, // ditto, extend to int64
{name: "MOVBQZXload", reg: gpload, asm: "MOVBQZX"}, // ditto, extend to uint64
{name: "MOVWload", reg: gpload, asm: "MOVW", typ: "UInt16"}, // load 2 bytes from arg0+auxint+aux. arg1=mem
{name: "MOVWQSXload", reg: gpload, asm: "MOVWQSX"}, // ditto, extend to int64
{name: "MOVWQZXload", reg: gpload, asm: "MOVWQZX"}, // ditto, extend to uint64
{name: "MOVLload", reg: gpload, asm: "MOVL", typ: "UInt32"}, // load 4 bytes from arg0+auxint+aux. arg1=mem
{name: "MOVLQSXload", reg: gpload, asm: "MOVLQSX"}, // ditto, extend to int64
{name: "MOVLQZXload", reg: gpload, asm: "MOVLQZX"}, // ditto, extend to uint64
{name: "MOVQload", reg: gpload, asm: "MOVQ", typ: "UInt64"}, // load 8 bytes from arg0+auxint+aux. arg1=mem
{name: "MOVQloadidx8", reg: gploadidx, asm: "MOVQ"}, // load 8 bytes from arg0+8*arg1+auxint+aux. arg2=mem
{name: "MOVBstore", reg: gpstore, asm: "MOVB", typ: "Mem"}, // store byte in arg1 to arg0+auxint+aux. arg2=mem
{name: "MOVWstore", reg: gpstore, asm: "MOVW", typ: "Mem"}, // store 2 bytes in arg1 to arg0+auxint+aux. arg2=mem
{name: "MOVLstore", reg: gpstore, asm: "MOVL", typ: "Mem"}, // store 4 bytes in arg1 to arg0+auxint+aux. arg2=mem
{name: "MOVQstore", reg: gpstore, asm: "MOVQ", typ: "Mem"}, // store 8 bytes in arg1 to arg0+auxint+aux. arg2=mem
{name: "MOVQstoreidx8", reg: gpstoreidx, asm: "MOVQ"}, // store 8 bytes in arg2 to arg0+8*arg1+auxint+aux. arg3=mem
{name: "MOVBstoreidx1", reg: gpstoreidx, asm: "MOVB"}, // store byte in arg2 to arg0+arg1+auxint+aux. arg3=mem
{name: "MOVWstoreidx2", reg: gpstoreidx, asm: "MOVW"}, // store 2 bytes in arg2 to arg0+2*arg1+auxint+aux. arg3=mem
{name: "MOVLstoreidx4", reg: gpstoreidx, asm: "MOVL"}, // store 4 bytes in arg2 to arg0+4*arg1+auxint+aux. arg3=mem
{name: "MOVQstoreidx8", reg: gpstoreidx, asm: "MOVQ"}, // store 8 bytes in arg2 to arg0+8*arg1+auxint+aux. arg3=mem
{name: "MOVOload", reg: fpload, asm: "MOVUPS", typ: "Int128"}, // load 16 bytes from arg0+auxint+aux. arg1=mem
{name: "MOVOstore", reg: fpstore, asm: "MOVUPS", typ: "Mem"}, // store 16 bytes in arg1 to arg0+auxint+aux. arg2=mem
......
......@@ -254,13 +254,20 @@ const (
OpAMD64MOVBQSXload
OpAMD64MOVBQZXload
OpAMD64MOVWload
OpAMD64MOVWQSXload
OpAMD64MOVWQZXload
OpAMD64MOVLload
OpAMD64MOVLQSXload
OpAMD64MOVLQZXload
OpAMD64MOVQload
OpAMD64MOVQloadidx8
OpAMD64MOVBstore
OpAMD64MOVWstore
OpAMD64MOVLstore
OpAMD64MOVQstore
OpAMD64MOVBstoreidx1
OpAMD64MOVWstoreidx2
OpAMD64MOVLstoreidx4
OpAMD64MOVQstoreidx8
OpAMD64MOVOload
OpAMD64MOVOstore
......@@ -2966,6 +2973,30 @@ var opcodeTable = [...]opInfo{
},
},
},
{
name: "MOVWQSXload",
asm: x86.AMOVWQSX,
reg: regInfo{
inputs: []inputInfo{
{0, 4295032831}, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
},
outputs: []regMask{
65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
},
},
},
{
name: "MOVWQZXload",
asm: x86.AMOVWQZX,
reg: regInfo{
inputs: []inputInfo{
{0, 4295032831}, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
},
outputs: []regMask{
65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
},
},
},
{
name: "MOVLload",
asm: x86.AMOVL,
......@@ -2978,6 +3009,30 @@ var opcodeTable = [...]opInfo{
},
},
},
{
name: "MOVLQSXload",
asm: x86.AMOVLQSX,
reg: regInfo{
inputs: []inputInfo{
{0, 4295032831}, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
},
outputs: []regMask{
65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
},
},
},
{
name: "MOVLQZXload",
asm: x86.AMOVLQZX,
reg: regInfo{
inputs: []inputInfo{
{0, 4295032831}, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
},
outputs: []regMask{
65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
},
},
},
{
name: "MOVQload",
asm: x86.AMOVQ,
......@@ -3043,6 +3098,39 @@ var opcodeTable = [...]opInfo{
},
},
},
{
name: "MOVBstoreidx1",
asm: x86.AMOVB,
reg: regInfo{
inputs: []inputInfo{
{1, 65535}, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
{2, 65535}, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
{0, 4295032831}, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
},
},
},
{
name: "MOVWstoreidx2",
asm: x86.AMOVW,
reg: regInfo{
inputs: []inputInfo{
{1, 65535}, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
{2, 65535}, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
{0, 4295032831}, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
},
},
},
{
name: "MOVLstoreidx4",
asm: x86.AMOVL,
reg: regInfo{
inputs: []inputInfo{
{1, 65535}, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
{2, 65535}, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
{0, 4295032831}, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
},
},
},
{
name: "MOVQstoreidx8",
asm: x86.AMOVQ,
......
......@@ -231,6 +231,16 @@ func rewriteValueAMD64(v *Value, config *Config) bool {
return rewriteValueAMD64_OpIsNonNil(v, config)
case OpIsSliceInBounds:
return rewriteValueAMD64_OpIsSliceInBounds(v, config)
case OpAMD64LEAQ:
return rewriteValueAMD64_OpAMD64LEAQ(v, config)
case OpAMD64LEAQ1:
return rewriteValueAMD64_OpAMD64LEAQ1(v, config)
case OpAMD64LEAQ2:
return rewriteValueAMD64_OpAMD64LEAQ2(v, config)
case OpAMD64LEAQ4:
return rewriteValueAMD64_OpAMD64LEAQ4(v, config)
case OpAMD64LEAQ8:
return rewriteValueAMD64_OpAMD64LEAQ8(v, config)
case OpLeq16:
return rewriteValueAMD64_OpLeq16(v, config)
case OpLeq16U:
......@@ -323,12 +333,20 @@ func rewriteValueAMD64(v *Value, config *Config) bool {
return rewriteValueAMD64_OpAMD64MOVBstore(v, config)
case OpAMD64MOVBstoreconst:
return rewriteValueAMD64_OpAMD64MOVBstoreconst(v, config)
case OpAMD64MOVBstoreidx1:
return rewriteValueAMD64_OpAMD64MOVBstoreidx1(v, config)
case OpAMD64MOVLQSX:
return rewriteValueAMD64_OpAMD64MOVLQSX(v, config)
case OpAMD64MOVLQZX:
return rewriteValueAMD64_OpAMD64MOVLQZX(v, config)
case OpAMD64MOVLload:
return rewriteValueAMD64_OpAMD64MOVLload(v, config)
case OpAMD64MOVLstore:
return rewriteValueAMD64_OpAMD64MOVLstore(v, config)
case OpAMD64MOVLstoreconst:
return rewriteValueAMD64_OpAMD64MOVLstoreconst(v, config)
case OpAMD64MOVLstoreidx4:
return rewriteValueAMD64_OpAMD64MOVLstoreidx4(v, config)
case OpAMD64MOVOload:
return rewriteValueAMD64_OpAMD64MOVOload(v, config)
case OpAMD64MOVOstore:
......@@ -359,12 +377,18 @@ func rewriteValueAMD64(v *Value, config *Config) bool {
return rewriteValueAMD64_OpAMD64MOVSSstore(v, config)
case OpAMD64MOVSSstoreidx4:
return rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v, config)
case OpAMD64MOVWQSX:
return rewriteValueAMD64_OpAMD64MOVWQSX(v, config)
case OpAMD64MOVWQZX:
return rewriteValueAMD64_OpAMD64MOVWQZX(v, config)
case OpAMD64MOVWload:
return rewriteValueAMD64_OpAMD64MOVWload(v, config)
case OpAMD64MOVWstore:
return rewriteValueAMD64_OpAMD64MOVWstore(v, config)
case OpAMD64MOVWstoreconst:
return rewriteValueAMD64_OpAMD64MOVWstoreconst(v, config)
case OpAMD64MOVWstoreidx2:
return rewriteValueAMD64_OpAMD64MOVWstoreidx2(v, config)
case OpAMD64MULB:
return rewriteValueAMD64_OpAMD64MULB(v, config)
case OpAMD64MULBconst:
......@@ -2303,6 +2327,52 @@ endac1c49c82fb6b76dd324042c4588973c:
}
goto end82aa9d89330cb5dc58592048bfc16ebc
end82aa9d89330cb5dc58592048bfc16ebc:
;
// match: (CMPBconst [0] (ANDB x y))
// cond:
// result: (TESTB x y)
{
if v.AuxInt != 0 {
goto end30c06897ce79b745c782650c71157f7b
}
if v.Args[0].Op != OpAMD64ANDB {
goto end30c06897ce79b745c782650c71157f7b
}
x := v.Args[0].Args[0]
y := v.Args[0].Args[1]
v.Op = OpAMD64TESTB
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto end30c06897ce79b745c782650c71157f7b
end30c06897ce79b745c782650c71157f7b:
;
// match: (CMPBconst [0] (ANDBconst [c] x))
// cond:
// result: (TESTBconst [c] x)
{
if v.AuxInt != 0 {
goto endfc700b49578635afa44d447c3ef97859
}
if v.Args[0].Op != OpAMD64ANDBconst {
goto endfc700b49578635afa44d447c3ef97859
}
c := v.Args[0].AuxInt
x := v.Args[0].Args[0]
v.Op = OpAMD64TESTBconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto endfc700b49578635afa44d447c3ef97859
endfc700b49578635afa44d447c3ef97859:
;
return false
}
......@@ -2482,6 +2552,52 @@ endc7b8e86e537d6e106e237023dc2c9a7b:
}
goto endf202b9830a1e45f3888f2598c762c702
endf202b9830a1e45f3888f2598c762c702:
;
// match: (CMPLconst [0] (ANDL x y))
// cond:
// result: (TESTL x y)
{
if v.AuxInt != 0 {
goto endb730012ce2555c10f2918eed023dd6f3
}
if v.Args[0].Op != OpAMD64ANDL {
goto endb730012ce2555c10f2918eed023dd6f3
}
x := v.Args[0].Args[0]
y := v.Args[0].Args[1]
v.Op = OpAMD64TESTL
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto endb730012ce2555c10f2918eed023dd6f3
endb730012ce2555c10f2918eed023dd6f3:
;
// match: (CMPLconst [0] (ANDLconst [c] x))
// cond:
// result: (TESTLconst [c] x)
{
if v.AuxInt != 0 {
goto enda56a89f365433eb9e15b0c9696ce5afb
}
if v.Args[0].Op != OpAMD64ANDLconst {
goto enda56a89f365433eb9e15b0c9696ce5afb
}
c := v.Args[0].AuxInt
x := v.Args[0].Args[0]
v.Op = OpAMD64TESTLconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto enda56a89f365433eb9e15b0c9696ce5afb
enda56a89f365433eb9e15b0c9696ce5afb:
;
return false
}
......@@ -2667,6 +2783,52 @@ end1248b87e4a141c78bc8eff05d3fac70e:
}
goto end934098fb12e383829b654938269abc12
end934098fb12e383829b654938269abc12:
;
// match: (CMPQconst [0] (ANDQ x y))
// cond:
// result: (TESTQ x y)
{
if v.AuxInt != 0 {
goto end9f63614ab4b6b51b299dcfacae096b23
}
if v.Args[0].Op != OpAMD64ANDQ {
goto end9f63614ab4b6b51b299dcfacae096b23
}
x := v.Args[0].Args[0]
y := v.Args[0].Args[1]
v.Op = OpAMD64TESTQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto end9f63614ab4b6b51b299dcfacae096b23
end9f63614ab4b6b51b299dcfacae096b23:
;
// match: (CMPQconst [0] (ANDQconst [c] x))
// cond:
// result: (TESTQconst [c] x)
{
if v.AuxInt != 0 {
goto enda5aa8044be9d61e9e149558e9ec8ca83
}
if v.Args[0].Op != OpAMD64ANDQconst {
goto enda5aa8044be9d61e9e149558e9ec8ca83
}
c := v.Args[0].AuxInt
x := v.Args[0].Args[0]
v.Op = OpAMD64TESTQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto enda5aa8044be9d61e9e149558e9ec8ca83
enda5aa8044be9d61e9e149558e9ec8ca83:
;
return false
}
......@@ -2846,6 +3008,52 @@ end4493f5af38d242ebb4bc2f64055a0854:
}
goto endfcea07d93ded49b0e02d5fa0059309a4
endfcea07d93ded49b0e02d5fa0059309a4:
;
// match: (CMPWconst [0] (ANDW x y))
// cond:
// result: (TESTW x y)
{
if v.AuxInt != 0 {
goto endd9d4754c561a7bd11697a51d800f8eca
}
if v.Args[0].Op != OpAMD64ANDW {
goto endd9d4754c561a7bd11697a51d800f8eca
}
x := v.Args[0].Args[0]
y := v.Args[0].Args[1]
v.Op = OpAMD64TESTW
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AddArg(x)
v.AddArg(y)
return true
}
goto endd9d4754c561a7bd11697a51d800f8eca
endd9d4754c561a7bd11697a51d800f8eca:
;
// match: (CMPWconst [0] (ANDWconst [c] x))
// cond:
// result: (TESTWconst [c] x)
{
if v.AuxInt != 0 {
goto endb532b10789c7ce4cedeb17af417ceb2b
}
if v.Args[0].Op != OpAMD64ANDWconst {
goto endb532b10789c7ce4cedeb17af417ceb2b
}
c := v.Args[0].AuxInt
x := v.Args[0].Args[0]
v.Op = OpAMD64TESTWconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c
v.AddArg(x)
return true
}
goto endb532b10789c7ce4cedeb17af417ceb2b
endb532b10789c7ce4cedeb17af417ceb2b:
;
return false
}
......@@ -4582,109 +4790,422 @@ end02799ad95fe7fb5ce3c2c8ab313b737c:
;
return false
}
func rewriteValueAMD64_OpLeq16(v *Value, config *Config) bool {
func rewriteValueAMD64_OpAMD64LEAQ(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Leq16 x y)
// cond:
// result: (SETLE (CMPW x y))
// match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x))
// cond: canMergeSym(sym1, sym2)
// result: (LEAQ [addOff(off1,off2)] {mergeSym(sym1,sym2)} x)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETLE
off1 := v.AuxInt
sym1 := v.Aux
if v.Args[0].Op != OpAMD64LEAQ {
goto end2e2249051d6776a92bcb0d83107e0d82
}
off2 := v.Args[0].AuxInt
sym2 := v.Args[0].Aux
x := v.Args[0].Args[0]
if !(canMergeSym(sym1, sym2)) {
goto end2e2249051d6776a92bcb0d83107e0d82
}
v.Op = OpAMD64LEAQ
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeInvalid)
v0.AddArg(x)
v0.AddArg(y)
v0.Type = TypeFlags
v.AddArg(v0)
v.AuxInt = addOff(off1, off2)
v.Aux = mergeSym(sym1, sym2)
v.AddArg(x)
return true
}
goto end586c647ca6bb8ec725eea917c743d1ea
end586c647ca6bb8ec725eea917c743d1ea:
goto end2e2249051d6776a92bcb0d83107e0d82
end2e2249051d6776a92bcb0d83107e0d82:
;
return false
}
func rewriteValueAMD64_OpLeq16U(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Leq16U x y)
// cond:
// result: (SETBE (CMPW x y))
// match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y))
// cond: canMergeSym(sym1, sym2)
// result: (LEAQ1 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETBE
off1 := v.AuxInt
sym1 := v.Aux
if v.Args[0].Op != OpAMD64LEAQ1 {
goto end4e2502574680cc8e02dcc07561e96ef9
}
off2 := v.Args[0].AuxInt
sym2 := v.Args[0].Aux
x := v.Args[0].Args[0]
y := v.Args[0].Args[1]
if !(canMergeSym(sym1, sym2)) {
goto end4e2502574680cc8e02dcc07561e96ef9
}
v.Op = OpAMD64LEAQ1
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeInvalid)
v0.AddArg(x)
v0.AddArg(y)
v0.Type = TypeFlags
v.AddArg(v0)
v.AuxInt = addOff(off1, off2)
v.Aux = mergeSym(sym1, sym2)
v.AddArg(x)
v.AddArg(y)
return true
}
goto end9c24a81bc6a4a92267bd6638362dfbfc
end9c24a81bc6a4a92267bd6638362dfbfc:
goto end4e2502574680cc8e02dcc07561e96ef9
end4e2502574680cc8e02dcc07561e96ef9:
;
return false
}
func rewriteValueAMD64_OpLeq32(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Leq32 x y)
// cond:
// result: (SETLE (CMPL x y))
// match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y))
// cond: canMergeSym(sym1, sym2)
// result: (LEAQ2 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETLE
off1 := v.AuxInt
sym1 := v.Aux
if v.Args[0].Op != OpAMD64LEAQ2 {
goto end92e54b1fbb5ba0b17a6006fe56b4d57b
}
off2 := v.Args[0].AuxInt
sym2 := v.Args[0].Aux
x := v.Args[0].Args[0]
y := v.Args[0].Args[1]
if !(canMergeSym(sym1, sym2)) {
goto end92e54b1fbb5ba0b17a6006fe56b4d57b
}
v.Op = OpAMD64LEAQ2
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeInvalid)
v0.AddArg(x)
v0.AddArg(y)
v0.Type = TypeFlags
v.AddArg(v0)
v.AuxInt = addOff(off1, off2)
v.Aux = mergeSym(sym1, sym2)
v.AddArg(x)
v.AddArg(y)
return true
}
goto end595ee99a9fc3460b2748b9129b139f88
end595ee99a9fc3460b2748b9129b139f88:
goto end92e54b1fbb5ba0b17a6006fe56b4d57b
end92e54b1fbb5ba0b17a6006fe56b4d57b:
;
return false
}
func rewriteValueAMD64_OpLeq32F(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Leq32F x y)
// cond:
// result: (SETGEF (UCOMISS y x))
// match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y))
// cond: canMergeSym(sym1, sym2)
// result: (LEAQ4 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
{
x := v.Args[0]
y := v.Args[1]
v.Op = OpAMD64SETGEF
off1 := v.AuxInt
sym1 := v.Aux
if v.Args[0].Op != OpAMD64LEAQ4 {
goto end5da4c89d542d34d0d7f8848c3ea0fead
}
off2 := v.Args[0].AuxInt
sym2 := v.Args[0].Aux
x := v.Args[0].Args[0]
y := v.Args[0].Args[1]
if !(canMergeSym(sym1, sym2)) {
goto end5da4c89d542d34d0d7f8848c3ea0fead
}
v.Op = OpAMD64LEAQ4
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeInvalid)
v0.AddArg(y)
v0.AddArg(x)
v0.Type = TypeFlags
v.AddArg(v0)
v.AuxInt = addOff(off1, off2)
v.Aux = mergeSym(sym1, sym2)
v.AddArg(x)
v.AddArg(y)
return true
}
goto endfee4b989a80cc43328b24f7017e80a17
endfee4b989a80cc43328b24f7017e80a17:
goto end5da4c89d542d34d0d7f8848c3ea0fead
end5da4c89d542d34d0d7f8848c3ea0fead:
;
return false
}
func rewriteValueAMD64_OpLeq32U(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y))
// cond: canMergeSym(sym1, sym2)
// result: (LEAQ8 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
{
off1 := v.AuxInt
sym1 := v.Aux
if v.Args[0].Op != OpAMD64LEAQ8 {
goto endc051937df5f12598e76c0923b5a60a39
}
off2 := v.Args[0].AuxInt
sym2 := v.Args[0].Aux
x := v.Args[0].Args[0]
y := v.Args[0].Args[1]
if !(canMergeSym(sym1, sym2)) {
goto endc051937df5f12598e76c0923b5a60a39
}
v.Op = OpAMD64LEAQ8
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(off1, off2)
v.Aux = mergeSym(sym1, sym2)
v.AddArg(x)
v.AddArg(y)
return true
}
goto endc051937df5f12598e76c0923b5a60a39
endc051937df5f12598e76c0923b5a60a39:
;
return false
}
// rewriteValueAMD64_OpAMD64LEAQ1 folds a plain LEAQ feeding either operand of
// a LEAQ1 into a single LEAQ1 with a merged offset and symbol. It reports
// whether v was rewritten.
func rewriteValueAMD64_OpAMD64LEAQ1(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ1 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
	if inner := v.Args[0]; inner.Op == OpAMD64LEAQ {
		off1 := v.AuxInt
		sym1 := v.Aux
		off2 := inner.AuxInt
		sym2 := inner.Aux
		x := inner.Args[0]
		y := v.Args[1]
		// x must not be SB: SB can only appear as the base register.
		if canMergeSym(sym1, sym2) && x.Op != OpSB {
			v.Op = OpAMD64LEAQ1
			v.Aux = nil
			v.resetArgs()
			v.AuxInt = addOff(off1, off2)
			v.Aux = mergeSym(sym1, sym2)
			v.AddArg(x)
			v.AddArg(y)
			return true
		}
	}
	// match: (LEAQ1 [off1] {sym1} x (LEAQ [off2] {sym2} y))
	// cond: canMergeSym(sym1, sym2) && y.Op != OpSB
	// result: (LEAQ1 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
	if inner := v.Args[1]; inner.Op == OpAMD64LEAQ {
		off1 := v.AuxInt
		sym1 := v.Aux
		x := v.Args[0]
		off2 := inner.AuxInt
		sym2 := inner.Aux
		y := inner.Args[0]
		// y must not be SB: SB can only appear as the base register.
		if canMergeSym(sym1, sym2) && y.Op != OpSB {
			v.Op = OpAMD64LEAQ1
			v.Aux = nil
			v.resetArgs()
			v.AuxInt = addOff(off1, off2)
			v.Aux = mergeSym(sym1, sym2)
			v.AddArg(x)
			v.AddArg(y)
			return true
		}
	}
	return false
}
// rewriteValueAMD64_OpAMD64LEAQ2 folds a plain LEAQ base into a LEAQ2,
// merging offsets and symbols. It reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64LEAQ2(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ2 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
	if inner := v.Args[0]; inner.Op == OpAMD64LEAQ {
		off1 := v.AuxInt
		sym1 := v.Aux
		off2 := inner.AuxInt
		sym2 := inner.Aux
		x := inner.Args[0]
		y := v.Args[1]
		// x must not be SB: SB can only appear as the base register.
		if canMergeSym(sym1, sym2) && x.Op != OpSB {
			v.Op = OpAMD64LEAQ2
			v.Aux = nil
			v.resetArgs()
			v.AuxInt = addOff(off1, off2)
			v.Aux = mergeSym(sym1, sym2)
			v.AddArg(x)
			v.AddArg(y)
			return true
		}
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAQ4(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ4 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
	//
	// Fold a plain LEAQ feeding the base of a LEAQ4 into a single
	// scaled LEAQ4 with the combined offset and merged symbol.
	if base := v.Args[0]; base.Op == OpAMD64LEAQ {
		off1 := v.AuxInt
		sym1 := v.Aux
		off2 := base.AuxInt
		sym2 := base.Aux
		x := base.Args[0]
		y := v.Args[1]
		if canMergeSym(sym1, sym2) && x.Op != OpSB {
			v.Op = OpAMD64LEAQ4
			v.AuxInt = 0
			v.Aux = nil
			v.resetArgs()
			v.AuxInt = addOff(off1, off2)
			v.Aux = mergeSym(sym1, sym2)
			v.AddArg(x)
			v.AddArg(y)
			return true
		}
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAQ8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ8 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
	//
	// Fold a plain LEAQ feeding the base of a LEAQ8 into a single
	// scaled LEAQ8 with the combined offset and merged symbol.
	if base := v.Args[0]; base.Op == OpAMD64LEAQ {
		off1 := v.AuxInt
		sym1 := v.Aux
		off2 := base.AuxInt
		sym2 := base.Aux
		x := base.Args[0]
		y := v.Args[1]
		if canMergeSym(sym1, sym2) && x.Op != OpSB {
			v.Op = OpAMD64LEAQ8
			v.AuxInt = 0
			v.Aux = nil
			v.resetArgs()
			v.AuxInt = addOff(off1, off2)
			v.Aux = mergeSym(sym1, sym2)
			v.AddArg(x)
			v.AddArg(y)
			return true
		}
	}
	return false
}
func rewriteValueAMD64_OpLeq16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Leq16 x y)
	// cond:
	// result: (SETLE (CMPW x y))
	//
	// Lower a generic signed 16-bit <= to a word compare followed by
	// a set-if-less-or-equal of the flags.
	x := v.Args[0]
	y := v.Args[1]
	v.Op = OpAMD64SETLE
	v.AuxInt = 0
	v.Aux = nil
	v.resetArgs()
	cmp := b.NewValue0(v.Line, OpAMD64CMPW, TypeInvalid)
	cmp.Type = TypeFlags
	cmp.AddArg(x)
	cmp.AddArg(y)
	v.AddArg(cmp)
	return true
}
func rewriteValueAMD64_OpLeq16U(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Leq16U x y)
	// cond:
	// result: (SETBE (CMPW x y))
	//
	// Lower a generic unsigned 16-bit <= to a word compare followed by
	// a set-if-below-or-equal of the flags.
	x := v.Args[0]
	y := v.Args[1]
	v.Op = OpAMD64SETBE
	v.AuxInt = 0
	v.Aux = nil
	v.resetArgs()
	cmp := b.NewValue0(v.Line, OpAMD64CMPW, TypeInvalid)
	cmp.Type = TypeFlags
	cmp.AddArg(x)
	cmp.AddArg(y)
	v.AddArg(cmp)
	return true
}
func rewriteValueAMD64_OpLeq32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Leq32 x y)
	// cond:
	// result: (SETLE (CMPL x y))
	//
	// Lower a generic signed 32-bit <= to a long compare followed by
	// a set-if-less-or-equal of the flags.
	x := v.Args[0]
	y := v.Args[1]
	v.Op = OpAMD64SETLE
	v.AuxInt = 0
	v.Aux = nil
	v.resetArgs()
	cmp := b.NewValue0(v.Line, OpAMD64CMPL, TypeInvalid)
	cmp.Type = TypeFlags
	cmp.AddArg(x)
	cmp.AddArg(y)
	v.AddArg(cmp)
	return true
}
func rewriteValueAMD64_OpLeq32F(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Leq32F x y)
	// cond:
	// result: (SETGEF (UCOMISS y x))
	//
	// Lower float32 x <= y as y >= x: UCOMISS takes the operands
	// swapped so SETGEF reads the flags with the right sense
	// (this also gives the desired NaN behavior).
	x := v.Args[0]
	y := v.Args[1]
	v.Op = OpAMD64SETGEF
	v.AuxInt = 0
	v.Aux = nil
	v.resetArgs()
	cmp := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeInvalid)
	cmp.Type = TypeFlags
	cmp.AddArg(y)
	cmp.AddArg(x)
	v.AddArg(cmp)
	return true
}
func rewriteValueAMD64_OpLeq32U(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Leq32U x y)
// cond:
// result: (SETBE (CMPL x y))
......@@ -5883,19 +6404,42 @@ func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value, config *Config) bool {
goto end19c38f3a1a37dca50637c917fa26e4f7
end19c38f3a1a37dca50637c917fa26e4f7:
;
return false
}
func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (MOVBQZX (MOVBload [off] {sym} ptr mem))
// cond:
// result: @v.Args[0].Block (MOVBQZXload <v.Type> [off] {sym} ptr mem)
// match: (MOVBQSX (ANDBconst [c] x))
// cond: c & 0x80 == 0
// result: (ANDQconst [c & 0x7f] x)
{
if v.Args[0].Op != OpAMD64MOVBload {
goto end1169bcf3d56fa24321b002eaebd5a62d
if v.Args[0].Op != OpAMD64ANDBconst {
goto endf998318725c3cc6c701ebb69a2473650
}
off := v.Args[0].AuxInt
c := v.Args[0].AuxInt
x := v.Args[0].Args[0]
if !(c&0x80 == 0) {
goto endf998318725c3cc6c701ebb69a2473650
}
v.Op = OpAMD64ANDQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 0x7f
v.AddArg(x)
return true
}
goto endf998318725c3cc6c701ebb69a2473650
endf998318725c3cc6c701ebb69a2473650:
;
return false
}
func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (MOVBQZX (MOVBload [off] {sym} ptr mem))
// cond:
// result: @v.Args[0].Block (MOVBQZXload <v.Type> [off] {sym} ptr mem)
{
if v.Args[0].Op != OpAMD64MOVBload {
goto end1169bcf3d56fa24321b002eaebd5a62d
}
off := v.Args[0].AuxInt
sym := v.Args[0].Aux
ptr := v.Args[0].Args[0]
mem := v.Args[0].Args[1]
......@@ -5914,6 +6458,26 @@ func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value, config *Config) bool {
}
goto end1169bcf3d56fa24321b002eaebd5a62d
end1169bcf3d56fa24321b002eaebd5a62d:
;
// match: (MOVBQZX (ANDBconst [c] x))
// cond:
// result: (ANDQconst [c & 0xff] x)
{
if v.Args[0].Op != OpAMD64ANDBconst {
goto enddca0c0e20f19210fe65677bfd758b24e
}
c := v.Args[0].AuxInt
x := v.Args[0].Args[0]
v.Op = OpAMD64ANDQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 0xff
v.AddArg(x)
return true
}
goto enddca0c0e20f19210fe65677bfd758b24e
enddca0c0e20f19210fe65677bfd758b24e:
;
return false
}
......@@ -6116,6 +6680,67 @@ endfdf24c49923451a076f1868988b8c9d9:
}
goto enda7086cf7f6b8cf81972e2c3d4b12f3fc
enda7086cf7f6b8cf81972e2c3d4b12f3fc:
;
// match: (MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVBstoreidx1 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx val mem)
{
off1 := v.AuxInt
sym1 := v.Aux
if v.Args[0].Op != OpAMD64LEAQ1 {
goto ende386ced77f1acdae2e8bbc379803b7cf
}
off2 := v.Args[0].AuxInt
sym2 := v.Args[0].Aux
ptr := v.Args[0].Args[0]
idx := v.Args[0].Args[1]
val := v.Args[1]
mem := v.Args[2]
if !(canMergeSym(sym1, sym2)) {
goto ende386ced77f1acdae2e8bbc379803b7cf
}
v.Op = OpAMD64MOVBstoreidx1
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(off1, off2)
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(idx)
v.AddArg(val)
v.AddArg(mem)
return true
}
goto ende386ced77f1acdae2e8bbc379803b7cf
ende386ced77f1acdae2e8bbc379803b7cf:
;
// match: (MOVBstore [off] {sym} (ADDQ ptr idx) val mem)
// cond:
// result: (MOVBstoreidx1 [off] {sym} ptr idx val mem)
{
off := v.AuxInt
sym := v.Aux
if v.Args[0].Op != OpAMD64ADDQ {
goto endc7abfa0b473c622e6d5aa3b1846fb2b7
}
ptr := v.Args[0].Args[0]
idx := v.Args[0].Args[1]
val := v.Args[1]
mem := v.Args[2]
v.Op = OpAMD64MOVBstoreidx1
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(idx)
v.AddArg(val)
v.AddArg(mem)
return true
}
goto endc7abfa0b473c622e6d5aa3b1846fb2b7
endc7abfa0b473c622e6d5aa3b1846fb2b7:
;
return false
}
......@@ -6181,6 +6806,147 @@ end8deb839acf84818dd8fc827c0338f42c:
;
return false
}
func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVBstoreidx1 [off1] {sym} (ADDQconst [off2] ptr) idx val mem)
	// cond:
	// result: (MOVBstoreidx1 [addOff(off1, off2)] {sym} ptr idx val mem)
	//
	// Fold a constant added to the base pointer into the store's offset.
	if add := v.Args[0]; add.Op == OpAMD64ADDQconst {
		off1 := v.AuxInt
		sym := v.Aux
		off2 := add.AuxInt
		ptr := add.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		v.Op = OpAMD64MOVBstoreidx1
		v.AuxInt = 0
		v.Aux = nil
		v.resetArgs()
		v.AuxInt = addOff(off1, off2)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVLQSX (MOVLload [off] {sym} ptr mem))
	// cond:
	// result: @v.Args[0].Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
	//
	// Combine a load followed by a sign extension into a single
	// sign-extending load, placed in the load's original block.
	if load := v.Args[0]; load.Op == OpAMD64MOVLload {
		off := load.AuxInt
		sym := load.Aux
		ptr := load.Args[0]
		mem := load.Args[1]
		v0 := load.Block.NewValue0(v.Line, OpAMD64MOVLQSXload, TypeInvalid)
		v0.Type = v.Type
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.Op = OpCopy
		v.AuxInt = 0
		v.Aux = nil
		v.resetArgs()
		v.AddArg(v0)
		return true
	}
	// match: (MOVLQSX (ANDLconst [c] x))
	// cond: c & 0x80000000 == 0
	// result: (ANDQconst [c & 0x7fffffff] x)
	//
	// If the mask clears the sign bit, the sign extension is a no-op
	// and the AND can be widened to 64 bits.
	if and := v.Args[0]; and.Op == OpAMD64ANDLconst {
		c := and.AuxInt
		x := and.Args[0]
		if c&0x80000000 == 0 {
			v.Op = OpAMD64ANDQconst
			v.AuxInt = 0
			v.Aux = nil
			v.resetArgs()
			v.AuxInt = c & 0x7fffffff
			v.AddArg(x)
			return true
		}
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVLQZX (MOVLload [off] {sym} ptr mem))
	// cond:
	// result: @v.Args[0].Block (MOVLQZXload <v.Type> [off] {sym} ptr mem)
	//
	// Combine a load followed by a zero extension into a single
	// zero-extending load, placed in the load's original block.
	if load := v.Args[0]; load.Op == OpAMD64MOVLload {
		off := load.AuxInt
		sym := load.Aux
		ptr := load.Args[0]
		mem := load.Args[1]
		v0 := load.Block.NewValue0(v.Line, OpAMD64MOVLQZXload, TypeInvalid)
		v0.Type = v.Type
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.Op = OpCopy
		v.AuxInt = 0
		v.Aux = nil
		v.resetArgs()
		v.AddArg(v0)
		return true
	}
	// match: (MOVLQZX (ANDLconst [c] x))
	// cond:
	// result: (ANDQconst [c & 0xffffffff] x)
	//
	// An AND with a 32-bit constant already zeroes the upper bits,
	// so the zero extension folds into a 64-bit AND.
	if and := v.Args[0]; and.Op == OpAMD64ANDLconst {
		c := and.AuxInt
		x := and.Args[0]
		v.Op = OpAMD64ANDQconst
		v.AuxInt = 0
		v.Aux = nil
		v.resetArgs()
		v.AuxInt = c & 0xffffffff
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLload(v *Value, config *Config) bool {
b := v.Block
_ = b
......@@ -6380,6 +7146,39 @@ enda62a54c45bf42db801af4095d27faccd:
}
goto endd57b1e4313fc7a3331340a9af00ba116
endd57b1e4313fc7a3331340a9af00ba116:
;
// match: (MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVLstoreidx4 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx val mem)
{
off1 := v.AuxInt
sym1 := v.Aux
if v.Args[0].Op != OpAMD64LEAQ4 {
goto end6d2bbe089d6de8d261fcdeef263d2f7c
}
off2 := v.Args[0].AuxInt
sym2 := v.Args[0].Aux
ptr := v.Args[0].Args[0]
idx := v.Args[0].Args[1]
val := v.Args[1]
mem := v.Args[2]
if !(canMergeSym(sym1, sym2)) {
goto end6d2bbe089d6de8d261fcdeef263d2f7c
}
v.Op = OpAMD64MOVLstoreidx4
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(off1, off2)
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(idx)
v.AddArg(val)
v.AddArg(mem)
return true
}
goto end6d2bbe089d6de8d261fcdeef263d2f7c
end6d2bbe089d6de8d261fcdeef263d2f7c:
;
return false
}
......@@ -6445,6 +7244,40 @@ endd579250954b5df84a77518b36f739e12:
;
return false
}
func rewriteValueAMD64_OpAMD64MOVLstoreidx4(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVLstoreidx4 [off1] {sym} (ADDQconst [off2] ptr) idx val mem)
	// cond:
	// result: (MOVLstoreidx4 [addOff(off1, off2)] {sym} ptr idx val mem)
	//
	// Fold a constant added to the base pointer into the store's offset.
	if add := v.Args[0]; add.Op == OpAMD64ADDQconst {
		off1 := v.AuxInt
		sym := v.Aux
		off2 := add.AuxInt
		ptr := add.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		v.Op = OpAMD64MOVLstoreidx4
		v.AuxInt = 0
		v.Aux = nil
		v.resetArgs()
		v.AuxInt = addOff(off1, off2)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVOload(v *Value, config *Config) bool {
b := v.Block
_ = b
......@@ -7425,27 +8258,134 @@ end66e4853026306cd46f414c22d281254f:
;
return false
}
func rewriteValueAMD64_OpAMD64MOVWload(v *Value, config *Config) bool {
func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem)
// match: (MOVWQSX (MOVWload [off] {sym} ptr mem))
// cond:
// result: (MOVWload [addOff(off1, off2)] {sym} ptr mem)
// result: @v.Args[0].Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
{
off1 := v.AuxInt
sym := v.Aux
if v.Args[0].Op != OpAMD64ADDQconst {
goto endfcb0ce76f96e8b0c2eb19a9b827c1b73
if v.Args[0].Op != OpAMD64MOVWload {
goto endef39da125e2794cdafd008426ecc91eb
}
off2 := v.Args[0].AuxInt
off := v.Args[0].AuxInt
sym := v.Args[0].Aux
ptr := v.Args[0].Args[0]
mem := v.Args[1]
v.Op = OpAMD64MOVWload
mem := v.Args[0].Args[1]
v0 := v.Args[0].Block.NewValue0(v.Line, OpAMD64MOVWQSXload, TypeInvalid)
v.Op = OpCopy
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(off1, off2)
v.Aux = sym
v.AddArg(v0)
v0.Type = v.Type
v0.AuxInt = off
v0.Aux = sym
v0.AddArg(ptr)
v0.AddArg(mem)
return true
}
goto endef39da125e2794cdafd008426ecc91eb
endef39da125e2794cdafd008426ecc91eb:
;
// match: (MOVWQSX (ANDWconst [c] x))
// cond: c & 0x8000 == 0
// result: (ANDQconst [c & 0x7fff] x)
{
if v.Args[0].Op != OpAMD64ANDWconst {
goto end8581b4c4dfd1278e97aa536308519e68
}
c := v.Args[0].AuxInt
x := v.Args[0].Args[0]
if !(c&0x8000 == 0) {
goto end8581b4c4dfd1278e97aa536308519e68
}
v.Op = OpAMD64ANDQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 0x7fff
v.AddArg(x)
return true
}
goto end8581b4c4dfd1278e97aa536308519e68
end8581b4c4dfd1278e97aa536308519e68:
;
return false
}
func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVWQZX (MOVWload [off] {sym} ptr mem))
	// cond:
	// result: @v.Args[0].Block (MOVWQZXload <v.Type> [off] {sym} ptr mem)
	//
	// Combine a word load followed by a zero extension into a single
	// zero-extending load, placed in the load's original block.
	if load := v.Args[0]; load.Op == OpAMD64MOVWload {
		off := load.AuxInt
		sym := load.Aux
		ptr := load.Args[0]
		mem := load.Args[1]
		v0 := load.Block.NewValue0(v.Line, OpAMD64MOVWQZXload, TypeInvalid)
		v0.Type = v.Type
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.Op = OpCopy
		v.AuxInt = 0
		v.Aux = nil
		v.resetArgs()
		v.AddArg(v0)
		return true
	}
	// match: (MOVWQZX (ANDWconst [c] x))
	// cond:
	// result: (ANDQconst [c & 0xffff] x)
	//
	// An AND with a 16-bit constant already zeroes the upper bits,
	// so the zero extension folds into a 64-bit AND.
	if and := v.Args[0]; and.Op == OpAMD64ANDWconst {
		c := and.AuxInt
		x := and.Args[0]
		v.Op = OpAMD64ANDQconst
		v.AuxInt = 0
		v.Aux = nil
		v.resetArgs()
		v.AuxInt = c & 0xffff
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWload(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem)
// cond:
// result: (MOVWload [addOff(off1, off2)] {sym} ptr mem)
{
off1 := v.AuxInt
sym := v.Aux
if v.Args[0].Op != OpAMD64ADDQconst {
goto endfcb0ce76f96e8b0c2eb19a9b827c1b73
}
off2 := v.Args[0].AuxInt
ptr := v.Args[0].Args[0]
mem := v.Args[1]
v.Op = OpAMD64MOVWload
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(off1, off2)
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
......@@ -7624,6 +8564,39 @@ end60327daf9965d73a8c1971d098e1e31d:
}
goto end4cc466ede8e64e415c899ccac81c0f27
end4cc466ede8e64e415c899ccac81c0f27:
;
// match: (MOVWstore [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) val mem)
// cond: canMergeSym(sym1, sym2)
// result: (MOVWstoreidx2 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx val mem)
{
off1 := v.AuxInt
sym1 := v.Aux
if v.Args[0].Op != OpAMD64LEAQ2 {
goto endecfc76d1ba8fcce5d4110a452cd39752
}
off2 := v.Args[0].AuxInt
sym2 := v.Args[0].Aux
ptr := v.Args[0].Args[0]
idx := v.Args[0].Args[1]
val := v.Args[1]
mem := v.Args[2]
if !(canMergeSym(sym1, sym2)) {
goto endecfc76d1ba8fcce5d4110a452cd39752
}
v.Op = OpAMD64MOVWstoreidx2
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = addOff(off1, off2)
v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
v.AddArg(idx)
v.AddArg(val)
v.AddArg(mem)
return true
}
goto endecfc76d1ba8fcce5d4110a452cd39752
endecfc76d1ba8fcce5d4110a452cd39752:
;
return false
}
......@@ -7689,6 +8662,40 @@ endba47397e07b40a64fa4cad36ac2e32ad:
;
return false
}
func rewriteValueAMD64_OpAMD64MOVWstoreidx2(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (MOVWstoreidx2 [off1] {sym} (ADDQconst [off2] ptr) idx val mem)
	// cond:
	// result: (MOVWstoreidx2 [addOff(off1, off2)] {sym} ptr idx val mem)
	//
	// Fold a constant added to the base pointer into the store's offset.
	if add := v.Args[0]; add.Op == OpAMD64ADDQconst {
		off1 := v.AuxInt
		sym := v.Aux
		off2 := add.AuxInt
		ptr := add.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		v.Op = OpAMD64MOVWstoreidx2
		v.AuxInt = 0
		v.Aux = nil
		v.resetArgs()
		v.AuxInt = addOff(off1, off2)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULB(v *Value, config *Config) bool {
b := v.Block
_ = b
......@@ -11198,6 +12205,66 @@ end6453a48c573d0dc7c8b0163a266c6218:
func rewriteValueAMD64_OpAMD64SARB(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (SARB x (MOVQconst [c]))
// cond:
// result: (SARBconst [c&31] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVQconst {
goto end03194336f801b91c1423aed6f39247f0
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SARBconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 31
v.AddArg(x)
return true
}
goto end03194336f801b91c1423aed6f39247f0
end03194336f801b91c1423aed6f39247f0:
;
// match: (SARB x (MOVLconst [c]))
// cond:
// result: (SARBconst [c&31] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVLconst {
goto end3f623e78dd789403b299106625e0d6df
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SARBconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 31
v.AddArg(x)
return true
}
goto end3f623e78dd789403b299106625e0d6df
end3f623e78dd789403b299106625e0d6df:
;
// match: (SARB x (MOVWconst [c]))
// cond:
// result: (SARBconst [c&31] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVWconst {
goto end4393e26c64e39342a0634d9a5706cb10
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SARBconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 31
v.AddArg(x)
return true
}
goto end4393e26c64e39342a0634d9a5706cb10
end4393e26c64e39342a0634d9a5706cb10:
;
// match: (SARB x (MOVBconst [c]))
// cond:
// result: (SARBconst [c&31] x)
......@@ -11247,6 +12314,26 @@ end06e0e38775f0650ed672427d19cd8fff:
func rewriteValueAMD64_OpAMD64SARL(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (SARL x (MOVQconst [c]))
// cond:
// result: (SARLconst [c&31] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVQconst {
goto end8fb4e77be1f4d21d0f2a0facf9a60add
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SARLconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 31
v.AddArg(x)
return true
}
goto end8fb4e77be1f4d21d0f2a0facf9a60add
end8fb4e77be1f4d21d0f2a0facf9a60add:
;
// match: (SARL x (MOVLconst [c]))
// cond:
// result: (SARLconst [c&31] x)
......@@ -11266,6 +12353,46 @@ func rewriteValueAMD64_OpAMD64SARL(v *Value, config *Config) bool {
}
goto ende586a72c1b232ee0b63e37c71eeb8470
ende586a72c1b232ee0b63e37c71eeb8470:
;
// match: (SARL x (MOVWconst [c]))
// cond:
// result: (SARLconst [c&31] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVWconst {
goto end37389c13b9fb94c44bd10b1143809afb
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SARLconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 31
v.AddArg(x)
return true
}
goto end37389c13b9fb94c44bd10b1143809afb
end37389c13b9fb94c44bd10b1143809afb:
;
// match: (SARL x (MOVBconst [c]))
// cond:
// result: (SARLconst [c&31] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVBconst {
goto end72550eb8c44c45e76e40888bce753160
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SARLconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 31
v.AddArg(x)
return true
}
goto end72550eb8c44c45e76e40888bce753160
end72550eb8c44c45e76e40888bce753160:
;
return false
}
......@@ -11316,35 +12443,135 @@ func rewriteValueAMD64_OpAMD64SARQ(v *Value, config *Config) bool {
goto end25e720ab203be2745dded5550e6d8a7c
end25e720ab203be2745dded5550e6d8a7c:
;
return false
}
func rewriteValueAMD64_OpAMD64SARQconst(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (SARQconst [c] (MOVQconst [d]))
// match: (SARQ x (MOVLconst [c]))
// cond:
// result: (MOVQconst [d>>uint64(c)])
// result: (SARQconst [c&63] x)
{
c := v.AuxInt
if v.Args[0].Op != OpAMD64MOVQconst {
goto endd949ba69a1ff71ba62c49b39c68f269e
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVLconst {
goto endd04cf826c5db444107cf4e0bf789bcda
}
d := v.Args[0].AuxInt
v.Op = OpAMD64MOVQconst
c := v.Args[1].AuxInt
v.Op = OpAMD64SARQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = d >> uint64(c)
v.AuxInt = c & 63
v.AddArg(x)
return true
}
goto endd949ba69a1ff71ba62c49b39c68f269e
endd949ba69a1ff71ba62c49b39c68f269e:
goto endd04cf826c5db444107cf4e0bf789bcda
endd04cf826c5db444107cf4e0bf789bcda:
;
return false
}
// match: (SARQ x (MOVWconst [c]))
// cond:
// result: (SARQconst [c&63] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVWconst {
goto end6266051b3a126922286c298594535622
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SARQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 63
v.AddArg(x)
return true
}
goto end6266051b3a126922286c298594535622
end6266051b3a126922286c298594535622:
;
// match: (SARQ x (MOVBconst [c]))
// cond:
// result: (SARQconst [c&63] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVBconst {
goto endcf2a1bdfeda535fc96ae1e7f5c54d531
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SARQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 63
v.AddArg(x)
return true
}
goto endcf2a1bdfeda535fc96ae1e7f5c54d531
endcf2a1bdfeda535fc96ae1e7f5c54d531:
;
return false
}
func rewriteValueAMD64_OpAMD64SARQconst(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (SARQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [d>>uint64(c)])
	//
	// Constant-fold an arithmetic right shift of a constant; the
	// signed int64 >> performs the arithmetic shift.
	if arg := v.Args[0]; arg.Op == OpAMD64MOVQconst {
		c := v.AuxInt
		d := arg.AuxInt
		v.Op = OpAMD64MOVQconst
		v.AuxInt = 0
		v.Aux = nil
		v.resetArgs()
		v.AuxInt = d >> uint64(c)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARW(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (SARW x (MOVQconst [c]))
// cond:
// result: (SARWconst [c&31] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVQconst {
goto endec8cafea5ff91b2a1b5cf5a169be924f
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SARWconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 31
v.AddArg(x)
return true
}
goto endec8cafea5ff91b2a1b5cf5a169be924f
endec8cafea5ff91b2a1b5cf5a169be924f:
;
// match: (SARW x (MOVLconst [c]))
// cond:
// result: (SARWconst [c&31] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVLconst {
goto end9303d0edeebdc8a2a7e93fecf0fff61c
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SARWconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 31
v.AddArg(x)
return true
}
goto end9303d0edeebdc8a2a7e93fecf0fff61c
end9303d0edeebdc8a2a7e93fecf0fff61c:
;
// match: (SARW x (MOVWconst [c]))
// cond:
// result: (SARWconst [c&31] x)
......@@ -11364,6 +12591,26 @@ func rewriteValueAMD64_OpAMD64SARW(v *Value, config *Config) bool {
}
goto endc46e3f211f94238f9a0aec3c498af490
endc46e3f211f94238f9a0aec3c498af490:
;
// match: (SARW x (MOVBconst [c]))
// cond:
// result: (SARWconst [c&31] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVBconst {
goto end0bf07ce9cd2c536c07768f8dfbe13c62
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SARWconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 31
v.AddArg(x)
return true
}
goto end0bf07ce9cd2c536c07768f8dfbe13c62
end0bf07ce9cd2c536c07768f8dfbe13c62:
;
return false
}
......@@ -12654,6 +13901,66 @@ end9249b3ed3e1e582dd5435fb73cbc13ac:
func rewriteValueAMD64_OpAMD64SHLB(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (SHLB x (MOVQconst [c]))
// cond:
// result: (SHLBconst [c&31] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVQconst {
goto endb1f377b81b6f4c1864893934230ecbd1
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SHLBconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 31
v.AddArg(x)
return true
}
goto endb1f377b81b6f4c1864893934230ecbd1
endb1f377b81b6f4c1864893934230ecbd1:
;
// match: (SHLB x (MOVLconst [c]))
// cond:
// result: (SHLBconst [c&31] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVLconst {
goto end434bc4ee26d93bf1c734be760d7a1aa6
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SHLBconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 31
v.AddArg(x)
return true
}
goto end434bc4ee26d93bf1c734be760d7a1aa6
end434bc4ee26d93bf1c734be760d7a1aa6:
;
// match: (SHLB x (MOVWconst [c]))
// cond:
// result: (SHLBconst [c&31] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVWconst {
goto end2c4fe4cce2ae24e0bc5c7d209d22e9d9
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SHLBconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 31
v.AddArg(x)
return true
}
goto end2c4fe4cce2ae24e0bc5c7d209d22e9d9
end2c4fe4cce2ae24e0bc5c7d209d22e9d9:
;
// match: (SHLB x (MOVBconst [c]))
// cond:
// result: (SHLBconst [c&31] x)
......@@ -12679,6 +13986,26 @@ end2d0d0111d831d8a575b5627284a6337a:
func rewriteValueAMD64_OpAMD64SHLL(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (SHLL x (MOVQconst [c]))
// cond:
// result: (SHLLconst [c&31] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVQconst {
goto end1b4f8b8d62445fdcb3cf9cd5036b559b
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SHLLconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 31
v.AddArg(x)
return true
}
goto end1b4f8b8d62445fdcb3cf9cd5036b559b
end1b4f8b8d62445fdcb3cf9cd5036b559b:
;
// match: (SHLL x (MOVLconst [c]))
// cond:
// result: (SHLLconst [c&31] x)
......@@ -12698,6 +14025,46 @@ func rewriteValueAMD64_OpAMD64SHLL(v *Value, config *Config) bool {
}
goto end633f9ddcfbb63374c895a5f78da75d25
end633f9ddcfbb63374c895a5f78da75d25:
;
// match: (SHLL x (MOVWconst [c]))
// cond:
// result: (SHLLconst [c&31] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVWconst {
goto enda4f59495061db6cfe796b6dba8d3cad8
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SHLLconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 31
v.AddArg(x)
return true
}
goto enda4f59495061db6cfe796b6dba8d3cad8
enda4f59495061db6cfe796b6dba8d3cad8:
;
// match: (SHLL x (MOVBconst [c]))
// cond:
// result: (SHLLconst [c&31] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVBconst {
goto endd6f39b5f3174ca738ae1c48a96d837a6
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SHLLconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 31
v.AddArg(x)
return true
}
goto endd6f39b5f3174ca738ae1c48a96d837a6
endd6f39b5f3174ca738ae1c48a96d837a6:
;
return false
}
......@@ -12723,12 +14090,112 @@ func rewriteValueAMD64_OpAMD64SHLQ(v *Value, config *Config) bool {
}
goto end4d7e3a945cacdd6b6c8c0de6f465d4ae
end4d7e3a945cacdd6b6c8c0de6f465d4ae:
;
// match: (SHLQ x (MOVLconst [c]))
// cond:
// result: (SHLQconst [c&63] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVLconst {
goto end394bae2652a3e4bc4b70a6fc193949f8
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SHLQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 63
v.AddArg(x)
return true
}
goto end394bae2652a3e4bc4b70a6fc193949f8
end394bae2652a3e4bc4b70a6fc193949f8:
;
// match: (SHLQ x (MOVWconst [c]))
// cond:
// result: (SHLQconst [c&63] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVWconst {
goto end358be4078efa15ceb443ccda7ce592a0
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SHLQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 63
v.AddArg(x)
return true
}
goto end358be4078efa15ceb443ccda7ce592a0
end358be4078efa15ceb443ccda7ce592a0:
;
// match: (SHLQ x (MOVBconst [c]))
// cond:
// result: (SHLQconst [c&63] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVBconst {
goto end032e0efd085f37a12322dbc63795a1b2
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SHLQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 63
v.AddArg(x)
return true
}
goto end032e0efd085f37a12322dbc63795a1b2
end032e0efd085f37a12322dbc63795a1b2:
;
return false
}
func rewriteValueAMD64_OpAMD64SHLW(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (SHLW x (MOVQconst [c]))
// cond:
// result: (SHLWconst [c&31] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVQconst {
goto enda29aa85ce58b1fdb63d71e2632efd6db
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SHLWconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 31
v.AddArg(x)
return true
}
goto enda29aa85ce58b1fdb63d71e2632efd6db
enda29aa85ce58b1fdb63d71e2632efd6db:
;
// match: (SHLW x (MOVLconst [c]))
// cond:
// result: (SHLWconst [c&31] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVLconst {
goto end59ce264ffde0ef9af8ea1a25db7173b6
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SHLWconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 31
v.AddArg(x)
return true
}
goto end59ce264ffde0ef9af8ea1a25db7173b6
end59ce264ffde0ef9af8ea1a25db7173b6:
;
// match: (SHLW x (MOVWconst [c]))
// cond:
// result: (SHLWconst [c&31] x)
......@@ -12748,12 +14215,92 @@ func rewriteValueAMD64_OpAMD64SHLW(v *Value, config *Config) bool {
}
goto endba96a52aa58d28b3357828051e0e695c
endba96a52aa58d28b3357828051e0e695c:
;
// match: (SHLW x (MOVBconst [c]))
// cond:
// result: (SHLWconst [c&31] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVBconst {
goto endf9c2165ea24ac7bbdd46cdf0e084104f
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SHLWconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 31
v.AddArg(x)
return true
}
goto endf9c2165ea24ac7bbdd46cdf0e084104f
endf9c2165ea24ac7bbdd46cdf0e084104f:
;
return false
}
func rewriteValueAMD64_OpAMD64SHRB(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (SHRB x (MOVQconst [c]))
// cond:
// result: (SHRBconst [c&31] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVQconst {
goto end2e7fb7a5406cbf51c69a0d04dc73d16a
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SHRBconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 31
v.AddArg(x)
return true
}
goto end2e7fb7a5406cbf51c69a0d04dc73d16a
end2e7fb7a5406cbf51c69a0d04dc73d16a:
;
// match: (SHRB x (MOVLconst [c]))
// cond:
// result: (SHRBconst [c&31] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVLconst {
goto end69603cc51e4f244388f368dd188a526a
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SHRBconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 31
v.AddArg(x)
return true
}
goto end69603cc51e4f244388f368dd188a526a
end69603cc51e4f244388f368dd188a526a:
;
// match: (SHRB x (MOVWconst [c]))
// cond:
// result: (SHRBconst [c&31] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVWconst {
goto endd96421647299a1bb1b68ad0a90fa0be3
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SHRBconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 31
v.AddArg(x)
return true
}
goto endd96421647299a1bb1b68ad0a90fa0be3
endd96421647299a1bb1b68ad0a90fa0be3:
;
// match: (SHRB x (MOVBconst [c]))
// cond:
// result: (SHRBconst [c&31] x)
......@@ -12779,6 +14326,26 @@ enddb1cd5aaa826d43fa4f6d1b2b8795e58:
func rewriteValueAMD64_OpAMD64SHRL(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (SHRL x (MOVQconst [c]))
// cond:
// result: (SHRLconst [c&31] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVQconst {
goto end893880cdc59697295c1849a250163e59
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SHRLconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 31
v.AddArg(x)
return true
}
goto end893880cdc59697295c1849a250163e59
end893880cdc59697295c1849a250163e59:
;
// match: (SHRL x (MOVLconst [c]))
// cond:
// result: (SHRLconst [c&31] x)
......@@ -12798,6 +14365,46 @@ func rewriteValueAMD64_OpAMD64SHRL(v *Value, config *Config) bool {
}
goto end344b8b9202e1925e8d0561f1c21412fc
end344b8b9202e1925e8d0561f1c21412fc:
;
// match: (SHRL x (MOVWconst [c]))
// cond:
// result: (SHRLconst [c&31] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVWconst {
goto end561280f746f9983f4a4b4a5119b53028
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SHRLconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 31
v.AddArg(x)
return true
}
goto end561280f746f9983f4a4b4a5119b53028
end561280f746f9983f4a4b4a5119b53028:
;
// match: (SHRL x (MOVBconst [c]))
// cond:
// result: (SHRLconst [c&31] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVBconst {
goto enda339271c59d274b73c04ba1f2c44c2b9
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SHRLconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 31
v.AddArg(x)
return true
}
goto enda339271c59d274b73c04ba1f2c44c2b9
enda339271c59d274b73c04ba1f2c44c2b9:
;
return false
}
......@@ -12823,12 +14430,112 @@ func rewriteValueAMD64_OpAMD64SHRQ(v *Value, config *Config) bool {
}
goto end699d35e2d5cfa08b8a3b1c8a183ddcf3
end699d35e2d5cfa08b8a3b1c8a183ddcf3:
;
// match: (SHRQ x (MOVLconst [c]))
// cond:
// result: (SHRQconst [c&63] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVLconst {
goto end3189f4abaac8028d9191c9ba64124999
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SHRQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 63
v.AddArg(x)
return true
}
goto end3189f4abaac8028d9191c9ba64124999
end3189f4abaac8028d9191c9ba64124999:
;
// match: (SHRQ x (MOVWconst [c]))
// cond:
// result: (SHRQconst [c&63] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVWconst {
goto end0cbc86ae04a355c0e2a96400242f4633
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SHRQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 63
v.AddArg(x)
return true
}
goto end0cbc86ae04a355c0e2a96400242f4633
end0cbc86ae04a355c0e2a96400242f4633:
;
// match: (SHRQ x (MOVBconst [c]))
// cond:
// result: (SHRQconst [c&63] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVBconst {
goto endb9c003612674e7a1ea7c13e463c229d2
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SHRQconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 63
v.AddArg(x)
return true
}
goto endb9c003612674e7a1ea7c13e463c229d2
endb9c003612674e7a1ea7c13e463c229d2:
;
return false
}
func rewriteValueAMD64_OpAMD64SHRW(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (SHRW x (MOVQconst [c]))
// cond:
// result: (SHRWconst [c&31] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVQconst {
goto endc5c82eea9a6b51b1d6b76e57f21f46ff
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SHRWconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 31
v.AddArg(x)
return true
}
goto endc5c82eea9a6b51b1d6b76e57f21f46ff
endc5c82eea9a6b51b1d6b76e57f21f46ff:
;
// match: (SHRW x (MOVLconst [c]))
// cond:
// result: (SHRWconst [c&31] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVLconst {
goto end773e94c857256ae9a31eb5b3d667e64b
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SHRWconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 31
v.AddArg(x)
return true
}
goto end773e94c857256ae9a31eb5b3d667e64b
end773e94c857256ae9a31eb5b3d667e64b:
;
// match: (SHRW x (MOVWconst [c]))
// cond:
// result: (SHRWconst [c&31] x)
......@@ -12848,6 +14555,26 @@ func rewriteValueAMD64_OpAMD64SHRW(v *Value, config *Config) bool {
}
goto endd75ff1f9b3e9ec9c942a39b6179da1b3
endd75ff1f9b3e9ec9c942a39b6179da1b3:
;
// match: (SHRW x (MOVBconst [c]))
// cond:
// result: (SHRWconst [c&31] x)
{
x := v.Args[0]
if v.Args[1].Op != OpAMD64MOVBconst {
goto end6761530cd742ad00057c19a6a3c38ada
}
c := v.Args[1].AuxInt
v.Op = OpAMD64SHRWconst
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = c & 31
v.AddArg(x)
return true
}
goto end6761530cd742ad00057c19a6a3c38ada
end6761530cd742ad00057c19a6a3c38ada:
;
return false
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment