Commit 29fcd57a authored by Giovanni Bajo

cmd/compile: fold offsets into memory ops

Fold offsets for:

  {ADD,SUB,MUL}[SD]mem
  ADD[LQ]constmem
  {ADD,SUB,AND,OR,XOR}[LQ]mem

Cumulatively, the rules trigger ~900 times in all.bash.

Fixes #23325

Change-Id: If6c701f68fa0b57907a353a07a516b914127d0d8
Reviewed-on: https://go-review.googlesource.com/98035
Run-TryBot: Giovanni Bajo <rasky@develer.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
parent ee58eccc
...@@ -962,6 +962,16 @@ ...@@ -962,6 +962,16 @@
(MOV(Q|L|W|B|SS|SD|O)store [off1+off2] {sym} ptr val mem) (MOV(Q|L|W|B|SS|SD|O)store [off1+off2] {sym} ptr val mem)
(SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)mem [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(off1+off2) -> (SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)mem [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(off1+off2) ->
(SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)mem [off1+off2] {sym} base val mem) (SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)mem [off1+off2] {sym} base val mem)
((ADD|SUB|AND|OR|XOR)Qmem [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(off1+off2) ->
((ADD|SUB|AND|OR|XOR)Qmem [off1+off2] {sym} val base mem)
((ADD|SUB|AND|OR|XOR)Lmem [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(off1+off2) ->
((ADD|SUB|AND|OR|XOR)Lmem [off1+off2] {sym} val base mem)
((ADD|SUB|MUL)SSmem [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(off1+off2) ->
((ADD|SUB|MUL)SSmem [off1+off2] {sym} val base mem)
((ADD|SUB|MUL)SDmem [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(off1+off2) ->
((ADD|SUB|MUL)SDmem [off1+off2] {sym} val base mem)
(ADD(L|Q)constmem [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd(off2) ->
(ADD(L|Q)constmem [ValAndOff(valoff1).add(off2)] {sym} base mem)
// Fold constants into stores. // Fold constants into stores.
(MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) && validValAndOff(c,off) -> (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) && validValAndOff(c,off) ->
...@@ -990,6 +1000,21 @@ ...@@ -990,6 +1000,21 @@
(SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)mem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) (SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)mem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)mem [off1+off2] {mergeSym(sym1,sym2)} base val mem) (SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)mem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
((ADD|SUB|AND|OR|XOR)Qmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
((ADD|SUB|AND|OR|XOR)Qmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
((ADD|SUB|AND|OR|XOR)Lmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
((ADD|SUB|AND|OR|XOR)Lmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
((ADD|SUB|MUL)SSmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
((ADD|SUB|MUL)SSmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
((ADD|SUB|MUL)SDmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
((ADD|SUB|MUL)SDmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
(ADD(L|Q)constmem [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
&& ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) ->
(ADD(L|Q)constmem [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
// generating indexed loads and stores // generating indexed loads and stores
(MOV(B|W|L|Q|SS|SD)load [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> (MOV(B|W|L|Q|SS|SD)load [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
......
...@@ -1309,6 +1309,55 @@ func rewriteValueAMD64_OpAMD64ADDLconstmem_0(v *Value) bool { ...@@ -1309,6 +1309,55 @@ func rewriteValueAMD64_OpAMD64ADDLconstmem_0(v *Value) bool {
_ = b _ = b
typ := &b.Func.Config.Types typ := &b.Func.Config.Types
_ = typ _ = typ
// match: (ADDLconstmem [valoff1] {sym} (ADDQconst [off2] base) mem)
// cond: ValAndOff(valoff1).canAdd(off2)
// result: (ADDLconstmem [ValAndOff(valoff1).add(off2)] {sym} base mem)
for {
valoff1 := v.AuxInt
sym := v.Aux
_ = v.Args[1]
v_0 := v.Args[0]
if v_0.Op != OpAMD64ADDQconst {
break
}
off2 := v_0.AuxInt
base := v_0.Args[0]
mem := v.Args[1]
if !(ValAndOff(valoff1).canAdd(off2)) {
break
}
v.reset(OpAMD64ADDLconstmem)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = sym
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (ADDLconstmem [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
// result: (ADDLconstmem [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
for {
valoff1 := v.AuxInt
sym1 := v.Aux
_ = v.Args[1]
v_0 := v.Args[0]
if v_0.Op != OpAMD64LEAQ {
break
}
off2 := v_0.AuxInt
sym2 := v_0.Aux
base := v_0.Args[0]
mem := v.Args[1]
if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64ADDLconstmem)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (ADDLconstmem [valOff] {sym} ptr (MOVSSstore [ValAndOff(valOff).Off()] {sym} ptr x _)) // match: (ADDLconstmem [valOff] {sym} ptr (MOVSSstore [ValAndOff(valOff).Off()] {sym} ptr x _))
// cond: // cond:
// result: (ADDLconst [ValAndOff(valOff).Val()] (MOVLf2i x)) // result: (ADDLconst [ValAndOff(valOff).Val()] (MOVLf2i x))
...@@ -1346,6 +1395,59 @@ func rewriteValueAMD64_OpAMD64ADDLmem_0(v *Value) bool { ...@@ -1346,6 +1395,59 @@ func rewriteValueAMD64_OpAMD64ADDLmem_0(v *Value) bool {
_ = b _ = b
typ := &b.Func.Config.Types typ := &b.Func.Config.Types
_ = typ _ = typ
// match: (ADDLmem [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(off1+off2)
// result: (ADDLmem [off1+off2] {sym} val base mem)
for {
off1 := v.AuxInt
sym := v.Aux
_ = v.Args[2]
val := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAMD64ADDQconst {
break
}
off2 := v_1.AuxInt
base := v_1.Args[0]
mem := v.Args[2]
if !(is32Bit(off1 + off2)) {
break
}
v.reset(OpAMD64ADDLmem)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(val)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (ADDLmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (ADDLmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
_ = v.Args[2]
val := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAMD64LEAQ {
break
}
off2 := v_1.AuxInt
sym2 := v_1.Aux
base := v_1.Args[0]
mem := v.Args[2]
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64ADDLmem)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(val)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (ADDLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) // match: (ADDLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
// cond: // cond:
// result: (ADDL x (MOVLf2i y)) // result: (ADDL x (MOVLf2i y))
...@@ -2083,6 +2185,55 @@ func rewriteValueAMD64_OpAMD64ADDQconstmem_0(v *Value) bool { ...@@ -2083,6 +2185,55 @@ func rewriteValueAMD64_OpAMD64ADDQconstmem_0(v *Value) bool {
_ = b _ = b
typ := &b.Func.Config.Types typ := &b.Func.Config.Types
_ = typ _ = typ
// match: (ADDQconstmem [valoff1] {sym} (ADDQconst [off2] base) mem)
// cond: ValAndOff(valoff1).canAdd(off2)
// result: (ADDQconstmem [ValAndOff(valoff1).add(off2)] {sym} base mem)
for {
valoff1 := v.AuxInt
sym := v.Aux
_ = v.Args[1]
v_0 := v.Args[0]
if v_0.Op != OpAMD64ADDQconst {
break
}
off2 := v_0.AuxInt
base := v_0.Args[0]
mem := v.Args[1]
if !(ValAndOff(valoff1).canAdd(off2)) {
break
}
v.reset(OpAMD64ADDQconstmem)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = sym
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (ADDQconstmem [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
// result: (ADDQconstmem [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
for {
valoff1 := v.AuxInt
sym1 := v.Aux
_ = v.Args[1]
v_0 := v.Args[0]
if v_0.Op != OpAMD64LEAQ {
break
}
off2 := v_0.AuxInt
sym2 := v_0.Aux
base := v_0.Args[0]
mem := v.Args[1]
if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64ADDQconstmem)
v.AuxInt = ValAndOff(valoff1).add(off2)
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (ADDQconstmem [valOff] {sym} ptr (MOVSDstore [ValAndOff(valOff).Off()] {sym} ptr x _)) // match: (ADDQconstmem [valOff] {sym} ptr (MOVSDstore [ValAndOff(valOff).Off()] {sym} ptr x _))
// cond: // cond:
// result: (ADDQconst [ValAndOff(valOff).Val()] (MOVQf2i x)) // result: (ADDQconst [ValAndOff(valOff).Val()] (MOVQf2i x))
...@@ -2120,6 +2271,59 @@ func rewriteValueAMD64_OpAMD64ADDQmem_0(v *Value) bool { ...@@ -2120,6 +2271,59 @@ func rewriteValueAMD64_OpAMD64ADDQmem_0(v *Value) bool {
_ = b _ = b
typ := &b.Func.Config.Types typ := &b.Func.Config.Types
_ = typ _ = typ
// match: (ADDQmem [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(off1+off2)
// result: (ADDQmem [off1+off2] {sym} val base mem)
for {
off1 := v.AuxInt
sym := v.Aux
_ = v.Args[2]
val := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAMD64ADDQconst {
break
}
off2 := v_1.AuxInt
base := v_1.Args[0]
mem := v.Args[2]
if !(is32Bit(off1 + off2)) {
break
}
v.reset(OpAMD64ADDQmem)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(val)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (ADDQmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (ADDQmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
_ = v.Args[2]
val := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAMD64LEAQ {
break
}
off2 := v_1.AuxInt
sym2 := v_1.Aux
base := v_1.Args[0]
mem := v.Args[2]
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64ADDQmem)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(val)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (ADDQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) // match: (ADDQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
// cond: // cond:
// result: (ADDQ x (MOVQf2i y)) // result: (ADDQ x (MOVQf2i y))
...@@ -2213,6 +2417,59 @@ func rewriteValueAMD64_OpAMD64ADDSDmem_0(v *Value) bool { ...@@ -2213,6 +2417,59 @@ func rewriteValueAMD64_OpAMD64ADDSDmem_0(v *Value) bool {
_ = b _ = b
typ := &b.Func.Config.Types typ := &b.Func.Config.Types
_ = typ _ = typ
// match: (ADDSDmem [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(off1+off2)
// result: (ADDSDmem [off1+off2] {sym} val base mem)
for {
off1 := v.AuxInt
sym := v.Aux
_ = v.Args[2]
val := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAMD64ADDQconst {
break
}
off2 := v_1.AuxInt
base := v_1.Args[0]
mem := v.Args[2]
if !(is32Bit(off1 + off2)) {
break
}
v.reset(OpAMD64ADDSDmem)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(val)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (ADDSDmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (ADDSDmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
_ = v.Args[2]
val := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAMD64LEAQ {
break
}
off2 := v_1.AuxInt
sym2 := v_1.Aux
base := v_1.Args[0]
mem := v.Args[2]
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64ADDSDmem)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(val)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (ADDSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) // match: (ADDSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
// cond: // cond:
// result: (ADDSD x (MOVQi2f y)) // result: (ADDSD x (MOVQi2f y))
...@@ -2306,6 +2563,59 @@ func rewriteValueAMD64_OpAMD64ADDSSmem_0(v *Value) bool { ...@@ -2306,6 +2563,59 @@ func rewriteValueAMD64_OpAMD64ADDSSmem_0(v *Value) bool {
_ = b _ = b
typ := &b.Func.Config.Types typ := &b.Func.Config.Types
_ = typ _ = typ
// match: (ADDSSmem [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(off1+off2)
// result: (ADDSSmem [off1+off2] {sym} val base mem)
for {
off1 := v.AuxInt
sym := v.Aux
_ = v.Args[2]
val := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAMD64ADDQconst {
break
}
off2 := v_1.AuxInt
base := v_1.Args[0]
mem := v.Args[2]
if !(is32Bit(off1 + off2)) {
break
}
v.reset(OpAMD64ADDSSmem)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(val)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (ADDSSmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (ADDSSmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
_ = v.Args[2]
val := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAMD64LEAQ {
break
}
off2 := v_1.AuxInt
sym2 := v_1.Aux
base := v_1.Args[0]
mem := v.Args[2]
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64ADDSSmem)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(val)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (ADDSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) // match: (ADDSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
// cond: // cond:
// result: (ADDSS x (MOVLi2f y)) // result: (ADDSS x (MOVLi2f y))
...@@ -2528,6 +2838,59 @@ func rewriteValueAMD64_OpAMD64ANDLmem_0(v *Value) bool { ...@@ -2528,6 +2838,59 @@ func rewriteValueAMD64_OpAMD64ANDLmem_0(v *Value) bool {
_ = b _ = b
typ := &b.Func.Config.Types typ := &b.Func.Config.Types
_ = typ _ = typ
// match: (ANDLmem [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(off1+off2)
// result: (ANDLmem [off1+off2] {sym} val base mem)
for {
off1 := v.AuxInt
sym := v.Aux
_ = v.Args[2]
val := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAMD64ADDQconst {
break
}
off2 := v_1.AuxInt
base := v_1.Args[0]
mem := v.Args[2]
if !(is32Bit(off1 + off2)) {
break
}
v.reset(OpAMD64ANDLmem)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(val)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (ANDLmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (ANDLmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
_ = v.Args[2]
val := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAMD64LEAQ {
break
}
off2 := v_1.AuxInt
sym2 := v_1.Aux
base := v_1.Args[0]
mem := v.Args[2]
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64ANDLmem)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(val)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (ANDLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) // match: (ANDLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
// cond: // cond:
// result: (ANDL x (MOVLf2i y)) // result: (ANDL x (MOVLf2i y))
...@@ -2766,6 +3129,59 @@ func rewriteValueAMD64_OpAMD64ANDQmem_0(v *Value) bool { ...@@ -2766,6 +3129,59 @@ func rewriteValueAMD64_OpAMD64ANDQmem_0(v *Value) bool {
_ = b _ = b
typ := &b.Func.Config.Types typ := &b.Func.Config.Types
_ = typ _ = typ
// match: (ANDQmem [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(off1+off2)
// result: (ANDQmem [off1+off2] {sym} val base mem)
for {
off1 := v.AuxInt
sym := v.Aux
_ = v.Args[2]
val := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAMD64ADDQconst {
break
}
off2 := v_1.AuxInt
base := v_1.Args[0]
mem := v.Args[2]
if !(is32Bit(off1 + off2)) {
break
}
v.reset(OpAMD64ANDQmem)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(val)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (ANDQmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (ANDQmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
_ = v.Args[2]
val := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAMD64LEAQ {
break
}
off2 := v_1.AuxInt
sym2 := v_1.Aux
base := v_1.Args[0]
mem := v.Args[2]
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64ANDQmem)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(val)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (ANDQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) // match: (ANDQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
// cond: // cond:
// result: (ANDQ x (MOVQf2i y)) // result: (ANDQ x (MOVQf2i y))
...@@ -15683,6 +16099,59 @@ func rewriteValueAMD64_OpAMD64MULSDmem_0(v *Value) bool { ...@@ -15683,6 +16099,59 @@ func rewriteValueAMD64_OpAMD64MULSDmem_0(v *Value) bool {
_ = b _ = b
typ := &b.Func.Config.Types typ := &b.Func.Config.Types
_ = typ _ = typ
// match: (MULSDmem [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(off1+off2)
// result: (MULSDmem [off1+off2] {sym} val base mem)
for {
off1 := v.AuxInt
sym := v.Aux
_ = v.Args[2]
val := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAMD64ADDQconst {
break
}
off2 := v_1.AuxInt
base := v_1.Args[0]
mem := v.Args[2]
if !(is32Bit(off1 + off2)) {
break
}
v.reset(OpAMD64MULSDmem)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(val)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (MULSDmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (MULSDmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
_ = v.Args[2]
val := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAMD64LEAQ {
break
}
off2 := v_1.AuxInt
sym2 := v_1.Aux
base := v_1.Args[0]
mem := v.Args[2]
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MULSDmem)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(val)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (MULSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) // match: (MULSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
// cond: // cond:
// result: (MULSD x (MOVQi2f y)) // result: (MULSD x (MOVQi2f y))
...@@ -15776,6 +16245,59 @@ func rewriteValueAMD64_OpAMD64MULSSmem_0(v *Value) bool { ...@@ -15776,6 +16245,59 @@ func rewriteValueAMD64_OpAMD64MULSSmem_0(v *Value) bool {
_ = b _ = b
typ := &b.Func.Config.Types typ := &b.Func.Config.Types
_ = typ _ = typ
// match: (MULSSmem [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(off1+off2)
// result: (MULSSmem [off1+off2] {sym} val base mem)
for {
off1 := v.AuxInt
sym := v.Aux
_ = v.Args[2]
val := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAMD64ADDQconst {
break
}
off2 := v_1.AuxInt
base := v_1.Args[0]
mem := v.Args[2]
if !(is32Bit(off1 + off2)) {
break
}
v.reset(OpAMD64MULSSmem)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(val)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (MULSSmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (MULSSmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
_ = v.Args[2]
val := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAMD64LEAQ {
break
}
off2 := v_1.AuxInt
sym2 := v_1.Aux
base := v_1.Args[0]
mem := v.Args[2]
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MULSSmem)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(val)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (MULSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) // match: (MULSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
// cond: // cond:
// result: (MULSS x (MOVLi2f y)) // result: (MULSS x (MOVLi2f y))
...@@ -24415,6 +24937,59 @@ func rewriteValueAMD64_OpAMD64ORLmem_0(v *Value) bool { ...@@ -24415,6 +24937,59 @@ func rewriteValueAMD64_OpAMD64ORLmem_0(v *Value) bool {
_ = b _ = b
typ := &b.Func.Config.Types typ := &b.Func.Config.Types
_ = typ _ = typ
// match: (ORLmem [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(off1+off2)
// result: (ORLmem [off1+off2] {sym} val base mem)
for {
off1 := v.AuxInt
sym := v.Aux
_ = v.Args[2]
val := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAMD64ADDQconst {
break
}
off2 := v_1.AuxInt
base := v_1.Args[0]
mem := v.Args[2]
if !(is32Bit(off1 + off2)) {
break
}
v.reset(OpAMD64ORLmem)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(val)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (ORLmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (ORLmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
_ = v.Args[2]
val := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAMD64LEAQ {
break
}
off2 := v_1.AuxInt
sym2 := v_1.Aux
base := v_1.Args[0]
mem := v.Args[2]
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64ORLmem)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(val)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (ORLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) // match: (ORLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
// cond: // cond:
// result: ( ORL x (MOVLf2i y)) // result: ( ORL x (MOVLf2i y))
...@@ -35157,6 +35732,59 @@ func rewriteValueAMD64_OpAMD64ORQmem_0(v *Value) bool { ...@@ -35157,6 +35732,59 @@ func rewriteValueAMD64_OpAMD64ORQmem_0(v *Value) bool {
_ = b _ = b
typ := &b.Func.Config.Types typ := &b.Func.Config.Types
_ = typ _ = typ
// match: (ORQmem [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(off1+off2)
// result: (ORQmem [off1+off2] {sym} val base mem)
for {
off1 := v.AuxInt
sym := v.Aux
_ = v.Args[2]
val := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAMD64ADDQconst {
break
}
off2 := v_1.AuxInt
base := v_1.Args[0]
mem := v.Args[2]
if !(is32Bit(off1 + off2)) {
break
}
v.reset(OpAMD64ORQmem)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(val)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (ORQmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (ORQmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
_ = v.Args[2]
val := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAMD64LEAQ {
break
}
off2 := v_1.AuxInt
sym2 := v_1.Aux
base := v_1.Args[0]
mem := v.Args[2]
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64ORQmem)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(val)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (ORQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) // match: (ORQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
// cond: // cond:
// result: ( ORQ x (MOVQf2i y)) // result: ( ORQ x (MOVQf2i y))
...@@ -41646,6 +42274,59 @@ func rewriteValueAMD64_OpAMD64SUBLmem_0(v *Value) bool { ...@@ -41646,6 +42274,59 @@ func rewriteValueAMD64_OpAMD64SUBLmem_0(v *Value) bool {
_ = b _ = b
typ := &b.Func.Config.Types typ := &b.Func.Config.Types
_ = typ _ = typ
// match: (SUBLmem [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(off1+off2)
// result: (SUBLmem [off1+off2] {sym} val base mem)
for {
off1 := v.AuxInt
sym := v.Aux
_ = v.Args[2]
val := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAMD64ADDQconst {
break
}
off2 := v_1.AuxInt
base := v_1.Args[0]
mem := v.Args[2]
if !(is32Bit(off1 + off2)) {
break
}
v.reset(OpAMD64SUBLmem)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(val)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (SUBLmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (SUBLmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
_ = v.Args[2]
val := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAMD64LEAQ {
break
}
off2 := v_1.AuxInt
sym2 := v_1.Aux
base := v_1.Args[0]
mem := v.Args[2]
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64SUBLmem)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(val)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (SUBLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) // match: (SUBLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
// cond: // cond:
// result: (SUBL x (MOVLf2i y)) // result: (SUBL x (MOVLf2i y))
...@@ -41831,6 +42512,59 @@ func rewriteValueAMD64_OpAMD64SUBQmem_0(v *Value) bool { ...@@ -41831,6 +42512,59 @@ func rewriteValueAMD64_OpAMD64SUBQmem_0(v *Value) bool {
_ = b _ = b
typ := &b.Func.Config.Types typ := &b.Func.Config.Types
_ = typ _ = typ
// match: (SUBQmem [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(off1+off2)
// result: (SUBQmem [off1+off2] {sym} val base mem)
for {
off1 := v.AuxInt
sym := v.Aux
_ = v.Args[2]
val := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAMD64ADDQconst {
break
}
off2 := v_1.AuxInt
base := v_1.Args[0]
mem := v.Args[2]
if !(is32Bit(off1 + off2)) {
break
}
v.reset(OpAMD64SUBQmem)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(val)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (SUBQmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (SUBQmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
_ = v.Args[2]
val := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAMD64LEAQ {
break
}
off2 := v_1.AuxInt
sym2 := v_1.Aux
base := v_1.Args[0]
mem := v.Args[2]
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64SUBQmem)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(val)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (SUBQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) // match: (SUBQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
// cond: // cond:
// result: (SUBQ x (MOVQf2i y)) // result: (SUBQ x (MOVQf2i y))
...@@ -41898,6 +42632,59 @@ func rewriteValueAMD64_OpAMD64SUBSDmem_0(v *Value) bool { ...@@ -41898,6 +42632,59 @@ func rewriteValueAMD64_OpAMD64SUBSDmem_0(v *Value) bool {
_ = b _ = b
typ := &b.Func.Config.Types typ := &b.Func.Config.Types
_ = typ _ = typ
// match: (SUBSDmem [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(off1+off2)
// result: (SUBSDmem [off1+off2] {sym} val base mem)
for {
off1 := v.AuxInt
sym := v.Aux
_ = v.Args[2]
val := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAMD64ADDQconst {
break
}
off2 := v_1.AuxInt
base := v_1.Args[0]
mem := v.Args[2]
if !(is32Bit(off1 + off2)) {
break
}
v.reset(OpAMD64SUBSDmem)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(val)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (SUBSDmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (SUBSDmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
_ = v.Args[2]
val := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAMD64LEAQ {
break
}
off2 := v_1.AuxInt
sym2 := v_1.Aux
base := v_1.Args[0]
mem := v.Args[2]
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64SUBSDmem)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(val)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (SUBSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) // match: (SUBSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
// cond: // cond:
// result: (SUBSD x (MOVQi2f y)) // result: (SUBSD x (MOVQi2f y))
...@@ -41965,6 +42752,59 @@ func rewriteValueAMD64_OpAMD64SUBSSmem_0(v *Value) bool { ...@@ -41965,6 +42752,59 @@ func rewriteValueAMD64_OpAMD64SUBSSmem_0(v *Value) bool {
_ = b _ = b
typ := &b.Func.Config.Types typ := &b.Func.Config.Types
_ = typ _ = typ
// match: (SUBSSmem [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(off1+off2)
// result: (SUBSSmem [off1+off2] {sym} val base mem)
for {
off1 := v.AuxInt
sym := v.Aux
_ = v.Args[2]
val := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAMD64ADDQconst {
break
}
off2 := v_1.AuxInt
base := v_1.Args[0]
mem := v.Args[2]
if !(is32Bit(off1 + off2)) {
break
}
v.reset(OpAMD64SUBSSmem)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(val)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (SUBSSmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (SUBSSmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
_ = v.Args[2]
val := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAMD64LEAQ {
break
}
off2 := v_1.AuxInt
sym2 := v_1.Aux
base := v_1.Args[0]
mem := v.Args[2]
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64SUBSSmem)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(val)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (SUBSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) // match: (SUBSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
// cond: // cond:
// result: (SUBSS x (MOVLi2f y)) // result: (SUBSS x (MOVLi2f y))
...@@ -43030,6 +43870,59 @@ func rewriteValueAMD64_OpAMD64XORLmem_0(v *Value) bool { ...@@ -43030,6 +43870,59 @@ func rewriteValueAMD64_OpAMD64XORLmem_0(v *Value) bool {
_ = b _ = b
typ := &b.Func.Config.Types typ := &b.Func.Config.Types
_ = typ _ = typ
// match: (XORLmem [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(off1+off2)
// result: (XORLmem [off1+off2] {sym} val base mem)
for {
off1 := v.AuxInt
sym := v.Aux
_ = v.Args[2]
val := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAMD64ADDQconst {
break
}
off2 := v_1.AuxInt
base := v_1.Args[0]
mem := v.Args[2]
if !(is32Bit(off1 + off2)) {
break
}
v.reset(OpAMD64XORLmem)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(val)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (XORLmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (XORLmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
_ = v.Args[2]
val := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAMD64LEAQ {
break
}
off2 := v_1.AuxInt
sym2 := v_1.Aux
base := v_1.Args[0]
mem := v.Args[2]
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64XORLmem)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(val)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (XORLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) // match: (XORLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
// cond: // cond:
// result: (XORL x (MOVLf2i y)) // result: (XORL x (MOVLf2i y))
...@@ -43274,6 +44167,59 @@ func rewriteValueAMD64_OpAMD64XORQmem_0(v *Value) bool { ...@@ -43274,6 +44167,59 @@ func rewriteValueAMD64_OpAMD64XORQmem_0(v *Value) bool {
_ = b _ = b
typ := &b.Func.Config.Types typ := &b.Func.Config.Types
_ = typ _ = typ
// match: (XORQmem [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(off1+off2)
// result: (XORQmem [off1+off2] {sym} val base mem)
for {
off1 := v.AuxInt
sym := v.Aux
_ = v.Args[2]
val := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAMD64ADDQconst {
break
}
off2 := v_1.AuxInt
base := v_1.Args[0]
mem := v.Args[2]
if !(is32Bit(off1 + off2)) {
break
}
v.reset(OpAMD64XORQmem)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(val)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (XORQmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (XORQmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
_ = v.Args[2]
val := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAMD64LEAQ {
break
}
off2 := v_1.AuxInt
sym2 := v_1.Aux
base := v_1.Args[0]
mem := v.Args[2]
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64XORQmem)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(val)
v.AddArg(base)
v.AddArg(mem)
return true
}
// match: (XORQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) // match: (XORQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
// cond: // cond:
// result: (XORQ x (MOVQf2i y)) // result: (XORQ x (MOVQf2i y))
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment