Commit fa9435cd authored by Keith Randall

cmd/compile: clean up rewrite rules

Break really long lines.
Add spacing to line up columns.

In AMD64, put all the optimization rules after all the
lowering rules.

Change-Id: I45cc7368bf278416e67f89e74358db1bd4326a93
Reviewed-on: https://go-review.googlesource.com/22470
Reviewed-by: David Chase <drchase@google.com>
parent 1fb4e4de
......@@ -85,40 +85,27 @@
(Com16 x) -> (NOTL x)
(Com8 x) -> (NOTL x)
// CMPQconst 0 below is redundant because BSF sets Z but how to remove?
// Lowering boolean ops
(AndB x y) -> (ANDL x y)
(OrB x y) -> (ORL x y)
(Not x) -> (XORLconst [1] x)
// Lowering pointer arithmetic
(OffPtr [off] ptr) && is32Bit(off) -> (ADDQconst [off] ptr)
(OffPtr [off] ptr) -> (ADDQ (MOVQconst [off]) ptr)
// Lowering other arithmetic
// TODO: CMPQconst 0 below is redundant because BSF sets Z but how to remove?
(Ctz64 <t> x) -> (CMOVQEQconst (BSFQ <t> x) (CMPQconst x [0]) [64])
(Ctz32 <t> x) -> (CMOVLEQconst (BSFL <t> x) (CMPLconst x [0]) [32])
(Ctz16 <t> x) -> (CMOVWEQconst (BSFW <t> x) (CMPWconst x [0]) [16])
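
For reference, a minimal Go sketch (not part of this change) of what the Ctz64 lowering computes; the helper name is illustrative. BSF leaves its result undefined when the input is zero, so the CMOV selects the bit-width constant in that case:

```go
// ctz64 mirrors the Ctz64 rule: CMOVQEQconst [64] picks 64 when the
// CMPQconst finds x == 0, otherwise the BSFQ result is used.
func ctz64(x uint64) uint64 {
	if x == 0 {
		return 64
	}
	n := uint64(0)
	for x&1 == 0 { // index of the lowest set bit, as BSFQ computes it
		x >>= 1
		n++
	}
	return n
}
```
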
(CMOVQEQconst x (InvertFlags y) [c]) -> (CMOVQNEconst x y [c])
(CMOVLEQconst x (InvertFlags y) [c]) -> (CMOVLNEconst x y [c])
(CMOVWEQconst x (InvertFlags y) [c]) -> (CMOVWNEconst x y [c])
(CMOVQEQconst _ (FlagEQ) [c]) -> (Const64 [c])
(CMOVLEQconst _ (FlagEQ) [c]) -> (Const32 [c])
(CMOVWEQconst _ (FlagEQ) [c]) -> (Const16 [c])
(CMOVQEQconst x (FlagLT_ULT)) -> x
(CMOVLEQconst x (FlagLT_ULT)) -> x
(CMOVWEQconst x (FlagLT_ULT)) -> x
(CMOVQEQconst x (FlagLT_UGT)) -> x
(CMOVLEQconst x (FlagLT_UGT)) -> x
(CMOVWEQconst x (FlagLT_UGT)) -> x
(CMOVQEQconst x (FlagGT_ULT)) -> x
(CMOVLEQconst x (FlagGT_ULT)) -> x
(CMOVWEQconst x (FlagGT_ULT)) -> x
(CMOVQEQconst x (FlagGT_UGT)) -> x
(CMOVLEQconst x (FlagGT_UGT)) -> x
(CMOVWEQconst x (FlagGT_UGT)) -> x
(Bswap64 x) -> (BSWAPQ x)
(Bswap32 x) -> (BSWAPL x)
(Sqrt x) -> (SQRTSD x)
// Lowering extension
// Note: we always extend to 64 bits even though some ops don't need that many result bits.
(SignExt8to16 x) -> (MOVBQSX x)
(SignExt8to32 x) -> (MOVBQSX x)
......@@ -134,6 +121,16 @@
(ZeroExt16to64 x) -> (MOVWQZX x)
(ZeroExt32to64 x) -> (MOVLQZX x)
// Lowering truncation
// Because we ignore high parts of registers, truncates are just copies.
(Trunc16to8 x) -> x
(Trunc32to8 x) -> x
(Trunc32to16 x) -> x
(Trunc64to8 x) -> x
(Trunc64to16 x) -> x
(Trunc64to32 x) -> x
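
A brief sketch (not part of this change) of why these truncations need no instruction: AMD64 registers hold the narrower value in their low bits, and the narrower consumers read only those bits. The function name is illustrative:

```go
// trunc64to32 is what Trunc64to32 means after lowering: take the low
// 32 bits; the consuming 32-bit ops ignore the high half anyway.
func trunc64to32(x uint64) uint32 {
	return uint32(x)
}
```
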
// Lowering float <-> int
(Cvt32to32F x) -> (CVTSL2SS x)
(Cvt32to64F x) -> (CVTSL2SD x)
(Cvt64to32F x) -> (CVTSQ2SS x)
......@@ -147,18 +144,9 @@
(Cvt32Fto64F x) -> (CVTSS2SD x)
(Cvt64Fto32F x) -> (CVTSD2SS x)
// Because we ignore high parts of registers, truncates are just copies.
(Trunc16to8 x) -> x
(Trunc32to8 x) -> x
(Trunc32to16 x) -> x
(Trunc64to8 x) -> x
(Trunc64to16 x) -> x
(Trunc64to32 x) -> x
// Lowering shifts
// Unsigned shifts need to return 0 if shift amount is >= width of shifted value.
// result = (arg << shift) & (shift >= argbits ? 0 : 0xffffffffffffffff)
// Note: for small shifts we generate 32 bits of mask even when we don't need it all.
(Lsh64x64 <t> x y) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
(Lsh64x32 <t> x y) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
(Lsh64x16 <t> x y) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
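
A hedged Go sketch (not part of this change) of the semantics the unsigned-shift rules above preserve; names are illustrative. The hardware shift uses only the low bits of the count, so the SBB-generated mask zeroes the result for out-of-range counts:

```go
// lsh64 computes what (ANDQ (SHLQ x y) (SBBQcarrymask (CMPQconst y [64]))) does.
func lsh64(x, s uint64) uint64 {
	shifted := x << (s & 63) // SHLQ uses only the low 6 bits of the count
	var mask uint64
	if s < 64 {
		mask = ^uint64(0) // SBBQcarrymask: all ones iff the compare borrowed (s < 64)
	}
	return shifted & mask // ANDQ: forces 0 when s >= 64
}
```
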
......@@ -206,7 +194,6 @@
// Signed right shift needs to return 0/-1 if shift amount is >= width of shifted value.
// We implement this by setting the shift value to -1 (all ones) if the shift value is >= width.
// Note: for small shift widths we generate 32 bits of mask even when we don't need it all.
(Rsh64x64 <t> x y) -> (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64])))))
(Rsh64x32 <t> x y) -> (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64])))))
(Rsh64x16 <t> x y) -> (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
......@@ -227,6 +214,7 @@
(Rsh8x16 <t> x y) -> (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
(Rsh8x8 <t> x y) -> (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
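
A similar sketch (not part of this change) for the signed case: instead of masking the result, the rules saturate the shift count, so out-of-range shifts yield 0 or -1 according to the sign bit. Names are illustrative:

```go
// rsh64x computes what (SARQ x (ORQ y (NOTQ (SBBQcarrymask (CMPQconst y [64]))))) does.
func rsh64x(x int64, s uint64) int64 {
	if s >= 64 {
		s = 63 // the ORQ with all-ones makes the effective SARQ count 63
	}
	return x >> s
}
```
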
// Lowering comparisons
(Less64 x y) -> (SETL (CMPQ x y))
(Less32 x y) -> (SETL (CMPL x y))
(Less16 x y) -> (SETL (CMPW x y))
......@@ -295,6 +283,7 @@
(Neq64F x y) -> (SETNEF (UCOMISD x y))
(Neq32F x y) -> (SETNEF (UCOMISS x y))
// Lowering loads
(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) -> (MOVQload ptr mem)
(Load <t> ptr mem) && is32BitInt(t) -> (MOVLload ptr mem)
(Load <t> ptr mem) && is16BitInt(t) -> (MOVWload ptr mem)
......@@ -302,6 +291,7 @@
(Load <t> ptr mem) && is32BitFloat(t) -> (MOVSSload ptr mem)
(Load <t> ptr mem) && is64BitFloat(t) -> (MOVSDload ptr mem)
// Lowering stores
// These more-specific FP versions of Store pattern should come first.
(Store [8] ptr val mem) && is64BitFloat(val.Type) -> (MOVSDstore ptr val mem)
(Store [4] ptr val mem) && is32BitFloat(val.Type) -> (MOVSSstore ptr val mem)
......@@ -311,19 +301,7 @@
(Store [2] ptr val mem) -> (MOVWstore ptr val mem)
(Store [1] ptr val mem) -> (MOVBstore ptr val mem)
// We want this to stick out so the to/from ptr conversion is obvious
(Convert <t> x mem) -> (MOVQconvert <t> x mem)
// checks
(IsNonNil p) -> (SETNE (TESTQ p p))
(IsInBounds idx len) -> (SETB (CMPQ idx len))
(IsSliceInBounds idx len) -> (SETBE (CMPQ idx len))
(NilCheck ptr mem) -> (LoweredNilCheck ptr mem)
(GetG mem) -> (LoweredGetG mem)
(GetClosurePtr) -> (LoweredGetClosurePtr)
// Small moves
// Lowering moves
(Move [0] _ _ mem) -> mem
(Move [1] dst src mem) -> (MOVBstore dst (MOVBload src mem) mem)
(Move [2] dst src mem) -> (MOVWstore dst (MOVWload src mem) mem)
......@@ -368,13 +346,56 @@
(Move [size] dst src mem) && (size > 16*64 || config.noDuffDevice) && size%8 == 0 ->
(REPMOVSQ dst src (MOVQconst [size/8]) mem)
(AndB x y) -> (ANDL x y)
(OrB x y) -> (ORL x y)
(Not x) -> (XORLconst [1] x)
// Lowering Zero instructions
(Zero [0] _ mem) -> mem
(Zero [1] destptr mem) -> (MOVBstoreconst [0] destptr mem)
(Zero [2] destptr mem) -> (MOVWstoreconst [0] destptr mem)
(Zero [4] destptr mem) -> (MOVLstoreconst [0] destptr mem)
(Zero [8] destptr mem) -> (MOVQstoreconst [0] destptr mem)
(OffPtr [off] ptr) && is32Bit(off) -> (ADDQconst [off] ptr)
(OffPtr [off] ptr) -> (ADDQ (MOVQconst [off]) ptr)
(Zero [3] destptr mem) ->
(MOVBstoreconst [makeValAndOff(0,2)] destptr
(MOVWstoreconst [0] destptr mem))
(Zero [5] destptr mem) ->
(MOVBstoreconst [makeValAndOff(0,4)] destptr
(MOVLstoreconst [0] destptr mem))
(Zero [6] destptr mem) ->
(MOVWstoreconst [makeValAndOff(0,4)] destptr
(MOVLstoreconst [0] destptr mem))
(Zero [7] destptr mem) ->
(MOVLstoreconst [makeValAndOff(0,3)] destptr
(MOVLstoreconst [0] destptr mem))
// Strip off any fractional word zeroing.
(Zero [size] destptr mem) && size%8 != 0 && size > 8 ->
(Zero [size-size%8] (ADDQconst destptr [size%8])
(MOVQstoreconst [0] destptr mem))
// Zero small numbers of words directly.
(Zero [16] destptr mem) ->
(MOVQstoreconst [makeValAndOff(0,8)] destptr
(MOVQstoreconst [0] destptr mem))
(Zero [24] destptr mem) ->
(MOVQstoreconst [makeValAndOff(0,16)] destptr
(MOVQstoreconst [makeValAndOff(0,8)] destptr
(MOVQstoreconst [0] destptr mem)))
(Zero [32] destptr mem) ->
(MOVQstoreconst [makeValAndOff(0,24)] destptr
(MOVQstoreconst [makeValAndOff(0,16)] destptr
(MOVQstoreconst [makeValAndOff(0,8)] destptr
(MOVQstoreconst [0] destptr mem))))
// Medium zeroing uses a duff device.
(Zero [size] destptr mem) && size <= 1024 && size%8 == 0 && size%16 != 0 && !config.noDuffDevice ->
(Zero [size-8] (ADDQconst [8] destptr) (MOVQstore destptr (MOVQconst [0]) mem))
(Zero [size] destptr mem) && size <= 1024 && size%16 == 0 && !config.noDuffDevice ->
(DUFFZERO [duffStart(size)] (ADDQconst [duffAdj(size)] destptr) (MOVOconst [0]) mem)
// Large zeroing uses REP STOSQ.
(Zero [size] destptr mem) && (size > 1024 || (config.noDuffDevice && size > 32)) && size%8 == 0 ->
(REPSTOSQ destptr (MOVQconst [size/8]) (MOVQconst [0]) mem)
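
A rough Go sketch (not part of this change) of the size-class strategy the Zero rules encode; the function and return strings are purely illustrative, and the thresholds approximate the rule conditions above:

```go
// zeroStrategy summarizes, approximately, which lowering the Zero rules pick.
func zeroStrategy(size int64, noDuffDevice bool) string {
	switch {
	case size <= 8, size == 16, size == 24, size == 32:
		return "a few unrolled constant stores"
	case size%8 != 0:
		return "store the odd leading bytes, then Zero the 8-byte-aligned remainder"
	case size > 1024 || (noDuffDevice && size > 32):
		return "REP STOSQ"
	default:
		return "DUFFZERO (with one plain store first if size is not a multiple of 16)"
	}
}
```
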
// Lowering constants
(Const8 [val]) -> (MOVLconst [val])
(Const16 [val]) -> (MOVLconst [val])
(Const32 [val]) -> (MOVLconst [val])
......@@ -384,8 +405,22 @@
(ConstNil) -> (MOVQconst [0])
(ConstBool [b]) -> (MOVLconst [b])
(Addr {sym} base) -> (LEAQ {sym} base)
// Lowering calls
(StaticCall [argwid] {target} mem) -> (CALLstatic [argwid] {target} mem)
(ClosureCall [argwid] entry closure mem) -> (CALLclosure [argwid] entry closure mem)
(DeferCall [argwid] mem) -> (CALLdefer [argwid] mem)
(GoCall [argwid] mem) -> (CALLgo [argwid] mem)
(InterCall [argwid] entry mem) -> (CALLinter [argwid] entry mem)
// Miscellaneous
(Convert <t> x mem) -> (MOVQconvert <t> x mem)
(IsNonNil p) -> (SETNE (TESTQ p p))
(IsInBounds idx len) -> (SETB (CMPQ idx len))
(IsSliceInBounds idx len) -> (SETBE (CMPQ idx len))
(NilCheck ptr mem) -> (LoweredNilCheck ptr mem)
(GetG mem) -> (LoweredGetG mem)
(GetClosurePtr) -> (LoweredGetClosurePtr)
(Addr {sym} base) -> (LEAQ {sym} base)
(ITab (Load ptr mem)) -> (MOVQload ptr mem)
// block rewrites
......@@ -408,6 +443,13 @@
(If cond yes no) -> (NE (TESTB cond cond) yes no)
// ***************************
// Above: lowering rules
// Below: optimizations
// ***************************
// TODO: Should the optimizations be a separate pass?
// Fold boolean tests into blocks
(NE (TESTB (SETL cmp) (SETL cmp)) yes no) -> (LT cmp yes no)
(NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) -> (LE cmp yes no)
(NE (TESTB (SETG cmp) (SETG cmp)) yes no) -> (GT cmp yes no)
......@@ -429,15 +471,6 @@
// (SETNEF x) -> (ORQ (SETNE <config.Frontend().TypeInt8()> x) (SETNAN <config.Frontend().TypeInt8()> x))
// (SETEQF x) -> (ANDQ (SETEQ <config.Frontend().TypeInt8()> x) (SETORD <config.Frontend().TypeInt8()> x))
(StaticCall [argwid] {target} mem) -> (CALLstatic [argwid] {target} mem)
(ClosureCall [argwid] entry closure mem) -> (CALLclosure [argwid] entry closure mem)
(DeferCall [argwid] mem) -> (CALLdefer [argwid] mem)
(GoCall [argwid] mem) -> (CALLgo [argwid] mem)
(InterCall [argwid] entry mem) -> (CALLinter [argwid] entry mem)
// Rules below here apply some simple optimizations after lowering.
// TODO: Should this be a separate pass?
// fold constants into instructions
(ADDQ x (MOVQconst [c])) && is32Bit(c) -> (ADDQconst [c] x)
(ADDQ (MOVQconst [c]) x) && is32Bit(c) -> (ADDQconst [c] x)
......@@ -831,7 +864,6 @@
(MOVLstoreconst [x] {sym} (ADDQ ptr idx) mem) -> (MOVLstoreconstidx1 [x] {sym} ptr idx mem)
(MOVQstoreconst [x] {sym} (ADDQ ptr idx) mem) -> (MOVQstoreconstidx1 [x] {sym} ptr idx mem)
// combine SHLQ into indexed loads and stores
(MOVWloadidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) -> (MOVWloadidx2 [c] {sym} ptr idx mem)
(MOVLloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) -> (MOVLloadidx4 [c] {sym} ptr idx mem)
......@@ -952,55 +984,6 @@
(LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
// lower Zero instructions with word sizes
(Zero [0] _ mem) -> mem
(Zero [1] destptr mem) -> (MOVBstoreconst [0] destptr mem)
(Zero [2] destptr mem) -> (MOVWstoreconst [0] destptr mem)
(Zero [4] destptr mem) -> (MOVLstoreconst [0] destptr mem)
(Zero [8] destptr mem) -> (MOVQstoreconst [0] destptr mem)
(Zero [3] destptr mem) ->
(MOVBstoreconst [makeValAndOff(0,2)] destptr
(MOVWstoreconst [0] destptr mem))
(Zero [5] destptr mem) ->
(MOVBstoreconst [makeValAndOff(0,4)] destptr
(MOVLstoreconst [0] destptr mem))
(Zero [6] destptr mem) ->
(MOVWstoreconst [makeValAndOff(0,4)] destptr
(MOVLstoreconst [0] destptr mem))
(Zero [7] destptr mem) ->
(MOVLstoreconst [makeValAndOff(0,3)] destptr
(MOVLstoreconst [0] destptr mem))
// Strip off any fractional word zeroing.
(Zero [size] destptr mem) && size%8 != 0 && size > 8 ->
(Zero [size-size%8] (ADDQconst destptr [size%8])
(MOVQstoreconst [0] destptr mem))
// Zero small numbers of words directly.
(Zero [16] destptr mem) ->
(MOVQstoreconst [makeValAndOff(0,8)] destptr
(MOVQstoreconst [0] destptr mem))
(Zero [24] destptr mem) ->
(MOVQstoreconst [makeValAndOff(0,16)] destptr
(MOVQstoreconst [makeValAndOff(0,8)] destptr
(MOVQstoreconst [0] destptr mem)))
(Zero [32] destptr mem) ->
(MOVQstoreconst [makeValAndOff(0,24)] destptr
(MOVQstoreconst [makeValAndOff(0,16)] destptr
(MOVQstoreconst [makeValAndOff(0,8)] destptr
(MOVQstoreconst [0] destptr mem))))
// Medium zeroing uses a duff device.
(Zero [size] destptr mem) && size <= 1024 && size%8 == 0 && size%16 != 0 && !config.noDuffDevice ->
(Zero [size-8] (ADDQconst [8] destptr) (MOVQstore destptr (MOVQconst [0]) mem))
(Zero [size] destptr mem) && size <= 1024 && size%16 == 0 && !config.noDuffDevice ->
(DUFFZERO [duffStart(size)] (ADDQconst [duffAdj(size)] destptr) (MOVOconst [0]) mem)
// Large zeroing uses REP STOSQ.
(Zero [size] destptr mem) && (size > 1024 || (config.noDuffDevice && size > 32)) && size%8 == 0 ->
(REPSTOSQ destptr (MOVQconst [size/8]) (MOVQconst [0]) mem)
// Absorb InvertFlags into branches.
(LT (InvertFlags cmp) yes no) -> (GT cmp yes no)
(GT (InvertFlags cmp) yes no) -> (LT cmp yes no)
......@@ -1262,6 +1245,31 @@
(CMPWconst x [0]) -> (TESTW x x)
(CMPBconst x [0]) -> (TESTB x x)
// Optimizing conditional moves
(CMOVQEQconst x (InvertFlags y) [c]) -> (CMOVQNEconst x y [c])
(CMOVLEQconst x (InvertFlags y) [c]) -> (CMOVLNEconst x y [c])
(CMOVWEQconst x (InvertFlags y) [c]) -> (CMOVWNEconst x y [c])
(CMOVQEQconst _ (FlagEQ) [c]) -> (Const64 [c])
(CMOVLEQconst _ (FlagEQ) [c]) -> (Const32 [c])
(CMOVWEQconst _ (FlagEQ) [c]) -> (Const16 [c])
(CMOVQEQconst x (FlagLT_ULT)) -> x
(CMOVLEQconst x (FlagLT_ULT)) -> x
(CMOVWEQconst x (FlagLT_ULT)) -> x
(CMOVQEQconst x (FlagLT_UGT)) -> x
(CMOVLEQconst x (FlagLT_UGT)) -> x
(CMOVWEQconst x (FlagLT_UGT)) -> x
(CMOVQEQconst x (FlagGT_ULT)) -> x
(CMOVLEQconst x (FlagGT_ULT)) -> x
(CMOVWEQconst x (FlagGT_ULT)) -> x
(CMOVQEQconst x (FlagGT_UGT)) -> x
(CMOVLEQconst x (FlagGT_UGT)) -> x
(CMOVWEQconst x (FlagGT_UGT)) -> x
// Combining byte loads into larger (unaligned) loads.
// There are many ways these combinations could occur. This is
// designed to match the way encoding/binary.LittleEndian does it.
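
For context (not part of this change), this is the shape of code in the standard library that the byte-load combining is designed to recognize: encoding/binary.LittleEndian.Uint32 reads four bytes and ORs them together, and the rules fuse that into a single unaligned load.

```go
// readUint32LE mirrors encoding/binary.LittleEndian.Uint32: four byte
// loads combined with shifts and ORs, which the rules turn into one
// 32-bit load on AMD64.
func readUint32LE(b []byte) uint32 {
	return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
}
```
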
......
......@@ -13,7 +13,8 @@
// - an additional conditional can be provided after the match pattern with "&&".
// on the generated side
// - the type of the top-level expression is the same as the one on the left-hand side.
// - the type of any subexpressions must be specified explicitly.
// - the type of any subexpressions must be specified explicitly (or
// be specified in the op's type field).
// - auxint will be 0 if not specified.
// - aux will be nil if not specified.
......@@ -66,15 +67,15 @@
(Const32F [f2i(float64(i2f32(c) * i2f32(d)))])
(Mul64F (Const64F [c]) (Const64F [d])) -> (Const64F [f2i(i2f(c) * i2f(d))])
(Mod8 (Const8 [c]) (Const8 [d])) && d != 0-> (Const8 [int64(int8(c % d))])
(Mod16 (Const16 [c]) (Const16 [d])) && d != 0-> (Const16 [int64(int16(c % d))])
(Mod32 (Const32 [c]) (Const32 [d])) && d != 0-> (Const32 [int64(int32(c % d))])
(Mod64 (Const64 [c]) (Const64 [d])) && d != 0-> (Const64 [c % d])
(Mod8 (Const8 [c]) (Const8 [d])) && d != 0 -> (Const8 [int64(int8(c % d))])
(Mod16 (Const16 [c]) (Const16 [d])) && d != 0 -> (Const16 [int64(int16(c % d))])
(Mod32 (Const32 [c]) (Const32 [d])) && d != 0 -> (Const32 [int64(int32(c % d))])
(Mod64 (Const64 [c]) (Const64 [d])) && d != 0 -> (Const64 [c % d])
(Mod8u (Const8 [c]) (Const8 [d])) && d != 0-> (Const8 [int64(uint8(c) % uint8(d))])
(Mod16u (Const16 [c]) (Const16 [d])) && d != 0-> (Const16 [int64(uint16(c) % uint16(d))])
(Mod32u (Const32 [c]) (Const32 [d])) && d != 0-> (Const32 [int64(uint32(c) % uint32(d))])
(Mod64u (Const64 [c]) (Const64 [d])) && d != 0-> (Const64 [int64(uint64(c) % uint64(d))])
(Mod8u (Const8 [c]) (Const8 [d])) && d != 0 -> (Const8 [int64(uint8(c) % uint8(d))])
(Mod16u (Const16 [c]) (Const16 [d])) && d != 0 -> (Const16 [int64(uint16(c) % uint16(d))])
(Mod32u (Const32 [c]) (Const32 [d])) && d != 0 -> (Const32 [int64(uint32(c) % uint32(d))])
(Mod64u (Const64 [c]) (Const64 [d])) && d != 0 -> (Const64 [int64(uint64(c) % uint64(d))])
(Lsh64x64 (Const64 [c]) (Const64 [d])) -> (Const64 [c << uint64(d)])
(Rsh64x64 (Const64 [c]) (Const64 [d])) -> (Const64 [c >> uint64(d)])
......@@ -89,31 +90,6 @@
(Rsh8x64 (Const8 [c]) (Const64 [d])) -> (Const8 [int64(int8(c) >> uint64(d))])
(Rsh8Ux64 (Const8 [c]) (Const64 [d])) -> (Const8 [int64(int8(uint8(c) >> uint64(d)))])
(Lsh64x64 (Const64 [0]) _) -> (Const64 [0])
(Rsh64x64 (Const64 [0]) _) -> (Const64 [0])
(Rsh64Ux64 (Const64 [0]) _) -> (Const64 [0])
(Lsh32x64 (Const32 [0]) _) -> (Const32 [0])
(Rsh32x64 (Const32 [0]) _) -> (Const32 [0])
(Rsh32Ux64 (Const32 [0]) _) -> (Const32 [0])
(Lsh16x64 (Const16 [0]) _) -> (Const16 [0])
(Rsh16x64 (Const16 [0]) _) -> (Const16 [0])
(Rsh16Ux64 (Const16 [0]) _) -> (Const16 [0])
(Lsh8x64 (Const8 [0]) _) -> (Const8 [0])
(Rsh8x64 (Const8 [0]) _) -> (Const8 [0])
(Rsh8Ux64 (Const8 [0]) _) -> (Const8 [0])
// ((x >> c1) << c2) >> c3
(Rsh64Ux64 (Lsh64x64 (Rsh64Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3])) && uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) -> (Rsh64Ux64 x (Const64 <config.fe.TypeUInt64()> [c1-c2+c3]))
(Rsh32Ux32 (Lsh32x32 (Rsh32Ux32 x (Const32 [c1])) (Const32 [c2])) (Const32 [c3])) && uint32(c1) >= uint32(c2) && uint32(c3) >= uint32(c2) -> (Rsh32Ux32 x (Const32 <config.fe.TypeUInt32()> [int64(int32(c1-c2+c3))]))
(Rsh16Ux16 (Lsh16x16 (Rsh16Ux16 x (Const16 [c1])) (Const16 [c2])) (Const16 [c3])) && uint16(c1) >= uint16(c2) && uint16(c3) >= uint16(c2) -> (Rsh16Ux16 x (Const16 <config.fe.TypeUInt16()> [int64(int16(c1-c2+c3))]))
(Rsh8Ux8 (Lsh8x8 (Rsh8Ux8 x (Const8 [c1])) (Const8 [c2])) (Const8 [c3])) && uint8(c1) >= uint8(c2) && uint8(c3) >= uint8(c2) -> (Rsh8Ux8 x (Const8 <config.fe.TypeUInt8()> [int64(int8(c1-c2+c3))]))
// ((x << c1) >> c2) << c3
(Lsh64x64 (Rsh64Ux64 (Lsh64x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3])) && uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) -> (Lsh64x64 x (Const64 <config.fe.TypeUInt64()> [c1-c2+c3]))
(Lsh32x32 (Rsh32Ux32 (Lsh32x32 x (Const32 [c1])) (Const32 [c2])) (Const32 [c3])) && uint32(c1) >= uint32(c2) && uint32(c3) >= uint32(c2) -> (Lsh32x32 x (Const32 <config.fe.TypeUInt32()> [int64(int32(c1-c2+c3))]))
(Lsh16x16 (Rsh16Ux16 (Lsh16x16 x (Const16 [c1])) (Const16 [c2])) (Const16 [c3])) && uint16(c1) >= uint16(c2) && uint16(c3) >= uint16(c2) -> (Lsh16x16 x (Const16 <config.fe.TypeUInt16()> [int64(int16(c1-c2+c3))]))
(Lsh8x8 (Rsh8Ux8 (Lsh8x8 x (Const8 [c1])) (Const8 [c2])) (Const8 [c3])) && uint8(c1) >= uint8(c2) && uint8(c3) >= uint8(c2) -> (Lsh8x8 x (Const8 <config.fe.TypeUInt8()> [int64(int8(c1-c2+c3))]))
// Fold IsInBounds when the range of the index cannot exceed the limit.
(IsInBounds (ZeroExt8to32 _) (Const32 [c])) && (1 << 8) <= c -> (ConstBool [1])
(IsInBounds (ZeroExt8to64 _) (Const64 [c])) && (1 << 8) <= c -> (ConstBool [1])
......@@ -207,8 +183,10 @@
// Distribute multiplication c * (d+x) -> c*d + c*x. Useful for:
// a[i].b = ...; a[i+1].b = ...
(Mul64 (Const64 <t> [c]) (Add64 <t> (Const64 <t> [d]) x)) -> (Add64 (Const64 <t> [c*d]) (Mul64 <t> (Const64 <t> [c]) x))
(Mul32 (Const32 <t> [c]) (Add32 <t> (Const32 <t> [d]) x)) -> (Add32 (Const32 <t> [int64(int32(c*d))]) (Mul32 <t> (Const32 <t> [c]) x))
(Mul64 (Const64 <t> [c]) (Add64 <t> (Const64 <t> [d]) x)) ->
(Add64 (Const64 <t> [c*d]) (Mul64 <t> (Const64 <t> [c]) x))
(Mul32 (Const32 <t> [c]) (Add32 <t> (Const32 <t> [d]) x)) ->
(Add32 (Const32 <t> [int64(int32(c*d))]) (Mul32 <t> (Const32 <t> [c]) x))
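
A small sketch (not part of this change) of the addressing pattern the distribution targets; base, size, and off are illustrative names. After the rewrite, the variable product size*i becomes a common subexpression shared by the addresses of consecutive elements:

```go
// elemAddrs shows &a[i].b and &a[i+1].b as the compiler sees them once
// c*(d+x) has been distributed: both share the size*i term.
func elemAddrs(base, size, off, i int64) (p0, p1 int64) {
	p0 = base + size*i + off          // &a[i].b
	p1 = base + (size + size*i) + off // &a[i+1].b == base + size*(1+i) + off
	return p0, p1
}
```
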
// rewrite shifts of 8/16/32 bit consts into 64 bit consts to reduce
// the number of the other rewrite rules for const shifts
......@@ -267,19 +245,18 @@
(Rsh8Ux64 x (Const64 [0])) -> x
// zero shifted.
// TODO: other bit sizes.
(Lsh64x64 (Const64 [0]) _) -> (Const64 [0])
(Rsh64x64 (Const64 [0]) _) -> (Const64 [0])
(Rsh64Ux64 (Const64 [0]) _) -> (Const64 [0])
(Lsh64x32 (Const64 [0]) _) -> (Const64 [0])
(Rsh64x32 (Const64 [0]) _) -> (Const64 [0])
(Rsh64Ux32 (Const64 [0]) _) -> (Const64 [0])
(Lsh64x16 (Const64 [0]) _) -> (Const64 [0])
(Rsh64x16 (Const64 [0]) _) -> (Const64 [0])
(Rsh64Ux16 (Const64 [0]) _) -> (Const64 [0])
(Lsh64x8 (Const64 [0]) _) -> (Const64 [0])
(Rsh64x8 (Const64 [0]) _) -> (Const64 [0])
(Rsh64Ux8 (Const64 [0]) _) -> (Const64 [0])
(Lsh32x64 (Const64 [0]) _) -> (Const32 [0])
(Rsh32x64 (Const64 [0]) _) -> (Const32 [0])
(Rsh32Ux64 (Const64 [0]) _) -> (Const32 [0])
(Lsh16x64 (Const64 [0]) _) -> (Const16 [0])
(Rsh16x64 (Const64 [0]) _) -> (Const16 [0])
(Rsh16Ux64 (Const64 [0]) _) -> (Const16 [0])
(Lsh8x64 (Const64 [0]) _) -> (Const8 [0])
(Rsh8x64 (Const64 [0]) _) -> (Const8 [0])
(Rsh8Ux64 (Const64 [0]) _) -> (Const8 [0])
// large left shifts of all values, and right shifts of unsigned values
(Lsh64x64 _ (Const64 [c])) && uint64(c) >= 64 -> (Const64 [0])
......@@ -307,6 +284,34 @@
(Rsh16Ux64 <t> (Rsh16Ux64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) -> (Rsh16Ux64 x (Const64 <t> [c+d]))
(Rsh8Ux64 <t> (Rsh8Ux64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) -> (Rsh8Ux64 x (Const64 <t> [c+d]))
// ((x >> c1) << c2) >> c3
(Rsh64Ux64 (Lsh64x64 (Rsh64Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
&& uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2)
-> (Rsh64Ux64 x (Const64 <config.fe.TypeUInt64()> [c1-c2+c3]))
(Rsh32Ux64 (Lsh32x64 (Rsh32Ux64 x (Const32 [c1])) (Const32 [c2])) (Const32 [c3]))
&& uint32(c1) >= uint32(c2) && uint32(c3) >= uint32(c2)
-> (Rsh32Ux64 x (Const32 <config.fe.TypeUInt32()> [int64(int32(c1-c2+c3))]))
(Rsh16Ux64 (Lsh16x64 (Rsh16Ux64 x (Const16 [c1])) (Const16 [c2])) (Const16 [c3]))
&& uint16(c1) >= uint16(c2) && uint16(c3) >= uint16(c2)
-> (Rsh16Ux64 x (Const16 <config.fe.TypeUInt16()> [int64(int16(c1-c2+c3))]))
(Rsh8Ux64 (Lsh8x64 (Rsh8Ux64 x (Const8 [c1])) (Const8 [c2])) (Const8 [c3]))
&& uint8(c1) >= uint8(c2) && uint8(c3) >= uint8(c2)
-> (Rsh8Ux64 x (Const8 <config.fe.TypeUInt8()> [int64(int8(c1-c2+c3))]))
// ((x << c1) >> c2) << c3
(Lsh64x64 (Rsh64Ux64 (Lsh64x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
&& uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2)
-> (Lsh64x64 x (Const64 <config.fe.TypeUInt64()> [c1-c2+c3]))
(Lsh32x64 (Rsh32Ux64 (Lsh32x64 x (Const32 [c1])) (Const32 [c2])) (Const32 [c3]))
&& uint32(c1) >= uint32(c2) && uint32(c3) >= uint32(c2)
-> (Lsh32x64 x (Const32 <config.fe.TypeUInt32()> [int64(int32(c1-c2+c3))]))
(Lsh16x64 (Rsh16Ux64 (Lsh16x64 x (Const16 [c1])) (Const16 [c2])) (Const16 [c3]))
&& uint16(c1) >= uint16(c2) && uint16(c3) >= uint16(c2)
-> (Lsh16x64 x (Const16 <config.fe.TypeUInt16()> [int64(int16(c1-c2+c3))]))
(Lsh8x64 (Rsh8Ux64 (Lsh8x64 x (Const8 [c1])) (Const8 [c2])) (Const8 [c3]))
&& uint8(c1) >= uint8(c2) && uint8(c3) >= uint8(c2)
-> (Lsh8x64 x (Const8 <config.fe.TypeUInt8()> [int64(int8(c1-c2+c3))]))
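
A brief check (not part of this change) of the identity behind these rules, with illustrative names: when c1 >= c2 and c3 >= c2, the low bits cleared by the inner shift pair are discarded again by the final shift, so a single shift suffices.

```go
// shiftIdentityHolds verifies ((x >> c1) << c2) >> c3 == x >> (c1-c2+c3)
// for unsigned x when c1 >= c2 and c3 >= c2 (shift counts below 64).
func shiftIdentityHolds(x, c1, c2, c3 uint64) bool {
	lhs := ((x >> c1) << c2) >> c3
	rhs := x >> (c1 - c2 + c3)
	return lhs == rhs
}
```
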
// constant comparisons
(Eq64 (Const64 [c]) (Const64 [d])) -> (ConstBool [b2i(c == d)])
(Eq32 (Const32 [c]) (Const32 [d])) -> (ConstBool [b2i(c == d)])
......@@ -470,9 +475,11 @@
// Rewrite AND of consts as shifts if possible, slightly faster for 64 bit operands
// leading zeros can be shifted left, then right
(And64 <t> (Const64 [y]) x) && nlz(y) + nto(y) == 64 && nto(y) >= 32 -> (Rsh64Ux64 (Lsh64x64 <t> x (Const64 <t> [nlz(y)])) (Const64 <t> [nlz(y)]))
(And64 <t> (Const64 [y]) x) && nlz(y) + nto(y) == 64 && nto(y) >= 32
-> (Rsh64Ux64 (Lsh64x64 <t> x (Const64 <t> [nlz(y)])) (Const64 <t> [nlz(y)]))
// trailing zeros can be shifted right, then left
(And64 <t> (Const64 [y]) x) && nlo(y) + ntz(y) == 64 && ntz(y) >= 32 -> (Lsh64x64 (Rsh64Ux64 <t> x (Const64 <t> [ntz(y)])) (Const64 <t> [ntz(y)]))
(And64 <t> (Const64 [y]) x) && nlo(y) + ntz(y) == 64 && ntz(y) >= 32
-> (Lsh64x64 (Rsh64Ux64 <t> x (Const64 <t> [ntz(y)])) (Const64 <t> [ntz(y)]))
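
A sketch (not part of this change) of why the AND-of-constant rules are valid; nlz here is an illustrative parameter standing for the number of leading zero bits of the mask. A mask of nlz leading zeros followed by ones keeps exactly the low 64-nlz bits, which two shifts also do without materializing a 64-bit constant:

```go
// andLeadingZeroMask computes x & m, where m has nlz leading zeros and
// ones below, using the shift pair the rule produces.
func andLeadingZeroMask(x, nlz uint64) uint64 {
	return (x << nlz) >> nlz // == x & (^uint64(0) >> nlz)
}
```
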
// simplifications often used for lengths. e.g. len(s[i:i+5])==5
(Sub64 (Add64 x y) x) -> y
......@@ -785,5 +792,7 @@
// A%B = A-(A/B*B).
// This implements % with two * and a bunch of ancillary ops.
// One of the * is free if the user's code also computes A/B.
(Mod64 <t> x (Const64 [c])) && x.Op != OpConst64 && smagic64ok(c) -> (Sub64 x (Mul64 <t> (Div64 <t> x (Const64 <t> [c])) (Const64 <t> [c])))
(Mod64u <t> x (Const64 [c])) && x.Op != OpConst64 && umagic64ok(c) -> (Sub64 x (Mul64 <t> (Div64u <t> x (Const64 <t> [c])) (Const64 <t> [c])))
(Mod64 <t> x (Const64 [c])) && x.Op != OpConst64 && smagic64ok(c)
-> (Sub64 x (Mul64 <t> (Div64 <t> x (Const64 <t> [c])) (Const64 <t> [c])))
(Mod64u <t> x (Const64 [c])) && x.Op != OpConst64 && umagic64ok(c)
-> (Sub64 x (Mul64 <t> (Div64u <t> x (Const64 <t> [c])) (Const64 <t> [c])))
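
A one-line sketch (not part of this change) of the identity these rules rely on; when B is a constant with a usable magic multiplier, A/B itself lowers to multiplies and shifts, so A%B costs two multiplies plus cheap ops, one of which is shared if the program also computes A/B:

```go
// modViaDiv expresses A%B as A-(A/B)*B, which holds for Go's truncated
// integer division whenever b != 0.
func modViaDiv(a, b int64) int64 {
	return a - (a/b)*b
}
```
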
......@@ -3808,44 +3808,6 @@ func rewriteValuegeneric_OpLoad(v *Value, config *Config) bool {
func rewriteValuegeneric_OpLsh16x16(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Lsh16x16 (Rsh16Ux16 (Lsh16x16 x (Const16 [c1])) (Const16 [c2])) (Const16 [c3]))
// cond: uint16(c1) >= uint16(c2) && uint16(c3) >= uint16(c2)
// result: (Lsh16x16 x (Const16 <config.fe.TypeUInt16()> [int64(int16(c1-c2+c3))]))
for {
v_0 := v.Args[0]
if v_0.Op != OpRsh16Ux16 {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpLsh16x16 {
break
}
x := v_0_0.Args[0]
v_0_0_1 := v_0_0.Args[1]
if v_0_0_1.Op != OpConst16 {
break
}
c1 := v_0_0_1.AuxInt
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpConst16 {
break
}
c2 := v_0_1.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpConst16 {
break
}
c3 := v_1.AuxInt
if !(uint16(c1) >= uint16(c2) && uint16(c3) >= uint16(c2)) {
break
}
v.reset(OpLsh16x16)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpConst16, config.fe.TypeUInt16())
v0.AuxInt = int64(int16(c1 - c2 + c3))
v.AddArg(v0)
return true
}
// match: (Lsh16x16 <t> x (Const16 [c]))
// cond:
// result: (Lsh16x64 x (Const64 <t> [int64(uint16(c))]))
......@@ -3910,21 +3872,6 @@ func rewriteValuegeneric_OpLsh16x64(v *Value, config *Config) bool {
v.AuxInt = int64(int16(c) << uint64(d))
return true
}
// match: (Lsh16x64 (Const16 [0]) _)
// cond:
// result: (Const16 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst16 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst16)
v.AuxInt = 0
return true
}
// match: (Lsh16x64 x (Const64 [0]))
// cond:
// result: x
......@@ -3942,6 +3889,21 @@ func rewriteValuegeneric_OpLsh16x64(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
// match: (Lsh16x64 (Const64 [0]) _)
// cond:
// result: (Const16 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst16)
v.AuxInt = 0
return true
}
// match: (Lsh16x64 _ (Const64 [c]))
// cond: uint64(c) >= 16
// result: (Const16 [0])
......@@ -3988,6 +3950,44 @@ func rewriteValuegeneric_OpLsh16x64(v *Value, config *Config) bool {
v.AddArg(v0)
return true
}
// match: (Lsh16x64 (Rsh16Ux64 (Lsh16x64 x (Const16 [c1])) (Const16 [c2])) (Const16 [c3]))
// cond: uint16(c1) >= uint16(c2) && uint16(c3) >= uint16(c2)
// result: (Lsh16x64 x (Const16 <config.fe.TypeUInt16()> [int64(int16(c1-c2+c3))]))
for {
v_0 := v.Args[0]
if v_0.Op != OpRsh16Ux64 {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpLsh16x64 {
break
}
x := v_0_0.Args[0]
v_0_0_1 := v_0_0.Args[1]
if v_0_0_1.Op != OpConst16 {
break
}
c1 := v_0_0_1.AuxInt
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpConst16 {
break
}
c2 := v_0_1.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpConst16 {
break
}
c3 := v_1.AuxInt
if !(uint16(c1) >= uint16(c2) && uint16(c3) >= uint16(c2)) {
break
}
v.reset(OpLsh16x64)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpConst16, config.fe.TypeUInt16())
v0.AuxInt = int64(int16(c1 - c2 + c3))
v.AddArg(v0)
return true
}
return false
}
func rewriteValuegeneric_OpLsh16x8(v *Value, config *Config) bool {
......@@ -4039,44 +4039,6 @@ func rewriteValuegeneric_OpLsh32x16(v *Value, config *Config) bool {
func rewriteValuegeneric_OpLsh32x32(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Lsh32x32 (Rsh32Ux32 (Lsh32x32 x (Const32 [c1])) (Const32 [c2])) (Const32 [c3]))
// cond: uint32(c1) >= uint32(c2) && uint32(c3) >= uint32(c2)
// result: (Lsh32x32 x (Const32 <config.fe.TypeUInt32()> [int64(int32(c1-c2+c3))]))
for {
v_0 := v.Args[0]
if v_0.Op != OpRsh32Ux32 {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpLsh32x32 {
break
}
x := v_0_0.Args[0]
v_0_0_1 := v_0_0.Args[1]
if v_0_0_1.Op != OpConst32 {
break
}
c1 := v_0_0_1.AuxInt
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpConst32 {
break
}
c2 := v_0_1.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpConst32 {
break
}
c3 := v_1.AuxInt
if !(uint32(c1) >= uint32(c2) && uint32(c3) >= uint32(c2)) {
break
}
v.reset(OpLsh32x32)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpConst32, config.fe.TypeUInt32())
v0.AuxInt = int64(int32(c1 - c2 + c3))
v.AddArg(v0)
return true
}
// match: (Lsh32x32 <t> x (Const32 [c]))
// cond:
// result: (Lsh32x64 x (Const64 <t> [int64(uint32(c))]))
......@@ -4118,21 +4080,6 @@ func rewriteValuegeneric_OpLsh32x64(v *Value, config *Config) bool {
v.AuxInt = int64(int32(c) << uint64(d))
return true
}
// match: (Lsh32x64 (Const32 [0]) _)
// cond:
// result: (Const32 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst32 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst32)
v.AuxInt = 0
return true
}
// match: (Lsh32x64 x (Const64 [0]))
// cond:
// result: x
......@@ -4150,6 +4097,21 @@ func rewriteValuegeneric_OpLsh32x64(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
// match: (Lsh32x64 (Const64 [0]) _)
// cond:
// result: (Const32 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst32)
v.AuxInt = 0
return true
}
// match: (Lsh32x64 _ (Const64 [c]))
// cond: uint64(c) >= 32
// result: (Const32 [0])
......@@ -4196,6 +4158,44 @@ func rewriteValuegeneric_OpLsh32x64(v *Value, config *Config) bool {
v.AddArg(v0)
return true
}
// match: (Lsh32x64 (Rsh32Ux64 (Lsh32x64 x (Const32 [c1])) (Const32 [c2])) (Const32 [c3]))
// cond: uint32(c1) >= uint32(c2) && uint32(c3) >= uint32(c2)
// result: (Lsh32x64 x (Const32 <config.fe.TypeUInt32()> [int64(int32(c1-c2+c3))]))
for {
v_0 := v.Args[0]
if v_0.Op != OpRsh32Ux64 {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpLsh32x64 {
break
}
x := v_0_0.Args[0]
v_0_0_1 := v_0_0.Args[1]
if v_0_0_1.Op != OpConst32 {
break
}
c1 := v_0_0_1.AuxInt
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpConst32 {
break
}
c2 := v_0_1.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpConst32 {
break
}
c3 := v_1.AuxInt
if !(uint32(c1) >= uint32(c2) && uint32(c3) >= uint32(c2)) {
break
}
v.reset(OpLsh32x64)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpConst32, config.fe.TypeUInt32())
v0.AuxInt = int64(int32(c1 - c2 + c3))
v.AddArg(v0)
return true
}
return false
}
func rewriteValuegeneric_OpLsh32x8(v *Value, config *Config) bool {
......@@ -4242,21 +4242,6 @@ func rewriteValuegeneric_OpLsh64x16(v *Value, config *Config) bool {
v.AddArg(v0)
return true
}
// match: (Lsh64x16 (Const64 [0]) _)
// cond:
// result: (Const64 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst64)
v.AuxInt = 0
return true
}
return false
}
func rewriteValuegeneric_OpLsh64x32(v *Value, config *Config) bool {
......@@ -4280,21 +4265,6 @@ func rewriteValuegeneric_OpLsh64x32(v *Value, config *Config) bool {
v.AddArg(v0)
return true
}
// match: (Lsh64x32 (Const64 [0]) _)
// cond:
// result: (Const64 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst64)
v.AuxInt = 0
return true
}
return false
}
func rewriteValuegeneric_OpLsh64x64(v *Value, config *Config) bool {
......@@ -4318,59 +4288,6 @@ func rewriteValuegeneric_OpLsh64x64(v *Value, config *Config) bool {
v.AuxInt = c << uint64(d)
return true
}
// match: (Lsh64x64 (Const64 [0]) _)
// cond:
// result: (Const64 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst64)
v.AuxInt = 0
return true
}
// match: (Lsh64x64 (Rsh64Ux64 (Lsh64x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
// cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2)
// result: (Lsh64x64 x (Const64 <config.fe.TypeUInt64()> [c1-c2+c3]))
for {
v_0 := v.Args[0]
if v_0.Op != OpRsh64Ux64 {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpLsh64x64 {
break
}
x := v_0_0.Args[0]
v_0_0_1 := v_0_0.Args[1]
if v_0_0_1.Op != OpConst64 {
break
}
c1 := v_0_0_1.AuxInt
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpConst64 {
break
}
c2 := v_0_1.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpConst64 {
break
}
c3 := v_1.AuxInt
if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2)) {
break
}
v.reset(OpLsh64x64)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
v0.AuxInt = c1 - c2 + c3
v.AddArg(v0)
return true
}
// match: (Lsh64x64 x (Const64 [0]))
// cond:
// result: x
......@@ -4449,6 +4366,44 @@ func rewriteValuegeneric_OpLsh64x64(v *Value, config *Config) bool {
v.AddArg(v0)
return true
}
// match: (Lsh64x64 (Rsh64Ux64 (Lsh64x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
// cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2)
// result: (Lsh64x64 x (Const64 <config.fe.TypeUInt64()> [c1-c2+c3]))
for {
v_0 := v.Args[0]
if v_0.Op != OpRsh64Ux64 {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpLsh64x64 {
break
}
x := v_0_0.Args[0]
v_0_0_1 := v_0_0.Args[1]
if v_0_0_1.Op != OpConst64 {
break
}
c1 := v_0_0_1.AuxInt
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpConst64 {
break
}
c2 := v_0_1.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpConst64 {
break
}
c3 := v_1.AuxInt
if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2)) {
break
}
v.reset(OpLsh64x64)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
v0.AuxInt = c1 - c2 + c3
v.AddArg(v0)
return true
}
return false
}
func rewriteValuegeneric_OpLsh64x8(v *Value, config *Config) bool {
......@@ -4472,21 +4427,6 @@ func rewriteValuegeneric_OpLsh64x8(v *Value, config *Config) bool {
v.AddArg(v0)
return true
}
// match: (Lsh64x8 (Const64 [0]) _)
// cond:
// result: (Const64 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst64)
v.AuxInt = 0
return true
}
return false
}
func rewriteValuegeneric_OpLsh8x16(v *Value, config *Config) bool {
......@@ -4556,21 +4496,6 @@ func rewriteValuegeneric_OpLsh8x64(v *Value, config *Config) bool {
v.AuxInt = int64(int8(c) << uint64(d))
return true
}
// match: (Lsh8x64 (Const8 [0]) _)
// cond:
// result: (Const8 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst8 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst8)
v.AuxInt = 0
return true
}
// match: (Lsh8x64 x (Const64 [0]))
// cond:
// result: x
......@@ -4588,6 +4513,21 @@ func rewriteValuegeneric_OpLsh8x64(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
// match: (Lsh8x64 (Const64 [0]) _)
// cond:
// result: (Const8 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst8)
v.AuxInt = 0
return true
}
// match: (Lsh8x64 _ (Const64 [c]))
// cond: uint64(c) >= 8
// result: (Const8 [0])
......@@ -4634,21 +4574,16 @@ func rewriteValuegeneric_OpLsh8x64(v *Value, config *Config) bool {
v.AddArg(v0)
return true
}
return false
}
func rewriteValuegeneric_OpLsh8x8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Lsh8x8 (Rsh8Ux8 (Lsh8x8 x (Const8 [c1])) (Const8 [c2])) (Const8 [c3]))
// match: (Lsh8x64 (Rsh8Ux64 (Lsh8x64 x (Const8 [c1])) (Const8 [c2])) (Const8 [c3]))
// cond: uint8(c1) >= uint8(c2) && uint8(c3) >= uint8(c2)
// result: (Lsh8x8 x (Const8 <config.fe.TypeUInt8()> [int64(int8(c1-c2+c3))]))
// result: (Lsh8x64 x (Const8 <config.fe.TypeUInt8()> [int64(int8(c1-c2+c3))]))
for {
v_0 := v.Args[0]
if v_0.Op != OpRsh8Ux8 {
if v_0.Op != OpRsh8Ux64 {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpLsh8x8 {
if v_0_0.Op != OpLsh8x64 {
break
}
x := v_0_0.Args[0]
......@@ -4670,13 +4605,18 @@ func rewriteValuegeneric_OpLsh8x8(v *Value, config *Config) bool {
if !(uint8(c1) >= uint8(c2) && uint8(c3) >= uint8(c2)) {
break
}
v.reset(OpLsh8x8)
v.reset(OpLsh8x64)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpConst8, config.fe.TypeUInt8())
v0.AuxInt = int64(int8(c1 - c2 + c3))
v.AddArg(v0)
return true
}
return false
}
func rewriteValuegeneric_OpLsh8x8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Lsh8x8 <t> x (Const8 [c]))
// cond:
// result: (Lsh8x64 x (Const64 <t> [int64(uint8(c))]))
......@@ -6626,65 +6566,27 @@ func rewriteValuegeneric_OpPtrIndex(v *Value, config *Config) bool {
// cond: config.PtrSize == 8
// result: (AddPtr ptr (Mul64 <config.fe.TypeInt()> idx (Const64 <config.fe.TypeInt()> [t.ElemType().Size()])))
for {
t := v.Type
ptr := v.Args[0]
idx := v.Args[1]
if !(config.PtrSize == 8) {
break
}
v.reset(OpAddPtr)
v.AddArg(ptr)
v0 := b.NewValue0(v.Line, OpMul64, config.fe.TypeInt())
v0.AddArg(idx)
v1 := b.NewValue0(v.Line, OpConst64, config.fe.TypeInt())
v1.AuxInt = t.ElemType().Size()
v0.AddArg(v1)
v.AddArg(v0)
return true
}
return false
}
func rewriteValuegeneric_OpRsh16Ux16(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Rsh16Ux16 (Lsh16x16 (Rsh16Ux16 x (Const16 [c1])) (Const16 [c2])) (Const16 [c3]))
// cond: uint16(c1) >= uint16(c2) && uint16(c3) >= uint16(c2)
// result: (Rsh16Ux16 x (Const16 <config.fe.TypeUInt16()> [int64(int16(c1-c2+c3))]))
for {
v_0 := v.Args[0]
if v_0.Op != OpLsh16x16 {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpRsh16Ux16 {
break
}
x := v_0_0.Args[0]
v_0_0_1 := v_0_0.Args[1]
if v_0_0_1.Op != OpConst16 {
break
}
c1 := v_0_0_1.AuxInt
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpConst16 {
break
}
c2 := v_0_1.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpConst16 {
break
}
c3 := v_1.AuxInt
if !(uint16(c1) >= uint16(c2) && uint16(c3) >= uint16(c2)) {
t := v.Type
ptr := v.Args[0]
idx := v.Args[1]
if !(config.PtrSize == 8) {
break
}
v.reset(OpRsh16Ux16)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpConst16, config.fe.TypeUInt16())
v0.AuxInt = int64(int16(c1 - c2 + c3))
v.reset(OpAddPtr)
v.AddArg(ptr)
v0 := b.NewValue0(v.Line, OpMul64, config.fe.TypeInt())
v0.AddArg(idx)
v1 := b.NewValue0(v.Line, OpConst64, config.fe.TypeInt())
v1.AuxInt = t.ElemType().Size()
v0.AddArg(v1)
v.AddArg(v0)
return true
}
return false
}
func rewriteValuegeneric_OpRsh16Ux16(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Rsh16Ux16 <t> x (Const16 [c]))
// cond:
// result: (Rsh16Ux64 x (Const64 <t> [int64(uint16(c))]))
......@@ -6749,21 +6651,6 @@ func rewriteValuegeneric_OpRsh16Ux64(v *Value, config *Config) bool {
v.AuxInt = int64(int16(uint16(c) >> uint64(d)))
return true
}
// match: (Rsh16Ux64 (Const16 [0]) _)
// cond:
// result: (Const16 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst16 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst16)
v.AuxInt = 0
return true
}
// match: (Rsh16Ux64 x (Const64 [0]))
// cond:
// result: x
......@@ -6781,6 +6668,21 @@ func rewriteValuegeneric_OpRsh16Ux64(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
// match: (Rsh16Ux64 (Const64 [0]) _)
// cond:
// result: (Const16 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst16)
v.AuxInt = 0
return true
}
// match: (Rsh16Ux64 _ (Const64 [c]))
// cond: uint64(c) >= 16
// result: (Const16 [0])
......@@ -6827,6 +6729,44 @@ func rewriteValuegeneric_OpRsh16Ux64(v *Value, config *Config) bool {
v.AddArg(v0)
return true
}
// match: (Rsh16Ux64 (Lsh16x64 (Rsh16Ux64 x (Const16 [c1])) (Const16 [c2])) (Const16 [c3]))
// cond: uint16(c1) >= uint16(c2) && uint16(c3) >= uint16(c2)
// result: (Rsh16Ux64 x (Const16 <config.fe.TypeUInt16()> [int64(int16(c1-c2+c3))]))
for {
v_0 := v.Args[0]
if v_0.Op != OpLsh16x64 {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpRsh16Ux64 {
break
}
x := v_0_0.Args[0]
v_0_0_1 := v_0_0.Args[1]
if v_0_0_1.Op != OpConst16 {
break
}
c1 := v_0_0_1.AuxInt
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpConst16 {
break
}
c2 := v_0_1.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpConst16 {
break
}
c3 := v_1.AuxInt
if !(uint16(c1) >= uint16(c2) && uint16(c3) >= uint16(c2)) {
break
}
v.reset(OpRsh16Ux64)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpConst16, config.fe.TypeUInt16())
v0.AuxInt = int64(int16(c1 - c2 + c3))
v.AddArg(v0)
return true
}
return false
}
func rewriteValuegeneric_OpRsh16Ux8(v *Value, config *Config) bool {
......@@ -6919,21 +6859,6 @@ func rewriteValuegeneric_OpRsh16x64(v *Value, config *Config) bool {
v.AuxInt = int64(int16(c) >> uint64(d))
return true
}
// match: (Rsh16x64 (Const16 [0]) _)
// cond:
// result: (Const16 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst16 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst16)
v.AuxInt = 0
return true
}
// match: (Rsh16x64 x (Const64 [0]))
// cond:
// result: x
......@@ -6951,6 +6876,21 @@ func rewriteValuegeneric_OpRsh16x64(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
// match: (Rsh16x64 (Const64 [0]) _)
// cond:
// result: (Const16 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst16)
v.AuxInt = 0
return true
}
// match: (Rsh16x64 <t> (Rsh16x64 x (Const64 [c])) (Const64 [d]))
// cond: !uaddOvf(c,d)
// result: (Rsh16x64 x (Const64 <t> [c+d]))
......@@ -7032,44 +6972,6 @@ func rewriteValuegeneric_OpRsh32Ux16(v *Value, config *Config) bool {
func rewriteValuegeneric_OpRsh32Ux32(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Rsh32Ux32 (Lsh32x32 (Rsh32Ux32 x (Const32 [c1])) (Const32 [c2])) (Const32 [c3]))
// cond: uint32(c1) >= uint32(c2) && uint32(c3) >= uint32(c2)
// result: (Rsh32Ux32 x (Const32 <config.fe.TypeUInt32()> [int64(int32(c1-c2+c3))]))
for {
v_0 := v.Args[0]
if v_0.Op != OpLsh32x32 {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpRsh32Ux32 {
break
}
x := v_0_0.Args[0]
v_0_0_1 := v_0_0.Args[1]
if v_0_0_1.Op != OpConst32 {
break
}
c1 := v_0_0_1.AuxInt
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpConst32 {
break
}
c2 := v_0_1.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpConst32 {
break
}
c3 := v_1.AuxInt
if !(uint32(c1) >= uint32(c2) && uint32(c3) >= uint32(c2)) {
break
}
v.reset(OpRsh32Ux32)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpConst32, config.fe.TypeUInt32())
v0.AuxInt = int64(int32(c1 - c2 + c3))
v.AddArg(v0)
return true
}
// match: (Rsh32Ux32 <t> x (Const32 [c]))
// cond:
// result: (Rsh32Ux64 x (Const64 <t> [int64(uint32(c))]))
......@@ -7111,21 +7013,6 @@ func rewriteValuegeneric_OpRsh32Ux64(v *Value, config *Config) bool {
v.AuxInt = int64(int32(uint32(c) >> uint64(d)))
return true
}
// match: (Rsh32Ux64 (Const32 [0]) _)
// cond:
// result: (Const32 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst32 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst32)
v.AuxInt = 0
return true
}
// match: (Rsh32Ux64 x (Const64 [0]))
// cond:
// result: x
......@@ -7143,6 +7030,21 @@ func rewriteValuegeneric_OpRsh32Ux64(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
// match: (Rsh32Ux64 (Const64 [0]) _)
// cond:
// result: (Const32 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst32)
v.AuxInt = 0
return true
}
// match: (Rsh32Ux64 _ (Const64 [c]))
// cond: uint64(c) >= 32
// result: (Const32 [0])
......@@ -7189,6 +7091,44 @@ func rewriteValuegeneric_OpRsh32Ux64(v *Value, config *Config) bool {
v.AddArg(v0)
return true
}
// match: (Rsh32Ux64 (Lsh32x64 (Rsh32Ux64 x (Const32 [c1])) (Const32 [c2])) (Const32 [c3]))
// cond: uint32(c1) >= uint32(c2) && uint32(c3) >= uint32(c2)
// result: (Rsh32Ux64 x (Const32 <config.fe.TypeUInt32()> [int64(int32(c1-c2+c3))]))
for {
v_0 := v.Args[0]
if v_0.Op != OpLsh32x64 {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpRsh32Ux64 {
break
}
x := v_0_0.Args[0]
v_0_0_1 := v_0_0.Args[1]
if v_0_0_1.Op != OpConst32 {
break
}
c1 := v_0_0_1.AuxInt
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpConst32 {
break
}
c2 := v_0_1.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpConst32 {
break
}
c3 := v_1.AuxInt
if !(uint32(c1) >= uint32(c2) && uint32(c3) >= uint32(c2)) {
break
}
v.reset(OpRsh32Ux64)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpConst32, config.fe.TypeUInt32())
v0.AuxInt = int64(int32(c1 - c2 + c3))
v.AddArg(v0)
return true
}
return false
}
func rewriteValuegeneric_OpRsh32Ux8(v *Value, config *Config) bool {
......@@ -7281,21 +7221,6 @@ func rewriteValuegeneric_OpRsh32x64(v *Value, config *Config) bool {
v.AuxInt = int64(int32(c) >> uint64(d))
return true
}
// match: (Rsh32x64 (Const32 [0]) _)
// cond:
// result: (Const32 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst32 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst32)
v.AuxInt = 0
return true
}
// match: (Rsh32x64 x (Const64 [0]))
// cond:
// result: x
......@@ -7313,6 +7238,21 @@ func rewriteValuegeneric_OpRsh32x64(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
// match: (Rsh32x64 (Const64 [0]) _)
// cond:
// result: (Const32 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst32)
v.AuxInt = 0
return true
}
// match: (Rsh32x64 <t> (Rsh32x64 x (Const64 [c])) (Const64 [d]))
// cond: !uaddOvf(c,d)
// result: (Rsh32x64 x (Const64 <t> [c+d]))
......@@ -7375,33 +7315,18 @@ func rewriteValuegeneric_OpRsh64Ux16(v *Value, config *Config) bool {
// cond:
// result: (Rsh64Ux64 x (Const64 <t> [int64(uint16(c))]))
for {
t := v.Type
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpConst16 {
break
}
c := v_1.AuxInt
v.reset(OpRsh64Ux64)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpConst64, t)
v0.AuxInt = int64(uint16(c))
v.AddArg(v0)
return true
}
// match: (Rsh64Ux16 (Const64 [0]) _)
// cond:
// result: (Const64 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64 {
break
}
if v_0.AuxInt != 0 {
t := v.Type
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpConst16 {
break
}
v.reset(OpConst64)
v.AuxInt = 0
c := v_1.AuxInt
v.reset(OpRsh64Ux64)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpConst64, t)
v0.AuxInt = int64(uint16(c))
v.AddArg(v0)
return true
}
return false
......@@ -7427,21 +7352,6 @@ func rewriteValuegeneric_OpRsh64Ux32(v *Value, config *Config) bool {
v.AddArg(v0)
return true
}
// match: (Rsh64Ux32 (Const64 [0]) _)
// cond:
// result: (Const64 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst64)
v.AuxInt = 0
return true
}
return false
}
func rewriteValuegeneric_OpRsh64Ux64(v *Value, config *Config) bool {
......@@ -7465,59 +7375,6 @@ func rewriteValuegeneric_OpRsh64Ux64(v *Value, config *Config) bool {
v.AuxInt = int64(uint64(c) >> uint64(d))
return true
}
// match: (Rsh64Ux64 (Const64 [0]) _)
// cond:
// result: (Const64 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst64)
v.AuxInt = 0
return true
}
// match: (Rsh64Ux64 (Lsh64x64 (Rsh64Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
// cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2)
// result: (Rsh64Ux64 x (Const64 <config.fe.TypeUInt64()> [c1-c2+c3]))
for {
v_0 := v.Args[0]
if v_0.Op != OpLsh64x64 {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpRsh64Ux64 {
break
}
x := v_0_0.Args[0]
v_0_0_1 := v_0_0.Args[1]
if v_0_0_1.Op != OpConst64 {
break
}
c1 := v_0_0_1.AuxInt
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpConst64 {
break
}
c2 := v_0_1.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpConst64 {
break
}
c3 := v_1.AuxInt
if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2)) {
break
}
v.reset(OpRsh64Ux64)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
v0.AuxInt = c1 - c2 + c3
v.AddArg(v0)
return true
}
// match: (Rsh64Ux64 x (Const64 [0]))
// cond:
// result: x
......@@ -7596,6 +7453,44 @@ func rewriteValuegeneric_OpRsh64Ux64(v *Value, config *Config) bool {
v.AddArg(v0)
return true
}
// match: (Rsh64Ux64 (Lsh64x64 (Rsh64Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
// cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2)
// result: (Rsh64Ux64 x (Const64 <config.fe.TypeUInt64()> [c1-c2+c3]))
for {
v_0 := v.Args[0]
if v_0.Op != OpLsh64x64 {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpRsh64Ux64 {
break
}
x := v_0_0.Args[0]
v_0_0_1 := v_0_0.Args[1]
if v_0_0_1.Op != OpConst64 {
break
}
c1 := v_0_0_1.AuxInt
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpConst64 {
break
}
c2 := v_0_1.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpConst64 {
break
}
c3 := v_1.AuxInt
if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2)) {
break
}
v.reset(OpRsh64Ux64)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
v0.AuxInt = c1 - c2 + c3
v.AddArg(v0)
return true
}
return false
}
func rewriteValuegeneric_OpRsh64Ux8(v *Value, config *Config) bool {
......@@ -7619,21 +7514,6 @@ func rewriteValuegeneric_OpRsh64Ux8(v *Value, config *Config) bool {
v.AddArg(v0)
return true
}
// match: (Rsh64Ux8 (Const64 [0]) _)
// cond:
// result: (Const64 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst64)
v.AuxInt = 0
return true
}
return false
}
func rewriteValuegeneric_OpRsh64x16(v *Value, config *Config) bool {
......@@ -7657,21 +7537,6 @@ func rewriteValuegeneric_OpRsh64x16(v *Value, config *Config) bool {
v.AddArg(v0)
return true
}
// match: (Rsh64x16 (Const64 [0]) _)
// cond:
// result: (Const64 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst64)
v.AuxInt = 0
return true
}
return false
}
func rewriteValuegeneric_OpRsh64x32(v *Value, config *Config) bool {
......@@ -7695,21 +7560,6 @@ func rewriteValuegeneric_OpRsh64x32(v *Value, config *Config) bool {
v.AddArg(v0)
return true
}
// match: (Rsh64x32 (Const64 [0]) _)
// cond:
// result: (Const64 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst64)
v.AuxInt = 0
return true
}
return false
}
func rewriteValuegeneric_OpRsh64x64(v *Value, config *Config) bool {
......@@ -7733,21 +7583,6 @@ func rewriteValuegeneric_OpRsh64x64(v *Value, config *Config) bool {
v.AuxInt = c >> uint64(d)
return true
}
// match: (Rsh64x64 (Const64 [0]) _)
// cond:
// result: (Const64 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst64)
v.AuxInt = 0
return true
}
// match: (Rsh64x64 x (Const64 [0]))
// cond:
// result: x
......@@ -7833,21 +7668,6 @@ func rewriteValuegeneric_OpRsh64x8(v *Value, config *Config) bool {
v.AddArg(v0)
return true
}
// match: (Rsh64x8 (Const64 [0]) _)
// cond:
// result: (Const64 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst64)
v.AuxInt = 0
return true
}
return false
}
func rewriteValuegeneric_OpRsh8Ux16(v *Value, config *Config) bool {
......@@ -7917,21 +7737,6 @@ func rewriteValuegeneric_OpRsh8Ux64(v *Value, config *Config) bool {
v.AuxInt = int64(int8(uint8(c) >> uint64(d)))
return true
}
// match: (Rsh8Ux64 (Const8 [0]) _)
// cond:
// result: (Const8 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst8 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst8)
v.AuxInt = 0
return true
}
// match: (Rsh8Ux64 x (Const64 [0]))
// cond:
// result: x
......@@ -7949,6 +7754,21 @@ func rewriteValuegeneric_OpRsh8Ux64(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
// match: (Rsh8Ux64 (Const64 [0]) _)
// cond:
// result: (Const8 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst8)
v.AuxInt = 0
return true
}
// match: (Rsh8Ux64 _ (Const64 [c]))
// cond: uint64(c) >= 8
// result: (Const8 [0])
......@@ -7995,21 +7815,16 @@ func rewriteValuegeneric_OpRsh8Ux64(v *Value, config *Config) bool {
v.AddArg(v0)
return true
}
return false
}
func rewriteValuegeneric_OpRsh8Ux8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Rsh8Ux8 (Lsh8x8 (Rsh8Ux8 x (Const8 [c1])) (Const8 [c2])) (Const8 [c3]))
// match: (Rsh8Ux64 (Lsh8x64 (Rsh8Ux64 x (Const8 [c1])) (Const8 [c2])) (Const8 [c3]))
// cond: uint8(c1) >= uint8(c2) && uint8(c3) >= uint8(c2)
// result: (Rsh8Ux8 x (Const8 <config.fe.TypeUInt8()> [int64(int8(c1-c2+c3))]))
// result: (Rsh8Ux64 x (Const8 <config.fe.TypeUInt8()> [int64(int8(c1-c2+c3))]))
for {
v_0 := v.Args[0]
if v_0.Op != OpLsh8x8 {
if v_0.Op != OpLsh8x64 {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpRsh8Ux8 {
if v_0_0.Op != OpRsh8Ux64 {
break
}
x := v_0_0.Args[0]
......@@ -8031,13 +7846,18 @@ func rewriteValuegeneric_OpRsh8Ux8(v *Value, config *Config) bool {
if !(uint8(c1) >= uint8(c2) && uint8(c3) >= uint8(c2)) {
break
}
v.reset(OpRsh8Ux8)
v.reset(OpRsh8Ux64)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpConst8, config.fe.TypeUInt8())
v0.AuxInt = int64(int8(c1 - c2 + c3))
v.AddArg(v0)
return true
}
return false
}
func rewriteValuegeneric_OpRsh8Ux8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Rsh8Ux8 <t> x (Const8 [c]))
// cond:
// result: (Rsh8Ux64 x (Const64 <t> [int64(uint8(c))]))
......@@ -8125,21 +7945,6 @@ func rewriteValuegeneric_OpRsh8x64(v *Value, config *Config) bool {
v.AuxInt = int64(int8(c) >> uint64(d))
return true
}
// match: (Rsh8x64 (Const8 [0]) _)
// cond:
// result: (Const8 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst8 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst8)
v.AuxInt = 0
return true
}
// match: (Rsh8x64 x (Const64 [0]))
// cond:
// result: x
......@@ -8157,6 +7962,21 @@ func rewriteValuegeneric_OpRsh8x64(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
// match: (Rsh8x64 (Const64 [0]) _)
// cond:
// result: (Const8 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst8)
v.AuxInt = 0
return true
}
// match: (Rsh8x64 <t> (Rsh8x64 x (Const64 [c])) (Const64 [d]))
// cond: !uaddOvf(c,d)
// result: (Rsh8x64 x (Const64 <t> [c+d]))
......