Commit fa9435cd authored by Keith Randall

cmd/compile: clean up rewrite rules

Break really long lines.
Add spacing to line up columns.

In AMD64, put all the optimization rules after all the
lowering rules.

Change-Id: I45cc7368bf278416e67f89e74358db1bd4326a93
Reviewed-on: https://go-review.googlesource.com/22470
Reviewed-by: David Chase <drchase@google.com>
parent 1fb4e4de
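
To make the rule notation below concrete: each line rewrites a generic SSA op into machine ops, and the rule compiler (rulegen) turns every rule into a Go matcher. The following is a minimal, self-contained sketch of what such a matcher does for (Add64 x y) -> (ADDQ x y), with stand-in types and illustrative names; the real generated code lives in the ssa package's rewrite files:

package main

import "fmt"

// Minimal stand-ins for the ssa package types (illustrative only).
type Op int

const (
	OpAdd64 Op = iota
	OpAMD64ADDQ
)

type Value struct {
	Op   Op
	Args []*Value
}

func (v *Value) reset(op Op)     { v.Op = op; v.Args = v.Args[:0] }
func (v *Value) AddArg(w *Value) { v.Args = append(v.Args, w) }

// Sketch of the matcher rulegen would emit for (Add64 x y) -> (ADDQ x y):
// match the generic op, then rewrite the Value in place to the machine op.
func rewriteValueOpAdd64(v *Value) bool {
	if v.Op != OpAdd64 {
		return false
	}
	x, y := v.Args[0], v.Args[1]
	v.reset(OpAMD64ADDQ)
	v.AddArg(x)
	v.AddArg(y)
	return true
}

func main() {
	x, y := &Value{}, &Value{}
	v := &Value{Op: OpAdd64, Args: []*Value{x, y}}
	fmt.Println(rewriteValueOpAdd64(v), v.Op == OpAMD64ADDQ) // true true
}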
@@ -3,137 +3,134 @@
// license that can be found in the LICENSE file.

// Lowering arithmetic
(Add64  x y) -> (ADDQ  x y)
(AddPtr x y) -> (ADDQ  x y)
(Add32  x y) -> (ADDL  x y)
(Add16  x y) -> (ADDL  x y)
(Add8   x y) -> (ADDL  x y)
(Add32F x y) -> (ADDSS x y)
(Add64F x y) -> (ADDSD x y)

(Sub64  x y) -> (SUBQ  x y)
(SubPtr x y) -> (SUBQ  x y)
(Sub32  x y) -> (SUBL  x y)
(Sub16  x y) -> (SUBL  x y)
(Sub8   x y) -> (SUBL  x y)
(Sub32F x y) -> (SUBSS x y)
(Sub64F x y) -> (SUBSD x y)

(Mul64  x y) -> (MULQ  x y)
(Mul32  x y) -> (MULL  x y)
(Mul16  x y) -> (MULL  x y)
(Mul8   x y) -> (MULL  x y)
(Mul32F x y) -> (MULSS x y)
(Mul64F x y) -> (MULSD x y)

(Div32F x y) -> (DIVSS x y)
(Div64F x y) -> (DIVSD x y)

(Div64  x y) -> (DIVQ  x y)
(Div64u x y) -> (DIVQU x y)
(Div32  x y) -> (DIVL  x y)
(Div32u x y) -> (DIVLU x y)
(Div16  x y) -> (DIVW  x y)
(Div16u x y) -> (DIVWU x y)
(Div8   x y) -> (DIVW  (SignExt8to16 x) (SignExt8to16 y))
(Div8u  x y) -> (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))

(Hmul64  x y) -> (HMULQ  x y)
(Hmul64u x y) -> (HMULQU x y)
(Hmul32  x y) -> (HMULL  x y)
(Hmul32u x y) -> (HMULLU x y)
(Hmul16  x y) -> (HMULW  x y)
(Hmul16u x y) -> (HMULWU x y)
(Hmul8   x y) -> (HMULB  x y)
(Hmul8u  x y) -> (HMULBU x y)

(Avg64u x y) -> (AVGQU x y)

(Mod64  x y) -> (MODQ  x y)
(Mod64u x y) -> (MODQU x y)
(Mod32  x y) -> (MODL  x y)
(Mod32u x y) -> (MODLU x y)
(Mod16  x y) -> (MODW  x y)
(Mod16u x y) -> (MODWU x y)
(Mod8   x y) -> (MODW  (SignExt8to16 x) (SignExt8to16 y))
(Mod8u  x y) -> (MODWU (ZeroExt8to16 x) (ZeroExt8to16 y))

(And64 x y) -> (ANDQ x y)
(And32 x y) -> (ANDL x y)
(And16 x y) -> (ANDL x y)
(And8  x y) -> (ANDL x y)

(Or64 x y) -> (ORQ x y)
(Or32 x y) -> (ORL x y)
(Or16 x y) -> (ORL x y)
(Or8  x y) -> (ORL x y)

(Xor64 x y) -> (XORQ x y)
(Xor32 x y) -> (XORL x y)
(Xor16 x y) -> (XORL x y)
(Xor8  x y) -> (XORL x y)

(Neg64 x) -> (NEGQ x)
(Neg32 x) -> (NEGL x)
(Neg16 x) -> (NEGL x)
(Neg8  x) -> (NEGL x)
(Neg32F x) -> (PXOR x (MOVSSconst <config.Frontend().TypeFloat32()> [f2i(math.Copysign(0, -1))]))
(Neg64F x) -> (PXOR x (MOVSDconst <config.Frontend().TypeFloat64()> [f2i(math.Copysign(0, -1))]))

(Com64 x) -> (NOTQ x)
(Com32 x) -> (NOTL x)
(Com16 x) -> (NOTL x)
(Com8  x) -> (NOTL x)
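
The Neg32F/Neg64F rules above negate a float by XORing in a sign-bit-only constant: f2i(math.Copysign(0, -1)) is exactly the bit pattern with only the sign bit set. A Go sketch of the same semantics:

package main

import (
	"fmt"
	"math"
)

// What the Neg64F lowering computes: PXOR with 0x8000000000000000,
// i.e. flip only the IEEE-754 sign bit.
func neg64f(x float64) float64 {
	return math.Float64frombits(math.Float64bits(x) ^ (1 << 63))
}

func main() {
	fmt.Println(neg64f(1.5), neg64f(0)) // -1.5 -0 (negative zero)
}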
// Lowering boolean ops
(AndB x y) -> (ANDL x y)
(OrB x y) -> (ORL x y)
(Not x) -> (XORLconst [1] x)
// Lowering pointer arithmetic
(OffPtr [off] ptr) && is32Bit(off) -> (ADDQconst [off] ptr)
(OffPtr [off] ptr) -> (ADDQ (MOVQconst [off]) ptr)
// Lowering other arithmetic
// TODO: CMPQconst 0 below is redundant because BSF sets Z but how to remove?
(Ctz64 <t> x) -> (CMOVQEQconst (BSFQ <t> x) (CMPQconst x [0]) [64])
(Ctz32 <t> x) -> (CMOVLEQconst (BSFL <t> x) (CMPLconst x [0]) [32])
(Ctz16 <t> x) -> (CMOVWEQconst (BSFW <t> x) (CMPWconst x [0]) [16])
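
The CMOV in the Ctz rules exists because BSF leaves its destination undefined for a zero input. In modern Go terms (a sketch of the semantics, not the generated code):

package main

import (
	"fmt"
	"math/bits"
)

// Semantics of the Ctz64 lowering: BSFQ finds the lowest set bit, and the
// CMPQconst/CMOVQEQconst pair substitutes 64 when the input is zero.
func ctz64(x uint64) int {
	if x == 0 {
		return 64 // the CMOV path
	}
	return bits.TrailingZeros64(x) // what BSFQ produces for nonzero x
}

func main() {
	fmt.Println(ctz64(0b1000), ctz64(0)) // 3 64
}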
(Bswap64 x) -> (BSWAPQ x)
(Bswap32 x) -> (BSWAPL x)

(Sqrt x) -> (SQRTSD x)
// Lowering extension
// Note: we always extend to 64 bits even though some ops don't need that many result bits.
(SignExt8to16  x) -> (MOVBQSX x)
(SignExt8to32  x) -> (MOVBQSX x)
(SignExt8to64  x) -> (MOVBQSX x)
(SignExt16to32 x) -> (MOVWQSX x)
(SignExt16to64 x) -> (MOVWQSX x)
(SignExt32to64 x) -> (MOVLQSX x)

(ZeroExt8to16  x) -> (MOVBQZX x)
(ZeroExt8to32  x) -> (MOVBQZX x)
(ZeroExt8to64  x) -> (MOVBQZX x)
(ZeroExt16to32 x) -> (MOVWQZX x)
(ZeroExt16to64 x) -> (MOVWQZX x)
(ZeroExt32to64 x) -> (MOVLQZX x)
// Lowering truncation
// Because we ignore high parts of registers, truncates are just copies.
(Trunc16to8 x) -> x
(Trunc32to8 x) -> x
(Trunc32to16 x) -> x
(Trunc64to8 x) -> x
(Trunc64to16 x) -> x
(Trunc64to32 x) -> x
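
These rules rewrite to the input itself, so a truncation costs no instruction: the value stays in its register and narrower operations simply read the low bits. For illustration:

package main

import "fmt"

// Truncation is a register-level no-op on AMD64; the conversion below
// compiles to nothing beyond using the low byte of the register.
func trunc32to8(x uint32) uint8 { return uint8(x) }

func main() {
	fmt.Println(trunc32to8(0x12345678)) // 0x78 = 120
}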
// Lowering float <-> int
(Cvt32to32F x) -> (CVTSL2SS x)
(Cvt32to64F x) -> (CVTSL2SD x)
(Cvt64to32F x) -> (CVTSQ2SS x)
@@ -147,90 +144,81 @@
(Cvt32Fto64F x) -> (CVTSS2SD x)
(Cvt64Fto32F x) -> (CVTSD2SS x)
// Lowering shifts
// Unsigned shifts need to return 0 if shift amount is >= width of shifted value.
//   result = (arg << shift) & (shift >= argbits ? 0 : 0xffffffffffffffff)
// Note: for small shifts we generate 32 bits of mask even when we don't need it all.
(Lsh64x64 <t> x y) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
(Lsh64x32 <t> x y) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
(Lsh64x16 <t> x y) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
(Lsh64x8  <t> x y) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
(Lsh32x64 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
(Lsh32x32 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
(Lsh32x16 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
(Lsh32x8  <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
(Lsh16x64 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
(Lsh16x32 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
(Lsh16x16 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
(Lsh16x8  <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
(Lsh8x64  <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
(Lsh8x32  <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
(Lsh8x16  <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
(Lsh8x8   <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
(Lrot64 <t> x [c]) -> (ROLQconst <t> [c&63] x)
(Lrot32 <t> x [c]) -> (ROLLconst <t> [c&31] x)
(Lrot16 <t> x [c]) -> (ROLWconst <t> [c&15] x)
(Lrot8  <t> x [c]) -> (ROLBconst <t> [c&7]  x)
(Rsh64Ux64 <t> x y) -> (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
(Rsh64Ux32 <t> x y) -> (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
(Rsh64Ux16 <t> x y) -> (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
(Rsh64Ux8  <t> x y) -> (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
(Rsh32Ux64 <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
(Rsh32Ux32 <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
(Rsh32Ux16 <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
(Rsh32Ux8  <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
(Rsh16Ux64 <t> x y) -> (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
(Rsh16Ux32 <t> x y) -> (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
(Rsh16Ux16 <t> x y) -> (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
(Rsh16Ux8  <t> x y) -> (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
(Rsh8Ux64  <t> x y) -> (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
(Rsh8Ux32  <t> x y) -> (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
(Rsh8Ux16  <t> x y) -> (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
(Rsh8Ux8   <t> x y) -> (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
// Signed right shift needs to return 0/-1 if shift amount is >= width of shifted value.
// We implement this by setting the shift value to -1 (all ones) if the shift value is >= width.
// Note: for small shift widths we generate 32 bits of mask even when we don't need it all.
(Rsh64x64 <t> x y) -> (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64])))))
(Rsh64x32 <t> x y) -> (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64])))))
(Rsh64x16 <t> x y) -> (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
(Rsh64x8  <t> x y) -> (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64])))))
(Rsh32x64 <t> x y) -> (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32])))))
(Rsh32x32 <t> x y) -> (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
(Rsh32x16 <t> x y) -> (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
(Rsh32x8  <t> x y) -> (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
(Rsh16x64 <t> x y) -> (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16])))))
(Rsh16x32 <t> x y) -> (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
(Rsh16x16 <t> x y) -> (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
(Rsh16x8  <t> x y) -> (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
(Rsh8x64  <t> x y) -> (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8])))))
(Rsh8x32  <t> x y) -> (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
(Rsh8x16  <t> x y) -> (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
(Rsh8x8   <t> x y) -> (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
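
A Go sketch of the semantics these lowerings implement: unsigned shifts produce 0 once the amount reaches the operand width (the SBB carry-mask AND), while signed right shifts saturate the amount (the OR/NOT trick turns an out-of-range amount into all ones, i.e. an effective count of 63 after the hardware masks it), so the sign bit keeps smearing:

package main

import "fmt"

func lsh64(x uint64, s uint64) uint64 {
	if s >= 64 {
		return 0 // the carry-mask zeroes the result
	}
	return x << s
}

func rsh64(x int64, s uint64) int64 {
	if s >= 64 {
		s = 63 // saturated amount still yields 0 or -1
	}
	return x >> s
}

func main() {
	fmt.Println(lsh64(1, 70), rsh64(-8, 70)) // 0 -1
}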
// Lowering comparisons
(Less64 x y) -> (SETL (CMPQ x y))
(Less32 x y) -> (SETL (CMPL x y))
(Less16 x y) -> (SETL (CMPW x y))
(Less8  x y) -> (SETL (CMPB x y))
(Less64U x y) -> (SETB (CMPQ x y))
(Less32U x y) -> (SETB (CMPL x y))
(Less16U x y) -> (SETB (CMPW x y))
@@ -239,10 +227,10 @@
(Less64F x y) -> (SETGF (UCOMISD y x))
(Less32F x y) -> (SETGF (UCOMISS y x))

(Leq64 x y) -> (SETLE (CMPQ x y))
(Leq32 x y) -> (SETLE (CMPL x y))
(Leq16 x y) -> (SETLE (CMPW x y))
(Leq8  x y) -> (SETLE (CMPB x y))

(Leq64U x y) -> (SETBE (CMPQ x y))
(Leq32U x y) -> (SETBE (CMPL x y))
(Leq16U x y) -> (SETBE (CMPW x y))
@@ -251,10 +239,10 @@
(Leq64F x y) -> (SETGEF (UCOMISD y x))
(Leq32F x y) -> (SETGEF (UCOMISS y x))

(Greater64 x y) -> (SETG (CMPQ x y))
(Greater32 x y) -> (SETG (CMPL x y))
(Greater16 x y) -> (SETG (CMPW x y))
(Greater8  x y) -> (SETG (CMPB x y))

(Greater64U x y) -> (SETA (CMPQ x y))
(Greater32U x y) -> (SETA (CMPL x y))
(Greater16U x y) -> (SETA (CMPW x y))
@@ -264,10 +252,10 @@
(Greater64F x y) -> (SETGF (UCOMISD x y))
(Greater32F x y) -> (SETGF (UCOMISS x y))

(Geq64 x y) -> (SETGE (CMPQ x y))
(Geq32 x y) -> (SETGE (CMPL x y))
(Geq16 x y) -> (SETGE (CMPW x y))
(Geq8  x y) -> (SETGE (CMPB x y))

(Geq64U x y) -> (SETAE (CMPQ x y))
(Geq32U x y) -> (SETAE (CMPL x y))
(Geq16U x y) -> (SETAE (CMPW x y))
@@ -277,24 +265,25 @@
(Geq64F x y) -> (SETGEF (UCOMISD x y))
(Geq32F x y) -> (SETGEF (UCOMISS x y))

(Eq64 x y) -> (SETEQ (CMPQ x y))
(Eq32 x y) -> (SETEQ (CMPL x y))
(Eq16 x y) -> (SETEQ (CMPW x y))
(Eq8  x y) -> (SETEQ (CMPB x y))
(EqB  x y) -> (SETEQ (CMPB x y))
(EqPtr x y) -> (SETEQ (CMPQ x y))
(Eq64F x y) -> (SETEQF (UCOMISD x y))
(Eq32F x y) -> (SETEQF (UCOMISS x y))

(Neq64 x y) -> (SETNE (CMPQ x y))
(Neq32 x y) -> (SETNE (CMPL x y))
(Neq16 x y) -> (SETNE (CMPW x y))
(Neq8  x y) -> (SETNE (CMPB x y))
(NeqB  x y) -> (SETNE (CMPB x y))
(NeqPtr x y) -> (SETNE (CMPQ x y))
(Neq64F x y) -> (SETNEF (UCOMISD x y))
(Neq32F x y) -> (SETNEF (UCOMISS x y))
// Lowering loads
(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) -> (MOVQload ptr mem)
(Load <t> ptr mem) && is32BitInt(t) -> (MOVLload ptr mem)
(Load <t> ptr mem) && is16BitInt(t) -> (MOVWload ptr mem)
@@ -302,6 +291,7 @@
(Load <t> ptr mem) && is32BitFloat(t) -> (MOVSSload ptr mem)
(Load <t> ptr mem) && is64BitFloat(t) -> (MOVSDload ptr mem)
// Lowering stores
// These more-specific FP versions of Store pattern should come first.
(Store [8] ptr val mem) && is64BitFloat(val.Type) -> (MOVSDstore ptr val mem)
(Store [4] ptr val mem) && is32BitFloat(val.Type) -> (MOVSSstore ptr val mem)
@@ -311,19 +301,7 @@
(Store [2] ptr val mem) -> (MOVWstore ptr val mem)
(Store [1] ptr val mem) -> (MOVBstore ptr val mem)
// Lowering moves
(Move [0] _ _ mem) -> mem
(Move [1] dst src mem) -> (MOVBstore dst (MOVBload src mem) mem)
(Move [2] dst src mem) -> (MOVWstore dst (MOVWload src mem) mem)
@@ -368,24 +346,81 @@
(Move [size] dst src mem) && (size > 16*64 || config.noDuffDevice) && size%8 == 0 ->
	(REPMOVSQ dst src (MOVQconst [size/8]) mem)
// Lowering Zero instructions
(Zero [0] _ mem) -> mem
(Zero [1] destptr mem) -> (MOVBstoreconst [0] destptr mem)
(Zero [2] destptr mem) -> (MOVWstoreconst [0] destptr mem)
(Zero [4] destptr mem) -> (MOVLstoreconst [0] destptr mem)
(Zero [8] destptr mem) -> (MOVQstoreconst [0] destptr mem)

(Zero [3] destptr mem) ->
	(MOVBstoreconst [makeValAndOff(0,2)] destptr
		(MOVWstoreconst [0] destptr mem))
(Zero [5] destptr mem) ->
	(MOVBstoreconst [makeValAndOff(0,4)] destptr
		(MOVLstoreconst [0] destptr mem))
(Zero [6] destptr mem) ->
	(MOVWstoreconst [makeValAndOff(0,4)] destptr
		(MOVLstoreconst [0] destptr mem))
(Zero [7] destptr mem) ->
	(MOVLstoreconst [makeValAndOff(0,3)] destptr
		(MOVLstoreconst [0] destptr mem))

// Strip off any fractional word zeroing.
(Zero [size] destptr mem) && size%8 != 0 && size > 8 ->
	(Zero [size-size%8] (ADDQconst destptr [size%8])
		(MOVQstoreconst [0] destptr mem))
// Zero small numbers of words directly.
(Zero [16] destptr mem) ->
	(MOVQstoreconst [makeValAndOff(0,8)] destptr
		(MOVQstoreconst [0] destptr mem))
(Zero [24] destptr mem) ->
	(MOVQstoreconst [makeValAndOff(0,16)] destptr
		(MOVQstoreconst [makeValAndOff(0,8)] destptr
			(MOVQstoreconst [0] destptr mem)))
(Zero [32] destptr mem) ->
	(MOVQstoreconst [makeValAndOff(0,24)] destptr
		(MOVQstoreconst [makeValAndOff(0,16)] destptr
			(MOVQstoreconst [makeValAndOff(0,8)] destptr
				(MOVQstoreconst [0] destptr mem))))

// Medium zeroing uses a duff device.
(Zero [size] destptr mem) && size <= 1024 && size%8 == 0 && size%16 != 0 && !config.noDuffDevice ->
	(Zero [size-8] (ADDQconst [8] destptr) (MOVQstore destptr (MOVQconst [0]) mem))
(Zero [size] destptr mem) && size <= 1024 && size%16 == 0 && !config.noDuffDevice ->
	(DUFFZERO [duffStart(size)] (ADDQconst [duffAdj(size)] destptr) (MOVOconst [0]) mem)

// Large zeroing uses REP STOSQ.
(Zero [size] destptr mem) && (size > 1024 || (config.noDuffDevice && size > 32)) && size%8 == 0 ->
	(REPSTOSQ destptr (MOVQconst [size/8]) (MOVQconst [0]) mem)
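
Note the odd-size rules above use overlapping stores: Zero [7], for example, is two 4-byte stores at offsets 0 and 3 rather than a 4+2+1 sequence. The same idea in Go:

package main

import (
	"encoding/binary"
	"fmt"
)

// Zeroing 7 bytes with two overlapping 4-byte stores, mirroring the
// makeValAndOff(0,3) + offset-0 pair in the Zero [7] rule.
func zero7(p []byte) {
	binary.LittleEndian.PutUint32(p[0:4], 0)
	binary.LittleEndian.PutUint32(p[3:7], 0)
}

func main() {
	p := []byte{1, 2, 3, 4, 5, 6, 7}
	zero7(p)
	fmt.Println(p) // [0 0 0 0 0 0 0]
}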
// Lowering constants
(Const8 [val]) -> (MOVLconst [val])
(Const16 [val]) -> (MOVLconst [val])
(Const32 [val]) -> (MOVLconst [val])
(Const64 [val]) -> (MOVQconst [val])
(Const32F [val]) -> (MOVSSconst [val])
(Const64F [val]) -> (MOVSDconst [val])
(ConstNil)       -> (MOVQconst [0])
(ConstBool [b])  -> (MOVLconst [b])
// Lowering calls
(StaticCall [argwid] {target} mem) -> (CALLstatic [argwid] {target} mem)
(ClosureCall [argwid] entry closure mem) -> (CALLclosure [argwid] entry closure mem)
(DeferCall [argwid] mem) -> (CALLdefer [argwid] mem)
(GoCall [argwid] mem) -> (CALLgo [argwid] mem)
(InterCall [argwid] entry mem) -> (CALLinter [argwid] entry mem)
// Miscellaneous
(Convert <t> x mem) -> (MOVQconvert <t> x mem)
(IsNonNil p) -> (SETNE (TESTQ p p))
(IsInBounds idx len) -> (SETB (CMPQ idx len))
(IsSliceInBounds idx len) -> (SETBE (CMPQ idx len))
(NilCheck ptr mem) -> (LoweredNilCheck ptr mem)
(GetG mem) -> (LoweredGetG mem)
(GetClosurePtr) -> (LoweredGetClosurePtr)
(Addr {sym} base) -> (LEAQ {sym} base)
(ITab (Load ptr mem)) -> (MOVQload ptr mem)

// block rewrites
@@ -408,6 +443,13 @@
(If cond yes no) -> (NE (TESTB cond cond) yes no)
// ***************************
// Above: lowering rules
// Below: optimizations
// ***************************
// TODO: Should the optimizations be a separate pass?
// Fold boolean tests into blocks
(NE (TESTB (SETL  cmp) (SETL  cmp)) yes no) -> (LT cmp yes no)
(NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) -> (LE cmp yes no)
(NE (TESTB (SETG  cmp) (SETG  cmp)) yes no) -> (GT cmp yes no)
@@ -429,15 +471,6 @@
// (SETNEF x) -> (ORQ (SETNE <config.Frontend().TypeInt8()> x) (SETNAN <config.Frontend().TypeInt8()> x))
// (SETEQF x) -> (ANDQ (SETEQ <config.Frontend().TypeInt8()> x) (SETORD <config.Frontend().TypeInt8()> x))
// fold constants into instructions
(ADDQ x (MOVQconst [c])) && is32Bit(c) -> (ADDQconst [c] x)
(ADDQ (MOVQconst [c]) x) && is32Bit(c) -> (ADDQconst [c] x)
@@ -831,7 +864,6 @@
(MOVLstoreconst [x] {sym} (ADDQ ptr idx) mem) -> (MOVLstoreconstidx1 [x] {sym} ptr idx mem)
(MOVQstoreconst [x] {sym} (ADDQ ptr idx) mem) -> (MOVQstoreconstidx1 [x] {sym} ptr idx mem)
// combine SHLQ into indexed loads and stores
(MOVWloadidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) -> (MOVWloadidx2 [c] {sym} ptr idx mem)
(MOVLloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) -> (MOVLloadidx4 [c] {sym} ptr idx mem)
@@ -952,55 +984,6 @@
(LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
	(LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
// Absorb InvertFlags into branches.
(LT (InvertFlags cmp) yes no) -> (GT cmp yes no)
(GT (InvertFlags cmp) yes no) -> (LT cmp yes no)
@@ -1262,6 +1245,31 @@
(CMPWconst x [0]) -> (TESTW x x)
(CMPBconst x [0]) -> (TESTB x x)
// Optimizing conditional moves
(CMOVQEQconst x (InvertFlags y) [c]) -> (CMOVQNEconst x y [c])
(CMOVLEQconst x (InvertFlags y) [c]) -> (CMOVLNEconst x y [c])
(CMOVWEQconst x (InvertFlags y) [c]) -> (CMOVWNEconst x y [c])
(CMOVQEQconst _ (FlagEQ) [c]) -> (Const64 [c])
(CMOVLEQconst _ (FlagEQ) [c]) -> (Const32 [c])
(CMOVWEQconst _ (FlagEQ) [c]) -> (Const16 [c])
(CMOVQEQconst x (FlagLT_ULT)) -> x
(CMOVLEQconst x (FlagLT_ULT)) -> x
(CMOVWEQconst x (FlagLT_ULT)) -> x
(CMOVQEQconst x (FlagLT_UGT)) -> x
(CMOVLEQconst x (FlagLT_UGT)) -> x
(CMOVWEQconst x (FlagLT_UGT)) -> x
(CMOVQEQconst x (FlagGT_ULT)) -> x
(CMOVLEQconst x (FlagGT_ULT)) -> x
(CMOVWEQconst x (FlagGT_ULT)) -> x
(CMOVQEQconst x (FlagGT_UGT)) -> x
(CMOVLEQconst x (FlagGT_UGT)) -> x
(CMOVWEQconst x (FlagGT_UGT)) -> x
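
A sketch of the semantics behind this group: CMOVQEQconst [c] yields the constant when the flags say "equal" and keeps x otherwise, so whenever the flags are statically known (the Flag* patterns above) the whole conditional move folds away:

package main

import "fmt"

// Illustrative model of CMOVQEQconst; flagsEqual stands in for the
// statically known flag value the rules match on.
func cmovEqConst(x, c int64, flagsEqual bool) int64 {
	if flagsEqual {
		return c // the (FlagEQ) case folds to a constant
	}
	return x // all FlagLT_*/FlagGT_* cases fold to x
}

func main() {
	fmt.Println(cmovEqConst(7, 64, true), cmovEqConst(7, 64, false)) // 64 7
}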
// Combining byte loads into larger (unaligned) loads.
// There are many ways these combinations could occur.  This is
// designed to match the way encoding/binary.LittleEndian does it.
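
The Go shape these rules target: encoding/binary.LittleEndian assembles a word from individual byte loads, which the combining rules fuse into a single wider (unaligned) load. For illustration:

package main

import "fmt"

// The byte-by-byte pattern the rules recognize and turn into one
// 32-bit load.
func readUint32(b []byte) uint32 {
	return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
}

func main() {
	fmt.Printf("%#x\n", readUint32([]byte{0x78, 0x56, 0x34, 0x12})) // 0x12345678
}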
...
@@ -13,7 +13,8 @@
// - an additional conditional can be provided after the match pattern with "&&".
// on the generated side
// - the type of the top-level expression is the same as the one on the left-hand side.
// - the type of any subexpressions must be specified explicitly (or
// be specified in the op's type field).
// - auxint will be 0 if not specified.
// - aux will be nil if not specified.
@@ -24,12 +25,12 @@
// For now, the generated successors must be a permutation of the matched successors.

// constant folding
(Trunc16to8  (Const16 [c])) -> (Const8  [int64(int8(c))])
(Trunc32to8  (Const32 [c])) -> (Const8  [int64(int8(c))])
(Trunc32to16 (Const32 [c])) -> (Const16 [int64(int16(c))])
(Trunc64to8  (Const64 [c])) -> (Const8  [int64(int8(c))])
(Trunc64to16 (Const64 [c])) -> (Const16 [int64(int16(c))])
(Trunc64to32 (Const64 [c])) -> (Const32 [int64(int32(c))])
(Cvt64Fto32F (Const64F [c])) -> (Const32F [f2i(float64(i2f32(c)))])
(Cvt32Fto64F (Const32F [c])) -> (Const64F [c]) // c is already a 64 bit float
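
The f2i/i2f/i2f32 helpers used here reflect how float constants are stored: the auxint field holds the bit pattern of a float64 even for 32-bit constants, which is why Cvt32Fto64F of a constant is a no-op. A sketch of what the helpers do, assuming their real definitions match the obvious bit conversions:

package main

import (
	"fmt"
	"math"
)

func f2i(f float64) int64   { return int64(math.Float64bits(f)) }
func i2f(i int64) float64   { return math.Float64frombits(uint64(i)) }
func i2f32(i int64) float32 { return float32(math.Float64frombits(uint64(i))) }

func main() {
	c := f2i(1.5)
	fmt.Println(i2f(c), i2f32(c)) // 1.5 1.5
}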
@@ -41,40 +42,40 @@
//(Neg32F (Const32F [c])) -> (Const32F [f2i(-i2f(c))])
//(Neg64F (Const64F [c])) -> (Const64F [f2i(-i2f(c))])

(Add8  (Const8 [c])  (Const8 [d]))  -> (Const8  [int64(int8(c+d))])
(Add16 (Const16 [c]) (Const16 [d])) -> (Const16 [int64(int16(c+d))])
(Add32 (Const32 [c]) (Const32 [d])) -> (Const32 [int64(int32(c+d))])
(Add64 (Const64 [c]) (Const64 [d])) -> (Const64 [c+d])
(Add32F (Const32F [c]) (Const32F [d])) ->
	(Const32F [f2i(float64(i2f32(c) + i2f32(d)))]) // ensure we combine the operands with 32 bit precision
(Add64F (Const64F [c]) (Const64F [d])) -> (Const64F [f2i(i2f(c) + i2f(d))])
(AddPtr <t> x (Const64 [c])) -> (OffPtr <t> x [c])

(Sub8  (Const8 [c])  (Const8 [d]))  -> (Const8  [int64(int8(c-d))])
(Sub16 (Const16 [c]) (Const16 [d])) -> (Const16 [int64(int16(c-d))])
(Sub32 (Const32 [c]) (Const32 [d])) -> (Const32 [int64(int32(c-d))])
(Sub64 (Const64 [c]) (Const64 [d])) -> (Const64 [c-d])
(Sub32F (Const32F [c]) (Const32F [d])) ->
	(Const32F [f2i(float64(i2f32(c) - i2f32(d)))])
(Sub64F (Const64F [c]) (Const64F [d])) -> (Const64F [f2i(i2f(c) - i2f(d))])

(Mul8  (Const8 [c])  (Const8 [d]))  -> (Const8  [int64(int8(c*d))])
(Mul16 (Const16 [c]) (Const16 [d])) -> (Const16 [int64(int16(c*d))])
(Mul32 (Const32 [c]) (Const32 [d])) -> (Const32 [int64(int32(c*d))])
(Mul64 (Const64 [c]) (Const64 [d])) -> (Const64 [c*d])
(Mul32F (Const32F [c]) (Const32F [d])) ->
	(Const32F [f2i(float64(i2f32(c) * i2f32(d)))])
(Mul64F (Const64F [c]) (Const64F [d])) -> (Const64F [f2i(i2f(c) * i2f(d))])
(Mod8  (Const8 [c])  (Const8 [d]))  && d != 0 -> (Const8  [int64(int8(c % d))])
(Mod16 (Const16 [c]) (Const16 [d])) && d != 0 -> (Const16 [int64(int16(c % d))])
(Mod32 (Const32 [c]) (Const32 [d])) && d != 0 -> (Const32 [int64(int32(c % d))])
(Mod64 (Const64 [c]) (Const64 [d])) && d != 0 -> (Const64 [c % d])
(Mod8u  (Const8 [c])  (Const8 [d]))  && d != 0 -> (Const8  [int64(uint8(c) % uint8(d))])
(Mod16u (Const16 [c]) (Const16 [d])) && d != 0 -> (Const16 [int64(uint16(c) % uint16(d))])
(Mod32u (Const32 [c]) (Const32 [d])) && d != 0 -> (Const32 [int64(uint32(c) % uint32(d))])
(Mod64u (Const64 [c]) (Const64 [d])) && d != 0 -> (Const64 [int64(uint64(c) % uint64(d))])
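
The d != 0 guards matter: folding c % 0 at compile time would evaluate an expression that the language requires to panic at run time instead. A small demonstration:

package main

import "fmt"

func main() {
	c, d := 7, 0
	defer func() { fmt.Println("recovered:", recover()) }()
	fmt.Println(c % d) // runtime error: integer divide by zero
}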
(Lsh64x64 (Const64 [c]) (Const64 [d])) -> (Const64 [c << uint64(d)])
(Rsh64x64 (Const64 [c]) (Const64 [d])) -> (Const64 [c >> uint64(d)])
@@ -89,34 +90,9 @@
(Rsh8x64  (Const8 [c]) (Const64 [d])) -> (Const8 [int64(int8(c) >> uint64(d))])
(Rsh8Ux64 (Const8 [c]) (Const64 [d])) -> (Const8 [int64(int8(uint8(c) >> uint64(d)))])
// ((x >> c1) << c2) >> c3
(Rsh64Ux64 (Lsh64x64 (Rsh64Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3])) && uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) ->
	(Rsh64Ux64 x (Const64 <config.fe.TypeUInt64()> [c1-c2+c3]))
(Rsh32Ux32 (Lsh32x32 (Rsh32Ux32 x (Const32 [c1])) (Const32 [c2])) (Const32 [c3])) && uint32(c1) >= uint32(c2) && uint32(c3) >= uint32(c2) ->
	(Rsh32Ux32 x (Const32 <config.fe.TypeUInt32()> [int64(int32(c1-c2+c3))]))
(Rsh16Ux16 (Lsh16x16 (Rsh16Ux16 x (Const16 [c1])) (Const16 [c2])) (Const16 [c3])) && uint16(c1) >= uint16(c2) && uint16(c3) >= uint16(c2) ->
	(Rsh16Ux16 x (Const16 <config.fe.TypeUInt16()> [int64(int16(c1-c2+c3))]))
(Rsh8Ux8 (Lsh8x8 (Rsh8Ux8 x (Const8 [c1])) (Const8 [c2])) (Const8 [c3])) && uint8(c1) >= uint8(c2) && uint8(c3) >= uint8(c2) ->
	(Rsh8Ux8 x (Const8 <config.fe.TypeUInt8()> [int64(int8(c1-c2+c3))]))

// ((x << c1) >> c2) << c3
(Lsh64x64 (Rsh64Ux64 (Lsh64x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3])) && uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) ->
	(Lsh64x64 x (Const64 <config.fe.TypeUInt64()> [c1-c2+c3]))
(Lsh32x32 (Rsh32Ux32 (Lsh32x32 x (Const32 [c1])) (Const32 [c2])) (Const32 [c3])) && uint32(c1) >= uint32(c2) && uint32(c3) >= uint32(c2) ->
	(Lsh32x32 x (Const32 <config.fe.TypeUInt32()> [int64(int32(c1-c2+c3))]))
(Lsh16x16 (Rsh16Ux16 (Lsh16x16 x (Const16 [c1])) (Const16 [c2])) (Const16 [c3])) && uint16(c1) >= uint16(c2) && uint16(c3) >= uint16(c2) ->
	(Lsh16x16 x (Const16 <config.fe.TypeUInt16()> [int64(int16(c1-c2+c3))]))
(Lsh8x8 (Rsh8Ux8 (Lsh8x8 x (Const8 [c1])) (Const8 [c2])) (Const8 [c3])) && uint8(c1) >= uint8(c2) && uint8(c3) >= uint8(c2) ->
	(Lsh8x8 x (Const8 <config.fe.TypeUInt8()> [int64(int8(c1-c2+c3))]))
// Fold IsInBounds when the range of the index cannot exceed the limit.
(IsInBounds (ZeroExt8to32 _)  (Const32 [c])) && (1 << 8)  <= c -> (ConstBool [1])
(IsInBounds (ZeroExt8to64 _)  (Const64 [c])) && (1 << 8)  <= c -> (ConstBool [1])
(IsInBounds (ZeroExt16to32 _) (Const32 [c])) && (1 << 16) <= c -> (ConstBool [1])
(IsInBounds (ZeroExt16to64 _) (Const64 [c])) && (1 << 16) <= c -> (ConstBool [1])
(IsInBounds x x) -> (ConstBool [0])
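
This is the rule family behind a classic bounds-check-elimination pattern: a zero-extended byte is always below 256, so indexing a 256-element table with it can never fail. For illustration:

package main

import "fmt"

var table [256]byte

// The index is a byte, zero-extended; (1 << 8) <= 256 holds, so
// IsInBounds folds to true and the bounds check disappears.
func lookup(i byte) byte { return table[i] }

func main() {
	fmt.Println(lookup(42))
}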
@@ -140,7 +116,7 @@
(Eq64 x x) -> (ConstBool [1])
(Eq32 x x) -> (ConstBool [1])
(Eq16 x x) -> (ConstBool [1])
(Eq8  x x) -> (ConstBool [1])
(EqB (ConstBool [c]) (ConstBool [d])) -> (ConstBool [b2i(c == d)])
(EqB (ConstBool [0]) x) -> (Not x)
(EqB (ConstBool [1]) x) -> x
@@ -148,7 +124,7 @@
(Neq64 x x) -> (ConstBool [0])
(Neq32 x x) -> (ConstBool [0])
(Neq16 x x) -> (ConstBool [0])
(Neq8  x x) -> (ConstBool [0])
(NeqB (ConstBool [c]) (ConstBool [d])) -> (ConstBool [b2i(c != d)])
(NeqB (ConstBool [0]) x) -> x
(NeqB (ConstBool [1]) x) -> (Not x)
@@ -156,101 +132,103 @@
(Eq64 (Const64 <t> [c]) (Add64 (Const64 <t> [d]) x)) -> (Eq64 (Const64 <t> [c-d]) x)
(Eq32 (Const32 <t> [c]) (Add32 (Const32 <t> [d]) x)) -> (Eq32 (Const32 <t> [int64(int32(c-d))]) x)
(Eq16 (Const16 <t> [c]) (Add16 (Const16 <t> [d]) x)) -> (Eq16 (Const16 <t> [int64(int16(c-d))]) x)
(Eq8  (Const8 <t> [c])  (Add8  (Const8 <t> [d]) x))  -> (Eq8  (Const8 <t> [int64(int8(c-d))]) x)

(Neq64 (Const64 <t> [c]) (Add64 (Const64 <t> [d]) x)) -> (Neq64 (Const64 <t> [c-d]) x)
(Neq32 (Const32 <t> [c]) (Add32 (Const32 <t> [d]) x)) -> (Neq32 (Const32 <t> [int64(int32(c-d))]) x)
(Neq16 (Const16 <t> [c]) (Add16 (Const16 <t> [d]) x)) -> (Neq16 (Const16 <t> [int64(int16(c-d))]) x)
(Neq8  (Const8 <t> [c])  (Add8  (Const8 <t> [d]) x))  -> (Neq8  (Const8 <t> [int64(int8(c-d))]) x)
// canonicalize: swap arguments for commutative operations when one argument is a constant.
(Eq64 x (Const64 <t> [c])) && x.Op != OpConst64 -> (Eq64 (Const64 <t> [c]) x)
(Eq32 x (Const32 <t> [c])) && x.Op != OpConst32 -> (Eq32 (Const32 <t> [c]) x)
(Eq16 x (Const16 <t> [c])) && x.Op != OpConst16 -> (Eq16 (Const16 <t> [c]) x)
(Eq8  x (Const8 <t> [c]))  && x.Op != OpConst8  -> (Eq8  (Const8 <t> [c]) x)
(Neq64 x (Const64 <t> [c])) && x.Op != OpConst64 -> (Neq64 (Const64 <t> [c]) x)
(Neq32 x (Const32 <t> [c])) && x.Op != OpConst32 -> (Neq32 (Const32 <t> [c]) x)
(Neq16 x (Const16 <t> [c])) && x.Op != OpConst16 -> (Neq16 (Const16 <t> [c]) x)
(Neq8  x (Const8 <t> [c]))  && x.Op != OpConst8  -> (Neq8  (Const8 <t> [c]) x)
// AddPtr is not canonicalized because nilcheck ptr checks the first argument to be non-nil.
(Add64 x (Const64 <t> [c])) && x.Op != OpConst64 -> (Add64 (Const64 <t> [c]) x)
(Add32 x (Const32 <t> [c])) && x.Op != OpConst32 -> (Add32 (Const32 <t> [c]) x)
(Add16 x (Const16 <t> [c])) && x.Op != OpConst16 -> (Add16 (Const16 <t> [c]) x)
(Add8  x (Const8 <t> [c]))  && x.Op != OpConst8  -> (Add8  (Const8 <t> [c]) x)
(Mul64 x (Const64 <t> [c])) && x.Op != OpConst64 -> (Mul64 (Const64 <t> [c]) x)
(Mul32 x (Const32 <t> [c])) && x.Op != OpConst32 -> (Mul32 (Const32 <t> [c]) x)
(Mul16 x (Const16 <t> [c])) && x.Op != OpConst16 -> (Mul16 (Const16 <t> [c]) x)
(Mul8  x (Const8 <t> [c]))  && x.Op != OpConst8  -> (Mul8  (Const8 <t> [c]) x)
(Sub64 x (Const64 <t> [c])) && x.Op != OpConst64 -> (Add64 (Const64 <t> [-c]) x)
(Sub32 x (Const32 <t> [c])) && x.Op != OpConst32 -> (Add32 (Const32 <t> [int64(int32(-c))]) x)
(Sub16 x (Const16 <t> [c])) && x.Op != OpConst16 -> (Add16 (Const16 <t> [int64(int16(-c))]) x)
(Sub8  x (Const8 <t> [c]))  && x.Op != OpConst8  -> (Add8  (Const8 <t> [int64(int8(-c))]) x)
(And64 x (Const64 <t> [c])) && x.Op != OpConst64 -> (And64 (Const64 <t> [c]) x)
(And32 x (Const32 <t> [c])) && x.Op != OpConst32 -> (And32 (Const32 <t> [c]) x)
(And16 x (Const16 <t> [c])) && x.Op != OpConst16 -> (And16 (Const16 <t> [c]) x)
(And8  x (Const8 <t> [c]))  && x.Op != OpConst8  -> (And8  (Const8 <t> [c]) x)
(Or64 x (Const64 <t> [c])) && x.Op != OpConst64 -> (Or64 (Const64 <t> [c]) x)
(Or32 x (Const32 <t> [c])) && x.Op != OpConst32 -> (Or32 (Const32 <t> [c]) x)
(Or16 x (Const16 <t> [c])) && x.Op != OpConst16 -> (Or16 (Const16 <t> [c]) x)
(Or8  x (Const8 <t> [c]))  && x.Op != OpConst8  -> (Or8  (Const8 <t> [c]) x)
(Xor64 x (Const64 <t> [c])) && x.Op != OpConst64 -> (Xor64 (Const64 <t> [c]) x)
(Xor32 x (Const32 <t> [c])) && x.Op != OpConst32 -> (Xor32 (Const32 <t> [c]) x)
(Xor16 x (Const16 <t> [c])) && x.Op != OpConst16 -> (Xor16 (Const16 <t> [c]) x)
(Xor8  x (Const8 <t> [c]))  && x.Op != OpConst8  -> (Xor8  (Const8 <t> [c]) x)
// Distribute multiplication c * (d+x) -> c*d + c*x. Useful for:
//	a[i].b = ...; a[i+1].b = ...
(Mul64 (Const64 <t> [c]) (Add64 <t> (Const64 <t> [d]) x)) ->
	(Add64 (Const64 <t> [c*d]) (Mul64 <t> (Const64 <t> [c]) x))
(Mul32 (Const32 <t> [c]) (Add32 <t> (Const32 <t> [d]) x)) ->
	(Add32 (Const32 <t> [int64(int32(c*d))]) (Mul32 <t> (Const32 <t> [c]) x))
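
The access pattern the comment alludes to, spelled out: with a 16-byte element, &s[i+1].b sits at 16*(i+1)+8 = 16*i+24 off the base, so rewriting c*(d+x) as c*d + c*x lets the constant part fold into the addressing mode instead of requiring a separate add. An illustrative example:

package main

import "fmt"

type T struct{ a, b int64 } // 16-byte element; b at offset 8

func set(s []T, i int) {
	s[i].b = 1   // address: base + 16*i + 8
	s[i+1].b = 2 // address: base + 16*i + 24 after distribution
}

func main() {
	s := make([]T, 4)
	set(s, 1)
	fmt.Println(s)
}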
// rewrite shifts of 8/16/32 bit consts into 64 bit consts to reduce
// the number of the other rewrite rules for const shifts
(Lsh64x32 <t> x (Const32 [c])) -> (Lsh64x64 x (Const64 <t> [int64(uint32(c))])) (Lsh64x32 <t> x (Const32 [c])) -> (Lsh64x64 x (Const64 <t> [int64(uint32(c))]))
(Lsh64x16 <t> x (Const16 [c])) -> (Lsh64x64 x (Const64 <t> [int64(uint16(c))])) (Lsh64x16 <t> x (Const16 [c])) -> (Lsh64x64 x (Const64 <t> [int64(uint16(c))]))
(Lsh64x8 <t> x (Const8 [c])) -> (Lsh64x64 x (Const64 <t> [int64(uint8(c))])) (Lsh64x8 <t> x (Const8 [c])) -> (Lsh64x64 x (Const64 <t> [int64(uint8(c))]))
(Rsh64x32 <t> x (Const32 [c])) -> (Rsh64x64 x (Const64 <t> [int64(uint32(c))])) (Rsh64x32 <t> x (Const32 [c])) -> (Rsh64x64 x (Const64 <t> [int64(uint32(c))]))
(Rsh64x16 <t> x (Const16 [c])) -> (Rsh64x64 x (Const64 <t> [int64(uint16(c))])) (Rsh64x16 <t> x (Const16 [c])) -> (Rsh64x64 x (Const64 <t> [int64(uint16(c))]))
(Rsh64x8 <t> x (Const8 [c])) -> (Rsh64x64 x (Const64 <t> [int64(uint8(c))])) (Rsh64x8 <t> x (Const8 [c])) -> (Rsh64x64 x (Const64 <t> [int64(uint8(c))]))
(Rsh64Ux32 <t> x (Const32 [c])) -> (Rsh64Ux64 x (Const64 <t> [int64(uint32(c))])) (Rsh64Ux32 <t> x (Const32 [c])) -> (Rsh64Ux64 x (Const64 <t> [int64(uint32(c))]))
(Rsh64Ux16 <t> x (Const16 [c])) -> (Rsh64Ux64 x (Const64 <t> [int64(uint16(c))])) (Rsh64Ux16 <t> x (Const16 [c])) -> (Rsh64Ux64 x (Const64 <t> [int64(uint16(c))]))
(Rsh64Ux8 <t> x (Const8 [c])) -> (Rsh64Ux64 x (Const64 <t> [int64(uint8(c))])) (Rsh64Ux8 <t> x (Const8 [c])) -> (Rsh64Ux64 x (Const64 <t> [int64(uint8(c))]))
(Lsh32x32 <t> x (Const32 [c])) -> (Lsh32x64 x (Const64 <t> [int64(uint32(c))])) (Lsh32x32 <t> x (Const32 [c])) -> (Lsh32x64 x (Const64 <t> [int64(uint32(c))]))
(Lsh32x16 <t> x (Const16 [c])) -> (Lsh32x64 x (Const64 <t> [int64(uint16(c))])) (Lsh32x16 <t> x (Const16 [c])) -> (Lsh32x64 x (Const64 <t> [int64(uint16(c))]))
(Lsh32x8 <t> x (Const8 [c])) -> (Lsh32x64 x (Const64 <t> [int64(uint8(c))])) (Lsh32x8 <t> x (Const8 [c])) -> (Lsh32x64 x (Const64 <t> [int64(uint8(c))]))
(Rsh32x32 <t> x (Const32 [c])) -> (Rsh32x64 x (Const64 <t> [int64(uint32(c))])) (Rsh32x32 <t> x (Const32 [c])) -> (Rsh32x64 x (Const64 <t> [int64(uint32(c))]))
(Rsh32x16 <t> x (Const16 [c])) -> (Rsh32x64 x (Const64 <t> [int64(uint16(c))])) (Rsh32x16 <t> x (Const16 [c])) -> (Rsh32x64 x (Const64 <t> [int64(uint16(c))]))
(Rsh32x8 <t> x (Const8 [c])) -> (Rsh32x64 x (Const64 <t> [int64(uint8(c))])) (Rsh32x8 <t> x (Const8 [c])) -> (Rsh32x64 x (Const64 <t> [int64(uint8(c))]))
(Rsh32Ux32 <t> x (Const32 [c])) -> (Rsh32Ux64 x (Const64 <t> [int64(uint32(c))])) (Rsh32Ux32 <t> x (Const32 [c])) -> (Rsh32Ux64 x (Const64 <t> [int64(uint32(c))]))
(Rsh32Ux16 <t> x (Const16 [c])) -> (Rsh32Ux64 x (Const64 <t> [int64(uint16(c))])) (Rsh32Ux16 <t> x (Const16 [c])) -> (Rsh32Ux64 x (Const64 <t> [int64(uint16(c))]))
(Rsh32Ux8 <t> x (Const8 [c])) -> (Rsh32Ux64 x (Const64 <t> [int64(uint8(c))])) (Rsh32Ux8 <t> x (Const8 [c])) -> (Rsh32Ux64 x (Const64 <t> [int64(uint8(c))]))
(Lsh16x32 <t> x (Const32 [c])) -> (Lsh16x64 x (Const64 <t> [int64(uint32(c))])) (Lsh16x32 <t> x (Const32 [c])) -> (Lsh16x64 x (Const64 <t> [int64(uint32(c))]))
(Lsh16x16 <t> x (Const16 [c])) -> (Lsh16x64 x (Const64 <t> [int64(uint16(c))])) (Lsh16x16 <t> x (Const16 [c])) -> (Lsh16x64 x (Const64 <t> [int64(uint16(c))]))
(Lsh16x8 <t> x (Const8 [c])) -> (Lsh16x64 x (Const64 <t> [int64(uint8(c))])) (Lsh16x8 <t> x (Const8 [c])) -> (Lsh16x64 x (Const64 <t> [int64(uint8(c))]))
(Rsh16x32 <t> x (Const32 [c])) -> (Rsh16x64 x (Const64 <t> [int64(uint32(c))])) (Rsh16x32 <t> x (Const32 [c])) -> (Rsh16x64 x (Const64 <t> [int64(uint32(c))]))
(Rsh16x16 <t> x (Const16 [c])) -> (Rsh16x64 x (Const64 <t> [int64(uint16(c))])) (Rsh16x16 <t> x (Const16 [c])) -> (Rsh16x64 x (Const64 <t> [int64(uint16(c))]))
(Rsh16x8 <t> x (Const8 [c])) -> (Rsh16x64 x (Const64 <t> [int64(uint8(c))])) (Rsh16x8 <t> x (Const8 [c])) -> (Rsh16x64 x (Const64 <t> [int64(uint8(c))]))
(Rsh16Ux32 <t> x (Const32 [c])) -> (Rsh16Ux64 x (Const64 <t> [int64(uint32(c))])) (Rsh16Ux32 <t> x (Const32 [c])) -> (Rsh16Ux64 x (Const64 <t> [int64(uint32(c))]))
(Rsh16Ux16 <t> x (Const16 [c])) -> (Rsh16Ux64 x (Const64 <t> [int64(uint16(c))])) (Rsh16Ux16 <t> x (Const16 [c])) -> (Rsh16Ux64 x (Const64 <t> [int64(uint16(c))]))
(Rsh16Ux8 <t> x (Const8 [c])) -> (Rsh16Ux64 x (Const64 <t> [int64(uint8(c))])) (Rsh16Ux8 <t> x (Const8 [c])) -> (Rsh16Ux64 x (Const64 <t> [int64(uint8(c))]))
(Lsh8x32 <t> x (Const32 [c])) -> (Lsh8x64 x (Const64 <t> [int64(uint32(c))])) (Lsh8x32 <t> x (Const32 [c])) -> (Lsh8x64 x (Const64 <t> [int64(uint32(c))]))
(Lsh8x16 <t> x (Const16 [c])) -> (Lsh8x64 x (Const64 <t> [int64(uint16(c))])) (Lsh8x16 <t> x (Const16 [c])) -> (Lsh8x64 x (Const64 <t> [int64(uint16(c))]))
(Lsh8x8 <t> x (Const8 [c])) -> (Lsh8x64 x (Const64 <t> [int64(uint8(c))])) (Lsh8x8 <t> x (Const8 [c])) -> (Lsh8x64 x (Const64 <t> [int64(uint8(c))]))
(Rsh8x32 <t> x (Const32 [c])) -> (Rsh8x64 x (Const64 <t> [int64(uint32(c))])) (Rsh8x32 <t> x (Const32 [c])) -> (Rsh8x64 x (Const64 <t> [int64(uint32(c))]))
(Rsh8x16 <t> x (Const16 [c])) -> (Rsh8x64 x (Const64 <t> [int64(uint16(c))])) (Rsh8x16 <t> x (Const16 [c])) -> (Rsh8x64 x (Const64 <t> [int64(uint16(c))]))
(Rsh8x8 <t> x (Const8 [c])) -> (Rsh8x64 x (Const64 <t> [int64(uint8(c))])) (Rsh8x8 <t> x (Const8 [c])) -> (Rsh8x64 x (Const64 <t> [int64(uint8(c))]))
(Rsh8Ux32 <t> x (Const32 [c])) -> (Rsh8Ux64 x (Const64 <t> [int64(uint32(c))])) (Rsh8Ux32 <t> x (Const32 [c])) -> (Rsh8Ux64 x (Const64 <t> [int64(uint32(c))]))
(Rsh8Ux16 <t> x (Const16 [c])) -> (Rsh8Ux64 x (Const64 <t> [int64(uint16(c))])) (Rsh8Ux16 <t> x (Const16 [c])) -> (Rsh8Ux64 x (Const64 <t> [int64(uint16(c))]))
(Rsh8Ux8 <t> x (Const8 [c])) -> (Rsh8Ux64 x (Const64 <t> [int64(uint8(c))])) (Rsh8Ux8 <t> x (Const8 [c])) -> (Rsh8Ux64 x (Const64 <t> [int64(uint8(c))]))
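These rules rely on the fact that only the value of a constant shift count matters, not the width of its type, so every count can be widened to 64 bits and all later const-shift rules need only match Const64. A small Go sketch of that invariant (names are illustrative):

	package p

	func widen(x int64) bool {
		const c8 uint8 = 3
		const c64 uint64 = uint64(c8) // the int64(uint8(c)) conversion in the rules
		return x<<c8 == x<<c64        // always true: same count, same result
	}
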
// shifts by zero.
(Lsh64x64  x (Const64 [0])) -> x
@@ -267,19 +245,18 @@
(Rsh8Ux64  x (Const64 [0])) -> x
// zero shifted.
(Lsh64x64  (Const64 [0]) _) -> (Const64 [0])
(Rsh64x64  (Const64 [0]) _) -> (Const64 [0])
(Rsh64Ux64 (Const64 [0]) _) -> (Const64 [0])
(Lsh32x64  (Const64 [0]) _) -> (Const32 [0])
(Rsh32x64  (Const64 [0]) _) -> (Const32 [0])
(Rsh32Ux64 (Const64 [0]) _) -> (Const32 [0])
(Lsh16x64  (Const64 [0]) _) -> (Const16 [0])
(Rsh16x64  (Const64 [0]) _) -> (Const16 [0])
(Rsh16Ux64 (Const64 [0]) _) -> (Const16 [0])
(Lsh8x64   (Const64 [0]) _) -> (Const8  [0])
(Rsh8x64   (Const64 [0]) _) -> (Const8  [0])
(Rsh8Ux64  (Const64 [0]) _) -> (Const8  [0])
// large left shifts of all values, and right shifts of unsigned values
(Lsh64x64  _ (Const64 [c])) && uint64(c) >= 64 -> (Const64 [0])
@@ -288,8 +265,8 @@
(Rsh32Ux64 _ (Const64 [c])) && uint64(c) >= 32 -> (Const32 [0])
(Lsh16x64  _ (Const64 [c])) && uint64(c) >= 16 -> (Const16 [0])
(Rsh16Ux64 _ (Const64 [c])) && uint64(c) >= 16 -> (Const16 [0])
(Lsh8x64   _ (Const64 [c])) && uint64(c) >= 8  -> (Const8  [0])
(Rsh8Ux64  _ (Const64 [c])) && uint64(c) >= 8  -> (Const8  [0])
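A left shift or an unsigned right shift by at least the operand width always produces zero, so these shifts fold to a constant at compile time. In plain Go:

	package p

	func fold() (uint32, int16) {
		var x uint32 = 0xFFFFFFFF
		var y int16 = -1
		return x >> 32, y << 16 // both evaluate to 0
	}

Signed right shifts are deliberately absent from this list: a large Rsh64x64 fills with the sign bit and yields 0 or -1, not always 0.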
// combine const shifts
(Lsh64x64  <t> (Lsh64x64  x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) -> (Lsh64x64  x (Const64 <t> [c+d]))
@@ -307,6 +284,34 @@
(Rsh16Ux64 <t> (Rsh16Ux64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) -> (Rsh16Ux64 x (Const64 <t> [c+d]))
(Rsh8Ux64  <t> (Rsh8Ux64  x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) -> (Rsh8Ux64  x (Const64 <t> [c+d]))
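Two constant shifts in the same direction simply add their counts; the !uaddOvf(c,d) condition guards the c+d addition against unsigned overflow. The identity in plain Go:

	package p

	func combine(x uint64) bool {
		return (x<<3)<<5 == x<<8 && (x>>3)>>5 == x>>8 // always true
	}
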
// ((x >> c1) << c2) >> c3
(Rsh64Ux64 (Lsh64x64 (Rsh64Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
&& uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2)
-> (Rsh64Ux64 x (Const64 <config.fe.TypeUInt64()> [c1-c2+c3]))
(Rsh32Ux64 (Lsh32x64 (Rsh32Ux64 x (Const32 [c1])) (Const32 [c2])) (Const32 [c3]))
&& uint32(c1) >= uint32(c2) && uint32(c3) >= uint32(c2)
-> (Rsh32Ux64 x (Const32 <config.fe.TypeUInt32()> [int64(int32(c1-c2+c3))]))
(Rsh16Ux64 (Lsh16x64 (Rsh16Ux64 x (Const16 [c1])) (Const16 [c2])) (Const16 [c3]))
&& uint16(c1) >= uint16(c2) && uint16(c3) >= uint16(c2)
-> (Rsh16Ux64 x (Const16 <config.fe.TypeUInt16()> [int64(int16(c1-c2+c3))]))
(Rsh8Ux64 (Lsh8x64 (Rsh8Ux64 x (Const8 [c1])) (Const8 [c2])) (Const8 [c3]))
&& uint8(c1) >= uint8(c2) && uint8(c3) >= uint8(c2)
-> (Rsh8Ux64 x (Const8 <config.fe.TypeUInt8()> [int64(int8(c1-c2+c3))]))
// ((x << c1) >> c2) << c3
(Lsh64x64 (Rsh64Ux64 (Lsh64x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
&& uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2)
-> (Lsh64x64 x (Const64 <config.fe.TypeUInt64()> [c1-c2+c3]))
(Lsh32x64 (Rsh32Ux64 (Lsh32x64 x (Const32 [c1])) (Const32 [c2])) (Const32 [c3]))
&& uint32(c1) >= uint32(c2) && uint32(c3) >= uint32(c2)
-> (Lsh32x64 x (Const32 <config.fe.TypeUInt32()> [int64(int32(c1-c2+c3))]))
(Lsh16x64 (Rsh16Ux64 (Lsh16x64 x (Const16 [c1])) (Const16 [c2])) (Const16 [c3]))
&& uint16(c1) >= uint16(c2) && uint16(c3) >= uint16(c2)
-> (Lsh16x64 x (Const16 <config.fe.TypeUInt16()> [int64(int16(c1-c2+c3))]))
(Lsh8x64 (Rsh8Ux64 (Lsh8x64 x (Const8 [c1])) (Const8 [c2])) (Const8 [c3]))
&& uint8(c1) >= uint8(c2) && uint8(c3) >= uint8(c2)
-> (Lsh8x64 x (Const8 <config.fe.TypeUInt8()> [int64(int8(c1-c2+c3))]))
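These "shift sandwich" rules collapse three shifts into one. For ((x >> c1) << c2) >> c3 with c1 >= c2 and c3 >= c2, the inner left shift only re-creates low bits that are already zero, and the outer right shift (at least c2) moves past them again, so the net effect is a single right shift by c1-c2+c3; the mirrored Lsh form works the same way. A sketch of both identities with arbitrary counts:

	package p

	func sandwich(x uint64) bool {
		// c1=5, c2=3, c3=4: net count is 5-3+4 = 6
		return ((x>>5)<<3)>>4 == x>>6 && ((x<<5)>>3)<<4 == x<<6
	}
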
// constant comparisons
(Eq64 (Const64 [c]) (Const64 [d])) -> (ConstBool [b2i(c == d)])
(Eq32 (Const32 [c]) (Const32 [d])) -> (ConstBool [b2i(c == d)])
@@ -362,52 +367,52 @@
(Or64 x x) -> x
(Or32 x x) -> x
(Or16 x x) -> x
(Or8  x x) -> x
(Or64 (Const64 [0]) x) -> x
(Or32 (Const32 [0]) x) -> x
(Or16 (Const16 [0]) x) -> x
(Or8  (Const8  [0]) x) -> x
(Or64 (Const64 [-1]) _) -> (Const64 [-1])
(Or32 (Const32 [-1]) _) -> (Const32 [-1])
(Or16 (Const16 [-1]) _) -> (Const16 [-1])
(Or8  (Const8  [-1]) _) -> (Const8  [-1])
(And64 x x) -> x
(And32 x x) -> x
(And16 x x) -> x
(And8  x x) -> x
(And64 (Const64 [-1]) x) -> x
(And32 (Const32 [-1]) x) -> x
(And16 (Const16 [-1]) x) -> x
(And8  (Const8  [-1]) x) -> x
(And64 (Const64 [0]) _) -> (Const64 [0])
(And32 (Const32 [0]) _) -> (Const32 [0])
(And16 (Const16 [0]) _) -> (Const16 [0])
(And8  (Const8  [0]) _) -> (Const8  [0])
(Xor64 x x) -> (Const64 [0])
(Xor32 x x) -> (Const32 [0])
(Xor16 x x) -> (Const16 [0])
(Xor8  x x) -> (Const8  [0])
(Xor64 (Const64 [0]) x) -> x
(Xor32 (Const32 [0]) x) -> x
(Xor16 (Const16 [0]) x) -> x
(Xor8  (Const8  [0]) x) -> x
(Add64 (Const64 [0]) x) -> x
(Add32 (Const32 [0]) x) -> x
(Add16 (Const16 [0]) x) -> x
(Add8  (Const8  [0]) x) -> x
(Sub64 x x) -> (Const64 [0])
(Sub32 x x) -> (Const32 [0])
(Sub16 x x) -> (Const16 [0])
(Sub8  x x) -> (Const8  [0])
(Mul64 (Const64 [0]) _) -> (Const64 [0])
(Mul32 (Const32 [0]) _) -> (Const32 [0])
(Mul16 (Const16 [0]) _) -> (Const16 [0])
(Mul8  (Const8  [0]) _) -> (Const8  [0])
(Com8  (Com8  x)) -> x
(Com16 (Com16 x)) -> x
(Com32 (Com32 x)) -> x
(Com64 (Com64 x)) -> x
(Neg8  (Sub8  x y)) -> (Sub8  y x)
(Neg16 (Sub16 x y)) -> (Sub16 y x)
(Neg32 (Sub32 x y)) -> (Sub32 y x)
(Neg64 (Sub64 x y)) -> (Sub64 y x)
@@ -461,18 +466,20 @@
(Xor16 (Xor16 x y) y) -> x
(Xor8  (Xor8  x y) y) -> x
(Trunc64to8  (And64 (Const64 [y]) x)) && y&0xFF == 0xFF -> (Trunc64to8 x)
(Trunc64to16 (And64 (Const64 [y]) x)) && y&0xFFFF == 0xFFFF -> (Trunc64to16 x)
(Trunc64to32 (And64 (Const64 [y]) x)) && y&0xFFFFFFFF == 0xFFFFFFFF -> (Trunc64to32 x)
(Trunc32to8  (And32 (Const32 [y]) x)) && y&0xFF == 0xFF -> (Trunc32to8 x)
(Trunc32to16 (And32 (Const32 [y]) x)) && y&0xFFFF == 0xFFFF -> (Trunc32to16 x)
(Trunc16to8  (And16 (Const16 [y]) x)) && y&0xFF == 0xFF -> (Trunc16to8 x)
// Rewrite AND of consts as shifts if possible, slightly faster for 64 bit operands
// leading zeros can be shifted left, then right
(And64 <t> (Const64 [y]) x) && nlz(y) + nto(y) == 64 && nto(y) >= 32
  -> (Rsh64Ux64 (Lsh64x64 <t> x (Const64 <t> [nlz(y)])) (Const64 <t> [nlz(y)]))
// trailing zeros can be shifted right, then left
(And64 <t> (Const64 [y]) x) && nlo(y) + ntz(y) == 64 && ntz(y) >= 32
  -> (Lsh64x64 (Rsh64Ux64 <t> x (Const64 <t> [ntz(y)])) (Const64 <t> [ntz(y)]))
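Here nlz, nto, nlo, and ntz count the constant's leading zeros, trailing ones, leading ones, and trailing zeros. nlz(y)+nto(y) == 64 means y has the shape 0...01...1, a mask of low bits, so the AND is equivalent to shifting up and back down; the >= 32 conditions presumably restrict this to masks too wide for a 32-bit immediate, where materializing the 64-bit constant would cost extra. Both identities in Go:

	package p

	func maskLow(x uint64) bool {
		// y = 0x00000000FFFFFFFF: nlz(y) = 32, nto(y) = 32
		return x&0x00000000FFFFFFFF == (x<<32)>>32
	}

	func maskHigh(x uint64) bool {
		// y = 0xFFFFFFFF00000000: nlo(y) = 32, ntz(y) = 32
		return x&0xFFFFFFFF00000000 == (x>>32)<<32
	}
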
// simplifications often used for lengths. e.g. len(s[i:i+5])==5
(Sub64 (Add64 x y) x) -> y
@@ -481,11 +488,11 @@
(Sub32 (Add32 x y) y) -> x
(Sub16 (Add16 x y) x) -> y
(Sub16 (Add16 x y) y) -> x
(Sub8  (Add8  x y) x) -> y
(Sub8  (Add8  x y) y) -> x
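The slice expression len(s[i:i+5]) lowers to the subtraction (i+5)-i, which these rules collapse to the constant 5:

	package p

	func fixedLen(s []byte, i int) int {
		return len(s[i : i+5]) // (i+5)-i folds to 5; only the bounds check remains
	}
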
// basic phi simplifications
(Phi (Const8  [c]) (Const8  [c])) -> (Const8  [c])
(Phi (Const16 [c]) (Const16 [c])) -> (Const16 [c])
(Phi (Const32 [c]) (Const32 [c])) -> (Const32 [c])
(Phi (Const64 [c]) (Const64 [c])) -> (Const64 [c])
@@ -785,5 +792,7 @@
// A%B = A-(A/B*B).
// This implements % with two * and a bunch of ancillary ops.
// One of the * is free if the user's code also computes A/B.
(Mod64  <t> x (Const64 [c])) && x.Op != OpConst64 && smagic64ok(c)
  -> (Sub64 x (Mul64 <t> (Div64  <t> x (Const64 <t> [c])) (Const64 <t> [c])))
(Mod64u <t> x (Const64 [c])) && x.Op != OpConst64 && umagic64ok(c)
  -> (Sub64 x (Mul64 <t> (Div64u <t> x (Const64 <t> [c])) (Const64 <t> [c])))
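smagic64ok and umagic64ok report whether the constant divisor has a "magic number" that turns the division into a multiply; the remainder then costs one more multiply and a subtract. The identity itself is just Go's truncated division, sketched here with an arbitrary constant:

	package p

	func modViaDiv(x int64) bool {
		quo := x / 7
		return x%7 == x-quo*7 // holds for all x by the definition of % in Go
	}

If the surrounding code already computes x/7, the magic-multiply implementing the division is shared, leaving only one extra multiply for the remainder.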
@@ -1528,9 +1528,9 @@ func rewriteValueAMD64_OpAMD64ANDQconst(v *Value, config *Config) bool {
func rewriteValueAMD64_OpAdd16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Add16 x y)
	// cond:
	// result: (ADDL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
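The functions in this file are generated from the rule files by gen/rulegen.go: each rule becomes a matcher that inspects v and, on success, rewrites v in place and reports true. A hand-written sketch of the shape of such a matcher (illustrative only, not the verbatim generated output; the diff elides each function's tail):

	func rewriteSketch(v *Value) bool {
		// match: (Add16 x y)
		// result: (ADDL x y)
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDL) // reuse the Value node with the new opcode
		v.AddArg(x)
		v.AddArg(y)
		return true // a rule fired; the pass re-runs rewrites to a fixed point
	}
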
@@ -1544,9 +1544,9 @@ func rewriteValueAMD64_OpAdd16(v *Value, config *Config) bool {
func rewriteValueAMD64_OpAdd32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Add32 x y)
	// cond:
	// result: (ADDL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
@@ -1576,9 +1576,9 @@ func rewriteValueAMD64_OpAdd32F(v *Value, config *Config) bool {
func rewriteValueAMD64_OpAdd64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Add64 x y)
	// cond:
	// result: (ADDQ x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
@@ -1608,9 +1608,9 @@ func rewriteValueAMD64_OpAdd64F(v *Value, config *Config) bool {
func rewriteValueAMD64_OpAdd8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Add8 x y)
	// cond:
	// result: (ADDL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
@@ -1626,7 +1626,7 @@ func rewriteValueAMD64_OpAddPtr(v *Value, config *Config) bool {
	_ = b
	// match: (AddPtr x y)
	// cond:
	// result: (ADDQ x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
@@ -1704,7 +1704,7 @@ func rewriteValueAMD64_OpAnd64(v *Value, config *Config) bool {
func rewriteValueAMD64_OpAnd8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (And8 x y)
	// cond:
	// result: (ANDL x y)
	for {
@@ -2946,7 +2946,7 @@ func rewriteValueAMD64_OpCom64(v *Value, config *Config) bool {
func rewriteValueAMD64_OpCom8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Com8 x)
	// cond:
	// result: (NOTL x)
	for {
@@ -2960,7 +2960,7 @@ func rewriteValueAMD64_OpCom8(v *Value, config *Config) bool {
func rewriteValueAMD64_OpConst16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Const16 [val])
	// cond:
	// result: (MOVLconst [val])
	for {
@@ -2974,7 +2974,7 @@ func rewriteValueAMD64_OpConst16(v *Value, config *Config) bool {
func rewriteValueAMD64_OpConst32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Const32 [val])
	// cond:
	// result: (MOVLconst [val])
	for {
@@ -3002,7 +3002,7 @@ func rewriteValueAMD64_OpConst32F(v *Value, config *Config) bool {
func rewriteValueAMD64_OpConst64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Const64 [val])
	// cond:
	// result: (MOVQconst [val])
	for {
@@ -3030,7 +3030,7 @@ func rewriteValueAMD64_OpConst64F(v *Value, config *Config) bool {
func rewriteValueAMD64_OpConst8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Const8 [val])
	// cond:
	// result: (MOVLconst [val])
	for {
@@ -3311,9 +3311,9 @@ func rewriteValueAMD64_OpDeferCall(v *Value, config *Config) bool {
func rewriteValueAMD64_OpDiv16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Div16 x y)
	// cond:
	// result: (DIVW x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
@@ -3343,9 +3343,9 @@ func rewriteValueAMD64_OpDiv16u(v *Value, config *Config) bool {
func rewriteValueAMD64_OpDiv32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Div32 x y)
	// cond:
	// result: (DIVL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
@@ -3391,9 +3391,9 @@ func rewriteValueAMD64_OpDiv32u(v *Value, config *Config) bool {
func rewriteValueAMD64_OpDiv64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Div64 x y)
	// cond:
	// result: (DIVQ x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
@@ -3439,9 +3439,9 @@ func rewriteValueAMD64_OpDiv64u(v *Value, config *Config) bool {
func rewriteValueAMD64_OpDiv8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Div8 x y)
	// cond:
	// result: (DIVW (SignExt8to16 x) (SignExt8to16 y))
	for {
		x := v.Args[0]
		y := v.Args[1]
@@ -3459,7 +3459,7 @@ func rewriteValueAMD64_OpDiv8(v *Value, config *Config) bool {
func rewriteValueAMD64_OpDiv8u(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Div8u x y)
	// cond:
	// result: (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))
	for {
@@ -3479,7 +3479,7 @@ func rewriteValueAMD64_OpDiv8u(v *Value, config *Config) bool {
func rewriteValueAMD64_OpEq16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Eq16 x y)
	// cond:
	// result: (SETEQ (CMPW x y))
	for {
@@ -3497,7 +3497,7 @@ func rewriteValueAMD64_OpEq16(v *Value, config *Config) bool {
func rewriteValueAMD64_OpEq32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Eq32 x y)
	// cond:
	// result: (SETEQ (CMPL x y))
	for {
@@ -3533,7 +3533,7 @@ func rewriteValueAMD64_OpEq32F(v *Value, config *Config) bool {
func rewriteValueAMD64_OpEq64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Eq64 x y)
	// cond:
	// result: (SETEQ (CMPQ x y))
	for {
@@ -3569,7 +3569,7 @@ func rewriteValueAMD64_OpEq64F(v *Value, config *Config) bool {
func rewriteValueAMD64_OpEq8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Eq8 x y)
	// cond:
	// result: (SETEQ (CMPB x y))
	for {
@@ -3587,7 +3587,7 @@ func rewriteValueAMD64_OpEq8(v *Value, config *Config) bool {
func rewriteValueAMD64_OpEqB(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (EqB x y)
	// cond:
	// result: (SETEQ (CMPB x y))
	for {
@@ -3623,7 +3623,7 @@ func rewriteValueAMD64_OpEqPtr(v *Value, config *Config) bool {
func rewriteValueAMD64_OpGeq16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Geq16 x y)
	// cond:
	// result: (SETGE (CMPW x y))
	for {
@@ -3659,7 +3659,7 @@ func rewriteValueAMD64_OpGeq16U(v *Value, config *Config) bool {
func rewriteValueAMD64_OpGeq32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Geq32 x y)
	// cond:
	// result: (SETGE (CMPL x y))
	for {
@@ -3713,7 +3713,7 @@ func rewriteValueAMD64_OpGeq32U(v *Value, config *Config) bool {
func rewriteValueAMD64_OpGeq64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Geq64 x y)
	// cond:
	// result: (SETGE (CMPQ x y))
	for {
@@ -3767,7 +3767,7 @@ func rewriteValueAMD64_OpGeq64U(v *Value, config *Config) bool {
func rewriteValueAMD64_OpGeq8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Geq8 x y)
	// cond:
	// result: (SETGE (CMPB x y))
	for {
@@ -3845,7 +3845,7 @@ func rewriteValueAMD64_OpGoCall(v *Value, config *Config) bool {
func rewriteValueAMD64_OpGreater16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Greater16 x y)
	// cond:
	// result: (SETG (CMPW x y))
	for {
@@ -3881,7 +3881,7 @@ func rewriteValueAMD64_OpGreater16U(v *Value, config *Config) bool {
func rewriteValueAMD64_OpGreater32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Greater32 x y)
	// cond:
	// result: (SETG (CMPL x y))
	for {
@@ -3935,7 +3935,7 @@ func rewriteValueAMD64_OpGreater32U(v *Value, config *Config) bool {
func rewriteValueAMD64_OpGreater64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Greater64 x y)
	// cond:
	// result: (SETG (CMPQ x y))
	for {
@@ -3989,7 +3989,7 @@ func rewriteValueAMD64_OpGreater64U(v *Value, config *Config) bool {
func rewriteValueAMD64_OpGreater8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Greater8 x y)
	// cond:
	// result: (SETG (CMPB x y))
	for {
@@ -4025,9 +4025,9 @@ func rewriteValueAMD64_OpGreater8U(v *Value, config *Config) bool {
func rewriteValueAMD64_OpHmul16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Hmul16 x y)
	// cond:
	// result: (HMULW x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
@@ -4057,9 +4057,9 @@ func rewriteValueAMD64_OpHmul16u(v *Value, config *Config) bool {
func rewriteValueAMD64_OpHmul32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Hmul32 x y)
	// cond:
	// result: (HMULL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
@@ -4089,9 +4089,9 @@ func rewriteValueAMD64_OpHmul32u(v *Value, config *Config) bool {
func rewriteValueAMD64_OpHmul64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Hmul64 x y)
	// cond:
	// result: (HMULQ x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
@@ -4121,9 +4121,9 @@ func rewriteValueAMD64_OpHmul64u(v *Value, config *Config) bool {
func rewriteValueAMD64_OpHmul8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Hmul8 x y)
	// cond:
	// result: (HMULB x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
@@ -4137,7 +4137,7 @@ func rewriteValueAMD64_OpHmul8(v *Value, config *Config) bool {
func rewriteValueAMD64_OpHmul8u(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Hmul8u x y)
	// cond:
	// result: (HMULBU x y)
	for {
@@ -4932,7 +4932,7 @@ func rewriteValueAMD64_OpAMD64LEAQ8(v *Value, config *Config) bool {
func rewriteValueAMD64_OpLeq16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Leq16 x y)
	// cond:
	// result: (SETLE (CMPW x y))
	for {
@@ -4968,7 +4968,7 @@ func rewriteValueAMD64_OpLeq16U(v *Value, config *Config) bool {
func rewriteValueAMD64_OpLeq32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Leq32 x y)
	// cond:
	// result: (SETLE (CMPL x y))
	for {
@@ -5022,7 +5022,7 @@ func rewriteValueAMD64_OpLeq32U(v *Value, config *Config) bool {
func rewriteValueAMD64_OpLeq64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Leq64 x y)
	// cond:
	// result: (SETLE (CMPQ x y))
	for {
@@ -5076,7 +5076,7 @@ func rewriteValueAMD64_OpLeq64U(v *Value, config *Config) bool {
func rewriteValueAMD64_OpLeq8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Leq8 x y)
	// cond:
	// result: (SETLE (CMPB x y))
	for {
@@ -5112,7 +5112,7 @@ func rewriteValueAMD64_OpLeq8U(v *Value, config *Config) bool {
func rewriteValueAMD64_OpLess16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Less16 x y)
	// cond:
	// result: (SETL (CMPW x y))
	for {
@@ -5148,7 +5148,7 @@ func rewriteValueAMD64_OpLess16U(v *Value, config *Config) bool {
func rewriteValueAMD64_OpLess32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Less32 x y)
	// cond:
	// result: (SETL (CMPL x y))
	for {
@@ -5202,7 +5202,7 @@ func rewriteValueAMD64_OpLess32U(v *Value, config *Config) bool {
func rewriteValueAMD64_OpLess64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Less64 x y)
	// cond:
	// result: (SETL (CMPQ x y))
	for {
@@ -5256,7 +5256,7 @@ func rewriteValueAMD64_OpLess64U(v *Value, config *Config) bool {
func rewriteValueAMD64_OpLess8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Less8 x y)
	// cond:
	// result: (SETL (CMPB x y))
	for {
@@ -5441,7 +5441,7 @@ func rewriteValueAMD64_OpLrot64(v *Value, config *Config) bool {
func rewriteValueAMD64_OpLrot8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Lrot8 <t> x [c])
	// cond:
	// result: (ROLBconst <t> [c&7] x)
	for {
@@ -5534,7 +5534,7 @@ func rewriteValueAMD64_OpLsh16x64(v *Value, config *Config) bool {
func rewriteValueAMD64_OpLsh16x8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Lsh16x8 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
@@ -5634,7 +5634,7 @@ func rewriteValueAMD64_OpLsh32x64(v *Value, config *Config) bool {
func rewriteValueAMD64_OpLsh32x8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Lsh32x8 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
@@ -5734,7 +5734,7 @@ func rewriteValueAMD64_OpLsh64x64(v *Value, config *Config) bool {
func rewriteValueAMD64_OpLsh64x8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Lsh64x8 <t> x y)
	// cond:
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
	for {
@@ -5834,7 +5834,7 @@ func rewriteValueAMD64_OpLsh8x64(v *Value, config *Config) bool {
func rewriteValueAMD64_OpLsh8x8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Lsh8x8 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
@@ -12009,9 +12009,9 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value, config *Config) bool {
func rewriteValueAMD64_OpMod16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Mod16 x y)
	// cond:
	// result: (MODW x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
@@ -12041,9 +12041,9 @@ func rewriteValueAMD64_OpMod16u(v *Value, config *Config) bool {
func rewriteValueAMD64_OpMod32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Mod32 x y)
	// cond:
	// result: (MODL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
@@ -12073,9 +12073,9 @@ func rewriteValueAMD64_OpMod32u(v *Value, config *Config) bool {
func rewriteValueAMD64_OpMod64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Mod64 x y)
	// cond:
	// result: (MODQ x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
@@ -12105,9 +12105,9 @@ func rewriteValueAMD64_OpMod64u(v *Value, config *Config) bool {
func rewriteValueAMD64_OpMod8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Mod8 x y)
	// cond:
	// result: (MODW (SignExt8to16 x) (SignExt8to16 y))
	for {
		x := v.Args[0]
		y := v.Args[1]
@@ -12125,7 +12125,7 @@ func rewriteValueAMD64_OpMod8(v *Value, config *Config) bool {
func rewriteValueAMD64_OpMod8u(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Mod8u x y)
	// cond:
	// result: (MODWU (ZeroExt8to16 x) (ZeroExt8to16 y))
	for {
@@ -12499,9 +12499,9 @@ func rewriteValueAMD64_OpMove(v *Value, config *Config) bool {
func rewriteValueAMD64_OpMul16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Mul16 x y)
	// cond:
	// result: (MULL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
@@ -12515,9 +12515,9 @@ func rewriteValueAMD64_OpMul16(v *Value, config *Config) bool {
func rewriteValueAMD64_OpMul32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Mul32 x y)
	// cond:
	// result: (MULL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
@@ -12547,9 +12547,9 @@ func rewriteValueAMD64_OpMul32F(v *Value, config *Config) bool {
func rewriteValueAMD64_OpMul64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Mul64 x y)
	// cond:
	// result: (MULQ x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
@@ -12579,9 +12579,9 @@ func rewriteValueAMD64_OpMul64F(v *Value, config *Config) bool {
func rewriteValueAMD64_OpMul8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Mul8 x y)
	// cond:
	// result: (MULL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
@@ -12667,7 +12667,7 @@ func rewriteValueAMD64_OpAMD64NOTQ(v *Value, config *Config) bool {
func rewriteValueAMD64_OpNeg16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Neg16 x)
	// cond:
	// result: (NEGL x)
	for {
@@ -12681,7 +12681,7 @@ func rewriteValueAMD64_OpNeg16(v *Value, config *Config) bool {
func rewriteValueAMD64_OpNeg32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Neg32 x)
	// cond:
	// result: (NEGL x)
	for {
@@ -12712,7 +12712,7 @@ func rewriteValueAMD64_OpNeg32F(v *Value, config *Config) bool {
func rewriteValueAMD64_OpNeg64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Neg64 x)
	// cond:
	// result: (NEGQ x)
	for {
@@ -12743,7 +12743,7 @@ func rewriteValueAMD64_OpNeg64F(v *Value, config *Config) bool {
func rewriteValueAMD64_OpNeg8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Neg8 x)
	// cond:
	// result: (NEGL x)
	for {
@@ -12757,7 +12757,7 @@ func rewriteValueAMD64_OpNeg8(v *Value, config *Config) bool {
func rewriteValueAMD64_OpNeq16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Neq16 x y)
	// cond:
	// result: (SETNE (CMPW x y))
	for {
@@ -12775,7 +12775,7 @@ func rewriteValueAMD64_OpNeq16(v *Value, config *Config) bool {
func rewriteValueAMD64_OpNeq32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Neq32 x y)
	// cond:
	// result: (SETNE (CMPL x y))
	for {
@@ -12811,7 +12811,7 @@ func rewriteValueAMD64_OpNeq32F(v *Value, config *Config) bool {
func rewriteValueAMD64_OpNeq64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Neq64 x y)
	// cond:
	// result: (SETNE (CMPQ x y))
	for {
@@ -12847,7 +12847,7 @@ func rewriteValueAMD64_OpNeq64F(v *Value, config *Config) bool {
func rewriteValueAMD64_OpNeq8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Neq8 x y)
	// cond:
	// result: (SETNE (CMPB x y))
	for {
@@ -12865,7 +12865,7 @@ func rewriteValueAMD64_OpNeq8(v *Value, config *Config) bool {
func rewriteValueAMD64_OpNeqB(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (NeqB x y)
	// cond:
	// result: (SETNE (CMPB x y))
	for {
@@ -13961,7 +13961,7 @@ func rewriteValueAMD64_OpOr64(v *Value, config *Config) bool {
func rewriteValueAMD64_OpOr8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Or8 x y)
	// cond:
	// result: (ORL x y)
	for {
@@ -14068,7 +14068,7 @@ func rewriteValueAMD64_OpRsh16Ux64(v *Value, config *Config) bool {
func rewriteValueAMD64_OpRsh16Ux8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Rsh16Ux8 <t> x y)
	// cond:
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
	for {
@@ -14177,7 +14177,7 @@ func rewriteValueAMD64_OpRsh16x64(v *Value, config *Config) bool {
func rewriteValueAMD64_OpRsh16x8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Rsh16x8 <t> x y)
	// cond:
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
	for {
@@ -14280,7 +14280,7 @@ func rewriteValueAMD64_OpRsh32Ux64(v *Value, config *Config) bool {
func rewriteValueAMD64_OpRsh32Ux8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Rsh32Ux8 <t> x y)
	// cond:
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
@@ -14389,7 +14389,7 @@ func rewriteValueAMD64_OpRsh32x64(v *Value, config *Config) bool {
func rewriteValueAMD64_OpRsh32x8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Rsh32x8 <t> x y)
	// cond:
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
	for {
@@ -14492,7 +14492,7 @@ func rewriteValueAMD64_OpRsh64Ux64(v *Value, config *Config) bool {
func rewriteValueAMD64_OpRsh64Ux8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Rsh64Ux8 <t> x y)
	// cond:
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
	for {
@@ -14601,7 +14601,7 @@ func rewriteValueAMD64_OpRsh64x64(v *Value, config *Config) bool {
func rewriteValueAMD64_OpRsh64x8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Rsh64x8 <t> x y)
	// cond:
	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64])))))
	for {
@@ -14704,7 +14704,7 @@ func rewriteValueAMD64_OpRsh8Ux64(v *Value, config *Config) bool {
func rewriteValueAMD64_OpRsh8Ux8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Rsh8Ux8 <t> x y)
	// cond:
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
	for {
@@ -14813,7 +14813,7 @@ func rewriteValueAMD64_OpRsh8x64(v *Value, config *Config) bool {
func rewriteValueAMD64_OpRsh8x8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Rsh8x8 <t> x y)
	// cond:
	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
	for {
@@ -16556,7 +16556,7 @@ func rewriteValueAMD64_OpSignExt32to64(v *Value, config *Config) bool {
func rewriteValueAMD64_OpSignExt8to16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (SignExt8to16 x)
	// cond:
	// result: (MOVBQSX x)
	for {
@@ -16570,7 +16570,7 @@ func rewriteValueAMD64_OpSignExt8to16(v *Value, config *Config) bool {
func rewriteValueAMD64_OpSignExt8to32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (SignExt8to32 x)
	// cond:
	// result: (MOVBQSX x)
	for {
@@ -16584,7 +16584,7 @@ func rewriteValueAMD64_OpSignExt8to32(v *Value, config *Config) bool {
func rewriteValueAMD64_OpSignExt8to64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (SignExt8to64 x)
	// cond:
	// result: (MOVBQSX x)
	for {
@@ -16737,9 +16737,9 @@ func rewriteValueAMD64_OpStore(v *Value, config *Config) bool {
func rewriteValueAMD64_OpSub16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Sub16 x y)
	// cond:
	// result: (SUBL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
@@ -16753,9 +16753,9 @@ func rewriteValueAMD64_OpSub16(v *Value, config *Config) bool {
func rewriteValueAMD64_OpSub32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Sub32 x y)
	// cond:
	// result: (SUBL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
@@ -16785,9 +16785,9 @@ func rewriteValueAMD64_OpSub32F(v *Value, config *Config) bool {
func rewriteValueAMD64_OpSub64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Sub64 x y)
	// cond:
	// result: (SUBQ x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
@@ -16817,9 +16817,9 @@ func rewriteValueAMD64_OpSub64F(v *Value, config *Config) bool {
func rewriteValueAMD64_OpSub8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Sub8 x y)
	// cond:
	// result: (SUBL x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
@@ -16835,7 +16835,7 @@ func rewriteValueAMD64_OpSubPtr(v *Value, config *Config) bool {
	_ = b
	// match: (SubPtr x y)
	// cond:
	// result: (SUBQ x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
@@ -16849,7 +16849,7 @@ func rewriteValueAMD64_OpSubPtr(v *Value, config *Config) bool {
func rewriteValueAMD64_OpTrunc16to8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Trunc16to8 x)
	// cond:
	// result: x
	for {
@@ -16879,7 +16879,7 @@ func rewriteValueAMD64_OpTrunc32to16(v *Value, config *Config) bool {
func rewriteValueAMD64_OpTrunc32to8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Trunc32to8 x)
	// cond:
	// result: x
	for {
@@ -16924,7 +16924,7 @@ func rewriteValueAMD64_OpTrunc64to32(v *Value, config *Config) bool {
func rewriteValueAMD64_OpTrunc64to8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Trunc64to8 x)
	// cond:
	// result: x
	for {
@@ -17152,7 +17152,7 @@ func rewriteValueAMD64_OpXor64(v *Value, config *Config) bool {
func rewriteValueAMD64_OpXor8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Xor8 x y)
	// cond:
	// result: (XORL x y)
	for {
@@ -17524,7 +17524,7 @@ func rewriteValueAMD64_OpZeroExt32to64(v *Value, config *Config) bool {
func rewriteValueAMD64_OpZeroExt8to16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ZeroExt8to16 x)
	// cond:
	// result: (MOVBQZX x)
	for {
@@ -17538,7 +17538,7 @@ func rewriteValueAMD64_OpZeroExt8to16(v *Value, config *Config) bool {
func rewriteValueAMD64_OpZeroExt8to32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ZeroExt8to32 x)
	// cond:
	// result: (MOVBQZX x)
	for {
@@ -17552,7 +17552,7 @@ func rewriteValueAMD64_OpZeroExt8to32(v *Value, config *Config) bool {
func rewriteValueAMD64_OpZeroExt8to64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ZeroExt8to64 x)
	// cond:
	// result: (MOVBQZX x)
	for {
......
...@@ -358,7 +358,7 @@ func rewriteValuegeneric(v *Value, config *Config) bool { ...@@ -358,7 +358,7 @@ func rewriteValuegeneric(v *Value, config *Config) bool {
func rewriteValuegeneric_OpAdd16(v *Value, config *Config) bool { func rewriteValuegeneric_OpAdd16(v *Value, config *Config) bool {
b := v.Block b := v.Block
_ = b _ = b
// match: (Add16 (Const16 [c]) (Const16 [d])) // match: (Add16 (Const16 [c]) (Const16 [d]))
// cond: // cond:
// result: (Const16 [int64(int16(c+d))]) // result: (Const16 [int64(int16(c+d))])
for { for {
...@@ -419,7 +419,7 @@ func rewriteValuegeneric_OpAdd16(v *Value, config *Config) bool { ...@@ -419,7 +419,7 @@ func rewriteValuegeneric_OpAdd16(v *Value, config *Config) bool {
func rewriteValuegeneric_OpAdd32(v *Value, config *Config) bool { func rewriteValuegeneric_OpAdd32(v *Value, config *Config) bool {
b := v.Block b := v.Block
_ = b _ = b
// match: (Add32 (Const32 [c]) (Const32 [d])) // match: (Add32 (Const32 [c]) (Const32 [d]))
// cond: // cond:
// result: (Const32 [int64(int32(c+d))]) // result: (Const32 [int64(int32(c+d))])
for { for {
...@@ -503,7 +503,7 @@ func rewriteValuegeneric_OpAdd32F(v *Value, config *Config) bool { ...@@ -503,7 +503,7 @@ func rewriteValuegeneric_OpAdd32F(v *Value, config *Config) bool {
func rewriteValuegeneric_OpAdd64(v *Value, config *Config) bool { func rewriteValuegeneric_OpAdd64(v *Value, config *Config) bool {
b := v.Block b := v.Block
_ = b _ = b
// match: (Add64 (Const64 [c]) (Const64 [d])) // match: (Add64 (Const64 [c]) (Const64 [d]))
// cond: // cond:
// result: (Const64 [c+d]) // result: (Const64 [c+d])
for { for {
...@@ -587,9 +587,9 @@ func rewriteValuegeneric_OpAdd64F(v *Value, config *Config) bool { ...@@ -587,9 +587,9 @@ func rewriteValuegeneric_OpAdd64F(v *Value, config *Config) bool {
func rewriteValuegeneric_OpAdd8(v *Value, config *Config) bool { func rewriteValuegeneric_OpAdd8(v *Value, config *Config) bool {
b := v.Block b := v.Block
_ = b _ = b
// match: (Add8 (Const8 [c]) (Const8 [d])) // match: (Add8 (Const8 [c]) (Const8 [d]))
// cond: // cond:
// result: (Const8 [int64(int8(c+d))]) // result: (Const8 [int64(int8(c+d))])
for { for {
v_0 := v.Args[0] v_0 := v.Args[0]
if v_0.Op != OpConst8 { if v_0.Op != OpConst8 {
...@@ -605,9 +605,9 @@ func rewriteValuegeneric_OpAdd8(v *Value, config *Config) bool { ...@@ -605,9 +605,9 @@ func rewriteValuegeneric_OpAdd8(v *Value, config *Config) bool {
v.AuxInt = int64(int8(c + d)) v.AuxInt = int64(int8(c + d))
return true return true
} }
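The truncation in the rule above is what makes 8-bit constant folding correct: AuxInt is an int64, so the sum is passed through int8 to get two's-complement wraparound before widening back. A standalone sketch of that arithmetic (package name and values are illustrative, not from this CL):

package main

import "fmt"

// fold8 mirrors the rule's result expression int64(int8(c + d)):
// add in int64, wrap to 8 bits, then widen back for the AuxInt field.
func fold8(c, d int64) int64 {
	return int64(int8(c + d))
}

func main() {
	fmt.Println(fold8(100, 100)) // -56: 200 wraps around in int8
}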
// match: (Add8 x (Const8 <t> [c]))
// cond: x.Op != OpConst8
// result: (Add8 (Const8 <t> [c]) x)
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -626,7 +626,7 @@ func rewriteValuegeneric_OpAdd8(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
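The x.Op != OpConst8 condition above does double duty: it canonicalizes constants into the first argument so later rules need to match only one operand order, and it stops the rewrite from firing again once the swap has happened. A toy sketch of the same idea on a hypothetical expression type (expr and canonicalize are illustrative, not compiler types):

package main

import "fmt"

type expr struct {
	op   string // "add", "const", "var"
	args []*expr
	val  int64
}

// canonicalize swaps a constant into the first slot exactly once.
// The guard on the left operand is what keeps the rewrite from looping.
func canonicalize(e *expr) bool {
	if e.op == "add" && e.args[1].op == "const" && e.args[0].op != "const" {
		e.args[0], e.args[1] = e.args[1], e.args[0]
		return true
	}
	return false
}

func main() {
	x := &expr{op: "var"}
	c := &expr{op: "const", val: 3}
	e := &expr{op: "add", args: []*expr{x, c}}
	fmt.Println(canonicalize(e), e.args[0].op) // true const
	fmt.Println(canonicalize(e))               // false: already canonical
}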
// match: (Add8 (Const8 [0]) x)
// cond:
// result: x
for {
@@ -1151,9 +1151,9 @@ func rewriteValuegeneric_OpAnd64(v *Value, config *Config) bool {
func rewriteValuegeneric_OpAnd8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (And8 x (Const8 <t> [c]))
// cond: x.Op != OpConst8
// result: (And8 (Const8 <t> [c]) x)
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -1172,7 +1172,7 @@ func rewriteValuegeneric_OpAnd8(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
// match: (And8 x x)
// cond:
// result: x
for {
@@ -1185,7 +1185,7 @@ func rewriteValuegeneric_OpAnd8(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
// match: (And8 (Const8 [-1]) x)
// cond:
// result: x
for {
@@ -1202,9 +1202,9 @@ func rewriteValuegeneric_OpAnd8(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
// match: (And8 (Const8 [0]) _)
// cond:
// result: (Const8 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst8 {
@@ -1590,7 +1590,7 @@ func rewriteValuegeneric_OpCom64(v *Value, config *Config) bool {
func rewriteValuegeneric_OpCom8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Com8 (Com8 x))
// cond:
// result: x
for {
@@ -2340,7 +2340,7 @@ func rewriteValuegeneric_OpEq64(v *Value, config *Config) bool {
func rewriteValuegeneric_OpEq8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Eq8 x x)
// cond:
// result: (ConstBool [1])
for {
@@ -2352,9 +2352,9 @@ func rewriteValuegeneric_OpEq8(v *Value, config *Config) bool {
v.AuxInt = 1
return true
}
// match: (Eq8 (Const8 <t> [c]) (Add8 (Const8 <t> [d]) x))
// cond:
// result: (Eq8 (Const8 <t> [int64(int8(c-d))]) x)
for {
v_0 := v.Args[0]
if v_0.Op != OpConst8 {
@@ -2382,9 +2382,9 @@ func rewriteValuegeneric_OpEq8(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
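The rewrite above turns c == d+x into c-d == x; with 8-bit wraparound the two comparisons agree for every x, which a quick exhaustive check can confirm (a standalone sketch, values illustrative):

package main

import "fmt"

func main() {
	// (Eq8 c (Add8 d x)) -> (Eq8 c-d x): with 8-bit wraparound the two
	// comparisons agree for every x, even when c-d underflows.
	c, d := int8(-120), int8(100)
	for i := 0; i < 256; i++ {
		x := int8(i)
		if (c == d+x) != (c-d == x) {
			fmt.Println("mismatch at", x)
			return
		}
	}
	fmt.Println("c == d+x and c-d == x agree for all int8 x")
}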
// match: (Eq8 x (Const8 <t> [c]))
// cond: x.Op != OpConst8
// result: (Eq8 (Const8 <t> [c]) x)
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -2925,8 +2925,8 @@ func rewriteValuegeneric_OpGreater8U(v *Value, config *Config) bool {
func rewriteValuegeneric_OpIsInBounds(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (IsInBounds (ZeroExt8to32 _) (Const32 [c]))
// cond: (1 << 8) <= c
// result: (ConstBool [1])
for {
v_0 := v.Args[0]
@@ -2945,8 +2945,8 @@ func rewriteValuegeneric_OpIsInBounds(v *Value, config *Config) bool {
v.AuxInt = 1
return true
}
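The bounds-check rule above rests on a range fact: a zero-extended byte is at most 255, so any constant bound of at least 1<<8 makes the check trivially true. A minimal illustration (buffer name and values are illustrative):

package main

import "fmt"

func main() {
	// A zero-extended byte is at most 255, so an index built from it is
	// always within an array of length 256 or more.
	var buf [256]byte
	var b byte = 0xFF
	i := uint32(b) // the (ZeroExt8to32 _) of the rule
	fmt.Println(int(i) < len(buf)) // true for every possible b
}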
// match: (IsInBounds (ZeroExt8to64 _) (Const64 [c]))
// cond: (1 << 8) <= c
// result: (ConstBool [1])
for {
v_0 := v.Args[0]
@@ -3808,44 +3808,6 @@ func rewriteValuegeneric_OpLoad(v *Value, config *Config) bool {
func rewriteValuegeneric_OpLsh16x16(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Lsh16x16 (Rsh16Ux16 (Lsh16x16 x (Const16 [c1])) (Const16 [c2])) (Const16 [c3]))
// cond: uint16(c1) >= uint16(c2) && uint16(c3) >= uint16(c2)
// result: (Lsh16x16 x (Const16 <config.fe.TypeUInt16()> [int64(int16(c1-c2+c3))]))
for {
v_0 := v.Args[0]
if v_0.Op != OpRsh16Ux16 {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpLsh16x16 {
break
}
x := v_0_0.Args[0]
v_0_0_1 := v_0_0.Args[1]
if v_0_0_1.Op != OpConst16 {
break
}
c1 := v_0_0_1.AuxInt
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpConst16 {
break
}
c2 := v_0_1.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpConst16 {
break
}
c3 := v_1.AuxInt
if !(uint16(c1) >= uint16(c2) && uint16(c3) >= uint16(c2)) {
break
}
v.reset(OpLsh16x16)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpConst16, config.fe.TypeUInt16())
v0.AuxInt = int64(int16(c1 - c2 + c3))
v.AddArg(v0)
return true
}
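The block above (removed here and re-added below in its Lsh16x64 form) implements the shift-sandwich identity ((x << c1) >> c2) << c3 == x << (c1-c2+c3), valid for unsigned 16-bit values whenever c1 >= c2 and c3 >= c2. A standalone exhaustive check of that identity (not part of the generated file):

package main

import "fmt"

func main() {
	// ((x << c1) >> c2) << c3 == x << (c1-c2+c3) for uint16 x
	// whenever c1 >= c2 and c3 >= c2 (all shifts unsigned).
	for c1 := uint(0); c1 < 16; c1++ {
		for c2 := uint(0); c2 <= c1; c2++ {
			for c3 := c2; c3 < 16; c3++ {
				for i := 0; i < 1<<16; i++ {
					x := uint16(i)
					if ((x<<c1)>>c2)<<c3 != x<<(c1-c2+c3) {
						fmt.Println("counterexample:", x, c1, c2, c3)
						return
					}
				}
			}
		}
	}
	fmt.Println("identity holds for all uint16 values")
}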
// match: (Lsh16x16 <t> x (Const16 [c]))
// cond:
// result: (Lsh16x64 x (Const64 <t> [int64(uint16(c))]))
@@ -3910,21 +3872,6 @@ func rewriteValuegeneric_OpLsh16x64(v *Value, config *Config) bool {
v.AuxInt = int64(int16(c) << uint64(d))
return true
}
// match: (Lsh16x64 (Const16 [0]) _)
// cond:
// result: (Const16 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst16 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst16)
v.AuxInt = 0
return true
}
// match: (Lsh16x64 x (Const64 [0]))
// cond:
// result: x
@@ -3942,6 +3889,21 @@ func rewriteValuegeneric_OpLsh16x64(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
// match: (Lsh16x64 (Const64 [0]) _)
// cond:
// result: (Const16 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst16)
v.AuxInt = 0
return true
}
// match: (Lsh16x64 _ (Const64 [c]))
// cond: uint64(c) >= 16
// result: (Const16 [0])
@@ -3988,12 +3950,50 @@ func rewriteValuegeneric_OpLsh16x64(v *Value, config *Config) bool {
v.AddArg(v0)
return true
}
// match: (Lsh16x64 (Rsh16Ux64 (Lsh16x64 x (Const16 [c1])) (Const16 [c2])) (Const16 [c3]))
// cond: uint16(c1) >= uint16(c2) && uint16(c3) >= uint16(c2)
// result: (Lsh16x64 x (Const16 <config.fe.TypeUInt16()> [int64(int16(c1-c2+c3))]))
for {
v_0 := v.Args[0]
if v_0.Op != OpRsh16Ux64 {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpLsh16x64 {
break
}
x := v_0_0.Args[0]
v_0_0_1 := v_0_0.Args[1]
if v_0_0_1.Op != OpConst16 {
break
}
c1 := v_0_0_1.AuxInt
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpConst16 {
break
}
c2 := v_0_1.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpConst16 {
break
}
c3 := v_1.AuxInt
if !(uint16(c1) >= uint16(c2) && uint16(c3) >= uint16(c2)) {
break
}
v.reset(OpLsh16x64)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpConst16, config.fe.TypeUInt16())
v0.AuxInt = int64(int16(c1 - c2 + c3))
v.AddArg(v0)
return true
}
return false
}
func rewriteValuegeneric_OpLsh16x8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Lsh16x8 <t> x (Const8 [c]))
// cond:
// result: (Lsh16x64 x (Const64 <t> [int64(uint8(c))]))
for {
@@ -4039,44 +4039,6 @@ func rewriteValuegeneric_OpLsh32x16(v *Value, config *Config) bool {
func rewriteValuegeneric_OpLsh32x32(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Lsh32x32 (Rsh32Ux32 (Lsh32x32 x (Const32 [c1])) (Const32 [c2])) (Const32 [c3]))
// cond: uint32(c1) >= uint32(c2) && uint32(c3) >= uint32(c2)
// result: (Lsh32x32 x (Const32 <config.fe.TypeUInt32()> [int64(int32(c1-c2+c3))]))
for {
v_0 := v.Args[0]
if v_0.Op != OpRsh32Ux32 {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpLsh32x32 {
break
}
x := v_0_0.Args[0]
v_0_0_1 := v_0_0.Args[1]
if v_0_0_1.Op != OpConst32 {
break
}
c1 := v_0_0_1.AuxInt
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpConst32 {
break
}
c2 := v_0_1.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpConst32 {
break
}
c3 := v_1.AuxInt
if !(uint32(c1) >= uint32(c2) && uint32(c3) >= uint32(c2)) {
break
}
v.reset(OpLsh32x32)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpConst32, config.fe.TypeUInt32())
v0.AuxInt = int64(int32(c1 - c2 + c3))
v.AddArg(v0)
return true
}
// match: (Lsh32x32 <t> x (Const32 [c]))
// cond:
// result: (Lsh32x64 x (Const64 <t> [int64(uint32(c))]))
@@ -4118,21 +4080,6 @@ func rewriteValuegeneric_OpLsh32x64(v *Value, config *Config) bool {
v.AuxInt = int64(int32(c) << uint64(d))
return true
}
// match: (Lsh32x64 (Const32 [0]) _)
// cond:
// result: (Const32 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst32 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst32)
v.AuxInt = 0
return true
}
// match: (Lsh32x64 x (Const64 [0]))
// cond:
// result: x
@@ -4150,6 +4097,21 @@ func rewriteValuegeneric_OpLsh32x64(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
// match: (Lsh32x64 (Const64 [0]) _)
// cond:
// result: (Const32 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst32)
v.AuxInt = 0
return true
}
// match: (Lsh32x64 _ (Const64 [c]))
// cond: uint64(c) >= 32
// result: (Const32 [0])
@@ -4196,12 +4158,50 @@ func rewriteValuegeneric_OpLsh32x64(v *Value, config *Config) bool {
v.AddArg(v0)
return true
}
// match: (Lsh32x64 (Rsh32Ux64 (Lsh32x64 x (Const32 [c1])) (Const32 [c2])) (Const32 [c3]))
// cond: uint32(c1) >= uint32(c2) && uint32(c3) >= uint32(c2)
// result: (Lsh32x64 x (Const32 <config.fe.TypeUInt32()> [int64(int32(c1-c2+c3))]))
for {
v_0 := v.Args[0]
if v_0.Op != OpRsh32Ux64 {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpLsh32x64 {
break
}
x := v_0_0.Args[0]
v_0_0_1 := v_0_0.Args[1]
if v_0_0_1.Op != OpConst32 {
break
}
c1 := v_0_0_1.AuxInt
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpConst32 {
break
}
c2 := v_0_1.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpConst32 {
break
}
c3 := v_1.AuxInt
if !(uint32(c1) >= uint32(c2) && uint32(c3) >= uint32(c2)) {
break
}
v.reset(OpLsh32x64)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpConst32, config.fe.TypeUInt32())
v0.AuxInt = int64(int32(c1 - c2 + c3))
v.AddArg(v0)
return true
}
return false
}
func rewriteValuegeneric_OpLsh32x8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Lsh32x8 <t> x (Const8 [c]))
// cond:
// result: (Lsh32x64 x (Const64 <t> [int64(uint8(c))]))
for {
@@ -4242,21 +4242,6 @@ func rewriteValuegeneric_OpLsh64x16(v *Value, config *Config) bool {
v.AddArg(v0)
return true
}
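Rules like the Lsh32x8 one above funnel every constant-count shift into a single 64-bit form, and the int64(uint8(c)) conversion zero-extends the count so a negative-looking AuxInt cannot turn into a negative shift. A one-line illustration of why the zero-extension matters (sketch, values illustrative):

package main

import "fmt"

func main() {
	// int64(uint8(c)) from the rule above: an AuxInt of -1 stored for a
	// Const8 shift count means 255, not a negative shift amount.
	c := int64(-1)
	fmt.Println(int64(uint8(c))) // 255
}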
// match: (Lsh64x16 (Const64 [0]) _)
// cond:
// result: (Const64 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst64)
v.AuxInt = 0
return true
}
return false
}
func rewriteValuegeneric_OpLsh64x32(v *Value, config *Config) bool {
@@ -4280,21 +4265,6 @@ func rewriteValuegeneric_OpLsh64x32(v *Value, config *Config) bool {
v.AddArg(v0)
return true
}
// match: (Lsh64x32 (Const64 [0]) _)
// cond:
// result: (Const64 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst64)
v.AuxInt = 0
return true
}
return false
}
func rewriteValuegeneric_OpLsh64x64(v *Value, config *Config) bool {
@@ -4318,6 +4288,23 @@ func rewriteValuegeneric_OpLsh64x64(v *Value, config *Config) bool {
v.AuxInt = c << uint64(d)
return true
}
// match: (Lsh64x64 x (Const64 [0]))
// cond:
// result: x
for {
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpConst64 {
break
}
if v_1.AuxInt != 0 {
break
}
v.reset(OpCopy)
v.Type = x.Type
v.AddArg(x)
return true
}
// match: (Lsh64x64 (Const64 [0]) _)
// cond:
// result: (Const64 [0])
@@ -4333,119 +4320,87 @@ func rewriteValuegeneric_OpLsh64x64(v *Value, config *Config) bool {
v.AuxInt = 0
return true
}
// match: (Lsh64x64 (Rsh64Ux64 (Lsh64x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
// cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2)
// result: (Lsh64x64 x (Const64 <config.fe.TypeUInt64()> [c1-c2+c3]))
for {
v_0 := v.Args[0]
if v_0.Op != OpRsh64Ux64 {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpLsh64x64 {
break
}
x := v_0_0.Args[0]
v_0_0_1 := v_0_0.Args[1]
if v_0_0_1.Op != OpConst64 {
break
}
c1 := v_0_0_1.AuxInt
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpConst64 {
break
}
c2 := v_0_1.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpConst64 {
break
}
c3 := v_1.AuxInt
if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2)) {
break
}
v.reset(OpLsh64x64)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
v0.AuxInt = c1 - c2 + c3
v.AddArg(v0)
return true
}
// match: (Lsh64x64 x (Const64 [0]))
// cond:
// result: x
for {
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpConst64 {
break
}
if v_1.AuxInt != 0 {
break
}
v.reset(OpCopy)
v.Type = x.Type
v.AddArg(x)
return true
}
// match: (Lsh64x64 (Const64 [0]) _)
// cond:
// result: (Const64 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst64)
v.AuxInt = 0
return true
}
// match: (Lsh64x64 _ (Const64 [c]))
// cond: uint64(c) >= 64
// result: (Const64 [0])
for {
v_1 := v.Args[1]
if v_1.Op != OpConst64 {
break
}
c := v_1.AuxInt
if !(uint64(c) >= 64) {
break
}
v.reset(OpConst64)
v.AuxInt = 0
return true
}
// match: (Lsh64x64 <t> (Lsh64x64 x (Const64 [c])) (Const64 [d]))
// cond: !uaddOvf(c,d)
// result: (Lsh64x64 x (Const64 <t> [c+d]))
for {
t := v.Type
v_0 := v.Args[0]
if v_0.Op != OpLsh64x64 {
break
}
x := v_0.Args[0]
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpConst64 {
break
}
c := v_0_1.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpConst64 {
break
}
d := v_1.AuxInt
if !(!uaddOvf(c, d)) {
break
}
v.reset(OpLsh64x64)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpConst64, t)
v0.AuxInt = c + d
v.AddArg(v0)
return true
}
// match: (Lsh64x64 _ (Const64 [c]))
// cond: uint64(c) >= 64
// result: (Const64 [0])
for {
v_1 := v.Args[1]
if v_1.Op != OpConst64 {
break
}
c := v_1.AuxInt
if !(uint64(c) >= 64) {
break
}
v.reset(OpConst64)
v.AuxInt = 0
return true
}
// match: (Lsh64x64 <t> (Lsh64x64 x (Const64 [c])) (Const64 [d]))
// cond: !uaddOvf(c,d)
// result: (Lsh64x64 x (Const64 <t> [c+d]))
for {
t := v.Type
v_0 := v.Args[0]
if v_0.Op != OpLsh64x64 {
break
}
x := v_0.Args[0]
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpConst64 {
break
}
c := v_0_1.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpConst64 {
break
}
d := v_1.AuxInt
if !(!uaddOvf(c, d)) {
break
}
v.reset(OpLsh64x64)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpConst64, t)
v0.AuxInt = c + d
v.AddArg(v0)
return true
}
// match: (Lsh64x64 (Rsh64Ux64 (Lsh64x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
// cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2)
// result: (Lsh64x64 x (Const64 <config.fe.TypeUInt64()> [c1-c2+c3]))
for {
v_0 := v.Args[0]
if v_0.Op != OpRsh64Ux64 {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpLsh64x64 {
break
}
x := v_0_0.Args[0]
v_0_0_1 := v_0_0.Args[1]
if v_0_0_1.Op != OpConst64 {
break
}
c1 := v_0_0_1.AuxInt
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpConst64 {
break
}
c2 := v_0_1.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpConst64 {
break
}
c3 := v_1.AuxInt
if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2)) {
break
}
v.reset(OpLsh64x64)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
v0.AuxInt = c1 - c2 + c3
v.AddArg(v0)
return true
}
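Two of the rules above deserve a note: shifting by 64 or more zeroes a 64-bit value, and nested constant shifts combine by adding their counts, but only when c+d cannot wrap around uint64, which is what !uaddOvf(c,d) guards. A sketch of an overflow test in the same spirit as the helper used here (this definition is illustrative, not quoted from the compiler):

package main

import "fmt"

// uaddOvf reports whether c+d overflows when the AuxInt values are
// treated as unsigned; this mirrors the helper the condition relies on.
func uaddOvf(c, d int64) bool {
	return uint64(c)+uint64(d) < uint64(c)
}

func main() {
	fmt.Println(uaddOvf(1, 2))  // false: safe to merge the shifts
	fmt.Println(uaddOvf(-1, 1)) // true: 0xFFFF... + 1 wraps to 0
}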
@@ -4454,7 +4409,7 @@ func rewriteValuegeneric_OpLsh64x64(v *Value, config *Config) bool {
func rewriteValuegeneric_OpLsh64x8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Lsh64x8 <t> x (Const8 [c]))
// cond:
// result: (Lsh64x64 x (Const64 <t> [int64(uint8(c))]))
for {
@@ -4472,21 +4427,6 @@ func rewriteValuegeneric_OpLsh64x8(v *Value, config *Config) bool {
v.AddArg(v0)
return true
}
// match: (Lsh64x8 (Const64 [0]) _)
// cond:
// result: (Const64 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst64)
v.AuxInt = 0
return true
}
return false
}
func rewriteValuegeneric_OpLsh8x16(v *Value, config *Config) bool {
@@ -4556,21 +4496,6 @@ func rewriteValuegeneric_OpLsh8x64(v *Value, config *Config) bool {
v.AuxInt = int64(int8(c) << uint64(d))
return true
}
// match: (Lsh8x64 (Const8 [0]) _)
// cond:
// result: (Const8 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst8 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst8)
v.AuxInt = 0
return true
}
// match: (Lsh8x64 x (Const64 [0]))
// cond:
// result: x
@@ -4588,9 +4513,24 @@ func rewriteValuegeneric_OpLsh8x64(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
// match: (Lsh8x64 (Const64 [0]) _)
// cond:
// result: (Const8 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst8)
v.AuxInt = 0
return true
}
// match: (Lsh8x64 _ (Const64 [c]))
// cond: uint64(c) >= 8
// result: (Const8 [0])
for {
v_1 := v.Args[1]
if v_1.Op != OpConst64 {
@@ -4634,21 +4574,16 @@ func rewriteValuegeneric_OpLsh8x64(v *Value, config *Config) bool {
v.AddArg(v0)
return true
}
return false
}
func rewriteValuegeneric_OpLsh8x8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Lsh8x8 (Rsh8Ux8 (Lsh8x8 x (Const8 [c1])) (Const8 [c2])) (Const8 [c3]))
// cond: uint8(c1) >= uint8(c2) && uint8(c3) >= uint8(c2)
// result: (Lsh8x8 x (Const8 <config.fe.TypeUInt8()> [int64(int8(c1-c2+c3))]))
for {
v_0 := v.Args[0]
if v_0.Op != OpRsh8Ux8 {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpLsh8x8 {
break
}
x := v_0_0.Args[0]
// match: (Lsh8x64 (Rsh8Ux64 (Lsh8x64 x (Const8 [c1])) (Const8 [c2])) (Const8 [c3]))
// cond: uint8(c1) >= uint8(c2) && uint8(c3) >= uint8(c2)
// result: (Lsh8x64 x (Const8 <config.fe.TypeUInt8()> [int64(int8(c1-c2+c3))]))
for {
v_0 := v.Args[0]
if v_0.Op != OpRsh8Ux64 {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpLsh8x64 {
break
}
x := v_0_0.Args[0]
@@ -4670,14 +4605,19 @@ func rewriteValuegeneric_OpLsh8x8(v *Value, config *Config) bool {
if !(uint8(c1) >= uint8(c2) && uint8(c3) >= uint8(c2)) {
break
}
v.reset(OpLsh8x8)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpConst8, config.fe.TypeUInt8())
v0.AuxInt = int64(int8(c1 - c2 + c3))
v.AddArg(v0)
return true
}
// match: (Lsh8x8 <t> x (Const8 [c]))
if !(uint8(c1) >= uint8(c2) && uint8(c3) >= uint8(c2)) {
break
}
v.reset(OpLsh8x64)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpConst8, config.fe.TypeUInt8())
v0.AuxInt = int64(int8(c1 - c2 + c3))
v.AddArg(v0)
return true
}
return false
}
func rewriteValuegeneric_OpLsh8x8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Lsh8x8 <t> x (Const8 [c]))
// cond:
// result: (Lsh8x64 x (Const64 <t> [int64(uint8(c))]))
for {
@@ -4935,9 +4875,9 @@ func rewriteValuegeneric_OpMod64u(v *Value, config *Config) bool {
func rewriteValuegeneric_OpMod8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Mod8 (Const8 [c]) (Const8 [d]))
// cond: d != 0
// result: (Const8 [int64(int8(c % d))])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst8 {
@@ -4961,9 +4901,9 @@ func rewriteValuegeneric_OpMod8(v *Value, config *Config) bool {
func rewriteValuegeneric_OpMod8u(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Mod8u (Const8 [c]) (Const8 [d]))
// cond: d != 0
// result: (Const8 [int64(uint8(c) % uint8(d))])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst8 {
@@ -4987,7 +4927,7 @@ func rewriteValuegeneric_OpMod8u(v *Value, config *Config) bool {
func rewriteValuegeneric_OpMul16(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Mul16 (Const16 [c]) (Const16 [d]))
// cond:
// result: (Const16 [int64(int16(c*d))])
for {
@@ -5046,7 +4986,7 @@ func rewriteValuegeneric_OpMul16(v *Value, config *Config) bool {
func rewriteValuegeneric_OpMul32(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Mul32 (Const32 [c]) (Const32 [d]))
// cond:
// result: (Const32 [int64(int32(c*d))])
for {
@@ -5166,7 +5106,7 @@ func rewriteValuegeneric_OpMul32F(v *Value, config *Config) bool {
func rewriteValuegeneric_OpMul64(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Mul64 (Const64 [c]) (Const64 [d]))
// cond:
// result: (Const64 [c*d])
for {
@@ -5286,9 +5226,9 @@ func rewriteValuegeneric_OpMul64F(v *Value, config *Config) bool {
func rewriteValuegeneric_OpMul8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Mul8 (Const8 [c]) (Const8 [d]))
// cond:
// result: (Const8 [int64(int8(c*d))])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst8 {
@@ -5304,9 +5244,9 @@ func rewriteValuegeneric_OpMul8(v *Value, config *Config) bool {
v.AuxInt = int64(int8(c * d))
return true
}
// match: (Mul8 x (Const8 <t> [c]))
// cond: x.Op != OpConst8
// result: (Mul8 (Const8 <t> [c]) x)
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -5325,9 +5265,9 @@ func rewriteValuegeneric_OpMul8(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
// match: (Mul8 (Const8 [0]) _)
// cond:
// result: (Const8 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst8 {
@@ -5405,9 +5345,9 @@ func rewriteValuegeneric_OpNeg64(v *Value, config *Config) bool {
func rewriteValuegeneric_OpNeg8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Neg8 (Sub8 x y))
// cond:
// result: (Sub8 y x)
for {
v_0 := v.Args[0]
if v_0.Op != OpSub8 {
@@ -5683,7 +5623,7 @@ func rewriteValuegeneric_OpNeq64(v *Value, config *Config) bool {
func rewriteValuegeneric_OpNeq8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Neq8 x x)
// cond:
// result: (ConstBool [0])
for {
@@ -5695,7 +5635,7 @@ func rewriteValuegeneric_OpNeq8(v *Value, config *Config) bool {
v.AuxInt = 0
return true
}
// match: (Neq8 (Const8 <t> [c]) (Add8 (Const8 <t> [d]) x))
// cond:
// result: (Neq8 (Const8 <t> [int64(int8(c-d))]) x)
for {
@@ -5725,9 +5665,9 @@ func rewriteValuegeneric_OpNeq8(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
// match: (Neq8 x (Const8 <t> [c]))
// cond: x.Op != OpConst8
// result: (Neq8 (Const8 <t> [c]) x)
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -6362,9 +6302,9 @@ func rewriteValuegeneric_OpOr64(v *Value, config *Config) bool {
func rewriteValuegeneric_OpOr8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Or8 x (Const8 <t> [c]))
// cond: x.Op != OpConst8
// result: (Or8 (Const8 <t> [c]) x)
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -6383,7 +6323,7 @@ func rewriteValuegeneric_OpOr8(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
// match: (Or8 x x)
// cond:
// result: x
for {
@@ -6396,7 +6336,7 @@ func rewriteValuegeneric_OpOr8(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
// match: (Or8 (Const8 [0]) x)
// cond:
// result: x
for {
@@ -6413,9 +6353,9 @@ func rewriteValuegeneric_OpOr8(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
// match: (Or8 (Const8 [-1]) _)
// cond:
// result: (Const8 [-1])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst8 {
@@ -6505,9 +6445,9 @@ func rewriteValuegeneric_OpOr8(v *Value, config *Config) bool {
func rewriteValuegeneric_OpPhi(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Phi (Const8 [c]) (Const8 [c]))
// cond:
// result: (Const8 [c])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst8 {
@@ -6647,44 +6587,6 @@ func rewriteValuegeneric_OpPtrIndex(v *Value, config *Config) bool {
func rewriteValuegeneric_OpRsh16Ux16(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Rsh16Ux16 (Lsh16x16 (Rsh16Ux16 x (Const16 [c1])) (Const16 [c2])) (Const16 [c3]))
// cond: uint16(c1) >= uint16(c2) && uint16(c3) >= uint16(c2)
// result: (Rsh16Ux16 x (Const16 <config.fe.TypeUInt16()> [int64(int16(c1-c2+c3))]))
for {
v_0 := v.Args[0]
if v_0.Op != OpLsh16x16 {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpRsh16Ux16 {
break
}
x := v_0_0.Args[0]
v_0_0_1 := v_0_0.Args[1]
if v_0_0_1.Op != OpConst16 {
break
}
c1 := v_0_0_1.AuxInt
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpConst16 {
break
}
c2 := v_0_1.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpConst16 {
break
}
c3 := v_1.AuxInt
if !(uint16(c1) >= uint16(c2) && uint16(c3) >= uint16(c2)) {
break
}
v.reset(OpRsh16Ux16)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpConst16, config.fe.TypeUInt16())
v0.AuxInt = int64(int16(c1 - c2 + c3))
v.AddArg(v0)
return true
}
// match: (Rsh16Ux16 <t> x (Const16 [c]))
// cond:
// result: (Rsh16Ux64 x (Const64 <t> [int64(uint16(c))]))
@@ -6749,21 +6651,6 @@ func rewriteValuegeneric_OpRsh16Ux64(v *Value, config *Config) bool {
v.AuxInt = int64(int16(uint16(c) >> uint64(d)))
return true
}
// match: (Rsh16Ux64 (Const16 [0]) _)
// cond:
// result: (Const16 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst16 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst16)
v.AuxInt = 0
return true
}
// match: (Rsh16Ux64 x (Const64 [0]))
// cond:
// result: x
@@ -6781,6 +6668,21 @@ func rewriteValuegeneric_OpRsh16Ux64(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
// match: (Rsh16Ux64 (Const64 [0]) _)
// cond:
// result: (Const16 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst16)
v.AuxInt = 0
return true
}
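The c >= 16 rule that follows is plain Go shift semantics for unsigned values: once the count reaches the operand width the result is zero, and Go permits counts past the width. A minimal check (sketch):

package main

import "fmt"

func main() {
	// Once the count reaches the operand width, an unsigned shift is zero;
	// Go permits counts far past the width, so the fold below is safe.
	x := uint16(0xFFFF)
	fmt.Println(x >> 16) // 0
	fmt.Println(x >> 63) // 0
}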
// match: (Rsh16Ux64 _ (Const64 [c]))
// cond: uint64(c) >= 16
// result: (Const16 [0])
@@ -6827,12 +6729,50 @@ func rewriteValuegeneric_OpRsh16Ux64(v *Value, config *Config) bool {
v.AddArg(v0)
return true
}
// match: (Rsh16Ux64 (Lsh16x64 (Rsh16Ux64 x (Const16 [c1])) (Const16 [c2])) (Const16 [c3]))
// cond: uint16(c1) >= uint16(c2) && uint16(c3) >= uint16(c2)
// result: (Rsh16Ux64 x (Const16 <config.fe.TypeUInt16()> [int64(int16(c1-c2+c3))]))
for {
v_0 := v.Args[0]
if v_0.Op != OpLsh16x64 {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpRsh16Ux64 {
break
}
x := v_0_0.Args[0]
v_0_0_1 := v_0_0.Args[1]
if v_0_0_1.Op != OpConst16 {
break
}
c1 := v_0_0_1.AuxInt
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpConst16 {
break
}
c2 := v_0_1.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpConst16 {
break
}
c3 := v_1.AuxInt
if !(uint16(c1) >= uint16(c2) && uint16(c3) >= uint16(c2)) {
break
}
v.reset(OpRsh16Ux64)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpConst16, config.fe.TypeUInt16())
v0.AuxInt = int64(int16(c1 - c2 + c3))
v.AddArg(v0)
return true
}
return false
}
func rewriteValuegeneric_OpRsh16Ux8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Rsh16Ux8 <t> x (Const8 [c]))
// cond:
// result: (Rsh16Ux64 x (Const64 <t> [int64(uint8(c))]))
for {
@@ -6919,36 +6859,36 @@ func rewriteValuegeneric_OpRsh16x64(v *Value, config *Config) bool {
v.AuxInt = int64(int16(c) >> uint64(d))
return true
}
// match: (Rsh16x64 (Const16 [0]) _)
// cond:
// result: (Const16 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst16 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst16)
v.AuxInt = 0
return true
}
// match: (Rsh16x64 x (Const64 [0]))
// cond:
// result: x
for {
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpConst64 {
break
}
if v_1.AuxInt != 0 {
break
}
v.reset(OpCopy)
v.Type = x.Type
v.AddArg(x)
return true
}
// match: (Rsh16x64 x (Const64 [0]))
// cond:
// result: x
for {
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpConst64 {
break
}
if v_1.AuxInt != 0 {
break
}
v.reset(OpCopy)
v.Type = x.Type
v.AddArg(x)
return true
}
// match: (Rsh16x64 (Const64 [0]) _)
// cond:
// result: (Const16 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst16)
v.AuxInt = 0
return true
}
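Note the asymmetry with the unsigned Rsh16Ux64 rules earlier: the signed Rsh16x64 rules above fold shifts by zero and shifts of zero, but a signed shift by a large count cannot be folded to zero from the count alone, since it fills with the sign bit. A two-line illustration (sketch):

package main

import "fmt"

func main() {
	// A signed right shift fills with the sign bit, so a large count
	// does not force a zero result the way it does for unsigned values.
	fmt.Println(int16(-1) >> 100)  // -1, not 0
	fmt.Println(int16(512) >> 100) // 0, but only because 512 is non-negative
}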
// match: (Rsh16x64 <t> (Rsh16x64 x (Const64 [c])) (Const64 [d]))
@@ -6986,7 +6926,7 @@ func rewriteValuegeneric_OpRsh16x64(v *Value, config *Config) bool {
func rewriteValuegeneric_OpRsh16x8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Rsh16x8 <t> x (Const8 [c]))
// cond:
// result: (Rsh16x64 x (Const64 <t> [int64(uint8(c))]))
for {
@@ -7032,44 +6972,6 @@ func rewriteValuegeneric_OpRsh32Ux16(v *Value, config *Config) bool {
func rewriteValuegeneric_OpRsh32Ux32(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Rsh32Ux32 (Lsh32x32 (Rsh32Ux32 x (Const32 [c1])) (Const32 [c2])) (Const32 [c3]))
// cond: uint32(c1) >= uint32(c2) && uint32(c3) >= uint32(c2)
// result: (Rsh32Ux32 x (Const32 <config.fe.TypeUInt32()> [int64(int32(c1-c2+c3))]))
for {
v_0 := v.Args[0]
if v_0.Op != OpLsh32x32 {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpRsh32Ux32 {
break
}
x := v_0_0.Args[0]
v_0_0_1 := v_0_0.Args[1]
if v_0_0_1.Op != OpConst32 {
break
}
c1 := v_0_0_1.AuxInt
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpConst32 {
break
}
c2 := v_0_1.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpConst32 {
break
}
c3 := v_1.AuxInt
if !(uint32(c1) >= uint32(c2) && uint32(c3) >= uint32(c2)) {
break
}
v.reset(OpRsh32Ux32)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpConst32, config.fe.TypeUInt32())
v0.AuxInt = int64(int32(c1 - c2 + c3))
v.AddArg(v0)
return true
}
// match: (Rsh32Ux32 <t> x (Const32 [c]))
// cond:
// result: (Rsh32Ux64 x (Const64 <t> [int64(uint32(c))]))
@@ -7111,21 +7013,6 @@ func rewriteValuegeneric_OpRsh32Ux64(v *Value, config *Config) bool {
v.AuxInt = int64(int32(uint32(c) >> uint64(d)))
return true
}
// match: (Rsh32Ux64 (Const32 [0]) _)
// cond:
// result: (Const32 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst32 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst32)
v.AuxInt = 0
return true
}
// match: (Rsh32Ux64 x (Const64 [0]))
// cond:
// result: x
@@ -7143,6 +7030,21 @@ func rewriteValuegeneric_OpRsh32Ux64(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
// match: (Rsh32Ux64 (Const64 [0]) _)
// cond:
// result: (Const32 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst32)
v.AuxInt = 0
return true
}
// match: (Rsh32Ux64 _ (Const64 [c]))
// cond: uint64(c) >= 32
// result: (Const32 [0])
@@ -7189,12 +7091,50 @@ func rewriteValuegeneric_OpRsh32Ux64(v *Value, config *Config) bool {
v.AddArg(v0)
return true
}
// match: (Rsh32Ux64 (Lsh32x64 (Rsh32Ux64 x (Const32 [c1])) (Const32 [c2])) (Const32 [c3]))
// cond: uint32(c1) >= uint32(c2) && uint32(c3) >= uint32(c2)
// result: (Rsh32Ux64 x (Const32 <config.fe.TypeUInt32()> [int64(int32(c1-c2+c3))]))
for {
v_0 := v.Args[0]
if v_0.Op != OpLsh32x64 {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpRsh32Ux64 {
break
}
x := v_0_0.Args[0]
v_0_0_1 := v_0_0.Args[1]
if v_0_0_1.Op != OpConst32 {
break
}
c1 := v_0_0_1.AuxInt
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpConst32 {
break
}
c2 := v_0_1.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpConst32 {
break
}
c3 := v_1.AuxInt
if !(uint32(c1) >= uint32(c2) && uint32(c3) >= uint32(c2)) {
break
}
v.reset(OpRsh32Ux64)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpConst32, config.fe.TypeUInt32())
v0.AuxInt = int64(int32(c1 - c2 + c3))
v.AddArg(v0)
return true
}
return false
}
func rewriteValuegeneric_OpRsh32Ux8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Rsh32Ux8 <t> x (Const8 [c]))
// cond:
// result: (Rsh32Ux64 x (Const64 <t> [int64(uint8(c))]))
for {
@@ -7281,21 +7221,6 @@ func rewriteValuegeneric_OpRsh32x64(v *Value, config *Config) bool {
v.AuxInt = int64(int32(c) >> uint64(d))
return true
}
// match: (Rsh32x64 (Const32 [0]) _)
// cond:
// result: (Const32 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst32 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst32)
v.AuxInt = 0
return true
}
// match: (Rsh32x64 x (Const64 [0]))
// cond:
// result: x
@@ -7313,6 +7238,21 @@ func rewriteValuegeneric_OpRsh32x64(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
// match: (Rsh32x64 (Const64 [0]) _)
// cond:
// result: (Const32 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst32)
v.AuxInt = 0
return true
}
// match: (Rsh32x64 <t> (Rsh32x64 x (Const64 [c])) (Const64 [d]))
// cond: !uaddOvf(c,d)
// result: (Rsh32x64 x (Const64 <t> [c+d]))
@@ -7348,7 +7288,7 @@ func rewriteValuegeneric_OpRsh32x64(v *Value, config *Config) bool {
func rewriteValuegeneric_OpRsh32x8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Rsh32x8 <t> x (Const8 [c]))
// cond:
// result: (Rsh32x64 x (Const64 <t> [int64(uint8(c))]))
for {
@@ -7389,21 +7329,6 @@ func rewriteValuegeneric_OpRsh64Ux16(v *Value, config *Config) bool {
v.AddArg(v0)
return true
}
// match: (Rsh64Ux16 (Const64 [0]) _)
// cond:
// result: (Const64 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst64)
v.AuxInt = 0
return true
}
return false
}
func rewriteValuegeneric_OpRsh64Ux32(v *Value, config *Config) bool {
@@ -7427,21 +7352,6 @@ func rewriteValuegeneric_OpRsh64Ux32(v *Value, config *Config) bool {
v.AddArg(v0)
return true
}
// match: (Rsh64Ux32 (Const64 [0]) _)
// cond:
// result: (Const64 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst64)
v.AuxInt = 0
return true
}
return false
}
func rewriteValuegeneric_OpRsh64Ux64(v *Value, config *Config) bool {
@@ -7465,59 +7375,6 @@ func rewriteValuegeneric_OpRsh64Ux64(v *Value, config *Config) bool {
v.AuxInt = int64(uint64(c) >> uint64(d))
return true
}
// match: (Rsh64Ux64 (Const64 [0]) _)
// cond:
// result: (Const64 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst64)
v.AuxInt = 0
return true
}
// match: (Rsh64Ux64 (Lsh64x64 (Rsh64Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
// cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2)
// result: (Rsh64Ux64 x (Const64 <config.fe.TypeUInt64()> [c1-c2+c3]))
for {
v_0 := v.Args[0]
if v_0.Op != OpLsh64x64 {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpRsh64Ux64 {
break
}
x := v_0_0.Args[0]
v_0_0_1 := v_0_0.Args[1]
if v_0_0_1.Op != OpConst64 {
break
}
c1 := v_0_0_1.AuxInt
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpConst64 {
break
}
c2 := v_0_1.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpConst64 {
break
}
c3 := v_1.AuxInt
if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2)) {
break
}
v.reset(OpRsh64Ux64)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
v0.AuxInt = c1 - c2 + c3
v.AddArg(v0)
return true
}
// match: (Rsh64Ux64 x (Const64 [0]))
// cond:
// result: x
@@ -7596,12 +7453,50 @@ func rewriteValuegeneric_OpRsh64Ux64(v *Value, config *Config) bool {
v.AddArg(v0)
return true
}
// match: (Rsh64Ux64 (Lsh64x64 (Rsh64Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
// cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2)
// result: (Rsh64Ux64 x (Const64 <config.fe.TypeUInt64()> [c1-c2+c3]))
for {
v_0 := v.Args[0]
if v_0.Op != OpLsh64x64 {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpRsh64Ux64 {
break
}
x := v_0_0.Args[0]
v_0_0_1 := v_0_0.Args[1]
if v_0_0_1.Op != OpConst64 {
break
}
c1 := v_0_0_1.AuxInt
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpConst64 {
break
}
c2 := v_0_1.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpConst64 {
break
}
c3 := v_1.AuxInt
if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2)) {
break
}
v.reset(OpRsh64Ux64)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
v0.AuxInt = c1 - c2 + c3
v.AddArg(v0)
return true
}
return false
}
func rewriteValuegeneric_OpRsh64Ux8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Rsh64Ux8 <t> x (Const8 [c]))
// cond:
// result: (Rsh64Ux64 x (Const64 <t> [int64(uint8(c))]))
for {
@@ -7619,21 +7514,6 @@ func rewriteValuegeneric_OpRsh64Ux8(v *Value, config *Config) bool {
v.AddArg(v0)
return true
}
// match: (Rsh64Ux8 (Const64 [0]) _)
// cond:
// result: (Const64 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst64)
v.AuxInt = 0
return true
}
return false
}
func rewriteValuegeneric_OpRsh64x16(v *Value, config *Config) bool {
@@ -7657,21 +7537,6 @@ func rewriteValuegeneric_OpRsh64x16(v *Value, config *Config) bool {
v.AddArg(v0)
return true
}
// match: (Rsh64x16 (Const64 [0]) _)
// cond:
// result: (Const64 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst64)
v.AuxInt = 0
return true
}
return false
}
func rewriteValuegeneric_OpRsh64x32(v *Value, config *Config) bool {
@@ -7695,21 +7560,6 @@ func rewriteValuegeneric_OpRsh64x32(v *Value, config *Config) bool {
v.AddArg(v0)
return true
}
// match: (Rsh64x32 (Const64 [0]) _)
// cond:
// result: (Const64 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst64)
v.AuxInt = 0
return true
}
return false
}
func rewriteValuegeneric_OpRsh64x64(v *Value, config *Config) bool {
@@ -7733,21 +7583,6 @@ func rewriteValuegeneric_OpRsh64x64(v *Value, config *Config) bool {
v.AuxInt = c >> uint64(d)
return true
}
// match: (Rsh64x64 (Const64 [0]) _)
// cond:
// result: (Const64 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst64)
v.AuxInt = 0
return true
}
// match: (Rsh64x64 x (Const64 [0]))
// cond:
// result: x
@@ -7815,7 +7650,7 @@ func rewriteValuegeneric_OpRsh64x64(v *Value, config *Config) bool {
func rewriteValuegeneric_OpRsh64x8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Rsh64x8 <t> x (Const8 [c]))
// cond:
// result: (Rsh64x64 x (Const64 <t> [int64(uint8(c))]))
for {
@@ -7833,21 +7668,6 @@ func rewriteValuegeneric_OpRsh64x8(v *Value, config *Config) bool {
v.AddArg(v0)
return true
}
// match: (Rsh64x8 (Const64 [0]) _)
// cond:
// result: (Const64 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst64)
v.AuxInt = 0
return true
}
return false
}
func rewriteValuegeneric_OpRsh8Ux16(v *Value, config *Config) bool {
@@ -7917,21 +7737,6 @@ func rewriteValuegeneric_OpRsh8Ux64(v *Value, config *Config) bool {
v.AuxInt = int64(int8(uint8(c) >> uint64(d)))
return true
}
// match: (Rsh8Ux64 (Const8 [0]) _)
// cond:
// result: (Const8 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst8 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst8)
v.AuxInt = 0
return true
}
// match: (Rsh8Ux64 x (Const64 [0]))
// cond:
// result: x
@@ -7949,9 +7754,24 @@ func rewriteValuegeneric_OpRsh8Ux64(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
// match: (Rsh8Ux64 (Const8 [0]) _)
// cond:
// result: (Const8 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst8 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst8)
v.AuxInt = 0
return true
}
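The next rule replaces an unsigned 8-bit shift by a constant count of 8 or more with zero, since every operand bit is shifted out. Plain Go shows the same behavior (sample value is mine):

package main

import "fmt"

func main() {
	var x uint8 = 0xAB
	// An unsigned shift by >= the operand width leaves nothing.
	fmt.Println(x >> 8) // 0
}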
// match: (Rsh8Ux64 _ (Const64 [c]))
// cond: uint64(c) >= 8
// result: (Const8 [0])
for {
v_1 := v.Args[1]
if v_1.Op != OpConst64 {
@@ -7995,21 +7815,16 @@ func rewriteValuegeneric_OpRsh8Ux64(v *Value, config *Config) bool {
v.AddArg(v0)
return true
}
return false
}
func rewriteValuegeneric_OpRsh8Ux8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Rsh8Ux8 (Lsh8x8 (Rsh8Ux8 x (Const8 [c1])) (Const8 [c2])) (Const8 [c3]))
// cond: uint8(c1) >= uint8(c2) && uint8(c3) >= uint8(c2)
// result: (Rsh8Ux8 x (Const8 <config.fe.TypeUInt8()> [int64(int8(c1-c2+c3))]))
for {
v_0 := v.Args[0]
if v_0.Op != OpLsh8x8 {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpRsh8Ux8 {
break
}
x := v_0_0.Args[0]
// match: (Rsh8Ux64 (Lsh8x64 (Rsh8Ux64 x (Const8 [c1])) (Const8 [c2])) (Const8 [c3]))
// cond: uint8(c1) >= uint8(c2) && uint8(c3) >= uint8(c2)
// result: (Rsh8Ux64 x (Const8 <config.fe.TypeUInt8()> [int64(int8(c1-c2+c3))]))
for {
v_0 := v.Args[0]
if v_0.Op != OpLsh8x64 {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpRsh8Ux64 {
break
}
x := v_0_0.Args[0]
@@ -8031,14 +7846,19 @@ func rewriteValuegeneric_OpRsh8Ux8(v *Value, config *Config) bool {
if !(uint8(c1) >= uint8(c2) && uint8(c3) >= uint8(c2)) {
break
}
v.reset(OpRsh8Ux8)
v.reset(OpRsh8Ux64)
v.AddArg(x)
v0 := b.NewValue0(v.Line, OpConst8, config.fe.TypeUInt8())
v0.AuxInt = int64(int8(c1 - c2 + c3))
v.AddArg(v0)
return true
}
return false
}
func rewriteValuegeneric_OpRsh8Ux8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Rsh8Ux8 <t> x (Const8 [c]))
// cond:
// result: (Rsh8Ux64 x (Const64 <t> [int64(uint8(c))]))
for {
@@ -8125,21 +7945,6 @@ func rewriteValuegeneric_OpRsh8x64(v *Value, config *Config) bool {
v.AuxInt = int64(int8(c) >> uint64(d))
return true
}
// match: (Rsh8x64 (Const8 [0]) _)
// cond:
// result: (Const8 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst8 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst8)
v.AuxInt = 0
return true
}
// match: (Rsh8x64 x (Const64 [0]))
// cond:
// result: x
@@ -8157,6 +7962,21 @@ func rewriteValuegeneric_OpRsh8x64(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
// match: (Rsh8x64 (Const8 [0]) _)
// cond:
// result: (Const8 [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst8 {
break
}
if v_0.AuxInt != 0 {
break
}
v.reset(OpConst8)
v.AuxInt = 0
return true
}
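The rule just below merges two arithmetic right shifts by adding the counts, guarded by !uaddOvf(c,d) so the unsigned sum c+d cannot wrap. A small sketch of the identity (values are mine):

package main

import "fmt"

func main() {
	x := int8(-100)
	c, d := uint64(2), uint64(3)
	// (x >> c) >> d == x >> (c+d) for arithmetic shifts,
	// provided c+d is computed without overflow.
	fmt.Println((x>>c)>>d == x>>(c+d)) // true
}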
// match: (Rsh8x64 <t> (Rsh8x64 x (Const64 [c])) (Const64 [d]))
// cond: !uaddOvf(c,d)
// result: (Rsh8x64 x (Const64 <t> [c+d]))
@@ -8192,7 +8012,7 @@ func rewriteValuegeneric_OpRsh8x64(v *Value, config *Config) bool {
func rewriteValuegeneric_OpRsh8x8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Rsh8x8 <t> x (Const8 [c]))
// cond:
// result: (Rsh8x64 x (Const64 <t> [int64(uint8(c))]))
for {
@@ -8791,7 +8611,7 @@ func rewriteValuegeneric_OpStructSelect(v *Value, config *Config) bool {
func rewriteValuegeneric_OpSub16(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Sub16 (Const16 [c]) (Const16 [d]))
// cond:
// result: (Const16 [int64(int16(c-d))])
for {
@@ -8883,7 +8703,7 @@ func rewriteValuegeneric_OpSub16(v *Value, config *Config) bool {
func rewriteValuegeneric_OpSub32(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Sub32 (Const32 [c]) (Const32 [d]))
// cond:
// result: (Const32 [int64(int32(c-d))])
for {
@@ -8998,7 +8818,7 @@ func rewriteValuegeneric_OpSub32F(v *Value, config *Config) bool {
func rewriteValuegeneric_OpSub64(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Sub64 (Const64 [c]) (Const64 [d]))
// cond:
// result: (Const64 [c-d])
for {
@@ -9113,7 +8933,7 @@ func rewriteValuegeneric_OpSub64F(v *Value, config *Config) bool {
func rewriteValuegeneric_OpSub8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Sub8 (Const8 [c]) (Const8 [d]))
// cond:
// result: (Const8 [int64(int8(c-d))])
for {
@@ -9131,9 +8951,9 @@ func rewriteValuegeneric_OpSub8(v *Value, config *Config) bool {
v.AuxInt = int64(int8(c - d))
return true
}
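The folding rule above stores the 8-bit difference as int64(int8(c-d)): wrap to 8 bits first, then sign-extend, so the AuxInt stays canonical. A worked example with values of my choosing:

package main

import "fmt"

func main() {
	c, d := int64(100), int64(-100)
	// 100 - (-100) = 200, which wraps to -56 as an int8.
	fmt.Println(int64(int8(c - d))) // -56
}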
// match: (Sub8 x (Const8 <t> [c]))
// cond: x.Op != OpConst8
// result: (Add8 (Const8 <t> [int64(int8(-c))]) x)
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -9152,9 +8972,9 @@ func rewriteValuegeneric_OpSub8(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
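The rule above canonicalizes x - c into (-c) + x so later Add8 rules see the constant in a fixed position; the x.Op != OpConst8 condition leaves the two-constant case to the folding rule and keeps the rewrite from re-firing forever. The identity itself is plain modular arithmetic:

package main

import "fmt"

func main() {
	x, c := int8(100), int8(-128)
	// Negating int8(-128) wraps back to -128, and the identity
	// still holds under two's-complement arithmetic.
	fmt.Println(x-c == -c+x) // true
}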
// match: (Sub8 x x)
// cond:
// result: (Const8 [0])
for {
x := v.Args[0]
if x != v.Args[1] {
@@ -9164,7 +8984,7 @@ func rewriteValuegeneric_OpSub8(v *Value, config *Config) bool {
v.AuxInt = 0
return true
}
// match: (Sub8 (Add8 x y) x)
// cond:
// result: y
for {
@@ -9182,7 +9002,7 @@ func rewriteValuegeneric_OpSub8(v *Value, config *Config) bool {
v.AddArg(y)
return true
}
// match: (Sub8 (Add8 x y) y)
// cond:
// result: x
for {
@@ -9205,9 +9025,9 @@ func rewriteValuegeneric_OpSub8(v *Value, config *Config) bool {
func rewriteValuegeneric_OpTrunc16to8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Trunc16to8 (Const16 [c]))
// cond:
// result: (Const8 [int64(int8(c))])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst16 {
@@ -9218,7 +9038,7 @@ func rewriteValuegeneric_OpTrunc16to8(v *Value, config *Config) bool {
v.AuxInt = int64(int8(c))
return true
}
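The And16 rule just below drops a mask whose constant keeps the whole low byte (y&0xFF == 0xFF): bits cleared by such a mask are exactly the ones truncation throws away. A one-line check with sample values:

package main

import "fmt"

func main() {
	x, y := uint16(0x1234), uint16(0x01FF) // y&0xFF == 0xFF
	fmt.Println(uint8(x&y) == uint8(x))    // true
}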
// match: (Trunc16to8 (And16 (Const16 [y]) x))
// cond: y&0xFF == 0xFF
// result: (Trunc16to8 x)
for {
@@ -9246,7 +9066,7 @@ func rewriteValuegeneric_OpTrunc32to16(v *Value, config *Config) bool {
_ = b
// match: (Trunc32to16 (Const32 [c]))
// cond:
// result: (Const16 [int64(int16(c))])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst32 {
@@ -9283,9 +9103,9 @@ func rewriteValuegeneric_OpTrunc32to16(v *Value, config *Config) bool {
func rewriteValuegeneric_OpTrunc32to8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Trunc32to8 (Const32 [c]))
// cond:
// result: (Const8 [int64(int8(c))])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst32 {
@@ -9296,7 +9116,7 @@ func rewriteValuegeneric_OpTrunc32to8(v *Value, config *Config) bool {
v.AuxInt = int64(int8(c))
return true
}
// match: (Trunc32to8 (And32 (Const32 [y]) x))
// cond: y&0xFF == 0xFF
// result: (Trunc32to8 x)
for {
@@ -9324,7 +9144,7 @@ func rewriteValuegeneric_OpTrunc64to16(v *Value, config *Config) bool {
_ = b
// match: (Trunc64to16 (Const64 [c]))
// cond:
// result: (Const16 [int64(int16(c))])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64 {
@@ -9363,7 +9183,7 @@ func rewriteValuegeneric_OpTrunc64to32(v *Value, config *Config) bool {
_ = b
// match: (Trunc64to32 (Const64 [c]))
// cond:
// result: (Const32 [int64(int32(c))])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64 {
@@ -9400,9 +9220,9 @@ func rewriteValuegeneric_OpTrunc64to32(v *Value, config *Config) bool {
func rewriteValuegeneric_OpTrunc64to8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Trunc64to8 (Const64 [c]))
// cond:
// result: (Const8 [int64(int8(c))])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64 {
@@ -9413,7 +9233,7 @@ func rewriteValuegeneric_OpTrunc64to8(v *Value, config *Config) bool {
v.AuxInt = int64(int8(c))
return true
}
// match: (Trunc64to8 (And64 (Const64 [y]) x))
// cond: y&0xFF == 0xFF
// result: (Trunc64to8 x)
for {
@@ -9820,9 +9640,9 @@ func rewriteValuegeneric_OpXor64(v *Value, config *Config) bool {
func rewriteValuegeneric_OpXor8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Xor8 x (Const8 <t> [c]))
// cond: x.Op != OpConst8
// result: (Xor8 (Const8 <t> [c]) x)
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -9841,9 +9661,9 @@ func rewriteValuegeneric_OpXor8(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
// match: (Xor8 x x)
// cond:
// result: (Const8 [0])
for {
x := v.Args[0]
if x != v.Args[1] {
@@ -9853,7 +9673,7 @@ func rewriteValuegeneric_OpXor8(v *Value, config *Config) bool {
v.AuxInt = 0
return true
}
// match: (Xor8 (Const8 [0]) x)
// cond:
// result: x
for {
...