From 1ec78d1dd1bbf2605b0723952a8d30b53ae9f279 Mon Sep 17 00:00:00 2001 From: Ben Shi <powerman1st@163.com> Date: Mon, 2 Oct 2017 03:09:28 +0000 Subject: [PATCH] cmd/compile: optimize ARM code with CMN/TST/TEQ MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit CMN/TST/TEQ were supported since ARMv4, which can be used to simplify comparisons. This patch implements the optimization and here are the benchmark results. 1. A special test case got 18.21% improvement. name old time/op new time/op delta TSTTEQ-4 806µs ± 1% 659µs ± 0% -18.21% (p=0.000 n=20+18) (https://github.com/benshi001/ugo1/blob/master/tstteq_test.go) 2. There is no regression in the compilecmp benchmark. name old time/op new time/op delta Template 2.31s ± 1% 2.30s ± 1% ~ (p=0.661 n=10+9) Unicode 1.32s ± 3% 1.32s ± 5% ~ (p=0.280 n=10+10) GoTypes 7.69s ± 1% 7.65s ± 0% -0.52% (p=0.027 n=10+8) Compiler 36.5s ± 1% 36.4s ± 1% ~ (p=0.546 n=9+9) SSA 85.1s ± 2% 84.9s ± 1% ~ (p=0.529 n=10+10) Flate 1.43s ± 2% 1.43s ± 2% ~ (p=0.661 n=10+9) GoParser 1.81s ± 2% 1.81s ± 1% ~ (p=0.796 n=10+10) Reflect 5.10s ± 2% 5.09s ± 1% ~ (p=0.853 n=10+10) Tar 2.47s ± 1% 2.48s ± 1% ~ (p=0.123 n=10+10) XML 2.59s ± 1% 2.58s ± 1% ~ (p=0.853 n=10+10) [Geo mean] 4.78s 4.77s -0.17% name old user-time/op new user-time/op delta Template 2.72s ± 3% 2.73s ± 2% ~ (p=0.928 n=10+10) Unicode 1.58s ± 4% 1.60s ± 1% ~ (p=0.087 n=10+9) GoTypes 9.41s ± 2% 9.36s ± 1% ~ (p=0.060 n=10+10) Compiler 44.4s ± 2% 44.2s ± 2% ~ (p=0.289 n=10+10) SSA 110s ± 2% 110s ± 1% ~ (p=0.739 n=10+10) Flate 1.67s ± 2% 1.63s ± 3% ~ (p=0.063 n=10+10) GoParser 2.12s ± 1% 2.12s ± 2% ~ (p=0.840 n=10+10) Reflect 5.94s ± 1% 5.98s ± 1% ~ (p=0.063 n=9+10) Tar 3.01s ± 2% 3.02s ± 2% ~ (p=0.584 n=10+10) XML 3.04s ± 3% 3.02s ± 2% ~ (p=0.696 n=10+10) [Geo mean] 5.73s 5.72s -0.20% name old text-bytes new text-bytes delta HelloSize 579kB ± 0% 579kB ± 0% ~ (all equal) name old data-bytes new data-bytes delta HelloSize 5.46kB ± 0% 5.46kB ± 0% ~ (all 
equal) name old bss-bytes new bss-bytes delta HelloSize 72.8kB ± 0% 72.8kB ± 0% ~ (all equal) name old exe-bytes new exe-bytes delta HelloSize 1.03MB ± 0% 1.03MB ± 0% ~ (all equal) 3. There is little change in the go1 benchmark (excluding the noise). name old time/op new time/op delta BinaryTree17-4 40.3s ± 1% 40.6s ± 1% +0.80% (p=0.000 n=30+30) Fannkuch11-4 24.2s ± 1% 24.1s ± 0% ~ (p=0.093 n=30+30) FmtFprintfEmpty-4 834ns ± 0% 826ns ± 0% -0.93% (p=0.000 n=29+24) FmtFprintfString-4 1.39µs ± 1% 1.36µs ± 0% -2.02% (p=0.000 n=30+30) FmtFprintfInt-4 1.43µs ± 1% 1.44µs ± 1% ~ (p=0.155 n=30+29) FmtFprintfIntInt-4 2.09µs ± 0% 2.11µs ± 0% +1.16% (p=0.000 n=28+30) FmtFprintfPrefixedInt-4 2.33µs ± 1% 2.36µs ± 0% +1.25% (p=0.000 n=30+30) FmtFprintfFloat-4 4.27µs ± 1% 4.32µs ± 1% +1.27% (p=0.000 n=30+30) FmtManyArgs-4 8.18µs ± 0% 8.14µs ± 0% -0.46% (p=0.000 n=25+27) GobDecode-4 101ms ± 1% 101ms ± 1% ~ (p=0.182 n=29+29) GobEncode-4 89.6ms ± 1% 87.8ms ± 2% -2.02% (p=0.000 n=30+29) Gzip-4 4.07s ± 1% 4.08s ± 1% ~ (p=0.173 n=30+27) Gunzip-4 602ms ± 1% 600ms ± 1% -0.29% (p=0.000 n=29+28) HTTPClientServer-4 679µs ± 4% 683µs ± 3% ~ (p=0.197 n=30+30) JSONEncode-4 241ms ± 1% 239ms ± 1% -0.84% (p=0.000 n=30+30) JSONDecode-4 903ms ± 1% 882ms ± 1% -2.33% (p=0.000 n=30+30) Mandelbrot200-4 41.8ms ± 0% 41.8ms ± 0% ~ (p=0.719 n=30+30) GoParse-4 45.5ms ± 1% 45.8ms ± 1% +0.52% (p=0.000 n=30+30) RegexpMatchEasy0_32-4 1.27µs ± 1% 1.27µs ± 0% -0.60% (p=0.000 n=30+30) RegexpMatchEasy0_1K-4 7.77µs ± 6% 7.69µs ± 4% -0.96% (p=0.040 n=30+30) RegexpMatchEasy1_32-4 1.29µs ± 1% 1.28µs ± 1% -0.54% (p=0.000 n=30+30) RegexpMatchEasy1_1K-4 10.3µs ± 6% 10.2µs ± 3% ~ (p=0.453 n=30+27) RegexpMatchMedium_32-4 1.98µs ± 1% 2.00µs ± 1% +0.85% (p=0.000 n=30+29) RegexpMatchMedium_1K-4 503µs ± 0% 503µs ± 1% ~ (p=0.752 n=30+30) RegexpMatchHard_32-4 27.1µs ± 1% 26.5µs ± 0% -1.96% (p=0.000 n=30+24) RegexpMatchHard_1K-4 809µs ± 1% 799µs ± 1% -1.29% (p=0.000 n=29+30) Revcomp-4 67.3ms ± 2% 67.2ms ± 1% ~ (p=0.265 n=29+29) 
Template-4 1.08s ± 1% 1.07s ± 0% -1.39% (p=0.000 n=30+22) TimeParse-4 6.93µs ± 1% 6.96µs ± 1% +0.40% (p=0.005 n=30+30) TimeFormat-4 13.3µs ± 0% 13.3µs ± 1% ~ (p=0.734 n=30+30) [Geo mean] 709µs 707µs -0.32% name old speed new speed delta GobDecode-4 7.59MB/s ± 1% 7.57MB/s ± 1% ~ (p=0.145 n=29+29) GobEncode-4 8.56MB/s ± 1% 8.74MB/s ± 1% +2.07% (p=0.000 n=30+29) Gzip-4 4.76MB/s ± 1% 4.75MB/s ± 1% -0.25% (p=0.037 n=30+30) Gunzip-4 32.2MB/s ± 1% 32.3MB/s ± 1% +0.29% (p=0.000 n=29+28) JSONEncode-4 8.04MB/s ± 1% 8.11MB/s ± 1% +0.85% (p=0.000 n=30+30) JSONDecode-4 2.15MB/s ± 1% 2.20MB/s ± 1% +2.29% (p=0.000 n=30+30) GoParse-4 1.27MB/s ± 1% 1.26MB/s ± 1% -0.73% (p=0.000 n=30+30) RegexpMatchEasy0_32-4 25.1MB/s ± 1% 25.3MB/s ± 0% +0.61% (p=0.000 n=30+30) RegexpMatchEasy0_1K-4 131MB/s ± 6% 133MB/s ± 4% +1.35% (p=0.009 n=28+30) RegexpMatchEasy1_32-4 24.9MB/s ± 1% 25.0MB/s ± 1% +0.54% (p=0.000 n=30+30) RegexpMatchEasy1_1K-4 99.2MB/s ± 6% 100.2MB/s ± 3% ~ (p=0.448 n=30+27) RegexpMatchMedium_32-4 503kB/s ± 1% 500kB/s ± 0% -0.66% (p=0.002 n=30+24) RegexpMatchMedium_1K-4 2.04MB/s ± 0% 2.04MB/s ± 1% ~ (p=0.358 n=30+30) RegexpMatchHard_32-4 1.18MB/s ± 1% 1.20MB/s ± 1% +1.75% (p=0.000 n=30+30) RegexpMatchHard_1K-4 1.26MB/s ± 1% 1.28MB/s ± 1% +1.42% (p=0.000 n=30+30) Revcomp-4 37.8MB/s ± 2% 37.8MB/s ± 1% ~ (p=0.266 n=29+29) Template-4 1.80MB/s ± 1% 1.82MB/s ± 1% +1.46% (p=0.000 n=30+30) [Geo mean] 6.91MB/s 6.96MB/s +0.70% fixes #21583 Change-Id: I24065a80588ccae7de3ad732a3cfb0026cf7e214 Reviewed-on: https://go-review.googlesource.com/67490 Reviewed-by: Cherry Zhang <cherryyz@google.com> Run-TryBot: Cherry Zhang <cherryyz@google.com> TryBot-Result: Gobot Gobot <gobot@golang.org> --- src/cmd/compile/internal/arm/ssa.go | 12 +- src/cmd/compile/internal/ssa/gen/ARM.rules | 136 + src/cmd/compile/internal/ssa/gen/ARMOps.go | 20 +- src/cmd/compile/internal/ssa/opGen.go | 241 +- src/cmd/compile/internal/ssa/rewriteARM.go | 14155 ++++++++++++------- 5 files changed, 9270 insertions(+), 5294 
deletions(-) diff --git a/src/cmd/compile/internal/arm/ssa.go b/src/cmd/compile/internal/arm/ssa.go index 4188775329..aee9ff3014 100644 --- a/src/cmd/compile/internal/arm/ssa.go +++ b/src/cmd/compile/internal/arm/ssa.go @@ -473,17 +473,17 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = v.Args[0].Reg() - case ssa.OpARMCMPshiftLL: + case ssa.OpARMCMPshiftLL, ssa.OpARMCMNshiftLL, ssa.OpARMTSTshiftLL, ssa.OpARMTEQshiftLL: genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_LL, v.AuxInt) - case ssa.OpARMCMPshiftRL: + case ssa.OpARMCMPshiftRL, ssa.OpARMCMNshiftRL, ssa.OpARMTSTshiftRL, ssa.OpARMTEQshiftRL: genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_LR, v.AuxInt) - case ssa.OpARMCMPshiftRA: + case ssa.OpARMCMPshiftRA, ssa.OpARMCMNshiftRA, ssa.OpARMTSTshiftRA, ssa.OpARMTEQshiftRA: genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_AR, v.AuxInt) - case ssa.OpARMCMPshiftLLreg: + case ssa.OpARMCMPshiftLLreg, ssa.OpARMCMNshiftLLreg, ssa.OpARMTSTshiftLLreg, ssa.OpARMTEQshiftLLreg: genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_LL) - case ssa.OpARMCMPshiftRLreg: + case ssa.OpARMCMPshiftRLreg, ssa.OpARMCMNshiftRLreg, ssa.OpARMTSTshiftRLreg, ssa.OpARMTEQshiftRLreg: genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_LR) - case ssa.OpARMCMPshiftRAreg: + case ssa.OpARMCMPshiftRAreg, ssa.OpARMCMNshiftRAreg, ssa.OpARMTSTshiftRAreg, ssa.OpARMTEQshiftRAreg: genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_AR) case ssa.OpARMMOVWaddr: p := s.Prog(arm.AMOVW) diff --git a/src/cmd/compile/internal/ssa/gen/ARM.rules b/src/cmd/compile/internal/ssa/gen/ARM.rules index 3378e4db8a..a5e440336a 100644 --- a/src/cmd/compile/internal/ssa/gen/ARM.rules +++ b/src/cmd/compile/internal/ssa/gen/ARM.rules @@ -531,6 +531,9 @@ (CMP x (MOVWconst [c])) 
-> (CMPconst [c] x) (CMP (MOVWconst [c]) x) -> (InvertFlags (CMPconst [c] x)) +(CMN x (MOVWconst [c])) -> (CMNconst [c] x) +(TST x (MOVWconst [c])) -> (TSTconst [c] x) +(TEQ x (MOVWconst [c])) -> (TEQconst [c] x) // don't extend after proper load // MOVWreg instruction is not emitted if src and dst registers are same, but it ensures the type. @@ -637,6 +640,17 @@ (CMPconst (MOVWconst [x]) [y]) && int32(x)<int32(y) && uint32(x)>uint32(y) -> (FlagLT_UGT) (CMPconst (MOVWconst [x]) [y]) && int32(x)>int32(y) && uint32(x)<uint32(y) -> (FlagGT_ULT) (CMPconst (MOVWconst [x]) [y]) && int32(x)>int32(y) && uint32(x)>uint32(y) -> (FlagGT_UGT) +(CMNconst (MOVWconst [x]) [y]) && int32(x)==int32(-y) -> (FlagEQ) +(CMNconst (MOVWconst [x]) [y]) && int32(x)<int32(-y) && uint32(x)<uint32(-y) -> (FlagLT_ULT) +(CMNconst (MOVWconst [x]) [y]) && int32(x)<int32(-y) && uint32(x)>uint32(-y) -> (FlagLT_UGT) +(CMNconst (MOVWconst [x]) [y]) && int32(x)>int32(-y) && uint32(x)<uint32(-y) -> (FlagGT_ULT) +(CMNconst (MOVWconst [x]) [y]) && int32(x)>int32(-y) && uint32(x)>uint32(-y) -> (FlagGT_UGT) +(TSTconst (MOVWconst [x]) [y]) && int32(x&y)==0 -> (FlagEQ) +(TSTconst (MOVWconst [x]) [y]) && int32(x&y)<0 -> (FlagLT_UGT) +(TSTconst (MOVWconst [x]) [y]) && int32(x&y)>0 -> (FlagGT_UGT) +(TEQconst (MOVWconst [x]) [y]) && int32(x^y)==0 -> (FlagEQ) +(TEQconst (MOVWconst [x]) [y]) && int32(x^y)<0 -> (FlagLT_UGT) +(TEQconst (MOVWconst [x]) [y]) && int32(x^y)>0 -> (FlagGT_UGT) // other known comparisons (CMPconst (MOVBUreg _) [c]) && 0xff < c -> (FlagLT_ULT) @@ -989,6 +1003,24 @@ (CMP (SRL y z) x) -> (InvertFlags (CMPshiftRLreg x y z)) (CMP x (SRA y z)) -> (CMPshiftRAreg x y z) (CMP (SRA y z) x) -> (InvertFlags (CMPshiftRAreg x y z)) +(TST x (SLLconst [c] y)) -> (TSTshiftLL x y [c]) +(TST x (SRLconst [c] y)) -> (TSTshiftRL x y [c]) +(TST x (SRAconst [c] y)) -> (TSTshiftRA x y [c]) +(TST x (SLL y z)) -> (TSTshiftLLreg x y z) +(TST x (SRL y z)) -> (TSTshiftRLreg x y z) +(TST x (SRA y z)) -> (TSTshiftRAreg x 
y z) +(TEQ x (SLLconst [c] y)) -> (TEQshiftLL x y [c]) +(TEQ x (SRLconst [c] y)) -> (TEQshiftRL x y [c]) +(TEQ x (SRAconst [c] y)) -> (TEQshiftRA x y [c]) +(TEQ x (SLL y z)) -> (TEQshiftLLreg x y z) +(TEQ x (SRL y z)) -> (TEQshiftRLreg x y z) +(TEQ x (SRA y z)) -> (TEQshiftRAreg x y z) +(CMN x (SLLconst [c] y)) -> (CMNshiftLL x y [c]) +(CMN x (SRLconst [c] y)) -> (CMNshiftRL x y [c]) +(CMN x (SRAconst [c] y)) -> (CMNshiftRA x y [c]) +(CMN x (SLL y z)) -> (CMNshiftLLreg x y z) +(CMN x (SRL y z)) -> (CMNshiftRLreg x y z) +(CMN x (SRA y z)) -> (CMNshiftRAreg x y z) // prefer *const ops to *shift ops (ADDshiftLL (MOVWconst [c]) x [d]) -> (ADDconst [c] (SLLconst <x.Type> x [d])) @@ -1031,6 +1063,15 @@ (CMPshiftLL (MOVWconst [c]) x [d]) -> (InvertFlags (CMPconst [c] (SLLconst <x.Type> x [d]))) (CMPshiftRL (MOVWconst [c]) x [d]) -> (InvertFlags (CMPconst [c] (SRLconst <x.Type> x [d]))) (CMPshiftRA (MOVWconst [c]) x [d]) -> (InvertFlags (CMPconst [c] (SRAconst <x.Type> x [d]))) +(TSTshiftLL (MOVWconst [c]) x [d]) -> (TSTconst [c] (SLLconst <x.Type> x [d])) +(TSTshiftRL (MOVWconst [c]) x [d]) -> (TSTconst [c] (SRLconst <x.Type> x [d])) +(TSTshiftRA (MOVWconst [c]) x [d]) -> (TSTconst [c] (SRAconst <x.Type> x [d])) +(TEQshiftLL (MOVWconst [c]) x [d]) -> (TEQconst [c] (SLLconst <x.Type> x [d])) +(TEQshiftRL (MOVWconst [c]) x [d]) -> (TEQconst [c] (SRLconst <x.Type> x [d])) +(TEQshiftRA (MOVWconst [c]) x [d]) -> (TEQconst [c] (SRAconst <x.Type> x [d])) +(CMNshiftLL (MOVWconst [c]) x [d]) -> (CMNconst [c] (SLLconst <x.Type> x [d])) +(CMNshiftRL (MOVWconst [c]) x [d]) -> (CMNconst [c] (SRLconst <x.Type> x [d])) +(CMNshiftRA (MOVWconst [c]) x [d]) -> (CMNconst [c] (SRAconst <x.Type> x [d])) (ADDshiftLLreg (MOVWconst [c]) x y) -> (ADDconst [c] (SLL <x.Type> x y)) (ADDshiftRLreg (MOVWconst [c]) x y) -> (ADDconst [c] (SRL <x.Type> x y)) @@ -1071,6 +1112,15 @@ (CMPshiftLLreg (MOVWconst [c]) x y) -> (InvertFlags (CMPconst [c] (SLL <x.Type> x y))) (CMPshiftRLreg (MOVWconst [c]) x y) -> 
(InvertFlags (CMPconst [c] (SRL <x.Type> x y))) (CMPshiftRAreg (MOVWconst [c]) x y) -> (InvertFlags (CMPconst [c] (SRA <x.Type> x y))) +(TSTshiftLLreg (MOVWconst [c]) x y) -> (TSTconst [c] (SLL <x.Type> x y)) +(TSTshiftRLreg (MOVWconst [c]) x y) -> (TSTconst [c] (SRL <x.Type> x y)) +(TSTshiftRAreg (MOVWconst [c]) x y) -> (TSTconst [c] (SRA <x.Type> x y)) +(TEQshiftLLreg (MOVWconst [c]) x y) -> (TEQconst [c] (SLL <x.Type> x y)) +(TEQshiftRLreg (MOVWconst [c]) x y) -> (TEQconst [c] (SRL <x.Type> x y)) +(TEQshiftRAreg (MOVWconst [c]) x y) -> (TEQconst [c] (SRA <x.Type> x y)) +(CMNshiftLLreg (MOVWconst [c]) x y) -> (CMNconst [c] (SLL <x.Type> x y)) +(CMNshiftRLreg (MOVWconst [c]) x y) -> (CMNconst [c] (SRL <x.Type> x y)) +(CMNshiftRAreg (MOVWconst [c]) x y) -> (CMNconst [c] (SRA <x.Type> x y)) // constant folding in *shift ops (ADDshiftLL x (MOVWconst [c]) [d]) -> (ADDconst x [int64(uint32(c)<<uint64(d))]) @@ -1119,6 +1169,15 @@ (CMPshiftLL x (MOVWconst [c]) [d]) -> (CMPconst x [int64(uint32(c)<<uint64(d))]) (CMPshiftRL x (MOVWconst [c]) [d]) -> (CMPconst x [int64(uint32(c)>>uint64(d))]) (CMPshiftRA x (MOVWconst [c]) [d]) -> (CMPconst x [int64(int32(c)>>uint64(d))]) +(TSTshiftLL x (MOVWconst [c]) [d]) -> (TSTconst x [int64(uint32(c)<<uint64(d))]) +(TSTshiftRL x (MOVWconst [c]) [d]) -> (TSTconst x [int64(uint32(c)>>uint64(d))]) +(TSTshiftRA x (MOVWconst [c]) [d]) -> (TSTconst x [int64(int32(c)>>uint64(d))]) +(TEQshiftLL x (MOVWconst [c]) [d]) -> (TEQconst x [int64(uint32(c)<<uint64(d))]) +(TEQshiftRL x (MOVWconst [c]) [d]) -> (TEQconst x [int64(uint32(c)>>uint64(d))]) +(TEQshiftRA x (MOVWconst [c]) [d]) -> (TEQconst x [int64(int32(c)>>uint64(d))]) +(CMNshiftLL x (MOVWconst [c]) [d]) -> (CMNconst x [int64(uint32(c)<<uint64(d))]) +(CMNshiftRL x (MOVWconst [c]) [d]) -> (CMNconst x [int64(uint32(c)>>uint64(d))]) +(CMNshiftRA x (MOVWconst [c]) [d]) -> (CMNconst x [int64(int32(c)>>uint64(d))]) (ADDshiftLLreg x y (MOVWconst [c])) -> (ADDshiftLL x y [c]) (ADDshiftRLreg x y 
(MOVWconst [c])) -> (ADDshiftRL x y [c]) @@ -1165,6 +1224,15 @@ (CMPshiftLLreg x y (MOVWconst [c])) -> (CMPshiftLL x y [c]) (CMPshiftRLreg x y (MOVWconst [c])) -> (CMPshiftRL x y [c]) (CMPshiftRAreg x y (MOVWconst [c])) -> (CMPshiftRA x y [c]) +(TSTshiftLLreg x y (MOVWconst [c])) -> (TSTshiftLL x y [c]) +(TSTshiftRLreg x y (MOVWconst [c])) -> (TSTshiftRL x y [c]) +(TSTshiftRAreg x y (MOVWconst [c])) -> (TSTshiftRA x y [c]) +(TEQshiftLLreg x y (MOVWconst [c])) -> (TEQshiftLL x y [c]) +(TEQshiftRLreg x y (MOVWconst [c])) -> (TEQshiftRL x y [c]) +(TEQshiftRAreg x y (MOVWconst [c])) -> (TEQshiftRA x y [c]) +(CMNshiftLLreg x y (MOVWconst [c])) -> (CMNshiftLL x y [c]) +(CMNshiftRLreg x y (MOVWconst [c])) -> (CMNshiftRL x y [c]) +(CMNshiftRAreg x y (MOVWconst [c])) -> (CMNshiftRA x y [c]) // Generate rotates (ADDshiftLL [c] (SRLconst x [32-c]) x) -> (SRRconst [32-c] x) @@ -1294,3 +1362,71 @@ // bit extraction (SRAconst (SLLconst x [c]) [d]) && objabi.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31 -> (BFX [(d-c)|(32-d)<<8] x) (SRLconst (SLLconst x [c]) [d]) && objabi.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31 -> (BFXU [(d-c)|(32-d)<<8] x) + +// comparison simplification +(CMP x (RSBconst [0] y)) -> (CMN x y) +(CMN x (RSBconst [0] y)) -> (CMP x y) +(EQ (CMPconst [0] (SUB x y)) yes no) -> (EQ (CMP x y) yes no) +(EQ (CMPconst [0] (SUBconst [c] x)) yes no) -> (EQ (CMPconst [c] x) yes no) +(EQ (CMPconst [0] (SUBshiftLL x y [c])) yes no) -> (EQ (CMPshiftLL x y [c]) yes no) +(EQ (CMPconst [0] (SUBshiftRL x y [c])) yes no) -> (EQ (CMPshiftRL x y [c]) yes no) +(EQ (CMPconst [0] (SUBshiftRA x y [c])) yes no) -> (EQ (CMPshiftRA x y [c]) yes no) +(EQ (CMPconst [0] (SUBshiftLLreg x y z)) yes no) -> (EQ (CMPshiftLLreg x y z) yes no) +(EQ (CMPconst [0] (SUBshiftRLreg x y z)) yes no) -> (EQ (CMPshiftRLreg x y z) yes no) +(EQ (CMPconst [0] (SUBshiftRAreg x y z)) yes no) -> (EQ (CMPshiftRAreg x y z) yes no) +(NE (CMPconst [0] (SUB x y)) yes no) -> (NE (CMP x y) yes no) +(NE 
(CMPconst [0] (SUBconst [c] x)) yes no) -> (NE (CMPconst [c] x) yes no) +(NE (CMPconst [0] (SUBshiftLL x y [c])) yes no) -> (NE (CMPshiftLL x y [c]) yes no) +(NE (CMPconst [0] (SUBshiftRL x y [c])) yes no) -> (NE (CMPshiftRL x y [c]) yes no) +(NE (CMPconst [0] (SUBshiftRA x y [c])) yes no) -> (NE (CMPshiftRA x y [c]) yes no) +(NE (CMPconst [0] (SUBshiftLLreg x y z)) yes no) -> (NE (CMPshiftLLreg x y z) yes no) +(NE (CMPconst [0] (SUBshiftRLreg x y z)) yes no) -> (NE (CMPshiftRLreg x y z) yes no) +(NE (CMPconst [0] (SUBshiftRAreg x y z)) yes no) -> (NE (CMPshiftRAreg x y z) yes no) +(EQ (CMPconst [0] (ADD x y)) yes no) -> (EQ (CMN x y) yes no) +(EQ (CMPconst [0] (ADDconst [c] x)) yes no) -> (EQ (CMNconst [c] x) yes no) +(EQ (CMPconst [0] (ADDshiftLL x y [c])) yes no) -> (EQ (CMNshiftLL x y [c]) yes no) +(EQ (CMPconst [0] (ADDshiftRL x y [c])) yes no) -> (EQ (CMNshiftRL x y [c]) yes no) +(EQ (CMPconst [0] (ADDshiftRA x y [c])) yes no) -> (EQ (CMNshiftRA x y [c]) yes no) +(EQ (CMPconst [0] (ADDshiftLLreg x y z)) yes no) -> (EQ (CMNshiftLLreg x y z) yes no) +(EQ (CMPconst [0] (ADDshiftRLreg x y z)) yes no) -> (EQ (CMNshiftRLreg x y z) yes no) +(EQ (CMPconst [0] (ADDshiftRAreg x y z)) yes no) -> (EQ (CMNshiftRAreg x y z) yes no) +(NE (CMPconst [0] (ADD x y)) yes no) -> (NE (CMN x y) yes no) +(NE (CMPconst [0] (ADDconst [c] x)) yes no) -> (NE (CMNconst [c] x) yes no) +(NE (CMPconst [0] (ADDshiftLL x y [c])) yes no) -> (NE (CMNshiftLL x y [c]) yes no) +(NE (CMPconst [0] (ADDshiftRL x y [c])) yes no) -> (NE (CMNshiftRL x y [c]) yes no) +(NE (CMPconst [0] (ADDshiftRA x y [c])) yes no) -> (NE (CMNshiftRA x y [c]) yes no) +(NE (CMPconst [0] (ADDshiftLLreg x y z)) yes no) -> (NE (CMNshiftLLreg x y z) yes no) +(NE (CMPconst [0] (ADDshiftRLreg x y z)) yes no) -> (NE (CMNshiftRLreg x y z) yes no) +(NE (CMPconst [0] (ADDshiftRAreg x y z)) yes no) -> (NE (CMNshiftRAreg x y z) yes no) +(EQ (CMPconst [0] (AND x y)) yes no) -> (EQ (TST x y) yes no) +(EQ (CMPconst [0] (ANDconst [c] x)) 
yes no) -> (EQ (TSTconst [c] x) yes no) +(EQ (CMPconst [0] (ANDshiftLL x y [c])) yes no) -> (EQ (TSTshiftLL x y [c]) yes no) +(EQ (CMPconst [0] (ANDshiftRL x y [c])) yes no) -> (EQ (TSTshiftRL x y [c]) yes no) +(EQ (CMPconst [0] (ANDshiftRA x y [c])) yes no) -> (EQ (TSTshiftRA x y [c]) yes no) +(EQ (CMPconst [0] (ANDshiftLLreg x y z)) yes no) -> (EQ (TSTshiftLLreg x y z) yes no) +(EQ (CMPconst [0] (ANDshiftRLreg x y z)) yes no) -> (EQ (TSTshiftRLreg x y z) yes no) +(EQ (CMPconst [0] (ANDshiftRAreg x y z)) yes no) -> (EQ (TSTshiftRAreg x y z) yes no) +(NE (CMPconst [0] (AND x y)) yes no) -> (NE (TST x y) yes no) +(NE (CMPconst [0] (ANDconst [c] x)) yes no) -> (NE (TSTconst [c] x) yes no) +(NE (CMPconst [0] (ANDshiftLL x y [c])) yes no) -> (NE (TSTshiftLL x y [c]) yes no) +(NE (CMPconst [0] (ANDshiftRL x y [c])) yes no) -> (NE (TSTshiftRL x y [c]) yes no) +(NE (CMPconst [0] (ANDshiftRA x y [c])) yes no) -> (NE (TSTshiftRA x y [c]) yes no) +(NE (CMPconst [0] (ANDshiftLLreg x y z)) yes no) -> (NE (TSTshiftLLreg x y z) yes no) +(NE (CMPconst [0] (ANDshiftRLreg x y z)) yes no) -> (NE (TSTshiftRLreg x y z) yes no) +(NE (CMPconst [0] (ANDshiftRAreg x y z)) yes no) -> (NE (TSTshiftRAreg x y z) yes no) +(EQ (CMPconst [0] (XOR x y)) yes no) -> (EQ (TEQ x y) yes no) +(EQ (CMPconst [0] (XORconst [c] x)) yes no) -> (EQ (TEQconst [c] x) yes no) +(EQ (CMPconst [0] (XORshiftLL x y [c])) yes no) -> (EQ (TEQshiftLL x y [c]) yes no) +(EQ (CMPconst [0] (XORshiftRL x y [c])) yes no) -> (EQ (TEQshiftRL x y [c]) yes no) +(EQ (CMPconst [0] (XORshiftRA x y [c])) yes no) -> (EQ (TEQshiftRA x y [c]) yes no) +(EQ (CMPconst [0] (XORshiftLLreg x y z)) yes no) -> (EQ (TEQshiftLLreg x y z) yes no) +(EQ (CMPconst [0] (XORshiftRLreg x y z)) yes no) -> (EQ (TEQshiftRLreg x y z) yes no) +(EQ (CMPconst [0] (XORshiftRAreg x y z)) yes no) -> (EQ (TEQshiftRAreg x y z) yes no) +(NE (CMPconst [0] (XOR x y)) yes no) -> (NE (TEQ x y) yes no) +(NE (CMPconst [0] (XORconst [c] x)) yes no) -> (NE (TEQconst [c] x) 
yes no) +(NE (CMPconst [0] (XORshiftLL x y [c])) yes no) -> (NE (TEQshiftLL x y [c]) yes no) +(NE (CMPconst [0] (XORshiftRL x y [c])) yes no) -> (NE (TEQshiftRL x y [c]) yes no) +(NE (CMPconst [0] (XORshiftRA x y [c])) yes no) -> (NE (TEQshiftRA x y [c]) yes no) +(NE (CMPconst [0] (XORshiftLLreg x y z)) yes no) -> (NE (TEQshiftLLreg x y z) yes no) +(NE (CMPconst [0] (XORshiftRLreg x y z)) yes no) -> (NE (TEQshiftRLreg x y z) yes no) +(NE (CMPconst [0] (XORshiftRAreg x y z)) yes no) -> (NE (TEQshiftRAreg x y z) yes no) diff --git a/src/cmd/compile/internal/ssa/gen/ARMOps.go b/src/cmd/compile/internal/ssa/gen/ARMOps.go index 928236b73c..d16675fddb 100644 --- a/src/cmd/compile/internal/ssa/gen/ARMOps.go +++ b/src/cmd/compile/internal/ssa/gen/ARMOps.go @@ -314,7 +314,7 @@ func init() { // comparisons {name: "CMP", argLength: 2, reg: gp2flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1 {name: "CMPconst", argLength: 1, reg: gp1flags, asm: "CMP", aux: "Int32", typ: "Flags"}, // arg0 compare to auxInt - {name: "CMN", argLength: 2, reg: gp2flags, asm: "CMN", typ: "Flags"}, // arg0 compare to -arg1 + {name: "CMN", argLength: 2, reg: gp2flags, asm: "CMN", typ: "Flags", commutative: true}, // arg0 compare to -arg1 {name: "CMNconst", argLength: 1, reg: gp1flags, asm: "CMN", aux: "Int32", typ: "Flags"}, // arg0 compare to -auxInt {name: "TST", argLength: 2, reg: gp2flags, asm: "TST", typ: "Flags", commutative: true}, // arg0 & arg1 compare to 0 {name: "TSTconst", argLength: 1, reg: gp1flags, asm: "TST", aux: "Int32", typ: "Flags"}, // arg0 & auxInt compare to 0 @@ -326,10 +326,28 @@ func init() { {name: "CMPshiftLL", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int32", typ: "Flags"}, // arg0 compare to arg1<<auxInt {name: "CMPshiftRL", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int32", typ: "Flags"}, // arg0 compare to arg1>>auxInt, unsigned shift {name: "CMPshiftRA", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int32", typ: "Flags"}, // arg0 compare to 
arg1>>auxInt, signed shift + {name: "CMNshiftLL", argLength: 2, reg: gp2flags, asm: "CMN", aux: "Int32", typ: "Flags"}, // arg0 compare to -(arg1<<auxInt) + {name: "CMNshiftRL", argLength: 2, reg: gp2flags, asm: "CMN", aux: "Int32", typ: "Flags"}, // arg0 compare to -(arg1>>auxInt), unsigned shift + {name: "CMNshiftRA", argLength: 2, reg: gp2flags, asm: "CMN", aux: "Int32", typ: "Flags"}, // arg0 compare to -(arg1>>auxInt), signed shift + {name: "TSTshiftLL", argLength: 2, reg: gp2flags, asm: "TST", aux: "Int32", typ: "Flags"}, // arg0 & (arg1<<auxInt) compare to 0 + {name: "TSTshiftRL", argLength: 2, reg: gp2flags, asm: "TST", aux: "Int32", typ: "Flags"}, // arg0 & (arg1>>auxInt) compare to 0, unsigned shift + {name: "TSTshiftRA", argLength: 2, reg: gp2flags, asm: "TST", aux: "Int32", typ: "Flags"}, // arg0 & (arg1>>auxInt) compare to 0, signed shift + {name: "TEQshiftLL", argLength: 2, reg: gp2flags, asm: "TEQ", aux: "Int32", typ: "Flags"}, // arg0 ^ (arg1<<auxInt) compare to 0 + {name: "TEQshiftRL", argLength: 2, reg: gp2flags, asm: "TEQ", aux: "Int32", typ: "Flags"}, // arg0 ^ (arg1>>auxInt) compare to 0, unsigned shift + {name: "TEQshiftRA", argLength: 2, reg: gp2flags, asm: "TEQ", aux: "Int32", typ: "Flags"}, // arg0 ^ (arg1>>auxInt) compare to 0, signed shift {name: "CMPshiftLLreg", argLength: 3, reg: gp3flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1<<arg2 {name: "CMPshiftRLreg", argLength: 3, reg: gp3flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1>>arg2, unsigned shift {name: "CMPshiftRAreg", argLength: 3, reg: gp3flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1>>arg2, signed shift + {name: "CMNshiftLLreg", argLength: 3, reg: gp3flags, asm: "CMN", typ: "Flags"}, // arg0 + (arg1<<arg2) compare to 0 + {name: "CMNshiftRLreg", argLength: 3, reg: gp3flags, asm: "CMN", typ: "Flags"}, // arg0 + (arg1>>arg2) compare to 0, unsigned shift + {name: "CMNshiftRAreg", argLength: 3, reg: gp3flags, asm: "CMN", typ: "Flags"}, // arg0 + 
(arg1>>arg2) compare to 0, signed shift + {name: "TSTshiftLLreg", argLength: 3, reg: gp3flags, asm: "TST", typ: "Flags"}, // arg0 & (arg1<<arg2) compare to 0 + {name: "TSTshiftRLreg", argLength: 3, reg: gp3flags, asm: "TST", typ: "Flags"}, // arg0 & (arg1>>arg2) compare to 0, unsigned shift + {name: "TSTshiftRAreg", argLength: 3, reg: gp3flags, asm: "TST", typ: "Flags"}, // arg0 & (arg1>>arg2) compare to 0, signed shift + {name: "TEQshiftLLreg", argLength: 3, reg: gp3flags, asm: "TEQ", typ: "Flags"}, // arg0 ^ (arg1<<arg2) compare to 0 + {name: "TEQshiftRLreg", argLength: 3, reg: gp3flags, asm: "TEQ", typ: "Flags"}, // arg0 ^ (arg1>>arg2) compare to 0, unsigned shift + {name: "TEQshiftRAreg", argLength: 3, reg: gp3flags, asm: "TEQ", typ: "Flags"}, // arg0 ^ (arg1>>arg2) compare to 0, signed shift {name: "CMPF0", argLength: 1, reg: fp1flags, asm: "CMPF", typ: "Flags"}, // arg0 compare to 0, float32 {name: "CMPD0", argLength: 1, reg: fp1flags, asm: "CMPD", typ: "Flags"}, // arg0 compare to 0, float64 diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index fccadf342d..9c289bacaf 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -847,9 +847,27 @@ const ( OpARMCMPshiftLL OpARMCMPshiftRL OpARMCMPshiftRA + OpARMCMNshiftLL + OpARMCMNshiftRL + OpARMCMNshiftRA + OpARMTSTshiftLL + OpARMTSTshiftRL + OpARMTSTshiftRA + OpARMTEQshiftLL + OpARMTEQshiftRL + OpARMTEQshiftRA OpARMCMPshiftLLreg OpARMCMPshiftRLreg OpARMCMPshiftRAreg + OpARMCMNshiftLLreg + OpARMCMNshiftRLreg + OpARMCMNshiftRAreg + OpARMTSTshiftLLreg + OpARMTSTshiftRLreg + OpARMTSTshiftRAreg + OpARMTEQshiftLLreg + OpARMTEQshiftRLreg + OpARMTEQshiftRAreg OpARMCMPF0 OpARMCMPD0 OpARMMOVWconst @@ -10570,9 +10588,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "CMN", - argLen: 2, - asm: arm.ACMN, + name: "CMN", + argLen: 2, + commutative: true, + asm: arm.ACMN, reg: regInfo{ inputs: []inputInfo{ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 
R8 R9 g R12 R14 @@ -10695,6 +10714,114 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "CMNshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ACMN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + }, + }, + { + name: "CMNshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.ACMN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + }, + }, + { + name: "CMNshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ACMN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + }, + }, + { + name: "TSTshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ATST, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + }, + }, + { + name: "TSTshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.ATST, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + }, + }, + { + name: "TSTshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ATST, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + }, + }, + { + name: "TEQshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ATEQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + }, + }, + { + name: "TEQshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.ATEQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 
R4 R5 R6 R7 R8 R9 g R12 R14 + }, + }, + }, + { + name: "TEQshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ATEQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + }, + }, { name: "CMPshiftLLreg", argLen: 3, @@ -10731,6 +10858,114 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "CMNshiftLLreg", + argLen: 3, + asm: arm.ACMN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "CMNshiftRLreg", + argLen: 3, + asm: arm.ACMN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "CMNshiftRAreg", + argLen: 3, + asm: arm.ACMN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "TSTshiftLLreg", + argLen: 3, + asm: arm.ATST, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "TSTshiftRLreg", + argLen: 3, + asm: arm.ATST, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "TSTshiftRAreg", + argLen: 3, + asm: arm.ATST, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 
21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "TEQshiftLLreg", + argLen: 3, + asm: arm.ATEQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "TEQshiftRLreg", + argLen: 3, + asm: arm.ATEQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "TEQshiftRAreg", + argLen: 3, + asm: arm.ATEQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, { name: "CMPF0", argLen: 1, diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go index 3514886e8e..fa22ff2502 100644 --- a/src/cmd/compile/internal/ssa/rewriteARM.go +++ b/src/cmd/compile/internal/ssa/rewriteARM.go @@ -101,6 +101,22 @@ func rewriteValueARM(v *Value) bool { return rewriteValueARM_OpARMBICshiftRL_0(v) case OpARMBICshiftRLreg: return rewriteValueARM_OpARMBICshiftRLreg_0(v) + case OpARMCMN: + return rewriteValueARM_OpARMCMN_0(v) || rewriteValueARM_OpARMCMN_10(v) + case OpARMCMNconst: + return rewriteValueARM_OpARMCMNconst_0(v) + case OpARMCMNshiftLL: + return rewriteValueARM_OpARMCMNshiftLL_0(v) + case OpARMCMNshiftLLreg: + return rewriteValueARM_OpARMCMNshiftLLreg_0(v) + case OpARMCMNshiftRA: + return rewriteValueARM_OpARMCMNshiftRA_0(v) + case OpARMCMNshiftRAreg: + return rewriteValueARM_OpARMCMNshiftRAreg_0(v) + case OpARMCMNshiftRL: + return rewriteValueARM_OpARMCMNshiftRL_0(v) + case OpARMCMNshiftRLreg: + return rewriteValueARM_OpARMCMNshiftRLreg_0(v) case OpARMCMOVWHSconst: return rewriteValueARM_OpARMCMOVWHSconst_0(v) 
case OpARMCMOVWLSconst: @@ -361,6 +377,38 @@ func rewriteValueARM(v *Value) bool { return rewriteValueARM_OpARMSUBshiftRL_0(v) case OpARMSUBshiftRLreg: return rewriteValueARM_OpARMSUBshiftRLreg_0(v) + case OpARMTEQ: + return rewriteValueARM_OpARMTEQ_0(v) || rewriteValueARM_OpARMTEQ_10(v) + case OpARMTEQconst: + return rewriteValueARM_OpARMTEQconst_0(v) + case OpARMTEQshiftLL: + return rewriteValueARM_OpARMTEQshiftLL_0(v) + case OpARMTEQshiftLLreg: + return rewriteValueARM_OpARMTEQshiftLLreg_0(v) + case OpARMTEQshiftRA: + return rewriteValueARM_OpARMTEQshiftRA_0(v) + case OpARMTEQshiftRAreg: + return rewriteValueARM_OpARMTEQshiftRAreg_0(v) + case OpARMTEQshiftRL: + return rewriteValueARM_OpARMTEQshiftRL_0(v) + case OpARMTEQshiftRLreg: + return rewriteValueARM_OpARMTEQshiftRLreg_0(v) + case OpARMTST: + return rewriteValueARM_OpARMTST_0(v) || rewriteValueARM_OpARMTST_10(v) + case OpARMTSTconst: + return rewriteValueARM_OpARMTSTconst_0(v) + case OpARMTSTshiftLL: + return rewriteValueARM_OpARMTSTshiftLL_0(v) + case OpARMTSTshiftLLreg: + return rewriteValueARM_OpARMTSTshiftLLreg_0(v) + case OpARMTSTshiftRA: + return rewriteValueARM_OpARMTSTshiftRA_0(v) + case OpARMTSTshiftRAreg: + return rewriteValueARM_OpARMTSTshiftRAreg_0(v) + case OpARMTSTshiftRL: + return rewriteValueARM_OpARMTSTshiftRL_0(v) + case OpARMTSTshiftRLreg: + return rewriteValueARM_OpARMTSTshiftRLreg_0(v) case OpARMXOR: return rewriteValueARM_OpARMXOR_0(v) || rewriteValueARM_OpARMXOR_10(v) case OpARMXORconst: @@ -4410,663 +4458,851 @@ func rewriteValueARM_OpARMBICshiftRLreg_0(v *Value) bool { } return false } -func rewriteValueARM_OpARMCMOVWHSconst_0(v *Value) bool { - // match: (CMOVWHSconst _ (FlagEQ) [c]) - // cond: - // result: (MOVWconst [c]) - for { - c := v.AuxInt - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpARMFlagEQ { - break - } - v.reset(OpARMMOVWconst) - v.AuxInt = c - return true - } - // match: (CMOVWHSconst x (FlagLT_ULT)) +func rewriteValueARM_OpARMCMN_0(v *Value) bool { + // match: 
(CMN x (MOVWconst [c])) // cond: - // result: x + // result: (CMNconst [c] x) for { _ = v.Args[1] x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARMFlagLT_ULT { + if v_1.Op != OpARMMOVWconst { break } - v.reset(OpCopy) - v.Type = x.Type + c := v_1.AuxInt + v.reset(OpARMCMNconst) + v.AuxInt = c v.AddArg(x) return true } - // match: (CMOVWHSconst _ (FlagLT_UGT) [c]) + // match: (CMN (MOVWconst [c]) x) // cond: - // result: (MOVWconst [c]) + // result: (CMNconst [c] x) for { - c := v.AuxInt _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpARMFlagLT_UGT { + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { break } - v.reset(OpARMMOVWconst) + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARMCMNconst) v.AuxInt = c + v.AddArg(x) return true } - // match: (CMOVWHSconst x (FlagGT_ULT)) + // match: (CMN x (SLLconst [c] y)) // cond: - // result: x + // result: (CMNshiftLL x y [c]) for { _ = v.Args[1] x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARMFlagGT_ULT { + if v_1.Op != OpARMSLLconst { break } - v.reset(OpCopy) - v.Type = x.Type + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMCMNshiftLL) + v.AuxInt = c v.AddArg(x) + v.AddArg(y) return true } - // match: (CMOVWHSconst _ (FlagGT_UGT) [c]) + // match: (CMN (SLLconst [c] y) x) // cond: - // result: (MOVWconst [c]) + // result: (CMNshiftLL x y [c]) for { - c := v.AuxInt _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpARMFlagGT_UGT { + v_0 := v.Args[0] + if v_0.Op != OpARMSLLconst { break } - v.reset(OpARMMOVWconst) + c := v_0.AuxInt + y := v_0.Args[0] + x := v.Args[1] + v.reset(OpARMCMNshiftLL) v.AuxInt = c + v.AddArg(x) + v.AddArg(y) return true } - // match: (CMOVWHSconst x (InvertFlags flags) [c]) + // match: (CMN x (SRLconst [c] y)) // cond: - // result: (CMOVWLSconst x flags [c]) + // result: (CMNshiftRL x y [c]) for { - c := v.AuxInt _ = v.Args[1] x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARMInvertFlags { + if v_1.Op != OpARMSRLconst { break } - flags := v_1.Args[0] - v.reset(OpARMCMOVWLSconst) + c := 
v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMCMNshiftRL) v.AuxInt = c v.AddArg(x) - v.AddArg(flags) - return true - } - return false -} -func rewriteValueARM_OpARMCMOVWLSconst_0(v *Value) bool { - // match: (CMOVWLSconst _ (FlagEQ) [c]) - // cond: - // result: (MOVWconst [c]) - for { - c := v.AuxInt - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpARMFlagEQ { - break - } - v.reset(OpARMMOVWconst) - v.AuxInt = c + v.AddArg(y) return true } - // match: (CMOVWLSconst _ (FlagLT_ULT) [c]) + // match: (CMN (SRLconst [c] y) x) // cond: - // result: (MOVWconst [c]) + // result: (CMNshiftRL x y [c]) for { - c := v.AuxInt _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpARMFlagLT_ULT { + v_0 := v.Args[0] + if v_0.Op != OpARMSRLconst { break } - v.reset(OpARMMOVWconst) + c := v_0.AuxInt + y := v_0.Args[0] + x := v.Args[1] + v.reset(OpARMCMNshiftRL) v.AuxInt = c + v.AddArg(x) + v.AddArg(y) return true } - // match: (CMOVWLSconst x (FlagLT_UGT)) + // match: (CMN x (SRAconst [c] y)) // cond: - // result: x + // result: (CMNshiftRA x y [c]) for { _ = v.Args[1] x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARMFlagLT_UGT { + if v_1.Op != OpARMSRAconst { break } - v.reset(OpCopy) - v.Type = x.Type + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMCMNshiftRA) + v.AuxInt = c v.AddArg(x) + v.AddArg(y) return true } - // match: (CMOVWLSconst _ (FlagGT_ULT) [c]) + // match: (CMN (SRAconst [c] y) x) // cond: - // result: (MOVWconst [c]) + // result: (CMNshiftRA x y [c]) for { - c := v.AuxInt _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpARMFlagGT_ULT { + v_0 := v.Args[0] + if v_0.Op != OpARMSRAconst { break } - v.reset(OpARMMOVWconst) + c := v_0.AuxInt + y := v_0.Args[0] + x := v.Args[1] + v.reset(OpARMCMNshiftRA) v.AuxInt = c + v.AddArg(x) + v.AddArg(y) return true } - // match: (CMOVWLSconst x (FlagGT_UGT)) + // match: (CMN x (SLL y z)) // cond: - // result: x + // result: (CMNshiftLLreg x y z) for { _ = v.Args[1] x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != 
OpARMFlagGT_UGT { + if v_1.Op != OpARMSLL { break } - v.reset(OpCopy) - v.Type = x.Type + _ = v_1.Args[1] + y := v_1.Args[0] + z := v_1.Args[1] + v.reset(OpARMCMNshiftLLreg) v.AddArg(x) + v.AddArg(y) + v.AddArg(z) return true } - // match: (CMOVWLSconst x (InvertFlags flags) [c]) + // match: (CMN (SLL y z) x) // cond: - // result: (CMOVWHSconst x flags [c]) + // result: (CMNshiftLLreg x y z) for { - c := v.AuxInt _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMInvertFlags { + v_0 := v.Args[0] + if v_0.Op != OpARMSLL { break } - flags := v_1.Args[0] - v.reset(OpARMCMOVWHSconst) - v.AuxInt = c + _ = v_0.Args[1] + y := v_0.Args[0] + z := v_0.Args[1] + x := v.Args[1] + v.reset(OpARMCMNshiftLLreg) v.AddArg(x) - v.AddArg(flags) + v.AddArg(y) + v.AddArg(z) return true } return false } -func rewriteValueARM_OpARMCMP_0(v *Value) bool { - b := v.Block - _ = b - // match: (CMP x (MOVWconst [c])) +func rewriteValueARM_OpARMCMN_10(v *Value) bool { + // match: (CMN x (SRL y z)) // cond: - // result: (CMPconst [c] x) + // result: (CMNshiftRLreg x y z) for { _ = v.Args[1] x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARMMOVWconst { + if v_1.Op != OpARMSRL { break } - c := v_1.AuxInt - v.reset(OpARMCMPconst) - v.AuxInt = c + _ = v_1.Args[1] + y := v_1.Args[0] + z := v_1.Args[1] + v.reset(OpARMCMNshiftRLreg) v.AddArg(x) + v.AddArg(y) + v.AddArg(z) return true } - // match: (CMP (MOVWconst [c]) x) + // match: (CMN (SRL y z) x) // cond: - // result: (InvertFlags (CMPconst [c] x)) + // result: (CMNshiftRLreg x y z) for { _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARMMOVWconst { + if v_0.Op != OpARMSRL { break } - c := v_0.AuxInt + _ = v_0.Args[1] + y := v_0.Args[0] + z := v_0.Args[1] x := v.Args[1] - v.reset(OpARMInvertFlags) - v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) - v0.AuxInt = c - v0.AddArg(x) - v.AddArg(v0) + v.reset(OpARMCMNshiftRLreg) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) return true } - // match: (CMP x (SLLconst [c] y)) + // 
match: (CMN x (SRA y z)) // cond: - // result: (CMPshiftLL x y [c]) + // result: (CMNshiftRAreg x y z) for { _ = v.Args[1] x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARMSLLconst { + if v_1.Op != OpARMSRA { break } - c := v_1.AuxInt + _ = v_1.Args[1] y := v_1.Args[0] - v.reset(OpARMCMPshiftLL) - v.AuxInt = c + z := v_1.Args[1] + v.reset(OpARMCMNshiftRAreg) v.AddArg(x) v.AddArg(y) + v.AddArg(z) return true } - // match: (CMP (SLLconst [c] y) x) + // match: (CMN (SRA y z) x) // cond: - // result: (InvertFlags (CMPshiftLL x y [c])) + // result: (CMNshiftRAreg x y z) for { _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARMSLLconst { + if v_0.Op != OpARMSRA { break } - c := v_0.AuxInt + _ = v_0.Args[1] y := v_0.Args[0] + z := v_0.Args[1] x := v.Args[1] - v.reset(OpARMInvertFlags) - v0 := b.NewValue0(v.Pos, OpARMCMPshiftLL, types.TypeFlags) - v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v.reset(OpARMCMNshiftRAreg) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) return true } - // match: (CMP x (SRLconst [c] y)) + // match: (CMN x (RSBconst [0] y)) // cond: - // result: (CMPshiftRL x y [c]) + // result: (CMP x y) for { _ = v.Args[1] x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARMSRLconst { + if v_1.Op != OpARMRSBconst { + break + } + if v_1.AuxInt != 0 { break } - c := v_1.AuxInt y := v_1.Args[0] - v.reset(OpARMCMPshiftRL) - v.AuxInt = c + v.reset(OpARMCMP) v.AddArg(x) v.AddArg(y) return true } - // match: (CMP (SRLconst [c] y) x) + // match: (CMN (RSBconst [0] y) x) // cond: - // result: (InvertFlags (CMPshiftRL x y [c])) + // result: (CMP x y) for { _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARMSRLconst { + if v_0.Op != OpARMRSBconst { + break + } + if v_0.AuxInt != 0 { break } - c := v_0.AuxInt y := v_0.Args[0] x := v.Args[1] - v.reset(OpARMInvertFlags) - v0 := b.NewValue0(v.Pos, OpARMCMPshiftRL, types.TypeFlags) - v0.AuxInt = c - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v.reset(OpARMCMP) + v.AddArg(x) + v.AddArg(y) return true } - // 
match: (CMP x (SRAconst [c] y)) - // cond: - // result: (CMPshiftRA x y [c]) + return false +} +func rewriteValueARM_OpARMCMNconst_0(v *Value) bool { + // match: (CMNconst (MOVWconst [x]) [y]) + // cond: int32(x)==int32(-y) + // result: (FlagEQ) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSRAconst { + y := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { break } - c := v_1.AuxInt - y := v_1.Args[0] - v.reset(OpARMCMPshiftRA) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + x := v_0.AuxInt + if !(int32(x) == int32(-y)) { + break + } + v.reset(OpARMFlagEQ) return true } - // match: (CMP (SRAconst [c] y) x) + // match: (CMNconst (MOVWconst [x]) [y]) + // cond: int32(x)<int32(-y) && uint32(x)<uint32(-y) + // result: (FlagLT_ULT) + for { + y := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + x := v_0.AuxInt + if !(int32(x) < int32(-y) && uint32(x) < uint32(-y)) { + break + } + v.reset(OpARMFlagLT_ULT) + return true + } + // match: (CMNconst (MOVWconst [x]) [y]) + // cond: int32(x)<int32(-y) && uint32(x)>uint32(-y) + // result: (FlagLT_UGT) + for { + y := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + x := v_0.AuxInt + if !(int32(x) < int32(-y) && uint32(x) > uint32(-y)) { + break + } + v.reset(OpARMFlagLT_UGT) + return true + } + // match: (CMNconst (MOVWconst [x]) [y]) + // cond: int32(x)>int32(-y) && uint32(x)<uint32(-y) + // result: (FlagGT_ULT) + for { + y := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + x := v_0.AuxInt + if !(int32(x) > int32(-y) && uint32(x) < uint32(-y)) { + break + } + v.reset(OpARMFlagGT_ULT) + return true + } + // match: (CMNconst (MOVWconst [x]) [y]) + // cond: int32(x)>int32(-y) && uint32(x)>uint32(-y) + // result: (FlagGT_UGT) + for { + y := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + x := v_0.AuxInt + if !(int32(x) > int32(-y) && uint32(x) > uint32(-y)) { + break + } + 
v.reset(OpARMFlagGT_UGT) + return true + } + return false +} +func rewriteValueARM_OpARMCMNshiftLL_0(v *Value) bool { + b := v.Block + _ = b + // match: (CMNshiftLL (MOVWconst [c]) x [d]) // cond: - // result: (InvertFlags (CMPshiftRA x y [c])) + // result: (CMNconst [c] (SLLconst <x.Type> x [d])) for { + d := v.AuxInt _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARMSRAconst { + if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - y := v_0.Args[0] x := v.Args[1] - v.reset(OpARMInvertFlags) - v0 := b.NewValue0(v.Pos, OpARMCMPshiftRA, types.TypeFlags) - v0.AuxInt = c + v.reset(OpARMCMNconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = d v0.AddArg(x) - v0.AddArg(y) v.AddArg(v0) return true } - // match: (CMP x (SLL y z)) + // match: (CMNshiftLL x (MOVWconst [c]) [d]) // cond: - // result: (CMPshiftLLreg x y z) + // result: (CMNconst x [int64(uint32(c)<<uint64(d))]) for { + d := v.AuxInt _ = v.Args[1] x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARMSLL { + if v_1.Op != OpARMMOVWconst { break } - _ = v_1.Args[1] - y := v_1.Args[0] - z := v_1.Args[1] - v.reset(OpARMCMPshiftLLreg) + c := v_1.AuxInt + v.reset(OpARMCMNconst) + v.AuxInt = int64(uint32(c) << uint64(d)) v.AddArg(x) - v.AddArg(y) - v.AddArg(z) return true } - // match: (CMP (SLL y z) x) + return false +} +func rewriteValueARM_OpARMCMNshiftLLreg_0(v *Value) bool { + b := v.Block + _ = b + // match: (CMNshiftLLreg (MOVWconst [c]) x y) // cond: - // result: (InvertFlags (CMPshiftLLreg x y z)) + // result: (CMNconst [c] (SLL <x.Type> x y)) for { - _ = v.Args[1] + _ = v.Args[2] v_0 := v.Args[0] - if v_0.Op != OpARMSLL { + if v_0.Op != OpARMMOVWconst { break } - _ = v_0.Args[1] - y := v_0.Args[0] - z := v_0.Args[1] + c := v_0.AuxInt x := v.Args[1] - v.reset(OpARMInvertFlags) - v0 := b.NewValue0(v.Pos, OpARMCMPshiftLLreg, types.TypeFlags) + y := v.Args[2] + v.reset(OpARMCMNconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) v0.AddArg(x) v0.AddArg(y) - 
v0.AddArg(z) v.AddArg(v0) return true } - return false -} -func rewriteValueARM_OpARMCMP_10(v *Value) bool { - b := v.Block - _ = b - // match: (CMP x (SRL y z)) + // match: (CMNshiftLLreg x y (MOVWconst [c])) // cond: - // result: (CMPshiftRLreg x y z) + // result: (CMNshiftLL x y [c]) for { - _ = v.Args[1] + _ = v.Args[2] x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSRL { + y := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARMMOVWconst { break } - _ = v_1.Args[1] - y := v_1.Args[0] - z := v_1.Args[1] - v.reset(OpARMCMPshiftRLreg) + c := v_2.AuxInt + v.reset(OpARMCMNshiftLL) + v.AuxInt = c v.AddArg(x) v.AddArg(y) - v.AddArg(z) return true } - // match: (CMP (SRL y z) x) + return false +} +func rewriteValueARM_OpARMCMNshiftRA_0(v *Value) bool { + b := v.Block + _ = b + // match: (CMNshiftRA (MOVWconst [c]) x [d]) // cond: - // result: (InvertFlags (CMPshiftRLreg x y z)) + // result: (CMNconst [c] (SRAconst <x.Type> x [d])) for { + d := v.AuxInt _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARMSRL { + if v_0.Op != OpARMMOVWconst { break } - _ = v_0.Args[1] - y := v_0.Args[0] - z := v_0.Args[1] + c := v_0.AuxInt x := v.Args[1] - v.reset(OpARMInvertFlags) - v0 := b.NewValue0(v.Pos, OpARMCMPshiftRLreg, types.TypeFlags) + v.reset(OpARMCMNconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type) + v0.AuxInt = d v0.AddArg(x) - v0.AddArg(y) - v0.AddArg(z) v.AddArg(v0) return true } - // match: (CMP x (SRA y z)) + // match: (CMNshiftRA x (MOVWconst [c]) [d]) // cond: - // result: (CMPshiftRAreg x y z) + // result: (CMNconst x [int64(int32(c)>>uint64(d))]) for { + d := v.AuxInt _ = v.Args[1] x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARMSRA { + if v_1.Op != OpARMMOVWconst { break } - _ = v_1.Args[1] - y := v_1.Args[0] - z := v_1.Args[1] - v.reset(OpARMCMPshiftRAreg) + c := v_1.AuxInt + v.reset(OpARMCMNconst) + v.AuxInt = int64(int32(c) >> uint64(d)) v.AddArg(x) - v.AddArg(y) - v.AddArg(z) return true } - // match: (CMP (SRA y z) x) + return 
false +} +func rewriteValueARM_OpARMCMNshiftRAreg_0(v *Value) bool { + b := v.Block + _ = b + // match: (CMNshiftRAreg (MOVWconst [c]) x y) // cond: - // result: (InvertFlags (CMPshiftRAreg x y z)) + // result: (CMNconst [c] (SRA <x.Type> x y)) for { - _ = v.Args[1] + _ = v.Args[2] v_0 := v.Args[0] - if v_0.Op != OpARMSRA { + if v_0.Op != OpARMMOVWconst { break } - _ = v_0.Args[1] - y := v_0.Args[0] - z := v_0.Args[1] + c := v_0.AuxInt x := v.Args[1] - v.reset(OpARMInvertFlags) - v0 := b.NewValue0(v.Pos, OpARMCMPshiftRAreg, types.TypeFlags) + y := v.Args[2] + v.reset(OpARMCMNconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) v0.AddArg(x) v0.AddArg(y) - v0.AddArg(z) v.AddArg(v0) return true } - return false -} -func rewriteValueARM_OpARMCMPD_0(v *Value) bool { - // match: (CMPD x (MOVDconst [0])) + // match: (CMNshiftRAreg x y (MOVWconst [c])) // cond: - // result: (CMPD0 x) + // result: (CMNshiftRA x y [c]) for { - _ = v.Args[1] + _ = v.Args[2] x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMOVDconst { - break - } - if v_1.AuxInt != 0 { + y := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARMMOVWconst { break } - v.reset(OpARMCMPD0) + c := v_2.AuxInt + v.reset(OpARMCMNshiftRA) + v.AuxInt = c v.AddArg(x) + v.AddArg(y) return true } return false } -func rewriteValueARM_OpARMCMPF_0(v *Value) bool { - // match: (CMPF x (MOVFconst [0])) +func rewriteValueARM_OpARMCMNshiftRL_0(v *Value) bool { + b := v.Block + _ = b + // match: (CMNshiftRL (MOVWconst [c]) x [d]) // cond: - // result: (CMPF0 x) + // result: (CMNconst [c] (SRLconst <x.Type> x [d])) for { + d := v.AuxInt _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMOVFconst { + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { break } - if v_1.AuxInt != 0 { + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARMCMNconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type) + v0.AuxInt = d + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (CMNshiftRL x 
(MOVWconst [c]) [d]) + // cond: + // result: (CMNconst x [int64(uint32(c)>>uint64(d))]) + for { + d := v.AuxInt + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { break } - v.reset(OpARMCMPF0) + c := v_1.AuxInt + v.reset(OpARMCMNconst) + v.AuxInt = int64(uint32(c) >> uint64(d)) v.AddArg(x) return true } return false } -func rewriteValueARM_OpARMCMPconst_0(v *Value) bool { - // match: (CMPconst (MOVWconst [x]) [y]) - // cond: int32(x)==int32(y) - // result: (FlagEQ) +func rewriteValueARM_OpARMCMNshiftRLreg_0(v *Value) bool { + b := v.Block + _ = b + // match: (CMNshiftRLreg (MOVWconst [c]) x y) + // cond: + // result: (CMNconst [c] (SRL <x.Type> x y)) for { - y := v.AuxInt + _ = v.Args[2] v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } - x := v_0.AuxInt - if !(int32(x) == int32(y)) { - break - } - v.reset(OpARMFlagEQ) + c := v_0.AuxInt + x := v.Args[1] + y := v.Args[2] + v.reset(OpARMCMNconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) return true } - // match: (CMPconst (MOVWconst [x]) [y]) - // cond: int32(x)<int32(y) && uint32(x)<uint32(y) - // result: (FlagLT_ULT) + // match: (CMNshiftRLreg x y (MOVWconst [c])) + // cond: + // result: (CMNshiftRL x y [c]) for { - y := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARMMOVWconst { - break - } - x := v_0.AuxInt - if !(int32(x) < int32(y) && uint32(x) < uint32(y)) { + _ = v.Args[2] + x := v.Args[0] + y := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARMMOVWconst { break } - v.reset(OpARMFlagLT_ULT) + c := v_2.AuxInt + v.reset(OpARMCMNshiftRL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) return true } - // match: (CMPconst (MOVWconst [x]) [y]) - // cond: int32(x)<int32(y) && uint32(x)>uint32(y) - // result: (FlagLT_UGT) + return false +} +func rewriteValueARM_OpARMCMOVWHSconst_0(v *Value) bool { + // match: (CMOVWHSconst _ (FlagEQ) [c]) + // cond: + // result: (MOVWconst [c]) for { - y := v.AuxInt - v_0 := v.Args[0] - 
if v_0.Op != OpARMMOVWconst { - break - } - x := v_0.AuxInt - if !(int32(x) < int32(y) && uint32(x) > uint32(y)) { + c := v.AuxInt + _ = v.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpARMFlagEQ { break } - v.reset(OpARMFlagLT_UGT) + v.reset(OpARMMOVWconst) + v.AuxInt = c return true } - // match: (CMPconst (MOVWconst [x]) [y]) - // cond: int32(x)>int32(y) && uint32(x)<uint32(y) - // result: (FlagGT_ULT) + // match: (CMOVWHSconst x (FlagLT_ULT)) + // cond: + // result: x for { - y := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARMMOVWconst { - break - } - x := v_0.AuxInt - if !(int32(x) > int32(y) && uint32(x) < uint32(y)) { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMFlagLT_ULT { break } - v.reset(OpARMFlagGT_ULT) + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) return true } - // match: (CMPconst (MOVWconst [x]) [y]) - // cond: int32(x)>int32(y) && uint32(x)>uint32(y) - // result: (FlagGT_UGT) + // match: (CMOVWHSconst _ (FlagLT_UGT) [c]) + // cond: + // result: (MOVWconst [c]) for { - y := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARMMOVWconst { + c := v.AuxInt + _ = v.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpARMFlagLT_UGT { break } - x := v_0.AuxInt - if !(int32(x) > int32(y) && uint32(x) > uint32(y)) { + v.reset(OpARMMOVWconst) + v.AuxInt = c + return true + } + // match: (CMOVWHSconst x (FlagGT_ULT)) + // cond: + // result: x + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMFlagGT_ULT { break } - v.reset(OpARMFlagGT_UGT) + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) return true } - // match: (CMPconst (MOVBUreg _) [c]) - // cond: 0xff < c - // result: (FlagLT_ULT) + // match: (CMOVWHSconst _ (FlagGT_UGT) [c]) + // cond: + // result: (MOVWconst [c]) for { c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARMMOVBUreg { + _ = v.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpARMFlagGT_UGT { break } - if !(0xff < c) { + v.reset(OpARMMOVWconst) + v.AuxInt = c + return true + } + // match: 
(CMOVWHSconst x (InvertFlags flags) [c]) + // cond: + // result: (CMOVWLSconst x flags [c]) + for { + c := v.AuxInt + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMInvertFlags { break } - v.reset(OpARMFlagLT_ULT) + flags := v_1.Args[0] + v.reset(OpARMCMOVWLSconst) + v.AuxInt = c + v.AddArg(x) + v.AddArg(flags) return true } - // match: (CMPconst (MOVHUreg _) [c]) - // cond: 0xffff < c - // result: (FlagLT_ULT) + return false +} +func rewriteValueARM_OpARMCMOVWLSconst_0(v *Value) bool { + // match: (CMOVWLSconst _ (FlagEQ) [c]) + // cond: + // result: (MOVWconst [c]) for { c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARMMOVHUreg { + _ = v.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpARMFlagEQ { break } - if !(0xffff < c) { + v.reset(OpARMMOVWconst) + v.AuxInt = c + return true + } + // match: (CMOVWLSconst _ (FlagLT_ULT) [c]) + // cond: + // result: (MOVWconst [c]) + for { + c := v.AuxInt + _ = v.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpARMFlagLT_ULT { break } - v.reset(OpARMFlagLT_ULT) + v.reset(OpARMMOVWconst) + v.AuxInt = c return true } - // match: (CMPconst (ANDconst _ [m]) [n]) - // cond: 0 <= int32(m) && int32(m) < int32(n) - // result: (FlagLT_ULT) + // match: (CMOVWLSconst x (FlagLT_UGT)) + // cond: + // result: x for { - n := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARMANDconst { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMFlagLT_UGT { break } - m := v_0.AuxInt - if !(0 <= int32(m) && int32(m) < int32(n)) { + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (CMOVWLSconst _ (FlagGT_ULT) [c]) + // cond: + // result: (MOVWconst [c]) + for { + c := v.AuxInt + _ = v.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpARMFlagGT_ULT { break } - v.reset(OpARMFlagLT_ULT) + v.reset(OpARMMOVWconst) + v.AuxInt = c return true } - // match: (CMPconst (SRLconst _ [c]) [n]) - // cond: 0 <= n && 0 < c && c <= 32 && (1<<uint32(32-c)) <= uint32(n) - // result: (FlagLT_ULT) + // match: 
(CMOVWLSconst x (FlagGT_UGT)) + // cond: + // result: x for { - n := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARMSRLconst { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMFlagGT_UGT { break } - c := v_0.AuxInt - if !(0 <= n && 0 < c && c <= 32 && (1<<uint32(32-c)) <= uint32(n)) { + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (CMOVWLSconst x (InvertFlags flags) [c]) + // cond: + // result: (CMOVWHSconst x flags [c]) + for { + c := v.AuxInt + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMInvertFlags { break } - v.reset(OpARMFlagLT_ULT) + flags := v_1.Args[0] + v.reset(OpARMCMOVWHSconst) + v.AuxInt = c + v.AddArg(x) + v.AddArg(flags) return true } return false } -func rewriteValueARM_OpARMCMPshiftLL_0(v *Value) bool { +func rewriteValueARM_OpARMCMP_0(v *Value) bool { b := v.Block _ = b - // match: (CMPshiftLL (MOVWconst [c]) x [d]) + // match: (CMP x (MOVWconst [c])) // cond: - // result: (InvertFlags (CMPconst [c] (SLLconst <x.Type> x [d]))) + // result: (CMPconst [c] x) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + v.reset(OpARMCMPconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (CMP (MOVWconst [c]) x) + // cond: + // result: (InvertFlags (CMPconst [c] x)) for { - d := v.AuxInt _ = v.Args[1] v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { @@ -5077,637 +5313,724 @@ func rewriteValueARM_OpARMCMPshiftLL_0(v *Value) bool { v.reset(OpARMInvertFlags) v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v0.AuxInt = c - v1 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) - v1.AuxInt = d - v1.AddArg(x) - v0.AddArg(v1) + v0.AddArg(x) v.AddArg(v0) return true } - // match: (CMPshiftLL x (MOVWconst [c]) [d]) + // match: (CMP x (SLLconst [c] y)) // cond: - // result: (CMPconst x [int64(uint32(c)<<uint64(d))]) + // result: (CMPshiftLL x y [c]) for { - d := v.AuxInt _ = v.Args[1] x := v.Args[0] v_1 := 
v.Args[1] - if v_1.Op != OpARMMOVWconst { + if v_1.Op != OpARMSLLconst { break } c := v_1.AuxInt - v.reset(OpARMCMPconst) - v.AuxInt = int64(uint32(c) << uint64(d)) + y := v_1.Args[0] + v.reset(OpARMCMPshiftLL) + v.AuxInt = c v.AddArg(x) + v.AddArg(y) return true } - return false -} -func rewriteValueARM_OpARMCMPshiftLLreg_0(v *Value) bool { - b := v.Block - _ = b - // match: (CMPshiftLLreg (MOVWconst [c]) x y) + // match: (CMP (SLLconst [c] y) x) // cond: - // result: (InvertFlags (CMPconst [c] (SLL <x.Type> x y))) + // result: (InvertFlags (CMPshiftLL x y [c])) for { - _ = v.Args[2] + _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARMMOVWconst { + if v_0.Op != OpARMSLLconst { break } c := v_0.AuxInt + y := v_0.Args[0] x := v.Args[1] - y := v.Args[2] v.reset(OpARMInvertFlags) - v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v0 := b.NewValue0(v.Pos, OpARMCMPshiftLL, types.TypeFlags) v0.AuxInt = c - v1 := b.NewValue0(v.Pos, OpARMSLL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) + v0.AddArg(x) + v0.AddArg(y) v.AddArg(v0) return true } - // match: (CMPshiftLLreg x y (MOVWconst [c])) + // match: (CMP x (SRLconst [c] y)) // cond: - // result: (CMPshiftLL x y [c]) + // result: (CMPshiftRL x y [c]) for { - _ = v.Args[2] + _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpARMMOVWconst { + v_1 := v.Args[1] + if v_1.Op != OpARMSRLconst { break } - c := v_2.AuxInt - v.reset(OpARMCMPshiftLL) + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMCMPshiftRL) v.AuxInt = c v.AddArg(x) v.AddArg(y) return true } - return false -} -func rewriteValueARM_OpARMCMPshiftRA_0(v *Value) bool { - b := v.Block - _ = b - // match: (CMPshiftRA (MOVWconst [c]) x [d]) + // match: (CMP (SRLconst [c] y) x) // cond: - // result: (InvertFlags (CMPconst [c] (SRAconst <x.Type> x [d]))) + // result: (InvertFlags (CMPshiftRL x y [c])) for { - d := v.AuxInt _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARMMOVWconst { + if v_0.Op != OpARMSRLconst { 
break } c := v_0.AuxInt + y := v_0.Args[0] x := v.Args[1] v.reset(OpARMInvertFlags) - v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v0 := b.NewValue0(v.Pos, OpARMCMPshiftRL, types.TypeFlags) v0.AuxInt = c - v1 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type) - v1.AuxInt = d - v1.AddArg(x) - v0.AddArg(v1) + v0.AddArg(x) + v0.AddArg(y) v.AddArg(v0) return true } - // match: (CMPshiftRA x (MOVWconst [c]) [d]) + // match: (CMP x (SRAconst [c] y)) // cond: - // result: (CMPconst x [int64(int32(c)>>uint64(d))]) + // result: (CMPshiftRA x y [c]) for { - d := v.AuxInt _ = v.Args[1] x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARMMOVWconst { + if v_1.Op != OpARMSRAconst { break } c := v_1.AuxInt - v.reset(OpARMCMPconst) - v.AuxInt = int64(int32(c) >> uint64(d)) + y := v_1.Args[0] + v.reset(OpARMCMPshiftRA) + v.AuxInt = c v.AddArg(x) + v.AddArg(y) return true } - return false -} -func rewriteValueARM_OpARMCMPshiftRAreg_0(v *Value) bool { - b := v.Block - _ = b - // match: (CMPshiftRAreg (MOVWconst [c]) x y) + // match: (CMP (SRAconst [c] y) x) // cond: - // result: (InvertFlags (CMPconst [c] (SRA <x.Type> x y))) + // result: (InvertFlags (CMPshiftRA x y [c])) for { - _ = v.Args[2] + _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARMMOVWconst { + if v_0.Op != OpARMSRAconst { break } c := v_0.AuxInt + y := v_0.Args[0] x := v.Args[1] - y := v.Args[2] v.reset(OpARMInvertFlags) - v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v0 := b.NewValue0(v.Pos, OpARMCMPshiftRA, types.TypeFlags) v0.AuxInt = c - v1 := b.NewValue0(v.Pos, OpARMSRA, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) + v0.AddArg(x) + v0.AddArg(y) v.AddArg(v0) return true } - // match: (CMPshiftRAreg x y (MOVWconst [c])) + // match: (CMP x (SLL y z)) // cond: - // result: (CMPshiftRA x y [c]) + // result: (CMPshiftLLreg x y z) for { - _ = v.Args[2] + _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpARMMOVWconst { + v_1 := v.Args[1] + if v_1.Op != 
OpARMSLL { break } - c := v_2.AuxInt - v.reset(OpARMCMPshiftRA) - v.AuxInt = c + _ = v_1.Args[1] + y := v_1.Args[0] + z := v_1.Args[1] + v.reset(OpARMCMPshiftLLreg) v.AddArg(x) v.AddArg(y) + v.AddArg(z) return true } - return false -} -func rewriteValueARM_OpARMCMPshiftRL_0(v *Value) bool { - b := v.Block - _ = b - // match: (CMPshiftRL (MOVWconst [c]) x [d]) + // match: (CMP (SLL y z) x) // cond: - // result: (InvertFlags (CMPconst [c] (SRLconst <x.Type> x [d]))) + // result: (InvertFlags (CMPshiftLLreg x y z)) for { - d := v.AuxInt _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARMMOVWconst { + if v_0.Op != OpARMSLL { break } - c := v_0.AuxInt + _ = v_0.Args[1] + y := v_0.Args[0] + z := v_0.Args[1] x := v.Args[1] v.reset(OpARMInvertFlags) - v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) - v0.AuxInt = c - v1 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type) - v1.AuxInt = d - v1.AddArg(x) - v0.AddArg(v1) + v0 := b.NewValue0(v.Pos, OpARMCMPshiftLLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) v.AddArg(v0) return true } - // match: (CMPshiftRL x (MOVWconst [c]) [d]) + return false +} +func rewriteValueARM_OpARMCMP_10(v *Value) bool { + b := v.Block + _ = b + // match: (CMP x (SRL y z)) // cond: - // result: (CMPconst x [int64(uint32(c)>>uint64(d))]) + // result: (CMPshiftRLreg x y z) for { - d := v.AuxInt _ = v.Args[1] x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARMMOVWconst { + if v_1.Op != OpARMSRL { break } - c := v_1.AuxInt - v.reset(OpARMCMPconst) - v.AuxInt = int64(uint32(c) >> uint64(d)) + _ = v_1.Args[1] + y := v_1.Args[0] + z := v_1.Args[1] + v.reset(OpARMCMPshiftRLreg) v.AddArg(x) + v.AddArg(y) + v.AddArg(z) return true } - return false -} -func rewriteValueARM_OpARMCMPshiftRLreg_0(v *Value) bool { - b := v.Block - _ = b - // match: (CMPshiftRLreg (MOVWconst [c]) x y) + // match: (CMP (SRL y z) x) // cond: - // result: (InvertFlags (CMPconst [c] (SRL <x.Type> x y))) + // result: (InvertFlags (CMPshiftRLreg x y z)) for { - _ = 
v.Args[2] + _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARMMOVWconst { + if v_0.Op != OpARMSRL { break } - c := v_0.AuxInt + _ = v_0.Args[1] + y := v_0.Args[0] + z := v_0.Args[1] x := v.Args[1] - y := v.Args[2] v.reset(OpARMInvertFlags) - v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) - v0.AuxInt = c - v1 := b.NewValue0(v.Pos, OpARMSRL, x.Type) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) + v0 := b.NewValue0(v.Pos, OpARMCMPshiftRLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) v.AddArg(v0) return true } - // match: (CMPshiftRLreg x y (MOVWconst [c])) + // match: (CMP x (SRA y z)) // cond: - // result: (CMPshiftRL x y [c]) + // result: (CMPshiftRAreg x y z) for { - _ = v.Args[2] + _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpARMMOVWconst { + v_1 := v.Args[1] + if v_1.Op != OpARMSRA { break } - c := v_2.AuxInt - v.reset(OpARMCMPshiftRL) - v.AuxInt = c + _ = v_1.Args[1] + y := v_1.Args[0] + z := v_1.Args[1] + v.reset(OpARMCMPshiftRAreg) v.AddArg(x) v.AddArg(y) + v.AddArg(z) return true } - return false -} -func rewriteValueARM_OpARMEqual_0(v *Value) bool { - // match: (Equal (FlagEQ)) + // match: (CMP (SRA y z) x) // cond: - // result: (MOVWconst [1]) + // result: (InvertFlags (CMPshiftRAreg x y z)) for { + _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARMFlagEQ { + if v_0.Op != OpARMSRA { break } - v.reset(OpARMMOVWconst) - v.AuxInt = 1 + _ = v_0.Args[1] + y := v_0.Args[0] + z := v_0.Args[1] + x := v.Args[1] + v.reset(OpARMInvertFlags) + v0 := b.NewValue0(v.Pos, OpARMCMPshiftRAreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) return true } - // match: (Equal (FlagLT_ULT)) + // match: (CMP x (RSBconst [0] y)) // cond: - // result: (MOVWconst [0]) + // result: (CMN x y) for { - v_0 := v.Args[0] - if v_0.Op != OpARMFlagLT_ULT { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMRSBconst { break } - v.reset(OpARMMOVWconst) - v.AuxInt = 0 - return 
true - } - // match: (Equal (FlagLT_UGT)) - // cond: - // result: (MOVWconst [0]) - for { - v_0 := v.Args[0] - if v_0.Op != OpARMFlagLT_UGT { + if v_1.AuxInt != 0 { break } - v.reset(OpARMMOVWconst) - v.AuxInt = 0 + y := v_1.Args[0] + v.reset(OpARMCMN) + v.AddArg(x) + v.AddArg(y) return true } - // match: (Equal (FlagGT_ULT)) + return false +} +func rewriteValueARM_OpARMCMPD_0(v *Value) bool { + // match: (CMPD x (MOVDconst [0])) // cond: - // result: (MOVWconst [0]) + // result: (CMPD0 x) for { - v_0 := v.Args[0] - if v_0.Op != OpARMFlagGT_ULT { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVDconst { break } - v.reset(OpARMMOVWconst) - v.AuxInt = 0 - return true - } - // match: (Equal (FlagGT_UGT)) - // cond: - // result: (MOVWconst [0]) - for { - v_0 := v.Args[0] - if v_0.Op != OpARMFlagGT_UGT { + if v_1.AuxInt != 0 { break } - v.reset(OpARMMOVWconst) - v.AuxInt = 0 + v.reset(OpARMCMPD0) + v.AddArg(x) return true } - // match: (Equal (InvertFlags x)) + return false +} +func rewriteValueARM_OpARMCMPF_0(v *Value) bool { + // match: (CMPF x (MOVFconst [0])) // cond: - // result: (Equal x) + // result: (CMPF0 x) for { - v_0 := v.Args[0] - if v_0.Op != OpARMInvertFlags { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVFconst { break } - x := v_0.Args[0] - v.reset(OpARMEqual) + if v_1.AuxInt != 0 { + break + } + v.reset(OpARMCMPF0) v.AddArg(x) return true } return false } -func rewriteValueARM_OpARMGreaterEqual_0(v *Value) bool { - // match: (GreaterEqual (FlagEQ)) - // cond: - // result: (MOVWconst [1]) +func rewriteValueARM_OpARMCMPconst_0(v *Value) bool { + // match: (CMPconst (MOVWconst [x]) [y]) + // cond: int32(x)==int32(y) + // result: (FlagEQ) for { + y := v.AuxInt v_0 := v.Args[0] - if v_0.Op != OpARMFlagEQ { + if v_0.Op != OpARMMOVWconst { break } - v.reset(OpARMMOVWconst) - v.AuxInt = 1 - return true - } - // match: (GreaterEqual (FlagLT_ULT)) - // cond: - // result: (MOVWconst [0]) - for { - v_0 := 
v.Args[0] - if v_0.Op != OpARMFlagLT_ULT { + x := v_0.AuxInt + if !(int32(x) == int32(y)) { break } - v.reset(OpARMMOVWconst) - v.AuxInt = 0 + v.reset(OpARMFlagEQ) return true } - // match: (GreaterEqual (FlagLT_UGT)) - // cond: - // result: (MOVWconst [0]) + // match: (CMPconst (MOVWconst [x]) [y]) + // cond: int32(x)<int32(y) && uint32(x)<uint32(y) + // result: (FlagLT_ULT) for { + y := v.AuxInt v_0 := v.Args[0] - if v_0.Op != OpARMFlagLT_UGT { + if v_0.Op != OpARMMOVWconst { break } - v.reset(OpARMMOVWconst) - v.AuxInt = 0 + x := v_0.AuxInt + if !(int32(x) < int32(y) && uint32(x) < uint32(y)) { + break + } + v.reset(OpARMFlagLT_ULT) return true } - // match: (GreaterEqual (FlagGT_ULT)) - // cond: - // result: (MOVWconst [1]) + // match: (CMPconst (MOVWconst [x]) [y]) + // cond: int32(x)<int32(y) && uint32(x)>uint32(y) + // result: (FlagLT_UGT) for { + y := v.AuxInt v_0 := v.Args[0] - if v_0.Op != OpARMFlagGT_ULT { + if v_0.Op != OpARMMOVWconst { break } - v.reset(OpARMMOVWconst) - v.AuxInt = 1 + x := v_0.AuxInt + if !(int32(x) < int32(y) && uint32(x) > uint32(y)) { + break + } + v.reset(OpARMFlagLT_UGT) return true } - // match: (GreaterEqual (FlagGT_UGT)) - // cond: - // result: (MOVWconst [1]) + // match: (CMPconst (MOVWconst [x]) [y]) + // cond: int32(x)>int32(y) && uint32(x)<uint32(y) + // result: (FlagGT_ULT) for { + y := v.AuxInt v_0 := v.Args[0] - if v_0.Op != OpARMFlagGT_UGT { + if v_0.Op != OpARMMOVWconst { break } - v.reset(OpARMMOVWconst) - v.AuxInt = 1 + x := v_0.AuxInt + if !(int32(x) > int32(y) && uint32(x) < uint32(y)) { + break + } + v.reset(OpARMFlagGT_ULT) return true } - // match: (GreaterEqual (InvertFlags x)) - // cond: - // result: (LessEqual x) + // match: (CMPconst (MOVWconst [x]) [y]) + // cond: int32(x)>int32(y) && uint32(x)>uint32(y) + // result: (FlagGT_UGT) for { + y := v.AuxInt v_0 := v.Args[0] - if v_0.Op != OpARMInvertFlags { + if v_0.Op != OpARMMOVWconst { break } - x := v_0.Args[0] - v.reset(OpARMLessEqual) - v.AddArg(x) + x := 
v_0.AuxInt + if !(int32(x) > int32(y) && uint32(x) > uint32(y)) { + break + } + v.reset(OpARMFlagGT_UGT) return true } - return false -} -func rewriteValueARM_OpARMGreaterEqualU_0(v *Value) bool { - // match: (GreaterEqualU (FlagEQ)) - // cond: - // result: (MOVWconst [1]) + // match: (CMPconst (MOVBUreg _) [c]) + // cond: 0xff < c + // result: (FlagLT_ULT) for { + c := v.AuxInt v_0 := v.Args[0] - if v_0.Op != OpARMFlagEQ { + if v_0.Op != OpARMMOVBUreg { break } - v.reset(OpARMMOVWconst) - v.AuxInt = 1 + if !(0xff < c) { + break + } + v.reset(OpARMFlagLT_ULT) return true } - // match: (GreaterEqualU (FlagLT_ULT)) - // cond: - // result: (MOVWconst [0]) + // match: (CMPconst (MOVHUreg _) [c]) + // cond: 0xffff < c + // result: (FlagLT_ULT) for { + c := v.AuxInt v_0 := v.Args[0] - if v_0.Op != OpARMFlagLT_ULT { + if v_0.Op != OpARMMOVHUreg { break } - v.reset(OpARMMOVWconst) - v.AuxInt = 0 + if !(0xffff < c) { + break + } + v.reset(OpARMFlagLT_ULT) return true } - // match: (GreaterEqualU (FlagLT_UGT)) - // cond: - // result: (MOVWconst [1]) + // match: (CMPconst (ANDconst _ [m]) [n]) + // cond: 0 <= int32(m) && int32(m) < int32(n) + // result: (FlagLT_ULT) for { + n := v.AuxInt v_0 := v.Args[0] - if v_0.Op != OpARMFlagLT_UGT { + if v_0.Op != OpARMANDconst { break } - v.reset(OpARMMOVWconst) - v.AuxInt = 1 + m := v_0.AuxInt + if !(0 <= int32(m) && int32(m) < int32(n)) { + break + } + v.reset(OpARMFlagLT_ULT) return true } - // match: (GreaterEqualU (FlagGT_ULT)) - // cond: - // result: (MOVWconst [0]) + // match: (CMPconst (SRLconst _ [c]) [n]) + // cond: 0 <= n && 0 < c && c <= 32 && (1<<uint32(32-c)) <= uint32(n) + // result: (FlagLT_ULT) for { + n := v.AuxInt v_0 := v.Args[0] - if v_0.Op != OpARMFlagGT_ULT { + if v_0.Op != OpARMSRLconst { break } - v.reset(OpARMMOVWconst) - v.AuxInt = 0 + c := v_0.AuxInt + if !(0 <= n && 0 < c && c <= 32 && (1<<uint32(32-c)) <= uint32(n)) { + break + } + v.reset(OpARMFlagLT_ULT) return true } - // match: (GreaterEqualU 
(FlagGT_UGT)) + return false +} +func rewriteValueARM_OpARMCMPshiftLL_0(v *Value) bool { + b := v.Block + _ = b + // match: (CMPshiftLL (MOVWconst [c]) x [d]) // cond: - // result: (MOVWconst [1]) + // result: (InvertFlags (CMPconst [c] (SLLconst <x.Type> x [d]))) for { + d := v.AuxInt + _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARMFlagGT_UGT { + if v_0.Op != OpARMMOVWconst { break } - v.reset(OpARMMOVWconst) - v.AuxInt = 1 + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARMInvertFlags) + v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v0.AuxInt = c + v1 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v1.AuxInt = d + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) return true } - // match: (GreaterEqualU (InvertFlags x)) + // match: (CMPshiftLL x (MOVWconst [c]) [d]) // cond: - // result: (LessEqualU x) + // result: (CMPconst x [int64(uint32(c)<<uint64(d))]) for { - v_0 := v.Args[0] - if v_0.Op != OpARMInvertFlags { + d := v.AuxInt + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { break } - x := v_0.Args[0] - v.reset(OpARMLessEqualU) + c := v_1.AuxInt + v.reset(OpARMCMPconst) + v.AuxInt = int64(uint32(c) << uint64(d)) v.AddArg(x) return true } return false } -func rewriteValueARM_OpARMGreaterThan_0(v *Value) bool { - // match: (GreaterThan (FlagEQ)) +func rewriteValueARM_OpARMCMPshiftLLreg_0(v *Value) bool { + b := v.Block + _ = b + // match: (CMPshiftLLreg (MOVWconst [c]) x y) // cond: - // result: (MOVWconst [0]) + // result: (InvertFlags (CMPconst [c] (SLL <x.Type> x y))) for { + _ = v.Args[2] v_0 := v.Args[0] - if v_0.Op != OpARMFlagEQ { + if v_0.Op != OpARMMOVWconst { break } - v.reset(OpARMMOVWconst) - v.AuxInt = 0 - return true - } - // match: (GreaterThan (FlagLT_ULT)) - // cond: - // result: (MOVWconst [0]) - for { - v_0 := v.Args[0] - if v_0.Op != OpARMFlagLT_ULT { - break - } - v.reset(OpARMMOVWconst) - v.AuxInt = 0 - return true - } - // match: (GreaterThan (FlagLT_UGT)) - // cond: - // result: (MOVWconst 
[0]) - for { - v_0 := v.Args[0] - if v_0.Op != OpARMFlagLT_UGT { - break - } - v.reset(OpARMMOVWconst) - v.AuxInt = 0 + c := v_0.AuxInt + x := v.Args[1] + y := v.Args[2] + v.reset(OpARMInvertFlags) + v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v0.AuxInt = c + v1 := b.NewValue0(v.Pos, OpARMSLL, x.Type) + v1.AddArg(x) + v1.AddArg(y) + v0.AddArg(v1) + v.AddArg(v0) return true } - // match: (GreaterThan (FlagGT_ULT)) + // match: (CMPshiftLLreg x y (MOVWconst [c])) // cond: - // result: (MOVWconst [1]) + // result: (CMPshiftLL x y [c]) for { - v_0 := v.Args[0] - if v_0.Op != OpARMFlagGT_ULT { + _ = v.Args[2] + x := v.Args[0] + y := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARMMOVWconst { break } - v.reset(OpARMMOVWconst) - v.AuxInt = 1 + c := v_2.AuxInt + v.reset(OpARMCMPshiftLL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) return true } - // match: (GreaterThan (FlagGT_UGT)) + return false +} +func rewriteValueARM_OpARMCMPshiftRA_0(v *Value) bool { + b := v.Block + _ = b + // match: (CMPshiftRA (MOVWconst [c]) x [d]) // cond: - // result: (MOVWconst [1]) + // result: (InvertFlags (CMPconst [c] (SRAconst <x.Type> x [d]))) for { + d := v.AuxInt + _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARMFlagGT_UGT { + if v_0.Op != OpARMMOVWconst { break } - v.reset(OpARMMOVWconst) - v.AuxInt = 1 + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARMInvertFlags) + v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v0.AuxInt = c + v1 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type) + v1.AuxInt = d + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) return true } - // match: (GreaterThan (InvertFlags x)) + // match: (CMPshiftRA x (MOVWconst [c]) [d]) // cond: - // result: (LessThan x) + // result: (CMPconst x [int64(int32(c)>>uint64(d))]) for { - v_0 := v.Args[0] - if v_0.Op != OpARMInvertFlags { + d := v.AuxInt + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { break } - x := v_0.Args[0] - v.reset(OpARMLessThan) + c := v_1.AuxInt + 
v.reset(OpARMCMPconst) + v.AuxInt = int64(int32(c) >> uint64(d)) v.AddArg(x) return true } return false } -func rewriteValueARM_OpARMGreaterThanU_0(v *Value) bool { - // match: (GreaterThanU (FlagEQ)) +func rewriteValueARM_OpARMCMPshiftRAreg_0(v *Value) bool { + b := v.Block + _ = b + // match: (CMPshiftRAreg (MOVWconst [c]) x y) // cond: - // result: (MOVWconst [0]) + // result: (InvertFlags (CMPconst [c] (SRA <x.Type> x y))) for { + _ = v.Args[2] v_0 := v.Args[0] - if v_0.Op != OpARMFlagEQ { + if v_0.Op != OpARMMOVWconst { break } - v.reset(OpARMMOVWconst) - v.AuxInt = 0 + c := v_0.AuxInt + x := v.Args[1] + y := v.Args[2] + v.reset(OpARMInvertFlags) + v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v0.AuxInt = c + v1 := b.NewValue0(v.Pos, OpARMSRA, x.Type) + v1.AddArg(x) + v1.AddArg(y) + v0.AddArg(v1) + v.AddArg(v0) return true } - // match: (GreaterThanU (FlagLT_ULT)) + // match: (CMPshiftRAreg x y (MOVWconst [c])) // cond: - // result: (MOVWconst [0]) + // result: (CMPshiftRA x y [c]) for { - v_0 := v.Args[0] - if v_0.Op != OpARMFlagLT_ULT { + _ = v.Args[2] + x := v.Args[0] + y := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARMMOVWconst { break } - v.reset(OpARMMOVWconst) - v.AuxInt = 0 + c := v_2.AuxInt + v.reset(OpARMCMPshiftRA) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) return true } - // match: (GreaterThanU (FlagLT_UGT)) + return false +} +func rewriteValueARM_OpARMCMPshiftRL_0(v *Value) bool { + b := v.Block + _ = b + // match: (CMPshiftRL (MOVWconst [c]) x [d]) // cond: - // result: (MOVWconst [1]) + // result: (InvertFlags (CMPconst [c] (SRLconst <x.Type> x [d]))) for { + d := v.AuxInt + _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARMFlagLT_UGT { + if v_0.Op != OpARMMOVWconst { break } - v.reset(OpARMMOVWconst) - v.AuxInt = 1 + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARMInvertFlags) + v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v0.AuxInt = c + v1 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type) + v1.AuxInt = d + 
v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) return true } - // match: (GreaterThanU (FlagGT_ULT)) + // match: (CMPshiftRL x (MOVWconst [c]) [d]) // cond: - // result: (MOVWconst [0]) + // result: (CMPconst x [int64(uint32(c)>>uint64(d))]) for { - v_0 := v.Args[0] - if v_0.Op != OpARMFlagGT_ULT { + d := v.AuxInt + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { break } - v.reset(OpARMMOVWconst) - v.AuxInt = 0 + c := v_1.AuxInt + v.reset(OpARMCMPconst) + v.AuxInt = int64(uint32(c) >> uint64(d)) + v.AddArg(x) return true } - // match: (GreaterThanU (FlagGT_UGT)) + return false +} +func rewriteValueARM_OpARMCMPshiftRLreg_0(v *Value) bool { + b := v.Block + _ = b + // match: (CMPshiftRLreg (MOVWconst [c]) x y) // cond: - // result: (MOVWconst [1]) + // result: (InvertFlags (CMPconst [c] (SRL <x.Type> x y))) for { + _ = v.Args[2] v_0 := v.Args[0] - if v_0.Op != OpARMFlagGT_UGT { + if v_0.Op != OpARMMOVWconst { break } - v.reset(OpARMMOVWconst) - v.AuxInt = 1 + c := v_0.AuxInt + x := v.Args[1] + y := v.Args[2] + v.reset(OpARMInvertFlags) + v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v0.AuxInt = c + v1 := b.NewValue0(v.Pos, OpARMSRL, x.Type) + v1.AddArg(x) + v1.AddArg(y) + v0.AddArg(v1) + v.AddArg(v0) return true } - // match: (GreaterThanU (InvertFlags x)) + // match: (CMPshiftRLreg x y (MOVWconst [c])) // cond: - // result: (LessThanU x) + // result: (CMPshiftRL x y [c]) for { - v_0 := v.Args[0] - if v_0.Op != OpARMInvertFlags { + _ = v.Args[2] + x := v.Args[0] + y := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARMMOVWconst { break } - x := v_0.Args[0] - v.reset(OpARMLessThanU) + c := v_2.AuxInt + v.reset(OpARMCMPshiftRL) + v.AuxInt = c v.AddArg(x) + v.AddArg(y) return true } return false } -func rewriteValueARM_OpARMLessEqual_0(v *Value) bool { - // match: (LessEqual (FlagEQ)) +func rewriteValueARM_OpARMEqual_0(v *Value) bool { + // match: (Equal (FlagEQ)) // cond: // result: (MOVWconst [1]) for { @@ -5719,31 +6042,31 
@@ func rewriteValueARM_OpARMLessEqual_0(v *Value) bool { v.AuxInt = 1 return true } - // match: (LessEqual (FlagLT_ULT)) + // match: (Equal (FlagLT_ULT)) // cond: - // result: (MOVWconst [1]) + // result: (MOVWconst [0]) for { v_0 := v.Args[0] if v_0.Op != OpARMFlagLT_ULT { break } v.reset(OpARMMOVWconst) - v.AuxInt = 1 + v.AuxInt = 0 return true } - // match: (LessEqual (FlagLT_UGT)) + // match: (Equal (FlagLT_UGT)) // cond: - // result: (MOVWconst [1]) + // result: (MOVWconst [0]) for { v_0 := v.Args[0] if v_0.Op != OpARMFlagLT_UGT { break } v.reset(OpARMMOVWconst) - v.AuxInt = 1 + v.AuxInt = 0 return true } - // match: (LessEqual (FlagGT_ULT)) + // match: (Equal (FlagGT_ULT)) // cond: // result: (MOVWconst [0]) for { @@ -5755,7 +6078,7 @@ func rewriteValueARM_OpARMLessEqual_0(v *Value) bool { v.AuxInt = 0 return true } - // match: (LessEqual (FlagGT_UGT)) + // match: (Equal (FlagGT_UGT)) // cond: // result: (MOVWconst [0]) for { @@ -5767,23 +6090,23 @@ func rewriteValueARM_OpARMLessEqual_0(v *Value) bool { v.AuxInt = 0 return true } - // match: (LessEqual (InvertFlags x)) + // match: (Equal (InvertFlags x)) // cond: - // result: (GreaterEqual x) + // result: (Equal x) for { v_0 := v.Args[0] if v_0.Op != OpARMInvertFlags { break } x := v_0.Args[0] - v.reset(OpARMGreaterEqual) + v.reset(OpARMEqual) v.AddArg(x) return true } return false } -func rewriteValueARM_OpARMLessEqualU_0(v *Value) bool { - // match: (LessEqualU (FlagEQ)) +func rewriteValueARM_OpARMGreaterEqual_0(v *Value) bool { + // match: (GreaterEqual (FlagEQ)) // cond: // result: (MOVWconst [1]) for { @@ -5795,19 +6118,19 @@ func rewriteValueARM_OpARMLessEqualU_0(v *Value) bool { v.AuxInt = 1 return true } - // match: (LessEqualU (FlagLT_ULT)) + // match: (GreaterEqual (FlagLT_ULT)) // cond: - // result: (MOVWconst [1]) + // result: (MOVWconst [0]) for { v_0 := v.Args[0] if v_0.Op != OpARMFlagLT_ULT { break } v.reset(OpARMMOVWconst) - v.AuxInt = 1 + v.AuxInt = 0 return true } - // match: (LessEqualU 
(FlagLT_UGT)) + // match: (GreaterEqual (FlagLT_UGT)) // cond: // result: (MOVWconst [0]) for { @@ -5819,7 +6142,7 @@ func rewriteValueARM_OpARMLessEqualU_0(v *Value) bool { v.AuxInt = 0 return true } - // match: (LessEqualU (FlagGT_ULT)) + // match: (GreaterEqual (FlagGT_ULT)) // cond: // result: (MOVWconst [1]) for { @@ -5831,59 +6154,59 @@ func rewriteValueARM_OpARMLessEqualU_0(v *Value) bool { v.AuxInt = 1 return true } - // match: (LessEqualU (FlagGT_UGT)) + // match: (GreaterEqual (FlagGT_UGT)) // cond: - // result: (MOVWconst [0]) + // result: (MOVWconst [1]) for { v_0 := v.Args[0] if v_0.Op != OpARMFlagGT_UGT { break } v.reset(OpARMMOVWconst) - v.AuxInt = 0 + v.AuxInt = 1 return true } - // match: (LessEqualU (InvertFlags x)) + // match: (GreaterEqual (InvertFlags x)) // cond: - // result: (GreaterEqualU x) + // result: (LessEqual x) for { v_0 := v.Args[0] if v_0.Op != OpARMInvertFlags { break } x := v_0.Args[0] - v.reset(OpARMGreaterEqualU) + v.reset(OpARMLessEqual) v.AddArg(x) return true } return false } -func rewriteValueARM_OpARMLessThan_0(v *Value) bool { - // match: (LessThan (FlagEQ)) +func rewriteValueARM_OpARMGreaterEqualU_0(v *Value) bool { + // match: (GreaterEqualU (FlagEQ)) // cond: - // result: (MOVWconst [0]) + // result: (MOVWconst [1]) for { v_0 := v.Args[0] if v_0.Op != OpARMFlagEQ { break } v.reset(OpARMMOVWconst) - v.AuxInt = 0 + v.AuxInt = 1 return true } - // match: (LessThan (FlagLT_ULT)) + // match: (GreaterEqualU (FlagLT_ULT)) // cond: - // result: (MOVWconst [1]) + // result: (MOVWconst [0]) for { v_0 := v.Args[0] if v_0.Op != OpARMFlagLT_ULT { break } v.reset(OpARMMOVWconst) - v.AuxInt = 1 + v.AuxInt = 0 return true } - // match: (LessThan (FlagLT_UGT)) + // match: (GreaterEqualU (FlagLT_UGT)) // cond: // result: (MOVWconst [1]) for { @@ -5895,7 +6218,7 @@ func rewriteValueARM_OpARMLessThan_0(v *Value) bool { v.AuxInt = 1 return true } - // match: (LessThan (FlagGT_ULT)) + // match: (GreaterEqualU (FlagGT_ULT)) // cond: // 
result: (MOVWconst [0]) for { @@ -5907,35 +6230,35 @@ func rewriteValueARM_OpARMLessThan_0(v *Value) bool { v.AuxInt = 0 return true } - // match: (LessThan (FlagGT_UGT)) + // match: (GreaterEqualU (FlagGT_UGT)) // cond: - // result: (MOVWconst [0]) + // result: (MOVWconst [1]) for { v_0 := v.Args[0] if v_0.Op != OpARMFlagGT_UGT { break } v.reset(OpARMMOVWconst) - v.AuxInt = 0 + v.AuxInt = 1 return true } - // match: (LessThan (InvertFlags x)) + // match: (GreaterEqualU (InvertFlags x)) // cond: - // result: (GreaterThan x) + // result: (LessEqualU x) for { v_0 := v.Args[0] if v_0.Op != OpARMInvertFlags { break } x := v_0.Args[0] - v.reset(OpARMGreaterThan) + v.reset(OpARMLessEqualU) v.AddArg(x) return true } return false } -func rewriteValueARM_OpARMLessThanU_0(v *Value) bool { - // match: (LessThanU (FlagEQ)) +func rewriteValueARM_OpARMGreaterThan_0(v *Value) bool { + // match: (GreaterThan (FlagEQ)) // cond: // result: (MOVWconst [0]) for { @@ -5947,19 +6270,19 @@ func rewriteValueARM_OpARMLessThanU_0(v *Value) bool { v.AuxInt = 0 return true } - // match: (LessThanU (FlagLT_ULT)) + // match: (GreaterThan (FlagLT_ULT)) // cond: - // result: (MOVWconst [1]) + // result: (MOVWconst [0]) for { v_0 := v.Args[0] if v_0.Op != OpARMFlagLT_ULT { break } v.reset(OpARMMOVWconst) - v.AuxInt = 1 + v.AuxInt = 0 return true } - // match: (LessThanU (FlagLT_UGT)) + // match: (GreaterThan (FlagLT_UGT)) // cond: // result: (MOVWconst [0]) for { @@ -5971,7 +6294,7 @@ func rewriteValueARM_OpARMLessThanU_0(v *Value) bool { v.AuxInt = 0 return true } - // match: (LessThanU (FlagGT_ULT)) + // match: (GreaterThan (FlagGT_ULT)) // cond: // result: (MOVWconst [1]) for { @@ -5983,580 +6306,467 @@ func rewriteValueARM_OpARMLessThanU_0(v *Value) bool { v.AuxInt = 1 return true } - // match: (LessThanU (FlagGT_UGT)) + // match: (GreaterThan (FlagGT_UGT)) // cond: - // result: (MOVWconst [0]) + // result: (MOVWconst [1]) for { v_0 := v.Args[0] if v_0.Op != OpARMFlagGT_UGT { break } 
v.reset(OpARMMOVWconst) - v.AuxInt = 0 + v.AuxInt = 1 return true } - // match: (LessThanU (InvertFlags x)) + // match: (GreaterThan (InvertFlags x)) // cond: - // result: (GreaterThanU x) + // result: (LessThan x) for { v_0 := v.Args[0] if v_0.Op != OpARMInvertFlags { break } x := v_0.Args[0] - v.reset(OpARMGreaterThanU) + v.reset(OpARMLessThan) v.AddArg(x) return true } return false } -func rewriteValueARM_OpARMMOVBUload_0(v *Value) bool { - b := v.Block - _ = b - config := b.Func.Config - _ = config - // match: (MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem) +func rewriteValueARM_OpARMGreaterThanU_0(v *Value) bool { + // match: (GreaterThanU (FlagEQ)) // cond: - // result: (MOVBUload [off1+off2] {sym} ptr mem) + // result: (MOVWconst [0]) for { - off1 := v.AuxInt - sym := v.Aux - _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARMADDconst { + if v_0.Op != OpARMFlagEQ { break } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - v.reset(OpARMMOVBUload) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.reset(OpARMMOVWconst) + v.AuxInt = 0 return true } - // match: (MOVBUload [off1] {sym} (SUBconst [off2] ptr) mem) + // match: (GreaterThanU (FlagLT_ULT)) // cond: - // result: (MOVBUload [off1-off2] {sym} ptr mem) + // result: (MOVWconst [0]) for { - off1 := v.AuxInt - sym := v.Aux - _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARMSUBconst { + if v_0.Op != OpARMFlagLT_ULT { break } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - v.reset(OpARMMOVBUload) - v.AuxInt = off1 - off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.reset(OpARMMOVWconst) + v.AuxInt = 0 return true } - // match: (MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) - // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + // match: (GreaterThanU (FlagLT_UGT)) + // cond: + // result: (MOVWconst [1]) for { - off1 := v.AuxInt - sym1 := v.Aux - _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != 
OpARMMOVWaddr { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2)) { + if v_0.Op != OpARMFlagLT_UGT { break } - v.reset(OpARMMOVBUload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.reset(OpARMMOVWconst) + v.AuxInt = 1 return true } - // match: (MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) - // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) - // result: (MOVBUreg x) + // match: (GreaterThanU (FlagGT_ULT)) + // cond: + // result: (MOVWconst [0]) for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMOVBstore { - break - } - off2 := v_1.AuxInt - sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] - x := v_1.Args[1] - if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + v_0 := v.Args[0] + if v_0.Op != OpARMFlagGT_ULT { break } - v.reset(OpARMMOVBUreg) - v.AddArg(x) + v.reset(OpARMMOVWconst) + v.AuxInt = 0 return true } - // match: (MOVBUload [0] {sym} (ADD ptr idx) mem) - // cond: sym == nil && !config.nacl - // result: (MOVBUloadidx ptr idx mem) + // match: (GreaterThanU (FlagGT_UGT)) + // cond: + // result: (MOVWconst [1]) for { - if v.AuxInt != 0 { - break - } - sym := v.Aux - _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARMADD { + if v_0.Op != OpARMFlagGT_UGT { break } - _ = v_0.Args[1] - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(sym == nil && !config.nacl) { + v.reset(OpARMMOVWconst) + v.AuxInt = 1 + return true + } + // match: (GreaterThanU (InvertFlags x)) + // cond: + // result: (LessThanU x) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMInvertFlags { break } - v.reset(OpARMMOVBUloadidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + x := v_0.Args[0] + v.reset(OpARMLessThanU) + v.AddArg(x) return true } return false } -func rewriteValueARM_OpARMMOVBUloadidx_0(v *Value) bool { - // match: (MOVBUloadidx ptr idx 
(MOVBstoreidx ptr2 idx x _)) - // cond: isSamePtr(ptr, ptr2) - // result: (MOVBUreg x) +func rewriteValueARM_OpARMLessEqual_0(v *Value) bool { + // match: (LessEqual (FlagEQ)) + // cond: + // result: (MOVWconst [1]) for { - _ = v.Args[2] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpARMMOVBstoreidx { - break - } - _ = v_2.Args[3] - ptr2 := v_2.Args[0] - if idx != v_2.Args[1] { - break - } - x := v_2.Args[2] - if !(isSamePtr(ptr, ptr2)) { + v_0 := v.Args[0] + if v_0.Op != OpARMFlagEQ { break } - v.reset(OpARMMOVBUreg) - v.AddArg(x) + v.reset(OpARMMOVWconst) + v.AuxInt = 1 return true } - // match: (MOVBUloadidx ptr (MOVWconst [c]) mem) + // match: (LessEqual (FlagLT_ULT)) // cond: - // result: (MOVBUload [c] ptr mem) + // result: (MOVWconst [1]) for { - _ = v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMOVWconst { + v_0 := v.Args[0] + if v_0.Op != OpARMFlagLT_ULT { break } - c := v_1.AuxInt - mem := v.Args[2] - v.reset(OpARMMOVBUload) - v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.reset(OpARMMOVWconst) + v.AuxInt = 1 return true } - // match: (MOVBUloadidx (MOVWconst [c]) ptr mem) + // match: (LessEqual (FlagLT_UGT)) // cond: - // result: (MOVBUload [c] ptr mem) + // result: (MOVWconst [1]) for { - _ = v.Args[2] v_0 := v.Args[0] - if v_0.Op != OpARMMOVWconst { + if v_0.Op != OpARMFlagLT_UGT { break } - c := v_0.AuxInt - ptr := v.Args[1] - mem := v.Args[2] - v.reset(OpARMMOVBUload) - v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.reset(OpARMMOVWconst) + v.AuxInt = 1 return true } - return false -} -func rewriteValueARM_OpARMMOVBUreg_0(v *Value) bool { - // match: (MOVBUreg x:(MOVBUload _ _)) + // match: (LessEqual (FlagGT_ULT)) // cond: - // result: (MOVWreg x) + // result: (MOVWconst [0]) for { - x := v.Args[0] - if x.Op != OpARMMOVBUload { + v_0 := v.Args[0] + if v_0.Op != OpARMFlagGT_ULT { break } - _ = x.Args[1] - v.reset(OpARMMOVWreg) - v.AddArg(x) + v.reset(OpARMMOVWconst) + v.AuxInt = 0 return true } - // 
match: (MOVBUreg (ANDconst [c] x)) + // match: (LessEqual (FlagGT_UGT)) // cond: - // result: (ANDconst [c&0xff] x) + // result: (MOVWconst [0]) for { v_0 := v.Args[0] - if v_0.Op != OpARMANDconst { + if v_0.Op != OpARMFlagGT_UGT { break } - c := v_0.AuxInt - x := v_0.Args[0] - v.reset(OpARMANDconst) - v.AuxInt = c & 0xff - v.AddArg(x) + v.reset(OpARMMOVWconst) + v.AuxInt = 0 return true } - // match: (MOVBUreg x:(MOVBUreg _)) + // match: (LessEqual (InvertFlags x)) // cond: - // result: (MOVWreg x) + // result: (GreaterEqual x) for { - x := v.Args[0] - if x.Op != OpARMMOVBUreg { + v_0 := v.Args[0] + if v_0.Op != OpARMInvertFlags { break } - v.reset(OpARMMOVWreg) + x := v_0.Args[0] + v.reset(OpARMGreaterEqual) v.AddArg(x) return true } - // match: (MOVBUreg (MOVWconst [c])) + return false +} +func rewriteValueARM_OpARMLessEqualU_0(v *Value) bool { + // match: (LessEqualU (FlagEQ)) // cond: - // result: (MOVWconst [int64(uint8(c))]) + // result: (MOVWconst [1]) for { v_0 := v.Args[0] - if v_0.Op != OpARMMOVWconst { + if v_0.Op != OpARMFlagEQ { break } - c := v_0.AuxInt v.reset(OpARMMOVWconst) - v.AuxInt = int64(uint8(c)) + v.AuxInt = 1 return true } - return false -} -func rewriteValueARM_OpARMMOVBload_0(v *Value) bool { - b := v.Block - _ = b - config := b.Func.Config - _ = config - // match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) + // match: (LessEqualU (FlagLT_ULT)) // cond: - // result: (MOVBload [off1+off2] {sym} ptr mem) + // result: (MOVWconst [1]) for { - off1 := v.AuxInt - sym := v.Aux - _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARMADDconst { + if v_0.Op != OpARMFlagLT_ULT { break } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - v.reset(OpARMMOVBload) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.reset(OpARMMOVWconst) + v.AuxInt = 1 return true } - // match: (MOVBload [off1] {sym} (SUBconst [off2] ptr) mem) + // match: (LessEqualU (FlagLT_UGT)) // cond: - // result: (MOVBload [off1-off2] {sym} 
ptr mem) + // result: (MOVWconst [0]) for { - off1 := v.AuxInt - sym := v.Aux - _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARMSUBconst { + if v_0.Op != OpARMFlagLT_UGT { break } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - v.reset(OpARMMOVBload) - v.AuxInt = off1 - off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.reset(OpARMMOVWconst) + v.AuxInt = 0 return true } - // match: (MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) - // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + // match: (LessEqualU (FlagGT_ULT)) + // cond: + // result: (MOVWconst [1]) for { - off1 := v.AuxInt - sym1 := v.Aux - _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARMMOVWaddr { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2)) { + if v_0.Op != OpARMFlagGT_ULT { break } - v.reset(OpARMMOVBload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.reset(OpARMMOVWconst) + v.AuxInt = 1 return true } - // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) - // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) - // result: (MOVBreg x) + // match: (LessEqualU (FlagGT_UGT)) + // cond: + // result: (MOVWconst [0]) for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMOVBstore { - break - } - off2 := v_1.AuxInt - sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] - x := v_1.Args[1] - if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + v_0 := v.Args[0] + if v_0.Op != OpARMFlagGT_UGT { break } - v.reset(OpARMMOVBreg) - v.AddArg(x) + v.reset(OpARMMOVWconst) + v.AuxInt = 0 return true } - // match: (MOVBload [0] {sym} (ADD ptr idx) mem) - // cond: sym == nil && !config.nacl - // result: (MOVBloadidx ptr idx mem) + // match: (LessEqualU (InvertFlags x)) + // cond: + // result: (GreaterEqualU x) for { - if 
v.AuxInt != 0 { - break - } - sym := v.Aux - _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARMADD { - break - } - _ = v_0.Args[1] - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(sym == nil && !config.nacl) { + if v_0.Op != OpARMInvertFlags { break } - v.reset(OpARMMOVBloadidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + x := v_0.Args[0] + v.reset(OpARMGreaterEqualU) + v.AddArg(x) return true } return false } -func rewriteValueARM_OpARMMOVBloadidx_0(v *Value) bool { - // match: (MOVBloadidx ptr idx (MOVBstoreidx ptr2 idx x _)) - // cond: isSamePtr(ptr, ptr2) - // result: (MOVBreg x) +func rewriteValueARM_OpARMLessThan_0(v *Value) bool { + // match: (LessThan (FlagEQ)) + // cond: + // result: (MOVWconst [0]) for { - _ = v.Args[2] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpARMMOVBstoreidx { - break - } - _ = v_2.Args[3] - ptr2 := v_2.Args[0] - if idx != v_2.Args[1] { - break - } - x := v_2.Args[2] - if !(isSamePtr(ptr, ptr2)) { + v_0 := v.Args[0] + if v_0.Op != OpARMFlagEQ { break } - v.reset(OpARMMOVBreg) - v.AddArg(x) + v.reset(OpARMMOVWconst) + v.AuxInt = 0 return true } - // match: (MOVBloadidx ptr (MOVWconst [c]) mem) + // match: (LessThan (FlagLT_ULT)) // cond: - // result: (MOVBload [c] ptr mem) + // result: (MOVWconst [1]) for { - _ = v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMOVWconst { + v_0 := v.Args[0] + if v_0.Op != OpARMFlagLT_ULT { break } - c := v_1.AuxInt - mem := v.Args[2] - v.reset(OpARMMOVBload) - v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + v.reset(OpARMMOVWconst) + v.AuxInt = 1 return true } - // match: (MOVBloadidx (MOVWconst [c]) ptr mem) + // match: (LessThan (FlagLT_UGT)) // cond: - // result: (MOVBload [c] ptr mem) + // result: (MOVWconst [1]) for { - _ = v.Args[2] v_0 := v.Args[0] - if v_0.Op != OpARMMOVWconst { + if v_0.Op != OpARMFlagLT_UGT { break } - c := v_0.AuxInt - ptr := v.Args[1] - mem := v.Args[2] - v.reset(OpARMMOVBload) - v.AuxInt = c - 
v.AddArg(ptr) - v.AddArg(mem) + v.reset(OpARMMOVWconst) + v.AuxInt = 1 return true } - return false -} -func rewriteValueARM_OpARMMOVBreg_0(v *Value) bool { - // match: (MOVBreg x:(MOVBload _ _)) + // match: (LessThan (FlagGT_ULT)) // cond: - // result: (MOVWreg x) + // result: (MOVWconst [0]) for { - x := v.Args[0] - if x.Op != OpARMMOVBload { + v_0 := v.Args[0] + if v_0.Op != OpARMFlagGT_ULT { break } - _ = x.Args[1] - v.reset(OpARMMOVWreg) - v.AddArg(x) + v.reset(OpARMMOVWconst) + v.AuxInt = 0 return true } - // match: (MOVBreg (ANDconst [c] x)) - // cond: c & 0x80 == 0 - // result: (ANDconst [c&0x7f] x) + // match: (LessThan (FlagGT_UGT)) + // cond: + // result: (MOVWconst [0]) for { v_0 := v.Args[0] - if v_0.Op != OpARMANDconst { + if v_0.Op != OpARMFlagGT_UGT { break } - c := v_0.AuxInt - x := v_0.Args[0] - if !(c&0x80 == 0) { + v.reset(OpARMMOVWconst) + v.AuxInt = 0 + return true + } + // match: (LessThan (InvertFlags x)) + // cond: + // result: (GreaterThan x) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMInvertFlags { break } - v.reset(OpARMANDconst) - v.AuxInt = c & 0x7f + x := v_0.Args[0] + v.reset(OpARMGreaterThan) v.AddArg(x) return true } - // match: (MOVBreg x:(MOVBreg _)) + return false +} +func rewriteValueARM_OpARMLessThanU_0(v *Value) bool { + // match: (LessThanU (FlagEQ)) // cond: - // result: (MOVWreg x) + // result: (MOVWconst [0]) for { - x := v.Args[0] - if x.Op != OpARMMOVBreg { + v_0 := v.Args[0] + if v_0.Op != OpARMFlagEQ { break } - v.reset(OpARMMOVWreg) - v.AddArg(x) + v.reset(OpARMMOVWconst) + v.AuxInt = 0 return true } - // match: (MOVBreg (MOVWconst [c])) + // match: (LessThanU (FlagLT_ULT)) // cond: - // result: (MOVWconst [int64(int8(c))]) + // result: (MOVWconst [1]) for { v_0 := v.Args[0] - if v_0.Op != OpARMMOVWconst { + if v_0.Op != OpARMFlagLT_ULT { break } - c := v_0.AuxInt v.reset(OpARMMOVWconst) - v.AuxInt = int64(int8(c)) + v.AuxInt = 1 + return true + } + // match: (LessThanU (FlagLT_UGT)) + // cond: + // result: 
(MOVWconst [0]) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMFlagLT_UGT { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = 0 + return true + } + // match: (LessThanU (FlagGT_ULT)) + // cond: + // result: (MOVWconst [1]) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMFlagGT_ULT { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = 1 + return true + } + // match: (LessThanU (FlagGT_UGT)) + // cond: + // result: (MOVWconst [0]) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMFlagGT_UGT { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = 0 + return true + } + // match: (LessThanU (InvertFlags x)) + // cond: + // result: (GreaterThanU x) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMInvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpARMGreaterThanU) + v.AddArg(x) return true } return false } -func rewriteValueARM_OpARMMOVBstore_0(v *Value) bool { +func rewriteValueARM_OpARMMOVBUload_0(v *Value) bool { b := v.Block _ = b config := b.Func.Config _ = config - // match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) + // match: (MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem) // cond: - // result: (MOVBstore [off1+off2] {sym} ptr val mem) + // result: (MOVBUload [off1+off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux - _ = v.Args[2] + _ = v.Args[1] v_0 := v.Args[0] if v_0.Op != OpARMADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - v.reset(OpARMMOVBstore) + mem := v.Args[1] + v.reset(OpARMMOVBUload) v.AuxInt = off1 + off2 v.Aux = sym v.AddArg(ptr) - v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVBstore [off1] {sym} (SUBconst [off2] ptr) val mem) + // match: (MOVBUload [off1] {sym} (SUBconst [off2] ptr) mem) // cond: - // result: (MOVBstore [off1-off2] {sym} ptr val mem) + // result: (MOVBUload [off1-off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux - _ = v.Args[2] + _ = v.Args[1] v_0 := v.Args[0] if v_0.Op != OpARMSUBconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := 
v.Args[1] - mem := v.Args[2] - v.reset(OpARMMOVBstore) + mem := v.Args[1] + v.reset(OpARMMOVBUload) v.AuxInt = off1 - off2 v.Aux = sym v.AddArg(ptr) - v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) + // match: (MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) // cond: canMergeSym(sym1,sym2) - // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { off1 := v.AuxInt sym1 := v.Aux - _ = v.Args[2] + _ = v.Args[1] v_0 := v.Args[0] if v_0.Op != OpARMMOVWaddr { break @@ -6564,116 +6774,50 @@ func rewriteValueARM_OpARMMOVBstore_0(v *Value) bool { off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] + mem := v.Args[1] if !(canMergeSym(sym1, sym2)) { break } - v.reset(OpARMMOVBstore) + v.reset(OpARMMOVBUload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem) - // cond: - // result: (MOVBstore [off] {sym} ptr x mem) - for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMOVBreg { - break - } - x := v_1.Args[0] - mem := v.Args[2] - v.reset(OpARMMOVBstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) - return true - } - // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem) - // cond: - // result: (MOVBstore [off] {sym} ptr x mem) - for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMOVBUreg { - break - } - x := v_1.Args[0] - mem := v.Args[2] - v.reset(OpARMMOVBstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) v.AddArg(mem) return true } - // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem) - // cond: - // result: (MOVBstore [off] {sym} ptr x mem) + // match: (MOVBUload [off] {sym} ptr 
(MOVBstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: (MOVBUreg x) for { off := v.AuxInt sym := v.Aux - _ = v.Args[2] + _ = v.Args[1] ptr := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARMMOVHreg { + if v_1.Op != OpARMMOVBstore { break } - x := v_1.Args[0] - mem := v.Args[2] - v.reset(OpARMMOVBstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) - return true - } - // match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem) - // cond: - // result: (MOVBstore [off] {sym} ptr x mem) - for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMOVHUreg { + off2 := v_1.AuxInt + sym2 := v_1.Aux + _ = v_1.Args[2] + ptr2 := v_1.Args[0] + x := v_1.Args[1] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } - x := v_1.Args[0] - mem := v.Args[2] - v.reset(OpARMMOVBstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) + v.reset(OpARMMOVBUreg) v.AddArg(x) - v.AddArg(mem) return true } - // match: (MOVBstore [0] {sym} (ADD ptr idx) val mem) + // match: (MOVBUload [0] {sym} (ADD ptr idx) mem) // cond: sym == nil && !config.nacl - // result: (MOVBstoreidx ptr idx val mem) + // result: (MOVBUloadidx ptr idx mem) for { if v.AuxInt != 0 { break } sym := v.Aux - _ = v.Args[2] + _ = v.Args[1] v_0 := v.Args[0] if v_0.Op != OpARMADD { break @@ -6681,67 +6825,145 @@ func rewriteValueARM_OpARMMOVBstore_0(v *Value) bool { _ = v_0.Args[1] ptr := v_0.Args[0] idx := v_0.Args[1] - val := v.Args[1] - mem := v.Args[2] + mem := v.Args[1] if !(sym == nil && !config.nacl) { break } - v.reset(OpARMMOVBstoreidx) + v.reset(OpARMMOVBUloadidx) v.AddArg(ptr) v.AddArg(idx) - v.AddArg(val) v.AddArg(mem) return true } return false } -func rewriteValueARM_OpARMMOVBstoreidx_0(v *Value) bool { - // match: (MOVBstoreidx ptr (MOVWconst [c]) val mem) - // cond: - // result: (MOVBstore [c] ptr val mem) +func rewriteValueARM_OpARMMOVBUloadidx_0(v *Value) bool { + 
// match: (MOVBUloadidx ptr idx (MOVBstoreidx ptr2 idx x _)) + // cond: isSamePtr(ptr, ptr2) + // result: (MOVBUreg x) for { - _ = v.Args[3] + _ = v.Args[2] ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMOVWconst { + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARMMOVBstoreidx { + break + } + _ = v_2.Args[3] + ptr2 := v_2.Args[0] + if idx != v_2.Args[1] { + break + } + x := v_2.Args[2] + if !(isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpARMMOVBUreg) + v.AddArg(x) + return true + } + // match: (MOVBUloadidx ptr (MOVWconst [c]) mem) + // cond: + // result: (MOVBUload [c] ptr mem) + for { + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { break } c := v_1.AuxInt - val := v.Args[2] - mem := v.Args[3] - v.reset(OpARMMOVBstore) + mem := v.Args[2] + v.reset(OpARMMOVBUload) v.AuxInt = c v.AddArg(ptr) - v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVBstoreidx (MOVWconst [c]) ptr val mem) + // match: (MOVBUloadidx (MOVWconst [c]) ptr mem) // cond: - // result: (MOVBstore [c] ptr val mem) + // result: (MOVBUload [c] ptr mem) for { - _ = v.Args[3] + _ = v.Args[2] v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt ptr := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpARMMOVBstore) + mem := v.Args[2] + v.reset(OpARMMOVBUload) v.AuxInt = c v.AddArg(ptr) - v.AddArg(val) v.AddArg(mem) return true } return false } -func rewriteValueARM_OpARMMOVDload_0(v *Value) bool { - // match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) +func rewriteValueARM_OpARMMOVBUreg_0(v *Value) bool { + // match: (MOVBUreg x:(MOVBUload _ _)) // cond: - // result: (MOVDload [off1+off2] {sym} ptr mem) + // result: (MOVWreg x) + for { + x := v.Args[0] + if x.Op != OpARMMOVBUload { + break + } + _ = x.Args[1] + v.reset(OpARMMOVWreg) + v.AddArg(x) + return true + } + // match: (MOVBUreg (ANDconst [c] x)) + // cond: + // result: (ANDconst [c&0xff] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMANDconst { + 
break + } + c := v_0.AuxInt + x := v_0.Args[0] + v.reset(OpARMANDconst) + v.AuxInt = c & 0xff + v.AddArg(x) + return true + } + // match: (MOVBUreg x:(MOVBUreg _)) + // cond: + // result: (MOVWreg x) + for { + x := v.Args[0] + if x.Op != OpARMMOVBUreg { + break + } + v.reset(OpARMMOVWreg) + v.AddArg(x) + return true + } + // match: (MOVBUreg (MOVWconst [c])) + // cond: + // result: (MOVWconst [int64(uint8(c))]) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + v.reset(OpARMMOVWconst) + v.AuxInt = int64(uint8(c)) + return true + } + return false +} +func rewriteValueARM_OpARMMOVBload_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) + // cond: + // result: (MOVBload [off1+off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux @@ -6753,16 +6975,16 @@ func rewriteValueARM_OpARMMOVDload_0(v *Value) bool { off2 := v_0.AuxInt ptr := v_0.Args[0] mem := v.Args[1] - v.reset(OpARMMOVDload) + v.reset(OpARMMOVBload) v.AuxInt = off1 + off2 v.Aux = sym v.AddArg(ptr) v.AddArg(mem) return true } - // match: (MOVDload [off1] {sym} (SUBconst [off2] ptr) mem) + // match: (MOVBload [off1] {sym} (SUBconst [off2] ptr) mem) // cond: - // result: (MOVDload [off1-off2] {sym} ptr mem) + // result: (MOVBload [off1-off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux @@ -6774,16 +6996,16 @@ func rewriteValueARM_OpARMMOVDload_0(v *Value) bool { off2 := v_0.AuxInt ptr := v_0.Args[0] mem := v.Args[1] - v.reset(OpARMMOVDload) + v.reset(OpARMMOVBload) v.AuxInt = off1 - off2 v.Aux = sym v.AddArg(ptr) v.AddArg(mem) return true } - // match: (MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) + // match: (MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) // cond: canMergeSym(sym1,sym2) - // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { off1 := v.AuxInt sym1 := 
v.Aux @@ -6799,23 +7021,23 @@ func rewriteValueARM_OpARMMOVDload_0(v *Value) bool { if !(canMergeSym(sym1, sym2)) { break } - v.reset(OpARMMOVDload) + v.reset(OpARMMOVBload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) v.AddArg(ptr) v.AddArg(mem) return true } - // match: (MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _)) + // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) - // result: x + // result: (MOVBreg x) for { off := v.AuxInt sym := v.Aux _ = v.Args[1] ptr := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARMMOVDstore { + if v_1.Op != OpARMMOVBstore { break } off2 := v_1.AuxInt @@ -6826,188 +7048,168 @@ func rewriteValueARM_OpARMMOVDload_0(v *Value) bool { if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } - v.reset(OpCopy) - v.Type = x.Type + v.reset(OpARMMOVBreg) v.AddArg(x) return true } - return false -} -func rewriteValueARM_OpARMMOVDstore_0(v *Value) bool { - // match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) - // cond: - // result: (MOVDstore [off1+off2] {sym} ptr val mem) + // match: (MOVBload [0] {sym} (ADD ptr idx) mem) + // cond: sym == nil && !config.nacl + // result: (MOVBloadidx ptr idx mem) for { - off1 := v.AuxInt + if v.AuxInt != 0 { + break + } sym := v.Aux - _ = v.Args[2] + _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARMADDconst { + if v_0.Op != OpARMADD { break } - off2 := v_0.AuxInt + _ = v_0.Args[1] ptr := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - v.reset(OpARMMOVDstore) - v.AuxInt = off1 + off2 - v.Aux = sym + idx := v_0.Args[1] + mem := v.Args[1] + if !(sym == nil && !config.nacl) { + break + } + v.reset(OpARMMOVBloadidx) v.AddArg(ptr) - v.AddArg(val) + v.AddArg(idx) v.AddArg(mem) return true } - // match: (MOVDstore [off1] {sym} (SUBconst [off2] ptr) val mem) + return false +} +func rewriteValueARM_OpARMMOVBloadidx_0(v *Value) bool { + // match: (MOVBloadidx ptr idx (MOVBstoreidx ptr2 idx x _)) 
+ // cond: isSamePtr(ptr, ptr2) + // result: (MOVBreg x) + for { + _ = v.Args[2] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARMMOVBstoreidx { + break + } + _ = v_2.Args[3] + ptr2 := v_2.Args[0] + if idx != v_2.Args[1] { + break + } + x := v_2.Args[2] + if !(isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpARMMOVBreg) + v.AddArg(x) + return true + } + // match: (MOVBloadidx ptr (MOVWconst [c]) mem) // cond: - // result: (MOVDstore [off1-off2] {sym} ptr val mem) + // result: (MOVBload [c] ptr mem) for { - off1 := v.AuxInt - sym := v.Aux _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARMSUBconst { + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { break } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - val := v.Args[1] + c := v_1.AuxInt mem := v.Args[2] - v.reset(OpARMMOVDstore) - v.AuxInt = off1 - off2 - v.Aux = sym + v.reset(OpARMMOVBload) + v.AuxInt = c v.AddArg(ptr) - v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) - // cond: canMergeSym(sym1,sym2) - // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + // match: (MOVBloadidx (MOVWconst [c]) ptr mem) + // cond: + // result: (MOVBload [c] ptr mem) for { - off1 := v.AuxInt - sym1 := v.Aux _ = v.Args[2] v_0 := v.Args[0] - if v_0.Op != OpARMMOVWaddr { + if v_0.Op != OpARMMOVWconst { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - val := v.Args[1] + c := v_0.AuxInt + ptr := v.Args[1] mem := v.Args[2] - if !(canMergeSym(sym1, sym2)) { - break - } - v.reset(OpARMMOVDstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.reset(OpARMMOVBload) + v.AuxInt = c v.AddArg(ptr) - v.AddArg(val) v.AddArg(mem) return true } return false } -func rewriteValueARM_OpARMMOVFload_0(v *Value) bool { - // match: (MOVFload [off1] {sym} (ADDconst [off2] ptr) mem) +func rewriteValueARM_OpARMMOVBreg_0(v *Value) bool { + // match: (MOVBreg x:(MOVBload _ _)) // cond: - // result: 
(MOVFload [off1+off2] {sym} ptr mem) + // result: (MOVWreg x) for { - off1 := v.AuxInt - sym := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMADDconst { + x := v.Args[0] + if x.Op != OpARMMOVBload { break } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - v.reset(OpARMMOVFload) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + _ = x.Args[1] + v.reset(OpARMMOVWreg) + v.AddArg(x) return true } - // match: (MOVFload [off1] {sym} (SUBconst [off2] ptr) mem) - // cond: - // result: (MOVFload [off1-off2] {sym} ptr mem) + // match: (MOVBreg (ANDconst [c] x)) + // cond: c & 0x80 == 0 + // result: (ANDconst [c&0x7f] x) for { - off1 := v.AuxInt - sym := v.Aux - _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARMSUBconst { + if v_0.Op != OpARMANDconst { break } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - v.reset(OpARMMOVFload) - v.AuxInt = off1 - off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + c := v_0.AuxInt + x := v_0.Args[0] + if !(c&0x80 == 0) { + break + } + v.reset(OpARMANDconst) + v.AuxInt = c & 0x7f + v.AddArg(x) return true } - // match: (MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) - // result: (MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + // match: (MOVBreg x:(MOVBreg _)) + // cond: + // result: (MOVWreg x) for { - off1 := v.AuxInt - sym1 := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMMOVWaddr { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2)) { + x := v.Args[0] + if x.Op != OpARMMOVBreg { break } - v.reset(OpARMMOVFload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.reset(OpARMMOVWreg) + v.AddArg(x) return true } - // match: (MOVFload [off] {sym} ptr (MOVFstore [off2] {sym2} ptr2 x _)) - // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) - // result: x + // match: (MOVBreg (MOVWconst [c])) + 
// cond: + // result: (MOVWconst [int64(int8(c))]) for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMOVFstore { - break - } - off2 := v_1.AuxInt - sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] - x := v_1.Args[1] - if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + c := v_0.AuxInt + v.reset(OpARMMOVWconst) + v.AuxInt = int64(int8(c)) return true } return false } -func rewriteValueARM_OpARMMOVFstore_0(v *Value) bool { - // match: (MOVFstore [off1] {sym} (ADDconst [off2] ptr) val mem) +func rewriteValueARM_OpARMMOVBstore_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) // cond: - // result: (MOVFstore [off1+off2] {sym} ptr val mem) + // result: (MOVBstore [off1+off2] {sym} ptr val mem) for { off1 := v.AuxInt sym := v.Aux @@ -7020,7 +7222,7 @@ func rewriteValueARM_OpARMMOVFstore_0(v *Value) bool { ptr := v_0.Args[0] val := v.Args[1] mem := v.Args[2] - v.reset(OpARMMOVFstore) + v.reset(OpARMMOVBstore) v.AuxInt = off1 + off2 v.Aux = sym v.AddArg(ptr) @@ -7028,9 +7230,9 @@ func rewriteValueARM_OpARMMOVFstore_0(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVFstore [off1] {sym} (SUBconst [off2] ptr) val mem) + // match: (MOVBstore [off1] {sym} (SUBconst [off2] ptr) val mem) // cond: - // result: (MOVFstore [off1-off2] {sym} ptr val mem) + // result: (MOVBstore [off1-off2] {sym} ptr val mem) for { off1 := v.AuxInt sym := v.Aux @@ -7043,7 +7245,7 @@ func rewriteValueARM_OpARMMOVFstore_0(v *Value) bool { ptr := v_0.Args[0] val := v.Args[1] mem := v.Args[2] - v.reset(OpARMMOVFstore) + v.reset(OpARMMOVBstore) v.AuxInt = off1 - off2 v.Aux = sym v.AddArg(ptr) @@ -7051,9 +7253,9 @@ func rewriteValueARM_OpARMMOVFstore_0(v *Value) bool { v.AddArg(mem) return true } - // match: 
(MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) + // match: (MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) // cond: canMergeSym(sym1,sym2) - // result: (MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) for { off1 := v.AuxInt sym1 := v.Aux @@ -7070,7 +7272,7 @@ func rewriteValueARM_OpARMMOVFstore_0(v *Value) bool { if !(canMergeSym(sym1, sym2)) { break } - v.reset(OpARMMOVFstore) + v.reset(OpARMMOVBstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) v.AddArg(ptr) @@ -7078,113 +7280,103 @@ func rewriteValueARM_OpARMMOVFstore_0(v *Value) bool { v.AddArg(mem) return true } - return false -} -func rewriteValueARM_OpARMMOVHUload_0(v *Value) bool { - b := v.Block - _ = b - config := b.Func.Config - _ = config - // match: (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem) + // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem) // cond: - // result: (MOVHUload [off1+off2] {sym} ptr mem) + // result: (MOVBstore [off] {sym} ptr x mem) for { - off1 := v.AuxInt + off := v.AuxInt sym := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMADDconst { + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVBreg { break } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - v.reset(OpARMMOVHUload) - v.AuxInt = off1 + off2 + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpARMMOVBstore) + v.AuxInt = off v.Aux = sym v.AddArg(ptr) + v.AddArg(x) v.AddArg(mem) return true } - // match: (MOVHUload [off1] {sym} (SUBconst [off2] ptr) mem) + // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem) // cond: - // result: (MOVHUload [off1-off2] {sym} ptr mem) + // result: (MOVBstore [off] {sym} ptr x mem) for { - off1 := v.AuxInt + off := v.AuxInt sym := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSUBconst { + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVBUreg { break } - off2 := v_0.AuxInt - ptr 
:= v_0.Args[0] - mem := v.Args[1] - v.reset(OpARMMOVHUload) - v.AuxInt = off1 - off2 + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpARMMOVBstore) + v.AuxInt = off v.Aux = sym v.AddArg(ptr) + v.AddArg(x) v.AddArg(mem) return true } - // match: (MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) - // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr x mem) for { - off1 := v.AuxInt - sym1 := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMMOVWaddr { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2)) { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVHreg { break } - v.reset(OpARMMOVHUload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpARMMOVBstore) + v.AuxInt = off + v.Aux = sym v.AddArg(ptr) + v.AddArg(x) v.AddArg(mem) return true } - // match: (MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) - // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) - // result: (MOVHUreg x) + // match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr x mem) for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] + _ = v.Args[2] ptr := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARMMOVHstore { - break - } - off2 := v_1.AuxInt - sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] - x := v_1.Args[1] - if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + if v_1.Op != OpARMMOVHUreg { break } - v.reset(OpARMMOVHUreg) + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpARMMOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) v.AddArg(x) + v.AddArg(mem) return true } - // match: (MOVHUload [0] {sym} (ADD ptr idx) mem) + // match: (MOVBstore [0] {sym} (ADD 
ptr idx) val mem) // cond: sym == nil && !config.nacl - // result: (MOVHUloadidx ptr idx mem) + // result: (MOVBstoreidx ptr idx val mem) for { if v.AuxInt != 0 { break } sym := v.Aux - _ = v.Args[1] + _ = v.Args[2] v_0 := v.Args[0] if v_0.Op != OpARMADD { break @@ -7192,170 +7384,238 @@ func rewriteValueARM_OpARMMOVHUload_0(v *Value) bool { _ = v_0.Args[1] ptr := v_0.Args[0] idx := v_0.Args[1] - mem := v.Args[1] + val := v.Args[1] + mem := v.Args[2] if !(sym == nil && !config.nacl) { break } - v.reset(OpARMMOVHUloadidx) + v.reset(OpARMMOVBstoreidx) v.AddArg(ptr) v.AddArg(idx) + v.AddArg(val) v.AddArg(mem) return true } return false } -func rewriteValueARM_OpARMMOVHUloadidx_0(v *Value) bool { - // match: (MOVHUloadidx ptr idx (MOVHstoreidx ptr2 idx x _)) - // cond: isSamePtr(ptr, ptr2) - // result: (MOVHUreg x) +func rewriteValueARM_OpARMMOVBstoreidx_0(v *Value) bool { + // match: (MOVBstoreidx ptr (MOVWconst [c]) val mem) + // cond: + // result: (MOVBstore [c] ptr val mem) for { - _ = v.Args[2] + _ = v.Args[3] ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpARMMOVHstoreidx { - break - } - _ = v_2.Args[3] - ptr2 := v_2.Args[0] - if idx != v_2.Args[1] { - break - } - x := v_2.Args[2] - if !(isSamePtr(ptr, ptr2)) { - break - } - v.reset(OpARMMOVHUreg) - v.AddArg(x) - return true - } - // match: (MOVHUloadidx ptr (MOVWconst [c]) mem) - // cond: - // result: (MOVHUload [c] ptr mem) - for { - _ = v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMOVWconst { + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { break } c := v_1.AuxInt - mem := v.Args[2] - v.reset(OpARMMOVHUload) + val := v.Args[2] + mem := v.Args[3] + v.reset(OpARMMOVBstore) v.AuxInt = c v.AddArg(ptr) + v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVHUloadidx (MOVWconst [c]) ptr mem) + // match: (MOVBstoreidx (MOVWconst [c]) ptr val mem) // cond: - // result: (MOVHUload [c] ptr mem) + // result: (MOVBstore [c] ptr val mem) for { - _ = v.Args[2] + _ = 
v.Args[3] v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt ptr := v.Args[1] - mem := v.Args[2] - v.reset(OpARMMOVHUload) + val := v.Args[2] + mem := v.Args[3] + v.reset(OpARMMOVBstore) v.AuxInt = c v.AddArg(ptr) + v.AddArg(val) v.AddArg(mem) return true } return false } -func rewriteValueARM_OpARMMOVHUreg_0(v *Value) bool { - // match: (MOVHUreg x:(MOVBUload _ _)) +func rewriteValueARM_OpARMMOVDload_0(v *Value) bool { + // match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) // cond: - // result: (MOVWreg x) + // result: (MOVDload [off1+off2] {sym} ptr mem) for { - x := v.Args[0] - if x.Op != OpARMMOVBUload { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMADDconst { break } - _ = x.Args[1] - v.reset(OpARMMOVWreg) - v.AddArg(x) + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + v.reset(OpARMMOVDload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) return true } - // match: (MOVHUreg x:(MOVHUload _ _)) + // match: (MOVDload [off1] {sym} (SUBconst [off2] ptr) mem) // cond: - // result: (MOVWreg x) + // result: (MOVDload [off1-off2] {sym} ptr mem) for { - x := v.Args[0] - if x.Op != OpARMMOVHUload { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMSUBconst { break } - _ = x.Args[1] - v.reset(OpARMMOVWreg) - v.AddArg(x) + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + v.reset(OpARMMOVDload) + v.AuxInt = off1 - off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) return true } - // match: (MOVHUreg (ANDconst [c] x)) - // cond: - // result: (ANDconst [c&0xffff] x) + // match: (MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) + // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARMANDconst { + if v_0.Op != OpARMMOVWaddr { break } - c := v_0.AuxInt - x := v_0.Args[0] - 
v.reset(OpARMANDconst) - v.AuxInt = c & 0xffff - v.AddArg(x) + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpARMMOVDload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(mem) return true } - // match: (MOVHUreg x:(MOVBUreg _)) - // cond: - // result: (MOVWreg x) + // match: (MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: x for { - x := v.Args[0] - if x.Op != OpARMMOVBUreg { + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVDstore { break } - v.reset(OpARMMOVWreg) + off2 := v_1.AuxInt + sym2 := v_1.Aux + _ = v_1.Args[2] + ptr2 := v_1.Args[0] + x := v_1.Args[1] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpCopy) + v.Type = x.Type v.AddArg(x) return true } - // match: (MOVHUreg x:(MOVHUreg _)) + return false +} +func rewriteValueARM_OpARMMOVDstore_0(v *Value) bool { + // match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) // cond: - // result: (MOVWreg x) + // result: (MOVDstore [off1+off2] {sym} ptr val mem) for { - x := v.Args[0] - if x.Op != OpARMMOVHUreg { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARMADDconst { break } - v.reset(OpARMMOVWreg) - v.AddArg(x) + off2 := v_0.AuxInt + ptr := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + v.reset(OpARMMOVDstore) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) return true } - // match: (MOVHUreg (MOVWconst [c])) + // match: (MOVDstore [off1] {sym} (SUBconst [off2] ptr) val mem) // cond: - // result: (MOVWconst [int64(uint16(c))]) + // result: (MOVDstore [off1-off2] {sym} ptr val mem) for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[2] v_0 := v.Args[0] - if v_0.Op != OpARMMOVWconst { + if v_0.Op != OpARMSUBconst { 
break } - c := v_0.AuxInt - v.reset(OpARMMOVWconst) - v.AuxInt = int64(uint16(c)) + off2 := v_0.AuxInt + ptr := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + v.reset(OpARMMOVDstore) + v.AuxInt = off1 - off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) + // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWaddr { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpARMMOVDstore) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) return true } return false } -func rewriteValueARM_OpARMMOVHload_0(v *Value) bool { - b := v.Block - _ = b - config := b.Func.Config - _ = config - // match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem) +func rewriteValueARM_OpARMMOVFload_0(v *Value) bool { + // match: (MOVFload [off1] {sym} (ADDconst [off2] ptr) mem) // cond: - // result: (MOVHload [off1+off2] {sym} ptr mem) + // result: (MOVFload [off1+off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux @@ -7367,16 +7627,16 @@ func rewriteValueARM_OpARMMOVHload_0(v *Value) bool { off2 := v_0.AuxInt ptr := v_0.Args[0] mem := v.Args[1] - v.reset(OpARMMOVHload) + v.reset(OpARMMOVFload) v.AuxInt = off1 + off2 v.Aux = sym v.AddArg(ptr) v.AddArg(mem) return true } - // match: (MOVHload [off1] {sym} (SUBconst [off2] ptr) mem) + // match: (MOVFload [off1] {sym} (SUBconst [off2] ptr) mem) // cond: - // result: (MOVHload [off1-off2] {sym} ptr mem) + // result: (MOVFload [off1-off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux @@ -7388,16 +7648,16 @@ func rewriteValueARM_OpARMMOVHload_0(v *Value) bool { off2 := v_0.AuxInt ptr := v_0.Args[0] mem := v.Args[1] 
- v.reset(OpARMMOVHload) + v.reset(OpARMMOVFload) v.AuxInt = off1 - off2 v.Aux = sym v.AddArg(ptr) v.AddArg(mem) return true } - // match: (MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) + // match: (MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) // cond: canMergeSym(sym1,sym2) - // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + // result: (MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { off1 := v.AuxInt sym1 := v.Aux @@ -7413,23 +7673,23 @@ func rewriteValueARM_OpARMMOVHload_0(v *Value) bool { if !(canMergeSym(sym1, sym2)) { break } - v.reset(OpARMMOVHload) + v.reset(OpARMMOVFload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) v.AddArg(ptr) v.AddArg(mem) return true } - // match: (MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) + // match: (MOVFload [off] {sym} ptr (MOVFstore [off2] {sym2} ptr2 x _)) // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) - // result: (MOVHreg x) + // result: x for { off := v.AuxInt sym := v.Aux _ = v.Args[1] ptr := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARMMOVHstore { + if v_1.Op != OpARMMOVFstore { break } off2 := v_1.AuxInt @@ -7440,21 +7700,196 @@ func rewriteValueARM_OpARMMOVHload_0(v *Value) bool { if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } - v.reset(OpARMMOVHreg) + v.reset(OpCopy) + v.Type = x.Type v.AddArg(x) return true } - // match: (MOVHload [0] {sym} (ADD ptr idx) mem) - // cond: sym == nil && !config.nacl - // result: (MOVHloadidx ptr idx mem) + return false +} +func rewriteValueARM_OpARMMOVFstore_0(v *Value) bool { + // match: (MOVFstore [off1] {sym} (ADDconst [off2] ptr) val mem) + // cond: + // result: (MOVFstore [off1+off2] {sym} ptr val mem) for { - if v.AuxInt != 0 { - break - } + off1 := v.AuxInt sym := v.Aux - _ = v.Args[1] + _ = v.Args[2] v_0 := v.Args[0] - if v_0.Op != OpARMADD { + if v_0.Op != OpARMADDconst { + break + } + off2 := v_0.AuxInt + ptr := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + 
v.reset(OpARMMOVFstore) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVFstore [off1] {sym} (SUBconst [off2] ptr) val mem) + // cond: + // result: (MOVFstore [off1-off2] {sym} ptr val mem) + for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARMSUBconst { + break + } + off2 := v_0.AuxInt + ptr := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + v.reset(OpARMMOVFstore) + v.AuxInt = off1 - off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) + // result: (MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWaddr { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpARMMOVFstore) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueARM_OpARMMOVHUload_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem) + // cond: + // result: (MOVHUload [off1+off2] {sym} ptr mem) + for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMADDconst { + break + } + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + v.reset(OpARMMOVHUload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVHUload [off1] {sym} (SUBconst [off2] ptr) mem) + // cond: + // result: (MOVHUload [off1-off2] {sym} ptr mem) + for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMSUBconst { + break + } + off2 := 
v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + v.reset(OpARMMOVHUload) + v.AuxInt = off1 - off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) + // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWaddr { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpARMMOVHUload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: (MOVHUreg x) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVHstore { + break + } + off2 := v_1.AuxInt + sym2 := v_1.Aux + _ = v_1.Args[2] + ptr2 := v_1.Args[0] + x := v_1.Args[1] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpARMMOVHUreg) + v.AddArg(x) + return true + } + // match: (MOVHUload [0] {sym} (ADD ptr idx) mem) + // cond: sym == nil && !config.nacl + // result: (MOVHUloadidx ptr idx mem) + for { + if v.AuxInt != 0 { + break + } + sym := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMADD { break } _ = v_0.Args[1] @@ -7464,7 +7899,7 @@ func rewriteValueARM_OpARMMOVHload_0(v *Value) bool { if !(sym == nil && !config.nacl) { break } - v.reset(OpARMMOVHloadidx) + v.reset(OpARMMOVHUloadidx) v.AddArg(ptr) v.AddArg(idx) v.AddArg(mem) @@ -7472,10 +7907,10 @@ func rewriteValueARM_OpARMMOVHload_0(v *Value) bool { } return false } -func rewriteValueARM_OpARMMOVHloadidx_0(v *Value) bool { - // match: (MOVHloadidx ptr idx (MOVHstoreidx ptr2 idx x _)) +func rewriteValueARM_OpARMMOVHUloadidx_0(v 
*Value) bool { + // match: (MOVHUloadidx ptr idx (MOVHstoreidx ptr2 idx x _)) // cond: isSamePtr(ptr, ptr2) - // result: (MOVHreg x) + // result: (MOVHUreg x) for { _ = v.Args[2] ptr := v.Args[0] @@ -7493,13 +7928,13 @@ func rewriteValueARM_OpARMMOVHloadidx_0(v *Value) bool { if !(isSamePtr(ptr, ptr2)) { break } - v.reset(OpARMMOVHreg) + v.reset(OpARMMOVHUreg) v.AddArg(x) return true } - // match: (MOVHloadidx ptr (MOVWconst [c]) mem) + // match: (MOVHUloadidx ptr (MOVWconst [c]) mem) // cond: - // result: (MOVHload [c] ptr mem) + // result: (MOVHUload [c] ptr mem) for { _ = v.Args[2] ptr := v.Args[0] @@ -7509,15 +7944,15 @@ func rewriteValueARM_OpARMMOVHloadidx_0(v *Value) bool { } c := v_1.AuxInt mem := v.Args[2] - v.reset(OpARMMOVHload) + v.reset(OpARMMOVHUload) v.AuxInt = c v.AddArg(ptr) v.AddArg(mem) return true } - // match: (MOVHloadidx (MOVWconst [c]) ptr mem) + // match: (MOVHUloadidx (MOVWconst [c]) ptr mem) // cond: - // result: (MOVHload [c] ptr mem) + // result: (MOVHUload [c] ptr mem) for { _ = v.Args[2] v_0 := v.Args[0] @@ -7527,7 +7962,7 @@ func rewriteValueARM_OpARMMOVHloadidx_0(v *Value) bool { c := v_0.AuxInt ptr := v.Args[1] mem := v.Args[2] - v.reset(OpARMMOVHload) + v.reset(OpARMMOVHUload) v.AuxInt = c v.AddArg(ptr) v.AddArg(mem) @@ -7535,21 +7970,8 @@ func rewriteValueARM_OpARMMOVHloadidx_0(v *Value) bool { } return false } -func rewriteValueARM_OpARMMOVHreg_0(v *Value) bool { - // match: (MOVHreg x:(MOVBload _ _)) - // cond: - // result: (MOVWreg x) - for { - x := v.Args[0] - if x.Op != OpARMMOVBload { - break - } - _ = x.Args[1] - v.reset(OpARMMOVWreg) - v.AddArg(x) - return true - } - // match: (MOVHreg x:(MOVBUload _ _)) +func rewriteValueARM_OpARMMOVHUreg_0(v *Value) bool { + // match: (MOVHUreg x:(MOVBUload _ _)) // cond: // result: (MOVWreg x) for { @@ -7562,12 +7984,12 @@ func rewriteValueARM_OpARMMOVHreg_0(v *Value) bool { v.AddArg(x) return true } - // match: (MOVHreg x:(MOVHload _ _)) + // match: (MOVHUreg x:(MOVHUload _ _)) // 
cond: // result: (MOVWreg x) for { x := v.Args[0] - if x.Op != OpARMMOVHload { + if x.Op != OpARMMOVHUload { break } _ = x.Args[1] @@ -7575,9 +7997,9 @@ func rewriteValueARM_OpARMMOVHreg_0(v *Value) bool { v.AddArg(x) return true } - // match: (MOVHreg (ANDconst [c] x)) - // cond: c & 0x8000 == 0 - // result: (ANDconst [c&0x7fff] x) + // match: (MOVHUreg (ANDconst [c] x)) + // cond: + // result: (ANDconst [c&0xffff] x) for { v_0 := v.Args[0] if v_0.Op != OpARMANDconst { @@ -7585,27 +8007,12 @@ func rewriteValueARM_OpARMMOVHreg_0(v *Value) bool { } c := v_0.AuxInt x := v_0.Args[0] - if !(c&0x8000 == 0) { - break - } v.reset(OpARMANDconst) - v.AuxInt = c & 0x7fff - v.AddArg(x) - return true - } - // match: (MOVHreg x:(MOVBreg _)) - // cond: - // result: (MOVWreg x) - for { - x := v.Args[0] - if x.Op != OpARMMOVBreg { - break - } - v.reset(OpARMMOVWreg) + v.AuxInt = c & 0xffff v.AddArg(x) return true } - // match: (MOVHreg x:(MOVBUreg _)) + // match: (MOVHUreg x:(MOVBUreg _)) // cond: // result: (MOVWreg x) for { @@ -7617,21 +8024,21 @@ func rewriteValueARM_OpARMMOVHreg_0(v *Value) bool { v.AddArg(x) return true } - // match: (MOVHreg x:(MOVHreg _)) + // match: (MOVHUreg x:(MOVHUreg _)) // cond: // result: (MOVWreg x) for { x := v.Args[0] - if x.Op != OpARMMOVHreg { + if x.Op != OpARMMOVHUreg { break } v.reset(OpARMMOVWreg) v.AddArg(x) return true } - // match: (MOVHreg (MOVWconst [c])) + // match: (MOVHUreg (MOVWconst [c])) // cond: - // result: (MOVWconst [int64(int16(c))]) + // result: (MOVWconst [int64(uint16(c))]) for { v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { @@ -7639,69 +8046,65 @@ func rewriteValueARM_OpARMMOVHreg_0(v *Value) bool { } c := v_0.AuxInt v.reset(OpARMMOVWconst) - v.AuxInt = int64(int16(c)) + v.AuxInt = int64(uint16(c)) return true } return false } -func rewriteValueARM_OpARMMOVHstore_0(v *Value) bool { +func rewriteValueARM_OpARMMOVHload_0(v *Value) bool { b := v.Block _ = b config := b.Func.Config _ = config - // match: (MOVHstore [off1] 
{sym} (ADDconst [off2] ptr) val mem) + // match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem) // cond: - // result: (MOVHstore [off1+off2] {sym} ptr val mem) + // result: (MOVHload [off1+off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux - _ = v.Args[2] + _ = v.Args[1] v_0 := v.Args[0] if v_0.Op != OpARMADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - v.reset(OpARMMOVHstore) + mem := v.Args[1] + v.reset(OpARMMOVHload) v.AuxInt = off1 + off2 v.Aux = sym v.AddArg(ptr) - v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVHstore [off1] {sym} (SUBconst [off2] ptr) val mem) + // match: (MOVHload [off1] {sym} (SUBconst [off2] ptr) mem) // cond: - // result: (MOVHstore [off1-off2] {sym} ptr val mem) + // result: (MOVHload [off1-off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux - _ = v.Args[2] + _ = v.Args[1] v_0 := v.Args[0] if v_0.Op != OpARMSUBconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - v.reset(OpARMMOVHstore) + mem := v.Args[1] + v.reset(OpARMMOVHload) v.AuxInt = off1 - off2 v.Aux = sym v.AddArg(ptr) - v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) + // match: (MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) // cond: canMergeSym(sym1,sym2) - // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { off1 := v.AuxInt sym1 := v.Aux - _ = v.Args[2] + _ = v.Args[1] v_0 := v.Args[0] if v_0.Op != OpARMMOVWaddr { break @@ -7709,72 +8112,50 @@ func rewriteValueARM_OpARMMOVHstore_0(v *Value) bool { off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] + mem := v.Args[1] if !(canMergeSym(sym1, sym2)) { break } - v.reset(OpARMMOVHstore) + v.reset(OpARMMOVHload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) v.AddArg(ptr) - v.AddArg(val) v.AddArg(mem) return 
true } - // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem) - // cond: - // result: (MOVHstore [off] {sym} ptr x mem) + // match: (MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: (MOVHreg x) for { off := v.AuxInt sym := v.Aux - _ = v.Args[2] + _ = v.Args[1] ptr := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARMMOVHreg { + if v_1.Op != OpARMMOVHstore { break } - x := v_1.Args[0] - mem := v.Args[2] - v.reset(OpARMMOVHstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) - return true - } - // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem) - // cond: - // result: (MOVHstore [off] {sym} ptr x mem) - for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMOVHUreg { + off2 := v_1.AuxInt + sym2 := v_1.Aux + _ = v_1.Args[2] + ptr2 := v_1.Args[0] + x := v_1.Args[1] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } - x := v_1.Args[0] - mem := v.Args[2] - v.reset(OpARMMOVHstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) + v.reset(OpARMMOVHreg) v.AddArg(x) - v.AddArg(mem) return true } - // match: (MOVHstore [0] {sym} (ADD ptr idx) val mem) + // match: (MOVHload [0] {sym} (ADD ptr idx) mem) // cond: sym == nil && !config.nacl - // result: (MOVHstoreidx ptr idx val mem) + // result: (MOVHloadidx ptr idx mem) for { if v.AuxInt != 0 { break } sym := v.Aux - _ = v.Args[2] + _ = v.Args[1] v_0 := v.Args[0] if v_0.Op != OpARMADD { break @@ -7782,526 +8163,613 @@ func rewriteValueARM_OpARMMOVHstore_0(v *Value) bool { _ = v_0.Args[1] ptr := v_0.Args[0] idx := v_0.Args[1] - val := v.Args[1] - mem := v.Args[2] + mem := v.Args[1] if !(sym == nil && !config.nacl) { break } - v.reset(OpARMMOVHstoreidx) + v.reset(OpARMMOVHloadidx) v.AddArg(ptr) v.AddArg(idx) - v.AddArg(val) v.AddArg(mem) return true } return false } -func rewriteValueARM_OpARMMOVHstoreidx_0(v *Value) bool { - // match: 
(MOVHstoreidx ptr (MOVWconst [c]) val mem) +func rewriteValueARM_OpARMMOVHloadidx_0(v *Value) bool { + // match: (MOVHloadidx ptr idx (MOVHstoreidx ptr2 idx x _)) + // cond: isSamePtr(ptr, ptr2) + // result: (MOVHreg x) + for { + _ = v.Args[2] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARMMOVHstoreidx { + break + } + _ = v_2.Args[3] + ptr2 := v_2.Args[0] + if idx != v_2.Args[1] { + break + } + x := v_2.Args[2] + if !(isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpARMMOVHreg) + v.AddArg(x) + return true + } + // match: (MOVHloadidx ptr (MOVWconst [c]) mem) // cond: - // result: (MOVHstore [c] ptr val mem) + // result: (MOVHload [c] ptr mem) for { - _ = v.Args[3] + _ = v.Args[2] ptr := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpARMMOVWconst { break } c := v_1.AuxInt - val := v.Args[2] - mem := v.Args[3] - v.reset(OpARMMOVHstore) + mem := v.Args[2] + v.reset(OpARMMOVHload) v.AuxInt = c v.AddArg(ptr) - v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVHstoreidx (MOVWconst [c]) ptr val mem) + // match: (MOVHloadidx (MOVWconst [c]) ptr mem) // cond: - // result: (MOVHstore [c] ptr val mem) + // result: (MOVHload [c] ptr mem) for { - _ = v.Args[3] + _ = v.Args[2] v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt ptr := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpARMMOVHstore) + mem := v.Args[2] + v.reset(OpARMMOVHload) v.AuxInt = c v.AddArg(ptr) - v.AddArg(val) v.AddArg(mem) return true } return false } -func rewriteValueARM_OpARMMOVWload_0(v *Value) bool { - b := v.Block - _ = b - config := b.Func.Config - _ = config - // match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem) +func rewriteValueARM_OpARMMOVHreg_0(v *Value) bool { + // match: (MOVHreg x:(MOVBload _ _)) // cond: - // result: (MOVWload [off1+off2] {sym} ptr mem) + // result: (MOVWreg x) for { - off1 := v.AuxInt - sym := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMADDconst { + x := v.Args[0] + if x.Op != 
OpARMMOVBload { break } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - v.reset(OpARMMOVWload) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + _ = x.Args[1] + v.reset(OpARMMOVWreg) + v.AddArg(x) return true } - // match: (MOVWload [off1] {sym} (SUBconst [off2] ptr) mem) + // match: (MOVHreg x:(MOVBUload _ _)) // cond: - // result: (MOVWload [off1-off2] {sym} ptr mem) + // result: (MOVWreg x) for { - off1 := v.AuxInt - sym := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSUBconst { + x := v.Args[0] + if x.Op != OpARMMOVBUload { break } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - v.reset(OpARMMOVWload) - v.AuxInt = off1 - off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + _ = x.Args[1] + v.reset(OpARMMOVWreg) + v.AddArg(x) return true } - // match: (MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) - // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + // match: (MOVHreg x:(MOVHload _ _)) + // cond: + // result: (MOVWreg x) for { - off1 := v.AuxInt - sym1 := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMMOVWaddr { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2)) { + x := v.Args[0] + if x.Op != OpARMMOVHload { break } - v.reset(OpARMMOVWload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + _ = x.Args[1] + v.reset(OpARMMOVWreg) + v.AddArg(x) return true } - // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) - // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) - // result: x + // match: (MOVHreg (ANDconst [c] x)) + // cond: c & 0x8000 == 0 + // result: (ANDconst [c&0x7fff] x) for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMOVWstore { + v_0 := v.Args[0] + if v_0.Op != OpARMANDconst { break } - off2 := v_1.AuxInt - sym2 
:= v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] - x := v_1.Args[1] - if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + c := v_0.AuxInt + x := v_0.Args[0] + if !(c&0x8000 == 0) { break } - v.reset(OpCopy) - v.Type = x.Type + v.reset(OpARMANDconst) + v.AuxInt = c & 0x7fff v.AddArg(x) return true } - // match: (MOVWload [0] {sym} (ADD ptr idx) mem) - // cond: sym == nil && !config.nacl - // result: (MOVWloadidx ptr idx mem) + // match: (MOVHreg x:(MOVBreg _)) + // cond: + // result: (MOVWreg x) for { - if v.AuxInt != 0 { + x := v.Args[0] + if x.Op != OpARMMOVBreg { break } - sym := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMADD { + v.reset(OpARMMOVWreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVBUreg _)) + // cond: + // result: (MOVWreg x) + for { + x := v.Args[0] + if x.Op != OpARMMOVBUreg { break } - _ = v_0.Args[1] - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(sym == nil && !config.nacl) { + v.reset(OpARMMOVWreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVHreg _)) + // cond: + // result: (MOVWreg x) + for { + x := v.Args[0] + if x.Op != OpARMMOVHreg { break } - v.reset(OpARMMOVWloadidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.reset(OpARMMOVWreg) + v.AddArg(x) return true } - // match: (MOVWload [0] {sym} (ADDshiftLL ptr idx [c]) mem) - // cond: sym == nil && !config.nacl - // result: (MOVWloadshiftLL ptr idx [c] mem) + // match: (MOVHreg (MOVWconst [c])) + // cond: + // result: (MOVWconst [int64(int16(c))]) for { - if v.AuxInt != 0 { + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { break } + c := v_0.AuxInt + v.reset(OpARMMOVWconst) + v.AuxInt = int64(int16(c)) + return true + } + return false +} +func rewriteValueARM_OpARMMOVHstore_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) + // cond: + // result: (MOVHstore [off1+off2] {sym} ptr val mem) + for { + off1 := 
v.AuxInt sym := v.Aux - _ = v.Args[1] + _ = v.Args[2] v_0 := v.Args[0] - if v_0.Op != OpARMADDshiftLL { + if v_0.Op != OpARMADDconst { break } - c := v_0.AuxInt - _ = v_0.Args[1] + off2 := v_0.AuxInt ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(sym == nil && !config.nacl) { - break - } - v.reset(OpARMMOVWloadshiftLL) - v.AuxInt = c + val := v.Args[1] + mem := v.Args[2] + v.reset(OpARMMOVHstore) + v.AuxInt = off1 + off2 + v.Aux = sym v.AddArg(ptr) - v.AddArg(idx) + v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVWload [0] {sym} (ADDshiftRL ptr idx [c]) mem) - // cond: sym == nil && !config.nacl - // result: (MOVWloadshiftRL ptr idx [c] mem) + // match: (MOVHstore [off1] {sym} (SUBconst [off2] ptr) val mem) + // cond: + // result: (MOVHstore [off1-off2] {sym} ptr val mem) for { - if v.AuxInt != 0 { - break - } + off1 := v.AuxInt sym := v.Aux - _ = v.Args[1] + _ = v.Args[2] v_0 := v.Args[0] - if v_0.Op != OpARMADDshiftRL { + if v_0.Op != OpARMSUBconst { break } - c := v_0.AuxInt - _ = v_0.Args[1] + off2 := v_0.AuxInt ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(sym == nil && !config.nacl) { - break - } - v.reset(OpARMMOVWloadshiftRL) - v.AuxInt = c + val := v.Args[1] + mem := v.Args[2] + v.reset(OpARMMOVHstore) + v.AuxInt = off1 - off2 + v.Aux = sym v.AddArg(ptr) - v.AddArg(idx) + v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVWload [0] {sym} (ADDshiftRA ptr idx [c]) mem) - // cond: sym == nil && !config.nacl - // result: (MOVWloadshiftRA ptr idx [c] mem) + // match: (MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) + // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) for { - if v.AuxInt != 0 { - break - } - sym := v.Aux - _ = v.Args[1] + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[2] v_0 := v.Args[0] - if v_0.Op != OpARMADDshiftRA { + if v_0.Op != OpARMMOVWaddr { break } - c := v_0.AuxInt - _ = v_0.Args[1] + off2 := v_0.AuxInt + sym2 := 
v_0.Aux ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(sym == nil && !config.nacl) { + val := v.Args[1] + mem := v.Args[2] + if !(canMergeSym(sym1, sym2)) { break } - v.reset(OpARMMOVWloadshiftRA) - v.AuxInt = c + v.reset(OpARMMOVHstore) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) v.AddArg(ptr) - v.AddArg(idx) + v.AddArg(val) v.AddArg(mem) return true } - return false -} -func rewriteValueARM_OpARMMOVWloadidx_0(v *Value) bool { - // match: (MOVWloadidx ptr idx (MOVWstoreidx ptr2 idx x _)) - // cond: isSamePtr(ptr, ptr2) - // result: x + // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem) + // cond: + // result: (MOVHstore [off] {sym} ptr x mem) for { + off := v.AuxInt + sym := v.Aux _ = v.Args[2] ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpARMMOVWstoreidx { - break - } - _ = v_2.Args[3] - ptr2 := v_2.Args[0] - if idx != v_2.Args[1] { - break - } - x := v_2.Args[2] - if !(isSamePtr(ptr, ptr2)) { + v_1 := v.Args[1] + if v_1.Op != OpARMMOVHreg { break } - v.reset(OpCopy) - v.Type = x.Type + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpARMMOVHstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) v.AddArg(x) + v.AddArg(mem) return true } - // match: (MOVWloadidx ptr (MOVWconst [c]) mem) + // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem) // cond: - // result: (MOVWload [c] ptr mem) + // result: (MOVHstore [off] {sym} ptr x mem) for { + off := v.AuxInt + sym := v.Aux _ = v.Args[2] ptr := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARMMOVWconst { + if v_1.Op != OpARMMOVHUreg { break } - c := v_1.AuxInt + x := v_1.Args[0] mem := v.Args[2] - v.reset(OpARMMOVWload) - v.AuxInt = c + v.reset(OpARMMOVHstore) + v.AuxInt = off + v.Aux = sym v.AddArg(ptr) + v.AddArg(x) v.AddArg(mem) return true } - // match: (MOVWloadidx (MOVWconst [c]) ptr mem) - // cond: - // result: (MOVWload [c] ptr mem) + // match: (MOVHstore [0] {sym} (ADD ptr idx) val mem) + // cond: sym == nil && !config.nacl + // result: (MOVHstoreidx 
ptr idx val mem) for { + if v.AuxInt != 0 { + break + } + sym := v.Aux _ = v.Args[2] v_0 := v.Args[0] - if v_0.Op != OpARMMOVWconst { + if v_0.Op != OpARMADD { break } - c := v_0.AuxInt - ptr := v.Args[1] + _ = v_0.Args[1] + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] mem := v.Args[2] - v.reset(OpARMMOVWload) - v.AuxInt = c + if !(sym == nil && !config.nacl) { + break + } + v.reset(OpARMMOVHstoreidx) v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVWloadidx ptr (SLLconst idx [c]) mem) + return false +} +func rewriteValueARM_OpARMMOVHstoreidx_0(v *Value) bool { + // match: (MOVHstoreidx ptr (MOVWconst [c]) val mem) // cond: - // result: (MOVWloadshiftLL ptr idx [c] mem) + // result: (MOVHstore [c] ptr val mem) for { - _ = v.Args[2] + _ = v.Args[3] ptr := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARMSLLconst { + if v_1.Op != OpARMMOVWconst { break } c := v_1.AuxInt - idx := v_1.Args[0] - mem := v.Args[2] - v.reset(OpARMMOVWloadshiftLL) + val := v.Args[2] + mem := v.Args[3] + v.reset(OpARMMOVHstore) v.AuxInt = c v.AddArg(ptr) - v.AddArg(idx) + v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVWloadidx (SLLconst idx [c]) ptr mem) + // match: (MOVHstoreidx (MOVWconst [c]) ptr val mem) // cond: - // result: (MOVWloadshiftLL ptr idx [c] mem) + // result: (MOVHstore [c] ptr val mem) for { - _ = v.Args[2] + _ = v.Args[3] v_0 := v.Args[0] - if v_0.Op != OpARMSLLconst { + if v_0.Op != OpARMMOVWconst { break } c := v_0.AuxInt - idx := v_0.Args[0] ptr := v.Args[1] - mem := v.Args[2] - v.reset(OpARMMOVWloadshiftLL) + val := v.Args[2] + mem := v.Args[3] + v.reset(OpARMMOVHstore) v.AuxInt = c v.AddArg(ptr) - v.AddArg(idx) + v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVWloadidx ptr (SRLconst idx [c]) mem) + return false +} +func rewriteValueARM_OpARMMOVWload_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem) // 
cond: - // result: (MOVWloadshiftRL ptr idx [c] mem) + // result: (MOVWload [off1+off2] {sym} ptr mem) for { - _ = v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSRLconst { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMADDconst { break } - c := v_1.AuxInt - idx := v_1.Args[0] - mem := v.Args[2] - v.reset(OpARMMOVWloadshiftRL) - v.AuxInt = c + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + v.reset(OpARMMOVWload) + v.AuxInt = off1 + off2 + v.Aux = sym v.AddArg(ptr) - v.AddArg(idx) v.AddArg(mem) return true } - // match: (MOVWloadidx (SRLconst idx [c]) ptr mem) + // match: (MOVWload [off1] {sym} (SUBconst [off2] ptr) mem) // cond: - // result: (MOVWloadshiftRL ptr idx [c] mem) + // result: (MOVWload [off1-off2] {sym} ptr mem) for { - _ = v.Args[2] + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARMSRLconst { + if v_0.Op != OpARMSUBconst { break } - c := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - mem := v.Args[2] - v.reset(OpARMMOVWloadshiftRL) - v.AuxInt = c + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + v.reset(OpARMMOVWload) + v.AuxInt = off1 - off2 + v.Aux = sym v.AddArg(ptr) - v.AddArg(idx) v.AddArg(mem) return true } - // match: (MOVWloadidx ptr (SRAconst idx [c]) mem) - // cond: - // result: (MOVWloadshiftRA ptr idx [c] mem) + // match: (MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) + // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { - _ = v.Args[2] + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWaddr { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpARMMOVWload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVWload [off] {sym} ptr (MOVWstore 
[off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: x + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] ptr := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARMSRAconst { + if v_1.Op != OpARMMOVWstore { break } - c := v_1.AuxInt - idx := v_1.Args[0] - mem := v.Args[2] - v.reset(OpARMMOVWloadshiftRA) - v.AuxInt = c + off2 := v_1.AuxInt + sym2 := v_1.Aux + _ = v_1.Args[2] + ptr2 := v_1.Args[0] + x := v_1.Args[1] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVWload [0] {sym} (ADD ptr idx) mem) + // cond: sym == nil && !config.nacl + // result: (MOVWloadidx ptr idx mem) + for { + if v.AuxInt != 0 { + break + } + sym := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMADD { + break + } + _ = v_0.Args[1] + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(sym == nil && !config.nacl) { + break + } + v.reset(OpARMMOVWloadidx) v.AddArg(ptr) v.AddArg(idx) v.AddArg(mem) return true } - // match: (MOVWloadidx (SRAconst idx [c]) ptr mem) - // cond: - // result: (MOVWloadshiftRA ptr idx [c] mem) + // match: (MOVWload [0] {sym} (ADDshiftLL ptr idx [c]) mem) + // cond: sym == nil && !config.nacl + // result: (MOVWloadshiftLL ptr idx [c] mem) for { - _ = v.Args[2] + if v.AuxInt != 0 { + break + } + sym := v.Aux + _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARMSRAconst { + if v_0.Op != OpARMADDshiftLL { break } c := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - mem := v.Args[2] - v.reset(OpARMMOVWloadshiftRA) + _ = v_0.Args[1] + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(sym == nil && !config.nacl) { + break + } + v.reset(OpARMMOVWloadshiftLL) v.AuxInt = c v.AddArg(ptr) v.AddArg(idx) v.AddArg(mem) return true } - return false -} -func rewriteValueARM_OpARMMOVWloadshiftLL_0(v *Value) bool { - // match: (MOVWloadshiftLL ptr idx [c] (MOVWstoreshiftLL ptr2 idx [d] x 
_)) - // cond: c==d && isSamePtr(ptr, ptr2) - // result: x + // match: (MOVWload [0] {sym} (ADDshiftRL ptr idx [c]) mem) + // cond: sym == nil && !config.nacl + // result: (MOVWloadshiftRL ptr idx [c] mem) for { - c := v.AuxInt - _ = v.Args[2] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpARMMOVWstoreshiftLL { + if v.AuxInt != 0 { break } - d := v_2.AuxInt - _ = v_2.Args[3] - ptr2 := v_2.Args[0] - if idx != v_2.Args[1] { + sym := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMADDshiftRL { break } - x := v_2.Args[2] - if !(c == d && isSamePtr(ptr, ptr2)) { + c := v_0.AuxInt + _ = v_0.Args[1] + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(sym == nil && !config.nacl) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.reset(OpARMMOVWloadshiftRL) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) return true } - // match: (MOVWloadshiftLL ptr (MOVWconst [c]) [d] mem) - // cond: - // result: (MOVWload [int64(uint32(c)<<uint64(d))] ptr mem) + // match: (MOVWload [0] {sym} (ADDshiftRA ptr idx [c]) mem) + // cond: sym == nil && !config.nacl + // result: (MOVWloadshiftRA ptr idx [c] mem) for { - d := v.AuxInt - _ = v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMOVWconst { + if v.AuxInt != 0 { break } - c := v_1.AuxInt - mem := v.Args[2] - v.reset(OpARMMOVWload) - v.AuxInt = int64(uint32(c) << uint64(d)) + sym := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMADDshiftRA { + break + } + c := v_0.AuxInt + _ = v_0.Args[1] + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(sym == nil && !config.nacl) { + break + } + v.reset(OpARMMOVWloadshiftRA) + v.AuxInt = c v.AddArg(ptr) + v.AddArg(idx) v.AddArg(mem) return true } return false } -func rewriteValueARM_OpARMMOVWloadshiftRA_0(v *Value) bool { - // match: (MOVWloadshiftRA ptr idx [c] (MOVWstoreshiftRA ptr2 idx [d] x _)) - // cond: c==d && isSamePtr(ptr, ptr2) +func 
rewriteValueARM_OpARMMOVWloadidx_0(v *Value) bool { + // match: (MOVWloadidx ptr idx (MOVWstoreidx ptr2 idx x _)) + // cond: isSamePtr(ptr, ptr2) // result: x for { - c := v.AuxInt _ = v.Args[2] ptr := v.Args[0] idx := v.Args[1] v_2 := v.Args[2] - if v_2.Op != OpARMMOVWstoreshiftRA { + if v_2.Op != OpARMMOVWstoreidx { break } - d := v_2.AuxInt _ = v_2.Args[3] ptr2 := v_2.Args[0] if idx != v_2.Args[1] { break } x := v_2.Args[2] - if !(c == d && isSamePtr(ptr, ptr2)) { + if !(isSamePtr(ptr, ptr2)) { break } v.reset(OpCopy) @@ -8309,11 +8777,10 @@ func rewriteValueARM_OpARMMOVWloadshiftRA_0(v *Value) bool { v.AddArg(x) return true } - // match: (MOVWloadshiftRA ptr (MOVWconst [c]) [d] mem) + // match: (MOVWloadidx ptr (MOVWconst [c]) mem) // cond: - // result: (MOVWload [int64(int32(c)>>uint64(d))] ptr mem) + // result: (MOVWload [c] ptr mem) for { - d := v.AuxInt _ = v.Args[2] ptr := v.Args[0] v_1 := v.Args[1] @@ -8323,19 +8790,255 @@ func rewriteValueARM_OpARMMOVWloadshiftRA_0(v *Value) bool { c := v_1.AuxInt mem := v.Args[2] v.reset(OpARMMOVWload) - v.AuxInt = int64(int32(c) >> uint64(d)) + v.AuxInt = c v.AddArg(ptr) v.AddArg(mem) return true } - return false -} -func rewriteValueARM_OpARMMOVWloadshiftRL_0(v *Value) bool { - // match: (MOVWloadshiftRL ptr idx [c] (MOVWstoreshiftRL ptr2 idx [d] x _)) - // cond: c==d && isSamePtr(ptr, ptr2) - // result: x - for { - c := v.AuxInt + // match: (MOVWloadidx (MOVWconst [c]) ptr mem) + // cond: + // result: (MOVWload [c] ptr mem) + for { + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + ptr := v.Args[1] + mem := v.Args[2] + v.reset(OpARMMOVWload) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVWloadidx ptr (SLLconst idx [c]) mem) + // cond: + // result: (MOVWloadshiftLL ptr idx [c] mem) + for { + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMSLLconst { + break + } + c := v_1.AuxInt + idx := v_1.Args[0] + mem := 
v.Args[2] + v.reset(OpARMMOVWloadshiftLL) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVWloadidx (SLLconst idx [c]) ptr mem) + // cond: + // result: (MOVWloadshiftLL ptr idx [c] mem) + for { + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARMSLLconst { + break + } + c := v_0.AuxInt + idx := v_0.Args[0] + ptr := v.Args[1] + mem := v.Args[2] + v.reset(OpARMMOVWloadshiftLL) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVWloadidx ptr (SRLconst idx [c]) mem) + // cond: + // result: (MOVWloadshiftRL ptr idx [c] mem) + for { + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMSRLconst { + break + } + c := v_1.AuxInt + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(OpARMMOVWloadshiftRL) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVWloadidx (SRLconst idx [c]) ptr mem) + // cond: + // result: (MOVWloadshiftRL ptr idx [c] mem) + for { + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARMSRLconst { + break + } + c := v_0.AuxInt + idx := v_0.Args[0] + ptr := v.Args[1] + mem := v.Args[2] + v.reset(OpARMMOVWloadshiftRL) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVWloadidx ptr (SRAconst idx [c]) mem) + // cond: + // result: (MOVWloadshiftRA ptr idx [c] mem) + for { + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMSRAconst { + break + } + c := v_1.AuxInt + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(OpARMMOVWloadshiftRA) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVWloadidx (SRAconst idx [c]) ptr mem) + // cond: + // result: (MOVWloadshiftRA ptr idx [c] mem) + for { + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARMSRAconst { + break + } + c := v_0.AuxInt + idx := v_0.Args[0] + ptr := v.Args[1] + mem := v.Args[2] + v.reset(OpARMMOVWloadshiftRA) + v.AuxInt = c + 
v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueARM_OpARMMOVWloadshiftLL_0(v *Value) bool { + // match: (MOVWloadshiftLL ptr idx [c] (MOVWstoreshiftLL ptr2 idx [d] x _)) + // cond: c==d && isSamePtr(ptr, ptr2) + // result: x + for { + c := v.AuxInt + _ = v.Args[2] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARMMOVWstoreshiftLL { + break + } + d := v_2.AuxInt + _ = v_2.Args[3] + ptr2 := v_2.Args[0] + if idx != v_2.Args[1] { + break + } + x := v_2.Args[2] + if !(c == d && isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVWloadshiftLL ptr (MOVWconst [c]) [d] mem) + // cond: + // result: (MOVWload [int64(uint32(c)<<uint64(d))] ptr mem) + for { + d := v.AuxInt + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + mem := v.Args[2] + v.reset(OpARMMOVWload) + v.AuxInt = int64(uint32(c) << uint64(d)) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueARM_OpARMMOVWloadshiftRA_0(v *Value) bool { + // match: (MOVWloadshiftRA ptr idx [c] (MOVWstoreshiftRA ptr2 idx [d] x _)) + // cond: c==d && isSamePtr(ptr, ptr2) + // result: x + for { + c := v.AuxInt + _ = v.Args[2] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARMMOVWstoreshiftRA { + break + } + d := v_2.AuxInt + _ = v_2.Args[3] + ptr2 := v_2.Args[0] + if idx != v_2.Args[1] { + break + } + x := v_2.Args[2] + if !(c == d && isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVWloadshiftRA ptr (MOVWconst [c]) [d] mem) + // cond: + // result: (MOVWload [int64(int32(c)>>uint64(d))] ptr mem) + for { + d := v.AuxInt + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + mem := v.Args[2] + v.reset(OpARMMOVWload) + v.AuxInt = 
int64(int32(c) >> uint64(d)) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueARM_OpARMMOVWloadshiftRL_0(v *Value) bool { + // match: (MOVWloadshiftRL ptr idx [c] (MOVWstoreshiftRL ptr2 idx [d] x _)) + // cond: c==d && isSamePtr(ptr, ptr2) + // result: x + for { + c := v.AuxInt _ = v.Args[2] ptr := v.Args[0] idx := v.Args[1] @@ -14919,10 +15622,10 @@ func rewriteValueARM_OpARMSUBshiftRLreg_0(v *Value) bool { } return false } -func rewriteValueARM_OpARMXOR_0(v *Value) bool { - // match: (XOR x (MOVWconst [c])) +func rewriteValueARM_OpARMTEQ_0(v *Value) bool { + // match: (TEQ x (MOVWconst [c])) // cond: - // result: (XORconst [c] x) + // result: (TEQconst [c] x) for { _ = v.Args[1] x := v.Args[0] @@ -14931,14 +15634,14 @@ func rewriteValueARM_OpARMXOR_0(v *Value) bool { break } c := v_1.AuxInt - v.reset(OpARMXORconst) + v.reset(OpARMTEQconst) v.AuxInt = c v.AddArg(x) return true } - // match: (XOR (MOVWconst [c]) x) + // match: (TEQ (MOVWconst [c]) x) // cond: - // result: (XORconst [c] x) + // result: (TEQconst [c] x) for { _ = v.Args[1] v_0 := v.Args[0] @@ -14947,14 +15650,14 @@ func rewriteValueARM_OpARMXOR_0(v *Value) bool { } c := v_0.AuxInt x := v.Args[1] - v.reset(OpARMXORconst) + v.reset(OpARMTEQconst) v.AuxInt = c v.AddArg(x) return true } - // match: (XOR x (SLLconst [c] y)) + // match: (TEQ x (SLLconst [c] y)) // cond: - // result: (XORshiftLL x y [c]) + // result: (TEQshiftLL x y [c]) for { _ = v.Args[1] x := v.Args[0] @@ -14964,15 +15667,15 @@ func rewriteValueARM_OpARMXOR_0(v *Value) bool { } c := v_1.AuxInt y := v_1.Args[0] - v.reset(OpARMXORshiftLL) + v.reset(OpARMTEQshiftLL) v.AuxInt = c v.AddArg(x) v.AddArg(y) return true } - // match: (XOR (SLLconst [c] y) x) + // match: (TEQ (SLLconst [c] y) x) // cond: - // result: (XORshiftLL x y [c]) + // result: (TEQshiftLL x y [c]) for { _ = v.Args[1] v_0 := v.Args[0] @@ -14982,15 +15685,15 @@ func rewriteValueARM_OpARMXOR_0(v *Value) bool { c := v_0.AuxInt y := 
v_0.Args[0] x := v.Args[1] - v.reset(OpARMXORshiftLL) + v.reset(OpARMTEQshiftLL) v.AuxInt = c v.AddArg(x) v.AddArg(y) return true } - // match: (XOR x (SRLconst [c] y)) + // match: (TEQ x (SRLconst [c] y)) // cond: - // result: (XORshiftRL x y [c]) + // result: (TEQshiftRL x y [c]) for { _ = v.Args[1] x := v.Args[0] @@ -15000,15 +15703,15 @@ func rewriteValueARM_OpARMXOR_0(v *Value) bool { } c := v_1.AuxInt y := v_1.Args[0] - v.reset(OpARMXORshiftRL) + v.reset(OpARMTEQshiftRL) v.AuxInt = c v.AddArg(x) v.AddArg(y) return true } - // match: (XOR (SRLconst [c] y) x) + // match: (TEQ (SRLconst [c] y) x) // cond: - // result: (XORshiftRL x y [c]) + // result: (TEQshiftRL x y [c]) for { _ = v.Args[1] v_0 := v.Args[0] @@ -15018,15 +15721,15 @@ func rewriteValueARM_OpARMXOR_0(v *Value) bool { c := v_0.AuxInt y := v_0.Args[0] x := v.Args[1] - v.reset(OpARMXORshiftRL) + v.reset(OpARMTEQshiftRL) v.AuxInt = c v.AddArg(x) v.AddArg(y) return true } - // match: (XOR x (SRAconst [c] y)) + // match: (TEQ x (SRAconst [c] y)) // cond: - // result: (XORshiftRA x y [c]) + // result: (TEQshiftRA x y [c]) for { _ = v.Args[1] x := v.Args[0] @@ -15036,15 +15739,15 @@ func rewriteValueARM_OpARMXOR_0(v *Value) bool { } c := v_1.AuxInt y := v_1.Args[0] - v.reset(OpARMXORshiftRA) + v.reset(OpARMTEQshiftRA) v.AuxInt = c v.AddArg(x) v.AddArg(y) return true } - // match: (XOR (SRAconst [c] y) x) + // match: (TEQ (SRAconst [c] y) x) // cond: - // result: (XORshiftRA x y [c]) + // result: (TEQshiftRA x y [c]) for { _ = v.Args[1] v_0 := v.Args[0] @@ -15054,54 +15757,15 @@ func rewriteValueARM_OpARMXOR_0(v *Value) bool { c := v_0.AuxInt y := v_0.Args[0] x := v.Args[1] - v.reset(OpARMXORshiftRA) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (XOR x (SRRconst [c] y)) - // cond: - // result: (XORshiftRR x y [c]) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSRRconst { - break - } - c := v_1.AuxInt - y := v_1.Args[0] - v.reset(OpARMXORshiftRR) - 
v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (XOR (SRRconst [c] y) x) - // cond: - // result: (XORshiftRR x y [c]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARMSRRconst { - break - } - c := v_0.AuxInt - y := v_0.Args[0] - x := v.Args[1] - v.reset(OpARMXORshiftRR) + v.reset(OpARMTEQshiftRA) v.AuxInt = c v.AddArg(x) v.AddArg(y) return true } - return false -} -func rewriteValueARM_OpARMXOR_10(v *Value) bool { - // match: (XOR x (SLL y z)) + // match: (TEQ x (SLL y z)) // cond: - // result: (XORshiftLLreg x y z) + // result: (TEQshiftLLreg x y z) for { _ = v.Args[1] x := v.Args[0] @@ -15112,15 +15776,15 @@ func rewriteValueARM_OpARMXOR_10(v *Value) bool { _ = v_1.Args[1] y := v_1.Args[0] z := v_1.Args[1] - v.reset(OpARMXORshiftLLreg) + v.reset(OpARMTEQshiftLLreg) v.AddArg(x) v.AddArg(y) v.AddArg(z) return true } - // match: (XOR (SLL y z) x) + // match: (TEQ (SLL y z) x) // cond: - // result: (XORshiftLLreg x y z) + // result: (TEQshiftLLreg x y z) for { _ = v.Args[1] v_0 := v.Args[0] @@ -15131,15 +15795,18 @@ func rewriteValueARM_OpARMXOR_10(v *Value) bool { y := v_0.Args[0] z := v_0.Args[1] x := v.Args[1] - v.reset(OpARMXORshiftLLreg) + v.reset(OpARMTEQshiftLLreg) v.AddArg(x) v.AddArg(y) v.AddArg(z) return true } - // match: (XOR x (SRL y z)) + return false +} +func rewriteValueARM_OpARMTEQ_10(v *Value) bool { + // match: (TEQ x (SRL y z)) // cond: - // result: (XORshiftRLreg x y z) + // result: (TEQshiftRLreg x y z) for { _ = v.Args[1] x := v.Args[0] @@ -15150,15 +15817,15 @@ func rewriteValueARM_OpARMXOR_10(v *Value) bool { _ = v_1.Args[1] y := v_1.Args[0] z := v_1.Args[1] - v.reset(OpARMXORshiftRLreg) + v.reset(OpARMTEQshiftRLreg) v.AddArg(x) v.AddArg(y) v.AddArg(z) return true } - // match: (XOR (SRL y z) x) + // match: (TEQ (SRL y z) x) // cond: - // result: (XORshiftRLreg x y z) + // result: (TEQshiftRLreg x y z) for { _ = v.Args[1] v_0 := v.Args[0] @@ -15169,15 +15836,15 @@ func rewriteValueARM_OpARMXOR_10(v *Value) 
bool { y := v_0.Args[0] z := v_0.Args[1] x := v.Args[1] - v.reset(OpARMXORshiftRLreg) + v.reset(OpARMTEQshiftRLreg) v.AddArg(x) v.AddArg(y) v.AddArg(z) return true } - // match: (XOR x (SRA y z)) + // match: (TEQ x (SRA y z)) // cond: - // result: (XORshiftRAreg x y z) + // result: (TEQshiftRAreg x y z) for { _ = v.Args[1] x := v.Args[0] @@ -15188,15 +15855,15 @@ func rewriteValueARM_OpARMXOR_10(v *Value) bool { _ = v_1.Args[1] y := v_1.Args[0] z := v_1.Args[1] - v.reset(OpARMXORshiftRAreg) + v.reset(OpARMTEQshiftRAreg) v.AddArg(x) v.AddArg(y) v.AddArg(z) return true } - // match: (XOR (SRA y z) x) + // match: (TEQ (SRA y z) x) // cond: - // result: (XORshiftRAreg x y z) + // result: (TEQshiftRAreg x y z) for { _ = v.Args[1] v_0 := v.Args[0] @@ -15207,79 +15874,71 @@ func rewriteValueARM_OpARMXOR_10(v *Value) bool { y := v_0.Args[0] z := v_0.Args[1] x := v.Args[1] - v.reset(OpARMXORshiftRAreg) + v.reset(OpARMTEQshiftRAreg) v.AddArg(x) v.AddArg(y) v.AddArg(z) return true } - // match: (XOR x x) - // cond: - // result: (MOVWconst [0]) - for { - _ = v.Args[1] - x := v.Args[0] - if x != v.Args[1] { - break - } - v.reset(OpARMMOVWconst) - v.AuxInt = 0 - return true - } return false } -func rewriteValueARM_OpARMXORconst_0(v *Value) bool { - // match: (XORconst [0] x) - // cond: - // result: x +func rewriteValueARM_OpARMTEQconst_0(v *Value) bool { + // match: (TEQconst (MOVWconst [x]) [y]) + // cond: int32(x^y)==0 + // result: (FlagEQ) for { - if v.AuxInt != 0 { + y := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { break } - x := v.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + x := v_0.AuxInt + if !(int32(x^y) == 0) { + break + } + v.reset(OpARMFlagEQ) return true } - // match: (XORconst [c] (MOVWconst [d])) - // cond: - // result: (MOVWconst [c^d]) + // match: (TEQconst (MOVWconst [x]) [y]) + // cond: int32(x^y)<0 + // result: (FlagLT_UGT) for { - c := v.AuxInt + y := v.AuxInt v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { break } - d := 
v_0.AuxInt - v.reset(OpARMMOVWconst) - v.AuxInt = c ^ d + x := v_0.AuxInt + if !(int32(x^y) < 0) { + break + } + v.reset(OpARMFlagLT_UGT) return true } - // match: (XORconst [c] (XORconst [d] x)) - // cond: - // result: (XORconst [c^d] x) + // match: (TEQconst (MOVWconst [x]) [y]) + // cond: int32(x^y)>0 + // result: (FlagGT_UGT) for { - c := v.AuxInt + y := v.AuxInt v_0 := v.Args[0] - if v_0.Op != OpARMXORconst { + if v_0.Op != OpARMMOVWconst { break } - d := v_0.AuxInt - x := v_0.Args[0] - v.reset(OpARMXORconst) - v.AuxInt = c ^ d - v.AddArg(x) + x := v_0.AuxInt + if !(int32(x^y) > 0) { + break + } + v.reset(OpARMFlagGT_UGT) return true } return false } -func rewriteValueARM_OpARMXORshiftLL_0(v *Value) bool { +func rewriteValueARM_OpARMTEQshiftLL_0(v *Value) bool { b := v.Block _ = b - // match: (XORshiftLL (MOVWconst [c]) x [d]) + // match: (TEQshiftLL (MOVWconst [c]) x [d]) // cond: - // result: (XORconst [c] (SLLconst <x.Type> x [d])) + // result: (TEQconst [c] (SLLconst <x.Type> x [d])) for { d := v.AuxInt _ = v.Args[1] @@ -15289,7 +15948,7 @@ func rewriteValueARM_OpARMXORshiftLL_0(v *Value) bool { } c := v_0.AuxInt x := v.Args[1] - v.reset(OpARMXORconst) + v.reset(OpARMTEQconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) v0.AuxInt = d @@ -15297,9 +15956,9 @@ func rewriteValueARM_OpARMXORshiftLL_0(v *Value) bool { v.AddArg(v0) return true } - // match: (XORshiftLL x (MOVWconst [c]) [d]) + // match: (TEQshiftLL x (MOVWconst [c]) [d]) // cond: - // result: (XORconst x [int64(uint32(c)<<uint64(d))]) + // result: (TEQconst x [int64(uint32(c)<<uint64(d))]) for { d := v.AuxInt _ = v.Args[1] @@ -15309,63 +15968,19 @@ func rewriteValueARM_OpARMXORshiftLL_0(v *Value) bool { break } c := v_1.AuxInt - v.reset(OpARMXORconst) + v.reset(OpARMTEQconst) v.AuxInt = int64(uint32(c) << uint64(d)) v.AddArg(x) return true } - // match: (XORshiftLL [c] (SRLconst x [32-c]) x) - // cond: - // result: (SRRconst [32-c] x) - for { - c := v.AuxInt - _ = v.Args[1] - v_0 
:= v.Args[0] - if v_0.Op != OpARMSRLconst { - break - } - if v_0.AuxInt != 32-c { - break - } - x := v_0.Args[0] - if x != v.Args[1] { - break - } - v.reset(OpARMSRRconst) - v.AuxInt = 32 - c - v.AddArg(x) - return true - } - // match: (XORshiftLL x (SLLconst x [c]) [d]) - // cond: c==d - // result: (MOVWconst [0]) - for { - d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSLLconst { - break - } - c := v_1.AuxInt - if x != v_1.Args[0] { - break - } - if !(c == d) { - break - } - v.reset(OpARMMOVWconst) - v.AuxInt = 0 - return true - } return false } -func rewriteValueARM_OpARMXORshiftLLreg_0(v *Value) bool { +func rewriteValueARM_OpARMTEQshiftLLreg_0(v *Value) bool { b := v.Block _ = b - // match: (XORshiftLLreg (MOVWconst [c]) x y) + // match: (TEQshiftLLreg (MOVWconst [c]) x y) // cond: - // result: (XORconst [c] (SLL <x.Type> x y)) + // result: (TEQconst [c] (SLL <x.Type> x y)) for { _ = v.Args[2] v_0 := v.Args[0] @@ -15375,7 +15990,7 @@ func rewriteValueARM_OpARMXORshiftLLreg_0(v *Value) bool { c := v_0.AuxInt x := v.Args[1] y := v.Args[2] - v.reset(OpARMXORconst) + v.reset(OpARMTEQconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) v0.AddArg(x) @@ -15383,9 +15998,9 @@ func rewriteValueARM_OpARMXORshiftLLreg_0(v *Value) bool { v.AddArg(v0) return true } - // match: (XORshiftLLreg x y (MOVWconst [c])) + // match: (TEQshiftLLreg x y (MOVWconst [c])) // cond: - // result: (XORshiftLL x y [c]) + // result: (TEQshiftLL x y [c]) for { _ = v.Args[2] x := v.Args[0] @@ -15395,7 +16010,7 @@ func rewriteValueARM_OpARMXORshiftLLreg_0(v *Value) bool { break } c := v_2.AuxInt - v.reset(OpARMXORshiftLL) + v.reset(OpARMTEQshiftLL) v.AuxInt = c v.AddArg(x) v.AddArg(y) @@ -15403,12 +16018,12 @@ func rewriteValueARM_OpARMXORshiftLLreg_0(v *Value) bool { } return false } -func rewriteValueARM_OpARMXORshiftRA_0(v *Value) bool { +func rewriteValueARM_OpARMTEQshiftRA_0(v *Value) bool { b := v.Block _ = b - // match: (XORshiftRA 
(MOVWconst [c]) x [d]) + // match: (TEQshiftRA (MOVWconst [c]) x [d]) // cond: - // result: (XORconst [c] (SRAconst <x.Type> x [d])) + // result: (TEQconst [c] (SRAconst <x.Type> x [d])) for { d := v.AuxInt _ = v.Args[1] @@ -15418,7 +16033,7 @@ func rewriteValueARM_OpARMXORshiftRA_0(v *Value) bool { } c := v_0.AuxInt x := v.Args[1] - v.reset(OpARMXORconst) + v.reset(OpARMTEQconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type) v0.AuxInt = d @@ -15426,9 +16041,9 @@ func rewriteValueARM_OpARMXORshiftRA_0(v *Value) bool { v.AddArg(v0) return true } - // match: (XORshiftRA x (MOVWconst [c]) [d]) + // match: (TEQshiftRA x (MOVWconst [c]) [d]) // cond: - // result: (XORconst x [int64(int32(c)>>uint64(d))]) + // result: (TEQconst x [int64(int32(c)>>uint64(d))]) for { d := v.AuxInt _ = v.Args[1] @@ -15438,41 +16053,19 @@ func rewriteValueARM_OpARMXORshiftRA_0(v *Value) bool { break } c := v_1.AuxInt - v.reset(OpARMXORconst) + v.reset(OpARMTEQconst) v.AuxInt = int64(int32(c) >> uint64(d)) v.AddArg(x) return true } - // match: (XORshiftRA x (SRAconst x [c]) [d]) - // cond: c==d - // result: (MOVWconst [0]) - for { - d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSRAconst { - break - } - c := v_1.AuxInt - if x != v_1.Args[0] { - break - } - if !(c == d) { - break - } - v.reset(OpARMMOVWconst) - v.AuxInt = 0 - return true - } return false } -func rewriteValueARM_OpARMXORshiftRAreg_0(v *Value) bool { +func rewriteValueARM_OpARMTEQshiftRAreg_0(v *Value) bool { b := v.Block _ = b - // match: (XORshiftRAreg (MOVWconst [c]) x y) + // match: (TEQshiftRAreg (MOVWconst [c]) x y) // cond: - // result: (XORconst [c] (SRA <x.Type> x y)) + // result: (TEQconst [c] (SRA <x.Type> x y)) for { _ = v.Args[2] v_0 := v.Args[0] @@ -15482,7 +16075,7 @@ func rewriteValueARM_OpARMXORshiftRAreg_0(v *Value) bool { c := v_0.AuxInt x := v.Args[1] y := v.Args[2] - v.reset(OpARMXORconst) + v.reset(OpARMTEQconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, 
OpARMSRA, x.Type) v0.AddArg(x) @@ -15490,9 +16083,9 @@ func rewriteValueARM_OpARMXORshiftRAreg_0(v *Value) bool { v.AddArg(v0) return true } - // match: (XORshiftRAreg x y (MOVWconst [c])) + // match: (TEQshiftRAreg x y (MOVWconst [c])) // cond: - // result: (XORshiftRA x y [c]) + // result: (TEQshiftRA x y [c]) for { _ = v.Args[2] x := v.Args[0] @@ -15502,7 +16095,7 @@ func rewriteValueARM_OpARMXORshiftRAreg_0(v *Value) bool { break } c := v_2.AuxInt - v.reset(OpARMXORshiftRA) + v.reset(OpARMTEQshiftRA) v.AuxInt = c v.AddArg(x) v.AddArg(y) @@ -15510,12 +16103,12 @@ func rewriteValueARM_OpARMXORshiftRAreg_0(v *Value) bool { } return false } -func rewriteValueARM_OpARMXORshiftRL_0(v *Value) bool { +func rewriteValueARM_OpARMTEQshiftRL_0(v *Value) bool { b := v.Block _ = b - // match: (XORshiftRL (MOVWconst [c]) x [d]) + // match: (TEQshiftRL (MOVWconst [c]) x [d]) // cond: - // result: (XORconst [c] (SRLconst <x.Type> x [d])) + // result: (TEQconst [c] (SRLconst <x.Type> x [d])) for { d := v.AuxInt _ = v.Args[1] @@ -15525,7 +16118,7 @@ func rewriteValueARM_OpARMXORshiftRL_0(v *Value) bool { } c := v_0.AuxInt x := v.Args[1] - v.reset(OpARMXORconst) + v.reset(OpARMTEQconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type) v0.AuxInt = d @@ -15533,9 +16126,9 @@ func rewriteValueARM_OpARMXORshiftRL_0(v *Value) bool { v.AddArg(v0) return true } - // match: (XORshiftRL x (MOVWconst [c]) [d]) + // match: (TEQshiftRL x (MOVWconst [c]) [d]) // cond: - // result: (XORconst x [int64(uint32(c)>>uint64(d))]) + // result: (TEQconst x [int64(uint32(c)>>uint64(d))]) for { d := v.AuxInt _ = v.Args[1] @@ -15545,73 +16138,29 @@ func rewriteValueARM_OpARMXORshiftRL_0(v *Value) bool { break } c := v_1.AuxInt - v.reset(OpARMXORconst) + v.reset(OpARMTEQconst) v.AuxInt = int64(uint32(c) >> uint64(d)) v.AddArg(x) return true } - // match: (XORshiftRL [c] (SLLconst x [32-c]) x) + return false +} +func rewriteValueARM_OpARMTEQshiftRLreg_0(v *Value) bool { + b := v.Block + _ = b + 
// match: (TEQshiftRLreg (MOVWconst [c]) x y) // cond: - // result: (SRRconst [ c] x) + // result: (TEQconst [c] (SRL <x.Type> x y)) for { - c := v.AuxInt - _ = v.Args[1] + _ = v.Args[2] v_0 := v.Args[0] - if v_0.Op != OpARMSLLconst { - break - } - if v_0.AuxInt != 32-c { - break - } - x := v_0.Args[0] - if x != v.Args[1] { + if v_0.Op != OpARMMOVWconst { break } - v.reset(OpARMSRRconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (XORshiftRL x (SRLconst x [c]) [d]) - // cond: c==d - // result: (MOVWconst [0]) - for { - d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSRLconst { - break - } - c := v_1.AuxInt - if x != v_1.Args[0] { - break - } - if !(c == d) { - break - } - v.reset(OpARMMOVWconst) - v.AuxInt = 0 - return true - } - return false -} -func rewriteValueARM_OpARMXORshiftRLreg_0(v *Value) bool { - b := v.Block - _ = b - // match: (XORshiftRLreg (MOVWconst [c]) x y) - // cond: - // result: (XORconst [c] (SRL <x.Type> x y)) - for { - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARMMOVWconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - y := v.Args[2] - v.reset(OpARMXORconst) + c := v_0.AuxInt + x := v.Args[1] + y := v.Args[2] + v.reset(OpARMTEQconst) v.AuxInt = c v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) v0.AddArg(x) @@ -15619,9 +16168,9 @@ func rewriteValueARM_OpARMXORshiftRLreg_0(v *Value) bool { v.AddArg(v0) return true } - // match: (XORshiftRLreg x y (MOVWconst [c])) + // match: (TEQshiftRLreg x y (MOVWconst [c])) // cond: - // result: (XORshiftRL x y [c]) + // result: (TEQshiftRL x y [c]) for { _ = v.Args[2] x := v.Args[0] @@ -15631,7 +16180,7 @@ func rewriteValueARM_OpARMXORshiftRLreg_0(v *Value) bool { break } c := v_2.AuxInt - v.reset(OpARMXORshiftRL) + v.reset(OpARMTEQshiftRL) v.AuxInt = c v.AddArg(x) v.AddArg(y) @@ -15639,14 +16188,27 @@ func rewriteValueARM_OpARMXORshiftRLreg_0(v *Value) bool { } return false } -func rewriteValueARM_OpARMXORshiftRR_0(v *Value) bool { - b := v.Block 
- _ = b - // match: (XORshiftRR (MOVWconst [c]) x [d]) +func rewriteValueARM_OpARMTST_0(v *Value) bool { + // match: (TST x (MOVWconst [c])) // cond: - // result: (XORconst [c] (SRRconst <x.Type> x [d])) + // result: (TSTconst [c] x) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + v.reset(OpARMTSTconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (TST (MOVWconst [c]) x) + // cond: + // result: (TSTconst [c] x) for { - d := v.AuxInt _ = v.Args[1] v_0 := v.Args[0] if v_0.Op != OpARMMOVWconst { @@ -15654,2780 +16216,2694 @@ func rewriteValueARM_OpARMXORshiftRR_0(v *Value) bool { } c := v_0.AuxInt x := v.Args[1] - v.reset(OpARMXORconst) + v.reset(OpARMTSTconst) v.AuxInt = c - v0 := b.NewValue0(v.Pos, OpARMSRRconst, x.Type) - v0.AuxInt = d - v0.AddArg(x) - v.AddArg(v0) + v.AddArg(x) return true } - // match: (XORshiftRR x (MOVWconst [c]) [d]) + // match: (TST x (SLLconst [c] y)) // cond: - // result: (XORconst x [int64(uint32(c)>>uint64(d)|uint32(c)<<uint64(32-d))]) + // result: (TSTshiftLL x y [c]) for { - d := v.AuxInt _ = v.Args[1] x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARMMOVWconst { + if v_1.Op != OpARMSLLconst { break } c := v_1.AuxInt - v.reset(OpARMXORconst) - v.AuxInt = int64(uint32(c)>>uint64(d) | uint32(c)<<uint64(32-d)) + y := v_1.Args[0] + v.reset(OpARMTSTshiftLL) + v.AuxInt = c v.AddArg(x) + v.AddArg(y) return true } - return false -} -func rewriteValueARM_OpAdd16_0(v *Value) bool { - // match: (Add16 x y) + // match: (TST (SLLconst [c] y) x) // cond: - // result: (ADD x y) + // result: (TSTshiftLL x y [c]) for { _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMADD) + v_0 := v.Args[0] + if v_0.Op != OpARMSLLconst { + break + } + c := v_0.AuxInt + y := v_0.Args[0] + x := v.Args[1] + v.reset(OpARMTSTshiftLL) + v.AuxInt = c v.AddArg(x) v.AddArg(y) return true } -} -func rewriteValueARM_OpAdd32_0(v *Value) bool { - // match: (Add32 x y) + // 
match: (TST x (SRLconst [c] y)) // cond: - // result: (ADD x y) + // result: (TSTshiftRL x y [c]) for { _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARMADD) + v_1 := v.Args[1] + if v_1.Op != OpARMSRLconst { + break + } + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMTSTshiftRL) + v.AuxInt = c v.AddArg(x) v.AddArg(y) return true } -} -func rewriteValueARM_OpAdd32F_0(v *Value) bool { - // match: (Add32F x y) + // match: (TST (SRLconst [c] y) x) // cond: - // result: (ADDF x y) + // result: (TSTshiftRL x y [c]) for { _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMADDF) + v_0 := v.Args[0] + if v_0.Op != OpARMSRLconst { + break + } + c := v_0.AuxInt + y := v_0.Args[0] + x := v.Args[1] + v.reset(OpARMTSTshiftRL) + v.AuxInt = c v.AddArg(x) v.AddArg(y) return true } -} -func rewriteValueARM_OpAdd32carry_0(v *Value) bool { - // match: (Add32carry x y) + // match: (TST x (SRAconst [c] y)) // cond: - // result: (ADDS x y) + // result: (TSTshiftRA x y [c]) for { _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARMADDS) + v_1 := v.Args[1] + if v_1.Op != OpARMSRAconst { + break + } + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMTSTshiftRA) + v.AuxInt = c v.AddArg(x) v.AddArg(y) return true } -} -func rewriteValueARM_OpAdd32withcarry_0(v *Value) bool { - // match: (Add32withcarry x y c) + // match: (TST (SRAconst [c] y) x) // cond: - // result: (ADC x y c) + // result: (TSTshiftRA x y [c]) for { - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - c := v.Args[2] - v.reset(OpARMADC) + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMSRAconst { + break + } + c := v_0.AuxInt + y := v_0.Args[0] + x := v.Args[1] + v.reset(OpARMTSTshiftRA) + v.AuxInt = c v.AddArg(x) v.AddArg(y) - v.AddArg(c) return true } -} -func rewriteValueARM_OpAdd64F_0(v *Value) bool { - // match: (Add64F x y) + // match: (TST x (SLL y z)) // cond: - // result: (ADDD x y) + // result: (TSTshiftLLreg x y z) for { _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - 
v.reset(OpARMADDD) + v_1 := v.Args[1] + if v_1.Op != OpARMSLL { + break + } + _ = v_1.Args[1] + y := v_1.Args[0] + z := v_1.Args[1] + v.reset(OpARMTSTshiftLLreg) v.AddArg(x) v.AddArg(y) + v.AddArg(z) return true } -} -func rewriteValueARM_OpAdd8_0(v *Value) bool { - // match: (Add8 x y) + // match: (TST (SLL y z) x) // cond: - // result: (ADD x y) + // result: (TSTshiftLLreg x y z) for { _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMADD) + v_0 := v.Args[0] + if v_0.Op != OpARMSLL { + break + } + _ = v_0.Args[1] + y := v_0.Args[0] + z := v_0.Args[1] + x := v.Args[1] + v.reset(OpARMTSTshiftLLreg) v.AddArg(x) v.AddArg(y) + v.AddArg(z) return true } + return false } -func rewriteValueARM_OpAddPtr_0(v *Value) bool { - // match: (AddPtr x y) +func rewriteValueARM_OpARMTST_10(v *Value) bool { + // match: (TST x (SRL y z)) // cond: - // result: (ADD x y) + // result: (TSTshiftRLreg x y z) for { _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARMADD) + v_1 := v.Args[1] + if v_1.Op != OpARMSRL { + break + } + _ = v_1.Args[1] + y := v_1.Args[0] + z := v_1.Args[1] + v.reset(OpARMTSTshiftRLreg) v.AddArg(x) v.AddArg(y) + v.AddArg(z) return true } -} -func rewriteValueARM_OpAddr_0(v *Value) bool { - // match: (Addr {sym} base) + // match: (TST (SRL y z) x) // cond: - // result: (MOVWaddr {sym} base) + // result: (TSTshiftRLreg x y z) for { - sym := v.Aux - base := v.Args[0] - v.reset(OpARMMOVWaddr) - v.Aux = sym - v.AddArg(base) + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMSRL { + break + } + _ = v_0.Args[1] + y := v_0.Args[0] + z := v_0.Args[1] + x := v.Args[1] + v.reset(OpARMTSTshiftRLreg) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) return true } -} -func rewriteValueARM_OpAnd16_0(v *Value) bool { - // match: (And16 x y) + // match: (TST x (SRA y z)) // cond: - // result: (AND x y) + // result: (TSTshiftRAreg x y z) for { _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARMAND) + v_1 := v.Args[1] + if v_1.Op != OpARMSRA { + break + } 
+ _ = v_1.Args[1] + y := v_1.Args[0] + z := v_1.Args[1] + v.reset(OpARMTSTshiftRAreg) v.AddArg(x) v.AddArg(y) + v.AddArg(z) return true } -} -func rewriteValueARM_OpAnd32_0(v *Value) bool { - // match: (And32 x y) + // match: (TST (SRA y z) x) // cond: - // result: (AND x y) + // result: (TSTshiftRAreg x y z) for { _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMAND) + v_0 := v.Args[0] + if v_0.Op != OpARMSRA { + break + } + _ = v_0.Args[1] + y := v_0.Args[0] + z := v_0.Args[1] + x := v.Args[1] + v.reset(OpARMTSTshiftRAreg) v.AddArg(x) v.AddArg(y) + v.AddArg(z) return true } + return false } -func rewriteValueARM_OpAnd8_0(v *Value) bool { - // match: (And8 x y) - // cond: - // result: (AND x y) +func rewriteValueARM_OpARMTSTconst_0(v *Value) bool { + // match: (TSTconst (MOVWconst [x]) [y]) + // cond: int32(x&y)==0 + // result: (FlagEQ) for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMAND) - v.AddArg(x) - v.AddArg(y) + y := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + x := v_0.AuxInt + if !(int32(x&y) == 0) { + break + } + v.reset(OpARMFlagEQ) return true } -} -func rewriteValueARM_OpAndB_0(v *Value) bool { - // match: (AndB x y) - // cond: - // result: (AND x y) + // match: (TSTconst (MOVWconst [x]) [y]) + // cond: int32(x&y)<0 + // result: (FlagLT_UGT) for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMAND) - v.AddArg(x) - v.AddArg(y) + y := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + x := v_0.AuxInt + if !(int32(x&y) < 0) { + break + } + v.reset(OpARMFlagLT_UGT) + return true + } + // match: (TSTconst (MOVWconst [x]) [y]) + // cond: int32(x&y)>0 + // result: (FlagGT_UGT) + for { + y := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + x := v_0.AuxInt + if !(int32(x&y) > 0) { + break + } + v.reset(OpARMFlagGT_UGT) return true } + return false } -func rewriteValueARM_OpAvg32u_0(v *Value) bool { +func 
rewriteValueARM_OpARMTSTshiftLL_0(v *Value) bool { b := v.Block _ = b - // match: (Avg32u <t> x y) + // match: (TSTshiftLL (MOVWconst [c]) x [d]) // cond: - // result: (ADD (SRLconst <t> (SUB <t> x y) [1]) y) + // result: (TSTconst [c] (SLLconst <x.Type> x [d])) for { - t := v.Type + d := v.AuxInt _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMADD) - v0 := b.NewValue0(v.Pos, OpARMSRLconst, t) - v0.AuxInt = 1 - v1 := b.NewValue0(v.Pos, OpARMSUB, t) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARMTSTconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = d + v0.AddArg(x) v.AddArg(v0) - v.AddArg(y) return true } + // match: (TSTshiftLL x (MOVWconst [c]) [d]) + // cond: + // result: (TSTconst x [int64(uint32(c)<<uint64(d))]) + for { + d := v.AuxInt + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + v.reset(OpARMTSTconst) + v.AuxInt = int64(uint32(c) << uint64(d)) + v.AddArg(x) + return true + } + return false } -func rewriteValueARM_OpBitLen32_0(v *Value) bool { +func rewriteValueARM_OpARMTSTshiftLLreg_0(v *Value) bool { b := v.Block _ = b - // match: (BitLen32 <t> x) + // match: (TSTshiftLLreg (MOVWconst [c]) x y) // cond: - // result: (RSBconst [32] (CLZ <t> x)) + // result: (TSTconst [c] (SLL <x.Type> x y)) for { - t := v.Type - x := v.Args[0] - v.reset(OpARMRSBconst) - v.AuxInt = 32 - v0 := b.NewValue0(v.Pos, OpARMCLZ, t) + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + y := v.Args[2] + v.reset(OpARMTSTconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) v0.AddArg(x) + v0.AddArg(y) v.AddArg(v0) return true } + // match: (TSTshiftLLreg x y (MOVWconst [c])) + // cond: + // result: (TSTshiftLL x y [c]) + for { + _ = v.Args[2] + x := v.Args[0] + y := v.Args[1] + v_2 := 
v.Args[2] + if v_2.Op != OpARMMOVWconst { + break + } + c := v_2.AuxInt + v.reset(OpARMTSTshiftLL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true + } + return false } -func rewriteValueARM_OpBswap32_0(v *Value) bool { +func rewriteValueARM_OpARMTSTshiftRA_0(v *Value) bool { b := v.Block _ = b - // match: (Bswap32 <t> x) - // cond: objabi.GOARM==5 - // result: (XOR <t> (SRLconst <t> (BICconst <t> (XOR <t> x (SRRconst <t> [16] x)) [0xff0000]) [8]) (SRRconst <t> x [8])) + // match: (TSTshiftRA (MOVWconst [c]) x [d]) + // cond: + // result: (TSTconst [c] (SRAconst <x.Type> x [d])) for { - t := v.Type - x := v.Args[0] - if !(objabi.GOARM == 5) { + d := v.AuxInt + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { break } - v.reset(OpARMXOR) - v.Type = t - v0 := b.NewValue0(v.Pos, OpARMSRLconst, t) - v0.AuxInt = 8 - v1 := b.NewValue0(v.Pos, OpARMBICconst, t) - v1.AuxInt = 0xff0000 - v2 := b.NewValue0(v.Pos, OpARMXOR, t) - v2.AddArg(x) - v3 := b.NewValue0(v.Pos, OpARMSRRconst, t) - v3.AuxInt = 16 - v3.AddArg(x) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARMTSTconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type) + v0.AuxInt = d + v0.AddArg(x) v.AddArg(v0) - v4 := b.NewValue0(v.Pos, OpARMSRRconst, t) - v4.AuxInt = 8 - v4.AddArg(x) - v.AddArg(v4) return true } - // match: (Bswap32 x) - // cond: objabi.GOARM>=6 - // result: (REV x) + // match: (TSTshiftRA x (MOVWconst [c]) [d]) + // cond: + // result: (TSTconst x [int64(int32(c)>>uint64(d))]) for { + d := v.AuxInt + _ = v.Args[1] x := v.Args[0] - if !(objabi.GOARM >= 6) { + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { break } - v.reset(OpARMREV) + c := v_1.AuxInt + v.reset(OpARMTSTconst) + v.AuxInt = int64(int32(c) >> uint64(d)) v.AddArg(x) return true } return false } -func rewriteValueARM_OpClosureCall_0(v *Value) bool { - // match: (ClosureCall [argwid] entry closure mem) +func rewriteValueARM_OpARMTSTshiftRAreg_0(v *Value) 
bool { + b := v.Block + _ = b + // match: (TSTshiftRAreg (MOVWconst [c]) x y) // cond: - // result: (CALLclosure [argwid] entry closure mem) + // result: (TSTconst [c] (SRA <x.Type> x y)) for { - argwid := v.AuxInt _ = v.Args[2] - entry := v.Args[0] - closure := v.Args[1] - mem := v.Args[2] - v.reset(OpARMCALLclosure) - v.AuxInt = argwid - v.AddArg(entry) - v.AddArg(closure) - v.AddArg(mem) + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + y := v.Args[2] + v.reset(OpARMTSTconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) return true } -} -func rewriteValueARM_OpCom16_0(v *Value) bool { - // match: (Com16 x) + // match: (TSTshiftRAreg x y (MOVWconst [c])) // cond: - // result: (MVN x) + // result: (TSTshiftRA x y [c]) for { + _ = v.Args[2] x := v.Args[0] - v.reset(OpARMMVN) + y := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARMMOVWconst { + break + } + c := v_2.AuxInt + v.reset(OpARMTSTshiftRA) + v.AuxInt = c v.AddArg(x) + v.AddArg(y) return true } + return false } -func rewriteValueARM_OpCom32_0(v *Value) bool { - // match: (Com32 x) +func rewriteValueARM_OpARMTSTshiftRL_0(v *Value) bool { + b := v.Block + _ = b + // match: (TSTshiftRL (MOVWconst [c]) x [d]) // cond: - // result: (MVN x) + // result: (TSTconst [c] (SRLconst <x.Type> x [d])) for { - x := v.Args[0] - v.reset(OpARMMVN) - v.AddArg(x) + d := v.AuxInt + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARMTSTconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type) + v0.AuxInt = d + v0.AddArg(x) + v.AddArg(v0) return true } -} -func rewriteValueARM_OpCom8_0(v *Value) bool { - // match: (Com8 x) + // match: (TSTshiftRL x (MOVWconst [c]) [d]) // cond: - // result: (MVN x) + // result: (TSTconst x [int64(uint32(c)>>uint64(d))]) for { + d := v.AuxInt + _ = v.Args[1] x := v.Args[0] - v.reset(OpARMMVN) + v_1 := 
v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + v.reset(OpARMTSTconst) + v.AuxInt = int64(uint32(c) >> uint64(d)) v.AddArg(x) return true } + return false } -func rewriteValueARM_OpConst16_0(v *Value) bool { - // match: (Const16 [val]) +func rewriteValueARM_OpARMTSTshiftRLreg_0(v *Value) bool { + b := v.Block + _ = b + // match: (TSTshiftRLreg (MOVWconst [c]) x y) // cond: - // result: (MOVWconst [val]) + // result: (TSTconst [c] (SRL <x.Type> x y)) for { - val := v.AuxInt - v.reset(OpARMMOVWconst) - v.AuxInt = val + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + y := v.Args[2] + v.reset(OpARMTSTconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) return true } -} -func rewriteValueARM_OpConst32_0(v *Value) bool { - // match: (Const32 [val]) + // match: (TSTshiftRLreg x y (MOVWconst [c])) // cond: - // result: (MOVWconst [val]) + // result: (TSTshiftRL x y [c]) for { - val := v.AuxInt - v.reset(OpARMMOVWconst) - v.AuxInt = val + _ = v.Args[2] + x := v.Args[0] + y := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARMMOVWconst { + break + } + c := v_2.AuxInt + v.reset(OpARMTSTshiftRL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) return true } + return false } -func rewriteValueARM_OpConst32F_0(v *Value) bool { - // match: (Const32F [val]) +func rewriteValueARM_OpARMXOR_0(v *Value) bool { + // match: (XOR x (MOVWconst [c])) // cond: - // result: (MOVFconst [val]) + // result: (XORconst [c] x) for { - val := v.AuxInt - v.reset(OpARMMOVFconst) - v.AuxInt = val + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + v.reset(OpARMXORconst) + v.AuxInt = c + v.AddArg(x) return true } -} -func rewriteValueARM_OpConst64F_0(v *Value) bool { - // match: (Const64F [val]) + // match: (XOR (MOVWconst [c]) x) // cond: - // result: (MOVDconst [val]) + // result: (XORconst [c] 
x) for { - val := v.AuxInt - v.reset(OpARMMOVDconst) - v.AuxInt = val + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARMXORconst) + v.AuxInt = c + v.AddArg(x) return true } -} -func rewriteValueARM_OpConst8_0(v *Value) bool { - // match: (Const8 [val]) + // match: (XOR x (SLLconst [c] y)) // cond: - // result: (MOVWconst [val]) + // result: (XORshiftLL x y [c]) for { - val := v.AuxInt - v.reset(OpARMMOVWconst) - v.AuxInt = val + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMSLLconst { + break + } + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMXORshiftLL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) return true } -} -func rewriteValueARM_OpConstBool_0(v *Value) bool { - // match: (ConstBool [b]) + // match: (XOR (SLLconst [c] y) x) // cond: - // result: (MOVWconst [b]) + // result: (XORshiftLL x y [c]) for { - b := v.AuxInt - v.reset(OpARMMOVWconst) - v.AuxInt = b + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMSLLconst { + break + } + c := v_0.AuxInt + y := v_0.Args[0] + x := v.Args[1] + v.reset(OpARMXORshiftLL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) return true } -} -func rewriteValueARM_OpConstNil_0(v *Value) bool { - // match: (ConstNil) + // match: (XOR x (SRLconst [c] y)) // cond: - // result: (MOVWconst [0]) + // result: (XORshiftRL x y [c]) for { - v.reset(OpARMMOVWconst) - v.AuxInt = 0 + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMSRLconst { + break + } + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMXORshiftRL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) return true } -} -func rewriteValueARM_OpConvert_0(v *Value) bool { - // match: (Convert x mem) + // match: (XOR (SRLconst [c] y) x) // cond: - // result: (MOVWconvert x mem) + // result: (XORshiftRL x y [c]) for { _ = v.Args[1] - x := v.Args[0] - mem := v.Args[1] - v.reset(OpARMMOVWconvert) + v_0 := v.Args[0] + if v_0.Op != OpARMSRLconst { + break + } + c := 
v_0.AuxInt + y := v_0.Args[0] + x := v.Args[1] + v.reset(OpARMXORshiftRL) + v.AuxInt = c v.AddArg(x) - v.AddArg(mem) + v.AddArg(y) return true } -} -func rewriteValueARM_OpCtz32_0(v *Value) bool { - b := v.Block - _ = b - // match: (Ctz32 <t> x) - // cond: objabi.GOARM<=6 - // result: (RSBconst [32] (CLZ <t> (SUBconst <t> (AND <t> x (RSBconst <t> [0] x)) [1]))) + // match: (XOR x (SRAconst [c] y)) + // cond: + // result: (XORshiftRA x y [c]) for { - t := v.Type + _ = v.Args[1] x := v.Args[0] - if !(objabi.GOARM <= 6) { + v_1 := v.Args[1] + if v_1.Op != OpARMSRAconst { break } - v.reset(OpARMRSBconst) - v.AuxInt = 32 - v0 := b.NewValue0(v.Pos, OpARMCLZ, t) - v1 := b.NewValue0(v.Pos, OpARMSUBconst, t) - v1.AuxInt = 1 - v2 := b.NewValue0(v.Pos, OpARMAND, t) - v2.AddArg(x) - v3 := b.NewValue0(v.Pos, OpARMRSBconst, t) - v3.AuxInt = 0 - v3.AddArg(x) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMXORshiftRA) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) return true } - // match: (Ctz32 <t> x) - // cond: objabi.GOARM==7 - // result: (CLZ <t> (RBIT <t> x)) + // match: (XOR (SRAconst [c] y) x) + // cond: + // result: (XORshiftRA x y [c]) for { - t := v.Type - x := v.Args[0] - if !(objabi.GOARM == 7) { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMSRAconst { break } - v.reset(OpARMCLZ) - v.Type = t - v0 := b.NewValue0(v.Pos, OpARMRBIT, t) - v0.AddArg(x) - v.AddArg(v0) + c := v_0.AuxInt + y := v_0.Args[0] + x := v.Args[1] + v.reset(OpARMXORshiftRA) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) return true } - return false -} -func rewriteValueARM_OpCvt32Fto32_0(v *Value) bool { - // match: (Cvt32Fto32 x) + // match: (XOR x (SRRconst [c] y)) // cond: - // result: (MOVFW x) + // result: (XORshiftRR x y [c]) for { + _ = v.Args[1] x := v.Args[0] - v.reset(OpARMMOVFW) + v_1 := v.Args[1] + if v_1.Op != OpARMSRRconst { + break + } + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMXORshiftRR) + v.AuxInt = c 
v.AddArg(x) + v.AddArg(y) return true } -} -func rewriteValueARM_OpCvt32Fto32U_0(v *Value) bool { - // match: (Cvt32Fto32U x) + // match: (XOR (SRRconst [c] y) x) // cond: - // result: (MOVFWU x) + // result: (XORshiftRR x y [c]) for { - x := v.Args[0] - v.reset(OpARMMOVFWU) + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMSRRconst { + break + } + c := v_0.AuxInt + y := v_0.Args[0] + x := v.Args[1] + v.reset(OpARMXORshiftRR) + v.AuxInt = c v.AddArg(x) + v.AddArg(y) return true } + return false } -func rewriteValueARM_OpCvt32Fto64F_0(v *Value) bool { - // match: (Cvt32Fto64F x) +func rewriteValueARM_OpARMXOR_10(v *Value) bool { + // match: (XOR x (SLL y z)) // cond: - // result: (MOVFD x) + // result: (XORshiftLLreg x y z) for { + _ = v.Args[1] x := v.Args[0] - v.reset(OpARMMOVFD) + v_1 := v.Args[1] + if v_1.Op != OpARMSLL { + break + } + _ = v_1.Args[1] + y := v_1.Args[0] + z := v_1.Args[1] + v.reset(OpARMXORshiftLLreg) v.AddArg(x) + v.AddArg(y) + v.AddArg(z) return true } -} -func rewriteValueARM_OpCvt32Uto32F_0(v *Value) bool { - // match: (Cvt32Uto32F x) + // match: (XOR (SLL y z) x) // cond: - // result: (MOVWUF x) + // result: (XORshiftLLreg x y z) for { - x := v.Args[0] - v.reset(OpARMMOVWUF) + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMSLL { + break + } + _ = v_0.Args[1] + y := v_0.Args[0] + z := v_0.Args[1] + x := v.Args[1] + v.reset(OpARMXORshiftLLreg) v.AddArg(x) + v.AddArg(y) + v.AddArg(z) return true } -} -func rewriteValueARM_OpCvt32Uto64F_0(v *Value) bool { - // match: (Cvt32Uto64F x) + // match: (XOR x (SRL y z)) // cond: - // result: (MOVWUD x) + // result: (XORshiftRLreg x y z) for { + _ = v.Args[1] x := v.Args[0] - v.reset(OpARMMOVWUD) + v_1 := v.Args[1] + if v_1.Op != OpARMSRL { + break + } + _ = v_1.Args[1] + y := v_1.Args[0] + z := v_1.Args[1] + v.reset(OpARMXORshiftRLreg) v.AddArg(x) + v.AddArg(y) + v.AddArg(z) return true } -} -func rewriteValueARM_OpCvt32to32F_0(v *Value) bool { - // match: (Cvt32to32F x) + // match: (XOR 
(SRL y z) x) // cond: - // result: (MOVWF x) + // result: (XORshiftRLreg x y z) for { - x := v.Args[0] - v.reset(OpARMMOVWF) + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMSRL { + break + } + _ = v_0.Args[1] + y := v_0.Args[0] + z := v_0.Args[1] + x := v.Args[1] + v.reset(OpARMXORshiftRLreg) v.AddArg(x) + v.AddArg(y) + v.AddArg(z) return true } -} -func rewriteValueARM_OpCvt32to64F_0(v *Value) bool { - // match: (Cvt32to64F x) + // match: (XOR x (SRA y z)) // cond: - // result: (MOVWD x) + // result: (XORshiftRAreg x y z) for { + _ = v.Args[1] x := v.Args[0] - v.reset(OpARMMOVWD) + v_1 := v.Args[1] + if v_1.Op != OpARMSRA { + break + } + _ = v_1.Args[1] + y := v_1.Args[0] + z := v_1.Args[1] + v.reset(OpARMXORshiftRAreg) v.AddArg(x) + v.AddArg(y) + v.AddArg(z) return true } -} -func rewriteValueARM_OpCvt64Fto32_0(v *Value) bool { - // match: (Cvt64Fto32 x) + // match: (XOR (SRA y z) x) // cond: - // result: (MOVDW x) + // result: (XORshiftRAreg x y z) for { - x := v.Args[0] - v.reset(OpARMMOVDW) + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMSRA { + break + } + _ = v_0.Args[1] + y := v_0.Args[0] + z := v_0.Args[1] + x := v.Args[1] + v.reset(OpARMXORshiftRAreg) v.AddArg(x) + v.AddArg(y) + v.AddArg(z) return true } -} -func rewriteValueARM_OpCvt64Fto32F_0(v *Value) bool { - // match: (Cvt64Fto32F x) + // match: (XOR x x) // cond: - // result: (MOVDF x) + // result: (MOVWconst [0]) for { + _ = v.Args[1] x := v.Args[0] - v.reset(OpARMMOVDF) - v.AddArg(x) + if x != v.Args[1] { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = 0 return true } + return false } -func rewriteValueARM_OpCvt64Fto32U_0(v *Value) bool { - // match: (Cvt64Fto32U x) +func rewriteValueARM_OpARMXORconst_0(v *Value) bool { + // match: (XORconst [0] x) // cond: - // result: (MOVDWU x) + // result: x for { + if v.AuxInt != 0 { + break + } x := v.Args[0] - v.reset(OpARMMOVDWU) + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (XORconst [c] (MOVWconst [d])) 
+ // cond: + // result: (MOVWconst [c^d]) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + d := v_0.AuxInt + v.reset(OpARMMOVWconst) + v.AuxInt = c ^ d + return true + } + // match: (XORconst [c] (XORconst [d] x)) + // cond: + // result: (XORconst [c^d] x) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMXORconst { + break + } + d := v_0.AuxInt + x := v_0.Args[0] + v.reset(OpARMXORconst) + v.AuxInt = c ^ d v.AddArg(x) return true } + return false } -func rewriteValueARM_OpDiv16_0(v *Value) bool { +func rewriteValueARM_OpARMXORshiftLL_0(v *Value) bool { b := v.Block _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Div16 x y) + // match: (XORshiftLL (MOVWconst [c]) x [d]) // cond: - // result: (Div32 (SignExt16to32 x) (SignExt16to32 y)) + // result: (XORconst [c] (SLLconst <x.Type> x [d])) for { + d := v.AuxInt _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpDiv32) - v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARMXORconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = d v0.AddArg(x) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) - v1.AddArg(y) - v.AddArg(v1) return true } -} -func rewriteValueARM_OpDiv16u_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Div16u x y) + // match: (XORshiftLL x (MOVWconst [c]) [d]) // cond: - // result: (Div32u (ZeroExt16to32 x) (ZeroExt16to32 y)) + // result: (XORconst x [int64(uint32(c)<<uint64(d))]) for { + d := v.AuxInt _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpDiv32u) - v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v0.AddArg(x) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v1.AddArg(y) - v.AddArg(v1) + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + 
v.reset(OpARMXORconst) + v.AuxInt = int64(uint32(c) << uint64(d)) + v.AddArg(x) return true } -} -func rewriteValueARM_OpDiv32_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Div32 x y) + // match: (XORshiftLL [c] (SRLconst x [32-c]) x) // cond: - // result: (SUB (XOR <typ.UInt32> (Select0 <typ.UInt32> (CALLudiv (SUB <typ.UInt32> (XOR x <typ.UInt32> (Signmask x)) (Signmask x)) (SUB <typ.UInt32> (XOR y <typ.UInt32> (Signmask y)) (Signmask y)))) (Signmask (XOR <typ.UInt32> x y))) (Signmask (XOR <typ.UInt32> x y))) + // result: (SRRconst [32-c] x) + for { + c := v.AuxInt + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMSRLconst { + break + } + if v_0.AuxInt != 32-c { + break + } + x := v_0.Args[0] + if x != v.Args[1] { + break + } + v.reset(OpARMSRRconst) + v.AuxInt = 32 - c + v.AddArg(x) + return true + } + // match: (XORshiftLL x (SLLconst x [c]) [d]) + // cond: c==d + // result: (MOVWconst [0]) for { + d := v.AuxInt _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARMSUB) - v0 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpSelect0, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpARMCALLudiv, types.NewTuple(typ.UInt32, typ.UInt32)) - v3 := b.NewValue0(v.Pos, OpARMSUB, typ.UInt32) - v4 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32) - v4.AddArg(x) - v5 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) - v5.AddArg(x) - v4.AddArg(v5) - v3.AddArg(v4) - v6 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) - v6.AddArg(x) - v3.AddArg(v6) - v2.AddArg(v3) - v7 := b.NewValue0(v.Pos, OpARMSUB, typ.UInt32) - v8 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32) - v8.AddArg(y) - v9 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) - v9.AddArg(y) - v8.AddArg(v9) - v7.AddArg(v8) - v10 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) - v10.AddArg(y) - v7.AddArg(v10) - v2.AddArg(v7) - v1.AddArg(v2) - v0.AddArg(v1) - v11 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) - v12 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32) - v12.AddArg(x) - 
v12.AddArg(y) - v11.AddArg(v12) - v0.AddArg(v11) - v.AddArg(v0) - v13 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) - v14 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32) - v14.AddArg(x) - v14.AddArg(y) - v13.AddArg(v14) - v.AddArg(v13) - return true - } -} -func rewriteValueARM_OpDiv32F_0(v *Value) bool { - // match: (Div32F x y) - // cond: - // result: (DIVF x y) - for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMDIVF) - v.AddArg(x) - v.AddArg(y) + v_1 := v.Args[1] + if v_1.Op != OpARMSLLconst { + break + } + c := v_1.AuxInt + if x != v_1.Args[0] { + break + } + if !(c == d) { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = 0 return true } + return false } -func rewriteValueARM_OpDiv32u_0(v *Value) bool { +func rewriteValueARM_OpARMXORshiftLLreg_0(v *Value) bool { b := v.Block _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Div32u x y) + // match: (XORshiftLLreg (MOVWconst [c]) x y) // cond: - // result: (Select0 <typ.UInt32> (CALLudiv x y)) + // result: (XORconst [c] (SLL <x.Type> x y)) for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpSelect0) - v.Type = typ.UInt32 - v0 := b.NewValue0(v.Pos, OpARMCALLudiv, types.NewTuple(typ.UInt32, typ.UInt32)) + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + y := v.Args[2] + v.reset(OpARMXORconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) v0.AddArg(x) v0.AddArg(y) v.AddArg(v0) return true } -} -func rewriteValueARM_OpDiv64F_0(v *Value) bool { - // match: (Div64F x y) + // match: (XORshiftLLreg x y (MOVWconst [c])) // cond: - // result: (DIVD x y) + // result: (XORshiftLL x y [c]) for { - _ = v.Args[1] + _ = v.Args[2] x := v.Args[0] y := v.Args[1] - v.reset(OpARMDIVD) + v_2 := v.Args[2] + if v_2.Op != OpARMMOVWconst { + break + } + c := v_2.AuxInt + v.reset(OpARMXORshiftLL) + v.AuxInt = c v.AddArg(x) v.AddArg(y) return true } + return false } -func rewriteValueARM_OpDiv8_0(v *Value) bool { +func 
rewriteValueARM_OpARMXORshiftRA_0(v *Value) bool { b := v.Block _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Div8 x y) + // match: (XORshiftRA (MOVWconst [c]) x [d]) // cond: - // result: (Div32 (SignExt8to32 x) (SignExt8to32 y)) + // result: (XORconst [c] (SRAconst <x.Type> x [d])) for { + d := v.AuxInt _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpDiv32) - v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARMXORconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type) + v0.AuxInt = d v0.AddArg(x) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) - v1.AddArg(y) - v.AddArg(v1) return true } -} -func rewriteValueARM_OpDiv8u_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Div8u x y) + // match: (XORshiftRA x (MOVWconst [c]) [d]) // cond: - // result: (Div32u (ZeroExt8to32 x) (ZeroExt8to32 y)) + // result: (XORconst x [int64(int32(c)>>uint64(d))]) for { + d := v.AuxInt _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpDiv32u) - v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) - v0.AddArg(x) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) - v1.AddArg(y) - v.AddArg(v1) + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + v.reset(OpARMXORconst) + v.AuxInt = int64(int32(c) >> uint64(d)) + v.AddArg(x) return true } -} -func rewriteValueARM_OpEq16_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Eq16 x y) - // cond: - // result: (Equal (CMP (ZeroExt16to32 x) (ZeroExt16to32 y))) + // match: (XORshiftRA x (SRAconst x [c]) [d]) + // cond: c==d + // result: (MOVWconst [0]) for { + d := v.AuxInt _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARMEqual) - v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v1 := b.NewValue0(v.Pos, 
OpZeroExt16to32, typ.UInt32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v_1 := v.Args[1] + if v_1.Op != OpARMSRAconst { + break + } + c := v_1.AuxInt + if x != v_1.Args[0] { + break + } + if !(c == d) { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = 0 return true } + return false } -func rewriteValueARM_OpEq32_0(v *Value) bool { +func rewriteValueARM_OpARMXORshiftRAreg_0(v *Value) bool { b := v.Block _ = b - // match: (Eq32 x y) + // match: (XORshiftRAreg (MOVWconst [c]) x y) // cond: - // result: (Equal (CMP x y)) + // result: (XORconst [c] (SRA <x.Type> x y)) for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMEqual) - v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + y := v.Args[2] + v.reset(OpARMXORconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) v0.AddArg(x) v0.AddArg(y) v.AddArg(v0) return true } -} -func rewriteValueARM_OpEq32F_0(v *Value) bool { - b := v.Block - _ = b - // match: (Eq32F x y) + // match: (XORshiftRAreg x y (MOVWconst [c])) // cond: - // result: (Equal (CMPF x y)) + // result: (XORshiftRA x y [c]) for { - _ = v.Args[1] + _ = v.Args[2] x := v.Args[0] y := v.Args[1] - v.reset(OpARMEqual) - v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v_2 := v.Args[2] + if v_2.Op != OpARMMOVWconst { + break + } + c := v_2.AuxInt + v.reset(OpARMXORshiftRA) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) return true } + return false } -func rewriteValueARM_OpEq64F_0(v *Value) bool { +func rewriteValueARM_OpARMXORshiftRL_0(v *Value) bool { b := v.Block _ = b - // match: (Eq64F x y) + // match: (XORshiftRL (MOVWconst [c]) x [d]) // cond: - // result: (Equal (CMPD x y)) + // result: (XORconst [c] (SRLconst <x.Type> x [d])) for { + d := v.AuxInt _ = v.Args[1] - x := 
v.Args[0] - y := v.Args[1] - v.reset(OpARMEqual) - v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags) + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARMXORconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type) + v0.AuxInt = d v0.AddArg(x) - v0.AddArg(y) v.AddArg(v0) return true } -} -func rewriteValueARM_OpEq8_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Eq8 x y) + // match: (XORshiftRL x (MOVWconst [c]) [d]) // cond: - // result: (Equal (CMP (ZeroExt8to32 x) (ZeroExt8to32 y))) + // result: (XORconst x [int64(uint32(c)>>uint64(d))]) for { + d := v.AuxInt _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARMEqual) - v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + v.reset(OpARMXORconst) + v.AuxInt = int64(uint32(c) >> uint64(d)) + v.AddArg(x) return true } -} -func rewriteValueARM_OpEqB_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (EqB x y) + // match: (XORshiftRL [c] (SLLconst x [32-c]) x) // cond: - // result: (XORconst [1] (XOR <typ.Bool> x y)) + // result: (SRRconst [ c] x) + for { + c := v.AuxInt + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMSLLconst { + break + } + if v_0.AuxInt != 32-c { + break + } + x := v_0.Args[0] + if x != v.Args[1] { + break + } + v.reset(OpARMSRRconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (XORshiftRL x (SRLconst x [c]) [d]) + // cond: c==d + // result: (MOVWconst [0]) for { + d := v.AuxInt _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARMXORconst) - v.AuxInt = 1 - v0 := b.NewValue0(v.Pos, OpARMXOR, typ.Bool) - v0.AddArg(x) - v0.AddArg(y) - 
v.AddArg(v0) + v_1 := v.Args[1] + if v_1.Op != OpARMSRLconst { + break + } + c := v_1.AuxInt + if x != v_1.Args[0] { + break + } + if !(c == d) { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = 0 return true } + return false } -func rewriteValueARM_OpEqPtr_0(v *Value) bool { +func rewriteValueARM_OpARMXORshiftRLreg_0(v *Value) bool { b := v.Block _ = b - // match: (EqPtr x y) + // match: (XORshiftRLreg (MOVWconst [c]) x y) // cond: - // result: (Equal (CMP x y)) + // result: (XORconst [c] (SRL <x.Type> x y)) for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMEqual) - v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + y := v.Args[2] + v.reset(OpARMXORconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) v0.AddArg(x) v0.AddArg(y) v.AddArg(v0) return true } -} -func rewriteValueARM_OpGeq16_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Geq16 x y) + // match: (XORshiftRLreg x y (MOVWconst [c])) // cond: - // result: (GreaterEqual (CMP (SignExt16to32 x) (SignExt16to32 y))) + // result: (XORshiftRL x y [c]) for { - _ = v.Args[1] + _ = v.Args[2] x := v.Args[0] y := v.Args[1] - v.reset(OpARMGreaterEqual) - v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v_2 := v.Args[2] + if v_2.Op != OpARMMOVWconst { + break + } + c := v_2.AuxInt + v.reset(OpARMXORshiftRL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) return true } + return false } -func rewriteValueARM_OpGeq16U_0(v *Value) bool { +func rewriteValueARM_OpARMXORshiftRR_0(v *Value) bool { b := v.Block _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Geq16U x y) + // match: (XORshiftRR (MOVWconst [c]) x [d]) // cond: - // result: 
(GreaterEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y))) + // result: (XORconst [c] (SRRconst <x.Type> x [d])) for { + d := v.AuxInt _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMGreaterEqualU) - v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v2.AddArg(y) - v0.AddArg(v2) + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARMXORconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARMSRRconst, x.Type) + v0.AuxInt = d + v0.AddArg(x) v.AddArg(v0) return true } -} -func rewriteValueARM_OpGeq32_0(v *Value) bool { - b := v.Block - _ = b - // match: (Geq32 x y) + // match: (XORshiftRR x (MOVWconst [c]) [d]) // cond: - // result: (GreaterEqual (CMP x y)) + // result: (XORconst x [int64(uint32(c)>>uint64(d)|uint32(c)<<uint64(32-d))]) for { + d := v.AuxInt _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARMGreaterEqual) - v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + v.reset(OpARMXORconst) + v.AuxInt = int64(uint32(c)>>uint64(d) | uint32(c)<<uint64(32-d)) + v.AddArg(x) return true } + return false } -func rewriteValueARM_OpGeq32F_0(v *Value) bool { - b := v.Block - _ = b - // match: (Geq32F x y) +func rewriteValueARM_OpAdd16_0(v *Value) bool { + // match: (Add16 x y) // cond: - // result: (GreaterEqual (CMPF x y)) + // result: (ADD x y) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMGreaterEqual) - v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v.reset(OpARMADD) + v.AddArg(x) + v.AddArg(y) return true } } -func rewriteValueARM_OpGeq32U_0(v *Value) bool { - b := v.Block - _ = b - // match: (Geq32U x y) +func rewriteValueARM_OpAdd32_0(v *Value) 
bool { + // match: (Add32 x y) // cond: - // result: (GreaterEqualU (CMP x y)) + // result: (ADD x y) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMGreaterEqualU) - v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v.reset(OpARMADD) + v.AddArg(x) + v.AddArg(y) return true } } -func rewriteValueARM_OpGeq64F_0(v *Value) bool { - b := v.Block - _ = b - // match: (Geq64F x y) +func rewriteValueARM_OpAdd32F_0(v *Value) bool { + // match: (Add32F x y) // cond: - // result: (GreaterEqual (CMPD x y)) + // result: (ADDF x y) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMGreaterEqual) - v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v.reset(OpARMADDF) + v.AddArg(x) + v.AddArg(y) return true } } -func rewriteValueARM_OpGeq8_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Geq8 x y) +func rewriteValueARM_OpAdd32carry_0(v *Value) bool { + // match: (Add32carry x y) // cond: - // result: (GreaterEqual (CMP (SignExt8to32 x) (SignExt8to32 y))) + // result: (ADDS x y) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMGreaterEqual) - v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v.reset(OpARMADDS) + v.AddArg(x) + v.AddArg(y) return true } } -func rewriteValueARM_OpGeq8U_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Geq8U x y) +func rewriteValueARM_OpAdd32withcarry_0(v *Value) bool { + // match: (Add32withcarry x y c) // cond: - // result: (GreaterEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y))) + // result: (ADC x y c) for { - _ = v.Args[1] + _ = v.Args[2] x := v.Args[0] y := v.Args[1] - v.reset(OpARMGreaterEqualU) - v0 := b.NewValue0(v.Pos, 
OpARMCMP, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) - return true - } -} -func rewriteValueARM_OpGetCallerSP_0(v *Value) bool { - // match: (GetCallerSP) - // cond: - // result: (LoweredGetCallerSP) - for { - v.reset(OpARMLoweredGetCallerSP) + c := v.Args[2] + v.reset(OpARMADC) + v.AddArg(x) + v.AddArg(y) + v.AddArg(c) return true } } -func rewriteValueARM_OpGetClosurePtr_0(v *Value) bool { - // match: (GetClosurePtr) +func rewriteValueARM_OpAdd64F_0(v *Value) bool { + // match: (Add64F x y) // cond: - // result: (LoweredGetClosurePtr) + // result: (ADDD x y) for { - v.reset(OpARMLoweredGetClosurePtr) + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMADDD) + v.AddArg(x) + v.AddArg(y) return true } } -func rewriteValueARM_OpGreater16_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Greater16 x y) +func rewriteValueARM_OpAdd8_0(v *Value) bool { + // match: (Add8 x y) // cond: - // result: (GreaterThan (CMP (SignExt16to32 x) (SignExt16to32 y))) + // result: (ADD x y) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMGreaterThan) - v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v.reset(OpARMADD) + v.AddArg(x) + v.AddArg(y) return true } } -func rewriteValueARM_OpGreater16U_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Greater16U x y) +func rewriteValueARM_OpAddPtr_0(v *Value) bool { + // match: (AddPtr x y) // cond: - // result: (GreaterThanU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y))) + // result: (ADD x y) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMGreaterThanU) - v0 := 
b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v.reset(OpARMADD) + v.AddArg(x) + v.AddArg(y) return true } } -func rewriteValueARM_OpGreater32_0(v *Value) bool { - b := v.Block - _ = b - // match: (Greater32 x y) +func rewriteValueARM_OpAddr_0(v *Value) bool { + // match: (Addr {sym} base) // cond: - // result: (GreaterThan (CMP x y)) + // result: (MOVWaddr {sym} base) for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMGreaterThan) - v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + sym := v.Aux + base := v.Args[0] + v.reset(OpARMMOVWaddr) + v.Aux = sym + v.AddArg(base) return true } } -func rewriteValueARM_OpGreater32F_0(v *Value) bool { - b := v.Block - _ = b - // match: (Greater32F x y) +func rewriteValueARM_OpAnd16_0(v *Value) bool { + // match: (And16 x y) // cond: - // result: (GreaterThan (CMPF x y)) + // result: (AND x y) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMGreaterThan) - v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v.reset(OpARMAND) + v.AddArg(x) + v.AddArg(y) return true } } -func rewriteValueARM_OpGreater32U_0(v *Value) bool { - b := v.Block - _ = b - // match: (Greater32U x y) +func rewriteValueARM_OpAnd32_0(v *Value) bool { + // match: (And32 x y) // cond: - // result: (GreaterThanU (CMP x y)) + // result: (AND x y) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMGreaterThanU) - v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v.reset(OpARMAND) + v.AddArg(x) + v.AddArg(y) return true } } -func rewriteValueARM_OpGreater64F_0(v *Value) bool { - b := v.Block - _ = b - // match: (Greater64F x y) +func rewriteValueARM_OpAnd8_0(v *Value) bool { + // match: 
(And8 x y) // cond: - // result: (GreaterThan (CMPD x y)) + // result: (AND x y) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMGreaterThan) - v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v.reset(OpARMAND) + v.AddArg(x) + v.AddArg(y) return true } } -func rewriteValueARM_OpGreater8_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Greater8 x y) +func rewriteValueARM_OpAndB_0(v *Value) bool { + // match: (AndB x y) // cond: - // result: (GreaterThan (CMP (SignExt8to32 x) (SignExt8to32 y))) + // result: (AND x y) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMGreaterThan) - v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v.reset(OpARMAND) + v.AddArg(x) + v.AddArg(y) return true } } -func rewriteValueARM_OpGreater8U_0(v *Value) bool { +func rewriteValueARM_OpAvg32u_0(v *Value) bool { b := v.Block _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Greater8U x y) + // match: (Avg32u <t> x y) // cond: - // result: (GreaterThanU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y))) + // result: (ADD (SRLconst <t> (SUB <t> x y) [1]) y) for { + t := v.Type _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMGreaterThanU) - v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v.reset(OpARMADD) + v0 := b.NewValue0(v.Pos, OpARMSRLconst, t) + v0.AuxInt = 1 + v1 := b.NewValue0(v.Pos, OpARMSUB, t) v1.AddArg(x) + v1.AddArg(y) v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) - v2.AddArg(y) - v0.AddArg(v2) v.AddArg(v0) + v.AddArg(y) return true } } -func rewriteValueARM_OpHmul32_0(v *Value) bool { - // match: (Hmul32 x y) +func rewriteValueARM_OpBitLen32_0(v *Value) bool { + b := 
v.Block + _ = b + // match: (BitLen32 <t> x) // cond: - // result: (HMUL x y) + // result: (RSBconst [32] (CLZ <t> x)) for { - _ = v.Args[1] + t := v.Type x := v.Args[0] - y := v.Args[1] - v.reset(OpARMHMUL) - v.AddArg(x) - v.AddArg(y) + v.reset(OpARMRSBconst) + v.AuxInt = 32 + v0 := b.NewValue0(v.Pos, OpARMCLZ, t) + v0.AddArg(x) + v.AddArg(v0) return true } } -func rewriteValueARM_OpHmul32u_0(v *Value) bool { - // match: (Hmul32u x y) - // cond: - // result: (HMULU x y) +func rewriteValueARM_OpBswap32_0(v *Value) bool { + b := v.Block + _ = b + // match: (Bswap32 <t> x) + // cond: objabi.GOARM==5 + // result: (XOR <t> (SRLconst <t> (BICconst <t> (XOR <t> x (SRRconst <t> [16] x)) [0xff0000]) [8]) (SRRconst <t> x [8])) for { - _ = v.Args[1] + t := v.Type x := v.Args[0] - y := v.Args[1] - v.reset(OpARMHMULU) - v.AddArg(x) - v.AddArg(y) + if !(objabi.GOARM == 5) { + break + } + v.reset(OpARMXOR) + v.Type = t + v0 := b.NewValue0(v.Pos, OpARMSRLconst, t) + v0.AuxInt = 8 + v1 := b.NewValue0(v.Pos, OpARMBICconst, t) + v1.AuxInt = 0xff0000 + v2 := b.NewValue0(v.Pos, OpARMXOR, t) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpARMSRRconst, t) + v3.AuxInt = 16 + v3.AddArg(x) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + v4 := b.NewValue0(v.Pos, OpARMSRRconst, t) + v4.AuxInt = 8 + v4.AddArg(x) + v.AddArg(v4) + return true + } + // match: (Bswap32 x) + // cond: objabi.GOARM>=6 + // result: (REV x) + for { + x := v.Args[0] + if !(objabi.GOARM >= 6) { + break + } + v.reset(OpARMREV) + v.AddArg(x) return true } + return false } -func rewriteValueARM_OpInterCall_0(v *Value) bool { - // match: (InterCall [argwid] entry mem) +func rewriteValueARM_OpClosureCall_0(v *Value) bool { + // match: (ClosureCall [argwid] entry closure mem) // cond: - // result: (CALLinter [argwid] entry mem) + // result: (CALLclosure [argwid] entry closure mem) for { argwid := v.AuxInt - _ = v.Args[1] + _ = v.Args[2] entry := v.Args[0] - mem := v.Args[1] - v.reset(OpARMCALLinter) + closure := 
v.Args[1] + mem := v.Args[2] + v.reset(OpARMCALLclosure) v.AuxInt = argwid v.AddArg(entry) + v.AddArg(closure) v.AddArg(mem) return true } } -func rewriteValueARM_OpIsInBounds_0(v *Value) bool { - b := v.Block - _ = b - // match: (IsInBounds idx len) +func rewriteValueARM_OpCom16_0(v *Value) bool { + // match: (Com16 x) // cond: - // result: (LessThanU (CMP idx len)) + // result: (MVN x) for { - _ = v.Args[1] - idx := v.Args[0] - len := v.Args[1] - v.reset(OpARMLessThanU) - v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(idx) - v0.AddArg(len) - v.AddArg(v0) + x := v.Args[0] + v.reset(OpARMMVN) + v.AddArg(x) return true } } -func rewriteValueARM_OpIsNonNil_0(v *Value) bool { - b := v.Block - _ = b - // match: (IsNonNil ptr) +func rewriteValueARM_OpCom32_0(v *Value) bool { + // match: (Com32 x) // cond: - // result: (NotEqual (CMPconst [0] ptr)) + // result: (MVN x) for { - ptr := v.Args[0] - v.reset(OpARMNotEqual) - v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) - v0.AuxInt = 0 - v0.AddArg(ptr) - v.AddArg(v0) + x := v.Args[0] + v.reset(OpARMMVN) + v.AddArg(x) return true } } -func rewriteValueARM_OpIsSliceInBounds_0(v *Value) bool { - b := v.Block - _ = b - // match: (IsSliceInBounds idx len) +func rewriteValueARM_OpCom8_0(v *Value) bool { + // match: (Com8 x) // cond: - // result: (LessEqualU (CMP idx len)) + // result: (MVN x) for { - _ = v.Args[1] - idx := v.Args[0] - len := v.Args[1] - v.reset(OpARMLessEqualU) - v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(idx) - v0.AddArg(len) - v.AddArg(v0) + x := v.Args[0] + v.reset(OpARMMVN) + v.AddArg(x) return true } } -func rewriteValueARM_OpLeq16_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Leq16 x y) +func rewriteValueARM_OpConst16_0(v *Value) bool { + // match: (Const16 [val]) // cond: - // result: (LessEqual (CMP (SignExt16to32 x) (SignExt16to32 y))) + // result: (MOVWconst [val]) for { - _ = v.Args[1] - x := v.Args[0] - y 
:= v.Args[1] - v.reset(OpARMLessEqual) - v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + val := v.AuxInt + v.reset(OpARMMOVWconst) + v.AuxInt = val return true } } -func rewriteValueARM_OpLeq16U_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Leq16U x y) +func rewriteValueARM_OpConst32_0(v *Value) bool { + // match: (Const32 [val]) // cond: - // result: (LessEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y))) + // result: (MOVWconst [val]) for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMLessEqualU) - v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + val := v.AuxInt + v.reset(OpARMMOVWconst) + v.AuxInt = val return true } } -func rewriteValueARM_OpLeq32_0(v *Value) bool { - b := v.Block - _ = b - // match: (Leq32 x y) +func rewriteValueARM_OpConst32F_0(v *Value) bool { + // match: (Const32F [val]) // cond: - // result: (LessEqual (CMP x y)) + // result: (MOVFconst [val]) for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMLessEqual) - v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + val := v.AuxInt + v.reset(OpARMMOVFconst) + v.AuxInt = val return true } } -func rewriteValueARM_OpLeq32F_0(v *Value) bool { - b := v.Block - _ = b - // match: (Leq32F x y) +func rewriteValueARM_OpConst64F_0(v *Value) bool { + // match: (Const64F [val]) // cond: - // result: (GreaterEqual (CMPF y x)) + // result: (MOVDconst [val]) for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMGreaterEqual) - v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags) - 
v0.AddArg(y) - v0.AddArg(x) - v.AddArg(v0) + val := v.AuxInt + v.reset(OpARMMOVDconst) + v.AuxInt = val return true } } -func rewriteValueARM_OpLeq32U_0(v *Value) bool { - b := v.Block - _ = b - // match: (Leq32U x y) +func rewriteValueARM_OpConst8_0(v *Value) bool { + // match: (Const8 [val]) // cond: - // result: (LessEqualU (CMP x y)) + // result: (MOVWconst [val]) for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMLessEqualU) - v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + val := v.AuxInt + v.reset(OpARMMOVWconst) + v.AuxInt = val return true } } -func rewriteValueARM_OpLeq64F_0(v *Value) bool { - b := v.Block - _ = b - // match: (Leq64F x y) +func rewriteValueARM_OpConstBool_0(v *Value) bool { + // match: (ConstBool [b]) // cond: - // result: (GreaterEqual (CMPD y x)) + // result: (MOVWconst [b]) for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMGreaterEqual) - v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) - v.AddArg(v0) + b := v.AuxInt + v.reset(OpARMMOVWconst) + v.AuxInt = b return true } } -func rewriteValueARM_OpLeq8_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Leq8 x y) +func rewriteValueARM_OpConstNil_0(v *Value) bool { + // match: (ConstNil) // cond: - // result: (LessEqual (CMP (SignExt8to32 x) (SignExt8to32 y))) + // result: (MOVWconst [0]) for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMLessEqual) - v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v.reset(OpARMMOVWconst) + v.AuxInt = 0 return true } } -func rewriteValueARM_OpLeq8U_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Leq8U x y) +func 
rewriteValueARM_OpConvert_0(v *Value) bool { + // match: (Convert x mem) // cond: - // result: (LessEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y))) + // result: (MOVWconvert x mem) for { _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARMLessEqualU) - v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + mem := v.Args[1] + v.reset(OpARMMOVWconvert) + v.AddArg(x) + v.AddArg(mem) return true } } -func rewriteValueARM_OpLess16_0(v *Value) bool { +func rewriteValueARM_OpCtz32_0(v *Value) bool { b := v.Block _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Less16 x y) - // cond: - // result: (LessThan (CMP (SignExt16to32 x) (SignExt16to32 y))) + // match: (Ctz32 <t> x) + // cond: objabi.GOARM<=6 + // result: (RSBconst [32] (CLZ <t> (SUBconst <t> (AND <t> x (RSBconst <t> [0] x)) [1]))) for { - _ = v.Args[1] + t := v.Type x := v.Args[0] - y := v.Args[1] - v.reset(OpARMLessThan) - v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) - v1.AddArg(x) + if !(objabi.GOARM <= 6) { + break + } + v.reset(OpARMRSBconst) + v.AuxInt = 32 + v0 := b.NewValue0(v.Pos, OpARMCLZ, t) + v1 := b.NewValue0(v.Pos, OpARMSUBconst, t) + v1.AuxInt = 1 + v2 := b.NewValue0(v.Pos, OpARMAND, t) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpARMRSBconst, t) + v3.AuxInt = 0 + v3.AddArg(x) + v2.AddArg(v3) + v1.AddArg(v2) v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) - v2.AddArg(y) - v0.AddArg(v2) v.AddArg(v0) return true } + // match: (Ctz32 <t> x) + // cond: objabi.GOARM==7 + // result: (CLZ <t> (RBIT <t> x)) + for { + t := v.Type + x := v.Args[0] + if !(objabi.GOARM == 7) { + break + } + v.reset(OpARMCLZ) + v.Type = t + v0 := b.NewValue0(v.Pos, OpARMRBIT, t) + v0.AddArg(x) + v.AddArg(v0) + return true + } + return false } -func 
rewriteValueARM_OpLess16U_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Less16U x y) +func rewriteValueARM_OpCvt32Fto32_0(v *Value) bool { + // match: (Cvt32Fto32 x) // cond: - // result: (LessThanU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y))) + // result: (MOVFW x) for { - _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARMLessThanU) - v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v.reset(OpARMMOVFW) + v.AddArg(x) return true } } -func rewriteValueARM_OpLess32_0(v *Value) bool { - b := v.Block - _ = b - // match: (Less32 x y) +func rewriteValueARM_OpCvt32Fto32U_0(v *Value) bool { + // match: (Cvt32Fto32U x) // cond: - // result: (LessThan (CMP x y)) + // result: (MOVFWU x) for { - _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARMLessThan) - v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v.reset(OpARMMOVFWU) + v.AddArg(x) return true } } -func rewriteValueARM_OpLess32F_0(v *Value) bool { - b := v.Block - _ = b - // match: (Less32F x y) +func rewriteValueARM_OpCvt32Fto64F_0(v *Value) bool { + // match: (Cvt32Fto64F x) // cond: - // result: (GreaterThan (CMPF y x)) + // result: (MOVFD x) for { - _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARMGreaterThan) - v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) - v.AddArg(v0) + v.reset(OpARMMOVFD) + v.AddArg(x) return true } } -func rewriteValueARM_OpLess32U_0(v *Value) bool { - b := v.Block - _ = b - // match: (Less32U x y) +func rewriteValueARM_OpCvt32Uto32F_0(v *Value) bool { + // match: (Cvt32Uto32F x) // cond: - // result: (LessThanU (CMP x y)) + // result: (MOVWUF x) for { - _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARMLessThanU) - v0 := 
b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v.reset(OpARMMOVWUF) + v.AddArg(x) return true } } -func rewriteValueARM_OpLess64F_0(v *Value) bool { +func rewriteValueARM_OpCvt32Uto64F_0(v *Value) bool { + // match: (Cvt32Uto64F x) + // cond: + // result: (MOVWUD x) + for { + x := v.Args[0] + v.reset(OpARMMOVWUD) + v.AddArg(x) + return true + } +} +func rewriteValueARM_OpCvt32to32F_0(v *Value) bool { + // match: (Cvt32to32F x) + // cond: + // result: (MOVWF x) + for { + x := v.Args[0] + v.reset(OpARMMOVWF) + v.AddArg(x) + return true + } +} +func rewriteValueARM_OpCvt32to64F_0(v *Value) bool { + // match: (Cvt32to64F x) + // cond: + // result: (MOVWD x) + for { + x := v.Args[0] + v.reset(OpARMMOVWD) + v.AddArg(x) + return true + } +} +func rewriteValueARM_OpCvt64Fto32_0(v *Value) bool { + // match: (Cvt64Fto32 x) + // cond: + // result: (MOVDW x) + for { + x := v.Args[0] + v.reset(OpARMMOVDW) + v.AddArg(x) + return true + } +} +func rewriteValueARM_OpCvt64Fto32F_0(v *Value) bool { + // match: (Cvt64Fto32F x) + // cond: + // result: (MOVDF x) + for { + x := v.Args[0] + v.reset(OpARMMOVDF) + v.AddArg(x) + return true + } +} +func rewriteValueARM_OpCvt64Fto32U_0(v *Value) bool { + // match: (Cvt64Fto32U x) + // cond: + // result: (MOVDWU x) + for { + x := v.Args[0] + v.reset(OpARMMOVDWU) + v.AddArg(x) + return true + } +} +func rewriteValueARM_OpDiv16_0(v *Value) bool { b := v.Block _ = b - // match: (Less64F x y) + typ := &b.Func.Config.Types + _ = typ + // match: (Div16 x y) // cond: - // result: (GreaterThan (CMPD y x)) + // result: (Div32 (SignExt16to32 x) (SignExt16to32 y)) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMGreaterThan) - v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags) - v0.AddArg(y) + v.reset(OpDiv32) + v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v1.AddArg(y) + v.AddArg(v1) return true } } 
-func rewriteValueARM_OpLess8_0(v *Value) bool { +func rewriteValueARM_OpDiv16u_0(v *Value) bool { b := v.Block _ = b typ := &b.Func.Config.Types _ = typ - // match: (Less8 x y) + // match: (Div16u x y) // cond: - // result: (LessThan (CMP (SignExt8to32 x) (SignExt8to32 y))) + // result: (Div32u (ZeroExt16to32 x) (ZeroExt16to32 y)) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMLessThan) - v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) - v2.AddArg(y) - v0.AddArg(v2) + v.reset(OpDiv32u) + v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v0.AddArg(x) v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(y) + v.AddArg(v1) return true } } -func rewriteValueARM_OpLess8U_0(v *Value) bool { +func rewriteValueARM_OpDiv32_0(v *Value) bool { b := v.Block _ = b typ := &b.Func.Config.Types _ = typ - // match: (Less8U x y) + // match: (Div32 x y) // cond: - // result: (LessThanU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y))) + // result: (SUB (XOR <typ.UInt32> (Select0 <typ.UInt32> (CALLudiv (SUB <typ.UInt32> (XOR x <typ.UInt32> (Signmask x)) (Signmask x)) (SUB <typ.UInt32> (XOR y <typ.UInt32> (Signmask y)) (Signmask y)))) (Signmask (XOR <typ.UInt32> x y))) (Signmask (XOR <typ.UInt32> x y))) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMLessThanU) - v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) - v1.AddArg(x) + v.reset(OpARMSUB) + v0 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpSelect0, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpARMCALLudiv, types.NewTuple(typ.UInt32, typ.UInt32)) + v3 := b.NewValue0(v.Pos, OpARMSUB, typ.UInt32) + v4 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32) + v4.AddArg(x) + v5 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) + v5.AddArg(x) + v4.AddArg(v5) + v3.AddArg(v4) 
+ v6 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) + v6.AddArg(x) + v3.AddArg(v6) + v2.AddArg(v3) + v7 := b.NewValue0(v.Pos, OpARMSUB, typ.UInt32) + v8 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32) + v8.AddArg(y) + v9 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) + v9.AddArg(y) + v8.AddArg(v9) + v7.AddArg(v8) + v10 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) + v10.AddArg(y) + v7.AddArg(v10) + v2.AddArg(v7) + v1.AddArg(v2) v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) - v2.AddArg(y) - v0.AddArg(v2) + v11 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) + v12 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32) + v12.AddArg(x) + v12.AddArg(y) + v11.AddArg(v12) + v0.AddArg(v11) v.AddArg(v0) + v13 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) + v14 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32) + v14.AddArg(x) + v14.AddArg(y) + v13.AddArg(v14) + v.AddArg(v13) return true } } -func rewriteValueARM_OpLoad_0(v *Value) bool { - // match: (Load <t> ptr mem) - // cond: t.IsBoolean() - // result: (MOVBUload ptr mem) - for { - t := v.Type - _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - if !(t.IsBoolean()) { - break - } - v.reset(OpARMMOVBUload) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (Load <t> ptr mem) - // cond: (is8BitInt(t) && isSigned(t)) - // result: (MOVBload ptr mem) - for { - t := v.Type - _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - if !(is8BitInt(t) && isSigned(t)) { - break - } - v.reset(OpARMMOVBload) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (Load <t> ptr mem) - // cond: (is8BitInt(t) && !isSigned(t)) - // result: (MOVBUload ptr mem) - for { - t := v.Type - _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - if !(is8BitInt(t) && !isSigned(t)) { - break - } - v.reset(OpARMMOVBUload) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (Load <t> ptr mem) - // cond: (is16BitInt(t) && isSigned(t)) - // result: (MOVHload ptr mem) - for { - t := v.Type - _ = v.Args[1] - ptr := v.Args[0] - mem := 
v.Args[1] - if !(is16BitInt(t) && isSigned(t)) { - break - } - v.reset(OpARMMOVHload) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (Load <t> ptr mem) - // cond: (is16BitInt(t) && !isSigned(t)) - // result: (MOVHUload ptr mem) - for { - t := v.Type - _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - if !(is16BitInt(t) && !isSigned(t)) { - break - } - v.reset(OpARMMOVHUload) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (Load <t> ptr mem) - // cond: (is32BitInt(t) || isPtr(t)) - // result: (MOVWload ptr mem) +func rewriteValueARM_OpDiv32F_0(v *Value) bool { + // match: (Div32F x y) + // cond: + // result: (DIVF x y) for { - t := v.Type _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - if !(is32BitInt(t) || isPtr(t)) { - break - } - v.reset(OpARMMOVWload) - v.AddArg(ptr) - v.AddArg(mem) + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMDIVF) + v.AddArg(x) + v.AddArg(y) return true } - // match: (Load <t> ptr mem) - // cond: is32BitFloat(t) - // result: (MOVFload ptr mem) +} +func rewriteValueARM_OpDiv32u_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Div32u x y) + // cond: + // result: (Select0 <typ.UInt32> (CALLudiv x y)) for { - t := v.Type _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - if !(is32BitFloat(t)) { - break - } - v.reset(OpARMMOVFload) - v.AddArg(ptr) - v.AddArg(mem) + x := v.Args[0] + y := v.Args[1] + v.reset(OpSelect0) + v.Type = typ.UInt32 + v0 := b.NewValue0(v.Pos, OpARMCALLudiv, types.NewTuple(typ.UInt32, typ.UInt32)) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) return true } - // match: (Load <t> ptr mem) - // cond: is64BitFloat(t) - // result: (MOVDload ptr mem) +} +func rewriteValueARM_OpDiv64F_0(v *Value) bool { + // match: (Div64F x y) + // cond: + // result: (DIVD x y) for { - t := v.Type _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - if !(is64BitFloat(t)) { - break - } - v.reset(OpARMMOVDload) - v.AddArg(ptr) - v.AddArg(mem) + x := v.Args[0] + 
y := v.Args[1] + v.reset(OpARMDIVD) + v.AddArg(x) + v.AddArg(y) return true } - return false } -func rewriteValueARM_OpLsh16x16_0(v *Value) bool { +func rewriteValueARM_OpDiv8_0(v *Value) bool { b := v.Block _ = b typ := &b.Func.Config.Types _ = typ - // match: (Lsh16x16 x y) + // match: (Div8 x y) // cond: - // result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0]) + // result: (Div32 (SignExt8to32 x) (SignExt8to32 y)) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMCMOVWHSconst) - v.AuxInt = 0 - v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) + v.reset(OpDiv32) + v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v1.AddArg(y) - v0.AddArg(v1) v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) - v2.AuxInt = 256 - v3 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v3.AddArg(y) - v2.AddArg(v3) - v.AddArg(v2) + v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v1.AddArg(y) + v.AddArg(v1) return true } } -func rewriteValueARM_OpLsh16x32_0(v *Value) bool { +func rewriteValueARM_OpDiv8u_0(v *Value) bool { b := v.Block _ = b - // match: (Lsh16x32 x y) + typ := &b.Func.Config.Types + _ = typ + // match: (Div8u x y) // cond: - // result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0]) + // result: (Div32u (ZeroExt8to32 x) (ZeroExt8to32 y)) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMCMOVWHSconst) - v.AuxInt = 0 - v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) + v.reset(OpDiv32u) + v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(x) - v0.AddArg(y) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) - v1.AuxInt = 256 + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(y) v.AddArg(v1) return true } } -func rewriteValueARM_OpLsh16x64_0(v *Value) bool { - // match: (Lsh16x64 x (Const64 [c])) - // cond: uint64(c) < 16 - // result: (SLLconst x [c]) +func 
rewriteValueARM_OpEq16_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Eq16 x y) + // cond: + // result: (Equal (CMP (ZeroExt16to32 x) (ZeroExt16to32 y))) for { _ = v.Args[1] x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint64(c) < 16) { - break - } - v.reset(OpARMSLLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (Lsh16x64 _ (Const64 [c])) - // cond: uint64(c) >= 16 - // result: (Const16 [0]) - for { - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint64(c) >= 16) { - break - } - v.reset(OpConst16) - v.AuxInt = 0 + y := v.Args[1] + v.reset(OpARMEqual) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) return true } - return false } -func rewriteValueARM_OpLsh16x8_0(v *Value) bool { +func rewriteValueARM_OpEq32_0(v *Value) bool { b := v.Block _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Lsh16x8 x y) + // match: (Eq32 x y) // cond: - // result: (SLL x (ZeroExt8to32 y)) + // result: (Equal (CMP x y)) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMSLL) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v.reset(OpARMEqual) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v0.AddArg(x) v0.AddArg(y) v.AddArg(v0) return true } } -func rewriteValueARM_OpLsh32x16_0(v *Value) bool { +func rewriteValueARM_OpEq32F_0(v *Value) bool { b := v.Block _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Lsh32x16 x y) + // match: (Eq32F x y) // cond: - // result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0]) + // result: (Equal (CMPF x y)) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMCMOVWHSconst) - 
v.AuxInt = 0 - v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) + v.reset(OpARMEqual) + v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags) v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v1.AddArg(y) - v0.AddArg(v1) + v0.AddArg(y) v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) - v2.AuxInt = 256 - v3 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v3.AddArg(y) - v2.AddArg(v3) - v.AddArg(v2) return true } } -func rewriteValueARM_OpLsh32x32_0(v *Value) bool { +func rewriteValueARM_OpEq64F_0(v *Value) bool { b := v.Block _ = b - // match: (Lsh32x32 x y) + // match: (Eq64F x y) // cond: - // result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0]) + // result: (Equal (CMPD x y)) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMCMOVWHSconst) - v.AuxInt = 0 - v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) + v.reset(OpARMEqual) + v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags) v0.AddArg(x) v0.AddArg(y) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) - v1.AuxInt = 256 - v1.AddArg(y) - v.AddArg(v1) - return true - } -} -func rewriteValueARM_OpLsh32x64_0(v *Value) bool { - // match: (Lsh32x64 x (Const64 [c])) - // cond: uint64(c) < 32 - // result: (SLLconst x [c]) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint64(c) < 32) { - break - } - v.reset(OpARMSLLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (Lsh32x64 _ (Const64 [c])) - // cond: uint64(c) >= 32 - // result: (Const32 [0]) - for { - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint64(c) >= 32) { - break - } - v.reset(OpConst32) - v.AuxInt = 0 return true } - return false } -func rewriteValueARM_OpLsh32x8_0(v *Value) bool { +func rewriteValueARM_OpEq8_0(v *Value) bool { b := v.Block _ = b typ := &b.Func.Config.Types _ = typ - // match: (Lsh32x8 x y) + // match: (Eq8 x y) // 
cond: - // result: (SLL x (ZeroExt8to32 y)) + // result: (Equal (CMP (ZeroExt8to32 x) (ZeroExt8to32 y))) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMSLL) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) - v0.AddArg(y) + v.reset(OpARMEqual) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg(v2) v.AddArg(v0) return true } } -func rewriteValueARM_OpLsh8x16_0(v *Value) bool { +func rewriteValueARM_OpEqB_0(v *Value) bool { b := v.Block _ = b typ := &b.Func.Config.Types _ = typ - // match: (Lsh8x16 x y) + // match: (EqB x y) // cond: - // result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0]) + // result: (XORconst [1] (XOR <typ.Bool> x y)) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMCMOVWHSconst) - v.AuxInt = 0 - v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) + v.reset(OpARMXORconst) + v.AuxInt = 1 + v0 := b.NewValue0(v.Pos, OpARMXOR, typ.Bool) v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v1.AddArg(y) - v0.AddArg(v1) + v0.AddArg(y) v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) - v2.AuxInt = 256 - v3 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v3.AddArg(y) - v2.AddArg(v3) - v.AddArg(v2) return true } } -func rewriteValueARM_OpLsh8x32_0(v *Value) bool { +func rewriteValueARM_OpEqPtr_0(v *Value) bool { b := v.Block _ = b - // match: (Lsh8x32 x y) + // match: (EqPtr x y) // cond: - // result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0]) + // result: (Equal (CMP x y)) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMCMOVWHSconst) - v.AuxInt = 0 - v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) + v.reset(OpARMEqual) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v0.AddArg(x) v0.AddArg(y) v.AddArg(v0) - v1 := 
b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) - v1.AuxInt = 256 - v1.AddArg(y) - v.AddArg(v1) - return true - } -} -func rewriteValueARM_OpLsh8x64_0(v *Value) bool { - // match: (Lsh8x64 x (Const64 [c])) - // cond: uint64(c) < 8 - // result: (SLLconst x [c]) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint64(c) < 8) { - break - } - v.reset(OpARMSLLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (Lsh8x64 _ (Const64 [c])) - // cond: uint64(c) >= 8 - // result: (Const8 [0]) - for { - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint64(c) >= 8) { - break - } - v.reset(OpConst8) - v.AuxInt = 0 return true } - return false } -func rewriteValueARM_OpLsh8x8_0(v *Value) bool { +func rewriteValueARM_OpGeq16_0(v *Value) bool { b := v.Block _ = b typ := &b.Func.Config.Types _ = typ - // match: (Lsh8x8 x y) + // match: (Geq16 x y) // cond: - // result: (SLL x (ZeroExt8to32 y)) + // result: (GreaterEqual (CMP (SignExt16to32 x) (SignExt16to32 y))) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMSLL) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) - v0.AddArg(y) + v.reset(OpARMGreaterEqual) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v2.AddArg(y) + v0.AddArg(v2) v.AddArg(v0) return true } } -func rewriteValueARM_OpMod16_0(v *Value) bool { +func rewriteValueARM_OpGeq16U_0(v *Value) bool { b := v.Block _ = b typ := &b.Func.Config.Types _ = typ - // match: (Mod16 x y) + // match: (Geq16U x y) // cond: - // result: (Mod32 (SignExt16to32 x) (SignExt16to32 y)) + // result: (GreaterEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y))) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpMod32) - v0 := b.NewValue0(v.Pos, OpSignExt16to32, 
typ.Int32) - v0.AddArg(x) + v.reset(OpARMGreaterEqualU) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg(v2) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) - v1.AddArg(y) - v.AddArg(v1) return true } } -func rewriteValueARM_OpMod16u_0(v *Value) bool { +func rewriteValueARM_OpGeq32_0(v *Value) bool { b := v.Block _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Mod16u x y) + // match: (Geq32 x y) // cond: - // result: (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y)) + // result: (GreaterEqual (CMP x y)) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpMod32u) - v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v.reset(OpARMGreaterEqual) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v0.AddArg(x) + v0.AddArg(y) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v1.AddArg(y) - v.AddArg(v1) return true } } -func rewriteValueARM_OpMod32_0(v *Value) bool { +func rewriteValueARM_OpGeq32F_0(v *Value) bool { b := v.Block _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Mod32 x y) + // match: (Geq32F x y) // cond: - // result: (SUB (XOR <typ.UInt32> (Select1 <typ.UInt32> (CALLudiv (SUB <typ.UInt32> (XOR <typ.UInt32> x (Signmask x)) (Signmask x)) (SUB <typ.UInt32> (XOR <typ.UInt32> y (Signmask y)) (Signmask y)))) (Signmask x)) (Signmask x)) + // result: (GreaterEqual (CMPF x y)) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMSUB) - v0 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32) - v1 := b.NewValue0(v.Pos, OpSelect1, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpARMCALLudiv, types.NewTuple(typ.UInt32, typ.UInt32)) - v3 := b.NewValue0(v.Pos, OpARMSUB, typ.UInt32) - v4 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32) - v4.AddArg(x) - v5 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) - v5.AddArg(x) - v4.AddArg(v5) - 
v3.AddArg(v4) - v6 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) - v6.AddArg(x) - v3.AddArg(v6) - v2.AddArg(v3) - v7 := b.NewValue0(v.Pos, OpARMSUB, typ.UInt32) - v8 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32) - v8.AddArg(y) - v9 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) - v9.AddArg(y) - v8.AddArg(v9) - v7.AddArg(v8) - v10 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) - v10.AddArg(y) - v7.AddArg(v10) - v2.AddArg(v7) - v1.AddArg(v2) - v0.AddArg(v1) - v11 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) - v11.AddArg(x) - v0.AddArg(v11) + v.reset(OpARMGreaterEqual) + v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) v.AddArg(v0) - v12 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) - v12.AddArg(x) - v.AddArg(v12) return true } } -func rewriteValueARM_OpMod32u_0(v *Value) bool { +func rewriteValueARM_OpGeq32U_0(v *Value) bool { b := v.Block _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Mod32u x y) + // match: (Geq32U x y) // cond: - // result: (Select1 <typ.UInt32> (CALLudiv x y)) + // result: (GreaterEqualU (CMP x y)) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpSelect1) - v.Type = typ.UInt32 - v0 := b.NewValue0(v.Pos, OpARMCALLudiv, types.NewTuple(typ.UInt32, typ.UInt32)) + v.reset(OpARMGreaterEqualU) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v0.AddArg(x) v0.AddArg(y) v.AddArg(v0) return true } } -func rewriteValueARM_OpMod8_0(v *Value) bool { +func rewriteValueARM_OpGeq64F_0(v *Value) bool { b := v.Block _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Mod8 x y) + // match: (Geq64F x y) // cond: - // result: (Mod32 (SignExt8to32 x) (SignExt8to32 y)) + // result: (GreaterEqual (CMPD x y)) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpMod32) - v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v.reset(OpARMGreaterEqual) + v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags) v0.AddArg(x) + v0.AddArg(y) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) - 
v1.AddArg(y) - v.AddArg(v1) return true } } -func rewriteValueARM_OpMod8u_0(v *Value) bool { +func rewriteValueARM_OpGeq8_0(v *Value) bool { b := v.Block _ = b typ := &b.Func.Config.Types _ = typ - // match: (Mod8u x y) + // match: (Geq8 x y) // cond: - // result: (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y)) + // result: (GreaterEqual (CMP (SignExt8to32 x) (SignExt8to32 y))) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpMod32u) - v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) - v0.AddArg(x) + v.reset(OpARMGreaterEqual) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v2.AddArg(y) + v0.AddArg(v2) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) - v1.AddArg(y) - v.AddArg(v1) return true } } -func rewriteValueARM_OpMove_0(v *Value) bool { +func rewriteValueARM_OpGeq8U_0(v *Value) bool { b := v.Block _ = b - config := b.Func.Config - _ = config typ := &b.Func.Config.Types _ = typ - // match: (Move [0] _ _ mem) + // match: (Geq8U x y) // cond: - // result: mem + // result: (GreaterEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y))) for { - if v.AuxInt != 0 { - break - } - _ = v.Args[2] - mem := v.Args[2] - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMGreaterEqualU) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) return true } - // match: (Move [1] dst src mem) +} +func rewriteValueARM_OpGetCallerSP_0(v *Value) bool { + // match: (GetCallerSP) // cond: - // result: (MOVBstore dst (MOVBUload src mem) mem) + // result: (LoweredGetCallerSP) for { - if v.AuxInt != 1 { - break - } - _ = v.Args[2] - dst := v.Args[0] - src := v.Args[1] - 
mem := v.Args[2] - v.reset(OpARMMOVBstore) - v.AddArg(dst) - v0 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v.reset(OpARMLoweredGetCallerSP) return true } - // match: (Move [2] {t} dst src mem) - // cond: t.(*types.Type).Alignment()%2 == 0 - // result: (MOVHstore dst (MOVHUload src mem) mem) +} +func rewriteValueARM_OpGetClosurePtr_0(v *Value) bool { + // match: (GetClosurePtr) + // cond: + // result: (LoweredGetClosurePtr) for { - if v.AuxInt != 2 { - break - } - t := v.Aux - _ = v.Args[2] - dst := v.Args[0] - src := v.Args[1] - mem := v.Args[2] - if !(t.(*types.Type).Alignment()%2 == 0) { - break - } - v.reset(OpARMMOVHstore) - v.AddArg(dst) - v0 := b.NewValue0(v.Pos, OpARMMOVHUload, typ.UInt16) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v.reset(OpARMLoweredGetClosurePtr) return true } - // match: (Move [2] dst src mem) +} +func rewriteValueARM_OpGreater16_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Greater16 x y) // cond: - // result: (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem)) + // result: (GreaterThan (CMP (SignExt16to32 x) (SignExt16to32 y))) for { - if v.AuxInt != 2 { - break - } - _ = v.Args[2] - dst := v.Args[0] - src := v.Args[1] - mem := v.Args[2] - v.reset(OpARMMOVBstore) - v.AuxInt = 1 - v.AddArg(dst) - v0 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) - v0.AuxInt = 1 - v0.AddArg(src) - v0.AddArg(mem) + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMGreaterThan) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v2.AddArg(y) + v0.AddArg(v2) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) - v1.AddArg(dst) - v2 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) - v2.AddArg(src) - 
v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) return true } - // match: (Move [4] {t} dst src mem) - // cond: t.(*types.Type).Alignment()%4 == 0 - // result: (MOVWstore dst (MOVWload src mem) mem) +} +func rewriteValueARM_OpGreater16U_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Greater16U x y) + // cond: + // result: (GreaterThanU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y))) for { - if v.AuxInt != 4 { - break - } - t := v.Aux - _ = v.Args[2] - dst := v.Args[0] - src := v.Args[1] - mem := v.Args[2] - if !(t.(*types.Type).Alignment()%4 == 0) { - break - } - v.reset(OpARMMOVWstore) - v.AddArg(dst) - v0 := b.NewValue0(v.Pos, OpARMMOVWload, typ.UInt32) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (Move [4] {t} dst src mem) - // cond: t.(*types.Type).Alignment()%2 == 0 - // result: (MOVHstore [2] dst (MOVHUload [2] src mem) (MOVHstore dst (MOVHUload src mem) mem)) - for { - if v.AuxInt != 4 { - break - } - t := v.Aux - _ = v.Args[2] - dst := v.Args[0] - src := v.Args[1] - mem := v.Args[2] - if !(t.(*types.Type).Alignment()%2 == 0) { - break - } - v.reset(OpARMMOVHstore) - v.AuxInt = 2 - v.AddArg(dst) - v0 := b.NewValue0(v.Pos, OpARMMOVHUload, typ.UInt16) - v0.AuxInt = 2 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARMMOVHstore, types.TypeMem) - v1.AddArg(dst) - v2 := b.NewValue0(v.Pos, OpARMMOVHUload, typ.UInt16) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) - return true - } - // match: (Move [4] dst src mem) - // cond: - // result: (MOVBstore [3] dst (MOVBUload [3] src mem) (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem)))) - for { - if v.AuxInt != 4 { - break - } - _ = v.Args[2] - dst := v.Args[0] - src := v.Args[1] - mem := v.Args[2] - v.reset(OpARMMOVBstore) - v.AuxInt = 3 - v.AddArg(dst) - v0 := 
b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) - v0.AuxInt = 3 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) - v1.AuxInt = 2 - v1.AddArg(dst) - v2 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) - v2.AuxInt = 2 - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) - v3.AuxInt = 1 - v3.AddArg(dst) - v4 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) - v4.AuxInt = 1 - v4.AddArg(src) - v4.AddArg(mem) - v3.AddArg(v4) - v5 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) - v5.AddArg(dst) - v6 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) - v6.AddArg(src) - v6.AddArg(mem) - v5.AddArg(v6) - v5.AddArg(mem) - v3.AddArg(v5) - v1.AddArg(v3) - v.AddArg(v1) - return true - } - // match: (Move [3] dst src mem) - // cond: - // result: (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem))) - for { - if v.AuxInt != 3 { - break - } - _ = v.Args[2] - dst := v.Args[0] - src := v.Args[1] - mem := v.Args[2] - v.reset(OpARMMOVBstore) - v.AuxInt = 2 - v.AddArg(dst) - v0 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) - v0.AuxInt = 2 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) - v1.AuxInt = 1 - v1.AddArg(dst) - v2 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) - v2.AuxInt = 1 - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) - v3.AddArg(dst) - v4 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) - v4.AddArg(src) - v4.AddArg(mem) - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) - return true - } - // match: (Move [s] {t} dst src mem) - // cond: s%4 == 0 && s > 4 && s <= 512 && t.(*types.Type).Alignment()%4 == 0 && !config.noDuffDevice - // result: (DUFFCOPY [8 * (128 - int64(s/4))] dst src mem) - for { - s := v.AuxInt - t := v.Aux - _ = v.Args[2] - dst := 
v.Args[0] - src := v.Args[1] - mem := v.Args[2] - if !(s%4 == 0 && s > 4 && s <= 512 && t.(*types.Type).Alignment()%4 == 0 && !config.noDuffDevice) { - break - } - v.reset(OpARMDUFFCOPY) - v.AuxInt = 8 * (128 - int64(s/4)) - v.AddArg(dst) - v.AddArg(src) - v.AddArg(mem) - return true - } - // match: (Move [s] {t} dst src mem) - // cond: (s > 512 || config.noDuffDevice) || t.(*types.Type).Alignment()%4 != 0 - // result: (LoweredMove [t.(*types.Type).Alignment()] dst src (ADDconst <src.Type> src [s-moveSize(t.(*types.Type).Alignment(), config)]) mem) - for { - s := v.AuxInt - t := v.Aux - _ = v.Args[2] - dst := v.Args[0] - src := v.Args[1] - mem := v.Args[2] - if !((s > 512 || config.noDuffDevice) || t.(*types.Type).Alignment()%4 != 0) { - break - } - v.reset(OpARMLoweredMove) - v.AuxInt = t.(*types.Type).Alignment() - v.AddArg(dst) - v.AddArg(src) - v0 := b.NewValue0(v.Pos, OpARMADDconst, src.Type) - v0.AuxInt = s - moveSize(t.(*types.Type).Alignment(), config) - v0.AddArg(src) + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMGreaterThanU) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg(v2) v.AddArg(v0) - v.AddArg(mem) return true } - return false } -func rewriteValueARM_OpMul16_0(v *Value) bool { - // match: (Mul16 x y) +func rewriteValueARM_OpGreater32_0(v *Value) bool { + b := v.Block + _ = b + // match: (Greater32 x y) // cond: - // result: (MUL x y) + // result: (GreaterThan (CMP x y)) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMMUL) - v.AddArg(x) - v.AddArg(y) + v.reset(OpARMGreaterThan) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) return true } } -func rewriteValueARM_OpMul32_0(v *Value) bool { - // match: (Mul32 x y) +func rewriteValueARM_OpGreater32F_0(v *Value) bool { + b := v.Block + _ = b + 
// match: (Greater32F x y) // cond: - // result: (MUL x y) + // result: (GreaterThan (CMPF x y)) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMMUL) - v.AddArg(x) - v.AddArg(y) + v.reset(OpARMGreaterThan) + v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) return true } } -func rewriteValueARM_OpMul32F_0(v *Value) bool { - // match: (Mul32F x y) +func rewriteValueARM_OpGreater32U_0(v *Value) bool { + b := v.Block + _ = b + // match: (Greater32U x y) // cond: - // result: (MULF x y) + // result: (GreaterThanU (CMP x y)) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMMULF) - v.AddArg(x) - v.AddArg(y) + v.reset(OpARMGreaterThanU) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) return true } } -func rewriteValueARM_OpMul32uhilo_0(v *Value) bool { - // match: (Mul32uhilo x y) +func rewriteValueARM_OpGreater64F_0(v *Value) bool { + b := v.Block + _ = b + // match: (Greater64F x y) // cond: - // result: (MULLU x y) + // result: (GreaterThan (CMPD x y)) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMMULLU) - v.AddArg(x) - v.AddArg(y) + v.reset(OpARMGreaterThan) + v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) return true } } -func rewriteValueARM_OpMul64F_0(v *Value) bool { - // match: (Mul64F x y) +func rewriteValueARM_OpGreater8_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Greater8 x y) // cond: - // result: (MULD x y) + // result: (GreaterThan (CMP (SignExt8to32 x) (SignExt8to32 y))) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMMULD) - v.AddArg(x) - v.AddArg(y) + v.reset(OpARMGreaterThan) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v2.AddArg(y) + 
v0.AddArg(v2) + v.AddArg(v0) return true } } -func rewriteValueARM_OpMul8_0(v *Value) bool { - // match: (Mul8 x y) +func rewriteValueARM_OpGreater8U_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Greater8U x y) // cond: - // result: (MUL x y) + // result: (GreaterThanU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y))) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMMUL) - v.AddArg(x) - v.AddArg(y) + v.reset(OpARMGreaterThanU) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) return true } } -func rewriteValueARM_OpNeg16_0(v *Value) bool { - // match: (Neg16 x) +func rewriteValueARM_OpHmul32_0(v *Value) bool { + // match: (Hmul32 x y) // cond: - // result: (RSBconst [0] x) + // result: (HMUL x y) for { + _ = v.Args[1] x := v.Args[0] - v.reset(OpARMRSBconst) - v.AuxInt = 0 + y := v.Args[1] + v.reset(OpARMHMUL) v.AddArg(x) + v.AddArg(y) return true } } -func rewriteValueARM_OpNeg32_0(v *Value) bool { - // match: (Neg32 x) +func rewriteValueARM_OpHmul32u_0(v *Value) bool { + // match: (Hmul32u x y) // cond: - // result: (RSBconst [0] x) + // result: (HMULU x y) for { + _ = v.Args[1] x := v.Args[0] - v.reset(OpARMRSBconst) - v.AuxInt = 0 + y := v.Args[1] + v.reset(OpARMHMULU) v.AddArg(x) + v.AddArg(y) return true } } -func rewriteValueARM_OpNeg32F_0(v *Value) bool { - // match: (Neg32F x) +func rewriteValueARM_OpInterCall_0(v *Value) bool { + // match: (InterCall [argwid] entry mem) // cond: - // result: (NEGF x) + // result: (CALLinter [argwid] entry mem) for { - x := v.Args[0] - v.reset(OpARMNEGF) - v.AddArg(x) + argwid := v.AuxInt + _ = v.Args[1] + entry := v.Args[0] + mem := v.Args[1] + v.reset(OpARMCALLinter) + v.AuxInt = argwid + v.AddArg(entry) + v.AddArg(mem) return true } } -func rewriteValueARM_OpNeg64F_0(v *Value) 
bool { - // match: (Neg64F x) +func rewriteValueARM_OpIsInBounds_0(v *Value) bool { + b := v.Block + _ = b + // match: (IsInBounds idx len) // cond: - // result: (NEGD x) + // result: (LessThanU (CMP idx len)) for { - x := v.Args[0] - v.reset(OpARMNEGD) - v.AddArg(x) + _ = v.Args[1] + idx := v.Args[0] + len := v.Args[1] + v.reset(OpARMLessThanU) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v0.AddArg(idx) + v0.AddArg(len) + v.AddArg(v0) return true } } -func rewriteValueARM_OpNeg8_0(v *Value) bool { - // match: (Neg8 x) +func rewriteValueARM_OpIsNonNil_0(v *Value) bool { + b := v.Block + _ = b + // match: (IsNonNil ptr) // cond: - // result: (RSBconst [0] x) + // result: (NotEqual (CMPconst [0] ptr)) + for { + ptr := v.Args[0] + v.reset(OpARMNotEqual) + v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v0.AuxInt = 0 + v0.AddArg(ptr) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpIsSliceInBounds_0(v *Value) bool { + b := v.Block + _ = b + // match: (IsSliceInBounds idx len) + // cond: + // result: (LessEqualU (CMP idx len)) + for { + _ = v.Args[1] + idx := v.Args[0] + len := v.Args[1] + v.reset(OpARMLessEqualU) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v0.AddArg(idx) + v0.AddArg(len) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpLeq16_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Leq16 x y) + // cond: + // result: (LessEqual (CMP (SignExt16to32 x) (SignExt16to32 y))) for { + _ = v.Args[1] x := v.Args[0] - v.reset(OpARMRSBconst) - v.AuxInt = 0 - v.AddArg(x) + y := v.Args[1] + v.reset(OpARMLessEqual) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) return true } } -func rewriteValueARM_OpNeq16_0(v *Value) bool { +func rewriteValueARM_OpLeq16U_0(v *Value) bool { b := 
v.Block _ = b typ := &b.Func.Config.Types _ = typ - // match: (Neq16 x y) + // match: (Leq16U x y) // cond: - // result: (NotEqual (CMP (ZeroExt16to32 x) (ZeroExt16to32 y))) + // result: (LessEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y))) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMNotEqual) + v.reset(OpARMLessEqualU) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(x) @@ -18439,17 +18915,17 @@ func rewriteValueARM_OpNeq16_0(v *Value) bool { return true } } -func rewriteValueARM_OpNeq32_0(v *Value) bool { +func rewriteValueARM_OpLeq32_0(v *Value) bool { b := v.Block _ = b - // match: (Neq32 x y) + // match: (Leq32 x y) // cond: - // result: (NotEqual (CMP x y)) + // result: (LessEqual (CMP x y)) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMNotEqual) + v.reset(OpARMLessEqual) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v0.AddArg(x) v0.AddArg(y) @@ -18457,376 +18933,427 @@ func rewriteValueARM_OpNeq32_0(v *Value) bool { return true } } -func rewriteValueARM_OpNeq32F_0(v *Value) bool { +func rewriteValueARM_OpLeq32F_0(v *Value) bool { b := v.Block _ = b - // match: (Neq32F x y) + // match: (Leq32F x y) // cond: - // result: (NotEqual (CMPF x y)) + // result: (GreaterEqual (CMPF y x)) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMNotEqual) + v.reset(OpARMGreaterEqual) v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags) + v0.AddArg(y) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpLeq32U_0(v *Value) bool { + b := v.Block + _ = b + // match: (Leq32U x y) + // cond: + // result: (LessEqualU (CMP x y)) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMLessEqualU) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v0.AddArg(x) v0.AddArg(y) v.AddArg(v0) return true } } -func rewriteValueARM_OpNeq64F_0(v *Value) bool { +func rewriteValueARM_OpLeq64F_0(v *Value) bool { b := v.Block _ = b 
- // match: (Neq64F x y) + // match: (Leq64F x y) // cond: - // result: (NotEqual (CMPD x y)) + // result: (GreaterEqual (CMPD y x)) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMNotEqual) + v.reset(OpARMGreaterEqual) v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags) - v0.AddArg(x) v0.AddArg(y) + v0.AddArg(x) v.AddArg(v0) return true } } -func rewriteValueARM_OpNeq8_0(v *Value) bool { +func rewriteValueARM_OpLeq8_0(v *Value) bool { b := v.Block _ = b typ := &b.Func.Config.Types _ = typ - // match: (Neq8 x y) + // match: (Leq8 x y) // cond: - // result: (NotEqual (CMP (ZeroExt8to32 x) (ZeroExt8to32 y))) + // result: (LessEqual (CMP (SignExt8to32 x) (SignExt8to32 y))) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMNotEqual) + v.reset(OpARMLessEqual) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v1.AddArg(x) v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v2.AddArg(y) v0.AddArg(v2) v.AddArg(v0) return true } } -func rewriteValueARM_OpNeqB_0(v *Value) bool { - // match: (NeqB x y) +func rewriteValueARM_OpLeq8U_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Leq8U x y) // cond: - // result: (XOR x y) + // result: (LessEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y))) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMXOR) - v.AddArg(x) - v.AddArg(y) + v.reset(OpARMLessEqualU) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) return true } } -func rewriteValueARM_OpNeqPtr_0(v *Value) bool { +func rewriteValueARM_OpLess16_0(v *Value) bool { b := v.Block _ = b - // match: (NeqPtr x y) + typ := 
&b.Func.Config.Types + _ = typ + // match: (Less16 x y) // cond: - // result: (NotEqual (CMP x y)) + // result: (LessThan (CMP (SignExt16to32 x) (SignExt16to32 y))) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMNotEqual) + v.reset(OpARMLessThan) v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v2.AddArg(y) + v0.AddArg(v2) v.AddArg(v0) return true } } -func rewriteValueARM_OpNilCheck_0(v *Value) bool { - // match: (NilCheck ptr mem) - // cond: - // result: (LoweredNilCheck ptr mem) - for { - _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - v.reset(OpARMLoweredNilCheck) - v.AddArg(ptr) - v.AddArg(mem) - return true - } -} -func rewriteValueARM_OpNot_0(v *Value) bool { - // match: (Not x) - // cond: - // result: (XORconst [1] x) - for { - x := v.Args[0] - v.reset(OpARMXORconst) - v.AuxInt = 1 - v.AddArg(x) - return true - } -} -func rewriteValueARM_OpOffPtr_0(v *Value) bool { - // match: (OffPtr [off] ptr:(SP)) - // cond: - // result: (MOVWaddr [off] ptr) - for { - off := v.AuxInt - ptr := v.Args[0] - if ptr.Op != OpSP { - break - } - v.reset(OpARMMOVWaddr) - v.AuxInt = off - v.AddArg(ptr) - return true - } - // match: (OffPtr [off] ptr) - // cond: - // result: (ADDconst [off] ptr) - for { - off := v.AuxInt - ptr := v.Args[0] - v.reset(OpARMADDconst) - v.AuxInt = off - v.AddArg(ptr) - return true - } -} -func rewriteValueARM_OpOr16_0(v *Value) bool { - // match: (Or16 x y) +func rewriteValueARM_OpLess16U_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Less16U x y) // cond: - // result: (OR x y) + // result: (LessThanU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y))) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMOR) - v.AddArg(x) - v.AddArg(y) + v.reset(OpARMLessThanU) + v0 := b.NewValue0(v.Pos, OpARMCMP, 
types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) return true } } -func rewriteValueARM_OpOr32_0(v *Value) bool { - // match: (Or32 x y) +func rewriteValueARM_OpLess32_0(v *Value) bool { + b := v.Block + _ = b + // match: (Less32 x y) // cond: - // result: (OR x y) + // result: (LessThan (CMP x y)) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMOR) - v.AddArg(x) - v.AddArg(y) + v.reset(OpARMLessThan) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) return true } } -func rewriteValueARM_OpOr8_0(v *Value) bool { - // match: (Or8 x y) +func rewriteValueARM_OpLess32F_0(v *Value) bool { + b := v.Block + _ = b + // match: (Less32F x y) // cond: - // result: (OR x y) + // result: (GreaterThan (CMPF y x)) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMOR) - v.AddArg(x) - v.AddArg(y) + v.reset(OpARMGreaterThan) + v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags) + v0.AddArg(y) + v0.AddArg(x) + v.AddArg(v0) return true } } -func rewriteValueARM_OpOrB_0(v *Value) bool { - // match: (OrB x y) +func rewriteValueARM_OpLess32U_0(v *Value) bool { + b := v.Block + _ = b + // match: (Less32U x y) // cond: - // result: (OR x y) + // result: (LessThanU (CMP x y)) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMOR) - v.AddArg(x) - v.AddArg(y) - return true - } -} -func rewriteValueARM_OpRound32F_0(v *Value) bool { - // match: (Round32F x) - // cond: - // result: x - for { - x := v.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.reset(OpARMLessThanU) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) return true } } -func rewriteValueARM_OpRound64F_0(v *Value) bool { - // match: (Round64F x) +func rewriteValueARM_OpLess64F_0(v *Value) bool { + b := v.Block + _ = b + // 
match: (Less64F x y) // cond: - // result: x + // result: (GreaterThan (CMPD y x)) for { + _ = v.Args[1] x := v.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + y := v.Args[1] + v.reset(OpARMGreaterThan) + v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags) + v0.AddArg(y) + v0.AddArg(x) + v.AddArg(v0) return true } } -func rewriteValueARM_OpRsh16Ux16_0(v *Value) bool { +func rewriteValueARM_OpLess8_0(v *Value) bool { b := v.Block _ = b typ := &b.Func.Config.Types _ = typ - // match: (Rsh16Ux16 x y) + // match: (Less8 x y) // cond: - // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0]) + // result: (LessThan (CMP (SignExt8to32 x) (SignExt8to32 y))) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMCMOVWHSconst) - v.AuxInt = 0 - v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) - v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v.reset(OpARMLessThan) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v1.AddArg(x) v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v2.AddArg(y) v0.AddArg(v2) v.AddArg(v0) - v3 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) - v3.AuxInt = 256 - v4 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v4.AddArg(y) - v3.AddArg(v4) - v.AddArg(v3) return true } } -func rewriteValueARM_OpRsh16Ux32_0(v *Value) bool { +func rewriteValueARM_OpLess8U_0(v *Value) bool { b := v.Block _ = b typ := &b.Func.Config.Types _ = typ - // match: (Rsh16Ux32 x y) + // match: (Less8U x y) // cond: - // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) y) (CMPconst [256] y) [0]) + // result: (LessThanU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y))) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMCMOVWHSconst) - v.AuxInt = 0 - v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) - v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + 
v.reset(OpARMLessThanU) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(x) v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) - v2.AuxInt = 256 + v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v2.AddArg(y) - v.AddArg(v2) + v0.AddArg(v2) + v.AddArg(v0) return true } } -func rewriteValueARM_OpRsh16Ux64_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Rsh16Ux64 x (Const64 [c])) - // cond: uint64(c) < 16 - // result: (SRLconst (SLLconst <typ.UInt32> x [16]) [c+16]) +func rewriteValueARM_OpLoad_0(v *Value) bool { + // match: (Load <t> ptr mem) + // cond: t.IsBoolean() + // result: (MOVBUload ptr mem) for { + t := v.Type _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint64(c) < 16) { + ptr := v.Args[0] + mem := v.Args[1] + if !(t.IsBoolean()) { break } - v.reset(OpARMSRLconst) - v.AuxInt = c + 16 - v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32) - v0.AuxInt = 16 - v0.AddArg(x) - v.AddArg(v0) + v.reset(OpARMMOVBUload) + v.AddArg(ptr) + v.AddArg(mem) return true } - // match: (Rsh16Ux64 _ (Const64 [c])) - // cond: uint64(c) >= 16 - // result: (Const16 [0]) + // match: (Load <t> ptr mem) + // cond: (is8BitInt(t) && isSigned(t)) + // result: (MOVBload ptr mem) for { + t := v.Type _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint64(c) >= 16) { + ptr := v.Args[0] + mem := v.Args[1] + if !(is8BitInt(t) && isSigned(t)) { break } - v.reset(OpConst16) - v.AuxInt = 0 + v.reset(OpARMMOVBload) + v.AddArg(ptr) + v.AddArg(mem) return true } - return false -} -func rewriteValueARM_OpRsh16Ux8_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Rsh16Ux8 x y) - // cond: - // result: (SRL (ZeroExt16to32 x) (ZeroExt8to32 y)) - for { + // match: (Load <t> 
ptr mem) + // cond: (is8BitInt(t) && !isSigned(t)) + // result: (MOVBUload ptr mem) + for { + t := v.Type _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMSRL) - v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v0.AddArg(x) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) - v1.AddArg(y) - v.AddArg(v1) + ptr := v.Args[0] + mem := v.Args[1] + if !(is8BitInt(t) && !isSigned(t)) { + break + } + v.reset(OpARMMOVBUload) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (Load <t> ptr mem) + // cond: (is16BitInt(t) && isSigned(t)) + // result: (MOVHload ptr mem) + for { + t := v.Type + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + if !(is16BitInt(t) && isSigned(t)) { + break + } + v.reset(OpARMMOVHload) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (Load <t> ptr mem) + // cond: (is16BitInt(t) && !isSigned(t)) + // result: (MOVHUload ptr mem) + for { + t := v.Type + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + if !(is16BitInt(t) && !isSigned(t)) { + break + } + v.reset(OpARMMOVHUload) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (Load <t> ptr mem) + // cond: (is32BitInt(t) || isPtr(t)) + // result: (MOVWload ptr mem) + for { + t := v.Type + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + if !(is32BitInt(t) || isPtr(t)) { + break + } + v.reset(OpARMMOVWload) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (Load <t> ptr mem) + // cond: is32BitFloat(t) + // result: (MOVFload ptr mem) + for { + t := v.Type + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + if !(is32BitFloat(t)) { + break + } + v.reset(OpARMMOVFload) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (Load <t> ptr mem) + // cond: is64BitFloat(t) + // result: (MOVDload ptr mem) + for { + t := v.Type + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + if !(is64BitFloat(t)) { + break + } + v.reset(OpARMMOVDload) + v.AddArg(ptr) + v.AddArg(mem) return true } + return false } 
-func rewriteValueARM_OpRsh16x16_0(v *Value) bool { +func rewriteValueARM_OpLsh16x16_0(v *Value) bool { b := v.Block _ = b typ := &b.Func.Config.Types _ = typ - // match: (Rsh16x16 x y) + // match: (Lsh16x16 x y) // cond: - // result: (SRAcond (SignExt16to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y))) + // result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0]) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMSRAcond) - v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v.reset(OpARMCMOVWHSconst) + v.AuxInt = 0 + v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) v0.AddArg(x) - v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(y) - v.AddArg(v1) + v0.AddArg(v1) + v.AddArg(v0) v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v2.AuxInt = 256 v3 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) @@ -18836,23 +19363,22 @@ func rewriteValueARM_OpRsh16x16_0(v *Value) bool { return true } } -func rewriteValueARM_OpRsh16x32_0(v *Value) bool { +func rewriteValueARM_OpLsh16x32_0(v *Value) bool { b := v.Block _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Rsh16x32 x y) + // match: (Lsh16x32 x y) // cond: - // result: (SRAcond (SignExt16to32 x) y (CMPconst [256] y)) + // result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0]) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMSRAcond) - v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v.reset(OpARMCMOVWHSconst) + v.AuxInt = 0 + v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) v0.AddArg(x) + v0.AddArg(y) v.AddArg(v0) - v.AddArg(y) v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v1.AuxInt = 256 v1.AddArg(y) @@ -18860,14 +19386,10 @@ func rewriteValueARM_OpRsh16x32_0(v *Value) bool { return true } } -func rewriteValueARM_OpRsh16x64_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Rsh16x64 x (Const64 [c])) +func rewriteValueARM_OpLsh16x64_0(v 
*Value) bool { + // match: (Lsh16x64 x (Const64 [c])) // cond: uint64(c) < 16 - // result: (SRAconst (SLLconst <typ.UInt32> x [16]) [c+16]) + // result: (SLLconst x [c]) for { _ = v.Args[1] x := v.Args[0] @@ -18879,20 +19401,16 @@ func rewriteValueARM_OpRsh16x64_0(v *Value) bool { if !(uint64(c) < 16) { break } - v.reset(OpARMSRAconst) - v.AuxInt = c + 16 - v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32) - v0.AuxInt = 16 - v0.AddArg(x) - v.AddArg(v0) + v.reset(OpARMSLLconst) + v.AuxInt = c + v.AddArg(x) return true } - // match: (Rsh16x64 x (Const64 [c])) + // match: (Lsh16x64 _ (Const64 [c])) // cond: uint64(c) >= 16 - // result: (SRAconst (SLLconst <typ.UInt32> x [16]) [31]) + // result: (Const16 [0]) for { _ = v.Args[1] - x := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpConst64 { break @@ -18901,53 +19419,47 @@ func rewriteValueARM_OpRsh16x64_0(v *Value) bool { if !(uint64(c) >= 16) { break } - v.reset(OpARMSRAconst) - v.AuxInt = 31 - v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32) - v0.AuxInt = 16 - v0.AddArg(x) - v.AddArg(v0) + v.reset(OpConst16) + v.AuxInt = 0 return true } return false } -func rewriteValueARM_OpRsh16x8_0(v *Value) bool { +func rewriteValueARM_OpLsh16x8_0(v *Value) bool { b := v.Block _ = b typ := &b.Func.Config.Types _ = typ - // match: (Rsh16x8 x y) + // match: (Lsh16x8 x y) // cond: - // result: (SRA (SignExt16to32 x) (ZeroExt8to32 y)) + // result: (SLL x (ZeroExt8to32 y)) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMSRA) - v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) - v0.AddArg(x) + v.reset(OpARMSLL) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v0.AddArg(y) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) - v1.AddArg(y) - v.AddArg(v1) return true } } -func rewriteValueARM_OpRsh32Ux16_0(v *Value) bool { +func rewriteValueARM_OpLsh32x16_0(v *Value) bool { b := v.Block _ = b typ := &b.Func.Config.Types _ = typ - // match: (Rsh32Ux16 x y) + // match: (Lsh32x16 x 
y) // cond: - // result: (CMOVWHSconst (SRL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0]) + // result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0]) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] v.reset(OpARMCMOVWHSconst) v.AuxInt = 0 - v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) + v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(y) @@ -18962,19 +19474,19 @@ func rewriteValueARM_OpRsh32Ux16_0(v *Value) bool { return true } } -func rewriteValueARM_OpRsh32Ux32_0(v *Value) bool { +func rewriteValueARM_OpLsh32x32_0(v *Value) bool { b := v.Block _ = b - // match: (Rsh32Ux32 x y) + // match: (Lsh32x32 x y) // cond: - // result: (CMOVWHSconst (SRL <x.Type> x y) (CMPconst [256] y) [0]) + // result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0]) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] v.reset(OpARMCMOVWHSconst) v.AuxInt = 0 - v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) + v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) v0.AddArg(x) v0.AddArg(y) v.AddArg(v0) @@ -18985,10 +19497,10 @@ func rewriteValueARM_OpRsh32Ux32_0(v *Value) bool { return true } } -func rewriteValueARM_OpRsh32Ux64_0(v *Value) bool { - // match: (Rsh32Ux64 x (Const64 [c])) +func rewriteValueARM_OpLsh32x64_0(v *Value) bool { + // match: (Lsh32x64 x (Const64 [c])) // cond: uint64(c) < 32 - // result: (SRLconst x [c]) + // result: (SLLconst x [c]) for { _ = v.Args[1] x := v.Args[0] @@ -19000,12 +19512,12 @@ func rewriteValueARM_OpRsh32Ux64_0(v *Value) bool { if !(uint64(c) < 32) { break } - v.reset(OpARMSRLconst) + v.reset(OpARMSLLconst) v.AuxInt = c v.AddArg(x) return true } - // match: (Rsh32Ux64 _ (Const64 [c])) + // match: (Lsh32x64 _ (Const64 [c])) // cond: uint64(c) >= 32 // result: (Const32 [0]) for { @@ -19024,19 +19536,19 @@ func rewriteValueARM_OpRsh32Ux64_0(v *Value) bool { } return false } -func rewriteValueARM_OpRsh32Ux8_0(v *Value) bool { +func 
rewriteValueARM_OpLsh32x8_0(v *Value) bool { b := v.Block _ = b typ := &b.Func.Config.Types _ = typ - // match: (Rsh32Ux8 x y) + // match: (Lsh32x8 x y) // cond: - // result: (SRL x (ZeroExt8to32 y)) + // result: (SLL x (ZeroExt8to32 y)) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMSRL) + v.reset(OpARMSLL) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(y) @@ -19044,56 +19556,62 @@ func rewriteValueARM_OpRsh32Ux8_0(v *Value) bool { return true } } -func rewriteValueARM_OpRsh32x16_0(v *Value) bool { +func rewriteValueARM_OpLsh8x16_0(v *Value) bool { b := v.Block _ = b typ := &b.Func.Config.Types _ = typ - // match: (Rsh32x16 x y) + // match: (Lsh8x16 x y) // cond: - // result: (SRAcond x (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y))) + // result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0]) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMSRAcond) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v0.AddArg(y) + v.reset(OpARMCMOVWHSconst) + v.AuxInt = 0 + v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(y) + v0.AddArg(v1) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) - v1.AuxInt = 256 - v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v2.AuxInt = 256 + v3 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg(v2) return true } } -func rewriteValueARM_OpRsh32x32_0(v *Value) bool { +func rewriteValueARM_OpLsh8x32_0(v *Value) bool { b := v.Block _ = b - // match: (Rsh32x32 x y) + // match: (Lsh8x32 x y) // cond: - // result: (SRAcond x y (CMPconst [256] y)) + // result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0]) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - 
v.reset(OpARMSRAcond) - v.AddArg(x) - v.AddArg(y) - v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) - v0.AuxInt = 256 - v0.AddArg(y) + v.reset(OpARMCMOVWHSconst) + v.AuxInt = 0 + v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) + v0.AddArg(x) + v0.AddArg(y) v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v1.AuxInt = 256 + v1.AddArg(y) + v.AddArg(v1) return true } } -func rewriteValueARM_OpRsh32x64_0(v *Value) bool { - // match: (Rsh32x64 x (Const64 [c])) - // cond: uint64(c) < 32 - // result: (SRAconst x [c]) +func rewriteValueARM_OpLsh8x64_0(v *Value) bool { + // match: (Lsh8x64 x (Const64 [c])) + // cond: uint64(c) < 8 + // result: (SLLconst x [c]) for { _ = v.Args[1] x := v.Args[0] @@ -19102,48 +19620,46 @@ func rewriteValueARM_OpRsh32x64_0(v *Value) bool { break } c := v_1.AuxInt - if !(uint64(c) < 32) { + if !(uint64(c) < 8) { break } - v.reset(OpARMSRAconst) + v.reset(OpARMSLLconst) v.AuxInt = c v.AddArg(x) return true } - // match: (Rsh32x64 x (Const64 [c])) - // cond: uint64(c) >= 32 - // result: (SRAconst x [31]) + // match: (Lsh8x64 _ (Const64 [c])) + // cond: uint64(c) >= 8 + // result: (Const8 [0]) for { _ = v.Args[1] - x := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpConst64 { break } c := v_1.AuxInt - if !(uint64(c) >= 32) { + if !(uint64(c) >= 8) { break } - v.reset(OpARMSRAconst) - v.AuxInt = 31 - v.AddArg(x) + v.reset(OpConst8) + v.AuxInt = 0 return true } return false } -func rewriteValueARM_OpRsh32x8_0(v *Value) bool { +func rewriteValueARM_OpLsh8x8_0(v *Value) bool { b := v.Block _ = b typ := &b.Func.Config.Types _ = typ - // match: (Rsh32x8 x y) + // match: (Lsh8x8 x y) // cond: - // result: (SRA x (ZeroExt8to32 y)) + // result: (SLL x (ZeroExt8to32 y)) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMSRA) + v.reset(OpARMSLL) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(y) @@ -19151,1092 +19667,2411 @@ func rewriteValueARM_OpRsh32x8_0(v *Value) bool { return true } } -func 
rewriteValueARM_OpRsh8Ux16_0(v *Value) bool { +func rewriteValueARM_OpMod16_0(v *Value) bool { b := v.Block _ = b typ := &b.Func.Config.Types _ = typ - // match: (Rsh8Ux16 x y) + // match: (Mod16 x y) // cond: - // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0]) + // result: (Mod32 (SignExt16to32 x) (SignExt16to32 y)) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMCMOVWHSconst) - v.AuxInt = 0 - v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) - v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v2.AddArg(y) - v0.AddArg(v2) + v.reset(OpMod32) + v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v0.AddArg(x) v.AddArg(v0) - v3 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) - v3.AuxInt = 256 - v4 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v4.AddArg(y) - v3.AddArg(v4) - v.AddArg(v3) + v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v1.AddArg(y) + v.AddArg(v1) return true } } -func rewriteValueARM_OpRsh8Ux32_0(v *Value) bool { +func rewriteValueARM_OpMod16u_0(v *Value) bool { b := v.Block _ = b typ := &b.Func.Config.Types _ = typ - // match: (Rsh8Ux32 x y) + // match: (Mod16u x y) // cond: - // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) y) (CMPconst [256] y) [0]) + // result: (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y)) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMCMOVWHSconst) - v.AuxInt = 0 - v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) - v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) - v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) + v.reset(OpMod32u) + v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v0.AddArg(x) v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) - v2.AuxInt = 256 - v2.AddArg(y) - v.AddArg(v2) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(y) + v.AddArg(v1) return true } } -func 
rewriteValueARM_OpRsh8Ux64_0(v *Value) bool { +func rewriteValueARM_OpMod32_0(v *Value) bool { b := v.Block _ = b typ := &b.Func.Config.Types _ = typ - // match: (Rsh8Ux64 x (Const64 [c])) - // cond: uint64(c) < 8 - // result: (SRLconst (SLLconst <typ.UInt32> x [24]) [c+24]) + // match: (Mod32 x y) + // cond: + // result: (SUB (XOR <typ.UInt32> (Select1 <typ.UInt32> (CALLudiv (SUB <typ.UInt32> (XOR <typ.UInt32> x (Signmask x)) (Signmask x)) (SUB <typ.UInt32> (XOR <typ.UInt32> y (Signmask y)) (Signmask y)))) (Signmask x)) (Signmask x)) for { _ = v.Args[1] x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint64(c) < 8) { - break - } - v.reset(OpARMSRLconst) - v.AuxInt = c + 24 - v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32) - v0.AuxInt = 24 - v0.AddArg(x) + y := v.Args[1] + v.reset(OpARMSUB) + v0 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpSelect1, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpARMCALLudiv, types.NewTuple(typ.UInt32, typ.UInt32)) + v3 := b.NewValue0(v.Pos, OpARMSUB, typ.UInt32) + v4 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32) + v4.AddArg(x) + v5 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) + v5.AddArg(x) + v4.AddArg(v5) + v3.AddArg(v4) + v6 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) + v6.AddArg(x) + v3.AddArg(v6) + v2.AddArg(v3) + v7 := b.NewValue0(v.Pos, OpARMSUB, typ.UInt32) + v8 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32) + v8.AddArg(y) + v9 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) + v9.AddArg(y) + v8.AddArg(v9) + v7.AddArg(v8) + v10 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) + v10.AddArg(y) + v7.AddArg(v10) + v2.AddArg(v7) + v1.AddArg(v2) + v0.AddArg(v1) + v11 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) + v11.AddArg(x) + v0.AddArg(v11) v.AddArg(v0) + v12 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) + v12.AddArg(x) + v.AddArg(v12) return true } - // match: (Rsh8Ux64 _ (Const64 [c])) - // cond: uint64(c) >= 8 - // result: (Const8 [0]) - for { - _ = v.Args[1] - 
v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint64(c) >= 8) { - break - } - v.reset(OpConst8) - v.AuxInt = 0 - return true - } - return false } -func rewriteValueARM_OpRsh8Ux8_0(v *Value) bool { +func rewriteValueARM_OpMod32u_0(v *Value) bool { b := v.Block _ = b typ := &b.Func.Config.Types _ = typ - // match: (Rsh8Ux8 x y) + // match: (Mod32u x y) // cond: - // result: (SRL (ZeroExt8to32 x) (ZeroExt8to32 y)) + // result: (Select1 <typ.UInt32> (CALLudiv x y)) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMSRL) - v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v.reset(OpSelect1) + v.Type = typ.UInt32 + v0 := b.NewValue0(v.Pos, OpARMCALLudiv, types.NewTuple(typ.UInt32, typ.UInt32)) v0.AddArg(x) + v0.AddArg(y) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) - v1.AddArg(y) - v.AddArg(v1) return true } } -func rewriteValueARM_OpRsh8x16_0(v *Value) bool { +func rewriteValueARM_OpMod8_0(v *Value) bool { b := v.Block _ = b typ := &b.Func.Config.Types _ = typ - // match: (Rsh8x16 x y) + // match: (Mod8 x y) // cond: - // result: (SRAcond (SignExt8to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y))) + // result: (Mod32 (SignExt8to32 x) (SignExt8to32 y)) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMSRAcond) + v.reset(OpMod32) v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v1.AddArg(y) v.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) - v2.AuxInt = 256 - v3 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v3.AddArg(y) - v2.AddArg(v3) - v.AddArg(v2) return true } } -func rewriteValueARM_OpRsh8x32_0(v *Value) bool { +func rewriteValueARM_OpMod8u_0(v *Value) bool { b := v.Block _ = b typ := &b.Func.Config.Types _ = typ - // match: (Rsh8x32 x y) + // match: (Mod8u x y) // cond: - // result: (SRAcond 
(SignExt8to32 x) y (CMPconst [256] y)) + // result: (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y)) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMSRAcond) - v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v.reset(OpMod32u) + v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v0.AddArg(x) v.AddArg(v0) - v.AddArg(y) - v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) - v1.AuxInt = 256 + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(y) v.AddArg(v1) return true } } -func rewriteValueARM_OpRsh8x64_0(v *Value) bool { +func rewriteValueARM_OpMove_0(v *Value) bool { b := v.Block _ = b + config := b.Func.Config + _ = config typ := &b.Func.Config.Types _ = typ - // match: (Rsh8x64 x (Const64 [c])) - // cond: uint64(c) < 8 - // result: (SRAconst (SLLconst <typ.UInt32> x [24]) [c+24]) + // match: (Move [0] _ _ mem) + // cond: + // result: mem for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint64(c) < 8) { + if v.AuxInt != 0 { break } - v.reset(OpARMSRAconst) - v.AuxInt = c + 24 - v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32) - v0.AuxInt = 24 - v0.AddArg(x) - v.AddArg(v0) + _ = v.Args[2] + mem := v.Args[2] + v.reset(OpCopy) + v.Type = mem.Type + v.AddArg(mem) return true } - // match: (Rsh8x64 x (Const64 [c])) - // cond: uint64(c) >= 8 - // result: (SRAconst (SLLconst <typ.UInt32> x [24]) [31]) + // match: (Move [1] dst src mem) + // cond: + // result: (MOVBstore dst (MOVBUload src mem) mem) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64 { + if v.AuxInt != 1 { break } - c := v_1.AuxInt - if !(uint64(c) >= 8) { - break - } - v.reset(OpARMSRAconst) - v.AuxInt = 31 - v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32) - v0.AuxInt = 24 - v0.AddArg(x) - v.AddArg(v0) - return true - } - return false -} -func rewriteValueARM_OpRsh8x8_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // 
match: (Rsh8x8 x y) - // cond: - // result: (SRA (SignExt8to32 x) (ZeroExt8to32 y)) - for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMSRA) - v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) - v0.AddArg(x) + _ = v.Args[2] + dst := v.Args[0] + src := v.Args[1] + mem := v.Args[2] + v.reset(OpARMMOVBstore) + v.AddArg(dst) + v0 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) + v0.AddArg(src) + v0.AddArg(mem) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) - v1.AddArg(y) - v.AddArg(v1) + v.AddArg(mem) return true } -} -func rewriteValueARM_OpSelect0_0(v *Value) bool { - // match: (Select0 (CALLudiv x (MOVWconst [1]))) - // cond: - // result: x + // match: (Move [2] {t} dst src mem) + // cond: t.(*types.Type).Alignment()%2 == 0 + // result: (MOVHstore dst (MOVHUload src mem) mem) for { - v_0 := v.Args[0] - if v_0.Op != OpARMCALLudiv { - break - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpARMMOVWconst { + if v.AuxInt != 2 { break } - if v_0_1.AuxInt != 1 { + t := v.Aux + _ = v.Args[2] + dst := v.Args[0] + src := v.Args[1] + mem := v.Args[2] + if !(t.(*types.Type).Alignment()%2 == 0) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.reset(OpARMMOVHstore) + v.AddArg(dst) + v0 := b.NewValue0(v.Pos, OpARMMOVHUload, typ.UInt16) + v0.AddArg(src) + v0.AddArg(mem) + v.AddArg(v0) + v.AddArg(mem) return true } - // match: (Select0 (CALLudiv x (MOVWconst [c]))) - // cond: isPowerOfTwo(c) - // result: (SRLconst [log2(c)] x) + // match: (Move [2] dst src mem) + // cond: + // result: (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem)) for { - v_0 := v.Args[0] - if v_0.Op != OpARMCALLudiv { - break - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpARMMOVWconst { - break - } - c := v_0_1.AuxInt - if !(isPowerOfTwo(c)) { + if v.AuxInt != 2 { break } - v.reset(OpARMSRLconst) - v.AuxInt = log2(c) - v.AddArg(x) + _ = v.Args[2] + dst 
:= v.Args[0] + src := v.Args[1] + mem := v.Args[2] + v.reset(OpARMMOVBstore) + v.AuxInt = 1 + v.AddArg(dst) + v0 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) + v0.AuxInt = 1 + v0.AddArg(src) + v0.AddArg(mem) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) + v1.AddArg(dst) + v2 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) + v2.AddArg(src) + v2.AddArg(mem) + v1.AddArg(v2) + v1.AddArg(mem) + v.AddArg(v1) return true } - // match: (Select0 (CALLudiv (MOVWconst [c]) (MOVWconst [d]))) - // cond: - // result: (MOVWconst [int64(uint32(c)/uint32(d))]) + // match: (Move [4] {t} dst src mem) + // cond: t.(*types.Type).Alignment()%4 == 0 + // result: (MOVWstore dst (MOVWload src mem) mem) for { - v_0 := v.Args[0] - if v_0.Op != OpARMCALLudiv { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpARMMOVWconst { + if v.AuxInt != 4 { break } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpARMMOVWconst { + t := v.Aux + _ = v.Args[2] + dst := v.Args[0] + src := v.Args[1] + mem := v.Args[2] + if !(t.(*types.Type).Alignment()%4 == 0) { break } - d := v_0_1.AuxInt - v.reset(OpARMMOVWconst) - v.AuxInt = int64(uint32(c) / uint32(d)) + v.reset(OpARMMOVWstore) + v.AddArg(dst) + v0 := b.NewValue0(v.Pos, OpARMMOVWload, typ.UInt32) + v0.AddArg(src) + v0.AddArg(mem) + v.AddArg(v0) + v.AddArg(mem) return true } - return false -} -func rewriteValueARM_OpSelect1_0(v *Value) bool { - // match: (Select1 (CALLudiv _ (MOVWconst [1]))) - // cond: - // result: (MOVWconst [0]) + // match: (Move [4] {t} dst src mem) + // cond: t.(*types.Type).Alignment()%2 == 0 + // result: (MOVHstore [2] dst (MOVHUload [2] src mem) (MOVHstore dst (MOVHUload src mem) mem)) for { - v_0 := v.Args[0] - if v_0.Op != OpARMCALLudiv { - break - } - _ = v_0.Args[1] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpARMMOVWconst { + if v.AuxInt != 4 { break } - if v_0_1.AuxInt != 1 { + t := v.Aux + _ = v.Args[2] + dst := v.Args[0] + src := v.Args[1] + mem := v.Args[2] + if 
!(t.(*types.Type).Alignment()%2 == 0) { break } - v.reset(OpARMMOVWconst) - v.AuxInt = 0 + v.reset(OpARMMOVHstore) + v.AuxInt = 2 + v.AddArg(dst) + v0 := b.NewValue0(v.Pos, OpARMMOVHUload, typ.UInt16) + v0.AuxInt = 2 + v0.AddArg(src) + v0.AddArg(mem) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARMMOVHstore, types.TypeMem) + v1.AddArg(dst) + v2 := b.NewValue0(v.Pos, OpARMMOVHUload, typ.UInt16) + v2.AddArg(src) + v2.AddArg(mem) + v1.AddArg(v2) + v1.AddArg(mem) + v.AddArg(v1) return true } - // match: (Select1 (CALLudiv x (MOVWconst [c]))) - // cond: isPowerOfTwo(c) - // result: (ANDconst [c-1] x) + // match: (Move [4] dst src mem) + // cond: + // result: (MOVBstore [3] dst (MOVBUload [3] src mem) (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem)))) for { - v_0 := v.Args[0] - if v_0.Op != OpARMCALLudiv { - break - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpARMMOVWconst { - break - } - c := v_0_1.AuxInt - if !(isPowerOfTwo(c)) { + if v.AuxInt != 4 { break } - v.reset(OpARMANDconst) - v.AuxInt = c - 1 - v.AddArg(x) + _ = v.Args[2] + dst := v.Args[0] + src := v.Args[1] + mem := v.Args[2] + v.reset(OpARMMOVBstore) + v.AuxInt = 3 + v.AddArg(dst) + v0 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) + v0.AuxInt = 3 + v0.AddArg(src) + v0.AddArg(mem) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) + v1.AuxInt = 2 + v1.AddArg(dst) + v2 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) + v2.AuxInt = 2 + v2.AddArg(src) + v2.AddArg(mem) + v1.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) + v3.AuxInt = 1 + v3.AddArg(dst) + v4 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) + v4.AuxInt = 1 + v4.AddArg(src) + v4.AddArg(mem) + v3.AddArg(v4) + v5 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) + v5.AddArg(dst) + v6 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) + v6.AddArg(src) + v6.AddArg(mem) + v5.AddArg(v6) + 
v5.AddArg(mem) + v3.AddArg(v5) + v1.AddArg(v3) + v.AddArg(v1) return true } - // match: (Select1 (CALLudiv (MOVWconst [c]) (MOVWconst [d]))) + // match: (Move [3] dst src mem) // cond: - // result: (MOVWconst [int64(uint32(c)%uint32(d))]) + // result: (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem))) for { - v_0 := v.Args[0] - if v_0.Op != OpARMCALLudiv { + if v.AuxInt != 3 { break } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpARMMOVWconst { + _ = v.Args[2] + dst := v.Args[0] + src := v.Args[1] + mem := v.Args[2] + v.reset(OpARMMOVBstore) + v.AuxInt = 2 + v.AddArg(dst) + v0 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) + v0.AuxInt = 2 + v0.AddArg(src) + v0.AddArg(mem) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) + v1.AuxInt = 1 + v1.AddArg(dst) + v2 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) + v2.AuxInt = 1 + v2.AddArg(src) + v2.AddArg(mem) + v1.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) + v3.AddArg(dst) + v4 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) + v4.AddArg(src) + v4.AddArg(mem) + v3.AddArg(v4) + v3.AddArg(mem) + v1.AddArg(v3) + v.AddArg(v1) + return true + } + // match: (Move [s] {t} dst src mem) + // cond: s%4 == 0 && s > 4 && s <= 512 && t.(*types.Type).Alignment()%4 == 0 && !config.noDuffDevice + // result: (DUFFCOPY [8 * (128 - int64(s/4))] dst src mem) + for { + s := v.AuxInt + t := v.Aux + _ = v.Args[2] + dst := v.Args[0] + src := v.Args[1] + mem := v.Args[2] + if !(s%4 == 0 && s > 4 && s <= 512 && t.(*types.Type).Alignment()%4 == 0 && !config.noDuffDevice) { break } - c := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpARMMOVWconst { + v.reset(OpARMDUFFCOPY) + v.AuxInt = 8 * (128 - int64(s/4)) + v.AddArg(dst) + v.AddArg(src) + v.AddArg(mem) + return true + } + // match: (Move [s] {t} dst src mem) + // cond: (s > 512 || config.noDuffDevice) || t.(*types.Type).Alignment()%4 != 0 + // 
result: (LoweredMove [t.(*types.Type).Alignment()] dst src (ADDconst <src.Type> src [s-moveSize(t.(*types.Type).Alignment(), config)]) mem) + for { + s := v.AuxInt + t := v.Aux + _ = v.Args[2] + dst := v.Args[0] + src := v.Args[1] + mem := v.Args[2] + if !((s > 512 || config.noDuffDevice) || t.(*types.Type).Alignment()%4 != 0) { break } - d := v_0_1.AuxInt - v.reset(OpARMMOVWconst) - v.AuxInt = int64(uint32(c) % uint32(d)) + v.reset(OpARMLoweredMove) + v.AuxInt = t.(*types.Type).Alignment() + v.AddArg(dst) + v.AddArg(src) + v0 := b.NewValue0(v.Pos, OpARMADDconst, src.Type) + v0.AuxInt = s - moveSize(t.(*types.Type).Alignment(), config) + v0.AddArg(src) + v.AddArg(v0) + v.AddArg(mem) return true } return false } -func rewriteValueARM_OpSignExt16to32_0(v *Value) bool { - // match: (SignExt16to32 x) +func rewriteValueARM_OpMul16_0(v *Value) bool { + // match: (Mul16 x y) // cond: - // result: (MOVHreg x) + // result: (MUL x y) for { + _ = v.Args[1] x := v.Args[0] - v.reset(OpARMMOVHreg) + y := v.Args[1] + v.reset(OpARMMUL) v.AddArg(x) + v.AddArg(y) return true } } -func rewriteValueARM_OpSignExt8to16_0(v *Value) bool { - // match: (SignExt8to16 x) +func rewriteValueARM_OpMul32_0(v *Value) bool { + // match: (Mul32 x y) // cond: - // result: (MOVBreg x) + // result: (MUL x y) for { + _ = v.Args[1] x := v.Args[0] - v.reset(OpARMMOVBreg) + y := v.Args[1] + v.reset(OpARMMUL) v.AddArg(x) + v.AddArg(y) return true } } -func rewriteValueARM_OpSignExt8to32_0(v *Value) bool { - // match: (SignExt8to32 x) +func rewriteValueARM_OpMul32F_0(v *Value) bool { + // match: (Mul32F x y) // cond: - // result: (MOVBreg x) + // result: (MULF x y) for { + _ = v.Args[1] x := v.Args[0] - v.reset(OpARMMOVBreg) + y := v.Args[1] + v.reset(OpARMMULF) v.AddArg(x) + v.AddArg(y) return true } } -func rewriteValueARM_OpSignmask_0(v *Value) bool { - // match: (Signmask x) +func rewriteValueARM_OpMul32uhilo_0(v *Value) bool { + // match: (Mul32uhilo x y) // cond: - // result: (SRAconst x [31]) + // 
result: (MULLU x y) for { + _ = v.Args[1] x := v.Args[0] - v.reset(OpARMSRAconst) - v.AuxInt = 31 + y := v.Args[1] + v.reset(OpARMMULLU) v.AddArg(x) + v.AddArg(y) return true } } -func rewriteValueARM_OpSlicemask_0(v *Value) bool { - b := v.Block - _ = b - // match: (Slicemask <t> x) +func rewriteValueARM_OpMul64F_0(v *Value) bool { + // match: (Mul64F x y) // cond: - // result: (SRAconst (RSBconst <t> [0] x) [31]) + // result: (MULD x y) for { - t := v.Type + _ = v.Args[1] x := v.Args[0] - v.reset(OpARMSRAconst) - v.AuxInt = 31 - v0 := b.NewValue0(v.Pos, OpARMRSBconst, t) - v0.AuxInt = 0 - v0.AddArg(x) - v.AddArg(v0) + y := v.Args[1] + v.reset(OpARMMULD) + v.AddArg(x) + v.AddArg(y) return true } } -func rewriteValueARM_OpSqrt_0(v *Value) bool { - // match: (Sqrt x) +func rewriteValueARM_OpMul8_0(v *Value) bool { + // match: (Mul8 x y) // cond: - // result: (SQRTD x) + // result: (MUL x y) for { + _ = v.Args[1] x := v.Args[0] - v.reset(OpARMSQRTD) + y := v.Args[1] + v.reset(OpARMMUL) v.AddArg(x) + v.AddArg(y) return true } } -func rewriteValueARM_OpStaticCall_0(v *Value) bool { - // match: (StaticCall [argwid] {target} mem) +func rewriteValueARM_OpNeg16_0(v *Value) bool { + // match: (Neg16 x) // cond: - // result: (CALLstatic [argwid] {target} mem) + // result: (RSBconst [0] x) for { - argwid := v.AuxInt - target := v.Aux - mem := v.Args[0] - v.reset(OpARMCALLstatic) - v.AuxInt = argwid - v.Aux = target - v.AddArg(mem) + x := v.Args[0] + v.reset(OpARMRSBconst) + v.AuxInt = 0 + v.AddArg(x) return true } } -func rewriteValueARM_OpStore_0(v *Value) bool { - // match: (Store {t} ptr val mem) - // cond: t.(*types.Type).Size() == 1 - // result: (MOVBstore ptr val mem) +func rewriteValueARM_OpNeg32_0(v *Value) bool { + // match: (Neg32 x) + // cond: + // result: (RSBconst [0] x) for { - t := v.Aux - _ = v.Args[2] - ptr := v.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(t.(*types.Type).Size() == 1) { - break - } - v.reset(OpARMMOVBstore) - v.AddArg(ptr) - 
v.AddArg(val) - v.AddArg(mem) + x := v.Args[0] + v.reset(OpARMRSBconst) + v.AuxInt = 0 + v.AddArg(x) return true } - // match: (Store {t} ptr val mem) - // cond: t.(*types.Type).Size() == 2 - // result: (MOVHstore ptr val mem) +} +func rewriteValueARM_OpNeg32F_0(v *Value) bool { + // match: (Neg32F x) + // cond: + // result: (NEGF x) for { - t := v.Aux - _ = v.Args[2] - ptr := v.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(t.(*types.Type).Size() == 2) { - break - } - v.reset(OpARMMOVHstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + x := v.Args[0] + v.reset(OpARMNEGF) + v.AddArg(x) return true } - // match: (Store {t} ptr val mem) - // cond: t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type) - // result: (MOVWstore ptr val mem) +} +func rewriteValueARM_OpNeg64F_0(v *Value) bool { + // match: (Neg64F x) + // cond: + // result: (NEGD x) for { - t := v.Aux - _ = v.Args[2] - ptr := v.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type)) { - break - } - v.reset(OpARMMOVWstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + x := v.Args[0] + v.reset(OpARMNEGD) + v.AddArg(x) return true } - // match: (Store {t} ptr val mem) - // cond: t.(*types.Type).Size() == 4 && is32BitFloat(val.Type) - // result: (MOVFstore ptr val mem) +} +func rewriteValueARM_OpNeg8_0(v *Value) bool { + // match: (Neg8 x) + // cond: + // result: (RSBconst [0] x) for { - t := v.Aux - _ = v.Args[2] - ptr := v.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)) { - break - } - v.reset(OpARMMOVFstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (Store {t} ptr val mem) - // cond: t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) - // result: (MOVDstore ptr val mem) - for { - t := v.Aux - _ = v.Args[2] - ptr := v.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)) { - break - } - 
v.reset(OpARMMOVDstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + x := v.Args[0] + v.reset(OpARMRSBconst) + v.AuxInt = 0 + v.AddArg(x) return true } - return false } -func rewriteValueARM_OpSub16_0(v *Value) bool { - // match: (Sub16 x y) +func rewriteValueARM_OpNeq16_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Neq16 x y) // cond: - // result: (SUB x y) + // result: (NotEqual (CMP (ZeroExt16to32 x) (ZeroExt16to32 y))) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMSUB) - v.AddArg(x) - v.AddArg(y) + v.reset(OpARMNotEqual) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) return true } } -func rewriteValueARM_OpSub32_0(v *Value) bool { - // match: (Sub32 x y) +func rewriteValueARM_OpNeq32_0(v *Value) bool { + b := v.Block + _ = b + // match: (Neq32 x y) // cond: - // result: (SUB x y) + // result: (NotEqual (CMP x y)) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMSUB) - v.AddArg(x) - v.AddArg(y) + v.reset(OpARMNotEqual) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) return true } } -func rewriteValueARM_OpSub32F_0(v *Value) bool { - // match: (Sub32F x y) +func rewriteValueARM_OpNeq32F_0(v *Value) bool { + b := v.Block + _ = b + // match: (Neq32F x y) // cond: - // result: (SUBF x y) + // result: (NotEqual (CMPF x y)) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMSUBF) - v.AddArg(x) - v.AddArg(y) + v.reset(OpARMNotEqual) + v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) return true } } -func rewriteValueARM_OpSub32carry_0(v *Value) bool { - // match: (Sub32carry x y) +func rewriteValueARM_OpNeq64F_0(v *Value) bool { + b := v.Block + _ = b + // match: (Neq64F x y) 
// cond: - // result: (SUBS x y) + // result: (NotEqual (CMPD x y)) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMSUBS) - v.AddArg(x) - v.AddArg(y) + v.reset(OpARMNotEqual) + v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) return true } } -func rewriteValueARM_OpSub32withcarry_0(v *Value) bool { - // match: (Sub32withcarry x y c) +func rewriteValueARM_OpNeq8_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Neq8 x y) // cond: - // result: (SBC x y c) + // result: (NotEqual (CMP (ZeroExt8to32 x) (ZeroExt8to32 y))) for { - _ = v.Args[2] + _ = v.Args[1] x := v.Args[0] y := v.Args[1] - c := v.Args[2] - v.reset(OpARMSBC) - v.AddArg(x) - v.AddArg(y) - v.AddArg(c) + v.reset(OpARMNotEqual) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) return true } } -func rewriteValueARM_OpSub64F_0(v *Value) bool { - // match: (Sub64F x y) +func rewriteValueARM_OpNeqB_0(v *Value) bool { + // match: (NeqB x y) // cond: - // result: (SUBD x y) + // result: (XOR x y) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMSUBD) + v.reset(OpARMXOR) v.AddArg(x) v.AddArg(y) return true } } -func rewriteValueARM_OpSub8_0(v *Value) bool { - // match: (Sub8 x y) +func rewriteValueARM_OpNeqPtr_0(v *Value) bool { + b := v.Block + _ = b + // match: (NeqPtr x y) // cond: - // result: (SUB x y) + // result: (NotEqual (CMP x y)) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMSUB) - v.AddArg(x) - v.AddArg(y) + v.reset(OpARMNotEqual) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) return true } } -func rewriteValueARM_OpSubPtr_0(v *Value) bool { - // match: (SubPtr x y) +func rewriteValueARM_OpNilCheck_0(v *Value) bool { + // 
match: (NilCheck ptr mem) // cond: - // result: (SUB x y) + // result: (LoweredNilCheck ptr mem) for { _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMSUB) - v.AddArg(x) - v.AddArg(y) + ptr := v.Args[0] + mem := v.Args[1] + v.reset(OpARMLoweredNilCheck) + v.AddArg(ptr) + v.AddArg(mem) return true } } -func rewriteValueARM_OpTrunc16to8_0(v *Value) bool { - // match: (Trunc16to8 x) +func rewriteValueARM_OpNot_0(v *Value) bool { + // match: (Not x) // cond: - // result: x + // result: (XORconst [1] x) for { x := v.Args[0] - v.reset(OpCopy) - v.Type = x.Type + v.reset(OpARMXORconst) + v.AuxInt = 1 v.AddArg(x) return true } } -func rewriteValueARM_OpTrunc32to16_0(v *Value) bool { - // match: (Trunc32to16 x) +func rewriteValueARM_OpOffPtr_0(v *Value) bool { + // match: (OffPtr [off] ptr:(SP)) // cond: - // result: x + // result: (MOVWaddr [off] ptr) for { - x := v.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + off := v.AuxInt + ptr := v.Args[0] + if ptr.Op != OpSP { + break + } + v.reset(OpARMMOVWaddr) + v.AuxInt = off + v.AddArg(ptr) return true } -} -func rewriteValueARM_OpTrunc32to8_0(v *Value) bool { - // match: (Trunc32to8 x) + // match: (OffPtr [off] ptr) // cond: - // result: x + // result: (ADDconst [off] ptr) for { - x := v.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + off := v.AuxInt + ptr := v.Args[0] + v.reset(OpARMADDconst) + v.AuxInt = off + v.AddArg(ptr) return true } } -func rewriteValueARM_OpXor16_0(v *Value) bool { - // match: (Xor16 x y) +func rewriteValueARM_OpOr16_0(v *Value) bool { + // match: (Or16 x y) // cond: - // result: (XOR x y) + // result: (OR x y) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMXOR) + v.reset(OpARMOR) v.AddArg(x) v.AddArg(y) return true } } -func rewriteValueARM_OpXor32_0(v *Value) bool { - // match: (Xor32 x y) +func rewriteValueARM_OpOr32_0(v *Value) bool { + // match: (Or32 x y) // cond: - // result: (XOR x y) + // result: (OR x y) for { _ = v.Args[1] x := 
v.Args[0] y := v.Args[1] - v.reset(OpARMXOR) + v.reset(OpARMOR) v.AddArg(x) v.AddArg(y) return true } } -func rewriteValueARM_OpXor8_0(v *Value) bool { - // match: (Xor8 x y) +func rewriteValueARM_OpOr8_0(v *Value) bool { + // match: (Or8 x y) // cond: - // result: (XOR x y) + // result: (OR x y) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARMXOR) + v.reset(OpARMOR) v.AddArg(x) v.AddArg(y) return true } } -func rewriteValueARM_OpZero_0(v *Value) bool { - b := v.Block - _ = b - config := b.Func.Config - _ = config - typ := &b.Func.Config.Types - _ = typ - // match: (Zero [0] _ mem) +func rewriteValueARM_OpOrB_0(v *Value) bool { + // match: (OrB x y) // cond: - // result: mem + // result: (OR x y) for { - if v.AuxInt != 0 { - break - } _ = v.Args[1] - mem := v.Args[1] - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMOR) + v.AddArg(x) + v.AddArg(y) return true } - // match: (Zero [1] ptr mem) +} +func rewriteValueARM_OpRound32F_0(v *Value) bool { + // match: (Round32F x) // cond: - // result: (MOVBstore ptr (MOVWconst [0]) mem) + // result: x for { - if v.AuxInt != 1 { - break - } - _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - v.reset(OpARMMOVBstore) - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) - v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + x := v.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) return true } - // match: (Zero [2] {t} ptr mem) - // cond: t.(*types.Type).Alignment()%2 == 0 - // result: (MOVHstore ptr (MOVWconst [0]) mem) +} +func rewriteValueARM_OpRound64F_0(v *Value) bool { + // match: (Round64F x) + // cond: + // result: x + for { + x := v.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } +} +func rewriteValueARM_OpRsh16Ux16_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Rsh16Ux16 x y) + // cond: + // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) 
(ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0]) for { - if v.AuxInt != 2 { - break - } - t := v.Aux _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - if !(t.(*types.Type).Alignment()%2 == 0) { - break - } - v.reset(OpARMMOVHstore) - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) - v0.AuxInt = 0 + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMCMOVWHSconst) + v.AuxInt = 0 + v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg(v2) v.AddArg(v0) - v.AddArg(mem) + v3 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v3.AuxInt = 256 + v4 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v4.AddArg(y) + v3.AddArg(v4) + v.AddArg(v3) return true } - // match: (Zero [2] ptr mem) +} +func rewriteValueARM_OpRsh16Ux32_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Rsh16Ux32 x y) // cond: - // result: (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem)) + // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) y) (CMPconst [256] y) [0]) for { - if v.AuxInt != 2 { - break - } _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - v.reset(OpARMMOVBstore) - v.AuxInt = 1 - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) - v0.AuxInt = 0 + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMCMOVWHSconst) + v.AuxInt = 0 + v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(x) + v0.AddArg(v1) + v0.AddArg(y) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) - v1.AuxInt = 0 - v1.AddArg(ptr) - v2 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) - v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v2.AuxInt = 256 + v2.AddArg(y) + v.AddArg(v2) return true 
} - // match: (Zero [4] {t} ptr mem) - // cond: t.(*types.Type).Alignment()%4 == 0 - // result: (MOVWstore ptr (MOVWconst [0]) mem) +} +func rewriteValueARM_OpRsh16Ux64_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Rsh16Ux64 x (Const64 [c])) + // cond: uint64(c) < 16 + // result: (SRLconst (SLLconst <typ.UInt32> x [16]) [c+16]) for { - if v.AuxInt != 4 { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst64 { break } - t := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - if !(t.(*types.Type).Alignment()%4 == 0) { + c := v_1.AuxInt + if !(uint64(c) < 16) { break } - v.reset(OpARMMOVWstore) - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) - v0.AuxInt = 0 + v.reset(OpARMSRLconst) + v.AuxInt = c + 16 + v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32) + v0.AuxInt = 16 + v0.AddArg(x) v.AddArg(v0) - v.AddArg(mem) return true } - // match: (Zero [4] {t} ptr mem) - // cond: t.(*types.Type).Alignment()%2 == 0 - // result: (MOVHstore [2] ptr (MOVWconst [0]) (MOVHstore [0] ptr (MOVWconst [0]) mem)) + // match: (Rsh16Ux64 _ (Const64 [c])) + // cond: uint64(c) >= 16 + // result: (Const16 [0]) for { - if v.AuxInt != 4 { + _ = v.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpConst64 { break } - t := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - if !(t.(*types.Type).Alignment()%2 == 0) { + c := v_1.AuxInt + if !(uint64(c) >= 16) { break } - v.reset(OpARMMOVHstore) - v.AuxInt = 2 - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) - v0.AuxInt = 0 - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARMMOVHstore, types.TypeMem) - v1.AuxInt = 0 - v1.AddArg(ptr) - v2 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) - v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v.reset(OpConst16) + v.AuxInt = 0 return true } - // match: (Zero [4] ptr mem) + return false +} +func rewriteValueARM_OpRsh16Ux8_0(v *Value) bool { + b := v.Block + _ = b + 
typ := &b.Func.Config.Types + _ = typ + // match: (Rsh16Ux8 x y) // cond: - // result: (MOVBstore [3] ptr (MOVWconst [0]) (MOVBstore [2] ptr (MOVWconst [0]) (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem)))) + // result: (SRL (ZeroExt16to32 x) (ZeroExt8to32 y)) for { - if v.AuxInt != 4 { - break - } _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - v.reset(OpARMMOVBstore) - v.AuxInt = 3 - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) - v0.AuxInt = 0 + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMSRL) + v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v0.AddArg(x) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) - v1.AuxInt = 2 - v1.AddArg(ptr) - v2 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) - v2.AuxInt = 0 - v1.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) - v3.AuxInt = 1 - v3.AddArg(ptr) - v4 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) - v4.AuxInt = 0 - v3.AddArg(v4) - v5 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) - v5.AuxInt = 0 - v5.AddArg(ptr) - v6 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) - v6.AuxInt = 0 - v5.AddArg(v6) - v5.AddArg(mem) - v3.AddArg(v5) - v1.AddArg(v3) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(y) v.AddArg(v1) return true } - // match: (Zero [3] ptr mem) +} +func rewriteValueARM_OpRsh16x16_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Rsh16x16 x y) // cond: - // result: (MOVBstore [2] ptr (MOVWconst [0]) (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem))) + // result: (SRAcond (SignExt16to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y))) for { - if v.AuxInt != 3 { - break - } _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - v.reset(OpARMMOVBstore) - v.AuxInt = 2 - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) - v0.AuxInt = 0 - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, 
OpARMMOVBstore, types.TypeMem) - v1.AuxInt = 1 - v1.AddArg(ptr) - v2 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) - v2.AuxInt = 0 - v1.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) - v3.AuxInt = 0 - v3.AddArg(ptr) - v4 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) - v4.AuxInt = 0 - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMSRAcond) + v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(y) v.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v2.AuxInt = 256 + v3 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg(v2) return true } - // match: (Zero [s] {t} ptr mem) - // cond: s%4 == 0 && s > 4 && s <= 512 && t.(*types.Type).Alignment()%4 == 0 && !config.noDuffDevice - // result: (DUFFZERO [4 * (128 - int64(s/4))] ptr (MOVWconst [0]) mem) +} +func rewriteValueARM_OpRsh16x32_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Rsh16x32 x y) + // cond: + // result: (SRAcond (SignExt16to32 x) y (CMPconst [256] y)) for { - s := v.AuxInt - t := v.Aux _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - if !(s%4 == 0 && s > 4 && s <= 512 && t.(*types.Type).Alignment()%4 == 0 && !config.noDuffDevice) { + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMSRAcond) + v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(y) + v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v1.AuxInt = 256 + v1.AddArg(y) + v.AddArg(v1) + return true + } +} +func rewriteValueARM_OpRsh16x64_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Rsh16x64 x (Const64 [c])) + // cond: uint64(c) < 16 + // result: (SRAconst (SLLconst <typ.UInt32> x [16]) [c+16]) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op 
!= OpConst64 { break } - v.reset(OpARMDUFFZERO) - v.AuxInt = 4 * (128 - int64(s/4)) - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) - v0.AuxInt = 0 + c := v_1.AuxInt + if !(uint64(c) < 16) { + break + } + v.reset(OpARMSRAconst) + v.AuxInt = c + 16 + v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32) + v0.AuxInt = 16 + v0.AddArg(x) v.AddArg(v0) - v.AddArg(mem) return true } - // match: (Zero [s] {t} ptr mem) - // cond: (s > 512 || config.noDuffDevice) || t.(*types.Type).Alignment()%4 != 0 - // result: (LoweredZero [t.(*types.Type).Alignment()] ptr (ADDconst <ptr.Type> ptr [s-moveSize(t.(*types.Type).Alignment(), config)]) (MOVWconst [0]) mem) + // match: (Rsh16x64 x (Const64 [c])) + // cond: uint64(c) >= 16 + // result: (SRAconst (SLLconst <typ.UInt32> x [16]) [31]) for { - s := v.AuxInt - t := v.Aux _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - if !((s > 512 || config.noDuffDevice) || t.(*types.Type).Alignment()%4 != 0) { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst64 { break } - v.reset(OpARMLoweredZero) - v.AuxInt = t.(*types.Type).Alignment() - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpARMADDconst, ptr.Type) - v0.AuxInt = s - moveSize(t.(*types.Type).Alignment(), config) - v0.AddArg(ptr) + c := v_1.AuxInt + if !(uint64(c) >= 16) { + break + } + v.reset(OpARMSRAconst) + v.AuxInt = 31 + v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32) + v0.AuxInt = 16 + v0.AddArg(x) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) - v1.AuxInt = 0 - v.AddArg(v1) - v.AddArg(mem) return true } return false } -func rewriteValueARM_OpZeroExt16to32_0(v *Value) bool { - // match: (ZeroExt16to32 x) +func rewriteValueARM_OpRsh16x8_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Rsh16x8 x y) // cond: - // result: (MOVHUreg x) + // result: (SRA (SignExt16to32 x) (ZeroExt8to32 y)) for { + _ = v.Args[1] x := v.Args[0] - v.reset(OpARMMOVHUreg) - v.AddArg(x) + y := v.Args[1] + 
v.reset(OpARMSRA) + v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(y) + v.AddArg(v1) return true } } -func rewriteValueARM_OpZeroExt8to16_0(v *Value) bool { - // match: (ZeroExt8to16 x) +func rewriteValueARM_OpRsh32Ux16_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Rsh32Ux16 x y) // cond: - // result: (MOVBUreg x) + // result: (CMOVWHSconst (SRL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0]) for { + _ = v.Args[1] x := v.Args[0] - v.reset(OpARMMOVBUreg) - v.AddArg(x) + y := v.Args[1] + v.reset(OpARMCMOVWHSconst) + v.AuxInt = 0 + v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(y) + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v2.AuxInt = 256 + v3 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg(v2) return true } } -func rewriteValueARM_OpZeroExt8to32_0(v *Value) bool { - // match: (ZeroExt8to32 x) +func rewriteValueARM_OpRsh32Ux32_0(v *Value) bool { + b := v.Block + _ = b + // match: (Rsh32Ux32 x y) // cond: - // result: (MOVBUreg x) + // result: (CMOVWHSconst (SRL <x.Type> x y) (CMPconst [256] y) [0]) for { + _ = v.Args[1] x := v.Args[0] - v.reset(OpARMMOVBUreg) + y := v.Args[1] + v.reset(OpARMCMOVWHSconst) + v.AuxInt = 0 + v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v1.AuxInt = 256 + v1.AddArg(y) + v.AddArg(v1) + return true + } +} +func rewriteValueARM_OpRsh32Ux64_0(v *Value) bool { + // match: (Rsh32Ux64 x (Const64 [c])) + // cond: uint64(c) < 32 + // result: (SRLconst x [c]) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst64 { + break + } + c := v_1.AuxInt + if !(uint64(c) < 32) { + break + 
} + v.reset(OpARMSRLconst) + v.AuxInt = c v.AddArg(x) return true } + // match: (Rsh32Ux64 _ (Const64 [c])) + // cond: uint64(c) >= 32 + // result: (Const32 [0]) + for { + _ = v.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpConst64 { + break + } + c := v_1.AuxInt + if !(uint64(c) >= 32) { + break + } + v.reset(OpConst32) + v.AuxInt = 0 + return true + } + return false } -func rewriteValueARM_OpZeromask_0(v *Value) bool { +func rewriteValueARM_OpRsh32Ux8_0(v *Value) bool { b := v.Block _ = b typ := &b.Func.Config.Types _ = typ - // match: (Zeromask x) + // match: (Rsh32Ux8 x y) // cond: - // result: (SRAconst (RSBshiftRL <typ.Int32> x x [1]) [31]) + // result: (SRL x (ZeroExt8to32 y)) for { + _ = v.Args[1] x := v.Args[0] - v.reset(OpARMSRAconst) - v.AuxInt = 31 - v0 := b.NewValue0(v.Pos, OpARMRSBshiftRL, typ.Int32) - v0.AuxInt = 1 - v0.AddArg(x) - v0.AddArg(x) + y := v.Args[1] + v.reset(OpARMSRL) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v0.AddArg(y) v.AddArg(v0) return true } } -func rewriteBlockARM(b *Block) bool { - config := b.Func.Config - _ = config - fe := b.Func.fe - _ = fe - typ := &config.Types +func rewriteValueARM_OpRsh32x16_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types _ = typ - switch b.Kind { + // match: (Rsh32x16 x y) + // cond: + // result: (SRAcond x (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y))) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMSRAcond) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v1.AuxInt = 256 + v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg(v1) + return true + } +} +func rewriteValueARM_OpRsh32x32_0(v *Value) bool { + b := v.Block + _ = b + // match: (Rsh32x32 x y) + // cond: + // result: (SRAcond x y (CMPconst [256] y)) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + 
v.reset(OpARMSRAcond) + v.AddArg(x) + v.AddArg(y) + v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v0.AuxInt = 256 + v0.AddArg(y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpRsh32x64_0(v *Value) bool { + // match: (Rsh32x64 x (Const64 [c])) + // cond: uint64(c) < 32 + // result: (SRAconst x [c]) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst64 { + break + } + c := v_1.AuxInt + if !(uint64(c) < 32) { + break + } + v.reset(OpARMSRAconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (Rsh32x64 x (Const64 [c])) + // cond: uint64(c) >= 32 + // result: (SRAconst x [31]) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst64 { + break + } + c := v_1.AuxInt + if !(uint64(c) >= 32) { + break + } + v.reset(OpARMSRAconst) + v.AuxInt = 31 + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpRsh32x8_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Rsh32x8 x y) + // cond: + // result: (SRA x (ZeroExt8to32 y)) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMSRA) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v0.AddArg(y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpRsh8Ux16_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Rsh8Ux16 x y) + // cond: + // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0]) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMCMOVWHSconst) + v.AuxInt = 0 + v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + v3 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v3.AuxInt = 256 + v4 := b.NewValue0(v.Pos, 
OpZeroExt16to32, typ.UInt32) + v4.AddArg(y) + v3.AddArg(v4) + v.AddArg(v3) + return true + } +} +func rewriteValueARM_OpRsh8Ux32_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Rsh8Ux32 x y) + // cond: + // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) y) (CMPconst [256] y) [0]) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMCMOVWHSconst) + v.AuxInt = 0 + v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(x) + v0.AddArg(v1) + v0.AddArg(y) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v2.AuxInt = 256 + v2.AddArg(y) + v.AddArg(v2) + return true + } +} +func rewriteValueARM_OpRsh8Ux64_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Rsh8Ux64 x (Const64 [c])) + // cond: uint64(c) < 8 + // result: (SRLconst (SLLconst <typ.UInt32> x [24]) [c+24]) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst64 { + break + } + c := v_1.AuxInt + if !(uint64(c) < 8) { + break + } + v.reset(OpARMSRLconst) + v.AuxInt = c + 24 + v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32) + v0.AuxInt = 24 + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (Rsh8Ux64 _ (Const64 [c])) + // cond: uint64(c) >= 8 + // result: (Const8 [0]) + for { + _ = v.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpConst64 { + break + } + c := v_1.AuxInt + if !(uint64(c) >= 8) { + break + } + v.reset(OpConst8) + v.AuxInt = 0 + return true + } + return false +} +func rewriteValueARM_OpRsh8Ux8_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Rsh8Ux8 x y) + // cond: + // result: (SRL (ZeroExt8to32 x) (ZeroExt8to32 y)) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMSRL) + v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, 
typ.UInt32) + v1.AddArg(y) + v.AddArg(v1) + return true + } +} +func rewriteValueARM_OpRsh8x16_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Rsh8x16 x y) + // cond: + // result: (SRAcond (SignExt8to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y))) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMSRAcond) + v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(y) + v.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v2.AuxInt = 256 + v3 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg(v2) + return true + } +} +func rewriteValueARM_OpRsh8x32_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Rsh8x32 x y) + // cond: + // result: (SRAcond (SignExt8to32 x) y (CMPconst [256] y)) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMSRAcond) + v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(y) + v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v1.AuxInt = 256 + v1.AddArg(y) + v.AddArg(v1) + return true + } +} +func rewriteValueARM_OpRsh8x64_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Rsh8x64 x (Const64 [c])) + // cond: uint64(c) < 8 + // result: (SRAconst (SLLconst <typ.UInt32> x [24]) [c+24]) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst64 { + break + } + c := v_1.AuxInt + if !(uint64(c) < 8) { + break + } + v.reset(OpARMSRAconst) + v.AuxInt = c + 24 + v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32) + v0.AuxInt = 24 + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (Rsh8x64 x (Const64 [c])) + // cond: uint64(c) >= 8 + // result: (SRAconst (SLLconst <typ.UInt32> x [24]) [31]) + for { + _ = v.Args[1] + x := v.Args[0] 
+ v_1 := v.Args[1] + if v_1.Op != OpConst64 { + break + } + c := v_1.AuxInt + if !(uint64(c) >= 8) { + break + } + v.reset(OpARMSRAconst) + v.AuxInt = 31 + v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32) + v0.AuxInt = 24 + v0.AddArg(x) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueARM_OpRsh8x8_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Rsh8x8 x y) + // cond: + // result: (SRA (SignExt8to32 x) (ZeroExt8to32 y)) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMSRA) + v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(y) + v.AddArg(v1) + return true + } +} +func rewriteValueARM_OpSelect0_0(v *Value) bool { + // match: (Select0 (CALLudiv x (MOVWconst [1]))) + // cond: + // result: x + for { + v_0 := v.Args[0] + if v_0.Op != OpARMCALLudiv { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARMMOVWconst { + break + } + if v_0_1.AuxInt != 1 { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (Select0 (CALLudiv x (MOVWconst [c]))) + // cond: isPowerOfTwo(c) + // result: (SRLconst [log2(c)] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMCALLudiv { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARMMOVWconst { + break + } + c := v_0_1.AuxInt + if !(isPowerOfTwo(c)) { + break + } + v.reset(OpARMSRLconst) + v.AuxInt = log2(c) + v.AddArg(x) + return true + } + // match: (Select0 (CALLudiv (MOVWconst [c]) (MOVWconst [d]))) + // cond: + // result: (MOVWconst [int64(uint32(c)/uint32(d))]) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMCALLudiv { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARMMOVWconst { + break + } + c := v_0_0.AuxInt + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARMMOVWconst { + break + } + d := v_0_1.AuxInt + 
v.reset(OpARMMOVWconst) + v.AuxInt = int64(uint32(c) / uint32(d)) + return true + } + return false +} +func rewriteValueARM_OpSelect1_0(v *Value) bool { + // match: (Select1 (CALLudiv _ (MOVWconst [1]))) + // cond: + // result: (MOVWconst [0]) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMCALLudiv { + break + } + _ = v_0.Args[1] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARMMOVWconst { + break + } + if v_0_1.AuxInt != 1 { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = 0 + return true + } + // match: (Select1 (CALLudiv x (MOVWconst [c]))) + // cond: isPowerOfTwo(c) + // result: (ANDconst [c-1] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMCALLudiv { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARMMOVWconst { + break + } + c := v_0_1.AuxInt + if !(isPowerOfTwo(c)) { + break + } + v.reset(OpARMANDconst) + v.AuxInt = c - 1 + v.AddArg(x) + return true + } + // match: (Select1 (CALLudiv (MOVWconst [c]) (MOVWconst [d]))) + // cond: + // result: (MOVWconst [int64(uint32(c)%uint32(d))]) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMCALLudiv { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARMMOVWconst { + break + } + c := v_0_0.AuxInt + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARMMOVWconst { + break + } + d := v_0_1.AuxInt + v.reset(OpARMMOVWconst) + v.AuxInt = int64(uint32(c) % uint32(d)) + return true + } + return false +} +func rewriteValueARM_OpSignExt16to32_0(v *Value) bool { + // match: (SignExt16to32 x) + // cond: + // result: (MOVHreg x) + for { + x := v.Args[0] + v.reset(OpARMMOVHreg) + v.AddArg(x) + return true + } +} +func rewriteValueARM_OpSignExt8to16_0(v *Value) bool { + // match: (SignExt8to16 x) + // cond: + // result: (MOVBreg x) + for { + x := v.Args[0] + v.reset(OpARMMOVBreg) + v.AddArg(x) + return true + } +} +func rewriteValueARM_OpSignExt8to32_0(v *Value) bool { + // match: (SignExt8to32 x) + // cond: + // result: (MOVBreg x) + for { + x := v.Args[0] + v.reset(OpARMMOVBreg) 
+ v.AddArg(x) + return true + } +} +func rewriteValueARM_OpSignmask_0(v *Value) bool { + // match: (Signmask x) + // cond: + // result: (SRAconst x [31]) + for { + x := v.Args[0] + v.reset(OpARMSRAconst) + v.AuxInt = 31 + v.AddArg(x) + return true + } +} +func rewriteValueARM_OpSlicemask_0(v *Value) bool { + b := v.Block + _ = b + // match: (Slicemask <t> x) + // cond: + // result: (SRAconst (RSBconst <t> [0] x) [31]) + for { + t := v.Type + x := v.Args[0] + v.reset(OpARMSRAconst) + v.AuxInt = 31 + v0 := b.NewValue0(v.Pos, OpARMRSBconst, t) + v0.AuxInt = 0 + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpSqrt_0(v *Value) bool { + // match: (Sqrt x) + // cond: + // result: (SQRTD x) + for { + x := v.Args[0] + v.reset(OpARMSQRTD) + v.AddArg(x) + return true + } +} +func rewriteValueARM_OpStaticCall_0(v *Value) bool { + // match: (StaticCall [argwid] {target} mem) + // cond: + // result: (CALLstatic [argwid] {target} mem) + for { + argwid := v.AuxInt + target := v.Aux + mem := v.Args[0] + v.reset(OpARMCALLstatic) + v.AuxInt = argwid + v.Aux = target + v.AddArg(mem) + return true + } +} +func rewriteValueARM_OpStore_0(v *Value) bool { + // match: (Store {t} ptr val mem) + // cond: t.(*types.Type).Size() == 1 + // result: (MOVBstore ptr val mem) + for { + t := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(t.(*types.Type).Size() == 1) { + break + } + v.reset(OpARMMOVBstore) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.(*types.Type).Size() == 2 + // result: (MOVHstore ptr val mem) + for { + t := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(t.(*types.Type).Size() == 2) { + break + } + v.reset(OpARMMOVHstore) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type) + // result: (MOVWstore 
ptr val mem) + for { + t := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type)) { + break + } + v.reset(OpARMMOVWstore) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.(*types.Type).Size() == 4 && is32BitFloat(val.Type) + // result: (MOVFstore ptr val mem) + for { + t := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)) { + break + } + v.reset(OpARMMOVFstore) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) + // result: (MOVDstore ptr val mem) + for { + t := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)) { + break + } + v.reset(OpARMMOVDstore) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueARM_OpSub16_0(v *Value) bool { + // match: (Sub16 x y) + // cond: + // result: (SUB x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMSUB) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM_OpSub32_0(v *Value) bool { + // match: (Sub32 x y) + // cond: + // result: (SUB x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMSUB) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM_OpSub32F_0(v *Value) bool { + // match: (Sub32F x y) + // cond: + // result: (SUBF x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMSUBF) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM_OpSub32carry_0(v *Value) bool { + // match: (Sub32carry x y) + // cond: + // result: (SUBS x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMSUBS) + 
v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM_OpSub32withcarry_0(v *Value) bool { + // match: (Sub32withcarry x y c) + // cond: + // result: (SBC x y c) + for { + _ = v.Args[2] + x := v.Args[0] + y := v.Args[1] + c := v.Args[2] + v.reset(OpARMSBC) + v.AddArg(x) + v.AddArg(y) + v.AddArg(c) + return true + } +} +func rewriteValueARM_OpSub64F_0(v *Value) bool { + // match: (Sub64F x y) + // cond: + // result: (SUBD x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMSUBD) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM_OpSub8_0(v *Value) bool { + // match: (Sub8 x y) + // cond: + // result: (SUB x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMSUB) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM_OpSubPtr_0(v *Value) bool { + // match: (SubPtr x y) + // cond: + // result: (SUB x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMSUB) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM_OpTrunc16to8_0(v *Value) bool { + // match: (Trunc16to8 x) + // cond: + // result: x + for { + x := v.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } +} +func rewriteValueARM_OpTrunc32to16_0(v *Value) bool { + // match: (Trunc32to16 x) + // cond: + // result: x + for { + x := v.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } +} +func rewriteValueARM_OpTrunc32to8_0(v *Value) bool { + // match: (Trunc32to8 x) + // cond: + // result: x + for { + x := v.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } +} +func rewriteValueARM_OpXor16_0(v *Value) bool { + // match: (Xor16 x y) + // cond: + // result: (XOR x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMXOR) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM_OpXor32_0(v *Value) bool { + // match: (Xor32 x y) + // cond: + // result: (XOR x y) + for { 
+ _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMXOR) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM_OpXor8_0(v *Value) bool { + // match: (Xor8 x y) + // cond: + // result: (XOR x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMXOR) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM_OpZero_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + typ := &b.Func.Config.Types + _ = typ + // match: (Zero [0] _ mem) + // cond: + // result: mem + for { + if v.AuxInt != 0 { + break + } + _ = v.Args[1] + mem := v.Args[1] + v.reset(OpCopy) + v.Type = mem.Type + v.AddArg(mem) + return true + } + // match: (Zero [1] ptr mem) + // cond: + // result: (MOVBstore ptr (MOVWconst [0]) mem) + for { + if v.AuxInt != 1 { + break + } + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + v.reset(OpARMMOVBstore) + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) + v0.AuxInt = 0 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (Zero [2] {t} ptr mem) + // cond: t.(*types.Type).Alignment()%2 == 0 + // result: (MOVHstore ptr (MOVWconst [0]) mem) + for { + if v.AuxInt != 2 { + break + } + t := v.Aux + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + if !(t.(*types.Type).Alignment()%2 == 0) { + break + } + v.reset(OpARMMOVHstore) + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) + v0.AuxInt = 0 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (Zero [2] ptr mem) + // cond: + // result: (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem)) + for { + if v.AuxInt != 2 { + break + } + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + v.reset(OpARMMOVBstore) + v.AuxInt = 1 + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) + v1.AuxInt = 0 + v1.AddArg(ptr) + v2 := b.NewValue0(v.Pos, 
OpARMMOVWconst, typ.UInt32) + v2.AuxInt = 0 + v1.AddArg(v2) + v1.AddArg(mem) + v.AddArg(v1) + return true + } + // match: (Zero [4] {t} ptr mem) + // cond: t.(*types.Type).Alignment()%4 == 0 + // result: (MOVWstore ptr (MOVWconst [0]) mem) + for { + if v.AuxInt != 4 { + break + } + t := v.Aux + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + if !(t.(*types.Type).Alignment()%4 == 0) { + break + } + v.reset(OpARMMOVWstore) + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) + v0.AuxInt = 0 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (Zero [4] {t} ptr mem) + // cond: t.(*types.Type).Alignment()%2 == 0 + // result: (MOVHstore [2] ptr (MOVWconst [0]) (MOVHstore [0] ptr (MOVWconst [0]) mem)) + for { + if v.AuxInt != 4 { + break + } + t := v.Aux + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + if !(t.(*types.Type).Alignment()%2 == 0) { + break + } + v.reset(OpARMMOVHstore) + v.AuxInt = 2 + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARMMOVHstore, types.TypeMem) + v1.AuxInt = 0 + v1.AddArg(ptr) + v2 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) + v2.AuxInt = 0 + v1.AddArg(v2) + v1.AddArg(mem) + v.AddArg(v1) + return true + } + // match: (Zero [4] ptr mem) + // cond: + // result: (MOVBstore [3] ptr (MOVWconst [0]) (MOVBstore [2] ptr (MOVWconst [0]) (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem)))) + for { + if v.AuxInt != 4 { + break + } + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + v.reset(OpARMMOVBstore) + v.AuxInt = 3 + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) + v1.AuxInt = 2 + v1.AddArg(ptr) + v2 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) + v2.AuxInt = 0 + v1.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) + v3.AuxInt = 1 + v3.AddArg(ptr) + v4 := 
b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) + v4.AuxInt = 0 + v3.AddArg(v4) + v5 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) + v5.AuxInt = 0 + v5.AddArg(ptr) + v6 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) + v6.AuxInt = 0 + v5.AddArg(v6) + v5.AddArg(mem) + v3.AddArg(v5) + v1.AddArg(v3) + v.AddArg(v1) + return true + } + // match: (Zero [3] ptr mem) + // cond: + // result: (MOVBstore [2] ptr (MOVWconst [0]) (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem))) + for { + if v.AuxInt != 3 { + break + } + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + v.reset(OpARMMOVBstore) + v.AuxInt = 2 + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) + v1.AuxInt = 1 + v1.AddArg(ptr) + v2 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) + v2.AuxInt = 0 + v1.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) + v3.AuxInt = 0 + v3.AddArg(ptr) + v4 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) + v4.AuxInt = 0 + v3.AddArg(v4) + v3.AddArg(mem) + v1.AddArg(v3) + v.AddArg(v1) + return true + } + // match: (Zero [s] {t} ptr mem) + // cond: s%4 == 0 && s > 4 && s <= 512 && t.(*types.Type).Alignment()%4 == 0 && !config.noDuffDevice + // result: (DUFFZERO [4 * (128 - int64(s/4))] ptr (MOVWconst [0]) mem) + for { + s := v.AuxInt + t := v.Aux + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + if !(s%4 == 0 && s > 4 && s <= 512 && t.(*types.Type).Alignment()%4 == 0 && !config.noDuffDevice) { + break + } + v.reset(OpARMDUFFZERO) + v.AuxInt = 4 * (128 - int64(s/4)) + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) + v0.AuxInt = 0 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (Zero [s] {t} ptr mem) + // cond: (s > 512 || config.noDuffDevice) || t.(*types.Type).Alignment()%4 != 0 + // result: (LoweredZero [t.(*types.Type).Alignment()] ptr (ADDconst <ptr.Type> ptr 
[s-moveSize(t.(*types.Type).Alignment(), config)]) (MOVWconst [0]) mem) + for { + s := v.AuxInt + t := v.Aux + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + if !((s > 512 || config.noDuffDevice) || t.(*types.Type).Alignment()%4 != 0) { + break + } + v.reset(OpARMLoweredZero) + v.AuxInt = t.(*types.Type).Alignment() + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpARMADDconst, ptr.Type) + v0.AuxInt = s - moveSize(t.(*types.Type).Alignment(), config) + v0.AddArg(ptr) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) + v1.AuxInt = 0 + v.AddArg(v1) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueARM_OpZeroExt16to32_0(v *Value) bool { + // match: (ZeroExt16to32 x) + // cond: + // result: (MOVHUreg x) + for { + x := v.Args[0] + v.reset(OpARMMOVHUreg) + v.AddArg(x) + return true + } +} +func rewriteValueARM_OpZeroExt8to16_0(v *Value) bool { + // match: (ZeroExt8to16 x) + // cond: + // result: (MOVBUreg x) + for { + x := v.Args[0] + v.reset(OpARMMOVBUreg) + v.AddArg(x) + return true + } +} +func rewriteValueARM_OpZeroExt8to32_0(v *Value) bool { + // match: (ZeroExt8to32 x) + // cond: + // result: (MOVBUreg x) + for { + x := v.Args[0] + v.reset(OpARMMOVBUreg) + v.AddArg(x) + return true + } +} +func rewriteValueARM_OpZeromask_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Zeromask x) + // cond: + // result: (SRAconst (RSBshiftRL <typ.Int32> x x [1]) [31]) + for { + x := v.Args[0] + v.reset(OpARMSRAconst) + v.AuxInt = 31 + v0 := b.NewValue0(v.Pos, OpARMRSBshiftRL, typ.Int32) + v0.AuxInt = 1 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteBlockARM(b *Block) bool { + config := b.Func.Config + _ = config + fe := b.Func.fe + _ = fe + typ := &config.Types + _ = typ + switch b.Kind { case BlockARMEQ: // match: (EQ (FlagEQ) yes no) // cond: @@ -20251,9 +22086,1290 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (EQ (FlagLT_ULT) 
yes no) + // match: (EQ (FlagLT_ULT) yes no) + // cond: + // result: (First nil no yes) + for { + v := b.Control + if v.Op != OpARMFlagLT_ULT { + break + } + b.Kind = BlockFirst + b.SetControl(nil) + b.Aux = nil + b.swapSuccessors() + return true + } + // match: (EQ (FlagLT_UGT) yes no) + // cond: + // result: (First nil no yes) + for { + v := b.Control + if v.Op != OpARMFlagLT_UGT { + break + } + b.Kind = BlockFirst + b.SetControl(nil) + b.Aux = nil + b.swapSuccessors() + return true + } + // match: (EQ (FlagGT_ULT) yes no) + // cond: + // result: (First nil no yes) + for { + v := b.Control + if v.Op != OpARMFlagGT_ULT { + break + } + b.Kind = BlockFirst + b.SetControl(nil) + b.Aux = nil + b.swapSuccessors() + return true + } + // match: (EQ (FlagGT_UGT) yes no) + // cond: + // result: (First nil no yes) + for { + v := b.Control + if v.Op != OpARMFlagGT_UGT { + break + } + b.Kind = BlockFirst + b.SetControl(nil) + b.Aux = nil + b.swapSuccessors() + return true + } + // match: (EQ (InvertFlags cmp) yes no) + // cond: + // result: (EQ cmp yes no) + for { + v := b.Control + if v.Op != OpARMInvertFlags { + break + } + cmp := v.Args[0] + b.Kind = BlockARMEQ + b.SetControl(cmp) + b.Aux = nil + return true + } + // match: (EQ (CMPconst [0] (SUB x y)) yes no) + // cond: + // result: (EQ (CMP x y) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMSUB { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + b.Kind = BlockARMEQ + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMPconst [0] (SUBconst [c] x)) yes no) + // cond: + // result: (EQ (CMPconst [c] x) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMSUBconst { + break + } + c := v_0.AuxInt + x := v_0.Args[0] + 
b.Kind = BlockARMEQ + v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMPconst [0] (SUBshiftLL x y [c])) yes no) + // cond: + // result: (EQ (CMPshiftLL x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMSUBshiftLL { + break + } + c := v_0.AuxInt + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + b.Kind = BlockARMEQ + v0 := b.NewValue0(v.Pos, OpARMCMPshiftLL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMPconst [0] (SUBshiftRL x y [c])) yes no) + // cond: + // result: (EQ (CMPshiftRL x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMSUBshiftRL { + break + } + c := v_0.AuxInt + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + b.Kind = BlockARMEQ + v0 := b.NewValue0(v.Pos, OpARMCMPshiftRL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMPconst [0] (SUBshiftRA x y [c])) yes no) + // cond: + // result: (EQ (CMPshiftRA x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMSUBshiftRA { + break + } + c := v_0.AuxInt + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + b.Kind = BlockARMEQ + v0 := b.NewValue0(v.Pos, OpARMCMPshiftRA, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMPconst [0] (SUBshiftLLreg x y z)) yes no) + // cond: + // result: (EQ (CMPshiftLLreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] 
+ if v_0.Op != OpARMSUBshiftLLreg { + break + } + _ = v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + z := v_0.Args[2] + b.Kind = BlockARMEQ + v0 := b.NewValue0(v.Pos, OpARMCMPshiftLLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMPconst [0] (SUBshiftRLreg x y z)) yes no) + // cond: + // result: (EQ (CMPshiftRLreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMSUBshiftRLreg { + break + } + _ = v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + z := v_0.Args[2] + b.Kind = BlockARMEQ + v0 := b.NewValue0(v.Pos, OpARMCMPshiftRLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMPconst [0] (SUBshiftRAreg x y z)) yes no) + // cond: + // result: (EQ (CMPshiftRAreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMSUBshiftRAreg { + break + } + _ = v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + z := v_0.Args[2] + b.Kind = BlockARMEQ + v0 := b.NewValue0(v.Pos, OpARMCMPshiftRAreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMPconst [0] (ADD x y)) yes no) + // cond: + // result: (EQ (CMN x y) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMADD { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + b.Kind = BlockARMEQ + v0 := b.NewValue0(v.Pos, OpARMCMN, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMPconst [0] (ADDconst [c] x)) yes no) + // cond: + // result: (EQ (CMNconst [c] x) yes no) + for { + v := b.Control + if v.Op != 
OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMADDconst { + break + } + c := v_0.AuxInt + x := v_0.Args[0] + b.Kind = BlockARMEQ + v0 := b.NewValue0(v.Pos, OpARMCMNconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMPconst [0] (ADDshiftLL x y [c])) yes no) + // cond: + // result: (EQ (CMNshiftLL x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMADDshiftLL { + break + } + c := v_0.AuxInt + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + b.Kind = BlockARMEQ + v0 := b.NewValue0(v.Pos, OpARMCMNshiftLL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMPconst [0] (ADDshiftRL x y [c])) yes no) + // cond: + // result: (EQ (CMNshiftRL x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMADDshiftRL { + break + } + c := v_0.AuxInt + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + b.Kind = BlockARMEQ + v0 := b.NewValue0(v.Pos, OpARMCMNshiftRL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMPconst [0] (ADDshiftRA x y [c])) yes no) + // cond: + // result: (EQ (CMNshiftRA x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMADDshiftRA { + break + } + c := v_0.AuxInt + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + b.Kind = BlockARMEQ + v0 := b.NewValue0(v.Pos, OpARMCMNshiftRA, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMPconst [0] (ADDshiftLLreg x y z)) yes no) + // cond: + 
// result: (EQ (CMNshiftLLreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMADDshiftLLreg { + break + } + _ = v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + z := v_0.Args[2] + b.Kind = BlockARMEQ + v0 := b.NewValue0(v.Pos, OpARMCMNshiftLLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMPconst [0] (ADDshiftRLreg x y z)) yes no) + // cond: + // result: (EQ (CMNshiftRLreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMADDshiftRLreg { + break + } + _ = v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + z := v_0.Args[2] + b.Kind = BlockARMEQ + v0 := b.NewValue0(v.Pos, OpARMCMNshiftRLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMPconst [0] (ADDshiftRAreg x y z)) yes no) + // cond: + // result: (EQ (CMNshiftRAreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMADDshiftRAreg { + break + } + _ = v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + z := v_0.Args[2] + b.Kind = BlockARMEQ + v0 := b.NewValue0(v.Pos, OpARMCMNshiftRAreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMPconst [0] (AND x y)) yes no) + // cond: + // result: (EQ (TST x y) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMAND { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + b.Kind = BlockARMEQ + v0 := b.NewValue0(v.Pos, OpARMTST, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + 
return true + } + // match: (EQ (CMPconst [0] (ANDconst [c] x)) yes no) + // cond: + // result: (EQ (TSTconst [c] x) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMANDconst { + break + } + c := v_0.AuxInt + x := v_0.Args[0] + b.Kind = BlockARMEQ + v0 := b.NewValue0(v.Pos, OpARMTSTconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMPconst [0] (ANDshiftLL x y [c])) yes no) + // cond: + // result: (EQ (TSTshiftLL x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMANDshiftLL { + break + } + c := v_0.AuxInt + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + b.Kind = BlockARMEQ + v0 := b.NewValue0(v.Pos, OpARMTSTshiftLL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMPconst [0] (ANDshiftRL x y [c])) yes no) + // cond: + // result: (EQ (TSTshiftRL x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMANDshiftRL { + break + } + c := v_0.AuxInt + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + b.Kind = BlockARMEQ + v0 := b.NewValue0(v.Pos, OpARMTSTshiftRL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMPconst [0] (ANDshiftRA x y [c])) yes no) + // cond: + // result: (EQ (TSTshiftRA x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMANDshiftRA { + break + } + c := v_0.AuxInt + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + b.Kind = BlockARMEQ + v0 := b.NewValue0(v.Pos, OpARMTSTshiftRA, types.TypeFlags) + 
v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMPconst [0] (ANDshiftLLreg x y z)) yes no) + // cond: + // result: (EQ (TSTshiftLLreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMANDshiftLLreg { + break + } + _ = v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + z := v_0.Args[2] + b.Kind = BlockARMEQ + v0 := b.NewValue0(v.Pos, OpARMTSTshiftLLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMPconst [0] (ANDshiftRLreg x y z)) yes no) + // cond: + // result: (EQ (TSTshiftRLreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMANDshiftRLreg { + break + } + _ = v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + z := v_0.Args[2] + b.Kind = BlockARMEQ + v0 := b.NewValue0(v.Pos, OpARMTSTshiftRLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMPconst [0] (ANDshiftRAreg x y z)) yes no) + // cond: + // result: (EQ (TSTshiftRAreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMANDshiftRAreg { + break + } + _ = v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + z := v_0.Args[2] + b.Kind = BlockARMEQ + v0 := b.NewValue0(v.Pos, OpARMTSTshiftRAreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMPconst [0] (XOR x y)) yes no) + // cond: + // result: (EQ (TEQ x y) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMXOR { + break + } + _ = v_0.Args[1] + x := 
v_0.Args[0] + y := v_0.Args[1] + b.Kind = BlockARMEQ + v0 := b.NewValue0(v.Pos, OpARMTEQ, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMPconst [0] (XORconst [c] x)) yes no) + // cond: + // result: (EQ (TEQconst [c] x) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMXORconst { + break + } + c := v_0.AuxInt + x := v_0.Args[0] + b.Kind = BlockARMEQ + v0 := b.NewValue0(v.Pos, OpARMTEQconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMPconst [0] (XORshiftLL x y [c])) yes no) + // cond: + // result: (EQ (TEQshiftLL x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMXORshiftLL { + break + } + c := v_0.AuxInt + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + b.Kind = BlockARMEQ + v0 := b.NewValue0(v.Pos, OpARMTEQshiftLL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMPconst [0] (XORshiftRL x y [c])) yes no) + // cond: + // result: (EQ (TEQshiftRL x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMXORshiftRL { + break + } + c := v_0.AuxInt + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + b.Kind = BlockARMEQ + v0 := b.NewValue0(v.Pos, OpARMTEQshiftRL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMPconst [0] (XORshiftRA x y [c])) yes no) + // cond: + // result: (EQ (TEQshiftRA x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMXORshiftRA { + 
break + } + c := v_0.AuxInt + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + b.Kind = BlockARMEQ + v0 := b.NewValue0(v.Pos, OpARMTEQshiftRA, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMPconst [0] (XORshiftLLreg x y z)) yes no) + // cond: + // result: (EQ (TEQshiftLLreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMXORshiftLLreg { + break + } + _ = v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + z := v_0.Args[2] + b.Kind = BlockARMEQ + v0 := b.NewValue0(v.Pos, OpARMTEQshiftLLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMPconst [0] (XORshiftRLreg x y z)) yes no) + // cond: + // result: (EQ (TEQshiftRLreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMXORshiftRLreg { + break + } + _ = v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + z := v_0.Args[2] + b.Kind = BlockARMEQ + v0 := b.NewValue0(v.Pos, OpARMTEQshiftRLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMPconst [0] (XORshiftRAreg x y z)) yes no) + // cond: + // result: (EQ (TEQshiftRAreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMXORshiftRAreg { + break + } + _ = v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + z := v_0.Args[2] + b.Kind = BlockARMEQ + v0 := b.NewValue0(v.Pos, OpARMTEQshiftRAreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + case BlockARMGE: + // match: (GE (FlagEQ) yes no) + // cond: + // result: (First nil yes no) + for { + v := 
b.Control + if v.Op != OpARMFlagEQ { + break + } + b.Kind = BlockFirst + b.SetControl(nil) + b.Aux = nil + return true + } + // match: (GE (FlagLT_ULT) yes no) + // cond: + // result: (First nil no yes) + for { + v := b.Control + if v.Op != OpARMFlagLT_ULT { + break + } + b.Kind = BlockFirst + b.SetControl(nil) + b.Aux = nil + b.swapSuccessors() + return true + } + // match: (GE (FlagLT_UGT) yes no) + // cond: + // result: (First nil no yes) + for { + v := b.Control + if v.Op != OpARMFlagLT_UGT { + break + } + b.Kind = BlockFirst + b.SetControl(nil) + b.Aux = nil + b.swapSuccessors() + return true + } + // match: (GE (FlagGT_ULT) yes no) + // cond: + // result: (First nil yes no) + for { + v := b.Control + if v.Op != OpARMFlagGT_ULT { + break + } + b.Kind = BlockFirst + b.SetControl(nil) + b.Aux = nil + return true + } + // match: (GE (FlagGT_UGT) yes no) + // cond: + // result: (First nil yes no) + for { + v := b.Control + if v.Op != OpARMFlagGT_UGT { + break + } + b.Kind = BlockFirst + b.SetControl(nil) + b.Aux = nil + return true + } + // match: (GE (InvertFlags cmp) yes no) + // cond: + // result: (LE cmp yes no) + for { + v := b.Control + if v.Op != OpARMInvertFlags { + break + } + cmp := v.Args[0] + b.Kind = BlockARMLE + b.SetControl(cmp) + b.Aux = nil + return true + } + case BlockARMGT: + // match: (GT (FlagEQ) yes no) + // cond: + // result: (First nil no yes) + for { + v := b.Control + if v.Op != OpARMFlagEQ { + break + } + b.Kind = BlockFirst + b.SetControl(nil) + b.Aux = nil + b.swapSuccessors() + return true + } + // match: (GT (FlagLT_ULT) yes no) + // cond: + // result: (First nil no yes) + for { + v := b.Control + if v.Op != OpARMFlagLT_ULT { + break + } + b.Kind = BlockFirst + b.SetControl(nil) + b.Aux = nil + b.swapSuccessors() + return true + } + // match: (GT (FlagLT_UGT) yes no) + // cond: + // result: (First nil no yes) + for { + v := b.Control + if v.Op != OpARMFlagLT_UGT { + break + } + b.Kind = BlockFirst + b.SetControl(nil) + b.Aux = nil + 
b.swapSuccessors() + return true + } + // match: (GT (FlagGT_ULT) yes no) + // cond: + // result: (First nil yes no) + for { + v := b.Control + if v.Op != OpARMFlagGT_ULT { + break + } + b.Kind = BlockFirst + b.SetControl(nil) + b.Aux = nil + return true + } + // match: (GT (FlagGT_UGT) yes no) + // cond: + // result: (First nil yes no) + for { + v := b.Control + if v.Op != OpARMFlagGT_UGT { + break + } + b.Kind = BlockFirst + b.SetControl(nil) + b.Aux = nil + return true + } + // match: (GT (InvertFlags cmp) yes no) + // cond: + // result: (LT cmp yes no) + for { + v := b.Control + if v.Op != OpARMInvertFlags { + break + } + cmp := v.Args[0] + b.Kind = BlockARMLT + b.SetControl(cmp) + b.Aux = nil + return true + } + case BlockIf: + // match: (If (Equal cc) yes no) + // cond: + // result: (EQ cc yes no) + for { + v := b.Control + if v.Op != OpARMEqual { + break + } + cc := v.Args[0] + b.Kind = BlockARMEQ + b.SetControl(cc) + b.Aux = nil + return true + } + // match: (If (NotEqual cc) yes no) + // cond: + // result: (NE cc yes no) + for { + v := b.Control + if v.Op != OpARMNotEqual { + break + } + cc := v.Args[0] + b.Kind = BlockARMNE + b.SetControl(cc) + b.Aux = nil + return true + } + // match: (If (LessThan cc) yes no) + // cond: + // result: (LT cc yes no) + for { + v := b.Control + if v.Op != OpARMLessThan { + break + } + cc := v.Args[0] + b.Kind = BlockARMLT + b.SetControl(cc) + b.Aux = nil + return true + } + // match: (If (LessThanU cc) yes no) + // cond: + // result: (ULT cc yes no) + for { + v := b.Control + if v.Op != OpARMLessThanU { + break + } + cc := v.Args[0] + b.Kind = BlockARMULT + b.SetControl(cc) + b.Aux = nil + return true + } + // match: (If (LessEqual cc) yes no) + // cond: + // result: (LE cc yes no) + for { + v := b.Control + if v.Op != OpARMLessEqual { + break + } + cc := v.Args[0] + b.Kind = BlockARMLE + b.SetControl(cc) + b.Aux = nil + return true + } + // match: (If (LessEqualU cc) yes no) + // cond: + // result: (ULE cc yes no) + for { 
+ v := b.Control + if v.Op != OpARMLessEqualU { + break + } + cc := v.Args[0] + b.Kind = BlockARMULE + b.SetControl(cc) + b.Aux = nil + return true + } + // match: (If (GreaterThan cc) yes no) + // cond: + // result: (GT cc yes no) + for { + v := b.Control + if v.Op != OpARMGreaterThan { + break + } + cc := v.Args[0] + b.Kind = BlockARMGT + b.SetControl(cc) + b.Aux = nil + return true + } + // match: (If (GreaterThanU cc) yes no) + // cond: + // result: (UGT cc yes no) + for { + v := b.Control + if v.Op != OpARMGreaterThanU { + break + } + cc := v.Args[0] + b.Kind = BlockARMUGT + b.SetControl(cc) + b.Aux = nil + return true + } + // match: (If (GreaterEqual cc) yes no) + // cond: + // result: (GE cc yes no) + for { + v := b.Control + if v.Op != OpARMGreaterEqual { + break + } + cc := v.Args[0] + b.Kind = BlockARMGE + b.SetControl(cc) + b.Aux = nil + return true + } + // match: (If (GreaterEqualU cc) yes no) + // cond: + // result: (UGE cc yes no) + for { + v := b.Control + if v.Op != OpARMGreaterEqualU { + break + } + cc := v.Args[0] + b.Kind = BlockARMUGE + b.SetControl(cc) + b.Aux = nil + return true + } + // match: (If cond yes no) + // cond: + // result: (NE (CMPconst [0] cond) yes no) + for { + v := b.Control + _ = v + cond := b.Control + b.Kind = BlockARMNE + v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v0.AuxInt = 0 + v0.AddArg(cond) + b.SetControl(v0) + b.Aux = nil + return true + } + case BlockARMLE: + // match: (LE (FlagEQ) yes no) + // cond: + // result: (First nil yes no) + for { + v := b.Control + if v.Op != OpARMFlagEQ { + break + } + b.Kind = BlockFirst + b.SetControl(nil) + b.Aux = nil + return true + } + // match: (LE (FlagLT_ULT) yes no) // cond: - // result: (First nil no yes) + // result: (First nil yes no) for { v := b.Control if v.Op != OpARMFlagLT_ULT { @@ -20262,12 +23378,11 @@ func rewriteBlockARM(b *Block) bool { b.Kind = BlockFirst b.SetControl(nil) b.Aux = nil - b.swapSuccessors() return true } - // match: (EQ (FlagLT_UGT) 
yes no) + // match: (LE (FlagLT_UGT) yes no) // cond: - // result: (First nil no yes) + // result: (First nil yes no) for { v := b.Control if v.Op != OpARMFlagLT_UGT { @@ -20276,10 +23391,9 @@ func rewriteBlockARM(b *Block) bool { b.Kind = BlockFirst b.SetControl(nil) b.Aux = nil - b.swapSuccessors() return true } - // match: (EQ (FlagGT_ULT) yes no) + // match: (LE (FlagGT_ULT) yes no) // cond: // result: (First nil no yes) for { @@ -20293,7 +23407,7 @@ func rewriteBlockARM(b *Block) bool { b.swapSuccessors() return true } - // match: (EQ (FlagGT_UGT) yes no) + // match: (LE (FlagGT_UGT) yes no) // cond: // result: (First nil no yes) for { @@ -20307,24 +23421,24 @@ func rewriteBlockARM(b *Block) bool { b.swapSuccessors() return true } - // match: (EQ (InvertFlags cmp) yes no) + // match: (LE (InvertFlags cmp) yes no) // cond: - // result: (EQ cmp yes no) + // result: (GE cmp yes no) for { v := b.Control if v.Op != OpARMInvertFlags { break } cmp := v.Args[0] - b.Kind = BlockARMEQ + b.Kind = BlockARMGE b.SetControl(cmp) b.Aux = nil return true } - case BlockARMGE: - // match: (GE (FlagEQ) yes no) + case BlockARMLT: + // match: (LT (FlagEQ) yes no) // cond: - // result: (First nil yes no) + // result: (First nil no yes) for { v := b.Control if v.Op != OpARMFlagEQ { @@ -20333,11 +23447,12 @@ func rewriteBlockARM(b *Block) bool { b.Kind = BlockFirst b.SetControl(nil) b.Aux = nil + b.swapSuccessors() return true } - // match: (GE (FlagLT_ULT) yes no) + // match: (LT (FlagLT_ULT) yes no) // cond: - // result: (First nil no yes) + // result: (First nil yes no) for { v := b.Control if v.Op != OpARMFlagLT_ULT { @@ -20346,12 +23461,11 @@ func rewriteBlockARM(b *Block) bool { b.Kind = BlockFirst b.SetControl(nil) b.Aux = nil - b.swapSuccessors() return true } - // match: (GE (FlagLT_UGT) yes no) + // match: (LT (FlagLT_UGT) yes no) // cond: - // result: (First nil no yes) + // result: (First nil yes no) for { v := b.Control if v.Op != OpARMFlagLT_UGT { @@ -20360,12 +23474,11 
@@ func rewriteBlockARM(b *Block) bool { b.Kind = BlockFirst b.SetControl(nil) b.Aux = nil - b.swapSuccessors() return true } - // match: (GE (FlagGT_ULT) yes no) + // match: (LT (FlagGT_ULT) yes no) // cond: - // result: (First nil yes no) + // result: (First nil no yes) for { v := b.Control if v.Op != OpARMFlagGT_ULT { @@ -20374,11 +23487,12 @@ func rewriteBlockARM(b *Block) bool { b.Kind = BlockFirst b.SetControl(nil) b.Aux = nil + b.swapSuccessors() return true } - // match: (GE (FlagGT_UGT) yes no) + // match: (LT (FlagGT_UGT) yes no) // cond: - // result: (First nil yes no) + // result: (First nil no yes) for { v := b.Control if v.Op != OpARMFlagGT_UGT { @@ -20387,24 +23501,235 @@ func rewriteBlockARM(b *Block) bool { b.Kind = BlockFirst b.SetControl(nil) b.Aux = nil + b.swapSuccessors() return true } - // match: (GE (InvertFlags cmp) yes no) + // match: (LT (InvertFlags cmp) yes no) + // cond: + // result: (GT cmp yes no) + for { + v := b.Control + if v.Op != OpARMInvertFlags { + break + } + cmp := v.Args[0] + b.Kind = BlockARMGT + b.SetControl(cmp) + b.Aux = nil + return true + } + case BlockARMNE: + // match: (NE (CMPconst [0] (Equal cc)) yes no) + // cond: + // result: (EQ cc yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMEqual { + break + } + cc := v_0.Args[0] + b.Kind = BlockARMEQ + b.SetControl(cc) + b.Aux = nil + return true + } + // match: (NE (CMPconst [0] (NotEqual cc)) yes no) + // cond: + // result: (NE cc yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMNotEqual { + break + } + cc := v_0.Args[0] + b.Kind = BlockARMNE + b.SetControl(cc) + b.Aux = nil + return true + } + // match: (NE (CMPconst [0] (LessThan cc)) yes no) + // cond: + // result: (LT cc yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + 
} + v_0 := v.Args[0] + if v_0.Op != OpARMLessThan { + break + } + cc := v_0.Args[0] + b.Kind = BlockARMLT + b.SetControl(cc) + b.Aux = nil + return true + } + // match: (NE (CMPconst [0] (LessThanU cc)) yes no) + // cond: + // result: (ULT cc yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMLessThanU { + break + } + cc := v_0.Args[0] + b.Kind = BlockARMULT + b.SetControl(cc) + b.Aux = nil + return true + } + // match: (NE (CMPconst [0] (LessEqual cc)) yes no) + // cond: + // result: (LE cc yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMLessEqual { + break + } + cc := v_0.Args[0] + b.Kind = BlockARMLE + b.SetControl(cc) + b.Aux = nil + return true + } + // match: (NE (CMPconst [0] (LessEqualU cc)) yes no) + // cond: + // result: (ULE cc yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMLessEqualU { + break + } + cc := v_0.Args[0] + b.Kind = BlockARMULE + b.SetControl(cc) + b.Aux = nil + return true + } + // match: (NE (CMPconst [0] (GreaterThan cc)) yes no) + // cond: + // result: (GT cc yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMGreaterThan { + break + } + cc := v_0.Args[0] + b.Kind = BlockARMGT + b.SetControl(cc) + b.Aux = nil + return true + } + // match: (NE (CMPconst [0] (GreaterThanU cc)) yes no) + // cond: + // result: (UGT cc yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMGreaterThanU { + break + } + cc := v_0.Args[0] + b.Kind = BlockARMUGT + b.SetControl(cc) + b.Aux = nil + return true + } + // match: (NE (CMPconst [0] (GreaterEqual cc)) yes no) + // cond: + // result: (GE 
cc yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMGreaterEqual { + break + } + cc := v_0.Args[0] + b.Kind = BlockARMGE + b.SetControl(cc) + b.Aux = nil + return true + } + // match: (NE (CMPconst [0] (GreaterEqualU cc)) yes no) // cond: - // result: (LE cmp yes no) + // result: (UGE cc yes no) for { v := b.Control - if v.Op != OpARMInvertFlags { + if v.Op != OpARMCMPconst { break } - cmp := v.Args[0] - b.Kind = BlockARMLE - b.SetControl(cmp) + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMGreaterEqualU { + break + } + cc := v_0.Args[0] + b.Kind = BlockARMUGE + b.SetControl(cc) b.Aux = nil return true } - case BlockARMGT: - // match: (GT (FlagEQ) yes no) + // match: (NE (FlagEQ) yes no) // cond: // result: (First nil no yes) for { @@ -20418,9 +23743,9 @@ func rewriteBlockARM(b *Block) bool { b.swapSuccessors() return true } - // match: (GT (FlagLT_ULT) yes no) + // match: (NE (FlagLT_ULT) yes no) // cond: - // result: (First nil no yes) + // result: (First nil yes no) for { v := b.Control if v.Op != OpARMFlagLT_ULT { @@ -20429,12 +23754,11 @@ func rewriteBlockARM(b *Block) bool { b.Kind = BlockFirst b.SetControl(nil) b.Aux = nil - b.swapSuccessors() return true } - // match: (GT (FlagLT_UGT) yes no) + // match: (NE (FlagLT_UGT) yes no) // cond: - // result: (First nil no yes) + // result: (First nil yes no) for { v := b.Control if v.Op != OpARMFlagLT_UGT { @@ -20443,10 +23767,9 @@ func rewriteBlockARM(b *Block) bool { b.Kind = BlockFirst b.SetControl(nil) b.Aux = nil - b.swapSuccessors() return true } - // match: (GT (FlagGT_ULT) yes no) + // match: (NE (FlagGT_ULT) yes no) // cond: // result: (First nil yes no) for { @@ -20459,7 +23782,7 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (GT (FlagGT_UGT) yes no) + // match: (NE (FlagGT_UGT) yes no) // cond: // result: (First nil yes no) for { @@ -20472,345 +23795,461 
@@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (GT (InvertFlags cmp) yes no) + // match: (NE (InvertFlags cmp) yes no) // cond: - // result: (LT cmp yes no) + // result: (NE cmp yes no) for { v := b.Control if v.Op != OpARMInvertFlags { break } cmp := v.Args[0] - b.Kind = BlockARMLT + b.Kind = BlockARMNE b.SetControl(cmp) b.Aux = nil return true } - case BlockIf: - // match: (If (Equal cc) yes no) + // match: (NE (CMPconst [0] (SUB x y)) yes no) // cond: - // result: (EQ cc yes no) + // result: (NE (CMP x y) yes no) for { v := b.Control - if v.Op != OpARMEqual { + if v.Op != OpARMCMPconst { break } - cc := v.Args[0] - b.Kind = BlockARMEQ - b.SetControl(cc) - b.Aux = nil - return true - } - // match: (If (NotEqual cc) yes no) - // cond: - // result: (NE cc yes no) - for { - v := b.Control - if v.Op != OpARMNotEqual { + if v.AuxInt != 0 { break } - cc := v.Args[0] + v_0 := v.Args[0] + if v_0.Op != OpARMSUB { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] b.Kind = BlockARMNE - b.SetControl(cc) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) b.Aux = nil return true } - // match: (If (LessThan cc) yes no) + // match: (NE (CMPconst [0] (SUBconst [c] x)) yes no) // cond: - // result: (LT cc yes no) + // result: (NE (CMPconst [c] x) yes no) for { v := b.Control - if v.Op != OpARMLessThan { + if v.Op != OpARMCMPconst { break } - cc := v.Args[0] - b.Kind = BlockARMLT - b.SetControl(cc) - b.Aux = nil - return true - } - // match: (If (LessThanU cc) yes no) - // cond: - // result: (ULT cc yes no) - for { - v := b.Control - if v.Op != OpARMLessThanU { + if v.AuxInt != 0 { break } - cc := v.Args[0] - b.Kind = BlockARMULT - b.SetControl(cc) - b.Aux = nil - return true - } - // match: (If (LessEqual cc) yes no) - // cond: - // result: (LE cc yes no) - for { - v := b.Control - if v.Op != OpARMLessEqual { + v_0 := v.Args[0] + if v_0.Op != OpARMSUBconst { break } - cc := 
v.Args[0] - b.Kind = BlockARMLE - b.SetControl(cc) + c := v_0.AuxInt + x := v_0.Args[0] + b.Kind = BlockARMNE + v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + b.SetControl(v0) b.Aux = nil return true } - // match: (If (LessEqualU cc) yes no) + // match: (NE (CMPconst [0] (SUBshiftLL x y [c])) yes no) // cond: - // result: (ULE cc yes no) + // result: (NE (CMPshiftLL x y [c]) yes no) for { v := b.Control - if v.Op != OpARMLessEqualU { + if v.Op != OpARMCMPconst { break } - cc := v.Args[0] - b.Kind = BlockARMULE - b.SetControl(cc) - b.Aux = nil - return true - } - // match: (If (GreaterThan cc) yes no) - // cond: - // result: (GT cc yes no) - for { - v := b.Control - if v.Op != OpARMGreaterThan { + if v.AuxInt != 0 { break } - cc := v.Args[0] - b.Kind = BlockARMGT - b.SetControl(cc) - b.Aux = nil - return true - } - // match: (If (GreaterThanU cc) yes no) - // cond: - // result: (UGT cc yes no) - for { - v := b.Control - if v.Op != OpARMGreaterThanU { + v_0 := v.Args[0] + if v_0.Op != OpARMSUBshiftLL { break } - cc := v.Args[0] - b.Kind = BlockARMUGT - b.SetControl(cc) + c := v_0.AuxInt + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + b.Kind = BlockARMNE + v0 := b.NewValue0(v.Pos, OpARMCMPshiftLL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) b.Aux = nil return true } - // match: (If (GreaterEqual cc) yes no) + // match: (NE (CMPconst [0] (SUBshiftRL x y [c])) yes no) // cond: - // result: (GE cc yes no) + // result: (NE (CMPshiftRL x y [c]) yes no) for { v := b.Control - if v.Op != OpARMGreaterEqual { + if v.Op != OpARMCMPconst { break } - cc := v.Args[0] - b.Kind = BlockARMGE - b.SetControl(cc) - b.Aux = nil - return true - } - // match: (If (GreaterEqualU cc) yes no) - // cond: - // result: (UGE cc yes no) - for { - v := b.Control - if v.Op != OpARMGreaterEqualU { + if v.AuxInt != 0 { break } - cc := v.Args[0] - b.Kind = BlockARMUGE - b.SetControl(cc) - b.Aux = nil - return 
true - } - // match: (If cond yes no) - // cond: - // result: (NE (CMPconst [0] cond) yes no) - for { - v := b.Control - _ = v - cond := b.Control + v_0 := v.Args[0] + if v_0.Op != OpARMSUBshiftRL { + break + } + c := v_0.AuxInt + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] b.Kind = BlockARMNE - v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) - v0.AuxInt = 0 - v0.AddArg(cond) + v0 := b.NewValue0(v.Pos, OpARMCMPshiftRL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) b.SetControl(v0) b.Aux = nil return true } - case BlockARMLE: - // match: (LE (FlagEQ) yes no) + // match: (NE (CMPconst [0] (SUBshiftRA x y [c])) yes no) // cond: - // result: (First nil yes no) + // result: (NE (CMPshiftRA x y [c]) yes no) for { v := b.Control - if v.Op != OpARMFlagEQ { + if v.Op != OpARMCMPconst { break } - b.Kind = BlockFirst - b.SetControl(nil) + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMSUBshiftRA { + break + } + c := v_0.AuxInt + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + b.Kind = BlockARMNE + v0 := b.NewValue0(v.Pos, OpARMCMPshiftRA, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) b.Aux = nil return true } - // match: (LE (FlagLT_ULT) yes no) + // match: (NE (CMPconst [0] (SUBshiftLLreg x y z)) yes no) // cond: - // result: (First nil yes no) + // result: (NE (CMPshiftLLreg x y z) yes no) for { v := b.Control - if v.Op != OpARMFlagLT_ULT { + if v.Op != OpARMCMPconst { break } - b.Kind = BlockFirst - b.SetControl(nil) + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMSUBshiftLLreg { + break + } + _ = v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + z := v_0.Args[2] + b.Kind = BlockARMNE + v0 := b.NewValue0(v.Pos, OpARMCMPshiftLLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) b.Aux = nil return true } - // match: (LE (FlagLT_UGT) yes no) + // match: (NE (CMPconst [0] (SUBshiftRLreg x y z)) yes no) // cond: - // 
result: (First nil yes no) + // result: (NE (CMPshiftRLreg x y z) yes no) for { v := b.Control - if v.Op != OpARMFlagLT_UGT { + if v.Op != OpARMCMPconst { break } - b.Kind = BlockFirst - b.SetControl(nil) + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMSUBshiftRLreg { + break + } + _ = v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + z := v_0.Args[2] + b.Kind = BlockARMNE + v0 := b.NewValue0(v.Pos, OpARMCMPshiftRLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) b.Aux = nil return true } - // match: (LE (FlagGT_ULT) yes no) + // match: (NE (CMPconst [0] (SUBshiftRAreg x y z)) yes no) // cond: - // result: (First nil no yes) + // result: (NE (CMPshiftRAreg x y z) yes no) for { v := b.Control - if v.Op != OpARMFlagGT_ULT { + if v.Op != OpARMCMPconst { break } - b.Kind = BlockFirst - b.SetControl(nil) + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMSUBshiftRAreg { + break + } + _ = v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + z := v_0.Args[2] + b.Kind = BlockARMNE + v0 := b.NewValue0(v.Pos, OpARMCMPshiftRAreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) b.Aux = nil - b.swapSuccessors() return true } - // match: (LE (FlagGT_UGT) yes no) + // match: (NE (CMPconst [0] (ADD x y)) yes no) // cond: - // result: (First nil no yes) + // result: (NE (CMN x y) yes no) for { v := b.Control - if v.Op != OpARMFlagGT_UGT { + if v.Op != OpARMCMPconst { break } - b.Kind = BlockFirst - b.SetControl(nil) + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMADD { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + b.Kind = BlockARMNE + v0 := b.NewValue0(v.Pos, OpARMCMN, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) b.Aux = nil - b.swapSuccessors() return true } - // match: (LE (InvertFlags cmp) yes no) + // match: (NE (CMPconst [0] (ADDconst [c] x)) yes no) // cond: - // result: (GE cmp yes no) + // 
result: (NE (CMNconst [c] x) yes no) for { v := b.Control - if v.Op != OpARMInvertFlags { + if v.Op != OpARMCMPconst { break } - cmp := v.Args[0] - b.Kind = BlockARMGE - b.SetControl(cmp) + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMADDconst { + break + } + c := v_0.AuxInt + x := v_0.Args[0] + b.Kind = BlockARMNE + v0 := b.NewValue0(v.Pos, OpARMCMNconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + b.SetControl(v0) b.Aux = nil return true } - case BlockARMLT: - // match: (LT (FlagEQ) yes no) + // match: (NE (CMPconst [0] (ADDshiftLL x y [c])) yes no) // cond: - // result: (First nil no yes) + // result: (NE (CMNshiftLL x y [c]) yes no) for { v := b.Control - if v.Op != OpARMFlagEQ { + if v.Op != OpARMCMPconst { break } - b.Kind = BlockFirst - b.SetControl(nil) + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMADDshiftLL { + break + } + c := v_0.AuxInt + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + b.Kind = BlockARMNE + v0 := b.NewValue0(v.Pos, OpARMCMNshiftLL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) b.Aux = nil - b.swapSuccessors() return true } - // match: (LT (FlagLT_ULT) yes no) + // match: (NE (CMPconst [0] (ADDshiftRL x y [c])) yes no) // cond: - // result: (First nil yes no) + // result: (NE (CMNshiftRL x y [c]) yes no) for { v := b.Control - if v.Op != OpARMFlagLT_ULT { + if v.Op != OpARMCMPconst { break } - b.Kind = BlockFirst - b.SetControl(nil) + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMADDshiftRL { + break + } + c := v_0.AuxInt + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + b.Kind = BlockARMNE + v0 := b.NewValue0(v.Pos, OpARMCMNshiftRL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) b.Aux = nil return true } - // match: (LT (FlagLT_UGT) yes no) + // match: (NE (CMPconst [0] (ADDshiftRA x y [c])) yes no) // cond: - // result: (First nil yes no) + // result: (NE (CMNshiftRA x y 
[c]) yes no) for { v := b.Control - if v.Op != OpARMFlagLT_UGT { + if v.Op != OpARMCMPconst { break } - b.Kind = BlockFirst - b.SetControl(nil) + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMADDshiftRA { + break + } + c := v_0.AuxInt + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + b.Kind = BlockARMNE + v0 := b.NewValue0(v.Pos, OpARMCMNshiftRA, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) b.Aux = nil return true } - // match: (LT (FlagGT_ULT) yes no) + // match: (NE (CMPconst [0] (ADDshiftLLreg x y z)) yes no) // cond: - // result: (First nil no yes) + // result: (NE (CMNshiftLLreg x y z) yes no) for { v := b.Control - if v.Op != OpARMFlagGT_ULT { + if v.Op != OpARMCMPconst { break } - b.Kind = BlockFirst - b.SetControl(nil) + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMADDshiftLLreg { + break + } + _ = v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + z := v_0.Args[2] + b.Kind = BlockARMNE + v0 := b.NewValue0(v.Pos, OpARMCMNshiftLLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) b.Aux = nil - b.swapSuccessors() return true } - // match: (LT (FlagGT_UGT) yes no) + // match: (NE (CMPconst [0] (ADDshiftRLreg x y z)) yes no) // cond: - // result: (First nil no yes) + // result: (NE (CMNshiftRLreg x y z) yes no) for { v := b.Control - if v.Op != OpARMFlagGT_UGT { + if v.Op != OpARMCMPconst { break } - b.Kind = BlockFirst - b.SetControl(nil) + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMADDshiftRLreg { + break + } + _ = v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + z := v_0.Args[2] + b.Kind = BlockARMNE + v0 := b.NewValue0(v.Pos, OpARMCMNshiftRLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) b.Aux = nil - b.swapSuccessors() return true } - // match: (LT (InvertFlags cmp) yes no) + // match: (NE (CMPconst [0] (ADDshiftRAreg x y z)) yes no) // cond: - // result: (GT cmp 
yes no) + // result: (NE (CMNshiftRAreg x y z) yes no) for { v := b.Control - if v.Op != OpARMInvertFlags { + if v.Op != OpARMCMPconst { break } - cmp := v.Args[0] - b.Kind = BlockARMGT - b.SetControl(cmp) + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMADDshiftRAreg { + break + } + _ = v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + z := v_0.Args[2] + b.Kind = BlockARMNE + v0 := b.NewValue0(v.Pos, OpARMCMNshiftRAreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) b.Aux = nil return true } - case BlockARMNE: - // match: (NE (CMPconst [0] (Equal cc)) yes no) + // match: (NE (CMPconst [0] (AND x y)) yes no) // cond: - // result: (EQ cc yes no) + // result: (NE (TST x y) yes no) for { v := b.Control if v.Op != OpARMCMPconst { @@ -20820,18 +24259,23 @@ func rewriteBlockARM(b *Block) bool { break } v_0 := v.Args[0] - if v_0.Op != OpARMEqual { + if v_0.Op != OpARMAND { break } - cc := v_0.Args[0] - b.Kind = BlockARMEQ - b.SetControl(cc) + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + b.Kind = BlockARMNE + v0 := b.NewValue0(v.Pos, OpARMTST, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) b.Aux = nil return true } - // match: (NE (CMPconst [0] (NotEqual cc)) yes no) + // match: (NE (CMPconst [0] (ANDconst [c] x)) yes no) // cond: - // result: (NE cc yes no) + // result: (NE (TSTconst [c] x) yes no) for { v := b.Control if v.Op != OpARMCMPconst { @@ -20841,18 +24285,22 @@ func rewriteBlockARM(b *Block) bool { break } v_0 := v.Args[0] - if v_0.Op != OpARMNotEqual { + if v_0.Op != OpARMANDconst { break } - cc := v_0.Args[0] + c := v_0.AuxInt + x := v_0.Args[0] b.Kind = BlockARMNE - b.SetControl(cc) + v0 := b.NewValue0(v.Pos, OpARMTSTconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + b.SetControl(v0) b.Aux = nil return true } - // match: (NE (CMPconst [0] (LessThan cc)) yes no) + // match: (NE (CMPconst [0] (ANDshiftLL x y [c])) yes no) // cond: - // result: (LT cc yes no) + // 
result: (NE (TSTshiftLL x y [c]) yes no) for { v := b.Control if v.Op != OpARMCMPconst { @@ -20862,18 +24310,25 @@ func rewriteBlockARM(b *Block) bool { break } v_0 := v.Args[0] - if v_0.Op != OpARMLessThan { + if v_0.Op != OpARMANDshiftLL { break } - cc := v_0.Args[0] - b.Kind = BlockARMLT - b.SetControl(cc) + c := v_0.AuxInt + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + b.Kind = BlockARMNE + v0 := b.NewValue0(v.Pos, OpARMTSTshiftLL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) b.Aux = nil return true } - // match: (NE (CMPconst [0] (LessThanU cc)) yes no) + // match: (NE (CMPconst [0] (ANDshiftRL x y [c])) yes no) // cond: - // result: (ULT cc yes no) + // result: (NE (TSTshiftRL x y [c]) yes no) for { v := b.Control if v.Op != OpARMCMPconst { @@ -20883,18 +24338,25 @@ func rewriteBlockARM(b *Block) bool { break } v_0 := v.Args[0] - if v_0.Op != OpARMLessThanU { + if v_0.Op != OpARMANDshiftRL { break } - cc := v_0.Args[0] - b.Kind = BlockARMULT - b.SetControl(cc) + c := v_0.AuxInt + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + b.Kind = BlockARMNE + v0 := b.NewValue0(v.Pos, OpARMTSTshiftRL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) b.Aux = nil return true } - // match: (NE (CMPconst [0] (LessEqual cc)) yes no) + // match: (NE (CMPconst [0] (ANDshiftRA x y [c])) yes no) // cond: - // result: (LE cc yes no) + // result: (NE (TSTshiftRA x y [c]) yes no) for { v := b.Control if v.Op != OpARMCMPconst { @@ -20904,18 +24366,25 @@ func rewriteBlockARM(b *Block) bool { break } v_0 := v.Args[0] - if v_0.Op != OpARMLessEqual { + if v_0.Op != OpARMANDshiftRA { break } - cc := v_0.Args[0] - b.Kind = BlockARMLE - b.SetControl(cc) + c := v_0.AuxInt + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + b.Kind = BlockARMNE + v0 := b.NewValue0(v.Pos, OpARMTSTshiftRA, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) b.Aux = nil return true } - // 
match: (NE (CMPconst [0] (LessEqualU cc)) yes no) + // match: (NE (CMPconst [0] (ANDshiftLLreg x y z)) yes no) // cond: - // result: (ULE cc yes no) + // result: (NE (TSTshiftLLreg x y z) yes no) for { v := b.Control if v.Op != OpARMCMPconst { @@ -20925,18 +24394,25 @@ func rewriteBlockARM(b *Block) bool { break } v_0 := v.Args[0] - if v_0.Op != OpARMLessEqualU { + if v_0.Op != OpARMANDshiftLLreg { break } - cc := v_0.Args[0] - b.Kind = BlockARMULE - b.SetControl(cc) + _ = v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + z := v_0.Args[2] + b.Kind = BlockARMNE + v0 := b.NewValue0(v.Pos, OpARMTSTshiftLLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) b.Aux = nil return true } - // match: (NE (CMPconst [0] (GreaterThan cc)) yes no) + // match: (NE (CMPconst [0] (ANDshiftRLreg x y z)) yes no) // cond: - // result: (GT cc yes no) + // result: (NE (TSTshiftRLreg x y z) yes no) for { v := b.Control if v.Op != OpARMCMPconst { @@ -20946,18 +24422,25 @@ func rewriteBlockARM(b *Block) bool { break } v_0 := v.Args[0] - if v_0.Op != OpARMGreaterThan { + if v_0.Op != OpARMANDshiftRLreg { break } - cc := v_0.Args[0] - b.Kind = BlockARMGT - b.SetControl(cc) + _ = v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + z := v_0.Args[2] + b.Kind = BlockARMNE + v0 := b.NewValue0(v.Pos, OpARMTSTshiftRLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) b.Aux = nil return true } - // match: (NE (CMPconst [0] (GreaterThanU cc)) yes no) + // match: (NE (CMPconst [0] (ANDshiftRAreg x y z)) yes no) // cond: - // result: (UGT cc yes no) + // result: (NE (TSTshiftRAreg x y z) yes no) for { v := b.Control if v.Op != OpARMCMPconst { @@ -20967,18 +24450,25 @@ func rewriteBlockARM(b *Block) bool { break } v_0 := v.Args[0] - if v_0.Op != OpARMGreaterThanU { + if v_0.Op != OpARMANDshiftRAreg { break } - cc := v_0.Args[0] - b.Kind = BlockARMUGT - b.SetControl(cc) + _ = v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + z := 
v_0.Args[2] + b.Kind = BlockARMNE + v0 := b.NewValue0(v.Pos, OpARMTSTshiftRAreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) b.Aux = nil return true } - // match: (NE (CMPconst [0] (GreaterEqual cc)) yes no) + // match: (NE (CMPconst [0] (XOR x y)) yes no) // cond: - // result: (GE cc yes no) + // result: (NE (TEQ x y) yes no) for { v := b.Control if v.Op != OpARMCMPconst { @@ -20988,18 +24478,23 @@ func rewriteBlockARM(b *Block) bool { break } v_0 := v.Args[0] - if v_0.Op != OpARMGreaterEqual { + if v_0.Op != OpARMXOR { break } - cc := v_0.Args[0] - b.Kind = BlockARMGE - b.SetControl(cc) + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + b.Kind = BlockARMNE + v0 := b.NewValue0(v.Pos, OpARMTEQ, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) b.Aux = nil return true } - // match: (NE (CMPconst [0] (GreaterEqualU cc)) yes no) + // match: (NE (CMPconst [0] (XORconst [c] x)) yes no) // cond: - // result: (UGE cc yes no) + // result: (NE (TEQconst [c] x) yes no) for { v := b.Control if v.Op != OpARMCMPconst { @@ -21009,92 +24504,184 @@ func rewriteBlockARM(b *Block) bool { break } v_0 := v.Args[0] - if v_0.Op != OpARMGreaterEqualU { + if v_0.Op != OpARMXORconst { break } - cc := v_0.Args[0] - b.Kind = BlockARMUGE - b.SetControl(cc) + c := v_0.AuxInt + x := v_0.Args[0] + b.Kind = BlockARMNE + v0 := b.NewValue0(v.Pos, OpARMTEQconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + b.SetControl(v0) b.Aux = nil return true } - // match: (NE (FlagEQ) yes no) + // match: (NE (CMPconst [0] (XORshiftLL x y [c])) yes no) // cond: - // result: (First nil no yes) + // result: (NE (TEQshiftLL x y [c]) yes no) for { v := b.Control - if v.Op != OpARMFlagEQ { + if v.Op != OpARMCMPconst { break } - b.Kind = BlockFirst - b.SetControl(nil) + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMXORshiftLL { + break + } + c := v_0.AuxInt + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + b.Kind = 
BlockARMNE + v0 := b.NewValue0(v.Pos, OpARMTEQshiftLL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) b.Aux = nil - b.swapSuccessors() return true } - // match: (NE (FlagLT_ULT) yes no) + // match: (NE (CMPconst [0] (XORshiftRL x y [c])) yes no) // cond: - // result: (First nil yes no) + // result: (NE (TEQshiftRL x y [c]) yes no) for { v := b.Control - if v.Op != OpARMFlagLT_ULT { + if v.Op != OpARMCMPconst { break } - b.Kind = BlockFirst - b.SetControl(nil) + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMXORshiftRL { + break + } + c := v_0.AuxInt + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + b.Kind = BlockARMNE + v0 := b.NewValue0(v.Pos, OpARMTEQshiftRL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) b.Aux = nil return true } - // match: (NE (FlagLT_UGT) yes no) + // match: (NE (CMPconst [0] (XORshiftRA x y [c])) yes no) // cond: - // result: (First nil yes no) + // result: (NE (TEQshiftRA x y [c]) yes no) for { v := b.Control - if v.Op != OpARMFlagLT_UGT { + if v.Op != OpARMCMPconst { break } - b.Kind = BlockFirst - b.SetControl(nil) + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMXORshiftRA { + break + } + c := v_0.AuxInt + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + b.Kind = BlockARMNE + v0 := b.NewValue0(v.Pos, OpARMTEQshiftRA, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) b.Aux = nil return true } - // match: (NE (FlagGT_ULT) yes no) + // match: (NE (CMPconst [0] (XORshiftLLreg x y z)) yes no) // cond: - // result: (First nil yes no) + // result: (NE (TEQshiftLLreg x y z) yes no) for { v := b.Control - if v.Op != OpARMFlagGT_ULT { + if v.Op != OpARMCMPconst { break } - b.Kind = BlockFirst - b.SetControl(nil) + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMXORshiftLLreg { + break + } + _ = v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + z := v_0.Args[2] + 
b.Kind = BlockARMNE + v0 := b.NewValue0(v.Pos, OpARMTEQshiftLLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) b.Aux = nil return true } - // match: (NE (FlagGT_UGT) yes no) + // match: (NE (CMPconst [0] (XORshiftRLreg x y z)) yes no) // cond: - // result: (First nil yes no) + // result: (NE (TEQshiftRLreg x y z) yes no) for { v := b.Control - if v.Op != OpARMFlagGT_UGT { + if v.Op != OpARMCMPconst { break } - b.Kind = BlockFirst - b.SetControl(nil) + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMXORshiftRLreg { + break + } + _ = v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + z := v_0.Args[2] + b.Kind = BlockARMNE + v0 := b.NewValue0(v.Pos, OpARMTEQshiftRLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) b.Aux = nil return true } - // match: (NE (InvertFlags cmp) yes no) + // match: (NE (CMPconst [0] (XORshiftRAreg x y z)) yes no) // cond: - // result: (NE cmp yes no) + // result: (NE (TEQshiftRAreg x y z) yes no) for { v := b.Control - if v.Op != OpARMInvertFlags { + if v.Op != OpARMCMPconst { break } - cmp := v.Args[0] + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMXORshiftRAreg { + break + } + _ = v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + z := v_0.Args[2] b.Kind = BlockARMNE - b.SetControl(cmp) + v0 := b.NewValue0(v.Pos, OpARMTEQshiftRAreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) b.Aux = nil return true } -- 2.30.9