Commit 6ec993ad authored by Michael Munday

cmd/compile: add SSA backend for s390x and enable by default

The new SSA backend modifies the ABI slightly: R0 is now a usable
general purpose register.

Fixes #16677.

Change-Id: I367435ce921e0c7e79e021c80cf8ef5d1d1466cf
Reviewed-on: https://go-review.googlesource.com/28978
Run-TryBot: Michael Munday <munday@ca.ibm.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
parent b7e53038
@@ -40,7 +40,7 @@ func shouldssa(fn *Node) bool {
 		if os.Getenv("SSATEST") == "" {
 			return false
 		}
-	case "amd64", "amd64p32", "arm", "386", "arm64", "ppc64le", "mips64", "mips64le":
+	case "amd64", "amd64p32", "arm", "386", "arm64", "ppc64le", "mips64", "mips64le", "s390x":
 		// Generally available.
 	}
 	if !ssaEnabled {
@@ -58,6 +58,11 @@ func Main() {
 	gc.Thearch.Doregbits = doregbits
 	gc.Thearch.Regnames = regnames
+	gc.Thearch.SSARegToReg = ssaRegToReg
+	gc.Thearch.SSAMarkMoves = ssaMarkMoves
+	gc.Thearch.SSAGenValue = ssaGenValue
+	gc.Thearch.SSAGenBlock = ssaGenBlock
 	gc.Main()
 	gc.Exit(0)
 }
@@ -233,8 +233,8 @@ func dodiv(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 	// Handle divide-by-zero panic.
 	p1 := gins(optoas(gc.OCMP, t), &tr, nil)
-	p1.To.Type = obj.TYPE_REG
-	p1.To.Reg = s390x.REGZERO
+	p1.To.Type = obj.TYPE_CONST
+	p1.To.Offset = 0
 	p1 = gc.Gbranch(optoas(gc.ONE, t), nil, +1)
 	if panicdiv == nil {
 		panicdiv = gc.Sysfunc("panicdivide")
@@ -561,8 +561,8 @@ func expandchecks(firstp *obj.Prog) {
 	// crash by write to memory address 0.
 	p1.As = s390x.AMOVD
-	p1.From.Type = obj.TYPE_REG
-	p1.From.Reg = s390x.REGZERO
+	p1.From.Type = obj.TYPE_CONST
+	p1.From.Offset = 0
 	p1.To.Type = obj.TYPE_MEM
 	p1.To.Reg = s390x.REGZERO
 	p1.To.Offset = 0
@@ -36,23 +36,38 @@ var progtable = [s390x.ALAST & obj.AMask]obj.ProgInfo{
 	// Integer
 	s390x.AADD & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
 	s390x.ASUB & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	s390x.ASUBE & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	s390x.AADDW & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	s390x.ASUBW & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
 	s390x.ANEG & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	s390x.ANEGW & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
 	s390x.AAND & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
 	s390x.AOR & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
 	s390x.AXOR & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
 	s390x.AMULLD & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
 	s390x.AMULLW & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	s390x.AMULHD & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	s390x.AMULHDU & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	s390x.AMULHD & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	s390x.AMULHDU & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
 	s390x.ADIVD & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
 	s390x.ADIVDU & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	s390x.ADIVW & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	s390x.ADIVWU & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
 	s390x.ASLD & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	s390x.ASLW & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
 	s390x.ASRD & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	s390x.ASRW & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
 	s390x.ASRAD & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	s390x.ASRAW & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
 	s390x.ARLL & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
 	s390x.ARLLG & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
 	s390x.ACMP & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightRead},
 	s390x.ACMPU & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightRead},
+	s390x.ACMPW & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightRead},
+	s390x.ACMPWU & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightRead},
+	s390x.AMODD & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	s390x.AMODDU & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	s390x.AMODW & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	s390x.AMODWU & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
 	// Floating point.
 	s390x.AFADD & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
@@ -68,6 +83,8 @@ var progtable = [s390x.ALAST & obj.AMask]obj.ProgInfo{
 	s390x.ALEDBR & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
 	s390x.ALDEBR & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
 	s390x.AFSQRT & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite},
+	s390x.AFNEG & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite},
+	s390x.AFNEGS & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite},
 	// Conversions
 	s390x.ACEFBRA & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv},
@@ -88,15 +105,24 @@ var progtable = [s390x.ALAST & obj.AMask]obj.ProgInfo{
 	s390x.ACLGDBR & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv},
 	// Moves
 	s390x.AMOVB & obj.AMask: {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
 	s390x.AMOVBZ & obj.AMask: {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
 	s390x.AMOVH & obj.AMask: {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
 	s390x.AMOVHZ & obj.AMask: {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
 	s390x.AMOVW & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
 	s390x.AMOVWZ & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
 	s390x.AMOVD & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move},
+	s390x.AMOVHBR & obj.AMask: {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
+	s390x.AMOVWBR & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
+	s390x.AMOVDBR & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move},
 	s390x.AFMOVS & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
 	s390x.AFMOVD & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move},
+	s390x.AMOVDEQ & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move},
+	s390x.AMOVDGE & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move},
+	s390x.AMOVDGT & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move},
+	s390x.AMOVDLE & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move},
+	s390x.AMOVDLT & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move},
+	s390x.AMOVDNE & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move},
 	// Storage operations
 	s390x.AMVC & obj.AMask: {Flags: gc.LeftRead | gc.LeftAddr | gc.RightWrite | gc.RightAddr},
@@ -114,6 +140,8 @@ var progtable = [s390x.ALAST & obj.AMask]obj.ProgInfo{
 	s390x.ABLT & obj.AMask: {Flags: gc.Cjmp},
 	s390x.ABGT & obj.AMask: {Flags: gc.Cjmp},
 	s390x.ABLE & obj.AMask: {Flags: gc.Cjmp},
+	s390x.ABLEU & obj.AMask: {Flags: gc.Cjmp},
+	s390x.ABLTU & obj.AMask: {Flags: gc.Cjmp},
 	s390x.ACMPBEQ & obj.AMask: {Flags: gc.Cjmp},
 	s390x.ACMPBNE & obj.AMask: {Flags: gc.Cjmp},
 	s390x.ACMPBGE & obj.AMask: {Flags: gc.Cjmp},
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package s390x

import (
	"math"

	"cmd/compile/internal/gc"
	"cmd/compile/internal/ssa"
	"cmd/internal/obj"
	"cmd/internal/obj/s390x"
)
// Smallest possible faulting page at address zero.
const minZeroPage = 4096
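// A load or store whose offset from a nil-checked pointer is below minZeroPage
// still lands in the unmapped zero page and faults, which is why the
// OpS390XLoweredNilCheck case below can treat such a memory op as an implicit
// nil check and elide its own.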
// ssaRegToReg maps ssa register numbers to obj register numbers.
var ssaRegToReg = []int16{
s390x.REG_R0,
s390x.REG_R1,
s390x.REG_R2,
s390x.REG_R3,
s390x.REG_R4,
s390x.REG_R5,
s390x.REG_R6,
s390x.REG_R7,
s390x.REG_R8,
s390x.REG_R9,
s390x.REG_R10,
s390x.REG_R11,
s390x.REG_R12,
s390x.REG_R13,
s390x.REG_R14,
s390x.REG_R15,
s390x.REG_F0,
s390x.REG_F1,
s390x.REG_F2,
s390x.REG_F3,
s390x.REG_F4,
s390x.REG_F5,
s390x.REG_F6,
s390x.REG_F7,
s390x.REG_F8,
s390x.REG_F9,
s390x.REG_F10,
s390x.REG_F11,
s390x.REG_F12,
s390x.REG_F13,
s390x.REG_F14,
s390x.REG_F15,
0, // SB isn't a real register. We fill an Addr.Reg field with 0 in this case.
}
// markMoves marks any MOVXconst ops that need to avoid clobbering flags.
func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {
flive := b.FlagsLiveAtEnd
if b.Control != nil && b.Control.Type.IsFlags() {
flive = true
}
for i := len(b.Values) - 1; i >= 0; i-- {
v := b.Values[i]
if flive && v.Op == ssa.OpS390XMOVDconst {
// The "mark" is any non-nil Aux value.
v.Aux = v
}
if v.Type.IsFlags() {
flive = false
}
for _, a := range v.Args {
if a.Type.IsFlags() {
flive = true
}
}
}
}
// loadByType returns the load instruction of the given type.
func loadByType(t ssa.Type) obj.As {
if t.IsFloat() {
switch t.Size() {
case 4:
return s390x.AFMOVS
case 8:
return s390x.AFMOVD
}
} else {
switch t.Size() {
case 1:
if t.IsSigned() {
return s390x.AMOVB
} else {
return s390x.AMOVBZ
}
case 2:
if t.IsSigned() {
return s390x.AMOVH
} else {
return s390x.AMOVHZ
}
case 4:
if t.IsSigned() {
return s390x.AMOVW
} else {
return s390x.AMOVWZ
}
case 8:
return s390x.AMOVD
}
}
panic("bad load type")
}
// storeByType returns the store instruction of the given type.
func storeByType(t ssa.Type) obj.As {
width := t.Size()
if t.IsFloat() {
switch width {
case 4:
return s390x.AFMOVS
case 8:
return s390x.AFMOVD
}
} else {
switch width {
case 1:
return s390x.AMOVB
case 2:
return s390x.AMOVH
case 4:
return s390x.AMOVW
case 8:
return s390x.AMOVD
}
}
panic("bad store type")
}
// moveByType returns the reg->reg move instruction of the given type.
func moveByType(t ssa.Type) obj.As {
if t.IsFloat() {
return s390x.AFMOVD
} else {
switch t.Size() {
case 1:
if t.IsSigned() {
return s390x.AMOVB
} else {
return s390x.AMOVBZ
}
case 2:
if t.IsSigned() {
return s390x.AMOVH
} else {
return s390x.AMOVHZ
}
case 4:
if t.IsSigned() {
return s390x.AMOVW
} else {
return s390x.AMOVWZ
}
case 8:
return s390x.AMOVD
}
}
panic("bad load type")
}
// opregreg emits instructions for
// dest := dest(To) op src(From)
// and also returns the created obj.Prog so it
// may be further adjusted (offset, scale, etc).
func opregreg(op obj.As, dest, src int16) *obj.Prog {
p := gc.Prog(op)
p.From.Type = obj.TYPE_REG
p.To.Type = obj.TYPE_REG
p.To.Reg = dest
p.From.Reg = src
return p
}
// opregregimm emits instructions for
// dest := src(From) op off
// and also returns the created obj.Prog so it
// may be further adjusted (offset, scale, etc).
func opregregimm(op obj.As, dest, src int16, off int64) *obj.Prog {
p := gc.Prog(op)
p.From.Type = obj.TYPE_CONST
p.From.Offset = off
p.Reg = src
p.To.Reg = dest
p.To.Type = obj.TYPE_REG
return p
}
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
s.SetLineno(v.Line)
switch v.Op {
case ssa.OpS390XSLD, ssa.OpS390XSLW,
ssa.OpS390XSRD, ssa.OpS390XSRW,
ssa.OpS390XSRAD, ssa.OpS390XSRAW:
r := gc.SSARegNum(v)
r1 := gc.SSARegNum(v.Args[0])
r2 := gc.SSARegNum(v.Args[1])
if r2 == s390x.REG_R0 {
v.Fatalf("cannot use R0 as shift value %s", v.LongString())
}
p := opregreg(v.Op.Asm(), r, r2)
if r != r1 {
p.Reg = r1
}
case ssa.OpS390XADD, ssa.OpS390XADDW,
ssa.OpS390XSUB, ssa.OpS390XSUBW,
ssa.OpS390XAND, ssa.OpS390XANDW,
ssa.OpS390XOR, ssa.OpS390XORW,
ssa.OpS390XXOR, ssa.OpS390XXORW:
r := gc.SSARegNum(v)
r1 := gc.SSARegNum(v.Args[0])
r2 := gc.SSARegNum(v.Args[1])
p := opregreg(v.Op.Asm(), r, r2)
if r != r1 {
p.Reg = r1
}
// 2-address opcode arithmetic
case ssa.OpS390XMULLD, ssa.OpS390XMULLW,
ssa.OpS390XMULHD, ssa.OpS390XMULHDU,
ssa.OpS390XFADDS, ssa.OpS390XFADD, ssa.OpS390XFSUBS, ssa.OpS390XFSUB,
ssa.OpS390XFMULS, ssa.OpS390XFMUL, ssa.OpS390XFDIVS, ssa.OpS390XFDIV:
r := gc.SSARegNum(v)
if r != gc.SSARegNum(v.Args[0]) {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
opregreg(v.Op.Asm(), r, gc.SSARegNum(v.Args[1]))
case ssa.OpS390XDIVD, ssa.OpS390XDIVW,
ssa.OpS390XDIVDU, ssa.OpS390XDIVWU,
ssa.OpS390XMODD, ssa.OpS390XMODW,
ssa.OpS390XMODDU, ssa.OpS390XMODWU:
// TODO(mundaym): use the temp registers every time like x86 does with AX?
dividend := gc.SSARegNum(v.Args[0])
divisor := gc.SSARegNum(v.Args[1])
// CPU faults upon signed overflow, which occurs when the
// most negative int is divided by -1.
var j *obj.Prog
if v.Op == ssa.OpS390XDIVD || v.Op == ssa.OpS390XDIVW ||
v.Op == ssa.OpS390XMODD || v.Op == ssa.OpS390XMODW {
var c *obj.Prog
c = gc.Prog(s390x.ACMP)
j = gc.Prog(s390x.ABEQ)
c.From.Type = obj.TYPE_REG
c.From.Reg = divisor
c.To.Type = obj.TYPE_CONST
c.To.Offset = -1
j.To.Type = obj.TYPE_BRANCH
}
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = divisor
p.Reg = 0
p.To.Type = obj.TYPE_REG
p.To.Reg = dividend
// signed division, rest of the check for -1 case
if j != nil {
j2 := gc.Prog(s390x.ABR)
j2.To.Type = obj.TYPE_BRANCH
var n *obj.Prog
if v.Op == ssa.OpS390XDIVD || v.Op == ssa.OpS390XDIVW {
// n * -1 = -n
n = gc.Prog(s390x.ANEG)
n.To.Type = obj.TYPE_REG
n.To.Reg = dividend
} else {
// n % -1 == 0
n = gc.Prog(s390x.AXOR)
n.From.Type = obj.TYPE_REG
n.From.Reg = dividend
n.To.Type = obj.TYPE_REG
n.To.Reg = dividend
}
j.To.Val = n
j2.To.Val = s.Pc()
}
case ssa.OpS390XADDconst, ssa.OpS390XADDWconst:
opregregimm(v.Op.Asm(), gc.SSARegNum(v), gc.SSARegNum(v.Args[0]), v.AuxInt)
case ssa.OpS390XMULLDconst, ssa.OpS390XMULLWconst,
ssa.OpS390XSUBconst, ssa.OpS390XSUBWconst,
ssa.OpS390XANDconst, ssa.OpS390XANDWconst,
ssa.OpS390XORconst, ssa.OpS390XORWconst,
ssa.OpS390XXORconst, ssa.OpS390XXORWconst:
r := gc.SSARegNum(v)
if r != gc.SSARegNum(v.Args[0]) {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
p.To.Reg = r
case ssa.OpS390XSLDconst, ssa.OpS390XSLWconst,
ssa.OpS390XSRDconst, ssa.OpS390XSRWconst,
ssa.OpS390XSRADconst, ssa.OpS390XSRAWconst,
ssa.OpS390XRLLGconst, ssa.OpS390XRLLconst:
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
r := gc.SSARegNum(v)
r1 := gc.SSARegNum(v.Args[0])
if r != r1 {
p.Reg = r1
}
p.To.Type = obj.TYPE_REG
p.To.Reg = r
case ssa.OpS390XSUBEcarrymask, ssa.OpS390XSUBEWcarrymask:
r := gc.SSARegNum(v)
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = r
p.To.Type = obj.TYPE_REG
p.To.Reg = r
case ssa.OpS390XMOVDaddridx:
r := gc.SSARegNum(v.Args[0])
i := gc.SSARegNum(v.Args[1])
p := gc.Prog(s390x.AMOVD)
p.From.Scale = 1
if i == s390x.REGSP {
r, i = i, r
}
p.From.Type = obj.TYPE_ADDR
p.From.Reg = r
p.From.Index = i
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = gc.SSARegNum(v)
case ssa.OpS390XMOVDaddr:
p := gc.Prog(s390x.AMOVD)
p.From.Type = obj.TYPE_ADDR
p.From.Reg = gc.SSARegNum(v.Args[0])
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = gc.SSARegNum(v)
case ssa.OpS390XCMP, ssa.OpS390XCMPW, ssa.OpS390XCMPU, ssa.OpS390XCMPWU:
opregreg(v.Op.Asm(), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[0]))
case ssa.OpS390XTESTB:
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = 0xFF
p.Reg = gc.SSARegNum(v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = s390x.REGTMP
case ssa.OpS390XFCMPS, ssa.OpS390XFCMP:
opregreg(v.Op.Asm(), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[0]))
case ssa.OpS390XCMPconst, ssa.OpS390XCMPWconst, ssa.OpS390XCMPUconst, ssa.OpS390XCMPWUconst:
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = gc.SSARegNum(v.Args[0])
p.To.Type = obj.TYPE_CONST
p.To.Offset = v.AuxInt
case ssa.OpS390XMOVDconst:
x := gc.SSARegNum(v)
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
p.To.Reg = x
case ssa.OpS390XFMOVSconst, ssa.OpS390XFMOVDconst:
x := gc.SSARegNum(v)
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_FCONST
p.From.Val = math.Float64frombits(uint64(v.AuxInt))
p.To.Type = obj.TYPE_REG
p.To.Reg = x
case ssa.OpS390XMOVDload,
ssa.OpS390XMOVWZload, ssa.OpS390XMOVHZload, ssa.OpS390XMOVBZload,
ssa.OpS390XMOVDBRload, ssa.OpS390XMOVWBRload, ssa.OpS390XMOVHBRload,
ssa.OpS390XMOVBload, ssa.OpS390XMOVHload, ssa.OpS390XMOVWload,
ssa.OpS390XFMOVSload, ssa.OpS390XFMOVDload:
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = gc.SSARegNum(v.Args[0])
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = gc.SSARegNum(v)
case ssa.OpS390XMOVBZloadidx, ssa.OpS390XMOVHZloadidx, ssa.OpS390XMOVWZloadidx, ssa.OpS390XMOVDloadidx,
ssa.OpS390XMOVHBRloadidx, ssa.OpS390XMOVWBRloadidx, ssa.OpS390XMOVDBRloadidx,
ssa.OpS390XFMOVSloadidx, ssa.OpS390XFMOVDloadidx:
r := gc.SSARegNum(v.Args[0])
i := gc.SSARegNum(v.Args[1])
if i == s390x.REGSP {
r, i = i, r
}
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = r
p.From.Scale = 1
p.From.Index = i
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = gc.SSARegNum(v)
case ssa.OpS390XMOVBstore, ssa.OpS390XMOVHstore, ssa.OpS390XMOVWstore, ssa.OpS390XMOVDstore,
ssa.OpS390XFMOVSstore, ssa.OpS390XFMOVDstore:
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = gc.SSARegNum(v.Args[1])
p.To.Type = obj.TYPE_MEM
p.To.Reg = gc.SSARegNum(v.Args[0])
gc.AddAux(&p.To, v)
case ssa.OpS390XMOVBstoreidx, ssa.OpS390XMOVHstoreidx, ssa.OpS390XMOVWstoreidx, ssa.OpS390XMOVDstoreidx,
ssa.OpS390XFMOVSstoreidx, ssa.OpS390XFMOVDstoreidx:
r := gc.SSARegNum(v.Args[0])
i := gc.SSARegNum(v.Args[1])
if i == s390x.REGSP {
r, i = i, r
}
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = gc.SSARegNum(v.Args[2])
p.To.Type = obj.TYPE_MEM
p.To.Reg = r
p.To.Scale = 1
p.To.Index = i
gc.AddAux(&p.To, v)
case ssa.OpS390XMOVDstoreconst, ssa.OpS390XMOVWstoreconst, ssa.OpS390XMOVHstoreconst, ssa.OpS390XMOVBstoreconst:
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
sc := v.AuxValAndOff()
p.From.Offset = sc.Val()
p.To.Type = obj.TYPE_MEM
p.To.Reg = gc.SSARegNum(v.Args[0])
gc.AddAux2(&p.To, v, sc.Off())
case ssa.OpS390XMOVBreg, ssa.OpS390XMOVHreg, ssa.OpS390XMOVWreg,
ssa.OpS390XMOVBZreg, ssa.OpS390XMOVHZreg, ssa.OpS390XMOVWZreg,
ssa.OpS390XCEFBRA, ssa.OpS390XCDFBRA, ssa.OpS390XCEGBRA, ssa.OpS390XCDGBRA,
ssa.OpS390XCFEBRA, ssa.OpS390XCFDBRA, ssa.OpS390XCGEBRA, ssa.OpS390XCGDBRA,
ssa.OpS390XLDEBR, ssa.OpS390XLEDBR,
ssa.OpS390XFNEG, ssa.OpS390XFNEGS:
opregreg(v.Op.Asm(), gc.SSARegNum(v), gc.SSARegNum(v.Args[0]))
case ssa.OpS390XCLEAR:
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
sc := v.AuxValAndOff()
p.From.Offset = sc.Val()
p.To.Type = obj.TYPE_MEM
p.To.Reg = gc.SSARegNum(v.Args[0])
gc.AddAux2(&p.To, v, sc.Off())
case ssa.OpCopy, ssa.OpS390XMOVDconvert:
if v.Type.IsMemory() {
return
}
x := gc.SSARegNum(v.Args[0])
y := gc.SSARegNum(v)
if x != y {
opregreg(moveByType(v.Type), y, x)
}
case ssa.OpLoadReg:
if v.Type.IsFlags() {
v.Unimplementedf("load flags not implemented: %v", v.LongString())
return
}
p := gc.Prog(loadByType(v.Type))
n, off := gc.AutoVar(v.Args[0])
p.From.Type = obj.TYPE_MEM
p.From.Node = n
p.From.Sym = gc.Linksym(n.Sym)
p.From.Offset = off
if n.Class == gc.PPARAM || n.Class == gc.PPARAMOUT {
p.From.Name = obj.NAME_PARAM
p.From.Offset += n.Xoffset
} else {
p.From.Name = obj.NAME_AUTO
}
p.To.Type = obj.TYPE_REG
p.To.Reg = gc.SSARegNum(v)
case ssa.OpStoreReg:
if v.Type.IsFlags() {
v.Unimplementedf("store flags not implemented: %v", v.LongString())
return
}
p := gc.Prog(storeByType(v.Type))
p.From.Type = obj.TYPE_REG
p.From.Reg = gc.SSARegNum(v.Args[0])
n, off := gc.AutoVar(v)
p.To.Type = obj.TYPE_MEM
p.To.Node = n
p.To.Sym = gc.Linksym(n.Sym)
p.To.Offset = off
if n.Class == gc.PPARAM || n.Class == gc.PPARAMOUT {
p.To.Name = obj.NAME_PARAM
p.To.Offset += n.Xoffset
} else {
p.To.Name = obj.NAME_AUTO
}
case ssa.OpPhi:
gc.CheckLoweredPhi(v)
case ssa.OpInitMem:
// memory arg needs no code
case ssa.OpArg:
// input args need no code
case ssa.OpS390XLoweredGetClosurePtr:
// Closure pointer is R12 (already)
gc.CheckLoweredGetClosurePtr(v)
case ssa.OpS390XLoweredGetG:
r := gc.SSARegNum(v)
p := gc.Prog(s390x.AMOVD)
p.From.Type = obj.TYPE_REG
p.From.Reg = s390x.REGG
p.To.Type = obj.TYPE_REG
p.To.Reg = r
case ssa.OpS390XCALLstatic:
if v.Aux.(*gc.Sym) == gc.Deferreturn.Sym {
// Deferred calls will appear to be returning to
// the CALL deferreturn(SB) that we are about to emit.
// However, the stack trace code will show the line
// of the instruction byte before the return PC.
// To avoid that being an unrelated instruction,
// insert an actual hardware NOP that will have the right line number.
// This is different from obj.ANOP, which is a virtual no-op
// that doesn't make it into the instruction stream.
ginsnop()
}
p := gc.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Linksym(v.Aux.(*gc.Sym))
if gc.Maxarg < v.AuxInt {
gc.Maxarg = v.AuxInt
}
case ssa.OpS390XCALLclosure:
p := gc.Prog(obj.ACALL)
p.To.Type = obj.TYPE_REG
p.To.Reg = gc.SSARegNum(v.Args[0])
if gc.Maxarg < v.AuxInt {
gc.Maxarg = v.AuxInt
}
case ssa.OpS390XCALLdefer:
p := gc.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Linksym(gc.Deferproc.Sym)
if gc.Maxarg < v.AuxInt {
gc.Maxarg = v.AuxInt
}
case ssa.OpS390XCALLgo:
p := gc.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Linksym(gc.Newproc.Sym)
if gc.Maxarg < v.AuxInt {
gc.Maxarg = v.AuxInt
}
case ssa.OpS390XCALLinter:
p := gc.Prog(obj.ACALL)
p.To.Type = obj.TYPE_REG
p.To.Reg = gc.SSARegNum(v.Args[0])
if gc.Maxarg < v.AuxInt {
gc.Maxarg = v.AuxInt
}
case ssa.OpS390XNEG, ssa.OpS390XNEGW:
r := gc.SSARegNum(v)
p := gc.Prog(v.Op.Asm())
r1 := gc.SSARegNum(v.Args[0])
if r != r1 {
p.From.Type = obj.TYPE_REG
p.From.Reg = r1
}
p.To.Type = obj.TYPE_REG
p.To.Reg = r
case ssa.OpS390XNOT, ssa.OpS390XNOTW:
v.Fatalf("NOT/NOTW generated %s", v.LongString())
case ssa.OpS390XMOVDEQ, ssa.OpS390XMOVDNE,
ssa.OpS390XMOVDLT, ssa.OpS390XMOVDLE,
ssa.OpS390XMOVDGT, ssa.OpS390XMOVDGE,
ssa.OpS390XMOVDGTnoinv, ssa.OpS390XMOVDGEnoinv:
r := gc.SSARegNum(v)
if r != gc.SSARegNum(v.Args[0]) {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = gc.SSARegNum(v.Args[1])
p.To.Type = obj.TYPE_REG
p.To.Reg = r
case ssa.OpS390XFSQRT:
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = gc.SSARegNum(v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = gc.SSARegNum(v)
case ssa.OpSP, ssa.OpSB:
// nothing to do
case ssa.OpVarDef:
gc.Gvardef(v.Aux.(*gc.Node))
case ssa.OpVarKill:
gc.Gvarkill(v.Aux.(*gc.Node))
case ssa.OpVarLive:
gc.Gvarlive(v.Aux.(*gc.Node))
case ssa.OpKeepAlive:
gc.KeepAlive(v)
case ssa.OpS390XInvertFlags:
v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
case ssa.OpS390XFlagEQ, ssa.OpS390XFlagLT, ssa.OpS390XFlagGT:
v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
case ssa.OpS390XLoweredNilCheck:
// Optimization - if the subsequent block has a load or store
// at the same address, we don't need to issue this instruction.
mem := v.Args[1]
for _, w := range v.Block.Succs[0].Block().Values {
if w.Op == ssa.OpPhi {
if w.Type.IsMemory() {
mem = w
}
continue
}
if len(w.Args) == 0 || !w.Args[len(w.Args)-1].Type.IsMemory() {
// w doesn't use a store - can't be a memory op.
continue
}
if w.Args[len(w.Args)-1] != mem {
v.Fatalf("wrong store after nilcheck v=%s w=%s", v, w)
}
switch w.Op {
case ssa.OpS390XMOVDload,
ssa.OpS390XMOVBload, ssa.OpS390XMOVHload, ssa.OpS390XMOVWload,
ssa.OpS390XMOVBZload, ssa.OpS390XMOVHZload, ssa.OpS390XMOVWZload,
ssa.OpS390XMOVHBRload, ssa.OpS390XMOVWBRload, ssa.OpS390XMOVDBRload,
ssa.OpS390XMOVBstore, ssa.OpS390XMOVHstore, ssa.OpS390XMOVWstore, ssa.OpS390XMOVDstore,
ssa.OpS390XFMOVSload, ssa.OpS390XFMOVDload,
ssa.OpS390XFMOVSstore, ssa.OpS390XFMOVDstore,
ssa.OpS390XSTMG2, ssa.OpS390XSTMG3, ssa.OpS390XSTMG4,
ssa.OpS390XSTM2, ssa.OpS390XSTM3, ssa.OpS390XSTM4:
if w.Args[0] == v.Args[0] && w.Aux == nil && w.AuxInt >= 0 && w.AuxInt < minZeroPage {
if gc.Debug_checknil != 0 && int(v.Line) > 1 {
gc.Warnl(v.Line, "removed nil check")
}
return
}
case ssa.OpS390XMOVDstoreconst, ssa.OpS390XMOVWstoreconst, ssa.OpS390XMOVHstoreconst, ssa.OpS390XMOVBstoreconst,
ssa.OpS390XCLEAR:
off := ssa.ValAndOff(v.AuxInt).Off()
if w.Args[0] == v.Args[0] && w.Aux == nil && off >= 0 && off < minZeroPage {
if gc.Debug_checknil != 0 && int(v.Line) > 1 {
gc.Warnl(v.Line, "removed nil check")
}
return
}
case ssa.OpS390XMVC:
off := ssa.ValAndOff(v.AuxInt).Off()
if (w.Args[0] == v.Args[0] || w.Args[1] == v.Args[0]) && w.Aux == nil && off >= 0 && off < minZeroPage {
if gc.Debug_checknil != 0 && int(v.Line) > 1 {
gc.Warnl(v.Line, "removed nil check")
}
return
}
}
if w.Type.IsMemory() {
if w.Op == ssa.OpVarDef || w.Op == ssa.OpVarKill || w.Op == ssa.OpVarLive {
// these ops are OK
mem = w
continue
}
// We can't delay the nil check past the next store.
break
}
}
// Issue a load which will fault if the input is nil.
p := gc.Prog(s390x.AMOVBZ)
p.From.Type = obj.TYPE_MEM
p.From.Reg = gc.SSARegNum(v.Args[0])
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = s390x.REGTMP
if gc.Debug_checknil != 0 && v.Line > 1 { // v.Line==1 in generated wrappers
gc.Warnl(v.Line, "generated nil check")
}
case ssa.OpS390XMVC:
vo := v.AuxValAndOff()
p := gc.Prog(s390x.AMVC)
p.From.Type = obj.TYPE_MEM
p.From.Reg = gc.SSARegNum(v.Args[1])
p.From.Offset = vo.Off()
p.To.Type = obj.TYPE_MEM
p.To.Reg = gc.SSARegNum(v.Args[0])
p.To.Offset = vo.Off()
p.From3 = new(obj.Addr)
p.From3.Type = obj.TYPE_CONST
p.From3.Offset = vo.Val()
case ssa.OpS390XSTMG2, ssa.OpS390XSTMG3, ssa.OpS390XSTMG4,
ssa.OpS390XSTM2, ssa.OpS390XSTM3, ssa.OpS390XSTM4:
for i := 2; i < len(v.Args)-1; i++ {
if gc.SSARegNum(v.Args[i]) != gc.SSARegNum(v.Args[i-1])+1 {
v.Fatalf("invalid store multiple %s", v.LongString())
}
}
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = gc.SSARegNum(v.Args[1])
p.Reg = gc.SSARegNum(v.Args[len(v.Args)-2])
p.To.Type = obj.TYPE_MEM
p.To.Reg = gc.SSARegNum(v.Args[0])
gc.AddAux(&p.To, v)
case ssa.OpS390XLoweredMove:
// Inputs must be valid pointers to memory,
// so adjust arg0 and arg1 as part of the expansion.
// arg2 should be src+size,
//
// mvc: MVC $256, 0(R2), 0(R1)
// MOVD $256(R1), R1
// MOVD $256(R2), R2
// CMP R2, Rarg2
// BNE mvc
// MVC $rem, 0(R2), 0(R1) // if rem > 0
// arg2 is the last address to move in the loop + 256
mvc := gc.Prog(s390x.AMVC)
mvc.From.Type = obj.TYPE_MEM
mvc.From.Reg = gc.SSARegNum(v.Args[1])
mvc.To.Type = obj.TYPE_MEM
mvc.To.Reg = gc.SSARegNum(v.Args[0])
mvc.From3 = new(obj.Addr)
mvc.From3.Type = obj.TYPE_CONST
mvc.From3.Offset = 256
for i := 0; i < 2; i++ {
movd := gc.Prog(s390x.AMOVD)
movd.From.Type = obj.TYPE_ADDR
movd.From.Reg = gc.SSARegNum(v.Args[i])
movd.From.Offset = 256
movd.To.Type = obj.TYPE_REG
movd.To.Reg = gc.SSARegNum(v.Args[i])
}
cmpu := gc.Prog(s390x.ACMPU)
cmpu.From.Reg = gc.SSARegNum(v.Args[1])
cmpu.From.Type = obj.TYPE_REG
cmpu.To.Reg = gc.SSARegNum(v.Args[2])
cmpu.To.Type = obj.TYPE_REG
bne := gc.Prog(s390x.ABLT)
bne.To.Type = obj.TYPE_BRANCH
gc.Patch(bne, mvc)
if v.AuxInt > 0 {
mvc := gc.Prog(s390x.AMVC)
mvc.From.Type = obj.TYPE_MEM
mvc.From.Reg = gc.SSARegNum(v.Args[1])
mvc.To.Type = obj.TYPE_MEM
mvc.To.Reg = gc.SSARegNum(v.Args[0])
mvc.From3 = new(obj.Addr)
mvc.From3.Type = obj.TYPE_CONST
mvc.From3.Offset = v.AuxInt
}
case ssa.OpS390XLoweredZero:
// Input must be a valid pointer to memory,
// so adjust arg0 as part of the expansion.
// arg1 should be src+size,
//
// clear: CLEAR $256, 0(R1)
// MOVD $256(R1), R1
// CMP R1, Rarg1
// BNE clear
// CLEAR $rem, 0(R1) // if rem > 0
// arg1 is the last address to zero in the loop + 256
clear := gc.Prog(s390x.ACLEAR)
clear.From.Type = obj.TYPE_CONST
clear.From.Offset = 256
clear.To.Type = obj.TYPE_MEM
clear.To.Reg = gc.SSARegNum(v.Args[0])
movd := gc.Prog(s390x.AMOVD)
movd.From.Type = obj.TYPE_ADDR
movd.From.Reg = gc.SSARegNum(v.Args[0])
movd.From.Offset = 256
movd.To.Type = obj.TYPE_REG
movd.To.Reg = gc.SSARegNum(v.Args[0])
cmpu := gc.Prog(s390x.ACMPU)
cmpu.From.Reg = gc.SSARegNum(v.Args[0])
cmpu.From.Type = obj.TYPE_REG
cmpu.To.Reg = gc.SSARegNum(v.Args[1])
cmpu.To.Type = obj.TYPE_REG
bne := gc.Prog(s390x.ABLT)
bne.To.Type = obj.TYPE_BRANCH
gc.Patch(bne, clear)
if v.AuxInt > 0 {
clear := gc.Prog(s390x.ACLEAR)
clear.From.Type = obj.TYPE_CONST
clear.From.Offset = v.AuxInt
clear.To.Type = obj.TYPE_MEM
clear.To.Reg = gc.SSARegNum(v.Args[0])
}
default:
v.Unimplementedf("genValue not implemented: %s", v.LongString())
}
}
var blockJump = [...]struct {
asm, invasm obj.As
}{
ssa.BlockS390XEQ: {s390x.ABEQ, s390x.ABNE},
ssa.BlockS390XNE: {s390x.ABNE, s390x.ABEQ},
ssa.BlockS390XLT: {s390x.ABLT, s390x.ABGE},
ssa.BlockS390XGE: {s390x.ABGE, s390x.ABLT},
ssa.BlockS390XLE: {s390x.ABLE, s390x.ABGT},
ssa.BlockS390XGT: {s390x.ABGT, s390x.ABLE},
ssa.BlockS390XGTF: {s390x.ABGT, s390x.ABLEU},
ssa.BlockS390XGEF: {s390x.ABGE, s390x.ABLTU},
}
func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
s.SetLineno(b.Line)
switch b.Kind {
case ssa.BlockPlain, ssa.BlockCheck:
if b.Succs[0].Block() != next {
p := gc.Prog(s390x.ABR)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockDefer:
// defer returns in R3:
// 0 if we should continue executing
// 1 if we should jump to deferreturn call
p := gc.Prog(s390x.AAND)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 0xFFFFFFFF
p.Reg = s390x.REG_R3
p.To.Type = obj.TYPE_REG
p.To.Reg = s390x.REG_R3
p = gc.Prog(s390x.ABNE)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
p := gc.Prog(s390x.ABR)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:
gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here
case ssa.BlockRet:
gc.Prog(obj.ARET)
case ssa.BlockRetJmp:
p := gc.Prog(s390x.ABR)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Linksym(b.Aux.(*gc.Sym))
case ssa.BlockS390XEQ, ssa.BlockS390XNE,
ssa.BlockS390XLT, ssa.BlockS390XGE,
ssa.BlockS390XLE, ssa.BlockS390XGT,
ssa.BlockS390XGEF, ssa.BlockS390XGTF:
jmp := blockJump[b.Kind]
likely := b.Likely
var p *obj.Prog
switch next {
case b.Succs[0].Block():
p = gc.Prog(jmp.invasm)
likely *= -1
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
case b.Succs[1].Block():
p = gc.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
default:
p = gc.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
q := gc.Prog(s390x.ABR)
q.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
}
default:
b.Unimplementedf("branch not implemented: %s. Control: %s", b.LongString(), b.Control.LongString())
}
}
@@ -206,6 +206,17 @@ func NewConfig(arch string, fe Frontend, ctxt *obj.Link, optimize bool) *Config
 		c.specialRegMask = specialRegMaskMIPS64
 		c.FPReg = framepointerRegMIPS64
 		c.hasGReg = true
+	case "s390x":
+		c.IntSize = 8
+		c.PtrSize = 8
+		c.lowerBlock = rewriteBlockS390X
+		c.lowerValue = rewriteValueS390X
+		c.registers = registersS390X[:]
+		c.gpRegMask = gpRegMaskS390X
+		c.fpRegMask = fpRegMaskS390X
+		c.FPReg = framepointerRegS390X
+		c.hasGReg = true
+		c.noDuffDevice = true
 	default:
 		fe.Unimplementedf(0, "arch %s not implemented", arch)
 	}
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Lowering arithmetic
(Add64 x y) -> (ADD x y)
(AddPtr x y) -> (ADD x y)
(Add32 x y) -> (ADDW x y)
(Add16 x y) -> (ADDW x y)
(Add8 x y) -> (ADDW x y)
(Add32F x y) -> (FADDS x y)
(Add64F x y) -> (FADD x y)
(Sub64 x y) -> (SUB x y)
(SubPtr x y) -> (SUB x y)
(Sub32 x y) -> (SUBW x y)
(Sub16 x y) -> (SUBW x y)
(Sub8 x y) -> (SUBW x y)
(Sub32F x y) -> (FSUBS x y)
(Sub64F x y) -> (FSUB x y)
(Mul64 x y) -> (MULLD x y)
(Mul32 x y) -> (MULLW x y)
(Mul16 x y) -> (MULLW x y)
(Mul8 x y) -> (MULLW x y)
(Mul32F x y) -> (FMULS x y)
(Mul64F x y) -> (FMUL x y)
(Div32F x y) -> (FDIVS x y)
(Div64F x y) -> (FDIV x y)
(Div64 x y) -> (DIVD x y)
(Div64u x y) -> (DIVDU x y)
// DIVW/DIVWU have a 64-bit dividend and a 32-bit divisor,
// so a sign/zero extension of the dividend is required.
(Div32 x y) -> (DIVW (MOVWreg x) y)
(Div32u x y) -> (DIVWU (MOVWZreg x) y)
(Div16 x y) -> (DIVW (MOVHreg x) (MOVHreg y))
(Div16u x y) -> (DIVWU (MOVHZreg x) (MOVHZreg y))
(Div8 x y) -> (DIVW (MOVBreg x) (MOVBreg y))
(Div8u x y) -> (DIVWU (MOVBZreg x) (MOVBZreg y))
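// Worked example (illustrative, not a rule): for int16(-7) / int16(2) the MOVHreg
// ops first sign-extend both operands to 64 bits, so DIVW computes -7 / 2 = -3,
// truncating toward zero as Go requires.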
(Hmul64 x y) -> (MULHD x y)
(Hmul64u x y) -> (MULHDU x y)
(Hmul32 x y) -> (SRDconst [32] (MULLD (MOVWreg x) (MOVWreg y)))
(Hmul32u x y) -> (SRDconst [32] (MULLD (MOVWZreg x) (MOVWZreg y)))
(Hmul16 x y) -> (SRDconst [16] (MULLW (MOVHreg x) (MOVHreg y)))
(Hmul16u x y) -> (SRDconst [16] (MULLW (MOVHZreg x) (MOVHZreg y)))
(Hmul8 x y) -> (SRDconst [8] (MULLW (MOVBreg x) (MOVBreg y)))
(Hmul8u x y) -> (SRDconst [8] (MULLW (MOVBZreg x) (MOVBZreg y)))
(Mod64 x y) -> (MODD x y)
(Mod64u x y) -> (MODDU x y)
// MODW/MODWU have a 64-bit dividend and a 32-bit divisor,
// so a sign/zero extension of the dividend is required.
(Mod32 x y) -> (MODW (MOVWreg x) y)
(Mod32u x y) -> (MODWU (MOVWZreg x) y)
(Mod16 x y) -> (MODW (MOVHreg x) (MOVHreg y))
(Mod16u x y) -> (MODWU (MOVHZreg x) (MOVHZreg y))
(Mod8 x y) -> (MODW (MOVBreg x) (MOVBreg y))
(Mod8u x y) -> (MODWU (MOVBZreg x) (MOVBZreg y))
(Avg64u <t> x y) -> (ADD (ADD <t> (SRDconst <t> x [1]) (SRDconst <t> y [1])) (ANDconst <t> (AND <t> x y) [1]))
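// (The Avg64u expansion above computes x/2 + y/2 + (x&y&1), which equals the
// unsigned average (x+y)/2 without overflowing the intermediate sum.)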
(And64 x y) -> (AND x y)
(And32 x y) -> (ANDW x y)
(And16 x y) -> (ANDW x y)
(And8 x y) -> (ANDW x y)
(Or64 x y) -> (OR x y)
(Or32 x y) -> (ORW x y)
(Or16 x y) -> (ORW x y)
(Or8 x y) -> (ORW x y)
(Xor64 x y) -> (XOR x y)
(Xor32 x y) -> (XORW x y)
(Xor16 x y) -> (XORW x y)
(Xor8 x y) -> (XORW x y)
(Neg64 x) -> (NEG x)
(Neg32 x) -> (NEGW x)
(Neg16 x) -> (NEGW (MOVHreg x))
(Neg8 x) -> (NEGW (MOVBreg x))
(Neg32F x) -> (FNEGS x)
(Neg64F x) -> (FNEG x)
(Com64 x) -> (NOT x)
(Com32 x) -> (NOTW x)
(Com16 x) -> (NOTW x)
(Com8 x) -> (NOTW x)
(NOT x) && true -> (XORconst [-1] x)
(NOTW x) && true -> (XORWconst [-1] x)
// Lowering boolean ops
(AndB x y) -> (ANDW x y)
(OrB x y) -> (ORW x y)
(Not x) -> (XORWconst [1] x)
// Lowering pointer arithmetic
(OffPtr [off] ptr:(SP)) -> (MOVDaddr [off] ptr)
(OffPtr [off] ptr) && is32Bit(off) -> (ADDconst [off] ptr)
(OffPtr [off] ptr) -> (ADD (MOVDconst [off]) ptr)
(Sqrt x) -> (FSQRT x)
// Lowering extension
// Note: we always extend to 64 bits even though some ops don't need that many result bits.
(SignExt8to16 x) -> (MOVBreg x)
(SignExt8to32 x) -> (MOVBreg x)
(SignExt8to64 x) -> (MOVBreg x)
(SignExt16to32 x) -> (MOVHreg x)
(SignExt16to64 x) -> (MOVHreg x)
(SignExt32to64 x) -> (MOVWreg x)
(ZeroExt8to16 x) -> (MOVBZreg x)
(ZeroExt8to32 x) -> (MOVBZreg x)
(ZeroExt8to64 x) -> (MOVBZreg x)
(ZeroExt16to32 x) -> (MOVHZreg x)
(ZeroExt16to64 x) -> (MOVHZreg x)
(ZeroExt32to64 x) -> (MOVWZreg x)
// Lowering truncation
// Because we ignore high parts of registers, truncates are just copies.
(Trunc16to8 x) -> x
(Trunc32to8 x) -> x
(Trunc32to16 x) -> x
(Trunc64to8 x) -> x
(Trunc64to16 x) -> x
(Trunc64to32 x) -> x
// Lowering float <-> int
(Cvt32to32F x) -> (CEFBRA x)
(Cvt32to64F x) -> (CDFBRA x)
(Cvt64to32F x) -> (CEGBRA x)
(Cvt64to64F x) -> (CDGBRA x)
(Cvt32Fto32 x) -> (CFEBRA x)
(Cvt32Fto64 x) -> (CGEBRA x)
(Cvt64Fto32 x) -> (CFDBRA x)
(Cvt64Fto64 x) -> (CGDBRA x)
(Cvt32Fto64F x) -> (LDEBR x)
(Cvt64Fto32F x) -> (LEDBR x)
// Lowering shifts
// Unsigned shifts need to return 0 if shift amount is >= width of shifted value.
// result = (arg << shift) & (shift >= argbits ? 0 : 0xffffffffffffffff)
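// Sketch of how the rules below realize this: CMPUconst compares the shift amount
// against width-1, SUBEcarrymask materializes that comparison as a mask of all ones
// (in-range shift) or all zeros (out-of-range shift), and AND/ANDW applies the mask
// to the raw shift result.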
(Lsh64x64 <t> x y) -> (AND (SLD <t> x y) (SUBEcarrymask <t> (CMPUconst y [63])))
(Lsh64x32 <t> x y) -> (AND (SLD <t> x y) (SUBEcarrymask <t> (CMPWUconst y [63])))
(Lsh64x16 <t> x y) -> (AND (SLD <t> x y) (SUBEcarrymask <t> (CMPWUconst (MOVHZreg y) [63])))
(Lsh64x8 <t> x y) -> (AND (SLD <t> x y) (SUBEcarrymask <t> (CMPWUconst (MOVBZreg y) [63])))
(Lsh32x64 <t> x y) -> (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPUconst y [31])))
(Lsh32x32 <t> x y) -> (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst y [31])))
(Lsh32x16 <t> x y) -> (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVHZreg y) [31])))
(Lsh32x8 <t> x y) -> (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVBZreg y) [31])))
(Lsh16x64 <t> x y) -> (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPUconst y [31])))
(Lsh16x32 <t> x y) -> (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst y [31])))
(Lsh16x16 <t> x y) -> (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVHZreg y) [31])))
(Lsh16x8 <t> x y) -> (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVBZreg y) [31])))
(Lsh8x64 <t> x y) -> (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPUconst y [31])))
(Lsh8x32 <t> x y) -> (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst y [31])))
(Lsh8x16 <t> x y) -> (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVHZreg y) [31])))
(Lsh8x8 <t> x y) -> (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVBZreg y) [31])))
(Lrot64 <t> x [c]) -> (RLLGconst <t> [c&63] x)
(Lrot32 <t> x [c]) -> (RLLconst <t> [c&31] x)
(Rsh64Ux64 <t> x y) -> (AND (SRD <t> x y) (SUBEcarrymask <t> (CMPUconst y [63])))
(Rsh64Ux32 <t> x y) -> (AND (SRD <t> x y) (SUBEcarrymask <t> (CMPWUconst y [63])))
(Rsh64Ux16 <t> x y) -> (AND (SRD <t> x y) (SUBEcarrymask <t> (CMPWUconst (MOVHZreg y) [63])))
(Rsh64Ux8 <t> x y) -> (AND (SRD <t> x y) (SUBEcarrymask <t> (CMPWUconst (MOVBZreg y) [63])))
(Rsh32Ux64 <t> x y) -> (ANDW (SRW <t> x y) (SUBEWcarrymask <t> (CMPUconst y [31])))
(Rsh32Ux32 <t> x y) -> (ANDW (SRW <t> x y) (SUBEWcarrymask <t> (CMPWUconst y [31])))
(Rsh32Ux16 <t> x y) -> (ANDW (SRW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVHZreg y) [31])))
(Rsh32Ux8 <t> x y) -> (ANDW (SRW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVBZreg y) [31])))
(Rsh16Ux64 <t> x y) -> (ANDW (SRW <t> (MOVHZreg x) y) (SUBEWcarrymask <t> (CMPUconst y [15])))
(Rsh16Ux32 <t> x y) -> (ANDW (SRW <t> (MOVHZreg x) y) (SUBEWcarrymask <t> (CMPWUconst y [15])))
(Rsh16Ux16 <t> x y) -> (ANDW (SRW <t> (MOVHZreg x) y) (SUBEWcarrymask <t> (CMPWUconst (MOVHZreg y) [15])))
(Rsh16Ux8 <t> x y) -> (ANDW (SRW <t> (MOVHZreg x) y) (SUBEWcarrymask <t> (CMPWUconst (MOVBZreg y) [15])))
(Rsh8Ux64 <t> x y) -> (ANDW (SRW <t> (MOVBZreg x) y) (SUBEWcarrymask <t> (CMPUconst y [7])))
(Rsh8Ux32 <t> x y) -> (ANDW (SRW <t> (MOVBZreg x) y) (SUBEWcarrymask <t> (CMPWUconst y [7])))
(Rsh8Ux16 <t> x y) -> (ANDW (SRW <t> (MOVBZreg x) y) (SUBEWcarrymask <t> (CMPWUconst (MOVHZreg y) [7])))
(Rsh8Ux8 <t> x y) -> (ANDW (SRW <t> (MOVBZreg x) y) (SUBEWcarrymask <t> (CMPWUconst (MOVBZreg y) [7])))
// Signed right shift needs to return 0/-1 if shift amount is >= width of shifted value.
// We implement this by setting the shift value to -1 (all ones) if the shift value is >= width.
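// Sketch: when the amount is out of range, the NOT of the carry mask is all ones, so
// the OR forces the effective shift count to its maximum and the arithmetic shift
// fills the result with copies of the sign bit, giving the required 0 or -1.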
(Rsh64x64 <t> x y) -> (SRAD <t> x (OR <y.Type> y (NOT <y.Type> (SUBEcarrymask <y.Type> (CMPUconst y [63])))))
(Rsh64x32 <t> x y) -> (SRAD <t> x (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst y [63])))))
(Rsh64x16 <t> x y) -> (SRAD <t> x (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVHZreg y) [63])))))
(Rsh64x8 <t> x y) -> (SRAD <t> x (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVBZreg y) [63])))))
(Rsh32x64 <t> x y) -> (SRAW <t> x (OR <y.Type> y (NOT <y.Type> (SUBEcarrymask <y.Type> (CMPUconst y [31])))))
(Rsh32x32 <t> x y) -> (SRAW <t> x (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst y [31])))))
(Rsh32x16 <t> x y) -> (SRAW <t> x (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVHZreg y) [31])))))
(Rsh32x8 <t> x y) -> (SRAW <t> x (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVBZreg y) [31])))))
(Rsh16x64 <t> x y) -> (SRAW <t> (MOVHreg x) (OR <y.Type> y (NOT <y.Type> (SUBEcarrymask <y.Type> (CMPUconst y [15])))))
(Rsh16x32 <t> x y) -> (SRAW <t> (MOVHreg x) (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst y [15])))))
(Rsh16x16 <t> x y) -> (SRAW <t> (MOVHreg x) (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVHZreg y) [15])))))
(Rsh16x8 <t> x y) -> (SRAW <t> (MOVHreg x) (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVBZreg y) [15])))))
(Rsh8x64 <t> x y) -> (SRAW <t> (MOVBreg x) (OR <y.Type> y (NOT <y.Type> (SUBEcarrymask <y.Type> (CMPUconst y [7])))))
(Rsh8x32 <t> x y) -> (SRAW <t> (MOVBreg x) (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst y [7])))))
(Rsh8x16 <t> x y) -> (SRAW <t> (MOVBreg x) (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVHZreg y) [7])))))
(Rsh8x8 <t> x y) -> (SRAW <t> (MOVBreg x) (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVBZreg y) [7])))))
// Lowering comparisons
(Less64 x y) -> (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
(Less32 x y) -> (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
(Less16 x y) -> (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y)))
(Less8 x y) -> (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
(Less64U x y) -> (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
(Less32U x y) -> (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
(Less16U x y) -> (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVHZreg x) (MOVHZreg y)))
(Less8U x y) -> (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVBZreg x) (MOVBZreg y)))
// Use MOVDGTnoinv with reversed operands to dodge the NaN case.
(Less64F x y) -> (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMP y x))
(Less32F x y) -> (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMPS y x))
(Leq64 x y) -> (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
(Leq32 x y) -> (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
(Leq16 x y) -> (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y)))
(Leq8 x y) -> (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
(Leq64U x y) -> (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
(Leq32U x y) -> (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
(Leq16U x y) -> (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVHZreg x) (MOVHZreg y)))
(Leq8U x y) -> (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVBZreg x) (MOVBZreg y)))
// Use MOVDGEnoinv with reversed operands to dodge the NaN case.
(Leq64F x y) -> (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMP y x))
(Leq32F x y) -> (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMPS y x))
(Greater64 x y) -> (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
(Greater32 x y) -> (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
(Greater16 x y) -> (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y)))
(Greater8 x y) -> (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
(Greater64U x y) -> (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
(Greater32U x y) -> (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
(Greater16U x y) -> (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVHZreg x) (MOVHZreg y)))
(Greater8U x y) -> (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVBZreg x) (MOVBZreg y)))
(Greater64F x y) -> (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
(Greater32F x y) -> (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
(Geq64 x y) -> (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
(Geq32 x y) -> (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
(Geq16 x y) -> (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y)))
(Geq8 x y) -> (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
(Geq64U x y) -> (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
(Geq32U x y) -> (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
(Geq16U x y) -> (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVHZreg x) (MOVHZreg y)))
(Geq8U x y) -> (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVBZreg x) (MOVBZreg y)))
(Geq64F x y) -> (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
(Geq32F x y) -> (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
(Eq64 x y) -> (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
(Eq32 x y) -> (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
(Eq16 x y) -> (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y)))
(Eq8 x y) -> (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
(EqB x y) -> (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
(EqPtr x y) -> (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
(Eq64F x y) -> (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
(Eq32F x y) -> (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
(Neq64 x y) -> (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
(Neq32 x y) -> (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
(Neq16 x y) -> (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y)))
(Neq8 x y) -> (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
(NeqB x y) -> (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
(NeqPtr x y) -> (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
(Neq64F x y) -> (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
(Neq32F x y) -> (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
// Lowering loads
(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) -> (MOVDload ptr mem)
(Load <t> ptr mem) && is32BitInt(t) -> (MOVWZload ptr mem)
(Load <t> ptr mem) && is16BitInt(t) -> (MOVHZload ptr mem)
(Load <t> ptr mem) && (t.IsBoolean() || is8BitInt(t)) -> (MOVBZload ptr mem)
(Load <t> ptr mem) && is32BitFloat(t) -> (FMOVSload ptr mem)
(Load <t> ptr mem) && is64BitFloat(t) -> (FMOVDload ptr mem)
// Lowering stores
// These more-specific FP versions of Store pattern should come first.
(Store [8] ptr val mem) && is64BitFloat(val.Type) -> (FMOVDstore ptr val mem)
(Store [4] ptr val mem) && is32BitFloat(val.Type) -> (FMOVSstore ptr val mem)
(Store [8] ptr val mem) -> (MOVDstore ptr val mem)
(Store [4] ptr val mem) -> (MOVWstore ptr val mem)
(Store [2] ptr val mem) -> (MOVHstore ptr val mem)
(Store [1] ptr val mem) -> (MOVBstore ptr val mem)
// Lowering moves
// Load and store for small copies.
(Move [s] _ _ mem) && SizeAndAlign(s).Size() == 0 -> mem
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 1 -> (MOVBstore dst (MOVBZload src mem) mem)
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 2 -> (MOVHstore dst (MOVHZload src mem) mem)
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 4 -> (MOVWstore dst (MOVWZload src mem) mem)
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 8 -> (MOVDstore dst (MOVDload src mem) mem)
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 16 ->
(MOVDstore [8] dst (MOVDload [8] src mem)
(MOVDstore dst (MOVDload src mem) mem))
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 24 ->
(MOVDstore [16] dst (MOVDload [16] src mem)
(MOVDstore [8] dst (MOVDload [8] src mem)
(MOVDstore dst (MOVDload src mem) mem)))
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 3 ->
(MOVBstore [2] dst (MOVBZload [2] src mem)
(MOVHstore dst (MOVHZload src mem) mem))
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 5 ->
(MOVBstore [4] dst (MOVBZload [4] src mem)
(MOVWstore dst (MOVWZload src mem) mem))
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 6 ->
(MOVHstore [4] dst (MOVHZload [4] src mem)
(MOVWstore dst (MOVWZload src mem) mem))
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 7 ->
(MOVBstore [6] dst (MOVBZload [6] src mem)
(MOVHstore [4] dst (MOVHZload [4] src mem)
(MOVWstore dst (MOVWZload src mem) mem)))
// MVC for other moves. Use up to 4 instructions (sizes up to 1024 bytes).
(Move [s] dst src mem) && SizeAndAlign(s).Size() > 0 && SizeAndAlign(s).Size() <= 256 ->
(MVC [makeValAndOff(SizeAndAlign(s).Size(), 0)] dst src mem)
(Move [s] dst src mem) && SizeAndAlign(s).Size() > 256 && SizeAndAlign(s).Size() <= 512 ->
(MVC [makeValAndOff(SizeAndAlign(s).Size()-256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem))
(Move [s] dst src mem) && SizeAndAlign(s).Size() > 512 && SizeAndAlign(s).Size() <= 768 ->
(MVC [makeValAndOff(SizeAndAlign(s).Size()-512, 512)] dst src (MVC [makeValAndOff(256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem)))
(Move [s] dst src mem) && SizeAndAlign(s).Size() > 768 && SizeAndAlign(s).Size() <= 1024 ->
(MVC [makeValAndOff(SizeAndAlign(s).Size()-768, 768)] dst src (MVC [makeValAndOff(256, 512)] dst src (MVC [makeValAndOff(256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem))))
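// Worked example (illustrative, not a rule): a 600-byte move matches the 512-768
// case above and expands to three chained MVCs: 256 bytes at offset 0, 256 bytes at
// offset 256, and the remaining 88 bytes at offset 512 (the innermost MVC runs first).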
// Move more than 1024 bytes using a loop.
(Move [s] dst src mem) && SizeAndAlign(s).Size() > 1024 ->
(LoweredMove [SizeAndAlign(s).Size()%256] dst src (ADDconst <src.Type> src [(SizeAndAlign(s).Size()/256)*256]) mem)
// Lowering Zero instructions
(Zero [s] _ mem) && SizeAndAlign(s).Size() == 0 -> mem
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 1 -> (MOVBstoreconst [0] destptr mem)
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 2 -> (MOVHstoreconst [0] destptr mem)
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 4 -> (MOVWstoreconst [0] destptr mem)
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 8 -> (MOVDstoreconst [0] destptr mem)
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 3 ->
(MOVBstoreconst [makeValAndOff(0,2)] destptr
(MOVHstoreconst [0] destptr mem))
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 5 ->
(MOVBstoreconst [makeValAndOff(0,4)] destptr
(MOVWstoreconst [0] destptr mem))
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 6 ->
(MOVHstoreconst [makeValAndOff(0,4)] destptr
(MOVWstoreconst [0] destptr mem))
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 7 ->
(MOVWstoreconst [makeValAndOff(0,3)] destptr
(MOVWstoreconst [0] destptr mem))
(Zero [s] destptr mem) && SizeAndAlign(s).Size() > 0 && SizeAndAlign(s).Size() <= 1024 ->
(CLEAR [makeValAndOff(SizeAndAlign(s).Size(), 0)] destptr mem)
// Zero more than 1024 bytes using a loop.
(Zero [s] destptr mem) && SizeAndAlign(s).Size() > 1024 ->
(LoweredZero [SizeAndAlign(s).Size()%256] destptr (ADDconst <destptr.Type> destptr [(SizeAndAlign(s).Size()/256)*256]) mem)
// Lowering constants
(Const8 [val]) -> (MOVDconst [val])
(Const16 [val]) -> (MOVDconst [val])
(Const32 [val]) -> (MOVDconst [val])
(Const64 [val]) -> (MOVDconst [val])
(Const32F [val]) -> (FMOVSconst [val])
(Const64F [val]) -> (FMOVDconst [val])
(ConstNil) -> (MOVDconst [0])
(ConstBool [b]) -> (MOVDconst [b])
// Lowering calls
(StaticCall [argwid] {target} mem) -> (CALLstatic [argwid] {target} mem)
(ClosureCall [argwid] entry closure mem) -> (CALLclosure [argwid] entry closure mem)
(DeferCall [argwid] mem) -> (CALLdefer [argwid] mem)
(GoCall [argwid] mem) -> (CALLgo [argwid] mem)
(InterCall [argwid] entry mem) -> (CALLinter [argwid] entry mem)
// Miscellaneous
(Convert <t> x mem) -> (MOVDconvert <t> x mem)
(IsNonNil p) -> (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMPconst p [0]))
(IsInBounds idx len) -> (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPU idx len))
(IsSliceInBounds idx len) -> (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPU idx len))
(NilCheck ptr mem) -> (LoweredNilCheck ptr mem)
(GetG mem) -> (LoweredGetG mem)
(GetClosurePtr) -> (LoweredGetClosurePtr)
(Addr {sym} base) -> (MOVDaddr {sym} base)
(ITab (Load ptr mem)) -> (MOVDload ptr mem)
// block rewrites
(If (MOVDLT (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) -> (LT cmp yes no)
(If (MOVDLE (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) -> (LE cmp yes no)
(If (MOVDGT (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) -> (GT cmp yes no)
(If (MOVDGE (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) -> (GE cmp yes no)
(If (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) -> (EQ cmp yes no)
(If (MOVDNE (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) -> (NE cmp yes no)
// Special case for floating point - LF/LEF not generated.
(If (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) -> (GTF cmp yes no)
(If (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) -> (GEF cmp yes no)
(If cond yes no) -> (NE (TESTB cond) yes no)
// ***************************
// Above: lowering rules
// Below: optimizations
// ***************************
// TODO: Should the optimizations be a separate pass?
// Fold boolean tests into blocks
(NE (TESTB (MOVDLT (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no) -> (LT cmp yes no)
(NE (TESTB (MOVDLE (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no) -> (LE cmp yes no)
(NE (TESTB (MOVDGT (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no) -> (GT cmp yes no)
(NE (TESTB (MOVDGE (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no) -> (GE cmp yes no)
(NE (TESTB (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no) -> (EQ cmp yes no)
(NE (TESTB (MOVDNE (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no) -> (NE cmp yes no)
(NE (TESTB (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no) -> (GTF cmp yes no)
(NE (TESTB (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no) -> (GEF cmp yes no)
// Fold constants into instructions.
(ADD x (MOVDconst [c])) && is32Bit(c) -> (ADDconst [c] x)
(ADD (MOVDconst [c]) x) && is32Bit(c) -> (ADDconst [c] x)
(ADDW x (MOVDconst [c])) -> (ADDWconst [c] x)
(ADDW (MOVDconst [c]) x) -> (ADDWconst [c] x)
(SUB x (MOVDconst [c])) && is32Bit(c) -> (SUBconst x [c])
(SUB (MOVDconst [c]) x) && is32Bit(c) -> (NEG (SUBconst <v.Type> x [c]))
(SUBW x (MOVDconst [c])) -> (SUBWconst x [c])
(SUBW (MOVDconst [c]) x) -> (NEGW (SUBWconst <v.Type> x [c]))
(MULLD x (MOVDconst [c])) && is32Bit(c) -> (MULLDconst [c] x)
(MULLD (MOVDconst [c]) x) && is32Bit(c) -> (MULLDconst [c] x)
(MULLW x (MOVDconst [c])) -> (MULLWconst [c] x)
(MULLW (MOVDconst [c]) x) -> (MULLWconst [c] x)
(AND x (MOVDconst [c])) && is32Bit(c) -> (ANDconst [c] x)
(AND (MOVDconst [c]) x) && is32Bit(c) -> (ANDconst [c] x)
(ANDW x (MOVDconst [c])) -> (ANDWconst [c] x)
(ANDW (MOVDconst [c]) x) -> (ANDWconst [c] x)
(ANDWconst [c] (ANDWconst [d] x)) -> (ANDWconst [c & d] x)
(ANDconst [c] (ANDconst [d] x)) -> (ANDconst [c & d] x)
(OR x (MOVDconst [c])) && is32Bit(c) -> (ORconst [c] x)
(OR (MOVDconst [c]) x) && is32Bit(c) -> (ORconst [c] x)
(ORW x (MOVDconst [c])) -> (ORWconst [c] x)
(ORW (MOVDconst [c]) x) -> (ORWconst [c] x)
(XOR x (MOVDconst [c])) && is32Bit(c) -> (XORconst [c] x)
(XOR (MOVDconst [c]) x) && is32Bit(c) -> (XORconst [c] x)
(XORW x (MOVDconst [c])) -> (XORWconst [c] x)
(XORW (MOVDconst [c]) x) -> (XORWconst [c] x)
(SLD x (MOVDconst [c])) -> (SLDconst [c&63] x)
(SLW x (MOVDconst [c])) -> (SLWconst [c&63] x)
(SRD x (MOVDconst [c])) -> (SRDconst [c&63] x)
(SRW x (MOVDconst [c])) -> (SRWconst [c&63] x)
(SRAD x (MOVDconst [c])) -> (SRADconst [c&63] x)
(SRAW x (MOVDconst [c])) -> (SRAWconst [c&63] x)
(SRAW x (ANDWconst [63] y)) -> (SRAW x y)
(SRAD x (ANDconst [63] y)) -> (SRAD x y)
(SLW x (ANDWconst [63] y)) -> (SLW x y)
(SLD x (ANDconst [63] y)) -> (SLD x y)
(SRW x (ANDWconst [63] y)) -> (SRW x y)
(SRD x (ANDconst [63] y)) -> (SRD x y)
(CMP x (MOVDconst [c])) && is32Bit(c) -> (CMPconst x [c])
(CMP (MOVDconst [c]) x) && is32Bit(c) -> (InvertFlags (CMPconst x [c]))
(CMPW x (MOVDconst [c])) -> (CMPWconst x [c])
(CMPW (MOVDconst [c]) x) -> (InvertFlags (CMPWconst x [c]))
(CMPU x (MOVDconst [c])) && is32Bit(c) -> (CMPUconst x [int64(uint32(c))])
(CMPU (MOVDconst [c]) x) && is32Bit(c) -> (InvertFlags (CMPUconst x [int64(uint32(c))]))
(CMPWU x (MOVDconst [c])) -> (CMPWUconst x [int64(uint32(c))])
(CMPWU (MOVDconst [c]) x) -> (InvertFlags (CMPWUconst x [int64(uint32(c))]))
// Using MOVBZreg/MOVHZreg/MOVWZreg instead of AND with a mask is cheaper.
(ANDconst [0xFF] x) -> (MOVBZreg x)
(ANDconst [0xFFFF] x) -> (MOVHZreg x)
(ANDconst [0xFFFFFFFF] x) -> (MOVWZreg x)
// strength reduction
(MULLDconst [-1] x) -> (NEG x)
(MULLDconst [0] _) -> (MOVDconst [0])
(MULLDconst [1] x) -> x
(MULLDconst [c] x) && isPowerOfTwo(c) -> (SLDconst [log2(c)] x)
(MULLDconst [c] x) && isPowerOfTwo(c+1) && c >= 15 -> (SUB (SLDconst <v.Type> [log2(c+1)] x) x)
(MULLDconst [c] x) && isPowerOfTwo(c-1) && c >= 17 -> (ADD (SLDconst <v.Type> [log2(c-1)] x) x)
(MULLWconst [-1] x) -> (NEGW x)
(MULLWconst [0] _) -> (MOVDconst [0])
(MULLWconst [1] x) -> x
(MULLWconst [c] x) && isPowerOfTwo(c) -> (SLWconst [log2(c)] x)
(MULLWconst [c] x) && isPowerOfTwo(c+1) && c >= 15 -> (SUBW (SLWconst <v.Type> [log2(c+1)] x) x)
(MULLWconst [c] x) && isPowerOfTwo(c-1) && c >= 17 -> (ADDW (SLWconst <v.Type> [log2(c-1)] x) x)
// Fold ADD into MOVDaddr. Odd offsets from SB shouldn't be folded (LARL can't handle them).
(ADDconst [c] (MOVDaddr [d] {s} x)) && ((c+d)&1 == 0) && is32Bit(c+d) -> (MOVDaddr [c+d] {s} x)
(MOVDaddr [c] {s} (ADDconst [d] x)) && ((c+d)&1 == 0) && is32Bit(c+d) -> (MOVDaddr [c+d] {s} x)
(ADDconst [c] (MOVDaddr [d] {s} x)) && x.Op != OpSB && is32Bit(c+d) -> (MOVDaddr [c+d] {s} x)
(MOVDaddr [c] {s} (ADDconst [d] x)) && x.Op != OpSB && is32Bit(c+d) -> (MOVDaddr [c+d] {s} x)
(MOVDaddr [c] {s} (ADD x y)) && x.Op != OpSB && y.Op != OpSB -> (MOVDaddridx [c] {s} x y)
(ADD x (MOVDaddr [c] {s} y)) && x.Op != OpSB && y.Op != OpSB -> (MOVDaddridx [c] {s} x y)
(ADD (MOVDaddr [c] {s} x) y) && x.Op != OpSB && y.Op != OpSB -> (MOVDaddridx [c] {s} x y)
// fold ADDconst into MOVDaddrx
(ADDconst [c] (MOVDaddridx [d] {s} x y)) && is32Bit(c+d) -> (MOVDaddridx [c+d] {s} x y)
(MOVDaddridx [c] {s} (ADDconst [d] x) y) && is32Bit(c+d) && x.Op != OpSB -> (MOVDaddridx [c+d] {s} x y)
(MOVDaddridx [c] {s} x (ADDconst [d] y)) && is32Bit(c+d) && y.Op != OpSB -> (MOVDaddridx [c+d] {s} x y)
// reverse ordering of compare instruction
(MOVDLT x y (InvertFlags cmp)) -> (MOVDGT x y cmp)
(MOVDGT x y (InvertFlags cmp)) -> (MOVDLT x y cmp)
(MOVDLE x y (InvertFlags cmp)) -> (MOVDGE x y cmp)
(MOVDGE x y (InvertFlags cmp)) -> (MOVDLE x y cmp)
(MOVDEQ x y (InvertFlags cmp)) -> (MOVDEQ x y cmp)
(MOVDNE x y (InvertFlags cmp)) -> (MOVDNE x y cmp)
// don't extend after proper load
(MOVBreg x:(MOVBload _ _)) -> x
(MOVBZreg x:(MOVBZload _ _)) -> x
(MOVHreg x:(MOVBload _ _)) -> x
(MOVHreg x:(MOVBZload _ _)) -> x
(MOVHreg x:(MOVHload _ _)) -> x
(MOVHZreg x:(MOVBZload _ _)) -> x
(MOVHZreg x:(MOVHZload _ _)) -> x
(MOVWreg x:(MOVBload _ _)) -> x
(MOVWreg x:(MOVBZload _ _)) -> x
(MOVWreg x:(MOVHload _ _)) -> x
(MOVWreg x:(MOVHZload _ _)) -> x
(MOVWreg x:(MOVWload _ _)) -> x
(MOVWZreg x:(MOVBZload _ _)) -> x
(MOVWZreg x:(MOVHZload _ _)) -> x
(MOVWZreg x:(MOVWZload _ _)) -> x
// fold double extensions
(MOVBreg x:(MOVBreg _)) -> x
(MOVBZreg x:(MOVBZreg _)) -> x
(MOVHreg x:(MOVBreg _)) -> x
(MOVHreg x:(MOVBZreg _)) -> x
(MOVHreg x:(MOVHreg _)) -> x
(MOVHZreg x:(MOVBZreg _)) -> x
(MOVHZreg x:(MOVHZreg _)) -> x
(MOVWreg x:(MOVBreg _)) -> x
(MOVWreg x:(MOVBZreg _)) -> x
(MOVWreg x:(MOVHreg _)) -> x
(MOVWreg x:(MOVHZreg _)) -> x
(MOVWreg x:(MOVWreg _)) -> x
(MOVWZreg x:(MOVBZreg _)) -> x
(MOVWZreg x:(MOVHZreg _)) -> x
(MOVWZreg x:(MOVWZreg _)) -> x
// sign extended loads
// Note: The combined instruction must end up in the same block
// as the original load. If not, we end up making a value with
// memory type live in two different blocks, which can lead to
// multiple memory values alive simultaneously.
// Make sure we don't combine these ops if the load has another use.
// This prevents a single load from being split into multiple loads
// which then might return different values. See test/atomicload.go.
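// A rough illustration of the hazard (in the spirit of test/atomicload.go, not its
// exact contents): for Go code along the lines of
//	v := shared          // one MOVBZload of a concurrently written byte
//	a := int8(v)         // sign-extending use
//	b := uint64(v)       // zero-extending use
// both uses must observe the same loaded byte; if each extension were given its own
// load, a concurrent store could make a and b disagree, hence the x.Uses == 1 guard.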
(MOVBreg x:(MOVBZload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
(MOVBZreg x:(MOVBZload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBZload <v.Type> [off] {sym} ptr mem)
(MOVHreg x:(MOVHZload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVHload <v.Type> [off] {sym} ptr mem)
(MOVHZreg x:(MOVHZload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVHZload <v.Type> [off] {sym} ptr mem)
(MOVWreg x:(MOVWZload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
(MOVWZreg x:(MOVWZload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWZload <v.Type> [off] {sym} ptr mem)
(MOVBZreg x:(MOVBZloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBZloadidx <v.Type> [off] {sym} ptr idx mem)
(MOVHZreg x:(MOVHZloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVHZloadidx <v.Type> [off] {sym} ptr idx mem)
(MOVWZreg x:(MOVWZloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWZloadidx <v.Type> [off] {sym} ptr idx mem)
// replace load from same location as preceding store with copy
(MOVBZload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
(MOVHZload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
(MOVWZload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
(MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
// Fold extensions and ANDs together.
(MOVBZreg (ANDWconst [c] x)) -> (ANDconst [c & 0xff] x)
(MOVHZreg (ANDWconst [c] x)) -> (ANDconst [c & 0xffff] x)
(MOVWZreg (ANDWconst [c] x)) -> (ANDconst [c & 0xffffffff] x)
(MOVBreg (ANDWconst [c] x)) && c & 0x80 == 0 -> (ANDconst [c & 0x7f] x)
(MOVHreg (ANDWconst [c] x)) && c & 0x8000 == 0 -> (ANDconst [c & 0x7fff] x)
(MOVWreg (ANDWconst [c] x)) && c & 0x80000000 == 0 -> (ANDconst [c & 0x7fffffff] x)
(MOVBZreg (ANDconst [c] x)) -> (ANDconst [c & 0xff] x)
(MOVHZreg (ANDconst [c] x)) -> (ANDconst [c & 0xffff] x)
(MOVWZreg (ANDconst [c] x)) -> (ANDconst [c & 0xffffffff] x)
(MOVBreg (ANDconst [c] x)) && c & 0x80 == 0 -> (ANDconst [c & 0x7f] x)
(MOVHreg (ANDconst [c] x)) && c & 0x8000 == 0 -> (ANDconst [c & 0x7fff] x)
(MOVWreg (ANDconst [c] x)) && c & 0x80000000 == 0 -> (ANDconst [c & 0x7fffffff] x)
// Don't extend before storing
(MOVWstore [off] {sym} ptr (MOVWreg x) mem) -> (MOVWstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVBreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWZreg x) mem) -> (MOVWstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHZreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVBZreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
// Fold constants into memory operations.
// Note that this is not always a good idea because if not all the uses of
// the ADDconst get eliminated, we still have to compute the ADDconst and we now
// have potentially two live values (ptr and (ADDconst [off] ptr)) instead of one.
// Nevertheless, let's do it!
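// Sketch of the trade-off on hypothetical SSA values (illustration only):
//	v1 = ADDconst [8] ptr
//	v2 = MOVDload [0] {sym} v1 mem   // folds to MOVDload [8] {sym} ptr mem
//	v3 = ... v1 ...                  // some unrelated use of v1
// If v3 exists, v1 must still be computed and both ptr and v1 may be live at once;
// if v2 were v1's only use, folding would let v1 die and only ptr stays live.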
(MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVDload [off1+off2] {sym} ptr mem)
(MOVWZload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVWZload [off1+off2] {sym} ptr mem)
(MOVHZload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVHZload [off1+off2] {sym} ptr mem)
(MOVBZload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVBZload [off1+off2] {sym} ptr mem)
(FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2) -> (FMOVSload [off1+off2] {sym} ptr mem)
(FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2) -> (FMOVDload [off1+off2] {sym} ptr mem)
(MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVDstore [off1+off2] {sym} ptr val mem)
(MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVWstore [off1+off2] {sym} ptr val mem)
(MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVHstore [off1+off2] {sym} ptr val mem)
(MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVBstore [off1+off2] {sym} ptr val mem)
(FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (FMOVSstore [off1+off2] {sym} ptr val mem)
(FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (FMOVDstore [off1+off2] {sym} ptr val mem)
// Fold constants into stores.
(MOVDstore [off] {sym} ptr (MOVDconst [c]) mem) && validValAndOff(c,off) && int64(int16(c)) == c && ptr.Op != OpSB ->
(MOVDstoreconst [makeValAndOff(c,off)] {sym} ptr mem)
(MOVWstore [off] {sym} ptr (MOVDconst [c]) mem) && validOff(off) && int64(int16(c)) == c && ptr.Op != OpSB ->
(MOVWstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem)
(MOVHstore [off] {sym} ptr (MOVDconst [c]) mem) && validOff(off) && ptr.Op != OpSB ->
(MOVHstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem)
(MOVBstore [off] {sym} ptr (MOVDconst [c]) mem) && validOff(off) && ptr.Op != OpSB ->
(MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem)
// Fold address offsets into constant stores.
(MOVDstoreconst [sc] {s} (ADDconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
(MOVDstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
(MOVWstoreconst [sc] {s} (ADDconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
(MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
(MOVHstoreconst [sc] {s} (ADDconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
(MOVHstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
(MOVBstoreconst [sc] {s} (ADDconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
(MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
// We need to fold MOVDaddr into the MOVx ops so that the live variable analysis knows
// what variables are being read/written by the ops.
(MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVWZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVWZload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVHZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVHZload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVBZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVBZload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(FMOVSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(FMOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVHload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVHstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVDstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
(MOVDstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
(MOVWstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
(MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
(MOVHstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
(MOVHstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
(MOVBstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
(MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
// generating indexed loads and stores
(MOVBZload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVBZloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVHZload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVHZloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVWZload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVWZloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVDload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVDloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
(FMOVSload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(FMOVSloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
(FMOVDload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(FMOVDloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVBstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVBstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVHstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVHstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVWstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVWstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVDstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVDstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
(FMOVSstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(FMOVSstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
(FMOVDstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(FMOVDstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
(MOVBZload [off] {sym} (ADD ptr idx) mem) && ptr.Op != OpSB -> (MOVBZloadidx [off] {sym} ptr idx mem)
(MOVHZload [off] {sym} (ADD ptr idx) mem) && ptr.Op != OpSB -> (MOVHZloadidx [off] {sym} ptr idx mem)
(MOVWZload [off] {sym} (ADD ptr idx) mem) && ptr.Op != OpSB -> (MOVWZloadidx [off] {sym} ptr idx mem)
(MOVDload [off] {sym} (ADD ptr idx) mem) && ptr.Op != OpSB -> (MOVDloadidx [off] {sym} ptr idx mem)
(FMOVSload [off] {sym} (ADD ptr idx) mem) && ptr.Op != OpSB -> (FMOVSloadidx [off] {sym} ptr idx mem)
(FMOVDload [off] {sym} (ADD ptr idx) mem) && ptr.Op != OpSB -> (FMOVDloadidx [off] {sym} ptr idx mem)
(MOVBstore [off] {sym} (ADD ptr idx) val mem) && ptr.Op != OpSB -> (MOVBstoreidx [off] {sym} ptr idx val mem)
(MOVHstore [off] {sym} (ADD ptr idx) val mem) && ptr.Op != OpSB -> (MOVHstoreidx [off] {sym} ptr idx val mem)
(MOVWstore [off] {sym} (ADD ptr idx) val mem) && ptr.Op != OpSB -> (MOVWstoreidx [off] {sym} ptr idx val mem)
(MOVDstore [off] {sym} (ADD ptr idx) val mem) && ptr.Op != OpSB -> (MOVDstoreidx [off] {sym} ptr idx val mem)
(FMOVSstore [off] {sym} (ADD ptr idx) val mem) && ptr.Op != OpSB -> (FMOVSstoreidx [off] {sym} ptr idx val mem)
(FMOVDstore [off] {sym} (ADD ptr idx) val mem) && ptr.Op != OpSB -> (FMOVDstoreidx [off] {sym} ptr idx val mem)
// combine ADD into indexed loads and stores
(MOVBZloadidx [c] {sym} (ADDconst [d] ptr) idx mem) -> (MOVBZloadidx [c+d] {sym} ptr idx mem)
(MOVHZloadidx [c] {sym} (ADDconst [d] ptr) idx mem) -> (MOVHZloadidx [c+d] {sym} ptr idx mem)
(MOVWZloadidx [c] {sym} (ADDconst [d] ptr) idx mem) -> (MOVWZloadidx [c+d] {sym} ptr idx mem)
(MOVDloadidx [c] {sym} (ADDconst [d] ptr) idx mem) -> (MOVDloadidx [c+d] {sym} ptr idx mem)
(FMOVSloadidx [c] {sym} (ADDconst [d] ptr) idx mem) -> (FMOVSloadidx [c+d] {sym} ptr idx mem)
(FMOVDloadidx [c] {sym} (ADDconst [d] ptr) idx mem) -> (FMOVDloadidx [c+d] {sym} ptr idx mem)
(MOVBstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) -> (MOVBstoreidx [c+d] {sym} ptr idx val mem)
(MOVHstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) -> (MOVHstoreidx [c+d] {sym} ptr idx val mem)
(MOVWstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) -> (MOVWstoreidx [c+d] {sym} ptr idx val mem)
(MOVDstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) -> (MOVDstoreidx [c+d] {sym} ptr idx val mem)
(FMOVSstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) -> (FMOVSstoreidx [c+d] {sym} ptr idx val mem)
(FMOVDstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) -> (FMOVDstoreidx [c+d] {sym} ptr idx val mem)
(MOVBZloadidx [c] {sym} ptr (ADDconst [d] idx) mem) -> (MOVBZloadidx [c+d] {sym} ptr idx mem)
(MOVHZloadidx [c] {sym} ptr (ADDconst [d] idx) mem) -> (MOVHZloadidx [c+d] {sym} ptr idx mem)
(MOVWZloadidx [c] {sym} ptr (ADDconst [d] idx) mem) -> (MOVWZloadidx [c+d] {sym} ptr idx mem)
(MOVDloadidx [c] {sym} ptr (ADDconst [d] idx) mem) -> (MOVDloadidx [c+d] {sym} ptr idx mem)
(FMOVSloadidx [c] {sym} ptr (ADDconst [d] idx) mem) -> (FMOVSloadidx [c+d] {sym} ptr idx mem)
(FMOVDloadidx [c] {sym} ptr (ADDconst [d] idx) mem) -> (FMOVDloadidx [c+d] {sym} ptr idx mem)
(MOVBstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) -> (MOVBstoreidx [c+d] {sym} ptr idx val mem)
(MOVHstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) -> (MOVHstoreidx [c+d] {sym} ptr idx val mem)
(MOVWstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) -> (MOVWstoreidx [c+d] {sym} ptr idx val mem)
(MOVDstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) -> (MOVDstoreidx [c+d] {sym} ptr idx val mem)
(FMOVSstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) -> (FMOVSstoreidx [c+d] {sym} ptr idx val mem)
(FMOVDstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) -> (FMOVDstoreidx [c+d] {sym} ptr idx val mem)
// fold MOVDaddrs together
(MOVDaddr [off1] {sym1} (MOVDaddr [off2] {sym2} x)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVDaddr [off1+off2] {mergeSym(sym1,sym2)} x)
// MOVDaddr into MOVDaddridx
(MOVDaddridx [off1] {sym1} (MOVDaddr [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
(MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y)
(MOVDaddridx [off1] {sym1} x (MOVDaddr [off2] {sym2} y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB ->
(MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y)
// MOVDaddridx into MOVDaddr
(MOVDaddr [off1] {sym1} (MOVDaddridx [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y)
// Absorb InvertFlags into branches.
(LT (InvertFlags cmp) yes no) -> (GT cmp yes no)
(GT (InvertFlags cmp) yes no) -> (LT cmp yes no)
(LE (InvertFlags cmp) yes no) -> (GE cmp yes no)
(GE (InvertFlags cmp) yes no) -> (LE cmp yes no)
(EQ (InvertFlags cmp) yes no) -> (EQ cmp yes no)
(NE (InvertFlags cmp) yes no) -> (NE cmp yes no)
// Constant comparisons.
(CMPconst (MOVDconst [x]) [y]) && x==y -> (FlagEQ)
(CMPconst (MOVDconst [x]) [y]) && x<y -> (FlagLT)
(CMPconst (MOVDconst [x]) [y]) && x>y -> (FlagGT)
(CMPUconst (MOVDconst [x]) [y]) && uint64(x)==uint64(y) -> (FlagEQ)
(CMPUconst (MOVDconst [x]) [y]) && uint64(x)<uint64(y) -> (FlagLT)
(CMPUconst (MOVDconst [x]) [y]) && uint64(x)>uint64(y) -> (FlagGT)
(CMPWconst (MOVDconst [x]) [y]) && int32(x)==int32(y) -> (FlagEQ)
(CMPWconst (MOVDconst [x]) [y]) && int32(x)<int32(y) -> (FlagLT)
(CMPWconst (MOVDconst [x]) [y]) && int32(x)>int32(y) -> (FlagGT)
(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)==uint32(y) -> (FlagEQ)
(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)<uint32(y) -> (FlagLT)
(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)>uint32(y) -> (FlagGT)
// Other known comparisons.
(CMPconst (MOVBZreg _) [c]) && 0xFF < c -> (FlagLT)
(CMPconst (MOVHZreg _) [c]) && 0xFFFF < c -> (FlagLT)
(CMPconst (MOVWZreg _) [c]) && 0xFFFFFFFF < c -> (FlagLT)
(CMPWconst (SRWconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n) -> (FlagLT)
(CMPconst (SRDconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n) -> (FlagLT)
(CMPconst (ANDconst _ [m]) [n]) && 0 <= m && m < n -> (FlagLT)
(CMPWconst (ANDWconst _ [m]) [n]) && 0 <= int32(m) && int32(m) < int32(n) -> (FlagLT)
// Absorb flag constants into SBB ops.
(SUBEcarrymask (FlagEQ)) -> (MOVDconst [-1])
(SUBEcarrymask (FlagLT)) -> (MOVDconst [-1])
(SUBEcarrymask (FlagGT)) -> (MOVDconst [0])
(SUBEWcarrymask (FlagEQ)) -> (MOVDconst [-1])
(SUBEWcarrymask (FlagLT)) -> (MOVDconst [-1])
(SUBEWcarrymask (FlagGT)) -> (MOVDconst [0])
// Absorb flag constants into branches.
(EQ (FlagEQ) yes no) -> (First nil yes no)
(EQ (FlagLT) yes no) -> (First nil no yes)
(EQ (FlagGT) yes no) -> (First nil no yes)
(NE (FlagEQ) yes no) -> (First nil no yes)
(NE (FlagLT) yes no) -> (First nil yes no)
(NE (FlagGT) yes no) -> (First nil yes no)
(LT (FlagEQ) yes no) -> (First nil no yes)
(LT (FlagLT) yes no) -> (First nil yes no)
(LT (FlagGT) yes no) -> (First nil no yes)
(LE (FlagEQ) yes no) -> (First nil yes no)
(LE (FlagLT) yes no) -> (First nil yes no)
(LE (FlagGT) yes no) -> (First nil no yes)
(GT (FlagEQ) yes no) -> (First nil no yes)
(GT (FlagLT) yes no) -> (First nil no yes)
(GT (FlagGT) yes no) -> (First nil yes no)
(GE (FlagEQ) yes no) -> (First nil yes no)
(GE (FlagLT) yes no) -> (First nil no yes)
(GE (FlagGT) yes no) -> (First nil yes no)
// Absorb flag constants into SETxx ops.
(MOVDEQ _ x (FlagEQ)) -> x
(MOVDEQ y _ (FlagLT)) -> y
(MOVDEQ y _ (FlagGT)) -> y
(MOVDNE _ y (FlagEQ)) -> y
(MOVDNE x _ (FlagLT)) -> x
(MOVDNE x _ (FlagGT)) -> x
(MOVDLT y _ (FlagEQ)) -> y
(MOVDLT _ x (FlagLT)) -> x
(MOVDLT y _ (FlagGT)) -> y
(MOVDLE _ x (FlagEQ)) -> x
(MOVDLE _ x (FlagLT)) -> x
(MOVDLE y _ (FlagGT)) -> y
(MOVDGT y _ (FlagEQ)) -> y
(MOVDGT y _ (FlagLT)) -> y
(MOVDGT _ x (FlagGT)) -> x
(MOVDGE _ x (FlagEQ)) -> x
(MOVDGE y _ (FlagLT)) -> y
(MOVDGE _ x (FlagGT)) -> x
// Remove redundant *const ops
(ADDconst [0] x) -> x
(ADDWconst [c] x) && int32(c)==0 -> x
(SUBconst [0] x) -> x
(SUBWconst [c] x) && int32(c) == 0 -> x
(ANDconst [0] _) -> (MOVDconst [0])
(ANDWconst [c] _) && int32(c)==0 -> (MOVDconst [0])
(ANDconst [-1] x) -> x
(ANDWconst [c] x) && int32(c)==-1 -> x
(ORconst [0] x) -> x
(ORWconst [c] x) && int32(c)==0 -> x
(ORconst [-1] _) -> (MOVDconst [-1])
(ORWconst [c] _) && int32(c)==-1 -> (MOVDconst [-1])
(XORconst [0] x) -> x
(XORWconst [c] x) && int32(c)==0 -> x
// Convert constant subtracts to constant adds.
(SUBconst [c] x) && c != -(1<<31) -> (ADDconst [-c] x)
(SUBWconst [c] x) -> (ADDWconst [int64(int32(-c))] x)
// generic constant folding
// TODO: more of this
(ADDconst [c] (MOVDconst [d])) -> (MOVDconst [c+d])
(ADDWconst [c] (MOVDconst [d])) -> (MOVDconst [int64(int32(c+d))])
(ADDconst [c] (ADDconst [d] x)) && is32Bit(c+d) -> (ADDconst [c+d] x)
(ADDWconst [c] (ADDWconst [d] x)) -> (ADDWconst [int64(int32(c+d))] x)
(SUBconst (MOVDconst [d]) [c]) -> (MOVDconst [d-c])
(SUBconst (SUBconst x [d]) [c]) && is32Bit(-c-d) -> (ADDconst [-c-d] x)
(SRADconst [c] (MOVDconst [d])) -> (MOVDconst [d>>uint64(c)])
(SRAWconst [c] (MOVDconst [d])) -> (MOVDconst [d>>uint64(c)])
(NEG (MOVDconst [c])) -> (MOVDconst [-c])
(NEGW (MOVDconst [c])) -> (MOVDconst [int64(int32(-c))])
(MULLDconst [c] (MOVDconst [d])) -> (MOVDconst [c*d])
(MULLWconst [c] (MOVDconst [d])) -> (MOVDconst [int64(int32(c*d))])
(ANDconst [c] (MOVDconst [d])) -> (MOVDconst [c&d])
(ANDWconst [c] (MOVDconst [d])) -> (MOVDconst [c&d])
(ORconst [c] (MOVDconst [d])) -> (MOVDconst [c|d])
(ORWconst [c] (MOVDconst [d])) -> (MOVDconst [c|d])
(XORconst [c] (MOVDconst [d])) -> (MOVDconst [c^d])
(XORWconst [c] (MOVDconst [d])) -> (MOVDconst [c^d])
(NOT (MOVDconst [c])) -> (MOVDconst [^c])
(NOTW (MOVDconst [c])) -> (MOVDconst [^c])
// generic simplifications
// TODO: more of this
(ADD x (NEG y)) -> (SUB x y)
(ADDW x (NEGW y)) -> (SUBW x y)
(SUB x x) -> (MOVDconst [0])
(SUBW x x) -> (MOVDconst [0])
(AND x x) -> x
(ANDW x x) -> x
(OR x x) -> x
(ORW x x) -> x
(XOR x x) -> (MOVDconst [0])
(XORW x x) -> (MOVDconst [0])
// Combine constant stores into larger (unaligned) stores.
// This doesn't work for global data (based on SB)
// because STGRL doesn't support unaligned addresses.
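// Worked example for the first rule below: storing constant 0x11 at p+0 and 0x22 at
// p+1 merges into a single halfword store of 0x1122 at p+0. s390x is big-endian, so
// the byte at the lower offset (a) becomes the high byte of the combined value,
// hence ValAndOff(c).Val()&0xff | ValAndOff(a).Val()<<8.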
(MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
&& p.Op != OpSB
&& x.Uses == 1
&& ValAndOff(a).Off() + 1 == ValAndOff(c).Off()
&& clobber(x)
-> (MOVHstoreconst [makeValAndOff(ValAndOff(c).Val()&0xff | ValAndOff(a).Val()<<8, ValAndOff(a).Off())] {s} p mem)
(MOVHstoreconst [c] {s} p x:(MOVHstoreconst [a] {s} p mem))
&& p.Op != OpSB
&& x.Uses == 1
&& ValAndOff(a).Off() + 2 == ValAndOff(c).Off()
&& clobber(x)
-> (MOVWstoreconst [makeValAndOff(ValAndOff(c).Val()&0xffff | ValAndOff(a).Val()<<16, ValAndOff(a).Off())] {s} p mem)
(MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
&& p.Op != OpSB
&& x.Uses == 1
&& ValAndOff(a).Off() + 4 == ValAndOff(c).Off()
&& clobber(x)
-> (MOVDstore [ValAndOff(a).Off()] {s} p (MOVDconst [ValAndOff(c).Val()&0xffffffff | ValAndOff(a).Val()<<32]) mem)
// Combine stores into larger (unaligned) stores.
// This doesn't work for global data (based on SB)
// because STGRL doesn't support unaligned addresses.
(MOVBstore [i] {s} p w x:(MOVBstore [i-1] {s} p (SRDconst [8] w) mem))
&& p.Op != OpSB
&& x.Uses == 1
&& clobber(x)
-> (MOVHstore [i-1] {s} p w mem)
(MOVBstore [i] {s} p w0:(SRDconst [j] w) x:(MOVBstore [i-1] {s} p (SRDconst [j+8] w) mem))
&& p.Op != OpSB
&& x.Uses == 1
&& clobber(x)
-> (MOVHstore [i-1] {s} p w0 mem)
(MOVHstore [i] {s} p w x:(MOVHstore [i-2] {s} p (SRDconst [16] w) mem))
&& p.Op != OpSB
&& x.Uses == 1
&& clobber(x)
-> (MOVWstore [i-2] {s} p w mem)
(MOVHstore [i] {s} p w0:(SRDconst [j] w) x:(MOVHstore [i-2] {s} p (SRDconst [j+16] w) mem))
&& p.Op != OpSB
&& x.Uses == 1
&& clobber(x)
-> (MOVWstore [i-2] {s} p w0 mem)
(MOVWstore [i] {s} p (SRDconst [32] w) x:(MOVWstore [i-4] {s} p w mem))
&& p.Op != OpSB
&& x.Uses == 1
&& clobber(x)
-> (MOVDstore [i-4] {s} p w mem)
(MOVWstore [i] {s} p w0:(SRDconst [j] w) x:(MOVWstore [i-4] {s} p (SRDconst [j+32] w) mem))
&& p.Op != OpSB
&& x.Uses == 1
&& clobber(x)
-> (MOVDstore [i-4] {s} p w0 mem)
(MOVBstoreidx [i] {s} p idx w x:(MOVBstoreidx [i-1] {s} p idx (SRDconst [8] w) mem))
&& p.Op != OpSB
&& x.Uses == 1
&& clobber(x)
-> (MOVHstoreidx [i-1] {s} p idx w mem)
(MOVBstoreidx [i] {s} p idx w0:(SRDconst [j] w) x:(MOVBstoreidx [i-1] {s} p idx (SRDconst [j+8] w) mem))
&& p.Op != OpSB
&& x.Uses == 1
&& clobber(x)
-> (MOVHstoreidx [i-1] {s} p idx w0 mem)
(MOVHstoreidx [i] {s} p idx w x:(MOVHstoreidx [i-2] {s} p idx (SRDconst [16] w) mem))
&& p.Op != OpSB
&& x.Uses == 1
&& clobber(x)
-> (MOVWstoreidx [i-2] {s} p idx w mem)
(MOVHstoreidx [i] {s} p idx w0:(SRDconst [j] w) x:(MOVHstoreidx [i-2] {s} p idx (SRDconst [j+16] w) mem))
&& p.Op != OpSB
&& x.Uses == 1
&& clobber(x)
-> (MOVWstoreidx [i-2] {s} p idx w0 mem)
(MOVWstoreidx [i] {s} p idx w x:(MOVWstoreidx [i-4] {s} p idx (SRDconst [32] w) mem))
&& p.Op != OpSB
&& x.Uses == 1
&& clobber(x)
-> (MOVDstoreidx [i-4] {s} p idx w mem)
(MOVWstoreidx [i] {s} p idx w0:(SRDconst [j] w) x:(MOVWstoreidx [i-4] {s} p idx (SRDconst [j+32] w) mem))
&& p.Op != OpSB
&& x.Uses == 1
&& clobber(x)
-> (MOVDstoreidx [i-4] {s} p idx w0 mem)
// Combining byte loads into larger (unaligned) loads.
// Little endian loads.
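// For reference, the Go-level pattern these rules target (a sketch; the match is on
// the SSA form, in the style of encoding/binary.LittleEndian.Uint32):
//	x := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
// On big-endian s390x this becomes one byte-reversed load (MOVWBRload, then
// zero-extended) instead of four byte loads plus shifts and ORs.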
// b[0] | b[1]<<8 -> load 16-bit, reverse bytes
(ORW x0:(MOVBZload [i] {s} p mem)
s0:(SLWconst [8] x1:(MOVBZload [i+1] {s} p mem)))
&& p.Op != OpSB
&& x0.Uses == 1
&& x1.Uses == 1
&& s0.Uses == 1
&& mergePoint(b,x0,x1) != nil
&& clobber(x0)
&& clobber(x1)
&& clobber(s0)
-> @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRload [i] {s} p mem))
// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 -> load 32-bit, reverse bytes
(ORW o0:(ORW o1:(ORW
x0:(MOVBZload [i] {s} p mem)
s0:(SLWconst [8] x1:(MOVBZload [i+1] {s} p mem)))
s1:(SLWconst [16] x2:(MOVBZload [i+2] {s} p mem)))
s2:(SLWconst [24] x3:(MOVBZload [i+3] {s} p mem)))
&& p.Op != OpSB
&& x0.Uses == 1
&& x1.Uses == 1
&& x2.Uses == 1
&& x3.Uses == 1
&& s0.Uses == 1
&& s1.Uses == 1
&& s2.Uses == 1
&& o0.Uses == 1
&& o1.Uses == 1
&& mergePoint(b,x0,x1,x2,x3) != nil
&& clobber(x0)
&& clobber(x1)
&& clobber(x2)
&& clobber(x3)
&& clobber(s0)
&& clobber(s1)
&& clobber(s2)
&& clobber(o0)
&& clobber(o1)
-> @mergePoint(b,x0,x1,x2,x3) (MOVWZreg (MOVWBRload [i] {s} p mem))
// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 | b[4]<<32 | b[5]<<40 | b[6]<<48 | b[7]<<56 -> load 64-bit, reverse bytes
(OR o0:(OR o1:(OR o2:(OR o3:(OR o4:(OR o5:(OR
x0:(MOVBZload [i] {s} p mem)
s0:(SLDconst [8] x1:(MOVBZload [i+1] {s} p mem)))
s1:(SLDconst [16] x2:(MOVBZload [i+2] {s} p mem)))
s2:(SLDconst [24] x3:(MOVBZload [i+3] {s} p mem)))
s3:(SLDconst [32] x4:(MOVBZload [i+4] {s} p mem)))
s4:(SLDconst [40] x5:(MOVBZload [i+5] {s} p mem)))
s5:(SLDconst [48] x6:(MOVBZload [i+6] {s} p mem)))
s6:(SLDconst [56] x7:(MOVBZload [i+7] {s} p mem)))
&& p.Op != OpSB
&& x0.Uses == 1
&& x1.Uses == 1
&& x2.Uses == 1
&& x3.Uses == 1
&& x4.Uses == 1
&& x5.Uses == 1
&& x6.Uses == 1
&& x7.Uses == 1
&& s0.Uses == 1
&& s1.Uses == 1
&& s2.Uses == 1
&& s3.Uses == 1
&& s4.Uses == 1
&& s5.Uses == 1
&& s6.Uses == 1
&& o0.Uses == 1
&& o1.Uses == 1
&& o2.Uses == 1
&& o3.Uses == 1
&& o4.Uses == 1
&& o5.Uses == 1
&& mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
&& clobber(x0)
&& clobber(x1)
&& clobber(x2)
&& clobber(x3)
&& clobber(x4)
&& clobber(x5)
&& clobber(x6)
&& clobber(x7)
&& clobber(s0)
&& clobber(s1)
&& clobber(s2)
&& clobber(s3)
&& clobber(s4)
&& clobber(s5)
&& clobber(s6)
&& clobber(o0)
&& clobber(o1)
&& clobber(o2)
&& clobber(o3)
&& clobber(o4)
&& clobber(o5)
-> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDBRload [i] {s} p mem)
// b[0] | b[1]<<8 -> load 16-bit, reverse bytes
(ORW x0:(MOVBZloadidx [i] {s} p idx mem)
s0:(SLWconst [8] x1:(MOVBZloadidx [i+1] {s} p idx mem)))
&& p.Op != OpSB
&& x0.Uses == 1
&& x1.Uses == 1
&& s0.Uses == 1
&& mergePoint(b,x0,x1) != nil
&& clobber(x0)
&& clobber(x1)
&& clobber(s0)
-> @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx <v.Type> [i] {s} p idx mem))
// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 -> load 32-bit, reverse bytes
(ORW o0:(ORW o1:(ORW
x0:(MOVBZloadidx [i] {s} p idx mem)
s0:(SLWconst [8] x1:(MOVBZloadidx [i+1] {s} p idx mem)))
s1:(SLWconst [16] x2:(MOVBZloadidx [i+2] {s} p idx mem)))
s2:(SLWconst [24] x3:(MOVBZloadidx [i+3] {s} p idx mem)))
&& p.Op != OpSB
&& x0.Uses == 1
&& x1.Uses == 1
&& x2.Uses == 1
&& x3.Uses == 1
&& s0.Uses == 1
&& s1.Uses == 1
&& s2.Uses == 1
&& o0.Uses == 1
&& o1.Uses == 1
&& mergePoint(b,x0,x1,x2,x3) != nil
&& clobber(x0)
&& clobber(x1)
&& clobber(x2)
&& clobber(x3)
&& clobber(s0)
&& clobber(s1)
&& clobber(s2)
&& clobber(o0)
&& clobber(o1)
-> @mergePoint(b,x0,x1,x2,x3) (MOVWZreg (MOVWBRloadidx <v.Type> [i] {s} p idx mem))
// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 | b[4]<<32 | b[5]<<40 | b[6]<<48 | b[7]<<56 -> load 64-bit, reverse bytes
(OR o0:(OR o1:(OR o2:(OR o3:(OR o4:(OR o5:(OR
x0:(MOVBZloadidx [i] {s} p idx mem)
s0:(SLDconst [8] x1:(MOVBZloadidx [i+1] {s} p idx mem)))
s1:(SLDconst [16] x2:(MOVBZloadidx [i+2] {s} p idx mem)))
s2:(SLDconst [24] x3:(MOVBZloadidx [i+3] {s} p idx mem)))
s3:(SLDconst [32] x4:(MOVBZloadidx [i+4] {s} p idx mem)))
s4:(SLDconst [40] x5:(MOVBZloadidx [i+5] {s} p idx mem)))
s5:(SLDconst [48] x6:(MOVBZloadidx [i+6] {s} p idx mem)))
s6:(SLDconst [56] x7:(MOVBZloadidx [i+7] {s} p idx mem)))
&& p.Op != OpSB
&& x0.Uses == 1
&& x1.Uses == 1
&& x2.Uses == 1
&& x3.Uses == 1
&& x4.Uses == 1
&& x5.Uses == 1
&& x6.Uses == 1
&& x7.Uses == 1
&& s0.Uses == 1
&& s1.Uses == 1
&& s2.Uses == 1
&& s3.Uses == 1
&& s4.Uses == 1
&& s5.Uses == 1
&& s6.Uses == 1
&& o0.Uses == 1
&& o1.Uses == 1
&& o2.Uses == 1
&& o3.Uses == 1
&& o4.Uses == 1
&& o5.Uses == 1
&& mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
&& clobber(x0)
&& clobber(x1)
&& clobber(x2)
&& clobber(x3)
&& clobber(x4)
&& clobber(x5)
&& clobber(x6)
&& clobber(x7)
&& clobber(s0)
&& clobber(s1)
&& clobber(s2)
&& clobber(s3)
&& clobber(s4)
&& clobber(s5)
&& clobber(s6)
&& clobber(o0)
&& clobber(o1)
&& clobber(o2)
&& clobber(o3)
&& clobber(o4)
&& clobber(o5)
-> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDBRloadidx <v.Type> [i] {s} p idx mem)
// Big endian loads.
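// These mirror the little-endian rules above: a composition such as
//	x := uint16(b[1]) | uint16(b[0])<<8
// already matches the machine's big-endian byte order, so it collapses to a plain
// MOVHZload/MOVWZload/MOVDload with no byte reversal.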
// b[1] | b[0]<<8 -> load 16-bit
(ORW x0:(MOVBZload [i] {s} p mem)
s0:(SLWconst [8] x1:(MOVBZload [i-1] {s} p mem)))
&& p.Op != OpSB
&& x0.Uses == 1
&& x1.Uses == 1
&& s0.Uses == 1
&& mergePoint(b,x0,x1) != nil
&& clobber(x0)
&& clobber(x1)
&& clobber(s0)
-> @mergePoint(b,x0,x1) (MOVHZload [i-1] {s} p mem)
// b[3] | b[2]<<8 | b[1]<<16 | b[0]<<24 -> load 32-bit
(ORW o0:(ORW o1:(ORW
x0:(MOVBZload [i] {s} p mem)
s0:(SLWconst [8] x1:(MOVBZload [i-1] {s} p mem)))
s1:(SLWconst [16] x2:(MOVBZload [i-2] {s} p mem)))
s2:(SLWconst [24] x3:(MOVBZload [i-3] {s} p mem)))
&& p.Op != OpSB
&& x0.Uses == 1
&& x1.Uses == 1
&& x2.Uses == 1
&& x3.Uses == 1
&& s0.Uses == 1
&& s1.Uses == 1
&& s2.Uses == 1
&& o0.Uses == 1
&& o1.Uses == 1
&& mergePoint(b,x0,x1,x2,x3) != nil
&& clobber(x0)
&& clobber(x1)
&& clobber(x2)
&& clobber(x3)
&& clobber(s0)
&& clobber(s1)
&& clobber(s2)
&& clobber(o0)
&& clobber(o1)
-> @mergePoint(b,x0,x1,x2,x3) (MOVWZload [i-3] {s} p mem)
// b[7] | b[6]<<8 | b[5]<<16 | b[4]<<24 | b[3]<<32 | b[2]<<40 | b[1]<<48 | b[0]<<56 -> load 64-bit
(OR o0:(OR o1:(OR o2:(OR o3:(OR o4:(OR o5:(OR
x0:(MOVBZload [i] {s} p mem)
s0:(SLDconst [8] x1:(MOVBZload [i-1] {s} p mem)))
s1:(SLDconst [16] x2:(MOVBZload [i-2] {s} p mem)))
s2:(SLDconst [24] x3:(MOVBZload [i-3] {s} p mem)))
s3:(SLDconst [32] x4:(MOVBZload [i-4] {s} p mem)))
s4:(SLDconst [40] x5:(MOVBZload [i-5] {s} p mem)))
s5:(SLDconst [48] x6:(MOVBZload [i-6] {s} p mem)))
s6:(SLDconst [56] x7:(MOVBZload [i-7] {s} p mem)))
&& p.Op != OpSB
&& x0.Uses == 1
&& x1.Uses == 1
&& x2.Uses == 1
&& x3.Uses == 1
&& x4.Uses == 1
&& x5.Uses == 1
&& x6.Uses == 1
&& x7.Uses == 1
&& s0.Uses == 1
&& s1.Uses == 1
&& s2.Uses == 1
&& s3.Uses == 1
&& s4.Uses == 1
&& s5.Uses == 1
&& s6.Uses == 1
&& o0.Uses == 1
&& o1.Uses == 1
&& o2.Uses == 1
&& o3.Uses == 1
&& o4.Uses == 1
&& o5.Uses == 1
&& mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
&& clobber(x0)
&& clobber(x1)
&& clobber(x2)
&& clobber(x3)
&& clobber(x4)
&& clobber(x5)
&& clobber(x6)
&& clobber(x7)
&& clobber(s0)
&& clobber(s1)
&& clobber(s2)
&& clobber(s3)
&& clobber(s4)
&& clobber(s5)
&& clobber(s6)
&& clobber(o0)
&& clobber(o1)
&& clobber(o2)
&& clobber(o3)
&& clobber(o4)
&& clobber(o5)
-> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload [i-7] {s} p mem)
// b[1] | b[0]<<8 -> load 16-bit
(ORW x0:(MOVBZloadidx [i] {s} p idx mem)
s0:(SLWconst [8] x1:(MOVBZloadidx [i-1] {s} p idx mem)))
&& p.Op != OpSB
&& x0.Uses == 1
&& x1.Uses == 1
&& s0.Uses == 1
&& mergePoint(b,x0,x1) != nil
&& clobber(x0)
&& clobber(x1)
&& clobber(s0)
-> @mergePoint(b,x0,x1) (MOVHZloadidx <v.Type> [i-1] {s} p idx mem)
// b[3] | b[2]<<8 | b[1]<<16 | b[0]<<24 -> load 32-bit
(ORW o0:(ORW o1:(ORW
x0:(MOVBZloadidx [i] {s} p idx mem)
s0:(SLWconst [8] x1:(MOVBZloadidx [i-1] {s} p idx mem)))
s1:(SLWconst [16] x2:(MOVBZloadidx [i-2] {s} p idx mem)))
s2:(SLWconst [24] x3:(MOVBZloadidx [i-3] {s} p idx mem)))
&& p.Op != OpSB
&& x0.Uses == 1
&& x1.Uses == 1
&& x2.Uses == 1
&& x3.Uses == 1
&& s0.Uses == 1
&& s1.Uses == 1
&& s2.Uses == 1
&& o0.Uses == 1
&& o1.Uses == 1
&& mergePoint(b,x0,x1,x2,x3) != nil
&& clobber(x0)
&& clobber(x1)
&& clobber(x2)
&& clobber(x3)
&& clobber(s0)
&& clobber(s1)
&& clobber(s2)
&& clobber(o0)
&& clobber(o1)
-> @mergePoint(b,x0,x1,x2,x3) (MOVWZloadidx <v.Type> [i-3] {s} p idx mem)
// b[7] | b[6]<<8 | b[5]<<16 | b[4]<<24 | b[3]<<32 | b[2]<<40 | b[1]<<48 | b[0]<<56 -> load 64-bit
(OR o0:(OR o1:(OR o2:(OR o3:(OR o4:(OR o5:(OR
x0:(MOVBZloadidx [i] {s} p idx mem)
s0:(SLDconst [8] x1:(MOVBZloadidx [i-1] {s} p idx mem)))
s1:(SLDconst [16] x2:(MOVBZloadidx [i-2] {s} p idx mem)))
s2:(SLDconst [24] x3:(MOVBZloadidx [i-3] {s} p idx mem)))
s3:(SLDconst [32] x4:(MOVBZloadidx [i-4] {s} p idx mem)))
s4:(SLDconst [40] x5:(MOVBZloadidx [i-5] {s} p idx mem)))
s5:(SLDconst [48] x6:(MOVBZloadidx [i-6] {s} p idx mem)))
s6:(SLDconst [56] x7:(MOVBZloadidx [i-7] {s} p idx mem)))
&& p.Op != OpSB
&& x0.Uses == 1
&& x1.Uses == 1
&& x2.Uses == 1
&& x3.Uses == 1
&& x4.Uses == 1
&& x5.Uses == 1
&& x6.Uses == 1
&& x7.Uses == 1
&& s0.Uses == 1
&& s1.Uses == 1
&& s2.Uses == 1
&& s3.Uses == 1
&& s4.Uses == 1
&& s5.Uses == 1
&& s6.Uses == 1
&& o0.Uses == 1
&& o1.Uses == 1
&& o2.Uses == 1
&& o3.Uses == 1
&& o4.Uses == 1
&& o5.Uses == 1
&& mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
&& clobber(x0)
&& clobber(x1)
&& clobber(x2)
&& clobber(x3)
&& clobber(x4)
&& clobber(x5)
&& clobber(x6)
&& clobber(x7)
&& clobber(s0)
&& clobber(s1)
&& clobber(s2)
&& clobber(s3)
&& clobber(s4)
&& clobber(s5)
&& clobber(s6)
&& clobber(o0)
&& clobber(o1)
&& clobber(o2)
&& clobber(o3)
&& clobber(o4)
&& clobber(o5)
-> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDloadidx <v.Type> [i-7] {s} p idx mem)
// Combine stores into store multiples.
(MOVWstore [i] {s} p w3
x2:(MOVWstore [i-4] {s} p w2
x1:(MOVWstore [i-8] {s} p w1
x0:(MOVWstore [i-12] {s} p w0 mem))))
&& p.Op != OpSB
&& x0.Uses == 1
&& x1.Uses == 1
&& x2.Uses == 1
&& is20Bit(i-12)
&& clobber(x0)
&& clobber(x1)
&& clobber(x2)
-> (STM4 [i-12] {s} p w0 w1 w2 w3 mem)
(MOVWstore [i] {s} p w2
x1:(MOVWstore [i-4] {s} p w1
x0:(MOVWstore [i-8] {s} p w0 mem)))
&& p.Op != OpSB
&& x0.Uses == 1
&& x1.Uses == 1
&& is20Bit(i-8)
&& clobber(x0)
&& clobber(x1)
-> (STM3 [i-8] {s} p w0 w1 w2 mem)
(MOVWstore [i] {s} p w1 x:(MOVWstore [i-4] {s} p w0 mem))
&& p.Op != OpSB
&& x.Uses == 1
&& is20Bit(i-4)
&& clobber(x)
-> (STM2 [i-4] {s} p w0 w1 mem)
(MOVDstore [i] {s} p w3
x2:(MOVDstore [i-8] {s} p w2
x1:(MOVDstore [i-16] {s} p w1
x0:(MOVDstore [i-24] {s} p w0 mem))))
&& p.Op != OpSB
&& x0.Uses == 1
&& x1.Uses == 1
&& x2.Uses == 1
&& is20Bit(i-24)
&& clobber(x0)
&& clobber(x1)
&& clobber(x2)
-> (STMG4 [i-24] {s} p w0 w1 w2 w3 mem)
(MOVDstore [i] {s} p w2
x1:(MOVDstore [i-8] {s} p w1
x0:(MOVDstore [i-16] {s} p w0 mem)))
&& p.Op != OpSB
&& x0.Uses == 1
&& x1.Uses == 1
&& is20Bit(i-16)
&& clobber(x0)
&& clobber(x1)
-> (STMG3 [i-16] {s} p w0 w1 w2 mem)
(MOVDstore [i] {s} p w1 x:(MOVDstore [i-8] {s} p w0 mem))
&& p.Op != OpSB
&& x.Uses == 1
&& is20Bit(i-8)
&& clobber(x)
-> (STMG2 [i-8] {s} p w0 w1 mem)
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
package main
import "strings"
// Notes:
// - Integer types live in the low portion of registers. Upper portions are junk.
// - Boolean types use the low-order byte of a register. 0=false, 1=true.
// Upper bytes are junk.
// - When doing sub-register operations, we try to write the whole
// destination register to avoid a partial-register write.
// - Unused portions of AuxInt (or the Val portion of ValAndOff) are
// filled by sign-extending the used portion. Users of AuxInt which interpret
// AuxInt as unsigned (e.g. shifts) must be careful.
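// For example (illustration only): the 32-bit constant 0x80000000 kept in an Int32
// AuxInt is stored sign-extended as -2147483648, so a user that wants the unsigned
// value must truncate first, e.g. int64(uint32(v.AuxInt)).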
// Suffixes encode the bit width of various instructions.
// D (double word) = 64 bit (frequently omitted)
// W (word) = 32 bit
// H (half word) = 16 bit
// B (byte) = 8 bit
// copied from ../../s390x/reg.go
var regNamesS390X = []string{
"R0",
"R1",
"R2",
"R3",
"R4",
"R5",
"R6",
"R7",
"R8",
"R9",
"R10",
"R11",
"R12",
"g", // R13
"R14",
"SP", // R15
"F0",
"F1",
"F2",
"F3",
"F4",
"F5",
"F6",
"F7",
"F8",
"F9",
"F10",
"F11",
"F12",
"F13",
"F14",
"F15",
//pseudo-registers
"SB",
}
func init() {
// Make map from reg names to reg integers.
if len(regNamesS390X) > 64 {
panic("too many registers")
}
num := map[string]int{}
for i, name := range regNamesS390X {
num[name] = i
}
buildReg := func(s string) regMask {
m := regMask(0)
for _, r := range strings.Split(s, " ") {
if n, ok := num[r]; ok {
m |= regMask(1) << uint(n)
continue
}
panic("register " + r + " not found")
}
return m
}
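// For example, with the numbering above buildReg("R1 R2") yields a mask with
// bits 1 and 2 set (0x6) and buildReg("SP") sets bit 15; the regInfo values
// below combine these masks to say which registers each operand may occupy.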
// Common individual register masks
var (
sp = buildReg("SP")
sb = buildReg("SB")
r0 = buildReg("R0")
// R10 and R11 are reserved by the assembler.
gp = buildReg("R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12")
gpsp = gp | sp
// R0 is considered to contain the value 0 in address calculations.
ptr = gp &^ r0
ptrsp = ptr | sp
ptrspsb = ptrsp | sb
fp = buildReg("F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15")
callerSave = gp | fp
)
// Common slices of register masks
var (
gponly = []regMask{gp}
fponly = []regMask{fp}
)
// Common regInfo
var (
gp01 = regInfo{inputs: []regMask{}, outputs: gponly}
gp11 = regInfo{inputs: []regMask{gp}, outputs: gponly}
gp11sp = regInfo{inputs: []regMask{gpsp}, outputs: gponly}
gp21 = regInfo{inputs: []regMask{gp, gp}, outputs: gponly}
gp21sp = regInfo{inputs: []regMask{gpsp, gp}, outputs: gponly}
// R0 evaluates to 0 when used as the number of bits to shift
// so we need to exclude it from that operand.
sh21 = regInfo{inputs: []regMask{gp, ptr}, outputs: gponly}
addr = regInfo{inputs: []regMask{sp | sb}, outputs: gponly}
addridx = regInfo{inputs: []regMask{sp | sb, ptrsp}, outputs: gponly}
gp2flags = regInfo{inputs: []regMask{gpsp, gpsp}}
gp1flags = regInfo{inputs: []regMask{gpsp}}
flagsgp = regInfo{outputs: gponly}
gp2flags1 = regInfo{inputs: []regMask{gp, gp}, outputs: gponly}
gpload = regInfo{inputs: []regMask{ptrspsb, 0}, outputs: gponly}
gploadidx = regInfo{inputs: []regMask{ptrspsb, ptrsp, 0}, outputs: gponly}
gpstore = regInfo{inputs: []regMask{ptrspsb, gpsp, 0}}
gpstoreconst = regInfo{inputs: []regMask{ptrspsb, 0}}
gpstoreidx = regInfo{inputs: []regMask{ptrsp, ptrsp, gpsp, 0}}
gpmvc = regInfo{inputs: []regMask{ptrsp, ptrsp, 0}}
fp01 = regInfo{inputs: []regMask{}, outputs: fponly}
fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: fponly}
fp21clobber = regInfo{inputs: []regMask{fp, fp}, outputs: fponly}
fpgp = regInfo{inputs: fponly, outputs: gponly}
gpfp = regInfo{inputs: gponly, outputs: fponly}
fp11 = regInfo{inputs: fponly, outputs: fponly}
fp11clobber = regInfo{inputs: fponly, outputs: fponly}
fp2flags = regInfo{inputs: []regMask{fp, fp}}
fpload = regInfo{inputs: []regMask{ptrspsb, 0}, outputs: fponly}
fploadidx = regInfo{inputs: []regMask{ptrsp, ptrsp, 0}, outputs: fponly}
fpstore = regInfo{inputs: []regMask{ptrspsb, fp, 0}}
fpstoreidx = regInfo{inputs: []regMask{ptrsp, ptrsp, fp, 0}}
)
var S390Xops = []opData{
// fp ops
{name: "FADDS", argLength: 2, reg: fp21clobber, asm: "FADDS", commutative: true, resultInArg0: true, clobberFlags: true}, // fp32 add
{name: "FADD", argLength: 2, reg: fp21clobber, asm: "FADD", commutative: true, resultInArg0: true, clobberFlags: true}, // fp64 add
{name: "FSUBS", argLength: 2, reg: fp21clobber, asm: "FSUBS", resultInArg0: true, clobberFlags: true}, // fp32 sub
{name: "FSUB", argLength: 2, reg: fp21clobber, asm: "FSUB", resultInArg0: true, clobberFlags: true}, // fp64 sub
{name: "FMULS", argLength: 2, reg: fp21, asm: "FMULS", commutative: true, resultInArg0: true}, // fp32 mul
{name: "FMUL", argLength: 2, reg: fp21, asm: "FMUL", commutative: true, resultInArg0: true}, // fp64 mul
{name: "FDIVS", argLength: 2, reg: fp21, asm: "FDIVS", resultInArg0: true}, // fp32 div
{name: "FDIV", argLength: 2, reg: fp21, asm: "FDIV", resultInArg0: true}, // fp64 div
{name: "FNEGS", argLength: 1, reg: fp11clobber, asm: "FNEGS", clobberFlags: true}, // fp32 neg
{name: "FNEG", argLength: 1, reg: fp11clobber, asm: "FNEG", clobberFlags: true}, // fp64 neg
{name: "FMOVSload", argLength: 2, reg: fpload, asm: "FMOVS", aux: "SymOff"}, // fp32 load
{name: "FMOVDload", argLength: 2, reg: fpload, asm: "FMOVD", aux: "SymOff"}, // fp64 load
{name: "FMOVSconst", reg: fp01, asm: "FMOVS", aux: "Float32", rematerializeable: true}, // fp32 constant
{name: "FMOVDconst", reg: fp01, asm: "FMOVD", aux: "Float64", rematerializeable: true}, // fp64 constant
{name: "FMOVSloadidx", argLength: 3, reg: fploadidx, asm: "FMOVS", aux: "SymOff"}, // fp32 load indexed by i
{name: "FMOVDloadidx", argLength: 3, reg: fploadidx, asm: "FMOVD", aux: "SymOff"}, // fp64 load indexed by i
{name: "FMOVSstore", argLength: 3, reg: fpstore, asm: "FMOVS", aux: "SymOff"}, // fp32 store
{name: "FMOVDstore", argLength: 3, reg: fpstore, asm: "FMOVD", aux: "SymOff"}, // fp64 store
{name: "FMOVSstoreidx", argLength: 4, reg: fpstoreidx, asm: "FMOVS", aux: "SymOff"}, // fp32 indexed by i store
{name: "FMOVDstoreidx", argLength: 4, reg: fpstoreidx, asm: "FMOVD", aux: "SymOff"}, // fp64 indexed by i store
// binary ops
{name: "ADD", argLength: 2, reg: gp21sp, asm: "ADD", commutative: true, clobberFlags: true}, // arg0 + arg1
{name: "ADDW", argLength: 2, reg: gp21sp, asm: "ADDW", commutative: true, clobberFlags: true}, // arg0 + arg1
{name: "ADDconst", argLength: 1, reg: gp11sp, asm: "ADD", aux: "Int64", typ: "UInt64", clobberFlags: true}, // arg0 + auxint
{name: "ADDWconst", argLength: 1, reg: gp11sp, asm: "ADDW", aux: "Int32", clobberFlags: true}, // arg0 + auxint
{name: "SUB", argLength: 2, reg: gp21, asm: "SUB", clobberFlags: true}, // arg0 - arg1
{name: "SUBW", argLength: 2, reg: gp21, asm: "SUBW", clobberFlags: true}, // arg0 - arg1
{name: "SUBconst", argLength: 1, reg: gp11, asm: "SUB", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 - auxint
{name: "SUBWconst", argLength: 1, reg: gp11, asm: "SUBW", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 - auxint
{name: "MULLD", argLength: 2, reg: gp21, asm: "MULLD", typ: "Int64", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 * arg1
{name: "MULLW", argLength: 2, reg: gp21, asm: "MULLW", typ: "Int32", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 * arg1
{name: "MULLDconst", argLength: 1, reg: gp11, asm: "MULLD", aux: "Int64", typ: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 * auxint
{name: "MULLWconst", argLength: 1, reg: gp11, asm: "MULLW", aux: "Int32", typ: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 * auxint
{name: "MULHD", argLength: 2, reg: gp21, asm: "MULHD", typ: "Int64", resultInArg0: true, clobberFlags: true}, // (arg0 * arg1) >> width
{name: "MULHDU", argLength: 2, reg: gp21, asm: "MULHDU", typ: "Int64", resultInArg0: true, clobberFlags: true}, // (arg0 * arg1) >> width
{name: "DIVD", argLength: 2, reg: gp21, asm: "DIVD", resultInArg0: true, clobberFlags: true}, // arg0 / arg1
{name: "DIVW", argLength: 2, reg: gp21, asm: "DIVW", resultInArg0: true, clobberFlags: true}, // arg0 / arg1
{name: "DIVDU", argLength: 2, reg: gp21, asm: "DIVDU", resultInArg0: true, clobberFlags: true}, // arg0 / arg1
{name: "DIVWU", argLength: 2, reg: gp21, asm: "DIVWU", resultInArg0: true, clobberFlags: true}, // arg0 / arg1
{name: "MODD", argLength: 2, reg: gp21, asm: "MODD", resultInArg0: true, clobberFlags: true}, // arg0 % arg1
{name: "MODW", argLength: 2, reg: gp21, asm: "MODW", resultInArg0: true, clobberFlags: true}, // arg0 % arg1
{name: "MODDU", argLength: 2, reg: gp21, asm: "MODDU", resultInArg0: true, clobberFlags: true}, // arg0 % arg1
{name: "MODWU", argLength: 2, reg: gp21, asm: "MODWU", resultInArg0: true, clobberFlags: true}, // arg0 % arg1
{name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true, clobberFlags: true}, // arg0 & arg1
{name: "ANDW", argLength: 2, reg: gp21, asm: "AND", commutative: true, clobberFlags: true}, // arg0 & arg1
{name: "ANDconst", argLength: 1, reg: gp11, asm: "AND", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 & auxint
{name: "ANDWconst", argLength: 1, reg: gp11, asm: "AND", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 & auxint
{name: "OR", argLength: 2, reg: gp21, asm: "OR", commutative: true, clobberFlags: true}, // arg0 | arg1
{name: "ORW", argLength: 2, reg: gp21, asm: "OR", commutative: true, clobberFlags: true}, // arg0 | arg1
{name: "ORconst", argLength: 1, reg: gp11, asm: "OR", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 | auxint
{name: "ORWconst", argLength: 1, reg: gp11, asm: "OR", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 | auxint
{name: "XOR", argLength: 2, reg: gp21, asm: "XOR", commutative: true, clobberFlags: true}, // arg0 ^ arg1
{name: "XORW", argLength: 2, reg: gp21, asm: "XOR", commutative: true, clobberFlags: true}, // arg0 ^ arg1
{name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 ^ auxint
{name: "XORWconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 ^ auxint
{name: "CMP", argLength: 2, reg: gp2flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1
{name: "CMPW", argLength: 2, reg: gp2flags, asm: "CMPW", typ: "Flags"}, // arg0 compare to arg1
{name: "CMPU", argLength: 2, reg: gp2flags, asm: "CMPU", typ: "Flags"}, // arg0 compare to arg1
{name: "CMPWU", argLength: 2, reg: gp2flags, asm: "CMPWU", typ: "Flags"}, // arg0 compare to arg1
{name: "CMPconst", argLength: 1, reg: gp1flags, asm: "CMP", typ: "Flags", aux: "Int64"}, // arg0 compare to auxint
{name: "CMPWconst", argLength: 1, reg: gp1flags, asm: "CMPW", typ: "Flags", aux: "Int32"}, // arg0 compare to auxint
{name: "CMPUconst", argLength: 1, reg: gp1flags, asm: "CMPU", typ: "Flags", aux: "Int64"}, // arg0 compare to auxint
{name: "CMPWUconst", argLength: 1, reg: gp1flags, asm: "CMPWU", typ: "Flags", aux: "Int32"}, // arg0 compare to auxint
{name: "FCMPS", argLength: 2, reg: fp2flags, asm: "CEBR", typ: "Flags"}, // arg0 compare to arg1, f32
{name: "FCMP", argLength: 2, reg: fp2flags, asm: "FCMPU", typ: "Flags"}, // arg0 compare to arg1, f64
{name: "TESTB", argLength: 1, reg: gp1flags, asm: "AND", typ: "Flags"}, // (arg0 & 0xFF) compare to 0
{name: "SLD", argLength: 2, reg: sh21, asm: "SLD"}, // arg0 << arg1, shift amount is mod 64
{name: "SLW", argLength: 2, reg: sh21, asm: "SLW"}, // arg0 << arg1, shift amount is mod 32
{name: "SLDconst", argLength: 1, reg: gp11, asm: "SLD", aux: "Int64"}, // arg0 << auxint, shift amount 0-63
{name: "SLWconst", argLength: 1, reg: gp11, asm: "SLW", aux: "Int32"}, // arg0 << auxint, shift amount 0-31
{name: "SRD", argLength: 2, reg: sh21, asm: "SRD"}, // unsigned arg0 >> arg1, shift amount is mod 64
{name: "SRW", argLength: 2, reg: sh21, asm: "SRW"}, // unsigned arg0 >> arg1, shift amount is mod 32
{name: "SRDconst", argLength: 1, reg: gp11, asm: "SRD", aux: "Int64"}, // unsigned arg0 >> auxint, shift amount 0-63
{name: "SRWconst", argLength: 1, reg: gp11, asm: "SRW", aux: "Int32"}, // unsigned arg0 >> auxint, shift amount 0-31
// Arithmetic shifts clobber flags.
{name: "SRAD", argLength: 2, reg: sh21, asm: "SRAD", clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 64
{name: "SRAW", argLength: 2, reg: sh21, asm: "SRAW", clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 32
{name: "SRADconst", argLength: 1, reg: gp11, asm: "SRAD", aux: "Int64", clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-63
{name: "SRAWconst", argLength: 1, reg: gp11, asm: "SRAW", aux: "Int32", clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-31
{name: "RLLGconst", argLength: 1, reg: gp11, asm: "RLLG", aux: "Int64"}, // arg0 rotate left auxint, rotate amount 0-63
{name: "RLLconst", argLength: 1, reg: gp11, asm: "RLL", aux: "Int32"}, // arg0 rotate left auxint, rotate amount 0-31
// unary ops
{name: "NEG", argLength: 1, reg: gp11, asm: "NEG", clobberFlags: true}, // -arg0
{name: "NEGW", argLength: 1, reg: gp11, asm: "NEGW", clobberFlags: true}, // -arg0
{name: "NOT", argLength: 1, reg: gp11, resultInArg0: true, clobberFlags: true}, // ^arg0
{name: "NOTW", argLength: 1, reg: gp11, resultInArg0: true, clobberFlags: true}, // ^arg0
{name: "FSQRT", argLength: 1, reg: fp11, asm: "FSQRT"}, // sqrt(arg0)
{name: "SUBEcarrymask", argLength: 1, reg: flagsgp, asm: "SUBE"}, // (int64)(-1) if carry is set, 0 if carry is clear.
{name: "SUBEWcarrymask", argLength: 1, reg: flagsgp, asm: "SUBE"}, // (int32)(-1) if carry is set, 0 if carry is clear.
// Note: a 32-bit subtract-with-borrow is not implemented for s390x; SUBE (64-bit) is used temporarily.
{name: "MOVDEQ", argLength: 3, reg: gp2flags1, resultInArg0: true, asm: "MOVDEQ"}, // extract == condition from arg0
{name: "MOVDNE", argLength: 3, reg: gp2flags1, resultInArg0: true, asm: "MOVDNE"}, // extract != condition from arg0
{name: "MOVDLT", argLength: 3, reg: gp2flags1, resultInArg0: true, asm: "MOVDLT"}, // extract signed < condition from arg0
{name: "MOVDLE", argLength: 3, reg: gp2flags1, resultInArg0: true, asm: "MOVDLE"}, // extract signed <= condition from arg0
{name: "MOVDGT", argLength: 3, reg: gp2flags1, resultInArg0: true, asm: "MOVDGT"}, // extract signed > condition from arg0
{name: "MOVDGE", argLength: 3, reg: gp2flags1, resultInArg0: true, asm: "MOVDGE"}, // extract signed >= condition from arg0
// Different rules for floating point conditions because
// any ordered comparison involving a NaN is false and thus
// the patterns for inverting conditions cannot be used.
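// For example, if x is NaN then x > y and x <= y are both false, so
// rewriting one branch condition as the negation of the other would
// change the result; hence the separate "noinv" ops below.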
{name: "MOVDGTnoinv", argLength: 3, reg: gp2flags1, resultInArg0: true, asm: "MOVDGT"}, // extract floating > condition from arg0
{name: "MOVDGEnoinv", argLength: 3, reg: gp2flags1, resultInArg0: true, asm: "MOVDGE"}, // extract floating >= condition from arg0
{name: "MOVBreg", argLength: 1, reg: gp11sp, asm: "MOVB", typ: "Int64"}, // sign extend arg0 from int8 to int64
{name: "MOVBZreg", argLength: 1, reg: gp11sp, asm: "MOVBZ", typ: "UInt64"}, // zero extend arg0 from int8 to int64
{name: "MOVHreg", argLength: 1, reg: gp11sp, asm: "MOVH", typ: "Int64"}, // sign extend arg0 from int16 to int64
{name: "MOVHZreg", argLength: 1, reg: gp11sp, asm: "MOVHZ", typ: "UInt64"}, // zero extend arg0 from int16 to int64
{name: "MOVWreg", argLength: 1, reg: gp11sp, asm: "MOVW", typ: "Int64"}, // sign extend arg0 from int32 to int64
{name: "MOVWZreg", argLength: 1, reg: gp11sp, asm: "MOVWZ", typ: "UInt64"}, // zero extend arg0 from int32 to int64
{name: "MOVDconst", reg: gp01, asm: "MOVD", typ: "UInt64", aux: "Int64", rematerializeable: true}, // auxint
{name: "CFDBRA", argLength: 1, reg: fpgp, asm: "CFDBRA"}, // convert float64 to int32
{name: "CGDBRA", argLength: 1, reg: fpgp, asm: "CGDBRA"}, // convert float64 to int64
{name: "CFEBRA", argLength: 1, reg: fpgp, asm: "CFEBRA"}, // convert float32 to int32
{name: "CGEBRA", argLength: 1, reg: fpgp, asm: "CGEBRA"}, // convert float32 to int64
{name: "CEFBRA", argLength: 1, reg: gpfp, asm: "CEFBRA"}, // convert int32 to float32
{name: "CDFBRA", argLength: 1, reg: gpfp, asm: "CDFBRA"}, // convert int32 to float64
{name: "CEGBRA", argLength: 1, reg: gpfp, asm: "CEGBRA"}, // convert int64 to float32
{name: "CDGBRA", argLength: 1, reg: gpfp, asm: "CDGBRA"}, // convert int64 to float64
{name: "LEDBR", argLength: 1, reg: fp11, asm: "LEDBR"}, // convert float64 to float32
{name: "LDEBR", argLength: 1, reg: fp11, asm: "LDEBR"}, // convert float32 to float64
{name: "MOVDaddr", argLength: 1, reg: addr, aux: "SymOff", rematerializeable: true, clobberFlags: true}, // arg0 + auxint + offset encoded in aux
{name: "MOVDaddridx", argLength: 2, reg: addridx, aux: "SymOff", clobberFlags: true}, // arg0 + arg1 + auxint + aux
// auxint+aux == add auxint and the offset of the symbol in aux (if any) to the effective address
{name: "MOVBZload", argLength: 2, reg: gpload, asm: "MOVBZ", aux: "SymOff", typ: "UInt8", clobberFlags: true}, // load byte from arg0+auxint+aux. arg1=mem. Zero extend.
{name: "MOVBload", argLength: 2, reg: gpload, asm: "MOVB", aux: "SymOff", clobberFlags: true}, // ditto, sign extend to int64
{name: "MOVHZload", argLength: 2, reg: gpload, asm: "MOVHZ", aux: "SymOff", typ: "UInt16", clobberFlags: true}, // load 2 bytes from arg0+auxint+aux. arg1=mem. Zero extend.
{name: "MOVHload", argLength: 2, reg: gpload, asm: "MOVH", aux: "SymOff", clobberFlags: true}, // ditto, sign extend to int64
{name: "MOVWZload", argLength: 2, reg: gpload, asm: "MOVWZ", aux: "SymOff", typ: "UInt32", clobberFlags: true}, // load 4 bytes from arg0+auxint+aux. arg1=mem. Zero extend.
{name: "MOVWload", argLength: 2, reg: gpload, asm: "MOVW", aux: "SymOff", clobberFlags: true}, // ditto, sign extend to int64
{name: "MOVDload", argLength: 2, reg: gpload, asm: "MOVD", aux: "SymOff", typ: "UInt64", clobberFlags: true}, // load 8 bytes from arg0+auxint+aux. arg1=mem
{name: "MOVHBRload", argLength: 2, reg: gpload, asm: "MOVHBR", aux: "SymOff", typ: "UInt16", clobberFlags: true}, // load 2 bytes from arg0+auxint+aux. arg1=mem. Reverse bytes.
{name: "MOVWBRload", argLength: 2, reg: gpload, asm: "MOVWBR", aux: "SymOff", typ: "UInt32", clobberFlags: true}, // load 4 bytes from arg0+auxint+aux. arg1=mem. Reverse bytes.
{name: "MOVDBRload", argLength: 2, reg: gpload, asm: "MOVDBR", aux: "SymOff", typ: "UInt64", clobberFlags: true}, // load 8 bytes from arg0+auxint+aux. arg1=mem. Reverse bytes.
{name: "MOVBstore", argLength: 3, reg: gpstore, asm: "MOVB", aux: "SymOff", typ: "Mem", clobberFlags: true}, // store byte in arg1 to arg0+auxint+aux. arg2=mem
{name: "MOVHstore", argLength: 3, reg: gpstore, asm: "MOVH", aux: "SymOff", typ: "Mem", clobberFlags: true}, // store 2 bytes in arg1 to arg0+auxint+aux. arg2=mem
{name: "MOVWstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem", clobberFlags: true}, // store 4 bytes in arg1 to arg0+auxint+aux. arg2=mem
{name: "MOVDstore", argLength: 3, reg: gpstore, asm: "MOVD", aux: "SymOff", typ: "Mem", clobberFlags: true}, // store 8 bytes in arg1 to arg0+auxint+aux. arg2=mem
{name: "MVC", argLength: 3, reg: gpmvc, asm: "MVC", aux: "SymValAndOff", typ: "Mem", clobberFlags: true}, // arg0=destptr, arg1=srcptr, arg2=mem, auxint=size,off
// indexed loads/stores
// TODO(mundaym): add sign-extended indexed loads
{name: "MOVBZloadidx", argLength: 3, reg: gploadidx, asm: "MOVBZ", aux: "SymOff", clobberFlags: true}, // load a byte from arg0+arg1+auxint+aux. arg2=mem
{name: "MOVHZloadidx", argLength: 3, reg: gploadidx, asm: "MOVHZ", aux: "SymOff", clobberFlags: true}, // load 2 bytes from arg0+arg1+auxint+aux. arg2=mem
{name: "MOVWZloadidx", argLength: 3, reg: gploadidx, asm: "MOVWZ", aux: "SymOff", clobberFlags: true}, // load 4 bytes from arg0+arg1+auxint+aux. arg2=mem
{name: "MOVDloadidx", argLength: 3, reg: gploadidx, asm: "MOVD", aux: "SymOff", clobberFlags: true}, // load 8 bytes from arg0+arg1+auxint+aux. arg2=mem
{name: "MOVHBRloadidx", argLength: 3, reg: gploadidx, asm: "MOVHBR", aux: "SymOff", clobberFlags: true}, // load 2 bytes from arg0+arg1+auxint+aux. arg2=mem. Reverse bytes.
{name: "MOVWBRloadidx", argLength: 3, reg: gploadidx, asm: "MOVWBR", aux: "SymOff", clobberFlags: true}, // load 4 bytes from arg0+arg1+auxint+aux. arg2=mem. Reverse bytes.
{name: "MOVDBRloadidx", argLength: 3, reg: gploadidx, asm: "MOVDBR", aux: "SymOff", clobberFlags: true}, // load 8 bytes from arg0+arg1+auxint+aux. arg2=mem. Reverse bytes.
{name: "MOVBstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVB", aux: "SymOff", clobberFlags: true}, // store byte in arg2 to arg0+arg1+auxint+aux. arg3=mem
{name: "MOVHstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVH", aux: "SymOff", clobberFlags: true}, // store 2 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem
{name: "MOVWstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVW", aux: "SymOff", clobberFlags: true}, // store 4 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem
{name: "MOVDstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVD", aux: "SymOff", clobberFlags: true}, // store 8 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem
// For storeconst ops, the AuxInt field encodes both
// the value to store and an address offset of the store.
// Cast AuxInt to a ValAndOff to extract Val and Off fields.
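// Illustrative sketch (assuming the usual ValAndOff layout, with Val in
// the high 32 bits of AuxInt and Off in the low 32 bits): a
// MOVWstoreconst with AuxInt = makeValAndOff(5, 16) stores the 32-bit
// value 5 to arg0+16+aux.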
{name: "MOVBstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVB", aux: "SymValAndOff", typ: "Mem", clobberFlags: true}, // store low byte of ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux. arg1=mem
{name: "MOVHstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVH", aux: "SymValAndOff", typ: "Mem", clobberFlags: true}, // store low 2 bytes of ...
{name: "MOVWstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVW", aux: "SymValAndOff", typ: "Mem", clobberFlags: true}, // store low 4 bytes of ...
{name: "MOVDstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVD", aux: "SymValAndOff", typ: "Mem", clobberFlags: true}, // store 8 bytes of ...
{name: "CLEAR", argLength: 2, reg: regInfo{inputs: []regMask{ptr, 0}}, asm: "CLEAR", aux: "SymValAndOff", typ: "Mem", clobberFlags: true},
{name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "SymOff", clobberFlags: true, call: true}, // call static function aux.(*gc.Sym). arg0=mem, auxint=argsize, returns mem
{name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{ptrsp, buildReg("R12"), 0}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
{name: "CALLdefer", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call deferproc. arg0=mem, auxint=argsize, returns mem
{name: "CALLgo", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call newproc. arg0=mem, auxint=argsize, returns mem
{name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{ptr}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
// (InvertFlags (CMP a b)) == (CMP b a)
// InvertFlags is a pseudo-op which can't appear in assembly output.
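// For example, a rewrite rule along the lines of
//   (LT (InvertFlags cmp) yes no) -> (GT cmp yes no)
// lets a swapped comparison be expressed without recomputing the flags.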
{name: "InvertFlags", argLength: 1}, // reverse direction of arg0
// Pseudo-ops
{name: "LoweredGetG", argLength: 1, reg: gp01}, // arg0=mem
// Scheduler ensures LoweredGetClosurePtr occurs only in entry block,
// and sorts it to the very beginning of the block to prevent other
// use of R12 (the closure pointer)
{name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("R12")}}},
// arg0=ptr,arg1=mem, returns void. Faults if ptr is nil.
{name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{ptrsp}}, clobberFlags: true},
// MOVDconvert converts between pointers and integers.
// We have a special op for this so as to not confuse GC
// (particularly stack maps). It takes a memory arg so it
// gets correctly ordered with respect to GC safepoints.
// arg0=ptr/int arg1=mem, output=int/ptr
{name: "MOVDconvert", argLength: 2, reg: gp11sp, asm: "MOVD"},
// Constant flag values. For any comparison, there are 5 possible
// outcomes: the three from the signed total order (<,==,>) and the
// three from the unsigned total order. The == cases overlap.
// Note: there's a sixth "unordered" outcome for floating-point
// comparisons, but we don't use such a beast yet.
// These ops are for temporary use by rewrite rules. They
// cannot appear in the generated assembly.
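// For instance, a comparison of two constants, such as
// (CMPconst (MOVDconst [c]) [d]), can be rewritten directly to FlagEQ,
// FlagLT or FlagGT, after which the branch or conditional move that
// consumes the flags can be resolved at compile time.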
{name: "FlagEQ"}, // equal
{name: "FlagLT"}, // <
{name: "FlagGT"}, // >
// store multiple
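// STMGn stores the n registers R1..Rn (8 bytes each) to consecutive
// doublewords starting at arg0+auxint+aux; the STMn variants (STMY)
// store the low 4 bytes of each register instead. The last argument is mem.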
{
name: "STMG2",
argLength: 4,
reg: regInfo{inputs: []regMask{ptrsp, buildReg("R1"), buildReg("R2"), 0}},
aux: "SymOff",
typ: "Mem",
asm: "STMG",
},
{
name: "STMG3",
argLength: 5,
reg: regInfo{inputs: []regMask{ptrsp, buildReg("R1"), buildReg("R2"), buildReg("R3"), 0}},
aux: "SymOff",
typ: "Mem",
asm: "STMG",
},
{
name: "STMG4",
argLength: 6,
reg: regInfo{inputs: []regMask{
ptrsp,
buildReg("R1"),
buildReg("R2"),
buildReg("R3"),
buildReg("R4"),
0,
}},
aux: "SymOff",
typ: "Mem",
asm: "STMG",
},
{
name: "STM2",
argLength: 4,
reg: regInfo{inputs: []regMask{ptrsp, buildReg("R1"), buildReg("R2"), 0}},
aux: "SymOff",
typ: "Mem",
asm: "STMY",
},
{
name: "STM3",
argLength: 5,
reg: regInfo{inputs: []regMask{ptrsp, buildReg("R1"), buildReg("R2"), buildReg("R3"), 0}},
aux: "SymOff",
typ: "Mem",
asm: "STMY",
},
{
name: "STM4",
argLength: 6,
reg: regInfo{inputs: []regMask{
ptrsp,
buildReg("R1"),
buildReg("R2"),
buildReg("R3"),
buildReg("R4"),
0,
}},
aux: "SymOff",
typ: "Mem",
asm: "STMY",
},
// large move
// auxint = remaining bytes after loop (rem)
// arg0 = address of dst memory (in R1, changed as a side effect)
// arg1 = address of src memory (in R2, changed as a side effect)
// arg2 = pointer to last address to move in loop + 256
// arg3 = mem
// returns mem
//
// mvc: MVC $256, 0(R2), 0(R1)
// MOVD $256(R1), R1
// MOVD $256(R2), R2
// CMP R2, Rarg2
// BNE mvc
// MVC $rem, 0(R2), 0(R1) // if rem > 0
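// For example, a 776-byte move runs the MVC loop three times (768 bytes,
// with arg2 = src+768) and finishes with MVC $8 for the remaining
// auxint = 8 bytes.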
{
name: "LoweredMove",
aux: "Int64",
argLength: 4,
reg: regInfo{
inputs: []regMask{buildReg("R1"), buildReg("R2"), gpsp},
clobbers: buildReg("R1 R2"),
},
clobberFlags: true,
typ: "Mem",
},
// large clear
// auxint = remaining bytes after loop (rem)
// arg0 = address of dst memory (in R1, changed as a side effect)
// arg1 = pointer to last address to zero in loop + 256
// arg2 = mem
// returns mem
//
// clear: CLEAR $256, 0(R1)
// MOVD $256(R1), R1
// CMP R1, Rarg2
// BNE clear
// CLEAR $rem, 0(R1) // if rem > 0
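// For example, a 520-byte clear runs the CLEAR loop twice (512 bytes,
// with arg1 = dst+512) and finishes with CLEAR $8 for the remaining
// auxint = 8 bytes.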
{
name: "LoweredZero",
aux: "Int64",
argLength: 3,
reg: regInfo{
inputs: []regMask{buildReg("R1"), gpsp},
clobbers: buildReg("R1"),
},
clobberFlags: true,
typ: "Mem",
},
}
var S390Xblocks = []blockData{
{name: "EQ"},
{name: "NE"},
{name: "LT"},
{name: "LE"},
{name: "GT"},
{name: "GE"},
{name: "GTF"}, // FP comparison
{name: "GEF"}, // FP comparison
}
archs = append(archs, arch{
name: "S390X",
pkg: "cmd/internal/obj/s390x",
genfile: "../../s390x/ssa.go",
ops: S390Xops,
blocks: S390Xblocks,
regnames: regNamesS390X,
gpregmask: gp,
fpregmask: fp,
framepointerreg: -1, // not used
})
}
...@@ -9,6 +9,7 @@ import ( ...@@ -9,6 +9,7 @@ import (
"cmd/internal/obj/arm64" "cmd/internal/obj/arm64"
"cmd/internal/obj/mips" "cmd/internal/obj/mips"
"cmd/internal/obj/ppc64" "cmd/internal/obj/ppc64"
"cmd/internal/obj/s390x"
"cmd/internal/obj/x86" "cmd/internal/obj/x86"
) )
...@@ -87,6 +88,15 @@ const ( ...@@ -87,6 +88,15 @@ const (
BlockPPC64FGT BlockPPC64FGT
BlockPPC64FGE BlockPPC64FGE
BlockS390XEQ
BlockS390XNE
BlockS390XLT
BlockS390XLE
BlockS390XGT
BlockS390XGE
BlockS390XGTF
BlockS390XGEF
BlockPlain BlockPlain
BlockIf BlockIf
BlockDefer BlockDefer
...@@ -172,6 +182,15 @@ var blockString = [...]string{ ...@@ -172,6 +182,15 @@ var blockString = [...]string{
BlockPPC64FGT: "FGT", BlockPPC64FGT: "FGT",
BlockPPC64FGE: "FGE", BlockPPC64FGE: "FGE",
BlockS390XEQ: "EQ",
BlockS390XNE: "NE",
BlockS390XLT: "LT",
BlockS390XLE: "LE",
BlockS390XGT: "GT",
BlockS390XGE: "GE",
BlockS390XGTF: "GTF",
BlockS390XGEF: "GEF",
BlockPlain: "Plain", BlockPlain: "Plain",
BlockIf: "If", BlockIf: "If",
BlockDefer: "Defer", BlockDefer: "Defer",
...@@ -1217,6 +1236,172 @@ const ( ...@@ -1217,6 +1236,172 @@ const (
OpPPC64FlagLT OpPPC64FlagLT
OpPPC64FlagGT OpPPC64FlagGT
OpS390XFADDS
OpS390XFADD
OpS390XFSUBS
OpS390XFSUB
OpS390XFMULS
OpS390XFMUL
OpS390XFDIVS
OpS390XFDIV
OpS390XFNEGS
OpS390XFNEG
OpS390XFMOVSload
OpS390XFMOVDload
OpS390XFMOVSconst
OpS390XFMOVDconst
OpS390XFMOVSloadidx
OpS390XFMOVDloadidx
OpS390XFMOVSstore
OpS390XFMOVDstore
OpS390XFMOVSstoreidx
OpS390XFMOVDstoreidx
OpS390XADD
OpS390XADDW
OpS390XADDconst
OpS390XADDWconst
OpS390XSUB
OpS390XSUBW
OpS390XSUBconst
OpS390XSUBWconst
OpS390XMULLD
OpS390XMULLW
OpS390XMULLDconst
OpS390XMULLWconst
OpS390XMULHD
OpS390XMULHDU
OpS390XDIVD
OpS390XDIVW
OpS390XDIVDU
OpS390XDIVWU
OpS390XMODD
OpS390XMODW
OpS390XMODDU
OpS390XMODWU
OpS390XAND
OpS390XANDW
OpS390XANDconst
OpS390XANDWconst
OpS390XOR
OpS390XORW
OpS390XORconst
OpS390XORWconst
OpS390XXOR
OpS390XXORW
OpS390XXORconst
OpS390XXORWconst
OpS390XCMP
OpS390XCMPW
OpS390XCMPU
OpS390XCMPWU
OpS390XCMPconst
OpS390XCMPWconst
OpS390XCMPUconst
OpS390XCMPWUconst
OpS390XFCMPS
OpS390XFCMP
OpS390XTESTB
OpS390XSLD
OpS390XSLW
OpS390XSLDconst
OpS390XSLWconst
OpS390XSRD
OpS390XSRW
OpS390XSRDconst
OpS390XSRWconst
OpS390XSRAD
OpS390XSRAW
OpS390XSRADconst
OpS390XSRAWconst
OpS390XRLLGconst
OpS390XRLLconst
OpS390XNEG
OpS390XNEGW
OpS390XNOT
OpS390XNOTW
OpS390XFSQRT
OpS390XSUBEcarrymask
OpS390XSUBEWcarrymask
OpS390XMOVDEQ
OpS390XMOVDNE
OpS390XMOVDLT
OpS390XMOVDLE
OpS390XMOVDGT
OpS390XMOVDGE
OpS390XMOVDGTnoinv
OpS390XMOVDGEnoinv
OpS390XMOVBreg
OpS390XMOVBZreg
OpS390XMOVHreg
OpS390XMOVHZreg
OpS390XMOVWreg
OpS390XMOVWZreg
OpS390XMOVDconst
OpS390XCFDBRA
OpS390XCGDBRA
OpS390XCFEBRA
OpS390XCGEBRA
OpS390XCEFBRA
OpS390XCDFBRA
OpS390XCEGBRA
OpS390XCDGBRA
OpS390XLEDBR
OpS390XLDEBR
OpS390XMOVDaddr
OpS390XMOVDaddridx
OpS390XMOVBZload
OpS390XMOVBload
OpS390XMOVHZload
OpS390XMOVHload
OpS390XMOVWZload
OpS390XMOVWload
OpS390XMOVDload
OpS390XMOVHBRload
OpS390XMOVWBRload
OpS390XMOVDBRload
OpS390XMOVBstore
OpS390XMOVHstore
OpS390XMOVWstore
OpS390XMOVDstore
OpS390XMVC
OpS390XMOVBZloadidx
OpS390XMOVHZloadidx
OpS390XMOVWZloadidx
OpS390XMOVDloadidx
OpS390XMOVHBRloadidx
OpS390XMOVWBRloadidx
OpS390XMOVDBRloadidx
OpS390XMOVBstoreidx
OpS390XMOVHstoreidx
OpS390XMOVWstoreidx
OpS390XMOVDstoreidx
OpS390XMOVBstoreconst
OpS390XMOVHstoreconst
OpS390XMOVWstoreconst
OpS390XMOVDstoreconst
OpS390XCLEAR
OpS390XCALLstatic
OpS390XCALLclosure
OpS390XCALLdefer
OpS390XCALLgo
OpS390XCALLinter
OpS390XInvertFlags
OpS390XLoweredGetG
OpS390XLoweredGetClosurePtr
OpS390XLoweredNilCheck
OpS390XMOVDconvert
OpS390XFlagEQ
OpS390XFlagLT
OpS390XFlagGT
OpS390XSTMG2
OpS390XSTMG3
OpS390XSTMG4
OpS390XSTM2
OpS390XSTM3
OpS390XSTM4
OpS390XLoweredMove
OpS390XLoweredZero
OpAdd8 OpAdd8
OpAdd16 OpAdd16
OpAdd32 OpAdd32
...@@ -15053,111 +15238,2393 @@ var opcodeTable = [...]opInfo{ ...@@ -15053,111 +15238,2393 @@ var opcodeTable = [...]opInfo{
}, },
{ {
name: "Add8", name: "FADDS",
argLen: 2, argLen: 2,
commutative: true, commutative: true,
generic: true, resultInArg0: true,
}, clobberFlags: true,
{ asm: s390x.AFADDS,
name: "Add16", reg: regInfo{
argLen: 2, inputs: []inputInfo{
commutative: true, {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
generic: true, {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
outputs: []outputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
},
}, },
{ {
name: "Add32", name: "FADD",
argLen: 2, argLen: 2,
commutative: true, commutative: true,
generic: true, resultInArg0: true,
clobberFlags: true,
asm: s390x.AFADD,
reg: regInfo{
inputs: []inputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
{1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
outputs: []outputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
},
}, },
{ {
name: "Add64", name: "FSUBS",
argLen: 2, argLen: 2,
commutative: true, resultInArg0: true,
generic: true, clobberFlags: true,
asm: s390x.AFSUBS,
reg: regInfo{
inputs: []inputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
{1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
outputs: []outputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
},
}, },
{ {
name: "AddPtr", name: "FSUB",
argLen: 2, argLen: 2,
generic: true, resultInArg0: true,
clobberFlags: true,
asm: s390x.AFSUB,
reg: regInfo{
inputs: []inputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
{1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
outputs: []outputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
},
}, },
{ {
name: "Add32F", name: "FMULS",
argLen: 2, argLen: 2,
generic: true, commutative: true,
resultInArg0: true,
asm: s390x.AFMULS,
reg: regInfo{
inputs: []inputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
{1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
outputs: []outputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
},
}, },
{ {
name: "Add64F", name: "FMUL",
argLen: 2, argLen: 2,
generic: true, commutative: true,
resultInArg0: true,
asm: s390x.AFMUL,
reg: regInfo{
inputs: []inputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
{1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
outputs: []outputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
},
}, },
{ {
name: "Sub8", name: "FDIVS",
argLen: 2, argLen: 2,
generic: true, resultInArg0: true,
asm: s390x.AFDIVS,
reg: regInfo{
inputs: []inputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
{1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
outputs: []outputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
},
}, },
{ {
name: "Sub16", name: "FDIV",
argLen: 2, argLen: 2,
generic: true, resultInArg0: true,
asm: s390x.AFDIV,
reg: regInfo{
inputs: []inputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
{1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
outputs: []outputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
},
}, },
{ {
name: "Sub32", name: "FNEGS",
argLen: 2, argLen: 1,
generic: true, clobberFlags: true,
asm: s390x.AFNEGS,
reg: regInfo{
inputs: []inputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
outputs: []outputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
},
}, },
{ {
name: "Sub64", name: "FNEG",
argLen: 2, argLen: 1,
generic: true, clobberFlags: true,
asm: s390x.AFNEG,
reg: regInfo{
inputs: []inputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
outputs: []outputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
},
}, },
{ {
name: "SubPtr", name: "FMOVSload",
auxType: auxSymOff,
argLen: 2, argLen: 2,
generic: true, asm: s390x.AFMOVS,
reg: regInfo{
inputs: []inputInfo{
{0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB
},
outputs: []outputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
},
}, },
{ {
name: "Sub32F", name: "FMOVDload",
auxType: auxSymOff,
argLen: 2, argLen: 2,
generic: true, asm: s390x.AFMOVD,
reg: regInfo{
inputs: []inputInfo{
{0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB
},
outputs: []outputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
},
}, },
{ {
name: "Sub64F", name: "FMOVSconst",
argLen: 2, auxType: auxFloat32,
generic: true, argLen: 0,
rematerializeable: true,
asm: s390x.AFMOVS,
reg: regInfo{
outputs: []outputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
},
}, },
{ {
name: "Mul8", name: "FMOVDconst",
argLen: 2, auxType: auxFloat64,
commutative: true, argLen: 0,
generic: true, rematerializeable: true,
asm: s390x.AFMOVD,
reg: regInfo{
outputs: []outputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
},
}, },
{ {
name: "Mul16", name: "FMOVSloadidx",
argLen: 2, auxType: auxSymOff,
commutative: true, argLen: 3,
generic: true, asm: s390x.AFMOVS,
reg: regInfo{
inputs: []inputInfo{
{0, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
{1, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
},
outputs: []outputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
},
}, },
{ {
name: "Mul32", name: "FMOVDloadidx",
argLen: 2, auxType: auxSymOff,
commutative: true, argLen: 3,
generic: true, asm: s390x.AFMOVD,
reg: regInfo{
inputs: []inputInfo{
{0, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
{1, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
},
outputs: []outputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
},
}, },
{ {
name: "Mul64", name: "FMOVSstore",
argLen: 2, auxType: auxSymOff,
commutative: true, argLen: 3,
generic: true, asm: s390x.AFMOVS,
reg: regInfo{
inputs: []inputInfo{
{0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB
{1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
},
}, },
{ {
name: "Mul32F", name: "FMOVDstore",
argLen: 2, auxType: auxSymOff,
generic: true, argLen: 3,
asm: s390x.AFMOVD,
reg: regInfo{
inputs: []inputInfo{
{0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB
{1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
},
}, },
{ {
name: "Mul64F", name: "FMOVSstoreidx",
argLen: 2, auxType: auxSymOff,
argLen: 4,
asm: s390x.AFMOVS,
reg: regInfo{
inputs: []inputInfo{
{0, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
{1, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
{2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
},
},
{
name: "FMOVDstoreidx",
auxType: auxSymOff,
argLen: 4,
asm: s390x.AFMOVD,
reg: regInfo{
inputs: []inputInfo{
{0, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
{1, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
{2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
},
},
{
name: "ADD",
argLen: 2,
commutative: true,
clobberFlags: true,
asm: s390x.AADD,
reg: regInfo{
inputs: []inputInfo{
{1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
{0, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "ADDW",
argLen: 2,
commutative: true,
clobberFlags: true,
asm: s390x.AADDW,
reg: regInfo{
inputs: []inputInfo{
{1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
{0, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "ADDconst",
auxType: auxInt64,
argLen: 1,
clobberFlags: true,
asm: s390x.AADD,
reg: regInfo{
inputs: []inputInfo{
{0, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "ADDWconst",
auxType: auxInt32,
argLen: 1,
clobberFlags: true,
asm: s390x.AADDW,
reg: regInfo{
inputs: []inputInfo{
{0, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "SUB",
argLen: 2,
clobberFlags: true,
asm: s390x.ASUB,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
{1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "SUBW",
argLen: 2,
clobberFlags: true,
asm: s390x.ASUBW,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
{1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "SUBconst",
auxType: auxInt64,
argLen: 1,
resultInArg0: true,
clobberFlags: true,
asm: s390x.ASUB,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "SUBWconst",
auxType: auxInt32,
argLen: 1,
resultInArg0: true,
clobberFlags: true,
asm: s390x.ASUBW,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "MULLD",
argLen: 2,
commutative: true,
resultInArg0: true,
clobberFlags: true,
asm: s390x.AMULLD,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
{1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "MULLW",
argLen: 2,
commutative: true,
resultInArg0: true,
clobberFlags: true,
asm: s390x.AMULLW,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
{1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "MULLDconst",
auxType: auxInt64,
argLen: 1,
resultInArg0: true,
clobberFlags: true,
asm: s390x.AMULLD,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "MULLWconst",
auxType: auxInt32,
argLen: 1,
resultInArg0: true,
clobberFlags: true,
asm: s390x.AMULLW,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "MULHD",
argLen: 2,
resultInArg0: true,
clobberFlags: true,
asm: s390x.AMULHD,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
{1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "MULHDU",
argLen: 2,
resultInArg0: true,
clobberFlags: true,
asm: s390x.AMULHDU,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
{1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "DIVD",
argLen: 2,
resultInArg0: true,
clobberFlags: true,
asm: s390x.ADIVD,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
{1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "DIVW",
argLen: 2,
resultInArg0: true,
clobberFlags: true,
asm: s390x.ADIVW,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
{1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "DIVDU",
argLen: 2,
resultInArg0: true,
clobberFlags: true,
asm: s390x.ADIVDU,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
{1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "DIVWU",
argLen: 2,
resultInArg0: true,
clobberFlags: true,
asm: s390x.ADIVWU,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
{1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "MODD",
argLen: 2,
resultInArg0: true,
clobberFlags: true,
asm: s390x.AMODD,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
{1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "MODW",
argLen: 2,
resultInArg0: true,
clobberFlags: true,
asm: s390x.AMODW,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
{1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "MODDU",
argLen: 2,
resultInArg0: true,
clobberFlags: true,
asm: s390x.AMODDU,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
{1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "MODWU",
argLen: 2,
resultInArg0: true,
clobberFlags: true,
asm: s390x.AMODWU,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
{1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "AND",
argLen: 2,
commutative: true,
clobberFlags: true,
asm: s390x.AAND,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
{1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "ANDW",
argLen: 2,
commutative: true,
clobberFlags: true,
asm: s390x.AAND,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
{1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "ANDconst",
auxType: auxInt64,
argLen: 1,
resultInArg0: true,
clobberFlags: true,
asm: s390x.AAND,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "ANDWconst",
auxType: auxInt32,
argLen: 1,
resultInArg0: true,
clobberFlags: true,
asm: s390x.AAND,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "OR",
argLen: 2,
commutative: true,
clobberFlags: true,
asm: s390x.AOR,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
{1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "ORW",
argLen: 2,
commutative: true,
clobberFlags: true,
asm: s390x.AOR,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
{1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "ORconst",
auxType: auxInt64,
argLen: 1,
resultInArg0: true,
clobberFlags: true,
asm: s390x.AOR,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "ORWconst",
auxType: auxInt32,
argLen: 1,
resultInArg0: true,
clobberFlags: true,
asm: s390x.AOR,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "XOR",
argLen: 2,
commutative: true,
clobberFlags: true,
asm: s390x.AXOR,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
{1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "XORW",
argLen: 2,
commutative: true,
clobberFlags: true,
asm: s390x.AXOR,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
{1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "XORconst",
auxType: auxInt64,
argLen: 1,
resultInArg0: true,
clobberFlags: true,
asm: s390x.AXOR,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "XORWconst",
auxType: auxInt32,
argLen: 1,
resultInArg0: true,
clobberFlags: true,
asm: s390x.AXOR,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "CMP",
argLen: 2,
asm: s390x.ACMP,
reg: regInfo{
inputs: []inputInfo{
{0, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
{1, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
},
},
},
{
name: "CMPW",
argLen: 2,
asm: s390x.ACMPW,
reg: regInfo{
inputs: []inputInfo{
{0, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
{1, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
},
},
},
{
name: "CMPU",
argLen: 2,
asm: s390x.ACMPU,
reg: regInfo{
inputs: []inputInfo{
{0, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
{1, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
},
},
},
{
name: "CMPWU",
argLen: 2,
asm: s390x.ACMPWU,
reg: regInfo{
inputs: []inputInfo{
{0, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
{1, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
},
},
},
{
name: "CMPconst",
auxType: auxInt64,
argLen: 1,
asm: s390x.ACMP,
reg: regInfo{
inputs: []inputInfo{
{0, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
},
},
},
{
name: "CMPWconst",
auxType: auxInt32,
argLen: 1,
asm: s390x.ACMPW,
reg: regInfo{
inputs: []inputInfo{
{0, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
},
},
},
{
name: "CMPUconst",
auxType: auxInt64,
argLen: 1,
asm: s390x.ACMPU,
reg: regInfo{
inputs: []inputInfo{
{0, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
},
},
},
{
name: "CMPWUconst",
auxType: auxInt32,
argLen: 1,
asm: s390x.ACMPWU,
reg: regInfo{
inputs: []inputInfo{
{0, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
},
},
},
{
name: "FCMPS",
argLen: 2,
asm: s390x.ACEBR,
reg: regInfo{
inputs: []inputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
{1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
},
},
{
name: "FCMP",
argLen: 2,
asm: s390x.AFCMPU,
reg: regInfo{
inputs: []inputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
{1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
},
},
{
name: "TESTB",
argLen: 1,
asm: s390x.AAND,
reg: regInfo{
inputs: []inputInfo{
{0, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
},
},
},
{
name: "SLD",
argLen: 2,
asm: s390x.ASLD,
reg: regInfo{
inputs: []inputInfo{
{1, 5118}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "SLW",
argLen: 2,
asm: s390x.ASLW,
reg: regInfo{
inputs: []inputInfo{
{1, 5118}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "SLDconst",
auxType: auxInt64,
argLen: 1,
asm: s390x.ASLD,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "SLWconst",
auxType: auxInt32,
argLen: 1,
asm: s390x.ASLW,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "SRD",
argLen: 2,
asm: s390x.ASRD,
reg: regInfo{
inputs: []inputInfo{
{1, 5118}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "SRW",
argLen: 2,
asm: s390x.ASRW,
reg: regInfo{
inputs: []inputInfo{
{1, 5118}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "SRDconst",
auxType: auxInt64,
argLen: 1,
asm: s390x.ASRD,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "SRWconst",
auxType: auxInt32,
argLen: 1,
asm: s390x.ASRW,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "SRAD",
argLen: 2,
clobberFlags: true,
asm: s390x.ASRAD,
reg: regInfo{
inputs: []inputInfo{
{1, 5118}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "SRAW",
argLen: 2,
clobberFlags: true,
asm: s390x.ASRAW,
reg: regInfo{
inputs: []inputInfo{
{1, 5118}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "SRADconst",
auxType: auxInt64,
argLen: 1,
clobberFlags: true,
asm: s390x.ASRAD,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "SRAWconst",
auxType: auxInt32,
argLen: 1,
clobberFlags: true,
asm: s390x.ASRAW,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "RLLGconst",
auxType: auxInt64,
argLen: 1,
asm: s390x.ARLLG,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "RLLconst",
auxType: auxInt32,
argLen: 1,
asm: s390x.ARLL,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "NEG",
argLen: 1,
clobberFlags: true,
asm: s390x.ANEG,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "NEGW",
argLen: 1,
clobberFlags: true,
asm: s390x.ANEGW,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "NOT",
argLen: 1,
resultInArg0: true,
clobberFlags: true,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "NOTW",
argLen: 1,
resultInArg0: true,
clobberFlags: true,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "FSQRT",
argLen: 1,
asm: s390x.AFSQRT,
reg: regInfo{
inputs: []inputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
outputs: []outputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
},
},
{
name: "SUBEcarrymask",
argLen: 1,
asm: s390x.ASUBE,
reg: regInfo{
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "SUBEWcarrymask",
argLen: 1,
asm: s390x.ASUBE,
reg: regInfo{
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "MOVDEQ",
argLen: 3,
resultInArg0: true,
asm: s390x.AMOVDEQ,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
{1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "MOVDNE",
argLen: 3,
resultInArg0: true,
asm: s390x.AMOVDNE,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
{1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "MOVDLT",
argLen: 3,
resultInArg0: true,
asm: s390x.AMOVDLT,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
{1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "MOVDLE",
argLen: 3,
resultInArg0: true,
asm: s390x.AMOVDLE,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
{1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "MOVDGT",
argLen: 3,
resultInArg0: true,
asm: s390x.AMOVDGT,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
{1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "MOVDGE",
argLen: 3,
resultInArg0: true,
asm: s390x.AMOVDGE,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
{1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "MOVDGTnoinv",
argLen: 3,
resultInArg0: true,
asm: s390x.AMOVDGT,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
{1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "MOVDGEnoinv",
argLen: 3,
resultInArg0: true,
asm: s390x.AMOVDGE,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
{1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "MOVBreg",
argLen: 1,
asm: s390x.AMOVB,
reg: regInfo{
inputs: []inputInfo{
{0, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "MOVBZreg",
argLen: 1,
asm: s390x.AMOVBZ,
reg: regInfo{
inputs: []inputInfo{
{0, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "MOVHreg",
argLen: 1,
asm: s390x.AMOVH,
reg: regInfo{
inputs: []inputInfo{
{0, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "MOVHZreg",
argLen: 1,
asm: s390x.AMOVHZ,
reg: regInfo{
inputs: []inputInfo{
{0, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "MOVWreg",
argLen: 1,
asm: s390x.AMOVW,
reg: regInfo{
inputs: []inputInfo{
{0, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "MOVWZreg",
argLen: 1,
asm: s390x.AMOVWZ,
reg: regInfo{
inputs: []inputInfo{
{0, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "MOVDconst",
auxType: auxInt64,
argLen: 0,
rematerializeable: true,
asm: s390x.AMOVD,
reg: regInfo{
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "CFDBRA",
argLen: 1,
asm: s390x.ACFDBRA,
reg: regInfo{
inputs: []inputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "CGDBRA",
argLen: 1,
asm: s390x.ACGDBRA,
reg: regInfo{
inputs: []inputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "CFEBRA",
argLen: 1,
asm: s390x.ACFEBRA,
reg: regInfo{
inputs: []inputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "CGEBRA",
argLen: 1,
asm: s390x.ACGEBRA,
reg: regInfo{
inputs: []inputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "CEFBRA",
argLen: 1,
asm: s390x.ACEFBRA,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
},
},
{
name: "CDFBRA",
argLen: 1,
asm: s390x.ACDFBRA,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
},
},
{
name: "CEGBRA",
argLen: 1,
asm: s390x.ACEGBRA,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
},
},
{
name: "CDGBRA",
argLen: 1,
asm: s390x.ACDGBRA,
reg: regInfo{
inputs: []inputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
outputs: []outputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
},
},
{
name: "LEDBR",
argLen: 1,
asm: s390x.ALEDBR,
reg: regInfo{
inputs: []inputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
outputs: []outputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
},
},
{
name: "LDEBR",
argLen: 1,
asm: s390x.ALDEBR,
reg: regInfo{
inputs: []inputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
outputs: []outputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
},
},
{
name: "MOVDaddr",
auxType: auxSymOff,
argLen: 1,
rematerializeable: true,
clobberFlags: true,
reg: regInfo{
inputs: []inputInfo{
{0, 4295000064}, // SP SB
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "MOVDaddridx",
auxType: auxSymOff,
argLen: 2,
clobberFlags: true,
reg: regInfo{
inputs: []inputInfo{
{0, 4295000064}, // SP SB
{1, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "MOVBZload",
auxType: auxSymOff,
argLen: 2,
clobberFlags: true,
asm: s390x.AMOVBZ,
reg: regInfo{
inputs: []inputInfo{
{0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "MOVBload",
auxType: auxSymOff,
argLen: 2,
clobberFlags: true,
asm: s390x.AMOVB,
reg: regInfo{
inputs: []inputInfo{
{0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "MOVHZload",
auxType: auxSymOff,
argLen: 2,
clobberFlags: true,
asm: s390x.AMOVHZ,
reg: regInfo{
inputs: []inputInfo{
{0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "MOVHload",
auxType: auxSymOff,
argLen: 2,
clobberFlags: true,
asm: s390x.AMOVH,
reg: regInfo{
inputs: []inputInfo{
{0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "MOVWZload",
auxType: auxSymOff,
argLen: 2,
clobberFlags: true,
asm: s390x.AMOVWZ,
reg: regInfo{
inputs: []inputInfo{
{0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "MOVWload",
auxType: auxSymOff,
argLen: 2,
clobberFlags: true,
asm: s390x.AMOVW,
reg: regInfo{
inputs: []inputInfo{
{0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "MOVDload",
auxType: auxSymOff,
argLen: 2,
clobberFlags: true,
asm: s390x.AMOVD,
reg: regInfo{
inputs: []inputInfo{
{0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "MOVHBRload",
auxType: auxSymOff,
argLen: 2,
clobberFlags: true,
asm: s390x.AMOVHBR,
reg: regInfo{
inputs: []inputInfo{
{0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "MOVWBRload",
auxType: auxSymOff,
argLen: 2,
clobberFlags: true,
asm: s390x.AMOVWBR,
reg: regInfo{
inputs: []inputInfo{
{0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "MOVDBRload",
auxType: auxSymOff,
argLen: 2,
clobberFlags: true,
asm: s390x.AMOVDBR,
reg: regInfo{
inputs: []inputInfo{
{0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "MOVBstore",
auxType: auxSymOff,
argLen: 3,
clobberFlags: true,
asm: s390x.AMOVB,
reg: regInfo{
inputs: []inputInfo{
{0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB
{1, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
},
},
},
{
name: "MOVHstore",
auxType: auxSymOff,
argLen: 3,
clobberFlags: true,
asm: s390x.AMOVH,
reg: regInfo{
inputs: []inputInfo{
{0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB
{1, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
},
},
},
{
name: "MOVWstore",
auxType: auxSymOff,
argLen: 3,
clobberFlags: true,
asm: s390x.AMOVW,
reg: regInfo{
inputs: []inputInfo{
{0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB
{1, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
},
},
},
{
name: "MOVDstore",
auxType: auxSymOff,
argLen: 3,
clobberFlags: true,
asm: s390x.AMOVD,
reg: regInfo{
inputs: []inputInfo{
{0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB
{1, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
},
},
},
{
name: "MVC",
auxType: auxSymValAndOff,
argLen: 3,
clobberFlags: true,
asm: s390x.AMVC,
reg: regInfo{
inputs: []inputInfo{
{0, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
{1, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
},
},
},
{
name: "MOVBZloadidx",
auxType: auxSymOff,
argLen: 3,
clobberFlags: true,
asm: s390x.AMOVBZ,
reg: regInfo{
inputs: []inputInfo{
{1, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
{0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "MOVHZloadidx",
auxType: auxSymOff,
argLen: 3,
clobberFlags: true,
asm: s390x.AMOVHZ,
reg: regInfo{
inputs: []inputInfo{
{1, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
{0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "MOVWZloadidx",
auxType: auxSymOff,
argLen: 3,
clobberFlags: true,
asm: s390x.AMOVWZ,
reg: regInfo{
inputs: []inputInfo{
{1, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
{0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "MOVDloadidx",
auxType: auxSymOff,
argLen: 3,
clobberFlags: true,
asm: s390x.AMOVD,
reg: regInfo{
inputs: []inputInfo{
{1, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
{0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "MOVHBRloadidx",
auxType: auxSymOff,
argLen: 3,
clobberFlags: true,
asm: s390x.AMOVHBR,
reg: regInfo{
inputs: []inputInfo{
{1, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
{0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "MOVWBRloadidx",
auxType: auxSymOff,
argLen: 3,
clobberFlags: true,
asm: s390x.AMOVWBR,
reg: regInfo{
inputs: []inputInfo{
{1, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
{0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "MOVDBRloadidx",
auxType: auxSymOff,
argLen: 3,
clobberFlags: true,
asm: s390x.AMOVDBR,
reg: regInfo{
inputs: []inputInfo{
{1, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
{0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "MOVBstoreidx",
auxType: auxSymOff,
argLen: 4,
clobberFlags: true,
asm: s390x.AMOVB,
reg: regInfo{
inputs: []inputInfo{
{0, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
{1, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
{2, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
},
},
},
{
name: "MOVHstoreidx",
auxType: auxSymOff,
argLen: 4,
clobberFlags: true,
asm: s390x.AMOVH,
reg: regInfo{
inputs: []inputInfo{
{0, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
{1, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
{2, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
},
},
},
{
name: "MOVWstoreidx",
auxType: auxSymOff,
argLen: 4,
clobberFlags: true,
asm: s390x.AMOVW,
reg: regInfo{
inputs: []inputInfo{
{0, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
{1, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
{2, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
},
},
},
{
name: "MOVDstoreidx",
auxType: auxSymOff,
argLen: 4,
clobberFlags: true,
asm: s390x.AMOVD,
reg: regInfo{
inputs: []inputInfo{
{0, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
{1, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
{2, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
},
},
},
{
name: "MOVBstoreconst",
auxType: auxSymValAndOff,
argLen: 2,
clobberFlags: true,
asm: s390x.AMOVB,
reg: regInfo{
inputs: []inputInfo{
{0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB
},
},
},
{
name: "MOVHstoreconst",
auxType: auxSymValAndOff,
argLen: 2,
clobberFlags: true,
asm: s390x.AMOVH,
reg: regInfo{
inputs: []inputInfo{
{0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB
},
},
},
{
name: "MOVWstoreconst",
auxType: auxSymValAndOff,
argLen: 2,
clobberFlags: true,
asm: s390x.AMOVW,
reg: regInfo{
inputs: []inputInfo{
{0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB
},
},
},
{
name: "MOVDstoreconst",
auxType: auxSymValAndOff,
argLen: 2,
clobberFlags: true,
asm: s390x.AMOVD,
reg: regInfo{
inputs: []inputInfo{
{0, 4295005182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP SB
},
},
},
{
name: "CLEAR",
auxType: auxSymValAndOff,
argLen: 2,
clobberFlags: true,
asm: s390x.ACLEAR,
reg: regInfo{
inputs: []inputInfo{
{0, 5118}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "CALLstatic",
auxType: auxSymOff,
argLen: 1,
clobberFlags: true,
call: true,
reg: regInfo{
clobbers: 4294906879, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
},
{
name: "CALLclosure",
auxType: auxInt64,
argLen: 3,
clobberFlags: true,
call: true,
reg: regInfo{
inputs: []inputInfo{
{1, 4096}, // R12
{0, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
},
clobbers: 4294906879, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
},
{
name: "CALLdefer",
auxType: auxInt64,
argLen: 1,
clobberFlags: true,
call: true,
reg: regInfo{
clobbers: 4294906879, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
},
{
name: "CALLgo",
auxType: auxInt64,
argLen: 1,
clobberFlags: true,
call: true,
reg: regInfo{
clobbers: 4294906879, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
},
{
name: "CALLinter",
auxType: auxInt64,
argLen: 2,
clobberFlags: true,
call: true,
reg: regInfo{
inputs: []inputInfo{
{0, 5118}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
clobbers: 4294906879, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
},
{
name: "InvertFlags",
argLen: 1,
reg: regInfo{},
},
{
name: "LoweredGetG",
argLen: 1,
reg: regInfo{
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "LoweredGetClosurePtr",
argLen: 0,
reg: regInfo{
outputs: []outputInfo{
{0, 4096}, // R12
},
},
},
{
name: "LoweredNilCheck",
argLen: 2,
clobberFlags: true,
reg: regInfo{
inputs: []inputInfo{
{0, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
},
},
},
{
name: "MOVDconvert",
argLen: 2,
asm: s390x.AMOVD,
reg: regInfo{
inputs: []inputInfo{
{0, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
},
outputs: []outputInfo{
{0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
name: "FlagEQ",
argLen: 0,
reg: regInfo{},
},
{
name: "FlagLT",
argLen: 0,
reg: regInfo{},
},
{
name: "FlagGT",
argLen: 0,
reg: regInfo{},
},
{
name: "STMG2",
auxType: auxSymOff,
argLen: 4,
asm: s390x.ASTMG,
reg: regInfo{
inputs: []inputInfo{
{1, 2}, // R1
{2, 4}, // R2
{0, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
},
},
},
{
name: "STMG3",
auxType: auxSymOff,
argLen: 5,
asm: s390x.ASTMG,
reg: regInfo{
inputs: []inputInfo{
{1, 2}, // R1
{2, 4}, // R2
{3, 8}, // R3
{0, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
},
},
},
{
name: "STMG4",
auxType: auxSymOff,
argLen: 6,
asm: s390x.ASTMG,
reg: regInfo{
inputs: []inputInfo{
{1, 2}, // R1
{2, 4}, // R2
{3, 8}, // R3
{4, 16}, // R4
{0, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
},
},
},
{
name: "STM2",
auxType: auxSymOff,
argLen: 4,
asm: s390x.ASTMY,
reg: regInfo{
inputs: []inputInfo{
{1, 2}, // R1
{2, 4}, // R2
{0, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
},
},
},
{
name: "STM3",
auxType: auxSymOff,
argLen: 5,
asm: s390x.ASTMY,
reg: regInfo{
inputs: []inputInfo{
{1, 2}, // R1
{2, 4}, // R2
{3, 8}, // R3
{0, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
},
},
},
{
name: "STM4",
auxType: auxSymOff,
argLen: 6,
asm: s390x.ASTMY,
reg: regInfo{
inputs: []inputInfo{
{1, 2}, // R1
{2, 4}, // R2
{3, 8}, // R3
{4, 16}, // R4
{0, 37886}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
},
},
},
{
name: "LoweredMove",
auxType: auxInt64,
argLen: 4,
clobberFlags: true,
reg: regInfo{
inputs: []inputInfo{
{0, 2}, // R1
{1, 4}, // R2
{2, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
},
clobbers: 6, // R1 R2
},
},
{
name: "LoweredZero",
auxType: auxInt64,
argLen: 3,
clobberFlags: true,
reg: regInfo{
inputs: []inputInfo{
{0, 2}, // R1
{1, 37887}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
},
clobbers: 2, // R1
},
},
{
name: "Add8",
argLen: 2,
commutative: true,
generic: true,
},
{
name: "Add16",
argLen: 2,
commutative: true,
generic: true,
},
{
name: "Add32",
argLen: 2,
commutative: true,
generic: true,
},
{
name: "Add64",
argLen: 2,
commutative: true,
generic: true,
},
{
name: "AddPtr",
argLen: 2,
generic: true,
},
{
name: "Add32F",
argLen: 2,
generic: true,
},
{
name: "Add64F",
argLen: 2,
generic: true,
},
{
name: "Sub8",
argLen: 2,
generic: true,
},
{
name: "Sub16",
argLen: 2,
generic: true,
},
{
name: "Sub32",
argLen: 2,
generic: true,
},
{
name: "Sub64",
argLen: 2,
generic: true,
},
{
name: "SubPtr",
argLen: 2,
generic: true,
},
{
name: "Sub32F",
argLen: 2,
generic: true,
},
{
name: "Sub64F",
argLen: 2,
generic: true,
},
{
name: "Mul8",
argLen: 2,
commutative: true,
generic: true,
},
{
name: "Mul16",
argLen: 2,
commutative: true,
generic: true,
},
{
name: "Mul32",
argLen: 2,
commutative: true,
generic: true,
},
{
name: "Mul64",
argLen: 2,
commutative: true,
generic: true,
},
{
name: "Mul32F",
argLen: 2,
generic: true,
},
{
name: "Mul64F",
argLen: 2,
generic: true,
},
{
...@@ -17011,3 +19478,42 @@ var gpRegMaskPPC64 = regMask(536866812)
var fpRegMaskPPC64 = regMask(288230371856744448)
var specialRegMaskPPC64 = regMask(0)
var framepointerRegPPC64 = int8(0)
var registersS390X = [...]Register{
{0, "R0"},
{1, "R1"},
{2, "R2"},
{3, "R3"},
{4, "R4"},
{5, "R5"},
{6, "R6"},
{7, "R7"},
{8, "R8"},
{9, "R9"},
{10, "R10"},
{11, "R11"},
{12, "R12"},
{13, "g"},
{14, "R14"},
{15, "SP"},
{16, "F0"},
{17, "F1"},
{18, "F2"},
{19, "F3"},
{20, "F4"},
{21, "F5"},
{22, "F6"},
{23, "F7"},
{24, "F8"},
{25, "F9"},
{26, "F10"},
{27, "F11"},
{28, "F12"},
{29, "F13"},
{30, "F14"},
{31, "F15"},
{32, "SB"},
}
var gpRegMaskS390X = regMask(5119)
var fpRegMaskS390X = regMask(4294901760)
var specialRegMaskS390X = regMask(0)
var framepointerRegS390X = int8(-1)
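The integer masks in the regInfo inputs, outputs, and clobbers fields above are bitmaps over the register numbering in registersS390X: bit i corresponds to register number i, so 5119 (binary 1001111111111) is R0-R9 plus R12, and adding 1<<32 brings SB into a mask. The following standalone sketch is not part of the commit (the name table is simply copied from registersS390X); it just decodes the masks that recur throughout the table:

// Hypothetical helper, not part of the commit: decode an SSA register mask
// into register names using the numbering from registersS390X above.
package main

import "fmt"

// Register names indexed by the bit position used in the masks above.
var s390xRegNames = []string{
	"R0", "R1", "R2", "R3", "R4", "R5", "R6", "R7",
	"R8", "R9", "R10", "R11", "R12", "g", "R14", "SP",
	"F0", "F1", "F2", "F3", "F4", "F5", "F6", "F7",
	"F8", "F9", "F10", "F11", "F12", "F13", "F14", "F15",
	"SB",
}

// decode expands a register mask into the list of register names it covers.
func decode(mask uint64) []string {
	var regs []string
	for i, name := range s390xRegNames {
		if mask&(1<<uint(i)) != 0 {
			regs = append(regs, name)
		}
	}
	return regs
}

func main() {
	fmt.Println(decode(5119))       // gpRegMaskS390X: R0-R9, R12
	fmt.Println(decode(37886))      // a common input mask: R1-R9, R12, SP
	fmt.Println(decode(4295005182)) // same plus SB (bit 32)
	fmt.Println(decode(4294901760)) // fpRegMaskS390X: F0-F15
}

Note that gpRegMaskS390X (5119) excludes R10 and R11, consistent with the register allocator hunk below ("R10 & R11 already reserved"), as well as R13 (g), R14, and R15 (SP).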
...@@ -502,6 +502,8 @@ func (s *regAllocState) init(f *Func) {
// we do need to be careful, but that carefulness is hidden
// in the rewrite rules so we always have a free register
// available for global load/stores. See gen/386.rules (search for Flag_shared).
case "s390x":
// nothing to do, R10 & R11 already reserved
default:
s.f.Config.fe.Unimplementedf(0, "arch %s not implemented", s.f.Config.arch)
}
...
...@@ -228,6 +228,11 @@ func is16Bit(n int64) bool {
return n == int64(int16(n))
}
// is20Bit reports whether n can be represented as a signed 20 bit integer.
func is20Bit(n int64) bool {
return -(1<<19) <= n && n < (1<<19)
}
// b2i translates a boolean value to 0 or 1 for assigning to auxInt.
func b2i(b bool) int64 {
if b {
...
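The new is20Bit helper complements the existing is16Bit: s390x long-displacement instruction forms encode a signed 20-bit offset, so valid values lie in the half-open range [-1<<19, 1<<19). A quick illustrative boundary check (not part of the commit):

// Illustrative boundary check for is20Bit; standalone, not part of the commit.
package main

import "fmt"

func is20Bit(n int64) bool {
	return -(1 << 19) <= n && n < (1 << 19)
}

func main() {
	fmt.Println(is20Bit(-524288)) // true:  -(1<<19) is the smallest in-range value
	fmt.Println(is20Bit(524287))  // true:  (1<<19)-1 is the largest in-range value
	fmt.Println(is20Bit(524288))  // false: 1<<19 does not fit in a signed 20-bit field
	fmt.Println(is20Bit(-524289)) // false: below the signed 20-bit range
}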
This source diff could not be displayed because it is too large.
...@@ -84,7 +84,10 @@ func schedule(f *Func) {
// Compute score. Larger numbers are scheduled closer to the end of the block.
for _, v := range b.Values {
switch {
case v.Op == OpAMD64LoweredGetClosurePtr || v.Op == OpPPC64LoweredGetClosurePtr || v.Op == OpARMLoweredGetClosurePtr || v.Op == OpARM64LoweredGetClosurePtr || v.Op == Op386LoweredGetClosurePtr || v.Op == OpMIPS64LoweredGetClosurePtr: case v.Op == OpAMD64LoweredGetClosurePtr || v.Op == OpPPC64LoweredGetClosurePtr ||
v.Op == OpARMLoweredGetClosurePtr || v.Op == OpARM64LoweredGetClosurePtr ||
v.Op == Op386LoweredGetClosurePtr || v.Op == OpMIPS64LoweredGetClosurePtr ||
v.Op == OpS390XLoweredGetClosurePtr:
// We also score GetLoweredClosurePtr as early as possible to ensure that the
// context register is not stomped. GetLoweredClosurePtr should only appear
// in the entry block where there are no phi functions, so there is no
...
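The comment above is the reasoning behind adding OpS390XLoweredGetClosurePtr to this case: values with lower scores are emitted earlier in the block, and the closure-pointer op must come first so the context register (R12 on s390x, per the LoweredGetClosurePtr output mask earlier) is read before anything can clobber it. A stripped-down, hypothetical sketch of score-based ordering, not the compiler's actual schedule.go, looks like this:

// Hypothetical sketch of score-based scheduling within a block.
package main

import (
	"fmt"
	"sort"
)

type value struct {
	op    string
	score int
}

const (
	scoreClosurePtr = iota // lowest: must run first, before the context register can be clobbered
	scoreDefault
	scoreControl // highest: block control values go last
)

// schedule orders values so that lower-scored values are emitted earlier.
func schedule(values []value) []value {
	sort.SliceStable(values, func(i, j int) bool {
		return values[i].score < values[j].score
	})
	return values
}

func main() {
	b := []value{
		{"Add64", scoreDefault},
		{"LoweredGetClosurePtr", scoreClosurePtr},
		{"Compare", scoreControl},
	}
	for _, v := range schedule(b) {
		fmt.Println(v.op) // prints LoweredGetClosurePtr, Add64, Compare
	}
}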
// +build !amd64,!arm,!amd64p32,!386,!arm64,!ppc64le,!mips64,!mips64le // +build !amd64,!arm,!amd64p32,!386,!arm64,!ppc64le,!mips64,!mips64le,!s390x
// errorcheck -0 -l -live -wb=0
// Copyright 2014 The Go Authors. All rights reserved.
...
// +build amd64 arm amd64p32 386 arm64 mips64 mips64le // +build amd64 arm amd64p32 386 arm64 mips64 mips64le s390x
// errorcheck -0 -l -live -wb=0
// Copyright 2014 The Go Authors. All rights reserved.
...
// errorcheck -0 -d=nil
// +build amd64 arm amd64p32 386 arm64 mips64 mips64le ppc64le // +build amd64 arm amd64p32 386 arm64 mips64 mips64le ppc64le s390x
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
...
// +build amd64 // +build amd64,s390x
// errorcheck -0 -d=ssa/phiopt/debug=3
// Copyright 2016 The Go Authors. All rights reserved.
...
// +build !amd64,!arm,!amd64p32,!386,!arm64,!ppc64le,!mips64,!mips64le // +build !amd64,!arm,!amd64p32,!386,!arm64,!ppc64le,!mips64,!mips64le,!s390x
// errorcheck -0 -d=append,slice
// Copyright 2015 The Go Authors. All rights reserved.
...
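The test files above are gated per architecture with old-style build constraints: on a // +build line, space-separated terms are OR-ed, comma-separated terms are AND-ed, and ! negates, so adding s390x (or dropping !s390x) opts the new backend into each test. A minimal hypothetical file gated the same way:

// +build s390x

// Hypothetical example file: the constraint above (followed by a blank line,
// before the package clause) means it is compiled only when GOARCH=s390x,
// mirroring how the test files in this commit opt architectures in or out.
package main

func main() {
	println("compiled for s390x")
}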