Commit 25e0a367 authored by Keith Randall

[dev.ssa] cmd/compile: clean up tuple types and selects

Make tuple types and their SelectX ops fully generic.
These ops no longer need to be lowered.
Regalloc understands them and their tuple-generating arguments.
We can now have opcodes returning arbitrary pairs of results.
(And it would be easy to move to >2 results if needed.)

Update the arm implementation to the new standard.
Implement just enough of the 386 port to do 64-bit add.

Change-Id: I370ed5aacce219c82e1954c61d1f63af76c16f79
Reviewed-on: https://go-review.googlesource.com/24976
Reviewed-by: Cherry Zhang <cherryyz@google.com>
Run-TryBot: Keith Randall <khr@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
parent 6b6de15d
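For context, a minimal sketch of the semantics these generic tuple ops give the 386 port for 64-bit add, written as ordinary Go rather than compiler code (the function and variable names here are illustrative, not part of the commit): Add32carry produces a <carry, low-word> pair read by Select0/Select1, and Add32withcarry folds the carry into the high-word sum.

package main

import "fmt"

// add64via32 mirrors, in plain Go, the SSA decomposition this commit enables:
//   Add32carry     -> tuple <carry, low 32 bits> (Select0 = carry, Select1 = low)
//   Add32withcarry -> high-word sum that includes the carry.
func add64via32(xhi, xlo, yhi, ylo uint32) (hi, lo uint32) {
    lo = xlo + ylo // Select1: low 32 bits of the pair
    var carry uint32
    if lo < xlo { // Select0: carry-out of the low-word add
        carry = 1
    }
    hi = xhi + yhi + carry // Add32withcarry
    return
}

func main() {
    hi, lo := add64via32(0x00000001, 0xffffffff, 0x00000000, 0x00000001)
    fmt.Printf("%08x%08x\n", hi, lo) // prints 0000000200000000
}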
@@ -278,7 +278,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
         p.To.Reg = r
     case ssa.OpARMADDS,
         ssa.OpARMSUBS:
-        r := gc.SSARegNum(v)
+        r := gc.SSARegNum1(v)
         r1 := gc.SSARegNum(v.Args[0])
         r2 := gc.SSARegNum(v.Args[1])
         p := gc.Prog(v.Op.Asm())
@@ -351,7 +351,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
         p.From.Offset = v.AuxInt
         p.Reg = gc.SSARegNum(v.Args[0])
         p.To.Type = obj.TYPE_REG
-        p.To.Reg = gc.SSARegNum(v)
+        p.To.Reg = gc.SSARegNum1(v)
     case ssa.OpARMSRRconst:
         genshift(arm.AMOVW, 0, gc.SSARegNum(v.Args[0]), gc.SSARegNum(v), arm.SHIFT_RR, v.AuxInt)
     case ssa.OpARMADDshiftLL,
@@ -368,7 +368,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
     case ssa.OpARMADDSshiftLL,
         ssa.OpARMSUBSshiftLL,
         ssa.OpARMRSBSshiftLL:
-        p := genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_LL, v.AuxInt)
+        p := genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum1(v), arm.SHIFT_LL, v.AuxInt)
         p.Scond = arm.C_SBIT
     case ssa.OpARMADDshiftRL,
         ssa.OpARMADCshiftRL,
@@ -384,7 +384,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
     case ssa.OpARMADDSshiftRL,
         ssa.OpARMSUBSshiftRL,
         ssa.OpARMRSBSshiftRL:
-        p := genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_LR, v.AuxInt)
+        p := genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum1(v), arm.SHIFT_LR, v.AuxInt)
         p.Scond = arm.C_SBIT
     case ssa.OpARMADDshiftRA,
         ssa.OpARMADCshiftRA,
@@ -400,7 +400,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
     case ssa.OpARMADDSshiftRA,
         ssa.OpARMSUBSshiftRA,
         ssa.OpARMRSBSshiftRA:
-        p := genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_AR, v.AuxInt)
+        p := genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum1(v), arm.SHIFT_AR, v.AuxInt)
         p.Scond = arm.C_SBIT
     case ssa.OpARMMVNshiftLL:
         genshift(v.Op.Asm(), 0, gc.SSARegNum(v.Args[0]), gc.SSARegNum(v), arm.SHIFT_LL, v.AuxInt)
@@ -428,7 +428,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
     case ssa.OpARMADDSshiftLLreg,
         ssa.OpARMSUBSshiftLLreg,
         ssa.OpARMRSBSshiftLLreg:
-        p := genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), gc.SSARegNum(v), arm.SHIFT_LL)
+        p := genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), gc.SSARegNum1(v), arm.SHIFT_LL)
         p.Scond = arm.C_SBIT
     case ssa.OpARMADDshiftRLreg,
         ssa.OpARMADCshiftRLreg,
@@ -444,7 +444,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
     case ssa.OpARMADDSshiftRLreg,
         ssa.OpARMSUBSshiftRLreg,
         ssa.OpARMRSBSshiftRLreg:
-        p := genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), gc.SSARegNum(v), arm.SHIFT_LR)
+        p := genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), gc.SSARegNum1(v), arm.SHIFT_LR)
         p.Scond = arm.C_SBIT
     case ssa.OpARMADDshiftRAreg,
         ssa.OpARMADCshiftRAreg,
@@ -460,7 +460,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
     case ssa.OpARMADDSshiftRAreg,
         ssa.OpARMSUBSshiftRAreg,
         ssa.OpARMRSBSshiftRAreg:
-        p := genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), gc.SSARegNum(v), arm.SHIFT_AR)
+        p := genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), gc.SSARegNum1(v), arm.SHIFT_AR)
         p.Scond = arm.C_SBIT
     case ssa.OpARMHMUL,
         ssa.OpARMHMULU:
@@ -473,14 +473,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
         p.To.Reg = gc.SSARegNum(v)
         p.To.Offset = arm.REGTMP // throw away low 32-bit into tmp register
     case ssa.OpARMMULLU:
-        // 32-bit multiplication, results 64-bit, low 32-bit in reg(v), high 32-bit in R0
+        // 32-bit multiplication, results 64-bit, high 32-bit in out0, low 32-bit in out1
         p := gc.Prog(v.Op.Asm())
         p.From.Type = obj.TYPE_REG
         p.From.Reg = gc.SSARegNum(v.Args[0])
         p.Reg = gc.SSARegNum(v.Args[1])
         p.To.Type = obj.TYPE_REGREG
-        p.To.Reg = arm.REG_R0                // high 32-bit
-        p.To.Offset = int64(gc.SSARegNum(v)) // low 32-bit
+        p.To.Reg = gc.SSARegNum0(v)           // high 32-bit
+        p.To.Offset = int64(gc.SSARegNum1(v)) // low 32-bit
     case ssa.OpARMMULA:
         p := gc.Prog(v.Op.Asm())
         p.From.Type = obj.TYPE_REG
@@ -928,9 +928,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
         p.From.Offset = 1
         p.To.Type = obj.TYPE_REG
         p.To.Reg = gc.SSARegNum(v)
-    case ssa.OpARMCarry,
-        ssa.OpARMLoweredSelect0,
-        ssa.OpARMLoweredSelect1:
+    case ssa.OpSelect0, ssa.OpSelect1:
         // nothing to do
     case ssa.OpARMLoweredGetClosurePtr:
         // Closure pointer is R7 (arm.REGCTXT).
...
@@ -4266,11 +4266,39 @@ func SSAReg(v *ssa.Value) *ssa.Register {
     return reg.(*ssa.Register)
 }
+
+// SSAReg0 returns the register to which the first output of v has been allocated.
+func SSAReg0(v *ssa.Value) *ssa.Register {
+    reg := v.Block.Func.RegAlloc[v.ID].(ssa.LocPair)[0]
+    if reg == nil {
+        v.Fatalf("nil first register for value: %s\n%s\n", v.LongString(), v.Block.Func)
+    }
+    return reg.(*ssa.Register)
+}
+
+// SSAReg1 returns the register to which the second output of v has been allocated.
+func SSAReg1(v *ssa.Value) *ssa.Register {
+    reg := v.Block.Func.RegAlloc[v.ID].(ssa.LocPair)[1]
+    if reg == nil {
+        v.Fatalf("nil second register for value: %s\n%s\n", v.LongString(), v.Block.Func)
+    }
+    return reg.(*ssa.Register)
+}
 
 // SSARegNum returns the register number (in cmd/internal/obj numbering) to which v has been allocated.
 func SSARegNum(v *ssa.Value) int16 {
     return Thearch.SSARegToReg[SSAReg(v).Num]
 }
+
+// SSARegNum0 returns the register number (in cmd/internal/obj numbering) to which the first output of v has been allocated.
+func SSARegNum0(v *ssa.Value) int16 {
+    return Thearch.SSARegToReg[SSAReg0(v).Num]
+}
+
+// SSARegNum1 returns the register number (in cmd/internal/obj numbering) to which the second output of v has been allocated.
+func SSARegNum1(v *ssa.Value) int16 {
+    return Thearch.SSARegToReg[SSAReg1(v).Num]
+}
 
 // CheckLoweredPhi checks that regalloc and stackalloc correctly handled phi values.
 // Called during ssaGenValue.
 func CheckLoweredPhi(v *ssa.Value) {
...
@@ -171,10 +171,10 @@ func cse(f *Func) {
         if rewrite[v.ID] != nil {
             continue
         }
-        if !v.Op.isTupleSelector() {
+        if v.Op != OpSelect0 && v.Op != OpSelect1 {
             continue
         }
-        if !v.Args[0].Op.isTupleGenerator() {
+        if !v.Args[0].Type.IsTuple() {
             f.Fatalf("arg of tuple selector %s is not a tuple: %s", v.String(), v.Args[0].LongString())
         }
         t := rewrite[v.Args[0].ID]
...
@@ -10,6 +10,9 @@
 (Add32F x y) -> (ADDSS x y)
 (Add64F x y) -> (ADDSD x y)
 
+(Add32carry x y) -> (ADDLcarry x y)
+(Add32withcarry x y c) -> (ADCL x y c)
+
 (SubPtr x y) -> (SUBL x y)
 (Sub32 x y) -> (SUBL x y)
 (Sub16 x y) -> (SUBL x y)
...
@@ -99,6 +99,8 @@ func init() {
     gp11nf    = regInfo{inputs: []regMask{gpsp}, outputs: gponly} // nf: no flags clobbered
     gp11sb    = regInfo{inputs: []regMask{gpspsb}, outputs: gponly}
     gp21      = regInfo{inputs: []regMask{gp, gp}, outputs: gponly, clobbers: flags}
+    gp21carry = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{flags, gp}}
+    gp2carry1 = regInfo{inputs: []regMask{gp, gp, flags}, outputs: gponly}
     gp21sp    = regInfo{inputs: []regMask{gpsp, gp}, outputs: gponly, clobbers: flags}
     gp21sb    = regInfo{inputs: []regMask{gpspsb, gpsp}, outputs: gponly}
     gp21shift = regInfo{inputs: []regMask{gp, cx}, outputs: []regMask{gp}, clobbers: flags}
@@ -171,6 +173,9 @@ func init() {
     {name: "ADDL", argLength: 2, reg: gp21sp, asm: "ADDL", commutative: true},                // arg0 + arg1
     {name: "ADDLconst", argLength: 1, reg: gp11sp, asm: "ADDL", aux: "Int32", typ: "UInt32"}, // arg0 + auxint
+
+    {name: "ADDLcarry", argLength: 2, reg: gp21carry, asm: "ADDL", commutative: true, resultInArg0: true}, // arg0 + arg1, generates <carry,result> pair
+    {name: "ADCL", argLength: 3, reg: gp2carry1, asm: "ADCL", commutative: true, resultInArg0: true},      // arg0+arg1+carry(arg2), where arg2 is flags
     {name: "SUBL", argLength: 2, reg: gp21, asm: "SUBL", resultInArg0: true},                    // arg0 - arg1
     {name: "SUBLconst", argLength: 1, reg: gp11, asm: "SUBL", aux: "Int32", resultInArg0: true}, // arg0 - auxint
...
@@ -366,10 +366,6 @@
 (IsSliceInBounds idx len) -> (LessEqualU (CMP idx len))
 
 // pseudo-ops
-(Select0 <t> x) && t.IsFlags() -> (Carry x)
-(Select0 <t> x) && !t.IsFlags() -> (LoweredSelect0 x)
-(Select1 x) -> (LoweredSelect1 x)
-
 (GetClosurePtr) -> (LoweredGetClosurePtr)
 (Convert x mem) -> (MOVWconvert x mem)
...
[diff collapsed]
@@ -42,8 +42,8 @@ type opData struct {
     aux               string
     rematerializeable bool
     argLength         int32 // number of arguments, if -1, then this operation has a variable number of arguments
-    commutative       bool  // this operation is commutative (e.g. addition)
-    resultInArg0      bool  // v and v.Args[0] must be allocated to the same register
+    commutative       bool  // this operation is commutative on its first 2 arguments (e.g. addition)
+    resultInArg0      bool  // last output of v and v.Args[0] must be allocated to the same register
 }
 
 type blockData struct {
@@ -160,11 +160,11 @@ func genOp() {
         }
         if v.resultInArg0 {
             fmt.Fprintln(w, "resultInArg0: true,")
-            if v.reg.inputs[0] != v.reg.outputs[0] {
-                log.Fatalf("input[0] and output registers must be equal for %s", v.name)
+            if v.reg.inputs[0] != v.reg.outputs[len(v.reg.outputs)-1] {
+                log.Fatalf("input[0] and last output register must be equal for %s", v.name)
             }
-            if v.commutative && v.reg.inputs[1] != v.reg.outputs[0] {
-                log.Fatalf("input[1] and output registers must be equal for %s", v.name)
+            if v.commutative && v.reg.inputs[1] != v.reg.outputs[len(v.reg.outputs)-1] {
+                log.Fatalf("input[1] and last output register must be equal for %s", v.name)
             }
         }
         if a.name == "generic" {
@@ -196,14 +196,24 @@ func genOp() {
             }
             fmt.Fprintln(w, "},")
         }
         if v.reg.clobbers > 0 {
             fmt.Fprintf(w, "clobbers: %d,%s\n", v.reg.clobbers, a.regMaskComment(v.reg.clobbers))
         }
         // reg outputs
-        if len(v.reg.outputs) > 0 {
-            fmt.Fprintln(w, "outputs: []regMask{")
-            for _, r := range v.reg.outputs {
-                fmt.Fprintf(w, "%d,%s\n", r, a.regMaskComment(r))
+        s = s[:0]
+        for i, r := range v.reg.outputs {
+            if r != 0 {
+                s = append(s, intPair{countRegs(r), i})
+            }
+        }
+        if len(s) > 0 {
+            sort.Sort(byKey(s))
+            fmt.Fprintln(w, "outputs: []outputInfo{")
+            for _, p := range s {
+                r := v.reg.outputs[p.val]
+                fmt.Fprintf(w, "{%d,%d},%s\n", p.val, r, a.regMaskComment(r))
             }
             fmt.Fprintln(w, "},")
         }
...
@@ -359,7 +359,7 @@ func (v *Value) LongHTML() string {
     }
     r := v.Block.Func.RegAlloc
     if int(v.ID) < len(r) && r[v.ID] != nil {
-        s += " : " + r[v.ID].Name()
+        s += " : " + html.EscapeString(r[v.ID].Name())
     }
     s += "</span>"
     return s
...
@@ -36,3 +36,16 @@ func (s LocalSlot) Name() string {
     }
     return fmt.Sprintf("%s+%d[%s]", s.N, s.Off, s.Type)
 }
+
+type LocPair [2]Location
+
+func (t LocPair) Name() string {
+    n0, n1 := "nil", "nil"
+    if t[0] != nil {
+        n0 = t[0].Name()
+    }
+    if t[1] != nil {
+        n1 = t[1].Name()
+    }
+    return fmt.Sprintf("<%s,%s>", n0, n1)
+}
@@ -21,7 +21,7 @@ func checkLower(f *Func) {
         continue // lowered
     }
     switch v.Op {
-    case OpSP, OpSB, OpInitMem, OpArg, OpPhi, OpVarDef, OpVarKill, OpVarLive, OpKeepAlive:
+    case OpSP, OpSB, OpInitMem, OpArg, OpPhi, OpVarDef, OpVarKill, OpVarLive, OpKeepAlive, OpSelect0, OpSelect1:
         continue // ok not to lower
     case OpGetG:
         if f.Config.hasGReg {
...
@@ -26,7 +26,7 @@ type opInfo struct {
     generic           bool // this is a generic (arch-independent) opcode
     rematerializeable bool // this op is rematerializeable
     commutative       bool // this operation is commutative (e.g. addition)
-    resultInArg0      bool // v and v.Args[0] must be allocated to the same register
+    resultInArg0      bool // last output of v and v.Args[0] must be allocated to the same register
 }
 
 type inputInfo struct {
@@ -34,10 +34,15 @@ type inputInfo struct {
     regs regMask // allowed input registers
 }
 
+type outputInfo struct {
+    idx  int     // index in output tuple
+    regs regMask // allowed output registers
+}
+
 type regInfo struct {
     inputs   []inputInfo // ordered in register allocation order
     clobbers regMask
-    outputs  []regMask // NOTE: values can only have 1 output for now.
+    outputs  []outputInfo // ordered in register allocation order
 }
 
 type auxType int8
@@ -152,28 +157,3 @@ func MakeSizeAndAlign(size, align int64) SizeAndAlign {
     }
     return SizeAndAlign(size | align<<56)
 }
-
-func (op Op) isTupleGenerator() bool {
-    switch op {
-    case OpAdd32carry, OpSub32carry, OpMul32uhilo,
-        OpARMADDS, OpARMSUBS, OpARMMULLU,
-        OpARMADDSconst, OpARMSUBSconst, OpARMRSBSconst,
-        OpARMADDSshiftLL, OpARMSUBSshiftLL, OpARMRSBSshiftLL,
-        OpARMADDSshiftRL, OpARMSUBSshiftRL, OpARMRSBSshiftRL,
-        OpARMADDSshiftRA, OpARMSUBSshiftRA, OpARMRSBSshiftRA,
-        OpARMADDSshiftLLreg, OpARMSUBSshiftLLreg, OpARMRSBSshiftLLreg,
-        OpARMADDSshiftRLreg, OpARMSUBSshiftRLreg, OpARMRSBSshiftRLreg,
-        OpARMADDSshiftRAreg, OpARMSUBSshiftRAreg, OpARMRSBSshiftRAreg:
-        return true
-    }
-    return false
-}
-
-func (op Op) isTupleSelector() bool {
-    switch op {
-    case OpSelect0, OpSelect1,
-        OpARMLoweredSelect0, OpARMLoweredSelect1, OpARMCarry:
-        return true
-    }
-    return false
-}
[diff collapsed]
@@ -333,10 +333,10 @@ func (s *regAllocState) assignReg(r register, v *Value, c *Value) {
     s.f.setHome(c, &s.registers[r])
 }
 
-// allocReg chooses a register for v from the set of registers in mask.
+// allocReg chooses a register from the set of registers in mask.
 // If there is no unused register, a Value will be kicked out of
 // a register to make room.
-func (s *regAllocState) allocReg(v *Value, mask regMask) register {
+func (s *regAllocState) allocReg(mask regMask) register {
     mask &= s.allocatable
     mask &^= s.nospill
     if mask == 0 {
@@ -401,7 +401,7 @@ func (s *regAllocState) allocValToReg(v *Value, mask regMask, nospill bool, line
     }
 
     // Allocate a register.
-    r := s.allocReg(v, mask)
+    r := s.allocReg(mask)
 
     // Allocate v to the new register.
     var c *Value
@@ -471,7 +471,7 @@ func (s *regAllocState) init(f *Func) {
     }
 
     // Figure out which registers we're allowed to use.
-    s.allocatable = s.f.Config.gpRegMask | s.f.Config.fpRegMask | s.f.Config.flagRegMask
+    s.allocatable = s.f.Config.gpRegMask | s.f.Config.fpRegMask
     s.allocatable &^= 1 << s.SPReg
     s.allocatable &^= 1 << s.SBReg
     if s.f.Config.hasGReg {
@@ -499,11 +499,13 @@ func (s *regAllocState) init(f *Func) {
     s.orig = make([]*Value, f.NumValues())
     for _, b := range f.Blocks {
         for _, v := range b.Values {
-            if !v.Type.IsMemory() && !v.Type.IsVoid() && !v.Type.IsFlags() {
+            if !v.Type.IsMemory() && !v.Type.IsVoid() && !v.Type.IsFlags() && !v.Type.IsTuple() {
                 s.values[v.ID].needReg = true
                 s.values[v.ID].rematerializeable = v.rematerializeable()
                 s.orig[v.ID] = v
             }
+            // Note: needReg is false for values returning Tuple types.
+            // Instead, we mark the corresponding Selects as needReg.
         }
     }
     s.computeLive()
@@ -947,6 +949,7 @@ func (s *regAllocState) regalloc(f *Func) {
         if s.f.pass.debug > regDebug {
             fmt.Printf(" processing %s\n", v.LongString())
         }
+        regspec := opcodeTable[v.Op].reg
         if v.Op == OpPhi {
             f.Fatalf("phi %s not at start of block", v)
         }
@@ -962,6 +965,18 @@ func (s *regAllocState) regalloc(f *Func) {
             s.advanceUses(v)
             continue
         }
+        if v.Op == OpSelect0 || v.Op == OpSelect1 {
+            if s.values[v.ID].needReg {
+                var i = 0
+                if v.Op == OpSelect1 {
+                    i = 1
+                }
+                s.assignReg(register(s.f.getHome(v.Args[0].ID).(LocPair)[i].(*Register).Num), v, v)
+            }
+            b.Values = append(b.Values, v)
+            s.advanceUses(v)
+            goto issueSpill
+        }
        if v.Op == OpGetG && s.f.Config.hasGReg {
             // use hardware g register
             if s.regs[s.GReg].v != nil {
@@ -970,17 +985,7 @@ func (s *regAllocState) regalloc(f *Func) {
             s.assignReg(s.GReg, v, v)
             b.Values = append(b.Values, v)
             s.advanceUses(v)
-            // spill unconditionally, will be deleted if never used
-            spill := b.NewValue1(v.Line, OpStoreReg, v.Type, v)
-            s.setOrig(spill, v)
-            s.values[v.ID].spill = spill
-            s.values[v.ID].spillUsed = false
-            if loop != nil {
-                loop.spills = append(loop.spills, v)
-                nSpillsInner++
-            }
-            nSpills++
-            continue
+            goto issueSpill
         }
         if v.Op == OpArg {
             // Args are "pre-spilled" values. We don't allocate
@@ -1009,7 +1014,6 @@ func (s *regAllocState) regalloc(f *Func) {
             b.Values = append(b.Values, v)
             continue
         }
-        regspec := opcodeTable[v.Op].reg
         if len(regspec.inputs) == 0 && len(regspec.outputs) == 0 {
             // No register allocation required (or none specified yet)
             s.freeRegs(regspec.clobbers)
@@ -1167,10 +1171,16 @@ func (s *regAllocState) regalloc(f *Func) {
         // Dump any registers which will be clobbered
         s.freeRegs(regspec.clobbers)
 
-        // Pick register for output.
-        if s.values[v.ID].needReg {
-            mask := regspec.outputs[0] & s.allocatable
-            if opcodeTable[v.Op].resultInArg0 {
+        // Pick registers for outputs.
+        {
+            outRegs := [2]register{noRegister, noRegister}
+            var used regMask
+            for _, out := range regspec.outputs {
+                mask := out.regs & s.allocatable &^ used
+                if mask == 0 {
+                    continue
+                }
+                if opcodeTable[v.Op].resultInArg0 && out.idx == len(regspec.outputs)-1 {
                     if !opcodeTable[v.Op].commutative {
                         // Output must use the same register as input 0.
                         r := register(s.f.getHome(args[0].ID).(*Register).Num)
@@ -1208,9 +1218,27 @@ func (s *regAllocState) regalloc(f *Func) {
                 if mask&^desired.avoid != 0 {
                     mask &^= desired.avoid
                 }
-                r := s.allocReg(v, mask)
+                r := s.allocReg(mask)
+                outRegs[out.idx] = r
+                used |= regMask(1) << r
+            }
+            // Record register choices
+            if v.Type.IsTuple() {
+                var outLocs LocPair
+                if r := outRegs[0]; r != noRegister {
+                    outLocs[0] = &s.registers[r]
+                }
+                if r := outRegs[1]; r != noRegister {
+                    outLocs[1] = &s.registers[r]
+                }
+                s.f.setHome(v, outLocs)
+                // Note that subsequent SelectX instructions will do the assignReg calls.
+            } else {
+                if r := outRegs[0]; r != noRegister {
                 s.assignReg(r, v, v)
                 }
+            }
+        }
 
         // Issue the Value itself.
         for i, a := range args {
@@ -1228,6 +1256,7 @@ func (s *regAllocState) regalloc(f *Func) {
         //     f()
         //   }
         // It would be good to have both spill and restore inside the IF.
+    issueSpill:
         if s.values[v.ID].needReg {
             spill := b.NewValue1(v.Line, OpStoreReg, v.Type, v)
             s.setOrig(spill, v)
@@ -1246,9 +1275,10 @@ func (s *regAllocState) regalloc(f *Func) {
         if s.f.pass.debug > regDebug {
             fmt.Printf(" processing control %s\n", v.LongString())
         }
-        // TODO: regspec for block control values, instead of using
-        // register set from the control op's output.
-        s.allocValToReg(v, opcodeTable[v.Op].reg.outputs[0], false, b.Line)
+        // We assume that a control input can be passed in any
+        // type-compatible register. If this turns out not to be true,
+        // we'll need to introduce a regspec for a block's control value.
+        s.allocValToReg(v, s.compatRegs(v.Type), false, b.Line)
         // Remove this use from the uses list.
         vi := &s.values[v.ID]
         u := vi.uses
@@ -2065,6 +2095,8 @@ func (e *edgeState) findRegFor(typ Type) Location {
     return nil
 }
 
+// rematerializeable reports whether the register allocator should recompute
+// a value instead of spilling/restoring it.
 func (v *Value) rematerializeable() bool {
     if !opcodeTable[v.Op].rematerializeable {
         return false
...
@@ -22,6 +22,10 @@ func rewriteValue386(v *Value, config *Config) bool {
         return rewriteValue386_OpAdd32(v, config)
     case OpAdd32F:
         return rewriteValue386_OpAdd32F(v, config)
+    case OpAdd32carry:
+        return rewriteValue386_OpAdd32carry(v, config)
+    case OpAdd32withcarry:
+        return rewriteValue386_OpAdd32withcarry(v, config)
     case OpAdd64F:
         return rewriteValue386_OpAdd64F(v, config)
     case OpAdd8:
@@ -1116,6 +1120,38 @@ func rewriteValue386_OpAdd32F(v *Value, config *Config) bool {
         return true
     }
 }
+func rewriteValue386_OpAdd32carry(v *Value, config *Config) bool {
+    b := v.Block
+    _ = b
+    // match: (Add32carry x y)
+    // cond:
+    // result: (ADDLcarry x y)
+    for {
+        x := v.Args[0]
+        y := v.Args[1]
+        v.reset(Op386ADDLcarry)
+        v.AddArg(x)
+        v.AddArg(y)
+        return true
+    }
+}
+func rewriteValue386_OpAdd32withcarry(v *Value, config *Config) bool {
+    b := v.Block
+    _ = b
+    // match: (Add32withcarry x y c)
+    // cond:
+    // result: (ADCL x y c)
+    for {
+        x := v.Args[0]
+        y := v.Args[1]
+        c := v.Args[2]
+        v.reset(Op386ADCL)
+        v.AddArg(x)
+        v.AddArg(y)
+        v.AddArg(c)
+        return true
+    }
+}
 func rewriteValue386_OpAdd64F(v *Value, config *Config) bool {
     b := v.Block
     _ = b
...
@@ -654,10 +654,6 @@ func rewriteValueARM(v *Value, config *Config) bool {
         return rewriteValueARM_OpARMSUBshiftRL(v, config)
     case OpARMSUBshiftRLreg:
         return rewriteValueARM_OpARMSUBshiftRLreg(v, config)
-    case OpSelect0:
-        return rewriteValueARM_OpSelect0(v, config)
-    case OpSelect1:
-        return rewriteValueARM_OpSelect1(v, config)
     case OpSignExt16to32:
         return rewriteValueARM_OpSignExt16to32(v, config)
     case OpSignExt8to16:
@@ -15554,50 +15550,6 @@ func rewriteValueARM_OpARMSUBshiftRLreg(v *Value, config *Config) bool {
     }
     return false
 }
-func rewriteValueARM_OpSelect0(v *Value, config *Config) bool {
-    b := v.Block
-    _ = b
-    // match: (Select0 <t> x)
-    // cond: t.IsFlags()
-    // result: (Carry x)
-    for {
-        t := v.Type
-        x := v.Args[0]
-        if !(t.IsFlags()) {
-            break
-        }
-        v.reset(OpARMCarry)
-        v.AddArg(x)
-        return true
-    }
-    // match: (Select0 <t> x)
-    // cond: !t.IsFlags()
-    // result: (LoweredSelect0 x)
-    for {
-        t := v.Type
-        x := v.Args[0]
-        if !(!t.IsFlags()) {
-            break
-        }
-        v.reset(OpARMLoweredSelect0)
-        v.AddArg(x)
-        return true
-    }
-    return false
-}
-func rewriteValueARM_OpSelect1(v *Value, config *Config) bool {
-    b := v.Block
-    _ = b
-    // match: (Select1 x)
-    // cond:
-    // result: (LoweredSelect1 x)
-    for {
-        x := v.Args[0]
-        v.reset(OpARMLoweredSelect1)
-        v.AddArg(x)
-        return true
-    }
-}
 func rewriteValueARM_OpSignExt16to32(v *Value, config *Config) bool {
     b := v.Block
     _ = b
...
@@ -45,21 +45,6 @@ func (h ValHeap) Less(i, j int) bool {
     if c := sx - sy; c != 0 {
         return c > 0 // higher score comes later.
     }
-    if sx == ScoreReadTuple {
-        // both are tuple-reading ops
-        // if they read same tuple, flag-reading op comes earlier
-        if x.Args[0] == y.Args[0] {
-            if x.Op == OpARMCarry || x.Op == OpARMLoweredSelect0 { //TODO: abstract this condition?
-                return false
-            } else {
-                return true
-            }
-        }
-        // if they read different tuples, order them as
-        // tuple-generating order to avoid interleaving
-        x = x.Args[0]
-        y = y.Args[0]
-    }
     if x.Line != y.Line { // Favor in-order line stepping
         return x.Line > y.Line
     }
@@ -119,7 +104,7 @@ func schedule(f *Func) {
             // reduce register pressure. It also helps make sure
             // VARDEF ops are scheduled before the corresponding LEA.
             score[v.ID] = ScoreMemory
-        case v.Op == OpARMCarry || v.Op == OpARMLoweredSelect0 || v.Op == OpARMLoweredSelect1:
+        case v.Op == OpSelect0 || v.Op == OpSelect1:
             // Schedule the pseudo-op of reading part of a tuple
             // immediately after the tuple-generating op, since
             // this value is already live. This also removes its
@@ -226,12 +211,12 @@ func schedule(f *Func) {
         // Do not emit tuple-reading ops until we're ready to emit the tuple-generating op.
         //TODO: maybe remove ReadTuple score above, if it does not help on performance
         switch {
-        case v.Op == OpARMCarry || v.Op == OpARMLoweredSelect0:
+        case v.Op == OpSelect0:
             if tuples[v.Args[0].ID] == nil {
                 tuples[v.Args[0].ID] = make([]*Value, 2)
             }
             tuples[v.Args[0].ID][0] = v
-        case v.Op == OpARMLoweredSelect1:
+        case v.Op == OpSelect1:
             if tuples[v.Args[0].ID] == nil {
                 tuples[v.Args[0].ID] = make([]*Value, 2)
             }
...
@@ -64,7 +64,7 @@ func tighten(f *Func) {
             continue
         default:
         }
-        if v.Op.isTupleSelector() {
+        if v.Op == OpSelect0 || v.Op == OpSelect1 {
             // tuple selector must stay with tuple generator
             continue
         }
...
@@ -165,6 +165,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
         p.To.Type = obj.TYPE_REG
         p.To.Reg = r
     }
+    // 2-address opcode arithmetic
     case ssa.Op386SUBL,
         ssa.Op386MULL,
@@ -176,13 +177,22 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
         ssa.Op386SARL, ssa.Op386SARW, ssa.Op386SARB,
         ssa.Op386ADDSS, ssa.Op386ADDSD, ssa.Op386SUBSS, ssa.Op386SUBSD,
         ssa.Op386MULSS, ssa.Op386MULSD, ssa.Op386DIVSS, ssa.Op386DIVSD,
-        ssa.Op386PXOR:
+        ssa.Op386PXOR,
+        ssa.Op386ADCL:
         r := gc.SSARegNum(v)
         if r != gc.SSARegNum(v.Args[0]) {
             v.Fatalf("input[0] and output not in same register %s", v.LongString())
         }
         opregreg(v.Op.Asm(), r, gc.SSARegNum(v.Args[1]))
+    case ssa.Op386ADDLcarry:
+        // output 0 is carry, output 1 is the low 32 bits.
+        r := gc.SSARegNum1(v)
+        if r != gc.SSARegNum(v.Args[0]) {
+            v.Fatalf("input[0] and output[1] not in same register %s", v.LongString())
+        }
+        opregreg(v.Op.Asm(), r, gc.SSARegNum(v.Args[1]))
     case ssa.Op386DIVL, ssa.Op386DIVW,
         ssa.Op386DIVLU, ssa.Op386DIVWU,
         ssa.Op386MODL, ssa.Op386MODW,
@@ -716,7 +726,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
         p.From.Reg = gc.SSARegNum(v.Args[0])
         p.To.Type = obj.TYPE_REG
         p.To.Reg = gc.SSARegNum(v)
-    case ssa.OpSP, ssa.OpSB:
+    case ssa.OpSP, ssa.OpSB, ssa.OpSelect0, ssa.OpSelect1:
         // nothing to do
     case ssa.Op386SETEQ, ssa.Op386SETNE,
         ssa.Op386SETL, ssa.Op386SETLE,
...