Commit 0a94daa3 authored by Josh Bleecher Snyder

cmd/compile: funnel SSA Prog creation through SSAGenState

Step one in eliminating Prog-related globals.

Passes toolstash-check -all.

Updates #15756

Change-Id: I3b777fb5a7716f2d9da3067fbd94c28ca894a465
Reviewed-on: https://go-review.googlesource.com/38450
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
parent 3b39f523
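
The change is mechanical: Prog creation moves off the package-global gc.Prog and onto the *gc.SSAGenState that ssaGenValue and ssaGenBlock already receive, and helpers such as opregreg, genshift, and genregshift gain an explicit *gc.SSAGenState parameter. Where a helper already used s for a shift amount, that parameter is renamed to n so that s can name the state argument. A minimal sketch of the pattern, using the identifiers from this CL (as step one, the new method simply delegates to the package-level Prog; later CLs can move that state into SSAGenState itself):

    // Prog appends a new Prog.
    func (s *SSAGenState) Prog(as obj.As) *obj.Prog {
    	return Prog(as) // still the package-global Prog for now
    }

Backend call sites then change from p := gc.Prog(v.Op.Asm()) to p := s.Prog(v.Op.Asm()), with no change to the generated code, which is why toolstash-check -all passes.
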
@@ -107,8 +107,8 @@ func moveByType(t ssa.Type) obj.As {
// dest := dest(To) op src(From)
// and also returns the created obj.Prog so it
// may be further adjusted (offset, scale, etc).
func opregreg(op obj.As, dest, src int16) *obj.Prog {
p := gc.Prog(op)
func opregreg(s *gc.SSAGenState, op obj.As, dest, src int16) *obj.Prog {
p := s.Prog(op)
p.From.Type = obj.TYPE_REG
p.To.Type = obj.TYPE_REG
p.To.Reg = dest
@@ -154,13 +154,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r2 := v.Args[1].Reg()
switch {
case r == r1:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = r2
p.To.Type = obj.TYPE_REG
p.To.Reg = r
case r == r2:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = r1
p.To.Type = obj.TYPE_REG
@@ -172,7 +172,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
} else {
asm = x86.ALEAL
}
p := gc.Prog(asm)
p := s.Prog(asm)
p.From.Type = obj.TYPE_MEM
p.From.Reg = r1
p.From.Scale = 1
@@ -196,7 +196,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
opregreg(v.Op.Asm(), r, v.Args[1].Reg())
opregreg(s, v.Op.Asm(), r, v.Args[1].Reg())
case ssa.OpAMD64DIVQU, ssa.OpAMD64DIVLU, ssa.OpAMD64DIVWU:
// Arg[0] (the dividend) is in AX.
@@ -206,14 +206,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r := v.Args[1].Reg()
// Zero extend dividend.
c := gc.Prog(x86.AXORL)
c := s.Prog(x86.AXORL)
c.From.Type = obj.TYPE_REG
c.From.Reg = x86.REG_DX
c.To.Type = obj.TYPE_REG
c.To.Reg = x86.REG_DX
// Issue divide.
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = r
@@ -229,46 +229,46 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
var c *obj.Prog
switch v.Op {
case ssa.OpAMD64DIVQ:
c = gc.Prog(x86.ACMPQ)
c = s.Prog(x86.ACMPQ)
case ssa.OpAMD64DIVL:
c = gc.Prog(x86.ACMPL)
c = s.Prog(x86.ACMPL)
case ssa.OpAMD64DIVW:
c = gc.Prog(x86.ACMPW)
c = s.Prog(x86.ACMPW)
}
c.From.Type = obj.TYPE_REG
c.From.Reg = r
c.To.Type = obj.TYPE_CONST
c.To.Offset = -1
j1 := gc.Prog(x86.AJEQ)
j1 := s.Prog(x86.AJEQ)
j1.To.Type = obj.TYPE_BRANCH
// Sign extend dividend.
switch v.Op {
case ssa.OpAMD64DIVQ:
gc.Prog(x86.ACQO)
s.Prog(x86.ACQO)
case ssa.OpAMD64DIVL:
gc.Prog(x86.ACDQ)
s.Prog(x86.ACDQ)
case ssa.OpAMD64DIVW:
gc.Prog(x86.ACWD)
s.Prog(x86.ACWD)
}
// Issue divide.
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = r
// Skip over -1 fixup code.
j2 := gc.Prog(obj.AJMP)
j2 := s.Prog(obj.AJMP)
j2.To.Type = obj.TYPE_BRANCH
// Issue -1 fixup code.
// n / -1 = -n
n1 := gc.Prog(x86.ANEGQ)
n1 := s.Prog(x86.ANEGQ)
n1.To.Type = obj.TYPE_REG
n1.To.Reg = x86.REG_AX
// n % -1 == 0
n2 := gc.Prog(x86.AXORL)
n2 := s.Prog(x86.AXORL)
n2.From.Type = obj.TYPE_REG
n2.From.Reg = x86.REG_DX
n2.To.Type = obj.TYPE_REG
@@ -287,14 +287,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// Arg[0] is already in AX as it's the only register we allow
// and DX is the only output we care about (the high bits)
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
// IMULB puts the high portion in AH instead of DL,
// so move it to DL for consistency
if v.Type.Size() == 1 {
m := gc.Prog(x86.AMOVB)
m := s.Prog(x86.AMOVB)
m.From.Type = obj.TYPE_REG
m.From.Reg = x86.REG_AH
m.To.Type = obj.TYPE_REG
@@ -304,14 +304,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpAMD64MULQU2:
// Arg[0] is already in AX as it's the only register we allow
// results hi in DX, lo in AX
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
case ssa.OpAMD64DIVQU2:
// Arg[0], Arg[1] are already in Dx, AX, as they're the only registers we allow
// results q in AX, r in DX
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg()
@@ -323,12 +323,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
p := gc.Prog(x86.AADDQ)
p := s.Prog(x86.AADDQ)
p.From.Type = obj.TYPE_REG
p.To.Type = obj.TYPE_REG
p.To.Reg = r
p.From.Reg = v.Args[1].Reg()
p = gc.Prog(x86.ARCRQ)
p = s.Prog(x86.ARCRQ)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 1
p.To.Type = obj.TYPE_REG
@@ -350,7 +350,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
} else {
asm = x86.AINCL
}
p := gc.Prog(asm)
p := s.Prog(asm)
p.To.Type = obj.TYPE_REG
p.To.Reg = r
return
@@ -362,12 +362,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
} else {
asm = x86.ADECL
}
p := gc.Prog(asm)
p := s.Prog(asm)
p.To.Type = obj.TYPE_REG
p.To.Reg = r
return
}
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
@@ -380,7 +380,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
} else {
asm = x86.ALEAL
}
p := gc.Prog(asm)
p := s.Prog(asm)
p.From.Type = obj.TYPE_MEM
p.From.Reg = a
p.From.Offset = v.AuxInt
@@ -392,7 +392,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_REG
@@ -403,7 +403,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
@@ -426,14 +426,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
p.To.Reg = r
case ssa.OpAMD64SBBQcarrymask, ssa.OpAMD64SBBLcarrymask:
r := v.Reg()
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = r
p.To.Type = obj.TYPE_REG
@@ -441,7 +441,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpAMD64LEAQ1, ssa.OpAMD64LEAQ2, ssa.OpAMD64LEAQ4, ssa.OpAMD64LEAQ8:
r := v.Args[0].Reg()
i := v.Args[1].Reg()
p := gc.Prog(x86.ALEAQ)
p := s.Prog(x86.ALEAQ)
switch v.Op {
case ssa.OpAMD64LEAQ1:
p.From.Scale = 1
@@ -462,7 +462,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64LEAQ, ssa.OpAMD64LEAL:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
@@ -471,27 +471,27 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpAMD64CMPQ, ssa.OpAMD64CMPL, ssa.OpAMD64CMPW, ssa.OpAMD64CMPB,
ssa.OpAMD64TESTQ, ssa.OpAMD64TESTL, ssa.OpAMD64TESTW, ssa.OpAMD64TESTB,
ssa.OpAMD64BTL, ssa.OpAMD64BTQ:
opregreg(v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg())
opregreg(s, v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg())
case ssa.OpAMD64UCOMISS, ssa.OpAMD64UCOMISD:
// Go assembler has swapped operands for UCOMISx relative to CMP,
// must account for that right here.
opregreg(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg())
opregreg(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg())
case ssa.OpAMD64CMPQconst, ssa.OpAMD64CMPLconst, ssa.OpAMD64CMPWconst, ssa.OpAMD64CMPBconst:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_CONST
p.To.Offset = v.AuxInt
case ssa.OpAMD64TESTQconst, ssa.OpAMD64TESTLconst, ssa.OpAMD64TESTWconst, ssa.OpAMD64TESTBconst,
ssa.OpAMD64BTLconst, ssa.OpAMD64BTQconst:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Args[0].Reg()
case ssa.OpAMD64MOVLconst, ssa.OpAMD64MOVQconst:
x := v.Reg()
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
@@ -503,20 +503,20 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
}
case ssa.OpAMD64MOVSSconst, ssa.OpAMD64MOVSDconst:
x := v.Reg()
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_FCONST
p.From.Val = math.Float64frombits(uint64(v.AuxInt))
p.To.Type = obj.TYPE_REG
p.To.Reg = x
case ssa.OpAMD64MOVQload, ssa.OpAMD64MOVSSload, ssa.OpAMD64MOVSDload, ssa.OpAMD64MOVLload, ssa.OpAMD64MOVWload, ssa.OpAMD64MOVBload, ssa.OpAMD64MOVBQSXload, ssa.OpAMD64MOVWQSXload, ssa.OpAMD64MOVLQSXload, ssa.OpAMD64MOVOload:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64MOVQloadidx8, ssa.OpAMD64MOVSDloadidx8:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
@@ -525,7 +525,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64MOVLloadidx4, ssa.OpAMD64MOVSSloadidx4:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
@@ -534,7 +534,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64MOVWloadidx2:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
@@ -548,7 +548,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if i == x86.REG_SP {
r, i = i, r
}
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = r
p.From.Scale = 1
@@ -557,14 +557,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64MOVQstore, ssa.OpAMD64MOVSSstore, ssa.OpAMD64MOVSDstore, ssa.OpAMD64MOVLstore, ssa.OpAMD64MOVWstore, ssa.OpAMD64MOVBstore, ssa.OpAMD64MOVOstore:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
case ssa.OpAMD64MOVQstoreidx8, ssa.OpAMD64MOVSDstoreidx8:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_MEM
@@ -573,7 +573,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Index = v.Args[1].Reg()
gc.AddAux(&p.To, v)
case ssa.OpAMD64MOVSSstoreidx4, ssa.OpAMD64MOVLstoreidx4:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_MEM
@@ -582,7 +582,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Index = v.Args[1].Reg()
gc.AddAux(&p.To, v)
case ssa.OpAMD64MOVWstoreidx2:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_MEM
@@ -596,7 +596,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if i == x86.REG_SP {
r, i = i, r
}
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_MEM
@@ -605,7 +605,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Index = i
gc.AddAux(&p.To, v)
case ssa.OpAMD64MOVQstoreconst, ssa.OpAMD64MOVLstoreconst, ssa.OpAMD64MOVWstoreconst, ssa.OpAMD64MOVBstoreconst:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
sc := v.AuxValAndOff()
p.From.Offset = sc.Val()
@@ -613,7 +613,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = v.Args[0].Reg()
gc.AddAux2(&p.To, v, sc.Off())
case ssa.OpAMD64MOVQstoreconstidx1, ssa.OpAMD64MOVQstoreconstidx8, ssa.OpAMD64MOVLstoreconstidx1, ssa.OpAMD64MOVLstoreconstidx4, ssa.OpAMD64MOVWstoreconstidx1, ssa.OpAMD64MOVWstoreconstidx2, ssa.OpAMD64MOVBstoreconstidx1:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
sc := v.AuxValAndOff()
p.From.Offset = sc.Val()
@@ -639,17 +639,17 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpAMD64MOVLQSX, ssa.OpAMD64MOVWQSX, ssa.OpAMD64MOVBQSX, ssa.OpAMD64MOVLQZX, ssa.OpAMD64MOVWQZX, ssa.OpAMD64MOVBQZX,
ssa.OpAMD64CVTTSS2SL, ssa.OpAMD64CVTTSD2SL, ssa.OpAMD64CVTTSS2SQ, ssa.OpAMD64CVTTSD2SQ,
ssa.OpAMD64CVTSS2SD, ssa.OpAMD64CVTSD2SS:
opregreg(v.Op.Asm(), v.Reg(), v.Args[0].Reg())
opregreg(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg())
case ssa.OpAMD64CVTSL2SD, ssa.OpAMD64CVTSQ2SD, ssa.OpAMD64CVTSQ2SS, ssa.OpAMD64CVTSL2SS:
r := v.Reg()
// Break false dependency on destination register.
opregreg(x86.AXORPS, r, r)
opregreg(v.Op.Asm(), r, v.Args[0].Reg())
opregreg(s, x86.AXORPS, r, r)
opregreg(s, v.Op.Asm(), r, v.Args[0].Reg())
case ssa.OpAMD64ADDQmem, ssa.OpAMD64ADDLmem, ssa.OpAMD64SUBQmem, ssa.OpAMD64SUBLmem,
ssa.OpAMD64ANDQmem, ssa.OpAMD64ANDLmem, ssa.OpAMD64ORQmem, ssa.OpAMD64ORLmem,
ssa.OpAMD64XORQmem, ssa.OpAMD64XORLmem, ssa.OpAMD64ADDSDmem, ssa.OpAMD64ADDSSmem,
ssa.OpAMD64SUBSDmem, ssa.OpAMD64SUBSSmem, ssa.OpAMD64MULSDmem, ssa.OpAMD64MULSSmem:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[1].Reg()
gc.AddAux(&p.From, v)
@@ -663,13 +663,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
adj := duffAdj(v.AuxInt)
var p *obj.Prog
if adj != 0 {
p = gc.Prog(x86.AADDQ)
p = s.Prog(x86.AADDQ)
p.From.Type = obj.TYPE_CONST
p.From.Offset = adj
p.To.Type = obj.TYPE_REG
p.To.Reg = x86.REG_DI
}
p = gc.Prog(obj.ADUFFZERO)
p = s.Prog(obj.ADUFFZERO)
p.To.Type = obj.TYPE_ADDR
p.To.Sym = gc.Duffzero
p.To.Offset = off
@@ -678,9 +678,9 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
v.Fatalf("MOVOconst can only do constant=0")
}
r := v.Reg()
opregreg(x86.AXORPS, r, r)
opregreg(s, x86.AXORPS, r, r)
case ssa.OpAMD64DUFFCOPY:
p := gc.Prog(obj.ADUFFCOPY)
p := s.Prog(obj.ADUFFCOPY)
p.To.Type = obj.TYPE_ADDR
p.To.Sym = gc.Duffcopy
p.To.Offset = v.AuxInt
@@ -692,14 +692,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
x := v.Args[0].Reg()
y := v.Reg()
if x != y {
opregreg(moveByType(v.Type), y, x)
opregreg(s, moveByType(v.Type), y, x)
}
case ssa.OpLoadReg:
if v.Type.IsFlags() {
v.Fatalf("load flags not implemented: %v", v.LongString())
return
}
p := gc.Prog(loadByType(v.Type))
p := s.Prog(loadByType(v.Type))
gc.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
@@ -709,7 +709,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
v.Fatalf("store flags not implemented: %v", v.LongString())
return
}
p := gc.Prog(storeByType(v.Type))
p := s.Prog(storeByType(v.Type))
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
gc.AddrAuto(&p.To, v)
@@ -722,7 +722,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// near CanUse1InsnTLS for a detailed explanation of these instructions.
if x86.CanUse1InsnTLS(gc.Ctxt) {
// MOVQ (TLS), r
p := gc.Prog(x86.AMOVQ)
p := s.Prog(x86.AMOVQ)
p.From.Type = obj.TYPE_MEM
p.From.Reg = x86.REG_TLS
p.To.Type = obj.TYPE_REG
@@ -730,12 +730,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
} else {
// MOVQ TLS, r
// MOVQ (r)(TLS*1), r
p := gc.Prog(x86.AMOVQ)
p := s.Prog(x86.AMOVQ)
p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_TLS
p.To.Type = obj.TYPE_REG
p.To.Reg = r
q := gc.Prog(x86.AMOVQ)
q := s.Prog(x86.AMOVQ)
q.From.Type = obj.TYPE_MEM
q.From.Reg = r
q.From.Index = x86.REG_TLS
@@ -752,17 +752,17 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.To.Type = obj.TYPE_REG
p.To.Reg = r
case ssa.OpAMD64BSFQ, ssa.OpAMD64BSFL, ssa.OpAMD64BSRQ, ssa.OpAMD64BSRL:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
case ssa.OpAMD64SQRTSD:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
@@ -774,29 +774,29 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpAMD64SETB, ssa.OpAMD64SETBE,
ssa.OpAMD64SETORD, ssa.OpAMD64SETNAN,
ssa.OpAMD64SETA, ssa.OpAMD64SETAE:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64SETNEF:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
q := gc.Prog(x86.ASETPS)
q := s.Prog(x86.ASETPS)
q.To.Type = obj.TYPE_REG
q.To.Reg = x86.REG_AX
// ORL avoids partial register write and is smaller than ORQ, used by old compiler
opregreg(x86.AORL, v.Reg(), x86.REG_AX)
opregreg(s, x86.AORL, v.Reg(), x86.REG_AX)
case ssa.OpAMD64SETEQF:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
q := gc.Prog(x86.ASETPC)
q := s.Prog(x86.ASETPC)
q.To.Type = obj.TYPE_REG
q.To.Reg = x86.REG_AX
// ANDL avoids partial register write and is smaller than ANDQ, used by old compiler
opregreg(x86.AANDL, v.Reg(), x86.REG_AX)
opregreg(s, x86.AANDL, v.Reg(), x86.REG_AX)
case ssa.OpAMD64InvertFlags:
v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
@@ -805,11 +805,11 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpAMD64AddTupleFirst32, ssa.OpAMD64AddTupleFirst64:
v.Fatalf("AddTupleFirst* should never make it to codegen %v", v.LongString())
case ssa.OpAMD64REPSTOSQ:
gc.Prog(x86.AREP)
gc.Prog(x86.ASTOSQ)
s.Prog(x86.AREP)
s.Prog(x86.ASTOSQ)
case ssa.OpAMD64REPMOVSQ:
gc.Prog(x86.AREP)
gc.Prog(x86.AMOVSQ)
s.Prog(x86.AREP)
s.Prog(x86.AMOVSQ)
case ssa.OpAMD64LoweredNilCheck:
// Issue a load which will fault if the input is nil.
// TODO: We currently use the 2-byte instruction TESTB AX, (reg).
@@ -817,7 +817,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// but it doesn't have false dependency on AX.
// Or maybe allocate an output register and use MOVL (reg),reg2 ?
// That trades clobbering flags for clobbering a register.
p := gc.Prog(x86.ATESTB)
p := s.Prog(x86.ATESTB)
p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_AX
p.To.Type = obj.TYPE_MEM
@@ -827,7 +827,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
gc.Warnl(v.Pos, "generated nil check")
}
case ssa.OpAMD64MOVLatomicload, ssa.OpAMD64MOVQatomicload:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
@@ -838,7 +838,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output[0] not in same register %s", v.LongString())
}
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = r
p.To.Type = obj.TYPE_MEM
@@ -849,8 +849,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output[0] not in same register %s", v.LongString())
}
gc.Prog(x86.ALOCK)
p := gc.Prog(v.Op.Asm())
s.Prog(x86.ALOCK)
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = r
p.To.Type = obj.TYPE_MEM
@@ -860,19 +860,19 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if v.Args[1].Reg() != x86.REG_AX {
v.Fatalf("input[1] not in AX %s", v.LongString())
}
gc.Prog(x86.ALOCK)
p := gc.Prog(v.Op.Asm())
s.Prog(x86.ALOCK)
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
p = gc.Prog(x86.ASETEQ)
p = s.Prog(x86.ASETEQ)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
case ssa.OpAMD64ANDBlock, ssa.OpAMD64ORBlock:
gc.Prog(x86.ALOCK)
p := gc.Prog(v.Op.Asm())
s.Prog(x86.ALOCK)
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
@@ -913,7 +913,7 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
switch b.Kind {
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := gc.Prog(obj.AJMP)
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
@@ -921,25 +921,25 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
// defer returns in rax:
// 0 if we should continue executing
// 1 if we should jump to deferreturn call
p := gc.Prog(x86.ATESTL)
p := s.Prog(x86.ATESTL)
p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_AX
p.To.Type = obj.TYPE_REG
p.To.Reg = x86.REG_AX
p = gc.Prog(x86.AJNE)
p = s.Prog(x86.AJNE)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
p := gc.Prog(obj.AJMP)
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:
gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here
s.Prog(obj.AUNDEF) // tell plive.go that we never reach here
case ssa.BlockRet:
gc.Prog(obj.ARET)
s.Prog(obj.ARET)
case ssa.BlockRetJmp:
p := gc.Prog(obj.AJMP)
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = b.Aux.(*obj.LSym)
@@ -960,19 +960,19 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
var p *obj.Prog
switch next {
case b.Succs[0].Block():
p = gc.Prog(jmp.invasm)
p = s.Prog(jmp.invasm)
likely *= -1
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
case b.Succs[1].Block():
p = gc.Prog(jmp.asm)
p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
default:
p = gc.Prog(jmp.asm)
p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
q := gc.Prog(obj.AJMP)
q := s.Prog(obj.AJMP)
q.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
}
@@ -86,11 +86,11 @@ func makeshift(reg int16, typ int64, s int64) shift {
return shift(int64(reg&0xf) | typ | (s&31)<<7)
}
// genshift generates a Prog for r = r0 op (r1 shifted by s)
func genshift(as obj.As, r0, r1, r int16, typ int64, s int64) *obj.Prog {
p := gc.Prog(as)
// genshift generates a Prog for r = r0 op (r1 shifted by n)
func genshift(s *gc.SSAGenState, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
p := s.Prog(as)
p.From.Type = obj.TYPE_SHIFT
p.From.Offset = int64(makeshift(r1, typ, s))
p.From.Offset = int64(makeshift(r1, typ, n))
p.Reg = r0
if r != 0 {
p.To.Type = obj.TYPE_REG
@@ -105,8 +105,8 @@ func makeregshift(r1 int16, typ int64, r2 int16) shift {
}
// genregshift generates a Prog for r = r0 op (r1 shifted by r2)
func genregshift(as obj.As, r0, r1, r2, r int16, typ int64) *obj.Prog {
p := gc.Prog(as)
func genregshift(s *gc.SSAGenState, as obj.As, r0, r1, r2, r int16, typ int64) *obj.Prog {
p := s.Prog(as)
p.From.Type = obj.TYPE_SHIFT
p.From.Offset = int64(makeregshift(r1, typ, r2))
p.Reg = r0
@@ -139,7 +139,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
panic("bad float size")
}
}
p := gc.Prog(as)
p := s.Prog(as)
p.From.Type = obj.TYPE_REG
p.From.Reg = x
p.To.Type = obj.TYPE_REG
@@ -154,7 +154,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
v.Fatalf("load flags not implemented: %v", v.LongString())
return
}
p := gc.Prog(loadByType(v.Type))
p := s.Prog(loadByType(v.Type))
gc.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
@@ -163,7 +163,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
v.Fatalf("store flags not implemented: %v", v.LongString())
return
}
p := gc.Prog(storeByType(v.Type))
p := s.Prog(storeByType(v.Type))
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
gc.AddrAuto(&p.To, v)
@@ -188,7 +188,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r := v.Reg()
r1 := v.Args[0].Reg()
r2 := v.Args[1].Reg()
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = r2
p.Reg = r1
@@ -199,7 +199,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r := v.Reg0()
r1 := v.Args[0].Reg()
r2 := v.Args[1].Reg()
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.Scond = arm.C_SBIT
p.From.Type = obj.TYPE_REG
p.From.Reg = r2
@@ -212,7 +212,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r := v.Reg()
r1 := v.Args[0].Reg()
r2 := v.Args[1].Reg()
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = r2
p.Reg = r1
@@ -227,14 +227,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r := v.Reg()
r1 := v.Args[0].Reg()
r2 := v.Args[1].Reg()
p := gc.Prog(arm.ASRA)
p := s.Prog(arm.ASRA)
p.Scond = arm.C_SCOND_HS
p.From.Type = obj.TYPE_CONST
p.From.Offset = 31
p.Reg = r1
p.To.Type = obj.TYPE_REG
p.To.Reg = r
p = gc.Prog(arm.ASRA)
p = s.Prog(arm.ASRA)
p.Scond = arm.C_SCOND_LO
p.From.Type = obj.TYPE_REG
p.From.Reg = r2
@@ -254,7 +254,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARMSLLconst,
ssa.OpARMSRLconst,
ssa.OpARMSRAconst:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.Reg = v.Args[0].Reg()
@@ -263,7 +263,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpARMADDSconst,
ssa.OpARMSUBSconst,
ssa.OpARMRSBSconst:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.Scond = arm.C_SBIT
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
@@ -271,7 +271,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
case ssa.OpARMSRRconst:
genshift(arm.AMOVW, 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_RR, v.AuxInt)
genshift(s, arm.AMOVW, 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_RR, v.AuxInt)
case ssa.OpARMADDshiftLL,
ssa.OpARMADCshiftLL,
ssa.OpARMSUBshiftLL,
@@ -282,11 +282,11 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARMORshiftLL,
ssa.OpARMXORshiftLL,
ssa.OpARMBICshiftLL:
genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
case ssa.OpARMADDSshiftLL,
ssa.OpARMSUBSshiftLL,
ssa.OpARMRSBSshiftLL:
p := genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_LL, v.AuxInt)
p := genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_LL, v.AuxInt)
p.Scond = arm.C_SBIT
case ssa.OpARMADDshiftRL,
ssa.OpARMADCshiftRL,
@@ -298,11 +298,11 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARMORshiftRL,
ssa.OpARMXORshiftRL,
ssa.OpARMBICshiftRL:
genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
case ssa.OpARMADDSshiftRL,
ssa.OpARMSUBSshiftRL,
ssa.OpARMRSBSshiftRL:
p := genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_LR, v.AuxInt)
p := genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_LR, v.AuxInt)
p.Scond = arm.C_SBIT
case ssa.OpARMADDshiftRA,
ssa.OpARMADCshiftRA,
@@ -314,26 +314,26 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARMORshiftRA,
ssa.OpARMXORshiftRA,
ssa.OpARMBICshiftRA:
genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
case ssa.OpARMADDSshiftRA,
ssa.OpARMSUBSshiftRA,
ssa.OpARMRSBSshiftRA:
p := genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_AR, v.AuxInt)
p := genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_AR, v.AuxInt)
p.Scond = arm.C_SBIT
case ssa.OpARMXORshiftRR:
genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_RR, v.AuxInt)
genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_RR, v.AuxInt)
case ssa.OpARMMVNshiftLL:
genshift(v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
case ssa.OpARMMVNshiftRL:
genshift(v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
case ssa.OpARMMVNshiftRA:
genshift(v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
case ssa.OpARMMVNshiftLLreg:
genregshift(v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL)
genregshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL)
case ssa.OpARMMVNshiftRLreg:
genregshift(v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR)
genregshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR)
case ssa.OpARMMVNshiftRAreg:
genregshift(v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR)
genregshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR)
case ssa.OpARMADDshiftLLreg,
ssa.OpARMADCshiftLLreg,
ssa.OpARMSUBshiftLLreg,
@@ -344,11 +344,11 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARMORshiftLLreg,
ssa.OpARMXORshiftLLreg,
ssa.OpARMBICshiftLLreg:
genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_LL)
genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_LL)
case ssa.OpARMADDSshiftLLreg,
ssa.OpARMSUBSshiftLLreg,
ssa.OpARMRSBSshiftLLreg:
p := genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_LL)
p := genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_LL)
p.Scond = arm.C_SBIT
case ssa.OpARMADDshiftRLreg,
ssa.OpARMADCshiftRLreg,
@@ -360,11 +360,11 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARMORshiftRLreg,
ssa.OpARMXORshiftRLreg,
ssa.OpARMBICshiftRLreg:
genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_LR)
genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_LR)
case ssa.OpARMADDSshiftRLreg,
ssa.OpARMSUBSshiftRLreg,
ssa.OpARMRSBSshiftRLreg:
p := genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_LR)
p := genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_LR)
p.Scond = arm.C_SBIT
case ssa.OpARMADDshiftRAreg,
ssa.OpARMADCshiftRAreg,
@@ -376,16 +376,16 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARMORshiftRAreg,
ssa.OpARMXORshiftRAreg,
ssa.OpARMBICshiftRAreg:
genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_AR)
genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_AR)
case ssa.OpARMADDSshiftRAreg,
ssa.OpARMSUBSshiftRAreg,
ssa.OpARMRSBSshiftRAreg:
p := genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_AR)
p := genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_AR)
p.Scond = arm.C_SBIT
case ssa.OpARMHMUL,
ssa.OpARMHMULU:
// 32-bit high multiplication
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.Reg = v.Args[1].Reg()
@@ -394,7 +394,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Offset = arm.REGTMP // throw away low 32-bit into tmp register
case ssa.OpARMMULLU:
// 32-bit multiplication, results 64-bit, high 32-bit in out0, low 32-bit in out1
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.Reg = v.Args[1].Reg()
@@ -402,7 +402,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = v.Reg0() // high 32-bit
p.To.Offset = int64(v.Reg1()) // low 32-bit
case ssa.OpARMMULA:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.Reg = v.Args[1].Reg()
@@ -410,14 +410,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = v.Reg() // result
p.To.Offset = int64(v.Args[2].Reg()) // addend
case ssa.OpARMMOVWconst:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpARMMOVFconst,
ssa.OpARMMOVDconst:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_FCONST
p.From.Val = math.Float64frombits(uint64(v.AuxInt))
p.To.Type = obj.TYPE_REG
@@ -428,7 +428,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARMTEQ,
ssa.OpARMCMPF,
ssa.OpARMCMPD:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
// Special layout in ARM assembly
// Comparing to x86, the operands of ARM's CMP are reversed.
@@ -439,29 +439,29 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARMTSTconst,
ssa.OpARMTEQconst:
// Special layout in ARM assembly
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.Reg = v.Args[0].Reg()
case ssa.OpARMCMPF0,
ssa.OpARMCMPD0:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
case ssa.OpARMCMPshiftLL:
genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_LL, v.AuxInt)
genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_LL, v.AuxInt)
case ssa.OpARMCMPshiftRL:
genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_LR, v.AuxInt)
genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_LR, v.AuxInt)
case ssa.OpARMCMPshiftRA:
genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_AR, v.AuxInt)
genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_AR, v.AuxInt)
case ssa.OpARMCMPshiftLLreg:
genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_LL)
genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_LL)
case ssa.OpARMCMPshiftRLreg:
genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_LR)
genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_LR)
case ssa.OpARMCMPshiftRAreg:
genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_AR)
genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_AR)
case ssa.OpARMMOVWaddr:
p := gc.Prog(arm.AMOVW)
p := s.Prog(arm.AMOVW)
p.From.Type = obj.TYPE_ADDR
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
@@ -498,7 +498,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARMMOVWload,
ssa.OpARMMOVFload,
ssa.OpARMMOVDload:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
@@ -509,7 +509,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARMMOVWstore,
ssa.OpARMMOVFstore,
ssa.OpARMMOVDstore:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
@@ -519,33 +519,33 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// this is just shift 0 bits
fallthrough
case ssa.OpARMMOVWloadshiftLL:
p := genshift(v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
p := genshift(s, v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
p.From.Reg = v.Args[0].Reg()
case ssa.OpARMMOVWloadshiftRL:
p := genshift(v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
p := genshift(s, v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
p.From.Reg = v.Args[0].Reg()
case ssa.OpARMMOVWloadshiftRA:
p := genshift(v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
p := genshift(s, v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
p.From.Reg = v.Args[0].Reg()
case ssa.OpARMMOVWstoreidx:
// this is just shift 0 bits
fallthrough
case ssa.OpARMMOVWstoreshiftLL:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_SHIFT
p.To.Reg = v.Args[0].Reg()
p.To.Offset = int64(makeshift(v.Args[1].Reg(), arm.SHIFT_LL, v.AuxInt))
case ssa.OpARMMOVWstoreshiftRL:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_SHIFT
p.To.Reg = v.Args[0].Reg()
p.To.Offset = int64(makeshift(v.Args[1].Reg(), arm.SHIFT_LR, v.AuxInt))
case ssa.OpARMMOVWstoreshiftRA:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_SHIFT
@@ -570,7 +570,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if v.Reg() == v.Args[0].Reg() {
return
}
p := gc.Prog(arm.AMOVW)
p := s.Prog(arm.AMOVW)
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
@@ -591,7 +591,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARMMOVDW,
ssa.OpARMMOVFD,
ssa.OpARMMOVDF:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
@@ -600,21 +600,21 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARMMOVWUD,
ssa.OpARMMOVFWU,
ssa.OpARMMOVDWU:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.Scond = arm.C_UBIT
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpARMCMOVWHSconst:
p := gc.Prog(arm.AMOVW)
p := s.Prog(arm.AMOVW)
p.Scond = arm.C_SCOND_HS
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpARMCMOVWLSconst:
p := gc.Prog(arm.AMOVW)
p := s.Prog(arm.AMOVW)
p.Scond = arm.C_SCOND_LS
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
@@ -623,20 +623,20 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpARMCALLstatic, ssa.OpARMCALLclosure, ssa.OpARMCALLinter, ssa.OpARMCALLudiv:
s.Call(v)
case ssa.OpARMDUFFZERO:
p := gc.Prog(obj.ADUFFZERO)
p := s.Prog(obj.ADUFFZERO)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Duffzero
p.To.Offset = v.AuxInt
case ssa.OpARMDUFFCOPY:
p := gc.Prog(obj.ADUFFCOPY)
p := s.Prog(obj.ADUFFCOPY)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Duffcopy
p.To.Offset = v.AuxInt
case ssa.OpARMLoweredNilCheck:
// Issue a load which will fault if arg is nil.
p := gc.Prog(arm.AMOVB)
p := s.Prog(arm.AMOVB)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
@@ -665,18 +665,18 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
sz = 1
mov = arm.AMOVB
}
p := gc.Prog(mov)
p := s.Prog(mov)
p.Scond = arm.C_PBIT
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = arm.REG_R1
p.To.Offset = sz
p2 := gc.Prog(arm.ACMP)
p2 := s.Prog(arm.ACMP)
p2.From.Type = obj.TYPE_REG
p2.From.Reg = v.Args[1].Reg()
p2.Reg = arm.REG_R1
p3 := gc.Prog(arm.ABLE)
p3 := s.Prog(arm.ABLE)
p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p)
case ssa.OpARMLoweredMove:
@@ -699,25 +699,25 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
sz = 1
mov = arm.AMOVB
}
p := gc.Prog(mov)
p := s.Prog(mov)
p.Scond = arm.C_PBIT
p.From.Type = obj.TYPE_MEM
p.From.Reg = arm.REG_R1
p.From.Offset = sz
p.To.Type = obj.TYPE_REG
p.To.Reg = arm.REGTMP
p2 := gc.Prog(mov)
p2 := s.Prog(mov)
p2.Scond = arm.C_PBIT
p2.From.Type = obj.TYPE_REG
p2.From.Reg = arm.REGTMP
p2.To.Type = obj.TYPE_MEM
p2.To.Reg = arm.REG_R2
p2.To.Offset = sz
p3 := gc.Prog(arm.ACMP)
p3 := s.Prog(arm.ACMP)
p3.From.Type = obj.TYPE_REG
p3.From.Reg = v.Args[2].Reg()
p3.Reg = arm.REG_R1
p4 := gc.Prog(arm.ABLE)
p4 := s.Prog(arm.ABLE)
p4.To.Type = obj.TYPE_BRANCH
gc.Patch(p4, p)
case ssa.OpARMEqual,
@@ -732,12 +732,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARMGreaterEqualU:
// generate boolean values
// use conditional move
p := gc.Prog(arm.AMOVW)
p := s.Prog(arm.AMOVW)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 0
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
p = gc.Prog(arm.AMOVW)
p = s.Prog(arm.AMOVW)
p.Scond = condBits[v.Op]
p.From.Type = obj.TYPE_CONST
p.From.Offset = 1
@@ -791,7 +791,7 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
switch b.Kind {
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := gc.Prog(obj.AJMP)
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
@@ -800,27 +800,27 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
// defer returns in R0:
// 0 if we should continue executing
// 1 if we should jump to deferreturn call
p := gc.Prog(arm.ACMP)
p := s.Prog(arm.ACMP)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 0
p.Reg = arm.REG_R0
p = gc.Prog(arm.ABNE)
p = s.Prog(arm.ABNE)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
p := gc.Prog(obj.AJMP)
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:
gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here
s.Prog(obj.AUNDEF) // tell plive.go that we never reach here
case ssa.BlockRet:
gc.Prog(obj.ARET)
s.Prog(obj.ARET)
case ssa.BlockRetJmp:
p := gc.Prog(obj.ARET)
p := s.Prog(obj.ARET)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = b.Aux.(*obj.LSym)
@@ -834,18 +834,18 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
var p *obj.Prog
switch next {
case b.Succs[0].Block():
p = gc.Prog(jmp.invasm)
p = s.Prog(jmp.invasm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
case b.Succs[1].Block():
p = gc.Prog(jmp.asm)
p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
default:
p = gc.Prog(jmp.asm)
p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
q := gc.Prog(obj.AJMP)
q := s.Prog(obj.AJMP)
q.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
}
@@ -78,11 +78,11 @@ func makeshift(reg int16, typ int64, s int64) int64 {
return int64(reg&31)<<16 | typ | (s&63)<<10
}
// genshift generates a Prog for r = r0 op (r1 shifted by s)
func genshift(as obj.As, r0, r1, r int16, typ int64, s int64) *obj.Prog {
p := gc.Prog(as)
// genshift generates a Prog for r = r0 op (r1 shifted by n)
func genshift(s *gc.SSAGenState, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
p := s.Prog(as)
p.From.Type = obj.TYPE_SHIFT
p.From.Offset = makeshift(r1, typ, s)
p.From.Offset = makeshift(r1, typ, n)
p.Reg = r0
if r != 0 {
p.To.Type = obj.TYPE_REG
@@ -113,7 +113,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
panic("bad float size")
}
}
p := gc.Prog(as)
p := s.Prog(as)
p.From.Type = obj.TYPE_REG
p.From.Reg = x
p.To.Type = obj.TYPE_REG
@@ -128,7 +128,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
v.Fatalf("load flags not implemented: %v", v.LongString())
return
}
p := gc.Prog(loadByType(v.Type))
p := s.Prog(loadByType(v.Type))
gc.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
@@ -137,7 +137,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
v.Fatalf("store flags not implemented: %v", v.LongString())
return
}
p := gc.Prog(storeByType(v.Type))
p := s.Prog(storeByType(v.Type))
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
gc.AddrAuto(&p.To, v)
@@ -175,7 +175,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r := v.Reg()
r1 := v.Args[0].Reg()
r2 := v.Args[1].Reg()
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = r2
p.Reg = r1
@@ -192,7 +192,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARM64SRAconst,
ssa.OpARM64RORconst,
ssa.OpARM64RORWconst:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.Reg = v.Args[0].Reg()
@@ -204,30 +204,30 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARM64ORshiftLL,
ssa.OpARM64XORshiftLL,
ssa.OpARM64BICshiftLL:
genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm64.SHIFT_LL, v.AuxInt)
genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm64.SHIFT_LL, v.AuxInt)
case ssa.OpARM64ADDshiftRL,
ssa.OpARM64SUBshiftRL,
ssa.OpARM64ANDshiftRL,
ssa.OpARM64ORshiftRL,
ssa.OpARM64XORshiftRL,
ssa.OpARM64BICshiftRL:
genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm64.SHIFT_LR, v.AuxInt)
genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm64.SHIFT_LR, v.AuxInt)
case ssa.OpARM64ADDshiftRA,
ssa.OpARM64SUBshiftRA,
ssa.OpARM64ANDshiftRA,
ssa.OpARM64ORshiftRA,
ssa.OpARM64XORshiftRA,
ssa.OpARM64BICshiftRA:
genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm64.SHIFT_AR, v.AuxInt)
genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm64.SHIFT_AR, v.AuxInt)
case ssa.OpARM64MOVDconst:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpARM64FMOVSconst,
ssa.OpARM64FMOVDconst:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_FCONST
p.From.Val = math.Float64frombits(uint64(v.AuxInt))
p.To.Type = obj.TYPE_REG
@@ -238,7 +238,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARM64CMNW,
ssa.OpARM64FCMPS,
ssa.OpARM64FCMPD:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.Reg = v.Args[0].Reg()
@@ -246,18 +246,18 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARM64CMPWconst,
ssa.OpARM64CMNconst,
ssa.OpARM64CMNWconst:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.Reg = v.Args[0].Reg()
case ssa.OpARM64CMPshiftLL:
genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm64.SHIFT_LL, v.AuxInt)
genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm64.SHIFT_LL, v.AuxInt)
case ssa.OpARM64CMPshiftRL:
genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm64.SHIFT_LR, v.AuxInt)
genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm64.SHIFT_LR, v.AuxInt)
case ssa.OpARM64CMPshiftRA:
genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm64.SHIFT_AR, v.AuxInt)
genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm64.SHIFT_AR, v.AuxInt)
case ssa.OpARM64MOVDaddr:
p := gc.Prog(arm64.AMOVD)
p := s.Prog(arm64.AMOVD)
p.From.Type = obj.TYPE_ADDR
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
@@ -295,7 +295,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARM64MOVDload,
ssa.OpARM64FMOVSload,
ssa.OpARM64FMOVDload:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
@@ -303,7 +303,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = v.Reg()
case ssa.OpARM64LDAR,
ssa.OpARM64LDARW:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
@@ -317,7 +317,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARM64FMOVDstore,
ssa.OpARM64STLR,
ssa.OpARM64STLRW:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
@@ -327,7 +327,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARM64MOVHstorezero,
ssa.OpARM64MOVWstorezero,
ssa.OpARM64MOVDstorezero:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = arm64.REGZERO
p.To.Type = obj.TYPE_MEM
@@ -347,18 +347,18 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r0 := v.Args[0].Reg()
r1 := v.Args[1].Reg()
out := v.Reg0()
p := gc.Prog(ld)
p := s.Prog(ld)
p.From.Type = obj.TYPE_MEM
p.From.Reg = r0
p.To.Type = obj.TYPE_REG
p.To.Reg = out
p1 := gc.Prog(st)
p1 := s.Prog(st)
p1.From.Type = obj.TYPE_REG
p1.From.Reg = r1
p1.To.Type = obj.TYPE_MEM
p1.To.Reg = r0
p1.RegTo2 = arm64.REGTMP
p2 := gc.Prog(arm64.ACBNZ)
p2 := s.Prog(arm64.ACBNZ)
p2.From.Type = obj.TYPE_REG
p2.From.Reg = arm64.REGTMP
p2.To.Type = obj.TYPE_BRANCH
@@ -378,23 +378,23 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r0 := v.Args[0].Reg()
r1 := v.Args[1].Reg()
out := v.Reg0()
p := gc.Prog(ld)
p := s.Prog(ld)
p.From.Type = obj.TYPE_MEM
p.From.Reg = r0
p.To.Type = obj.TYPE_REG
p.To.Reg = out
p1 := gc.Prog(arm64.AADD)
p1 := s.Prog(arm64.AADD)
p1.From.Type = obj.TYPE_REG
p1.From.Reg = r1
p1.To.Type = obj.TYPE_REG
p1.To.Reg = out
p2 := gc.Prog(st)
p2 := s.Prog(st)
p2.From.Type = obj.TYPE_REG
p2.From.Reg = out
p2.To.Type = obj.TYPE_MEM
p2.To.Reg = r0
p2.RegTo2 = arm64.REGTMP
p3 := gc.Prog(arm64.ACBNZ)
p3 := s.Prog(arm64.ACBNZ)
p3.From.Type = obj.TYPE_REG
p3.From.Reg = arm64.REGTMP
p3.To.Type = obj.TYPE_BRANCH
@@ -419,29 +419,29 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r1 := v.Args[1].Reg()
r2 := v.Args[2].Reg()
out := v.Reg0()
p := gc.Prog(ld)
p := s.Prog(ld)
p.From.Type = obj.TYPE_MEM
p.From.Reg = r0
p.To.Type = obj.TYPE_REG
p.To.Reg = arm64.REGTMP
p1 := gc.Prog(cmp)
p1 := s.Prog(cmp)
p1.From.Type = obj.TYPE_REG
p1.From.Reg = r1
p1.Reg = arm64.REGTMP
p2 := gc.Prog(arm64.ABNE)
p2 := s.Prog(arm64.ABNE)
p2.To.Type = obj.TYPE_BRANCH
p3 := gc.Prog(st)
p3 := s.Prog(st)
p3.From.Type = obj.TYPE_REG
p3.From.Reg = r2
p3.To.Type = obj.TYPE_MEM
p3.To.Reg = r0
p3.RegTo2 = arm64.REGTMP
p4 := gc.Prog(arm64.ACBNZ)
p4 := s.Prog(arm64.ACBNZ)
p4.From.Type = obj.TYPE_REG
p4.From.Reg = arm64.REGTMP
p4.To.Type = obj.TYPE_BRANCH
gc.Patch(p4, p)
p5 := gc.Prog(arm64.ACSET)
p5 := s.Prog(arm64.ACSET)
p5.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg
p5.From.Reg = arm64.COND_EQ
p5.To.Type = obj.TYPE_REG
@@ -455,23 +455,23 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// CBNZ Rtmp, -3(PC)
r0 := v.Args[0].Reg()
r1 := v.Args[1].Reg()
p := gc.Prog(arm64.ALDAXRB)
p := s.Prog(arm64.ALDAXRB)
p.From.Type = obj.TYPE_MEM
p.From.Reg = r0
p.To.Type = obj.TYPE_REG
p.To.Reg = arm64.REGTMP
p1 := gc.Prog(v.Op.Asm())
p1 := s.Prog(v.Op.Asm())
p1.From.Type = obj.TYPE_REG
p1.From.Reg = r1
p1.To.Type = obj.TYPE_REG
p1.To.Reg = arm64.REGTMP
p2 := gc.Prog(arm64.ASTLXRB)
p2 := s.Prog(arm64.ASTLXRB)
p2.From.Type = obj.TYPE_REG
p2.From.Reg = arm64.REGTMP
p2.To.Type = obj.TYPE_MEM
p2.To.Reg = r0
p2.RegTo2 = arm64.REGTMP
p3 := gc.Prog(arm64.ACBNZ)
p3 := s.Prog(arm64.ACBNZ)
p3.From.Type = obj.TYPE_REG
p3.From.Reg = arm64.REGTMP
p3.To.Type = obj.TYPE_BRANCH
@@ -499,7 +499,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if v.Reg() == v.Args[0].Reg() {
return
}
p := gc.Prog(arm64.AMOVD)
p := s.Prog(arm64.AMOVD)
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
@@ -539,7 +539,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARM64RBITW,
ssa.OpARM64CLZ,
ssa.OpARM64CLZW:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
@@ -550,7 +550,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if v.Op == ssa.OpARM64CSELULT {
r1 = v.Args[1].Reg()
}
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg
p.From.Reg = arm64.COND_LO
p.Reg = v.Args[0].Reg()
@@ -559,13 +559,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = v.Reg()
case ssa.OpARM64DUFFZERO:
// runtime.duffzero expects start address - 8 in R16
p := gc.Prog(arm64.ASUB)
p := s.Prog(arm64.ASUB)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 8
p.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = arm64.REG_R16
p = gc.Prog(obj.ADUFFZERO)
p = s.Prog(obj.ADUFFZERO)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Duffzero
@@ -575,22 +575,22 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// CMP Rarg1, R16
// BLE -2(PC)
// arg1 is the address of the last element to zero
p := gc.Prog(arm64.AMOVD)
p := s.Prog(arm64.AMOVD)
p.Scond = arm64.C_XPOST
p.From.Type = obj.TYPE_REG
p.From.Reg = arm64.REGZERO
p.To.Type = obj.TYPE_MEM
p.To.Reg = arm64.REG_R16
p.To.Offset = 8
p2 := gc.Prog(arm64.ACMP)
p2 := s.Prog(arm64.ACMP)
p2.From.Type = obj.TYPE_REG
p2.From.Reg = v.Args[1].Reg()
p2.Reg = arm64.REG_R16
p3 := gc.Prog(arm64.ABLE)
p3 := s.Prog(arm64.ABLE)
p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p)
case ssa.OpARM64DUFFCOPY:
p := gc.Prog(obj.ADUFFCOPY)
p := s.Prog(obj.ADUFFCOPY)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Duffcopy
@@ -601,32 +601,32 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// CMP Rarg2, R16
// BLE -3(PC)
// arg2 is the address of the last element of src
p := gc.Prog(arm64.AMOVD)
p := s.Prog(arm64.AMOVD)
p.Scond = arm64.C_XPOST
p.From.Type = obj.TYPE_MEM
p.From.Reg = arm64.REG_R16
p.From.Offset = 8
p.To.Type = obj.TYPE_REG
p.To.Reg = arm64.REGTMP
p2 := gc.Prog(arm64.AMOVD)
p2 := s.Prog(arm64.AMOVD)
p2.Scond = arm64.C_XPOST
p2.From.Type = obj.TYPE_REG
p2.From.Reg = arm64.REGTMP
p2.To.Type = obj.TYPE_MEM
p2.To.Reg = arm64.REG_R17
p2.To.Offset = 8
p3 := gc.Prog(arm64.ACMP)
p3 := s.Prog(arm64.ACMP)
p3.From.Type = obj.TYPE_REG
p3.From.Reg = v.Args[2].Reg()
p3.Reg = arm64.REG_R16
p4 := gc.Prog(arm64.ABLE)
p4 := s.Prog(arm64.ABLE)
p4.To.Type = obj.TYPE_BRANCH
gc.Patch(p4, p)
case ssa.OpARM64CALLstatic, ssa.OpARM64CALLclosure, ssa.OpARM64CALLinter:
s.Call(v)
case ssa.OpARM64LoweredNilCheck:
// Issue a load which will fault if arg is nil.
p := gc.Prog(arm64.AMOVB)
p := s.Prog(arm64.AMOVB)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
@@ -646,7 +646,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARM64GreaterThanU,
ssa.OpARM64GreaterEqualU:
// generate boolean values using CSET
p := gc.Prog(arm64.ACSET)
p := s.Prog(arm64.ACSET)
p.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg
p.From.Reg = condBits[v.Op]
p.To.Type = obj.TYPE_REG
@@ -703,7 +703,7 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
switch b.Kind {
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := gc.Prog(obj.AJMP)
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
@@ -712,27 +712,27 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
// defer returns in R0:
// 0 if we should continue executing
// 1 if we should jump to deferreturn call
p := gc.Prog(arm64.ACMP)
p := s.Prog(arm64.ACMP)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 0
p.Reg = arm64.REG_R0
p = gc.Prog(arm64.ABNE)
p = s.Prog(arm64.ABNE)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
p := gc.Prog(obj.AJMP)
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:
gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here
s.Prog(obj.AUNDEF) // tell plive.go that we never reach here
case ssa.BlockRet:
gc.Prog(obj.ARET)
s.Prog(obj.ARET)
case ssa.BlockRetJmp:
p := gc.Prog(obj.ARET)
p := s.Prog(obj.ARET)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = b.Aux.(*obj.LSym)
@@ -748,18 +748,18 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
var p *obj.Prog
switch next {
case b.Succs[0].Block():
p = gc.Prog(jmp.invasm)
p = s.Prog(jmp.invasm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
case b.Succs[1].Block():
p = gc.Prog(jmp.asm)
p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
default:
p = gc.Prog(jmp.asm)
p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
q := gc.Prog(obj.AJMP)
q := s.Prog(obj.AJMP)
q.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
}
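
// Illustrative sketch (not from the source): the three-way switch above
// encodes the standard branch-selection rule for a two-successor conditional
// block. The names below are hypothetical stand-ins:
package main

import "fmt"

// condBranches lists the branches emitted for a conditional block with
// successors s0 (condition holds) and s1, given the block laid out next.
func condBranches(next, s0, s1 string) []string {
	switch next {
	case s0:
		// s0 falls through; branch to s1 on the inverted condition.
		return []string{"invasm -> " + s1}
	case s1:
		// s1 falls through; branch to s0 on the condition.
		return []string{"asm -> " + s0}
	default:
		// Neither successor follows: conditional branch plus a jump.
		return []string{"asm -> " + s0, "JMP -> " + s1}
	}
}

func main() {
	fmt.Println(condBranches("b7", "b3", "b7")) // [asm -> b3]
}
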
......@@ -4253,6 +4253,11 @@ type SSAGenState struct {
stackMapIndex map[*ssa.Value]int
}
// Prog appends a new Prog.
func (s *SSAGenState) Prog(as obj.As) *obj.Prog {
return Prog(as)
}
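
// Illustrative sketch (not part of this change): the direction this CL
// enables. The method above still delegates to the package-level Prog, but
// once the Prog-related globals are gone, the generation state can own the
// instruction list itself. All names below are hypothetical stand-ins for
// the real gc types, a sketch rather than the actual implementation:
package main

import "fmt"

// prog is a stand-in for obj.Prog: one instruction in a linked list.
type prog struct {
	as   string // opcode, stand-in for obj.As
	link *prog  // next instruction
}

// genState plays the role of gc.SSAGenState once it owns its Prog list.
type genState struct {
	head, tail *prog
}

// Prog appends a new instruction and returns it so the caller can fill in
// operands, mirroring the s.Prog call sites throughout this CL.
func (s *genState) Prog(as string) *prog {
	p := &prog{as: as}
	if s.tail == nil {
		s.head = p
	} else {
		s.tail.link = p
	}
	s.tail = p
	return p
}

func main() {
	s := &genState{}
	s.Prog("MOVD")
	s.Prog("RET")
	for p := s.head; p != nil; p = p.link {
		fmt.Println(p.as)
	}
}
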
// Pc returns the current Prog.
func (s *SSAGenState) Pc() *obj.Prog {
return pc
......@@ -4411,11 +4416,11 @@ type FloatingEQNEJump struct {
Index int
}
func oneFPJump(b *ssa.Block, jumps *FloatingEQNEJump, likely ssa.BranchPrediction, branches []Branch) []Branch {
p := Prog(jumps.Jump)
func (s *SSAGenState) oneFPJump(b *ssa.Block, jumps *FloatingEQNEJump, likely ssa.BranchPrediction) {
p := s.Prog(jumps.Jump)
p.To.Type = obj.TYPE_BRANCH
to := jumps.Index
branches = append(branches, Branch{p, b.Succs[to].Block()})
s.Branches = append(s.Branches, Branch{p, b.Succs[to].Block()})
if to == 1 {
likely = -likely
}
......@@ -4431,22 +4436,21 @@ func oneFPJump(b *ssa.Block, jumps *FloatingEQNEJump, likely ssa.BranchPredictio
p.From.Type = obj.TYPE_CONST
p.From.Offset = 1
}
return branches
}
func SSAGenFPJump(s *SSAGenState, b, next *ssa.Block, jumps *[2][2]FloatingEQNEJump) {
likely := b.Likely
switch next {
case b.Succs[0].Block():
s.Branches = oneFPJump(b, &jumps[0][0], likely, s.Branches)
s.Branches = oneFPJump(b, &jumps[0][1], likely, s.Branches)
s.oneFPJump(b, &jumps[0][0], likely)
s.oneFPJump(b, &jumps[0][1], likely)
case b.Succs[1].Block():
s.Branches = oneFPJump(b, &jumps[1][0], likely, s.Branches)
s.Branches = oneFPJump(b, &jumps[1][1], likely, s.Branches)
s.oneFPJump(b, &jumps[1][0], likely)
s.oneFPJump(b, &jumps[1][1], likely)
default:
s.Branches = oneFPJump(b, &jumps[1][0], likely, s.Branches)
s.Branches = oneFPJump(b, &jumps[1][1], likely, s.Branches)
q := Prog(obj.AJMP)
s.oneFPJump(b, &jumps[1][0], likely)
s.oneFPJump(b, &jumps[1][1], likely)
q := s.Prog(obj.AJMP)
q.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, Branch{q, b.Succs[1].Block()})
}
......@@ -4621,7 +4625,7 @@ func (s *SSAGenState) Call(v *ssa.Value) *obj.Prog {
if !ok {
Fatalf("missing stack map index for %v", v.LongString())
}
p := Prog(obj.APCDATA)
p := s.Prog(obj.APCDATA)
Addrconst(&p.From, obj.PCDATA_StackMapIndex)
Addrconst(&p.To, int64(idx))
......@@ -4637,7 +4641,7 @@ func (s *SSAGenState) Call(v *ssa.Value) *obj.Prog {
thearch.Ginsnop()
}
p = Prog(obj.ACALL)
p = s.Prog(obj.ACALL)
if sym, ok := v.Aux.(*obj.LSym); ok {
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
......
......@@ -93,7 +93,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
}
}
p := gc.Prog(as)
p := s.Prog(as)
p.From.Type = obj.TYPE_REG
p.From.Reg = x
p.To.Type = obj.TYPE_REG
......@@ -101,7 +101,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if isHILO(x) && isHILO(y) || isHILO(x) && isFPreg(y) || isFPreg(x) && isHILO(y) {
// cannot move between special registers, use TMP as intermediate
p.To.Reg = mips.REGTMP
p = gc.Prog(mips.AMOVW)
p = s.Prog(mips.AMOVW)
p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGTMP
p.To.Type = obj.TYPE_REG
......@@ -118,14 +118,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
return
}
r := v.Reg()
p := gc.Prog(loadByType(v.Type, r))
p := s.Prog(loadByType(v.Type, r))
gc.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = r
if isHILO(r) {
// cannot directly load, load to TMP and move
p.To.Reg = mips.REGTMP
p = gc.Prog(mips.AMOVW)
p = s.Prog(mips.AMOVW)
p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGTMP
p.To.Type = obj.TYPE_REG
......@@ -139,14 +139,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r := v.Args[0].Reg()
if isHILO(r) {
// cannot directly store, move to TMP and store
p := gc.Prog(mips.AMOVW)
p := s.Prog(mips.AMOVW)
p.From.Type = obj.TYPE_REG
p.From.Reg = r
p.To.Type = obj.TYPE_REG
p.To.Reg = mips.REGTMP
r = mips.REGTMP
}
p := gc.Prog(storeByType(v.Type, r))
p := s.Prog(storeByType(v.Type, r))
p.From.Type = obj.TYPE_REG
p.From.Reg = r
gc.AddrAuto(&p.To, v)
......@@ -168,7 +168,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpMIPSDIVF,
ssa.OpMIPSDIVD,
ssa.OpMIPSMUL:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.Reg = v.Args[0].Reg()
......@@ -176,7 +176,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = v.Reg()
case ssa.OpMIPSSGT,
ssa.OpMIPSSGTU:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.Reg = v.Args[1].Reg()
......@@ -184,7 +184,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = v.Reg()
case ssa.OpMIPSSGTzero,
ssa.OpMIPSSGTUzero:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.Reg = mips.REGZERO
......@@ -201,7 +201,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpMIPSSRAconst,
ssa.OpMIPSSGTconst,
ssa.OpMIPSSGTUconst:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.Reg = v.Args[0].Reg()
......@@ -212,13 +212,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpMIPSDIV,
ssa.OpMIPSDIVU:
// result in hi,lo
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.Reg = v.Args[0].Reg()
case ssa.OpMIPSMOVWconst:
r := v.Reg()
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
......@@ -226,7 +226,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if isFPreg(r) || isHILO(r) {
// cannot move into FP or special registers, use TMP as intermediate
p.To.Reg = mips.REGTMP
p = gc.Prog(mips.AMOVW)
p = s.Prog(mips.AMOVW)
p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGTMP
p.To.Type = obj.TYPE_REG
......@@ -234,7 +234,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
}
case ssa.OpMIPSMOVFconst,
ssa.OpMIPSMOVDconst:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_FCONST
p.From.Val = math.Float64frombits(uint64(v.AuxInt))
p.To.Type = obj.TYPE_REG
......@@ -243,7 +243,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if v.Reg() != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg()
p.Reg = v.Args[1].Reg()
......@@ -253,7 +253,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if v.Reg() != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.Reg = mips.REGZERO
......@@ -265,12 +265,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpMIPSCMPGED,
ssa.OpMIPSCMPGTF,
ssa.OpMIPSCMPGTD:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.Reg = v.Args[1].Reg()
case ssa.OpMIPSMOVWaddr:
p := gc.Prog(mips.AMOVW)
p := s.Prog(mips.AMOVW)
p.From.Type = obj.TYPE_ADDR
var wantreg string
// MOVW $sym+off(base), R
......@@ -305,7 +305,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpMIPSMOVWload,
ssa.OpMIPSMOVFload,
ssa.OpMIPSMOVDload:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
......@@ -316,7 +316,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpMIPSMOVWstore,
ssa.OpMIPSMOVFstore,
ssa.OpMIPSMOVDstore:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
......@@ -325,7 +325,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpMIPSMOVBstorezero,
ssa.OpMIPSMOVHstorezero,
ssa.OpMIPSMOVWstorezero:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGZERO
p.To.Type = obj.TYPE_MEM
......@@ -350,7 +350,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if v.Reg() == v.Args[0].Reg() {
return
}
p := gc.Prog(mips.AMOVW)
p := s.Prog(mips.AMOVW)
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
......@@ -370,14 +370,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpMIPSNEGD,
ssa.OpMIPSSQRTD,
ssa.OpMIPSCLZ:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpMIPSNEG:
// SUB from REGZERO
p := gc.Prog(mips.ASUBU)
p := s.Prog(mips.ASUBU)
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.Reg = mips.REGZERO
......@@ -402,23 +402,23 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
sz = 1
mov = mips.AMOVB
}
p := gc.Prog(mips.ASUBU)
p := s.Prog(mips.ASUBU)
p.From.Type = obj.TYPE_CONST
p.From.Offset = sz
p.To.Type = obj.TYPE_REG
p.To.Reg = mips.REG_R1
p2 := gc.Prog(mov)
p2 := s.Prog(mov)
p2.From.Type = obj.TYPE_REG
p2.From.Reg = mips.REGZERO
p2.To.Type = obj.TYPE_MEM
p2.To.Reg = mips.REG_R1
p2.To.Offset = sz
p3 := gc.Prog(mips.AADDU)
p3 := s.Prog(mips.AADDU)
p3.From.Type = obj.TYPE_CONST
p3.From.Offset = sz
p3.To.Type = obj.TYPE_REG
p3.To.Reg = mips.REG_R1
p4 := gc.Prog(mips.ABNE)
p4 := s.Prog(mips.ABNE)
p4.From.Type = obj.TYPE_REG
p4.From.Reg = v.Args[1].Reg()
p4.Reg = mips.REG_R1
......@@ -445,33 +445,33 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
sz = 1
mov = mips.AMOVB
}
p := gc.Prog(mips.ASUBU)
p := s.Prog(mips.ASUBU)
p.From.Type = obj.TYPE_CONST
p.From.Offset = sz
p.To.Type = obj.TYPE_REG
p.To.Reg = mips.REG_R1
p2 := gc.Prog(mov)
p2 := s.Prog(mov)
p2.From.Type = obj.TYPE_MEM
p2.From.Reg = mips.REG_R1
p2.From.Offset = sz
p2.To.Type = obj.TYPE_REG
p2.To.Reg = mips.REGTMP
p3 := gc.Prog(mov)
p3 := s.Prog(mov)
p3.From.Type = obj.TYPE_REG
p3.From.Reg = mips.REGTMP
p3.To.Type = obj.TYPE_MEM
p3.To.Reg = mips.REG_R2
p4 := gc.Prog(mips.AADDU)
p4 := s.Prog(mips.AADDU)
p4.From.Type = obj.TYPE_CONST
p4.From.Offset = sz
p4.To.Type = obj.TYPE_REG
p4.To.Reg = mips.REG_R1
p5 := gc.Prog(mips.AADDU)
p5 := s.Prog(mips.AADDU)
p5.From.Type = obj.TYPE_CONST
p5.From.Offset = sz
p5.To.Type = obj.TYPE_REG
p5.To.Reg = mips.REG_R2
p6 := gc.Prog(mips.ABNE)
p6 := s.Prog(mips.ABNE)
p6.From.Type = obj.TYPE_REG
p6.From.Reg = v.Args[2].Reg()
p6.Reg = mips.REG_R1
......@@ -480,35 +480,35 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpMIPSCALLstatic, ssa.OpMIPSCALLclosure, ssa.OpMIPSCALLinter:
s.Call(v)
case ssa.OpMIPSLoweredAtomicLoad:
gc.Prog(mips.ASYNC)
s.Prog(mips.ASYNC)
p := gc.Prog(mips.AMOVW)
p := s.Prog(mips.AMOVW)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
gc.Prog(mips.ASYNC)
s.Prog(mips.ASYNC)
case ssa.OpMIPSLoweredAtomicStore:
gc.Prog(mips.ASYNC)
s.Prog(mips.ASYNC)
p := gc.Prog(mips.AMOVW)
p := s.Prog(mips.AMOVW)
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.Prog(mips.ASYNC)
s.Prog(mips.ASYNC)
case ssa.OpMIPSLoweredAtomicStorezero:
gc.Prog(mips.ASYNC)
s.Prog(mips.ASYNC)
p := gc.Prog(mips.AMOVW)
p := s.Prog(mips.AMOVW)
p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGZERO
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.Prog(mips.ASYNC)
s.Prog(mips.ASYNC)
case ssa.OpMIPSLoweredAtomicExchange:
// SYNC
// MOVW Rarg1, Rtmp
......@@ -516,33 +516,33 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// SC Rtmp, (Rarg0)
// BEQ Rtmp, -3(PC)
// SYNC
gc.Prog(mips.ASYNC)
s.Prog(mips.ASYNC)
p := gc.Prog(mips.AMOVW)
p := s.Prog(mips.AMOVW)
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = mips.REGTMP
p1 := gc.Prog(mips.ALL)
p1 := s.Prog(mips.ALL)
p1.From.Type = obj.TYPE_MEM
p1.From.Reg = v.Args[0].Reg()
p1.To.Type = obj.TYPE_REG
p1.To.Reg = v.Reg0()
p2 := gc.Prog(mips.ASC)
p2 := s.Prog(mips.ASC)
p2.From.Type = obj.TYPE_REG
p2.From.Reg = mips.REGTMP
p2.To.Type = obj.TYPE_MEM
p2.To.Reg = v.Args[0].Reg()
p3 := gc.Prog(mips.ABEQ)
p3 := s.Prog(mips.ABEQ)
p3.From.Type = obj.TYPE_REG
p3.From.Reg = mips.REGTMP
p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p)
gc.Prog(mips.ASYNC)
s.Prog(mips.ASYNC)
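
// Illustrative sketch (not from the source): the LL/SC retry loop above is
// the MIPS spelling of an atomic exchange; its observable behavior matches
// the portable sync/atomic call:
package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var word int32 = 1
	// LL (Rarg0), Rout; SC Rarg1 copy, (Rarg0); retry until SC succeeds.
	old := atomic.SwapInt32(&word, 2)
	fmt.Println(old, word) // 1 2
}
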
case ssa.OpMIPSLoweredAtomicAdd:
// SYNC
// LL (Rarg0), Rout
......@@ -551,36 +551,36 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// BEQ Rtmp, -3(PC)
// SYNC
// ADDU Rarg1, Rout
gc.Prog(mips.ASYNC)
s.Prog(mips.ASYNC)
p := gc.Prog(mips.ALL)
p := s.Prog(mips.ALL)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
p1 := gc.Prog(mips.AADDU)
p1 := s.Prog(mips.AADDU)
p1.From.Type = obj.TYPE_REG
p1.From.Reg = v.Args[1].Reg()
p1.Reg = v.Reg0()
p1.To.Type = obj.TYPE_REG
p1.To.Reg = mips.REGTMP
p2 := gc.Prog(mips.ASC)
p2 := s.Prog(mips.ASC)
p2.From.Type = obj.TYPE_REG
p2.From.Reg = mips.REGTMP
p2.To.Type = obj.TYPE_MEM
p2.To.Reg = v.Args[0].Reg()
p3 := gc.Prog(mips.ABEQ)
p3 := s.Prog(mips.ABEQ)
p3.From.Type = obj.TYPE_REG
p3.From.Reg = mips.REGTMP
p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p)
gc.Prog(mips.ASYNC)
s.Prog(mips.ASYNC)
p4 := gc.Prog(mips.AADDU)
p4 := s.Prog(mips.AADDU)
p4.From.Type = obj.TYPE_REG
p4.From.Reg = v.Args[1].Reg()
p4.Reg = v.Reg0()
......@@ -595,36 +595,36 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// BEQ Rtmp, -3(PC)
// SYNC
// ADDU $auxInt, Rout
gc.Prog(mips.ASYNC)
s.Prog(mips.ASYNC)
p := gc.Prog(mips.ALL)
p := s.Prog(mips.ALL)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
p1 := gc.Prog(mips.AADDU)
p1 := s.Prog(mips.AADDU)
p1.From.Type = obj.TYPE_CONST
p1.From.Offset = v.AuxInt
p1.Reg = v.Reg0()
p1.To.Type = obj.TYPE_REG
p1.To.Reg = mips.REGTMP
p2 := gc.Prog(mips.ASC)
p2 := s.Prog(mips.ASC)
p2.From.Type = obj.TYPE_REG
p2.From.Reg = mips.REGTMP
p2.To.Type = obj.TYPE_MEM
p2.To.Reg = v.Args[0].Reg()
p3 := gc.Prog(mips.ABEQ)
p3 := s.Prog(mips.ABEQ)
p3.From.Type = obj.TYPE_REG
p3.From.Reg = mips.REGTMP
p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p)
gc.Prog(mips.ASYNC)
s.Prog(mips.ASYNC)
p4 := gc.Prog(mips.AADDU)
p4 := s.Prog(mips.AADDU)
p4.From.Type = obj.TYPE_CONST
p4.From.Offset = v.AuxInt
p4.Reg = v.Reg0()
......@@ -639,34 +639,34 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// SC Rtmp, (Rarg0)
// BEQ Rtmp, -3(PC)
// SYNC
gc.Prog(mips.ASYNC)
s.Prog(mips.ASYNC)
p := gc.Prog(mips.ALL)
p := s.Prog(mips.ALL)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = mips.REGTMP
p1 := gc.Prog(v.Op.Asm())
p1 := s.Prog(v.Op.Asm())
p1.From.Type = obj.TYPE_REG
p1.From.Reg = v.Args[1].Reg()
p1.Reg = mips.REGTMP
p1.To.Type = obj.TYPE_REG
p1.To.Reg = mips.REGTMP
p2 := gc.Prog(mips.ASC)
p2 := s.Prog(mips.ASC)
p2.From.Type = obj.TYPE_REG
p2.From.Reg = mips.REGTMP
p2.To.Type = obj.TYPE_MEM
p2.To.Reg = v.Args[0].Reg()
p3 := gc.Prog(mips.ABEQ)
p3 := s.Prog(mips.ABEQ)
p3.From.Type = obj.TYPE_REG
p3.From.Reg = mips.REGTMP
p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p)
gc.Prog(mips.ASYNC)
s.Prog(mips.ASYNC)
case ssa.OpMIPSLoweredAtomicCas:
// MOVW $0, Rout
......@@ -677,52 +677,52 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// SC Rout, (Rarg0)
// BEQ Rout, -4(PC)
// SYNC
p := gc.Prog(mips.AMOVW)
p := s.Prog(mips.AMOVW)
p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGZERO
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
gc.Prog(mips.ASYNC)
s.Prog(mips.ASYNC)
p1 := gc.Prog(mips.ALL)
p1 := s.Prog(mips.ALL)
p1.From.Type = obj.TYPE_MEM
p1.From.Reg = v.Args[0].Reg()
p1.To.Type = obj.TYPE_REG
p1.To.Reg = mips.REGTMP
p2 := gc.Prog(mips.ABNE)
p2 := s.Prog(mips.ABNE)
p2.From.Type = obj.TYPE_REG
p2.From.Reg = v.Args[1].Reg()
p2.Reg = mips.REGTMP
p2.To.Type = obj.TYPE_BRANCH
p3 := gc.Prog(mips.AMOVW)
p3 := s.Prog(mips.AMOVW)
p3.From.Type = obj.TYPE_REG
p3.From.Reg = v.Args[2].Reg()
p3.To.Type = obj.TYPE_REG
p3.To.Reg = v.Reg0()
p4 := gc.Prog(mips.ASC)
p4 := s.Prog(mips.ASC)
p4.From.Type = obj.TYPE_REG
p4.From.Reg = v.Reg0()
p4.To.Type = obj.TYPE_MEM
p4.To.Reg = v.Args[0].Reg()
p5 := gc.Prog(mips.ABEQ)
p5 := s.Prog(mips.ABEQ)
p5.From.Type = obj.TYPE_REG
p5.From.Reg = v.Reg0()
p5.To.Type = obj.TYPE_BRANCH
gc.Patch(p5, p1)
gc.Prog(mips.ASYNC)
s.Prog(mips.ASYNC)
p6 := gc.Prog(obj.ANOP)
p6 := s.Prog(obj.ANOP)
gc.Patch(p2, p6)
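
// Illustrative sketch (not from the source): the sequence above — load
// linked, compare with the old value, conditionally store the new one,
// retry on SC failure — is compare-and-swap, observably equivalent to:
package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var word int32 = 10
	swapped := atomic.CompareAndSwapInt32(&word, 10, 20) // LL; BNE; SC; BEQ retry
	fmt.Println(swapped, word)                           // true 20
}
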
case ssa.OpMIPSLoweredNilCheck:
// Issue a load which will fault if arg is nil.
p := gc.Prog(mips.AMOVB)
p := s.Prog(mips.AMOVB)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
......@@ -740,12 +740,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if v.Op == ssa.OpMIPSFPFlagFalse {
cmov = mips.ACMOVT
}
p := gc.Prog(mips.AMOVW)
p := s.Prog(mips.AMOVW)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 1
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
p1 := gc.Prog(cmov)
p1 := s.Prog(cmov)
p1.From.Type = obj.TYPE_REG
p1.From.Reg = mips.REGZERO
p1.To.Type = obj.TYPE_REG
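
// Illustrative sketch (not from the source): the MOVW/CMOV pair above
// materializes the FP condition flag as 0 or 1 — preload 1, then let the
// conditional move overwrite it with zero when the flag has the wrong
// sense. In portable terms (wantTrue is true for FPFlagTrue, whose default
// cmov is assumed to be CMOVF, and false for FPFlagFalse/CMOVT):
package main

import "fmt"

func fpFlagValue(flag, wantTrue bool) int {
	r := 1 // MOVW $1, Rout
	if flag != wantTrue {
		r = 0 // CMOVF/CMOVT REGZERO, Rout
	}
	return r
}

func main() {
	fmt.Println(fpFlagValue(true, true), fpFlagValue(false, true)) // 1 0
}
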
......@@ -776,7 +776,7 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
switch b.Kind {
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := gc.Prog(obj.AJMP)
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
......@@ -784,23 +784,23 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
// defer returns in R1:
// 0 if we should continue executing
// 1 if we should jump to deferreturn call
p := gc.Prog(mips.ABNE)
p := s.Prog(mips.ABNE)
p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGZERO
p.Reg = mips.REG_R1
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
p := gc.Prog(obj.AJMP)
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:
gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here
s.Prog(obj.AUNDEF) // tell plive.go that we never reach here
case ssa.BlockRet:
gc.Prog(obj.ARET)
s.Prog(obj.ARET)
case ssa.BlockRetJmp:
p := gc.Prog(obj.ARET)
p := s.Prog(obj.ARET)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = b.Aux.(*obj.LSym)
......@@ -812,18 +812,18 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
var p *obj.Prog
switch next {
case b.Succs[0].Block():
p = gc.Prog(jmp.invasm)
p = s.Prog(jmp.invasm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
case b.Succs[1].Block():
p = gc.Prog(jmp.asm)
p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
default:
p = gc.Prog(jmp.asm)
p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
q := gc.Prog(obj.AJMP)
q := s.Prog(obj.AJMP)
q.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
}
......@@ -96,7 +96,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if isFPreg(x) && isFPreg(y) {
as = mips.AMOVD
}
p := gc.Prog(as)
p := s.Prog(as)
p.From.Type = obj.TYPE_REG
p.From.Reg = x
p.To.Type = obj.TYPE_REG
......@@ -104,7 +104,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if isHILO(x) && isHILO(y) || isHILO(x) && isFPreg(y) || isFPreg(x) && isHILO(y) {
// cannot move between special registers, use TMP as intermediate
p.To.Reg = mips.REGTMP
p = gc.Prog(mips.AMOVV)
p = s.Prog(mips.AMOVV)
p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGTMP
p.To.Type = obj.TYPE_REG
......@@ -121,14 +121,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
return
}
r := v.Reg()
p := gc.Prog(loadByType(v.Type, r))
p := s.Prog(loadByType(v.Type, r))
gc.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = r
if isHILO(r) {
// cannot directly load, load to TMP and move
p.To.Reg = mips.REGTMP
p = gc.Prog(mips.AMOVV)
p = s.Prog(mips.AMOVV)
p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGTMP
p.To.Type = obj.TYPE_REG
......@@ -142,14 +142,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r := v.Args[0].Reg()
if isHILO(r) {
// cannot directly store, move to TMP and store
p := gc.Prog(mips.AMOVV)
p := s.Prog(mips.AMOVV)
p.From.Type = obj.TYPE_REG
p.From.Reg = r
p.To.Type = obj.TYPE_REG
p.To.Reg = mips.REGTMP
r = mips.REGTMP
}
p := gc.Prog(storeByType(v.Type, r))
p := s.Prog(storeByType(v.Type, r))
p.From.Type = obj.TYPE_REG
p.From.Reg = r
gc.AddrAuto(&p.To, v)
......@@ -170,7 +170,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpMIPS64MULD,
ssa.OpMIPS64DIVF,
ssa.OpMIPS64DIVD:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.Reg = v.Args[0].Reg()
......@@ -178,7 +178,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = v.Reg()
case ssa.OpMIPS64SGT,
ssa.OpMIPS64SGTU:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.Reg = v.Args[1].Reg()
......@@ -195,7 +195,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpMIPS64SRAVconst,
ssa.OpMIPS64SGTconst,
ssa.OpMIPS64SGTUconst:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.Reg = v.Args[0].Reg()
......@@ -206,13 +206,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpMIPS64DIVV,
ssa.OpMIPS64DIVVU:
// result in hi,lo
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.Reg = v.Args[0].Reg()
case ssa.OpMIPS64MOVVconst:
r := v.Reg()
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
......@@ -220,7 +220,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if isFPreg(r) || isHILO(r) {
// cannot move into FP or special registers, use TMP as intermediate
p.To.Reg = mips.REGTMP
p = gc.Prog(mips.AMOVV)
p = s.Prog(mips.AMOVV)
p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGTMP
p.To.Type = obj.TYPE_REG
......@@ -228,7 +228,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
}
case ssa.OpMIPS64MOVFconst,
ssa.OpMIPS64MOVDconst:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_FCONST
p.From.Val = math.Float64frombits(uint64(v.AuxInt))
p.To.Type = obj.TYPE_REG
......@@ -239,12 +239,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpMIPS64CMPGED,
ssa.OpMIPS64CMPGTF,
ssa.OpMIPS64CMPGTD:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.Reg = v.Args[1].Reg()
case ssa.OpMIPS64MOVVaddr:
p := gc.Prog(mips.AMOVV)
p := s.Prog(mips.AMOVV)
p.From.Type = obj.TYPE_ADDR
var wantreg string
// MOVV $sym+off(base), R
......@@ -281,7 +281,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpMIPS64MOVVload,
ssa.OpMIPS64MOVFload,
ssa.OpMIPS64MOVDload:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
......@@ -293,7 +293,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpMIPS64MOVVstore,
ssa.OpMIPS64MOVFstore,
ssa.OpMIPS64MOVDstore:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
......@@ -303,7 +303,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpMIPS64MOVHstorezero,
ssa.OpMIPS64MOVWstorezero,
ssa.OpMIPS64MOVVstorezero:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGZERO
p.To.Type = obj.TYPE_MEM
......@@ -332,7 +332,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if v.Reg() == v.Args[0].Reg() {
return
}
p := gc.Prog(mips.AMOVV)
p := s.Prog(mips.AMOVV)
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
......@@ -354,14 +354,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpMIPS64MOVDF,
ssa.OpMIPS64NEGF,
ssa.OpMIPS64NEGD:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpMIPS64NEGV:
// SUB from REGZERO
p := gc.Prog(mips.ASUBVU)
p := s.Prog(mips.ASUBVU)
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.Reg = mips.REGZERO
......@@ -369,13 +369,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = v.Reg()
case ssa.OpMIPS64DUFFZERO:
// runtime.duffzero expects start address - 8 in R1
p := gc.Prog(mips.ASUBVU)
p := s.Prog(mips.ASUBVU)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 8
p.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = mips.REG_R1
p = gc.Prog(obj.ADUFFZERO)
p = s.Prog(obj.ADUFFZERO)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Duffzero
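
// Illustrative sketch (not from the source): runtime.duffzero is an
// unrolled, Duff's-device style zeroing routine entered at an offset
// proportional to the size; the SUBVU above passes start address - 8
// because that is the address form the callee expects (per the comment
// above). A portable analogue of the overall effect:
package main

import "fmt"

func zeroRange(b []byte) {
	for i := range b { // the real routine is unrolled assembly
		b[i] = 0
	}
}

func main() {
	b := []byte{1, 2, 3}
	zeroRange(b)
	fmt.Println(b) // [0 0 0]
}
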
......@@ -402,23 +402,23 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
sz = 1
mov = mips.AMOVB
}
p := gc.Prog(mips.ASUBVU)
p := s.Prog(mips.ASUBVU)
p.From.Type = obj.TYPE_CONST
p.From.Offset = sz
p.To.Type = obj.TYPE_REG
p.To.Reg = mips.REG_R1
p2 := gc.Prog(mov)
p2 := s.Prog(mov)
p2.From.Type = obj.TYPE_REG
p2.From.Reg = mips.REGZERO
p2.To.Type = obj.TYPE_MEM
p2.To.Reg = mips.REG_R1
p2.To.Offset = sz
p3 := gc.Prog(mips.AADDVU)
p3 := s.Prog(mips.AADDVU)
p3.From.Type = obj.TYPE_CONST
p3.From.Offset = sz
p3.To.Type = obj.TYPE_REG
p3.To.Reg = mips.REG_R1
p4 := gc.Prog(mips.ABNE)
p4 := s.Prog(mips.ABNE)
p4.From.Type = obj.TYPE_REG
p4.From.Reg = v.Args[1].Reg()
p4.Reg = mips.REG_R1
......@@ -448,33 +448,33 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
sz = 1
mov = mips.AMOVB
}
p := gc.Prog(mips.ASUBVU)
p := s.Prog(mips.ASUBVU)
p.From.Type = obj.TYPE_CONST
p.From.Offset = sz
p.To.Type = obj.TYPE_REG
p.To.Reg = mips.REG_R1
p2 := gc.Prog(mov)
p2 := s.Prog(mov)
p2.From.Type = obj.TYPE_MEM
p2.From.Reg = mips.REG_R1
p2.From.Offset = sz
p2.To.Type = obj.TYPE_REG
p2.To.Reg = mips.REGTMP
p3 := gc.Prog(mov)
p3 := s.Prog(mov)
p3.From.Type = obj.TYPE_REG
p3.From.Reg = mips.REGTMP
p3.To.Type = obj.TYPE_MEM
p3.To.Reg = mips.REG_R2
p4 := gc.Prog(mips.AADDVU)
p4 := s.Prog(mips.AADDVU)
p4.From.Type = obj.TYPE_CONST
p4.From.Offset = sz
p4.To.Type = obj.TYPE_REG
p4.To.Reg = mips.REG_R1
p5 := gc.Prog(mips.AADDVU)
p5 := s.Prog(mips.AADDVU)
p5.From.Type = obj.TYPE_CONST
p5.From.Offset = sz
p5.To.Type = obj.TYPE_REG
p5.To.Reg = mips.REG_R2
p6 := gc.Prog(mips.ABNE)
p6 := s.Prog(mips.ABNE)
p6.From.Type = obj.TYPE_REG
p6.From.Reg = v.Args[2].Reg()
p6.Reg = mips.REG_R1
......@@ -484,7 +484,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
s.Call(v)
case ssa.OpMIPS64LoweredNilCheck:
// Issue a load which will fault if arg is nil.
p := gc.Prog(mips.AMOVB)
p := s.Prog(mips.AMOVB)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
......@@ -502,19 +502,19 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if v.Op == ssa.OpMIPS64FPFlagFalse {
branch = mips.ABFPT
}
p := gc.Prog(mips.AMOVV)
p := s.Prog(mips.AMOVV)
p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGZERO
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
p2 := gc.Prog(branch)
p2 := s.Prog(branch)
p2.To.Type = obj.TYPE_BRANCH
p3 := gc.Prog(mips.AMOVV)
p3 := s.Prog(mips.AMOVV)
p3.From.Type = obj.TYPE_CONST
p3.From.Offset = 1
p3.To.Type = obj.TYPE_REG
p3.To.Reg = v.Reg()
p4 := gc.Prog(obj.ANOP) // not a machine instruction, for branch to land
p4 := s.Prog(obj.ANOP) // not a machine instruction, for branch to land
gc.Patch(p2, p4)
case ssa.OpMIPS64LoweredGetClosurePtr:
// Closure pointer is R22 (mips.REGCTXT).
......@@ -541,7 +541,7 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
switch b.Kind {
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := gc.Prog(obj.AJMP)
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
......@@ -549,23 +549,23 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
// defer returns in R1:
// 0 if we should continue executing
// 1 if we should jump to deferreturn call
p := gc.Prog(mips.ABNE)
p := s.Prog(mips.ABNE)
p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGZERO
p.Reg = mips.REG_R1
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
p := gc.Prog(obj.AJMP)
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:
gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here
s.Prog(obj.AUNDEF) // tell plive.go that we never reach here
case ssa.BlockRet:
gc.Prog(obj.ARET)
s.Prog(obj.ARET)
case ssa.BlockRetJmp:
p := gc.Prog(obj.ARET)
p := s.Prog(obj.ARET)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = b.Aux.(*obj.LSym)
......@@ -577,18 +577,18 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
var p *obj.Prog
switch next {
case b.Succs[0].Block():
p = gc.Prog(jmp.invasm)
p = s.Prog(jmp.invasm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
case b.Succs[1].Block():
p = gc.Prog(jmp.asm)
p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
default:
p = gc.Prog(jmp.asm)
p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
q := gc.Prog(obj.AJMP)
q := s.Prog(obj.AJMP)
q.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
}
......@@ -131,9 +131,9 @@ func storeByType(t ssa.Type) obj.As {
panic("bad store type")
}
func ssaGenISEL(v *ssa.Value, cr int64, r1, r2 int16) {
func ssaGenISEL(s *gc.SSAGenState, v *ssa.Value, cr int64, r1, r2 int16) {
r := v.Reg()
p := gc.Prog(ppc64.AISEL)
p := s.Prog(ppc64.AISEL)
p.To.Type = obj.TYPE_REG
p.To.Reg = r
p.Reg = r1
......@@ -158,7 +158,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if t.IsFloat() {
op = ppc64.AFMOVD
}
p := gc.Prog(op)
p := s.Prog(op)
p.From.Type = rt
p.From.Reg = x
p.To.Type = rt
......@@ -170,7 +170,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
x := v.Args[0].Reg()
y := v.Reg()
p := gc.Prog(ppc64.AMFVSRD)
p := s.Prog(ppc64.AMFVSRD)
p.From.Type = obj.TYPE_REG
p.From.Reg = x
p.To.Type = obj.TYPE_REG
......@@ -181,7 +181,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
x := v.Args[0].Reg()
y := v.Reg()
p := gc.Prog(ppc64.AMTVSRD)
p := s.Prog(ppc64.AMTVSRD)
p.From.Type = obj.TYPE_REG
p.From.Reg = x
p.To.Type = obj.TYPE_REG
......@@ -198,28 +198,28 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// ISYNC
r0 := v.Args[0].Reg()
r1 := v.Args[1].Reg()
psync := gc.Prog(ppc64.ASYNC)
psync := s.Prog(ppc64.ASYNC)
psync.To.Type = obj.TYPE_NONE
p := gc.Prog(ppc64.ALBAR)
p := s.Prog(ppc64.ALBAR)
p.From.Type = obj.TYPE_MEM
p.From.Reg = r0
p.To.Type = obj.TYPE_REG
p.To.Reg = ppc64.REGTMP
p1 := gc.Prog(v.Op.Asm())
p1 := s.Prog(v.Op.Asm())
p1.From.Type = obj.TYPE_REG
p1.From.Reg = r1
p1.To.Type = obj.TYPE_REG
p1.To.Reg = ppc64.REGTMP
p2 := gc.Prog(ppc64.ASTBCCC)
p2 := s.Prog(ppc64.ASTBCCC)
p2.From.Type = obj.TYPE_REG
p2.From.Reg = ppc64.REGTMP
p2.To.Type = obj.TYPE_MEM
p2.To.Reg = r0
p2.RegTo2 = ppc64.REGTMP
p3 := gc.Prog(ppc64.ABNE)
p3 := s.Prog(ppc64.ABNE)
p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p)
pisync := gc.Prog(ppc64.AISYNC)
pisync := s.Prog(ppc64.AISYNC)
pisync.To.Type = obj.TYPE_NONE
case ssa.OpPPC64LoweredAtomicAdd32,
......@@ -241,37 +241,37 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r1 := v.Args[1].Reg()
out := v.Reg0()
// SYNC
psync := gc.Prog(ppc64.ASYNC)
psync := s.Prog(ppc64.ASYNC)
psync.To.Type = obj.TYPE_NONE
// LDAR or LWAR
p := gc.Prog(ld)
p := s.Prog(ld)
p.From.Type = obj.TYPE_MEM
p.From.Reg = r0
p.To.Type = obj.TYPE_REG
p.To.Reg = out
// ADD reg1,out
p1 := gc.Prog(ppc64.AADD)
p1 := s.Prog(ppc64.AADD)
p1.From.Type = obj.TYPE_REG
p1.From.Reg = r1
p1.To.Reg = out
p1.To.Type = obj.TYPE_REG
// STDCCC or STWCCC
p3 := gc.Prog(st)
p3 := s.Prog(st)
p3.From.Type = obj.TYPE_REG
p3.From.Reg = out
p3.To.Type = obj.TYPE_MEM
p3.To.Reg = r0
// BNE retry
p4 := gc.Prog(ppc64.ABNE)
p4 := s.Prog(ppc64.ABNE)
p4.To.Type = obj.TYPE_BRANCH
gc.Patch(p4, p)
// ISYNC
pisync := gc.Prog(ppc64.AISYNC)
pisync := s.Prog(ppc64.AISYNC)
pisync.To.Type = obj.TYPE_NONE
// Ensure a 32 bit result
if v.Op == ssa.OpPPC64LoweredAtomicAdd32 {
p5 := gc.Prog(ppc64.AMOVWZ)
p5 := s.Prog(ppc64.AMOVWZ)
p5.To.Type = obj.TYPE_REG
p5.To.Reg = out
p5.From.Type = obj.TYPE_REG
......@@ -295,26 +295,26 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r1 := v.Args[1].Reg()
out := v.Reg0()
// SYNC
psync := gc.Prog(ppc64.ASYNC)
psync := s.Prog(ppc64.ASYNC)
psync.To.Type = obj.TYPE_NONE
// LDAR or LWAR
p := gc.Prog(ld)
p := s.Prog(ld)
p.From.Type = obj.TYPE_MEM
p.From.Reg = r0
p.To.Type = obj.TYPE_REG
p.To.Reg = out
// STDCCC or STWCCC
p1 := gc.Prog(st)
p1 := s.Prog(st)
p1.From.Type = obj.TYPE_REG
p1.From.Reg = r1
p1.To.Type = obj.TYPE_MEM
p1.To.Reg = r0
// BNE retry
p2 := gc.Prog(ppc64.ABNE)
p2 := s.Prog(ppc64.ABNE)
p2.To.Type = obj.TYPE_BRANCH
gc.Patch(p2, p)
// ISYNC
pisync := gc.Prog(ppc64.AISYNC)
pisync := s.Prog(ppc64.AISYNC)
pisync.To.Type = obj.TYPE_NONE
case ssa.OpPPC64LoweredAtomicLoad32,
......@@ -334,25 +334,25 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
arg0 := v.Args[0].Reg()
out := v.Reg0()
// SYNC
psync := gc.Prog(ppc64.ASYNC)
psync := s.Prog(ppc64.ASYNC)
psync.To.Type = obj.TYPE_NONE
// Load
p := gc.Prog(ld)
p := s.Prog(ld)
p.From.Type = obj.TYPE_MEM
p.From.Reg = arg0
p.To.Type = obj.TYPE_REG
p.To.Reg = out
// CMP
p1 := gc.Prog(cmp)
p1 := s.Prog(cmp)
p1.From.Type = obj.TYPE_REG
p1.From.Reg = out
p1.To.Type = obj.TYPE_REG
p1.To.Reg = out
// BNE
p2 := gc.Prog(ppc64.ABNE)
p2 := s.Prog(ppc64.ABNE)
p2.To.Type = obj.TYPE_BRANCH
// ISYNC
pisync := gc.Prog(ppc64.AISYNC)
pisync := s.Prog(ppc64.AISYNC)
pisync.To.Type = obj.TYPE_NONE
gc.Patch(p2, pisync)
......@@ -367,10 +367,10 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
arg0 := v.Args[0].Reg()
arg1 := v.Args[1].Reg()
// SYNC
psync := gc.Prog(ppc64.ASYNC)
psync := s.Prog(ppc64.ASYNC)
psync.To.Type = obj.TYPE_NONE
// Store
p := gc.Prog(st)
p := s.Prog(st)
p.To.Type = obj.TYPE_MEM
p.To.Reg = arg0
p.From.Type = obj.TYPE_REG
......@@ -404,54 +404,54 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r2 := v.Args[2].Reg()
out := v.Reg0()
// SYNC
psync := gc.Prog(ppc64.ASYNC)
psync := s.Prog(ppc64.ASYNC)
psync.To.Type = obj.TYPE_NONE
// LDAR or LWAR
p := gc.Prog(ld)
p := s.Prog(ld)
p.From.Type = obj.TYPE_MEM
p.From.Reg = r0
p.To.Type = obj.TYPE_REG
p.To.Reg = ppc64.REGTMP
// CMP reg1,reg2
p1 := gc.Prog(cmp)
p1 := s.Prog(cmp)
p1.From.Type = obj.TYPE_REG
p1.From.Reg = r1
p1.To.Reg = ppc64.REGTMP
p1.To.Type = obj.TYPE_REG
// BNE cas_fail
p2 := gc.Prog(ppc64.ABNE)
p2 := s.Prog(ppc64.ABNE)
p2.To.Type = obj.TYPE_BRANCH
// STDCCC or STWCCC
p3 := gc.Prog(st)
p3 := s.Prog(st)
p3.From.Type = obj.TYPE_REG
p3.From.Reg = r2
p3.To.Type = obj.TYPE_MEM
p3.To.Reg = r0
// BNE retry
p4 := gc.Prog(ppc64.ABNE)
p4 := s.Prog(ppc64.ABNE)
p4.To.Type = obj.TYPE_BRANCH
gc.Patch(p4, p)
// ISYNC
pisync := gc.Prog(ppc64.AISYNC)
pisync := s.Prog(ppc64.AISYNC)
pisync.To.Type = obj.TYPE_NONE
// return true
p5 := gc.Prog(ppc64.AMOVD)
p5 := s.Prog(ppc64.AMOVD)
p5.From.Type = obj.TYPE_CONST
p5.From.Offset = 1
p5.To.Type = obj.TYPE_REG
p5.To.Reg = out
// BR done
p6 := gc.Prog(obj.AJMP)
p6 := s.Prog(obj.AJMP)
p6.To.Type = obj.TYPE_BRANCH
// return false
p7 := gc.Prog(ppc64.AMOVD)
p7 := s.Prog(ppc64.AMOVD)
p7.From.Type = obj.TYPE_CONST
p7.From.Offset = 0
p7.To.Type = obj.TYPE_REG
p7.To.Reg = out
gc.Patch(p2, p7)
// done (label)
p8 := gc.Prog(obj.ANOP)
p8 := s.Prog(obj.ANOP)
gc.Patch(p6, p8)
case ssa.OpPPC64LoweredGetClosurePtr:
......@@ -463,14 +463,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpLoadReg:
loadOp := loadByType(v.Type)
p := gc.Prog(loadOp)
p := s.Prog(loadOp)
gc.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpStoreReg:
storeOp := storeByType(v.Type)
p := gc.Prog(storeOp)
p := s.Prog(storeOp)
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
gc.AddrAuto(&p.To, v)
......@@ -488,33 +488,33 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r0 := v.Args[0].Reg()
r1 := v.Args[1].Reg()
p := gc.Prog(ppc64.ACMP)
p := s.Prog(ppc64.ACMP)
p.From.Type = obj.TYPE_REG
p.From.Reg = r1
p.To.Type = obj.TYPE_CONST
p.To.Offset = -1
pbahead := gc.Prog(ppc64.ABEQ)
pbahead := s.Prog(ppc64.ABEQ)
pbahead.To.Type = obj.TYPE_BRANCH
p = gc.Prog(v.Op.Asm())
p = s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = r1
p.Reg = r0
p.To.Type = obj.TYPE_REG
p.To.Reg = r
pbover := gc.Prog(obj.AJMP)
pbover := s.Prog(obj.AJMP)
pbover.To.Type = obj.TYPE_BRANCH
p = gc.Prog(ppc64.ANEG)
p = s.Prog(ppc64.ANEG)
p.To.Type = obj.TYPE_REG
p.To.Reg = r
p.From.Type = obj.TYPE_REG
p.From.Reg = r0
gc.Patch(pbahead, p)
p = gc.Prog(obj.ANOP)
p = s.Prog(obj.ANOP)
gc.Patch(pbover, p)
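
// Illustrative sketch (not from the source): the CMP/BEQ/NEG sequence above
// special-cases a divisor of -1. Go defines MinInt64 / -1 == MinInt64,
// which the hardware divide is not relied on to produce, and x / -1 == -x
// in two's complement anyway, so that quotient is computed with a negate:
package main

import (
	"fmt"
	"math"
)

func quotient(dividend, divisor int64) int64 {
	if divisor == -1 { // CMP $-1; BEQ ahead
		return -dividend // NEG: also correct for MinInt64 (wraps to itself)
	}
	return dividend / divisor // DIVD
}

func main() {
	fmt.Println(quotient(math.MinInt64, -1) == math.MinInt64) // true
}
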
case ssa.OpPPC64DIVW:
......@@ -523,33 +523,33 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r0 := v.Args[0].Reg()
r1 := v.Args[1].Reg()
p := gc.Prog(ppc64.ACMPW)
p := s.Prog(ppc64.ACMPW)
p.From.Type = obj.TYPE_REG
p.From.Reg = r1
p.To.Type = obj.TYPE_CONST
p.To.Offset = -1
pbahead := gc.Prog(ppc64.ABEQ)
pbahead := s.Prog(ppc64.ABEQ)
pbahead.To.Type = obj.TYPE_BRANCH
p = gc.Prog(v.Op.Asm())
p = s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = r1
p.Reg = r0
p.To.Type = obj.TYPE_REG
p.To.Reg = r
pbover := gc.Prog(obj.AJMP)
pbover := s.Prog(obj.AJMP)
pbover.To.Type = obj.TYPE_BRANCH
p = gc.Prog(ppc64.ANEG)
p = s.Prog(ppc64.ANEG)
p.To.Type = obj.TYPE_REG
p.To.Reg = r
p.From.Type = obj.TYPE_REG
p.From.Reg = r0
gc.Patch(pbahead, p)
p = gc.Prog(obj.ANOP)
p = s.Prog(obj.ANOP)
gc.Patch(pbover, p)
case ssa.OpPPC64ADD, ssa.OpPPC64FADD, ssa.OpPPC64FADDS, ssa.OpPPC64SUB, ssa.OpPPC64FSUB, ssa.OpPPC64FSUBS,
......@@ -561,7 +561,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r := v.Reg()
r1 := v.Args[0].Reg()
r2 := v.Args[1].Reg()
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = r2
p.Reg = r1
......@@ -574,7 +574,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r2 := v.Args[1].Reg()
r3 := v.Args[2].Reg()
// r = r1*r2 ± r3
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = r1
p.Reg = r3
......@@ -586,7 +586,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpPPC64MaskIfNotCarry:
r := v.Reg()
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = ppc64.REGZERO
p.To.Type = obj.TYPE_REG
......@@ -594,7 +594,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpPPC64ADDconstForCarry:
r1 := v.Args[0].Reg()
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.Reg = r1
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
......@@ -603,7 +603,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpPPC64NEG, ssa.OpPPC64FNEG, ssa.OpPPC64FSQRT, ssa.OpPPC64FSQRTS, ssa.OpPPC64FCTIDZ, ssa.OpPPC64FCTIWZ, ssa.OpPPC64FCFID, ssa.OpPPC64FRSP:
r := v.Reg()
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.To.Type = obj.TYPE_REG
p.To.Reg = r
p.From.Type = obj.TYPE_REG
......@@ -611,7 +611,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpPPC64ADDconst, ssa.OpPPC64ANDconst, ssa.OpPPC64ORconst, ssa.OpPPC64XORconst,
ssa.OpPPC64SRADconst, ssa.OpPPC64SRAWconst, ssa.OpPPC64SRDconst, ssa.OpPPC64SRWconst, ssa.OpPPC64SLDconst, ssa.OpPPC64SLWconst:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.Reg = v.Args[0].Reg()
if v.Aux != nil {
......@@ -626,7 +626,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = v.Reg()
case ssa.OpPPC64ANDCCconst:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.Reg = v.Args[0].Reg()
if v.Aux != nil {
......@@ -641,7 +641,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = ppc64.REGTMP // discard result
case ssa.OpPPC64MOVDaddr:
p := gc.Prog(ppc64.AMOVD)
p := s.Prog(ppc64.AMOVD)
p.From.Type = obj.TYPE_ADDR
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
......@@ -673,28 +673,28 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
}
case ssa.OpPPC64MOVDconst:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpPPC64FMOVDconst, ssa.OpPPC64FMOVSconst:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_FCONST
p.From.Val = math.Float64frombits(uint64(v.AuxInt))
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpPPC64FCMPU, ssa.OpPPC64CMP, ssa.OpPPC64CMPW, ssa.OpPPC64CMPU, ssa.OpPPC64CMPWU:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Args[1].Reg()
case ssa.OpPPC64CMPconst, ssa.OpPPC64CMPUconst, ssa.OpPPC64CMPWconst, ssa.OpPPC64CMPWUconst:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_CONST
......@@ -702,14 +702,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpPPC64MOVBreg, ssa.OpPPC64MOVBZreg, ssa.OpPPC64MOVHreg, ssa.OpPPC64MOVHZreg, ssa.OpPPC64MOVWreg, ssa.OpPPC64MOVWZreg:
// Shift in register to required size
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Reg = v.Reg()
p.To.Type = obj.TYPE_REG
case ssa.OpPPC64MOVDload, ssa.OpPPC64MOVWload, ssa.OpPPC64MOVHload, ssa.OpPPC64MOVWZload, ssa.OpPPC64MOVBZload, ssa.OpPPC64MOVHZload:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
......@@ -717,7 +717,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = v.Reg()
case ssa.OpPPC64FMOVDload, ssa.OpPPC64FMOVSload:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
......@@ -725,7 +725,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = v.Reg()
case ssa.OpPPC64MOVDstorezero, ssa.OpPPC64MOVWstorezero, ssa.OpPPC64MOVHstorezero, ssa.OpPPC64MOVBstorezero:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = ppc64.REGZERO
p.To.Type = obj.TYPE_MEM
......@@ -733,14 +733,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
gc.AddAux(&p.To, v)
case ssa.OpPPC64MOVDstore, ssa.OpPPC64MOVWstore, ssa.OpPPC64MOVHstore, ssa.OpPPC64MOVBstore:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
case ssa.OpPPC64FMOVDstore, ssa.OpPPC64FMOVSstore:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
......@@ -766,69 +766,69 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// isel rt,0,rtmp,!cond // rt is target in ppc asm
if v.Block.Func.Config.OldArch {
p := gc.Prog(ppc64.AMOVD)
p := s.Prog(ppc64.AMOVD)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 1
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
pb := gc.Prog(condOps[v.Op])
pb := s.Prog(condOps[v.Op])
pb.To.Type = obj.TYPE_BRANCH
p = gc.Prog(ppc64.AMOVD)
p = s.Prog(ppc64.AMOVD)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 0
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
p = gc.Prog(obj.ANOP)
p = s.Prog(obj.ANOP)
gc.Patch(pb, p)
break
}
// Modern PPC uses ISEL
p := gc.Prog(ppc64.AMOVD)
p := s.Prog(ppc64.AMOVD)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 1
p.To.Type = obj.TYPE_REG
p.To.Reg = iselRegs[1]
iop := iselOps[v.Op]
ssaGenISEL(v, iop.cond, iselRegs[iop.valueIfCond], iselRegs[1-iop.valueIfCond])
ssaGenISEL(s, v, iop.cond, iselRegs[iop.valueIfCond], iselRegs[1-iop.valueIfCond])
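
// Illustrative sketch (not from the source): ISEL rt, ra, rb, cond is a
// conditional select — rt gets ra when cond holds, rb otherwise — so a
// boolean result needs no branches, just registers preloaded with 1 and 0:
package main

import "fmt"

func isel(cond bool, a, b int64) int64 {
	if cond { // the hardware does this without branching
		return a
	}
	return b
}

func main() {
	fmt.Println(isel(true, 1, 0), isel(false, 1, 0)) // 1 0
}
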
case ssa.OpPPC64FLessEqual, // These include a second branch for EQ -- dealing with NaN prevents REL= to !REL conversion
ssa.OpPPC64FGreaterEqual:
if v.Block.Func.Config.OldArch {
p := gc.Prog(ppc64.AMOVW)
p := s.Prog(ppc64.AMOVW)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 1
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
pb0 := gc.Prog(condOps[v.Op])
pb0 := s.Prog(condOps[v.Op])
pb0.To.Type = obj.TYPE_BRANCH
pb1 := gc.Prog(ppc64.ABEQ)
pb1 := s.Prog(ppc64.ABEQ)
pb1.To.Type = obj.TYPE_BRANCH
p = gc.Prog(ppc64.AMOVW)
p = s.Prog(ppc64.AMOVW)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 0
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
p = gc.Prog(obj.ANOP)
p = s.Prog(obj.ANOP)
gc.Patch(pb0, p)
gc.Patch(pb1, p)
break
}
// Modern PPC uses ISEL
p := gc.Prog(ppc64.AMOVD)
p := s.Prog(ppc64.AMOVD)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 1
p.To.Type = obj.TYPE_REG
p.To.Reg = iselRegs[1]
iop := iselOps[v.Op]
ssaGenISEL(v, iop.cond, iselRegs[iop.valueIfCond], iselRegs[1-iop.valueIfCond])
ssaGenISEL(v, ppc64.C_COND_EQ, iselRegs[1], v.Reg())
ssaGenISEL(s, v, iop.cond, iselRegs[iop.valueIfCond], iselRegs[1-iop.valueIfCond])
ssaGenISEL(s, v, ppc64.C_COND_EQ, iselRegs[1], v.Reg())
case ssa.OpPPC64LoweredZero:
......@@ -879,13 +879,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// than 1 iteration.
if ctr > 1 {
// Set up CTR loop counter
p := gc.Prog(ppc64.AMOVD)
p := s.Prog(ppc64.AMOVD)
p.From.Type = obj.TYPE_CONST
p.From.Offset = ctr
p.To.Type = obj.TYPE_REG
p.To.Reg = ppc64.REGTMP
p = gc.Prog(ppc64.AMOVD)
p = s.Prog(ppc64.AMOVD)
p.From.Type = obj.TYPE_REG
p.From.Reg = ppc64.REGTMP
p.To.Type = obj.TYPE_REG
......@@ -896,7 +896,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
var top *obj.Prog
for offset := int64(0); offset < 32; offset += 8 {
// This is the top of loop
p := gc.Prog(ppc64.AMOVD)
p := s.Prog(ppc64.AMOVD)
p.From.Type = obj.TYPE_REG
p.From.Reg = ppc64.REG_R0
p.To.Type = obj.TYPE_MEM
......@@ -910,7 +910,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// Increment address for the
// 4 doublewords just zeroed.
p = gc.Prog(ppc64.AADD)
p = s.Prog(ppc64.AADD)
p.Reg = v.Args[0].Reg()
p.From.Type = obj.TYPE_CONST
p.From.Offset = 32
......@@ -920,7 +920,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// Branch back to top of loop
// based on CTR
// BC with BO_BCTR generates bdnz
p = gc.Prog(ppc64.ABC)
p = s.Prog(ppc64.ABC)
p.From.Type = obj.TYPE_CONST
p.From.Offset = ppc64.BO_BCTR
p.Reg = ppc64.REG_R0
......@@ -951,7 +951,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case rem >= 2:
op, size = ppc64.AMOVH, 2
}
p := gc.Prog(op)
p := s.Prog(op)
p.From.Type = obj.TYPE_REG
p.From.Reg = ppc64.REG_R0
p.To.Type = obj.TYPE_MEM
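
// Illustrative sketch (not from the source): the code above zeroes 32 bytes
// (4 doublewords) per CTR-loop iteration, then finishes the remainder with
// the widest stores that fit, which this planning logic mirrors:
package main

import "fmt"

func zeroPlan(n int64) (loops int64, tail []int64) {
	loops = n / 32 // CTR count: one iteration per 32-byte chunk
	for rem := n % 32; rem > 0; {
		var size int64
		switch {
		case rem >= 8:
			size = 8 // MOVD
		case rem >= 4:
			size = 4 // MOVW
		case rem >= 2:
			size = 2 // MOVH
		default:
			size = 1 // MOVB
		}
		tail = append(tail, size)
		rem -= size
	}
	return loops, tail
}

func main() {
	fmt.Println(zeroPlan(77)) // 2 [8 4 1]
}
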
......@@ -994,41 +994,41 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
movu = ppc64.AMOVBU
}
p := gc.Prog(ppc64.AADD)
p := s.Prog(ppc64.AADD)
p.Reg = v.Args[0].Reg()
p.From.Type = obj.TYPE_CONST
p.From.Offset = -sz
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Args[0].Reg()
p = gc.Prog(ppc64.AADD)
p = s.Prog(ppc64.AADD)
p.Reg = v.Args[1].Reg()
p.From.Type = obj.TYPE_CONST
p.From.Offset = -sz
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Args[1].Reg()
p = gc.Prog(movu)
p = s.Prog(movu)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[1].Reg()
p.From.Offset = sz
p.To.Type = obj.TYPE_REG
p.To.Reg = ppc64.REGTMP
p2 := gc.Prog(movu)
p2 := s.Prog(movu)
p2.From.Type = obj.TYPE_REG
p2.From.Reg = ppc64.REGTMP
p2.To.Type = obj.TYPE_MEM
p2.To.Reg = v.Args[0].Reg()
p2.To.Offset = sz
p3 := gc.Prog(ppc64.ACMPU)
p3 := s.Prog(ppc64.ACMPU)
p3.From.Reg = v.Args[1].Reg()
p3.From.Type = obj.TYPE_REG
p3.To.Reg = v.Args[2].Reg()
p3.To.Type = obj.TYPE_REG
p4 := gc.Prog(ppc64.ABLT)
p4 := s.Prog(ppc64.ABLT)
p4.To.Type = obj.TYPE_BRANCH
gc.Patch(p4, p)
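
// Illustrative sketch (not from the source): the sequence above is a copy
// loop in sz-byte steps using load/store-with-update, biasing both pointers
// back by sz first so each MOVxU pre-increments. Portably:
package main

import "fmt"

func copyChunks(dst, src []byte, sz int) {
	for i := 0; i+sz <= len(src); i += sz { // CMPU ...; BLT back-edge
		copy(dst[i:i+sz], src[i:i+sz]) // MOVxU load to REGTMP, then store
	}
}

func main() {
	src := []byte("abcdefgh")
	dst := make([]byte, len(src))
	copyChunks(dst, src, 4)
	fmt.Println(string(dst)) // abcdefgh
}
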
......@@ -1036,7 +1036,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
s.Call(v)
case ssa.OpPPC64CALLclosure, ssa.OpPPC64CALLinter:
p := gc.Prog(ppc64.AMOVD)
p := s.Prog(ppc64.AMOVD)
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
......@@ -1049,7 +1049,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// change the register allocation to put the value in
// R12 already, but I don't know how to do that.
// TODO: We have the technology now to implement TODO above.
q := gc.Prog(ppc64.AMOVD)
q := s.Prog(ppc64.AMOVD)
q.From = p.From
q.To.Type = obj.TYPE_REG
q.To.Reg = ppc64.REG_R12
......@@ -1063,7 +1063,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// called via pointer might have been implemented in
// a separate module and so overwritten the TOC
// pointer in R2; reload it.
q := gc.Prog(ppc64.AMOVD)
q := s.Prog(ppc64.AMOVD)
q.From.Type = obj.TYPE_MEM
q.From.Offset = 24
q.From.Reg = ppc64.REGSP
......@@ -1073,7 +1073,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpPPC64LoweredNilCheck:
// Issue a load which will fault if arg is nil.
p := gc.Prog(ppc64.AMOVBZ)
p := s.Prog(ppc64.AMOVBZ)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
......@@ -1118,33 +1118,33 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
// defer returns in R3:
// 0 if we should continue executing
// 1 if we should jump to deferreturn call
p := gc.Prog(ppc64.ACMP)
p := s.Prog(ppc64.ACMP)
p.From.Type = obj.TYPE_REG
p.From.Reg = ppc64.REG_R3
p.To.Type = obj.TYPE_REG
p.To.Reg = ppc64.REG_R0
p = gc.Prog(ppc64.ABNE)
p = s.Prog(ppc64.ABNE)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
p := gc.Prog(obj.AJMP)
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := gc.Prog(obj.AJMP)
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:
gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here
s.Prog(obj.AUNDEF) // tell plive.go that we never reach here
case ssa.BlockRet:
gc.Prog(obj.ARET)
s.Prog(obj.ARET)
case ssa.BlockRetJmp:
p := gc.Prog(obj.AJMP)
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = b.Aux.(*obj.LSym)
......@@ -1159,35 +1159,35 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
var p *obj.Prog
switch next {
case b.Succs[0].Block():
p = gc.Prog(jmp.invasm)
p = s.Prog(jmp.invasm)
likely *= -1
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
if jmp.invasmun {
// TODO: The second branch is probably predict-not-taken since it is for FP unordered
q := gc.Prog(ppc64.ABVS)
q := s.Prog(ppc64.ABVS)
q.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
}
case b.Succs[1].Block():
p = gc.Prog(jmp.asm)
p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
if jmp.asmeq {
q := gc.Prog(ppc64.ABEQ)
q := s.Prog(ppc64.ABEQ)
q.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[0].Block()})
}
default:
p = gc.Prog(jmp.asm)
p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
if jmp.asmeq {
q := gc.Prog(ppc64.ABEQ)
q := s.Prog(ppc64.ABEQ)
q.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[0].Block()})
}
q := gc.Prog(obj.AJMP)
q := s.Prog(obj.AJMP)
q.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
}
......@@ -132,8 +132,8 @@ func moveByType(t ssa.Type) obj.As {
// dest := dest(To) op src(From)
// and also returns the created obj.Prog so it
// may be further adjusted (offset, scale, etc).
func opregreg(op obj.As, dest, src int16) *obj.Prog {
p := gc.Prog(op)
func opregreg(s *gc.SSAGenState, op obj.As, dest, src int16) *obj.Prog {
p := s.Prog(op)
p.From.Type = obj.TYPE_REG
p.To.Type = obj.TYPE_REG
p.To.Reg = dest
......@@ -145,8 +145,8 @@ func opregreg(op obj.As, dest, src int16) *obj.Prog {
// dest := src(From) op off
// and also returns the created obj.Prog so it
// may be further adjusted (offset, scale, etc).
func opregregimm(op obj.As, dest, src int16, off int64) *obj.Prog {
p := gc.Prog(op)
func opregregimm(s *gc.SSAGenState, op obj.As, dest, src int16, off int64) *obj.Prog {
p := s.Prog(op)
p.From.Type = obj.TYPE_CONST
p.From.Offset = off
p.Reg = src
......@@ -166,7 +166,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if r2 == s390x.REG_R0 {
v.Fatalf("cannot use R0 as shift value %s", v.LongString())
}
p := opregreg(v.Op.Asm(), r, r2)
p := opregreg(s, v.Op.Asm(), r, r2)
if r != r1 {
p.Reg = r1
}
......@@ -178,7 +178,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r := v.Reg()
r1 := v.Args[0].Reg()
r2 := v.Args[1].Reg()
p := opregreg(v.Op.Asm(), r, r2)
p := opregreg(s, v.Op.Asm(), r, r2)
if r != r1 {
p.Reg = r1
}
......@@ -191,7 +191,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
opregreg(v.Op.Asm(), r, v.Args[1].Reg())
opregreg(s, v.Op.Asm(), r, v.Args[1].Reg())
case ssa.OpS390XFMADD, ssa.OpS390XFMADDS,
ssa.OpS390XFMSUB, ssa.OpS390XFMSUBS:
r := v.Reg()
......@@ -200,7 +200,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
}
r1 := v.Args[1].Reg()
r2 := v.Args[2].Reg()
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = r1
p.Reg = r2
......@@ -222,8 +222,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
v.Op == ssa.OpS390XMODD || v.Op == ssa.OpS390XMODW {
var c *obj.Prog
c = gc.Prog(s390x.ACMP)
j = gc.Prog(s390x.ABEQ)
c = s.Prog(s390x.ACMP)
j = s.Prog(s390x.ABEQ)
c.From.Type = obj.TYPE_REG
c.From.Reg = divisor
......@@ -234,7 +234,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
}
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = divisor
p.Reg = 0
......@@ -243,18 +243,18 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// signed division, rest of the check for -1 case
if j != nil {
j2 := gc.Prog(s390x.ABR)
j2 := s.Prog(s390x.ABR)
j2.To.Type = obj.TYPE_BRANCH
var n *obj.Prog
if v.Op == ssa.OpS390XDIVD || v.Op == ssa.OpS390XDIVW {
// n * -1 = -n
n = gc.Prog(s390x.ANEG)
n = s.Prog(s390x.ANEG)
n.To.Type = obj.TYPE_REG
n.To.Reg = dividend
} else {
// n % -1 == 0
n = gc.Prog(s390x.AXOR)
n = s.Prog(s390x.AXOR)
n.From.Type = obj.TYPE_REG
n.From.Reg = dividend
n.To.Type = obj.TYPE_REG
......@@ -265,7 +265,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
j2.To.Val = s.Pc()
}
case ssa.OpS390XADDconst, ssa.OpS390XADDWconst:
opregregimm(v.Op.Asm(), v.Reg(), v.Args[0].Reg(), v.AuxInt)
opregregimm(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg(), v.AuxInt)
case ssa.OpS390XMULLDconst, ssa.OpS390XMULLWconst,
ssa.OpS390XSUBconst, ssa.OpS390XSUBWconst,
ssa.OpS390XANDconst, ssa.OpS390XANDWconst,
......@@ -275,7 +275,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
......@@ -284,7 +284,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpS390XSRDconst, ssa.OpS390XSRWconst,
ssa.OpS390XSRADconst, ssa.OpS390XSRAWconst,
ssa.OpS390XRLLGconst, ssa.OpS390XRLLconst:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
r := v.Reg()
......@@ -296,7 +296,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = r
case ssa.OpS390XSUBEcarrymask, ssa.OpS390XSUBEWcarrymask:
r := v.Reg()
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = r
p.To.Type = obj.TYPE_REG
......@@ -304,7 +304,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpS390XMOVDaddridx:
r := v.Args[0].Reg()
i := v.Args[1].Reg()
p := gc.Prog(s390x.AMOVD)
p := s.Prog(s390x.AMOVD)
p.From.Scale = 1
if i == s390x.REGSP {
r, i = i, r
......@@ -316,32 +316,32 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpS390XMOVDaddr:
p := gc.Prog(s390x.AMOVD)
p := s.Prog(s390x.AMOVD)
p.From.Type = obj.TYPE_ADDR
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpS390XCMP, ssa.OpS390XCMPW, ssa.OpS390XCMPU, ssa.OpS390XCMPWU:
opregreg(v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg())
opregreg(s, v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg())
case ssa.OpS390XFCMPS, ssa.OpS390XFCMP:
opregreg(v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg())
opregreg(s, v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg())
case ssa.OpS390XCMPconst, ssa.OpS390XCMPWconst, ssa.OpS390XCMPUconst, ssa.OpS390XCMPWUconst:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_CONST
p.To.Offset = v.AuxInt
case ssa.OpS390XMOVDconst:
x := v.Reg()
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
p.To.Reg = x
case ssa.OpS390XFMOVSconst, ssa.OpS390XFMOVDconst:
x := v.Reg()
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_FCONST
p.From.Val = math.Float64frombits(uint64(v.AuxInt))
p.To.Type = obj.TYPE_REG
......@@ -356,7 +356,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[1].Reg()
gc.AddAux(&p.From, v)
......@@ -367,7 +367,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpS390XMOVDBRload, ssa.OpS390XMOVWBRload, ssa.OpS390XMOVHBRload,
ssa.OpS390XMOVBload, ssa.OpS390XMOVHload, ssa.OpS390XMOVWload,
ssa.OpS390XFMOVSload, ssa.OpS390XFMOVDload:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
......@@ -381,7 +381,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if i == s390x.REGSP {
r, i = i, r
}
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = r
p.From.Scale = 1
......@@ -392,7 +392,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpS390XMOVBstore, ssa.OpS390XMOVHstore, ssa.OpS390XMOVWstore, ssa.OpS390XMOVDstore,
ssa.OpS390XMOVHBRstore, ssa.OpS390XMOVWBRstore, ssa.OpS390XMOVDBRstore,
ssa.OpS390XFMOVSstore, ssa.OpS390XFMOVDstore:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
......@@ -406,7 +406,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if i == s390x.REGSP {
r, i = i, r
}
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_MEM
......@@ -415,7 +415,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Index = i
gc.AddAux(&p.To, v)
case ssa.OpS390XMOVDstoreconst, ssa.OpS390XMOVWstoreconst, ssa.OpS390XMOVHstoreconst, ssa.OpS390XMOVBstoreconst:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
sc := v.AuxValAndOff()
p.From.Offset = sc.Val()
......@@ -428,9 +428,9 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpS390XCFEBRA, ssa.OpS390XCFDBRA, ssa.OpS390XCGEBRA, ssa.OpS390XCGDBRA,
ssa.OpS390XLDEBR, ssa.OpS390XLEDBR,
ssa.OpS390XFNEG, ssa.OpS390XFNEGS:
opregreg(v.Op.Asm(), v.Reg(), v.Args[0].Reg())
opregreg(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg())
case ssa.OpS390XCLEAR:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
sc := v.AuxValAndOff()
p.From.Offset = sc.Val()
......@@ -444,7 +444,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
x := v.Args[0].Reg()
y := v.Reg()
if x != y {
opregreg(moveByType(v.Type), y, x)
opregreg(s, moveByType(v.Type), y, x)
}
case ssa.OpS390XMOVDnop:
if v.Reg() != v.Args[0].Reg() {
......@@ -456,7 +456,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
v.Fatalf("load flags not implemented: %v", v.LongString())
return
}
p := gc.Prog(loadByType(v.Type))
p := s.Prog(loadByType(v.Type))
gc.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
......@@ -465,7 +465,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
v.Fatalf("store flags not implemented: %v", v.LongString())
return
}
p := gc.Prog(storeByType(v.Type))
p := s.Prog(storeByType(v.Type))
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
gc.AddrAuto(&p.To, v)
......@@ -476,7 +476,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// input is already rounded
case ssa.OpS390XLoweredGetG:
r := v.Reg()
p := gc.Prog(s390x.AMOVD)
p := s.Prog(s390x.AMOVD)
p.From.Type = obj.TYPE_REG
p.From.Reg = s390x.REGG
p.To.Type = obj.TYPE_REG
......@@ -485,7 +485,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
s.Call(v)
case ssa.OpS390XFLOGR, ssa.OpS390XNEG, ssa.OpS390XNEGW,
ssa.OpS390XMOVWBR, ssa.OpS390XMOVDBR:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
......@@ -500,13 +500,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = r
case ssa.OpS390XFSQRT:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
......@@ -519,7 +519,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
v.Fatalf("AddTupleFirst* should never make it to codegen %v", v.LongString())
case ssa.OpS390XLoweredNilCheck:
// Issue a load which will fault if the input is nil.
p := gc.Prog(s390x.AMOVBZ)
p := s.Prog(s390x.AMOVBZ)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
......@@ -530,7 +530,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
}
case ssa.OpS390XMVC:
vo := v.AuxValAndOff()
p := gc.Prog(s390x.AMVC)
p := s.Prog(s390x.AMVC)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[1].Reg()
p.From.Offset = vo.Off()
......@@ -547,7 +547,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
v.Fatalf("invalid store multiple %s", v.LongString())
}
}
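// Store-multiple writes the consecutive register range (first
// register in From, last in Reg) to successive memory slots
// starting at the To address.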
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.Reg = v.Args[len(v.Args)-2].Reg()
......@@ -566,7 +566,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// BLT mvc
// MVC $rem, 0(R2), 0(R1) // if rem > 0
// arg2 is the last address to move in the loop + 256
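// A single MVC moves at most 256 bytes, so the loop copies one
// 256-byte block per iteration; the trailing MVC emitted when
// v.AuxInt > 0 copies the remainder.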
mvc := gc.Prog(s390x.AMVC)
mvc := s.Prog(s390x.AMVC)
mvc.From.Type = obj.TYPE_MEM
mvc.From.Reg = v.Args[1].Reg()
mvc.To.Type = obj.TYPE_MEM
......@@ -576,7 +576,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
mvc.From3.Offset = 256
for i := 0; i < 2; i++ {
movd := gc.Prog(s390x.AMOVD)
movd := s.Prog(s390x.AMOVD)
movd.From.Type = obj.TYPE_ADDR
movd.From.Reg = v.Args[i].Reg()
movd.From.Offset = 256
......@@ -584,18 +584,18 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
movd.To.Reg = v.Args[i].Reg()
}
cmpu := gc.Prog(s390x.ACMPU)
cmpu := s.Prog(s390x.ACMPU)
cmpu.From.Reg = v.Args[1].Reg()
cmpu.From.Type = obj.TYPE_REG
cmpu.To.Reg = v.Args[2].Reg()
cmpu.To.Type = obj.TYPE_REG
bne := gc.Prog(s390x.ABLT)
bne := s.Prog(s390x.ABLT)
bne.To.Type = obj.TYPE_BRANCH
gc.Patch(bne, mvc)
if v.AuxInt > 0 {
mvc := gc.Prog(s390x.AMVC)
mvc := s.Prog(s390x.AMVC)
mvc.From.Type = obj.TYPE_MEM
mvc.From.Reg = v.Args[1].Reg()
mvc.To.Type = obj.TYPE_MEM
......@@ -615,52 +615,52 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// BLT clear
// CLEAR $rem, 0(R1) // if rem > 0
// arg1 is the last address to zero in the loop + 256
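// Each iteration zeroes one 256-byte block; the trailing CLEAR
// emitted when v.AuxInt > 0 zeroes the remaining bytes.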
clear := gc.Prog(s390x.ACLEAR)
clear := s.Prog(s390x.ACLEAR)
clear.From.Type = obj.TYPE_CONST
clear.From.Offset = 256
clear.To.Type = obj.TYPE_MEM
clear.To.Reg = v.Args[0].Reg()
movd := gc.Prog(s390x.AMOVD)
movd := s.Prog(s390x.AMOVD)
movd.From.Type = obj.TYPE_ADDR
movd.From.Reg = v.Args[0].Reg()
movd.From.Offset = 256
movd.To.Type = obj.TYPE_REG
movd.To.Reg = v.Args[0].Reg()
cmpu := gc.Prog(s390x.ACMPU)
cmpu := s.Prog(s390x.ACMPU)
cmpu.From.Reg = v.Args[0].Reg()
cmpu.From.Type = obj.TYPE_REG
cmpu.To.Reg = v.Args[1].Reg()
cmpu.To.Type = obj.TYPE_REG
bne := gc.Prog(s390x.ABLT)
bne := s.Prog(s390x.ABLT)
bne.To.Type = obj.TYPE_BRANCH
gc.Patch(bne, clear)
if v.AuxInt > 0 {
clear := gc.Prog(s390x.ACLEAR)
clear := s.Prog(s390x.ACLEAR)
clear.From.Type = obj.TYPE_CONST
clear.From.Offset = v.AuxInt
clear.To.Type = obj.TYPE_MEM
clear.To.Reg = v.Args[0].Reg()
}
case ssa.OpS390XMOVWZatomicload, ssa.OpS390XMOVDatomicload:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
case ssa.OpS390XMOVWatomicstore, ssa.OpS390XMOVDatomicstore:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
case ssa.OpS390XLAA, ssa.OpS390XLAAG:
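// LAA{,G} atomically adds the From register to the memory word at
// To and returns the value previously in memory in Reg (Reg0).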
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.Reg = v.Reg0()
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
......@@ -676,7 +676,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// NOP (so the BNE has somewhere to land)
// CS{,G} arg1, arg2, arg0
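// CS{,G} compares old with the memory at arg0 and stores new only
// on a match, setting the condition code; the MOVD/BNE/MOVD below
// turn that into a 0/1 result (1 on success).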
cs := gc.Prog(v.Op.Asm())
cs := s.Prog(v.Op.Asm())
cs.From.Type = obj.TYPE_REG
cs.From.Reg = v.Args[1].Reg() // old
cs.Reg = v.Args[2].Reg() // new
......@@ -685,25 +685,25 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
gc.AddAux(&cs.To, v)
// MOVD $0, ret
movd := gc.Prog(s390x.AMOVD)
movd := s.Prog(s390x.AMOVD)
movd.From.Type = obj.TYPE_CONST
movd.From.Offset = 0
movd.To.Type = obj.TYPE_REG
movd.To.Reg = v.Reg0()
// BNE 2(PC)
bne := gc.Prog(s390x.ABNE)
bne := s.Prog(s390x.ABNE)
bne.To.Type = obj.TYPE_BRANCH
// MOVD $1, ret
movd = gc.Prog(s390x.AMOVD)
movd = s.Prog(s390x.AMOVD)
movd.From.Type = obj.TYPE_CONST
movd.From.Offset = 1
movd.To.Type = obj.TYPE_REG
movd.To.Reg = v.Reg0()
// NOP (so the BNE has somewhere to land)
nop := gc.Prog(obj.ANOP)
nop := s.Prog(obj.ANOP)
gc.Patch(bne, nop)
case ssa.OpS390XLoweredAtomicExchange32, ssa.OpS390XLoweredAtomicExchange64:
// Loop until the CS{,G} succeeds.
......@@ -712,7 +712,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// BNE cs
// MOV{WZ,D} arg0, ret
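// When CS{,G} fails it loads the current memory value into ret,
// so the retry branch targets the CS itself rather than the
// initial load.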
load := gc.Prog(loadByType(v.Type.FieldType(0)))
load := s.Prog(loadByType(v.Type.FieldType(0)))
load.From.Type = obj.TYPE_MEM
load.From.Reg = v.Args[0].Reg()
load.To.Type = obj.TYPE_REG
......@@ -720,7 +720,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
gc.AddAux(&load.From, v)
// CS{,G} ret, arg1, arg0
cs := gc.Prog(v.Op.Asm())
cs := s.Prog(v.Op.Asm())
cs.From.Type = obj.TYPE_REG
cs.From.Reg = v.Reg0() // old
cs.Reg = v.Args[1].Reg() // new
......@@ -729,7 +729,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
gc.AddAux(&cs.To, v)
// BNE cs
bne := gc.Prog(s390x.ABNE)
bne := s.Prog(s390x.ABNE)
bne.To.Type = obj.TYPE_BRANCH
gc.Patch(bne, cs)
default:
......@@ -754,7 +754,7 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
switch b.Kind {
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := gc.Prog(s390x.ABR)
p := s.Prog(s390x.ABR)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
......@@ -762,25 +762,25 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
// defer returns in R3:
// 0 if we should continue executing
// 1 if we should jump to deferreturn call
p := gc.Prog(s390x.ACMPW)
p := s.Prog(s390x.ACMPW)
p.From.Type = obj.TYPE_REG
p.From.Reg = s390x.REG_R3
p.To.Type = obj.TYPE_CONST
p.To.Offset = 0
p = gc.Prog(s390x.ABNE)
p = s.Prog(s390x.ABNE)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
p := gc.Prog(s390x.ABR)
p := s.Prog(s390x.ABR)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:
gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here
s.Prog(obj.AUNDEF) // tell plive.go that we never reach here
case ssa.BlockRet:
gc.Prog(obj.ARET)
s.Prog(obj.ARET)
case ssa.BlockRetJmp:
p := gc.Prog(s390x.ABR)
p := s.Prog(s390x.ABR)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = b.Aux.(*obj.LSym)
......@@ -793,19 +793,19 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
var p *obj.Prog
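// When one successor is the fallthrough block, a single conditional
// branch suffices (inverted if the fallthrough is Succs[0]);
// otherwise emit the conditional branch plus an unconditional
// jump to Succs[1].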
switch next {
case b.Succs[0].Block():
p = gc.Prog(jmp.invasm)
p = s.Prog(jmp.invasm)
likely *= -1
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
case b.Succs[1].Block():
p = gc.Prog(jmp.asm)
p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
default:
p = gc.Prog(jmp.asm)
p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
q := gc.Prog(s390x.ABR)
q := s.Prog(s390x.ABR)
q.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
}
......
......@@ -21,7 +21,7 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) {
switch v.Op {
case ssa.Op386MOVSSconst, ssa.Op386MOVSDconst:
p := gc.Prog(loadPush(v.Type))
p := s.Prog(loadPush(v.Type))
p.From.Type = obj.TYPE_FCONST
p.From.Val = math.Float64frombits(uint64(v.AuxInt))
p.To.Type = obj.TYPE_REG
......@@ -29,7 +29,7 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) {
popAndSave(s, v)
case ssa.Op386MOVSSconst2, ssa.Op386MOVSDconst2:
p := gc.Prog(loadPush(v.Type))
p := s.Prog(loadPush(v.Type))
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
......@@ -37,7 +37,7 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) {
popAndSave(s, v)
case ssa.Op386MOVSSload, ssa.Op386MOVSDload, ssa.Op386MOVSSloadidx1, ssa.Op386MOVSDloadidx1, ssa.Op386MOVSSloadidx4, ssa.Op386MOVSDloadidx8:
p := gc.Prog(loadPush(v.Type))
p := s.Prog(loadPush(v.Type))
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
......@@ -68,7 +68,7 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) {
case ssa.Op386MOVSDstore:
op = x86.AFMOVDP
}
p := gc.Prog(op)
p := s.Prog(op)
p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_F0
p.To.Type = obj.TYPE_MEM
......@@ -84,7 +84,7 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) {
case ssa.Op386MOVSDstoreidx1, ssa.Op386MOVSDstoreidx8:
op = x86.AFMOVDP
}
p := gc.Prog(op)
p := s.Prog(op)
p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_F0
p.To.Type = obj.TYPE_MEM
......@@ -114,9 +114,9 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) {
// Set precision if needed. 64 bits is the default.
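// The x87 rounds results to the precision selected in its control
// word, so single-precision ops temporarily install a control word
// with a 24-bit significand to get float32 rounding.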
switch v.Op {
case ssa.Op386ADDSS, ssa.Op386SUBSS, ssa.Op386MULSS, ssa.Op386DIVSS:
p := gc.Prog(x86.AFSTCW)
p := s.Prog(x86.AFSTCW)
s.AddrScratch(&p.To)
p = gc.Prog(x86.AFLDCW)
p = s.Prog(x86.AFLDCW)
p.From.Type = obj.TYPE_MEM
p.From.Name = obj.NAME_EXTERN
p.From.Sym = gc.Sysfunc("controlWord32")
......@@ -133,7 +133,7 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) {
case ssa.Op386DIVSS, ssa.Op386DIVSD:
op = x86.AFDIVDP
}
p := gc.Prog(op)
p := s.Prog(op)
p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_F0
p.To.Type = obj.TYPE_REG
......@@ -142,7 +142,7 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) {
// Restore precision if needed.
switch v.Op {
case ssa.Op386ADDSS, ssa.Op386SUBSS, ssa.Op386MULSS, ssa.Op386DIVSS:
p := gc.Prog(x86.AFLDCW)
p := s.Prog(x86.AFLDCW)
s.AddrScratch(&p.From)
}
......@@ -150,48 +150,48 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) {
push(s, v.Args[0])
// Compare.
p := gc.Prog(x86.AFUCOMP)
p := s.Prog(x86.AFUCOMP)
p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_F0
p.To.Type = obj.TYPE_REG
p.To.Reg = s.SSEto387[v.Args[1].Reg()] + 1
// Save AX.
p = gc.Prog(x86.AMOVL)
p = s.Prog(x86.AMOVL)
p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_AX
s.AddrScratch(&p.To)
// Move status word into AX.
p = gc.Prog(x86.AFSTSW)
p = s.Prog(x86.AFSTSW)
p.To.Type = obj.TYPE_REG
p.To.Reg = x86.REG_AX
// Then move the flags we need to the integer flags.
gc.Prog(x86.ASAHF)
s.Prog(x86.ASAHF)
// Restore AX.
p = gc.Prog(x86.AMOVL)
p = s.Prog(x86.AMOVL)
s.AddrScratch(&p.From)
p.To.Type = obj.TYPE_REG
p.To.Reg = x86.REG_AX
case ssa.Op386SQRTSD:
push(s, v.Args[0])
gc.Prog(x86.AFSQRT)
s.Prog(x86.AFSQRT)
popAndSave(s, v)
case ssa.Op386FCHS:
push(s, v.Args[0])
gc.Prog(x86.AFCHS)
s.Prog(x86.AFCHS)
popAndSave(s, v)
case ssa.Op386CVTSL2SS, ssa.Op386CVTSL2SD:
p := gc.Prog(x86.AMOVL)
p := s.Prog(x86.AMOVL)
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
s.AddrScratch(&p.To)
p = gc.Prog(x86.AFMOVL)
p = s.Prog(x86.AFMOVL)
s.AddrScratch(&p.From)
p.To.Type = obj.TYPE_REG
p.To.Reg = x86.REG_F0
......@@ -201,28 +201,28 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) {
push(s, v.Args[0])
// Save control word.
p := gc.Prog(x86.AFSTCW)
p := s.Prog(x86.AFSTCW)
s.AddrScratch(&p.To)
p.To.Offset += 4
// Load control word which truncates (rounds towards zero).
p = gc.Prog(x86.AFLDCW)
p = s.Prog(x86.AFLDCW)
p.From.Type = obj.TYPE_MEM
p.From.Name = obj.NAME_EXTERN
p.From.Sym = gc.Sysfunc("controlWord64trunc")
// Now do the conversion.
p = gc.Prog(x86.AFMOVLP)
p = s.Prog(x86.AFMOVLP)
p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_F0
s.AddrScratch(&p.To)
p = gc.Prog(x86.AMOVL)
p = s.Prog(x86.AMOVL)
s.AddrScratch(&p.From)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
// Restore control word.
p = gc.Prog(x86.AFLDCW)
p = s.Prog(x86.AFLDCW)
s.AddrScratch(&p.From)
p.From.Offset += 4
......@@ -234,11 +234,11 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) {
case ssa.Op386CVTSD2SS:
// Round to nearest float32.
push(s, v.Args[0])
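// FMOVFP stores the value through a 32-bit scratch slot, rounding
// it to float32; FMOVF then reloads the rounded value onto the
// stack.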
p := gc.Prog(x86.AFMOVFP)
p := s.Prog(x86.AFMOVFP)
p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_F0
s.AddrScratch(&p.To)
p = gc.Prog(x86.AFMOVF)
p = s.Prog(x86.AFMOVF)
s.AddrScratch(&p.From)
p.To.Type = obj.TYPE_REG
p.To.Reg = x86.REG_F0
......@@ -250,7 +250,7 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) {
return
}
// Load+push the value we need.
p := gc.Prog(loadPush(v.Type))
p := s.Prog(loadPush(v.Type))
gc.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = x86.REG_F0
......@@ -270,7 +270,7 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) {
case 8:
op = x86.AFMOVDP
}
p := gc.Prog(op)
p := s.Prog(op)
p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_F0
gc.AddrAuto(&p.To, v)
......@@ -293,7 +293,7 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) {
// push pushes v onto the floating-point stack. v must be in a register.
func push(s *gc.SSAGenState, v *ssa.Value) {
p := gc.Prog(x86.AFMOVD)
p := s.Prog(x86.AFMOVD)
p.From.Type = obj.TYPE_REG
p.From.Reg = s.SSEto387[v.Reg()]
p.To.Type = obj.TYPE_REG
......@@ -306,7 +306,7 @@ func popAndSave(s *gc.SSAGenState, v *ssa.Value) {
r := v.Reg()
if _, ok := s.SSEto387[r]; ok {
// Pop value, write to correct register.
p := gc.Prog(x86.AFMOVDP)
p := s.Prog(x86.AFMOVDP)
p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_F0
p.To.Type = obj.TYPE_REG
......@@ -333,7 +333,7 @@ func loadPush(t ssa.Type) obj.As {
// flush387 removes all entries from the 387 floating-point stack.
func flush387(s *gc.SSAGenState) {
for k := range s.SSEto387 {
p := gc.Prog(x86.AFMOVDP)
p := s.Prog(x86.AFMOVDP)
p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_F0
p.To.Type = obj.TYPE_REG
......
......@@ -104,8 +104,8 @@ func moveByType(t ssa.Type) obj.As {
// dest := dest(To) op src(From)
// and also returns the created obj.Prog so it
// may be further adjusted (offset, scale, etc).
func opregreg(op obj.As, dest, src int16) *obj.Prog {
p := gc.Prog(op)
func opregreg(s *gc.SSAGenState, op obj.As, dest, src int16) *obj.Prog {
p := s.Prog(op)
p.From.Type = obj.TYPE_REG
p.To.Type = obj.TYPE_REG
p.To.Reg = dest
......@@ -121,19 +121,19 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r2 := v.Args[1].Reg()
switch {
case r == r1:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = r2
p.To.Type = obj.TYPE_REG
p.To.Reg = r
case r == r2:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = r1
p.To.Type = obj.TYPE_REG
p.To.Reg = r
default:
p := gc.Prog(x86.ALEAL)
p := s.Prog(x86.ALEAL)
p.From.Type = obj.TYPE_MEM
p.From.Reg = r1
p.From.Scale = 1
......@@ -160,7 +160,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
opregreg(v.Op.Asm(), r, v.Args[1].Reg())
opregreg(s, v.Op.Asm(), r, v.Args[1].Reg())
case ssa.Op386ADDLcarry, ssa.Op386SUBLcarry:
// output 0 is carry/borrow, output 1 is the low 32 bits.
......@@ -168,7 +168,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output[0] not in same register %s", v.LongString())
}
opregreg(v.Op.Asm(), r, v.Args[1].Reg())
opregreg(s, v.Op.Asm(), r, v.Args[1].Reg())
case ssa.Op386ADDLconstcarry, ssa.Op386SUBLconstcarry:
// output 0 is carry/borrow, output 1 is the low 32 bits.
......@@ -176,7 +176,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output[0] not in same register %s", v.LongString())
}
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
......@@ -200,14 +200,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
var c *obj.Prog
switch v.Op {
case ssa.Op386DIVL, ssa.Op386MODL:
c = gc.Prog(x86.ACMPL)
j = gc.Prog(x86.AJEQ)
gc.Prog(x86.ACDQ) //TODO: fix
c = s.Prog(x86.ACMPL)
j = s.Prog(x86.AJEQ)
s.Prog(x86.ACDQ) //TODO: fix
case ssa.Op386DIVW, ssa.Op386MODW:
c = gc.Prog(x86.ACMPW)
j = gc.Prog(x86.AJEQ)
gc.Prog(x86.ACWD)
c = s.Prog(x86.ACMPW)
j = s.Prog(x86.AJEQ)
s.Prog(x86.ACWD)
}
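// CDQ/CWD sign-extend AX into DX ahead of the signed divide;
// c and j compare the divisor against -1 and branch to the
// fixup sequence emitted below.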
c.From.Type = obj.TYPE_REG
c.From.Reg = x
......@@ -221,31 +221,31 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// signed ints were sign extended above
if v.Op == ssa.Op386DIVLU || v.Op == ssa.Op386MODLU ||
v.Op == ssa.Op386DIVWU || v.Op == ssa.Op386MODWU {
c := gc.Prog(x86.AXORL)
c := s.Prog(x86.AXORL)
c.From.Type = obj.TYPE_REG
c.From.Reg = x86.REG_DX
c.To.Type = obj.TYPE_REG
c.To.Reg = x86.REG_DX
}
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = x
// signed division, rest of the check for -1 case
if j != nil {
j2 := gc.Prog(obj.AJMP)
j2 := s.Prog(obj.AJMP)
j2.To.Type = obj.TYPE_BRANCH
var n *obj.Prog
if v.Op == ssa.Op386DIVL || v.Op == ssa.Op386DIVW {
// n / -1 = -n
n = gc.Prog(x86.ANEGL)
n = s.Prog(x86.ANEGL)
n.To.Type = obj.TYPE_REG
n.To.Reg = x86.REG_AX
} else {
// n % -1 == 0
n = gc.Prog(x86.AXORL)
n = s.Prog(x86.AXORL)
n.From.Type = obj.TYPE_REG
n.From.Reg = x86.REG_DX
n.To.Type = obj.TYPE_REG
......@@ -263,14 +263,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// Arg[0] is already in AX as it's the only register we allow
// and DX is the only output we care about (the high bits)
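// One-operand MUL/IMUL leaves the double-width product in DX:AX,
// so the high half lands in DX automatically.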
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
// IMULB puts the high portion in AH instead of DL,
// so move it to DL for consistency
if v.Type.Size() == 1 {
m := gc.Prog(x86.AMOVB)
m := s.Prog(x86.AMOVB)
m.From.Type = obj.TYPE_REG
m.From.Reg = x86.REG_AH
m.To.Type = obj.TYPE_REG
......@@ -279,7 +279,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.Op386MULLQU:
// AX * args[1], high 32 bits in DX (result[0]), low 32 bits in AX (result[1]).
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
......@@ -291,12 +291,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
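// ADDL sets the carry flag on overflow out of bit 31; RCRL $1
// rotates that carry back into the top bit, producing the full
// 33-bit sum shifted right by one (an overflow-safe unsigned
// average).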
p := gc.Prog(x86.AADDL)
p := s.Prog(x86.AADDL)
p.From.Type = obj.TYPE_REG
p.To.Type = obj.TYPE_REG
p.To.Reg = r
p.From.Reg = v.Args[1].Reg()
p = gc.Prog(x86.ARCRL)
p = s.Prog(x86.ARCRL)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 1
p.To.Type = obj.TYPE_REG
......@@ -307,25 +307,25 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
a := v.Args[0].Reg()
if r == a {
if v.AuxInt == 1 {
p := gc.Prog(x86.AINCL)
p := s.Prog(x86.AINCL)
p.To.Type = obj.TYPE_REG
p.To.Reg = r
return
}
if v.AuxInt == -1 {
p := gc.Prog(x86.ADECL)
p := s.Prog(x86.ADECL)
p.To.Type = obj.TYPE_REG
p.To.Reg = r
return
}
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
p.To.Reg = r
return
}
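// The result lands in a different register, so LEAL computes
// a+AuxInt in a single three-operand instruction without
// clobbering a.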
p := gc.Prog(x86.ALEAL)
p := s.Prog(x86.ALEAL)
p.From.Type = obj.TYPE_MEM
p.From.Reg = a
p.From.Offset = v.AuxInt
......@@ -337,7 +337,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
......@@ -362,14 +362,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
p.To.Reg = r
case ssa.Op386SBBLcarrymask:
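// SBBL r, r computes r - r - CF, leaving 0 when the carry is clear
// and -1 (all ones) when it is set: a branch-free carry mask.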
r := v.Reg()
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = r
p.To.Type = obj.TYPE_REG
......@@ -377,7 +377,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.Op386LEAL1, ssa.Op386LEAL2, ssa.Op386LEAL4, ssa.Op386LEAL8:
r := v.Args[0].Reg()
i := v.Args[1].Reg()
p := gc.Prog(x86.ALEAL)
p := s.Prog(x86.ALEAL)
switch v.Op {
case ssa.Op386LEAL1:
p.From.Scale = 1
......@@ -398,7 +398,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.Op386LEAL:
p := gc.Prog(x86.ALEAL)
p := s.Prog(x86.ALEAL)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
......@@ -406,26 +406,26 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = v.Reg()
case ssa.Op386CMPL, ssa.Op386CMPW, ssa.Op386CMPB,
ssa.Op386TESTL, ssa.Op386TESTW, ssa.Op386TESTB:
opregreg(v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg())
opregreg(s, v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg())
case ssa.Op386UCOMISS, ssa.Op386UCOMISD:
// Go assembler has swapped operands for UCOMISx relative to CMP,
// must account for that right here.
opregreg(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg())
opregreg(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg())
case ssa.Op386CMPLconst, ssa.Op386CMPWconst, ssa.Op386CMPBconst:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_CONST
p.To.Offset = v.AuxInt
case ssa.Op386TESTLconst, ssa.Op386TESTWconst, ssa.Op386TESTBconst:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Args[0].Reg()
case ssa.Op386MOVLconst:
x := v.Reg()
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
......@@ -437,7 +437,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
}
case ssa.Op386MOVSSconst, ssa.Op386MOVSDconst:
x := v.Reg()
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_FCONST
p.From.Val = math.Float64frombits(uint64(v.AuxInt))
p.To.Type = obj.TYPE_REG
......@@ -449,7 +449,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
} else {
literal = fmt.Sprintf("$f32.%08x", math.Float32bits(float32(math.Float64frombits(uint64(v.AuxInt)))))
}
p := gc.Prog(x86.ALEAL)
p := s.Prog(x86.ALEAL)
p.From.Type = obj.TYPE_MEM
p.From.Name = obj.NAME_EXTERN
p.From.Sym = obj.Linklookup(gc.Ctxt, literal, 0)
......@@ -457,21 +457,21 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.Op386MOVSSconst2, ssa.Op386MOVSDconst2:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.Op386MOVSSload, ssa.Op386MOVSDload, ssa.Op386MOVLload, ssa.Op386MOVWload, ssa.Op386MOVBload, ssa.Op386MOVBLSXload, ssa.Op386MOVWLSXload:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.Op386MOVSDloadidx8:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
......@@ -480,7 +480,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.Op386MOVLloadidx4, ssa.Op386MOVSSloadidx4:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
......@@ -489,7 +489,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.Op386MOVWloadidx2:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
......@@ -503,7 +503,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if i == x86.REG_SP {
r, i = i, r
}
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = r
p.From.Scale = 1
......@@ -512,14 +512,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.Op386MOVSSstore, ssa.Op386MOVSDstore, ssa.Op386MOVLstore, ssa.Op386MOVWstore, ssa.Op386MOVBstore:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
case ssa.Op386MOVSDstoreidx8:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_MEM
......@@ -528,7 +528,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Index = v.Args[1].Reg()
gc.AddAux(&p.To, v)
case ssa.Op386MOVSSstoreidx4, ssa.Op386MOVLstoreidx4:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_MEM
......@@ -537,7 +537,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Index = v.Args[1].Reg()
gc.AddAux(&p.To, v)
case ssa.Op386MOVWstoreidx2:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_MEM
......@@ -551,7 +551,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if i == x86.REG_SP {
r, i = i, r
}
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_MEM
......@@ -560,7 +560,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Index = i
gc.AddAux(&p.To, v)
case ssa.Op386MOVLstoreconst, ssa.Op386MOVWstoreconst, ssa.Op386MOVBstoreconst:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
sc := v.AuxValAndOff()
p.From.Offset = sc.Val()
......@@ -568,7 +568,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = v.Args[0].Reg()
gc.AddAux2(&p.To, v, sc.Off())
case ssa.Op386MOVLstoreconstidx1, ssa.Op386MOVLstoreconstidx4, ssa.Op386MOVWstoreconstidx1, ssa.Op386MOVWstoreconstidx2, ssa.Op386MOVBstoreconstidx1:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
sc := v.AuxValAndOff()
p.From.Offset = sc.Val()
......@@ -593,14 +593,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.Op386CVTSL2SS, ssa.Op386CVTSL2SD,
ssa.Op386CVTTSS2SL, ssa.Op386CVTTSD2SL,
ssa.Op386CVTSS2SD, ssa.Op386CVTSD2SS:
opregreg(v.Op.Asm(), v.Reg(), v.Args[0].Reg())
opregreg(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg())
case ssa.Op386DUFFZERO:
p := gc.Prog(obj.ADUFFZERO)
p := s.Prog(obj.ADUFFZERO)
p.To.Type = obj.TYPE_ADDR
p.To.Sym = gc.Duffzero
p.To.Offset = v.AuxInt
case ssa.Op386DUFFCOPY:
p := gc.Prog(obj.ADUFFCOPY)
p := s.Prog(obj.ADUFFCOPY)
p.To.Type = obj.TYPE_ADDR
p.To.Sym = gc.Duffcopy
p.To.Offset = v.AuxInt
......@@ -612,14 +612,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
x := v.Args[0].Reg()
y := v.Reg()
if x != y {
opregreg(moveByType(v.Type), y, x)
opregreg(s, moveByType(v.Type), y, x)
}
case ssa.OpLoadReg:
if v.Type.IsFlags() {
v.Fatalf("load flags not implemented: %v", v.LongString())
return
}
p := gc.Prog(loadByType(v.Type))
p := s.Prog(loadByType(v.Type))
gc.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
......@@ -629,7 +629,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
v.Fatalf("store flags not implemented: %v", v.LongString())
return
}
p := gc.Prog(storeByType(v.Type))
p := s.Prog(storeByType(v.Type))
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
gc.AddrAuto(&p.To, v)
......@@ -642,7 +642,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// near CanUse1InsnTLS for a detailed explanation of these instructions.
if x86.CanUse1InsnTLS(gc.Ctxt) {
// MOVL (TLS), r
p := gc.Prog(x86.AMOVL)
p := s.Prog(x86.AMOVL)
p.From.Type = obj.TYPE_MEM
p.From.Reg = x86.REG_TLS
p.To.Type = obj.TYPE_REG
......@@ -650,12 +650,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
} else {
// MOVL TLS, r
// MOVL (r)(TLS*1), r
p := gc.Prog(x86.AMOVL)
p := s.Prog(x86.AMOVL)
p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_TLS
p.To.Type = obj.TYPE_REG
p.To.Reg = r
q := gc.Prog(x86.AMOVL)
q := s.Prog(x86.AMOVL)
q.From.Type = obj.TYPE_MEM
q.From.Reg = r
q.From.Index = x86.REG_TLS
......@@ -672,13 +672,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.To.Type = obj.TYPE_REG
p.To.Reg = r
case ssa.Op386BSFL, ssa.Op386BSFW,
ssa.Op386BSRL, ssa.Op386BSRW,
ssa.Op386SQRTSD:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
......@@ -690,38 +690,38 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.Op386SETB, ssa.Op386SETBE,
ssa.Op386SETORD, ssa.Op386SETNAN,
ssa.Op386SETA, ssa.Op386SETAE:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.Op386SETNEF:
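// Unordered (NaN) float compares set the parity flag. x != y must
// report true for NaN, so the SETNE result is ORed with SETPS;
// the SETEQF case below ANDs with SETPC so x == y reports false.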
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
q := gc.Prog(x86.ASETPS)
q := s.Prog(x86.ASETPS)
q.To.Type = obj.TYPE_REG
q.To.Reg = x86.REG_AX
opregreg(x86.AORL, v.Reg(), x86.REG_AX)
opregreg(s, x86.AORL, v.Reg(), x86.REG_AX)
case ssa.Op386SETEQF:
p := gc.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
q := gc.Prog(x86.ASETPC)
q := s.Prog(x86.ASETPC)
q.To.Type = obj.TYPE_REG
q.To.Reg = x86.REG_AX
opregreg(x86.AANDL, v.Reg(), x86.REG_AX)
opregreg(s, x86.AANDL, v.Reg(), x86.REG_AX)
case ssa.Op386InvertFlags:
v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
case ssa.Op386FlagEQ, ssa.Op386FlagLT_ULT, ssa.Op386FlagLT_UGT, ssa.Op386FlagGT_ULT, ssa.Op386FlagGT_UGT:
v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
case ssa.Op386REPSTOSL:
gc.Prog(x86.AREP)
gc.Prog(x86.ASTOSL)
s.Prog(x86.AREP)
s.Prog(x86.ASTOSL)
case ssa.Op386REPMOVSL:
gc.Prog(x86.AREP)
gc.Prog(x86.AMOVSL)
s.Prog(x86.AREP)
s.Prog(x86.AMOVSL)
case ssa.Op386LoweredNilCheck:
// Issue a load which will fault if the input is nil.
// TODO: We currently use the 2-byte instruction TESTB AX, (reg).
......@@ -729,7 +729,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// but it doesn't have false dependency on AX.
// Or maybe allocate an output register and use MOVL (reg),reg2 ?
// That trades clobbering flags for clobbering a register.
p := gc.Prog(x86.ATESTB)
p := s.Prog(x86.ATESTB)
p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_AX
p.To.Type = obj.TYPE_MEM
......@@ -775,7 +775,7 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
switch b.Kind {
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := gc.Prog(obj.AJMP)
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
......@@ -783,25 +783,25 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
// defer returns in rax:
// 0 if we should continue executing
// 1 if we should jump to deferreturn call
p := gc.Prog(x86.ATESTL)
p := s.Prog(x86.ATESTL)
p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_AX
p.To.Type = obj.TYPE_REG
p.To.Reg = x86.REG_AX
p = gc.Prog(x86.AJNE)
p = s.Prog(x86.AJNE)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
p := gc.Prog(obj.AJMP)
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:
gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here
s.Prog(obj.AUNDEF) // tell plive.go that we never reach here
case ssa.BlockRet:
gc.Prog(obj.ARET)
s.Prog(obj.ARET)
case ssa.BlockRetJmp:
p := gc.Prog(obj.AJMP)
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = b.Aux.(*obj.LSym)
......@@ -822,19 +822,19 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
var p *obj.Prog
switch next {
case b.Succs[0].Block():
p = gc.Prog(jmp.invasm)
p = s.Prog(jmp.invasm)
likely *= -1
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
case b.Succs[1].Block():
p = gc.Prog(jmp.asm)
p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
default:
p = gc.Prog(jmp.asm)
p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
q := gc.Prog(obj.AJMP)
q := s.Prog(obj.AJMP)
q.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
}
......