Commit c4135d61 authored by Josh Bleecher Snyder

cmd/internal/obj/mips: make assembler almost concurrency-safe

CL 39922 made the arm assembler concurrency-safe.
This CL does the same, but for mips.
The approach is similar: introduce ctxt0 to hold
function-local state and thread it through
the assembler as necessary.

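For illustration, the pattern looks roughly like this (a minimal, self-contained sketch, not code from this CL; Link, ctx, and assemble are hypothetical stand-ins for obj.Link, ctxt0, and span0). Each function gets a fresh ctx, so goroutines share only the read-mostly Link:

	package main

	import (
		"fmt"
		"sync"
	)

	// Link stands in for *obj.Link: shared, read-mostly configuration.
	type Link struct{ arch string }

	// ctx stands in for ctxt0: per-function assembly state.
	type ctx struct {
		link *Link
		pc   int64
	}

	// assemble stands in for span0: it mutates only its own ctx.
	func (c *ctx) assemble(name string, ninsns int) string {
		for i := 0; i < ninsns; i++ {
			c.pc += 4 // fixed-width instructions
		}
		return fmt.Sprintf("%s %s: size=%d", c.link.arch, name, c.pc)
	}

	func main() {
		link := &Link{arch: "mips64"}
		funcs := []string{"f", "g", "h"}
		out := make([]string, len(funcs))
		var wg sync.WaitGroup
		for i, name := range funcs {
			wg.Add(1)
			go func(i int, name string) {
				defer wg.Done()
				c := ctx{link: link} // fresh per-function state, like ctxt0
				out[i] = c.assemble(name, i+1)
			}(i, name)
		}
		wg.Wait()
		for _, s := range out {
			fmt.Println(s)
		}
	}

The same idea drives the diff below: functions that previously took ctxt *obj.Link become methods on *ctxt0, and mutable fields such as Instoffset and Autosize move from obj.Link onto ctxt0.
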
One race remains after this CL, similar to CL 40252.

That race is conceptually unrelated to this refactoring,
and will be addressed in a separate CL.

Passes toolstash-check -all.

Updates #15756

Change-Id: I2c54a889aa448a4476c9a75da4dd94ef69657b16
Reviewed-on: https://go-review.googlesource.com/40370
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Cherry Zhang <cherryyz@google.com>
parent 69261eca
......@@ -37,6 +37,18 @@ import (
"sort"
)
// ctxt0 holds state while assembling a single function.
// Each function gets a fresh ctxt0.
// This allows for multiple functions to be safely concurrently assembled.
type ctxt0 struct {
ctxt *obj.Link
newprog obj.ProgAlloc
cursym *obj.LSym
autosize int32
instoffset int64
pc int64
}
// Instruction layout.
const (
......@@ -378,11 +390,11 @@ func span0(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
if p == nil || p.Link == nil { // handle external functions and ELF section symbols
return
}
ctxt.Cursym = cursym
ctxt.Autosize = int32(p.To.Offset + ctxt.FixedFrameSize())
c := ctxt0{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset + ctxt.FixedFrameSize())}
if oprange[AOR&obj.AMask] == nil {
ctxt.Diag("mips ops not initialized, call mips.buildop first")
c.ctxt.Diag("mips ops not initialized, call mips.buildop first")
}
pc := int64(0)
......@@ -392,11 +404,11 @@ func span0(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
var o *Optab
for p = p.Link; p != nil; p = p.Link {
p.Pc = pc
o = oplook(ctxt, p)
o = c.oplook(p)
m = int(o.size)
if m == 0 {
if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
ctxt.Diag("zero-width instruction\n%v", p)
c.ctxt.Diag("zero-width instruction\n%v", p)
}
continue
}
......@@ -404,7 +416,7 @@ func span0(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
pc += int64(m)
}
cursym.Size = pc
c.cursym.Size = pc
/*
* if any procedure is large enough to
......@@ -419,15 +431,15 @@ func span0(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
for bflag != 0 {
bflag = 0
pc = 0
for p = cursym.Text.Link; p != nil; p = p.Link {
for p = c.cursym.Text.Link; p != nil; p = p.Link {
p.Pc = pc
o = oplook(ctxt, p)
o = c.oplook(p)
// very large conditional branches
if o.type_ == 6 && p.Pcond != nil {
otxt = p.Pcond.Pc - pc
if otxt < -(1<<17)+10 || otxt >= (1<<17)-10 {
q = newprog()
q = c.newprog()
q.Link = p.Link
p.Link = q
q.As = AJMP
......@@ -435,7 +447,7 @@ func span0(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
q.To.Type = obj.TYPE_BRANCH
q.Pcond = p.Pcond
p.Pcond = q
q = newprog()
q = c.newprog()
q.Link = p.Link
p.Link = q
q.As = AJMP
......@@ -443,8 +455,8 @@ func span0(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
q.To.Type = obj.TYPE_BRANCH
q.Pcond = q.Link.Link
addnop(ctxt, p.Link, newprog)
addnop(ctxt, p, newprog)
c.addnop(p.Link)
c.addnop(p)
bflag = 1
}
}
......@@ -452,7 +464,7 @@ func span0(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
m = int(o.size)
if m == 0 {
if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
ctxt.Diag("zero-width instruction\n%v", p)
c.ctxt.Diag("zero-width instruction\n%v", p)
}
continue
}
......@@ -460,31 +472,31 @@ func span0(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
pc += int64(m)
}
cursym.Size = pc
c.cursym.Size = pc
}
if ctxt.Arch.Family == sys.MIPS64 {
if c.ctxt.Arch.Family == sys.MIPS64 {
pc += -pc & (mips64FuncAlign - 1)
}
cursym.Size = pc
c.cursym.Size = pc
/*
* lay out the code, emitting code and data relocations.
*/
cursym.Grow(cursym.Size)
c.cursym.Grow(c.cursym.Size)
bp := cursym.P
bp := c.cursym.P
var i int32
var out [4]uint32
for p := cursym.Text.Link; p != nil; p = p.Link {
ctxt.Pc = p.Pc
o = oplook(ctxt, p)
for p := c.cursym.Text.Link; p != nil; p = p.Link {
c.pc = p.Pc
o = c.oplook(p)
if int(o.size) > 4*len(out) {
log.Fatalf("out array in span0 is too small, need at least %d for %v", o.size/4, p)
}
asmout(ctxt, p, o, out[:])
c.asmout(p, o, out[:])
for i = 0; i < int32(o.size/4); i++ {
ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
bp = bp[4:]
}
}
......@@ -498,7 +510,7 @@ func isuint32(v uint64) bool {
return uint64(uint32(v)) == v
}
func aclass(ctxt *obj.Link, a *obj.Addr) int {
func (c *ctxt0) aclass(a *obj.Addr) int {
switch a.Type {
case obj.TYPE_NONE:
return C_NONE
......@@ -531,7 +543,7 @@ func aclass(ctxt *obj.Link, a *obj.Addr) int {
if a.Sym == nil {
break
}
ctxt.Instoffset = a.Offset
c.instoffset = a.Offset
if a.Sym != nil { // use relocation
if a.Sym.Type == obj.STLSBSS {
return C_TLS
......@@ -541,25 +553,25 @@ func aclass(ctxt *obj.Link, a *obj.Addr) int {
return C_LEXT
case obj.NAME_AUTO:
ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset
if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
c.instoffset = int64(c.autosize) + a.Offset
if c.instoffset >= -BIG && c.instoffset < BIG {
return C_SAUTO
}
return C_LAUTO
case obj.NAME_PARAM:
ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset + ctxt.FixedFrameSize()
if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize()
if c.instoffset >= -BIG && c.instoffset < BIG {
return C_SAUTO
}
return C_LAUTO
case obj.NAME_NONE:
ctxt.Instoffset = a.Offset
if ctxt.Instoffset == 0 {
c.instoffset = a.Offset
if c.instoffset == 0 {
return C_ZOREG
}
if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
if c.instoffset >= -BIG && c.instoffset < BIG {
return C_SOREG
}
return C_LOREG
......@@ -574,12 +586,12 @@ func aclass(ctxt *obj.Link, a *obj.Addr) int {
obj.TYPE_ADDR:
switch a.Name {
case obj.NAME_NONE:
ctxt.Instoffset = a.Offset
c.instoffset = a.Offset
if a.Reg != 0 {
if -BIG <= ctxt.Instoffset && ctxt.Instoffset <= BIG {
if -BIG <= c.instoffset && c.instoffset <= BIG {
return C_SACON
}
if isint32(ctxt.Instoffset) {
if isint32(c.instoffset) {
return C_LACON
}
return C_DACON
......@@ -594,26 +606,26 @@ func aclass(ctxt *obj.Link, a *obj.Addr) int {
break
}
if s.Type == obj.SCONST {
ctxt.Instoffset = a.Offset
c.instoffset = a.Offset
goto consize
}
ctxt.Instoffset = a.Offset
c.instoffset = a.Offset
if s.Type == obj.STLSBSS {
return C_STCON // address of TLS variable
}
return C_LECON
case obj.NAME_AUTO:
ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset
if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
c.instoffset = int64(c.autosize) + a.Offset
if c.instoffset >= -BIG && c.instoffset < BIG {
return C_SACON
}
return C_LACON
case obj.NAME_PARAM:
ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset + ctxt.FixedFrameSize()
if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize()
if c.instoffset >= -BIG && c.instoffset < BIG {
return C_SACON
}
return C_LACON
......@@ -622,32 +634,32 @@ func aclass(ctxt *obj.Link, a *obj.Addr) int {
return C_GOK
consize:
if ctxt.Instoffset >= 0 {
if ctxt.Instoffset == 0 {
if c.instoffset >= 0 {
if c.instoffset == 0 {
return C_ZCON
}
if ctxt.Instoffset <= 0x7fff {
if c.instoffset <= 0x7fff {
return C_SCON
}
if ctxt.Instoffset <= 0xffff {
if c.instoffset <= 0xffff {
return C_ANDCON
}
if ctxt.Instoffset&0xffff == 0 && isuint32(uint64(ctxt.Instoffset)) { /* && (instoffset & (1<<31)) == 0) */
if c.instoffset&0xffff == 0 && isuint32(uint64(c.instoffset)) { /* && (instoffset & (1<<31)) == 0) */
return C_UCON
}
if isint32(ctxt.Instoffset) || isuint32(uint64(ctxt.Instoffset)) {
if isint32(c.instoffset) || isuint32(uint64(c.instoffset)) {
return C_LCON
}
return C_LCON // C_DCON
}
if ctxt.Instoffset >= -0x8000 {
if c.instoffset >= -0x8000 {
return C_ADDCON
}
if ctxt.Instoffset&0xffff == 0 && isint32(ctxt.Instoffset) {
if c.instoffset&0xffff == 0 && isint32(c.instoffset) {
return C_UCON
}
if isint32(ctxt.Instoffset) {
if isint32(c.instoffset) {
return C_LCON
}
return C_LCON // C_DCON
......@@ -663,9 +675,9 @@ func prasm(p *obj.Prog) {
fmt.Printf("%v\n", p)
}
func oplook(ctxt *obj.Link, p *obj.Prog) *Optab {
func (c *ctxt0) oplook(p *obj.Prog) *Optab {
if oprange[AOR&obj.AMask] == nil {
ctxt.Diag("mips ops not initialized, call mips.buildop first")
c.ctxt.Diag("mips ops not initialized, call mips.buildop first")
}
a1 := int(p.Optab)
......@@ -674,14 +686,14 @@ func oplook(ctxt *obj.Link, p *obj.Prog) *Optab {
}
a1 = int(p.From.Class)
if a1 == 0 {
a1 = aclass(ctxt, &p.From) + 1
a1 = c.aclass(&p.From) + 1
p.From.Class = int8(a1)
}
a1--
a3 := int(p.To.Class)
if a3 == 0 {
a3 = aclass(ctxt, &p.To) + 1
a3 = c.aclass(&p.To) + 1
p.To.Class = int8(a3)
}
......@@ -698,13 +710,13 @@ func oplook(ctxt *obj.Link, p *obj.Prog) *Optab {
c3 := &xcmp[a3]
for i := range ops {
op := &ops[i]
if int(op.a2) == a2 && c1[op.a1] && c3[op.a3] && (op.family == 0 || ctxt.Arch.Family == op.family) {
if int(op.a2) == a2 && c1[op.a1] && c3[op.a3] && (op.family == 0 || c.ctxt.Arch.Family == op.family) {
p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
return op
}
}
ctxt.Diag("illegal combination %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3))
c.ctxt.Diag("illegal combination %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3))
prasm(p)
if ops == nil {
ops = optab
......@@ -1063,7 +1075,7 @@ func OP_JMP(op uint32, i uint32) uint32 {
return op | i&0x3FFFFFF
}
func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) {
o1 := uint32(0)
o2 := uint32(0)
o3 := uint32(0)
......@@ -1071,12 +1083,12 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
add := AADDU
if ctxt.Arch.Family == sys.MIPS64 {
if c.ctxt.Arch.Family == sys.MIPS64 {
add = AADDVU
}
switch o.type_ {
default:
ctxt.Diag("unknown type %d %v", o.type_)
c.ctxt.Diag("unknown type %d %v", o.type_)
prasm(p)
case 0: /* pseudo ops */
......@@ -1084,10 +1096,10 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
case 1: /* mov r1,r2 ==> OR r1,r0,r2 */
a := AOR
if p.As == AMOVW && ctxt.Arch.Family == sys.MIPS64 {
if p.As == AMOVW && c.ctxt.Arch.Family == sys.MIPS64 {
a = AADDU // sign-extended to high 32 bits
}
o1 = OP_RRR(oprrr(ctxt, a), uint32(REGZERO), uint32(p.From.Reg), uint32(p.To.Reg))
o1 = OP_RRR(c.oprrr(a), uint32(REGZERO), uint32(p.From.Reg), uint32(p.To.Reg))
case 2: /* add/sub r1,[r2],r3 */
r := int(p.Reg)
......@@ -1095,10 +1107,10 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
if r == 0 {
r = int(p.To.Reg)
}
o1 = OP_RRR(oprrr(ctxt, p.As), uint32(p.From.Reg), uint32(r), uint32(p.To.Reg))
o1 = OP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(r), uint32(p.To.Reg))
case 3: /* mov $soreg, r ==> or/add $i,o,r */
v := regoff(ctxt, &p.From)
v := c.regoff(&p.From)
r := int(p.From.Reg)
if r == 0 {
......@@ -1109,20 +1121,20 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
a = AOR
}
o1 = OP_IRR(opirr(ctxt, a), uint32(v), uint32(r), uint32(p.To.Reg))
o1 = OP_IRR(c.opirr(a), uint32(v), uint32(r), uint32(p.To.Reg))
case 4: /* add $scon,[r1],r2 */
v := regoff(ctxt, &p.From)
v := c.regoff(&p.From)
r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
o1 = OP_IRR(opirr(ctxt, p.As), uint32(v), uint32(r), uint32(p.To.Reg))
o1 = OP_IRR(c.opirr(p.As), uint32(v), uint32(r), uint32(p.To.Reg))
case 5: /* syscall */
o1 = oprrr(ctxt, p.As)
o1 = c.oprrr(p.As)
case 6: /* beq r1,[r2],sbra */
v := int32(0)
......@@ -1132,9 +1144,9 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
v = int32(p.Pcond.Pc-p.Pc-4) >> 2
}
if (v<<16)>>16 != v {
ctxt.Diag("short branch too far\n%v", p)
c.ctxt.Diag("short branch too far\n%v", p)
}
o1 = OP_IRR(opirr(ctxt, p.As), uint32(v), uint32(p.From.Reg), uint32(p.Reg))
o1 = OP_IRR(c.opirr(p.As), uint32(v), uint32(p.From.Reg), uint32(p.Reg))
// for ABFPT and ABFPF only: always fill delay slot with 0
// see comments in func preprocess for details.
o2 = 0
......@@ -1144,16 +1156,16 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
if r == 0 {
r = int(o.param)
}
v := regoff(ctxt, &p.To)
o1 = OP_IRR(opirr(ctxt, p.As), uint32(v), uint32(r), uint32(p.From.Reg))
v := c.regoff(&p.To)
o1 = OP_IRR(c.opirr(p.As), uint32(v), uint32(r), uint32(p.From.Reg))
case 8: /* mov soreg, r ==> lw o(r) */
r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
v := regoff(ctxt, &p.From)
o1 = OP_IRR(opirr(ctxt, -p.As), uint32(v), uint32(r), uint32(p.To.Reg))
v := c.regoff(&p.From)
o1 = OP_IRR(c.opirr(-p.As), uint32(v), uint32(r), uint32(p.To.Reg))
case 9: /* sll r1,[r2],r3 */
r := int(p.Reg)
......@@ -1161,24 +1173,24 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
if r == 0 {
r = int(p.To.Reg)
}
o1 = OP_RRR(oprrr(ctxt, p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
o1 = OP_RRR(c.oprrr(p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
case 10: /* add $con,[r1],r2 ==> mov $con, t; add t,[r1],r2 */
v := regoff(ctxt, &p.From)
v := c.regoff(&p.From)
a := AOR
if v < 0 {
a = AADDU
}
o1 = OP_IRR(opirr(ctxt, a), uint32(v), uint32(0), uint32(REGTMP))
o1 = OP_IRR(c.opirr(a), uint32(v), uint32(0), uint32(REGTMP))
r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
o2 = OP_RRR(oprrr(ctxt, p.As), uint32(REGTMP), uint32(r), uint32(p.To.Reg))
o2 = OP_RRR(c.oprrr(p.As), uint32(REGTMP), uint32(r), uint32(p.To.Reg))
case 11: /* jmp lbra */
v := int32(0)
if aclass(ctxt, &p.To) == C_SBRA && p.To.Sym == nil && p.As == AJMP {
if c.aclass(&p.To) == C_SBRA && p.To.Sym == nil && p.As == AJMP {
// use PC-relative branch for short branches
// BEQ R0, R0, sbra
if p.Pcond == nil {
......@@ -1187,7 +1199,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
v = int32(p.Pcond.Pc-p.Pc-4) >> 2
}
if (v<<16)>>16 == v {
o1 = OP_IRR(opirr(ctxt, ABEQ), uint32(v), uint32(REGZERO), uint32(REGZERO))
o1 = OP_IRR(c.opirr(ABEQ), uint32(v), uint32(REGZERO), uint32(REGZERO))
break
}
}
......@@ -1196,13 +1208,13 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
} else {
v = int32(p.Pcond.Pc) >> 2
}
o1 = OP_JMP(opirr(ctxt, p.As), uint32(v))
o1 = OP_JMP(c.opirr(p.As), uint32(v))
if p.To.Sym == nil {
p.To.Sym = ctxt.Cursym.Text.From.Sym
p.To.Sym = c.cursym.Text.From.Sym
p.To.Offset = p.Pcond.Pc
}
rel := obj.Addrel(ctxt.Cursym)
rel.Off = int32(ctxt.Pc)
rel := obj.Addrel(c.cursym)
rel.Off = int32(c.pc)
rel.Siz = 4
rel.Sym = p.To.Sym
rel.Add = p.To.Offset
......@@ -1217,31 +1229,31 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
if p.As == AMOVB {
v = 24
}
o1 = OP_SRR(opirr(ctxt, ASLL), uint32(v), uint32(p.From.Reg), uint32(p.To.Reg))
o2 = OP_SRR(opirr(ctxt, ASRA), uint32(v), uint32(p.To.Reg), uint32(p.To.Reg))
o1 = OP_SRR(c.opirr(ASLL), uint32(v), uint32(p.From.Reg), uint32(p.To.Reg))
o2 = OP_SRR(c.opirr(ASRA), uint32(v), uint32(p.To.Reg), uint32(p.To.Reg))
case 13: /* movbu r,r */
if p.As == AMOVBU {
o1 = OP_IRR(opirr(ctxt, AAND), uint32(0xff), uint32(p.From.Reg), uint32(p.To.Reg))
o1 = OP_IRR(c.opirr(AAND), uint32(0xff), uint32(p.From.Reg), uint32(p.To.Reg))
} else {
o1 = OP_IRR(opirr(ctxt, AAND), uint32(0xffff), uint32(p.From.Reg), uint32(p.To.Reg))
o1 = OP_IRR(c.opirr(AAND), uint32(0xffff), uint32(p.From.Reg), uint32(p.To.Reg))
}
case 14: /* movwu r,r */
o1 = OP_SRR(opirr(ctxt, -ASLLV), uint32(0), uint32(p.From.Reg), uint32(p.To.Reg))
o2 = OP_SRR(opirr(ctxt, -ASRLV), uint32(0), uint32(p.To.Reg), uint32(p.To.Reg))
o1 = OP_SRR(c.opirr(-ASLLV), uint32(0), uint32(p.From.Reg), uint32(p.To.Reg))
o2 = OP_SRR(c.opirr(-ASRLV), uint32(0), uint32(p.To.Reg), uint32(p.To.Reg))
case 15: /* teq $c r,r */
v := regoff(ctxt, &p.From)
v := c.regoff(&p.From)
r := int(p.Reg)
if r == 0 {
r = REGZERO
}
/* only use 10 bits of trap code */
o1 = OP_IRR(opirr(ctxt, p.As), (uint32(v)&0x3FF)<<6, uint32(p.Reg), uint32(p.To.Reg))
o1 = OP_IRR(c.opirr(p.As), (uint32(v)&0x3FF)<<6, uint32(p.Reg), uint32(p.To.Reg))
case 16: /* sll $c,[r1],r2 */
v := regoff(ctxt, &p.From)
v := c.regoff(&p.From)
r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
......@@ -1249,29 +1261,29 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
/* OP_SRR will use only the low 5 bits of the shift value */
if v >= 32 && vshift(p.As) {
o1 = OP_SRR(opirr(ctxt, -p.As), uint32(v-32), uint32(r), uint32(p.To.Reg))
o1 = OP_SRR(c.opirr(-p.As), uint32(v-32), uint32(r), uint32(p.To.Reg))
} else {
o1 = OP_SRR(opirr(ctxt, p.As), uint32(v), uint32(r), uint32(p.To.Reg))
o1 = OP_SRR(c.opirr(p.As), uint32(v), uint32(r), uint32(p.To.Reg))
}
case 17:
o1 = OP_RRR(oprrr(ctxt, p.As), uint32(REGZERO), uint32(p.From.Reg), uint32(p.To.Reg))
o1 = OP_RRR(c.oprrr(p.As), uint32(REGZERO), uint32(p.From.Reg), uint32(p.To.Reg))
case 18: /* jmp [r1],0(r2) */
r := int(p.Reg)
if r == 0 {
r = int(o.param)
}
o1 = OP_RRR(oprrr(ctxt, p.As), uint32(0), uint32(p.To.Reg), uint32(r))
rel := obj.Addrel(ctxt.Cursym)
rel.Off = int32(ctxt.Pc)
o1 = OP_RRR(c.oprrr(p.As), uint32(0), uint32(p.To.Reg), uint32(r))
rel := obj.Addrel(c.cursym)
rel.Off = int32(c.pc)
rel.Siz = 0
rel.Type = obj.R_CALLIND
case 19: /* mov $lcon,r ==> lu+or */
v := regoff(ctxt, &p.From)
o1 = OP_IRR(opirr(ctxt, ALUI), uint32(v>>16), uint32(REGZERO), uint32(p.To.Reg))
o2 = OP_IRR(opirr(ctxt, AOR), uint32(v), uint32(p.To.Reg), uint32(p.To.Reg))
v := c.regoff(&p.From)
o1 = OP_IRR(c.opirr(ALUI), uint32(v>>16), uint32(REGZERO), uint32(p.To.Reg))
o2 = OP_IRR(c.opirr(AOR), uint32(v), uint32(p.To.Reg), uint32(p.To.Reg))
case 20: /* mov lo/hi,r */
a := OP(2, 0) /* mfhi */
......@@ -1296,44 +1308,44 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
a := SP(3, 4) | 2 /* mul */
o1 = OP_RRR(a, uint32(p.From.Reg), uint32(r), uint32(p.To.Reg))
} else {
o1 = OP_RRR(oprrr(ctxt, p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(REGZERO))
o1 = OP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(REGZERO))
}
case 23: /* add $lcon,r1,r2 ==> lu+or+add */
v := regoff(ctxt, &p.From)
o1 = OP_IRR(opirr(ctxt, ALUI), uint32(v>>16), uint32(REGZERO), uint32(REGTMP))
o2 = OP_IRR(opirr(ctxt, AOR), uint32(v), uint32(REGTMP), uint32(REGTMP))
v := c.regoff(&p.From)
o1 = OP_IRR(c.opirr(ALUI), uint32(v>>16), uint32(REGZERO), uint32(REGTMP))
o2 = OP_IRR(c.opirr(AOR), uint32(v), uint32(REGTMP), uint32(REGTMP))
r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
o3 = OP_RRR(oprrr(ctxt, p.As), uint32(REGTMP), uint32(r), uint32(p.To.Reg))
o3 = OP_RRR(c.oprrr(p.As), uint32(REGTMP), uint32(r), uint32(p.To.Reg))
case 24: /* mov $ucon,r ==> lu r */
v := regoff(ctxt, &p.From)
o1 = OP_IRR(opirr(ctxt, ALUI), uint32(v>>16), uint32(REGZERO), uint32(p.To.Reg))
v := c.regoff(&p.From)
o1 = OP_IRR(c.opirr(ALUI), uint32(v>>16), uint32(REGZERO), uint32(p.To.Reg))
case 25: /* add/and $ucon,[r1],r2 ==> lu $con,t; add t,[r1],r2 */
v := regoff(ctxt, &p.From)
o1 = OP_IRR(opirr(ctxt, ALUI), uint32(v>>16), uint32(REGZERO), uint32(REGTMP))
v := c.regoff(&p.From)
o1 = OP_IRR(c.opirr(ALUI), uint32(v>>16), uint32(REGZERO), uint32(REGTMP))
r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
o2 = OP_RRR(oprrr(ctxt, p.As), uint32(REGTMP), uint32(r), uint32(p.To.Reg))
o2 = OP_RRR(c.oprrr(p.As), uint32(REGTMP), uint32(r), uint32(p.To.Reg))
case 26: /* mov $lsext/auto/oreg,r ==> lu+or+add */
v := regoff(ctxt, &p.From)
o1 = OP_IRR(opirr(ctxt, ALUI), uint32(v>>16), uint32(REGZERO), uint32(REGTMP))
o2 = OP_IRR(opirr(ctxt, AOR), uint32(v), uint32(REGTMP), uint32(REGTMP))
v := c.regoff(&p.From)
o1 = OP_IRR(c.opirr(ALUI), uint32(v>>16), uint32(REGZERO), uint32(REGTMP))
o2 = OP_IRR(c.opirr(AOR), uint32(v), uint32(REGTMP), uint32(REGTMP))
r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
o3 = OP_RRR(oprrr(ctxt, add), uint32(REGTMP), uint32(r), uint32(p.To.Reg))
o3 = OP_RRR(c.oprrr(add), uint32(REGTMP), uint32(r), uint32(p.To.Reg))
case 27: /* mov [sl]ext/auto/oreg,fr ==> lwc1 o(r) */
v := regoff(ctxt, &p.From)
v := c.regoff(&p.From)
r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
......@@ -1344,16 +1356,16 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
}
switch o.size {
case 12:
o1 = OP_IRR(opirr(ctxt, ALUI), uint32((v+1<<15)>>16), uint32(REGZERO), uint32(REGTMP))
o2 = OP_RRR(oprrr(ctxt, add), uint32(r), uint32(REGTMP), uint32(REGTMP))
o3 = OP_IRR(opirr(ctxt, a), uint32(v), uint32(REGTMP), uint32(p.To.Reg))
o1 = OP_IRR(c.opirr(ALUI), uint32((v+1<<15)>>16), uint32(REGZERO), uint32(REGTMP))
o2 = OP_RRR(c.oprrr(add), uint32(r), uint32(REGTMP), uint32(REGTMP))
o3 = OP_IRR(c.opirr(a), uint32(v), uint32(REGTMP), uint32(p.To.Reg))
case 4:
o1 = OP_IRR(opirr(ctxt, a), uint32(v), uint32(r), uint32(p.To.Reg))
o1 = OP_IRR(c.opirr(a), uint32(v), uint32(r), uint32(p.To.Reg))
}
case 28: /* mov fr,[sl]ext/auto/oreg ==> swc1 o(r) */
v := regoff(ctxt, &p.To)
v := c.regoff(&p.To)
r := int(p.To.Reg)
if r == 0 {
r = int(o.param)
......@@ -1364,12 +1376,12 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
}
switch o.size {
case 12:
o1 = OP_IRR(opirr(ctxt, ALUI), uint32((v+1<<15)>>16), uint32(REGZERO), uint32(REGTMP))
o2 = OP_RRR(oprrr(ctxt, add), uint32(r), uint32(REGTMP), uint32(REGTMP))
o3 = OP_IRR(opirr(ctxt, a), uint32(v), uint32(REGTMP), uint32(p.From.Reg))
o1 = OP_IRR(c.opirr(ALUI), uint32((v+1<<15)>>16), uint32(REGZERO), uint32(REGTMP))
o2 = OP_RRR(c.oprrr(add), uint32(r), uint32(REGTMP), uint32(REGTMP))
o3 = OP_IRR(c.opirr(a), uint32(v), uint32(REGTMP), uint32(p.From.Reg))
case 4:
o1 = OP_IRR(opirr(ctxt, a), uint32(v), uint32(r), uint32(p.From.Reg))
o1 = OP_IRR(c.opirr(a), uint32(v), uint32(r), uint32(p.From.Reg))
}
case 30: /* movw r,fr */
......@@ -1385,39 +1397,39 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
if r == 0 {
r = int(p.To.Reg)
}
o1 = OP_FRRR(oprrr(ctxt, p.As), uint32(p.From.Reg), uint32(r), uint32(p.To.Reg))
o1 = OP_FRRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(r), uint32(p.To.Reg))
case 33: /* fabs fr1, fr3 */
o1 = OP_FRRR(oprrr(ctxt, p.As), uint32(0), uint32(p.From.Reg), uint32(p.To.Reg))
o1 = OP_FRRR(c.oprrr(p.As), uint32(0), uint32(p.From.Reg), uint32(p.To.Reg))
case 34: /* mov $con,fr ==> or/add $i,t; mov t,fr */
v := regoff(ctxt, &p.From)
v := c.regoff(&p.From)
a := AADDU
if o.a1 == C_ANDCON {
a = AOR
}
o1 = OP_IRR(opirr(ctxt, a), uint32(v), uint32(0), uint32(REGTMP))
o1 = OP_IRR(c.opirr(a), uint32(v), uint32(0), uint32(REGTMP))
o2 = OP_RRR(SP(2, 1)|(4<<21), uint32(REGTMP), uint32(0), uint32(p.To.Reg)) /* mtc1 */
case 35: /* mov r,lext/auto/oreg ==> sw o(REGTMP) */
v := regoff(ctxt, &p.To)
v := c.regoff(&p.To)
r := int(p.To.Reg)
if r == 0 {
r = int(o.param)
}
o1 = OP_IRR(opirr(ctxt, ALUI), uint32((v+1<<15)>>16), uint32(REGZERO), uint32(REGTMP))
o2 = OP_RRR(oprrr(ctxt, add), uint32(r), uint32(REGTMP), uint32(REGTMP))
o3 = OP_IRR(opirr(ctxt, p.As), uint32(v), uint32(REGTMP), uint32(p.From.Reg))
o1 = OP_IRR(c.opirr(ALUI), uint32((v+1<<15)>>16), uint32(REGZERO), uint32(REGTMP))
o2 = OP_RRR(c.oprrr(add), uint32(r), uint32(REGTMP), uint32(REGTMP))
o3 = OP_IRR(c.opirr(p.As), uint32(v), uint32(REGTMP), uint32(p.From.Reg))
case 36: /* mov lext/auto/oreg,r ==> lw o(REGTMP) */
v := regoff(ctxt, &p.From)
v := c.regoff(&p.From)
r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
o1 = OP_IRR(opirr(ctxt, ALUI), uint32((v+1<<15)>>16), uint32(REGZERO), uint32(REGTMP))
o2 = OP_RRR(oprrr(ctxt, add), uint32(r), uint32(REGTMP), uint32(REGTMP))
o3 = OP_IRR(opirr(ctxt, -p.As), uint32(v), uint32(REGTMP), uint32(p.To.Reg))
o1 = OP_IRR(c.opirr(ALUI), uint32((v+1<<15)>>16), uint32(REGZERO), uint32(REGTMP))
o2 = OP_RRR(c.oprrr(add), uint32(r), uint32(REGTMP), uint32(REGTMP))
o3 = OP_IRR(c.opirr(-p.As), uint32(v), uint32(REGTMP), uint32(p.To.Reg))
case 37: /* movw r,mr */
a := SP(2, 0) | (4 << 21) /* mtc0 */
......@@ -1434,7 +1446,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o1 = OP_RRR(a, uint32(p.To.Reg), uint32(0), uint32(p.From.Reg))
case 40: /* word */
o1 = uint32(regoff(ctxt, &p.From))
o1 = uint32(c.regoff(&p.From))
case 41: /* movw f,fcr */
o1 = OP_RRR(SP(2, 1)|(2<<21), uint32(REGZERO), uint32(0), uint32(p.To.Reg)) /* mfcc1 */
......@@ -1456,16 +1468,16 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
/* relocation operations */
case 50: /* mov r,addr ==> lu + add REGSB, REGTMP + sw o(REGTMP) */
o1 = OP_IRR(opirr(ctxt, ALUI), uint32(0), uint32(REGZERO), uint32(REGTMP))
rel := obj.Addrel(ctxt.Cursym)
rel.Off = int32(ctxt.Pc)
o1 = OP_IRR(c.opirr(ALUI), uint32(0), uint32(REGZERO), uint32(REGTMP))
rel := obj.Addrel(c.cursym)
rel.Off = int32(c.pc)
rel.Siz = 4
rel.Sym = p.To.Sym
rel.Add = p.To.Offset
rel.Type = obj.R_ADDRMIPSU
o2 = OP_IRR(opirr(ctxt, p.As), uint32(0), uint32(REGTMP), uint32(p.From.Reg))
rel2 := obj.Addrel(ctxt.Cursym)
rel2.Off = int32(ctxt.Pc + 4)
o2 = OP_IRR(c.opirr(p.As), uint32(0), uint32(REGTMP), uint32(p.From.Reg))
rel2 := obj.Addrel(c.cursym)
rel2.Off = int32(c.pc + 4)
rel2.Siz = 4
rel2.Sym = p.To.Sym
rel2.Add = p.To.Offset
......@@ -1473,21 +1485,21 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
if o.size == 12 {
o3 = o2
o2 = OP_RRR(oprrr(ctxt, AADDVU), uint32(REGSB), uint32(REGTMP), uint32(REGTMP))
o2 = OP_RRR(c.oprrr(AADDVU), uint32(REGSB), uint32(REGTMP), uint32(REGTMP))
rel2.Off += 4
}
case 51: /* mov addr,r ==> lu + add REGSB, REGTMP + lw o(REGTMP) */
o1 = OP_IRR(opirr(ctxt, ALUI), uint32(0), uint32(REGZERO), uint32(REGTMP))
rel := obj.Addrel(ctxt.Cursym)
rel.Off = int32(ctxt.Pc)
o1 = OP_IRR(c.opirr(ALUI), uint32(0), uint32(REGZERO), uint32(REGTMP))
rel := obj.Addrel(c.cursym)
rel.Off = int32(c.pc)
rel.Siz = 4
rel.Sym = p.From.Sym
rel.Add = p.From.Offset
rel.Type = obj.R_ADDRMIPSU
o2 = OP_IRR(opirr(ctxt, -p.As), uint32(0), uint32(REGTMP), uint32(p.To.Reg))
rel2 := obj.Addrel(ctxt.Cursym)
rel2.Off = int32(ctxt.Pc + 4)
o2 = OP_IRR(c.opirr(-p.As), uint32(0), uint32(REGTMP), uint32(p.To.Reg))
rel2 := obj.Addrel(c.cursym)
rel2.Off = int32(c.pc + 4)
rel2.Siz = 4
rel2.Sym = p.From.Sym
rel2.Add = p.From.Offset
......@@ -1495,21 +1507,21 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
if o.size == 12 {
o3 = o2
o2 = OP_RRR(oprrr(ctxt, AADDVU), uint32(REGSB), uint32(REGTMP), uint32(REGTMP))
o2 = OP_RRR(c.oprrr(AADDVU), uint32(REGSB), uint32(REGTMP), uint32(REGTMP))
rel2.Off += 4
}
case 52: /* mov $lext, r ==> lu + add REGSB, r + add */
o1 = OP_IRR(opirr(ctxt, ALUI), uint32(0), uint32(REGZERO), uint32(p.To.Reg))
rel := obj.Addrel(ctxt.Cursym)
rel.Off = int32(ctxt.Pc)
o1 = OP_IRR(c.opirr(ALUI), uint32(0), uint32(REGZERO), uint32(p.To.Reg))
rel := obj.Addrel(c.cursym)
rel.Off = int32(c.pc)
rel.Siz = 4
rel.Sym = p.From.Sym
rel.Add = p.From.Offset
rel.Type = obj.R_ADDRMIPSU
o2 = OP_IRR(opirr(ctxt, add), uint32(0), uint32(p.To.Reg), uint32(p.To.Reg))
rel2 := obj.Addrel(ctxt.Cursym)
rel2.Off = int32(ctxt.Pc + 4)
o2 = OP_IRR(c.opirr(add), uint32(0), uint32(p.To.Reg), uint32(p.To.Reg))
rel2 := obj.Addrel(c.cursym)
rel2.Off = int32(c.pc + 4)
rel2.Siz = 4
rel2.Sym = p.From.Sym
rel2.Add = p.From.Offset
......@@ -1517,7 +1529,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
if o.size == 12 {
o3 = o2
o2 = OP_RRR(oprrr(ctxt, AADDVU), uint32(REGSB), uint32(p.To.Reg), uint32(p.To.Reg))
o2 = OP_RRR(c.oprrr(AADDVU), uint32(REGSB), uint32(p.To.Reg), uint32(p.To.Reg))
rel2.Off += 4
}
......@@ -1525,9 +1537,9 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
// clobbers R3 !
// load thread pointer with RDHWR, R3 is used for fast kernel emulation on Linux
o1 = (037<<26 + 073) | (29 << 11) | (3 << 16) // rdhwr $29, r3
o2 = OP_IRR(opirr(ctxt, p.As), uint32(0), uint32(REG_R3), uint32(p.From.Reg))
rel := obj.Addrel(ctxt.Cursym)
rel.Off = int32(ctxt.Pc + 4)
o2 = OP_IRR(c.opirr(p.As), uint32(0), uint32(REG_R3), uint32(p.From.Reg))
rel := obj.Addrel(c.cursym)
rel.Off = int32(c.pc + 4)
rel.Siz = 4
rel.Sym = p.To.Sym
rel.Add = p.To.Offset
......@@ -1536,9 +1548,9 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
case 54: /* mov tlsvar, r ==> rdhwr + lw o(r3) */
// clobbers R3 !
o1 = (037<<26 + 073) | (29 << 11) | (3 << 16) // rdhwr $29, r3
o2 = OP_IRR(opirr(ctxt, -p.As), uint32(0), uint32(REG_R3), uint32(p.To.Reg))
rel := obj.Addrel(ctxt.Cursym)
rel.Off = int32(ctxt.Pc + 4)
o2 = OP_IRR(c.opirr(-p.As), uint32(0), uint32(REG_R3), uint32(p.To.Reg))
rel := obj.Addrel(c.cursym)
rel.Off = int32(c.pc + 4)
rel.Siz = 4
rel.Sym = p.From.Sym
rel.Add = p.From.Offset
......@@ -1547,9 +1559,9 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
case 55: /* mov $tlsvar, r ==> rdhwr + add */
// clobbers R3 !
o1 = (037<<26 + 073) | (29 << 11) | (3 << 16) // rdhwr $29, r3
o2 = OP_IRR(opirr(ctxt, add), uint32(0), uint32(REG_R3), uint32(p.To.Reg))
rel := obj.Addrel(ctxt.Cursym)
rel.Off = int32(ctxt.Pc + 4)
o2 = OP_IRR(c.opirr(add), uint32(0), uint32(REG_R3), uint32(p.To.Reg))
rel := obj.Addrel(c.cursym)
rel.Off = int32(c.pc + 4)
rel.Siz = 4
rel.Sym = p.From.Sym
rel.Add = p.From.Offset
......@@ -1563,17 +1575,17 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
return
}
func vregoff(ctxt *obj.Link, a *obj.Addr) int64 {
ctxt.Instoffset = 0
aclass(ctxt, a)
return ctxt.Instoffset
func (c *ctxt0) vregoff(a *obj.Addr) int64 {
c.instoffset = 0
c.aclass(a)
return c.instoffset
}
func regoff(ctxt *obj.Link, a *obj.Addr) int32 {
return int32(vregoff(ctxt, a))
func (c *ctxt0) regoff(a *obj.Addr) int32 {
return int32(c.vregoff(a))
}
func oprrr(ctxt *obj.Link, a obj.As) uint32 {
func (c *ctxt0) oprrr(a obj.As) uint32 {
switch a {
case AADD:
return OP(4, 0)
......@@ -1748,14 +1760,14 @@ func oprrr(ctxt *obj.Link, a obj.As) uint32 {
}
if a < 0 {
ctxt.Diag("bad rrr opcode -%v", -a)
c.ctxt.Diag("bad rrr opcode -%v", -a)
} else {
ctxt.Diag("bad rrr opcode %v", a)
c.ctxt.Diag("bad rrr opcode %v", a)
}
return 0
}
func opirr(ctxt *obj.Link, a obj.As) uint32 {
func (c *ctxt0) opirr(a obj.As) uint32 {
switch a {
case AADD:
return SP(1, 0)
......@@ -1909,9 +1921,9 @@ func opirr(ctxt *obj.Link, a obj.As) uint32 {
}
if a < 0 {
ctxt.Diag("bad irr opcode -%v", -a)
c.ctxt.Diag("bad irr opcode -%v", -a)
} else {
ctxt.Diag("bad irr opcode %v", a)
c.ctxt.Diag("bad irr opcode %v", a)
}
return 0
}
......
......@@ -38,6 +38,8 @@ import (
)
func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
c := ctxt0{ctxt: ctxt, newprog: newprog}
p.From.Class = 0
p.To.Class = 0
......@@ -73,7 +75,7 @@ func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
case AMOVD:
if p.From.Type == obj.TYPE_FCONST {
f64 := p.From.Val.(float64)
if math.Float64bits(f64) == 0 && ctxt.Arch.Family == sys.MIPS64 {
if math.Float64bits(f64) == 0 && c.ctxt.Arch.Family == sys.MIPS64 {
p.As = AMOVV
p.From.Type = obj.TYPE_REG
p.From.Reg = REGZERO
......@@ -125,20 +127,20 @@ func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
// TODO(minux): add morestack short-cuts with small fixed frame-size.
ctxt.Cursym = cursym
c := ctxt0{ctxt: ctxt, newprog: newprog, cursym: cursym}
// a switch for enabling/disabling instruction scheduling
nosched := true
if cursym.Text == nil || cursym.Text.Link == nil {
if c.cursym.Text == nil || c.cursym.Text.Link == nil {
return
}
p := cursym.Text
p := c.cursym.Text
textstksiz := p.To.Offset
cursym.Args = p.To.Val.(int32)
cursym.Locals = int32(textstksiz)
c.cursym.Args = p.To.Val.(int32)
c.cursym.Locals = int32(textstksiz)
/*
* find leaf subroutines
......@@ -149,7 +151,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
var q *obj.Prog
var q1 *obj.Prog
for p := cursym.Text; p != nil; p = p.Link {
for p := c.cursym.Text; p != nil; p = p.Link {
switch p.As {
/* too hard, just leave alone */
case obj.ATEXT:
......@@ -195,7 +197,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
AJAL,
obj.ADUFFZERO,
obj.ADUFFCOPY:
cursym.Text.Mark &^= LEAF
c.cursym.Text.Mark &^= LEAF
fallthrough
case AJMP,
......@@ -260,7 +262,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
}
var mov, add obj.As
if ctxt.Arch.Family == sys.MIPS64 {
if c.ctxt.Arch.Family == sys.MIPS64 {
add = AADDV
mov = AMOVV
} else {
......@@ -271,21 +273,21 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
autosize := int32(0)
var p1 *obj.Prog
var p2 *obj.Prog
for p := cursym.Text; p != nil; p = p.Link {
for p := c.cursym.Text; p != nil; p = p.Link {
o := p.As
switch o {
case obj.ATEXT:
autosize = int32(textstksiz + ctxt.FixedFrameSize())
if (p.Mark&LEAF != 0) && autosize <= int32(ctxt.FixedFrameSize()) {
autosize = 0
} else if autosize&4 != 0 && ctxt.Arch.Family == sys.MIPS64 {
} else if autosize&4 != 0 && c.ctxt.Arch.Family == sys.MIPS64 {
autosize += 4
}
p.To.Offset = int64(autosize) - ctxt.FixedFrameSize()
if p.From3.Offset&obj.NOSPLIT == 0 {
p = stacksplit(ctxt, p, newprog, autosize) // emit split check
p = c.stacksplit(p, autosize) // emit split check
}
q = p
......@@ -313,22 +315,22 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
q.To.Type = obj.TYPE_REG
q.To.Reg = REGSP
q.Spadj = +autosize
} else if cursym.Text.Mark&LEAF == 0 {
if cursym.Text.From3.Offset&obj.NOSPLIT != 0 {
} else if c.cursym.Text.Mark&LEAF == 0 {
if c.cursym.Text.From3.Offset&obj.NOSPLIT != 0 {
if ctxt.Debugvlog {
ctxt.Logf("save suppressed in: %s\n", cursym.Name)
ctxt.Logf("save suppressed in: %s\n", c.cursym.Name)
}
cursym.Text.Mark |= LEAF
c.cursym.Text.Mark |= LEAF
}
}
if cursym.Text.Mark&LEAF != 0 {
cursym.Set(obj.AttrLeaf, true)
if c.cursym.Text.Mark&LEAF != 0 {
c.cursym.Set(obj.AttrLeaf, true)
break
}
if cursym.Text.From3.Offset&obj.WRAPPER != 0 {
if c.cursym.Text.From3.Offset&obj.WRAPPER != 0 {
// if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
//
// MOV g_panic(g), R1
......@@ -349,7 +351,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
q.As = mov
q.From.Type = obj.TYPE_MEM
q.From.Reg = REGG
q.From.Offset = 4 * int64(ctxt.Arch.PtrSize) // G.panic
q.From.Offset = 4 * int64(c.ctxt.Arch.PtrSize) // G.panic
q.To.Type = obj.TYPE_REG
q.To.Reg = REG_R1
......@@ -419,7 +421,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
p.To.Name = obj.NAME_NONE // clear fields as we may modify p to other instruction
p.To.Sym = nil
if cursym.Text.Mark&LEAF != 0 {
if c.cursym.Text.Mark&LEAF != 0 {
if autosize == 0 {
p.As = AJMP
p.From = obj.Addr{}
......@@ -443,7 +445,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
p.To.Reg = REGSP
p.Spadj = -autosize
q = newprog()
q = c.newprog()
q.As = AJMP
q.Pos = p.Pos
q.To.Type = obj.TYPE_MEM
......@@ -468,7 +470,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
}
if autosize != 0 {
q = newprog()
q = c.newprog()
q.As = add
q.Pos = p.Pos
q.From.Type = obj.TYPE_CONST
......@@ -481,7 +483,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
p.Link = q
}
q1 = newprog()
q1 = c.newprog()
q1.As = AJMP
q1.Pos = p.Pos
if retSym != nil { // retjmp
......@@ -509,9 +511,9 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
}
}
if ctxt.Arch.Family == sys.MIPS {
if c.ctxt.Arch.Family == sys.MIPS {
// rewrite MOVD into two MOVF in 32-bit mode to avoid unaligned memory access
for p = cursym.Text; p != nil; p = p1 {
for p = c.cursym.Text; p != nil; p = p1 {
p1 = p.Link
if p.As != AMOVD {
......@@ -522,14 +524,14 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
}
p.As = AMOVF
q = newprog()
q = c.newprog()
*q = *p
q.Link = p.Link
p.Link = q
p1 = q.Link
var regOff int16
if ctxt.Arch.ByteOrder == binary.BigEndian {
if c.ctxt.Arch.ByteOrder == binary.BigEndian {
regOff = 1 // load odd register first
}
if p.From.Type == obj.TYPE_MEM {
......@@ -549,9 +551,9 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
if nosched {
// if we don't do instruction scheduling, simply add
// NOP after each branch instruction.
for p = cursym.Text; p != nil; p = p.Link {
for p = c.cursym.Text; p != nil; p = p.Link {
if p.Mark&BRANCH != 0 {
addnop(ctxt, p, newprog)
c.addnop(p)
}
}
return
......@@ -559,14 +561,14 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
// instruction scheduling
q = nil // p - 1
q1 = cursym.Text // top of block
q1 = c.cursym.Text // top of block
o := 0 // count of instructions
for p = cursym.Text; p != nil; p = p1 {
for p = c.cursym.Text; p != nil; p = p1 {
p1 = p.Link
o++
if p.Mark&NOSCHED != 0 {
if q1 != p {
sched(ctxt, newprog, q1, q)
c.sched(q1, q)
}
for ; p != nil; p = p.Link {
if p.Mark&NOSCHED == 0 {
......@@ -581,18 +583,18 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
}
if p.Mark&(LABEL|SYNC) != 0 {
if q1 != p {
sched(ctxt, newprog, q1, q)
c.sched(q1, q)
}
q1 = p
o = 1
}
if p.Mark&(BRANCH|SYNC) != 0 {
sched(ctxt, newprog, q1, p)
c.sched(q1, p)
q1 = p1
o = 0
}
if o >= NSCHED {
sched(ctxt, newprog, q1, p)
c.sched(q1, p)
q1 = p1
o = 0
}
......@@ -600,7 +602,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
}
}
func stacksplit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize int32) *obj.Prog {
func (c *ctxt0) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
// Leaf function with no frame is effectively NOSPLIT.
if framesize == 0 {
return p
......@@ -608,7 +610,7 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize in
var mov, add, sub obj.As
if ctxt.Arch.Family == sys.MIPS64 {
if c.ctxt.Arch.Family == sys.MIPS64 {
add = AADDV
mov = AMOVV
sub = ASUBVU
......@@ -619,14 +621,14 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize in
}
// MOV g_stackguard(g), R1
p = obj.Appendp(p, newprog)
p = obj.Appendp(p, c.newprog)
p.As = mov
p.From.Type = obj.TYPE_MEM
p.From.Reg = REGG
p.From.Offset = 2 * int64(ctxt.Arch.PtrSize) // G.stackguard0
if ctxt.Cursym.CFunc() {
p.From.Offset = 3 * int64(ctxt.Arch.PtrSize) // G.stackguard1
p.From.Offset = 2 * int64(c.ctxt.Arch.PtrSize) // G.stackguard0
if c.cursym.CFunc() {
p.From.Offset = 3 * int64(c.ctxt.Arch.PtrSize) // G.stackguard1
}
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R1
......@@ -635,7 +637,7 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize in
if framesize <= obj.StackSmall {
// small stack: SP < stackguard
// AGTU SP, stackguard, R1
p = obj.Appendp(p, newprog)
p = obj.Appendp(p, c.newprog)
p.As = ASGTU
p.From.Type = obj.TYPE_REG
......@@ -647,7 +649,7 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize in
// large stack: SP-framesize < stackguard-StackSmall
// ADD $-(framesize-StackSmall), SP, R2
// SGTU R2, stackguard, R1
p = obj.Appendp(p, newprog)
p = obj.Appendp(p, c.newprog)
p.As = add
p.From.Type = obj.TYPE_CONST
......@@ -656,7 +658,7 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize in
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R2
p = obj.Appendp(p, newprog)
p = obj.Appendp(p, c.newprog)
p.As = ASGTU
p.From.Type = obj.TYPE_REG
p.From.Reg = REG_R2
......@@ -679,7 +681,7 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize in
// SUB R1, R2
// MOV $(framesize+(StackGuard-StackSmall)), R1
// SGTU R2, R1, R1
p = obj.Appendp(p, newprog)
p = obj.Appendp(p, c.newprog)
p.As = mov
p.From.Type = obj.TYPE_CONST
......@@ -687,7 +689,7 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize in
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R2
p = obj.Appendp(p, newprog)
p = obj.Appendp(p, c.newprog)
q = p
p.As = ABEQ
p.From.Type = obj.TYPE_REG
......@@ -696,7 +698,7 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize in
p.To.Type = obj.TYPE_BRANCH
p.Mark |= BRANCH
p = obj.Appendp(p, newprog)
p = obj.Appendp(p, c.newprog)
p.As = add
p.From.Type = obj.TYPE_CONST
p.From.Offset = obj.StackGuard
......@@ -704,21 +706,21 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize in
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R2
p = obj.Appendp(p, newprog)
p = obj.Appendp(p, c.newprog)
p.As = sub
p.From.Type = obj.TYPE_REG
p.From.Reg = REG_R1
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R2
p = obj.Appendp(p, newprog)
p = obj.Appendp(p, c.newprog)
p.As = mov
p.From.Type = obj.TYPE_CONST
p.From.Offset = int64(framesize) + obj.StackGuard - obj.StackSmall
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R1
p = obj.Appendp(p, newprog)
p = obj.Appendp(p, c.newprog)
p.As = ASGTU
p.From.Type = obj.TYPE_REG
p.From.Reg = REG_R2
......@@ -728,7 +730,7 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize in
}
// q1: BNE R1, done
p = obj.Appendp(p, newprog)
p = obj.Appendp(p, c.newprog)
q1 := p
p.As = ABNE
......@@ -738,7 +740,7 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize in
p.Mark |= BRANCH
// MOV LINK, R3
p = obj.Appendp(p, newprog)
p = obj.Appendp(p, c.newprog)
p.As = mov
p.From.Type = obj.TYPE_REG
......@@ -751,29 +753,29 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize in
}
// JAL runtime.morestack(SB)
p = obj.Appendp(p, newprog)
p = obj.Appendp(p, c.newprog)
p.As = AJAL
p.To.Type = obj.TYPE_BRANCH
if ctxt.Cursym.CFunc() {
p.To.Sym = ctxt.Lookup("runtime.morestackc", 0)
} else if ctxt.Cursym.Text.From3.Offset&obj.NEEDCTXT == 0 {
p.To.Sym = ctxt.Lookup("runtime.morestack_noctxt", 0)
if c.cursym.CFunc() {
p.To.Sym = c.ctxt.Lookup("runtime.morestackc", 0)
} else if c.cursym.Text.From3.Offset&obj.NEEDCTXT == 0 {
p.To.Sym = c.ctxt.Lookup("runtime.morestack_noctxt", 0)
} else {
p.To.Sym = ctxt.Lookup("runtime.morestack", 0)
p.To.Sym = c.ctxt.Lookup("runtime.morestack", 0)
}
p.Mark |= BRANCH
// JMP start
p = obj.Appendp(p, newprog)
p = obj.Appendp(p, c.newprog)
p.As = AJMP
p.To.Type = obj.TYPE_BRANCH
p.Pcond = ctxt.Cursym.Text.Link
p.Pcond = c.cursym.Text.Link
p.Mark |= BRANCH
// placeholder for q1's jump target
p = obj.Appendp(p, newprog)
p = obj.Appendp(p, c.newprog)
p.As = obj.ANOP // zero-width place holder
q1.Pcond = p
......@@ -781,8 +783,8 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize in
return p
}
func addnop(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
q := newprog()
func (c *ctxt0) addnop(p *obj.Prog) {
q := c.newprog()
// we want to use the canonical NOP (SLL $0,R0,R0) here,
// however, as the assembler will always replace $0
// as R0, we have to resort to manually encode the SLL
......@@ -825,7 +827,7 @@ type Sch struct {
comp bool
}
func sched(ctxt *obj.Link, newprog obj.ProgAlloc, p0, pe *obj.Prog) {
func (c *ctxt0) sched(p0, pe *obj.Prog) {
var sch [NSCHED]Sch
/*
......@@ -834,7 +836,7 @@ func sched(ctxt *obj.Link, newprog obj.ProgAlloc, p0, pe *obj.Prog) {
s := sch[:]
for p := p0; ; p = p.Link {
s[0].p = *p
markregused(ctxt, &s[0])
c.markregused(&s[0])
if p == pe {
break
}
......@@ -868,7 +870,7 @@ func sched(ctxt *obj.Link, newprog obj.ProgAlloc, p0, pe *obj.Prog) {
}
}
for u := t[1:]; -cap(u) <= -cap(s); u = u[1:] {
if depend(ctxt, &u[0], &t[0]) {
if c.depend(&u[0], &t[0]) {
goto no2
}
}
......@@ -910,14 +912,14 @@ func sched(ctxt *obj.Link, newprog obj.ProgAlloc, p0, pe *obj.Prog) {
}
for s[0].nop != 0 {
s[0].nop--
addnop(ctxt, p, newprog)
c.addnop(p)
}
}
}
func markregused(ctxt *obj.Link, s *Sch) {
func (c *ctxt0) markregused(s *Sch) {
p := &s.p
s.comp = compound(ctxt, p)
s.comp = c.compound(p)
s.nop = 0
if s.comp {
s.set.ireg |= 1 << (REGTMP - REG_R0)
......@@ -934,7 +936,7 @@ func markregused(ctxt *obj.Link, s *Sch) {
*/
switch p.As {
case obj.ATEXT:
ctxt.Autosize = int32(p.To.Offset + 8)
c.autosize = int32(p.To.Offset + 8)
ad = 1
case AJAL:
......@@ -1063,7 +1065,7 @@ func markregused(ctxt *obj.Link, s *Sch) {
*/
cls := int(p.To.Class)
if cls == 0 {
cls = aclass(ctxt, &p.To) + 1
cls = c.aclass(&p.To) + 1
p.To.Class = int8(cls)
}
cls--
......@@ -1105,7 +1107,7 @@ func markregused(ctxt *obj.Link, s *Sch) {
break
}
s.size = uint8(sz)
s.soffset = regoff(ctxt, &p.To)
s.soffset = c.regoff(&p.To)
m := uint32(ANYMEM)
if cls == REGSB {
......@@ -1153,7 +1155,7 @@ func markregused(ctxt *obj.Link, s *Sch) {
break
}
s.size = uint8(sz)
s.soffset = regoff(ctxt, &p.To)
s.soffset = c.regoff(&p.To)
if ar != 0 {
s.used.cc |= E_MEMSP
......@@ -1168,7 +1170,7 @@ func markregused(ctxt *obj.Link, s *Sch) {
break
}
s.size = uint8(sz)
s.soffset = regoff(ctxt, &p.To)
s.soffset = c.regoff(&p.To)
if ar != 0 {
s.used.cc |= E_MEMSB
......@@ -1182,7 +1184,7 @@ func markregused(ctxt *obj.Link, s *Sch) {
*/
cls = int(p.From.Class)
if cls == 0 {
cls = aclass(ctxt, &p.From) + 1
cls = c.aclass(&p.From) + 1
p.From.Class = int8(cls)
}
cls--
......@@ -1224,7 +1226,7 @@ func markregused(ctxt *obj.Link, s *Sch) {
p.Mark |= LOAD
}
s.size = uint8(sz)
s.soffset = regoff(ctxt, &p.From)
s.soffset = c.regoff(&p.From)
m := uint32(ANYMEM)
if cls == REGSB {
......@@ -1267,7 +1269,7 @@ func markregused(ctxt *obj.Link, s *Sch) {
break
}
s.size = uint8(sz)
s.soffset = regoff(ctxt, &p.From)
s.soffset = c.regoff(&p.From)
s.used.cc |= E_MEMSP
......@@ -1281,7 +1283,7 @@ func markregused(ctxt *obj.Link, s *Sch) {
break
}
s.size = uint8(sz)
s.soffset = regoff(ctxt, &p.From)
s.soffset = c.regoff(&p.From)
s.used.cc |= E_MEMSB
}
......@@ -1301,7 +1303,7 @@ func markregused(ctxt *obj.Link, s *Sch) {
* test to see if two instructions can be
* interchanged without changing semantics
*/
func depend(ctxt *obj.Link, sa, sb *Sch) bool {
func (c *ctxt0) depend(sa, sb *Sch) bool {
if sa.set.ireg&(sb.set.ireg|sb.used.ireg) != 0 {
return true
}
......@@ -1323,7 +1325,7 @@ func depend(ctxt *obj.Link, sa, sb *Sch) bool {
*/
if sa.used.cc&sb.used.cc&E_MEM != 0 {
if sa.p.Reg == sb.p.Reg {
if regoff(ctxt, &sa.p.From) == regoff(ctxt, &sb.p.From) {
if c.regoff(&sa.p.From) == c.regoff(&sb.p.From) {
return true
}
}
......@@ -1382,8 +1384,8 @@ func conflict(sa, sb *Sch) bool {
return false
}
func compound(ctxt *obj.Link, p *obj.Prog) bool {
o := oplook(ctxt, p)
func (c *ctxt0) compound(p *obj.Prog) bool {
o := c.oplook(p)
if o.size != 4 {
return true
}
......