Commit dcf643f1 authored by Josh Bleecher Snyder

cmd/internal/obj/arm: make assembler concurrency-safe

Move global state from obj.Link
to a new function-local state struct arm.ctxt5.

This ends up being cleaner than threading
all the state through as parameters; there's a lot of it.
While we're here, move newprog from a parameter to ctxt5.

We reserve the variable name c for ctxt5,
so a few local variables named c have been renamed.

Instead of lazily initializing deferreturn
and Sym_div and friends, initialize them up front.

Passes toolstash-check -all.

Updates #15756

Change-Id: Ifb4e4b9879e4e1f25e6168d8b7b2a25a3390dc11
Reviewed-on: https://go-review.googlesource.com/39922
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Cherry Zhang <cherryyz@google.com>
parent 62aeb777
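
For orientation before the diff: a minimal, stand-alone sketch of the refactoring pattern this change applies. The names here (link, funcState, assemble) are hypothetical stand-ins, not the real cmd/internal/obj/arm API; the point is only that per-function mutable assembler state moves out of shared/global storage into a struct allocated fresh for each function, which is what makes concurrent assembly safe.

```go
package main

import (
	"fmt"
	"sync"
)

// link stands in for *obj.Link: shared context that is effectively
// read-only while individual functions are being assembled.
type link struct {
	arch string
}

// funcState stands in for arm.ctxt5: per-function mutable assembler state
// (pc, instoffset, literal-pool bookkeeping) that previously lived in
// package-level variables or on the shared context.
type funcState struct {
	ctxt       *link
	pc         int64
	instoffset int64
	pool       struct{ start, size uint32 }
}

// assemble allocates a fresh funcState per call, so concurrent calls
// never share mutable state.
func assemble(ctxt *link, ninsns int) int64 {
	c := funcState{ctxt: ctxt} // fresh state; no globals touched
	for i := 0; i < ninsns; i++ {
		c.pc += 4        // pretend every instruction is 4 bytes
		c.pool.size += 4 // pretend every instruction adds a literal
	}
	return c.pc
}

func main() {
	shared := &link{arch: "arm"}
	var wg sync.WaitGroup
	for _, name := range []string{"f", "g", "h"} {
		wg.Add(1)
		go func(name string) {
			defer wg.Done()
			fmt.Printf("%s: size=%d\n", name, assemble(shared, 8))
		}(name)
	}
	wg.Wait()
}
```

In the actual change, span5 constructs the per-function state as c := ctxt5{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: p.To.Offset + 4}, as shown in the diff below.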
......@@ -38,6 +38,26 @@ import (
"sort"
)
// ctxt5 holds state while assembling a single function.
// Each function gets a fresh ctxt5.
// This allows for multiple functions to be safely concurrently assembled.
type ctxt5 struct {
ctxt *obj.Link
newprog obj.ProgAlloc
cursym *obj.LSym
printp *obj.Prog
blitrl *obj.Prog
elitrl *obj.Prog
autosize int64
instoffset int64
pc int64
pool struct {
start uint32
size uint32
extra uint32
}
}
type Optab struct {
as obj.As
a1 uint8
......@@ -257,17 +277,17 @@ var optab = []Optab{
{obj.AXXX, C_NONE, C_NONE, C_NONE, 0, 4, 0, 0, 0},
}
var pool struct {
start uint32
size uint32
extra uint32
}
var oprange [ALAST & obj.AMask][]Optab
var xcmp [C_GOK + 1][C_GOK + 1]bool
var deferreturn *obj.LSym
var (
deferreturn *obj.LSym
symdiv *obj.LSym
symdivu *obj.LSym
symmod *obj.LSym
symmodu *obj.LSym
)
// Note about encoding: Prog.scond holds the condition encoding,
// but XOR'ed with C_SCOND_XOR, so that C_SCOND_NONE == 0.
......@@ -279,14 +299,14 @@ var deferreturn *obj.LSym
// p->pc if extra padding is necessary.
// In rare cases, asmoutnacl might split p into two instructions.
// origPC is the PC for this Prog (no padding is taken into account).
func asmoutnacl(ctxt *obj.Link, newprog obj.ProgAlloc, origPC int32, p *obj.Prog, o *Optab, out []uint32) int {
func (c *ctxt5) asmoutnacl(origPC int32, p *obj.Prog, o *Optab, out []uint32) int {
size := int(o.size)
// instruction specific
switch p.As {
default:
if out != nil {
asmout(ctxt, p, o, out)
c.asmout(p, o, out)
}
case ADATABUNDLE, // align to 16-byte boundary
......@@ -294,7 +314,7 @@ func asmoutnacl(ctxt *obj.Link, newprog obj.ProgAlloc, origPC int32, p *obj.Prog
p.Pc = (p.Pc + 15) &^ 15
if out != nil {
asmout(ctxt, p, o, out)
c.asmout(p, o, out)
}
case obj.AUNDEF,
......@@ -313,11 +333,11 @@ func asmoutnacl(ctxt *obj.Link, newprog obj.ProgAlloc, origPC int32, p *obj.Prog
case AB, ABL:
if p.To.Type != obj.TYPE_MEM {
if out != nil {
asmout(ctxt, p, o, out)
c.asmout(p, o, out)
}
} else {
if p.To.Offset != 0 || size != 4 || p.To.Reg > REG_R15 || p.To.Reg < REG_R0 {
ctxt.Diag("unsupported instruction: %v", p)
c.ctxt.Diag("unsupported instruction: %v", p)
}
if p.Pc&15 == 12 {
p.Pc += 4
......@@ -336,9 +356,6 @@ func asmoutnacl(ctxt *obj.Link, newprog obj.ProgAlloc, origPC int32, p *obj.Prog
// align the last instruction (the actual BL) to the last instruction in a bundle
if p.As == ABL {
if deferreturn == nil {
deferreturn = ctxt.Lookup("runtime.deferreturn", 0)
}
if p.To.Sym == deferreturn {
p.Pc = ((int64(origPC) + 15) &^ 15) + 16 - int64(size)
} else {
......@@ -362,7 +379,7 @@ func asmoutnacl(ctxt *obj.Link, newprog obj.ProgAlloc, origPC int32, p *obj.Prog
ASTREXD:
if p.To.Type == obj.TYPE_REG && p.To.Reg == REG_R15 && p.From.Reg == REG_R13 { // MOVW.W x(R13), PC
if out != nil {
asmout(ctxt, p, o, out)
c.asmout(p, o, out)
}
if size == 4 {
if out != nil {
......@@ -398,7 +415,7 @@ func asmoutnacl(ctxt *obj.Link, newprog obj.ProgAlloc, origPC int32, p *obj.Prog
}
if p.To.Type == obj.TYPE_REG && p.To.Reg == REG_R15 {
ctxt.Diag("unsupported instruction (move to another register and use indirect jump instead): %v", p)
c.ctxt.Diag("unsupported instruction (move to another register and use indirect jump instead): %v", p)
}
if p.To.Type == obj.TYPE_MEM && p.To.Reg == REG_R13 && (p.Scond&C_WBIT != 0) && size > 4 {
......@@ -406,7 +423,7 @@ func asmoutnacl(ctxt *obj.Link, newprog obj.ProgAlloc, origPC int32, p *obj.Prog
// split it into two instructions:
// ADD $-100004, R13
// MOVW R14, 0(R13)
q := newprog()
q := c.newprog()
p.Scond &^= C_WBIT
*q = *p
......@@ -444,7 +461,7 @@ func asmoutnacl(ctxt *obj.Link, newprog obj.ProgAlloc, origPC int32, p *obj.Prog
a2.Reg = REG_R13
a2.Sym = nil
a2.Offset = 0
size = int(oplook(ctxt, p).size)
size = int(c.oplook(p).size)
break
}
......@@ -461,7 +478,7 @@ func asmoutnacl(ctxt *obj.Link, newprog obj.ProgAlloc, origPC int32, p *obj.Prog
// if addr.reg == 0, then it is probably load from x(FP) with small x, no need to modify.
if reg == 0 {
if out != nil {
asmout(ctxt, p, o, out)
c.asmout(p, o, out)
}
} else {
if out != nil {
......@@ -472,7 +489,7 @@ func asmoutnacl(ctxt *obj.Link, newprog obj.ProgAlloc, origPC int32, p *obj.Prog
}
size += 4
if out != nil {
asmout(ctxt, p, o, out[1:])
c.asmout(p, o, out[1:])
}
}
......@@ -484,9 +501,9 @@ func asmoutnacl(ctxt *obj.Link, newprog obj.ProgAlloc, origPC int32, p *obj.Prog
// 2. load/store from R11.
// This won't handle .W/.P, so we should reject such code.
if p.Scond&(C_PBIT|C_WBIT) != 0 {
ctxt.Diag("unsupported instruction (.P/.W): %v", p)
c.ctxt.Diag("unsupported instruction (.P/.W): %v", p)
}
q := newprog()
q := c.newprog()
*q = *p
var a2 *obj.Addr
if p.To.Type == obj.TYPE_MEM {
......@@ -519,11 +536,11 @@ func asmoutnacl(ctxt *obj.Link, newprog obj.ProgAlloc, origPC int32, p *obj.Prog
a2.Reg = REG_R11
a2.Sym = nil
a2.Offset = 0
size = int(oplook(ctxt, p).size)
size = int(c.oplook(p).size)
break
}
} else if out != nil {
asmout(ctxt, p, o, out)
c.asmout(p, o, out)
}
}
......@@ -531,7 +548,7 @@ func asmoutnacl(ctxt *obj.Link, newprog obj.ProgAlloc, origPC int32, p *obj.Prog
if p.To.Type == obj.TYPE_REG {
switch p.To.Reg {
case REG_R9:
ctxt.Diag("invalid instruction, cannot write to R9: %v", p)
c.ctxt.Diag("invalid instruction, cannot write to R9: %v", p)
case REG_R13:
if out != nil {
......@@ -560,19 +577,17 @@ func span5(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
ctxt.Diag("arm ops not initialized, call arm.buildop first")
}
ctxt.Cursym = cursym
ctxt.Autosize = int32(p.To.Offset + 4)
c := int32(0)
c := ctxt5{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: p.To.Offset + 4}
pc := int32(0)
op = p
p = p.Link
var i int
var m int
var o *Optab
for ; p != nil || ctxt.Blitrl != nil; op, p = p, p.Link {
for ; p != nil || c.blitrl != nil; op, p = p, p.Link {
if p == nil {
if checkpool(ctxt, newprog, op, 0) {
if c.checkpool(op, 0) {
p = op
continue
}
......@@ -583,14 +598,14 @@ func span5(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
break
}
p.Pc = int64(c)
o = oplook(ctxt, p)
p.Pc = int64(pc)
o = c.oplook(p)
if ctxt.Headtype != obj.Hnacl {
m = int(o.size)
} else {
m = asmoutnacl(ctxt, newprog, c, p, o, nil)
c = int32(p.Pc) // asmoutnacl might change pc for alignment
o = oplook(ctxt, p) // asmoutnacl might change p in rare cases
m = c.asmoutnacl(pc, p, o, nil)
pc = int32(p.Pc) // asmoutnacl might change pc for alignment
o = c.oplook(p) // asmoutnacl might change p in rare cases
}
if m%4 != 0 || p.Pc%4 != 0 {
......@@ -598,9 +613,9 @@ func span5(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
}
// must check literal pool here in case p generates many instructions
if ctxt.Blitrl != nil {
if c.blitrl != nil {
i = m
if checkpool(ctxt, newprog, op, i) {
if c.checkpool(op, i) {
p = op
continue
}
......@@ -613,24 +628,24 @@ func span5(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
switch o.flag & (LFROM | LTO | LPOOL) {
case LFROM:
addpool(ctxt, newprog, p, &p.From)
c.addpool(p, &p.From)
case LTO:
addpool(ctxt, newprog, p, &p.To)
c.addpool(p, &p.To)
case LPOOL:
if p.Scond&C_SCOND == C_SCOND_NONE {
flushpool(ctxt, newprog, p, 0, 0)
c.flushpool(p, 0, 0)
}
}
if p.As == AMOVW && p.To.Type == obj.TYPE_REG && p.To.Reg == REGPC && p.Scond&C_SCOND == C_SCOND_NONE {
flushpool(ctxt, newprog, p, 0, 0)
c.flushpool(p, 0, 0)
}
c += int32(m)
pc += int32(m)
}
cursym.Size = int64(c)
c.cursym.Size = int64(pc)
/*
* if any procedure is large enough to
......@@ -645,13 +660,13 @@ func span5(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
var out [6 + 3]uint32
for {
bflag = 0
c = 0
pc = 0
times++
cursym.Text.Pc = 0 // force re-layout the code.
for p = cursym.Text; p != nil; p = p.Link {
o = oplook(ctxt, p)
if int64(c) > p.Pc {
p.Pc = int64(c)
c.cursym.Text.Pc = 0 // force re-layout the code.
for p = c.cursym.Text; p != nil; p = p.Link {
o = c.oplook(p)
if int64(pc) > p.Pc {
p.Pc = int64(pc)
}
/* very large branches
......@@ -682,14 +697,14 @@ func span5(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
if ctxt.Headtype != obj.Hnacl {
m = int(o.size)
} else {
m = asmoutnacl(ctxt, newprog, c, p, o, nil)
m = c.asmoutnacl(pc, p, o, nil)
}
if p.Pc != int64(opc) {
bflag = 1
}
//print("%v pc changed %d to %d in iter. %d\n", p, opc, (int32)p->pc, times);
c = int32(p.Pc + int64(m))
pc = int32(p.Pc + int64(m))
if m%4 != 0 || p.Pc%4 != 0 {
ctxt.Diag("pc invalid: %v size=%d", p, m)
......@@ -700,7 +715,7 @@ func span5(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
}
if m == 0 && (p.As != obj.AFUNCDATA && p.As != obj.APCDATA && p.As != ADATABUNDLEEND && p.As != obj.ANOP) {
if p.As == obj.ATEXT {
ctxt.Autosize = int32(p.To.Offset + 4)
c.autosize = p.To.Offset + 4
continue
}
......@@ -709,14 +724,14 @@ func span5(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
}
}
cursym.Size = int64(c)
c.cursym.Size = int64(pc)
if bflag == 0 {
break
}
}
if c%4 != 0 {
ctxt.Diag("sym->size=%d, invalid", c)
if pc%4 != 0 {
ctxt.Diag("sym->size=%d, invalid", pc)
}
/*
......@@ -728,22 +743,22 @@ func span5(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
* perhaps we'd be able to parallelize the span loop above.
*/
p = cursym.Text
ctxt.Autosize = int32(p.To.Offset + 4)
cursym.Grow(cursym.Size)
p = c.cursym.Text
c.autosize = p.To.Offset + 4
c.cursym.Grow(c.cursym.Size)
bp := cursym.P
c = int32(p.Pc) // even p->link might need extra padding
bp := c.cursym.P
pc = int32(p.Pc) // even p->link might need extra padding
var v int
for p = p.Link; p != nil; p = p.Link {
ctxt.Pc = p.Pc
o = oplook(ctxt, p)
c.pc = p.Pc
o = c.oplook(p)
opc = int32(p.Pc)
if ctxt.Headtype != obj.Hnacl {
asmout(ctxt, p, o, out[:])
c.asmout(p, o, out[:])
m = int(o.size)
} else {
m = asmoutnacl(ctxt, newprog, c, p, o, out[:])
m = c.asmoutnacl(pc, p, o, out[:])
if int64(opc) != p.Pc {
ctxt.Diag("asmoutnacl broken: pc changed (%d->%d) in last stage: %v", opc, int32(p.Pc), p)
}
......@@ -753,10 +768,10 @@ func span5(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
ctxt.Diag("final stage: pc invalid: %v size=%d", p, m)
}
if int64(c) > p.Pc {
ctxt.Diag("PC padding invalid: want %#d, has %#d: %v", p.Pc, c, p)
if int64(pc) > p.Pc {
ctxt.Diag("PC padding invalid: want %#d, has %#d: %v", p.Pc, pc, p)
}
for int64(c) != p.Pc {
for int64(pc) != p.Pc {
// emit 0xe1a00000 (MOVW R0, R0)
bp[0] = 0x00
bp = bp[1:]
......@@ -767,7 +782,7 @@ func span5(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
bp = bp[1:]
bp[0] = 0xe1
bp = bp[1:]
c += 4
pc += 4
}
for i = 0; i < m/4; i++ {
......@@ -782,7 +797,7 @@ func span5(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
bp = bp[1:]
}
c += int32(m)
pc += int32(m)
}
}
......@@ -792,77 +807,73 @@ func span5(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
* drop the pool now, and branch round it.
* this happens only in extended basic blocks that exceed 4k.
*/
func checkpool(ctxt *obj.Link, newprog obj.ProgAlloc, p *obj.Prog, sz int) bool {
if pool.size >= 0xff0 || immaddr(int32((p.Pc+int64(sz)+4)+4+int64(12+pool.size)-int64(pool.start+8))) == 0 {
return flushpool(ctxt, newprog, p, 1, 0)
func (c *ctxt5) checkpool(p *obj.Prog, sz int) bool {
if c.pool.size >= 0xff0 || immaddr(int32((p.Pc+int64(sz)+4)+4+int64(12+c.pool.size)-int64(c.pool.start+8))) == 0 {
return c.flushpool(p, 1, 0)
} else if p.Link == nil {
return flushpool(ctxt, newprog, p, 2, 0)
return c.flushpool(p, 2, 0)
}
return false
}
func flushpool(ctxt *obj.Link, newprog obj.ProgAlloc, p *obj.Prog, skip int, force int) bool {
if ctxt.Blitrl != nil {
func (c *ctxt5) flushpool(p *obj.Prog, skip int, force int) bool {
if c.blitrl != nil {
if skip != 0 {
if false && skip == 1 {
fmt.Printf("note: flush literal pool at %x: len=%d ref=%x\n", uint64(p.Pc+4), pool.size, pool.start)
fmt.Printf("note: flush literal pool at %x: len=%d ref=%x\n", uint64(p.Pc+4), c.pool.size, c.pool.start)
}
q := newprog()
q := c.newprog()
q.As = AB
q.To.Type = obj.TYPE_BRANCH
q.Pcond = p.Link
q.Link = ctxt.Blitrl
q.Link = c.blitrl
q.Pos = p.Pos
ctxt.Blitrl = q
} else if force == 0 && (p.Pc+int64(12+pool.size)-int64(pool.start) < 2048) { // 12 take into account the maximum nacl literal pool alignment padding size
c.blitrl = q
} else if force == 0 && (p.Pc+int64(12+c.pool.size)-int64(c.pool.start) < 2048) { // 12 take into account the maximum nacl literal pool alignment padding size
return false
}
if ctxt.Headtype == obj.Hnacl && pool.size%16 != 0 {
if c.ctxt.Headtype == obj.Hnacl && c.pool.size%16 != 0 {
// if pool is not multiple of 16 bytes, add an alignment marker
q := newprog()
q := c.newprog()
q.As = ADATABUNDLEEND
ctxt.Elitrl.Link = q
ctxt.Elitrl = q
c.elitrl.Link = q
c.elitrl = q
}
// The line number for constant pool entries doesn't really matter.
// We set it to the line number of the preceding instruction so that
// there are no deltas to encode in the pc-line tables.
for q := ctxt.Blitrl; q != nil; q = q.Link {
for q := c.blitrl; q != nil; q = q.Link {
q.Pos = p.Pos
}
ctxt.Elitrl.Link = p.Link
p.Link = ctxt.Blitrl
c.elitrl.Link = p.Link
p.Link = c.blitrl
ctxt.Blitrl = nil /* BUG: should refer back to values until out-of-range */
ctxt.Elitrl = nil
pool.size = 0
pool.start = 0
pool.extra = 0
c.blitrl = nil /* BUG: should refer back to values until out-of-range */
c.elitrl = nil
c.pool.size = 0
c.pool.start = 0
c.pool.extra = 0
return true
}
return false
}
func addpool(ctxt *obj.Link, newprog obj.ProgAlloc, p *obj.Prog, a *obj.Addr) {
var t obj.Prog
c := aclass(ctxt, a)
t.Ctxt = ctxt
func (c *ctxt5) addpool(p *obj.Prog, a *obj.Addr) {
t := c.newprog()
t.As = AWORD
switch c {
switch c.aclass(a) {
default:
t.To.Offset = a.Offset
t.To.Sym = a.Sym
t.To.Type = a.Type
t.To.Name = a.Name
if ctxt.Flag_shared && t.To.Sym != nil {
if c.ctxt.Flag_shared && t.To.Sym != nil {
t.Rel = p
}
......@@ -877,11 +888,11 @@ func addpool(ctxt *obj.Link, newprog obj.ProgAlloc, p *obj.Prog, a *obj.Addr) {
C_LAUTO,
C_LACON:
t.To.Type = obj.TYPE_CONST
t.To.Offset = ctxt.Instoffset
t.To.Offset = c.instoffset
}
if t.Rel == nil {
for q := ctxt.Blitrl; q != nil; q = q.Link { /* could hash on t.t0.offset */
for q := c.blitrl; q != nil; q = q.Link { /* could hash on t.t0.offset */
if q.Rel == nil && q.To == t.To {
p.Pcond = q
return
......@@ -889,42 +900,42 @@ func addpool(ctxt *obj.Link, newprog obj.ProgAlloc, p *obj.Prog, a *obj.Addr) {
}
}
if ctxt.Headtype == obj.Hnacl && pool.size%16 == 0 {
if c.ctxt.Headtype == obj.Hnacl && c.pool.size%16 == 0 {
// start a new data bundle
q := newprog()
q := c.newprog()
q.As = ADATABUNDLE
q.Pc = int64(pool.size)
pool.size += 4
if ctxt.Blitrl == nil {
ctxt.Blitrl = q
pool.start = uint32(p.Pc)
q.Pc = int64(c.pool.size)
c.pool.size += 4
if c.blitrl == nil {
c.blitrl = q
c.pool.start = uint32(p.Pc)
} else {
ctxt.Elitrl.Link = q
c.elitrl.Link = q
}
ctxt.Elitrl = q
c.elitrl = q
}
q := newprog()
*q = t
q.Pc = int64(pool.size)
q := c.newprog()
*q = *t
q.Pc = int64(c.pool.size)
if ctxt.Blitrl == nil {
ctxt.Blitrl = q
pool.start = uint32(p.Pc)
if c.blitrl == nil {
c.blitrl = q
c.pool.start = uint32(p.Pc)
} else {
ctxt.Elitrl.Link = q
c.elitrl.Link = q
}
ctxt.Elitrl = q
pool.size += 4
c.elitrl = q
c.pool.size += 4
p.Pcond = q
}
func regoff(ctxt *obj.Link, a *obj.Addr) int32 {
ctxt.Instoffset = 0
aclass(ctxt, a)
return int32(ctxt.Instoffset)
func (c *ctxt5) regoff(a *obj.Addr) int32 {
c.instoffset = 0
c.aclass(a)
return int32(c.instoffset)
}
func immrot(v uint32) int32 {
......@@ -962,13 +973,13 @@ func immhalf(v int32) bool {
return false
}
func aclass(ctxt *obj.Link, a *obj.Addr) int {
func (c *ctxt5) aclass(a *obj.Addr) int {
switch a.Type {
case obj.TYPE_NONE:
return C_NONE
case obj.TYPE_REG:
ctxt.Instoffset = 0
c.instoffset = 0
if REG_R0 <= a.Reg && a.Reg <= REG_R15 {
return C_REG
}
......@@ -1005,9 +1016,9 @@ func aclass(ctxt *obj.Link, a *obj.Addr) int {
return C_GOK
}
ctxt.Instoffset = 0 // s.b. unused but just in case
c.instoffset = 0 // s.b. unused but just in case
if a.Sym.Type == obj.STLSBSS {
if ctxt.Flag_shared {
if c.ctxt.Flag_shared {
return C_TLS_IE
} else {
return C_TLS_LE
......@@ -1017,9 +1028,9 @@ func aclass(ctxt *obj.Link, a *obj.Addr) int {
return C_ADDR
case obj.NAME_AUTO:
ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset
if t := immaddr(int32(ctxt.Instoffset)); t != 0 {
if immhalf(int32(ctxt.Instoffset)) {
c.instoffset = c.autosize + a.Offset
if t := immaddr(int32(c.instoffset)); t != 0 {
if immhalf(int32(c.instoffset)) {
if immfloat(t) {
return C_HFAUTO
}
......@@ -1035,9 +1046,9 @@ func aclass(ctxt *obj.Link, a *obj.Addr) int {
return C_LAUTO
case obj.NAME_PARAM:
ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset + 4
if t := immaddr(int32(ctxt.Instoffset)); t != 0 {
if immhalf(int32(ctxt.Instoffset)) {
c.instoffset = c.autosize + a.Offset + 4
if t := immaddr(int32(c.instoffset)); t != 0 {
if immhalf(int32(c.instoffset)) {
if immfloat(t) {
return C_HFAUTO
}
......@@ -1053,9 +1064,9 @@ func aclass(ctxt *obj.Link, a *obj.Addr) int {
return C_LAUTO
case obj.NAME_NONE:
ctxt.Instoffset = a.Offset
if t := immaddr(int32(ctxt.Instoffset)); t != 0 {
if immhalf(int32(ctxt.Instoffset)) { /* n.b. that it will also satisfy immrot */
c.instoffset = a.Offset
if t := immaddr(int32(c.instoffset)); t != 0 {
if immhalf(int32(c.instoffset)) { /* n.b. that it will also satisfy immrot */
if immfloat(t) {
return C_HFOREG
}
......@@ -1065,16 +1076,16 @@ func aclass(ctxt *obj.Link, a *obj.Addr) int {
if immfloat(t) {
return C_FOREG /* n.b. that it will also satisfy immrot */
}
if immrot(uint32(ctxt.Instoffset)) != 0 {
if immrot(uint32(c.instoffset)) != 0 {
return C_SROREG
}
if immhalf(int32(ctxt.Instoffset)) {
if immhalf(int32(c.instoffset)) {
return C_HOREG
}
return C_SOREG
}
if immrot(uint32(ctxt.Instoffset)) != 0 {
if immrot(uint32(c.instoffset)) != 0 {
return C_ROREG
}
return C_LOREG
......@@ -1083,10 +1094,10 @@ func aclass(ctxt *obj.Link, a *obj.Addr) int {
return C_GOK
case obj.TYPE_FCONST:
if chipzero5(ctxt, a.Val.(float64)) >= 0 {
if c.chipzero5(a.Val.(float64)) >= 0 {
return C_ZFCON
}
if chipfloat5(ctxt, a.Val.(float64)) >= 0 {
if c.chipfloat5(a.Val.(float64)) >= 0 {
return C_SFCON
}
return C_LFCON
......@@ -1098,15 +1109,15 @@ func aclass(ctxt *obj.Link, a *obj.Addr) int {
obj.TYPE_ADDR:
switch a.Name {
case obj.NAME_NONE:
ctxt.Instoffset = a.Offset
c.instoffset = a.Offset
if a.Reg != 0 {
return aconsize(ctxt)
return c.aconsize()
}
if immrot(uint32(ctxt.Instoffset)) != 0 {
if immrot(uint32(c.instoffset)) != 0 {
return C_RCON
}
if immrot(^uint32(ctxt.Instoffset)) != 0 {
if immrot(^uint32(c.instoffset)) != 0 {
return C_NCON
}
return C_LCON
......@@ -1118,16 +1129,16 @@ func aclass(ctxt *obj.Link, a *obj.Addr) int {
if s == nil {
break
}
ctxt.Instoffset = 0 // s.b. unused but just in case
c.instoffset = 0 // s.b. unused but just in case
return C_LCONADDR
case obj.NAME_AUTO:
ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset
return aconsize(ctxt)
c.instoffset = c.autosize + a.Offset
return c.aconsize()
case obj.NAME_PARAM:
ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset + 4
return aconsize(ctxt)
c.instoffset = c.autosize + a.Offset + 4
return c.aconsize()
}
return C_GOK
......@@ -1139,31 +1150,31 @@ func aclass(ctxt *obj.Link, a *obj.Addr) int {
return C_GOK
}
func aconsize(ctxt *obj.Link) int {
if immrot(uint32(ctxt.Instoffset)) != 0 {
func (c *ctxt5) aconsize() int {
if immrot(uint32(c.instoffset)) != 0 {
return C_RACON
}
if immrot(uint32(-ctxt.Instoffset)) != 0 {
if immrot(uint32(-c.instoffset)) != 0 {
return C_RACON
}
return C_LACON
}
func oplook(ctxt *obj.Link, p *obj.Prog) *Optab {
func (c *ctxt5) oplook(p *obj.Prog) *Optab {
a1 := int(p.Optab)
if a1 != 0 {
return &optab[a1-1]
}
a1 = int(p.From.Class)
if a1 == 0 {
a1 = aclass(ctxt, &p.From) + 1
a1 = c.aclass(&p.From) + 1
p.From.Class = int8(a1)
}
a1--
a3 := int(p.To.Class)
if a3 == 0 {
a3 = aclass(ctxt, &p.To) + 1
a3 = c.aclass(&p.To) + 1
p.To.Class = int8(a3)
}
......@@ -1189,8 +1200,8 @@ func oplook(ctxt *obj.Link, p *obj.Prog) *Optab {
}
}
ctxt.Diag("illegal combination %v; %v %v %v, %d %d", p, DRconv(a1), DRconv(a2), DRconv(a3), p.From.Type, p.To.Type)
ctxt.Diag("from %d %d to %d %d\n", p.From.Type, p.From.Name, p.To.Type, p.To.Name)
c.ctxt.Diag("illegal combination %v; %v %v %v, %d %d", p, DRconv(a1), DRconv(a2), DRconv(a3), p.From.Type, p.To.Type)
c.ctxt.Diag("from %d %d to %d %d\n", p.From.Type, p.From.Name, p.To.Type, p.To.Name)
if ops == nil {
ops = optab
}
......@@ -1300,6 +1311,13 @@ func buildop(ctxt *obj.Link) {
return
}
deferreturn = ctxt.Lookup("runtime.deferreturn", 0)
symdiv = ctxt.Lookup("_div", 0)
symdivu = ctxt.Lookup("_divu", 0)
symmod = ctxt.Lookup("_mod", 0)
symmodu = ctxt.Lookup("_modu", 0)
var n int
for i := 0; i < C_GOK; i++ {
......@@ -1478,21 +1496,20 @@ func buildop(ctxt *obj.Link) {
}
}
func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
ctxt.Printp = p
func (c *ctxt5) asmout(p *obj.Prog, o *Optab, out []uint32) {
c.printp = p
o1 := uint32(0)
o2 := uint32(0)
o3 := uint32(0)
o4 := uint32(0)
o5 := uint32(0)
o6 := uint32(0)
ctxt.Armsize += int32(o.size)
if false { /*debug['P']*/
fmt.Printf("%x: %v\ttype %d\n", uint32(p.Pc), p, o.type_)
}
switch o.type_ {
default:
ctxt.Diag("%v: unknown asm %d", p, o.type_)
c.ctxt.Diag("%v: unknown asm %d", p, o.type_)
case 0: /* pseudo ops */
if false { /*debug['G']*/
......@@ -1500,7 +1517,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
}
case 1: /* op R,[R],R */
o1 = oprrr(ctxt, p, p.As, int(p.Scond))
o1 = c.oprrr(p, p.As, int(p.Scond))
rf := int(p.From.Reg)
rt := int(p.To.Reg)
......@@ -1516,10 +1533,10 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o1 |= (uint32(rf)&15)<<0 | (uint32(r)&15)<<16 | (uint32(rt)&15)<<12
case 2: /* movbu $I,[R],R */
aclass(ctxt, &p.From)
c.aclass(&p.From)
o1 = oprrr(ctxt, p, p.As, int(p.Scond))
o1 |= uint32(immrot(uint32(ctxt.Instoffset)))
o1 = c.oprrr(p, p.As, int(p.Scond))
o1 |= uint32(immrot(uint32(c.instoffset)))
rt := int(p.To.Reg)
r := int(p.Reg)
if p.To.Type == obj.TYPE_NONE {
......@@ -1533,16 +1550,16 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o1 |= (uint32(r)&15)<<16 | (uint32(rt)&15)<<12
case 3: /* add R<<[IR],[R],R */
o1 = mov(ctxt, p)
o1 = c.mov(p)
case 4: /* MOVW $off(R), R -> add $off,[R],R */
aclass(ctxt, &p.From)
if ctxt.Instoffset < 0 {
o1 = oprrr(ctxt, p, ASUB, int(p.Scond))
o1 |= uint32(immrot(uint32(-ctxt.Instoffset)))
c.aclass(&p.From)
if c.instoffset < 0 {
o1 = c.oprrr(p, ASUB, int(p.Scond))
o1 |= uint32(immrot(uint32(-c.instoffset)))
} else {
o1 = oprrr(ctxt, p, AADD, int(p.Scond))
o1 |= uint32(immrot(uint32(ctxt.Instoffset)))
o1 = c.oprrr(p, AADD, int(p.Scond))
o1 |= uint32(immrot(uint32(c.instoffset)))
}
r := int(p.From.Reg)
if r == 0 {
......@@ -1552,12 +1569,12 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o1 |= (uint32(p.To.Reg) & 15) << 12
case 5: /* bra s */
o1 = opbra(ctxt, p, p.As, int(p.Scond))
o1 = c.opbra(p, p.As, int(p.Scond))
v := int32(-8)
if p.To.Sym != nil {
rel := obj.Addrel(ctxt.Cursym)
rel.Off = int32(ctxt.Pc)
rel := obj.Addrel(c.cursym)
rel.Off = int32(c.pc)
rel.Siz = 4
rel.Sym = p.To.Sym
v += int32(p.To.Offset)
......@@ -1567,45 +1584,45 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
}
if p.Pcond != nil {
v = int32((p.Pcond.Pc - ctxt.Pc) - 8)
v = int32((p.Pcond.Pc - c.pc) - 8)
}
o1 |= (uint32(v) >> 2) & 0xffffff
case 6: /* b ,O(R) -> add $O,R,PC */
aclass(ctxt, &p.To)
c.aclass(&p.To)
o1 = oprrr(ctxt, p, AADD, int(p.Scond))
o1 |= uint32(immrot(uint32(ctxt.Instoffset)))
o1 = c.oprrr(p, AADD, int(p.Scond))
o1 |= uint32(immrot(uint32(c.instoffset)))
o1 |= (uint32(p.To.Reg) & 15) << 16
o1 |= (REGPC & 15) << 12
case 7: /* bl (R) -> blx R */
aclass(ctxt, &p.To)
c.aclass(&p.To)
if ctxt.Instoffset != 0 {
ctxt.Diag("%v: doesn't support BL offset(REG) with non-zero offset %d", p, ctxt.Instoffset)
if c.instoffset != 0 {
c.ctxt.Diag("%v: doesn't support BL offset(REG) with non-zero offset %d", p, c.instoffset)
}
o1 = oprrr(ctxt, p, ABL, int(p.Scond))
o1 = c.oprrr(p, ABL, int(p.Scond))
o1 |= (uint32(p.To.Reg) & 15) << 0
rel := obj.Addrel(ctxt.Cursym)
rel.Off = int32(ctxt.Pc)
rel := obj.Addrel(c.cursym)
rel.Off = int32(c.pc)
rel.Siz = 0
rel.Type = obj.R_CALLIND
case 8: /* sll $c,[R],R -> mov (R<<$c),R */
aclass(ctxt, &p.From)
c.aclass(&p.From)
o1 = oprrr(ctxt, p, p.As, int(p.Scond))
o1 = c.oprrr(p, p.As, int(p.Scond))
r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
o1 |= (uint32(r) & 15) << 0
o1 |= uint32((ctxt.Instoffset & 31) << 7)
o1 |= uint32((c.instoffset & 31) << 7)
o1 |= (uint32(p.To.Reg) & 15) << 12
case 9: /* sll R,[R],R -> mov (R<<R),R */
o1 = oprrr(ctxt, p, p.As, int(p.Scond))
o1 = c.oprrr(p, p.As, int(p.Scond))
r := int(p.Reg)
if r == 0 {
......@@ -1616,34 +1633,34 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o1 |= (uint32(p.To.Reg) & 15) << 12
case 10: /* swi [$con] */
o1 = oprrr(ctxt, p, p.As, int(p.Scond))
o1 = c.oprrr(p, p.As, int(p.Scond))
if p.To.Type != obj.TYPE_NONE {
aclass(ctxt, &p.To)
o1 |= uint32(ctxt.Instoffset & 0xffffff)
c.aclass(&p.To)
o1 |= uint32(c.instoffset & 0xffffff)
}
case 11: /* word */
aclass(ctxt, &p.To)
c.aclass(&p.To)
o1 = uint32(ctxt.Instoffset)
o1 = uint32(c.instoffset)
if p.To.Sym != nil {
// This case happens with words generated
// in the PC stream as part of the literal pool.
rel := obj.Addrel(ctxt.Cursym)
// in the PC stream as part of the literal pool (c.pool).
rel := obj.Addrel(c.cursym)
rel.Off = int32(ctxt.Pc)
rel.Off = int32(c.pc)
rel.Siz = 4
rel.Sym = p.To.Sym
rel.Add = p.To.Offset
if ctxt.Flag_shared {
if c.ctxt.Flag_shared {
if p.To.Name == obj.NAME_GOTREF {
rel.Type = obj.R_GOTPCREL
} else {
rel.Type = obj.R_PCREL
}
rel.Add += ctxt.Pc - p.Rel.Pc - 8
rel.Add += c.pc - p.Rel.Pc - 8
} else {
rel.Type = obj.R_ADDR
}
......@@ -1651,19 +1668,19 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
}
case 12: /* movw $lcon, reg */
o1 = omvl(ctxt, p, &p.From, int(p.To.Reg))
o1 = c.omvl(p, &p.From, int(p.To.Reg))
if o.flag&LPCREL != 0 {
o2 = oprrr(ctxt, p, AADD, int(p.Scond)) | (uint32(p.To.Reg)&15)<<0 | (REGPC&15)<<16 | (uint32(p.To.Reg)&15)<<12
o2 = c.oprrr(p, AADD, int(p.Scond)) | (uint32(p.To.Reg)&15)<<0 | (REGPC&15)<<16 | (uint32(p.To.Reg)&15)<<12
}
case 13: /* op $lcon, [R], R */
o1 = omvl(ctxt, p, &p.From, REGTMP)
o1 = c.omvl(p, &p.From, REGTMP)
if o1 == 0 {
break
}
o2 = oprrr(ctxt, p, p.As, int(p.Scond))
o2 = c.oprrr(p, p.As, int(p.Scond))
o2 |= REGTMP & 15
r := int(p.Reg)
if p.As == AMOVW || p.As == AMVN {
......@@ -1677,12 +1694,12 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
}
case 14: /* movb/movbu/movh/movhu R,R */
o1 = oprrr(ctxt, p, ASLL, int(p.Scond))
o1 = c.oprrr(p, ASLL, int(p.Scond))
if p.As == AMOVBU || p.As == AMOVHU {
o2 = oprrr(ctxt, p, ASRL, int(p.Scond))
o2 = c.oprrr(p, ASRL, int(p.Scond))
} else {
o2 = oprrr(ctxt, p, ASRA, int(p.Scond))
o2 = c.oprrr(p, ASRA, int(p.Scond))
}
r := int(p.To.Reg)
......@@ -1697,7 +1714,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
}
case 15: /* mul r,[r,]r */
o1 = oprrr(ctxt, p, p.As, int(p.Scond))
o1 = c.oprrr(p, p.As, int(p.Scond))
rf := int(p.From.Reg)
rt := int(p.To.Reg)
......@@ -1712,7 +1729,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
if false {
if rt == r || rf == REGPC&15 || r == REGPC&15 || rt == REGPC&15 {
ctxt.Diag("%v: bad registers in MUL", p)
c.ctxt.Diag("%v: bad registers in MUL", p)
}
}
......@@ -1724,7 +1741,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o2 = 0
case 17:
o1 = oprrr(ctxt, p, p.As, int(p.Scond))
o1 = c.oprrr(p, p.As, int(p.Scond))
rf := int(p.From.Reg)
rt := int(p.To.Reg)
rt2 := int(p.To.Offset)
......@@ -1732,28 +1749,28 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o1 |= (uint32(rf)&15)<<8 | (uint32(r)&15)<<0 | (uint32(rt)&15)<<16 | (uint32(rt2)&15)<<12
case 20: /* mov/movb/movbu R,O(R) */
aclass(ctxt, &p.To)
c.aclass(&p.To)
r := int(p.To.Reg)
if r == 0 {
r = int(o.param)
}
o1 = osr(ctxt, p.As, int(p.From.Reg), int32(ctxt.Instoffset), r, int(p.Scond))
o1 = c.osr(p.As, int(p.From.Reg), int32(c.instoffset), r, int(p.Scond))
case 21: /* mov/movbu O(R),R -> lr */
aclass(ctxt, &p.From)
c.aclass(&p.From)
r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
o1 = olr(ctxt, int32(ctxt.Instoffset), r, int(p.To.Reg), int(p.Scond))
o1 = c.olr(int32(c.instoffset), r, int(p.To.Reg), int(p.Scond))
if p.As != AMOVW {
o1 |= 1 << 22
}
case 30: /* mov/movb/movbu R,L(R) */
o1 = omvl(ctxt, p, &p.To, REGTMP)
o1 = c.omvl(p, &p.To, REGTMP)
if o1 == 0 {
break
......@@ -1762,13 +1779,13 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
if r == 0 {
r = int(o.param)
}
o2 = osrr(ctxt, int(p.From.Reg), REGTMP&15, r, int(p.Scond))
o2 = c.osrr(int(p.From.Reg), REGTMP&15, r, int(p.Scond))
if p.As != AMOVW {
o2 |= 1 << 22
}
case 31: /* mov/movbu L(R),R -> lr[b] */
o1 = omvl(ctxt, p, &p.From, REGTMP)
o1 = c.omvl(p, &p.From, REGTMP)
if o1 == 0 {
break
......@@ -1777,19 +1794,19 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
if r == 0 {
r = int(o.param)
}
o2 = olrr(ctxt, REGTMP&15, r, int(p.To.Reg), int(p.Scond))
o2 = c.olrr(REGTMP&15, r, int(p.To.Reg), int(p.Scond))
if p.As == AMOVBU || p.As == AMOVBS || p.As == AMOVB {
o2 |= 1 << 22
}
case 34: /* mov $lacon,R */
o1 = omvl(ctxt, p, &p.From, REGTMP)
o1 = c.omvl(p, &p.From, REGTMP)
if o1 == 0 {
break
}
o2 = oprrr(ctxt, p, AADD, int(p.Scond))
o2 = c.oprrr(p, AADD, int(p.Scond))
o2 |= REGTMP & 15
r := int(p.From.Reg)
if r == 0 {
......@@ -1818,14 +1835,14 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o1 |= (uint32(p.From.Reg) & 15) << 0
case 37: /* mov $con,PSR */
aclass(ctxt, &p.From)
c.aclass(&p.From)
o1 = 2<<23 | 0x29f<<12 | 0<<4
if p.Scond&C_FBIT != 0 {
o1 ^= 0x010 << 12
}
o1 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28
o1 |= uint32(immrot(uint32(ctxt.Instoffset)))
o1 |= uint32(immrot(uint32(c.instoffset)))
o1 |= (uint32(p.To.Reg) & 1) << 22
o1 |= (uint32(p.From.Reg) & 15) << 0
......@@ -1836,18 +1853,18 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o1 |= uint32(p.From.Offset & 0xffff)
o1 |= (uint32(p.To.Reg) & 15) << 16
aclass(ctxt, &p.To)
c.aclass(&p.To)
case 39: /* movm oreg,$con -> ldm */
o1 = 0x4<<25 | 1<<20
o1 |= uint32(p.To.Offset & 0xffff)
o1 |= (uint32(p.From.Reg) & 15) << 16
aclass(ctxt, &p.From)
c.aclass(&p.From)
}
if ctxt.Instoffset != 0 {
ctxt.Diag("offset must be zero in MOVM; %v", p)
if c.instoffset != 0 {
c.ctxt.Diag("offset must be zero in MOVM; %v", p)
}
o1 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28
if p.Scond&C_PBIT != 0 {
......@@ -1864,10 +1881,10 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
}
case 40: /* swp oreg,reg,reg */
aclass(ctxt, &p.From)
c.aclass(&p.From)
if ctxt.Instoffset != 0 {
ctxt.Diag("offset must be zero in SWP")
if c.instoffset != 0 {
c.ctxt.Diag("offset must be zero in SWP")
}
o1 = 0x2<<23 | 0x9<<4
if p.As != ASWPW {
......@@ -1882,25 +1899,25 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o1 = 0xe8fd8000
case 50: /* floating point store */
v := regoff(ctxt, &p.To)
v := c.regoff(&p.To)
r := int(p.To.Reg)
if r == 0 {
r = int(o.param)
}
o1 = ofsr(ctxt, p.As, int(p.From.Reg), v, r, int(p.Scond), p)
o1 = c.ofsr(p.As, int(p.From.Reg), v, r, int(p.Scond), p)
case 51: /* floating point load */
v := regoff(ctxt, &p.From)
v := c.regoff(&p.From)
r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
o1 = ofsr(ctxt, p.As, int(p.To.Reg), v, r, int(p.Scond), p) | 1<<20
o1 = c.ofsr(p.As, int(p.To.Reg), v, r, int(p.Scond), p) | 1<<20
case 52: /* floating point store, int32 offset UGLY */
o1 = omvl(ctxt, p, &p.To, REGTMP)
o1 = c.omvl(p, &p.To, REGTMP)
if o1 == 0 {
break
......@@ -1909,11 +1926,11 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
if r == 0 {
r = int(o.param)
}
o2 = oprrr(ctxt, p, AADD, int(p.Scond)) | (REGTMP&15)<<12 | (REGTMP&15)<<16 | (uint32(r)&15)<<0
o3 = ofsr(ctxt, p.As, int(p.From.Reg), 0, REGTMP, int(p.Scond), p)
o2 = c.oprrr(p, AADD, int(p.Scond)) | (REGTMP&15)<<12 | (REGTMP&15)<<16 | (uint32(r)&15)<<0
o3 = c.ofsr(p.As, int(p.From.Reg), 0, REGTMP, int(p.Scond), p)
case 53: /* floating point load, int32 offset UGLY */
o1 = omvl(ctxt, p, &p.From, REGTMP)
o1 = c.omvl(p, &p.From, REGTMP)
if o1 == 0 {
break
......@@ -1922,11 +1939,11 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
if r == 0 {
r = int(o.param)
}
o2 = oprrr(ctxt, p, AADD, int(p.Scond)) | (REGTMP&15)<<12 | (REGTMP&15)<<16 | (uint32(r)&15)<<0
o3 = ofsr(ctxt, p.As, int(p.To.Reg), 0, (REGTMP&15), int(p.Scond), p) | 1<<20
o2 = c.oprrr(p, AADD, int(p.Scond)) | (REGTMP&15)<<12 | (REGTMP&15)<<16 | (uint32(r)&15)<<0
o3 = c.ofsr(p.As, int(p.To.Reg), 0, (REGTMP&15), int(p.Scond), p) | 1<<20
case 54: /* floating point arith */
o1 = oprrr(ctxt, p, p.As, int(p.Scond))
o1 = c.oprrr(p, p.As, int(p.Scond))
rf := int(p.From.Reg)
rt := int(p.To.Reg)
......@@ -1951,7 +1968,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o1 |= ((uint32(p.From.Reg)&1)+1)<<21 | (uint32(p.To.Reg)&15)<<12 | 1<<20
case 58: /* movbu R,R */
o1 = oprrr(ctxt, p, AAND, int(p.Scond))
o1 = c.oprrr(p, AAND, int(p.Scond))
o1 |= uint32(immrot(0xff))
rt := int(p.To.Reg)
......@@ -1967,95 +1984,95 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
case 59: /* movw/bu R<<I(R),R -> ldr indexed */
if p.From.Reg == 0 {
if p.As != AMOVW {
ctxt.Diag("byte MOV from shifter operand")
c.ctxt.Diag("byte MOV from shifter operand")
}
o1 = mov(ctxt, p)
o1 = c.mov(p)
break
}
if p.From.Offset&(1<<4) != 0 {
ctxt.Diag("bad shift in LDR")
c.ctxt.Diag("bad shift in LDR")
}
o1 = olrr(ctxt, int(p.From.Offset), int(p.From.Reg), int(p.To.Reg), int(p.Scond))
o1 = c.olrr(int(p.From.Offset), int(p.From.Reg), int(p.To.Reg), int(p.Scond))
if p.As == AMOVBU {
o1 |= 1 << 22
}
case 60: /* movb R(R),R -> ldrsb indexed */
if p.From.Reg == 0 {
ctxt.Diag("byte MOV from shifter operand")
o1 = mov(ctxt, p)
c.ctxt.Diag("byte MOV from shifter operand")
o1 = c.mov(p)
break
}
if p.From.Offset&(^0xf) != 0 {
ctxt.Diag("bad shift in LDRSB")
c.ctxt.Diag("bad shift in LDRSB")
}
o1 = olhrr(ctxt, int(p.From.Offset), int(p.From.Reg), int(p.To.Reg), int(p.Scond))
o1 = c.olhrr(int(p.From.Offset), int(p.From.Reg), int(p.To.Reg), int(p.Scond))
o1 ^= 1<<5 | 1<<6
case 61: /* movw/b/bu R,R<<[IR](R) -> str indexed */
if p.To.Reg == 0 {
ctxt.Diag("MOV to shifter operand")
c.ctxt.Diag("MOV to shifter operand")
}
o1 = osrr(ctxt, int(p.From.Reg), int(p.To.Offset), int(p.To.Reg), int(p.Scond))
o1 = c.osrr(int(p.From.Reg), int(p.To.Offset), int(p.To.Reg), int(p.Scond))
if p.As == AMOVB || p.As == AMOVBS || p.As == AMOVBU {
o1 |= 1 << 22
}
/* reloc ops */
case 64: /* mov/movb/movbu R,addr */
o1 = omvl(ctxt, p, &p.To, REGTMP)
o1 = c.omvl(p, &p.To, REGTMP)
if o1 == 0 {
break
}
o2 = osr(ctxt, p.As, int(p.From.Reg), 0, REGTMP, int(p.Scond))
o2 = c.osr(p.As, int(p.From.Reg), 0, REGTMP, int(p.Scond))
if o.flag&LPCREL != 0 {
o3 = o2
o2 = oprrr(ctxt, p, AADD, int(p.Scond)) | REGTMP&15 | (REGPC&15)<<16 | (REGTMP&15)<<12
o2 = c.oprrr(p, AADD, int(p.Scond)) | REGTMP&15 | (REGPC&15)<<16 | (REGTMP&15)<<12
}
case 65: /* mov/movbu addr,R */
o1 = omvl(ctxt, p, &p.From, REGTMP)
o1 = c.omvl(p, &p.From, REGTMP)
if o1 == 0 {
break
}
o2 = olr(ctxt, 0, REGTMP, int(p.To.Reg), int(p.Scond))
o2 = c.olr(0, REGTMP, int(p.To.Reg), int(p.Scond))
if p.As == AMOVBU || p.As == AMOVBS || p.As == AMOVB {
o2 |= 1 << 22
}
if o.flag&LPCREL != 0 {
o3 = o2
o2 = oprrr(ctxt, p, AADD, int(p.Scond)) | REGTMP&15 | (REGPC&15)<<16 | (REGTMP&15)<<12
o2 = c.oprrr(p, AADD, int(p.Scond)) | REGTMP&15 | (REGPC&15)<<16 | (REGTMP&15)<<12
}
case 101: /* movw tlsvar,R, local exec*/
if p.Scond&C_SCOND != C_SCOND_NONE {
ctxt.Diag("conditional tls")
c.ctxt.Diag("conditional tls")
}
o1 = omvl(ctxt, p, &p.From, int(p.To.Reg))
o1 = c.omvl(p, &p.From, int(p.To.Reg))
case 102: /* movw tlsvar,R, initial exec*/
if p.Scond&C_SCOND != C_SCOND_NONE {
ctxt.Diag("conditional tls")
c.ctxt.Diag("conditional tls")
}
o1 = omvl(ctxt, p, &p.From, int(p.To.Reg))
o2 = olrr(ctxt, int(p.To.Reg)&15, (REGPC & 15), int(p.To.Reg), int(p.Scond))
o1 = c.omvl(p, &p.From, int(p.To.Reg))
o2 = c.olrr(int(p.To.Reg)&15, (REGPC & 15), int(p.To.Reg), int(p.Scond))
case 103: /* word tlsvar, local exec */
if p.To.Sym == nil {
ctxt.Diag("nil sym in tls %v", p)
c.ctxt.Diag("nil sym in tls %v", p)
}
if p.To.Offset != 0 {
ctxt.Diag("offset against tls var in %v", p)
c.ctxt.Diag("offset against tls var in %v", p)
}
// This case happens with words generated in the PC stream as part of
// the literal pool.
rel := obj.Addrel(ctxt.Cursym)
// the literal c.pool.
rel := obj.Addrel(c.cursym)
rel.Off = int32(ctxt.Pc)
rel.Off = int32(c.pc)
rel.Siz = 4
rel.Sym = p.To.Sym
rel.Type = obj.R_TLS_LE
......@@ -2063,60 +2080,60 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
case 104: /* word tlsvar, initial exec */
if p.To.Sym == nil {
ctxt.Diag("nil sym in tls %v", p)
c.ctxt.Diag("nil sym in tls %v", p)
}
if p.To.Offset != 0 {
ctxt.Diag("offset against tls var in %v", p)
c.ctxt.Diag("offset against tls var in %v", p)
}
rel := obj.Addrel(ctxt.Cursym)
rel.Off = int32(ctxt.Pc)
rel := obj.Addrel(c.cursym)
rel.Off = int32(c.pc)
rel.Siz = 4
rel.Sym = p.To.Sym
rel.Type = obj.R_TLS_IE
rel.Add = ctxt.Pc - p.Rel.Pc - 8 - int64(rel.Siz)
rel.Add = c.pc - p.Rel.Pc - 8 - int64(rel.Siz)
case 68: /* floating point store -> ADDR */
o1 = omvl(ctxt, p, &p.To, REGTMP)
o1 = c.omvl(p, &p.To, REGTMP)
if o1 == 0 {
break
}
o2 = ofsr(ctxt, p.As, int(p.From.Reg), 0, REGTMP, int(p.Scond), p)
o2 = c.ofsr(p.As, int(p.From.Reg), 0, REGTMP, int(p.Scond), p)
if o.flag&LPCREL != 0 {
o3 = o2
o2 = oprrr(ctxt, p, AADD, int(p.Scond)) | REGTMP&15 | (REGPC&15)<<16 | (REGTMP&15)<<12
o2 = c.oprrr(p, AADD, int(p.Scond)) | REGTMP&15 | (REGPC&15)<<16 | (REGTMP&15)<<12
}
case 69: /* floating point load <- ADDR */
o1 = omvl(ctxt, p, &p.From, REGTMP)
o1 = c.omvl(p, &p.From, REGTMP)
if o1 == 0 {
break
}
o2 = ofsr(ctxt, p.As, int(p.To.Reg), 0, (REGTMP&15), int(p.Scond), p) | 1<<20
o2 = c.ofsr(p.As, int(p.To.Reg), 0, (REGTMP&15), int(p.Scond), p) | 1<<20
if o.flag&LPCREL != 0 {
o3 = o2
o2 = oprrr(ctxt, p, AADD, int(p.Scond)) | REGTMP&15 | (REGPC&15)<<16 | (REGTMP&15)<<12
o2 = c.oprrr(p, AADD, int(p.Scond)) | REGTMP&15 | (REGPC&15)<<16 | (REGTMP&15)<<12
}
/* ArmV4 ops: */
case 70: /* movh/movhu R,O(R) -> strh */
aclass(ctxt, &p.To)
c.aclass(&p.To)
r := int(p.To.Reg)
if r == 0 {
r = int(o.param)
}
o1 = oshr(ctxt, int(p.From.Reg), int32(ctxt.Instoffset), r, int(p.Scond))
o1 = c.oshr(int(p.From.Reg), int32(c.instoffset), r, int(p.Scond))
case 71: /* movb/movh/movhu O(R),R -> ldrsb/ldrsh/ldrh */
aclass(ctxt, &p.From)
c.aclass(&p.From)
r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
o1 = olhr(ctxt, int32(ctxt.Instoffset), r, int(p.To.Reg), int(p.Scond))
o1 = c.olhr(int32(c.instoffset), r, int(p.To.Reg), int(p.Scond))
if p.As == AMOVB || p.As == AMOVBS {
o1 ^= 1<<5 | 1<<6
} else if p.As == AMOVH || p.As == AMOVHS {
......@@ -2124,7 +2141,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
}
case 72: /* movh/movhu R,L(R) -> strh */
o1 = omvl(ctxt, p, &p.To, REGTMP)
o1 = c.omvl(p, &p.To, REGTMP)
if o1 == 0 {
break
......@@ -2133,10 +2150,10 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
if r == 0 {
r = int(o.param)
}
o2 = oshrr(ctxt, int(p.From.Reg), REGTMP&15, r, int(p.Scond))
o2 = c.oshrr(int(p.From.Reg), REGTMP&15, r, int(p.Scond))
case 73: /* movb/movh/movhu L(R),R -> ldrsb/ldrsh/ldrh */
o1 = omvl(ctxt, p, &p.From, REGTMP)
o1 = c.omvl(p, &p.From, REGTMP)
if o1 == 0 {
break
......@@ -2145,7 +2162,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
if r == 0 {
r = int(o.param)
}
o2 = olhrr(ctxt, REGTMP&15, r, int(p.To.Reg), int(p.Scond))
o2 = c.olhrr(REGTMP&15, r, int(p.To.Reg), int(p.Scond))
if p.As == AMOVB || p.As == AMOVBS {
o2 ^= 1<<5 | 1<<6
} else if p.As == AMOVH || p.As == AMOVHS {
......@@ -2153,36 +2170,36 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
}
case 74: /* bx $I */
ctxt.Diag("ABX $I")
c.ctxt.Diag("ABX $I")
case 75: /* bx O(R) */
aclass(ctxt, &p.To)
c.aclass(&p.To)
if ctxt.Instoffset != 0 {
ctxt.Diag("non-zero offset in ABX")
if c.instoffset != 0 {
c.ctxt.Diag("non-zero offset in ABX")
}
/*
o1 = oprrr(ctxt, p, AADD, p->scond) | immrot(0) | ((REGPC&15)<<16) | ((REGLINK&15)<<12); // mov PC, LR
o1 = c.oprrr(p, AADD, p->scond) | immrot(0) | ((REGPC&15)<<16) | ((REGLINK&15)<<12); // mov PC, LR
o2 = (((p->scond&C_SCOND) ^ C_SCOND_XOR)<<28) | (0x12fff<<8) | (1<<4) | ((p->to.reg&15) << 0); // BX R
*/
// p->to.reg may be REGLINK
o1 = oprrr(ctxt, p, AADD, int(p.Scond))
o1 = c.oprrr(p, AADD, int(p.Scond))
o1 |= uint32(immrot(uint32(ctxt.Instoffset)))
o1 |= uint32(immrot(uint32(c.instoffset)))
o1 |= (uint32(p.To.Reg) & 15) << 16
o1 |= (REGTMP & 15) << 12
o2 = oprrr(ctxt, p, AADD, int(p.Scond)) | uint32(immrot(0)) | (REGPC&15)<<16 | (REGLINK&15)<<12 // mov PC, LR
o3 = ((uint32(p.Scond)&C_SCOND)^C_SCOND_XOR)<<28 | 0x12fff<<8 | 1<<4 | REGTMP&15 // BX Rtmp
o2 = c.oprrr(p, AADD, int(p.Scond)) | uint32(immrot(0)) | (REGPC&15)<<16 | (REGLINK&15)<<12 // mov PC, LR
o3 = ((uint32(p.Scond)&C_SCOND)^C_SCOND_XOR)<<28 | 0x12fff<<8 | 1<<4 | REGTMP&15 // BX Rtmp
case 76: /* bx O(R) when returning from fn*/
ctxt.Diag("ABXRET")
c.ctxt.Diag("ABXRET")
case 77: /* ldrex oreg,reg */
aclass(ctxt, &p.From)
c.aclass(&p.From)
if ctxt.Instoffset != 0 {
ctxt.Diag("offset must be zero in LDREX")
if c.instoffset != 0 {
c.ctxt.Diag("offset must be zero in LDREX")
}
o1 = 0x19<<20 | 0xf9f
o1 |= (uint32(p.From.Reg) & 15) << 16
......@@ -2190,10 +2207,10 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o1 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28
case 78: /* strex reg,oreg,reg */
aclass(ctxt, &p.From)
c.aclass(&p.From)
if ctxt.Instoffset != 0 {
ctxt.Diag("offset must be zero in STREX")
if c.instoffset != 0 {
c.ctxt.Diag("offset must be zero in STREX")
}
o1 = 0x18<<20 | 0xf90
o1 |= (uint32(p.From.Reg) & 15) << 16
......@@ -2204,10 +2221,10 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
case 80: /* fmov zfcon,freg */
if p.As == AMOVD {
o1 = 0xeeb00b00 // VMOV imm 64
o2 = oprrr(ctxt, p, ASUBD, int(p.Scond))
o2 = c.oprrr(p, ASUBD, int(p.Scond))
} else {
o1 = 0x0eb00a00 // VMOV imm 32
o2 = oprrr(ctxt, p, ASUBF, int(p.Scond))
o2 = c.oprrr(p, ASUBF, int(p.Scond))
}
v := int32(0x70) // 1.0
......@@ -2230,73 +2247,73 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
}
o1 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28
o1 |= (uint32(p.To.Reg) & 15) << 12
v := int32(chipfloat5(ctxt, p.From.Val.(float64)))
v := int32(c.chipfloat5(p.From.Val.(float64)))
o1 |= (uint32(v) & 0xf) << 0
o1 |= (uint32(v) & 0xf0) << 12
case 82: /* fcmp freg,freg, */
o1 = oprrr(ctxt, p, p.As, int(p.Scond))
o1 = c.oprrr(p, p.As, int(p.Scond))
o1 |= (uint32(p.Reg)&15)<<12 | (uint32(p.From.Reg)&15)<<0
o2 = 0x0ef1fa10 // VMRS R15
o2 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28
case 83: /* fcmp freg,, */
o1 = oprrr(ctxt, p, p.As, int(p.Scond))
o1 = c.oprrr(p, p.As, int(p.Scond))
o1 |= (uint32(p.From.Reg)&15)<<12 | 1<<16
o2 = 0x0ef1fa10 // VMRS R15
o2 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28
case 84: /* movfw freg,freg - truncate float-to-fix */
o1 = oprrr(ctxt, p, p.As, int(p.Scond))
o1 = c.oprrr(p, p.As, int(p.Scond))
o1 |= (uint32(p.From.Reg) & 15) << 0
o1 |= (uint32(p.To.Reg) & 15) << 12
case 85: /* movwf freg,freg - fix-to-float */
o1 = oprrr(ctxt, p, p.As, int(p.Scond))
o1 = c.oprrr(p, p.As, int(p.Scond))
o1 |= (uint32(p.From.Reg) & 15) << 0
o1 |= (uint32(p.To.Reg) & 15) << 12
// macro for movfw freg,FTMP; movw FTMP,reg
case 86: /* movfw freg,reg - truncate float-to-fix */
o1 = oprrr(ctxt, p, p.As, int(p.Scond))
o1 = c.oprrr(p, p.As, int(p.Scond))
o1 |= (uint32(p.From.Reg) & 15) << 0
o1 |= (FREGTMP & 15) << 12
o2 = oprrr(ctxt, p, -AMOVFW, int(p.Scond))
o2 = c.oprrr(p, -AMOVFW, int(p.Scond))
o2 |= (FREGTMP & 15) << 16
o2 |= (uint32(p.To.Reg) & 15) << 12
// macro for movw reg,FTMP; movwf FTMP,freg
case 87: /* movwf reg,freg - fix-to-float */
o1 = oprrr(ctxt, p, -AMOVWF, int(p.Scond))
o1 = c.oprrr(p, -AMOVWF, int(p.Scond))
o1 |= (uint32(p.From.Reg) & 15) << 12
o1 |= (FREGTMP & 15) << 16
o2 = oprrr(ctxt, p, p.As, int(p.Scond))
o2 = c.oprrr(p, p.As, int(p.Scond))
o2 |= (FREGTMP & 15) << 0
o2 |= (uint32(p.To.Reg) & 15) << 12
case 88: /* movw reg,freg */
o1 = oprrr(ctxt, p, -AMOVWF, int(p.Scond))
o1 = c.oprrr(p, -AMOVWF, int(p.Scond))
o1 |= (uint32(p.From.Reg) & 15) << 12
o1 |= (uint32(p.To.Reg) & 15) << 16
case 89: /* movw freg,reg */
o1 = oprrr(ctxt, p, -AMOVFW, int(p.Scond))
o1 = c.oprrr(p, -AMOVFW, int(p.Scond))
o1 |= (uint32(p.From.Reg) & 15) << 16
o1 |= (uint32(p.To.Reg) & 15) << 12
case 91: /* ldrexd oreg,reg */
aclass(ctxt, &p.From)
c.aclass(&p.From)
if ctxt.Instoffset != 0 {
ctxt.Diag("offset must be zero in LDREX")
if c.instoffset != 0 {
c.ctxt.Diag("offset must be zero in LDREX")
}
o1 = 0x1b<<20 | 0xf9f
o1 |= (uint32(p.From.Reg) & 15) << 16
......@@ -2304,10 +2321,10 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o1 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28
case 92: /* strexd reg,oreg,reg */
aclass(ctxt, &p.From)
c.aclass(&p.From)
if ctxt.Instoffset != 0 {
ctxt.Diag("offset must be zero in STREX")
if c.instoffset != 0 {
c.ctxt.Diag("offset must be zero in STREX")
}
o1 = 0x1a<<20 | 0xf90
o1 |= (uint32(p.From.Reg) & 15) << 16
......@@ -2316,12 +2333,12 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o1 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28
case 93: /* movb/movh/movhu addr,R -> ldrsb/ldrsh/ldrh */
o1 = omvl(ctxt, p, &p.From, REGTMP)
o1 = c.omvl(p, &p.From, REGTMP)
if o1 == 0 {
break
}
o2 = olhr(ctxt, 0, REGTMP, int(p.To.Reg), int(p.Scond))
o2 = c.olhr(0, REGTMP, int(p.To.Reg), int(p.Scond))
if p.As == AMOVB || p.As == AMOVBS {
o2 ^= 1<<5 | 1<<6
} else if p.As == AMOVH || p.As == AMOVHS {
......@@ -2329,19 +2346,19 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
}
if o.flag&LPCREL != 0 {
o3 = o2
o2 = oprrr(ctxt, p, AADD, int(p.Scond)) | REGTMP&15 | (REGPC&15)<<16 | (REGTMP&15)<<12
o2 = c.oprrr(p, AADD, int(p.Scond)) | REGTMP&15 | (REGPC&15)<<16 | (REGTMP&15)<<12
}
case 94: /* movh/movhu R,addr -> strh */
o1 = omvl(ctxt, p, &p.To, REGTMP)
o1 = c.omvl(p, &p.To, REGTMP)
if o1 == 0 {
break
}
o2 = oshr(ctxt, int(p.From.Reg), 0, REGTMP, int(p.Scond))
o2 = c.oshr(int(p.From.Reg), 0, REGTMP, int(p.Scond))
if o.flag&LPCREL != 0 {
o3 = o2
o2 = oprrr(ctxt, p, AADD, int(p.Scond)) | REGTMP&15 | (REGPC&15)<<16 | (REGTMP&15)<<12
o2 = c.oprrr(p, AADD, int(p.Scond)) | REGTMP&15 | (REGPC&15)<<16 | (REGTMP&15)<<12
}
case 95: /* PLD off(reg) */
......@@ -2364,20 +2381,20 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o1 = 0xf7fabcfd
case 97: /* CLZ Rm, Rd */
o1 = oprrr(ctxt, p, p.As, int(p.Scond))
o1 = c.oprrr(p, p.As, int(p.Scond))
o1 |= (uint32(p.To.Reg) & 15) << 12
o1 |= (uint32(p.From.Reg) & 15) << 0
case 98: /* MULW{T,B} Rs, Rm, Rd */
o1 = oprrr(ctxt, p, p.As, int(p.Scond))
o1 = c.oprrr(p, p.As, int(p.Scond))
o1 |= (uint32(p.To.Reg) & 15) << 16
o1 |= (uint32(p.From.Reg) & 15) << 8
o1 |= (uint32(p.Reg) & 15) << 0
case 99: /* MULAW{T,B} Rs, Rm, Rn, Rd */
o1 = oprrr(ctxt, p, p.As, int(p.Scond))
o1 = c.oprrr(p, p.As, int(p.Scond))
o1 |= (uint32(p.To.Reg) & 15) << 12
o1 |= (uint32(p.From.Reg) & 15) << 8
......@@ -2401,9 +2418,9 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
return
}
func mov(ctxt *obj.Link, p *obj.Prog) uint32 {
aclass(ctxt, &p.From)
o1 := oprrr(ctxt, p, p.As, int(p.Scond))
func (c *ctxt5) mov(p *obj.Prog) uint32 {
c.aclass(&p.From)
o1 := c.oprrr(p, p.As, int(p.Scond))
o1 |= uint32(p.From.Offset)
rt := int(p.To.Reg)
if p.To.Type == obj.TYPE_NONE {
......@@ -2419,13 +2436,13 @@ func mov(ctxt *obj.Link, p *obj.Prog) uint32 {
return o1
}
func oprrr(ctxt *obj.Link, p *obj.Prog, a obj.As, sc int) uint32 {
func (c *ctxt5) oprrr(p *obj.Prog, a obj.As, sc int) uint32 {
o := ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28
if sc&C_SBIT != 0 {
o |= 1 << 20
}
if sc&(C_PBIT|C_WBIT) != 0 {
ctxt.Diag(".nil/.W on dp instruction")
c.ctxt.Diag(".nil/.W on dp instruction")
}
switch a {
case AMMUL:
......@@ -2604,13 +2621,13 @@ func oprrr(ctxt *obj.Link, p *obj.Prog, a obj.As, sc int) uint32 {
return o&(0xf<<28) | 0x12fff3<<4
}
ctxt.Diag("%v: bad rrr %d", p, a)
c.ctxt.Diag("%v: bad rrr %d", p, a)
return 0
}
func opbra(ctxt *obj.Link, p *obj.Prog, a obj.As, sc int) uint32 {
func (c *ctxt5) opbra(p *obj.Prog, a obj.As, sc int) uint32 {
if sc&(C_SBIT|C_PBIT|C_WBIT) != 0 {
ctxt.Diag("%v: .nil/.nil/.W on bra instruction", p)
c.ctxt.Diag("%v: .nil/.nil/.W on bra instruction", p)
}
sc &= C_SCOND
sc ^= C_SCOND_XOR
......@@ -2618,7 +2635,7 @@ func opbra(ctxt *obj.Link, p *obj.Prog, a obj.As, sc int) uint32 {
return uint32(sc)<<28 | 0x5<<25 | 0x1<<24
}
if sc != 0xe {
ctxt.Diag("%v: .COND on bcond instruction", p)
c.ctxt.Diag("%v: .COND on bcond instruction", p)
}
switch a {
case ABEQ:
......@@ -2657,13 +2674,13 @@ func opbra(ctxt *obj.Link, p *obj.Prog, a obj.As, sc int) uint32 {
return 0xe<<28 | 0x5<<25
}
ctxt.Diag("%v: bad bra %v", p, a)
c.ctxt.Diag("%v: bad bra %v", p, a)
return 0
}
func olr(ctxt *obj.Link, v int32, b int, r int, sc int) uint32 {
func (c *ctxt5) olr(v int32, b int, r int, sc int) uint32 {
if sc&C_SBIT != 0 {
ctxt.Diag(".nil on LDR/STR instruction")
c.ctxt.Diag(".nil on LDR/STR instruction")
}
o := ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28
if sc&C_PBIT == 0 {
......@@ -2678,14 +2695,14 @@ func olr(ctxt *obj.Link, v int32, b int, r int, sc int) uint32 {
o |= 1<<26 | 1<<20
if v < 0 {
if sc&C_UBIT != 0 {
ctxt.Diag(".U on neg offset")
c.ctxt.Diag(".U on neg offset")
}
v = -v
o ^= 1 << 23
}
if v >= 1<<12 || v < 0 {
ctxt.Diag("literal span too large: %d (R%d)\n%v", v, b, ctxt.Printp)
c.ctxt.Diag("literal span too large: %d (R%d)\n%v", v, b, c.printp)
}
o |= uint32(v)
o |= (uint32(b) & 15) << 16
......@@ -2693,9 +2710,9 @@ func olr(ctxt *obj.Link, v int32, b int, r int, sc int) uint32 {
return o
}
func olhr(ctxt *obj.Link, v int32, b int, r int, sc int) uint32 {
func (c *ctxt5) olhr(v int32, b int, r int, sc int) uint32 {
if sc&C_SBIT != 0 {
ctxt.Diag(".nil on LDRH/STRH instruction")
c.ctxt.Diag(".nil on LDRH/STRH instruction")
}
o := ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28
if sc&C_PBIT == 0 {
......@@ -2711,7 +2728,7 @@ func olhr(ctxt *obj.Link, v int32, b int, r int, sc int) uint32 {
}
if v >= 1<<8 || v < 0 {
ctxt.Diag("literal span too large: %d (R%d)\n%v", v, b, ctxt.Printp)
c.ctxt.Diag("literal span too large: %d (R%d)\n%v", v, b, c.printp)
}
o |= uint32(v)&0xf | (uint32(v)>>4)<<8 | 1<<22
o |= (uint32(b) & 15) << 16
......@@ -2719,38 +2736,38 @@ func olhr(ctxt *obj.Link, v int32, b int, r int, sc int) uint32 {
return o
}
func osr(ctxt *obj.Link, a obj.As, r int, v int32, b int, sc int) uint32 {
o := olr(ctxt, v, b, r, sc) ^ (1 << 20)
func (c *ctxt5) osr(a obj.As, r int, v int32, b int, sc int) uint32 {
o := c.olr(v, b, r, sc) ^ (1 << 20)
if a != AMOVW {
o |= 1 << 22
}
return o
}
func oshr(ctxt *obj.Link, r int, v int32, b int, sc int) uint32 {
o := olhr(ctxt, v, b, r, sc) ^ (1 << 20)
func (c *ctxt5) oshr(r int, v int32, b int, sc int) uint32 {
o := c.olhr(v, b, r, sc) ^ (1 << 20)
return o
}
func osrr(ctxt *obj.Link, r int, i int, b int, sc int) uint32 {
return olr(ctxt, int32(i), b, r, sc) ^ (1<<25 | 1<<20)
func (c *ctxt5) osrr(r int, i int, b int, sc int) uint32 {
return c.olr(int32(i), b, r, sc) ^ (1<<25 | 1<<20)
}
func oshrr(ctxt *obj.Link, r int, i int, b int, sc int) uint32 {
return olhr(ctxt, int32(i), b, r, sc) ^ (1<<22 | 1<<20)
func (c *ctxt5) oshrr(r int, i int, b int, sc int) uint32 {
return c.olhr(int32(i), b, r, sc) ^ (1<<22 | 1<<20)
}
func olrr(ctxt *obj.Link, i int, b int, r int, sc int) uint32 {
return olr(ctxt, int32(i), b, r, sc) ^ (1 << 25)
func (c *ctxt5) olrr(i int, b int, r int, sc int) uint32 {
return c.olr(int32(i), b, r, sc) ^ (1 << 25)
}
func olhrr(ctxt *obj.Link, i int, b int, r int, sc int) uint32 {
return olhr(ctxt, int32(i), b, r, sc) ^ (1 << 22)
func (c *ctxt5) olhrr(i int, b int, r int, sc int) uint32 {
return c.olhr(int32(i), b, r, sc) ^ (1 << 22)
}
func ofsr(ctxt *obj.Link, a obj.As, r int, v int32, b int, sc int, p *obj.Prog) uint32 {
func (c *ctxt5) ofsr(a obj.As, r int, v int32, b int, sc int, p *obj.Prog) uint32 {
if sc&C_SBIT != 0 {
ctxt.Diag(".nil on FLDR/FSTR instruction: %v", p)
c.ctxt.Diag(".nil on FLDR/FSTR instruction: %v", p)
}
o := ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28
if sc&C_PBIT == 0 {
......@@ -2766,9 +2783,9 @@ func ofsr(ctxt *obj.Link, a obj.As, r int, v int32, b int, sc int, p *obj.Prog)
}
if v&3 != 0 {
ctxt.Diag("odd offset for floating point op: %d\n%v", v, p)
c.ctxt.Diag("odd offset for floating point op: %d\n%v", v, p)
} else if v >= 1<<10 || v < 0 {
ctxt.Diag("literal span too large: %d\n%v", v, p)
c.ctxt.Diag("literal span too large: %d\n%v", v, p)
}
o |= (uint32(v) >> 2) & 0xFF
o |= (uint32(b) & 15) << 16
......@@ -2776,7 +2793,7 @@ func ofsr(ctxt *obj.Link, a obj.As, r int, v int32, b int, sc int, p *obj.Prog)
switch a {
default:
ctxt.Diag("bad fst %v", a)
c.ctxt.Diag("bad fst %v", a)
fallthrough
case AMOVD:
......@@ -2790,28 +2807,28 @@ func ofsr(ctxt *obj.Link, a obj.As, r int, v int32, b int, sc int, p *obj.Prog)
return o
}
func omvl(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, dr int) uint32 {
func (c *ctxt5) omvl(p *obj.Prog, a *obj.Addr, dr int) uint32 {
var o1 uint32
if p.Pcond == nil {
aclass(ctxt, a)
v := immrot(^uint32(ctxt.Instoffset))
c.aclass(a)
v := immrot(^uint32(c.instoffset))
if v == 0 {
ctxt.Diag("%v: missing literal", p)
c.ctxt.Diag("%v: missing literal", p)
return 0
}
o1 = oprrr(ctxt, p, AMVN, int(p.Scond)&C_SCOND)
o1 = c.oprrr(p, AMVN, int(p.Scond)&C_SCOND)
o1 |= uint32(v)
o1 |= (uint32(dr) & 15) << 12
} else {
v := int32(p.Pcond.Pc - p.Pc - 8)
o1 = olr(ctxt, v, REGPC, dr, int(p.Scond)&C_SCOND)
o1 = c.olr(v, REGPC, dr, int(p.Scond)&C_SCOND)
}
return o1
}
func chipzero5(ctxt *obj.Link, e float64) int {
func (c *ctxt5) chipzero5(e float64) int {
// We use GOARM=7 to gate the use of VFPv3 vmov (imm) instructions.
if obj.GOARM < 7 || e != 0 {
return -1
......@@ -2819,7 +2836,7 @@ func chipzero5(ctxt *obj.Link, e float64) int {
return 0
}
func chipfloat5(ctxt *obj.Link, e float64) int {
func (c *ctxt5) chipfloat5(e float64) int {
// We use GOARM=7 to gate the use of VFPv3 vmov (imm) instructions.
if obj.GOARM < 7 {
return -1
......@@ -41,12 +41,11 @@ func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
p.From.Class = 0
p.To.Class = 0
c := ctxt5{ctxt: ctxt, newprog: newprog}
// Rewrite B/BL to symbol as TYPE_BRANCH.
switch p.As {
case AB,
ABL,
obj.ADUFFZERO,
obj.ADUFFCOPY:
case AB, ABL, obj.ADUFFZERO, obj.ADUFFCOPY:
if p.To.Type == obj.TYPE_MEM && (p.To.Name == obj.NAME_EXTERN || p.To.Name == obj.NAME_STATIC) && p.To.Sym != nil {
p.To.Type = obj.TYPE_BRANCH
}
......@@ -104,7 +103,7 @@ func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
// Rewrite float constants to values stored in memory.
switch p.As {
case AMOVF:
if p.From.Type == obj.TYPE_FCONST && chipfloat5(ctxt, p.From.Val.(float64)) < 0 && (chipzero5(ctxt, p.From.Val.(float64)) < 0 || p.Scond&C_SCOND != C_SCOND_NONE) {
if p.From.Type == obj.TYPE_FCONST && c.chipfloat5(p.From.Val.(float64)) < 0 && (c.chipzero5(p.From.Val.(float64)) < 0 || p.Scond&C_SCOND != C_SCOND_NONE) {
f32 := float32(p.From.Val.(float64))
p.From.Type = obj.TYPE_MEM
p.From.Sym = ctxt.Float32Sym(f32)
......@@ -113,7 +112,7 @@ func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
}
case AMOVD:
if p.From.Type == obj.TYPE_FCONST && chipfloat5(ctxt, p.From.Val.(float64)) < 0 && (chipzero5(ctxt, p.From.Val.(float64)) < 0 || p.Scond&C_SCOND != C_SCOND_NONE) {
if p.From.Type == obj.TYPE_FCONST && c.chipfloat5(p.From.Val.(float64)) < 0 && (c.chipzero5(p.From.Val.(float64)) < 0 || p.Scond&C_SCOND != C_SCOND_NONE) {
p.From.Type = obj.TYPE_MEM
p.From.Sym = ctxt.Float64Sym(p.From.Val.(float64))
p.From.Name = obj.NAME_EXTERN
......@@ -122,12 +121,12 @@ func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
}
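// For example (a sketch; the actual symbol name is produced by
// ctxt.Float64Sym), a double constant with no VFP immediate encoding, such as
//	MOVD	$(0.1), F0
// is rewritten above into a load from a read-only data symbol:
//	MOVD	$f64.3fb999999999999a(SB), F0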
if ctxt.Flag_dynlink {
rewriteToUseGot(ctxt, p, newprog)
c.rewriteToUseGot(p)
}
}
// Rewrite p, if necessary, to access global data via the global offset table.
func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
func (c *ctxt5) rewriteToUseGot(p *obj.Prog) {
if p.As == obj.ADUFFCOPY || p.As == obj.ADUFFZERO {
// ADUFFxxx $offset
// becomes
......@@ -136,9 +135,9 @@ func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
// CALL (R9)
var sym *obj.LSym
if p.As == obj.ADUFFZERO {
sym = ctxt.Lookup("runtime.duffzero", 0)
sym = c.ctxt.Lookup("runtime.duffzero", 0)
} else {
sym = ctxt.Lookup("runtime.duffcopy", 0)
sym = c.ctxt.Lookup("runtime.duffcopy", 0)
}
offset := p.To.Offset
p.As = AMOVW
......@@ -150,13 +149,13 @@ func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
p.To.Name = obj.NAME_NONE
p.To.Offset = 0
p.To.Sym = nil
p1 := obj.Appendp(p, newprog)
p1 := obj.Appendp(p, c.newprog)
p1.As = AADD
p1.From.Type = obj.TYPE_CONST
p1.From.Offset = offset
p1.To.Type = obj.TYPE_REG
p1.To.Reg = REG_R9
p2 := obj.Appendp(p1, newprog)
p2 := obj.Appendp(p1, c.newprog)
p2.As = obj.ACALL
p2.To.Type = obj.TYPE_MEM
p2.To.Reg = REG_R9
......@@ -170,15 +169,15 @@ func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
// MOVW $sym, Rx becomes MOVW sym@GOT, Rx
// MOVW $sym+<off>, Rx becomes MOVW sym@GOT, Rx; ADD <off>, Rx
if p.As != AMOVW {
ctxt.Diag("do not know how to handle TYPE_ADDR in %v with -dynlink", p)
c.ctxt.Diag("do not know how to handle TYPE_ADDR in %v with -dynlink", p)
}
if p.To.Type != obj.TYPE_REG {
ctxt.Diag("do not know how to handle LEAQ-type insn to non-register in %v with -dynlink", p)
c.ctxt.Diag("do not know how to handle LEAQ-type insn to non-register in %v with -dynlink", p)
}
p.From.Type = obj.TYPE_MEM
p.From.Name = obj.NAME_GOTREF
if p.From.Offset != 0 {
q := obj.Appendp(p, newprog)
q := obj.Appendp(p, c.newprog)
q.As = AADD
q.From.Type = obj.TYPE_CONST
q.From.Offset = p.From.Offset
......@@ -187,7 +186,7 @@ func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
}
}
if p.From3 != nil && p.From3.Name == obj.NAME_EXTERN {
ctxt.Diag("don't know how to handle %v with -dynlink", p)
c.ctxt.Diag("don't know how to handle %v with -dynlink", p)
}
var source *obj.Addr
// MOVx sym, Ry becomes MOVW sym@GOT, R9; MOVx (R9), Ry
......@@ -195,7 +194,7 @@ func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
// An addition may be inserted between the two MOVs if there is an offset.
if p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local() {
if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local() {
ctxt.Diag("cannot handle NAME_EXTERN on both sides in %v with -dynlink", p)
c.ctxt.Diag("cannot handle NAME_EXTERN on both sides in %v with -dynlink", p)
}
source = &p.From
} else if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local() {
......@@ -210,10 +209,10 @@ func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
return
}
if source.Type != obj.TYPE_MEM {
ctxt.Diag("don't know how to handle %v with -dynlink", p)
c.ctxt.Diag("don't know how to handle %v with -dynlink", p)
}
p1 := obj.Appendp(p, newprog)
p2 := obj.Appendp(p1, newprog)
p1 := obj.Appendp(p, c.newprog)
p2 := obj.Appendp(p1, c.newprog)
p1.As = AMOVW
p1.From.Type = obj.TYPE_MEM
......@@ -249,15 +248,15 @@ const (
func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
autosize := int32(0)
ctxt.Cursym = cursym
if cursym.Text == nil || cursym.Text.Link == nil {
return
}
softfloat(ctxt, newprog, cursym)
c := ctxt5{ctxt: ctxt, cursym: cursym, newprog: newprog}
p := cursym.Text
c.softfloat()
p := c.cursym.Text
autoffset := int32(p.To.Offset)
if autoffset < 0 {
autoffset = 0
......@@ -283,9 +282,6 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
case ADIV, ADIVU, AMOD, AMODU:
q = p
if ctxt.Sym_div == nil {
initdiv(ctxt)
}
cursym.Text.Mark &^= LEAF
continue
......@@ -362,11 +358,11 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
}
if p.From3.Offset&obj.NOSPLIT == 0 {
p = stacksplit(ctxt, p, newprog, autosize) // emit split check
p = c.stacksplit(p, autosize) // emit split check
}
// MOVW.W R14,$-autosize(SP)
p = obj.Appendp(p, newprog)
p = obj.Appendp(p, c.newprog)
p.As = AMOVW
p.Scond |= C_WBIT
......@@ -597,16 +593,13 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
p.To.Type = obj.TYPE_BRANCH
switch o {
case ADIV:
p.To.Sym = ctxt.Sym_div
p.To.Sym = symdiv
case ADIVU:
p.To.Sym = ctxt.Sym_divu
p.To.Sym = symdivu
case AMOD:
p.To.Sym = ctxt.Sym_mod
p.To.Sym = symmod
case AMODU:
p.To.Sym = ctxt.Sym_modu
p.To.Sym = symmodu
}
/* MOV REGTMP, b */
......@@ -637,21 +630,21 @@ func isfloatreg(a *obj.Addr) bool {
return a.Type == obj.TYPE_REG && REG_F0 <= a.Reg && a.Reg <= REG_F15
}
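// softfloat arranges for software floating point on GOARM<6 targets: before
// each run of floating-point instructions (and again at any branch target
// inside such a run), it inserts a BL to the runtime's _sfloat helper, which
// emulates the floating-point instructions that follow it.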
func softfloat(ctxt *obj.Link, newprog obj.ProgAlloc, cursym *obj.LSym) {
func (c *ctxt5) softfloat() {
if obj.GOARM > 5 {
return
}
symsfloat := ctxt.Lookup("_sfloat", 0)
symsfloat := c.ctxt.Lookup("_sfloat", 0)
wasfloat := 0
for p := cursym.Text; p != nil; p = p.Link {
for p := c.cursym.Text; p != nil; p = p.Link {
if p.Pcond != nil {
p.Pcond.Mark |= LABEL
}
}
var next *obj.Prog
for p := cursym.Text; p != nil; p = p.Link {
for p := c.cursym.Text; p != nil; p = p.Link {
switch p.As {
case AMOVW:
if isfloatreg(&p.To) || isfloatreg(&p.From) {
......@@ -691,12 +684,12 @@ func softfloat(ctxt *obj.Link, newprog obj.ProgAlloc, cursym *obj.LSym) {
soft:
if wasfloat == 0 || (p.Mark&LABEL != 0) {
next = newprog()
next = c.newprog()
*next = *p
// BL _sfloat(SB)
*p = obj.Prog{}
p.Ctxt = ctxt
p.Ctxt = c.ctxt
p.Link = next
p.As = ABL
p.To.Type = obj.TYPE_BRANCH
......@@ -714,16 +707,16 @@ func softfloat(ctxt *obj.Link, newprog obj.ProgAlloc, cursym *obj.LSym) {
}
}
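// stacksplit emits the prologue stack-bound check. A sketch of the code
// generated for a small frame (the large-frame and preempt-wraparound cases
// add the extra comparisons shown in the body below):
//	MOVW	g_stackguard(g), R1
//	CMP	R1, SP
//	BLS	morestack_call
// where morestack_call is appended after the last instruction of the function:
//	MOVW	LR, R3
//	BL	runtime.morestack   // or morestackc / morestack_noctxt
//	B	start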
func stacksplit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize int32) *obj.Prog {
func (c *ctxt5) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
// MOVW g_stackguard(g), R1
p = obj.Appendp(p, newprog)
p = obj.Appendp(p, c.newprog)
p.As = AMOVW
p.From.Type = obj.TYPE_MEM
p.From.Reg = REGG
p.From.Offset = 2 * int64(ctxt.Arch.PtrSize) // G.stackguard0
if ctxt.Cursym.CFunc() {
p.From.Offset = 3 * int64(ctxt.Arch.PtrSize) // G.stackguard1
p.From.Offset = 2 * int64(c.ctxt.Arch.PtrSize) // G.stackguard0
if c.cursym.CFunc() {
p.From.Offset = 3 * int64(c.ctxt.Arch.PtrSize) // G.stackguard1
}
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R1
......@@ -731,7 +724,7 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize in
if framesize <= obj.StackSmall {
// small stack: SP < stackguard
// CMP stackguard, SP
p = obj.Appendp(p, newprog)
p = obj.Appendp(p, c.newprog)
p.As = ACMP
p.From.Type = obj.TYPE_REG
......@@ -741,7 +734,7 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize in
// large stack: SP-framesize < stackguard-StackSmall
// MOVW $-(framesize-StackSmall)(SP), R2
// CMP stackguard, R2
p = obj.Appendp(p, newprog)
p = obj.Appendp(p, c.newprog)
p.As = AMOVW
p.From.Type = obj.TYPE_ADDR
......@@ -750,7 +743,7 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize in
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R2
p = obj.Appendp(p, newprog)
p = obj.Appendp(p, c.newprog)
p.As = ACMP
p.From.Type = obj.TYPE_REG
p.From.Reg = REG_R1
......@@ -766,14 +759,14 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize in
// SUB.NE R1, R2
// MOVW.NE $(framesize+(StackGuard-StackSmall)), R3
// CMP.NE R3, R2
p = obj.Appendp(p, newprog)
p = obj.Appendp(p, c.newprog)
p.As = ACMP
p.From.Type = obj.TYPE_CONST
p.From.Offset = int64(uint32(obj.StackPreempt & (1<<32 - 1)))
p.Reg = REG_R1
p = obj.Appendp(p, newprog)
p = obj.Appendp(p, c.newprog)
p.As = AMOVW
p.From.Type = obj.TYPE_ADDR
p.From.Reg = REGSP
......@@ -782,7 +775,7 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize in
p.To.Reg = REG_R2
p.Scond = C_SCOND_NE
p = obj.Appendp(p, newprog)
p = obj.Appendp(p, c.newprog)
p.As = ASUB
p.From.Type = obj.TYPE_REG
p.From.Reg = REG_R1
......@@ -790,7 +783,7 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize in
p.To.Reg = REG_R2
p.Scond = C_SCOND_NE
p = obj.Appendp(p, newprog)
p = obj.Appendp(p, c.newprog)
p.As = AMOVW
p.From.Type = obj.TYPE_ADDR
p.From.Offset = int64(framesize) + (obj.StackGuard - obj.StackSmall)
......@@ -798,7 +791,7 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize in
p.To.Reg = REG_R3
p.Scond = C_SCOND_NE
p = obj.Appendp(p, newprog)
p = obj.Appendp(p, c.newprog)
p.As = ACMP
p.From.Type = obj.TYPE_REG
p.From.Reg = REG_R3
......@@ -807,23 +800,23 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize in
}
// BLS call-to-morestack
bls := obj.Appendp(p, newprog)
bls := obj.Appendp(p, c.newprog)
bls.As = ABLS
bls.To.Type = obj.TYPE_BRANCH
var last *obj.Prog
for last = ctxt.Cursym.Text; last.Link != nil; last = last.Link {
for last = c.cursym.Text; last.Link != nil; last = last.Link {
}
// Now we are at the end of the function, but logically
// we are still in function prologue. We need to fix the
// SP data and PCDATA.
spfix := obj.Appendp(last, newprog)
spfix := obj.Appendp(last, c.newprog)
spfix.As = obj.ANOP
spfix.Spadj = -framesize
pcdata := obj.Appendp(spfix, newprog)
pcdata.Pos = ctxt.Cursym.Text.Pos
pcdata := obj.Appendp(spfix, c.newprog)
pcdata.Pos = c.cursym.Text.Pos
pcdata.As = obj.APCDATA
pcdata.From.Type = obj.TYPE_CONST
pcdata.From.Offset = obj.PCDATA_StackMapIndex
......@@ -831,7 +824,7 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize in
pcdata.To.Offset = -1 // pcdata starts at -1 at function entry
// MOVW LR, R3
movw := obj.Appendp(pcdata, newprog)
movw := obj.Appendp(pcdata, c.newprog)
movw.As = AMOVW
movw.From.Type = obj.TYPE_REG
movw.From.Reg = REGLINK
......@@ -841,38 +834,28 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize in
bls.Pcond = movw
// BL runtime.morestack
call := obj.Appendp(movw, newprog)
call := obj.Appendp(movw, c.newprog)
call.As = obj.ACALL
call.To.Type = obj.TYPE_BRANCH
morestack := "runtime.morestack"
switch {
case ctxt.Cursym.CFunc():
case c.cursym.CFunc():
morestack = "runtime.morestackc"
case ctxt.Cursym.Text.From3.Offset&obj.NEEDCTXT == 0:
case c.cursym.Text.From3.Offset&obj.NEEDCTXT == 0:
morestack = "runtime.morestack_noctxt"
}
call.To.Sym = ctxt.Lookup(morestack, 0)
call.To.Sym = c.ctxt.Lookup(morestack, 0)
// B start
b := obj.Appendp(call, newprog)
b := obj.Appendp(call, c.newprog)
b.As = obj.AJMP
b.To.Type = obj.TYPE_BRANCH
b.Pcond = ctxt.Cursym.Text.Link
b.Pcond = c.cursym.Text.Link
b.Spadj = +framesize
return bls
}
func initdiv(ctxt *obj.Link) {
if ctxt.Sym_div != nil {
return
}
ctxt.Sym_div = ctxt.Lookup("_div", 0)
ctxt.Sym_divu = ctxt.Lookup("_divu", 0)
ctxt.Sym_mod = ctxt.Lookup("_mod", 0)
ctxt.Sym_modu = ctxt.Lookup("_modu", 0)
}
var unaryDst = map[obj.As]bool{
ASWI: true,
AWORD: true,
......
......@@ -726,17 +726,12 @@ type Link struct {
PosTable src.PosTable
InlTree InlTree // global inlining tree used by gc/inl.go
Imports []string
Sym_div *LSym
Sym_divu *LSym
Sym_mod *LSym
Sym_modu *LSym
Plan9privates *LSym
Printp *Prog
Blitrl *Prog
Elitrl *Prog
Instoffset int64
Autosize int32
Armsize int32
Pc int64
DiagFunc func(string, ...interface{})
DebugInfo func(fn *LSym, curfn interface{}) []*dwarf.Var // if non-nil, curfn is a *gc.Node
......