Commit 15b76ad9 authored by Russ Cox

runtime: assume precisestack, copystack, StackCopyAlways, ScanStackByFrames

Commit to stack copying for stack growth.

We're carrying around a surprising amount of cruft from older schemes.
I am confident that precise stack scans and stack copying are here to stay.

Delete fallback code for when precise stack info is disabled.
Delete fallback code for when copying stacks is disabled.
Delete fallback code for when StackCopyAlways is disabled.
Delete Stktop chain - there is only one stack segment now.
Delete M.moreargp, M.moreargsize, M.moreframesize, M.cret.
Delete G.writenbuf (unrelated, just dead).
Delete runtime.lessstack, runtime.oldstack.
Delete many amd64 morestack variants.
Delete initialization of morestack frame/arg sizes (shortens split prologue!).

Replace G's stackguard/stackbase/stack0/stacksize/
syscallstack/syscallguard/forkstackguard with simple stack
bounds (lo, hi).

Update liblink, runtime/cgo for adjustments to G.

LGTM=khr
R=khr, bradfitz
CC=golang-codereviews, iant, r
https://golang.org/cl/137410043
parent d72029e3
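
For orientation, the shape this commit converges on, condensed from the runtime.h hunk below (a sketch of the relevant fields only, not the full structs):

    // Stack describes a Go execution stack.
    // The bounds of the stack are exactly [lo, hi),
    // with no implicit data structures on either side.
    struct Stack
    {
        uintptr lo;
        uintptr hi;
    };

    // In G, the old stackguard/stackbase/stack0/stacksize/
    // syscallstack/syscallguard/forkstackguard fields collapse to:
    Stack   stack;       // the actual stack memory: [stack.lo, stack.hi)
    uintptr stackguard0; // stack.lo+StackGuard, or StackPreempt to force preemption
    uintptr stackguard1; // stack.lo+StackGuard on g0/gsignal stacks, ~0 elsewhere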
......@@ -392,7 +392,7 @@ struct Link
LSym* sym_divu;
LSym* sym_mod;
LSym* sym_modu;
LSym* symmorestack[20];
LSym* symmorestack[2];
LSym* tlsg;
LSym* plan9privates;
Prog* curp;
......
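
With the size-passing trampolines gone, the linker needs only two morestack symbols; they are looked up once and indexed by the noctxt flag, as the later obj6.c hunk shows. Condensed:

    ctxt->symmorestack[0] = linklookup(ctxt, "runtime.morestack", 0);
    ctxt->symmorestack[1] = linklookup(ctxt, "runtime.morestack_noctxt", 0);
    /* ... later, at the call site emitted by stacksplit: */
    p->to.sym = ctxt->symmorestack[noctxt];    /* noctxt is 0 or 1 */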
......@@ -246,6 +246,8 @@ ok:
aggr = "alg";
else if(streq(fields.p[1], "Panic"))
aggr = "panic";
else if(streq(fields.p[1], "Stack"))
aggr = "stack";
}
if(hasprefix(lines.p[i], "}"))
aggr = nil;
......
......@@ -458,7 +458,7 @@ addstacksplit(Link *ctxt, LSym *cursym)
p->as = AMOVW;
p->from.type = D_OREG;
p->from.reg = REGG;
p->from.offset = 2*ctxt->arch->ptrsize; // G.panic
p->from.offset = 4*ctxt->arch->ptrsize; // G.panic
p->to.type = D_REG;
p->to.reg = 1;
......@@ -762,15 +762,14 @@ softfloat(Link *ctxt, LSym *cursym)
static Prog*
stacksplit(Link *ctxt, Prog *p, int32 framesize, int noctxt)
{
int32 arg;
// MOVW g_stackguard(g), R1
p = appendp(ctxt, p);
p->as = AMOVW;
p->from.type = D_OREG;
p->from.reg = REGG;
p->from.offset = 2*ctxt->arch->ptrsize; // G.stackguard0
if(ctxt->cursym->cfunc)
p->from.offset = 3*ctxt->arch->ptrsize;
p->from.offset = 3*ctxt->arch->ptrsize; // G.stackguard1
p->to.type = D_REG;
p->to.reg = 1;
......@@ -849,29 +848,6 @@ stacksplit(Link *ctxt, Prog *p, int32 framesize, int noctxt)
p->scond = C_SCOND_NE;
}
// MOVW.LS $framesize, R1
p = appendp(ctxt, p);
p->as = AMOVW;
p->scond = C_SCOND_LS;
p->from.type = D_CONST;
p->from.offset = framesize;
p->to.type = D_REG;
p->to.reg = 1;
// MOVW.LS $args, R2
p = appendp(ctxt, p);
p->as = AMOVW;
p->scond = C_SCOND_LS;
p->from.type = D_CONST;
arg = ctxt->cursym->text->to.offset2;
if(arg == 1) // special marker for known 0
arg = 0;
if(arg&3)
ctxt->diag("misaligned argument size in stack split");
p->from.offset = arg;
p->to.type = D_REG;
p->to.reg = 2;
// MOVW.LS R14, R3
p = appendp(ctxt, p);
p->as = AMOVW;
......
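
For reference, the decoding that the deleted MOVW.LS block performed: the function's argument size lives in the TEXT directive's to.offset2, with 1 as a sentinel meaning a known-zero size. morestack no longer needs the value because stack growth now copies the stack frame by frame using function metadata.

    arg = ctxt->cursym->text->to.offset2;
    if(arg == 1)    /* special marker for known 0 */
        arg = 0;
    if(arg&3)       /* ARM argument slots are 4-byte aligned */
        ctxt->diag("misaligned argument size in stack split");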
......@@ -342,32 +342,6 @@ nacladdr(Link *ctxt, Prog *p, Addr *a)
}
}
static char*
morename[] =
{
"runtime.morestack00",
"runtime.morestack00_noctxt",
"runtime.morestack10",
"runtime.morestack10_noctxt",
"runtime.morestack01",
"runtime.morestack01_noctxt",
"runtime.morestack11",
"runtime.morestack11_noctxt",
"runtime.morestack8",
"runtime.morestack8_noctxt",
"runtime.morestack16",
"runtime.morestack16_noctxt",
"runtime.morestack24",
"runtime.morestack24_noctxt",
"runtime.morestack32",
"runtime.morestack32_noctxt",
"runtime.morestack40",
"runtime.morestack40_noctxt",
"runtime.morestack48",
"runtime.morestack48_noctxt",
};
static Prog* load_g_cx(Link*, Prog*);
static Prog* stacksplit(Link*, Prog*, int32, int32, int, Prog**);
static void indir_cx(Link*, Addr*);
......@@ -388,19 +362,16 @@ parsetextconst(vlong arg, vlong *textstksiz, vlong *textarg)
static void
addstacksplit(Link *ctxt, LSym *cursym)
{
Prog *p, *q, *q1, *p1, *p2;
Prog *p, *q, *p1, *p2;
int32 autoffset, deltasp;
int a, pcsize;
uint32 i;
vlong textstksiz, textarg;
if(ctxt->tlsg == nil)
ctxt->tlsg = linklookup(ctxt, "runtime.tlsg", 0);
if(ctxt->symmorestack[0] == nil) {
if(nelem(morename) > nelem(ctxt->symmorestack))
sysfatal("Link.symmorestack needs at least %d elements", nelem(morename));
for(i=0; i<nelem(morename); i++)
ctxt->symmorestack[i] = linklookup(ctxt, morename[i], 0);
ctxt->symmorestack[0] = linklookup(ctxt, "runtime.morestack", 0);
ctxt->symmorestack[1] = linklookup(ctxt, "runtime.morestack_noctxt", 0);
}
if(ctxt->headtype == Hplan9 && ctxt->plan9privates == nil)
......@@ -481,7 +452,7 @@ addstacksplit(Link *ctxt, LSym *cursym)
p = appendp(ctxt, p);
p->as = AMOVQ;
p->from.type = D_INDIR+D_CX;
p->from.offset = 2*ctxt->arch->ptrsize; // G.panic
p->from.offset = 4*ctxt->arch->ptrsize; // G.panic
p->to.type = D_BX;
if(ctxt->headtype == Hnacl) {
p->as = AMOVL;
......@@ -545,42 +516,6 @@ addstacksplit(Link *ctxt, LSym *cursym)
p2->pcond = p;
}
if(ctxt->debugstack > 1 && autoffset) {
// 6l -K -K means double-check for stack overflow
// even after calling morestack and even if the
// function is marked as nosplit.
p = appendp(ctxt, p);
p->as = AMOVQ;
indir_cx(ctxt, &p->from);
p->from.offset = 0;
p->to.type = D_BX;
p = appendp(ctxt, p);
p->as = ASUBQ;
p->from.type = D_CONST;
p->from.offset = StackSmall+32;
p->to.type = D_BX;
p = appendp(ctxt, p);
p->as = ACMPQ;
p->from.type = D_SP;
p->to.type = D_BX;
p = appendp(ctxt, p);
p->as = AJHI;
p->to.type = D_BRANCH;
q1 = p;
p = appendp(ctxt, p);
p->as = AINT;
p->from.type = D_CONST;
p->from.offset = 3;
p = appendp(ctxt, p);
p->as = ANOP;
q1->pcond = p;
}
if(ctxt->debugzerostack && autoffset && !(cursym->text->from.scale&NOSPLIT)) {
// 6l -Z means zero the stack frame on entry.
// This slows down function calls but can help avoid
......@@ -731,9 +666,9 @@ static Prog*
stacksplit(Link *ctxt, Prog *p, int32 framesize, int32 textarg, int noctxt, Prog **jmpok)
{
Prog *q, *q1;
uint32 moreconst1, moreconst2, i;
int cmp, lea, mov, sub;
USED(textarg);
cmp = ACMPQ;
lea = ALEAQ;
mov = AMOVQ;
......@@ -746,35 +681,6 @@ stacksplit(Link *ctxt, Prog *p, int32 framesize, int32 textarg, int noctxt, Prog
sub = ASUBL;
}
if(ctxt->debugstack) {
// 6l -K means check not only for stack
// overflow but stack underflow.
// On underflow, INT 3 (breakpoint).
// Underflow itself is rare but this also
// catches out-of-sync stack guard info
p = appendp(ctxt, p);
p->as = cmp;
indir_cx(ctxt, &p->from);
p->from.offset = 8;
p->to.type = D_SP;
p = appendp(ctxt, p);
p->as = AJHI;
p->to.type = D_BRANCH;
p->to.offset = 4;
q1 = p;
p = appendp(ctxt, p);
p->as = AINT;
p->from.type = D_CONST;
p->from.offset = 3;
p = appendp(ctxt, p);
p->as = ANOP;
q1->pcond = p;
}
q1 = nil;
if(framesize <= StackSmall) {
// small stack: SP <= stackguard
......@@ -783,8 +689,9 @@ stacksplit(Link *ctxt, Prog *p, int32 framesize, int32 textarg, int noctxt, Prog
p->as = cmp;
p->from.type = D_SP;
indir_cx(ctxt, &p->to);
p->to.offset = 2*ctxt->arch->ptrsize; // G.stackguard0
if(ctxt->cursym->cfunc)
p->to.offset = 3*ctxt->arch->ptrsize;
p->to.offset = 3*ctxt->arch->ptrsize; // G.stackguard1
} else if(framesize <= StackBig) {
// large stack: SP-framesize <= stackguard-StackSmall
// LEAQ -xxx(SP), AX
......@@ -799,8 +706,9 @@ stacksplit(Link *ctxt, Prog *p, int32 framesize, int32 textarg, int noctxt, Prog
p->as = cmp;
p->from.type = D_AX;
indir_cx(ctxt, &p->to);
p->to.offset = 2*ctxt->arch->ptrsize; // G.stackguard0
if(ctxt->cursym->cfunc)
p->to.offset = 3*ctxt->arch->ptrsize;
p->to.offset = 3*ctxt->arch->ptrsize; // G.stackguard1
} else {
// Such a large stack we need to protect against wraparound.
// If SP is close to zero:
......@@ -820,9 +728,9 @@ stacksplit(Link *ctxt, Prog *p, int32 framesize, int32 textarg, int noctxt, Prog
p = appendp(ctxt, p);
p->as = mov;
indir_cx(ctxt, &p->from);
p->from.offset = 0;
p->from.offset = 2*ctxt->arch->ptrsize; // G.stackguard0
if(ctxt->cursym->cfunc)
p->from.offset = 3*ctxt->arch->ptrsize;
p->from.offset = 3*ctxt->arch->ptrsize; // G.stackguard1
p->to.type = D_SI;
p = appendp(ctxt, p);
......@@ -860,75 +768,13 @@ stacksplit(Link *ctxt, Prog *p, int32 framesize, int32 textarg, int noctxt, Prog
p->to.type = D_BRANCH;
q = p;
// If we ask for more stack, we'll get a minimum of StackMin bytes.
// We need a stack frame large enough to hold the top-of-stack data,
// the function arguments+results, our caller's PC, our frame,
// a word for the return PC of the next call, and then the StackLimit bytes
// that must be available on entry to any function called from a function
// that did a stack check. If StackMin is enough, don't ask for a specific
// amount: then we can use the custom functions and save a few
// instructions.
moreconst1 = 0;
if(StackTop + textarg + ctxt->arch->ptrsize + framesize + ctxt->arch->ptrsize + StackLimit >= StackMin)
moreconst1 = framesize;
moreconst2 = textarg;
if(moreconst2 == 1) // special marker
moreconst2 = 0;
if((moreconst2&7) != 0)
ctxt->diag("misaligned argument size in stack split");
// 4 varieties varieties (const1==0 cross const2==0)
// and 6 subvarieties of (const1==0 and const2!=0)
p = appendp(ctxt, p);
if(ctxt->cursym->cfunc) {
p->as = ACALL;
p->to.type = D_BRANCH;
p->as = ACALL;
p->to.type = D_BRANCH;
if(ctxt->cursym->cfunc)
p->to.sym = linklookup(ctxt, "runtime.morestackc", 0);
} else
if(moreconst1 == 0 && moreconst2 == 0) {
p->as = ACALL;
p->to.type = D_BRANCH;
p->to.sym = ctxt->symmorestack[0*2+noctxt];
} else
if(moreconst1 != 0 && moreconst2 == 0) {
p->as = AMOVL;
p->from.type = D_CONST;
p->from.offset = moreconst1;
p->to.type = D_AX;
p = appendp(ctxt, p);
p->as = ACALL;
p->to.type = D_BRANCH;
p->to.sym = ctxt->symmorestack[1*2+noctxt];
} else
if(moreconst1 == 0 && moreconst2 <= 48 && moreconst2%8 == 0) {
i = moreconst2/8 + 3;
p->as = ACALL;
p->to.type = D_BRANCH;
p->to.sym = ctxt->symmorestack[i*2+noctxt];
} else
if(moreconst1 == 0 && moreconst2 != 0) {
p->as = AMOVL;
p->from.type = D_CONST;
p->from.offset = moreconst2;
p->to.type = D_AX;
p = appendp(ctxt, p);
p->as = ACALL;
p->to.type = D_BRANCH;
p->to.sym = ctxt->symmorestack[2*2+noctxt];
} else {
// Pass framesize and argsize.
p->as = AMOVQ;
p->from.type = D_CONST;
p->from.offset = (uint64)moreconst2 << 32;
p->from.offset |= moreconst1;
p->to.type = D_AX;
p = appendp(ctxt, p);
p->as = ACALL;
p->to.type = D_BRANCH;
p->to.sym = ctxt->symmorestack[3*2+noctxt];
}
else
p->to.sym = ctxt->symmorestack[noctxt];
p = appendp(ctxt, p);
p->as = AJMP;
......
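
Taken together, the stacksplit prologue that survives reduces to a three-regime guard check against stackguard0 (or stackguard1 for C functions). Below is a hedged C rendering, with sp standing for the hardware stack pointer; the wraparound inequality follows the comment in the hunk above. The deleted moreconst machinery, which passed explicit sizes only when StackTop + textarg + 2*ptrsize + framesize + StackLimit reached StackMin, becomes unnecessary once newstack takes sizes from metadata.

    /* Sketch, not liblink source. */
    static int
    needsplit(uintptr sp, uintptr guard, uintptr framesize)
    {
        if(framesize <= StackSmall)
            return sp <= guard;                           /* small stack */
        if(framesize <= StackBig)
            return sp - framesize <= guard - StackSmall;  /* large stack */
        /* Huge stack: protect against unsigned wraparound when sp is near
         * zero; adding StackGuard to both sides keeps the left side positive. */
        return sp - guard + StackGuard <= framesize + (StackGuard - StackSmall);
    }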
......@@ -335,7 +335,7 @@ addstacksplit(Link *ctxt, LSym *cursym)
p = appendp(ctxt, p);
p->as = AMOVL;
p->from.type = D_INDIR+D_CX;
p->from.offset = 2*ctxt->arch->ptrsize; // G.panic
p->from.offset = 4*ctxt->arch->ptrsize; // G.panic
p->to.type = D_BX;
p = appendp(ctxt, p);
......@@ -501,7 +501,6 @@ static Prog*
stacksplit(Link *ctxt, Prog *p, int32 framesize, int noctxt, Prog **jmpok)
{
Prog *q, *q1;
int arg;
if(ctxt->debugstack) {
// 8l -K means check not only for stack
......@@ -539,8 +538,9 @@ stacksplit(Link *ctxt, Prog *p, int32 framesize, int noctxt, Prog **jmpok)
p->as = ACMPL;
p->from.type = D_SP;
p->to.type = D_INDIR+D_CX;
p->to.offset = 2*ctxt->arch->ptrsize; // G.stackguard0
if(ctxt->cursym->cfunc)
p->to.offset = 3*ctxt->arch->ptrsize;
p->to.offset = 3*ctxt->arch->ptrsize; // G.stackguard1
} else if(framesize <= StackBig) {
// large stack: SP-framesize <= stackguard-StackSmall
// LEAL -(framesize-StackSmall)(SP), AX
......@@ -555,8 +555,9 @@ stacksplit(Link *ctxt, Prog *p, int32 framesize, int noctxt, Prog **jmpok)
p->as = ACMPL;
p->from.type = D_AX;
p->to.type = D_INDIR+D_CX;
p->to.offset = 2*ctxt->arch->ptrsize; // G.stackguard0
if(ctxt->cursym->cfunc)
p->to.offset = 3*ctxt->arch->ptrsize;
p->to.offset = 3*ctxt->arch->ptrsize; // G.stackguard1
} else {
// Such a large stack we need to protect against wraparound
// if SP is close to zero.
......@@ -576,8 +577,9 @@ stacksplit(Link *ctxt, Prog *p, int32 framesize, int noctxt, Prog **jmpok)
p->as = AMOVL;
p->from.type = D_INDIR+D_CX;
p->from.offset = 0;
p->from.offset = 2*ctxt->arch->ptrsize; // G.stackguard0
if(ctxt->cursym->cfunc)
p->from.offset = 3*ctxt->arch->ptrsize;
p->from.offset = 3*ctxt->arch->ptrsize; // G.stackguard1
p->to.type = D_SI;
p = appendp(ctxt, p);
......@@ -617,33 +619,6 @@ stacksplit(Link *ctxt, Prog *p, int32 framesize, int noctxt, Prog **jmpok)
p->to.offset = 4;
q = p;
p = appendp(ctxt, p); // save frame size in DI
p->as = AMOVL;
p->to.type = D_DI;
p->from.type = D_CONST;
// If we ask for more stack, we'll get a minimum of StackMin bytes.
// We need a stack frame large enough to hold the top-of-stack data,
// the function arguments+results, our caller's PC, our frame,
// a word for the return PC of the next call, and then the StackLimit bytes
// that must be available on entry to any function called from a function
// that did a stack check. If StackMin is enough, don't ask for a specific
// amount: then we can use the custom functions and save a few
// instructions.
if(StackTop + ctxt->cursym->text->to.offset2 + ctxt->arch->ptrsize + framesize + ctxt->arch->ptrsize + StackLimit >= StackMin)
p->from.offset = (framesize+7) & ~7LL;
arg = ctxt->cursym->text->to.offset2;
if(arg == 1) // special marker for known 0
arg = 0;
if(arg&3)
ctxt->diag("misaligned argument size in stack split");
p = appendp(ctxt, p); // save arg size in AX
p->as = AMOVL;
p->to.type = D_AX;
p->from.type = D_CONST;
p->from.offset = arg;
p = appendp(ctxt, p);
p->as = ACALL;
p->to.type = D_BRANCH;
......
......@@ -19,9 +19,10 @@ TEXT runtime·rt0_go(SB),NOSPLIT,$0
// _cgo_init may update stackguard.
MOVL $runtime·g0(SB), BP
LEAL (-64*1024+104)(SP), BX
MOVL BX, g_stackguard(BP)
MOVL BX, g_stackguard0(BP)
MOVL SP, g_stackbase(BP)
MOVL BX, g_stackguard1(BP)
MOVL BX, (g_stack+stack_lo)(BP)
MOVL SP, (g_stack+stack_hi)(BP)
// find out information about the processor we're on
MOVL $0, AX
......@@ -44,10 +45,14 @@ nocpuinfo:
MOVL BX, 4(SP)
MOVL BP, 0(SP)
CALL AX
// update stackguard after _cgo_init
MOVL $runtime·g0(SB), CX
MOVL g_stackguard0(CX), AX
MOVL AX, g_stackguard(CX)
MOVL (g_stack+stack_lo)(CX), AX
ADDL $const_StackGuard, AX
MOVL AX, g_stackguard0(CX)
MOVL AX, g_stackguard1(CX)
// skip runtime·ldt0setup(SB) and tls test after _cgo_init for non-windows
CMPL runtime·iswindows(SB), $0
JEQ ok
......@@ -289,19 +294,12 @@ TEXT runtime·morestack(SB),NOSPLIT,$0-0
JNE 2(PC)
INT $3
// frame size in DI
// arg size in AX
// Save in m.
MOVL DI, m_moreframesize(BX)
MOVL AX, m_moreargsize(BX)
// Called from f.
// Set m->morebuf to f's caller.
MOVL 4(SP), DI // f's caller's PC
MOVL DI, (m_morebuf+gobuf_pc)(BX)
LEAL 8(SP), CX // f's caller's SP
MOVL CX, (m_morebuf+gobuf_sp)(BX)
MOVL CX, m_moreargp(BX)
get_tls(CX)
MOVL g(CX), SI
MOVL SI, (m_morebuf+gobuf_g)(BX)
......@@ -437,25 +435,6 @@ CALLFN(runtime·call268435456, 268435456)
CALLFN(runtime·call536870912, 536870912)
CALLFN(runtime·call1073741824, 1073741824)
// Return point when leaving stack.
//
// Lessstack can appear in stack traces for the same reason
// as morestack; in that context, it has 0 arguments.
TEXT runtime·lessstack(SB), NOSPLIT, $0-0
// Save return value in m->cret
get_tls(CX)
MOVL g(CX), BX
MOVL g_m(BX), BX
MOVL AX, m_cret(BX)
// Call oldstack on m->g0's stack.
MOVL m_g0(BX), BP
MOVL BP, g(CX)
MOVL (g_sched+gobuf_sp)(BP), SP
CALL runtime·oldstack(SB)
MOVL $0, 0x1004 // crash if oldstack returns
RET
// bool cas(int32 *val, int32 old, int32 new)
// Atomically:
// if(*val == old){
......@@ -836,10 +815,10 @@ TEXT setg_gcc<>(SB), NOSPLIT, $0
TEXT runtime·stackcheck(SB), NOSPLIT, $0-0
get_tls(CX)
MOVL g(CX), AX
CMPL g_stackbase(AX), SP
CMPL (g_stack+stack_hi)(AX), SP
JHI 2(PC)
INT $3
CMPL SP, g_stackguard(AX)
CMPL SP, (g_stack+stack_lo)(AX)
JHI 2(PC)
INT $3
RET
......@@ -904,15 +883,6 @@ TEXT runtime·emptyfunc(SB),0,$0-0
TEXT runtime·abort(SB),NOSPLIT,$0-0
INT $0x3
TEXT runtime·stackguard(SB),NOSPLIT,$0-8
MOVL SP, DX
MOVL DX, sp+0(FP)
get_tls(CX)
MOVL g(CX), BX
MOVL g_stackguard(BX), DX
MOVL DX, limit+4(FP)
RET
GLOBL runtime·tls0(SB), $32
// hash function using AES hardware instructions
......
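
The bootstrap arithmetic in rt0_go, written out as a hedged C sketch of the assembly above: g0 claims 64 KB of the OS-provided stack (less 104 bytes of slop above the current SP), and both guards are rebuilt from stack.lo once _cgo_init has had a chance to move it.

    g0->stack.hi = (uintptr)sp;                  /* current SP = top of OS stack */
    g0->stack.lo = (uintptr)sp - 64*1024 + 104;  /* assume 64 KB is usable       */
    /* ...after _cgo_init possibly updates stack.lo: */
    g0->stackguard0 = g0->stack.lo + StackGuard; /* Go split-prologue guard      */
    g0->stackguard1 = g0->stackguard0;           /* C split-prologue guard (g0)  */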
......@@ -19,9 +19,10 @@ TEXT runtime·rt0_go(SB),NOSPLIT,$0
// _cgo_init may update stackguard.
MOVQ $runtime·g0(SB), DI
LEAQ (-64*1024+104)(SP), BX
MOVQ BX, g_stackguard(DI)
MOVQ BX, g_stackguard0(DI)
MOVQ SP, g_stackbase(DI)
MOVQ BX, g_stackguard1(DI)
MOVQ BX, (g_stack+stack_lo)(DI)
MOVQ SP, (g_stack+stack_hi)(DI)
// find out information about the processor we're on
MOVQ $0, AX
......@@ -42,13 +43,16 @@ nocpuinfo:
MOVQ DI, CX // Win64 uses CX for first parameter
MOVQ $setg_gcc<>(SB), SI
CALL AX
// update stackguard after _cgo_init
MOVQ $runtime·g0(SB), CX
MOVQ g_stackguard0(CX), AX
MOVQ AX, g_stackguard(CX)
MOVQ (g_stack+stack_lo)(CX), AX
ADDQ $const_StackGuard, AX
MOVQ AX, g_stackguard0(CX)
MOVQ AX, g_stackguard1(CX)
CMPL runtime·iswindows(SB), $0
JEQ ok
needtls:
// skip TLS setup on Plan 9
CMPL runtime·isplan9(SB), $1
......@@ -261,7 +265,6 @@ onm:
*/
// Called during function prolog when more stack is needed.
// Caller has already done get_tls(CX); MOVQ m(CX), BX.
//
// The traceback routines see morestack on a g0 as being
// the top of a stack (for example, morestack calling newstack
......@@ -269,6 +272,8 @@ onm:
// record an argument size. For that purpose, it has no arguments.
TEXT runtime·morestack(SB),NOSPLIT,$0-0
// Cannot grow scheduler stack (m->g0).
MOVQ g(CX), BX
MOVQ g_m(BX), BX
MOVQ m_g0(BX), SI
CMPQ g(CX), SI
JNE 2(PC)
......@@ -286,7 +291,6 @@ TEXT runtime·morestack(SB),NOSPLIT,$0-0
MOVQ AX, (m_morebuf+gobuf_pc)(BX)
LEAQ 16(SP), AX // f's caller's SP
MOVQ AX, (m_morebuf+gobuf_sp)(BX)
MOVQ AX, m_moreargp(BX)
get_tls(CX)
MOVQ g(CX), SI
MOVQ SI, (m_morebuf+gobuf_g)(BX)
......@@ -307,6 +311,11 @@ TEXT runtime·morestack(SB),NOSPLIT,$0-0
MOVQ $0, 0x1003 // crash if newstack returns
RET
// morestack but not preserving ctxt.
TEXT runtime·morestack_noctxt(SB),NOSPLIT,$0
MOVL $0, DX
JMP runtime·morestack(SB)
// reflectcall: call a function with the given argument list
// func call(f *FuncVal, arg *byte, argsize, retoffset uint32).
// we don't have variable-sized frames, so we use a small number
......@@ -415,142 +424,6 @@ CALLFN(runtime·call268435456, 268435456)
CALLFN(runtime·call536870912, 536870912)
CALLFN(runtime·call1073741824, 1073741824)
// Return point when leaving stack.
//
// Lessstack can appear in stack traces for the same reason
// as morestack; in that context, it has 0 arguments.
TEXT runtime·lessstack(SB), NOSPLIT, $0-0
// Save return value in m->cret
get_tls(CX)
MOVQ g(CX), BX
MOVQ g_m(BX), BX
MOVQ AX, m_cret(BX)
// Call oldstack on m->g0's stack.
MOVQ m_g0(BX), BP
MOVQ BP, g(CX)
MOVQ (g_sched+gobuf_sp)(BP), SP
CALL runtime·oldstack(SB)
MOVQ $0, 0x1004 // crash if oldstack returns
RET
// morestack trampolines
TEXT runtime·morestack00(SB),NOSPLIT,$0
get_tls(CX)
MOVQ g(CX), BX
MOVQ g_m(BX), BX
MOVQ $0, AX
MOVQ AX, m_moreframesize(BX)
MOVQ $runtime·morestack(SB), AX
JMP AX
TEXT runtime·morestack01(SB),NOSPLIT,$0
get_tls(CX)
MOVQ g(CX), BX
MOVQ g_m(BX), BX
SHLQ $32, AX
MOVQ AX, m_moreframesize(BX)
MOVQ $runtime·morestack(SB), AX
JMP AX
TEXT runtime·morestack10(SB),NOSPLIT,$0
get_tls(CX)
MOVQ g(CX), BX
MOVQ g_m(BX), BX
MOVLQZX AX, AX
MOVQ AX, m_moreframesize(BX)
MOVQ $runtime·morestack(SB), AX
JMP AX
TEXT runtime·morestack11(SB),NOSPLIT,$0
get_tls(CX)
MOVQ g(CX), BX
MOVQ g_m(BX), BX
MOVQ AX, m_moreframesize(BX)
MOVQ $runtime·morestack(SB), AX
JMP AX
// subcases of morestack01
// with const of 8,16,...48
TEXT runtime·morestack8(SB),NOSPLIT,$0
MOVQ $1, R8
MOVQ $morestack<>(SB), AX
JMP AX
TEXT runtime·morestack16(SB),NOSPLIT,$0
MOVQ $2, R8
MOVQ $morestack<>(SB), AX
JMP AX
TEXT runtime·morestack24(SB),NOSPLIT,$0
MOVQ $3, R8
MOVQ $morestack<>(SB), AX
JMP AX
TEXT runtime·morestack32(SB),NOSPLIT,$0
MOVQ $4, R8
MOVQ $morestack<>(SB), AX
JMP AX
TEXT runtime·morestack40(SB),NOSPLIT,$0
MOVQ $5, R8
MOVQ $morestack<>(SB), AX
JMP AX
TEXT runtime·morestack48(SB),NOSPLIT,$0
MOVQ $6, R8
MOVQ $morestack<>(SB), AX
JMP AX
TEXT morestack<>(SB),NOSPLIT,$0
get_tls(CX)
MOVQ g(CX), BX
MOVQ g_m(BX), BX
SHLQ $35, R8
MOVQ R8, m_moreframesize(BX)
MOVQ $runtime·morestack(SB), AX
JMP AX
TEXT runtime·morestack00_noctxt(SB),NOSPLIT,$0
MOVL $0, DX
JMP runtime·morestack00(SB)
TEXT runtime·morestack01_noctxt(SB),NOSPLIT,$0
MOVL $0, DX
JMP runtime·morestack01(SB)
TEXT runtime·morestack10_noctxt(SB),NOSPLIT,$0
MOVL $0, DX
JMP runtime·morestack10(SB)
TEXT runtime·morestack11_noctxt(SB),NOSPLIT,$0
MOVL $0, DX
JMP runtime·morestack11(SB)
TEXT runtime·morestack8_noctxt(SB),NOSPLIT,$0
MOVL $0, DX
JMP runtime·morestack8(SB)
TEXT runtime·morestack16_noctxt(SB),NOSPLIT,$0
MOVL $0, DX
JMP runtime·morestack16(SB)
TEXT runtime·morestack24_noctxt(SB),NOSPLIT,$0
MOVL $0, DX
JMP runtime·morestack24(SB)
TEXT runtime·morestack32_noctxt(SB),NOSPLIT,$0
MOVL $0, DX
JMP runtime·morestack32(SB)
TEXT runtime·morestack40_noctxt(SB),NOSPLIT,$0
MOVL $0, DX
JMP runtime·morestack40(SB)
TEXT runtime·morestack48_noctxt(SB),NOSPLIT,$0
MOVL $0, DX
JMP runtime·morestack48(SB)
// bool cas(int32 *val, int32 old, int32 new)
// Atomically:
// if(*val == old){
......@@ -922,14 +795,14 @@ TEXT setg_gcc<>(SB),NOSPLIT,$0
MOVQ DI, g(AX)
RET
// check that SP is in range [g->stackbase, g->stackguard)
// check that SP is in range [g->stack.lo, g->stack.hi)
TEXT runtime·stackcheck(SB), NOSPLIT, $0-0
get_tls(CX)
MOVQ g(CX), AX
CMPQ g_stackbase(AX), SP
CMPQ (g_stack+stack_hi)(AX), SP
JHI 2(PC)
INT $3
CMPQ SP, g_stackguard(AX)
CMPQ SP, (g_stack+stack_lo)(AX)
JHI 2(PC)
INT $3
RET
......@@ -978,15 +851,6 @@ TEXT runtime·gocputicks(SB),NOSPLIT,$0-8
MOVQ AX, ret+0(FP)
RET
TEXT runtime·stackguard(SB),NOSPLIT,$0-16
MOVQ SP, DX
MOVQ DX, sp+0(FP)
get_tls(CX)
MOVQ g(CX), BX
MOVQ g_stackguard(BX), DX
MOVQ DX, limit+8(FP)
RET
GLOBL runtime·tls0(SB), $64
// hash function using AES hardware instructions
......
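
What remains of runtime·morestack after the deletions, as hedged pseudocode (names such as f_callers_pc are illustrative, not runtime symbols): it refuses to grow g0, snapshots where the caller was, and hands off to newstack on the scheduler stack. Frame and argument sizes are no longer recorded in M because newstack recovers them from function metadata.

    if(g == g->m->g0)
        crash();                          /* cannot grow the scheduler stack */
    m->morebuf.pc = f_callers_pc;         /* f's caller's PC                 */
    m->morebuf.sp = f_callers_sp;         /* f's caller's SP                 */
    m->morebuf.g  = g;
    save_context_in_g_sched();            /* f's PC/SP/ctxt into g->sched    */
    switch_to(m->g0);
    runtime·newstack();                   /* copies the stack; never returns */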
......@@ -20,10 +20,11 @@ TEXT runtime·rt0_go(SB),NOSPLIT,$0
// create istack out of the given (operating system) stack.
MOVL $runtime·g0(SB), DI
LEAL (-64*1024+104)(SP), BX
MOVL BX, g_stackguard(DI)
MOVL BX, g_stackguard0(DI)
MOVL SP, g_stackbase(DI)
MOVL BX, g_stackguard1(DI)
MOVL BX, (g_stack+stack_lo)(DI)
MOVL SP, (g_stack+stack_hi)(DI)
// find out information about the processor we're on
MOVQ $0, AX
......@@ -234,13 +235,16 @@ onm:
*/
// Called during function prolog when more stack is needed.
// Caller has already done get_tls(CX); MOVQ m(CX), BX.
//
// The traceback routines see morestack on a g0 as being
// the top of a stack (for example, morestack calling newstack
// calling the scheduler calling newm calling gc), so we must
// record an argument size. For that purpose, it has no arguments.
TEXT runtime·morestack(SB),NOSPLIT,$0-0
get_tls(CX)
MOVL g(CX), BX
MOVL g_m(BX), BX
// Cannot grow scheduler stack (m->g0).
MOVL m_g0(BX), SI
CMPL g(CX), SI
......@@ -259,7 +263,6 @@ TEXT runtime·morestack(SB),NOSPLIT,$0-0
MOVL AX, (m_morebuf+gobuf_pc)(BX)
LEAL 16(SP), AX // f's caller's SP
MOVL AX, (m_morebuf+gobuf_sp)(BX)
MOVL AX, m_moreargp(BX)
get_tls(CX)
MOVL g(CX), SI
MOVL SI, (m_morebuf+gobuf_g)(BX)
......@@ -280,6 +283,11 @@ TEXT runtime·morestack(SB),NOSPLIT,$0-0
MOVL $0, 0x1003 // crash if newstack returns
RET
// morestack trampolines
TEXT runtime·morestack_noctxt(SB),NOSPLIT,$0
MOVL $0, DX
JMP runtime·morestack(SB)
// reflectcall: call a function with the given argument list
// func call(f *FuncVal, arg *byte, argsize, retoffset uint32).
// we don't have variable-sized frames, so we use a small number
......@@ -389,142 +397,6 @@ CALLFN(runtime·call268435456, 268435456)
CALLFN(runtime·call536870912, 536870912)
CALLFN(runtime·call1073741824, 1073741824)
// Return point when leaving stack.
//
// Lessstack can appear in stack traces for the same reason
// as morestack; in that context, it has 0 arguments.
TEXT runtime·lessstack(SB), NOSPLIT, $0-0
// Save return value in m->cret
get_tls(CX)
MOVL g(CX), BX
MOVL g_m(BX), BX
MOVQ AX, m_cret(BX) // MOVQ, to save all 64 bits
// Call oldstack on m->g0's stack.
MOVL m_g0(BX), BX
MOVL BX, g(CX)
MOVL (g_sched+gobuf_sp)(BX), SP
CALL runtime·oldstack(SB)
MOVL $0, 0x1004 // crash if oldstack returns
RET
// morestack trampolines
TEXT runtime·morestack00(SB),NOSPLIT,$0
get_tls(CX)
MOVL g(CX), BX
MOVL g_m(BX), BX
MOVQ $0, AX
MOVQ AX, m_moreframesize(BX)
MOVL $runtime·morestack(SB), AX
JMP AX
TEXT runtime·morestack01(SB),NOSPLIT,$0
get_tls(CX)
MOVL g(CX), BX
MOVL g_m(BX), BX
SHLQ $32, AX
MOVQ AX, m_moreframesize(BX)
MOVL $runtime·morestack(SB), AX
JMP AX
TEXT runtime·morestack10(SB),NOSPLIT,$0
get_tls(CX)
MOVL g(CX), BX
MOVL g_m(BX), BX
MOVLQZX AX, AX
MOVQ AX, m_moreframesize(BX)
MOVL $runtime·morestack(SB), AX
JMP AX
TEXT runtime·morestack11(SB),NOSPLIT,$0
get_tls(CX)
MOVL g(CX), BX
MOVL g_m(BX), BX
MOVQ AX, m_moreframesize(BX)
MOVL $runtime·morestack(SB), AX
JMP AX
// subcases of morestack01
// with const of 8,16,...48
TEXT runtime·morestack8(SB),NOSPLIT,$0
MOVQ $1, R8
MOVL $morestack<>(SB), AX
JMP AX
TEXT runtime·morestack16(SB),NOSPLIT,$0
MOVQ $2, R8
MOVL $morestack<>(SB), AX
JMP AX
TEXT runtime·morestack24(SB),NOSPLIT,$0
MOVQ $3, R8
MOVL $morestack<>(SB), AX
JMP AX
TEXT runtime·morestack32(SB),NOSPLIT,$0
MOVQ $4, R8
MOVL $morestack<>(SB), AX
JMP AX
TEXT runtime·morestack40(SB),NOSPLIT,$0
MOVQ $5, R8
MOVL $morestack<>(SB), AX
JMP AX
TEXT runtime·morestack48(SB),NOSPLIT,$0
MOVQ $6, R8
MOVL $morestack<>(SB), AX
JMP AX
TEXT morestack<>(SB),NOSPLIT,$0
get_tls(CX)
MOVL g(CX), BX
MOVL g_m(BX), BX
SHLQ $35, R8
MOVQ R8, m_moreframesize(BX)
MOVL $runtime·morestack(SB), AX
JMP AX
TEXT runtime·morestack00_noctxt(SB),NOSPLIT,$0
MOVL $0, DX
JMP runtime·morestack00(SB)
TEXT runtime·morestack01_noctxt(SB),NOSPLIT,$0
MOVL $0, DX
JMP runtime·morestack01(SB)
TEXT runtime·morestack10_noctxt(SB),NOSPLIT,$0
MOVL $0, DX
JMP runtime·morestack10(SB)
TEXT runtime·morestack11_noctxt(SB),NOSPLIT,$0
MOVL $0, DX
JMP runtime·morestack11(SB)
TEXT runtime·morestack8_noctxt(SB),NOSPLIT,$0
MOVL $0, DX
JMP runtime·morestack8(SB)
TEXT runtime·morestack16_noctxt(SB),NOSPLIT,$0
MOVL $0, DX
JMP runtime·morestack16(SB)
TEXT runtime·morestack24_noctxt(SB),NOSPLIT,$0
MOVL $0, DX
JMP runtime·morestack24(SB)
TEXT runtime·morestack32_noctxt(SB),NOSPLIT,$0
MOVL $0, DX
JMP runtime·morestack32(SB)
TEXT runtime·morestack40_noctxt(SB),NOSPLIT,$0
MOVL $0, DX
JMP runtime·morestack40(SB)
TEXT runtime·morestack48_noctxt(SB),NOSPLIT,$0
MOVL $0, DX
JMP runtime·morestack48(SB)
// bool cas(int32 *val, int32 old, int32 new)
// Atomically:
// if(*val == old){
......@@ -722,10 +594,10 @@ TEXT runtime·setg(SB), NOSPLIT, $0-4
TEXT runtime·stackcheck(SB), NOSPLIT, $0-0
get_tls(CX)
MOVL g(CX), AX
CMPL g_stackbase(AX), SP
CMPL (g_stack+stack_hi)(AX), SP
JHI 2(PC)
MOVL 0, AX
CMPL SP, g_stackguard(AX)
CMPL SP, (g_stack+stack_lo)(AX)
JHI 2(PC)
MOVL 0, AX
RET
......@@ -789,15 +661,6 @@ TEXT runtime·gocputicks(SB),NOSPLIT,$0-8
MOVQ AX, ret+0(FP)
RET
TEXT runtime·stackguard(SB),NOSPLIT,$0-8
MOVL SP, DX
MOVL DX, sp+0(FP)
get_tls(CX)
MOVL g(CX), BX
MOVL g_stackguard(BX), DX
MOVL DX, limit+4(FP)
RET
GLOBL runtime·tls0(SB), $64
// hash function using AES hardware instructions
......
......@@ -31,9 +31,11 @@ TEXT runtime·rt0_go(SB),NOSPLIT,$-4
// create istack out of the OS stack
MOVW $(-8192+104)(R13), R0
MOVW R0, g_stackguard(g) // (w 104b guard)
MOVW R0, g_stackguard0(g)
MOVW R13, g_stackbase(g)
MOVW R0, g_stackguard1(g)
MOVW R0, (g_stack+stack_lo)(g)
MOVW R13, (g_stack+stack_hi)(g)
BL runtime·emptyfunc(SB) // fault if stack check is wrong
#ifndef GOOS_nacl
......@@ -51,8 +53,10 @@ TEXT runtime·rt0_go(SB),NOSPLIT,$-4
nocgo:
// update stackguard after _cgo_init
MOVW g_stackguard0(g), R0
MOVW R0, g_stackguard(g)
MOVW (g_stack+stack_lo)(g), R0
ADD $const_StackGuard, R0
MOVW R0, g_stackguard0(g)
MOVW R0, g_stackguard1(g)
BL runtime·checkgoarm(SB)
BL runtime·check(SB)
......@@ -287,9 +291,6 @@ TEXT runtime·morestack(SB),NOSPLIT,$-4-0
CMP g, R4
BL.EQ runtime·abort(SB)
MOVW R1, m_moreframesize(R8)
MOVW R2, m_moreargsize(R8)
// Called from f.
// Set g->sched to context in f.
MOVW R7, (g_sched+gobuf_ctxt)(g)
......@@ -302,7 +303,6 @@ TEXT runtime·morestack(SB),NOSPLIT,$-4-0
MOVW R3, (m_morebuf+gobuf_pc)(R8) // f's caller's PC
MOVW SP, (m_morebuf+gobuf_sp)(R8) // f's caller's SP
MOVW $4(SP), R3 // f's argument pointer
MOVW R3, m_moreargp(R8)
MOVW g, (m_morebuf+gobuf_g)(R8)
// Call newstack on m->g0's stack.
......@@ -436,22 +436,6 @@ CALLFN(runtime·call268435456, 268435456)
CALLFN(runtime·call536870912, 536870912)
CALLFN(runtime·call1073741824, 1073741824)
// Return point when leaving stack.
// using frame size $-4 means do not save LR on stack.
//
// Lessstack can appear in stack traces for the same reason
// as morestack; in that context, it has 0 arguments.
TEXT runtime·lessstack(SB),NOSPLIT,$-4-0
// Save return value in m->cret
MOVW g_m(g), R8
MOVW R0, m_cret(R8)
// Call oldstack on m->g0's stack.
MOVW m_g0(R8), R0
BL setg<>(SB)
MOVW (g_sched+gobuf_sp)(g), SP
BL runtime·oldstack(SB)
// void jmpdefer(fn, sp);
// called from deferreturn.
// 1. grab stored LR for caller
......@@ -721,13 +705,6 @@ TEXT runtime·atomicloaduintptr(SB),NOSPLIT,$0-8
TEXT runtime·atomicloaduint(SB),NOSPLIT,$0-8
B runtime·atomicload(SB)
TEXT runtime·stackguard(SB),NOSPLIT,$0-8
MOVW R13, R1
MOVW g_stackguard(g), R2
MOVW R1, sp+0(FP)
MOVW R2, limit+4(FP)
RET
// AES hashing not implemented for ARM
TEXT runtime·aeshash(SB),NOSPLIT,$-4-0
MOVW $0, R0
......
......@@ -100,7 +100,7 @@ x_cgo_init(G *g)
pthread_attr_init(&attr);
pthread_attr_getstacksize(&attr, &size);
g->stackguard = (uintptr)&attr - size + 4096;
g->stacklo = (uintptr)&attr - size + 4096;
pthread_attr_destroy(&attr);
inittls();
......@@ -121,7 +121,8 @@ _cgo_sys_thread_start(ThreadStart *ts)
pthread_attr_init(&attr);
pthread_attr_getstacksize(&attr, &size);
ts->g->stackguard = size;
// Leave stacklo=0 and set stackhi=size; mstart will do the rest.
ts->g->stackhi = size;
err = pthread_create(&p, &attr, threadentry, ts);
pthread_sigmask(SIG_SETMASK, &oset, nil);
......@@ -140,14 +141,6 @@ threadentry(void *v)
ts = *(ThreadStart*)v;
free(v);
ts.g->stackbase = (uintptr)&ts;
/*
* _cgo_sys_thread_start set stackguard to stack size;
* change to actual guard pointer.
*/
ts.g->stackguard = (uintptr)&ts - ts.g->stackguard + 4096;
pthread_setspecific(k1, (void*)ts.g);
crosscall_386(ts.fn);
......
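
The cgo handoff, spelled out: x_cgo_init runs on the initial thread and can derive a real lower bound from a local address and the pthread stack size, but a thread that is creating another knows only the new stack's size. So _cgo_sys_thread_start parks the size in stackhi and leaves stacklo zero; the runtime's thread start (mstart) later resolves real bounds. A condensed sketch of the pattern repeated in each gcc_*.c file below:

    /* initial thread (x_cgo_init): a real address is at hand */
    g->stacklo = (uintptr)&attr - size + 4096;   /* keep a 4 KB margin */

    /* new thread (_cgo_sys_thread_start): only the size is known */
    pthread_attr_getstacksize(&attr, &size);
    ts->g->stackhi = size;                       /* stacklo stays 0 for now */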
......@@ -71,7 +71,7 @@ x_cgo_init(G *g)
pthread_attr_init(&attr);
pthread_attr_getstacksize(&attr, &size);
g->stackguard = (uintptr)&attr - size + 4096;
g->stacklo = (uintptr)&attr - size + 4096;
pthread_attr_destroy(&attr);
inittls();
......@@ -92,7 +92,8 @@ _cgo_sys_thread_start(ThreadStart *ts)
pthread_attr_init(&attr);
pthread_attr_getstacksize(&attr, &size);
ts->g->stackguard = size;
// Leave stacklo=0 and set stackhi=size; mstart will do the rest.
ts->g->stackhi = size;
err = pthread_create(&p, &attr, threadentry, ts);
pthread_sigmask(SIG_SETMASK, &oset, nil);
......@@ -111,14 +112,6 @@ threadentry(void *v)
ts = *(ThreadStart*)v;
free(v);
ts.g->stackbase = (uintptr)&ts;
/*
* _cgo_sys_thread_start set stackguard to stack size;
* change to actual guard pointer.
*/
ts.g->stackguard = (uintptr)&ts - ts.g->stackguard + 4096;
pthread_setspecific(k1, (void*)ts.g);
crosscall_amd64(ts.fn);
......
......@@ -21,7 +21,7 @@ x_cgo_init(G *g, void (*setg)(void*))
setg_gcc = setg;
pthread_attr_init(&attr);
pthread_attr_getstacksize(&attr, &size);
g->stackguard = (uintptr)&attr - size + 4096;
g->stacklo = (uintptr)&attr - size + 4096;
pthread_attr_destroy(&attr);
}
......@@ -40,7 +40,8 @@ _cgo_sys_thread_start(ThreadStart *ts)
pthread_attr_init(&attr);
pthread_attr_getstacksize(&attr, &size);
ts->g->stackguard = size;
// Leave stacklo=0 and set stackhi=size; mstart will do the rest.
ts->g->stackhi = size;
err = pthread_create(&p, &attr, threadentry, ts);
pthread_sigmask(SIG_SETMASK, &oset, nil);
......@@ -59,14 +60,6 @@ threadentry(void *v)
ts = *(ThreadStart*)v;
free(v);
ts.g->stackbase = (uintptr)&ts;
/*
* _cgo_sys_thread_start set stackguard to stack size;
* change to actual guard pointer.
*/
ts.g->stackguard = (uintptr)&ts - ts.g->stackguard + 4096;
/*
* Set specific keys.
*/
......
......@@ -21,7 +21,7 @@ x_cgo_init(G *g, void (*setg)(void*))
setg_gcc = setg;
pthread_attr_init(&attr);
pthread_attr_getstacksize(&attr, &size);
g->stackguard = (uintptr)&attr - size + 4096;
g->stacklo = (uintptr)&attr - size + 4096;
pthread_attr_destroy(&attr);
}
......@@ -40,7 +40,8 @@ _cgo_sys_thread_start(ThreadStart *ts)
pthread_attr_init(&attr);
pthread_attr_getstacksize(&attr, &size);
ts->g->stackguard = size;
// Leave stacklo=0 and set stackhi=size; mstart will do the rest.
ts->g->stackhi = size;
err = pthread_create(&p, &attr, threadentry, ts);
pthread_sigmask(SIG_SETMASK, &oset, nil);
......@@ -59,14 +60,6 @@ threadentry(void *v)
ts = *(ThreadStart*)v;
free(v);
ts.g->stackbase = (uintptr)&ts;
/*
* _cgo_sys_thread_start set stackguard to stack size;
* change to actual guard pointer.
*/
ts.g->stackguard = (uintptr)&ts - ts.g->stackguard + 4096;
/*
* Set specific keys.
*/
......
......@@ -21,7 +21,7 @@ x_cgo_init(G *g, void (*setg)(void*))
setg_gcc = setg;
pthread_attr_init(&attr);
pthread_attr_getstacksize(&attr, &size);
g->stackguard = (uintptr)&attr - size + 4096;
g->stacklo = (uintptr)&attr - size + 4096;
pthread_attr_destroy(&attr);
}
......@@ -40,7 +40,8 @@ _cgo_sys_thread_start(ThreadStart *ts)
pthread_attr_init(&attr);
pthread_attr_getstacksize(&attr, &size);
ts->g->stackguard = size;
// Leave stacklo=0 and set stackhi=size; mstart will do the rest.
ts->g->stackhi = size;
err = pthread_create(&p, &attr, threadentry, ts);
pthread_sigmask(SIG_SETMASK, &oset, nil);
......@@ -59,14 +60,6 @@ threadentry(void *v)
ts = *(ThreadStart*)v;
free(v);
ts.g->stackbase = (uintptr)&ts;
/*
* _cgo_sys_thread_start set stackguard to stack size;
* change to actual guard pointer.
*/
ts.g->stackguard = (uintptr)&ts - ts.g->stackguard + 4096;
/*
* Set specific keys.
*/
......
......@@ -21,7 +21,7 @@ x_cgo_init(G *g, void (*setg)(void*))
setg_gcc = setg;
pthread_attr_init(&attr);
pthread_attr_getstacksize(&attr, &size);
g->stackguard = (uintptr)&attr - size + 4096;
g->stacklo = (uintptr)&attr - size + 4096;
pthread_attr_destroy(&attr);
}
......@@ -40,7 +40,8 @@ _cgo_sys_thread_start(ThreadStart *ts)
pthread_attr_init(&attr);
pthread_attr_getstacksize(&attr, &size);
ts->g->stackguard = size;
// Leave stacklo=0 and set stackhi=size; mstart will do the rest.
ts->g->stackhi = size;
err = pthread_create(&p, &attr, threadentry, ts);
pthread_sigmask(SIG_SETMASK, &oset, nil);
......@@ -59,14 +60,6 @@ threadentry(void *v)
ts = *(ThreadStart*)v;
free(v);
ts.g->stackbase = (uintptr)&ts;
/*
* _cgo_sys_thread_start set stackguard to stack size;
* change to actual guard pointer.
*/
ts.g->stackguard = (uintptr)&ts - ts.g->stackguard + 4096;
/*
* Set specific keys.
*/
......
......@@ -32,7 +32,7 @@ x_cgo_init(G *g, void (*setg)(void*))
setg_gcc = setg;
pthread_attr_init(&attr);
pthread_attr_getstacksize(&attr, &size);
g->stackguard = (uintptr)&attr - size + 4096;
g->stacklo = (uintptr)&attr - size + 4096;
pthread_attr_destroy(&attr);
}
......@@ -56,7 +56,8 @@ _cgo_sys_thread_start(ThreadStart *ts)
pthread_attr_init(&attr);
size = 0;
pthread_attr_getstacksize(&attr, &size);
ts->g->stackguard = size;
// Leave stacklo=0 and set stackhi=size; mstart will do the rest.
ts->g->stackhi = size;
err = pthread_create(&p, &attr, threadentry, ts);
pthread_sigmask(SIG_SETMASK, &oset, nil);
......@@ -76,14 +77,6 @@ threadentry(void *v)
ts = *(ThreadStart*)v;
free(v);
ts.g->stackbase = (uintptr)&ts;
/*
* _cgo_sys_thread_start set stackguard to stack size;
* change to actual guard pointer.
*/
ts.g->stackguard = (uintptr)&ts - ts.g->stackguard + 4096 * 2;
crosscall_arm1(ts.fn, setg_gcc, (void*)ts.g);
return nil;
}
......@@ -19,7 +19,7 @@ x_cgo_init(G *g, void (*setg)(void*))
setg_gcc = setg;
pthread_attr_init(&attr);
pthread_attr_getstacksize(&attr, &size);
g->stackguard = (uintptr)&attr - size + 4096;
g->stacklo = (uintptr)&attr - size + 4096;
pthread_attr_destroy(&attr);
}
......@@ -43,7 +43,8 @@ _cgo_sys_thread_start(ThreadStart *ts)
pthread_attr_init(&attr);
size = 0;
pthread_attr_getstacksize(&attr, &size);
ts->g->stackguard = size;
// Leave stacklo=0 and set stackhi=size; mstart will do the rest.
ts->g->stackhi = size;
err = pthread_create(&p, &attr, threadentry, ts);
pthread_sigmask(SIG_SETMASK, &oset, nil);
......@@ -61,14 +62,6 @@ threadentry(void *v)
ts = *(ThreadStart*)v;
free(v);
ts.g->stackbase = (uintptr)&ts;
/*
* _cgo_sys_thread_start set stackguard to stack size;
* change to actual guard pointer.
*/
ts.g->stackguard = (uintptr)&ts - ts.g->stackguard + 4096;
/*
* Set specific keys.
*/
......
......@@ -19,7 +19,7 @@ x_cgo_init(G* g, void (*setg)(void*))
setg_gcc = setg;
pthread_attr_init(&attr);
pthread_attr_getstacksize(&attr, &size);
g->stackguard = (uintptr)&attr - size + 4096;
g->stacklo = (uintptr)&attr - size + 4096;
pthread_attr_destroy(&attr);
}
......@@ -38,7 +38,8 @@ _cgo_sys_thread_start(ThreadStart *ts)
pthread_attr_init(&attr);
pthread_attr_getstacksize(&attr, &size);
ts->g->stackguard = size;
// Leave stacklo=0 and set stackhi=size; mstart will do the rest.
ts->g->stackhi = size;
err = pthread_create(&p, &attr, threadentry, ts);
pthread_sigmask(SIG_SETMASK, &oset, nil);
......@@ -56,14 +57,6 @@ threadentry(void *v)
ts = *(ThreadStart*)v;
free(v);
ts.g->stackbase = (uintptr)&ts;
/*
* _cgo_sys_thread_start set stackguard to stack size;
* change to actual guard pointer.
*/
ts.g->stackguard = (uintptr)&ts - ts.g->stackguard + 4096;
/*
* Set specific keys.
*/
......
......@@ -31,7 +31,8 @@ _cgo_sys_thread_start(ThreadStart *ts)
pthread_attr_init(&attr);
size = 0;
pthread_attr_getstacksize(&attr, &size);
ts->g->stackguard = size;
// Leave stacklo=0 and set stackhi=size; mstart will do the rest.
ts->g->stackhi = size;
err = pthread_create(&p, &attr, threadentry, ts);
pthread_sigmask(SIG_SETMASK, &oset, nil);
......@@ -50,14 +51,6 @@ threadentry(void *v)
ts = *(ThreadStart*)v;
free(v);
ts.g->stackbase = (uintptr)&ts;
/*
* _cgo_sys_thread_start set stackguard to stack size;
* change to actual guard pointer.
*/
ts.g->stackguard = (uintptr)&ts - ts.g->stackguard + 4096 * 2;
crosscall_arm1(ts.fn, setg_gcc, (void*)ts.g);
return nil;
}
......@@ -71,7 +64,7 @@ x_cgo_init(G *g, void (*setg)(void*), void **tlsg, void **tlsbase)
setg_gcc = setg;
pthread_attr_init(&attr);
pthread_attr_getstacksize(&attr, &size);
g->stackguard = (uintptr)&attr - size + 4096;
g->stacklo = (uintptr)&attr - size + 4096;
pthread_attr_destroy(&attr);
if (x_cgo_inittls) {
......
......@@ -20,7 +20,7 @@ x_cgo_init(G *g, void (*setg)(void*))
setg_gcc = setg;
pthread_attr_init(&attr);
pthread_attr_getstacksize(&attr, &size);
g->stackguard = (uintptr)&attr - size + 4096;
g->stacklo = (uintptr)&attr - size + 4096;
pthread_attr_destroy(&attr);
}
......@@ -39,7 +39,8 @@ _cgo_sys_thread_start(ThreadStart *ts)
pthread_attr_init(&attr);
pthread_attr_getstacksize(&attr, &size);
ts->g->stackguard = size;
// Leave stacklo=0 and set stackhi=size; mstart will do the rest.
ts->g->stackhi = size;
err = pthread_create(&p, &attr, threadentry, ts);
pthread_sigmask(SIG_SETMASK, &oset, nil);
......@@ -58,14 +59,6 @@ threadentry(void *v)
ts = *(ThreadStart*)v;
free(v);
ts.g->stackbase = (uintptr)&ts;
/*
* _cgo_sys_thread_start set stackguard to stack size;
* change to actual guard pointer.
*/
ts.g->stackguard = (uintptr)&ts - ts.g->stackguard + 4096;
/*
* Set specific keys.
*/
......
......@@ -20,7 +20,7 @@ x_cgo_init(G *g, void (*setg)(void*))
setg_gcc = setg;
pthread_attr_init(&attr);
pthread_attr_getstacksize(&attr, &size);
g->stackguard = (uintptr)&attr - size + 4096;
g->stacklo = (uintptr)&attr - size + 4096;
pthread_attr_destroy(&attr);
}
......@@ -40,7 +40,8 @@ _cgo_sys_thread_start(ThreadStart *ts)
pthread_attr_init(&attr);
pthread_attr_getstacksize(&attr, &size);
ts->g->stackguard = size;
// Leave stacklo=0 and set stackhi=size; mstart will do the rest.
ts->g->stackhi = size;
err = pthread_create(&p, &attr, threadentry, ts);
pthread_sigmask(SIG_SETMASK, &oset, nil);
......@@ -59,14 +60,6 @@ threadentry(void *v)
ts = *(ThreadStart*)v;
free(v);
ts.g->stackbase = (uintptr)&ts;
/*
* _cgo_sys_thread_start set stackguard to stack size;
* change to actual guard pointer.
*/
ts.g->stackguard = (uintptr)&ts - ts.g->stackguard + 4096;
/*
* Set specific keys.
*/
......
......@@ -21,7 +21,7 @@ x_cgo_init(G *g, void (*setg)(void*))
setg_gcc = setg;
pthread_attr_init(&attr);
pthread_attr_getstacksize(&attr, &size);
g->stackguard = (uintptr)&attr - size + 4096;
g->stacklo = (uintptr)&attr - size + 4096;
pthread_attr_destroy(&attr);
}
......@@ -40,7 +40,8 @@ _cgo_sys_thread_start(ThreadStart *ts)
pthread_attr_init(&attr);
pthread_attr_getstacksize(&attr, &size);
ts->g->stackguard = size;
// Leave stacklo=0 and set stackhi=size; mstart will do the rest.
ts->g->stackhi = size;
err = pthread_create(&p, &attr, threadentry, ts);
pthread_sigmask(SIG_SETMASK, &oset, nil);
......@@ -60,14 +61,6 @@ threadentry(void *v)
ts = *(ThreadStart*)v;
free(v);
ts.g->stackbase = (uintptr)&ts;
/*
* _cgo_sys_thread_start set stackguard to stack size;
* change to actual guard pointer.
*/
ts.g->stackguard = (uintptr)&ts - ts.g->stackguard + 4096 * 2;
crosscall_arm1(ts.fn, setg_gcc, (void*)ts.g);
return nil;
}
......@@ -92,7 +92,7 @@ x_cgo_init(G *g, void (*setg)(void*))
setg_gcc = setg;
pthread_attr_init(&attr);
pthread_attr_getstacksize(&attr, &size);
g->stackguard = (uintptr)&attr - size + 4096;
g->stacklo = (uintptr)&attr - size + 4096;
pthread_attr_destroy(&attr);
// Locate symbol for the system pthread_create function.
......@@ -126,7 +126,8 @@ _cgo_sys_thread_start(ThreadStart *ts)
pthread_attr_init(&attr);
pthread_attr_getstacksize(&attr, &size);
ts->g->stackguard = size;
// Leave stacklo=0 and set stackhi=size; mstart will do the rest.
ts->g->stackhi = size;
err = sys_pthread_create(&p, &attr, threadentry, ts);
pthread_sigmask(SIG_SETMASK, &oset, nil);
......@@ -147,14 +148,6 @@ threadentry(void *v)
ts = *(ThreadStart*)v;
free(v);
ts.g->stackbase = (uintptr)&ts;
/*
* _cgo_sys_thread_start set stackguard to stack size;
* change to actual guard pointer.
*/
ts.g->stackguard = (uintptr)&ts - ts.g->stackguard + 4096;
/*
* Set specific keys.
*/
......
......@@ -92,7 +92,7 @@ x_cgo_init(G *g, void (*setg)(void*))
setg_gcc = setg;
pthread_attr_init(&attr);
pthread_attr_getstacksize(&attr, &size);
g->stackguard = (uintptr)&attr - size + 4096;
g->stacklo = (uintptr)&attr - size + 4096;
pthread_attr_destroy(&attr);
// Locate symbol for the system pthread_create function.
......@@ -127,7 +127,8 @@ _cgo_sys_thread_start(ThreadStart *ts)
pthread_attr_init(&attr);
pthread_attr_getstacksize(&attr, &size);
ts->g->stackguard = size;
// Leave stacklo=0 and set stackhi=size; mstart will do the rest.
ts->g->stackhi = size;
err = sys_pthread_create(&p, &attr, threadentry, ts);
pthread_sigmask(SIG_SETMASK, &oset, nil);
......@@ -148,14 +149,6 @@ threadentry(void *v)
ts = *(ThreadStart*)v;
free(v);
ts.g->stackbase = (uintptr)&ts;
/*
* _cgo_sys_thread_start set stackguard to stack size;
* change to actual guard pointer.
*/
ts.g->stackguard = (uintptr)&ts - ts.g->stackguard + 4096;
/*
* Set specific keys.
*/
......
......@@ -20,7 +20,7 @@ void
x_cgo_init(G *g)
{
int tmp;
g->stackguard = (uintptr)&tmp - STACKSIZE + 8*1024;
g->stacklo = (uintptr)&tmp - STACKSIZE + 8*1024;
}
......@@ -44,8 +44,8 @@ threadentry(void *v)
ts = *(ThreadStart*)v;
free(v);
ts.g->stackbase = (uintptr)&ts;
ts.g->stackguard = (uintptr)&ts - STACKSIZE + 8*1024;
ts.g->stackhi = (uintptr)&ts;
ts.g->stacklo = (uintptr)&ts - STACKSIZE + 8*1024;
/*
* Set specific keys in thread local storage.
......
......@@ -20,7 +20,7 @@ void
x_cgo_init(G *g)
{
int tmp;
g->stackguard = (uintptr)&tmp - STACKSIZE + 8*1024;
g->stacklo = (uintptr)&tmp - STACKSIZE + 8*1024;
}
......@@ -44,8 +44,8 @@ threadentry(void *v)
ts = *(ThreadStart*)v;
free(v);
ts.g->stackbase = (uintptr)&ts;
ts.g->stackguard = (uintptr)&ts - STACKSIZE + 8*1024;
ts.g->stackhi = (uintptr)&ts;
ts.g->stacklo = (uintptr)&ts - STACKSIZE + 8*1024;
/*
* Set specific keys in thread local storage.
......
......@@ -21,8 +21,8 @@ typedef uintptr_t uintptr;
typedef struct G G;
struct G
{
uintptr stackguard;
uintptr stackbase;
uintptr stacklo;
uintptr stackhi;
};
/*
......
......@@ -24,7 +24,6 @@ func stackguard() (sp, limit uintptr)
var Entersyscall = entersyscall
var Exitsyscall = exitsyscall
var LockedOSThread = lockedOSThread
var Stackguard = stackguard
type LFNode struct {
Next *LFNode
......
......@@ -382,7 +382,7 @@ dumpgoroutine(G *gp)
Panic *p;
bool (*fn)(Stkframe*, void*);
if(gp->syscallstack != (uintptr)nil) {
if(gp->syscallsp != (uintptr)nil) {
sp = gp->syscallsp;
pc = gp->syscallpc;
lr = 0;
......@@ -412,8 +412,6 @@ dumpgoroutine(G *gp)
child.arglen = 0;
child.sp = nil;
child.depth = 0;
if(!ScanStackByFrames)
runtime·throw("need frame info to dump stacks");
fn = dumpframe;
runtime·gentraceback(pc, sp, lr, gp, 0, nil, 0x7fffffff, &fn, &child, false);
......
......@@ -136,7 +136,7 @@ func mallocgc(size uintptr, typ *_type, flags int) unsafe.Pointer {
}
mp.mallocing = 0
if mp.curg != nil {
mp.curg.stackguard0 = mp.curg.stackguard
mp.curg.stackguard0 = mp.curg.stack.lo + _StackGuard
}
// Note: one releasem for the acquirem just above.
// The other for the acquirem at start of malloc.
......@@ -313,7 +313,7 @@ marked:
}
mp.mallocing = 0
if mp.curg != nil {
mp.curg.stackguard0 = mp.curg.stackguard
mp.curg.stackguard0 = mp.curg.stack.lo + _StackGuard
}
// Note: one releasem for the acquirem just above.
// The other for the acquirem at start of malloc.
......
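
Context for the mallocgc change: a preemption request is delivered by poisoning stackguard0 with StackPreempt so that the next function prologue fails its stack check and enters morestack; un-poisoning must now recompute the guard as stack.lo + _StackGuard because the shadow g.stackguard field is gone. Schematically (a sketch, not runtime source):

    gp->stackguard0 = StackPreempt;              /* request: next prologue traps   */
    /* ...newstack notices the poison and yields instead of growing... */
    gp->stackguard0 = gp->stack.lo + StackGuard; /* restore the honest guard value */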
......@@ -689,9 +689,6 @@ static void
scanstack(G *gp)
{
M *mp;
int32 n;
Stktop *stk;
uintptr sp, guard;
bool (*fn)(Stkframe*, void*);
if(runtime·readgstatus(gp)&Gscan == 0) {
......@@ -719,44 +716,8 @@ scanstack(G *gp)
if((mp = gp->m) != nil && mp->helpgc)
runtime·throw("can't scan gchelper stack");
if(gp->syscallstack != (uintptr)nil) {
// Scanning another goroutine that is about to enter or might
// have just exited a system call. It may be executing code such
// as schedlock and may have needed to start a new stack segment.
// Use the stack segment and stack pointer at the time of
// the system call instead, since that won't change underfoot.
sp = gp->syscallsp;
stk = (Stktop*)gp->syscallstack;
guard = gp->syscallguard;
} else {
// Scanning another goroutine's stack.
// The goroutine is usually asleep (the world is stopped).
sp = gp->sched.sp;
stk = (Stktop*)gp->stackbase;
guard = gp->stackguard;
}
if(ScanStackByFrames) {
USED(sp);
USED(stk);
USED(guard);
fn = scanframe;
runtime·gentraceback(~(uintptr)0, ~(uintptr)0, 0, gp, 0, nil, 0x7fffffff, &fn, nil, false);
} else {
n = 0;
while(stk) {
if(sp < guard-StackGuard || (uintptr)stk < sp) {
runtime·printf("scanstack inconsistent: g%D#%d sp=%p not in [%p,%p]\n", gp->goid, n, sp, guard-StackGuard, stk);
runtime·throw("scanstack");
}
if(Debug > 2)
runtime·printf("conservative stack %p+%p\n", (byte*)sp, (uintptr)stk-sp);
scanblock((byte*)sp, (uintptr)stk - sp, ScanConservatively);
sp = stk->gobuf.sp;
guard = stk->stackguard;
stk = (Stktop*)stk->stackbase;
n++;
}
}
fn = scanframe;
runtime·gentraceback(~(uintptr)0, ~(uintptr)0, 0, gp, 0, nil, 0x7fffffff, &fn, nil, false);
}
// The gp has been moved to a gc safepoint. If there is gcphase specific
......
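
The scanstack rewrite in miniature: the deleted branch walked the Stktop segment chain, conservatively scanning each segment as raw memory, while the surviving path has the traceback machinery visit every frame so scanframe can apply precise pointer maps. Condensed from the hunk above:

    /* old, deleted: conservative, segment by segment */
    while(stk) {
        scanblock((byte*)sp, (uintptr)stk - sp, ScanConservatively);
        sp  = stk->gobuf.sp;
        stk = (Stktop*)stk->stackbase;
    }

    /* new: precise, frame by frame */
    fn = scanframe;
    runtime·gentraceback(~(uintptr)0, ~(uintptr)0, 0, gp, 0, nil, 0x7fffffff, &fn, nil, false);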
......@@ -5,8 +5,6 @@
// Garbage collector (GC)
enum {
ScanStackByFrames = 1,
// Four bits per word (see #defines below).
gcBits = 4,
wordsPerBitmapByte = 8/gcBits,
......
......@@ -144,7 +144,7 @@ void
runtime·minit(void)
{
// Initialize signal handling.
runtime·signalstack((byte*)g->m->gsignal->stackguard - StackGuard, 32*1024);
runtime·signalstack((byte*)g->m->gsignal->stack.lo, 32*1024);
runtime·sigprocmask(SIG_SETMASK, &sigset_none, nil);
}
......
......@@ -204,7 +204,7 @@ void
runtime·minit(void)
{
// Initialize signal handling
runtime·signalstack((byte*)g->m->gsignal->stackguard - StackGuard, 32*1024);
runtime·signalstack((byte*)g->m->gsignal->stack.lo, 32*1024);
runtime·sigprocmask(&sigset_none, nil);
}
......
......@@ -212,7 +212,7 @@ void
runtime·minit(void)
{
// Initialize signal handling
runtime·signalstack((byte*)g->m->gsignal->stackguard - StackGuard, 32*1024);
runtime·signalstack((byte*)g->m->gsignal->stack.lo, 32*1024);
runtime·sigprocmask(&sigset_none, nil);
}
......
......@@ -226,7 +226,7 @@ void
runtime·minit(void)
{
// Initialize signal handling.
runtime·signalstack((byte*)g->m->gsignal->stackguard - StackGuard, 32*1024);
runtime·signalstack((byte*)g->m->gsignal->stack.lo, 32*1024);
runtime·rtsigprocmask(SIG_SETMASK, &sigset_none, nil, sizeof(Sigset));
}
......
......@@ -31,7 +31,7 @@ runtime·minit(void)
int32 ret;
// Initialize signal handling
ret = runtime·nacl_exception_stack((byte*)g->m->gsignal->stackguard - StackGuard, 32*1024);
ret = runtime·nacl_exception_stack((byte*)g->m->gsignal->stack.lo, 32*1024);
if(ret < 0)
runtime·printf("runtime: nacl_exception_stack: error %d\n", -ret);
......
......@@ -282,7 +282,7 @@ runtime·minit(void)
g->m->procid = runtime·lwp_self();
// Initialize signal handling
runtime·signalstack((byte*)g->m->gsignal->stackguard - StackGuard, 32*1024);
runtime·signalstack((byte*)g->m->gsignal->stack.lo, 32*1024);
runtime·sigprocmask(SIG_SETMASK, &sigset_none, nil);
}
......
......@@ -237,7 +237,7 @@ void
runtime·minit(void)
{
// Initialize signal handling
runtime·signalstack((byte*)g->m->gsignal->stackguard - StackGuard, 32*1024);
runtime·signalstack((byte*)g->m->gsignal->stack.lo, 32*1024);
runtime·sigprocmask(SIG_SETMASK, sigset_none);
}
......
......@@ -183,7 +183,7 @@ runtime·minit(void)
{
runtime·asmcgocall(runtime·miniterrno, (void *)libc·___errno);
// Initialize signal handling
runtime·signalstack((byte*)g->m->gsignal->stackguard - StackGuard, 32*1024);
runtime·signalstack((byte*)g->m->gsignal->stack.lo, 32*1024);
runtime·sigprocmask(SIG_SETMASK, &sigset_none, nil);
}
......
......@@ -51,8 +51,11 @@ runtime·recovery_m(G *gp)
argp = (void*)gp->sigcode0;
pc = (uintptr)gp->sigcode1;
// Unwind to the stack frame with d's arguments in it.
runtime·unwindstack(gp, argp);
// d's arguments need to be in the stack.
if(argp != nil && ((uintptr)argp < gp->stack.lo || gp->stack.hi < (uintptr)argp)) {
runtime·printf("recover: %p not in [%p, %p]\n", argp, gp->stack.lo, gp->stack.hi);
runtime·throw("bad recovery");
}
// Make the deferproc for this d return again,
// this time returning 1. The calling function will
......@@ -73,34 +76,6 @@ runtime·recovery_m(G *gp)
runtime·gogo(&gp->sched);
}
// Free stack frames until we hit the last one
// or until we find the one that contains the sp.
void
runtime·unwindstack(G *gp, byte *sp)
{
Stktop *top;
byte *stk;
// Must be called from a different goroutine, usually m->g0.
if(g == gp)
runtime·throw("unwindstack on self");
while((top = (Stktop*)gp->stackbase) != 0 && top->stackbase != 0) {
stk = (byte*)gp->stackguard - StackGuard;
if(stk <= sp && sp < (byte*)gp->stackbase)
break;
gp->stackbase = top->stackbase;
gp->stackguard = top->stackguard;
gp->stackguard0 = gp->stackguard;
runtime·stackfree(gp, stk, top);
}
if(sp != nil && (sp < (byte*)gp->stackguard - StackGuard || (byte*)gp->stackbase < sp)) {
runtime·printf("recover: %p not in [%p, %p]\n", sp, gp->stackguard - StackGuard, gp->stackbase);
runtime·throw("bad unwindstack");
}
}
void
runtime·startpanic_m(void)
{
......
......@@ -62,7 +62,6 @@ typedef struct M M;
typedef struct P P;
typedef struct Note Note;
typedef struct Slice Slice;
typedef struct Stktop Stktop;
typedef struct String String;
typedef struct FuncVal FuncVal;
typedef struct SigTab SigTab;
......@@ -74,12 +73,12 @@ typedef struct InterfaceType InterfaceType;
typedef struct Eface Eface;
typedef struct Type Type;
typedef struct PtrType PtrType;
typedef struct ChanType ChanType;
typedef struct ChanType ChanType;
typedef struct MapType MapType;
typedef struct Defer Defer;
typedef struct Panic Panic;
typedef struct Hmap Hmap;
typedef struct Hiter Hiter;
typedef struct Hiter Hiter;
typedef struct Hchan Hchan;
typedef struct Complex64 Complex64;
typedef struct Complex128 Complex128;
......@@ -92,7 +91,8 @@ typedef struct ParForThread ParForThread;
typedef struct CgoMal CgoMal;
typedef struct PollDesc PollDesc;
typedef struct DebugVars DebugVars;
typedef struct ForceGCState ForceGCState;
typedef struct ForceGCState ForceGCState;
typedef struct Stack Stack;
/*
* Per-CPU declaration.
......@@ -265,23 +265,33 @@ struct WinCallbackContext
bool cleanstack;
};
// Stack describes a Go execution stack.
// The bounds of the stack are exactly [lo, hi),
// with no implicit data structures on either side.
struct Stack
{
uintptr lo;
uintptr hi;
};
struct G
{
// stackguard0 can be set to StackPreempt as opposed to stackguard
uintptr stackguard0; // cannot move - also known to liblink, libmach, runtime/cgo
uintptr stackbase; // cannot move - also known to libmach, runtime/cgo
Panic* panic; // cannot move - also known to liblink
// stackguard1 is checked by C code; it is set to ~0 in ordinary (non-g0, non-gsignal) goroutines
uintptr stackguard1; // cannot move - also known to liblink
Defer* defer;
// Stack parameters.
// stack describes the actual stack memory: [stack.lo, stack.hi).
// stackguard0 is the stack pointer compared in the Go stack growth prologue.
// It is stack.lo+StackGuard normally, but can be StackPreempt to trigger a preemption.
// stackguard1 is the stack pointer compared in the C stack growth prologue.
// It is stack.lo+StackGuard on g0 and gsignal stacks.
// It is ~0 on other goroutine stacks, to trigger a call to morestackc (and crash).
Stack stack; // offset known to runtime/cgo
uintptr stackguard0; // offset known to liblink
uintptr stackguard1; // offset known to liblink
Panic* panic; // innermost panic - offset known to liblink
Defer* defer; // innermost defer
Gobuf sched;
uintptr syscallstack; // if status==Gsyscall, syscallstack = stackbase to use during gc
uintptr syscallsp; // if status==Gsyscall, syscallsp = sched.sp to use during gc
uintptr syscallpc; // if status==Gsyscall, syscallpc = sched.pc to use during gc
uintptr syscallguard; // if status==Gsyscall, syscallguard = stackguard to use during gc
uintptr stackguard; // same as stackguard0, but not set to StackPreempt
uintptr stack0;
uintptr stacksize;
void* param; // passed parameter on wakeup
uint32 atomicstatus;
int64 goid;
......@@ -291,34 +301,29 @@ struct G
bool issystem; // do not output in stack dump, ignore in deadlock detector
bool preempt; // preemption signal, duplicates stackguard0 = StackPreempt
bool paniconfault; // panic (instead of crash) on unexpected fault address
bool preemptscan; // preempted g does scan for GC
bool gcworkdone; // debug: cleared at begining of gc work phase cycle, set by gcphasework, tested at end of cycle
bool preemptscan; // preempted g does scan for GC
bool gcworkdone; // debug: cleared at begining of gc work phase cycle, set by gcphasework, tested at end of cycle
bool throwsplit; // must not split stack
int8 raceignore; // ignore race detection events
M* m; // for debuggers, but offset not hard-coded
M* lockedm;
int32 sig;
int32 writenbuf;
Slice writebuf;
uintptr sigcode0;
uintptr sigcode1;
uintptr sigpc;
uintptr gopc; // pc of go statement that created this goroutine
uintptr racectx;
SudoG *waiting; // sudog structures this G is waiting on (that have a valid elem ptr)
SudoG* waiting; // sudog structures this G is waiting on (that have a valid elem ptr)
uintptr end[];
};
struct M
{
G* g0; // goroutine with scheduling stack
void* moreargp; // argument pointer for more stack
Gobuf morebuf; // gobuf arg to morestack
// Fields not known to debuggers.
uint32 moreframesize; // size arguments to morestack
uint32 moreargsize; // known by amd64 asm to follow moreframesize
uintreg cret; // return value from C
uint64 procid; // for debuggers, but offset not hard-coded
G* gsignal; // signal-handling G
uintptr tls[4]; // thread-local storage (for x86 extern register)
......@@ -362,7 +367,6 @@ struct M
uint8 traceback;
bool (*waitunlockf)(G*, void*);
void* waitlock;
uintptr forkstackguard;
uintptr scalararg[4]; // scalar argument/return for mcall
void* ptrarg[4]; // pointer argument/return for mcall
#ifdef GOOS_windows
......@@ -442,16 +446,6 @@ enum
LockInternal = 2,
};
struct Stktop
{
// The offsets of these fields are known to (hard-coded in) libmach.
uintptr stackguard;
uintptr stackbase;
Gobuf gobuf;
uint32 argsize;
uint8* argp; // pointer to arguments in old frame
};
struct SigTab
{
int32 flags;
......@@ -596,8 +590,6 @@ struct ForceGCState
};
extern uint32 runtime·gcphase;
extern bool runtime·precisestack;
extern bool runtime·copystack;
/*
* defined macros
......@@ -758,7 +750,6 @@ void runtime·gogo(Gobuf*);
void runtime·gostartcall(Gobuf*, void(*)(void), void*);
void runtime·gostartcallfn(Gobuf*, FuncVal*);
void runtime·gosave(Gobuf*);
void runtime·lessstack(void);
void runtime·goargs(void);
void runtime·goenvs(void);
void runtime·goenvs_unix(void);
......@@ -809,8 +800,8 @@ int32 runtime·funcspdelta(Func*, uintptr);
int8* runtime·funcname(Func*);
int32 runtime·pcdatavalue(Func*, int32, uintptr);
void runtime·stackinit(void);
void* runtime·stackalloc(G*, uint32);
void runtime·stackfree(G*, void*, Stktop*);
Stack runtime·stackalloc(uint32);
void runtime·stackfree(Stack);
void runtime·shrinkstack(G*);
MCache* runtime·allocmcache(void);
void runtime·freemcache(MCache*);
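The stackalloc/stackfree declarations above now traffic in whole Stack values instead of a G plus raw pointers and a Stktop. A toy Go model of that shape; the slice-backed allocation and the pin map are demo devices only, nothing like the runtime's real allocator:

package main

import "unsafe"

// Toy model of the Stack-valued alloc/free API: callers receive and
// return the bounds themselves, with no G or Stktop involved.
type Stack struct {
	lo, hi uintptr
}

// pinned keeps the toy allocations alive; purely a demo device.
var pinned = map[uintptr][]byte{}

func stackalloc(n uint32) Stack {
	buf := make([]byte, n)
	lo := uintptr(unsafe.Pointer(&buf[0]))
	pinned[lo] = buf
	return Stack{lo: lo, hi: lo + uintptr(n)}
}

func stackfree(stk Stack) {
	delete(pinned, stk.lo)
}

func main() {
	stk := stackalloc(8192)
	println(stk.hi-stk.lo, "bytes between the bounds") // 8192
	stackfree(stk)
}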
......@@ -873,7 +864,6 @@ int64 runtime·unixnanotime(void); // real time, can skip
void runtime·dopanic(int32);
void runtime·startpanic(void);
void runtime·freezetheworld(void);
void runtime·unwindstack(G*, byte*);
void runtime·sigprof(uint8 *pc, uint8 *sp, uint8 *lr, G *gp, M *mp);
void runtime·resetcpuprofiler(int32);
void runtime·setcpuprofilerate(int32);
......
......@@ -99,11 +99,6 @@ enum {
// The maximum number of bytes that a chain of NOSPLIT
// functions can use.
StackLimit = StackGuard - StackSystem - StackSmall,
// The assumed size of the top-of-stack data block.
// The actual size can be smaller than this but cannot be larger.
// Checked in proc.c's runtime.malg.
StackTop = 88,
};
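Plugging in the constants used by the test file further down (StackGuard = 256, StackSmall = 64) and assuming StackSystem is 0, as it is on most targets, gives a concrete NOSPLIT budget:

package main

import "fmt"

// Illustrative values matching the test constants below; StackSystem
// is assumed 0 here (it is nonzero only on some systems).
const (
	stackSystem = 0
	stackGuard  = 256
	stackSmall  = 64
	stackLimit  = stackGuard - stackSystem - stackSmall
)

func main() {
	fmt.Println(stackLimit) // 192 bytes for a chain of NOSPLIT frames
}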
// Goroutine preemption request.
......@@ -117,3 +112,4 @@ enum
StackPreempt = -1314,
};
*/
#define StackFork ((uint64)-1234)
......@@ -10,62 +10,8 @@ import (
"sync"
"testing"
"time"
"unsafe"
)
// See stack.h.
const (
StackGuard = 256
StackSmall = 64
StackLimit = StackGuard - StackSmall
)
// Test stack split logic by calling functions of every frame size
// from near 0 up to and beyond the default segment size (4k).
// Each of those functions reports its SP + stack limit, and then
// the test (the caller) checks that those make sense. By not
// doing the actual checking and reporting from the suspect functions,
// we minimize the possibility of crashes during the test itself.
//
// Exhaustive test for http://golang.org/issue/3310.
// The linker used to get a few sizes near the segment size wrong:
//
// --- FAIL: TestStackSplit (0.01 seconds)
// stack_test.go:22: after runtime_test.stack3812: sp=0x7f7818d5d078 < limit=0x7f7818d5d080
// stack_test.go:22: after runtime_test.stack3816: sp=0x7f7818d5d078 < limit=0x7f7818d5d080
// stack_test.go:22: after runtime_test.stack3820: sp=0x7f7818d5d070 < limit=0x7f7818d5d080
// stack_test.go:22: after runtime_test.stack3824: sp=0x7f7818d5d070 < limit=0x7f7818d5d080
// stack_test.go:22: after runtime_test.stack3828: sp=0x7f7818d5d068 < limit=0x7f7818d5d080
// stack_test.go:22: after runtime_test.stack3832: sp=0x7f7818d5d068 < limit=0x7f7818d5d080
// stack_test.go:22: after runtime_test.stack3836: sp=0x7f7818d5d060 < limit=0x7f7818d5d080
// stack_test.go:22: after runtime_test.stack3840: sp=0x7f7818d5d060 < limit=0x7f7818d5d080
// stack_test.go:22: after runtime_test.stack3844: sp=0x7f7818d5d058 < limit=0x7f7818d5d080
// stack_test.go:22: after runtime_test.stack3848: sp=0x7f7818d5d058 < limit=0x7f7818d5d080
// stack_test.go:22: after runtime_test.stack3852: sp=0x7f7818d5d050 < limit=0x7f7818d5d080
// stack_test.go:22: after runtime_test.stack3856: sp=0x7f7818d5d050 < limit=0x7f7818d5d080
// stack_test.go:22: after runtime_test.stack3860: sp=0x7f7818d5d048 < limit=0x7f7818d5d080
// stack_test.go:22: after runtime_test.stack3864: sp=0x7f7818d5d048 < limit=0x7f7818d5d080
// FAIL
func TestStackSplit(t *testing.T) {
for _, f := range splitTests {
sp, guard := f()
bottom := guard - StackGuard
if sp < bottom+StackLimit {
fun := FuncForPC(**(**uintptr)(unsafe.Pointer(&f)))
t.Errorf("after %s: sp=%#x < limit=%#x (guard=%#x, bottom=%#x)",
fun.Name(), sp, bottom+StackLimit, guard, bottom)
}
}
}
var Used byte
func use(buf []byte) {
for _, c := range buf {
Used += c
}
}
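For reference, the splitTests entries exercised by the deleted TestStackSplit were machine-generated functions, one per frame size. A hedged sketch of their shape; the frame size, the stub helper, and the zero return values are placeholders for what the generated code obtained from runtime internals:

package main

// Hedged sketch of one splitTests entry. The real functions were
// machine-generated for many frame sizes and reported their SP and
// stack limit via runtime internals; stubSPAndGuard is hypothetical.
var splitTests = []func() (uintptr, uintptr){stack3812}

func stack3812() (sp, guard uintptr) {
	var buf [3812]byte // frame size under test (illustrative)
	use(buf[:])        // keep the frame live, as in the test above
	return stubSPAndGuard()
}

func stubSPAndGuard() (uintptr, uintptr) {
	return 0, 0 // placeholders for runtime-provided values
}

var used byte

func use(buf []byte) {
	for _, c := range buf {
		used += c
	}
}

func main() {}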
// TestStackMem measures per-thread stack segment cache behavior.
// The test consumed up to 500MB in the past.
func TestStackMem(t *testing.T) {
......
......@@ -219,7 +219,6 @@ const _NoArgs = ^uintptr(0)
func newstack()
func newproc()
func lessstack()
func morestack()
func mstart()
func rt0_go()
......
......@@ -34,7 +34,6 @@ var (
deferprocPC = funcPC(deferproc)
goexitPC = funcPC(goexit)
jmpdeferPC = funcPC(jmpdefer)
lessstackPC = funcPC(lessstack)
mcallPC = funcPC(mcall)
morestackPC = funcPC(morestack)
mstartPC = funcPC(mstart)
......@@ -57,7 +56,7 @@ func gentraceback(pc0 uintptr, sp0 uintptr, lr0 uintptr, gp *g, skip int, pcbuf
g := getg()
gotraceback := gotraceback(nil)
if pc0 == ^uintptr(0) && sp0 == ^uintptr(0) { // Signal to fetch saved values from gp.
if gp.syscallstack != 0 {
if gp.syscallsp != 0 {
pc0 = gp.syscallpc
sp0 = gp.syscallsp
if usesLR {
......@@ -115,7 +114,6 @@ func gentraceback(pc0 uintptr, sp0 uintptr, lr0 uintptr, gp *g, skip int, pcbuf
frame.fn = f
n := 0
stk := (*stktop)(unsafe.Pointer(gp.stackbase))
for n < max {
// Typically:
// pc is the PC of the running function.
......@@ -123,39 +121,8 @@ func gentraceback(pc0 uintptr, sp0 uintptr, lr0 uintptr, gp *g, skip int, pcbuf
// fp is the frame pointer (caller's stack pointer) at that program counter, or nil if unknown.
// stk is the stack containing sp.
// The caller's program counter is lr, unless lr is zero, in which case it is *(uintptr*)sp.
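// (Illustrative restatement, not part of this diff: the rule above
// amounts to
//	callerPC := frame.lr
//	if callerPC == 0 {
//		callerPC = *(*uintptr)(unsafe.Pointer(frame.sp))
//	}
// on LR machines; on x86, lr starts out zero, so the caller's PC is
// always loaded from the stack.)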
if frame.pc == lessstackPC {
// Hit top of stack segment. Unwind to next segment.
frame.pc = stk.gobuf.pc
frame.sp = stk.gobuf.sp
frame.lr = 0
frame.fp = 0
if printing && showframe(nil, gp) {
print("----- stack segment boundary -----\n")
}
stk = (*stktop)(unsafe.Pointer(stk.stackbase))
f = findfunc(frame.pc)
if f == nil {
print("runtime: unknown pc ", hex(frame.pc), " after stack split\n")
if callback != nil {
gothrow("unknown pc")
}
}
frame.fn = f
continue
}
f = frame.fn
// Hook for handling Windows exception handlers. See traceback_windows.go.
if systraceback != nil {
changed, aborted := systraceback(f, (*stkframe)(noescape(unsafe.Pointer(&frame))), gp, printing, callback, v)
if aborted {
return n
}
if changed {
continue
}
}
// Found an actual function.
// Derive frame pointer and link register.
if frame.fp == 0 {
......@@ -224,8 +191,6 @@ func gentraceback(pc0 uintptr, sp0 uintptr, lr0 uintptr, gp *g, skip int, pcbuf
frame.arglen = uintptr(f.args)
} else if flr == nil {
frame.arglen = 0
} else if frame.lr == lessstackPC {
frame.arglen = uintptr(stk.argsize)
} else {
i := funcarglen(flr, frame.lr)
if i >= 0 {
......@@ -617,7 +582,6 @@ func topofstack(f *_func) bool {
pc == mstartPC ||
pc == mcallPC ||
pc == morestackPC ||
pc == lessstackPC ||
pc == rt0_goPC ||
externalthreadhandlerp != 0 && pc == externalthreadhandlerp
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import "unsafe"
// sigtrampPC is the PC at the beginning of the sigtramp assembly function.
// The traceback needs to recognize it so it can unwind past Windows exception handlers.
var sigtrampPC uintptr
func sigtramp()
func init() {
sigtrampPC = funcPC(sigtramp)
systraceback = traceback_windows
}
func traceback_windows(f *_func, frame *stkframe, gp *g, printing bool, callback func(*stkframe, unsafe.Pointer) bool, v unsafe.Pointer) (changed, aborted bool) {
// The main traceback thinks it has found a function. Check this.
// Windows exception handlers run on the actual g stack (there is room
// dedicated to this below the usual "bottom of stack"), not on a separate
// stack. As a result, we have to be able to unwind past the exception
// handler when called to unwind during stack growth inside the handler.
// Recognize the frame at the call to sighandler in sigtramp and unwind
// using the context argument passed to the call. This is awful.
if f != nil && f.entry == sigtrampPC && frame.pc > f.entry {
var r *context
// Invoke callback so that stack copier sees an uncopyable frame.
if callback != nil {
frame.continpc = frame.pc
frame.argp = 0
frame.arglen = 0
if !callback(frame, v) {
aborted = true
return
}
}
r = (*context)(unsafe.Pointer(frame.sp + ptrSize))
frame.pc = contextPC(r)
frame.sp = contextSP(r)
frame.lr = 0
frame.fp = 0
frame.fn = nil
if printing && showframe(nil, gp) {
print("----- exception handler -----\n")
}
f = findfunc(frame.pc)
if f == nil {
print("runtime: unknown pc ", hex(frame.pc), " after exception handler\n")
if callback != nil {
gothrow("unknown pc")
}
}
frame.fn = f
changed = true
return
}
return
}