Commit 84736957 authored by Russ Cox

runtime: fix panic/wrapper/recover math

The gp->panicwrap adjustment is just fatally flawed.
Now that there is a Panic.argp field, update that instead.
That can be done on entry only, so that unwinding doesn't
need to worry about undoing anything. The wrappers
emit a few more instructions in the prologue but everything
else in the system gets much simpler.

It also fixes (without trying) a broken test I never checked in.

Fixes #7491.
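In Go-like pseudocode, the prologue emitted for every WRAPPER function
now amounts to the following (a sketch; g and Panic follow the runtime.h
definitions changed below, and FP and bottomOfFrame stand for the two
addresses the per-arch hunks compute):

	// on entry to a wrapper
	if g.panic != nil && g.panic.argp == FP {
		// The panic machinery invoked this wrapper as the deferred
		// call; pretend the wrapped function was invoked directly by
		// moving argp to the argument pointer the wrapped call will see.
		g.panic.argp = bottomOfFrame
	}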

LGTM=khr
R=khr
CC=dvyukov, golang-codereviews, iant, r
https://golang.org/cl/135490044
parent 8620e2b0
@@ -241,7 +241,7 @@ nocache(Prog *p)
static void
addstacksplit(Link *ctxt, LSym *cursym)
{
- Prog *p, *pl, *q, *q1, *q2;
+ Prog *p, *pl, *p1, *p2, *q, *q1, *q2;
int o;
int32 autosize, autoffset;
@@ -437,32 +437,93 @@ addstacksplit(Link *ctxt, LSym *cursym)
p->spadj = autosize;
if(cursym->text->reg & WRAPPER) {
- // g->panicwrap += autosize;
- // MOVW panicwrap_offset(g), R3
- // ADD $autosize, R3
- // MOVW R3 panicwrap_offset(g)
+ // if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
+ //
+ // MOVW g_panic(g), R1
+ // CMP $0, R1
+ // B.EQ end
+ // MOVW panic_argp(R1), R2
+ // ADD $(autosize+4), R13, R3
+ // CMP R2, R3
+ // B.NE end
+ // ADD $4, R13, R4
+ // MOVW R4, panic_argp(R1)
+ // end:
+ // NOP
+ //
+ // The NOP is needed to give the jumps somewhere to land.
+ // It is a liblink NOP, not an ARM NOP: it encodes to 0 instruction bytes.
p = appendp(ctxt, p);
p->as = AMOVW;
p->from.type = D_OREG;
p->from.reg = REGG;
- p->from.offset = 2*ctxt->arch->ptrsize;
+ p->from.offset = 2*ctxt->arch->ptrsize; // G.panic
p->to.type = D_REG;
- p->to.reg = 3;
+ p->to.reg = 1;
+ p = appendp(ctxt, p);
+ p->as = ACMP;
+ p->from.type = D_CONST;
+ p->from.offset = 0;
+ p->to.type = D_REG;
+ p->to.reg = 1;
+ p = appendp(ctxt, p);
+ p->as = AB;
+ p->scond = C_SCOND_EQ;
+ p->to.type = D_BRANCH;
+ p1 = p;
+ p = appendp(ctxt, p);
+ p->as = AMOVW;
+ p->from.type = D_OREG;
+ p->from.reg = 1;
+ p->from.offset = 0; // Panic.argp
+ p->to.type = D_REG;
+ p->to.reg = 2;
p = appendp(ctxt, p);
p->as = AADD;
p->from.type = D_CONST;
- p->from.offset = autosize;
+ p->from.offset = autosize+4;
+ p->reg = 13;
p->to.type = D_REG;
p->to.reg = 3;
+ p = appendp(ctxt, p);
+ p->as = ACMP;
+ p->from.type = D_REG;
+ p->from.reg = 2;
+ p->to.type = D_REG;
+ p->to.reg = 3;
+ p = appendp(ctxt, p);
+ p->as = AB;
+ p->scond = C_SCOND_NE;
+ p->to.type = D_BRANCH;
+ p2 = p;
+ p = appendp(ctxt, p);
+ p->as = AADD;
+ p->from.type = D_CONST;
+ p->from.offset = 4;
+ p->reg = 13;
+ p->to.type = D_REG;
+ p->to.reg = 4;
p = appendp(ctxt, p);
p->as = AMOVW;
p->from.type = D_REG;
- p->from.reg = 3;
+ p->from.reg = 4;
p->to.type = D_OREG;
- p->to.reg = REGG;
- p->to.offset = 2*ctxt->arch->ptrsize;
+ p->to.reg = 1;
+ p->to.offset = 0; // Panic.argp
+ p = appendp(ctxt, p);
+ p->as = ANOP;
+ p1->pcond = p;
+ p2->pcond = p;
}
break;
@@ -483,44 +544,6 @@ addstacksplit(Link *ctxt, LSym *cursym)
}
}
- if(cursym->text->reg & WRAPPER) {
- int scond;
- // Preserve original RET's cond, to allow RET.EQ
- // in the implementation of reflect.call.
- scond = p->scond;
- p->scond = C_SCOND_NONE;
- // g->panicwrap -= autosize;
- // MOVW panicwrap_offset(g), R3
- // SUB $autosize, R3
- // MOVW R3 panicwrap_offset(g)
- p->as = AMOVW;
- p->from.type = D_OREG;
- p->from.reg = REGG;
- p->from.offset = 2*ctxt->arch->ptrsize;
- p->to.type = D_REG;
- p->to.reg = 3;
- p = appendp(ctxt, p);
- p->as = ASUB;
- p->from.type = D_CONST;
- p->from.offset = autosize;
- p->to.type = D_REG;
- p->to.reg = 3;
- p = appendp(ctxt, p);
- p->as = AMOVW;
- p->from.type = D_REG;
- p->from.reg = 3;
- p->to.type = D_OREG;
- p->to.reg = REGG;
- p->to.offset = 2*ctxt->arch->ptrsize;
- p = appendp(ctxt, p);
- p->scond = scond;
- }
p->as = AMOVW;
p->scond |= C_PBIT;
p->from.type = D_OREG;
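To make the ARM constants above concrete, here is the address arithmetic
the emitted prologue performs, as a runnable sketch with hypothetical
addresses (autosize = 16 is an assumption for illustration):

	package main

	import "fmt"

	func main() {
		const autosize = 16
		entrySP := uintptr(0x1000) // R13 when the wrapper is entered
		callerFP := entrySP + 4    // incoming args sit one word above entry SP on ARM
		sp := entrySP - autosize   // R13 after the prologue opens the frame

		// ADD $(autosize+4), R13, R3 reconstructs the wrapper's own FP:
		fmt.Println(sp+autosize+4 == callerFP) // true

		// On a match, ADD $4, R13, R4 computes bottom-of-frame, the
		// argument pointer of the call the wrapper is about to make:
		fmt.Printf("Panic.argp %#x -> %#x\n", callerFP, sp+4)
	}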
@@ -388,7 +388,7 @@ parsetextconst(vlong arg, vlong *textstksiz, vlong *textarg)
static void
addstacksplit(Link *ctxt, LSym *cursym)
{
- Prog *p, *q, *q1;
+ Prog *p, *q, *q1, *p1, *p2;
int32 autoffset, deltasp;
int a, pcsize;
uint32 i;
@@ -463,13 +463,64 @@ addstacksplit(Link *ctxt, LSym *cursym)
deltasp = autoffset;
if(cursym->text->from.scale & WRAPPER) {
- // g->panicwrap += autoffset + ctxt->arch->regsize;
+ // if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
+ //
+ // MOVQ g_panic(CX), BX
+ // TESTQ BX, BX
+ // JEQ end
+ // LEAQ (autoffset+8)(SP), DI
+ // CMPQ panic_argp(BX), DI
+ // JNE end
+ // MOVQ SP, panic_argp(BX)
+ // end:
+ // NOP
+ //
+ // The NOP is needed to give the jumps somewhere to land.
+ // It is a liblink NOP, not an x86 NOP: it encodes to 0 instruction bytes.
p = appendp(ctxt, p);
- p->as = AADDL;
- p->from.type = D_CONST;
- p->from.offset = autoffset + ctxt->arch->regsize;
- indir_cx(ctxt, &p->to);
- p->to.offset = 2*ctxt->arch->ptrsize;
+ p->as = AMOVQ;
+ p->from.type = D_INDIR+D_CX;
+ p->from.offset = 2*ctxt->arch->ptrsize; // G.panic
+ p->to.type = D_BX;
+ p = appendp(ctxt, p);
+ p->as = ATESTQ;
+ p->from.type = D_BX;
+ p->to.type = D_BX;
+ p = appendp(ctxt, p);
+ p->as = AJEQ;
+ p->to.type = D_BRANCH;
+ p1 = p;
+ p = appendp(ctxt, p);
+ p->as = ALEAQ;
+ p->from.type = D_INDIR+D_SP;
+ p->from.offset = autoffset+8;
+ p->to.type = D_DI;
+ p = appendp(ctxt, p);
+ p->as = ACMPQ;
+ p->from.type = D_INDIR+D_BX;
+ p->from.offset = 0; // Panic.argp
+ p->to.type = D_DI;
+ p = appendp(ctxt, p);
+ p->as = AJNE;
+ p->to.type = D_BRANCH;
+ p2 = p;
+ p = appendp(ctxt, p);
+ p->as = AMOVQ;
+ p->from.type = D_SP;
+ p->to.type = D_INDIR+D_BX;
+ p->to.offset = 0; // Panic.argp
+ p = appendp(ctxt, p);
+ p->as = ANOP;
+ p1->pcond = p;
+ p2->pcond = p;
}
if(ctxt->debugstack > 1 && autoffset) {
@@ -589,19 +640,6 @@ addstacksplit(Link *ctxt, LSym *cursym)
if(autoffset != deltasp)
ctxt->diag("unbalanced PUSH/POP");
- if(cursym->text->from.scale & WRAPPER) {
- p = load_g_cx(ctxt, p);
- p = appendp(ctxt, p);
- // g->panicwrap -= autoffset + ctxt->arch->regsize;
- p->as = ASUBL;
- p->from.type = D_CONST;
- p->from.offset = autoffset + ctxt->arch->regsize;
- indir_cx(ctxt, &p->to);
- p->to.offset = 2*ctxt->arch->ptrsize;
- p = appendp(ctxt, p);
- p->as = ARET;
- }
if(autoffset) {
p->as = AADJSP;
p->from.type = D_CONST;
@@ -261,7 +261,7 @@ static Prog* stacksplit(Link*, Prog*, int32, int, Prog**);
static void
addstacksplit(Link *ctxt, LSym *cursym)
{
- Prog *p, *q;
+ Prog *p, *q, *p1, *p2;
int32 autoffset, deltasp;
int a;
@@ -317,13 +317,64 @@ addstacksplit(Link *ctxt, LSym *cursym)
deltasp = autoffset;
if(cursym->text->from.scale & WRAPPER) {
- // g->panicwrap += autoffset + ctxt->arch->ptrsize;
+ // if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
+ //
+ // MOVL g_panic(CX), BX
+ // TESTL BX, BX
+ // JEQ end
+ // LEAL (autoffset+4)(SP), DI
+ // CMPL panic_argp(BX), DI
+ // JNE end
+ // MOVL SP, panic_argp(BX)
+ // end:
+ // NOP
+ //
+ // The NOP is needed to give the jumps somewhere to land.
+ // It is a liblink NOP, not an x86 NOP: it encodes to 0 instruction bytes.
p = appendp(ctxt, p);
- p->as = AADDL;
- p->from.type = D_CONST;
- p->from.offset = autoffset + ctxt->arch->ptrsize;
- p->to.type = D_INDIR+D_CX;
- p->to.offset = 2*ctxt->arch->ptrsize;
+ p->as = AMOVL;
+ p->from.type = D_INDIR+D_CX;
+ p->from.offset = 2*ctxt->arch->ptrsize; // G.panic
+ p->to.type = D_BX;
+ p = appendp(ctxt, p);
+ p->as = ATESTL;
+ p->from.type = D_BX;
+ p->to.type = D_BX;
+ p = appendp(ctxt, p);
+ p->as = AJEQ;
+ p->to.type = D_BRANCH;
+ p1 = p;
+ p = appendp(ctxt, p);
+ p->as = ALEAL;
+ p->from.type = D_INDIR+D_SP;
+ p->from.offset = autoffset+4;
+ p->to.type = D_DI;
+ p = appendp(ctxt, p);
+ p->as = ACMPL;
+ p->from.type = D_INDIR+D_BX;
+ p->from.offset = 0; // Panic.argp
+ p->to.type = D_DI;
+ p = appendp(ctxt, p);
+ p->as = AJNE;
+ p->to.type = D_BRANCH;
+ p2 = p;
+ p = appendp(ctxt, p);
+ p->as = AMOVL;
+ p->from.type = D_SP;
+ p->to.type = D_INDIR+D_BX;
+ p->to.offset = 0; // Panic.argp
+ p = appendp(ctxt, p);
+ p->as = ANOP;
+ p1->pcond = p;
+ p2->pcond = p;
}
if(ctxt->debugzerostack && autoffset && !(cursym->text->from.scale&NOSPLIT)) {
@@ -396,19 +447,6 @@ addstacksplit(Link *ctxt, LSym *cursym)
if(autoffset != deltasp)
ctxt->diag("unbalanced PUSH/POP");
- if(cursym->text->from.scale & WRAPPER) {
- p = load_g_cx(ctxt, p);
- p = appendp(ctxt, p);
- // g->panicwrap -= autoffset + ctxt->arch->ptrsize;
- p->as = ASUBL;
- p->from.type = D_CONST;
- p->from.offset = autoffset + ctxt->arch->ptrsize;
- p->to.type = D_INDIR+D_CX;
- p->to.offset = 2*ctxt->arch->ptrsize;
- p = appendp(ctxt, p);
- p->as = ARET;
- }
if(autoffset) {
p->as = AADJSP;
p->from.type = D_CONST;
@@ -563,7 +563,7 @@ func (v Value) call(op string, in []Value) []Value {
}
// Call.
- call(fn, args, uint32(frametype.size), uint32(retOffset), nil)
+ call(fn, args, uint32(frametype.size), uint32(retOffset))
// For testing; see TestCallMethodJump.
if callGC {
@@ -761,7 +761,7 @@ func callMethod(ctxt *methodValue, frame unsafe.Pointer) {
memmove(unsafe.Pointer(uintptr(args)+ptrSize), frame, argSize-ptrSize)
// Call.
- call(fn, args, uint32(frametype.size), uint32(retOffset), nil)
+ call(fn, args, uint32(frametype.size), uint32(retOffset))
// Copy return values. On amd64p32, the beginning of return values
// is 64-bit aligned, so the caller's frame layout (which doesn't have
@@ -2699,9 +2699,7 @@ func mapiterinit(t *rtype, m unsafe.Pointer) unsafe.Pointer
func mapiterkey(it unsafe.Pointer) (key unsafe.Pointer)
func mapiternext(it unsafe.Pointer)
func maplen(m unsafe.Pointer) int
- // panicpos is for use by runtime and should be nil in all calls in this package
- func call(fn, arg unsafe.Pointer, n uint32, retoffset uint32, panicpos unsafe.Pointer)
+ func call(fn, arg unsafe.Pointer, n uint32, retoffset uint32)
func ifaceE2I(t *rtype, src interface{}, dst unsafe.Pointer)
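For context (not part of the diff): reflect no longer threads a panic
pointer through call because the WRAPPER prologue above keeps Panic.argp
in sync automatically. A small standard-library-only program of the kind
test/recover.go exercises (names here are illustrative):

	package main

	import (
		"fmt"
		"reflect"
	)

	type T struct{}

	func (T) M() {
		// recover works here even though M is reached through
		// reflect.call and a method wrapper: both are WRAPPER-marked.
		if r := recover(); r != nil {
			fmt.Println("recovered:", r)
		}
	}

	func main() {
		var i interface{ M() } = T{}
		defer reflect.ValueOf(i).Method(0).Call(nil)
		panic("boom")
	}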
@@ -329,7 +329,7 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT,$0-0
JMP runtime·morestack(SB)
// reflect·call: call a function with the given argument list
- // func call(f *FuncVal, arg *byte, argsize, retoffset uint32, p *Panic).
+ // func call(f *FuncVal, arg *byte, argsize, retoffset uint32).
// we don't have variable-sized frames, so we use a small number
// of constant-sized-frame functions to encode a few bits of size in the pc.
// Caution: ugly multiline assembly macros in your future!
@@ -341,7 +341,7 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT,$0-0
JMP AX
// Note: can't just "JMP NAME(SB)" - bad inlining results.
- TEXT reflect·call(SB), NOSPLIT, $0-20
+ TEXT reflect·call(SB), NOSPLIT, $0-16
MOVL argsize+8(FP), CX
DISPATCH(runtime·call16, 16)
DISPATCH(runtime·call32, 32)
@@ -375,9 +375,8 @@ TEXT reflect·call(SB), NOSPLIT, $0-20
// Argument map for the callXX frames. Each has one stack map.
DATA gcargs_reflectcall<>+0x00(SB)/4, $1 // 1 stackmap
- DATA gcargs_reflectcall<>+0x04(SB)/4, $10 // 5 words
+ DATA gcargs_reflectcall<>+0x04(SB)/4, $8 // 4 words
DATA gcargs_reflectcall<>+0x08(SB)/1, $(const_BitsPointer+(const_BitsPointer<<2)+(const_BitsScalar<<4)+(const_BitsScalar<<6))
- DATA gcargs_reflectcall<>+0x09(SB)/1, $(const_BitsPointer)
GLOBL gcargs_reflectcall<>(SB),RODATA,$12
// callXX frames have no locals
@@ -386,7 +385,7 @@ DATA gclocals_reflectcall<>+0x04(SB)/4, $0 // 0 locals
GLOBL gclocals_reflectcall<>(SB),RODATA,$8
#define CALLFN(NAME,MAXSIZE) \
- TEXT NAME(SB), WRAPPER, $MAXSIZE-20; \
+ TEXT NAME(SB), WRAPPER, $MAXSIZE-16; \
FUNCDATA $FUNCDATA_ArgsPointerMaps,gcargs_reflectcall<>(SB); \
FUNCDATA $FUNCDATA_LocalsPointerMaps,gclocals_reflectcall<>(SB);\
/* copy arguments to stack */ \
@@ -394,22 +393,11 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-20; \
MOVL argsize+8(FP), CX; \
MOVL SP, DI; \
REP;MOVSB; \
- /* initialize panic argp */ \
- MOVL panic+16(FP), CX; \
- CMPL CX, $0; \
- JEQ 3(PC); \
- LEAL (MAXSIZE+4)(SP), BX; \
- MOVL BX, panic_argp(CX); \
/* call function */ \
MOVL f+0(FP), DX; \
MOVL (DX), AX; \
PCDATA $PCDATA_StackMapIndex, $0; \
CALL AX; \
- /* clear panic argp */ \
- MOVL panic+16(FP), CX; \
- CMPL CX, $0; \
- JEQ 2(PC); \
- MOVL $0, panic_argp(CX); \
/* copy return values back */ \
MOVL argptr+4(FP), DI; \
MOVL argsize+8(FP), CX; \
@@ -308,7 +308,7 @@ TEXT runtime·morestack(SB),NOSPLIT,$0-0
RET
// reflect·call: call a function with the given argument list
- // func call(f *FuncVal, arg *byte, argsize, retoffset uint32, p *Panic).
+ // func call(f *FuncVal, arg *byte, argsize, retoffset uint32).
// we don't have variable-sized frames, so we use a small number
// of constant-sized-frame functions to encode a few bits of size in the pc.
// Caution: ugly multiline assembly macros in your future!
@@ -320,7 +320,7 @@ TEXT runtime·morestack(SB),NOSPLIT,$0-0
JMP AX
// Note: can't just "JMP NAME(SB)" - bad inlining results.
- TEXT reflect·call(SB), NOSPLIT, $0-32
+ TEXT reflect·call(SB), NOSPLIT, $0-24
MOVLQZX argsize+16(FP), CX
DISPATCH(runtime·call16, 16)
DISPATCH(runtime·call32, 32)
@@ -354,8 +354,8 @@ TEXT reflect·call(SB), NOSPLIT, $0-32
// Argument map for the callXX frames. Each has one stack map.
DATA gcargs_reflectcall<>+0x00(SB)/4, $1 // 1 stackmap
- DATA gcargs_reflectcall<>+0x04(SB)/4, $8 // 4 words
- DATA gcargs_reflectcall<>+0x08(SB)/1, $(const_BitsPointer+(const_BitsPointer<<2)+(const_BitsScalar<<4)+(const_BitsPointer<<6))
+ DATA gcargs_reflectcall<>+0x04(SB)/4, $6 // 3 words
+ DATA gcargs_reflectcall<>+0x08(SB)/1, $(const_BitsPointer+(const_BitsPointer<<2)+(const_BitsScalar<<4))
GLOBL gcargs_reflectcall<>(SB),RODATA,$12
// callXX frames have no locals
@@ -363,16 +363,8 @@ DATA gclocals_reflectcall<>+0x00(SB)/4, $1 // 1 stackmap
DATA gclocals_reflectcall<>+0x04(SB)/4, $0 // 0 locals
GLOBL gclocals_reflectcall<>(SB),RODATA,$8
- // CALLFN is marked as a WRAPPER so that a deferred reflect.call func will
- // see the right answer for recover. However, CALLFN is also how we start
- // the panic in the first place. We record the panic argp if this is the start of
- // a panic. Since the wrapper adjustment has already happened, though
- // (in the implicit prologue), we have to write not SP but MAXSIZE+8+SP into
- // p.argp. The MAXSIZE+8 will counter the MAXSIZE+8 the wrapper prologue
- // added to g->panicwrap.
#define CALLFN(NAME,MAXSIZE) \
- TEXT NAME(SB), WRAPPER, $MAXSIZE-32; \
+ TEXT NAME(SB), WRAPPER, $MAXSIZE-24; \
FUNCDATA $FUNCDATA_ArgsPointerMaps,gcargs_reflectcall<>(SB); \
FUNCDATA $FUNCDATA_LocalsPointerMaps,gclocals_reflectcall<>(SB);\
/* copy arguments to stack */ \
@@ -380,21 +372,10 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-32; \
MOVLQZX argsize+16(FP), CX; \
MOVQ SP, DI; \
REP;MOVSB; \
- /* initialize panic argp */ \
- MOVQ panic+24(FP), CX; \
- CMPQ CX, $0; \
- JEQ 3(PC); \
- LEAQ (MAXSIZE+8)(SP), BX; \
- MOVQ BX, panic_argp(CX); \
/* call function */ \
MOVQ f+0(FP), DX; \
PCDATA $PCDATA_StackMapIndex, $0; \
CALL (DX); \
- /* clear panic argp */ \
- MOVQ panic+24(FP), CX; \
- CMPQ CX, $0; \
- JEQ 2(PC); \
- MOVQ $0, panic_argp(CX); \
/* copy return values back */ \
MOVQ argptr+8(FP), DI; \
MOVLQZX argsize+16(FP), CX; \
@@ -349,9 +349,8 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-20; \
/* initialize panic argp */ \
MOVL panic+16(FP), CX; \
CMPL CX, $0; \
- JEQ 3(PC); \
- LEAL (MAXSIZE+8)(SP), BX; \
- MOVL BX, panic_argp(CX); \
+ JEQ 2(PC); \
+ MOVL SP, panic_argp(CX); \
/* call function */ \
MOVL f+0(FP), DX; \
MOVL (DX), AX; \
@@ -306,7 +306,7 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT,$-4-0
B runtime·morestack(SB)
// reflect·call: call a function with the given argument list
- // func call(f *FuncVal, arg *byte, argsize, retoffset uint32, p *Panic).
+ // func call(f *FuncVal, arg *byte, argsize, retoffset uint32).
// we don't have variable-sized frames, so we use a small number
// of constant-sized-frame functions to encode a few bits of size in the pc.
// Caution: ugly multiline assembly macros in your future!
@@ -317,7 +317,7 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT,$-4-0
MOVW $NAME(SB), R1; \
B (R1)
- TEXT reflect·call(SB),NOSPLIT,$-4-20
+ TEXT reflect·call(SB),NOSPLIT,$-4-16
MOVW argsize+8(FP), R0
DISPATCH(runtime·call16, 16)
DISPATCH(runtime·call32, 32)
@@ -351,9 +351,8 @@ TEXT reflect·call(SB),NOSPLIT,$-4-20
// Argument map for the callXX frames. Each has one stack map.
DATA gcargs_reflectcall<>+0x00(SB)/4, $1 // 1 stackmap
- DATA gcargs_reflectcall<>+0x04(SB)/4, $10 // 5 words
+ DATA gcargs_reflectcall<>+0x04(SB)/4, $8 // 4 words
DATA gcargs_reflectcall<>+0x08(SB)/1, $(const_BitsPointer+(const_BitsPointer<<2)+(const_BitsScalar<<4)+(const_BitsScalar<<6))
- DATA gcargs_reflectcall<>+0x09(SB)/1, $(const_BitsPointer)
GLOBL gcargs_reflectcall<>(SB),RODATA,$12
// callXX frames have no locals
@@ -362,7 +361,7 @@ DATA gclocals_reflectcall<>+0x04(SB)/4, $0 // 0 locals
GLOBL gclocals_reflectcall<>(SB),RODATA,$8
#define CALLFN(NAME,MAXSIZE) \
- TEXT NAME(SB), WRAPPER, $MAXSIZE-20; \
+ TEXT NAME(SB), WRAPPER, $MAXSIZE-16; \
FUNCDATA $FUNCDATA_ArgsPointerMaps,gcargs_reflectcall<>(SB); \
FUNCDATA $FUNCDATA_LocalsPointerMaps,gclocals_reflectcall<>(SB);\
/* copy arguments to stack */ \
@@ -375,23 +374,11 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-20; \
MOVBU.P R5, 1(R1); \
SUB $1, R2, R2; \
B -5(PC); \
- /* initialize panic argp */ \
- MOVW panic+16(FP), R4; \
- CMP $0, R4; \
- B.EQ 3(PC); \
- ADD $(4+MAXSIZE+4), R13, R5; \
- MOVW R5, panic_argp(R4); \
/* call function */ \
MOVW f+0(FP), R7; \
MOVW (R7), R0; \
PCDATA $PCDATA_StackMapIndex, $0; \
BL (R0); \
- /* clear panic argp */ \
- MOVW panic+16(FP), R4; \
- CMP $0, R4; \
- B.EQ 3(PC); \
- MOVW $0, R5; \
- MOVW R5, panic_argp(R4); \
/* copy return values back */ \
MOVW argptr+4(FP), R0; \
MOVW argsize+8(FP), R2; \
@@ -225,7 +225,7 @@ func cgocallbackg1() {
}
// Invoke callback.
- reflectcall(unsafe.Pointer(cb.fn), unsafe.Pointer(cb.arg), uint32(cb.argsize), 0, nil)
+ reflectcall(unsafe.Pointer(cb.fn), unsafe.Pointer(cb.arg), uint32(cb.argsize), 0)
if raceenabled {
racereleasemerge(unsafe.Pointer(&racecgosync))
@@ -743,7 +743,7 @@ func runfinq() {
default:
gothrow("bad kind in runfinq")
}
- reflectcall(unsafe.Pointer(f.fn), frame, uint32(framesz), uint32(framesz), nil)
+ reflectcall(unsafe.Pointer(f.fn), frame, uint32(framesz), uint32(framesz))
// drop finalizer queue references to finalized object
f.fn = nil
@@ -208,7 +208,7 @@ func Goexit() {
for gp._defer != nil {
d := gp._defer
gp._defer = d.link
- reflectcall(unsafe.Pointer(d.fn), unsafe.Pointer(&d.args), uint32(d.siz), uint32(d.siz), nil)
+ reflectcall(unsafe.Pointer(d.fn), unsafe.Pointer(&d.args), uint32(d.siz), uint32(d.siz))
freedefer(d)
// Note: we ignore recovers here because Goexit isn't a panic
}
@@ -46,25 +46,19 @@ func gopanic(e interface{}) {
}
// take defer off list in case of recursive panic
gp._defer = d.link
- gp.ispanic = true // rock for runtime·newstack, where runtime·newstackcall ends up
argp := unsafe.Pointer(d.argp) // must be pointer so it gets adjusted during stack copy
pc := d.pc
// The deferred function may cause another panic,
- // so newstackcall may not return. Set up a defer
+ // so reflectcall may not return. Set up a defer
// to mark this panic aborted if that happens.
dabort.link = gp._defer
gp._defer = (*_defer)(noescape(unsafe.Pointer(&dabort)))
- p._defer = d
- p.outerwrap = gp.panicwrap
- // TODO(rsc): I am pretty sure the panicwrap manipulation here is not correct.
- // It is close enough to pass all the tests we have, but I think it needs to be
- // restored during recovery too. I will write better tests and fix it in a separate CL.
- gp.panicwrap = 0
- reflectcall(unsafe.Pointer(d.fn), unsafe.Pointer(&d.args), uint32(d.siz), uint32(d.siz), (*_panic)(noescape(unsafe.Pointer(&p))))
- gp.panicwrap = p.outerwrap
+ p.argp = getargp(0)
+ reflectcall(unsafe.Pointer(d.fn), unsafe.Pointer(&d.args), uint32(d.siz), uint32(d.siz))
+ p.argp = 0
// reflectcall did not panic. Remove dabort.
if gp._defer != &dabort {
@@ -102,6 +96,21 @@ func gopanic(e interface{}) {
*(*int)(nil) = 0 // not reached
}
+ // getargp returns the location where the caller
+ // writes outgoing function call arguments.
+ //go:nosplit
+ func getargp(x int) uintptr {
+ // x is an argument mainly so that we can return its address.
+ // However, we need to make the function complex enough
+ // that it won't be inlined. We always pass x = 0, so this code
+ // does nothing other than keep the compiler from thinking
+ // the function is simple enough to inline.
+ if x > 0 {
+ return getcallersp(unsafe.Pointer(&x)) * 0
+ }
+ return uintptr(noescape(unsafe.Pointer(&x)))
+ }
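For intuition (an illustrative sketch, not runtime code): with the
stack-based calling convention of this era, a function's first argument
lives exactly at the caller's outgoing-argument pointer, so taking its
address reports where the caller wrote the arguments; getargp(0) does
the same for gopanic, modulo the noescape and anti-inlining guards above:

	// For a call f(0), &x points into the caller's
	// outgoing-argument area, so the returned value is the
	// caller's argp for this call.
	func f(x int) uintptr {
		return uintptr(unsafe.Pointer(&x))
	}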
func abortpanic(p *_panic) {
p.aborted = true
}
@@ -109,23 +118,20 @@ func abortpanic(p *_panic) {
// The implementation of the predeclared function recover.
// Cannot split the stack because it needs to reliably
// find the stack segment of its caller.
//
// TODO(rsc): Once we commit to CopyStackAlways,
// this doesn't need to be nosplit.
//go:nosplit
func gorecover(argp uintptr) interface{} {
- // Must be an unrecovered panic in progress.
- // Must be on a stack segment created for a deferred call during a panic.
- // Must be at the top of that segment, meaning the deferred call itself
- // and not something it called. The top frame in the segment will have
- // argument pointer argp == top - top.argsize.
- // The subtraction of g.panicwrap allows wrapper functions that
- // do not count as official calls to adjust what we consider the top frame
- // while they are active on the stack. The linker emits adjustments of
- // g.panicwrap in the prologue and epilogue of functions marked as wrappers.
+ // Must be in a function running as part of a deferred call during the panic.
+ // Must be called from the topmost function of the call
+ // (the function used in the defer statement).
+ // p.argp is the argument pointer of that topmost deferred function call.
+ // Compare against argp reported by caller.
+ // If they match, the caller is the one who can recover.
gp := getg()
p := gp._panic
- // if p != nil {
- // println("recover?", p, p.recovered, hex(argp), hex(p.argp), uintptr(gp.panicwrap), p != nil && !p.recovered && argp == p.argp-uintptr(gp.panicwrap))
- // }
- if p != nil && !p.recovered && argp == p.argp-uintptr(gp.panicwrap) {
+ if p != nil && !p.recovered && argp == p.argp {
p.recovered = true
return p.arg
}
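A minimal model of the new check (a sketch with assumed names, mirroring
the fields in runtime.h; it is not the runtime's code):

	type panicState struct {
		argp      uintptr // argument pointer of the topmost deferred call
		arg       interface{}
		recovered bool
	}

	func tryRecover(p *panicState, callerArgp uintptr) (interface{}, bool) {
		// No panicwrap subtraction: wrapper prologues have already
		// advanced argp past every wrapper frame on the way in.
		if p != nil && !p.recovered && callerArgp == p.argp {
			p.recovered = true
			return p.arg, true
		}
		return nil, false
	}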
@@ -2327,7 +2327,6 @@ runtime·newproc1(FuncVal *fn, byte *argp, int32 narg, int32 nret, void *callerp
p->goidcacheend = p->goidcache + GoidCacheBatch;
}
newg->goid = p->goidcache++;
- newg->panicwrap = 0;
if(raceenabled)
newg->racectx = runtime·racegostart((void*)callerpc);
runqput(p, newg);
@@ -268,11 +268,10 @@ struct WinCallbackContext
struct G
{
// stackguard0 can be set to StackPreempt as opposed to stackguard
- uintptr stackguard0; // cannot move - also known to linker, libmach, runtime/cgo
+ uintptr stackguard0; // cannot move - also known to liblink, libmach, runtime/cgo
uintptr stackbase; // cannot move - also known to libmach, runtime/cgo
- uint32 panicwrap; // cannot move - also known to linker
+ Panic* panic; // cannot move - also known to liblink
Defer* defer;
- Panic* panic;
Gobuf sched;
uintptr syscallstack; // if status==Gsyscall, syscallstack = stackbase to use during gc
uintptr syscallsp; // if status==Gsyscall, syscallsp = sched.sp to use during gc
@@ -287,7 +286,6 @@ struct G
int64 waitsince; // approx time when the G become blocked
String waitreason; // if status==Gwaiting
G* schedlink;
- bool ispanic;
bool issystem; // do not output in stack dump, ignore in deadlock detector
bool preempt; // preemption signal, duplicates stackguard0 = StackPreempt
bool paniconfault; // panic (instead of crash) on unexpected fault address
@@ -449,7 +447,6 @@ struct Stktop
uintptr stackbase;
Gobuf gobuf;
uint32 argsize;
- uint32 panicwrap;
uint8* argp; // pointer to arguments in old frame
};
@@ -654,11 +651,10 @@ struct Defer
*/
struct Panic
{
+ uintptr argp; // pointer to arguments of deferred call run during panic; cannot move - known to liblink
Eface arg; // argument to panic
Panic* link; // link to earlier panic
- Defer* defer; // current executing defer
- uintptr argp; // pointer to arguments of deferred call, for recover
- uint32 outerwrap; // outer gp->panicwrap
bool recovered; // whether this panic is over
bool aborted; // the panic was aborted
};
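The Go side of the runtime must mirror this layout field-for-field; as
a sketch (the actual Go declaration lives in the runtime's Go sources),
it corresponds to:

	type _panic struct {
		argp      uintptr     // must stay at offset 0: liblink stores through it
		arg       interface{} // Eface
		link      *_panic
		recovered bool
		aborted   bool
	}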
@@ -371,7 +371,6 @@ runtime·oldstack(void)
gp->stackbase = top->stackbase;
gp->stackguard = top->stackguard;
gp->stackguard0 = gp->stackguard;
- gp->panicwrap = top->panicwrap;
runtime·stackfree(gp, old, top);
runtime·casgstatus(gp, Gcopystack, oldstatus); // oldstatus is Grunning or Gsyscall
runtime·gogo(&gp->sched);
@@ -1033,9 +1032,6 @@ runtime·newstack(void)
top->argp = moreargp;
top->argsize = argsize;
- top->panicwrap = gp->panicwrap;
- gp->panicwrap = 0;
gp->stackbase = (uintptr)top;
gp->stackguard = (uintptr)stk + StackGuard;
gp->stackguard0 = gp->stackguard;
@@ -171,7 +171,7 @@ func cputicks() int64
func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) unsafe.Pointer
func munmap(addr unsafe.Pointer, n uintptr)
func madvise(addr unsafe.Pointer, n uintptr, flags int32)
- func reflectcall(fn, arg unsafe.Pointer, n uint32, retoffset uint32, p *_panic)
+ func reflectcall(fn, arg unsafe.Pointer, n uint32, retoffset uint32)
func osyield()
func procyield(cycles uint32)
func cgocallback_gofunc(fv *funcval, frame unsafe.Pointer, framesize uintptr)
@@ -47,6 +47,7 @@ func main() {
test11reflect1()
test11reflect2()
}
+ test111()
test12()
if !interp {
test12reflect1()
@@ -77,7 +78,7 @@ func mustRecoverBody(v1, v2, v3, x interface{}) {
}
v = v2
if v == nil {
println("missing recover")
println("missing recover", x.(int))
die() // panic is useless here
}
if v != x {
@@ -137,7 +138,7 @@ func test1WithClosures() {
mustNotRecover()
v := recover()
if v == nil {
println("missing recover")
println("missing recover", x.(int))
die()
}
if v != x {
@@ -406,6 +407,49 @@ func test11reflect2() {
panic(11)
}
+ // tiny receiver, so basic wrapper in i.M()
+ type T3deeper struct{}
+ func (T3deeper) M() {
+ badstate() // difference from T3
+ mustRecoverBody(doubleRecover(), recover(), recover(), 111)
+ }
+ func test111() {
+ var i I = T3deeper{}
+ defer i.M()
+ panic(111)
+ }
+ type Tiny struct{}
+ func (Tiny) M() {
+ panic(112)
+ }
+ // i.M is a wrapper, and i.M panics.
+ //
+ // This is a torture test for an old implementation of recover that
+ // tried to deal with wrapper functions by doing some argument
+ // positioning math on both entry and exit. Doing anything on exit
+ // is a problem because sometimes functions exit via panic instead
+ // of an ordinary return, so panic would have to know to do the
+ // same math when unwinding the stack. It gets complicated fast.
+ // This particular test never worked with the old scheme, because
+ // panic never did the right unwinding math.
+ //
+ // The new scheme adjusts Panic.argp on entry to a wrapper.
+ // It has no exit work, so if a wrapper is interrupted by a panic,
+ // there's no cleanup that panic itself must do.
+ // This test just works now.
+ func badstate() {
+ defer func() {
+ recover()
+ }()
+ var i I = Tiny{}
+ i.M()
+ }
// large receiver, so basic wrapper in i.M()
type T4 [2]string