Commit 656be317 authored by Russ Cox's avatar Russ Cox

[dev.cc] runtime: delete scalararg, ptrarg; rename onM to systemstack

Scalararg and ptrarg are not "signal safe".
Go code filling them out can be interrupted by a signal,
and then the signal handler runs, and if it also ends up
in Go code that uses scalararg or ptrarg, now the old
values have been smashed.
For the pieces of code that do need to run in a signal handler,
we introduced onM_signalok, which is really just onM
except that the _signalok is meant to convey that the caller
asserts that scalararg and ptrarg will be restored to their old
values after the call (instead of the usual behavior, zeroing them).

Scalararg and ptrarg are also untyped and therefore error-prone.

Go code can always pass a closure instead of using scalararg
and ptrarg; they were only really necessary for C code.
And there's no more C code.

For all these reasons, delete scalararg and ptrarg, converting
the few remaining references to use closures.

Once those are gone, there is no need for a distinction between
onM and onM_signalok, so replace both with a single function
equivalent to the current onM_signalok (that is, it can be called
on any of the curg, g0, and gsignal stacks).

The name onM and the phrase 'm stack' are misnomers,
because on most systems an M has two system stacks:
the main thread stack and the signal handling stack.

Correct the misnomer by naming the replacement function systemstack.

Fix a few references to "M stack" in code.

The main motivation for this change is to eliminate scalararg/ptrarg.
Rick and I have already seen them cause problems because
the calling sequence m.ptrarg[0] = p is a heap pointer assignment,
so it gets a write barrier. The write barrier also uses onM, so it has
all the same problems as if it were being invoked by a signal handler.
We worked around this by saving and restoring the old values
and by calling onM_signalok, but there's no point in keeping this nice
home for bugs around any longer.

This CL also changes funcline to return the file name as a result
instead of filling in a passed-in *string. (The *string signature is
left over from when the code was written in and called from C.)
That's arguably an unrelated change, except that once I had done
the ptrarg/scalararg/onM cleanup I started getting false positives
about the *string argument escaping (not allowed in package runtime).
The compiler is wrong, but the easiest fix is to write the code like
Go code instead of like C code. I am a bit worried that the compiler
is wrong because of some use of uninitialized memory in the escape
analysis. If that's the reason, it will go away when we convert the
compiler to Go. (And if not, we'll debug it the next time.)

LGTM=khr
R=r, khr
CC=austin, golang-codereviews, iant, rlh
https://golang.org/cl/174950043
parent e98f2d59
...@@ -200,62 +200,49 @@ TEXT runtime·mcall(SB), NOSPLIT, $0-4 ...@@ -200,62 +200,49 @@ TEXT runtime·mcall(SB), NOSPLIT, $0-4
JMP AX JMP AX
RET RET
// switchtoM is a dummy routine that onM leaves at the bottom // systemstack_switch is a dummy routine that systemstack leaves at the bottom
// of the G stack. We need to distinguish the routine that // of the G stack. We need to distinguish the routine that
// lives at the bottom of the G stack from the one that lives // lives at the bottom of the G stack from the one that lives
// at the top of the M stack because the one at the top of // at the top of the system stack because the one at the top of
// the M stack terminates the stack walk (see topofstack()). // the system stack terminates the stack walk (see topofstack()).
TEXT runtime·switchtoM(SB), NOSPLIT, $0-0 TEXT runtime·systemstack_switch(SB), NOSPLIT, $0-0
RET RET
// func onM_signalok(fn func()) // func systemstack(fn func())
TEXT runtime·onM_signalok(SB), NOSPLIT, $0-4 TEXT runtime·systemstack(SB), NOSPLIT, $0-4
MOVL fn+0(FP), DI // DI = fn
get_tls(CX) get_tls(CX)
MOVL g(CX), AX // AX = g MOVL g(CX), AX // AX = g
MOVL g_m(AX), BX // BX = m MOVL g_m(AX), BX // BX = m
MOVL m_gsignal(BX), DX // DX = gsignal MOVL m_gsignal(BX), DX // DX = gsignal
CMPL AX, DX CMPL AX, DX
JEQ ongsignal JEQ noswitch
JMP runtime·onM(SB)
ongsignal:
MOVL fn+0(FP), DI // DI = fn
MOVL DI, DX
MOVL 0(DI), DI
CALL DI
RET
// func onM(fn func())
TEXT runtime·onM(SB), NOSPLIT, $0-4
MOVL fn+0(FP), DI // DI = fn
get_tls(CX)
MOVL g(CX), AX // AX = g
MOVL g_m(AX), BX // BX = m
MOVL m_g0(BX), DX // DX = g0 MOVL m_g0(BX), DX // DX = g0
CMPL AX, DX CMPL AX, DX
JEQ onm JEQ noswitch
MOVL m_curg(BX), BP MOVL m_curg(BX), BP
CMPL AX, BP CMPL AX, BP
JEQ oncurg JEQ switch
// Not g0, not curg. Must be gsignal, but that's not allowed. // Bad: g is not gsignal, not g0, not curg. What is it?
// Hide call from linker nosplit analysis. // Hide call from linker nosplit analysis.
MOVL $runtime·badonm(SB), AX MOVL $runtime·badsystemstack(SB), AX
CALL AX CALL AX
oncurg: switch:
// save our state in g->sched. Pretend to // save our state in g->sched. Pretend to
// be switchtoM if the G stack is scanned. // be systemstack_switch if the G stack is scanned.
MOVL $runtime·switchtoM(SB), (g_sched+gobuf_pc)(AX) MOVL $runtime·systemstack_switch(SB), (g_sched+gobuf_pc)(AX)
MOVL SP, (g_sched+gobuf_sp)(AX) MOVL SP, (g_sched+gobuf_sp)(AX)
MOVL AX, (g_sched+gobuf_g)(AX) MOVL AX, (g_sched+gobuf_g)(AX)
// switch to g0 // switch to g0
MOVL DX, g(CX) MOVL DX, g(CX)
MOVL (g_sched+gobuf_sp)(DX), BX MOVL (g_sched+gobuf_sp)(DX), BX
// make it look like mstart called onM on g0, to stop traceback // make it look like mstart called systemstack on g0, to stop traceback
SUBL $4, BX SUBL $4, BX
MOVL $runtime·mstart(SB), DX MOVL $runtime·mstart(SB), DX
MOVL DX, 0(BX) MOVL DX, 0(BX)
...@@ -276,8 +263,8 @@ oncurg: ...@@ -276,8 +263,8 @@ oncurg:
MOVL $0, (g_sched+gobuf_sp)(AX) MOVL $0, (g_sched+gobuf_sp)(AX)
RET RET
onm: noswitch:
// already on m stack, just call directly // already on system stack, just call directly
MOVL DI, DX MOVL DI, DX
MOVL 0(DI), DI MOVL 0(DI), DI
CALL DI CALL DI
...@@ -741,7 +728,7 @@ needm: ...@@ -741,7 +728,7 @@ needm:
// the same SP back to m->sched.sp. That seems redundant, // the same SP back to m->sched.sp. That seems redundant,
// but if an unrecovered panic happens, unwindm will // but if an unrecovered panic happens, unwindm will
// restore the g->sched.sp from the stack location // restore the g->sched.sp from the stack location
// and then onM will try to use it. If we don't set it here, // and then systemstack will try to use it. If we don't set it here,
// that restored SP will be uninitialized (typically 0) and // that restored SP will be uninitialized (typically 0) and
// will not be usable. // will not be usable.
MOVL m_g0(BP), SI MOVL m_g0(BP), SI
......
...@@ -190,55 +190,41 @@ TEXT runtime·mcall(SB), NOSPLIT, $0-8 ...@@ -190,55 +190,41 @@ TEXT runtime·mcall(SB), NOSPLIT, $0-8
JMP AX JMP AX
RET RET
// switchtoM is a dummy routine that onM leaves at the bottom // systemstack_switch is a dummy routine that systemstack leaves at the bottom
// of the G stack. We need to distinguish the routine that // of the G stack. We need to distinguish the routine that
// lives at the bottom of the G stack from the one that lives // lives at the bottom of the G stack from the one that lives
// at the top of the M stack because the one at the top of // at the top of the system stack because the one at the top of
// the M stack terminates the stack walk (see topofstack()). // the system stack terminates the stack walk (see topofstack()).
TEXT runtime·switchtoM(SB), NOSPLIT, $0-0 TEXT runtime·systemstack_switch(SB), NOSPLIT, $0-0
RET RET
// func onM_signalok(fn func()) // func systemstack(fn func())
TEXT runtime·onM_signalok(SB), NOSPLIT, $0-8 TEXT runtime·systemstack(SB), NOSPLIT, $0-8
MOVQ fn+0(FP), DI // DI = fn
get_tls(CX) get_tls(CX)
MOVQ g(CX), AX // AX = g MOVQ g(CX), AX // AX = g
MOVQ g_m(AX), BX // BX = m MOVQ g_m(AX), BX // BX = m
MOVQ m_gsignal(BX), DX // DX = gsignal MOVQ m_gsignal(BX), DX // DX = gsignal
CMPQ AX, DX CMPQ AX, DX
JEQ ongsignal JEQ noswitch
JMP runtime·onM(SB)
ongsignal:
MOVQ fn+0(FP), DI // DI = fn
MOVQ DI, DX
MOVQ 0(DI), DI
CALL DI
RET
// func onM(fn func())
TEXT runtime·onM(SB), NOSPLIT, $0-8
MOVQ fn+0(FP), DI // DI = fn
get_tls(CX)
MOVQ g(CX), AX // AX = g
MOVQ g_m(AX), BX // BX = m
MOVQ m_g0(BX), DX // DX = g0 MOVQ m_g0(BX), DX // DX = g0
CMPQ AX, DX CMPQ AX, DX
JEQ onm JEQ noswitch
MOVQ m_curg(BX), BP MOVQ m_curg(BX), BP
CMPQ AX, BP CMPQ AX, BP
JEQ oncurg JEQ switch
// Not g0, not curg. Must be gsignal, but that's not allowed. // Bad: g is not gsignal, not g0, not curg. What is it?
// Hide call from linker nosplit analysis. MOVQ $runtime·badsystemstack(SB), AX
MOVQ $runtime·badonm(SB), AX
CALL AX CALL AX
oncurg: switch:
// save our state in g->sched. Pretend to // save our state in g->sched. Pretend to
// be switchtoM if the G stack is scanned. // be systemstack_switch if the G stack is scanned.
MOVQ $runtime·switchtoM(SB), BP MOVQ $runtime·systemstack_switch(SB), BP
MOVQ BP, (g_sched+gobuf_pc)(AX) MOVQ BP, (g_sched+gobuf_pc)(AX)
MOVQ SP, (g_sched+gobuf_sp)(AX) MOVQ SP, (g_sched+gobuf_sp)(AX)
MOVQ AX, (g_sched+gobuf_g)(AX) MOVQ AX, (g_sched+gobuf_g)(AX)
...@@ -246,7 +232,7 @@ oncurg: ...@@ -246,7 +232,7 @@ oncurg:
// switch to g0 // switch to g0
MOVQ DX, g(CX) MOVQ DX, g(CX)
MOVQ (g_sched+gobuf_sp)(DX), BX MOVQ (g_sched+gobuf_sp)(DX), BX
// make it look like mstart called onM on g0, to stop traceback // make it look like mstart called systemstack on g0, to stop traceback
SUBQ $8, BX SUBQ $8, BX
MOVQ $runtime·mstart(SB), DX MOVQ $runtime·mstart(SB), DX
MOVQ DX, 0(BX) MOVQ DX, 0(BX)
...@@ -267,7 +253,7 @@ oncurg: ...@@ -267,7 +253,7 @@ oncurg:
MOVQ $0, (g_sched+gobuf_sp)(AX) MOVQ $0, (g_sched+gobuf_sp)(AX)
RET RET
onm: noswitch:
// already on m stack, just call directly // already on m stack, just call directly
MOVQ DI, DX MOVQ DI, DX
MOVQ 0(DI), DI MOVQ 0(DI), DI
...@@ -727,7 +713,7 @@ needm: ...@@ -727,7 +713,7 @@ needm:
// the same SP back to m->sched.sp. That seems redundant, // the same SP back to m->sched.sp. That seems redundant,
// but if an unrecovered panic happens, unwindm will // but if an unrecovered panic happens, unwindm will
// restore the g->sched.sp from the stack location // restore the g->sched.sp from the stack location
// and then onM will try to use it. If we don't set it here, // and then systemstack will try to use it. If we don't set it here,
// that restored SP will be uninitialized (typically 0) and // that restored SP will be uninitialized (typically 0) and
// will not be usable. // will not be usable.
MOVQ m_g0(BP), SI MOVQ m_g0(BP), SI
......
...@@ -165,55 +165,42 @@ TEXT runtime·mcall(SB), NOSPLIT, $0-4 ...@@ -165,55 +165,42 @@ TEXT runtime·mcall(SB), NOSPLIT, $0-4
JMP AX JMP AX
RET RET
// switchtoM is a dummy routine that onM leaves at the bottom // systemstack_switch is a dummy routine that systemstack leaves at the bottom
// of the G stack. We need to distinguish the routine that // of the G stack. We need to distinguish the routine that
// lives at the bottom of the G stack from the one that lives // lives at the bottom of the G stack from the one that lives
// at the top of the M stack because the one at the top of // at the top of the system stack because the one at the top of
// the M stack terminates the stack walk (see topofstack()). // the M stack terminates the stack walk (see topofstack()).
TEXT runtime·switchtoM(SB), NOSPLIT, $0-0 TEXT runtime·systemstack_switch(SB), NOSPLIT, $0-0
RET RET
// func onM_signalok(fn func()) // func systemstack(fn func())
TEXT runtime·onM_signalok(SB), NOSPLIT, $0-4 TEXT runtime·systemstack(SB), NOSPLIT, $0-4
MOVL fn+0(FP), DI // DI = fn
get_tls(CX) get_tls(CX)
MOVL g(CX), AX // AX = g MOVL g(CX), AX // AX = g
MOVL g_m(AX), BX // BX = m MOVL g_m(AX), BX // BX = m
MOVL m_gsignal(BX), DX // DX = gsignal MOVL m_gsignal(BX), DX // DX = gsignal
CMPL AX, DX CMPL AX, DX
JEQ ongsignal JEQ noswitch
JMP runtime·onM(SB)
ongsignal:
MOVL fn+0(FP), DI // DI = fn
MOVL DI, DX
MOVL 0(DI), DI
CALL DI
RET
// func onM(fn func())
TEXT runtime·onM(SB), NOSPLIT, $0-4
MOVL fn+0(FP), DI // DI = fn
get_tls(CX)
MOVL g(CX), AX // AX = g
MOVL g_m(AX), BX // BX = m
MOVL m_g0(BX), DX // DX = g0 MOVL m_g0(BX), DX // DX = g0
CMPL AX, DX CMPL AX, DX
JEQ onm JEQ noswitch
MOVL m_curg(BX), R8 MOVL m_curg(BX), R8
CMPL AX, R8 CMPL AX, R8
JEQ oncurg JEQ switch
// Not g0, not curg. Must be gsignal, but that's not allowed. // Not g0, not curg. Must be gsignal, but that's not allowed.
// Hide call from linker nosplit analysis. // Hide call from linker nosplit analysis.
MOVL $runtime·badonm(SB), AX MOVL $runtime·badsystemstack(SB), AX
CALL AX CALL AX
oncurg: switch:
// save our state in g->sched. Pretend to // save our state in g->sched. Pretend to
// be switchtoM if the G stack is scanned. // be systemstack_switch if the G stack is scanned.
MOVL $runtime·switchtoM(SB), SI MOVL $runtime·systemstack_switch(SB), SI
MOVL SI, (g_sched+gobuf_pc)(AX) MOVL SI, (g_sched+gobuf_pc)(AX)
MOVL SP, (g_sched+gobuf_sp)(AX) MOVL SP, (g_sched+gobuf_sp)(AX)
MOVL AX, (g_sched+gobuf_g)(AX) MOVL AX, (g_sched+gobuf_g)(AX)
...@@ -237,7 +224,7 @@ oncurg: ...@@ -237,7 +224,7 @@ oncurg:
MOVL $0, (g_sched+gobuf_sp)(AX) MOVL $0, (g_sched+gobuf_sp)(AX)
RET RET
onm: noswitch:
// already on m stack, just call directly // already on m stack, just call directly
MOVL DI, DX MOVL DI, DX
MOVL 0(DI), DI MOVL 0(DI), DI
......
...@@ -191,55 +191,42 @@ TEXT runtime·mcall(SB),NOSPLIT,$-4-4 ...@@ -191,55 +191,42 @@ TEXT runtime·mcall(SB),NOSPLIT,$-4-4
B runtime·badmcall2(SB) B runtime·badmcall2(SB)
RET RET
// switchtoM is a dummy routine that onM leaves at the bottom // systemstack_switch is a dummy routine that systemstack leaves at the bottom
// of the G stack. We need to distinguish the routine that // of the G stack. We need to distinguish the routine that
// lives at the bottom of the G stack from the one that lives // lives at the bottom of the G stack from the one that lives
// at the top of the M stack because the one at the top of // at the top of the system stack because the one at the top of
// the M stack terminates the stack walk (see topofstack()). // the system stack terminates the stack walk (see topofstack()).
TEXT runtime·switchtoM(SB),NOSPLIT,$0-0 TEXT runtime·systemstack_switch(SB),NOSPLIT,$0-0
MOVW $0, R0 MOVW $0, R0
BL (R0) // clobber lr to ensure push {lr} is kept BL (R0) // clobber lr to ensure push {lr} is kept
RET RET
// func onM_signalok(fn func()) // func systemstack(fn func())
TEXT runtime·onM_signalok(SB), NOSPLIT, $4-4 TEXT runtime·systemstack(SB),NOSPLIT,$0-4
MOVW g_m(g), R1
MOVW m_gsignal(R1), R2
MOVW fn+0(FP), R0
CMP g, R2
B.EQ ongsignal
MOVW R0, 4(R13)
BL runtime·onM(SB)
RET
ongsignal:
MOVW R0, R7
MOVW 0(R0), R0
BL (R0)
RET
// func onM(fn func())
TEXT runtime·onM(SB),NOSPLIT,$0-4
MOVW fn+0(FP), R0 // R0 = fn MOVW fn+0(FP), R0 // R0 = fn
MOVW g_m(g), R1 // R1 = m MOVW g_m(g), R1 // R1 = m
MOVW m_gsignal(R1), R2 // R2 = gsignal
CMP g, R2
B.EQ noswitch
MOVW m_g0(R1), R2 // R2 = g0 MOVW m_g0(R1), R2 // R2 = g0
CMP g, R2 CMP g, R2
B.EQ onm B.EQ noswitch
MOVW m_curg(R1), R3 MOVW m_curg(R1), R3
CMP g, R3 CMP g, R3
B.EQ oncurg B.EQ switch
// Not g0, not curg. Must be gsignal, but that's not allowed. // Bad: g is not gsignal, not g0, not curg. What is it?
// Hide call from linker nosplit analysis. // Hide call from linker nosplit analysis.
MOVW $runtime·badonm(SB), R0 MOVW $runtime·badsystemstack(SB), R0
BL (R0) BL (R0)
oncurg: switch:
// save our state in g->sched. Pretend to // save our state in g->sched. Pretend to
// be switchtoM if the G stack is scanned. // be systemstack_switch if the G stack is scanned.
MOVW $runtime·switchtoM(SB), R3 MOVW $runtime·systemstack_switch(SB), R3
ADD $4, R3, R3 // get past push {lr} ADD $4, R3, R3 // get past push {lr}
MOVW R3, (g_sched+gobuf_pc)(g) MOVW R3, (g_sched+gobuf_pc)(g)
MOVW SP, (g_sched+gobuf_sp)(g) MOVW SP, (g_sched+gobuf_sp)(g)
...@@ -252,7 +239,7 @@ oncurg: ...@@ -252,7 +239,7 @@ oncurg:
BL setg<>(SB) BL setg<>(SB)
MOVW R5, R0 MOVW R5, R0
MOVW (g_sched+gobuf_sp)(R2), R3 MOVW (g_sched+gobuf_sp)(R2), R3
// make it look like mstart called onM on g0, to stop traceback // make it look like mstart called systemstack on g0, to stop traceback
SUB $4, R3, R3 SUB $4, R3, R3
MOVW $runtime·mstart(SB), R4 MOVW $runtime·mstart(SB), R4
MOVW R4, 0(R3) MOVW R4, 0(R3)
...@@ -272,7 +259,7 @@ oncurg: ...@@ -272,7 +259,7 @@ oncurg:
MOVW R3, (g_sched+gobuf_sp)(g) MOVW R3, (g_sched+gobuf_sp)(g)
RET RET
onm: noswitch:
MOVW R0, R7 MOVW R0, R7
MOVW 0(R0), R0 MOVW 0(R0), R0
BL (R0) BL (R0)
...@@ -567,7 +554,7 @@ TEXT ·cgocallback_gofunc(SB),NOSPLIT,$8-12 ...@@ -567,7 +554,7 @@ TEXT ·cgocallback_gofunc(SB),NOSPLIT,$8-12
// the same SP back to m->sched.sp. That seems redundant, // the same SP back to m->sched.sp. That seems redundant,
// but if an unrecovered panic happens, unwindm will // but if an unrecovered panic happens, unwindm will
// restore the g->sched.sp from the stack location // restore the g->sched.sp from the stack location
// and then onM will try to use it. If we don't set it here, // and then systemstack will try to use it. If we don't set it here,
// that restored SP will be uninitialized (typically 0) and // that restored SP will be uninitialized (typically 0) and
// will not be usable. // will not be usable.
MOVW g_m(g), R8 MOVW g_m(g), R8
......
...@@ -85,7 +85,7 @@ func atomicstore(addr *uint32, v uint32) { ...@@ -85,7 +85,7 @@ func atomicstore(addr *uint32, v uint32) {
//go:nosplit //go:nosplit
func cas64(addr *uint64, old, new uint64) bool { func cas64(addr *uint64, old, new uint64) bool {
var ok bool var ok bool
onM(func() { systemstack(func() {
lock(addrLock(addr)) lock(addrLock(addr))
if *addr == old { if *addr == old {
*addr = new *addr = new
...@@ -99,7 +99,7 @@ func cas64(addr *uint64, old, new uint64) bool { ...@@ -99,7 +99,7 @@ func cas64(addr *uint64, old, new uint64) bool {
//go:nosplit //go:nosplit
func xadd64(addr *uint64, delta int64) uint64 { func xadd64(addr *uint64, delta int64) uint64 {
var r uint64 var r uint64
onM(func() { systemstack(func() {
lock(addrLock(addr)) lock(addrLock(addr))
r = *addr + uint64(delta) r = *addr + uint64(delta)
*addr = r *addr = r
...@@ -111,7 +111,7 @@ func xadd64(addr *uint64, delta int64) uint64 { ...@@ -111,7 +111,7 @@ func xadd64(addr *uint64, delta int64) uint64 {
//go:nosplit //go:nosplit
func xchg64(addr *uint64, v uint64) uint64 { func xchg64(addr *uint64, v uint64) uint64 {
var r uint64 var r uint64
onM(func() { systemstack(func() {
lock(addrLock(addr)) lock(addrLock(addr))
r = *addr r = *addr
*addr = v *addr = v
...@@ -123,7 +123,7 @@ func xchg64(addr *uint64, v uint64) uint64 { ...@@ -123,7 +123,7 @@ func xchg64(addr *uint64, v uint64) uint64 {
//go:nosplit //go:nosplit
func atomicload64(addr *uint64) uint64 { func atomicload64(addr *uint64) uint64 {
var r uint64 var r uint64
onM(func() { systemstack(func() {
lock(addrLock(addr)) lock(addrLock(addr))
r = *addr r = *addr
unlock(addrLock(addr)) unlock(addrLock(addr))
...@@ -133,7 +133,7 @@ func atomicload64(addr *uint64) uint64 { ...@@ -133,7 +133,7 @@ func atomicload64(addr *uint64) uint64 {
//go:nosplit //go:nosplit
func atomicstore64(addr *uint64, v uint64) { func atomicstore64(addr *uint64, v uint64) {
onM(func() { systemstack(func() {
lock(addrLock(addr)) lock(addrLock(addr))
*addr = v *addr = v
unlock(addrLock(addr)) unlock(addrLock(addr))
......
...@@ -103,7 +103,7 @@ func cgocall_errno(fn, arg unsafe.Pointer) int32 { ...@@ -103,7 +103,7 @@ func cgocall_errno(fn, arg unsafe.Pointer) int32 {
// Create an extra M for callbacks on threads not created by Go on first cgo call. // Create an extra M for callbacks on threads not created by Go on first cgo call.
if needextram == 1 && cas(&needextram, 1, 0) { if needextram == 1 && cas(&needextram, 1, 0) {
onM(newextram) systemstack(newextram)
} }
/* /*
...@@ -195,7 +195,7 @@ func cgocallbackg1() { ...@@ -195,7 +195,7 @@ func cgocallbackg1() {
gp := getg() gp := getg()
if gp.m.needextram { if gp.m.needextram {
gp.m.needextram = false gp.m.needextram = false
onM(newextram) systemstack(newextram)
} }
// Add entry to defer stack in case of panic. // Add entry to defer stack in case of panic.
......
...@@ -102,9 +102,9 @@ var ( ...@@ -102,9 +102,9 @@ var (
) )
func setcpuprofilerate(hz int32) { func setcpuprofilerate(hz int32) {
g := getg() systemstack(func() {
g.m.scalararg[0] = uintptr(hz) setcpuprofilerate_m(hz)
onM(setcpuprofilerate_m) })
} }
// lostProfileData is a no-op function used in profiles // lostProfileData is a no-op function used in profiles
......
...@@ -25,14 +25,14 @@ func GOMAXPROCS(n int) int { ...@@ -25,14 +25,14 @@ func GOMAXPROCS(n int) int {
semacquire(&worldsema, false) semacquire(&worldsema, false)
gp := getg() gp := getg()
gp.m.gcing = 1 gp.m.gcing = 1
onM(stoptheworld) systemstack(stoptheworld)
// newprocs will be processed by starttheworld // newprocs will be processed by starttheworld
newprocs = int32(n) newprocs = int32(n)
gp.m.gcing = 0 gp.m.gcing = 0
semrelease(&worldsema) semrelease(&worldsema)
onM(starttheworld) systemstack(starttheworld)
return ret return ret
} }
......
...@@ -59,21 +59,21 @@ func parforiters_m() ...@@ -59,21 +59,21 @@ func parforiters_m()
func NewParFor(nthrmax uint32) *ParFor { func NewParFor(nthrmax uint32) *ParFor {
var desc *ParFor var desc *ParFor
onM(func() { systemstack(func() {
desc = (*ParFor)(unsafe.Pointer(parforalloc(nthrmax))) desc = (*ParFor)(unsafe.Pointer(parforalloc(nthrmax)))
}) })
return desc return desc
} }
func ParForSetup(desc *ParFor, nthr, n uint32, ctx *byte, wait bool, body func(*ParFor, uint32)) { func ParForSetup(desc *ParFor, nthr, n uint32, ctx *byte, wait bool, body func(*ParFor, uint32)) {
onM(func() { systemstack(func() {
parforsetup((*parfor)(unsafe.Pointer(desc)), nthr, n, unsafe.Pointer(ctx), wait, parforsetup((*parfor)(unsafe.Pointer(desc)), nthr, n, unsafe.Pointer(ctx), wait,
*(*func(*parfor, uint32))(unsafe.Pointer(&body))) *(*func(*parfor, uint32))(unsafe.Pointer(&body)))
}) })
} }
func ParForDo(desc *ParFor) { func ParForDo(desc *ParFor) {
onM(func() { systemstack(func() {
parfordo((*parfor)(unsafe.Pointer(desc))) parfordo((*parfor)(unsafe.Pointer(desc)))
}) })
} }
...@@ -87,7 +87,7 @@ func ParForIters(desc *ParFor, tid uint32) (uint32, uint32) { ...@@ -87,7 +87,7 @@ func ParForIters(desc *ParFor, tid uint32) (uint32, uint32) {
func GCMask(x interface{}) (ret []byte) { func GCMask(x interface{}) (ret []byte) {
e := (*eface)(unsafe.Pointer(&x)) e := (*eface)(unsafe.Pointer(&x))
s := (*slice)(unsafe.Pointer(&ret)) s := (*slice)(unsafe.Pointer(&ret))
onM(func() { systemstack(func() {
var len uintptr var len uintptr
getgcmask(e.data, e._type, &s.array, &len) getgcmask(e.data, e._type, &s.array, &len)
s.len = uint(len) s.len = uint(len)
...@@ -97,10 +97,10 @@ func GCMask(x interface{}) (ret []byte) { ...@@ -97,10 +97,10 @@ func GCMask(x interface{}) (ret []byte) {
} }
func RunSchedLocalQueueTest() { func RunSchedLocalQueueTest() {
onM(testSchedLocalQueue) systemstack(testSchedLocalQueue)
} }
func RunSchedLocalQueueStealTest() { func RunSchedLocalQueueStealTest() {
onM(testSchedLocalQueueSteal) systemstack(testSchedLocalQueueSteal)
} }
var HaveGoodHash = haveGoodHash var HaveGoodHash = haveGoodHash
...@@ -121,7 +121,7 @@ func GogoBytes() int32 { ...@@ -121,7 +121,7 @@ func GogoBytes() int32 {
// entry point for testing // entry point for testing
func GostringW(w []uint16) (s string) { func GostringW(w []uint16) (s string) {
onM(func() { systemstack(func() {
s = gostringw(&w[0]) s = gostringw(&w[0])
}) })
return return
......
...@@ -112,7 +112,8 @@ func Caller(skip int) (pc uintptr, file string, line int, ok bool) { ...@@ -112,7 +112,8 @@ func Caller(skip int) (pc uintptr, file string, line int, ok bool) {
if xpc > f.entry && (g == nil || g.entry != funcPC(sigpanic)) { if xpc > f.entry && (g == nil || g.entry != funcPC(sigpanic)) {
xpc-- xpc--
} }
line = int(funcline(f, xpc, &file)) file, line32 := funcline(f, xpc)
line = int(line32)
ok = true ok = true
return return
} }
......
...@@ -602,8 +602,7 @@ func dumpmemprof_callback(b *bucket, nstk uintptr, pstk *uintptr, size, allocs, ...@@ -602,8 +602,7 @@ func dumpmemprof_callback(b *bucket, nstk uintptr, pstk *uintptr, size, allocs,
if i > 0 && pc > f.entry { if i > 0 && pc > f.entry {
pc-- pc--
} }
var file string file, line := funcline(f, pc)
line := funcline(f, pc, &file)
dumpstr(file) dumpstr(file)
dumpint(uint64(line)) dumpint(uint64(line))
} }
...@@ -657,11 +656,8 @@ func mdump() { ...@@ -657,11 +656,8 @@ func mdump() {
flush() flush()
} }
func writeheapdump_m() { func writeheapdump_m(fd uintptr) {
_g_ := getg() _g_ := getg()
fd := _g_.m.scalararg[0]
_g_.m.scalararg[0] = 0
casgstatus(_g_.m.curg, _Grunning, _Gwaiting) casgstatus(_g_.m.curg, _Grunning, _Gwaiting)
_g_.waitreason = "dumping heap" _g_.waitreason = "dumping heap"
......
...@@ -57,7 +57,7 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer { ...@@ -57,7 +57,7 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
// This function must be atomic wrt GC, but for performance reasons // This function must be atomic wrt GC, but for performance reasons
// we don't acquirem/releasem on fast path. The code below does not have // we don't acquirem/releasem on fast path. The code below does not have
// split stack checks, so it can't be preempted by GC. // split stack checks, so it can't be preempted by GC.
// Functions like roundup/add are inlined. And onM/racemalloc are nosplit. // Functions like roundup/add are inlined. And systemstack/racemalloc are nosplit.
// If debugMalloc = true, these assumptions are checked below. // If debugMalloc = true, these assumptions are checked below.
if debugMalloc { if debugMalloc {
mp := acquirem() mp := acquirem()
...@@ -143,7 +143,7 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer { ...@@ -143,7 +143,7 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
s = c.alloc[tinySizeClass] s = c.alloc[tinySizeClass]
v := s.freelist v := s.freelist
if v == nil { if v == nil {
onM(func() { systemstack(func() {
mCache_Refill(c, tinySizeClass) mCache_Refill(c, tinySizeClass)
}) })
s = c.alloc[tinySizeClass] s = c.alloc[tinySizeClass]
...@@ -173,7 +173,7 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer { ...@@ -173,7 +173,7 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
s = c.alloc[sizeclass] s = c.alloc[sizeclass]
v := s.freelist v := s.freelist
if v == nil { if v == nil {
onM(func() { systemstack(func() {
mCache_Refill(c, int32(sizeclass)) mCache_Refill(c, int32(sizeclass))
}) })
s = c.alloc[sizeclass] s = c.alloc[sizeclass]
...@@ -193,7 +193,7 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer { ...@@ -193,7 +193,7 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
c.local_cachealloc += intptr(size) c.local_cachealloc += intptr(size)
} else { } else {
var s *mspan var s *mspan
onM(func() { systemstack(func() {
s = largeAlloc(size, uint32(flags)) s = largeAlloc(size, uint32(flags))
}) })
x = unsafe.Pointer(uintptr(s.start << pageShift)) x = unsafe.Pointer(uintptr(s.start << pageShift))
...@@ -247,22 +247,17 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer { ...@@ -247,22 +247,17 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
// into the GC bitmap. It's 7 times slower than copying // into the GC bitmap. It's 7 times slower than copying
// from the pre-unrolled mask, but saves 1/16 of type size // from the pre-unrolled mask, but saves 1/16 of type size
// memory for the mask. // memory for the mask.
mp := acquirem() systemstack(func() {
mp.ptrarg[0] = x unrollgcproginplace_m(x, typ, size, size0)
mp.ptrarg[1] = unsafe.Pointer(typ) })
mp.scalararg[0] = uintptr(size)
mp.scalararg[1] = uintptr(size0)
onM(unrollgcproginplace_m)
releasem(mp)
goto marked goto marked
} }
ptrmask = (*uint8)(unsafe.Pointer(uintptr(typ.gc[0]))) ptrmask = (*uint8)(unsafe.Pointer(uintptr(typ.gc[0])))
// Check whether the program is already unrolled. // Check whether the program is already unrolled.
if uintptr(atomicloadp(unsafe.Pointer(ptrmask)))&0xff == 0 { if uintptr(atomicloadp(unsafe.Pointer(ptrmask)))&0xff == 0 {
mp := acquirem() systemstack(func() {
mp.ptrarg[0] = unsafe.Pointer(typ) unrollgcprog_m(typ)
onM(unrollgcprog_m) })
releasem(mp)
} }
ptrmask = (*uint8)(add(unsafe.Pointer(ptrmask), 1)) // skip the unroll flag byte ptrmask = (*uint8)(add(unsafe.Pointer(ptrmask), 1)) // skip the unroll flag byte
} else { } else {
...@@ -434,7 +429,7 @@ func gogc(force int32) { ...@@ -434,7 +429,7 @@ func gogc(force int32) {
mp = acquirem() mp = acquirem()
mp.gcing = 1 mp.gcing = 1
releasem(mp) releasem(mp)
onM(stoptheworld) systemstack(stoptheworld)
if mp != acquirem() { if mp != acquirem() {
gothrow("gogc: rescheduled") gothrow("gogc: rescheduled")
} }
...@@ -455,20 +450,16 @@ func gogc(force int32) { ...@@ -455,20 +450,16 @@ func gogc(force int32) {
startTime = nanotime() startTime = nanotime()
} }
// switch to g0, call gc, then switch back // switch to g0, call gc, then switch back
mp.scalararg[0] = uintptr(uint32(startTime)) // low 32 bits eagersweep := force >= 2
mp.scalararg[1] = uintptr(startTime >> 32) // high 32 bits systemstack(func() {
if force >= 2 { gc_m(startTime, eagersweep)
mp.scalararg[2] = 1 // eagersweep })
} else {
mp.scalararg[2] = 0
}
onM(gc_m)
} }
// all done // all done
mp.gcing = 0 mp.gcing = 0
semrelease(&worldsema) semrelease(&worldsema)
onM(starttheworld) systemstack(starttheworld)
releasem(mp) releasem(mp)
mp = nil mp = nil
...@@ -580,8 +571,8 @@ func SetFinalizer(obj interface{}, finalizer interface{}) { ...@@ -580,8 +571,8 @@ func SetFinalizer(obj interface{}, finalizer interface{}) {
f := (*eface)(unsafe.Pointer(&finalizer)) f := (*eface)(unsafe.Pointer(&finalizer))
ftyp := f._type ftyp := f._type
if ftyp == nil { if ftyp == nil {
// switch to M stack and remove finalizer // switch to system stack and remove finalizer
onM(func() { systemstack(func() {
removefinalizer(e.data) removefinalizer(e.data)
}) })
return return
...@@ -628,7 +619,7 @@ okarg: ...@@ -628,7 +619,7 @@ okarg:
// make sure we have a finalizer goroutine // make sure we have a finalizer goroutine
createfing() createfing()
onM(func() { systemstack(func() {
if !addfinalizer(e.data, (*funcval)(f.data), nret, fint, ot) { if !addfinalizer(e.data, (*funcval)(f.data), nret, fint, ot) {
gothrow("runtime.SetFinalizer: finalizer already set") gothrow("runtime.SetFinalizer: finalizer already set")
} }
......
...@@ -35,7 +35,7 @@ func allocmcache() *mcache { ...@@ -35,7 +35,7 @@ func allocmcache() *mcache {
} }
func freemcache(c *mcache) { func freemcache(c *mcache) {
onM(func() { systemstack(func() {
mCache_ReleaseAll(c) mCache_ReleaseAll(c)
stackcache_clear(c) stackcache_clear(c)
gcworkbuffree(c.gcworkbuf) gcworkbuffree(c.gcworkbuf)
......
...@@ -82,15 +82,16 @@ func ReadMemStats(m *MemStats) { ...@@ -82,15 +82,16 @@ func ReadMemStats(m *MemStats) {
semacquire(&worldsema, false) semacquire(&worldsema, false)
gp := getg() gp := getg()
gp.m.gcing = 1 gp.m.gcing = 1
onM(stoptheworld) systemstack(stoptheworld)
gp.m.ptrarg[0] = noescape(unsafe.Pointer(m)) systemstack(func() {
onM(readmemstats_m) readmemstats_m(m)
})
gp.m.gcing = 0 gp.m.gcing = 0
gp.m.locks++ gp.m.locks++
semrelease(&worldsema) semrelease(&worldsema)
onM(starttheworld) systemstack(starttheworld)
gp.m.locks-- gp.m.locks--
} }
...@@ -99,14 +100,15 @@ func writeHeapDump(fd uintptr) { ...@@ -99,14 +100,15 @@ func writeHeapDump(fd uintptr) {
semacquire(&worldsema, false) semacquire(&worldsema, false)
gp := getg() gp := getg()
gp.m.gcing = 1 gp.m.gcing = 1
onM(stoptheworld) systemstack(stoptheworld)
gp.m.scalararg[0] = fd systemstack(func() {
onM(writeheapdump_m) writeheapdump_m(fd)
})
gp.m.gcing = 0 gp.m.gcing = 0
gp.m.locks++ gp.m.locks++
semrelease(&worldsema) semrelease(&worldsema)
onM(starttheworld) systemstack(starttheworld)
gp.m.locks-- gp.m.locks--
} }
...@@ -1058,7 +1058,7 @@ func sweepone() uintptr { ...@@ -1058,7 +1058,7 @@ func sweepone() uintptr {
func gosweepone() uintptr { func gosweepone() uintptr {
var ret uintptr var ret uintptr
onM(func() { systemstack(func() {
ret = sweepone() ret = sweepone()
}) })
return ret return ret
...@@ -1152,7 +1152,7 @@ func updatememstats(stats *gcstats) { ...@@ -1152,7 +1152,7 @@ func updatememstats(stats *gcstats) {
} }
// Flush MCache's to MCentral. // Flush MCache's to MCentral.
onM(flushallmcaches) systemstack(flushallmcaches)
// Aggregate local stats. // Aggregate local stats.
cachestats() cachestats()
...@@ -1193,13 +1193,6 @@ func updatememstats(stats *gcstats) { ...@@ -1193,13 +1193,6 @@ func updatememstats(stats *gcstats) {
memstats.heap_objects = memstats.nmalloc - memstats.nfree memstats.heap_objects = memstats.nmalloc - memstats.nfree
} }
// Structure of arguments passed to function gc().
// This allows the arguments to be passed via mcall.
type gc_args struct {
start_time int64 // start time of GC in ns (just before stoptheworld)
eagersweep bool
}
func gcinit() { func gcinit() {
if unsafe.Sizeof(workbuf{}) != _WorkbufSize { if unsafe.Sizeof(workbuf{}) != _WorkbufSize {
gothrow("runtime: size of Workbuf is suboptimal") gothrow("runtime: size of Workbuf is suboptimal")
...@@ -1211,21 +1204,18 @@ func gcinit() { ...@@ -1211,21 +1204,18 @@ func gcinit() {
gcbssmask = unrollglobgcprog((*byte)(unsafe.Pointer(&gcbss)), uintptr(unsafe.Pointer(&ebss))-uintptr(unsafe.Pointer(&bss))) gcbssmask = unrollglobgcprog((*byte)(unsafe.Pointer(&gcbss)), uintptr(unsafe.Pointer(&ebss))-uintptr(unsafe.Pointer(&bss)))
} }
func gc_m() { func gc_m(start_time int64, eagersweep bool) {
_g_ := getg() _g_ := getg()
gp := _g_.m.curg gp := _g_.m.curg
casgstatus(gp, _Grunning, _Gwaiting) casgstatus(gp, _Grunning, _Gwaiting)
gp.waitreason = "garbage collection" gp.waitreason = "garbage collection"
var a gc_args gc(start_time, eagersweep)
a.start_time = int64(_g_.m.scalararg[0]) | int64(uintptr(_g_.m.scalararg[1]))<<32
a.eagersweep = _g_.m.scalararg[2] != 0
gc(&a)
if nbadblock > 0 { if nbadblock > 0 {
// Work out path from root to bad block. // Work out path from root to bad block.
for { for {
gc(&a) gc(start_time, eagersweep)
if nbadblock >= int32(len(badblock)) { if nbadblock >= int32(len(badblock)) {
gothrow("cannot find path to bad pointer") gothrow("cannot find path to bad pointer")
} }
...@@ -1235,7 +1225,7 @@ func gc_m() { ...@@ -1235,7 +1225,7 @@ func gc_m() {
casgstatus(gp, _Gwaiting, _Grunning) casgstatus(gp, _Gwaiting, _Grunning)
} }
func gc(args *gc_args) { func gc(start_time int64, eagersweep bool) {
if _DebugGCPtrs { if _DebugGCPtrs {
print("GC start\n") print("GC start\n")
} }
...@@ -1246,8 +1236,8 @@ func gc(args *gc_args) { ...@@ -1246,8 +1236,8 @@ func gc(args *gc_args) {
_g_ := getg() _g_ := getg()
_g_.m.traceback = 2 _g_.m.traceback = 2
t0 := args.start_time t0 := start_time
work.tstart = args.start_time work.tstart = start_time
var t1 int64 var t1 int64
if debug.gctrace > 0 { if debug.gctrace > 0 {
...@@ -1367,7 +1357,7 @@ func gc(args *gc_args) { ...@@ -1367,7 +1357,7 @@ func gc(args *gc_args) {
sweep.spanidx = 0 sweep.spanidx = 0
unlock(&mheap_.lock) unlock(&mheap_.lock)
if _ConcurrentSweep && !args.eagersweep { if _ConcurrentSweep && !eagersweep {
lock(&gclock) lock(&gclock)
if !sweep.started { if !sweep.started {
go bgsweep() go bgsweep()
...@@ -1394,11 +1384,7 @@ func gc(args *gc_args) { ...@@ -1394,11 +1384,7 @@ func gc(args *gc_args) {
} }
} }
func readmemstats_m() { func readmemstats_m(stats *MemStats) {
_g_ := getg()
stats := (*mstats)(_g_.m.ptrarg[0])
_g_.m.ptrarg[0] = nil
updatememstats(nil) updatememstats(nil)
// Size of the trailing by_size array differs between Go and C, // Size of the trailing by_size array differs between Go and C,
...@@ -1406,14 +1392,14 @@ func readmemstats_m() { ...@@ -1406,14 +1392,14 @@ func readmemstats_m() {
memmove(unsafe.Pointer(stats), unsafe.Pointer(&memstats), sizeof_C_MStats) memmove(unsafe.Pointer(stats), unsafe.Pointer(&memstats), sizeof_C_MStats)
// Stack numbers are part of the heap numbers, separate those out for user consumption // Stack numbers are part of the heap numbers, separate those out for user consumption
stats.stacks_sys = stats.stacks_inuse stats.StackSys = stats.StackInuse
stats.heap_inuse -= stats.stacks_inuse stats.HeapInuse -= stats.StackInuse
stats.heap_sys -= stats.stacks_inuse stats.HeapSys -= stats.StackInuse
} }
//go:linkname readGCStats runtime/debug.readGCStats //go:linkname readGCStats runtime/debug.readGCStats
func readGCStats(pauses *[]uint64) { func readGCStats(pauses *[]uint64) {
onM(func() { systemstack(func() {
readGCStats_m(pauses) readGCStats_m(pauses)
}) })
} }
...@@ -1578,16 +1564,7 @@ func unrollglobgcprog(prog *byte, size uintptr) bitvector { ...@@ -1578,16 +1564,7 @@ func unrollglobgcprog(prog *byte, size uintptr) bitvector {
return bitvector{int32(masksize * 8), &mask[0]} return bitvector{int32(masksize * 8), &mask[0]}
} }
func unrollgcproginplace_m() { func unrollgcproginplace_m(v unsafe.Pointer, typ *_type, size, size0 uintptr) {
_g_ := getg()
v := _g_.m.ptrarg[0]
typ := (*_type)(_g_.m.ptrarg[1])
size := _g_.m.scalararg[0]
size0 := _g_.m.scalararg[1]
_g_.m.ptrarg[0] = nil
_g_.m.ptrarg[1] = nil
pos := uintptr(0) pos := uintptr(0)
prog := (*byte)(unsafe.Pointer(uintptr(typ.gc[1]))) prog := (*byte)(unsafe.Pointer(uintptr(typ.gc[1])))
for pos != size0 { for pos != size0 {
...@@ -1613,12 +1590,7 @@ func unrollgcproginplace_m() { ...@@ -1613,12 +1590,7 @@ func unrollgcproginplace_m() {
var unroll mutex var unroll mutex
// Unrolls GC program in typ.gc[1] into typ.gc[0] // Unrolls GC program in typ.gc[1] into typ.gc[0]
func unrollgcprog_m() { func unrollgcprog_m(typ *_type) {
_g_ := getg()
typ := (*_type)(_g_.m.ptrarg[0])
_g_.m.ptrarg[0] = nil
lock(&unroll) lock(&unroll)
mask := (*byte)(unsafe.Pointer(uintptr(typ.gc[0]))) mask := (*byte)(unsafe.Pointer(uintptr(typ.gc[0])))
if *mask == 0 { if *mask == 0 {
......
...@@ -28,7 +28,7 @@ func gc_unixnanotime(now *int64) { ...@@ -28,7 +28,7 @@ func gc_unixnanotime(now *int64) {
func freeOSMemory() { func freeOSMemory() {
gogc(2) // force GC and do eager sweep gogc(2) // force GC and do eager sweep
onM(scavenge_m) systemstack(scavenge_m)
} }
var poolcleanup func() var poolcleanup func()
......
...@@ -174,7 +174,7 @@ func mHeap_Reclaim(h *mheap, npage uintptr) { ...@@ -174,7 +174,7 @@ func mHeap_Reclaim(h *mheap, npage uintptr) {
func mHeap_Alloc_m(h *mheap, npage uintptr, sizeclass int32, large bool) *mspan { func mHeap_Alloc_m(h *mheap, npage uintptr, sizeclass int32, large bool) *mspan {
_g_ := getg() _g_ := getg()
if _g_ != _g_.m.g0 { if _g_ != _g_.m.g0 {
gothrow("_mheap_alloc not on M stack") gothrow("_mheap_alloc not on g0 stack")
} }
lock(&h.lock) lock(&h.lock)
...@@ -226,7 +226,7 @@ func mHeap_Alloc(h *mheap, npage uintptr, sizeclass int32, large bool, needzero ...@@ -226,7 +226,7 @@ func mHeap_Alloc(h *mheap, npage uintptr, sizeclass int32, large bool, needzero
// It might trigger stack growth, and the stack growth code needs // It might trigger stack growth, and the stack growth code needs
// to be able to allocate heap. // to be able to allocate heap.
var s *mspan var s *mspan
onM(func() { systemstack(func() {
s = mHeap_Alloc_m(h, npage, sizeclass, large) s = mHeap_Alloc_m(h, npage, sizeclass, large)
}) })
...@@ -242,7 +242,7 @@ func mHeap_Alloc(h *mheap, npage uintptr, sizeclass int32, large bool, needzero ...@@ -242,7 +242,7 @@ func mHeap_Alloc(h *mheap, npage uintptr, sizeclass int32, large bool, needzero
func mHeap_AllocStack(h *mheap, npage uintptr) *mspan { func mHeap_AllocStack(h *mheap, npage uintptr) *mspan {
_g_ := getg() _g_ := getg()
if _g_ != _g_.m.g0 { if _g_ != _g_.m.g0 {
gothrow("mheap_allocstack not on M stack") gothrow("mheap_allocstack not on g0 stack")
} }
lock(&h.lock) lock(&h.lock)
s := mHeap_AllocSpanLocked(h, npage) s := mHeap_AllocSpanLocked(h, npage)
...@@ -428,7 +428,7 @@ func mHeap_LookupMaybe(h *mheap, v unsafe.Pointer) *mspan { ...@@ -428,7 +428,7 @@ func mHeap_LookupMaybe(h *mheap, v unsafe.Pointer) *mspan {
// Free the span back into the heap. // Free the span back into the heap.
func mHeap_Free(h *mheap, s *mspan, acct int32) { func mHeap_Free(h *mheap, s *mspan, acct int32) {
onM(func() { systemstack(func() {
mp := getg().m mp := getg().m
lock(&h.lock) lock(&h.lock)
memstats.heap_alloc += uint64(mp.mcache.local_cachealloc) memstats.heap_alloc += uint64(mp.mcache.local_cachealloc)
...@@ -447,7 +447,7 @@ func mHeap_Free(h *mheap, s *mspan, acct int32) { ...@@ -447,7 +447,7 @@ func mHeap_Free(h *mheap, s *mspan, acct int32) {
func mHeap_FreeStack(h *mheap, s *mspan) { func mHeap_FreeStack(h *mheap, s *mspan) {
_g_ := getg() _g_ := getg()
if _g_ != _g_.m.g0 { if _g_ != _g_.m.g0 {
gothrow("mheap_freestack not on M stack") gothrow("mheap_freestack not on g0 stack")
} }
s.needzero = 1 s.needzero = 1
lock(&h.lock) lock(&h.lock)
......
...@@ -244,7 +244,7 @@ func mProf_Malloc(p unsafe.Pointer, size uintptr) { ...@@ -244,7 +244,7 @@ func mProf_Malloc(p unsafe.Pointer, size uintptr) {
// This reduces potential contention and chances of deadlocks. // This reduces potential contention and chances of deadlocks.
// Since the object must be alive during call to mProf_Malloc, // Since the object must be alive during call to mProf_Malloc,
// it's fine to do this non-atomically. // it's fine to do this non-atomically.
onM(func() { systemstack(func() {
setprofilebucket(p, b) setprofilebucket(p, b)
}) })
} }
...@@ -523,7 +523,7 @@ func GoroutineProfile(p []StackRecord) (n int, ok bool) { ...@@ -523,7 +523,7 @@ func GoroutineProfile(p []StackRecord) (n int, ok bool) {
gp := getg() gp := getg()
semacquire(&worldsema, false) semacquire(&worldsema, false)
gp.m.gcing = 1 gp.m.gcing = 1
onM(stoptheworld) systemstack(stoptheworld)
n = NumGoroutine() n = NumGoroutine()
if n <= len(p) { if n <= len(p) {
...@@ -531,7 +531,7 @@ func GoroutineProfile(p []StackRecord) (n int, ok bool) { ...@@ -531,7 +531,7 @@ func GoroutineProfile(p []StackRecord) (n int, ok bool) {
r := p r := p
sp := getcallersp(unsafe.Pointer(&p)) sp := getcallersp(unsafe.Pointer(&p))
pc := getcallerpc(unsafe.Pointer(&p)) pc := getcallerpc(unsafe.Pointer(&p))
onM(func() { systemstack(func() {
saveg(pc, sp, gp, &r[0]) saveg(pc, sp, gp, &r[0])
}) })
r = r[1:] r = r[1:]
...@@ -546,7 +546,7 @@ func GoroutineProfile(p []StackRecord) (n int, ok bool) { ...@@ -546,7 +546,7 @@ func GoroutineProfile(p []StackRecord) (n int, ok bool) {
gp.m.gcing = 0 gp.m.gcing = 0
semrelease(&worldsema) semrelease(&worldsema)
onM(starttheworld) systemstack(starttheworld)
} }
return n, ok return n, ok
...@@ -570,7 +570,7 @@ func Stack(buf []byte, all bool) int { ...@@ -570,7 +570,7 @@ func Stack(buf []byte, all bool) int {
semacquire(&worldsema, false) semacquire(&worldsema, false)
mp.gcing = 1 mp.gcing = 1
releasem(mp) releasem(mp)
onM(stoptheworld) systemstack(stoptheworld)
if mp != acquirem() { if mp != acquirem() {
gothrow("Stack: rescheduled") gothrow("Stack: rescheduled")
} }
...@@ -580,7 +580,7 @@ func Stack(buf []byte, all bool) int { ...@@ -580,7 +580,7 @@ func Stack(buf []byte, all bool) int {
if len(buf) > 0 { if len(buf) > 0 {
sp := getcallersp(unsafe.Pointer(&buf)) sp := getcallersp(unsafe.Pointer(&buf))
pc := getcallerpc(unsafe.Pointer(&buf)) pc := getcallerpc(unsafe.Pointer(&buf))
onM(func() { systemstack(func() {
g0 := getg() g0 := getg()
g0.writebuf = buf[0:0:len(buf)] g0.writebuf = buf[0:0:len(buf)]
goroutineheader(gp) goroutineheader(gp)
...@@ -596,7 +596,7 @@ func Stack(buf []byte, all bool) int { ...@@ -596,7 +596,7 @@ func Stack(buf []byte, all bool) int {
if all { if all {
mp.gcing = 0 mp.gcing = 0
semrelease(&worldsema) semrelease(&worldsema)
onM(starttheworld) systemstack(starttheworld)
} }
releasem(mp) releasem(mp)
return n return n
...@@ -619,7 +619,7 @@ func tracealloc(p unsafe.Pointer, size uintptr, typ *_type) { ...@@ -619,7 +619,7 @@ func tracealloc(p unsafe.Pointer, size uintptr, typ *_type) {
goroutineheader(gp) goroutineheader(gp)
pc := getcallerpc(unsafe.Pointer(&p)) pc := getcallerpc(unsafe.Pointer(&p))
sp := getcallersp(unsafe.Pointer(&p)) sp := getcallersp(unsafe.Pointer(&p))
onM(func() { systemstack(func() {
traceback(pc, sp, 0, gp) traceback(pc, sp, 0, gp)
}) })
} else { } else {
...@@ -639,7 +639,7 @@ func tracefree(p unsafe.Pointer, size uintptr) { ...@@ -639,7 +639,7 @@ func tracefree(p unsafe.Pointer, size uintptr) {
goroutineheader(gp) goroutineheader(gp)
pc := getcallerpc(unsafe.Pointer(&p)) pc := getcallerpc(unsafe.Pointer(&p))
sp := getcallersp(unsafe.Pointer(&p)) sp := getcallersp(unsafe.Pointer(&p))
onM(func() { systemstack(func() {
traceback(pc, sp, 0, gp) traceback(pc, sp, 0, gp)
}) })
print("\n") print("\n")
......
...@@ -72,7 +72,7 @@ type pollCache struct { ...@@ -72,7 +72,7 @@ type pollCache struct {
var pollcache pollCache var pollcache pollCache
func netpollServerInit() { func netpollServerInit() {
onM(netpollinit) systemstack(netpollinit)
} }
func netpollOpen(fd uintptr) (*pollDesc, int) { func netpollOpen(fd uintptr) (*pollDesc, int) {
...@@ -94,7 +94,7 @@ func netpollOpen(fd uintptr) (*pollDesc, int) { ...@@ -94,7 +94,7 @@ func netpollOpen(fd uintptr) (*pollDesc, int) {
unlock(&pd.lock) unlock(&pd.lock)
var errno int32 var errno int32
onM(func() { systemstack(func() {
errno = netpollopen(fd, pd) errno = netpollopen(fd, pd)
}) })
return pd, int(errno) return pd, int(errno)
...@@ -110,7 +110,7 @@ func netpollClose(pd *pollDesc) { ...@@ -110,7 +110,7 @@ func netpollClose(pd *pollDesc) {
if pd.rg != 0 && pd.rg != pdReady { if pd.rg != 0 && pd.rg != pdReady {
gothrow("netpollClose: blocked read on closing descriptor") gothrow("netpollClose: blocked read on closing descriptor")
} }
onM(func() { systemstack(func() {
netpollclose(uintptr(pd.fd)) netpollclose(uintptr(pd.fd))
}) })
pollcache.free(pd) pollcache.free(pd)
...@@ -143,7 +143,7 @@ func netpollWait(pd *pollDesc, mode int) int { ...@@ -143,7 +143,7 @@ func netpollWait(pd *pollDesc, mode int) int {
} }
// As for now only Solaris uses level-triggered IO. // As for now only Solaris uses level-triggered IO.
if GOOS == "solaris" { if GOOS == "solaris" {
onM(func() { systemstack(func() {
netpollarm(pd, mode) netpollarm(pd, mode)
}) })
} }
......
...@@ -24,7 +24,7 @@ func semawakeup(mp *m) { ...@@ -24,7 +24,7 @@ func semawakeup(mp *m) {
//go:nosplit //go:nosplit
func semacreate() uintptr { func semacreate() uintptr {
var x uintptr var x uintptr
onM(func() { systemstack(func() {
x = uintptr(mach_semcreate()) x = uintptr(mach_semcreate())
}) })
return x return x
...@@ -349,7 +349,7 @@ func semasleep1(ns int64) int32 { ...@@ -349,7 +349,7 @@ func semasleep1(ns int64) int32 {
//go:nosplit //go:nosplit
func semasleep(ns int64) int32 { func semasleep(ns int64) int32 {
var r int32 var r int32
onM(func() { systemstack(func() {
r = semasleep1(ns) r = semasleep1(ns)
}) })
return r return r
...@@ -368,9 +368,9 @@ func mach_semrelease(sem uint32) { ...@@ -368,9 +368,9 @@ func mach_semrelease(sem uint32) {
// mach_semrelease must be completely nosplit, // mach_semrelease must be completely nosplit,
// because it is called from Go code. // because it is called from Go code.
// If we're going to die, start that process on the m stack // If we're going to die, start that process on the system stack
// to avoid a Go stack split. // to avoid a Go stack split.
onM_signalok(func() { macherror(r, "semaphore_signal") }) systemstack(func() { macherror(r, "semaphore_signal") })
} }
} }
......
...@@ -32,7 +32,7 @@ func getncpu() int32 { ...@@ -32,7 +32,7 @@ func getncpu() int32 {
//go:nosplit //go:nosplit
func futexsleep(addr *uint32, val uint32, ns int64) { func futexsleep(addr *uint32, val uint32, ns int64) {
onM(func() { systemstack(func() {
futexsleep1(addr, val, ns) futexsleep1(addr, val, ns)
}) })
} }
...@@ -60,7 +60,7 @@ func futexwakeup(addr *uint32, cnt uint32) { ...@@ -60,7 +60,7 @@ func futexwakeup(addr *uint32, cnt uint32) {
return return
} }
onM(func() { systemstack(func() {
print("umtx_wake_addr=", addr, " ret=", ret, "\n") print("umtx_wake_addr=", addr, " ret=", ret, "\n")
}) })
} }
......
...@@ -58,7 +58,7 @@ func futexwakeup(addr *uint32, cnt uint32) { ...@@ -58,7 +58,7 @@ func futexwakeup(addr *uint32, cnt uint32) {
// I don't know that futex wakeup can return // I don't know that futex wakeup can return
// EAGAIN or EINTR, but if it does, it would be // EAGAIN or EINTR, but if it does, it would be
// safe to loop and call futex again. // safe to loop and call futex again.
onM_signalok(func() { systemstack(func() {
print("futexwakeup addr=", addr, " returned ", ret, "\n") print("futexwakeup addr=", addr, " returned ", ret, "\n")
}) })
......
...@@ -55,8 +55,8 @@ func throwinit() { ...@@ -55,8 +55,8 @@ func throwinit() {
//go:nosplit //go:nosplit
func deferproc(siz int32, fn *funcval) { // arguments of fn follow fn func deferproc(siz int32, fn *funcval) { // arguments of fn follow fn
if getg().m.curg != getg() { if getg().m.curg != getg() {
// go code on the m stack can't defer // go code on the system stack can't defer
gothrow("defer on m") gothrow("defer on system stack")
} }
// the arguments of fn are in a perilous state. The stack map // the arguments of fn are in a perilous state. The stack map
...@@ -71,7 +71,7 @@ func deferproc(siz int32, fn *funcval) { // arguments of fn follow fn ...@@ -71,7 +71,7 @@ func deferproc(siz int32, fn *funcval) { // arguments of fn follow fn
} }
callerpc := getcallerpc(unsafe.Pointer(&siz)) callerpc := getcallerpc(unsafe.Pointer(&siz))
onM(func() { systemstack(func() {
d := newdefer(siz) d := newdefer(siz)
if d._panic != nil { if d._panic != nil {
gothrow("deferproc: d.panic != nil after newdefer") gothrow("deferproc: d.panic != nil after newdefer")
...@@ -322,7 +322,7 @@ func gopanic(e interface{}) { ...@@ -322,7 +322,7 @@ func gopanic(e interface{}) {
print("panic: ") print("panic: ")
printany(e) printany(e)
print("\n") print("\n")
gothrow("panic on m stack") gothrow("panic on system stack")
} }
// m.softfloat is set during software floating point. // m.softfloat is set during software floating point.
...@@ -470,17 +470,17 @@ func gorecover(argp uintptr) interface{} { ...@@ -470,17 +470,17 @@ func gorecover(argp uintptr) interface{} {
//go:nosplit //go:nosplit
func startpanic() { func startpanic() {
onM_signalok(startpanic_m) systemstack(startpanic_m)
} }
//go:nosplit //go:nosplit
func dopanic(unused int) { func dopanic(unused int) {
pc := getcallerpc(unsafe.Pointer(&unused))
sp := getcallersp(unsafe.Pointer(&unused))
gp := getg() gp := getg()
mp := acquirem() systemstack(func() {
mp.ptrarg[0] = unsafe.Pointer(gp) dopanic_m(gp, pc, sp) // should never return
mp.scalararg[0] = getcallerpc((unsafe.Pointer)(&unused)) })
mp.scalararg[1] = getcallersp((unsafe.Pointer)(&unused))
onM_signalok(dopanic_m) // should never return
*(*int)(nil) = 0 *(*int)(nil) = 0
} }
......
...@@ -94,20 +94,13 @@ func startpanic_m() { ...@@ -94,20 +94,13 @@ func startpanic_m() {
var didothers bool var didothers bool
var deadlock mutex var deadlock mutex
func dopanic_m() { func dopanic_m(gp *g, pc, sp uintptr) {
_g_ := getg()
gp := (*g)(_g_.m.ptrarg[0])
_g_.m.ptrarg[0] = nil
pc := uintptr(_g_.m.scalararg[0])
sp := uintptr(_g_.m.scalararg[1])
_g_.m.scalararg[1] = 0
if gp.sig != 0 { if gp.sig != 0 {
print("[signal ", hex(gp.sig), " code=", hex(gp.sigcode0), " addr=", hex(gp.sigcode1), " pc=", hex(gp.sigpc), "]\n") print("[signal ", hex(gp.sig), " code=", hex(gp.sigcode0), " addr=", hex(gp.sigcode1), " pc=", hex(gp.sigpc), "]\n")
} }
var docrash bool var docrash bool
_g_ := getg()
if t := gotraceback(&docrash); t > 0 { if t := gotraceback(&docrash); t > 0 {
if gp != gp.m.g0 { if gp != gp.m.g0 {
print("\n") print("\n")
......
...@@ -27,7 +27,7 @@ func main() { ...@@ -27,7 +27,7 @@ func main() {
maxstacksize = 250000000 maxstacksize = 250000000
} }
onM(newsysmon) systemstack(newsysmon)
// Lock the main goroutine onto this, the main OS thread, // Lock the main goroutine onto this, the main OS thread,
// during initialization. Most programs won't care, but a few // during initialization. Most programs won't care, but a few
...@@ -151,7 +151,7 @@ func goparkunlock(lock *mutex, reason string) { ...@@ -151,7 +151,7 @@ func goparkunlock(lock *mutex, reason string) {
} }
func goready(gp *g) { func goready(gp *g) {
onM(func() { systemstack(func() {
ready(gp) ready(gp)
}) })
} }
......
...@@ -362,7 +362,7 @@ func castogscanstatus(gp *g, oldval, newval uint32) bool { ...@@ -362,7 +362,7 @@ func castogscanstatus(gp *g, oldval, newval uint32) bool {
//go:nosplit //go:nosplit
func casgstatus(gp *g, oldval, newval uint32) { func casgstatus(gp *g, oldval, newval uint32) {
if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval { if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
onM(func() { systemstack(func() {
print("casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n") print("casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
gothrow("casgstatus: bad incoming values") gothrow("casgstatus: bad incoming values")
}) })
...@@ -374,7 +374,7 @@ func casgstatus(gp *g, oldval, newval uint32) { ...@@ -374,7 +374,7 @@ func casgstatus(gp *g, oldval, newval uint32) {
// Help GC if needed. // Help GC if needed.
if gp.preemptscan && !gp.gcworkdone && (oldval == _Grunning || oldval == _Gsyscall) { if gp.preemptscan && !gp.gcworkdone && (oldval == _Grunning || oldval == _Gsyscall) {
gp.preemptscan = false gp.preemptscan = false
onM(func() { systemstack(func() {
gcphasework(gp) gcphasework(gp)
}) })
} }
...@@ -1573,8 +1573,8 @@ func save(pc, sp uintptr) { ...@@ -1573,8 +1573,8 @@ func save(pc, sp uintptr) {
// because we do not know which of the uintptr arguments are // because we do not know which of the uintptr arguments are
// really pointers (back into the stack). // really pointers (back into the stack).
// In practice, this means that we make the fast path run through // In practice, this means that we make the fast path run through
// entersyscall doing no-split things, and the slow path has to use onM // entersyscall doing no-split things, and the slow path has to use systemstack
// to run bigger things on the m stack. // to run bigger things on the system stack.
// //
// reentersyscall is the entry point used by cgo callbacks, where explicitly // reentersyscall is the entry point used by cgo callbacks, where explicitly
// saved SP and PC are restored. This is needed when exitsyscall will be called // saved SP and PC are restored. This is needed when exitsyscall will be called
...@@ -1602,11 +1602,11 @@ func reentersyscall(pc, sp uintptr) { ...@@ -1602,11 +1602,11 @@ func reentersyscall(pc, sp uintptr) {
_g_.syscallpc = pc _g_.syscallpc = pc
casgstatus(_g_, _Grunning, _Gsyscall) casgstatus(_g_, _Grunning, _Gsyscall)
if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp { if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
onM(entersyscall_bad) systemstack(entersyscall_bad)
} }
if atomicload(&sched.sysmonwait) != 0 { // TODO: fast atomic if atomicload(&sched.sysmonwait) != 0 { // TODO: fast atomic
onM(entersyscall_sysmon) systemstack(entersyscall_sysmon)
save(pc, sp) save(pc, sp)
} }
...@@ -1614,7 +1614,7 @@ func reentersyscall(pc, sp uintptr) { ...@@ -1614,7 +1614,7 @@ func reentersyscall(pc, sp uintptr) {
_g_.m.p.m = nil _g_.m.p.m = nil
atomicstore(&_g_.m.p.status, _Psyscall) atomicstore(&_g_.m.p.status, _Psyscall)
if sched.gcwaiting != 0 { if sched.gcwaiting != 0 {
onM(entersyscall_gcwait) systemstack(entersyscall_gcwait)
save(pc, sp) save(pc, sp)
} }
...@@ -1674,10 +1674,10 @@ func entersyscallblock(dummy int32) { ...@@ -1674,10 +1674,10 @@ func entersyscallblock(dummy int32) {
_g_.syscallpc = _g_.sched.pc _g_.syscallpc = _g_.sched.pc
casgstatus(_g_, _Grunning, _Gsyscall) casgstatus(_g_, _Grunning, _Gsyscall)
if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp { if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
onM(entersyscall_bad) systemstack(entersyscall_bad)
} }
onM(entersyscallblock_handoff) systemstack(entersyscallblock_handoff)
// Resave for traceback during blocked call. // Resave for traceback during blocked call.
save(getcallerpc(unsafe.Pointer(&dummy)), getcallersp(unsafe.Pointer(&dummy))) save(getcallerpc(unsafe.Pointer(&dummy)), getcallersp(unsafe.Pointer(&dummy)))
...@@ -1768,18 +1768,18 @@ func exitsyscallfast() bool { ...@@ -1768,18 +1768,18 @@ func exitsyscallfast() bool {
// Try to get any other idle P. // Try to get any other idle P.
_g_.m.p = nil _g_.m.p = nil
if sched.pidle != nil { if sched.pidle != nil {
onM(exitsyscallfast_pidle) var ok bool
if _g_.m.scalararg[0] != 0 { systemstack(func() {
_g_.m.scalararg[0] = 0 ok = exitsyscallfast_pidle()
})
if ok {
return true return true
} }
} }
return false return false
} }
func exitsyscallfast_pidle() { func exitsyscallfast_pidle() bool {
_g_ := getg()
lock(&sched.lock) lock(&sched.lock)
_p_ := pidleget() _p_ := pidleget()
if _p_ != nil && atomicload(&sched.sysmonwait) != 0 { if _p_ != nil && atomicload(&sched.sysmonwait) != 0 {
...@@ -1789,10 +1789,9 @@ func exitsyscallfast_pidle() { ...@@ -1789,10 +1789,9 @@ func exitsyscallfast_pidle() {
unlock(&sched.lock) unlock(&sched.lock)
if _p_ != nil { if _p_ != nil {
acquirep(_p_) acquirep(_p_)
_g_.m.scalararg[0] = 1 return true
} else {
_g_.m.scalararg[0] = 0
} }
return false
} }
// exitsyscall slow path on g0. // exitsyscall slow path on g0.
...@@ -1844,7 +1843,7 @@ func beforefork() { ...@@ -1844,7 +1843,7 @@ func beforefork() {
// Called from syscall package before fork. // Called from syscall package before fork.
//go:nosplit //go:nosplit
func syscall_BeforeFork() { func syscall_BeforeFork() {
onM(beforefork) systemstack(beforefork)
} }
func afterfork() { func afterfork() {
...@@ -1863,7 +1862,7 @@ func afterfork() { ...@@ -1863,7 +1862,7 @@ func afterfork() {
// Called from syscall package after fork in parent. // Called from syscall package after fork in parent.
//go:nosplit //go:nosplit
func syscall_AfterFork() { func syscall_AfterFork() {
onM(afterfork) systemstack(afterfork)
} }
// Allocate a new g, with a stack big enough for stacksize bytes. // Allocate a new g, with a stack big enough for stacksize bytes.
...@@ -1871,7 +1870,7 @@ func malg(stacksize int32) *g { ...@@ -1871,7 +1870,7 @@ func malg(stacksize int32) *g {
newg := allocg() newg := allocg()
if stacksize >= 0 { if stacksize >= 0 {
stacksize = round2(_StackSystem + stacksize) stacksize = round2(_StackSystem + stacksize)
onM(func() { systemstack(func() {
newg.stack = stackalloc(uint32(stacksize)) newg.stack = stackalloc(uint32(stacksize))
}) })
newg.stackguard0 = newg.stack.lo + _StackGuard newg.stackguard0 = newg.stack.lo + _StackGuard
...@@ -1894,7 +1893,7 @@ func newproc(siz int32, fn *funcval) { ...@@ -1894,7 +1893,7 @@ func newproc(siz int32, fn *funcval) {
} }
pc := getcallerpc(unsafe.Pointer(&siz)) pc := getcallerpc(unsafe.Pointer(&siz))
onM(func() { systemstack(func() {
newproc1(fn, (*uint8)(argp), siz, 0, pc) newproc1(fn, (*uint8)(argp), siz, 0, pc)
}) })
} }
...@@ -2037,7 +2036,7 @@ retry: ...@@ -2037,7 +2036,7 @@ retry:
_p_.gfreecnt-- _p_.gfreecnt--
if gp.stack.lo == 0 { if gp.stack.lo == 0 {
// Stack was deallocated in gfput. Allocate a new one. // Stack was deallocated in gfput. Allocate a new one.
onM(func() { systemstack(func() {
gp.stack = stackalloc(_FixedStack) gp.stack = stackalloc(_FixedStack)
}) })
gp.stackguard0 = gp.stack.lo + _StackGuard gp.stackguard0 = gp.stack.lo + _StackGuard
...@@ -2121,7 +2120,7 @@ func UnlockOSThread() { ...@@ -2121,7 +2120,7 @@ func UnlockOSThread() {
func unlockOSThread() { func unlockOSThread() {
_g_ := getg() _g_ := getg()
if _g_.m.locked < _LockInternal { if _g_.m.locked < _LockInternal {
onM(badunlockosthread) systemstack(badunlockosthread)
} }
_g_.m.locked -= _LockInternal _g_.m.locked -= _LockInternal
dounlockOSThread() dounlockOSThread()
...@@ -2307,12 +2306,7 @@ func sigprof(pc *uint8, sp *uint8, lr *uint8, gp *g, mp *m) { ...@@ -2307,12 +2306,7 @@ func sigprof(pc *uint8, sp *uint8, lr *uint8, gp *g, mp *m) {
} }
// Arrange to call fn with a traceback hz times a second. // Arrange to call fn with a traceback hz times a second.
func setcpuprofilerate_m() { func setcpuprofilerate_m(hz int32) {
_g_ := getg()
hz := int32(_g_.m.scalararg[0])
_g_.m.scalararg[0] = 0
// Force sane arguments. // Force sane arguments.
if hz < 0 { if hz < 0 {
hz = 0 hz = 0
...@@ -2320,6 +2314,7 @@ func setcpuprofilerate_m() { ...@@ -2320,6 +2314,7 @@ func setcpuprofilerate_m() {
// Disable preemption, otherwise we can be rescheduled to another thread // Disable preemption, otherwise we can be rescheduled to another thread
// that has profiling enabled. // that has profiling enabled.
_g_ := getg()
_g_.m.locks++ _g_.m.locks++
// Stop profiler on this thread so that it is safe to lock prof. // Stop profiler on this thread so that it is safe to lock prof.
......
...@@ -80,8 +80,8 @@ func racesymbolize(ctx *symbolizeContext) { ...@@ -80,8 +80,8 @@ func racesymbolize(ctx *symbolizeContext) {
} }
ctx.fn = funcname(f) ctx.fn = funcname(f)
var file string file, line := funcline(f, ctx.pc)
ctx.line = uintptr(funcline(f, ctx.pc, &file)) ctx.line = uintptr(line)
ctx.file = &bytes(file)[0] // assume NUL-terminated ctx.file = &bytes(file)[0] // assume NUL-terminated
ctx.off = ctx.pc - f.entry ctx.off = ctx.pc - f.entry
ctx.res = 1 ctx.res = 1
......
...@@ -245,8 +245,6 @@ type m struct { ...@@ -245,8 +245,6 @@ type m struct {
traceback uint8 traceback uint8
waitunlockf unsafe.Pointer // todo go func(*g, unsafe.pointer) bool waitunlockf unsafe.Pointer // todo go func(*g, unsafe.pointer) bool
waitlock unsafe.Pointer waitlock unsafe.Pointer
scalararg [4]uintptr // scalar argument/return for mcall
ptrarg [4]unsafe.Pointer // pointer argument/return for mcall
//#ifdef GOOS_windows //#ifdef GOOS_windows
thread uintptr // thread handle thread uintptr // thread handle
// these are here because they are too large to be on the stack // these are here because they are too large to be on the stack
......
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
func sigenable_m() {
_g_ := getg()
sigenable(uint32(_g_.m.scalararg[0]))
}
func sigdisable_m() {
_g_ := getg()
sigdisable(uint32(_g_.m.scalararg[0]))
}
...@@ -7,5 +7,5 @@ ...@@ -7,5 +7,5 @@
package runtime package runtime
func os_sigpipe() { func os_sigpipe() {
onM(sigpipe) systemstack(sigpipe)
} }
...@@ -139,7 +139,7 @@ func signal_enable(s uint32) { ...@@ -139,7 +139,7 @@ func signal_enable(s uint32) {
return return
} }
sig.wanted[s/32] |= 1 << (s & 31) sig.wanted[s/32] |= 1 << (s & 31)
sigenable_go(s) sigenable(s)
} }
// Must only be called from a single goroutine at a time. // Must only be called from a single goroutine at a time.
...@@ -148,7 +148,7 @@ func signal_disable(s uint32) { ...@@ -148,7 +148,7 @@ func signal_disable(s uint32) {
return return
} }
sig.wanted[s/32] &^= 1 << (s & 31) sig.wanted[s/32] &^= 1 << (s & 31)
sigdisable_go(s) sigdisable(s)
} }
// This runs on a foreign stack, without an m or a g. No stack split. // This runs on a foreign stack, without an m or a g. No stack split.
...@@ -156,15 +156,3 @@ func signal_disable(s uint32) { ...@@ -156,15 +156,3 @@ func signal_disable(s uint32) {
func badsignal(sig uintptr) { func badsignal(sig uintptr) {
cgocallback(unsafe.Pointer(funcPC(sigsend)), noescape(unsafe.Pointer(&sig)), unsafe.Sizeof(sig)) cgocallback(unsafe.Pointer(funcPC(sigsend)), noescape(unsafe.Pointer(&sig)), unsafe.Sizeof(sig))
} }
func sigenable_go(s uint32) {
g := getg()
g.m.scalararg[0] = uintptr(s)
onM(sigenable_m)
}
func sigdisable_go(s uint32) {
g := getg()
g.m.scalararg[0] = uintptr(s)
onM(sigdisable_m)
}
...@@ -606,7 +606,7 @@ done: ...@@ -606,7 +606,7 @@ done:
//go:nosplit //go:nosplit
func _sfloat2(pc uint32, regs *[15]uint32) { func _sfloat2(pc uint32, regs *[15]uint32) {
onM(func() { systemstack(func() {
pc = sfloat2(pc, regs) pc = sfloat2(pc, regs)
}) })
} }
......
...@@ -418,8 +418,8 @@ func adjustframe(frame *stkframe, arg unsafe.Pointer) bool { ...@@ -418,8 +418,8 @@ func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
if stackDebug >= 2 { if stackDebug >= 2 {
print(" adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n") print(" adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
} }
if f.entry == switchtoMPC { if f.entry == systemstack_switchPC {
// A special routine at the bottom of stack of a goroutine that does an onM call. // A special routine at the bottom of stack of a goroutine that does a systemstack call.
// We will allow it to be copied even though we don't // We will allow it to be copied even though we don't
// have full GC info for it (because it is written in asm). // have full GC info for it (because it is written in asm).
return true return true
...@@ -801,7 +801,7 @@ func shrinkfinish() { ...@@ -801,7 +801,7 @@ func shrinkfinish() {
//go:nosplit //go:nosplit
func morestackc() { func morestackc() {
onM(func() { systemstack(func() {
gothrow("attempt to execute C code on Go stack") gothrow("attempt to execute C code on Go stack")
}) })
} }
...@@ -38,56 +38,28 @@ func getg() *g ...@@ -38,56 +38,28 @@ func getg() *g
//go:noescape //go:noescape
func mcall(fn func(*g)) func mcall(fn func(*g))
// onM switches from the g to the g0 stack and invokes fn(). // systemstack runs fn on a system stack.
// When fn returns, onM switches back to the g and returns, // If systemstack is called from the per-OS-thread (g0) stack, or
// continuing execution on the g stack. // if systemstack is called from the signal handling (gsignal) stack,
// If arguments must be passed to fn, they can be written to // systemstack calls fn directly and returns.
// g->m->ptrarg (pointers) and g->m->scalararg (non-pointers) // Otherwise, systemstack is being called from the limited stack
// before the call and then consulted during fn. // of an ordinary goroutine. In this case, systemstack switches
// Similarly, fn can pass return values back in those locations. // to the per-OS-thread stack, calls fn, and switches back.
// If fn is written in Go, it can be a closure, which avoids the need for // It is common to use a func literal as the argument, in order
// ptrarg and scalararg entirely. // to share inputs and outputs with the code around the call
// After reading values out of ptrarg and scalararg it is conventional // to system stack:
// to zero them to avoid (memory or information) leaks.
// //
// If onM is called from a g0 stack, it invokes fn and returns, // ... set up y ...
// without any stack switches. // systemstack(func() {
// // x = bigcall(y)
// If onM is called from a gsignal stack, it crashes the program. // })
// The implication is that functions used in signal handlers must // ... use x ...
// not use onM.
//
// NOTE(rsc): We could introduce a separate onMsignal that is
// like onM but if called from a gsignal stack would just run fn on
// that stack. The caller of onMsignal would be required to save the
// old values of ptrarg/scalararg and restore them when the call
// was finished, in case the signal interrupted an onM sequence
// in progress on the g or g0 stacks. Until there is a clear need for this,
// we just reject onM in signal handling contexts entirely.
//
//go:noescape
func onM(fn func())
// onMsignal is like onM but is allowed to be used in code that
// might run on the gsignal stack. Code running on a signal stack
// may be interrupting an onM sequence on the main stack, so
// if the onMsignal calling sequence writes to ptrarg/scalararg,
// it must first save the old values and then restore them when
// finished. As an exception to the rule, it is fine not to save and
// restore the values if the program is trying to crash rather than
// return from the signal handler.
// Once all the runtime is written in Go, there will be no ptrarg/scalararg
// and the distinction between onM and onMsignal (and perhaps mcall)
// can go away.
//
// If onMsignal is called from a gsignal stack, it invokes fn directly,
// without a stack switch. Otherwise onMsignal behaves like onM.
// //
//go:noescape //go:noescape
func onM_signalok(fn func()) func systemstack(fn func())
func badonm() { func badsystemstack() {
gothrow("onM called from signal goroutine") gothrow("systemstack called from unexpected goroutine")
} }
// memclr clears n bytes starting at ptr. // memclr clears n bytes starting at ptr.
...@@ -272,4 +244,4 @@ func call268435456(fn, arg unsafe.Pointer, n, retoffset uint32) ...@@ -272,4 +244,4 @@ func call268435456(fn, arg unsafe.Pointer, n, retoffset uint32)
func call536870912(fn, arg unsafe.Pointer, n, retoffset uint32) func call536870912(fn, arg unsafe.Pointer, n, retoffset uint32)
func call1073741824(fn, arg unsafe.Pointer, n, retoffset uint32) func call1073741824(fn, arg unsafe.Pointer, n, retoffset uint32)
func switchtoM() func systemstack_switch()
...@@ -121,8 +121,8 @@ func (f *Func) Entry() uintptr { ...@@ -121,8 +121,8 @@ func (f *Func) Entry() uintptr {
func (f *Func) FileLine(pc uintptr) (file string, line int) { func (f *Func) FileLine(pc uintptr) (file string, line int) {
// Pass strict=false here, because anyone can call this function, // Pass strict=false here, because anyone can call this function,
// and they might just be wrong about targetpc belonging to f. // and they might just be wrong about targetpc belonging to f.
line = int(funcline1(f.raw(), pc, &file, false)) file, line32 := funcline1(f.raw(), pc, false)
return file, line return file, int(line32)
} }
func findfunc(pc uintptr) *_func { func findfunc(pc uintptr) *_func {
...@@ -207,20 +207,19 @@ func gofuncname(f *_func) string { ...@@ -207,20 +207,19 @@ func gofuncname(f *_func) string {
return gostringnocopy(funcname(f)) return gostringnocopy(funcname(f))
} }
func funcline1(f *_func, targetpc uintptr, file *string, strict bool) int32 { func funcline1(f *_func, targetpc uintptr, strict bool) (file string, line int32) {
*file = "?"
fileno := int(pcvalue(f, f.pcfile, targetpc, strict)) fileno := int(pcvalue(f, f.pcfile, targetpc, strict))
line := pcvalue(f, f.pcln, targetpc, strict) line = pcvalue(f, f.pcln, targetpc, strict)
if fileno == -1 || line == -1 || fileno >= len(filetab) { if fileno == -1 || line == -1 || fileno >= len(filetab) {
// print("looking for ", hex(targetpc), " in ", gofuncname(f), " got file=", fileno, " line=", lineno, "\n") // print("looking for ", hex(targetpc), " in ", gofuncname(f), " got file=", fileno, " line=", lineno, "\n")
return 0 return "?", 0
} }
*file = gostringnocopy(&pclntable[filetab[fileno]]) file = gostringnocopy(&pclntable[filetab[fileno]])
return line return
} }
func funcline(f *_func, targetpc uintptr, file *string) int32 { func funcline(f *_func, targetpc uintptr) (file string, line int32) {
return funcline1(f, targetpc, file, true) return funcline1(f, targetpc, true)
} }
func funcspdelta(f *_func, targetpc uintptr) int32 { func funcspdelta(f *_func, targetpc uintptr) int32 {
......
...@@ -32,16 +32,16 @@ const usesLR = GOARCH != "amd64" && GOARCH != "amd64p32" && GOARCH != "386" ...@@ -32,16 +32,16 @@ const usesLR = GOARCH != "amd64" && GOARCH != "amd64p32" && GOARCH != "386"
var ( var (
// initialized in tracebackinit // initialized in tracebackinit
deferprocPC uintptr deferprocPC uintptr
goexitPC uintptr goexitPC uintptr
jmpdeferPC uintptr jmpdeferPC uintptr
mcallPC uintptr mcallPC uintptr
morestackPC uintptr morestackPC uintptr
mstartPC uintptr mstartPC uintptr
newprocPC uintptr newprocPC uintptr
rt0_goPC uintptr rt0_goPC uintptr
sigpanicPC uintptr sigpanicPC uintptr
switchtoMPC uintptr systemstack_switchPC uintptr
externalthreadhandlerp uintptr // initialized elsewhere externalthreadhandlerp uintptr // initialized elsewhere
) )
...@@ -60,7 +60,7 @@ func tracebackinit() { ...@@ -60,7 +60,7 @@ func tracebackinit() {
newprocPC = funcPC(newproc) newprocPC = funcPC(newproc)
rt0_goPC = funcPC(rt0_go) rt0_goPC = funcPC(rt0_go)
sigpanicPC = funcPC(sigpanic) sigpanicPC = funcPC(sigpanic)
switchtoMPC = funcPC(switchtoM) systemstack_switchPC = funcPC(systemstack_switch)
} }
// Traceback over the deferred function calls. // Traceback over the deferred function calls.
...@@ -337,8 +337,7 @@ func gentraceback(pc0 uintptr, sp0 uintptr, lr0 uintptr, gp *g, skip int, pcbuf ...@@ -337,8 +337,7 @@ func gentraceback(pc0 uintptr, sp0 uintptr, lr0 uintptr, gp *g, skip int, pcbuf
print(hex(argp[i])) print(hex(argp[i]))
} }
print(")\n") print(")\n")
var file string file, line := funcline(f, tracepc)
line := funcline(f, tracepc, &file)
print("\t", file, ":", line) print("\t", file, ":", line)
if frame.pc > f.entry { if frame.pc > f.entry {
print(" +", hex(frame.pc-f.entry)) print(" +", hex(frame.pc-f.entry))
...@@ -482,8 +481,7 @@ func printcreatedby(gp *g) { ...@@ -482,8 +481,7 @@ func printcreatedby(gp *g) {
if pc > f.entry { if pc > f.entry {
tracepc -= _PCQuantum tracepc -= _PCQuantum
} }
var file string file, line := funcline(f, tracepc)
line := funcline(f, tracepc, &file)
print("\t", file, ":", line) print("\t", file, ":", line)
if pc > f.entry { if pc > f.entry {
print(" +", hex(pc-f.entry)) print(" +", hex(pc-f.entry))
...@@ -530,7 +528,7 @@ func callers(skip int, pcbuf *uintptr, m int) int { ...@@ -530,7 +528,7 @@ func callers(skip int, pcbuf *uintptr, m int) int {
sp := getcallersp(unsafe.Pointer(&skip)) sp := getcallersp(unsafe.Pointer(&skip))
pc := uintptr(getcallerpc(unsafe.Pointer(&skip))) pc := uintptr(getcallerpc(unsafe.Pointer(&skip)))
var n int var n int
onM(func() { systemstack(func() {
n = gentraceback(pc, sp, 0, getg(), skip, pcbuf, m, nil, nil, 0) n = gentraceback(pc, sp, 0, getg(), skip, pcbuf, m, nil, nil, 0)
}) })
return n return n
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment