Commit 012ceed9 authored by Russ Cox

runtime: make onM and mcall take Go func values

This gives them correct types in Go and also makes it
possible to use them to run Go code on an m stack.

LGTM=iant
R=golang-codereviews, dave, iant
CC=dvyukov, golang-codereviews, khr, r
https://golang.org/cl/137970044
parent 7ba41e99
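A hedged sketch of what the change means for callers (a stand-alone toy by assumption; in the real tree onM is a runtime-internal routine, implemented in assembly below, that first switches to the M/g0 stack before calling fn):

package main

import "fmt"

// Before this CL, M-stack entry points were declared as byte-sized
// placeholder variables (type mFunction) and callers wrote onM(&gc_m).
// After this CL they are ordinary functions and callers pass func values,
// which gives them real types and also allows Go implementations.
func gc_m() { fmt.Println("conceptually running on the M stack") }

// Toy onM with the new signature shape, onM(fn func()); the real onM
// switches stacks before calling fn and switches back afterwards.
func onM(fn func()) { fn() }

func main() {
	onM(gc_m) // was: onM(&gc_m)
}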
@@ -162,7 +162,7 @@ TEXT runtime·gogo(SB), NOSPLIT, $0-4
 	MOVL	gobuf_pc(BX), BX
 	JMP	BX
-// void mcall(void (*fn)(G*))
+// func mcall(fn func(*g))
 // Switch to m->g0's stack, call fn(g).
 // Fn must never return. It should gogo(&g->sched)
 // to keep running g.
@@ -188,6 +188,8 @@ TEXT runtime·mcall(SB), NOSPLIT, $0-4
 	MOVL	SI, g(CX)	// g = m->g0
 	MOVL	(g_sched+gobuf_sp)(SI), SP	// sp = m->g0->sched.sp
 	PUSHL	AX
+	MOVL	DI, DX
+	MOVL	0(DI), DI
 	CALL	DI
 	POPL	AX
 	MOVL	$runtime·badmcall2(SB), AX
@@ -202,7 +204,7 @@ TEXT runtime·mcall(SB), NOSPLIT, $0-4
 TEXT runtime·switchtoM(SB), NOSPLIT, $0-4
 	RET
-// void onM(void (*fn)())
+// func onM(fn func())
 // calls fn() on the M stack.
 // switches to the M stack if not already on it, and
 // switches back when fn() returns.
@@ -227,6 +229,8 @@ TEXT runtime·onM(SB), NOSPLIT, $0-4
 	// call target function
 	ARGSIZE(0)
+	MOVL	DI, DX
+	MOVL	0(DI), DI
 	CALL	DI
 	// switch back to g
@@ -241,6 +245,8 @@ TEXT runtime·onM(SB), NOSPLIT, $0-4
 onm:
 	// already on m stack, just call directly
+	MOVL	DI, DX
+	MOVL	0(DI), DI
 	CALL	DI
 	RET
......
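The two instructions added before each call above are the mechanical consequence of taking a func value: the code pointer is the first word of the object the func value points to, so the assembly loads it with MOVL/MOVQ/MOVW 0(reg) before calling, and keeps the original pointer in the closure-context register (DX on x86, R7 on ARM below). A hedged, gc-toolchain-specific illustration of that layout from ordinary Go (funcval is a local name used here for illustration, not an exported API):

package main

import (
	"fmt"
	"unsafe"
)

// With the gc toolchain, a func value is a pointer to a funcval whose first
// word is the code pointer; any closed-over variables would follow it.
type funcval struct {
	fn uintptr
}

func hello() { fmt.Println("hello") }

func main() {
	f := hello
	// The variable f holds a *funcval; peek at its first word.
	fv := *(**funcval)(unsafe.Pointer(&f))
	fmt.Printf("code pointer of hello: %#x\n", fv.fn)
}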
@@ -153,7 +153,7 @@ TEXT runtime·gogo(SB), NOSPLIT, $0-8
 	MOVQ	gobuf_pc(BX), BX
 	JMP	BX
-// void mcall(void (*fn)(G*))
+// func mcall(fn func(*g))
 // Switch to m->g0's stack, call fn(g).
 // Fn must never return. It should gogo(&g->sched)
 // to keep running g.
@@ -180,6 +180,8 @@ TEXT runtime·mcall(SB), NOSPLIT, $0-8
 	MOVQ	(g_sched+gobuf_sp)(SI), SP	// sp = m->g0->sched.sp
 	PUSHQ	AX
 	ARGSIZE(8)
+	MOVQ	DI, DX
+	MOVQ	0(DI), DI
 	CALL	DI
 	POPQ	AX
 	MOVQ	$runtime·badmcall2(SB), AX
@@ -194,7 +196,7 @@ TEXT runtime·mcall(SB), NOSPLIT, $0-8
 TEXT runtime·switchtoM(SB), NOSPLIT, $0-8
 	RET
-// void onM(void (*fn)())
+// func onM(fn func())
 // calls fn() on the M stack.
 // switches to the M stack if not already on it, and
 // switches back when fn() returns.
@@ -220,6 +222,8 @@ TEXT runtime·onM(SB), NOSPLIT, $0-8
 	// call target function
 	ARGSIZE(0)
+	MOVQ	DI, DX
+	MOVQ	0(DI), DI
 	CALL	DI
 	// switch back to g
@@ -234,6 +238,8 @@ TEXT runtime·onM(SB), NOSPLIT, $0-8
 onm:
 	// already on m stack, just call directly
+	MOVQ	DI, DX
+	MOVQ	0(DI), DI
 	CALL	DI
 	RET
......
@@ -131,7 +131,7 @@ TEXT runtime·gogo(SB), NOSPLIT, $0-4
 	MOVL	gobuf_pc(BX), BX
 	JMP	BX
-// void mcall(void (*fn)(G*))
+// func mcall(fn func(*g))
 // Switch to m->g0's stack, call fn(g).
 // Fn must never return. It should gogo(&g->sched)
 // to keep running g.
@@ -158,6 +158,8 @@ TEXT runtime·mcall(SB), NOSPLIT, $0-4
 	MOVL	(g_sched+gobuf_sp)(SI), SP	// sp = m->g0->sched.sp
 	PUSHQ	AX
 	ARGSIZE(8)
+	MOVL	DI, DX
+	MOVL	0(DI), DI
 	CALL	DI
 	POPQ	AX
 	MOVL	$runtime·badmcall2(SB), AX
@@ -172,7 +174,7 @@ TEXT runtime·mcall(SB), NOSPLIT, $0-4
 TEXT runtime·switchtoM(SB), NOSPLIT, $0-4
 	RET
-// void onM(void (*fn)())
+// func onM(fn func())
 // calls fn() on the M stack.
 // switches to the M stack if not already on it, and
 // switches back when fn() returns.
@@ -198,6 +200,8 @@ TEXT runtime·onM(SB), NOSPLIT, $0-4
 	// call target function
 	ARGSIZE(0)
+	MOVL	DI, DX
+	MOVL	0(DI), DI
 	CALL	DI
 	// switch back to g
@@ -212,6 +216,8 @@ TEXT runtime·onM(SB), NOSPLIT, $0-4
 onm:
 	// already on m stack, just call directly
+	MOVL	DI, DX
+	MOVL	0(DI), DI
 	CALL	DI
 	RET
......
@@ -147,7 +147,7 @@ TEXT runtime·gogo(SB), NOSPLIT, $-4-4
 	MOVW	gobuf_pc(R1), R11
 	B	(R11)
-// void mcall(void (*fn)(G*))
+// func mcall(fn func(*g))
 // Switch to m->g0's stack, call fn(g).
 // Fn must never return. It should gogo(&g->sched)
 // to keep running g.
@@ -173,6 +173,8 @@ TEXT runtime·mcall(SB), NOSPLIT, $-4-4
 	MOVW	(g_sched+gobuf_sp)(g), SP
 	SUB	$8, SP
 	MOVW	R1, 4(SP)
+	MOVW	R0, R7
+	MOVW	0(R0), R0
 	BL	(R0)
 	B	runtime·badmcall2(SB)
 	RET
@@ -187,7 +189,7 @@ TEXT runtime·switchtoM(SB), NOSPLIT, $0-4
 	BL	(R0)	// clobber lr to ensure push {lr} is kept
 	RET
-// void onM(void (*fn)())
+// func onM(fn func())
 // calls fn() on the M stack.
 // switches to the M stack if not already on it, and
 // switches back when fn() returns.
@@ -213,6 +215,8 @@ TEXT runtime·onM(SB), NOSPLIT, $0-4
 	// call target function
 	ARGSIZE(0)
+	MOVW	R0, R7
+	MOVW	0(R0), R0
 	BL	(R0)
 	// switch back to g
@@ -224,6 +228,8 @@ TEXT runtime·onM(SB), NOSPLIT, $0-4
 	RET
 onm:
+	MOVW	R0, R7
+	MOVW	0(R0), R0
 	BL	(R0)
 	RET
......
@@ -31,23 +31,21 @@ type LFNode struct {
 	Pushcnt uintptr
 }
-var (
-	lfstackpush_m,
-	lfstackpop_m mFunction
-)
+func lfstackpush_m()
+func lfstackpop_m()
 func LFStackPush(head *uint64, node *LFNode) {
 	mp := acquirem()
 	mp.ptrarg[0] = unsafe.Pointer(head)
 	mp.ptrarg[1] = unsafe.Pointer(node)
-	onM(&lfstackpush_m)
+	onM(lfstackpush_m)
 	releasem(mp)
 }
 func LFStackPop(head *uint64) *LFNode {
 	mp := acquirem()
 	mp.ptrarg[0] = unsafe.Pointer(head)
-	onM(&lfstackpop_m)
+	onM(lfstackpop_m)
 	node := (*LFNode)(unsafe.Pointer(mp.ptrarg[0]))
 	mp.ptrarg[0] = nil
 	releasem(mp)
@@ -65,17 +63,15 @@ type ParFor struct {
 	wait bool
 }
-var (
-	newparfor_m,
-	parforsetup_m,
-	parfordo_m,
-	parforiters_m mFunction
-)
+func newparfor_m()
+func parforsetup_m()
+func parfordo_m()
+func parforiters_m()
 func NewParFor(nthrmax uint32) *ParFor {
 	mp := acquirem()
 	mp.scalararg[0] = uintptr(nthrmax)
-	onM(&newparfor_m)
+	onM(newparfor_m)
 	desc := (*ParFor)(mp.ptrarg[0])
 	mp.ptrarg[0] = nil
 	releasem(mp)
@@ -93,14 +89,14 @@ func ParForSetup(desc *ParFor, nthr, n uint32, ctx *byte, wait bool, body func(*
 	if wait {
 		mp.scalararg[2] = 1
 	}
-	onM(&parforsetup_m)
+	onM(parforsetup_m)
 	releasem(mp)
 }
 func ParForDo(desc *ParFor) {
 	mp := acquirem()
 	mp.ptrarg[0] = unsafe.Pointer(desc)
-	onM(&parfordo_m)
+	onM(parfordo_m)
 	releasem(mp)
 }
@@ -108,7 +104,7 @@ func ParForIters(desc *ParFor, tid uint32) (uint32, uint32) {
 	mp := acquirem()
 	mp.ptrarg[0] = unsafe.Pointer(desc)
 	mp.scalararg[0] = uintptr(tid)
-	onM(&parforiters_m)
+	onM(parforiters_m)
 	begin := uint32(mp.scalararg[0])
 	end := uint32(mp.scalararg[1])
 	releasem(mp)
......
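The Go call sites above all follow the same shape: pin an M with acquirem, stash arguments in its ptrarg/scalararg slots, run the worker on the M stack with onM, read results back, then releasem. A toy, single-goroutine model of that pattern (toyM, push, and push_m are illustrative names and not runtime API; the real onM switches to the g0 stack and the real slots live on the m struct):

package main

import (
	"fmt"
	"unsafe"
)

// toyM stands in for the per-M argument slots used throughout this CL.
type toyM struct {
	ptrarg    [4]unsafe.Pointer
	scalararg [4]uintptr
}

var mp toyM // the real runtime pins an M with acquirem/releasem

// onM in the runtime switches to the M (g0) stack; the toy just calls fn.
func onM(fn func()) { fn() }

// push_m mirrors an lfstackpush_m-style worker: it reads its arguments
// from the M slots rather than taking parameters.
func push_m() {
	head := (*uint64)(mp.ptrarg[0])
	*head = uint64(mp.scalararg[0])
}

func push(head *uint64, val uint64) {
	mp.ptrarg[0] = unsafe.Pointer(head)
	mp.scalararg[0] = uintptr(val)
	onM(push_m) // after this CL: a plain func value, no & needed
	mp.ptrarg[0] = nil
}

func main() {
	var head uint64
	push(&head, 42)
	fmt.Println(head)
}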
@@ -746,6 +746,8 @@ mdump(G *gp)
 void
 runtime∕debug·WriteHeapDump(uintptr fd)
 {
+	void (*fn)(G*);
+
 	// Stop the world.
 	runtime·semacquire(&runtime·worldsema, false);
 	g->m->gcing = 1;
@@ -762,7 +764,8 @@ runtime∕debug·WriteHeapDump(uintptr fd)
 	// Call dump routine on M stack.
 	runtime·casgstatus(g, Grunning, Gwaiting);
 	g->waitreason = runtime·gostringnocopy((byte*)"dumping heap");
-	runtime·mcall(mdump);
+	fn = mdump;
+	runtime·mcall(&fn);
 	// Reset dump file.
 	dumpfd = 0;
......
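On the C side the same change shows up as fn = mdump; runtime·mcall(&fn): mcall now expects a Go func value, i.e. a pointer to a word whose first entry is the code pointer, and the address of a local C function-pointer variable happens to have exactly that shape. A heavily implementation-dependent Go demonstration of the equivalence (gc toolchain only, purely for illustration; do not write real code this way):

package main

import (
	"fmt"
	"unsafe"
)

func greet() { fmt.Println("called through a hand-built func value") }

func main() {
	// Extract greet's code pointer (the first word of its func value).
	f := greet
	code := **(**uintptr)(unsafe.Pointer(&f))

	// Emulate the C pattern: a local word holding the code pointer,
	// whose address is then treated as a func value.
	word := code
	var g func()
	*(*unsafe.Pointer)(unsafe.Pointer(&g)) = unsafe.Pointer(&word)
	g()
}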
@@ -144,7 +144,7 @@ func gomallocgc(size uintptr, typ *_type, flags int) unsafe.Pointer {
 	if v == nil {
 		mp := acquirem()
 		mp.scalararg[0] = tinySizeClass
-		onM(&mcacheRefill_m)
+		onM(mcacheRefill_m)
 		releasem(mp)
 		s = c.alloc[tinySizeClass]
 		v = s.freelist
@@ -175,7 +175,7 @@ func gomallocgc(size uintptr, typ *_type, flags int) unsafe.Pointer {
 	if v == nil {
 		mp := acquirem()
 		mp.scalararg[0] = uintptr(sizeclass)
-		onM(&mcacheRefill_m)
+		onM(mcacheRefill_m)
 		releasem(mp)
 		s = c.alloc[sizeclass]
 		v = s.freelist
@@ -196,7 +196,7 @@ func gomallocgc(size uintptr, typ *_type, flags int) unsafe.Pointer {
 	mp := acquirem()
 	mp.scalararg[0] = uintptr(size)
 	mp.scalararg[1] = uintptr(flags)
-	onM(&largeAlloc_m)
+	onM(largeAlloc_m)
 	s = (*mspan)(mp.ptrarg[0])
 	mp.ptrarg[0] = nil
 	releasem(mp)
@@ -246,7 +246,7 @@ func gomallocgc(size uintptr, typ *_type, flags int) unsafe.Pointer {
 	mp.ptrarg[1] = unsafe.Pointer(typ)
 	mp.scalararg[0] = uintptr(size)
 	mp.scalararg[1] = uintptr(size0)
-	onM(&unrollgcproginplace_m)
+	onM(unrollgcproginplace_m)
 	releasem(mp)
 	goto marked
 	}
@@ -255,7 +255,7 @@ func gomallocgc(size uintptr, typ *_type, flags int) unsafe.Pointer {
 	if uintptr(atomicloadp(unsafe.Pointer(ptrmask)))&0xff == 0 {
 		mp := acquirem()
 		mp.ptrarg[0] = unsafe.Pointer(typ)
-		onM(&unrollgcprog_m)
+		onM(unrollgcprog_m)
 		releasem(mp)
 	}
 	ptrmask = (*uint8)(add(unsafe.Pointer(ptrmask), 1)) // skip the unroll flag byte
@@ -459,7 +459,7 @@ func gogc(force int32) {
 	} else {
 		mp.scalararg[2] = 0
 	}
-	onM(&gc_m)
+	onM(gc_m)
 	}
 	// all done
@@ -571,7 +571,7 @@ func SetFinalizer(obj interface{}, finalizer interface{}) {
 	// switch to M stack and remove finalizer
 	mp := acquirem()
 	mp.ptrarg[0] = e.data
-	onM(&removeFinalizer_m)
+	onM(removeFinalizer_m)
 	releasem(mp)
 	return
 	}
@@ -624,7 +624,7 @@ okarg:
 	mp.scalararg[0] = nret
 	mp.ptrarg[2] = unsafe.Pointer(fint)
 	mp.ptrarg[3] = unsafe.Pointer(ot)
-	onM(&setFinalizer_m)
+	onM(setFinalizer_m)
 	if mp.scalararg[0] != 1 {
 		gothrow("runtime.SetFinalizer: finalizer already set")
 	}
......
@@ -65,8 +65,11 @@ freemcache_m(G *gp)
 void
 runtime·freemcache(MCache *c)
 {
+	void (*fn)(G*);
+
 	g->m->ptrarg[0] = c;
-	runtime·mcall(freemcache_m);
+	fn = freemcache_m;
+	runtime·mcall(&fn);
 }
 // Gets a span that has a free object in it and assigns it
......
@@ -1141,6 +1141,7 @@ runtime·updatememstats(GCStats *stats)
 	int32 i;
 	uint64 smallfree;
 	uint64 *src, *dst;
+	void (*fn)(G*);
 	if(stats)
 		runtime·memclr((byte*)stats, sizeof(*stats));
@@ -1177,8 +1178,10 @@ runtime·updatememstats(GCStats *stats)
 	// Flush MCache's to MCentral.
 	if(g == g->m->g0)
 		flushallmcaches();
-	else
-		runtime·mcall(flushallmcaches_m);
+	else {
+		fn = flushallmcaches_m;
+		runtime·mcall(&fn);
+	}
 	// Aggregate local stats.
 	cachestats();
......
@@ -37,7 +37,7 @@ func gc_unixnanotime(now *int64) {
 func freeOSMemory() {
 	gogc(2) // force GC and do eager sweep
-	onM(&scavenge_m)
+	onM(scavenge_m)
 }
 var poolcleanup func()
......
@@ -229,6 +229,7 @@ MSpan*
 runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool needzero)
 {
 	MSpan *s;
+	void (*fn)(G*);
 	// Don't do any operations that lock the heap on the G stack.
 	// It might trigger stack growth, and the stack growth code needs
@@ -240,7 +241,8 @@ runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool
 		g->m->scalararg[0] = npage;
 		g->m->scalararg[1] = sizeclass;
 		g->m->scalararg[2] = large;
-		runtime·mcall(mheap_alloc_m);
+		fn = mheap_alloc_m;
+		runtime·mcall(&fn);
 		s = g->m->ptrarg[0];
 		g->m->ptrarg[0] = nil;
 	}
@@ -488,13 +490,16 @@ mheap_free_m(G *gp)
 void
 runtime·MHeap_Free(MHeap *h, MSpan *s, int32 acct)
 {
+	void (*fn)(G*);
+
 	if(g == g->m->g0) {
 		mheap_free(h, s, acct);
 	} else {
 		g->m->ptrarg[0] = h;
 		g->m->ptrarg[1] = s;
 		g->m->scalararg[0] = acct;
-		runtime·mcall(mheap_free_m);
+		fn = mheap_free_m;
+		runtime·mcall(&fn);
 	}
 }
......
@@ -214,6 +214,7 @@ runtime·panic(Eface e)
 	Defer *d, dabort;
 	Panic p;
 	uintptr pc, argp;
+	void (*fn)(G*);
 	runtime·memclr((byte*)&p, sizeof p);
 	p.arg = e;
@@ -266,7 +267,8 @@ runtime·panic(Eface e)
 			// Pass information about recovering frame to recovery.
 			g->sigcode0 = (uintptr)argp;
 			g->sigcode1 = (uintptr)pc;
-			runtime·mcall(recovery);
+			fn = recovery;
+			runtime·mcall(&fn);
 			runtime·throw("recovery failed"); // mcall should not return
 		}
 	}
......
@@ -1439,10 +1439,13 @@ dropg(void)
 void
 runtime·park(bool(*unlockf)(G*, void*), void *lock, String reason)
 {
+	void (*fn)(G*);
+
 	g->m->waitlock = lock;
 	g->m->waitunlockf = unlockf;
 	g->waitreason = reason;
-	runtime·mcall(runtime·park_m);
+	fn = runtime·park_m;
+	runtime·mcall(&fn);
 }
 bool
@@ -1487,7 +1490,10 @@ runtime·park_m(G *gp)
 void
 runtime·gosched(void)
 {
-	runtime·mcall(runtime·gosched_m);
+	void (*fn)(G*);
+
+	fn = runtime·gosched_m;
+	runtime·mcall(&fn);
 }
 // runtime·gosched continuation on g0.
@@ -1518,9 +1524,12 @@ runtime·gosched_m(G *gp)
 void
 runtime·goexit(void)
 {
+	void (*fn)(G*);
+
 	if(raceenabled)
 		runtime·racegoend();
-	runtime·mcall(goexit0);
+	fn = goexit0;
+	runtime·mcall(&fn);
 }
 // runtime·goexit continuation on g0.
@@ -1689,6 +1698,8 @@ runtime·entersyscallblock_m(void)
 void
 runtime·exitsyscall(void)
 {
+	void (*fn)(G*);
+
 	g->m->locks++;	// see comment in entersyscall
 	g->waitsince = 0;
@@ -1716,7 +1727,8 @@ runtime·exitsyscall(void)
 	g->m->locks--;
 	// Call the scheduler.
-	runtime·mcall(exitsyscall0);
+	fn = exitsyscall0;
+	runtime·mcall(&fn);
 	// Scheduler returned, so we're allowed to run now.
 	// Delete the gcstack information that we left for
@@ -1858,6 +1870,7 @@ runtime·malg(int32 stacksize)
 {
 	G *newg;
 	byte *stk;
+	void (*fn)(G*);
 	if(StackTop < sizeof(Stktop)) {
 		runtime·printf("runtime: SizeofStktop=%d, should be >=%d\n", (int32)StackTop, (int32)sizeof(Stktop));
@@ -1874,7 +1887,8 @@ runtime·malg(int32 stacksize)
 	// have to call stackalloc on scheduler stack.
 	newg->stacksize = stacksize;
 	g->param = newg;
-	runtime·mcall(mstackalloc);
+	fn = mstackalloc;
+	runtime·mcall(&fn);
 	stk = g->param;
 	g->param = nil;
 	}
@@ -1915,6 +1929,7 @@ void
 runtime·newproc(int32 siz, FuncVal* fn, ...)
 {
 	byte *argp;
+	void (*mfn)(void);
 	if(thechar == '5')
 		argp = (byte*)(&fn+2);	// skip caller's saved LR
@@ -1926,7 +1941,8 @@ runtime·newproc(int32 siz, FuncVal* fn, ...)
 	g->m->scalararg[1] = (uintptr)runtime·getcallerpc(&siz);
 	g->m->ptrarg[0] = argp;
 	g->m->ptrarg[1] = fn;
-	runtime·onM(newproc_m);
+	mfn = newproc_m;
+	runtime·onM(&mfn);
 	g->m->locks--;
 }
@@ -2090,6 +2106,7 @@ gfget(P *p)
 {
 	G *gp;
 	byte *stk;
+	void (*fn)(G*);
 retry:
 	gp = p->gfree;
@@ -2117,7 +2134,8 @@ retry:
 	} else {
 		gp->stacksize = FixedStack;
 		g->param = gp;
-		runtime·mcall(mstackalloc);
+		fn = mstackalloc;
+		runtime·mcall(&fn);
 		stk = g->param;
 		g->param = nil;
 	}
......
@@ -55,7 +55,7 @@ func forcegchelper() {
 // Gosched yields the processor, allowing other goroutines to run. It does not
 // suspend the current goroutine, so execution resumes automatically.
 func Gosched() {
-	mcall(&gosched_m)
+	mcall(gosched_m)
 }
 func readgStatus(gp *g) uint32 {
@@ -77,7 +77,7 @@ func gopark(unlockf unsafe.Pointer, lock unsafe.Pointer, reason string) {
 	gp.waitreason = reason
 	releasem(mp)
 	// can't do anything that might move the G between Ms here.
-	mcall(&park_m)
+	mcall(park_m)
 }
 // Puts the current goroutine into a waiting state and unlocks the lock.
@@ -89,7 +89,7 @@ func goparkunlock(lock *mutex, reason string) {
 func goready(gp *g) {
 	mp := acquirem()
 	mp.ptrarg[0] = unsafe.Pointer(gp)
-	onM(&ready_m)
+	onM(ready_m)
 	releasem(mp)
 }
......
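The mcall callers above (Gosched, gopark) rely on mcall's contract, which this CL leaves unchanged apart from the argument type: fn runs on the g0 stack with the calling g as its argument, must not return, and must itself arrange for that g to run again. A loose analogy in plain Go (toyG and the channel handshake are illustrative stand-ins for the real stack switch and scheduler, not runtime API):

package main

import "fmt"

type toyG struct{ resume chan struct{} }

// mcall parks the "current goroutine" and hands it to fn; fn never returns
// control to mcall's caller directly and must make the goroutine runnable
// again itself (the real fn calls gogo(&g->sched) or requeues g).
func mcall(fn func(*toyG)) {
	gp := &toyG{resume: make(chan struct{})}
	go fn(gp)   // stand-in for switching to the g0 stack
	<-gp.resume // blocked until fn reschedules us
}

func gosched_m(gp *toyG) {
	fmt.Println("on the m stack: putting g back on the run queue")
	close(gp.resume) // stand-in for the scheduler picking g up again
}

func main() {
	mcall(gosched_m)
	fmt.Println("g resumed")
}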
@@ -13,7 +13,7 @@ func setMaxStack(in int) (out int) {
 func setGCPercent(in int32) (out int32) {
 	mp := acquirem()
 	mp.scalararg[0] = uintptr(int(in))
-	onM(&setgcpercent_m)
+	onM(setgcpercent_m)
 	out = int32(int(mp.scalararg[0]))
 	releasem(mp)
 	return out
@@ -30,7 +30,7 @@ func setPanicOnFault(new bool) (old bool) {
 func setMaxThreads(in int) (out int) {
 	mp := acquirem()
 	mp.scalararg[0] = uintptr(in)
-	onM(&setmaxthreads_m)
+	onM(setmaxthreads_m)
 	out = int(mp.scalararg[0])
 	releasem(mp)
 	return out
......
@@ -809,8 +809,8 @@ void	runtime·runpanic(Panic*);
 uintptr	runtime·getcallersp(void*);
 int32	runtime·mcount(void);
 int32	runtime·gcount(void);
-void	runtime·mcall(void(*)(G*));
-void	runtime·onM(void(*)(void));
+void	runtime·mcall(void(**)(G*));
+void	runtime·onM(void(**)(void));
 uint32	runtime·fastrand1(void);
 void	runtime·rewindmorestack(Gobuf*);
 int32	runtime·timediv(int64, int32, int32*);
......
@@ -9,7 +9,7 @@ package runtime
 func signal_recv() (m uint32) {
 	for {
 		mp := acquirem()
-		onM(&signal_recv_m)
+		onM(signal_recv_m)
 		ok := mp.scalararg[0] != 0
 		m = uint32(mp.scalararg[1])
 		releasem(mp)
@@ -24,19 +24,17 @@ func signal_recv() (m uint32) {
 func signal_enable(s uint32) {
 	mp := acquirem()
 	mp.scalararg[0] = uintptr(s)
-	onM(&signal_enable_m)
+	onM(signal_enable_m)
 	releasem(mp)
 }
 func signal_disable(s uint32) {
 	mp := acquirem()
 	mp.scalararg[0] = uintptr(s)
-	onM(&signal_disable_m)
+	onM(signal_disable_m)
 	releasem(mp)
 }
-var (
-	signal_recv_m,
-	signal_enable_m,
-	signal_disable_m mFunction
-)
+func signal_recv_m()
+func signal_enable_m()
+func signal_disable_m()
@@ -57,37 +57,34 @@ func acquirem() *m
 func releasem(mp *m)
 func gomcache() *mcache
-// An mFunction represents a C function that runs on the M stack. It
-// can be called from Go using mcall or onM. Through the magic of
-// linking, an mFunction variable and the corresponding C code entry
-// point live at the same address.
-type mFunction byte
 // in asm_*.s
-func mcall(fn *mFunction)
-func onM(fn *mFunction)
+func mcall(func(*g))
+func onM(fn func())
-// C functions that run on the M stack. Call these like
-//   mcall(&mcacheRefill_m)
-// Arguments should be passed in m->scalararg[x] and
-// m->ptrarg[x]. Return values can be passed in those
-// same slots.
-var (
-	mcacheRefill_m,
-	largeAlloc_m,
-	gc_m,
-	scavenge_m,
-	setFinalizer_m,
-	removeFinalizer_m,
-	markallocated_m,
-	unrollgcprog_m,
-	unrollgcproginplace_m,
-	gosched_m,
-	setgcpercent_m,
-	setmaxthreads_m,
-	ready_m,
-	park_m mFunction
-)
+// C functions that run on the M stack.
+// Call using mcall.
+// These functions need to be written to arrange explicitly
+// for the goroutine to continue execution.
+func gosched_m(*g)
+func park_m(*g)
+
+// More C functions that run on the M stack.
+// Call using onM.
+// Arguments should be passed in m->scalararg[x] and m->ptrarg[x].
+// Return values can be passed in those same slots.
+// These functions return to the goroutine when they return.
+func mcacheRefill_m()
+func largeAlloc_m()
+func gc_m()
+func scavenge_m()
+func setFinalizer_m()
+func removeFinalizer_m()
+func markallocated_m()
+func unrollgcprog_m()
+func unrollgcproginplace_m()
+func setgcpercent_m()
+func setmaxthreads_m()
+func ready_m()
 // memclr clears n bytes starting at ptr.
 // in memclr_*.s
......