Commit ea9859f8 authored by Austin Clements

runtime: use acquirem/releasem more widely

We've copy-pasted the pattern of releasem in many places. This CL
replaces almost everywhere that manipulates g.m.locks and g.preempt
with calls to acquirem/releasem. There are a few where we do something
more complicated, like where exitsyscall has to restore the stack
bound differently depending on the preempt flag, which this CL leaves
alone.

Change-Id: Ia7a46c261daea6e7802b80e7eb9227499f460433
Reviewed-on: https://go-review.googlesource.com/c/go/+/170064
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
parent ac40a7fb
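For reference, the two helpers this CL standardizes on are small nosplit functions in the runtime; based on the copy-pasted pattern being deleted in the diff below, they look roughly like this (a sketch, not part of this change):

	//go:nosplit
	func acquirem() *m {
		_g_ := getg()
		_g_.m.locks++ // pin the goroutine to its M and disable preemption
		return _g_.m
	}

	//go:nosplit
	func releasem(mp *m) {
		_g_ := getg()
		mp.locks--
		if mp.locks == 0 && _g_.preempt {
			// restore the preemption request in case we've cleared it in newstack
			_g_.stackguard0 = stackPreempt
		}
	}

A caller holds the returned *m for the critical section, so the hand-written locks++/locks-- bookkeeping and the stackPreempt restore collapse into a single acquirem/releasem pair.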
@@ -645,7 +645,7 @@ func ready(gp *g, traceskip int, next bool) {
 	// Mark runnable.
 	_g_ := getg()
-	_g_.m.locks++ // disable preemption because it can be holding p in a local var
+	mp := acquirem() // disable preemption because it can be holding p in a local var
 	if status&^_Gscan != _Gwaiting {
 		dumpgstatus(gp)
 		throw("bad g->status in ready")
@@ -657,10 +657,7 @@ func ready(gp *g, traceskip int, next bool) {
 	if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
 		wakep()
 	}
-	_g_.m.locks--
-	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
-		_g_.stackguard0 = stackPreempt
-	}
+	releasem(mp)
 }
 
 // freezeStopWait is a large value that freezetheworld sets
@@ -1080,9 +1077,7 @@ func stopTheWorldWithSema() {
 }
 
 func startTheWorldWithSema(emitTraceEvent bool) int64 {
-	_g_ := getg()
-	_g_.m.locks++ // disable preemption because it can be holding p in a local var
+	mp := acquirem() // disable preemption because it can be holding p in a local var
 	if netpollinited() {
 		list := netpoll(false) // non-blocking
 		injectglist(&list)
@@ -1132,10 +1127,7 @@ func startTheWorldWithSema(emitTraceEvent bool) int64 {
 		wakep()
 	}
-	_g_.m.locks--
-	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
-		_g_.stackguard0 = stackPreempt
-	}
+	releasem(mp)
 	return startTime
 }
@@ -1464,7 +1456,7 @@ type cgothreadstart struct {
 //go:yeswritebarrierrec
 func allocm(_p_ *p, fn func()) *m {
 	_g_ := getg()
-	_g_.m.locks++ // disable GC because it can be called from sysmon
+	acquirem() // disable GC because it can be called from sysmon
 	if _g_.m.p == 0 {
 		acquirep(_p_) // temporarily borrow p for mallocs in this function
 	}
@@ -1505,10 +1497,7 @@ func allocm(_p_ *p, fn func()) *m {
 	if _p_ == _g_.m.p.ptr() {
 		releasep()
 	}
-	_g_.m.locks--
-	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
-		_g_.stackguard0 = stackPreempt
-	}
+	releasem(_g_.m)
 	return mp
 }
@@ -3255,7 +3244,7 @@ func newproc1(fn *funcval, argp *uint8, narg int32, callergp *g, callerpc uintptr) {
 		_g_.m.throwing = -1 // do not dump full stacks
 		throw("go of nil func value")
 	}
-	_g_.m.locks++ // disable preemption because it can be holding p in a local var
+	acquirem() // disable preemption because it can be holding p in a local var
 	siz := narg
 	siz = (siz + 7) &^ 7
@@ -3350,10 +3339,7 @@ func newproc1(fn *funcval, argp *uint8, narg int32, callergp *g, callerpc uintptr) {
 	if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 && mainStarted {
 		wakep()
 	}
-	_g_.m.locks--
-	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
-		_g_.stackguard0 = stackPreempt
-	}
+	releasem(_g_.m)
 }
 
 // saveAncestors copies previous ancestors of the given caller g and