Commit e900e275 authored by Austin Clements's avatar Austin Clements

runtime: clean up loops over allp

allp now has length gomaxprocs, which means none of allp[i] are nil or
in state _Pdead. This lets us replace several different styles of loops
over allp with normal range loops.

for i := 0; i < gomaxprocs; i++ { ... } loops can simply range over
allp. Likewise, range loops over allp[:gomaxprocs] can just range over
allp.

Loops that check for p == nil || p.state == _Pdead don't need to check
this any more.

Loops that check for p == nil don't have to check this *if* dead Ps
don't affect them. I checked that all such loops are, in fact,
unaffected by dead Ps. One loop was potentially affected, which this
fixes by zeroing p.gcAssistTime in procresize.

Updates #15131.

Change-Id: Ifa1c2a86ed59892eca0610360a75bb613bc6dcee
Reviewed-on: https://go-review.googlesource.com/45575
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Rick Hudson <rlh@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
parent ee55000f
...@@ -466,9 +466,6 @@ func (c *gcControllerState) startCycle() { ...@@ -466,9 +466,6 @@ func (c *gcControllerState) startCycle() {
// Clear per-P state // Clear per-P state
for _, p := range allp { for _, p := range allp {
if p == nil {
break
}
p.gcAssistTime = 0 p.gcAssistTime = 0
} }
...@@ -1663,9 +1660,6 @@ func gcBgMarkStartWorkers() { ...@@ -1663,9 +1660,6 @@ func gcBgMarkStartWorkers() {
// Background marking is performed by per-P G's. Ensure that // Background marking is performed by per-P G's. Ensure that
// each P has a background GC G. // each P has a background GC G.
for _, p := range allp { for _, p := range allp {
if p == nil || p.status == _Pdead {
break
}
if p.gcBgMarkWorker == 0 { if p.gcBgMarkWorker == 0 {
go gcBgMarkWorker(p) go gcBgMarkWorker(p)
notetsleepg(&work.bgMarkReady, -1) notetsleepg(&work.bgMarkReady, -1)
...@@ -1962,8 +1956,8 @@ func gcMark(start_time int64) { ...@@ -1962,8 +1956,8 @@ func gcMark(start_time int64) {
// Double-check that all gcWork caches are empty. This should // Double-check that all gcWork caches are empty. This should
// be ensured by mark 2 before we enter mark termination. // be ensured by mark 2 before we enter mark termination.
for i := 0; i < int(gomaxprocs); i++ { for _, p := range allp {
gcw := &allp[i].gcw gcw := &p.gcw
if !gcw.empty() { if !gcw.empty() {
throw("P has cached GC work at end of mark termination") throw("P has cached GC work at end of mark termination")
} }
......
...@@ -1357,9 +1357,6 @@ func gcmarknewobject(obj, size, scanSize uintptr) { ...@@ -1357,9 +1357,6 @@ func gcmarknewobject(obj, size, scanSize uintptr) {
// The world must be stopped. // The world must be stopped.
func gcMarkTinyAllocs() { func gcMarkTinyAllocs() {
for _, p := range allp { for _, p := range allp {
if p == nil || p.status == _Pdead {
break
}
c := p.mcache c := p.mcache
if c == nil || c.tiny == 0 { if c == nil || c.tiny == 0 {
continue continue
......
...@@ -596,9 +596,6 @@ func updatememstats() { ...@@ -596,9 +596,6 @@ func updatememstats() {
//go:nowritebarrier //go:nowritebarrier
func cachestats() { func cachestats() {
for _, p := range allp { for _, p := range allp {
if p == nil {
break
}
c := p.mcache c := p.mcache
if c == nil { if c == nil {
continue continue
...@@ -614,9 +611,6 @@ func cachestats() { ...@@ -614,9 +611,6 @@ func cachestats() {
//go:nowritebarrier //go:nowritebarrier
func flushmcache(i int) { func flushmcache(i int) {
p := allp[i] p := allp[i]
if p == nil {
return
}
c := p.mcache c := p.mcache
if c == nil { if c == nil {
return return
......
...@@ -997,8 +997,7 @@ func stopTheWorldWithSema() { ...@@ -997,8 +997,7 @@ func stopTheWorldWithSema() {
_g_.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic. _g_.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic.
sched.stopwait-- sched.stopwait--
// try to retake all P's in Psyscall status // try to retake all P's in Psyscall status
for i := 0; i < int(gomaxprocs); i++ { for _, p := range allp {
p := allp[i]
s := p.status s := p.status
if s == _Psyscall && atomic.Cas(&p.status, s, _Pgcstop) { if s == _Psyscall && atomic.Cas(&p.status, s, _Pgcstop) {
if trace.enabled { if trace.enabled {
...@@ -1038,8 +1037,7 @@ func stopTheWorldWithSema() { ...@@ -1038,8 +1037,7 @@ func stopTheWorldWithSema() {
if sched.stopwait != 0 { if sched.stopwait != 0 {
bad = "stopTheWorld: not stopped (stopwait != 0)" bad = "stopTheWorld: not stopped (stopwait != 0)"
} else { } else {
for i := 0; i < int(gomaxprocs); i++ { for _, p := range allp {
p := allp[i]
if p.status != _Pgcstop { if p.status != _Pgcstop {
bad = "stopTheWorld: not stopped (status != _Pgcstop)" bad = "stopTheWorld: not stopped (status != _Pgcstop)"
} }
...@@ -1219,7 +1217,7 @@ func forEachP(fn func(*p)) { ...@@ -1219,7 +1217,7 @@ func forEachP(fn func(*p)) {
sched.safePointFn = fn sched.safePointFn = fn
// Ask all Ps to run the safe point function. // Ask all Ps to run the safe point function.
for _, p := range allp[:gomaxprocs] { for _, p := range allp {
if p != _p_ { if p != _p_ {
atomic.Store(&p.runSafePointFn, 1) atomic.Store(&p.runSafePointFn, 1)
} }
...@@ -1247,8 +1245,7 @@ func forEachP(fn func(*p)) { ...@@ -1247,8 +1245,7 @@ func forEachP(fn func(*p)) {
// Force Ps currently in _Psyscall into _Pidle and hand them // Force Ps currently in _Psyscall into _Pidle and hand them
// off to induce safe point function execution. // off to induce safe point function execution.
for i := 0; i < int(gomaxprocs); i++ { for _, p := range allp {
p := allp[i]
s := p.status s := p.status
if s == _Psyscall && p.runSafePointFn == 1 && atomic.Cas(&p.status, s, _Pidle) { if s == _Psyscall && p.runSafePointFn == 1 && atomic.Cas(&p.status, s, _Pidle) {
if trace.enabled { if trace.enabled {
...@@ -1277,8 +1274,7 @@ func forEachP(fn func(*p)) { ...@@ -1277,8 +1274,7 @@ func forEachP(fn func(*p)) {
if sched.safePointWait != 0 { if sched.safePointWait != 0 {
throw("forEachP: not done") throw("forEachP: not done")
} }
for i := 0; i < int(gomaxprocs); i++ { for _, p := range allp {
p := allp[i]
if p.runSafePointFn != 0 { if p.runSafePointFn != 0 {
throw("forEachP: P did not run fn") throw("forEachP: P did not run fn")
} }
...@@ -2072,9 +2068,8 @@ stop: ...@@ -2072,9 +2068,8 @@ stop:
} }
// check all runqueues once again // check all runqueues once again
for i := 0; i < int(gomaxprocs); i++ { for _, _p_ := range allp {
_p_ := allp[i] if !runqempty(_p_) {
if _p_ != nil && !runqempty(_p_) {
lock(&sched.lock) lock(&sched.lock)
_p_ = pidleget() _p_ = pidleget()
unlock(&sched.lock) unlock(&sched.lock)
...@@ -3229,9 +3224,6 @@ func badunlockosthread() { ...@@ -3229,9 +3224,6 @@ func badunlockosthread() {
func gcount() int32 { func gcount() int32 {
n := int32(allglen) - sched.ngfree - int32(atomic.Load(&sched.ngsys)) n := int32(allglen) - sched.ngfree - int32(atomic.Load(&sched.ngsys))
for _, _p_ := range allp { for _, _p_ := range allp {
if _p_ == nil {
break
}
n -= _p_.gfreecnt n -= _p_.gfreecnt
} }
...@@ -3641,6 +3633,7 @@ func procresize(nprocs int32) *p { ...@@ -3641,6 +3633,7 @@ func procresize(nprocs int32) *p {
raceprocdestroy(p.racectx) raceprocdestroy(p.racectx)
p.racectx = 0 p.racectx = 0
} }
p.gcAssistTime = 0
p.status = _Pdead p.status = _Pdead
// can't free P itself because it can be referenced by an M in syscall // can't free P itself because it can be referenced by an M in syscall
} }
...@@ -3980,9 +3973,14 @@ func retake(now int64) uint32 { ...@@ -3980,9 +3973,14 @@ func retake(now int64) uint32 {
// Prevent allp slice changes. This lock will be completely // Prevent allp slice changes. This lock will be completely
// uncontended unless we're already stopping the world. // uncontended unless we're already stopping the world.
lock(&allpLock) lock(&allpLock)
// We can't use a range loop over allp because we may
// temporarily drop the allpLock. Hence, we need to re-fetch
// allp each time around the loop.
for i := 0; i < len(allp); i++ { for i := 0; i < len(allp); i++ {
_p_ := allp[i] _p_ := allp[i]
if _p_ == nil { if _p_ == nil {
// This can happen if procresize has grown
// allp but not yet created new Ps.
continue continue
} }
pd := &_p_.sysmontick pd := &_p_.sysmontick
...@@ -4044,9 +4042,8 @@ func retake(now int64) uint32 { ...@@ -4044,9 +4042,8 @@ func retake(now int64) uint32 {
// Returns true if preemption request was issued to at least one goroutine. // Returns true if preemption request was issued to at least one goroutine.
func preemptall() bool { func preemptall() bool {
res := false res := false
for i := int32(0); i < gomaxprocs; i++ { for _, _p_ := range allp {
_p_ := allp[i] if _p_.status != _Prunning {
if _p_ == nil || _p_.status != _Prunning {
continue continue
} }
if preemptone(_p_) { if preemptone(_p_) {
...@@ -4102,11 +4099,7 @@ func schedtrace(detailed bool) { ...@@ -4102,11 +4099,7 @@ func schedtrace(detailed bool) {
// We must be careful while reading data from P's, M's and G's. // We must be careful while reading data from P's, M's and G's.
// Even if we hold schedlock, most data can be changed concurrently. // Even if we hold schedlock, most data can be changed concurrently.
// E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil. // E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil.
for i := int32(0); i < gomaxprocs; i++ { for i, _p_ := range allp {
_p_ := allp[i]
if _p_ == nil {
continue
}
mp := _p_.m.ptr() mp := _p_.m.ptr()
h := atomic.Load(&_p_.runqhead) h := atomic.Load(&_p_.runqhead)
t := atomic.Load(&_p_.runqtail) t := atomic.Load(&_p_.runqtail)
...@@ -4124,7 +4117,7 @@ func schedtrace(detailed bool) { ...@@ -4124,7 +4117,7 @@ func schedtrace(detailed bool) {
print("[") print("[")
} }
print(t - h) print(t - h)
if i == gomaxprocs-1 { if i == len(allp)-1 {
print("]\n") print("]\n")
} }
} }
......
...@@ -280,9 +280,6 @@ func StopTrace() { ...@@ -280,9 +280,6 @@ func StopTrace() {
// Loop over all allocated Ps because dead Ps may still have // Loop over all allocated Ps because dead Ps may still have
// trace buffers. // trace buffers.
for _, p := range allp[:cap(allp)] { for _, p := range allp[:cap(allp)] {
if p == nil {
break
}
buf := p.tracebuf buf := p.tracebuf
if buf != 0 { if buf != 0 {
traceFullQueue(buf) traceFullQueue(buf)
...@@ -323,9 +320,6 @@ func StopTrace() { ...@@ -323,9 +320,6 @@ func StopTrace() {
// The lock protects us from races with StartTrace/StopTrace because they do stop-the-world. // The lock protects us from races with StartTrace/StopTrace because they do stop-the-world.
lock(&trace.lock) lock(&trace.lock)
for _, p := range allp[:cap(allp)] { for _, p := range allp[:cap(allp)] {
if p == nil {
break
}
if p.tracebuf != 0 { if p.tracebuf != 0 {
throw("trace: non-empty trace buffer in proc") throw("trace: non-empty trace buffer in proc")
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment