Commit 84d2c7ea authored by Austin Clements

runtime: dynamically allocate allp

This makes it possible to eliminate the hard cap on GOMAXPROCS.

Updates #15131.

Change-Id: I4c422b340791621584c118a6be1b38e8a44f8b70
Reviewed-on: https://go-review.googlesource.com/45573
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Rick Hudson <rlh@golang.org>
parent 197f9ba1
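
The heart of the change is replacing the fixed-size allp array (whose length imposed the hard _MaxGomaxprocs cap) with a slice that procresize grows and trims under a new allpLock. Below is a minimal, self-contained sketch of that pattern; the names (proc, procs, procsLock, resize) are illustrative stand-ins, not the runtime's own, and sync.Mutex stands in for the runtime's mutex:

package main

import (
	"fmt"
	"sync"
)

// proc stands in for the runtime's per-P state.
type proc struct{ id int32 }

var (
	procsLock sync.Mutex // plays the role of allpLock
	procs     []*proc    // plays the role of allp
)

// resize grows or trims procs to n entries, reusing the backing
// array and previously allocated entries whenever possible.
func resize(n int32) {
	procsLock.Lock()
	defer procsLock.Unlock()
	if n <= int32(cap(procs)) {
		procs = procs[:n] // cheap: reslice the existing backing array
	} else {
		grown := make([]*proc, n)
		copy(grown, procs[:cap(procs)]) // never lose old allocated entries
		procs = grown
	}
	for i, p := range procs {
		if p == nil {
			procs[i] = &proc{id: int32(i)}
		}
	}
}

func main() {
	resize(4)
	resize(2) // trim: entries beyond len survive in the backing array
	resize(8) // grow: the two hidden entries are carried over intact
	fmt.Println(len(procs), cap(procs)) // 8 8
}
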
@@ -465,7 +465,7 @@ func (c *gcControllerState) startCycle() {
 	}
 
 	// Clear per-P state
-	for _, p := range &allp {
+	for _, p := range allp {
 		if p == nil {
 			break
 		}
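
The loops here and below ranged over &allp because ranging over a large array by value copies the whole array first; ranging over a pointer to the array avoids the copy. A slice header is only three words, so once allp is a slice, plain "range allp" suffices. A small sketch of the difference, with illustrative names:

package main

import "fmt"

func main() {
	arr := [4]*int{new(int), new(int), nil, nil}

	// Ranging over an array copies the whole array first, so the old
	// code ranged over &arr (a pointer to the array) to avoid that.
	for i, p := range &arr {
		if p == nil {
			break
		}
		fmt.Println("array element", i)
	}

	// Copying a slice header in range is free; once allp is a slice,
	// plain "range allp" is enough.
	for i, p := range arr[:] {
		if p == nil {
			break
		}
		fmt.Println("slice element", i)
	}
}
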
@@ -1662,7 +1662,7 @@ func gcMarkTermination(nextTriggerRatio float64) {
 func gcBgMarkStartWorkers() {
 	// Background marking is performed by per-P G's. Ensure that
 	// each P has a background GC G.
-	for _, p := range &allp {
+	for _, p := range allp {
 		if p == nil || p.status == _Pdead {
 			break
 		}
...
@@ -1356,7 +1356,7 @@ func gcmarknewobject(obj, size, scanSize uintptr) {
 //
 // The world must be stopped.
 func gcMarkTinyAllocs() {
-	for _, p := range &allp {
+	for _, p := range allp {
 		if p == nil || p.status == _Pdead {
 			break
 		}
...
@@ -589,9 +589,13 @@ func updatememstats() {
 	memstats.heap_objects = memstats.nmalloc - memstats.nfree
 }
 
+// cachestats flushes all mcache stats.
+//
+// The world must be stopped.
+//
 //go:nowritebarrier
 func cachestats() {
-	for _, p := range &allp {
+	for _, p := range allp {
 		if p == nil {
 			break
 		}
...
@@ -3231,7 +3231,7 @@ func badunlockosthread() {
 
 func gcount() int32 {
 	n := int32(allglen) - sched.ngfree - int32(atomic.Load(&sched.ngsys))
-	for _, _p_ := range &allp {
+	for _, _p_ := range allp {
 		if _p_ == nil {
 			break
 		}
@@ -3543,6 +3543,23 @@ func procresize(nprocs int32) *p {
 	}
 	sched.procresizetime = now
 
+	// Grow allp if necessary.
+	if nprocs > int32(len(allp)) {
+		// Synchronize with retake, which could be running
+		// concurrently since it doesn't run on a P.
+		lock(&allpLock)
+		if nprocs <= int32(cap(allp)) {
+			allp = allp[:nprocs]
+		} else {
+			nallp := make([]*p, nprocs)
+			// Copy everything up to allp's cap so we
+			// never lose old allocated Ps.
+			copy(nallp, allp[:cap(allp)])
+			allp = nallp
+		}
+		unlock(&allpLock)
+	}
+
 	// initialize new P's
 	for i := int32(0); i < nprocs; i++ {
 		pp := allp[i]
@@ -3631,6 +3648,13 @@ func procresize(nprocs int32) *p {
 		// can't free P itself because it can be referenced by an M in syscall
 	}
 
+	// Trim allp.
+	if int32(len(allp)) != nprocs {
+		lock(&allpLock)
+		allp = allp[:nprocs]
+		unlock(&allpLock)
+	}
+
 	_g_ := getg()
 	if _g_.m.p != 0 && _g_.m.p.ptr().id < nprocs {
 		// continue to use the current P
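
Trimming with allp[:nprocs] hides entries without freeing them: old Ps stay live in the backing array up to cap(allp), which both the grow path above and the trace loops over allp[:cap(allp)] below depend on. A sketch of that retention property, with illustrative names:

package main

import "fmt"

func main() {
	// Reslicing with s = s[:n] hides entries but does not free them:
	// they remain live in the backing array up to cap(s).
	procs := []*int{new(int), new(int), new(int), new(int)}
	procs = procs[:2] // "trim", as procresize does with allp[:nprocs]

	fmt.Println(len(procs), cap(procs)) // 2 4

	// The hidden entries stay reachable through the cap, which is
	// what loops over allp[:cap(allp)] rely on for dead Ps.
	for i, p := range procs[:cap(procs)] {
		fmt.Println(i, p != nil) // all four entries are still non-nil
	}
}
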
@@ -3956,7 +3980,10 @@ const forcePreemptNS = 10 * 1000 * 1000 // 10ms
 func retake(now int64) uint32 {
 	n := 0
-	for i := int32(0); i < gomaxprocs; i++ {
+	// Prevent allp slice changes. This lock will be completely
+	// uncontended unless we're already stopping the world.
+	lock(&allpLock)
+	for i := 0; i < len(allp); i++ {
 		_p_ := allp[i]
 		if _p_ == nil {
 			continue
 		}
@@ -3977,6 +4004,8 @@ func retake(now int64) uint32 {
 			if runqempty(_p_) && atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now {
 				continue
 			}
+			// Drop allpLock so we can take sched.lock.
+			unlock(&allpLock)
 			// Need to decrement number of idle locked M's
 			// (pretending that one more is running) before the CAS.
 			// Otherwise the M from which we retake can exit the syscall,
@@ -3992,6 +4021,7 @@ func retake(now int64) uint32 {
 				handoffp(_p_)
 			}
 			incidlelocked(1)
+			lock(&allpLock)
 		} else if s == _Prunning {
 			// Preempt G if it's running for too long.
 			t := int64(_p_.schedtick)
@@ -4006,6 +4036,7 @@ func retake(now int64) uint32 {
 				preemptone(_p_)
 			}
 		}
 	}
+	unlock(&allpLock)
 	return uint32(n)
 }
...
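
retake runs on sysmon's M, not on a P, so it must hold allpLock across its slice accesses, and it uses an index loop rather than range so that len(allp) is re-read after the lock is dropped and re-taken around work that acquires sched.lock. A runnable sketch of the same locking discipline; the names (items, itemsLock, sweep) are stand-ins:

package main

import (
	"fmt"
	"sync"
)

var (
	itemsLock sync.Mutex // plays the role of allpLock
	items     []*int     // plays the role of allp
)

// sweep mirrors retake's discipline: hold the lock while touching
// the slice, drop it around work that takes other locks, then
// re-acquire before the next iteration.
func sweep() int {
	n := 0
	itemsLock.Lock()
	// An index loop, not a range loop: len(items) is re-read every
	// iteration, which stays correct if the slice is swapped while
	// the lock is temporarily dropped.
	for i := 0; i < len(items); i++ {
		p := items[i]
		if p == nil {
			continue
		}
		itemsLock.Unlock() // drop the lock before "blocking" work
		n += *p            // stand-in for the handoffp/sched.lock work
		itemsLock.Lock()   // re-acquire before touching the slice again
	}
	itemsLock.Unlock()
	return n
}

func main() {
	one, two := 1, 2
	items = []*int{&one, nil, &two}
	fmt.Println(sweep()) // 3
}
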
@@ -722,7 +722,8 @@ const _TracebackMaxFrames = 100
 var (
 	allglen    uintptr
 	allm       *m
-	allp       [_MaxGomaxprocs + 1]*p
+	allp       []*p  // len(allp) == gomaxprocs; may change at safe points, otherwise immutable
+	allpLock   mutex // Protects P-less reads of allp and all writes
 	gomaxprocs int32
 	ncpu       int32
 	forcegc    forcegcstate
...
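
The comments on allp and allpLock encode the access rules: code running on a P may read allp directly, because the slice header only changes at safe points; P-less readers and all writers must hold allpLock. A sketch of those rules with stand-in declarations (sync.Mutex in place of the runtime's own mutex):

package main

import (
	"fmt"
	"sync"
)

type p struct{} // stand-in for the runtime's P

var (
	allpLock sync.Mutex
	allp     []*p
)

// lenLocked is how a P-less reader (sysmon, say) would observe allp:
// under allpLock, since it cannot rely on safe points.
func lenLocked() int {
	allpLock.Lock()
	defer allpLock.Unlock()
	return len(allp)
}

// A reader running on a P between safe points could read len(allp)
// directly; the slice header is only replaced at safe points.

func main() {
	allpLock.Lock()
	allp = append(allp, new(p)) // writers always hold the lock
	allpLock.Unlock()
	fmt.Println(lenLocked())
}
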
@@ -277,7 +277,9 @@ func StopTrace() {
 	traceGoSched()
 
-	for _, p := range &allp {
+	// Loop over all allocated Ps because dead Ps may still have
+	// trace buffers.
+	for _, p := range allp[:cap(allp)] {
 		if p == nil {
 			break
 		}
@@ -320,7 +322,7 @@ func StopTrace() {
 	// The lock protects us from races with StartTrace/StopTrace because they do stop-the-world.
 	lock(&trace.lock)
-	for _, p := range &allp {
+	for _, p := range allp[:cap(allp)] {
 		if p == nil {
 			break
 		}
...