Commit 7ed7669c authored by Michael Anthony Knyszek, committed by Michael Knyszek

runtime: ensure mheap lock stack growth invariant is maintained

The runtime has an invariant that the heap lock may only be acquired on
the system stack; otherwise, a goroutine could self-deadlock if its
stack grows while the lock is held.
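
In code, the hazard and the fix look roughly like this (a minimal
sketch; doSomething is a hypothetical call standing in for any code run
under the lock):

    // On an ordinary goroutine stack, any function call can hit a
    // stack-split check. Growing the stack may need to allocate stack
    // memory through the heap, which takes mheap_.lock -- the lock
    // already held here, so the goroutine deadlocks against itself.
    lock(&mheap_.lock)
    doSomething() // a stack growth here would self-deadlock
    unlock(&mheap_.lock)

    // The fix: the system stack is fixed-size and never grows, so no
    // stack-split check can fire inside the critical section.
    systemstack(func() {
        lock(&mheap_.lock)
        doSomething()
        unlock(&mheap_.lock)
    })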

This invariant is upheld and documented in a number of places (e.g.
allocManual, freeManual), but there are others where it is either not
maintained at all, risking self-deadlock (e.g. setGCPercent,
gcResetMarkState, allocmcache), or is maintained but undocumented (e.g.
gcSweep, readGCStats_m).

This change adds //go:systemstack to any function that acquires the heap
lock, or wraps the critical section in systemstack(func() { ... }), as
appropriate. It also documents the invariant on (*mheap).lock directly
and updates repetitive documentation to refer to that comment.
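
Concretely, the two forms applied throughout the diff look like this
(a minimal sketch; touchHeap is a hypothetical name):

    // Form 1: hop to the system stack around just the critical section.
    systemstack(func() {
        lock(&mheap_.lock)
        // ... manipulate heap state ...
        unlock(&mheap_.lock)
    })

    // Form 2: require the whole function to run on the system stack.
    // The //go:systemstack directive is runtime-only and is enforced
    // by a dynamic check in the function prologue.
    //go:systemstack
    func touchHeap() {
        lock(&mheap_.lock)
        // ... manipulate heap state ...
        unlock(&mheap_.lock)
    }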

Fixes #32105.

Change-Id: I702b1290709c118b837389c78efde25c51a2cafb
Reviewed-on: https://go-review.googlesource.com/c/go/+/177857
Run-TryBot: Michael Knyszek <mknyszek@google.com>
Reviewed-by: Austin Clements <austin@google.com>
parent db325553
@@ -545,18 +545,23 @@ type Span struct {
 }

 func AllocSpan(base, npages uintptr, scavenged bool) Span {
-    lock(&mheap_.lock)
-    s := (*mspan)(mheap_.spanalloc.alloc())
-    unlock(&mheap_.lock)
+    var s *mspan
+    systemstack(func() {
+        lock(&mheap_.lock)
+        s = (*mspan)(mheap_.spanalloc.alloc())
+        unlock(&mheap_.lock)
+    })
     s.init(base, npages)
     s.scavenged = scavenged
     return Span{s}
 }

 func (s *Span) Free() {
-    lock(&mheap_.lock)
-    mheap_.spanalloc.free(unsafe.Pointer(s.mspan))
-    unlock(&mheap_.lock)
+    systemstack(func() {
+        lock(&mheap_.lock)
+        mheap_.spanalloc.free(unsafe.Pointer(s.mspan))
+        unlock(&mheap_.lock)
+    })
     s.mspan = nil
 }
@@ -629,9 +634,11 @@ func (t *Treap) Insert(s Span) {
     // allocation which requires the mheap_ lock to manipulate.
     // Locking here is safe because the treap itself never allocs
     // or otherwise ends up grabbing this lock.
-    lock(&mheap_.lock)
-    t.insert(s.mspan)
-    unlock(&mheap_.lock)
+    systemstack(func() {
+        lock(&mheap_.lock)
+        t.insert(s.mspan)
+        unlock(&mheap_.lock)
+    })
     t.CheckInvariants()
 }
@@ -644,17 +651,21 @@ func (t *Treap) Erase(i TreapIter) {
     // freeing which requires the mheap_ lock to manipulate.
     // Locking here is safe because the treap itself never allocs
     // or otherwise ends up grabbing this lock.
-    lock(&mheap_.lock)
-    t.erase(i.treapIter)
-    unlock(&mheap_.lock)
+    systemstack(func() {
+        lock(&mheap_.lock)
+        t.erase(i.treapIter)
+        unlock(&mheap_.lock)
+    })
     t.CheckInvariants()
 }

 func (t *Treap) RemoveSpan(s Span) {
     // See Erase about locking.
-    lock(&mheap_.lock)
-    t.removeSpan(s.mspan)
-    unlock(&mheap_.lock)
+    systemstack(func() {
+        lock(&mheap_.lock)
+        t.removeSpan(s.mspan)
+        unlock(&mheap_.lock)
+    })
     t.CheckInvariants()
 }
@@ -83,10 +83,13 @@ type stackfreelist struct {
 var emptymspan mspan

 func allocmcache() *mcache {
-    lock(&mheap_.lock)
-    c := (*mcache)(mheap_.cachealloc.alloc())
-    c.flushGen = mheap_.sweepgen
-    unlock(&mheap_.lock)
+    var c *mcache
+    systemstack(func() {
+        lock(&mheap_.lock)
+        c = (*mcache)(mheap_.cachealloc.alloc())
+        c.flushGen = mheap_.sweepgen
+        unlock(&mheap_.lock)
+    })
     for i := range c.alloc {
         c.alloc[i] = &emptymspan
     }
@@ -216,16 +216,19 @@ func gcenable() {
 //go:linkname setGCPercent runtime/debug.setGCPercent
 func setGCPercent(in int32) (out int32) {
-    lock(&mheap_.lock)
-    out = gcpercent
-    if in < 0 {
-        in = -1
-    }
-    gcpercent = in
-    heapminimum = defaultHeapMinimum * uint64(gcpercent) / 100
-    // Update pacing in response to gcpercent change.
-    gcSetTriggerRatio(memstats.triggerRatio)
-    unlock(&mheap_.lock)
+    // Run on the system stack since we grab the heap lock.
+    systemstack(func() {
+        lock(&mheap_.lock)
+        out = gcpercent
+        if in < 0 {
+            in = -1
+        }
+        gcpercent = in
+        heapminimum = defaultHeapMinimum * uint64(gcpercent) / 100
+        // Update pacing in response to gcpercent change.
+        gcSetTriggerRatio(memstats.triggerRatio)
+        unlock(&mheap_.lock)
+    })

     // If we just disabled GC, wait for any concurrent GC mark to
     // finish so we always return with no GC running.
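For context, the //go:linkname directive above is what lets the
unexported runtime function back runtime/debug.SetGCPercent; the
debug-side pairing looks roughly like this (a sketch of the pattern,
not quoted from the tree):

    package debug

    // SetGCPercent sets the garbage collection target percentage and
    // returns the previous setting.
    func SetGCPercent(percent int) int {
        return int(setGCPercent(int32(percent)))
    }

    // implemented in package runtime; reached via the go:linkname above
    func setGCPercent(int32) int32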
@@ -1261,7 +1264,7 @@ func gcStart(trigger gcTrigger) {
     gcBgMarkStartWorkers()

-    gcResetMarkState()
+    systemstack(gcResetMarkState)

     work.stwprocs, work.maxprocs = gomaxprocs, gomaxprocs
     if work.stwprocs > ncpu {
@@ -2078,6 +2081,9 @@ func gcMark(start_time int64) {
     }
 }

+// gcSweep must be called on the system stack because it acquires the heap
+// lock. See mheap for details.
+//go:systemstack
 func gcSweep(mode gcMode) {
     if gcphase != _GCoff {
         throw("gcSweep being done but phase is not GCoff")
@@ -2134,6 +2140,11 @@ func gcSweep(mode gcMode) {
 //
 // This is safe to do without the world stopped because any Gs created
 // during or after this will start out in the reset state.
+//
+// gcResetMarkState must be called on the system stack because it acquires
+// the heap lock. See mheap for details.
+//
+//go:systemstack
 func gcResetMarkState() {
     // This may be called during a concurrent phase, so make sure
     // allgs doesn't change.
@@ -29,6 +29,8 @@ const minPhysPageSize = 4096
 //
 //go:notinheap
 type mheap struct {
+    // lock must only be acquired on the system stack, otherwise a g
+    // could self-deadlock if its stack grows with the lock held.
     lock     mutex
     free     mTreap // free spans
     sweepgen uint32 // sweep generation, see comment in mspan
@@ -1095,9 +1097,8 @@ func (h *mheap) alloc(npage uintptr, spanclass spanClass, large bool, needzero bool) *mspan {
 // The memory backing the returned span may not be zeroed if
 // span.needzero is set.
 //
-// allocManual must be called on the system stack to prevent stack
-// growth. Since this is used by the stack allocator, stack growth
-// during allocManual would self-deadlock.
+// allocManual must be called on the system stack because it acquires
+// the heap lock. See mheap for details.
 //
 //go:systemstack
 func (h *mheap) allocManual(npage uintptr, stat *uint64) *mspan {
@@ -1303,8 +1304,8 @@ func (h *mheap) freeSpan(s *mspan, large bool) {
 // This must only be called when gcphase == _GCoff. See mSpanState for
 // an explanation.
 //
-// freeManual must be called on the system stack to prevent stack
-// growth, just like allocManual.
+// freeManual must be called on the system stack because it acquires
+// the heap lock. See mheap for details.
 //
 //go:systemstack
 func (h *mheap) freeManual(s *mspan, stat *uint64) {
@@ -470,6 +470,9 @@ func readGCStats(pauses *[]uint64) {
     })
 }

+// readGCStats_m must be called on the system stack because it acquires the heap
+// lock. See mheap for details.
+//go:systemstack
 func readGCStats_m(pauses *[]uint64) {
     p := *pauses
     // Calling code in runtime/debug should make the slice large enough.
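
The exported readGCStats, whose closing lines are visible at the top of
this hunk, already hops to the system stack before calling the _m
helper; roughly (a sketch consistent with that context):

    func readGCStats(pauses *[]uint64) {
        systemstack(func() {
            readGCStats_m(pauses)
        })
    }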