Commit 44ed88a5 authored by Austin Clements

runtime: track the number of active sweepone calls

sweepone returns ^uintptr(0) when there are no more spans to *start*
sweeping, but there may be spans being swept concurrently at the time,
and there's currently no efficient way to tell when the sweeper is
done sweeping all the spans.

We'll need this for concurrent runtime.GC(), so add a count of the
number of active sweepone calls to make it possible to block until
sweeping is truly done.

This is also useful for more accurately printing the gcpacertrace,
since that should be printed after all of the sweeping stats are in
(currently we can print it slightly too early).
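
As a sketch of what the counter enables (a hypothetical helper, not
part of this change), "sweeping is truly done" becomes a single
checkable predicate over the two fields added below:

	// sweepingDone reports whether every span has been swept.
	// sweepdone alone only says there are no more spans to *start*
	// sweeping; sweepers counts the sweepone calls still mid-sweep,
	// so both must be checked.
	func sweepingDone() bool {
		return atomic.Load(&mheap_.sweepdone) != 0 &&
			atomic.Load(&mheap_.sweepers) == 0
	}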

For #18216.

Change-Id: I06e6240c9e7b40aca6fd7b788bb6962107c10a0f
Reviewed-on: https://go-review.googlesource.com/37716
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Rick Hudson <rlh@golang.org>
parent 2919132e
src/runtime/mgcsweep.go
@@ -22,10 +22,6 @@ type sweepdata struct {
 
 	nbgsweep    uint32
 	npausesweep uint32
-
-	// pacertracegen is the sweepgen at which the last pacer trace
-	// "sweep finished" message was printed.
-	pacertracegen uint32
 }
 
 // finishsweep_m ensures that all spans are swept.
@@ -82,16 +78,19 @@ func sweepone() uintptr {
 	// increment locks to ensure that the goroutine is not preempted
 	// in the middle of sweep thus leaving the span in an inconsistent state for next GC
 	_g_.m.locks++
+	if atomic.Load(&mheap_.sweepdone) != 0 {
+		_g_.m.locks--
+		return ^uintptr(0)
+	}
+	atomic.Xadd(&mheap_.sweepers, +1)
+
+	npages := ^uintptr(0)
 	sg := mheap_.sweepgen
 	for {
 		s := mheap_.sweepSpans[1-sg/2%2].pop()
 		if s == nil {
-			mheap_.sweepdone = 1
-			_g_.m.locks--
-			if debug.gcpacertrace > 0 && atomic.Cas(&sweep.pacertracegen, sg-2, sg) {
-				print("pacer: sweep done at heap size ", memstats.heap_live>>20, "MB; allocated ", mheap_.spanBytesAlloc>>20, "MB of spans; swept ", mheap_.pagesSwept, " pages at ", mheap_.sweepPagesPerByte, " pages/byte\n")
-			}
-			return ^uintptr(0)
+			atomic.Store(&mheap_.sweepdone, 1)
+			break
 		}
 		if s.state != mSpanInUse {
 			// This can happen if direct sweeping already
@@ -106,16 +105,25 @@ func sweepone() uintptr {
 		if s.sweepgen != sg-2 || !atomic.Cas(&s.sweepgen, sg-2, sg-1) {
 			continue
 		}
-		npages := s.npages
+		npages = s.npages
 		if !s.sweep(false) {
 			// Span is still in-use, so this returned no
 			// pages to the heap and the span needs to
 			// move to the swept in-use list.
 			npages = 0
 		}
-		_g_.m.locks--
-		return npages
+		break
 	}
+
+	// Decrement the number of active sweepers and if this is the
+	// last one print trace information.
+	if atomic.Xadd(&mheap_.sweepers, -1) == 0 && atomic.Load(&mheap_.sweepdone) != 0 {
+		if debug.gcpacertrace > 0 {
+			print("pacer: sweep done at heap size ", memstats.heap_live>>20, "MB; allocated ", mheap_.spanBytesAlloc>>20, "MB of spans; swept ", mheap_.pagesSwept, " pages at ", mheap_.sweepPagesPerByte, " pages/byte\n")
+		}
+	}
+	_g_.m.locks--
+	return npages
 }
 
 //go:nowritebarrier
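
The placement of the pacer trace is the key change above: the sweepone
call whose decrement brings mheap_.sweepers to zero after sweepdone is
set knows that no other sweep is still in flight, so its stats are
complete. A self-contained sketch of the same last-one-out pattern
outside the runtime (illustrative only; workers, done, and tasks are
made-up names):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

var (
	workers uint32      // active workers, like mheap_.sweepers
	done    uint32      // no more tasks to start, like mheap_.sweepdone
	tasks   int32 = 100 // remaining work items
)

// work mirrors sweepone: bail out early once everything has been
// handed out, otherwise register as an active worker, consume tasks
// until they run out, then deregister. The decrement that reaches
// zero after done is set plays the role of the final pacer-trace
// print in sweepone.
func work() {
	if atomic.LoadUint32(&done) != 0 {
		return
	}
	atomic.AddUint32(&workers, 1)
	for atomic.LoadUint32(&done) == 0 {
		if atomic.AddInt32(&tasks, -1) < 0 {
			atomic.StoreUint32(&done, 1)
		}
	}
	// ^uint32(0) is the standard sync/atomic idiom for subtracting 1.
	if atomic.AddUint32(&workers, ^uint32(0)) == 0 && atomic.LoadUint32(&done) != 0 {
		fmt.Println("last worker out: all tasks finished")
	}
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); work() }()
	}
	wg.Wait()
}

As in sweepone, the early return when done is already set keeps the
fast path from touching the counter at all once the work is finished.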
src/runtime/mheap.go
@@ -35,6 +35,7 @@ type mheap struct {
 	busylarge mSpanList // busy lists of large spans length >= _MaxMHeapList
 	sweepgen  uint32    // sweep generation, see comment in mspan
 	sweepdone uint32    // all spans are swept
+	sweepers  uint32    // number of active sweepone calls
 
 	// allspans is a slice of all mspans ever created. Each mspan
 	// appears exactly once.
@@ -71,7 +72,7 @@ type mheap struct {
 	// on the swept stack.
 	sweepSpans [2]gcSweepBuf
 
-	// _ uint32 // align uint64 fields on 32-bit for atomics
+	_ uint32 // align uint64 fields on 32-bit for atomics
 
 	// Proportional sweep
 	pagesInUse uint64 // pages of spans in stats _MSpanInUse; R/W with mheap.lock
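
The uncommented padding field is a consequence of adding sweepers:
sync/atomic only guarantees that the first word of an allocated
struct is 64-bit aligned on 32-bit platforms, so with an odd number
of uint32 fields in front of them, the uint64 fields accessed
atomically (such as pagesSwept) need an explicit pad. A minimal
illustration (hypothetical struct, not runtime code):

// stats illustrates the rule the padding above preserves: on 32-bit
// platforms, atomic operations on a uint64 require 8-byte alignment,
// and Go only guarantees that for the start of the struct, so field
// offsets must be kept 8-byte aligned by hand.
type stats struct {
	gen    uint32 // three 4-byte fields end at offset 12...
	done   uint32
	active uint32
	_      uint32 // ...so pad to offset 16, an 8-byte boundary
	pages  uint64 // now safe for atomic.AddUint64 on 386/ARM
}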