Commit 3be3cbd5 authored by Austin Clements

runtime: track "scannable" bytes of heap

This tracks the number of scannable bytes in the allocated heap. That
is, bytes that the garbage collector must scan before reaching the
last pointer field in each object.

This will be used to compute a more robust estimate of the GC scan
work.

Change-Id: I1eecd45ef9cdd65b69d2afb5db5da885c80086bb
Reviewed-on: https://go-review.googlesource.com/9695
Reviewed-by: Russ Cox <rsc@golang.org>
parent 53c53984
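For intuition, an object's scannable size is not its full size but only the prefix up to and including its last pointer word, which the type descriptor records as typ.ptrdata. Below is a minimal sketch of the accounting rule this change applies at allocation time; scannableBytes is a hypothetical helper, while the real change updates c.local_scan inline in mallocgc, as the first hunk shows.

// scannableBytes illustrates the rule: a single object contributes
// only the bytes up to its last pointer field (ptrdata); an array
// allocation of total size dataSize contributes every element in
// full except the last, which again contributes only its ptrdata
// prefix.
func scannableBytes(dataSize, typSize, ptrdata uintptr) uintptr {
	if ptrdata == 0 {
		return 0 // no pointers at all: the GC never scans the object
	}
	if dataSize > typSize {
		// Array of dataSize/typSize elements.
		return dataSize - typSize + ptrdata
	}
	return ptrdata
}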
@@ -647,6 +647,16 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
 			dataSize = unsafe.Sizeof(_defer{})
 		}
 		heapBitsSetType(uintptr(x), size, dataSize, typ)
+		if dataSize > typ.size {
+			// Array allocation. If there are any
+			// pointers, GC has to scan to the last
+			// element.
+			if typ.ptrdata != 0 {
+				c.local_scan += dataSize - typ.size + typ.ptrdata
+			}
+		} else {
+			c.local_scan += typ.ptrdata
+		}
 	}

 	// GCmarkterminate allocates black
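To make the array case concrete, take a hypothetical element type on a 64-bit platform: allocating a 4-element array of struct{ p *byte; buf [24]byte } gives typ.size = 32, typ.ptrdata = 8, and dataSize = 128, so local_scan grows by 128 - 32 + 8 = 104 bytes. The first three elements (96 bytes) are scanned in full, plus the 8-byte pointer prefix of the last element; the final element's 24-byte buf tail is never scanned.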
@@ -13,6 +13,7 @@ type mcache struct {
 	// so they are grouped here for better caching.
 	next_sample      int32   // trigger heap sample after allocating this many bytes
 	local_cachealloc uintptr // bytes allocated from cache since last lock of heap
+	local_scan       uintptr // bytes of scannable heap allocated
 	// Allocator cache for tiny objects w/o pointers.
 	// See "Tiny allocator" comment in malloc.go.
 	tiny             unsafe.Pointer
@@ -1279,6 +1279,7 @@ func gcMark(start_time int64) {
 	// Update other GC heap size stats.
 	memstats.heap_live = work.bytesMarked
 	memstats.heap_marked = work.bytesMarked
+	memstats.heap_scan = uint64(gcController.scanWork)
 	if trace.enabled {
 		traceHeapAlloc()
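Seeding heap_scan from gcController.scanWork works together with the scanblock change below: once root scanning is no longer credited to scanWork, scanWork counts only bytes of heap scanned, and because a full mark scans each scannable byte of the live heap once, its total at mark termination is the scannable size of the marked heap.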
@@ -530,6 +530,10 @@ func gcDrainN(gcw *gcWork, scanWork int64) {
 // scanblock scans b as scanobject would, but using an explicit
 // pointer bitmap instead of the heap bitmap.
 //
+// This is used to scan non-heap roots, so it does not update
+// gcw.bytesMarked or gcw.scanWork.
+//
 //go:nowritebarrier
 func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork) {
 	// Use local copies of original parameters, so that a stack trace
@@ -565,8 +569,6 @@ func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork) {
 			i += ptrSize
 		}
 	}
-
-	gcw.scanWork += int64(n)
 }

 // scanobject scans the object starting at b, adding pointers to gcw.
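For background on scanblock's ptrmask: an explicit pointer bitmap carries one bit per pointer-sized word of the scanned block. Here is a self-contained sketch of that scanning pattern, illustrative only; the real scanblock also marks the objects it finds and interacts with the gcWork queues.

// bitmapScan visits every word of mem whose bit is set in ptrmask
// (one bit per pointer-sized word, least-significant bit first) and
// reports the non-nil pointer values found in those words.
func bitmapScan(mem []uintptr, ptrmask []byte, visit func(p uintptr)) {
	for i, w := range mem {
		if (ptrmask[i/8]>>(uint(i)%8))&1 != 0 && w != 0 {
			visit(w)
		}
	}
}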
@@ -398,6 +398,8 @@ func mHeap_Alloc_m(h *mheap, npage uintptr, sizeclass int32, large bool) *mspan
 		// transfer stats from cache to global
 		memstats.heap_live += uint64(_g_.m.mcache.local_cachealloc)
 		_g_.m.mcache.local_cachealloc = 0
+		memstats.heap_scan += uint64(_g_.m.mcache.local_scan)
+		_g_.m.mcache.local_scan = 0
 		memstats.tinyallocs += uint64(_g_.m.mcache.local_tinyallocs)
 		_g_.m.mcache.local_tinyallocs = 0
@@ -656,6 +658,8 @@ func mHeap_Free(h *mheap, s *mspan, acct int32) {
 		lock(&h.lock)
 		memstats.heap_live += uint64(mp.mcache.local_cachealloc)
 		mp.mcache.local_cachealloc = 0
+		memstats.heap_scan += uint64(mp.mcache.local_scan)
+		mp.mcache.local_scan = 0
 		memstats.tinyallocs += uint64(mp.mcache.local_tinyallocs)
 		mp.mcache.local_tinyallocs = 0
 		if acct != 0 {
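Both mheap paths above apply the same pattern as the existing local_cachealloc accounting: each mcache accumulates local_scan with no synchronization, since only the P that owns the cache touches it, and the count is folded into the global stat at points where the heap lock is already held. A generic sketch of that pattern follows, with made-up names (statCache, flushScan, heapScan); the sketch takes the lock itself, whereas the runtime flushes at sites that already hold it.

import "sync"

// statCache stands in for the per-P mcache counters.
type statCache struct {
	localScan uintptr // accumulated lock-free by the owning P
}

var (
	statsLock sync.Mutex // stands in for the runtime's heap lock
	heapScan  uint64     // stands in for memstats.heap_scan
)

// flushScan folds the local count into the global stat and resets it.
func flushScan(c *statCache) {
	statsLock.Lock()
	heapScan += uint64(c.localScan)
	c.localScan = 0
	statsLock.Unlock()
}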
@@ -69,6 +69,11 @@ type mstats struct {
 	// excludes unmarked objects that have not yet been swept.
 	heap_live uint64

+	// heap_scan is the number of bytes of "scannable" heap. This
+	// is the live heap (as counted by heap_live), but omitting
+	// no-scan objects and no-scan tails of objects.
+	heap_scan uint64
+
 	// heap_marked is the number of bytes marked by the previous
 	// GC. After mark termination, heap_live == heap_marked, but
 	// unlike heap_live, heap_marked does not change until the
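A hypothetical illustration of the distinction: with heap_live at 100 MB, of which 40 MB is pointer-free objects (large byte buffers, say) and the remaining 60 MB is objects whose last pointer sits halfway through them, heap_scan would be about 30 MB, so the GC's expected scan work is far smaller than the live heap size alone suggests.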
@@ -340,6 +345,8 @@ func purgecachedstats(c *mcache) {
 	if trace.enabled {
 		traceHeapAlloc()
 	}
+	memstats.heap_scan += uint64(c.local_scan)
+	c.local_scan = 0
 	memstats.tinyallocs += uint64(c.local_tinyallocs)
 	c.local_tinyallocs = 0
 	memstats.nlookup += uint64(c.local_nlookup)