Commit dc0f0ab7 authored by Austin Clements

runtime: don't count manually-managed spans from heap_{inuse,sys}

Currently, manually-managed spans are included in memstats.heap_inuse
and memstats.heap_sys, but when we export these stats to the user, we
subtract out how much has been allocated for stack spans from both.
This works for now because stacks are the only manually-managed spans
we have.

However, we're about to use manually-managed spans for more things
that don't necessarily have obvious stats we can use to adjust the
user-presented numbers. Prepare for this by changing the accounting so
manually-managed spans don't count toward heap_inuse or heap_sys. This
makes these fields align with the fields presented to the user and
means we don't have to track more statistics just so we can adjust
these statistics.

For #19325.

Change-Id: I5cb35527fd65587ff23339276ba2c3969e2ad98f
Reviewed-on: https://go-review.googlesource.com/38577
Run-TryBot: Austin Clements <austin@google.com>
Reviewed-by: Rick Hudson <rlh@golang.org>
parent 407c56ae
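
To make the accounting change concrete, here is a minimal, hypothetical sketch (not the runtime's actual code; the type, field names, and page size are simplified stand-ins) of how charging each span to a caller-supplied stat keeps manually-managed spans out of heap_inuse and heap_sys:

    package main

    import "fmt"

    // Simplified model of span accounting after this change. The field
    // names mirror memstats, but this is illustrative only.
    type memStats struct {
    	heapInuse   uint64 // spans owned by the GC'd heap
    	heapSys     uint64 // address space charged to the GC'd heap
    	stacksInuse uint64 // manually-managed stack spans
    }

    const pageShift = 13 // assume 8 KiB pages for the example

    // allocSpanLocked charges the span to whichever in-use stat the
    // caller passes, so manual spans never touch heapInuse.
    func (m *memStats) allocSpanLocked(npages uintptr, stat *uint64) {
    	*stat += uint64(npages) << pageShift
    }

    // allocManual charges the caller's stat and moves the bytes out of
    // heapSys, since manually-managed memory no longer counts there.
    func (m *memStats) allocManual(npages uintptr, stat *uint64) {
    	m.allocSpanLocked(npages, stat)
    	m.heapSys -= uint64(npages) << pageShift
    }

    func main() {
    	m := memStats{heapSys: 1 << 20}
    	m.allocSpanLocked(4, &m.heapInuse) // a GC'd heap span
    	m.allocManual(2, &m.stacksInuse)   // a stack span
    	fmt.Printf("heap_inuse=%d stacks_inuse=%d heap_sys=%d\n",
    		m.heapInuse, m.stacksInuse, m.heapSys)
    }

Because the internal heap numbers now match what is exported, readmemstats_m no longer needs to subtract stack usage from HeapInuse and HeapSys, as the diff below shows.
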
@@ -585,7 +585,7 @@ func (h *mheap) alloc_m(npage uintptr, sizeclass int32, large bool) *mspan {
 	memstats.tinyallocs += uint64(_g_.m.mcache.local_tinyallocs)
 	_g_.m.mcache.local_tinyallocs = 0
-	s := h.allocSpanLocked(npage)
+	s := h.allocSpanLocked(npage, &memstats.heap_inuse)
 	if s != nil {
 		// Record span info, because gc needs to be
 		// able to map interior pointer to containing span.
@@ -664,9 +664,12 @@ func (h *mheap) alloc(npage uintptr, sizeclass int32, large bool, needzero bool)
 	return s
 }

-// allocManual allocates a manually-managed span of npage pages and
-// adds the bytes used to *stat, which should be a memstats in-use
-// field. allocManual returns nil if allocation fails.
+// allocManual allocates a manually-managed span of npage pages.
+// allocManual returns nil if allocation fails.
+//
+// allocManual adds the bytes used to *stat, which should be a
+// memstats in-use field. Unlike allocations in the GC'd heap, the
+// allocation does *not* count toward heap_inuse or heap_sys.
 //
 // The memory backing the returned span may not be zeroed if
 // span.needzero is set.
@@ -678,7 +681,7 @@ func (h *mheap) alloc(npage uintptr, sizeclass int32, large bool, needzero bool)
 //go:systemstack
 func (h *mheap) allocManual(npage uintptr, stat *uint64) *mspan {
 	lock(&h.lock)
-	s := h.allocSpanLocked(npage)
+	s := h.allocSpanLocked(npage, stat)
 	if s != nil {
 		s.state = _MSpanManual
 		s.manualFreeList = 0
@@ -687,7 +690,8 @@ func (h *mheap) allocManual(npage uintptr, stat *uint64) *mspan {
 		s.nelems = 0
 		s.elemsize = 0
 		s.limit = s.base() + s.npages<<_PageShift
-		*stat += uint64(s.npages << _PageShift)
+		// Manually manged memory doesn't count toward heap_sys.
+		memstats.heap_sys -= uint64(s.npages << _PageShift)
 	}

 	// This unlock acts as a release barrier. See mheap.alloc_m.
@@ -699,7 +703,7 @@ func (h *mheap) allocManual(npage uintptr, stat *uint64) *mspan {
 // Allocates a span of the given size. h must be locked.
 // The returned span has been removed from the
 // free list, but its state is still MSpanFree.
-func (h *mheap) allocSpanLocked(npage uintptr) *mspan {
+func (h *mheap) allocSpanLocked(npage uintptr, stat *uint64) *mspan {
 	var list *mSpanList
 	var s *mspan
...@@ -762,7 +766,7 @@ HaveSpan: ...@@ -762,7 +766,7 @@ HaveSpan:
h.spans[p+n] = s h.spans[p+n] = s
} }
memstats.heap_inuse += uint64(npage << _PageShift) *stat += uint64(npage << _PageShift)
memstats.heap_idle -= uint64(npage << _PageShift) memstats.heap_idle -= uint64(npage << _PageShift)
//println("spanalloc", hex(s.start<<_PageShift)) //println("spanalloc", hex(s.start<<_PageShift))
@@ -903,7 +907,8 @@ func (h *mheap) freeManual(s *mspan, stat *uint64) {
 	s.needzero = 1
 	lock(&h.lock)
 	*stat -= uint64(s.npages << _PageShift)
-	h.freeSpanLocked(s, true, true, 0)
+	memstats.heap_sys += uint64(s.npages << _PageShift)
+	h.freeSpanLocked(s, false, true, 0)
 	unlock(&h.lock)
 }
@@ -1096,8 +1101,6 @@ func (h *mheap) scavenge(k int32, now, limit uint64) {
 		if sumreleased > 0 {
 			print("scvg", k, ": ", sumreleased>>20, " MB released\n")
 		}
-		// TODO(dvyukov): these stats are incorrect as we don't subtract stack usage from heap.
-		// But we can't call ReadMemStats on g0 holding locks.
 		print("scvg", k, ": inuse: ", memstats.heap_inuse>>20, ", idle: ", memstats.heap_idle>>20, ", sys: ", memstats.heap_sys>>20, ", released: ", memstats.heap_released>>20, ", consumed: ", (memstats.heap_sys-memstats.heap_released)>>20, " (MB)\n")
 	}
 }
@@ -33,13 +33,12 @@ type mstats struct {
 	// Statistics about malloc heap.
 	// Protected by mheap.lock
 	//
-	// In mstats, heap_sys and heap_inuse includes stack memory,
-	// while in MemStats stack memory is separated out from the
-	// heap stats.
+	// Like MemStats, heap_sys and heap_inuse do not count memory
+	// in manually-managed spans.
 	heap_alloc    uint64 // bytes allocated and not yet freed (same as alloc above)
-	heap_sys      uint64 // virtual address space obtained from system
+	heap_sys      uint64 // virtual address space obtained from system for GC'd heap
 	heap_idle     uint64 // bytes in idle spans
-	heap_inuse    uint64 // bytes in non-idle spans
+	heap_inuse    uint64 // bytes in _MSpanInUse spans
 	heap_released uint64 // bytes released to the os
 	heap_objects  uint64 // total number of allocated objects
@@ -59,7 +58,7 @@ type mstats struct {
 	// Statistics about allocation of low-level fixed-size structures.
 	// Protected by FixAlloc locks.
-	stacks_inuse uint64 // this number is included in heap_inuse above; differs from MemStats.StackInuse
+	stacks_inuse uint64 // bytes in manually-managed stack spans
 	stacks_sys   uint64 // only counts newosproc0 stack in mstats; differs from MemStats.StackSys
 	mspan_inuse  uint64 // mspan structures
 	mspan_sys    uint64
@@ -459,10 +458,9 @@ func readmemstats_m(stats *MemStats) {
 	// cannot change MemStats because of backward compatibility.
 	memmove(unsafe.Pointer(stats), unsafe.Pointer(&memstats), sizeof_C_MStats)

-	// Stack numbers are part of the heap numbers, separate those out for user consumption
+	// memstats.stacks_sys is only memory mapped directly for OS stacks.
+	// Add in heap-allocated stack memory for user consumption.
 	stats.StackSys += stats.StackInuse
-	stats.HeapInuse -= stats.StackInuse
-	stats.HeapSys -= stats.StackInuse
 }

 //go:linkname readGCStats runtime/debug.readGCStats
@@ -512,6 +510,9 @@ func updatememstats() {
 	memstats.sys = memstats.heap_sys + memstats.stacks_sys + memstats.mspan_sys +
 		memstats.mcache_sys + memstats.buckhash_sys + memstats.gc_sys + memstats.other_sys

+	// We also count stacks_inuse as sys memory.
+	memstats.sys += memstats.stacks_inuse
+
 	// Calculate memory allocator stats.
 	// During program execution we only count number of frees and amount of freed memory.
 	// Current number of alive object in the heap and amount of alive heap memory
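
The user-visible effect can be checked through runtime.MemStats: after this change, HeapInuse and HeapSys come straight from the internal heap_inuse and heap_sys counters, and stack memory shows up only in the Stack fields. A minimal example of reading these stats (illustrative, not part of the commit):

    package main

    import (
    	"fmt"
    	"runtime"
    )

    func main() {
    	var ms runtime.MemStats
    	runtime.ReadMemStats(&ms)
    	// HeapInuse/HeapSys no longer include manually-managed (stack)
    	// spans, so no adjustment against StackInuse is needed here.
    	fmt.Printf("HeapInuse=%d HeapSys=%d StackInuse=%d StackSys=%d\n",
    		ms.HeapInuse, ms.HeapSys, ms.StackInuse, ms.StackSys)
    }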