Commit eb96f8a5 authored by Michael Anthony Knyszek, committed by Michael Knyszek

runtime: scavenge on growth instead of inline with allocation

Inline scavenging causes significant performance regressions in tail
latency for k8s and has relatively little benefit for RSS footprint.

We disabled inline scavenging in Go 1.12.5 (CL 174102) as well, but
we thought other changes in Go 1.13 had mitigated the issues with
inline scavenging. Apparently we were wrong.

This CL switches back to only doing foreground scavenging on heap
growth, rather than doing it when allocation tries to allocate from
scavenged space.
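
For illustration, here is a minimal standalone sketch of the scavenge-on-growth
policy. This is not the real runtime code: growAddSpan and
scavengeIfNeededLocked in the diff below are the actual hooks, while the types,
the goal-tracking, and all names here are simplified stand-ins.

	// A minimal sketch, assuming a byte-counted scavenge goal: on
	// each heap growth we release enough free memory back to the OS
	// to offset the newly mapped region, but only while we are behind
	// the goal. Nothing is scavenged inline with allocation.
	package main

	import "fmt"

	const pageSize = 8192 // assumed page size, for illustration only

	type heap struct {
		scavengeGoal uintptr // how many bytes we want released overall
		released     uintptr // bytes currently released to the OS
	}

	// scavengeIfNeeded releases up to nbytes of free heap back to the
	// OS, stopping once the scavenge goal is met. Stands in for the
	// runtime's scavengeIfNeededLocked.
	func (h *heap) scavengeIfNeeded(nbytes uintptr) {
		if h.released >= h.scavengeGoal {
			return // goal met; growth costs nothing extra here
		}
		if want := h.scavengeGoal - h.released; nbytes > want {
			nbytes = want
		}
		h.released += nbytes
		fmt.Printf("scavenged %d bytes on growth\n", nbytes)
	}

	// grow maps size bytes of new address space and pays for it by
	// scavenging once per growth, rather than once per allocation.
	// Stands in for the runtime's growAddSpan.
	func (h *heap) grow(size uintptr) {
		h.scavengeIfNeeded(size)
		// ... map the region and add it to the free spans ...
	}

	func main() {
		h := &heap{scavengeGoal: 4 * pageSize}
		h.grow(16 * pageSize) // scavenges 4 pages' worth, hitting the goal
		h.grow(16 * pageSize) // goal already met: no scavenging
	}

The point of the design is visible in main: scavenging work is bounded and paid
at the infrequent growth events, so individual allocations never stall on it.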

Fixes #32828.

Change-Id: I1f5df44046091f0b4f89fec73c2cde98bf9448cb
Reviewed-on: https://go-review.googlesource.com/c/go/+/183857
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
Reviewed-by: Michael Knyszek <mknyszek@google.com>
parent f18109d7
@@ -1226,16 +1226,6 @@ HaveSpan:
 		// heap_released since we already did so earlier.
 		sysUsed(unsafe.Pointer(s.base()), s.npages<<_PageShift)
 		s.scavenged = false
-
-		// Since we allocated out of a scavenged span, we just
-		// grew the RSS. Mitigate this by scavenging enough free
-		// space to make up for it but only if we need to.
-		//
-		// scavengeLocked may cause coalescing, so prevent
-		// coalescing with s by temporarily changing its state.
-		s.state = mSpanManual
-		h.scavengeIfNeededLocked(s.npages * pageSize)
-		s.state = mSpanFree
 	}
 
 	h.setSpans(s.base(), npage, s)
@@ -1311,6 +1301,10 @@ func (h *mheap) grow(npage uintptr) bool {
 //
 // h must be locked.
 func (h *mheap) growAddSpan(v unsafe.Pointer, size uintptr) {
+	// Scavenge some pages to make up for the virtual memory space
+	// we just allocated, but only if we need to.
+	h.scavengeIfNeededLocked(size)
+
 	s := (*mspan)(h.spanalloc.alloc())
 	s.init(uintptr(v), size/pageSize)
 	h.setSpans(s.base(), s.npages, s)