Commit 40036a99 authored by Michael Anthony Knyszek, committed by Michael Knyszek

runtime: change the span allocation policy to first-fit

This change modifies the treap implementation to be address-ordered
instead of size-ordered, and further augments it so it may be used for
allocation. It then modifies the find method to implement a first-fit
allocation policy.
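
As a rough illustration of the idea (a sketch only; the types and helpers
below are simplified stand-ins, not the runtime's actual treap code in
mgclarge.go): an address-ordered search tree whose nodes also record the
largest span size in their subtree lets a lookup steer toward the
lowest-addressed span that still fits the request.

    // Minimal sketch of first-fit search over an address-ordered binary
    // search tree augmented with the largest span size per subtree.
    // Illustrative only; not the runtime's mgclarge.go implementation.
    package main

    import "fmt"

    type span struct {
    	base   uintptr // start address of the span
    	npages uintptr // size of the span in pages
    }

    type node struct {
    	s           span
    	maxPages    uintptr // largest npages in this subtree (augmentation)
    	left, right *node
    }

    // update recomputes the subtree augmentation after an insert.
    func (n *node) update() {
    	n.maxPages = n.s.npages
    	if n.left != nil && n.left.maxPages > n.maxPages {
    		n.maxPages = n.left.maxPages
    	}
    	if n.right != nil && n.right.maxPages > n.maxPages {
    		n.maxPages = n.right.maxPages
    	}
    }

    // insert adds a span keyed by base address (no rebalancing, for brevity).
    func insert(n *node, s span) *node {
    	if n == nil {
    		return &node{s: s, maxPages: s.npages}
    	}
    	if s.base < n.s.base {
    		n.left = insert(n.left, s)
    	} else {
    		n.right = insert(n.right, s)
    	}
    	n.update()
    	return n
    }

    // find returns the span with the lowest base address that has at least
    // npages pages: prefer the left (lower-address) subtree whenever it can
    // satisfy the request, then the current span, then the right subtree.
    func find(n *node, npages uintptr) (span, bool) {
    	for n != nil {
    		if n.left != nil && n.left.maxPages >= npages {
    			n = n.left
    		} else if n.s.npages >= npages {
    			return n.s, true
    		} else if n.right != nil && n.right.maxPages >= npages {
    			n = n.right
    		} else {
    			break
    		}
    	}
    	return span{}, false
    }

    func main() {
    	var root *node
    	for _, s := range []span{
    		{base: 0xc0040000, npages: 5},
    		{base: 0xc0010000, npages: 8},
    		{base: 0xc0080000, npages: 3},
    	} {
    		root = insert(root, s)
    	}
    	if s, ok := find(root, 5); ok {
    		// Prints the lowest-addressed span that fits 5 pages.
    		fmt.Printf("first fit: base=%#x npages=%d\n", s.base, s.npages)
    	}
    }

Because all spans in the left subtree have lower addresses, preferring it
whenever its recorded maximum can satisfy the request is what makes the
result first-fit by address.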

This change to the treap implementation consequently makes it so that
spans are scavenged in highest-address-first order without any
additional changes to the scavenging code. Because the treap itself is
now address ordered, and the scavenging code iterates over it in
reverse, the highest address is now chosen instead of the largest span.
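
A minimal sketch of that scavenging order, using a sorted slice and a byte
quota in place of the runtime's treap and its real page-releasing code
(illustrative only; pageSize and the names here are assumptions for the
example, not runtime API):

    // With free spans kept in address order, walking them in reverse
    // releases the highest-addressed spans first until the byte quota is met.
    package main

    import (
    	"fmt"
    	"sort"
    )

    const pageSize = 8192 // page size assumed for the example

    type span struct {
    	base   uintptr
    	npages uintptr
    }

    func scavenge(free []span, nbytes uintptr) (released uintptr) {
    	// Keep the free spans address-ordered, as the treap now is.
    	sort.Slice(free, func(i, j int) bool { return free[i].base < free[j].base })
    	// Iterate backwards: highest base address first.
    	for i := len(free) - 1; i >= 0 && released < nbytes; i-- {
    		r := free[i].npages * pageSize // stands in for releasing the span's pages
    		fmt.Printf("scavenged span at %#x (%d bytes)\n", free[i].base, r)
    		released += r
    	}
    	return released
    }

    func main() {
    	free := []span{{0xc0010000, 8}, {0xc0040000, 5}, {0xc0080000, 3}}
    	scavenge(free, 4*pageSize)
    }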

This change also renames the now wrongly-named "scavengeLargest" method
on mheap to just "scavengeLocked" and also fixes up logic in that method
which made assumptions about size.

For #30333.

Change-Id: I94b6f3209211cc1bfdc8cdaea04152a232cfbbb4
Reviewed-on: https://go-review.googlesource.com/c/go/+/164101
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
parent e30aa166
@@ -627,4 +627,5 @@ func (t *Treap) Size() int {

 func (t *Treap) CheckInvariants() {
 	t.mTreap.treap.walkTreap(checkTreapNode)
+	t.mTreap.treap.validateMaxPages()
 }
This diff is collapsed.
@@ -1121,10 +1121,10 @@ func (h *mheap) pickFreeSpan(npage uintptr) *mspan {
 	// Note that we want the _smaller_ free span, i.e. the free span
 	// closer in size to the amount we requested (npage).
 	var s *mspan
-	if tf.valid() && (!ts.valid() || tf.span().npages <= ts.span().npages) {
+	if tf.valid() && (!ts.valid() || tf.span().base() <= ts.span().base()) {
 		s = tf.span()
 		h.free.erase(tf)
-	} else if ts.valid() && (!tf.valid() || tf.span().npages > ts.span().npages) {
+	} else if ts.valid() && (!tf.valid() || tf.span().base() > ts.span().base()) {
 		s = ts.span()
 		h.scav.erase(ts)
 	}
@@ -1198,10 +1198,10 @@ HaveSpan:
 		// grew the RSS. Mitigate this by scavenging enough free
 		// space to make up for it.
 		//
-		// Also, scavengeLargest may cause coalescing, so prevent
+		// Also, scavenge may cause coalescing, so prevent
 		// coalescing with s by temporarily changing its state.
 		s.state = mSpanManual
-		h.scavengeLargest(s.npages * pageSize)
+		h.scavengeLocked(s.npages * pageSize)
 		s.state = mSpanFree
 	}
 	s.unusedsince = 0
@@ -1236,7 +1236,7 @@ func (h *mheap) grow(npage uintptr) bool {
 	// is proportional to the number of sysUnused() calls rather than
 	// the number of pages released, so we make fewer of those calls
 	// with larger spans.
-	h.scavengeLargest(size)
+	h.scavengeLocked(size)

 	// Create a fake "in use" span and free it, so that the
 	// right coalescing happens.
@@ -1344,10 +1344,10 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
 	h.treapForSpan(s).insert(s)
 }

-// scavengeLargest scavenges nbytes worth of spans in unscav
-// starting from the largest span and working down. It then takes those spans
-// and places them in scav. h must be locked.
-func (h *mheap) scavengeLargest(nbytes uintptr) {
+// scavengeLocked scavenges nbytes worth of spans in the free treap by
+// starting from the span with the highest base address and working down.
+// It then takes those spans and places them in scav. h must be locked.
+func (h *mheap) scavengeLocked(nbytes uintptr) {
 	// Use up scavenge credit if there's any available.
 	if nbytes > h.scavengeCredit {
 		nbytes -= h.scavengeCredit
@@ -1356,23 +1356,16 @@ func (h *mheap) scavengeLargest(nbytes uintptr) {
 		h.scavengeCredit -= nbytes
 		return
 	}
-	// Iterate over the treap backwards (from largest to smallest) scavenging spans
-	// until we've reached our quota of nbytes.
+	// Iterate over the treap backwards (from highest address to lowest address)
+	// scavenging spans until we've reached our quota of nbytes.
 	released := uintptr(0)
 	for t := h.free.end(); released < nbytes && t.valid(); {
 		s := t.span()
 		r := s.scavenge()
 		if r == 0 {
-			// Since we're going in order of largest-to-smallest span, this
-			// means all other spans are no bigger than s. There's a high
-			// chance that the other spans don't even cover a full page,
-			// (though they could) but iterating further just for a handful
-			// of pages probably isn't worth it, so just stop here.
-			//
-			// This check also preserves the invariant that spans that have
-			// `scavenged` set are only ever in the `scav` treap, and
-			// those which have it unset are only in the `free` treap.
-			break
+			// This span doesn't cover at least one physical page, so skip it.
+			t = t.prev()
+			continue
 		}
 		n := t.prev()
 		h.free.erase(t)
@@ -1393,7 +1386,7 @@ func (h *mheap) scavengeLargest(nbytes uintptr) {
 // scavengeAll visits each node in the unscav treap and scavenges the
 // treapNode's span. It then removes the scavenged span from
 // unscav and adds it into scav before continuing. h must be locked.
-func (h *mheap) scavengeAll(now, limit uint64) uintptr {
+func (h *mheap) scavengeAllLocked(now, limit uint64) uintptr {
 	// Iterate over the treap scavenging spans if unused for at least limit time.
 	released := uintptr(0)
 	for t := h.free.start(); t.valid(); {
@@ -1416,14 +1409,14 @@ func (h *mheap) scavengeAll(now, limit uint64) uintptr {
 	return released
 }

-func (h *mheap) scavenge(k int32, now, limit uint64) {
+func (h *mheap) scavengeAll(k int32, now, limit uint64) {
 	// Disallow malloc or panic while holding the heap lock. We do
 	// this here because this is an non-mallocgc entry-point to
 	// the mheap API.
 	gp := getg()
 	gp.m.mallocing++
 	lock(&h.lock)
-	released := h.scavengeAll(now, limit)
+	released := h.scavengeAllLocked(now, limit)
 	unlock(&h.lock)
 	gp.m.mallocing--
@@ -1438,7 +1431,7 @@ func (h *mheap) scavenge(k int32, now, limit uint64) {
 //go:linkname runtime_debug_freeOSMemory runtime/debug.freeOSMemory
 func runtime_debug_freeOSMemory() {
 	GC()
-	systemstack(func() { mheap_.scavenge(-1, ^uint64(0), 0) })
+	systemstack(func() { mheap_.scavengeAll(-1, ^uint64(0), 0) })
 }

 // Initialize a new span with the given start and npages.
...
@@ -4369,7 +4369,7 @@ func sysmon() {
 		}
 		// scavenge heap once in a while
 		if lastscavenge+scavengelimit/2 < now {
-			mheap_.scavenge(int32(nscavenge), uint64(now), uint64(scavengelimit))
+			mheap_.scavengeAll(int32(nscavenge), uint64(now), uint64(scavengelimit))
 			lastscavenge = now
 			nscavenge++
 		}
...
@@ -58,20 +58,18 @@ func TestTreap(t *testing.T) {
 		}
 		tr.RemoveSpan(spans[0])
 	})
-	t.Run("FindBestFit", func(t *testing.T) {
+	t.Run("FindFirstFit", func(t *testing.T) {
 		// Run this 10 times, recreating the treap each time.
 		// Because of the non-deterministic structure of a treap,
 		// we'll be able to test different structures this way.
 		for i := 0; i < 10; i++ {
-			tr := treap{}
+			tr := runtime.Treap{}
 			for _, s := range spans {
 				tr.Insert(s)
 			}
 			i := tr.Find(5)
-			if i.Span().Pages() != 5 {
-				t.Fatalf("expected span of size 5, got span of size %d", i.Span().Pages())
-			} else if i.Span().Base() != 0xc0040000 {
-				t.Fatalf("expected span to have the lowest base address, instead got base %x", i.Span().Base())
+			if i.Span().Base() != 0xc0010000 {
+				t.Fatalf("expected span at lowest address which could fit 5 pages, instead found span at %x", i.Span().Base())
 			}
 			for _, s := range spans {
 				tr.RemoveSpan(s)
@@ -88,13 +86,13 @@ func TestTreap(t *testing.T) {
 			tr.Insert(s)
 		}
 		nspans := 0
-		lastSize := uintptr(0)
+		lastBase := uintptr(0)
 		for i := tr.Start(); i.Valid(); i = i.Next() {
 			nspans++
-			if lastSize > i.Span().Pages() {
-				t.Fatalf("not iterating in correct order: encountered size %d before %d", lastSize, i.Span().Pages())
+			if lastBase > i.Span().Base() {
+				t.Fatalf("not iterating in correct order: encountered base %x before %x", lastBase, i.Span().Base())
 			}
-			lastSize = i.Span().Pages()
+			lastBase = i.Span().Base()
 		}
 		if nspans != len(spans) {
 			t.Fatal("failed to iterate forwards over full treap")
@@ -112,13 +110,13 @@ func TestTreap(t *testing.T) {
 			tr.Insert(s)
 		}
 		nspans := 0
-		lastSize := ^uintptr(0)
+		lastBase := ^uintptr(0)
 		for i := tr.End(); i.Valid(); i = i.Prev() {
 			nspans++
-			if lastSize < i.Span().Pages() {
-				t.Fatalf("not iterating in correct order: encountered size %d before %d", lastSize, i.Span().Pages())
+			if lastBase < i.Span().Base() {
+				t.Fatalf("not iterating in correct order: encountered base %x before %x", lastBase, i.Span().Base())
 			}
-			lastSize = i.Span().Pages()
+			lastBase = i.Span().Base()
 		}
 		if nspans != len(spans) {
 			t.Fatal("failed to iterate backwards over full treap")
...