Commit 40036a99 authored by Michael Anthony Knyszek, committed by Michael Knyszek

runtime: change the span allocation policy to first-fit

This change modifies the treap implementation to be address-ordered
instead of size-ordered, and augments each node with the size (in pages)
of the largest span in its subtree so the treap can still be searched
for allocation. It then modifies the find method to implement a
first-fit allocation policy.
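
The following is a minimal illustrative sketch, not the runtime's actual
treap code: the span, node, and find names are invented for the example,
and a plain binary search tree stands in for the randomized treap. It
shows how a first-fit search works on an address-ordered tree when each
node records the largest span size found in its subtree: descend into
the lower-address subtree whenever its maximum is large enough, so the
search lands on the lowest-addressed span that fits.

    package main

    import "fmt"

    // Illustrative sketch only: a plain address-ordered binary search tree,
    // not the runtime's randomized treap, and all names here are invented.
    // Each node is augmented with the largest span size (in pages) found
    // anywhere in its subtree.
    type span struct {
        base, npages uintptr
    }

    type node struct {
        s           span
        maxPages    uintptr // largest span in this subtree, in pages
        left, right *node
    }

    // find returns the lowest-addressed span with at least npages pages
    // (first-fit), or nil if no span is large enough.
    func find(n *node, npages uintptr) *span {
        for n != nil {
            switch {
            case n.left != nil && n.left.maxPages >= npages:
                // A large-enough span exists at a lower address; prefer it.
                n = n.left
            case n.s.npages >= npages:
                // Nothing at a lower address fits, so this node is the first fit.
                return &n.s
            case n.right != nil && n.right.maxPages >= npages:
                // Only a higher-addressed span can satisfy the request.
                n = n.right
            default:
                return nil
            }
        }
        return nil
    }

    func main() {
        // A tiny tree keyed by base address; maxPages filled in by hand.
        root := &node{
            s: span{base: 0xc0020000, npages: 2}, maxPages: 8,
            left:  &node{s: span{base: 0xc0010000, npages: 1}, maxPages: 1},
            right: &node{s: span{base: 0xc0040000, npages: 8}, maxPages: 8},
        }
        if s := find(root, 5); s != nil {
            fmt.Printf("first fit for 5 pages: base %#x, %d pages\n", s.base, s.npages)
        }
    }

The subtree-maximum augmentation is what keeps the search proportional to
the tree height; without it, finding the lowest-addressed fitting span
would require scanning every node.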

This change to the treap implementation consequently causes spans to be
scavenged in highest-address-first order, without any additional changes
to the scavenging code: because the treap is now address-ordered and the
scavenging code iterates over it in reverse, the highest-addressed span
is chosen instead of the largest one.
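
As a rough sketch of the resulting scavenge order (reusing the invented
span type from the example above, with a slice sorted by base address
standing in for the address-ordered treap), scavenging walks from the
highest base address downward until the requested number of bytes has
been covered:

    // Illustrative only, reusing the invented span type from the sketch
    // above. freeByAddr is assumed to be sorted by base address, ascending,
    // so walking it backwards visits the highest-addressed spans first,
    // mirroring reverse iteration over the address-ordered treap.
    func scavengeHighestFirst(freeByAddr []span, nbytes, pageSize uintptr) uintptr {
        released := uintptr(0)
        for i := len(freeByAddr) - 1; i >= 0 && released < nbytes; i-- {
            released += freeByAddr[i].npages * pageSize
        }
        return released
    }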

This change also renames the now misleadingly named "scavengeLargest"
method on mheap to just "scavengeLocked", and fixes up logic in that
method which assumed the treap was ordered by size.

For #30333.

Change-Id: I94b6f3209211cc1bfdc8cdaea04152a232cfbbb4
Reviewed-on: https://go-review.googlesource.com/c/go/+/164101
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
parent e30aa166
@@ -627,4 +627,5 @@ func (t *Treap) Size() int {
 
 func (t *Treap) CheckInvariants() {
 	t.mTreap.treap.walkTreap(checkTreapNode)
+	t.mTreap.treap.validateMaxPages()
 }
[This file's diff is collapsed and not shown.]
@@ -1121,10 +1121,10 @@ func (h *mheap) pickFreeSpan(npage uintptr) *mspan {
 	// Note that we want the _smaller_ free span, i.e. the free span
 	// closer in size to the amount we requested (npage).
 	var s *mspan
-	if tf.valid() && (!ts.valid() || tf.span().npages <= ts.span().npages) {
+	if tf.valid() && (!ts.valid() || tf.span().base() <= ts.span().base()) {
 		s = tf.span()
 		h.free.erase(tf)
-	} else if ts.valid() && (!tf.valid() || tf.span().npages > ts.span().npages) {
+	} else if ts.valid() && (!tf.valid() || tf.span().base() > ts.span().base()) {
 		s = ts.span()
 		h.scav.erase(ts)
 	}
@@ -1198,10 +1198,10 @@ HaveSpan:
 		// grew the RSS. Mitigate this by scavenging enough free
 		// space to make up for it.
 		//
-		// Also, scavengeLargest may cause coalescing, so prevent
+		// Also, scavenge may cause coalescing, so prevent
 		// coalescing with s by temporarily changing its state.
 		s.state = mSpanManual
-		h.scavengeLargest(s.npages * pageSize)
+		h.scavengeLocked(s.npages * pageSize)
 		s.state = mSpanFree
 	}
 	s.unusedsince = 0
@@ -1236,7 +1236,7 @@ func (h *mheap) grow(npage uintptr) bool {
 	// is proportional to the number of sysUnused() calls rather than
 	// the number of pages released, so we make fewer of those calls
 	// with larger spans.
-	h.scavengeLargest(size)
+	h.scavengeLocked(size)
 
 	// Create a fake "in use" span and free it, so that the
 	// right coalescing happens.
@@ -1344,10 +1344,10 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
 	h.treapForSpan(s).insert(s)
 }
 
-// scavengeLargest scavenges nbytes worth of spans in unscav
-// starting from the largest span and working down. It then takes those spans
-// and places them in scav. h must be locked.
-func (h *mheap) scavengeLargest(nbytes uintptr) {
+// scavengeLocked scavenges nbytes worth of spans in the free treap by
+// starting from the span with the highest base address and working down.
+// It then takes those spans and places them in scav. h must be locked.
+func (h *mheap) scavengeLocked(nbytes uintptr) {
 	// Use up scavenge credit if there's any available.
 	if nbytes > h.scavengeCredit {
 		nbytes -= h.scavengeCredit
@@ -1356,23 +1356,16 @@ func (h *mheap) scavengeLargest(nbytes uintptr) {
 		h.scavengeCredit -= nbytes
 		return
 	}
-	// Iterate over the treap backwards (from largest to smallest) scavenging spans
-	// until we've reached our quota of nbytes.
+	// Iterate over the treap backwards (from highest address to lowest address)
+	// scavenging spans until we've reached our quota of nbytes.
 	released := uintptr(0)
 	for t := h.free.end(); released < nbytes && t.valid(); {
 		s := t.span()
 		r := s.scavenge()
 		if r == 0 {
-			// Since we're going in order of largest-to-smallest span, this
-			// means all other spans are no bigger than s. There's a high
-			// chance that the other spans don't even cover a full page,
-			// (though they could) but iterating further just for a handful
-			// of pages probably isn't worth it, so just stop here.
-			//
-			// This check also preserves the invariant that spans that have
-			// `scavenged` set are only ever in the `scav` treap, and
-			// those which have it unset are only in the `free` treap.
-			break
+			// This span doesn't cover at least one physical page, so skip it.
+			t = t.prev()
+			continue
 		}
 		n := t.prev()
 		h.free.erase(t)
@@ -1393,7 +1386,7 @@ func (h *mheap) scavengeLargest(nbytes uintptr) {
 // scavengeAll visits each node in the unscav treap and scavenges the
 // treapNode's span. It then removes the scavenged span from
 // unscav and adds it into scav before continuing. h must be locked.
-func (h *mheap) scavengeAll(now, limit uint64) uintptr {
+func (h *mheap) scavengeAllLocked(now, limit uint64) uintptr {
 	// Iterate over the treap scavenging spans if unused for at least limit time.
 	released := uintptr(0)
 	for t := h.free.start(); t.valid(); {
@@ -1416,14 +1409,14 @@ func (h *mheap) scavengeAll(now, limit uint64) uintptr {
 	return released
 }
 
-func (h *mheap) scavenge(k int32, now, limit uint64) {
+func (h *mheap) scavengeAll(k int32, now, limit uint64) {
 	// Disallow malloc or panic while holding the heap lock. We do
 	// this here because this is an non-mallocgc entry-point to
 	// the mheap API.
 	gp := getg()
 	gp.m.mallocing++
 	lock(&h.lock)
-	released := h.scavengeAll(now, limit)
+	released := h.scavengeAllLocked(now, limit)
 	unlock(&h.lock)
 	gp.m.mallocing--
@@ -1438,7 +1431,7 @@ func (h *mheap) scavenge(k int32, now, limit uint64) {
 //go:linkname runtime_debug_freeOSMemory runtime/debug.freeOSMemory
 func runtime_debug_freeOSMemory() {
 	GC()
-	systemstack(func() { mheap_.scavenge(-1, ^uint64(0), 0) })
+	systemstack(func() { mheap_.scavengeAll(-1, ^uint64(0), 0) })
 }
 
 // Initialize a new span with the given start and npages.
@@ -4369,7 +4369,7 @@ func sysmon() {
 		}
 		// scavenge heap once in a while
 		if lastscavenge+scavengelimit/2 < now {
-			mheap_.scavenge(int32(nscavenge), uint64(now), uint64(scavengelimit))
+			mheap_.scavengeAll(int32(nscavenge), uint64(now), uint64(scavengelimit))
 			lastscavenge = now
 			nscavenge++
 		}
@@ -58,20 +58,18 @@ func TestTreap(t *testing.T) {
 		}
 		tr.RemoveSpan(spans[0])
 	})
-	t.Run("FindBestFit", func(t *testing.T) {
+	t.Run("FindFirstFit", func(t *testing.T) {
 		// Run this 10 times, recreating the treap each time.
 		// Because of the non-deterministic structure of a treap,
 		// we'll be able to test different structures this way.
 		for i := 0; i < 10; i++ {
-			tr := treap{}
+			tr := runtime.Treap{}
 			for _, s := range spans {
 				tr.Insert(s)
 			}
 			i := tr.Find(5)
-			if i.Span().Pages() != 5 {
-				t.Fatalf("expected span of size 5, got span of size %d", i.Span().Pages())
-			} else if i.Span().Base() != 0xc0040000 {
-				t.Fatalf("expected span to have the lowest base address, instead got base %x", i.Span().Base())
+			if i.Span().Base() != 0xc0010000 {
+				t.Fatalf("expected span at lowest address which could fit 5 pages, instead found span at %x", i.Span().Base())
 			}
 			for _, s := range spans {
 				tr.RemoveSpan(s)
@@ -88,13 +86,13 @@ func TestTreap(t *testing.T) {
 			tr.Insert(s)
 		}
 		nspans := 0
-		lastSize := uintptr(0)
+		lastBase := uintptr(0)
 		for i := tr.Start(); i.Valid(); i = i.Next() {
 			nspans++
-			if lastSize > i.Span().Pages() {
-				t.Fatalf("not iterating in correct order: encountered size %d before %d", lastSize, i.Span().Pages())
+			if lastBase > i.Span().Base() {
+				t.Fatalf("not iterating in correct order: encountered base %x before %x", lastBase, i.Span().Base())
 			}
-			lastSize = i.Span().Pages()
+			lastBase = i.Span().Base()
 		}
 		if nspans != len(spans) {
 			t.Fatal("failed to iterate forwards over full treap")
@@ -112,13 +110,13 @@ func TestTreap(t *testing.T) {
 			tr.Insert(s)
 		}
 		nspans := 0
-		lastSize := ^uintptr(0)
+		lastBase := ^uintptr(0)
 		for i := tr.End(); i.Valid(); i = i.Prev() {
 			nspans++
-			if lastSize < i.Span().Pages() {
-				t.Fatalf("not iterating in correct order: encountered size %d before %d", lastSize, i.Span().Pages())
+			if lastBase < i.Span().Base() {
+				t.Fatalf("not iterating in correct order: encountered base %x before %x", lastBase, i.Span().Base())
 			}
-			lastSize = i.Span().Pages()
+			lastBase = i.Span().Base()
 		}
 		if nspans != len(spans) {
 			t.Fatal("failed to iterate backwards over full treap")