Commit 5c15ed64 authored by Michael Anthony Knyszek, committed by Michael Knyszek

runtime: split spans during allocation without treap removal

Now that the treap is first-fit, we can make a nice optimization:
since splitting a span doesn't change its position relative to its
neighbors in the treap, we can modify the span in-place in the treap.
The only caveat is that we need to update the relevant metadata.

To enable this optimization, this change introduces a mutate method on
the treap which takes an iterator and a callback that is passed the
iterator's span. The method records some properties of the span before
it calls into the callback and then uses those records to see what
changed and to update the treap metadata appropriately.

Change-Id: I74f7d2ee172800828434ba0194d3d78d3942acf2
Reviewed-on: https://go-review.googlesource.com/c/go/+/174879
Run-TryBot: Michael Knyszek <mknyszek@google.com>
Reviewed-by: Austin Clements <austin@google.com>
parent f4a5ae55
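The optimization hinges on the treap being keyed by span base address (mutate refreshes i.t.key from s.base() below): trimming the allocated pages off the front of a free span leaves the remainder strictly inside its old address range, so its neighbors do not change. A rough standalone sketch of that arithmetic, using a toy span type and an assumed 8 KiB page size rather than the runtime's real mspan:

```go
package main

import "fmt"

// pageSize is an assumed value for this sketch; the real runtime page
// size is platform dependent.
const pageSize = 8192

// span is a toy stand-in for the runtime's mspan: just a base address
// and a length in pages.
type span struct {
	base, npages uintptr
}

// splitInPlace models the in-place split done under mutate: the low
// npage pages become the allocated span n, and s keeps the remainder
// with its base advanced past n.
func splitInPlace(s *span, npage uintptr) span {
	n := span{base: s.base, npages: npage}
	s.npages -= npage
	s.base += npage * pageSize
	return n
}

func main() {
	s := span{base: 0x100000, npages: 8}
	oldBase, oldEnd := s.base, s.base+s.npages*pageSize

	n := splitInPlace(&s, 3)
	fmt.Printf("allocated: base=%#x npages=%d\n", n.base, n.npages)
	fmt.Printf("remainder: base=%#x npages=%d\n", s.base, s.npages)

	// The remainder still lies inside its old address range, so its
	// order in an address-keyed treap relative to any predecessor or
	// successor is unchanged.
	fmt.Println("ordering preserved:", oldBase <= s.base && s.base+s.npages*pageSize == oldEnd)
}
```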
@@ -377,6 +377,40 @@ func (root *mTreap) end(mask, match treapIterType) treapIter {
 	return treapIter{f, root.treap.findMaximal(f)}
 }
 
+// mutate allows one to mutate the span without removing it from the treap via a
+// callback. The span's base and size are allowed to change as long as the span
+// remains in the same order relative to its predecessor and successor.
+//
+// Note however that any operation that causes a treap rebalancing inside of fn
+// is strictly forbidden, as that may cause treap node metadata to go
+// out-of-sync.
+func (root *mTreap) mutate(i treapIter, fn func(span *mspan)) {
+	s := i.span()
+	// Save some state about the span for later inspection.
+	hpages := s.hugePages()
+	scavenged := s.scavenged
+	// Call the mutator.
+	fn(s)
+	// Update unscavHugePages appropriately.
+	if !scavenged {
+		mheap_.free.unscavHugePages -= hpages
+	}
+	if !s.scavenged {
+		mheap_.free.unscavHugePages += s.hugePages()
+	}
+	// Update the key in case the base changed.
+	i.t.key = s.base()
+	// Updating invariants up the tree needs to happen if
+	// anything changed at all, so just go ahead and do it
+	// unconditionally.
+	//
+	// If it turns out nothing changed, it'll exit quickly.
+	t := i.t
+	for t != nil && t.updateInvariants() {
+		t = t.parent
+	}
+}
+
 // insert adds span to the large span treap.
 func (root *mTreap) insert(span *mspan) {
 	if !span.scavenged {
...
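To see the snapshot-then-reconcile shape of mutate outside the runtime, here is a hedged, self-contained sketch. The counters, toySpan, and mutateInPlace names are invented for illustration; the real method adjusts mheap_.free.unscavHugePages (in huge pages, not pages), rewrites the node key from the span's new base, and walks up the tree calling updateInvariants, as the hunk above shows.

```go
package main

import "fmt"

// counters is a toy stand-in for aggregate bookkeeping kept outside the
// nodes, analogous to mheap_.free.unscavHugePages (which counts huge
// pages; plain pages keep this sketch simple).
type counters struct {
	unscavPages uintptr
}

// toySpan is a toy stand-in for *mspan.
type toySpan struct {
	base, npages uintptr
	scavenged    bool
}

// mutateInPlace mirrors the shape of mTreap.mutate: snapshot the
// properties that feed the aggregates, run the caller's mutation, then
// reconcile the aggregates from the before/after values.
func mutateInPlace(c *counters, s *toySpan, fn func(*toySpan)) {
	// Snapshot.
	pages := s.npages
	scavenged := s.scavenged
	// Mutate.
	fn(s)
	// Reconcile: remove the old contribution, add the new one.
	if !scavenged {
		c.unscavPages -= pages
	}
	if !s.scavenged {
		c.unscavPages += s.npages
	}
	// The real treap would also refresh the node key (i.t.key = s.base())
	// and fix subtree invariants by walking parent pointers here.
}

func main() {
	const pageSize = 8192 // assumed page size for the sketch
	c := &counters{unscavPages: 8}
	s := &toySpan{base: 0x100000, npages: 8}

	mutateInPlace(c, s, func(s *toySpan) {
		s.npages -= 3          // hand 3 pages to the allocation
		s.base += 3 * pageSize // the remainder starts 3 pages higher
	})
	fmt.Println("unscavenged pages tracked:", c.unscavPages) // 5
}
```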
@@ -1138,40 +1138,46 @@ func (h *mheap) allocSpanLocked(npage uintptr, stat *uint64) *mspan {
 HaveSpan:
 	s := t.span()
-	h.free.erase(t)
 
 	// Mark span in use.
 	if s.state != mSpanFree {
 		throw("candidate mspan for allocation is not free")
 	}
-	if s.npages < npage {
-		throw("candidate mspan for allocation is too small")
-	}
 
 	// First, subtract any memory that was released back to
-	// the OS from s. We will re-scavenge the trimmed section
-	// if necessary.
+	// the OS from s. We will add back what's left if necessary.
 	memstats.heap_released -= uint64(s.released())
 
-	if s.npages > npage {
-		// Trim extra and put it back in the heap.
-		t := (*mspan)(h.spanalloc.alloc())
-		t.init(s.base()+npage<<_PageShift, s.npages-npage)
-		s.npages = npage
-		h.setSpan(t.base()-1, s)
-		h.setSpan(t.base(), t)
-		h.setSpan(t.base()+t.npages*pageSize-1, t)
-		t.needzero = s.needzero
-		// If s was scavenged, then t may be scavenged.
-		start, end := t.physPageBounds()
-		if s.scavenged && start < end {
-			memstats.heap_released += uint64(end - start)
-			t.scavenged = true
-		}
-		s.state = mSpanManual // prevent coalescing with s
-		t.state = mSpanManual
-		h.freeSpanLocked(t, false, false, s.unusedsince)
-		s.state = mSpanFree
-	}
+	if s.npages == npage {
+		h.free.erase(t)
+	} else if s.npages > npage {
+		// Trim off the lower bits and make that our new span.
+		// Do this in-place since this operation does not
+		// affect the original span's location in the treap.
+		n := (*mspan)(h.spanalloc.alloc())
+		h.free.mutate(t, func(s *mspan) {
+			n.init(s.base(), npage)
+			s.npages -= npage
+			s.startAddr = s.base() + npage*pageSize
+			h.setSpan(s.base()-1, n)
+			h.setSpan(s.base(), s)
+			h.setSpan(n.base(), n)
+			n.needzero = s.needzero
+			// n may not be big enough to actually be scavenged, but that's fine.
+			// We still want it to appear to be scavenged so that we can do the
+			// right bookkeeping later on in this function (i.e. sysUsed).
+			n.scavenged = s.scavenged
+			// Check if s is still scavenged.
+			if s.scavenged {
+				start, end := s.physPageBounds()
+				if start < end {
+					memstats.heap_released += uint64(end - start)
+				} else {
+					s.scavenged = false
+				}
+			}
+		})
+		s = n
+	} else {
+		throw("candidate mspan for allocation is too small")
+	}
 
 	// "Unscavenge" s only AFTER splitting so that
 	// we only sysUsed whatever we actually need.
...
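On the allocation path above, heap_released is first debited by everything s had released; then, when the span is split, only the remainder's whole physical pages are credited back inside the callback, and s.scavenged is cleared if none remain. A standalone sketch of that rounding, assuming an 8 KiB runtime page, a 64 KiB physical page, and a hypothetical physPageBounds helper modeled on the runtime's:

```go
package main

import "fmt"

// Assumed sizes for this sketch only; the real values are platform dependent.
const (
	pageSize     = 8192  // runtime page size
	physPageSize = 65536 // physical page size (e.g. some arm64 systems)
)

// physPageBounds models the runtime helper of the same name: only whole
// physical pages inside [base, base+npages*pageSize) count as released.
func physPageBounds(base, npages uintptr) (start, end uintptr) {
	start = (base + physPageSize - 1) &^ (physPageSize - 1) // round base up
	end = (base + npages*pageSize) &^ (physPageSize - 1)    // round limit down
	return
}

func main() {
	// A scavenged 8-page span from which we allocate 3 pages.
	base, npages := uintptr(0x100000), uintptr(8)
	alloc := uintptr(3)

	// First, subtract everything the span had released...
	start, end := physPageBounds(base, npages)
	fmt.Println("subtracted from heap_released:", end-start)

	// ...then, after the in-place split, add back the remainder's portion,
	// but only if it still covers at least one whole physical page.
	newBase := base + alloc*pageSize
	start, end = physPageBounds(newBase, npages-alloc)
	if start < end {
		fmt.Println("added back for the remainder:", end-start)
	} else {
		fmt.Println("remainder covers no whole physical page; scavenged is cleared")
	}
}
```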