Commit 7b33b627 authored by Michael Anthony Knyszek's avatar Michael Anthony Knyszek Committed by Michael Knyszek

runtime: introduce treapForSpan to reduce code duplication

Currently, the treap a span should be inserted into or removed from is
determined by inspecting the span's properties, and this logic is
repeated in four places. As this logic gets more complex, it makes
sense to de-duplicate it, so introduce treapForSpan, which captures
this logic by returning the appropriate treap for a given span.

For #30333.

Change-Id: I4bd933d93dc50c5fc7c7c7f56ceb95194dcbfbcc
Reviewed-on: https://go-review.googlesource.com/c/go/+/170857
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: default avatarAustin Clements <austin@google.com>
parent d13a9312
...@@ -464,11 +464,7 @@ func (h *mheap) coalesce(s *mspan) { ...@@ -464,11 +464,7 @@ func (h *mheap) coalesce(s *mspan) {
// The size is potentially changing so the treap needs to delete adjacent nodes and // The size is potentially changing so the treap needs to delete adjacent nodes and
// insert back as a combined node. // insert back as a combined node.
if other.scavenged { h.treapForSpan(other).removeSpan(other)
h.scav.removeSpan(other)
} else {
h.free.removeSpan(other)
}
other.state = mSpanDead other.state = mSpanDead
h.spanalloc.free(unsafe.Pointer(other)) h.spanalloc.free(unsafe.Pointer(other))
} }
...@@ -486,11 +482,8 @@ func (h *mheap) coalesce(s *mspan) { ...@@ -486,11 +482,8 @@ func (h *mheap) coalesce(s *mspan) {
return return
} }
// Since we're resizing other, we must remove it from the treap. // Since we're resizing other, we must remove it from the treap.
if other.scavenged { h.treapForSpan(other).removeSpan(other)
h.scav.removeSpan(other)
} else {
h.free.removeSpan(other)
}
// Round boundary to the nearest physical page size, toward the // Round boundary to the nearest physical page size, toward the
// scavenged span. // scavenged span.
boundary := b.startAddr boundary := b.startAddr
...@@ -507,11 +500,7 @@ func (h *mheap) coalesce(s *mspan) { ...@@ -507,11 +500,7 @@ func (h *mheap) coalesce(s *mspan) {
h.setSpan(boundary, b) h.setSpan(boundary, b)
// Re-insert other now that it has a new size. // Re-insert other now that it has a new size.
if other.scavenged { h.treapForSpan(other).insert(other)
h.scav.insert(other)
} else {
h.free.insert(other)
}
} }
// Coalesce with earlier, later spans. // Coalesce with earlier, later spans.
...@@ -1112,6 +1101,15 @@ func (h *mheap) setSpans(base, npage uintptr, s *mspan) { ...@@ -1112,6 +1101,15 @@ func (h *mheap) setSpans(base, npage uintptr, s *mspan) {
} }
} }
// treapForSpan returns the appropriate treap for a span for
// insertion and removal.
//
// Scavenged spans are tracked in h.scav; all other free spans
// are tracked in h.free.
func (h *mheap) treapForSpan(span *mspan) *mTreap {
	if !span.scavenged {
		return &h.free
	}
	return &h.scav
}
// pickFreeSpan acquires a free span from internal free list // pickFreeSpan acquires a free span from internal free list
// structures if one is available. Otherwise returns nil. // structures if one is available. Otherwise returns nil.
// h must be locked. // h must be locked.
...@@ -1343,11 +1341,7 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i ...@@ -1343,11 +1341,7 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
h.coalesce(s) h.coalesce(s)
// Insert s into the appropriate treap. // Insert s into the appropriate treap.
if s.scavenged { h.treapForSpan(s).insert(s)
h.scav.insert(s)
} else {
h.free.insert(s)
}
} }
// scavengeLargest scavenges nbytes worth of spans in unscav // scavengeLargest scavenges nbytes worth of spans in unscav
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment