Commit 0de5324d authored by Austin Clements

runtime: abstract remaining mheap.spans access

This abstracts the remaining direct accesses to mheap.spans into new
mheap.setSpan and mheap.setSpans methods.

For #10460.

Change-Id: Id1db8bc5e34a77a9221032aa2e62d05322707364
Reviewed-on: https://go-review.googlesource.com/85884
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Rick Hudson <rlh@golang.org>
parent c0392d2e
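
For orientation before reading the diff: the commit replaces open-coded writes into the mheap.spans page map with two small helpers, setSpan and setSpans. The standalone sketch below models that pattern outside the runtime; the heap and span types, pageShift, pageSize, and the addresses used are stand-ins invented for illustration, not the runtime's own definitions.

    // A minimal, self-contained sketch of the abstraction this commit introduces.
    // The real code lives on the runtime's mheap; everything here is a toy model.
    package main

    import "fmt"

    const (
    	pageShift = 13
    	pageSize  = 1 << pageShift
    )

    type span struct {
    	base   uintptr // first byte of the span
    	npages uintptr // length in pages
    }

    type heap struct {
    	arenaStart uintptr // base address the page map is relative to
    	spans      []*span // one entry per page, indexed by (addr-arenaStart)>>pageShift
    }

    // setSpan records s as the owner of the single page containing base,
    // mirroring the mheap.setSpan helper added in the diff below.
    func (h *heap) setSpan(base uintptr, s *span) {
    	h.spans[(base-h.arenaStart)>>pageShift] = s
    }

    // setSpans records s as the owner of npage consecutive pages starting at base,
    // replacing the open-coded loops the call sites used to carry.
    func (h *heap) setSpans(base, npage uintptr, s *span) {
    	p := (base - h.arenaStart) >> pageShift
    	for n := uintptr(0); n < npage; n++ {
    		h.spans[p+n] = s
    	}
    }

    func main() {
    	h := &heap{arenaStart: 0x1000000, spans: make([]*span, 64)}
    	s := &span{base: h.arenaStart + 8*pageSize, npages: 3}

    	// Before this commit, call sites indexed the span map directly:
    	//	p := (s.base - h.arenaStart) >> pageShift
    	//	for n := uintptr(0); n < s.npages; n++ {
    	//		h.spans[p+n] = s
    	//	}
    	// After it, the same mapping is a single call:
    	h.setSpans(s.base, s.npages, s)

    	fmt.Println(h.spans[8] == s, h.spans[10] == s, h.spans[11] == nil) // true true true
    }
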
@@ -806,6 +806,20 @@ func (h *mheap) allocManual(npage uintptr, stat *uint64) *mspan {
 	return s
 }
 
+// setSpan modifies the span map so spanOf(base) is s.
+func (h *mheap) setSpan(base uintptr, s *mspan) {
+	h.spans[(base-h.arena_start)>>_PageShift] = s
+}
+
+// setSpans modifies the span map so [spanOf(base), spanOf(base+npage*pageSize))
+// is s.
+func (h *mheap) setSpans(base, npage uintptr, s *mspan) {
+	p := (base - h.arena_start) >> _PageShift
+	for n := uintptr(0); n < npage; n++ {
+		h.spans[p+n] = s
+	}
+}
+
 // Allocates a span of the given size. h must be locked.
 // The returned span has been removed from the
 // free list, but its state is still MSpanFree.
@@ -853,12 +867,9 @@ HaveSpan:
 		t := (*mspan)(h.spanalloc.alloc())
 		t.init(s.base()+npage<<_PageShift, s.npages-npage)
 		s.npages = npage
-		p := (t.base() - h.arena_start) >> _PageShift
-		if p > 0 {
-			h.spans[p-1] = s
-		}
-		h.spans[p] = t
-		h.spans[p+t.npages-1] = t
+		h.setSpan(t.base()-1, s)
+		h.setSpan(t.base(), t)
+		h.setSpan(t.base()+t.npages*pageSize-1, t)
 		t.needzero = s.needzero
 		s.state = _MSpanManual // prevent coalescing with s
 		t.state = _MSpanManual
@@ -867,10 +878,7 @@ HaveSpan:
 	}
 	s.unusedsince = 0
 
-	p := (s.base() - h.arena_start) >> _PageShift
-	for n := uintptr(0); n < npage; n++ {
-		h.spans[p+n] = s
-	}
+	h.setSpans(s.base(), npage, s)
 
 	*stat += uint64(npage << _PageShift)
 	memstats.heap_idle -= uint64(npage << _PageShift)
@@ -928,10 +936,7 @@ func (h *mheap) grow(npage uintptr) bool {
 	// right coalescing happens.
 	s := (*mspan)(h.spanalloc.alloc())
 	s.init(uintptr(v), ask>>_PageShift)
-	p := (s.base() - h.arena_start) >> _PageShift
-	for i := p; i < p+s.npages; i++ {
-		h.spans[i] = s
-	}
+	h.setSpans(s.base(), s.npages, s)
 	atomic.Store(&s.sweepgen, h.sweepgen)
 	s.state = _MSpanInUse
 	h.pagesInUse += uint64(s.npages)
@@ -1023,46 +1028,38 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
 	s.npreleased = 0
 
 	// Coalesce with earlier, later spans.
-	p := (s.base() - h.arena_start) >> _PageShift
-	if p > 0 {
-		before := h.spans[p-1]
-		if before != nil && before.state == _MSpanFree {
-			// Now adjust s.
-			s.startAddr = before.startAddr
-			s.npages += before.npages
-			s.npreleased = before.npreleased // absorb released pages
-			s.needzero |= before.needzero
-			p -= before.npages
-			h.spans[p] = s
-			// The size is potentially changing so the treap needs to delete adjacent nodes and
-			// insert back as a combined node.
-			if h.isLargeSpan(before.npages) {
-				// We have a t, it is large so it has to be in the treap so we can remove it.
-				h.freelarge.removeSpan(before)
-			} else {
-				h.freeList(before.npages).remove(before)
-			}
-			before.state = _MSpanDead
-			h.spanalloc.free(unsafe.Pointer(before))
-		}
+	if before := spanOf(s.base() - 1); before != nil && before.state == _MSpanFree {
+		// Now adjust s.
+		s.startAddr = before.startAddr
+		s.npages += before.npages
+		s.npreleased = before.npreleased // absorb released pages
+		s.needzero |= before.needzero
+		h.setSpan(before.base(), s)
+		// The size is potentially changing so the treap needs to delete adjacent nodes and
+		// insert back as a combined node.
+		if h.isLargeSpan(before.npages) {
+			// We have a t, it is large so it has to be in the treap so we can remove it.
+			h.freelarge.removeSpan(before)
+		} else {
+			h.freeList(before.npages).remove(before)
+		}
+		before.state = _MSpanDead
+		h.spanalloc.free(unsafe.Pointer(before))
 	}
 
 	// Now check to see if next (greater addresses) span is free and can be coalesced.
-	if (p + s.npages) < uintptr(len(h.spans)) {
-		after := h.spans[p+s.npages]
-		if after != nil && after.state == _MSpanFree {
-			s.npages += after.npages
-			s.npreleased += after.npreleased
-			s.needzero |= after.needzero
-			h.spans[p+s.npages-1] = s
-			if h.isLargeSpan(after.npages) {
-				h.freelarge.removeSpan(after)
-			} else {
-				h.freeList(after.npages).remove(after)
-			}
-			after.state = _MSpanDead
-			h.spanalloc.free(unsafe.Pointer(after))
-		}
-	}
+	if after := spanOf(s.base() + s.npages*pageSize); after != nil && after.state == _MSpanFree {
+		s.npages += after.npages
+		s.npreleased += after.npreleased
+		s.needzero |= after.needzero
+		h.setSpan(s.base()+s.npages*pageSize-1, s)
+		if h.isLargeSpan(after.npages) {
+			h.freelarge.removeSpan(after)
+		} else {
+			h.freeList(after.npages).remove(after)
+		}
+		after.state = _MSpanDead
+		h.spanalloc.free(unsafe.Pointer(after))
+	}
 
 	// Insert s into appropriate list or treap.