Commit a90f9a00 authored by Austin Clements's avatar Austin Clements

runtime: consolidate mheap.lookup* and spanOf*

I think we'd forgotten about the mheap.lookup APIs when we introduced
spanOf*, but, at any rate, the spanOf* functions are used far more
widely at this point, so this CL eliminates the mheap.lookup*
functions in favor of spanOf*.

Change-Id: I15facd0856e238bb75d990e838a092b5bef5bdfc
Reviewed-on: https://go-review.googlesource.com/85879
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Rick Hudson <rlh@golang.org>
parent 058bb7ea
...@@ -426,8 +426,13 @@ func inHeapOrStack(b uintptr) bool { ...@@ -426,8 +426,13 @@ func inHeapOrStack(b uintptr) bool {
// TODO: spanOf and spanOfUnchecked are open-coded in a lot of places. // TODO: spanOf and spanOfUnchecked are open-coded in a lot of places.
// Use the functions instead. // Use the functions instead.
// spanOf returns the span of p. If p does not point into the heap or // spanOf returns the span of p. If p does not point into the heap
// no span contains p, spanOf returns nil. // arena or no span has ever contained p, spanOf returns nil.
//
// If p does not point to allocated memory, this may return a non-nil
// span that does *not* contain p. If this is a possibility, the
// caller should either call spanOfHeap or check the span bounds
// explicitly.
func spanOf(p uintptr) *mspan { func spanOf(p uintptr) *mspan {
if p == 0 || p < mheap_.arena_start || p >= mheap_.arena_used { if p == 0 || p < mheap_.arena_start || p >= mheap_.arena_used {
return nil return nil
...@@ -442,6 +447,18 @@ func spanOfUnchecked(p uintptr) *mspan { ...@@ -442,6 +447,18 @@ func spanOfUnchecked(p uintptr) *mspan {
return mheap_.spans[(p-mheap_.arena_start)>>_PageShift] return mheap_.spans[(p-mheap_.arena_start)>>_PageShift]
} }
// spanOfHeap is like spanOf, but returns nil if p does not point to a
// heap object.
func spanOfHeap(p uintptr) *mspan {
	s := spanOf(p)
	if s == nil {
		return nil
	}
	// A pointer into unallocated memory may land on a stale span
	// entry, so the span's bounds and state must also be verified
	// before trusting it.
	if p < s.base() || p >= s.limit || s.state != mSpanInUse {
		return nil
	}
	return s
}
// Initialize the heap. // Initialize the heap.
func (h *mheap) init(spansStart, spansBytes uintptr) { func (h *mheap) init(spansStart, spansBytes uintptr) {
h.treapalloc.init(unsafe.Sizeof(treapNode{}), nil, nil, &memstats.other_sys) h.treapalloc.init(unsafe.Sizeof(treapNode{}), nil, nil, &memstats.other_sys)
...@@ -882,33 +899,6 @@ func (h *mheap) grow(npage uintptr) bool { ...@@ -882,33 +899,6 @@ func (h *mheap) grow(npage uintptr) bool {
return true return true
} }
// Look up the span at the given address.
// Address is guaranteed to be in map
// and is guaranteed to be start or end of span.
func (h *mheap) lookup(v unsafe.Pointer) *mspan {
	// Index the spans map directly by the page offset from the
	// arena start; the caller guarantees v lies within the arena.
	return h.spans[(uintptr(v)-h.arena_start)>>_PageShift]
}
// Look up the span at the given address.
// Address is *not* guaranteed to be in map
// and may be anywhere in the span.
// Map entries for the middle of a span are only
// valid for allocated spans. Free spans may have
// other garbage in their middles, so we have to
// check for that.
func (h *mheap) lookupMaybe(v unsafe.Pointer) *mspan {
	p := uintptr(v)
	// Reject addresses outside the heap arena outright.
	if p < h.arena_start || p >= h.arena_used {
		return nil
	}
	s := h.spans[(p-h.arena_start)>>_PageShift]
	// The span entry may be stale; confirm p actually falls inside
	// an in-use span before returning it.
	if s == nil || p < s.base() || p >= uintptr(unsafe.Pointer(s.limit)) || s.state != _MSpanInUse {
		return nil
	}
	return s
}
// Free the span back into the heap. // Free the span back into the heap.
func (h *mheap) freeSpan(s *mspan, acct int32) { func (h *mheap) freeSpan(s *mspan, acct int32) {
systemstack(func() { systemstack(func() {
...@@ -1297,7 +1287,7 @@ type special struct { ...@@ -1297,7 +1287,7 @@ type special struct {
// (The add will fail only if a record with the same p and s->kind // (The add will fail only if a record with the same p and s->kind
// already exists.) // already exists.)
func addspecial(p unsafe.Pointer, s *special) bool { func addspecial(p unsafe.Pointer, s *special) bool {
span := mheap_.lookupMaybe(p) span := spanOfHeap(uintptr(p))
if span == nil { if span == nil {
throw("addspecial on invalid pointer") throw("addspecial on invalid pointer")
} }
...@@ -1345,7 +1335,7 @@ func addspecial(p unsafe.Pointer, s *special) bool { ...@@ -1345,7 +1335,7 @@ func addspecial(p unsafe.Pointer, s *special) bool {
// Returns the record if the record existed, nil otherwise. // Returns the record if the record existed, nil otherwise.
// The caller must FixAlloc_Free the result. // The caller must FixAlloc_Free the result.
func removespecial(p unsafe.Pointer, kind uint8) *special { func removespecial(p unsafe.Pointer, kind uint8) *special {
span := mheap_.lookupMaybe(p) span := spanOfHeap(uintptr(p))
if span == nil { if span == nil {
throw("removespecial on invalid pointer") throw("removespecial on invalid pointer")
} }
......
...@@ -209,7 +209,7 @@ func stackpoolalloc(order uint8) gclinkptr { ...@@ -209,7 +209,7 @@ func stackpoolalloc(order uint8) gclinkptr {
// Adds stack x to the free pool. Must be called with stackpoolmu held. // Adds stack x to the free pool. Must be called with stackpoolmu held.
func stackpoolfree(x gclinkptr, order uint8) { func stackpoolfree(x gclinkptr, order uint8) {
s := mheap_.lookup(unsafe.Pointer(x)) s := spanOfUnchecked(uintptr(x))
if s.state != _MSpanManual { if s.state != _MSpanManual {
throw("freeing stack not in a stack span") throw("freeing stack not in a stack span")
} }
...@@ -455,7 +455,7 @@ func stackfree(stk stack) { ...@@ -455,7 +455,7 @@ func stackfree(stk stack) {
c.stackcache[order].size += n c.stackcache[order].size += n
} }
} else { } else {
s := mheap_.lookup(v) s := spanOfUnchecked(uintptr(v))
if s.state != _MSpanManual { if s.state != _MSpanManual {
println(hex(s.base()), v) println(hex(s.base()), v)
throw("bad span state") throw("bad span state")
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment