Commit 8fbaa4f7 authored by Austin Clements

runtime: rename _MSpanStack -> _MSpanManual

We're about to generalize _MSpanStack to be used for other forms of
in-heap manual memory management in the runtime. This is an automated
rename of _MSpanStack to _MSpanManual plus some comment fix-ups.

For #19325.

Change-Id: I1e20a57bb3b87a0d324382f92a3e294ffc767395
Reviewed-on: https://go-review.googlesource.com/38574
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Rick Hudson <rlh@golang.org>
parent 16df2ccd
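
Note on mechanics: the message above describes the change as an automated rename. Go's gofmt supports exactly this kind of mechanical rewrite; one plausible invocation (the commit does not record the actual tool or command used) is gofmt -r '_MSpanStack -> _MSpanManual' -w src/runtime, which rewrites every occurrence of the identifier in code but leaves comments untouched, matching the "plus some comment fix-ups" note.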
runtime/cgocheck.go
@@ -124,7 +124,7 @@ func cgoCheckTypedBlock(typ *_type, src unsafe.Pointer, off, size uintptr) {
 	aoff := uintptr(src) - mheap_.arena_start
 	idx := aoff >> _PageShift
 	s := mheap_.spans[idx]
-	if s.state == _MSpanStack {
+	if s.state == _MSpanManual {
 		// There are no heap bits for value stored on the stack.
 		// For a channel receive src might be on the stack of some
 		// other goroutine, so we can't unwind the stack even if
runtime/mbitmap.go
@@ -391,7 +391,7 @@ func heapBitsForObject(p, refBase, refOff uintptr) (base uintptr, hbits heapBits
 	// Consult the span table to find the block beginning.
 	s = mheap_.spans[idx]
 	if s == nil || p < s.base() || p >= s.limit || s.state != mSpanInUse {
-		if s == nil || s.state == _MSpanStack {
+		if s == nil || s.state == _MSpanManual {
 			// If s is nil, the virtual address has never been part of the heap.
 			// This pointer may be to some mmap'd region, so we allow it.
 			// Pointers into stacks are also ok, the runtime manages these explicitly.
runtime/mgcmark.go
@@ -1299,7 +1299,7 @@ func gcDumpObject(label string, obj, off uintptr) {
 	skipped := false
 	size := s.elemsize
-	if s.state == _MSpanStack && size == 0 {
+	if s.state == _MSpanManual && size == 0 {
 		// We're printing something from a stack frame. We
 		// don't know how big it is, so just show up to an
 		// including off.
runtime/mheap.go
@@ -121,7 +121,7 @@ var mheap_ mheap
 // When a MSpan is in the heap free list, state == MSpanFree
 // and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span.
 //
-// When a MSpan is allocated, state == MSpanInUse or MSpanStack
+// When a MSpan is allocated, state == MSpanInUse or MSpanManual
 // and heapmap(i) == span for all s->start <= i < s->start+s->npages.
 // Every MSpan is in one doubly-linked list,
@@ -129,25 +129,25 @@ var mheap_ mheap
 // MCentral's span lists.
 
 // An MSpan representing actual memory has state _MSpanInUse,
-// _MSpanStack, or _MSpanFree. Transitions between these states are
+// _MSpanManual, or _MSpanFree. Transitions between these states are
 // constrained as follows:
 //
-// * A span may transition from free to in-use or stack during any GC
+// * A span may transition from free to in-use or manual during any GC
 //   phase.
 //
 // * During sweeping (gcphase == _GCoff), a span may transition from
-//   in-use to free (as a result of sweeping) or stack to free (as a
+//   in-use to free (as a result of sweeping) or manual to free (as a
 //   result of stacks being freed).
 //
 // * During GC (gcphase != _GCoff), a span *must not* transition from
-//   stack or in-use to free. Because concurrent GC may read a pointer
+//   manual or in-use to free. Because concurrent GC may read a pointer
 //   and then look up its span, the span state must be monotonic.
 type mSpanState uint8
 
 const (
 	_MSpanDead   mSpanState = iota
 	_MSpanInUse               // allocated for garbage collected heap
-	_MSpanStack               // allocated for use by stack allocator
+	_MSpanManual              // allocated for manual management (e.g., stack allocator)
 	_MSpanFree
 )
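
The comment block in this hunk defines a small state machine. Purely as an illustration (no such helper exists in the runtime; the rules are enforced by convention), the documented constraints could be restated as a check like the following, where gcRunning stands for gcphase != _GCoff:

// isValidTransition is a hypothetical helper that restates the
// transition rules above as code. The runtime has no such function.
func isValidTransition(from, to mSpanState, gcRunning bool) bool {
	switch {
	case from == _MSpanFree && (to == _MSpanInUse || to == _MSpanManual):
		// A free span may be allocated during any GC phase.
		return true
	case (from == _MSpanInUse || from == _MSpanManual) && to == _MSpanFree:
		// An allocated span may be freed only while sweeping,
		// i.e. when GC is not running; during GC the state must
		// change monotonically so a concurrent reader that saw
		// an allocated span never finds it freed.
		return !gcRunning
	default:
		return false
	}
}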
@@ -156,7 +156,7 @@ const (
 var mSpanStateNames = []string{
 	"_MSpanDead",
 	"_MSpanInUse",
-	"_MSpanStack",
+	"_MSpanManual",
 	"_MSpanFree",
 }
@@ -297,7 +297,7 @@ func recordspan(vh unsafe.Pointer, p unsafe.Pointer) {
 }
 
 // inheap reports whether b is a pointer into a (potentially dead) heap object.
-// It returns false for pointers into stack spans.
+// It returns false for pointers into _MSpanManual spans.
 // Non-preemptible because it is used by write barriers.
 //go:nowritebarrier
 //go:nosplit
@@ -313,7 +313,9 @@ func inheap(b uintptr) bool {
 	return true
 }
 
-// inHeapOrStack is a variant of inheap that returns true for pointers into stack spans.
+// inHeapOrStack is a variant of inheap that returns true for pointers
+// into any allocated heap span.
+//
 //go:nowritebarrier
 //go:nosplit
 func inHeapOrStack(b uintptr) bool {
@@ -326,7 +328,7 @@ func inHeapOrStack(b uintptr) bool {
 		return false
 	}
 	switch s.state {
-	case mSpanInUse, _MSpanStack:
+	case mSpanInUse, _MSpanManual:
 		return b < s.limit
 	default:
 		return false
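
After this hunk, the two predicates differ only in how they treat _MSpanManual spans: inheap reports false for them, while inHeapOrStack reports true. A hypothetical sketch (not runtime code) of how the pair partitions a pointer:

// classify is illustrative only: it shows the three-way split the two
// real predicates give after this change.
func classify(b uintptr) string {
	switch {
	case inheap(b):
		return "GC-managed heap object"
	case inHeapOrStack(b):
		// In an allocated span, but the span is _MSpanManual:
		// memory the runtime manages explicitly, such as a
		// goroutine stack.
		return "manually managed span"
	default:
		return "not in any allocated span"
	}
}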
@@ -669,7 +671,7 @@ func (h *mheap) allocStack(npage uintptr) *mspan {
 	lock(&h.lock)
 	s := h.allocSpanLocked(npage)
 	if s != nil {
-		s.state = _MSpanStack
+		s.state = _MSpanManual
 		s.stackfreelist = 0
 		s.allocCount = 0
 		s.sizeclass = 0
@@ -739,8 +741,8 @@ HaveSpan:
 		h.spans[p] = t
 		h.spans[p+t.npages-1] = t
 		t.needzero = s.needzero
-		s.state = _MSpanStack // prevent coalescing with s
-		t.state = _MSpanStack
+		s.state = _MSpanManual // prevent coalescing with s
+		t.state = _MSpanManual
 		h.freeSpanLocked(t, false, false, s.unusedsince)
 		s.state = _MSpanFree
 	}
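
The two _MSpanManual stores in this hunk are a guard rather than a real state change: freeSpanLocked coalesces a freed span with physically adjacent free spans, and s, which t was just split from, is still marked free at this point. A simplified restatement of the trick with fuller comments:

// Simplified restatement of the hunk above. t is the surplus tail
// just split off of s, so the two spans are physically adjacent.
s.state = _MSpanManual // s is still free; mark it so freeSpanLocked
                       // cannot coalesce t right back into it.
t.state = _MSpanManual // also lets freeSpanLocked accept t as an
                       // allocated span rather than a double free.
h.freeSpanLocked(t, false, false, s.unusedsince)
s.state = _MSpanFree // restore s; its final state is set by the caller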
@@ -892,7 +894,7 @@ func (h *mheap) freeStack(s *mspan) {
 // s must be on a busy list (h.busy or h.busylarge) or unlinked.
 func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince int64) {
 	switch s.state {
-	case _MSpanStack:
+	case _MSpanManual:
 		if s.allocCount != 0 {
 			throw("MHeap_FreeSpanLocked - invalid stack free")
 		}
runtime/stack.go
@@ -220,7 +220,7 @@ func stackpoolalloc(order uint8) gclinkptr {
 // Adds stack x to the free pool. Must be called with stackpoolmu held.
 func stackpoolfree(x gclinkptr, order uint8) {
 	s := mheap_.lookup(unsafe.Pointer(x))
-	if s.state != _MSpanStack {
+	if s.state != _MSpanManual {
 		throw("freeing stack not in a stack span")
 	}
 	if s.stackfreelist.ptr() == nil {
@@ -465,7 +465,7 @@ func stackfree(stk stack) {
 		}
 	} else {
 		s := mheap_.lookup(v)
-		if s.state != _MSpanStack {
+		if s.state != _MSpanManual {
			println(hex(s.base()), v)
			throw("bad span state")
		}
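
At this commit the stack allocator is still the only user of _MSpanManual; the issue this works toward (#19325) is letting other runtime-internal allocators manage spans the same way. A hypothetical sketch of what such a generalized entry point could look like (allocManual is an assumed name, not an API introduced by this commit; allocSpanLocked is the real primitive it would wrap):

// allocManual is a hypothetical generalization of allocStack: it hands
// out a span for manual management by some runtime-internal allocator
// other than the stack allocator.
func (h *mheap) allocManual(npage uintptr) *mspan {
	lock(&h.lock)
	s := h.allocSpanLocked(npage)
	if s != nil {
		s.state = _MSpanManual // GC will not scan or sweep this span
		s.allocCount = 0
		s.sizeclass = 0
	}
	unlock(&h.lock)
	return s
}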