Commit e4ac2d4a authored by Rick Hudson

[dev.garbage] runtime: replace ref with allocCount

This is a renaming of the field ref to the
more appropriate allocCount. The field
holds the number of objects in the span
that are currently allocated. Some throw
messages were adjusted to more accurately
convey the meaning of allocCount.

Change-Id: I10daf44e3e9cc24a10912638c7de3c1984ef8efe
Reviewed-on: https://go-review.googlesource.com/19518
Reviewed-by: Austin Clements <austin@google.com>
parent 3479b065
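
Note (not part of the commit): the rename is easiest to see in isolation before reading the diff. Below is a minimal, self-contained Go sketch of the field and the full-span condition the adjusted throw messages test; the struct is a stand-in, not the runtime's mspan definition.

package main

import "fmt"

// Stand-in for the runtime's mspan, showing only the renamed field.
// Everything else is elided; this is a sketch, not the real type.
type mspan struct {
	nelems     uintptr // total object slots in the span
	allocCount uint16  // was "ref": slots currently allocated
}

// full reports the condition the adjusted throw messages test:
// the span is full exactly when allocCount == nelems.
func (s *mspan) full() bool {
	return uintptr(s.allocCount) == s.nelems
}

func main() {
	s := mspan{nelems: 170, allocCount: 170}
	fmt.Println("span full:", s.full()) // span full: true
}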
@@ -509,8 +509,8 @@ func (c *mcache) nextFree(sizeclass int8) (v gclinkptr, shouldhelpgc bool) {
 	if freeIndex == s.nelems {
 		// The span is full.
-		if uintptr(s.ref) != s.nelems {
-			throw("s.ref != s.nelems && freeIndex == s.nelems")
+		if uintptr(s.allocCount) != s.nelems {
+			throw("s.allocCount != s.nelems && freeIndex == s.nelems")
 		}
 		systemstack(func() {
 			c.refill(int32(sizeclass))
@@ -526,9 +526,9 @@ func (c *mcache) nextFree(sizeclass int8) (v gclinkptr, shouldhelpgc bool) {
 	v = gclinkptr(freeIndex*s.elemsize + s.base())
 	// Advance the freeIndex.
 	s.freeindex = freeIndex + 1
-	s.ref++
-	if uintptr(s.ref) > s.nelems {
-		throw("s.ref > s.nelems")
+	s.allocCount++
+	if uintptr(s.allocCount) > s.nelems {
+		throw("s.allocCount > s.nelems")
 	}
 	return
 }
...
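
For context on the nextFree hunks above, here is a simplified, hypothetical model of the bookkeeping: freeindex scans forward while allocCount counts objects handed out. The runtime's real fast path also consults an allocation bitmap, which this sketch omits.

package main

import "fmt"

// Simplified model of the bookkeeping in nextFree above.
type span struct {
	nelems     uintptr
	freeindex  uintptr
	allocCount uint16
}

func (s *span) nextFree() (idx uintptr, ok bool) {
	if s.freeindex == s.nelems {
		return 0, false // span full; the caller would refill from mcentral
	}
	idx = s.freeindex
	s.freeindex++
	s.allocCount++
	if uintptr(s.allocCount) > s.nelems {
		panic("s.allocCount > s.nelems") // mirrors the throw in the hunk
	}
	return idx, true
}

func main() {
	s := span{nelems: 3}
	for {
		idx, ok := s.nextFree()
		if !ok {
			break
		}
		fmt.Println("slot", idx, "allocCount =", s.allocCount)
	}
}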
@@ -109,7 +109,7 @@ func (c *mcache) refill(sizeclass int32) *mspan {
 	// Return the current cached span to the central lists.
 	s := c.alloc[sizeclass]
-	if uintptr(s.ref) != s.nelems {
+	if uintptr(s.allocCount) != s.nelems {
 		throw("refill of span with free space remaining")
 	}
@@ -123,7 +123,7 @@ func (c *mcache) refill(sizeclass int32) *mspan {
 		throw("out of memory")
 	}
-	if uintptr(s.ref) == s.nelems {
+	if uintptr(s.allocCount) == s.nelems {
 		throw("span has no free space")
 	}
...
@@ -100,11 +100,11 @@ retry:
 	// c is unlocked.
havespan:
 	cap := int32((s.npages << _PageShift) / s.elemsize)
-	n := cap - int32(s.ref)
+	n := cap - int32(s.allocCount)
 	if n == 0 {
-		throw("empty span")
+		throw("span has no free objects")
 	}
-	usedBytes := uintptr(s.ref) * s.elemsize
+	usedBytes := uintptr(s.allocCount) * s.elemsize
 	if usedBytes > 0 {
 		reimburseSweepCredit(usedBytes)
 	}
@@ -127,12 +127,12 @@ func (c *mcentral) uncacheSpan(s *mspan) {
 	s.incache = false
-	if s.ref == 0 {
-		throw("uncaching full span")
+	if s.allocCount == 0 {
+		throw("uncaching span but s.allocCount == 0")
 	}
 	cap := int32((s.npages << _PageShift) / s.elemsize)
-	n := cap - int32(s.ref)
+	n := cap - int32(s.allocCount)
 	if n > 0 {
 		c.empty.remove(s)
 		c.nonempty.insert(s)
@@ -154,7 +154,7 @@ func (c *mcentral) freeSpan(s *mspan, n int32, start gclinkptr, end gclinkptr, p
 		throw("freeSpan given cached span")
 	}
-	s.ref -= uint16(n)
+	s.allocCount -= uint16(n)
 	if preserve {
 		// preserve is set only when called from MCentral_CacheSpan above,
@@ -180,7 +180,7 @@ func (c *mcentral) freeSpan(s *mspan, n int32, start gclinkptr, end gclinkptr, p
 	// lock of c above.)
 	atomic.Store(&s.sweepgen, mheap_.sweepgen)
-	if s.ref != 0 {
+	if s.allocCount != 0 {
 		unlock(&c.lock)
 		return false
 	}
...
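
The cap/n arithmetic in cacheSpan and uncacheSpan above can be checked by hand. A worked example under assumed values: 8 KB pages (_PageShift == 13) and a 48-byte size class, both illustrative rather than taken from the commit.

package main

import "fmt"

const pageShift = 13 // assumed 8 KB runtime pages

func main() {
	npages := uintptr(1)     // hypothetical one-page span
	elemsize := uintptr(48)  // hypothetical size class
	allocCount := int32(120) // objects already allocated

	cap := int32((npages << pageShift) / elemsize) // 8192/48 = 170 slots
	n := cap - allocCount                          // 170-120 = 50 free objects
	fmt.Println("cap:", cap, "free:", n)
	if n == 0 {
		fmt.Println("span has no free objects") // the new throw message
	}
}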
@@ -159,7 +159,7 @@ type mspan struct {
 	sweepgen    uint32
 	divMul      uint32 // for divide by elemsize - divMagic.mul
-	ref         uint16 // capacity - number of objects in freelist
+	allocCount  uint16 // capacity - number of objects in freelist
 	sizeclass   uint8  // size class
 	incache     bool   // being used by an mcache
 	state       uint8  // mspaninuse etc
@@ -471,7 +471,7 @@ func (h *mheap) alloc_m(npage uintptr, sizeclass int32, large bool) *mspan {
 		// able to map interior pointer to containing span.
 		atomic.Store(&s.sweepgen, h.sweepgen)
 		s.state = _MSpanInUse
-		s.ref = 0
+		s.allocCount = 0
 		s.sizeclass = uint8(sizeclass)
 		if sizeclass == 0 {
 			s.elemsize = s.npages << _PageShift
@@ -551,7 +551,7 @@ func (h *mheap) allocStack(npage uintptr) *mspan {
 	if s != nil {
 		s.state = _MSpanStack
 		s.stackfreelist = 0
-		s.ref = 0
+		s.allocCount = 0
 		memstats.stacks_inuse += uint64(s.npages << _PageShift)
 	}
@@ -773,12 +773,12 @@ func (h *mheap) freeStack(s *mspan) {
 func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince int64) {
 	switch s.state {
 	case _MSpanStack:
-		if s.ref != 0 {
+		if s.allocCount != 0 {
 			throw("MHeap_FreeSpanLocked - invalid stack free")
 		}
 	case _MSpanInUse:
-		if s.ref != 0 || s.sweepgen != h.sweepgen {
-			print("MHeap_FreeSpanLocked - span ", s, " ptr ", hex(s.start<<_PageShift), " ref ", s.ref, " sweepgen ", s.sweepgen, "/", h.sweepgen, "\n")
+		if s.allocCount != 0 || s.sweepgen != h.sweepgen {
+			print("MHeap_FreeSpanLocked - span ", s, " ptr ", hex(s.start<<_PageShift), " allocCount ", s.allocCount, " sweepgen ", s.sweepgen, "/", h.sweepgen, "\n")
 			throw("MHeap_FreeSpanLocked - invalid free")
 		}
 		h.pagesInUse -= uint64(s.npages)
@@ -912,7 +912,7 @@ func (span *mspan) init(start pageID, npages uintptr) {
 	span.list = nil
 	span.start = start
 	span.npages = npages
-	span.ref = 0
+	span.allocCount = 0
 	span.sizeclass = 0
 	span.incache = false
 	span.elemsize = 0
...
@@ -295,9 +295,9 @@ func updatememstats(stats *gcstats) {
 			memstats.nmalloc++
 			memstats.alloc += uint64(s.elemsize)
 		} else {
-			memstats.nmalloc += uint64(s.ref)
-			memstats.by_size[s.sizeclass].nmalloc += uint64(s.ref)
-			memstats.alloc += uint64(s.ref) * uint64(s.elemsize)
+			memstats.nmalloc += uint64(s.allocCount)
+			memstats.by_size[s.sizeclass].nmalloc += uint64(s.allocCount)
+			memstats.alloc += uint64(s.allocCount) * uint64(s.elemsize)
 		}
 	}
 	unlock(&mheap_.lock)
...
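
The updatememstats hunk attributes one malloc per live object and allocCount * elemsize bytes to each small-object span. A tiny sketch of that accounting with made-up numbers:

package main

import "fmt"

func main() {
	allocCount := uint64(120) // hypothetical live objects in the span
	elemsize := uint64(48)    // hypothetical object size

	var nmalloc, alloc uint64
	nmalloc += allocCount          // 120 mallocs attributed to the span
	alloc += allocCount * elemsize // 120*48 = 5760 bytes in use
	fmt.Println("nmalloc:", nmalloc, "alloc:", alloc)
}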
@@ -191,8 +191,8 @@ func stackpoolalloc(order uint8) gclinkptr {
 		if s == nil {
 			throw("out of memory")
 		}
-		if s.ref != 0 {
-			throw("bad ref")
+		if s.allocCount != 0 {
+			throw("bad allocCount")
 		}
 		if s.stackfreelist.ptr() != nil {
 			throw("bad stackfreelist")
@@ -209,7 +209,7 @@ func stackpoolalloc(order uint8) gclinkptr {
 		throw("span has no free stacks")
 	}
 	s.stackfreelist = x.ptr().next
-	s.ref++
+	s.allocCount++
 	if s.stackfreelist.ptr() == nil {
 		// all stacks in s are allocated.
 		list.remove(s)
@@ -229,8 +229,8 @@ func stackpoolfree(x gclinkptr, order uint8) {
 	}
 	x.ptr().next = s.stackfreelist
 	s.stackfreelist = x
-	s.ref--
-	if gcphase == _GCoff && s.ref == 0 {
+	s.allocCount--
+	if gcphase == _GCoff && s.allocCount == 0 {
 		// Span is completely free. Return it to the heap
 		// immediately if we're sweeping.
 		//
@@ -1135,7 +1135,7 @@ func freeStackSpans() {
 		list := &stackpool[order]
 		for s := list.first; s != nil; {
 			next := s.next
-			if s.ref == 0 {
+			if s.allocCount == 0 {
 				list.remove(s)
 				s.stackfreelist = 0
 				mheap_.freeStack(s)
...
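
In the stack.go hunks above, allocCount doubles as a count of live stacks in a stack-pool span; once it drops to zero outside a GC, the span can go back to the heap. A simplified, hypothetical model of that lifecycle (the freelist is reduced to a plain counter):

package main

import "fmt"

// Simplified model of the stack-pool bookkeeping above; not the
// runtime's types.
type stackSpan struct {
	allocCount uint16
	free       int // stand-in for stackfreelist
}

func (s *stackSpan) alloc() {
	s.free--
	s.allocCount++
}

func (s *stackSpan) release() {
	s.free++
	s.allocCount--
	if s.allocCount == 0 {
		// Mirrors the s.allocCount == 0 checks in stackpoolfree and
		// freeStackSpans: the span would return via mheap_.freeStack.
		fmt.Println("span completely free; eligible to return to the heap")
	}
}

func main() {
	s := stackSpan{free: 2}
	s.alloc()
	s.alloc()
	s.release()
	s.release() // triggers the completely-free message
}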