Commit 1652a2c3 authored by Matthew Dempsky

runtime: add mSpanList type to represent lists of mspans

This CL introduces a new mSpanList type to replace the empty mspan
variables that were previously used as list heads.

To be type safe, the previous circular linked list data structure is
now a tail queue instead.  One complication of this is
mSpanList_Remove needs to know the list a span is being removed from,
but this appears to be computable in all circumstances.

As a temporary sanity check, mSpanList_Insert and mSpanList_InsertBack
record the list that an mspan has been inserted into so that
mSpanList_Remove can verify that the correct list was specified.

Whereas mspan is 112 bytes on amd64, mSpanList is only 16 bytes.  This
shrinks the size of mheap from 50216 bytes to 12584 bytes.

Change-Id: I8146364753dbc3b4ab120afbb9c7b8740653c216
Reviewed-on: https://go-review.googlesource.com/15906
Run-TryBot: Matthew Dempsky <mdempsky@google.com>
Reviewed-by: Austin Clements <austin@google.com>
parent 151f4ec9
......@@ -16,8 +16,8 @@ package runtime
type mcentral struct {
lock mutex
sizeclass int32
nonempty mspan // list of spans with a free object
empty mspan // list of spans with no free objects (or cached in an mcache)
nonempty mSpanList // list of spans with a free object
empty mSpanList // list of spans with no free objects (or cached in an mcache)
}
// Initialize a single central free list.
......@@ -36,9 +36,9 @@ func mCentral_CacheSpan(c *mcentral) *mspan {
sg := mheap_.sweepgen
retry:
var s *mspan
for s = c.nonempty.next; s != &c.nonempty; s = s.next {
for s = c.nonempty.first; s != nil; s = s.next {
if s.sweepgen == sg-2 && cas(&s.sweepgen, sg-2, sg-1) {
mSpanList_Remove(s)
mSpanList_Remove(&c.nonempty, s)
mSpanList_InsertBack(&c.empty, s)
unlock(&c.lock)
mSpan_Sweep(s, true)
......@@ -49,17 +49,17 @@ retry:
continue
}
// we have a nonempty span that does not require sweeping, allocate from it
mSpanList_Remove(s)
mSpanList_Remove(&c.nonempty, s)
mSpanList_InsertBack(&c.empty, s)
unlock(&c.lock)
goto havespan
}
for s = c.empty.next; s != &c.empty; s = s.next {
for s = c.empty.first; s != nil; s = s.next {
if s.sweepgen == sg-2 && cas(&s.sweepgen, sg-2, sg-1) {
// we have an empty span that requires sweeping,
// sweep it and see if we can free some space in it
mSpanList_Remove(s)
mSpanList_Remove(&c.empty, s)
// swept spans are at the end of the list
mSpanList_InsertBack(&c.empty, s)
unlock(&c.lock)
......@@ -119,7 +119,7 @@ func mCentral_UncacheSpan(c *mcentral, s *mspan) {
cap := int32((s.npages << _PageShift) / s.elemsize)
n := cap - int32(s.ref)
if n > 0 {
mSpanList_Remove(s)
mSpanList_Remove(&c.empty, s)
mSpanList_Insert(&c.nonempty, s)
}
unlock(&c.lock)
......@@ -145,7 +145,7 @@ func mCentral_FreeSpan(c *mcentral, s *mspan, n int32, start gclinkptr, end gcli
if preserve {
// preserve is set only when called from MCentral_CacheSpan above,
// the span must be in the empty list.
if s.next == nil {
if !mSpan_InList(s) {
throw("can't preserve unlinked span")
}
atomicstore(&s.sweepgen, mheap_.sweepgen)
......@@ -156,7 +156,7 @@ func mCentral_FreeSpan(c *mcentral, s *mspan, n int32, start gclinkptr, end gcli
// Move to nonempty if necessary.
if wasempty {
mSpanList_Remove(s)
mSpanList_Remove(&c.empty, s)
mSpanList_Insert(&c.nonempty, s)
}
......@@ -172,7 +172,7 @@ func mCentral_FreeSpan(c *mcentral, s *mspan, n int32, start gclinkptr, end gcli
}
// s is completely freed, return it to the heap.
mSpanList_Remove(s)
mSpanList_Remove(&c.nonempty, s)
s.needzero = 1
s.freelist = 0
unlock(&c.lock)
......
This diff is collapsed.
......@@ -142,12 +142,12 @@ const (
// order = log_2(size/FixedStack)
// There is a free list for each order.
// TODO: one lock per order?
var stackpool [_NumStackOrders]mspan
var stackpool [_NumStackOrders]mSpanList
var stackpoolmu mutex
// List of stack spans to be freed at the end of GC. Protected by
// stackpoolmu.
var stackFreeQueue mspan
var stackFreeQueue mSpanList
// Cached value of haveexperiment("framepointer")
var framepointer_enabled bool
......@@ -166,8 +166,8 @@ func stackinit() {
// stackpoolmu held.
func stackpoolalloc(order uint8) gclinkptr {
list := &stackpool[order]
s := list.next
if s == list {
s := list.first
if s == nil {
// no free stacks. Allocate another span worth.
s = mHeap_AllocStack(&mheap_, _StackCacheSize>>_PageShift)
if s == nil {
......@@ -194,7 +194,7 @@ func stackpoolalloc(order uint8) gclinkptr {
s.ref++
if s.freelist.ptr() == nil {
// all stacks in s are allocated.
mSpanList_Remove(s)
mSpanList_Remove(list, s)
}
return x
}
......@@ -228,7 +228,7 @@ func stackpoolfree(x gclinkptr, order uint8) {
// pointer into a free span.
//
// By not freeing, we prevent step #4 until GC is done.
mSpanList_Remove(s)
mSpanList_Remove(&stackpool[order], s)
s.freelist = 0
mHeap_FreeStack(&mheap_, s)
}
......@@ -994,10 +994,10 @@ func freeStackSpans() {
// Scan stack pools for empty stack spans.
for order := range stackpool {
list := &stackpool[order]
for s := list.next; s != list; {
for s := list.first; s != nil; {
next := s.next
if s.ref == 0 {
mSpanList_Remove(s)
mSpanList_Remove(list, s)
s.freelist = 0
mHeap_FreeStack(&mheap_, s)
}
......@@ -1006,9 +1006,9 @@ func freeStackSpans() {
}
// Free queued stack spans.
for stackFreeQueue.next != &stackFreeQueue {
s := stackFreeQueue.next
mSpanList_Remove(s)
for !mSpanList_IsEmpty(&stackFreeQueue) {
s := stackFreeQueue.first
mSpanList_Remove(&stackFreeQueue, s)
mHeap_FreeStack(&mheap_, s)
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment