Commit 9903d687 authored by Dmitriy Vyukov

runtime: minor refactoring in preparation for parallel GC

factor sweepspan() out of sweep(), no logical changes

R=golang-dev, rsc
CC=golang-dev
https://golang.org/cl/5991047
parent d839a809
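
Factoring the per-span work out matters because a whole-heap sweep loop can only run on one thread, while a per-span sweepspan() is a unit of work that several GC workers could claim and execute concurrently. That parallel dispatch does not exist yet in this commit. Below is a rough standalone sketch, not runtime code, of the direction: worker threads pop spans off a shared list with compare-and-swap, so each popped span has exactly one owner. All names here (Span, claimspan, sweeper, NWORKERS) are invented for illustration.

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

enum { NWORKERS = 4, NSPANS = 16 };

typedef struct Span Span;
struct Span {
	int   id;
	Span *next;
};

static _Atomic(Span*) spanlist;   // shared work list, like work.spans

// Claim one span with compare-and-swap; NULL once the list is drained.
static Span*
claimspan(void)
{
	Span *s;

	for(;;) {
		s = atomic_load(&spanlist);
		if(s == NULL)
			return NULL;
		if(atomic_compare_exchange_weak(&spanlist, &s, s->next))
			return s;   // this thread now owns s exclusively
	}
}

// Stand-in for the real sweepspan(): the owner needs no atomics here.
static void
sweepspan(Span *s, int worker)
{
	printf("worker %d sweeps span %d\n", worker, s->id);
}

static void*
sweeper(void *arg)
{
	Span *s;
	int id;

	id = (int)(intptr_t)arg;
	while((s = claimspan()) != NULL)
		sweepspan(s, id);
	return NULL;
}

int
main(void)
{
	static Span spans[NSPANS];
	pthread_t tid[NWORKERS];
	int i;

	for(i = 0; i < NSPANS; i++) {
		spans[i].id = i;
		spans[i].next = i+1 < NSPANS ? &spans[i+1] : NULL;
	}
	atomic_store(&spanlist, &spans[0]);

	for(i = 0; i < NWORKERS; i++)
		pthread_create(&tid[i], NULL, sweeper, (void*)(intptr_t)i);
	for(i = 0; i < NWORKERS; i++)
		pthread_join(tid[i], NULL);
	return 0;
}

Exclusive ownership is what lets the real sweepspan() touch the block bitmap without atomic operations, as the comment carried over in the diff below notes. (Build with cc -pthread.)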
@@ -719,22 +719,17 @@ handlespecial(byte *p, uintptr size)
 	return true;
 }
 
+static void sweepspan(MSpan *s);
+
 // Sweep frees or collects finalizers for blocks not marked in the mark phase.
 // It clears the mark bits in preparation for the next GC round.
 static void
 sweep(void)
 {
 	MSpan *s;
-	int32 cl, n, npages;
-	uintptr size;
-	byte *p;
-	MCache *c;
-	byte *arena_start;
 	int64 now;
 
-	arena_start = runtime·mheap.arena_start;
 	now = runtime·nanotime();
 	for(;;) {
 		s = work.spans;
 		if(s == nil)
 			break;
@@ -750,69 +745,82 @@ sweep(void)
 		if(s->state != MSpanInUse)
 			continue;
-
-		p = (byte*)(s->start << PageShift);
-		cl = s->sizeclass;
-		if(cl == 0) {
-			size = s->npages<<PageShift;
-			n = 1;
-		} else {
-			// Chunk full of small blocks.
-			size = runtime·class_to_size[cl];
-			npages = runtime·class_to_allocnpages[cl];
-			n = (npages << PageShift) / size;
-		}
-
-		// Sweep through n objects of given size starting at p.
-		// This thread owns the span now, so it can manipulate
-		// the block bitmap without atomic operations.
-		for(; n > 0; n--, p += size) {
-			uintptr off, *bitp, shift, bits;
-
-			off = (uintptr*)p - (uintptr*)arena_start;
-			bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1;
-			shift = off % wordsPerBitmapWord;
-			bits = *bitp>>shift;
-
-			if((bits & bitAllocated) == 0)
-				continue;
-
-			if((bits & bitMarked) != 0) {
-				if(DebugMark) {
-					if(!(bits & bitSpecial))
-						runtime·printf("found spurious mark on %p\n", p);
-					*bitp &= ~(bitSpecial<<shift);
-				}
-				*bitp &= ~(bitMarked<<shift);
-				continue;
-			}
-
-			// Special means it has a finalizer or is being profiled.
-			// In DebugMark mode, the bit has been coopted so
-			// we have to assume all blocks are special.
-			if(DebugMark || (bits & bitSpecial) != 0) {
-				if(handlespecial(p, size))
-					continue;
-			}
-
-			// Mark freed; restore block boundary bit.
-			*bitp = (*bitp & ~(bitMask<<shift)) | (bitBlockBoundary<<shift);
-
-			c = m->mcache;
-			if(s->sizeclass == 0) {
-				// Free large span.
-				runtime·unmarkspan(p, 1<<PageShift);
-				*(uintptr*)p = 1;	// needs zeroing
-				runtime·MHeap_Free(&runtime·mheap, s, 1);
-			} else {
-				// Free small object.
-				if(size > sizeof(uintptr))
-					((uintptr*)p)[1] = 1;	// mark as "needs to be zeroed"
-				c->local_by_size[s->sizeclass].nfree++;
-				runtime·MCache_Free(c, p, s->sizeclass, size);
-			}
-			c->local_alloc -= size;
-			c->local_nfree++;
-		}
+		sweepspan(s);
 	}
 }
+
+static void
+sweepspan(MSpan *s)
+{
+	int32 cl, n, npages;
+	uintptr size;
+	byte *p;
+	MCache *c;
+	byte *arena_start;
+
+	arena_start = runtime·mheap.arena_start;
+	p = (byte*)(s->start << PageShift);
+	cl = s->sizeclass;
+	if(cl == 0) {
+		size = s->npages<<PageShift;
+		n = 1;
+	} else {
+		// Chunk full of small blocks.
+		size = runtime·class_to_size[cl];
+		npages = runtime·class_to_allocnpages[cl];
+		n = (npages << PageShift) / size;
+	}
+
+	// Sweep through n objects of given size starting at p.
+	// This thread owns the span now, so it can manipulate
+	// the block bitmap without atomic operations.
+	for(; n > 0; n--, p += size) {
+		uintptr off, *bitp, shift, bits;
+
+		off = (uintptr*)p - (uintptr*)arena_start;
+		bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1;
+		shift = off % wordsPerBitmapWord;
+		bits = *bitp>>shift;
+
+		if((bits & bitAllocated) == 0)
+			continue;
+
+		if((bits & bitMarked) != 0) {
+			if(DebugMark) {
+				if(!(bits & bitSpecial))
+					runtime·printf("found spurious mark on %p\n", p);
+				*bitp &= ~(bitSpecial<<shift);
+			}
+			*bitp &= ~(bitMarked<<shift);
+			continue;
+		}
+
+		// Special means it has a finalizer or is being profiled.
+		// In DebugMark mode, the bit has been coopted so
+		// we have to assume all blocks are special.
+		if(DebugMark || (bits & bitSpecial) != 0) {
+			if(handlespecial(p, size))
+				continue;
+		}
+
+		// Mark freed; restore block boundary bit.
+		*bitp = (*bitp & ~(bitMask<<shift)) | (bitBlockBoundary<<shift);
+
+		c = m->mcache;
+		if(s->sizeclass == 0) {
+			// Free large span.
+			runtime·unmarkspan(p, 1<<PageShift);
+			*(uintptr*)p = 1;	// needs zeroing
+			runtime·MHeap_Free(&runtime·mheap, s, 1);
+		} else {
+			// Free small object.
+			if(size > sizeof(uintptr))
+				((uintptr*)p)[1] = 1;	// mark as "needs to be zeroed"
+			c->local_by_size[s->sizeclass].nfree++;
+			runtime·MCache_Free(c, p, s->sizeclass, size);
+		}
+		c->local_alloc -= size;
+		c->local_nfree++;
+	}
+}
...
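
The off/bitp/shift arithmetic in sweepspan() is the densest part of the diff. The GC bitmap lives immediately below arena_start and grows downward; the word offset of a block from the arena base selects a bitmap word, and the remainder selects a shift within it, with one bit plane per flag (allocated, block boundary, marked, special). The sketch below models just that arithmetic on a local array; the 4-bits-per-heap-word packing is an assumption about this era of the runtime, and the addresses are fake.

#include <stdint.h>
#include <stdio.h>

#define wordsPerBitmapWord (sizeof(void*)*8/4)  // assumption: 4 bits per heap word
#define bitAllocated ((uintptr_t)1)             // lowest bit plane, as in mgc0.c

int
main(void)
{
	static uintptr_t mem[64];
	uintptr_t *arena_start, *p, *bitp;
	uintptr_t off, shift;

	arena_start = mem + 32;   // bitmap occupies the words below this
	p = arena_start + 13;     // pretend block: 13th word of the arena

	// Same arithmetic as the diff: the word offset from the arena base
	// picks a bitmap word below arena_start and a shift inside it.
	off = p - arena_start;
	bitp = arena_start - off/wordsPerBitmapWord - 1;
	shift = off % wordsPerBitmapWord;

	*bitp |= bitAllocated<<shift;   // mark the block allocated
	printf("off=%lu bitp=mem[%ld] shift=%lu bits=%#lx\n",
		(unsigned long)off, (long)(bitp - mem),
		(unsigned long)shift, (unsigned long)(*bitp>>shift));
	return 0;
}

On a 64-bit machine this prints off=13 bitp=mem[31] shift=13 bits=0x1: the thirteenth heap word's flags land thirteen bits up in the bitmap word just below the arena base.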