Commit 86e3cb8d authored by Russ Cox

runtime: introduce MSpan.needzero instead of writing to span data

This cleans up the code significantly, and it avoids any
possible problems with madvise zeroing out some but
not all of the data.

Fixes #6400.

LGTM=dave
R=dvyukov, dave
CC=golang-codereviews
https://golang.org/cl/57680046
parent f8e4a2ef
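
Note (not part of the commit): the hazard the message describes is that the old code stored the "needs zeroing" mark in the first word of the span's own memory. After runtime·SysUnused (madvise on Unix), some kernels may hand back the first page zeroed while later pages still hold old data, so the mark could falsely read "already zero" for a span that is not all zeros. A minimal sketch of the out-of-band flag pattern the commit adopts follows; Span, span_alloc, span_free, and the fields below are hypothetical names for illustration, not the runtime's code.

	#include <stdint.h>
	#include <stdlib.h>
	#include <string.h>

	typedef struct Span {
		void   *base;      /* start of the span's memory */
		size_t  nbytes;    /* size of the span */
		uint8_t needzero;  /* out-of-band flag: contents may be stale */
	} Span;

	/* Freeing only sets the header flag; nothing is written into the
	 * span's own memory, so madvise may later zero any subset of its
	 * pages without corrupting the bookkeeping. */
	static void span_free(Span *s)
	{
		s->needzero = 1;
	}

	/* Allocation zeroes the memory only when the caller wants zeroed
	 * memory and the span may be dirty, then records that the span is
	 * now known-clean. Mirrors the new logic in runtime·MHeap_Alloc. */
	static void *span_alloc(Span *s, int needzero)
	{
		if(needzero && s->needzero)
			memset(s->base, 0, s->nbytes);
		s->needzero = 0;
		return s->base;
	}

Because the flag lives in the span header, which madvise never touches, partially zeroed pages can no longer be mistaken for a fully zeroed span.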
@@ -315,7 +315,7 @@ runtime·free(void *v)
 	c = m->mcache;
 	if(sizeclass == 0) {
 		// Large object.
-		*(uintptr*)(s->start<<PageShift) = (uintptr)0xfeedfeedfeedfeedll;	// mark as "needs to be zeroed"
+		s->needzero = 1;
 		// Must mark v freed before calling unmarkspan and MHeap_Free:
 		// they might coalesce v into other spans and change the bitmap further.
 		runtime·markfreed(v, size);
...
@@ -412,6 +412,7 @@ struct MSpan
 	uint16	ref;		// number of allocated objects in this span
 	uint8	sizeclass;	// size class
 	uint8	state;		// MSpanInUse etc
+	uint8	needzero;	// needs to be zeroed before allocation
 	uintptr	elemsize;	// computed from sizeclass or from npages
 	int64	unusedsince;	// First time spotted by GC in MSpanFree state
 	uintptr	npreleased;	// number of pages released to the OS
@@ -501,7 +502,7 @@ struct MHeap
 extern MHeap runtime·mheap;

 void	runtime·MHeap_Init(MHeap *h);
-MSpan*	runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool zeroed);
+MSpan*	runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool needzero);
 void	runtime·MHeap_Free(MHeap *h, MSpan *s, int32 acct);
 MSpan*	runtime·MHeap_Lookup(MHeap *h, void *v);
 MSpan*	runtime·MHeap_LookupMaybe(MHeap *h, void *v);
...
@@ -147,7 +147,7 @@ MCentral_Free(MCentral *c, void *v)
 		size = runtime·class_to_size[c->sizeclass];
 		runtime·MSpanList_Remove(s);
 		runtime·unmarkspan((byte*)(s->start<<PageShift), s->npages<<PageShift);
-		*(uintptr*)(s->start<<PageShift) = 1;	// needs zeroing
+		s->needzero = 1;
 		s->freelist = nil;
 		c->nfree -= (s->npages << PageShift) / size;
 		runtime·unlock(c);
@@ -186,7 +186,7 @@ runtime·MCentral_FreeSpan(MCentral *c, MSpan *s, int32 n, MLink *start, MLink *
 		// s is completely freed, return it to the heap.
 		size = runtime·class_to_size[c->sizeclass];
 		runtime·MSpanList_Remove(s);
-		*(uintptr*)(s->start<<PageShift) = 1;	// needs zeroing
+		s->needzero = 1;
 		s->freelist = nil;
 		c->nfree -= (s->npages << PageShift) / size;
 		runtime·unlock(c);
...
@@ -1298,8 +1298,10 @@ markroot(ParFor *desc, uint32 i)
 		SpecialFinalizer *spf;

 		s = allspans[spanidx];
-		if(s->sweepgen != sg)
+		if(s->sweepgen != sg) {
+			runtime·printf("sweep %d %d\n", s->sweepgen, sg);
 			runtime·throw("gc: unswept span");
+		}
 		if(s->state != MSpanInUse)
 			continue;
 		// The garbage collector ignores type pointers stored in MSpan.types:
@@ -1826,7 +1828,7 @@ runtime·MSpan_Sweep(MSpan *s)
 	if(cl == 0) {
 		// Free large span.
 		runtime·unmarkspan(p, 1<<PageShift);
-		*(uintptr*)p = (uintptr)0xdeaddeaddeaddeadll;	// needs zeroing
+		s->needzero = 1;
 		// important to set sweepgen before returning it to heap
 		runtime·atomicstore(&s->sweepgen, sweepgen);
 		sweepgenset = true;
...
@@ -168,7 +168,7 @@ MHeap_Reclaim(MHeap *h, uintptr npage)
 // Allocate a new span of npage pages from the heap
 // and record its size class in the HeapMap and HeapMapCache.
 MSpan*
-runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool zeroed)
+runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool needzero)
 {
 	MSpan *s;

@@ -189,8 +189,11 @@ runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool
 		}
 	}
 	runtime·unlock(h);
-	if(s != nil && *(uintptr*)(s->start<<PageShift) != 0 && zeroed)
-		runtime·memclr((byte*)(s->start<<PageShift), s->npages<<PageShift);
+	if(s != nil) {
+		if(needzero && s->needzero)
+			runtime·memclr((byte*)(s->start<<PageShift), s->npages<<PageShift);
+		s->needzero = 0;
+	}
 	return s;
 }
@@ -233,26 +236,8 @@ HaveSpan:
 	s->state = MSpanInUse;
 	mstats.heap_idle -= s->npages<<PageShift;
 	mstats.heap_released -= s->npreleased<<PageShift;
-	if(s->npreleased > 0) {
-		// We have called runtime·SysUnused with these pages, and on
-		// Unix systems it called madvise. At this point at least
-		// some BSD-based kernels will return these pages either as
-		// zeros or with the old data. For our caller, the first word
-		// in the page indicates whether the span contains zeros or
-		// not (this word was set when the span was freed by
-		// MCentral_Free or runtime·MCentral_FreeSpan). If the first
-		// page in the span is returned as zeros, and some subsequent
-		// page is returned with the old data, then we will be
-		// returning a span that is assumed to be all zeros, but the
-		// actual data will not be all zeros. Avoid that problem by
-		// explicitly marking the span as not being zeroed, just in
-		// case. The beadbead constant we use here means nothing, it
-		// is just a unique constant not seen elsewhere in the
-		// runtime, as a clue in case it turns up unexpectedly in
-		// memory or in a stack trace.
+	if(s->npreleased > 0)
 		runtime·SysUsed((void*)(s->start<<PageShift), s->npages<<PageShift);
-		*(uintptr*)(s->start<<PageShift) = (uintptr)0xbeadbeadbeadbeadULL;
-	}
 	s->npreleased = 0;

 	if(s->npages > npage) {
@@ -266,7 +251,7 @@ HaveSpan:
 		h->spans[p-1] = s;
 		h->spans[p] = t;
 		h->spans[p+t->npages-1] = t;
-		*(uintptr*)(t->start<<PageShift) = *(uintptr*)(s->start<<PageShift);	// copy "needs zeroing" mark
+		t->needzero = s->needzero;
 		runtime·atomicstore(&t->sweepgen, h->sweepgen);
 		t->state = MSpanInUse;
 		MHeap_FreeLocked(h, t);
@@ -413,7 +398,6 @@ runtime·MHeap_Free(MHeap *h, MSpan *s, int32 acct)
 static void
 MHeap_FreeLocked(MHeap *h, MSpan *s)
 {
-	uintptr *sp, *tp;
 	MSpan *t;
 	PageID p;

@@ -427,7 +411,6 @@ MHeap_FreeLocked(MHeap *h, MSpan *s)
 	mstats.heap_idle += s->npages<<PageShift;
 	s->state = MSpanFree;
 	runtime·MSpanList_Remove(s);
-	sp = (uintptr*)(s->start<<PageShift);
 	// Stamp newly unused spans. The scavenger will use that
 	// info to potentially give back some pages to the OS.
 	s->unusedsince = runtime·nanotime();
@@ -437,13 +420,10 @@ MHeap_FreeLocked(MHeap *h, MSpan *s)
 	p = s->start;
 	p -= (uintptr)h->arena_start >> PageShift;
 	if(p > 0 && (t = h->spans[p-1]) != nil && t->state != MSpanInUse) {
-		if(t->npreleased == 0) { // cant't touch this otherwise
-			tp = (uintptr*)(t->start<<PageShift);
-			*tp |= *sp;	// propagate "needs zeroing" mark
-		}
 		s->start = t->start;
 		s->npages += t->npages;
 		s->npreleased = t->npreleased; // absorb released pages
+		s->needzero |= t->needzero;
 		p -= t->npages;
 		h->spans[p] = s;
 		runtime·MSpanList_Remove(t);
@@ -451,12 +431,9 @@ MHeap_FreeLocked(MHeap *h, MSpan *s)
 		runtime·FixAlloc_Free(&h->spanalloc, t);
 	}
 	if((p+s->npages)*sizeof(h->spans[0]) < h->spans_mapped && (t = h->spans[p+s->npages]) != nil && t->state != MSpanInUse) {
-		if(t->npreleased == 0) { // cant't touch this otherwise
-			tp = (uintptr*)(t->start<<PageShift);
-			*sp |= *tp;	// propagate "needs zeroing" mark
-		}
 		s->npages += t->npages;
 		s->npreleased += t->npreleased;
+		s->needzero |= t->needzero;
 		h->spans[p + s->npages - 1] = s;
 		runtime·MSpanList_Remove(t);
 		t->state = MSpanDead;
@@ -601,6 +578,7 @@ runtime·MSpan_Init(MSpan *span, PageID start, uintptr npages)
 	span->types.compression = MTypes_Empty;
 	span->specialLock.key = 0;
 	span->specials = nil;
+	span->needzero = 0;
 }

 // Initialize an empty doubly-linked list.
...