Commit 8a6ff3ab authored by Russ Cox

runtime: allocate heap metadata at run time

Before, the mheap structure was in the bss,
but it's quite large (today, 256 MB, much of
which is never actually paged in), and it makes
Go binaries run afoul of exec-time bss size
limits on some BSD systems.

Fixes #4447.

R=golang-dev, dave, minux.ma, remyoudompheng, iant
CC=golang-dev
https://golang.org/cl/7307122
parent f87b7f67
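
The mechanics of the fix, in isolation: a large zero-initialized global is placed in the bss, and the bss counts in full against exec-time size limits even when most of it is never paged in. Allocating the structure from the OS at startup shrinks the binary's bss to a single pointer while the pages themselves stay demand-paged. A minimal standalone sketch of the pattern, with plain POSIX mmap standing in for the runtime's SysAlloc (BigTable and tableinit are hypothetical names, not runtime code):

#include <stdlib.h>
#include <sys/mman.h>

typedef struct BigTable BigTable;
struct BigTable
{
	char data[256*1024*1024];	/* huge, mostly never touched */
};

/* Before: "BigTable table;" put 256 MB in the bss.
   After: only this one pointer lives in the bss. */
BigTable *table;

static void
tableinit(void)
{
	table = mmap(NULL, sizeof(*table), PROT_READ|PROT_WRITE,
		MAP_PRIVATE|MAP_ANON, -1, 0);
	if(table == MAP_FAILED)
		abort();	/* the runtime throws instead */
}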
@@ -14,8 +14,7 @@ package runtime
 #include "typekind.h"
 #include "race.h"

-#pragma dataflag 16 /* mark mheap as 'no pointers', hiding from garbage collector */
-MHeap runtime·mheap;
+MHeap *runtime·mheap;

 int32 runtime·checking;
@@ -66,7 +65,7 @@ runtime·mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed)
 		npages = size >> PageShift;
 		if((size & PageMask) != 0)
 			npages++;
-		s = runtime·MHeap_Alloc(&runtime·mheap, npages, 0, 1, zeroed);
+		s = runtime·MHeap_Alloc(runtime·mheap, npages, 0, 1, zeroed);
 		if(s == nil)
 			runtime·throw("out of memory");
 		size = npages<<PageShift;
@@ -80,9 +79,9 @@ runtime·mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed)
 	if (sizeof(void*) == 4 && c->local_total_alloc >= (1<<30)) {
 		// purge cache stats to prevent overflow
-		runtime·lock(&runtime·mheap);
+		runtime·lock(runtime·mheap);
 		runtime·purgecachedstats(c);
-		runtime·unlock(&runtime·mheap);
+		runtime·unlock(runtime·mheap);
 	}
 	if(!(flag & FlagNoGC))
@@ -166,7 +165,7 @@ runtime·free(void *v)
 		// they might coalesce v into other spans and change the bitmap further.
 		runtime·markfreed(v, size);
 		runtime·unmarkspan(v, 1<<PageShift);
-		runtime·MHeap_Free(&runtime·mheap, s, 1);
+		runtime·MHeap_Free(runtime·mheap, s, 1);
 	} else {
 		// Small object.
 		size = runtime·class_to_size[sizeclass];
@@ -196,12 +195,12 @@ runtime·mlookup(void *v, byte **base, uintptr *size, MSpan **sp)
 	m->mcache->local_nlookup++;
 	if (sizeof(void*) == 4 && m->mcache->local_nlookup >= (1<<30)) {
 		// purge cache stats to prevent overflow
-		runtime·lock(&runtime·mheap);
+		runtime·lock(runtime·mheap);
 		runtime·purgecachedstats(m->mcache);
-		runtime·unlock(&runtime·mheap);
+		runtime·unlock(runtime·mheap);
 	}
-	s = runtime·MHeap_LookupMaybe(&runtime·mheap, v);
+	s = runtime·MHeap_LookupMaybe(runtime·mheap, v);
 	if(sp)
 		*sp = s;
 	if(s == nil) {
@@ -245,11 +244,11 @@ runtime·allocmcache(void)
 	intgo rate;
 	MCache *c;

-	runtime·lock(&runtime·mheap);
-	c = runtime·FixAlloc_Alloc(&runtime·mheap.cachealloc);
-	mstats.mcache_inuse = runtime·mheap.cachealloc.inuse;
-	mstats.mcache_sys = runtime·mheap.cachealloc.sys;
-	runtime·unlock(&runtime·mheap);
+	runtime·lock(runtime·mheap);
+	c = runtime·FixAlloc_Alloc(&runtime·mheap->cachealloc);
+	mstats.mcache_inuse = runtime·mheap->cachealloc.inuse;
+	mstats.mcache_sys = runtime·mheap->cachealloc.sys;
+	runtime·unlock(runtime·mheap);
 	runtime·memclr((byte*)c, sizeof(*c));

 	// Set first allocation sample size.
@@ -266,10 +265,10 @@ void
 runtime·freemcache(MCache *c)
 {
 	runtime·MCache_ReleaseAll(c);
-	runtime·lock(&runtime·mheap);
+	runtime·lock(runtime·mheap);
 	runtime·purgecachedstats(c);
-	runtime·FixAlloc_Free(&runtime·mheap.cachealloc, c);
-	runtime·unlock(&runtime·mheap);
+	runtime·FixAlloc_Free(&runtime·mheap->cachealloc, c);
+	runtime·unlock(runtime·mheap);
 }

 void
@@ -314,6 +313,9 @@ runtime·mallocinit(void)
 	USED(arena_size);
 	USED(bitmap_size);

+	if((runtime·mheap = runtime·SysAlloc(sizeof(*runtime·mheap))) == nil)
+		runtime·throw("runtime: cannot allocate heap metadata");
+
 	runtime·InitSizes();

 	limit = runtime·memlimit();
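
One property the swap leans on, worth noting: the old global was implicitly zeroed by virtue of living in the bss, and the diff adds no explicit clear, so runtime·SysAlloc must hand back zeroed memory (its contract in the runtime is to return zeroed pages obtained from the operating system; the per-OS implementations live elsewhere). A hedged sketch of that contract, again with plain mmap standing in for the real code:

#include <stddef.h>
#include <sys/mman.h>

/* Sketch only: zeroed, demand-paged memory, or NULL on failure. */
static void*
sysalloc_sketch(size_t n)
{
	void *v;

	v = mmap(NULL, n, PROT_READ|PROT_WRITE,
		MAP_PRIVATE|MAP_ANON, -1, 0);
	if(v == MAP_FAILED)
		return NULL;	/* caller is expected to throw */
	return v;	/* fresh anonymous pages read back as zero */
}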
@@ -392,13 +394,13 @@ runtime·mallocinit(void)
 	if((uintptr)p & (((uintptr)1<<PageShift)-1))
 		runtime·throw("runtime: SysReserve returned unaligned address");
-	runtime·mheap.bitmap = p;
-	runtime·mheap.arena_start = p + bitmap_size;
-	runtime·mheap.arena_used = runtime·mheap.arena_start;
-	runtime·mheap.arena_end = runtime·mheap.arena_start + arena_size;
+	runtime·mheap->bitmap = p;
+	runtime·mheap->arena_start = p + bitmap_size;
+	runtime·mheap->arena_used = runtime·mheap->arena_start;
+	runtime·mheap->arena_end = runtime·mheap->arena_start + arena_size;

 	// Initialize the rest of the allocator.
-	runtime·MHeap_Init(&runtime·mheap, runtime·SysAlloc);
+	runtime·MHeap_Init(runtime·mheap, runtime·SysAlloc);
 	m->mcache = runtime·allocmcache();

 	// See if it works.
@@ -496,8 +498,8 @@ runtime·settype_flush(M *mp, bool sysalloc)
 		// (Manually inlined copy of runtime·MHeap_Lookup)
 		p = (uintptr)v>>PageShift;
 		if(sizeof(void*) == 8)
-			p -= (uintptr)runtime·mheap.arena_start >> PageShift;
-		s = runtime·mheap.map[p];
+			p -= (uintptr)runtime·mheap->arena_start >> PageShift;
+		s = runtime·mheap->map[p];

 		if(s->sizeclass == 0) {
 			s->types.compression = MTypes_Single;
@@ -610,7 +612,7 @@ runtime·settype(void *v, uintptr t)
 	}

 	if(DebugTypeAtBlockEnd) {
-		s = runtime·MHeap_Lookup(&runtime·mheap, v);
+		s = runtime·MHeap_Lookup(runtime·mheap, v);
 		*(uintptr*)((uintptr)v+s->elemsize-sizeof(uintptr)) = t;
 	}
 }
@@ -649,7 +651,7 @@ runtime·gettype(void *v)
 	uintptr t, ofs;
 	byte *data;

-	s = runtime·MHeap_LookupMaybe(&runtime·mheap, v);
+	s = runtime·MHeap_LookupMaybe(runtime·mheap, v);
 	if(s != nil) {
 		t = 0;
 		switch(s->types.compression) {
...
@@ -427,7 +427,7 @@ struct MHeap
 	FixAlloc spanalloc;	// allocator for Span*
 	FixAlloc cachealloc;	// allocator for MCache*
 };
-extern MHeap runtime·mheap;
+extern MHeap *runtime·mheap;

 void	runtime·MHeap_Init(MHeap *h, void *(*allocator)(uintptr));
 MSpan*	runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct, int32 zeroed);
...
@@ -21,7 +21,7 @@ runtime·MCache_Alloc(MCache *c, int32 sizeclass, uintptr size, int32 zeroed)
 	l = &c->list[sizeclass];
 	if(l->list == nil) {
 		// Replenish using central lists.
-		n = runtime·MCentral_AllocList(&runtime·mheap.central[sizeclass],
+		n = runtime·MCentral_AllocList(&runtime·mheap->central[sizeclass],
 			runtime·class_to_transfercount[sizeclass], &first);
 		if(n == 0)
 			runtime·throw("out of memory");
@@ -69,7 +69,7 @@ ReleaseN(MCache *c, MCacheList *l, int32 n, int32 sizeclass)
 	c->size -= n*runtime·class_to_size[sizeclass];

 	// Return them to central free list.
-	runtime·MCentral_FreeList(&runtime·mheap.central[sizeclass], n, first);
+	runtime·MCentral_FreeList(&runtime·mheap->central[sizeclass], n, first);
 }

 void
...
@@ -109,7 +109,7 @@ MCentral_Free(MCentral *c, void *v)
 	int32 size;

 	// Find span for v.
-	s = runtime·MHeap_Lookup(&runtime·mheap, v);
+	s = runtime·MHeap_Lookup(runtime·mheap, v);
 	if(s == nil || s->ref == 0)
 		runtime·throw("invalid free");
@@ -134,7 +134,7 @@ MCentral_Free(MCentral *c, void *v)
 		s->freelist = nil;
 		c->nfree -= (s->npages << PageShift) / size;
 		runtime·unlock(c);
-		runtime·MHeap_Free(&runtime·mheap, s, 0);
+		runtime·MHeap_Free(runtime·mheap, s, 0);
 		runtime·lock(c);
 	}
 }
@@ -169,7 +169,7 @@ runtime·MCentral_FreeSpan(MCentral *c, MSpan *s, int32 n, MLink *start, MLink *
 		c->nfree -= (s->npages << PageShift) / size;
 		runtime·unlock(c);
 		runtime·unmarkspan((byte*)(s->start<<PageShift), s->npages<<PageShift);
-		runtime·MHeap_Free(&runtime·mheap, s, 0);
+		runtime·MHeap_Free(runtime·mheap, s, 0);
 	} else {
 		runtime·unlock(c);
 	}
@@ -201,7 +201,7 @@ MCentral_Grow(MCentral *c)
 	runtime·unlock(c);
 	runtime·MGetSizeClassInfo(c->sizeclass, &size, &npages, &n);
-	s = runtime·MHeap_Alloc(&runtime·mheap, npages, c->sizeclass, 0, 1);
+	s = runtime·MHeap_Alloc(runtime·mheap, npages, c->sizeclass, 0, 1);
 	if(s == nil) {
 		// TODO(rsc): Log out of memory
 		runtime·lock(c);
...
This diff is collapsed.
@@ -383,7 +383,7 @@ scavenge(uint64 now, uint64 limit)
 	uintptr sumreleased;
 	MHeap *h;

-	h = &runtime·mheap;
+	h = runtime·mheap;
 	sumreleased = 0;
 	for(i=0; i < nelem(h->free); i++)
 		sumreleased += scavengelist(&h->free[i], now, limit);
@@ -421,7 +421,7 @@ runtime·MHeap_Scavenger(void)
 	if(env != nil)
 		trace = runtime·atoi(env) > 0;

-	h = &runtime·mheap;
+	h = runtime·mheap;
 	for(k=0;; k++) {
 		runtime·noteclear(&note);
 		runtime·entersyscall();
@@ -463,9 +463,9 @@ void
 runtimedebug·freeOSMemory(void)
 {
 	runtime·gc(1);
-	runtime·lock(&runtime·mheap);
+	runtime·lock(runtime·mheap);
 	scavenge(~(uintptr)0, 0);
-	runtime·unlock(&runtime·mheap);
+	runtime·unlock(runtime·mheap);
 }

 // Initialize a new span with the given start and npages.
...
@@ -40,12 +40,7 @@ runtime·raceinit(void)
 	m->racecall = true;
 	runtimerace·Initialize(&racectx);
-	sz = (byte*)&runtime·mheap - noptrdata;
-	if(sz)
-		runtimerace·MapShadow(noptrdata, sz);
-	sz = enoptrbss - (byte*)(&runtime·mheap+1);
-	if(sz)
-		runtimerace·MapShadow(&runtime·mheap+1, sz);
+	runtimerace·MapShadow(noptrdata, enoptrbss - noptrdata);
 	m->racecall = false;
 	return racectx;
 }
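
Why the two MapShadow calls collapse into one: the race detector shadows the writable noptrdata..enoptrbss range, and the old 256 MB mheap global sat in the middle of it, so raceinit shadowed the range in two pieces to skip the object. With runtime·mheap reduced to a single pointer there is nothing left to skip. Schematically, as comments (layout as assumed by the old code):

/* Old: shadow around a 256 MB hole.
 *
 *   [noptrdata, &runtime·mheap)        first MapShadow call
 *   [&runtime·mheap, &runtime·mheap+1) skipped: never-touched bss
 *   [&runtime·mheap+1, enoptrbss)      second MapShadow call
 *
 * New: runtime·mheap is one pointer inside the range, so a single
 * MapShadow(noptrdata, enoptrbss - noptrdata) covers everything.
 */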
@@ -102,7 +97,7 @@ runtime·racefuncenter(uintptr pc)
 	// Same thing if the PC is on the heap, which should be a
 	// closure trampoline.
 	if(pc == (uintptr)runtime·lessstack ||
-		(pc >= (uintptr)runtime·mheap.arena_start && pc < (uintptr)runtime·mheap.arena_used))
+		(pc >= (uintptr)runtime·mheap->arena_start && pc < (uintptr)runtime·mheap->arena_used))
 		runtime·callers(2, &pc, 1);

 	m->racecall = true;
@@ -168,7 +163,7 @@ memoryaccess(void *addr, uintptr callpc, uintptr pc, bool write)
 		racectx = g->racectx;
 		if(callpc) {
 			if(callpc == (uintptr)runtime·lessstack ||
-				(callpc >= (uintptr)runtime·mheap.arena_start && callpc < (uintptr)runtime·mheap.arena_used))
+				(callpc >= (uintptr)runtime·mheap->arena_start && callpc < (uintptr)runtime·mheap->arena_used))
 				runtime·callers(3, &callpc, 1);
 			runtimerace·FuncEnter(racectx, (void*)callpc);
 		}
@@ -204,7 +199,7 @@ rangeaccess(void *addr, uintptr size, uintptr step, uintptr callpc, uintptr pc,
 		racectx = g->racectx;
 		if(callpc) {
 			if(callpc == (uintptr)runtime·lessstack ||
-				(callpc >= (uintptr)runtime·mheap.arena_start && callpc < (uintptr)runtime·mheap.arena_used))
+				(callpc >= (uintptr)runtime·mheap->arena_start && callpc < (uintptr)runtime·mheap->arena_used))
 				runtime·callers(3, &callpc, 1);
 			runtimerace·FuncEnter(racectx, (void*)callpc);
 		}
@@ -354,7 +349,7 @@ onstack(uintptr argp)
 	// the layout is in ../../cmd/ld/data.c
 	if((byte*)argp >= noptrdata && (byte*)argp < enoptrbss)
 		return false;
-	if((byte*)argp >= runtime·mheap.arena_start && (byte*)argp < runtime·mheap.arena_used)
+	if((byte*)argp >= runtime·mheap->arena_start && (byte*)argp < runtime·mheap->arena_used)
 		return false;
 	return true;
 }
@@ -74,8 +74,8 @@ runtime·gentraceback(byte *pc0, byte *sp, byte *lr0, G *gp, int32 skip, uintptr
 			// we have lost track of where we are.
 			p = (byte*)pc;
 			if((pc&3) == 0 && p < p+4 &&
-			   runtime·mheap.arena_start < p &&
-			   p+4 < runtime·mheap.arena_used) {
+			   runtime·mheap->arena_start < p &&
+			   p+4 < runtime·mheap->arena_used) {
 				x = *(uintptr*)p;
 				if((x&0xfffff000) == 0xe49df000) {
 					// End of closure:
@@ -94,7 +94,7 @@ runtime·gentraceback(byte *pc0, byte *sp, byte *lr0, G *gp, int32 skip, uintptr
 					// argument copying
 					p += 7*4;
 				}
-				if((byte*)pc < p && p < p+4 && p+4 < runtime·mheap.arena_used) {
+				if((byte*)pc < p && p < p+4 && p+4 < runtime·mheap->arena_used) {
 					pc = *(uintptr*)p;
 					fp = nil;
 					continue;
...
@@ -82,7 +82,7 @@ runtime·gentraceback(byte *pc0, byte *sp, byte *lr0, G *gp, int32 skip, uintptr
 		// The 0x48 byte is only on amd64.
 		p = (byte*)pc;
 		// We check p < p+8 to avoid wrapping and faulting if we lose track.
-		if(runtime·mheap.arena_start < p && p < p+8 && p+8 < runtime·mheap.arena_used &&	// pointer in allocated memory
+		if(runtime·mheap->arena_start < p && p < p+8 && p+8 < runtime·mheap->arena_used &&	// pointer in allocated memory
 		   (sizeof(uintptr) != 8 || *p++ == 0x48) &&	// skip 0x48 byte on amd64
 		   p[0] == 0x81 && p[1] == 0xc4 && p[6] == 0xc3) {
 			sp += *(uint32*)(p+2);
@@ -234,7 +234,7 @@ isclosureentry(uintptr pc)
 	int32 i, siz;

 	p = (byte*)pc;
-	if(p < runtime·mheap.arena_start || p+32 > runtime·mheap.arena_used)
+	if(p < runtime·mheap->arena_start || p+32 > runtime·mheap->arena_used)
 		return 0;

 	if(*p == 0xe8) {
...