Commit da0a7d7b authored by Russ Cox

malloc bug fixes.

use malloc by default.
free stacks.

R=r
DELTA=424  (333 added, 29 deleted, 62 changed)
OCL=21553
CL=21584
parent ba882f99
@@ -16,4 +16,4 @@ type Stats struct {
 export func Alloc(uint64) *byte;
 export func Free(*byte);
 export func GetStats() *Stats;
+export func Lookup(*byte) (*byte, uint64);
@@ -25,6 +25,10 @@ malloc(uintptr size)
     MSpan *s;
     void *v;
+
+    if(m->mallocing)
+        throw("malloc - deadlock");
+    m->mallocing = 1;
+
     if(size == 0)
         size = 1;
@@ -35,11 +39,9 @@ malloc(uintptr size)
         c = m->mcache;
         v = MCache_Alloc(c, sizeclass, size);
         if(v == nil)
-            return nil;
+            throw("out of memory");
         mstats.alloc += size;
-        return v;
-    }
+    } else {
     // TODO(rsc): Report tracebacks for very large allocations.
     // Allocate directly from heap.
@@ -48,9 +50,13 @@ malloc(uintptr size)
         npages++;
     s = MHeap_Alloc(&mheap, npages, 0);
     if(s == nil)
-        return nil;
+        throw("out of memory");
     mstats.alloc += npages<<PageShift;
-    return (void*)(s->start << PageShift);
+    v = (void*)(s->start << PageShift);
+    }
+
+    m->mallocing = 0;
+    return v;
 }

 // Free the object whose base pointer is v.
@@ -89,6 +95,34 @@ free(void *v)
     MCache_Free(c, v, sizeclass, size);
 }

+void
+mlookup(void *v, byte **base, uintptr *size)
+{
+    uintptr n, off;
+    byte *p;
+    MSpan *s;
+
+    s = MHeap_Lookup(&mheap, (uintptr)v>>PageShift);
+    if(s == nil) {
+        *base = nil;
+        *size = 0;
+        return;
+    }
+
+    p = (byte*)((uintptr)s->start<<PageShift);
+    if(s->sizeclass == 0) {
+        // Large object.
+        *base = p;
+        *size = s->npages<<PageShift;
+        return;
+    }
+
+    n = class_to_size[s->sizeclass];
+    off = ((byte*)v - p)/n * n;
+    *base = p+off;
+    *size = n;
+}
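
The rounding arithmetic mlookup relies on is easiest to see with concrete numbers. A minimal standalone sketch in plain C, with illustrative values only: p and n stand in for the span start and the class_to_size entry.

#include <stdio.h>
#include <stdint.h>

int main(void) {
    uintptr_t p = 0x100000;          /* hypothetical span start (page-aligned) */
    uintptr_t n = 48;                /* hypothetical class_to_size[sizeclass] */
    uintptr_t v = p + 3*n + 17;      /* interior pointer into the 4th block */
    uintptr_t off = (v - p) / n * n; /* round offset down to a block boundary */

    printf("base=%#lx size=%lu\n", (unsigned long)(p + off), (unsigned long)n);
    /* prints: base=0x100090 size=48 -- the base of the block containing v */
    return 0;
}

Integer division discards the remainder, so any pointer into a block maps back to that block's base; this is what lets free and the garbage collector accept interior pointers.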
 MCache*
 allocmcache(void)
 {
@@ -144,6 +178,80 @@ SysFree(void *v, uintptr n)
     // TODO(rsc): call munmap
 }

+// Runtime stubs.
+
+extern void *oldmal(uint32);
+
+void*
+mal(uint32 n)
+{
+//return oldmal(n);
+    void *v;
+
+    v = malloc(n);
+
+    if(0) {
+        byte *p;
+        int32 i;
+        p = v;
+        for(i=0; i<n; i++) {
+            if(p[i] != 0) {
+                printf("mal %d => %p: byte %d is non-zero\n", n, v, i);
+                throw("mal");
+            }
+        }
+    }
+
+//printf("mal %d %p\n", n, v);  // |checkmal to check for overlapping returns.
+    return v;
+}
+
+// Stack allocator uses malloc/free most of the time,
+// but if we're in the middle of malloc and need stack,
+// we have to do something else to avoid deadlock.
+// In that case, we fall back on a fixed-size free-list
+// allocator, assuming that inside malloc all the stack
+// frames are small, so that all the stack allocations
+// will be a single size, the minimum (right now, 5k).
+struct {
+    Lock;
+    FixAlloc;
+} stacks;
+
+void*
+stackalloc(uint32 n)
+{
+    void *v;
+
+//return oldmal(n);
+    if(m->mallocing) {
+        lock(&stacks);
+        if(stacks.size == 0)
+            FixAlloc_Init(&stacks, n, SysAlloc);
+        if(stacks.size != n) {
+            printf("stackalloc: in malloc, size=%D want %d", stacks.size, n);
+            throw("stackalloc");
+        }
+        v = FixAlloc_Alloc(&stacks);
+        unlock(&stacks);
+        return v;
+    }
+    return malloc(n);
+}
+
+void
+stackfree(void *v)
+{
+//return;
+    if(m->mallocing) {
+        lock(&stacks);
+        FixAlloc_Free(&stacks, v);
+        unlock(&stacks);
+        return;
+    }
+    free(v);
+}
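
The deadlock-avoidance pattern above can be shown in isolation. A minimal standalone sketch, assuming a single fallback size and ignoring locking; plain malloc/free stand in for the general allocator, the refill stands in for FixAlloc's SysAlloc path, and the _sketch names are invented for illustration.

#include <stdlib.h>

enum { StackSize = 5*1024 };  /* the single fallback size (5k per the comment) */

static void *stackfreelist;   /* free stacks chained through their first word */
static int mallocing;         /* would be m->mallocing in the runtime */

void*
stackalloc_sketch(void)
{
    void *v;

    if(mallocing) {
        /* Inside malloc: serve from the fixed-size list, never re-entering
         * the general allocator. */
        if(stackfreelist != NULL) {
            v = stackfreelist;
            stackfreelist = *(void**)v;  /* pop: next pointer is the first word */
            return v;
        }
        return malloc(StackSize);  /* stand-in for a SysAlloc refill, which
                                    * goes straight to the OS, not to malloc */
    }
    return malloc(StackSize);      /* normal path: the general allocator */
}

void
stackfree_sketch(void *v)
{
    if(mallocing) {
        *(void**)v = stackfreelist;  /* push back on the fixed-size list */
        stackfreelist = v;
        return;
    }
    free(v);
}

The single-size assumption is what makes the fallback trivially correct: every block on the list is interchangeable, so no size bookkeeping is needed while malloc is busy.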
 // Go function stubs.
@@ -160,10 +268,15 @@ malloc·Free(byte *p)
     free(p);
 }

+void
+malloc·Lookup(byte *p, byte *base, uintptr size)
+{
+    mlookup(p, &base, &size);
+}
+
 void
 malloc·GetStats(MStats *s)
 {
     s = &mstats;
     FLUSH(&s);
 }
@@ -62,7 +62,7 @@
 // 4. If the heap has too much memory, return some to the
 // operating system.
 //
-// TODO(rsc): Steps 2, 3, 4 are not implemented.
+// TODO(rsc): Step 4 is not implemented.
 //
 // Allocating and freeing a large object uses the page heap
 // directly, bypassing the MCache and MCentral free lists.
@@ -79,6 +79,7 @@ typedef struct MHeapMap MHeapMap;
 typedef struct MHeapMapCache MHeapMapCache;
 typedef struct MSpan MSpan;
 typedef struct MStats MStats;
+typedef struct MLink MLink;
@@ -102,6 +103,12 @@ enum
 };

+// A generic linked list of blocks.  (Typically the block is bigger than sizeof(MLink).)
+struct MLink
+{
+    MLink *next;
+};
+
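
The point of MLink is that a free block's own first word stores the next pointer, so the free list costs no memory beyond the blocks themselves. A standalone sketch of the idea; only MLink itself comes from the patch, the rest is illustrative.

#include <stdio.h>

typedef struct MLink MLink;
struct MLink { MLink *next; };

int main(void) {
    static char blocks[4][64];   /* pretend these are 64-byte free blocks */
    MLink *list = NULL;

    /* Push each block: overlay an MLink on its first word. */
    for (int i = 0; i < 4; i++) {
        MLink *m = (MLink*)blocks[i];
        m->next = list;
        list = m;
    }

    /* Pop them back off, like MCache_Alloc's v = l->list; l->list = v->next. */
    while (list != NULL) {
        MLink *v = list;
        list = v->next;
        v->next = NULL;          /* re-zero the link word, as malloc does */
        printf("popped %p\n", (void*)v);
    }
    return 0;
}

Replacing the old *(void**)v casts with v->next is purely a readability change; the memory layout is identical.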
 // SysAlloc obtains a large chunk of memory from the operating system,
 // typically on the order of a hundred kilobytes or a megabyte.
 //
@@ -129,7 +136,7 @@ struct FixAlloc
 {
     uintptr size;
     void *(*alloc)(uintptr);
-    void *list;
+    MLink *list;
     byte *chunk;
     uint32 nchunk;
 };
@@ -146,6 +153,7 @@ struct MStats
 {
     uint64 alloc;
     uint64 sys;
+    uint64 stacks;
 };
 extern MStats mstats;
@@ -175,8 +183,9 @@ extern void InitSizes(void);
 typedef struct MCacheList MCacheList;
 struct MCacheList
 {
-    void *list;
+    MLink *list;
     uint32 nlist;
+    uint32 nlistmin;
 };

 struct MCache
@@ -230,8 +239,8 @@ struct MCentral
 };

 void MCentral_Init(MCentral *c, int32 sizeclass);
-int32 MCentral_AllocList(MCentral *c, int32 n, void **start, void **end);
-void MCentral_FreeList(MCentral *c, int32 n, void *start, void *end);
+int32 MCentral_AllocList(MCentral *c, int32 n, MLink **first);
+void MCentral_FreeList(MCentral *c, int32 n, MLink *first);

 // Free(v) must be able to determine the MSpan containing v.
......
@@ -13,7 +13,7 @@ void*
 MCache_Alloc(MCache *c, int32 sizeclass, uintptr size)
 {
     MCacheList *l;
-    void *v, *start, *end;
+    MLink *first, *v;
     int32 n;

     // Allocate from list.
@@ -21,41 +21,85 @@ MCache_Alloc(MCache *c, int32 sizeclass, uintptr size)
     if(l->list == nil) {
         // Replenish using central lists.
         n = MCentral_AllocList(&mheap.central[sizeclass],
-            class_to_transfercount[sizeclass], &start, &end);
-        if(n == 0)
-            return nil;
-        l->list = start;
+            class_to_transfercount[sizeclass], &first);
+        l->list = first;
         l->nlist = n;
         c->size += n*size;
     }
     v = l->list;
-    l->list = *(void**)v;
+    l->list = v->next;
     l->nlist--;
+    if(l->nlist < l->nlistmin)
+        l->nlistmin = l->nlist;
     c->size -= size;

     // v is zeroed except for the link pointer
     // that we used above; zero that.
-    *(void**)v = nil;
+    v->next = nil;
     return v;
 }

+// Take n elements off l and return them to the central free list.
+static void
+ReleaseN(MCache *c, MCacheList *l, int32 n, int32 sizeclass)
+{
+    MLink *first, **lp;
+    int32 i;
+
+    // Cut off first n elements.
+    first = l->list;
+    lp = &l->list;
+    for(i=0; i<n; i++)
+        lp = &(*lp)->next;
+    l->list = *lp;
+    *lp = nil;
+    l->nlist -= n;
+    if(l->nlist < l->nlistmin)
+        l->nlistmin = l->nlist;
+    c->size -= n*class_to_size[sizeclass];
+
+    // Return them to central free list.
+    MCentral_FreeList(&mheap.central[sizeclass], n, first);
+}
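
ReleaseN's pointer-to-pointer walk is a compact way to split a singly-linked list after n elements without tracking a separate previous node. The same surgery in a standalone sketch:

#include <stdio.h>

typedef struct MLink MLink;
struct MLink { MLink *next; };

int main(void) {
    MLink nodes[5];
    for (int i = 0; i < 4; i++) nodes[i].next = &nodes[i+1];
    nodes[4].next = NULL;

    MLink *list = &nodes[0];
    int n = 2;

    /* Cut off the first n elements, exactly as ReleaseN does. */
    MLink *first = list;
    MLink **lp = &list;
    for (int i = 0; i < n; i++)
        lp = &(*lp)->next;
    list = *lp;        /* remainder stays behind as the cache's list */
    *lp = NULL;        /* detached chain of n elements starts at first */

    for (MLink *v = first; v; v = v->next)
        printf("detached %p\n", (void*)v);
    return 0;
}

After the loop, lp points at the link field of the n-th element, so a single store severs the chain there.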
 void
-MCache_Free(MCache *c, void *p, int32 sizeclass, uintptr size)
+MCache_Free(MCache *c, void *v, int32 sizeclass, uintptr size)
 {
+    int32 i, n;
     MCacheList *l;
+    MLink *p;

     // Put back on list.
     l = &c->list[sizeclass];
-    *(void**)p = l->list;
+    p = v;
+    p->next = l->list;
     l->list = p;
     l->nlist++;
     c->size += size;

     if(l->nlist >= MaxMCacheListLen) {
-        // TODO(rsc): Release to central cache.
+        // Release a chunk back.
+        ReleaseN(c, l, class_to_transfercount[sizeclass], sizeclass);
     }
     if(c->size >= MaxMCacheSize) {
-        // TODO(rsc): Scavenge.
+        // Scavenge.
+        for(i=0; i<NumSizeClasses; i++) {
+            l = &c->list[i];
+            n = l->nlistmin;
+
+            // n is the minimum number of elements we've seen on
+            // the list since the last scavenge.  If n > 0, it means that
+            // we could have gotten by with n fewer elements
+            // without needing to consult the central free list.
+            // Move toward that situation by releasing n/2 of them.
+            if(n > 0) {
+                if(n > 1)
+                    n /= 2;
+                ReleaseN(c, l, n, i);
+            }
+            l->nlistmin = l->nlist;
+        }
     }
 }
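
The scavenge heuristic is easiest to trace with concrete numbers. A standalone sketch, assuming a list that starts with 20 cached elements and dips to 12 between scavenges; the arithmetic mirrors the loop above, with a plain counter standing in for ReleaseN.

#include <stdio.h>

int main(void) {
    int nlist = 20;      /* elements currently cached */
    int nlistmin = 20;   /* low-water mark since the last scavenge */

    /* Simulate some alloc/free traffic: dip to 12, climb back to 20. */
    for (int i = 0; i < 8; i++) { nlist--; if (nlist < nlistmin) nlistmin = nlist; }
    for (int i = 0; i < 8; i++) nlist++;

    /* Scavenge: nlistmin == 12 elements sat unused the whole interval. */
    int n = nlistmin;
    if (n > 0) {
        if (n > 1)
            n /= 2;                /* release half, as MCache_Free does */
        nlist -= n;                /* stand-in for ReleaseN */
        printf("released %d, %d left\n", n, nlist);  /* released 6, 14 left */
    }
    nlistmin = nlist;              /* reset the low-water mark */
    return 0;
}

Releasing only half of the observed surplus is deliberately conservative: the cache shrinks toward its working set over several scavenges instead of oscillating.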
@@ -35,42 +35,35 @@ MCentral_Init(MCentral *c, int32 sizeclass)
 // The objects are linked together by their first words.
 // On return, *pstart points at the first object and *pend at the last.
 int32
-MCentral_AllocList(MCentral *c, int32 n, void **pstart, void **pend)
+MCentral_AllocList(MCentral *c, int32 n, MLink **pfirst)
 {
-    void *start, *end, *v;
+    MLink *first, *last, *v;
     int32 i;

-    *pstart = nil;
-    *pend = nil;
-
     lock(c);

     // Replenish central list if empty.
     if(MSpanList_IsEmpty(&c->nonempty)) {
         if(!MCentral_Grow(c)) {
             unlock(c);
+            *pfirst = nil;
             return 0;
         }
     }

     // Copy from list, up to n.
-    start = nil;
-    end = nil;
-    for(i=0; i<n; i++) {
-        v = MCentral_Alloc(c);
-        if(v == nil)
-            break;
-        if(start == nil)
-            start = v;
-        else
-            *(void**)end = v;
-        end = v;
+    // First one is guaranteed to work, because we just grew the list.
+    first = MCentral_Alloc(c);
+    last = first;
+    for(i=1; i<n && (v = MCentral_Alloc(c)) != nil; i++) {
+        last->next = v;
+        last = v;
     }
+    last->next = nil;
     c->nfree -= i;

     unlock(c);
-    *pstart = start;
-    *pend = end;
+    *pfirst = first;
     return i;
 }
@@ -79,18 +72,18 @@ static void*
 MCentral_Alloc(MCentral *c)
 {
     MSpan *s;
-    void *v;
+    MLink *v;

     if(MSpanList_IsEmpty(&c->nonempty))
         return nil;
     s = c->nonempty.next;
+    s->ref++;
     v = s->freelist;
-    s->freelist = *(void**)v;
+    s->freelist = v->next;
     if(s->freelist == nil) {
         MSpanList_Remove(s);
         MSpanList_Insert(&c->empty, s);
     }
-    s->ref++;
     return v;
 }
@@ -99,19 +92,18 @@ MCentral_Alloc(MCentral *c)
 // The objects are linked together by their first words.
 // On return, *pstart points at the first object and *pend at the last.
 void
-MCentral_FreeList(MCentral *c, int32 n, void *start, void *end)
+MCentral_FreeList(MCentral *c, int32 n, void *start)
 {
-    void *v, *next;
+    MLink *v, *next;

-    // Assume *(void**)end = nil marks end of list.
+    // Assume next == nil marks end of list.
     // n and end would be useful if we implemented
     // the transfer cache optimization in the TODO above.
     USED(n);
-    USED(end);

     lock(c);
     for(v=start; v; v=next) {
-        next = *(void**)v;
+        next = v->next;
         MCentral_Free(c, v);
     }
     unlock(c);
@@ -122,11 +114,12 @@ static void
 MCentral_Free(MCentral *c, void *v)
 {
     MSpan *s;
-    PageID p;
+    PageID page;
+    MLink *p, *next;

     // Find span for v.
-    p = (uintptr)v >> PageShift;
-    s = MHeap_Lookup(&mheap, p);
+    page = (uintptr)v >> PageShift;
+    s = MHeap_Lookup(&mheap, page);
     if(s == nil || s->ref == 0)
         throw("invalid free");
@@ -137,13 +130,21 @@ MCentral_Free(MCentral *c, void *v)
     }

     // Add v back to s's free list.
-    *(void**)v = s->freelist;
-    s->freelist = v;
+    p = v;
+    p->next = s->freelist;
+    s->freelist = p;
     c->nfree++;

     // If s is completely freed, return it to the heap.
     if(--s->ref == 0) {
         MSpanList_Remove(s);
+        // Freed blocks are zeroed except for the link pointer.
+        // Zero the link pointers so that the page is all zero.
+        for(p=s->freelist; p; p=next) {
+            next = p->next;
+            p->next = nil;
+        }
+        s->freelist = nil;
         c->nfree -= (s->npages << PageShift) / class_to_size[c->sizeclass];
         unlock(c);
         MHeap_Free(&mheap, s);
@@ -157,7 +158,7 @@ static bool
 MCentral_Grow(MCentral *c)
 {
     int32 n, npages, size;
-    void **tail;
+    MLink **tailp, *v;
     byte *p, *end;
     MSpan *s;
@@ -171,17 +172,18 @@ MCentral_Grow(MCentral *c)
     }

     // Carve span into sequence of blocks.
-    tail = &s->freelist;
+    tailp = &s->freelist;
     p = (byte*)(s->start << PageShift);
     end = p + (npages << PageShift);
     size = class_to_size[c->sizeclass];
     n = 0;
     for(; p + size <= end; p += size) {
-        *tail = p;
-        tail = (void**)p;
+        v = (MLink*)p;
+        *tailp = v;
+        tailp = &v->next;
        n++;
     }
-    *tail = nil;
+    *tailp = nil;

     lock(c);
     c->nfree += n;
......
@@ -23,17 +23,6 @@ enum
     MAP_ANON = 0x1000,  // not on Linux - TODO(rsc)
 };

-void*
-stackalloc(uint32 n)
-{
-    return mal(n);
-}
-
-void
-stackfree(void*)
-{
-}
-
 // Convenient wrapper around mmap.
 static void*
 brk(uint32 n)
@@ -51,7 +40,7 @@ brk(uint32 n)
 // right here?"  The answer is yes unless we're in the middle of
 // editing the malloc state in m->mem.
 void*
-mal(uint32 n)
+oldmal(uint32 n)
 {
     byte* v;
......
@@ -98,9 +98,20 @@ HaveSpan:
     // No matter what, cache span info, because gc needs to be
     // able to map interior pointer to containing span.
     s->sizeclass = sizeclass;
-    for(n=0; n<npage; n++) {
+    for(n=0; n<npage; n++)
         MHeapMap_Set(&h->map, s->start+n, s);
-        if(sizeclass != 0)
+    if(sizeclass == 0) {
+        uintptr tmp;
+
+        // If there are entries for this span, invalidate them,
+        // but don't blow out cache entries about other spans.
+        for(n=0; n<npage; n++)
+            if(MHeapMapCache_GET(&h->mapcache, s->start+n, tmp) != 0)
+                MHeapMapCache_SET(&h->mapcache, s->start+n, 0);
+    } else {
+        // Save cache entries for this span.
+        // If there's a size class, there aren't that many pages.
+        for(n=0; n<npage; n++)
             MHeapMapCache_SET(&h->mapcache, s->start+n, sizeclass);
     }
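
The cache policy here can be sketched over a generic direct-mapped page-to-sizeclass cache (the real MHeapMapCache layout is not shown in this patch): small-object spans prime an entry per page, while large spans only evict entries that really refer to their own pages, so aliased entries for other spans survive. A standalone sketch:

#include <stdio.h>
#include <stdint.h>

enum { CacheSize = 1 << 8 };
static uintptr_t keys[CacheSize];  /* page number stored in each slot */
static uint8_t vals[CacheSize];    /* cached sizeclass; 0 = no entry */

static void cache_set(uintptr_t page, uint8_t sizeclass) {
    keys[page % CacheSize] = page;
    vals[page % CacheSize] = sizeclass;
}

static uint8_t cache_get(uintptr_t page) {
    return keys[page % CacheSize] == page ? vals[page % CacheSize] : 0;
}

static void span_alloc(uintptr_t start, uintptr_t npage, uint8_t sizeclass) {
    for (uintptr_t n = 0; n < npage; n++) {
        if (sizeclass == 0) {
            /* Large span: drop stale entries for these pages, but don't
             * blow out slots currently caching some other span's page. */
            if (cache_get(start+n) != 0)
                cache_set(start+n, 0);
        } else {
            /* Small-object span: prime the cache for each of its pages. */
            cache_set(start+n, sizeclass);
        }
    }
}

int main(void) {
    span_alloc(100, 2, 5);   /* small-object span on pages 100-101 */
    span_alloc(100, 2, 0);   /* same pages reused as a large span */
    printf("page 100 -> class %d\n", cache_get(100));  /* prints 0 again */
    return 0;
}

The guard before invalidation matters because the cache is lossy: a slot may hold an entry for a different page that merely hashes to the same index, and clobbering it would cost an unrelated lookup its fast path.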
@@ -168,6 +179,8 @@ MHeap_Grow(MHeap *h, uintptr npage)
         return false;
     }

+    // Create a fake "in use" span and free it, so that the
+    // right coalescing happens.
     s = FixAlloc_Alloc(&h->spanalloc);
     MSpan_Init(s, (uintptr)v>>PageShift, ask>>PageShift);
     MHeapMap_Set(&h->map, s->start, s);
@@ -198,8 +211,10 @@ MHeap_FreeLocked(MHeap *h, MSpan *s)
 {
     MSpan *t;

-    if(s->state != MSpanInUse || s->ref != 0)
+    if(s->state != MSpanInUse || s->ref != 0) {
+        printf("MHeap_FreeLocked - span %p ptr %p state %d ref %d\n", s, s->start<<PageShift, s->state, s->ref);
         throw("MHeap_FreeLocked - invalid free");
+    }
     s->state = MSpanFree;
     MSpanList_Remove(s);
......
@@ -98,6 +98,10 @@ schedinit(void)
     mallocinit();

+    // Allocate internal symbol table representation now,
+    // so that we don't need to call malloc when we crash.
+    findfunc(0);
+
     sched.gomaxprocs = 1;
     p = getenv("GOMAXPROCS");
     if(p != nil && (n = atoi(p)) != 0)
@@ -440,7 +444,7 @@ matchmg(void)
         notewakeup(&m->havenextg);
     }else{
         m = mal(sizeof(M));
-        m->g0 = malg(1024);
+        m->g0 = malg(8192);
         m->nextg = g;
         m->id = sched.mcount++;
         if(debug) {
......
@@ -22,7 +22,7 @@ TEXT _rt0_amd64(SB),7,$-8
     // create istack out of the given (operating system) stack
-    LEAQ (-1024+104)(SP), AX
+    LEAQ (-8192+104)(SP), AX
     MOVQ AX, 0(R15)  // 0(R15) is stack limit (w 104b guard)
     MOVQ SP, 8(R15)  // 8(R15) is base
......
@@ -76,10 +76,6 @@ enum
     true = 1,
     false = 0,
 };
-enum
-{
-    SmallFreeClasses = 168,  // number of small free lists in malloc
-};

 /*
  * structures
@@ -158,6 +154,7 @@ struct M
     int32 siz1;
     int32 siz2;
     int32 id;
+    int32 mallocing;
     Note havenextg;
     G* nextg;
     M* schedlink;
......
@@ -31,15 +31,17 @@ func bigger() {
 func main() {
     flag.Parse();
+    malloc.GetStats().alloc = 0;  // ignore stacks
     for i := 0; i < 1<<8; i++ {
         for j := 1; j <= 1<<22; j<<=1 {
             if i == 0 && chatty {
                 println("First alloc:", j);
             }
             b := malloc.Alloc(uint64(j));
+            during := malloc.GetStats().alloc;
             malloc.Free(b);
             if a := malloc.GetStats().alloc; a != 0 {
-                panicln("malloc wrong count", a);
+                panicln("malloc wrong count", a, "after", j, "during", during);
             }
             bigger();
         }
......
// $G $D/$F.go && $L $F.$A && ./$A.out
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Repeated malloc test.
package main
import (
"flag";
"fmt";
"malloc";
"strconv"
)
var chatty bool;
var chatty_flag = flag.Bool("v", false, &chatty, "chatty");
var reverse bool;
var reverse_flag = flag.Bool("r", false, &reverse, "reverse");
var longtest bool;
var longtest_flag = flag.Bool("l", false, &longtest, "long test");
var b *[]*byte;
var stats = malloc.GetStats();
func OkAmount(size, n uint64) bool {
if n < size {
return false
}
if size < 16*8 {
if n > size+16 {
return false
}
} else {
if n > size*9/8 {
return false
}
}
return true
}
func AllocAndFree(size, count int) {
if chatty {
fmt.printf("size=%d count=%d ...\n", size, count);
}
n1 := stats.alloc;
for i := 0; i < count; i++ {
b[i] = malloc.Alloc(uint64(size));
base, n := malloc.Lookup(b[i]);
if base != b[i] || !OkAmount(uint64(size), n) {
panicln("lookup failed: got", base, n, "for", b[i]);
}
if malloc.GetStats().sys > 1e9 {
panicln("too much memory allocated");
}
}
n2 := stats.alloc;
if chatty {
fmt.printf("size=%d count=%d stats=%+v\n", size, count, *stats);
}
n3 := stats.alloc;
for j := 0; j < count; j++ {
i := j;
if reverse {
i = count - 1 - j;
}
alloc := stats.alloc;
base, n := malloc.Lookup(b[i]);
if base != b[i] || !OkAmount(uint64(size), n) {
panicln("lookup failed: got", base, n, "for", b[i]);
}
malloc.Free(b[i]);
if stats.alloc != alloc - n {
panicln("free alloc got", stats.alloc, "expected", alloc - n, "after free of", n);
}
if malloc.GetStats().sys > 1e9 {
panicln("too much memory allocated");
}
}
n4 := stats.alloc;
if chatty {
fmt.printf("size=%d count=%d stats=%+v\n", size, count, *stats);
}
if n2-n1 != n3-n4 {
panicln("wrong alloc count: ", n2-n1, n3-n4);
}
}
func atoi(s string) int {
i, xx1 := strconv.atoi(s);
return i
}
func main() {
flag.Parse();
b = new([]*byte, 10000);
if flag.NArg() > 0 {
AllocAndFree(atoi(flag.Arg(0)), atoi(flag.Arg(1)));
return;
}
for j := 1; j <= 1<<22; j<<=1 {
n := len(b);
max := uint64(1<<28);
if !longtest {
max = 1<<22;
}
if uint64(j)*uint64(n) > max {
n = int(max / uint64(j));
}
if n < 10 {
n = 10;
}
for m := 1; m <= n; {
AllocAndFree(j, m);
if m == n {
break
}
m = 5*m/4;
if m < 4 {
m++
}
if m > n {
m = n
}
}
}
}