Commit 8b8ff164 authored by Russ Cox

delete playpen copy of malloc

R=r
DELTA=905  (0 added, 905 deleted, 0 changed)
OCL=22663
CL=22690
parent 8fb60768
# Copyright 2009 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
CC=6c -w
GC=6g
LD=6l
O=6
TARG=testrandom testrepeat testsizetoclass

default: $(TARG)

%.$O: %.c malloc.h
	$(CC) $*.c

%.$O: %.go
	$(GC) $*.go

OFILES=\
	allocator.$O\
	malloc.$O\
	mem.$O\
	ms.$O\
	pagemap.$O\
	triv.$O\

testrandom.$O: allocator.$O
testrepeat.$O: allocator.$O

test%: test%.$O $(OFILES)
	$(LD) -o $@ $^

clean:
	rm -f *.$O $(TARG)

runtime: $(OFILES)
	6ar grc $(GOROOT)/lib/lib_$(GOARCH)_$(GOOS).a $^
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package allocator
export func free(*byte)
export func malloc(int) *byte
export func memset(*byte, int, int)
export var footprint int64
export var frozen bool
export func testsizetoclass()
export var allocated int64
export func find(uint64) (obj *byte, size int64, ref *int32, ok bool)
export func gc()
[This file's diff is collapsed and not shown.]
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "../../../src/runtime/runtime.h"
typedef struct PageMap PageMap;

enum
{
	PageShift = 12,
	PageMask = (1<<PageShift) - 1,
};

#define RefFree 0xffffffffU
#define RefManual 0xfffffffeU
#define RefStack 0xfffffffdU

enum {
	PMBits = 64 - PageShift,
	PMLevels = 4,
	PMLevelBits = 13,
	PMLevelSize = 1<<PMLevelBits,
	PMLevelMask = PMLevelSize - 1,
};

struct PageMap
{
	void *level0[PMLevelSize];
};

typedef struct Span Span;
typedef struct Central Central;

// A Span contains metadata about a range of pages.
enum {
	SpanInUse = 0,	// span has been handed out by allocator
	SpanFree = 1,	// span is in central free list
};

struct Span
{
	Span *aprev;	// in list of all spans
	Span *anext;
	Span *next;	// in free lists
	byte *base;	// first byte in span
	uintptr length;	// number of pages in span
	int32 cl;
	int32 state;	// state (enum above)
	union {
		int32 ref;	// reference count if state == SpanInUse (for GC)
		int32 *refbase;	// ptr to packed ref counts
	};
	// void *type;	// object type if state == SpanInUse (for GC)
};

// The Central cache contains a list of free spans,
// as well as free lists of small blocks.
struct Central
{
	Lock;
	Span *free[256];
	Span *large;	// free spans >= MaxPage pages
};
extern int64 allocator·allocated;
extern int64 allocator·footprint;
extern bool allocator·frozen;
void* trivalloc(int32);
void* pmlookup(PageMap*, uintptr);
void* pminsert(PageMap*, uintptr, void*);
void* alloc(int32);
void free(void*);
bool findobj(void*, void**, int64*, int32**);
extern Central central;
extern PageMap spanmap;
extern int32 classtosize[SmallFreeClasses];
extern Span *spanfirst, *spanlast;
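The ref word attached to each object (via Span.ref or the packed refbase array) doubles as a state marker: the sentinels RefFree, RefManual, and RefStack defined above mark free, manually managed, and stack blocks, while small counts presumably track live GC-managed objects (testrandom below expects a fresh allocation to have ref 0, and stackalloc below stores RefStack). A minimal standalone sketch in standard C, with a made-up helper name refstate, illustrates how such a word could be classified:

#include <stdio.h>
#include <stdint.h>

#define RefFree   0xffffffffU
#define RefManual 0xfffffffeU
#define RefStack  0xfffffffdU

// Classify a block's reference word against the sentinel values.
static const char*
refstate(uint32_t ref)
{
	switch(ref) {
	case RefFree:	return "free";
	case RefManual:	return "manually managed";
	case RefStack:	return "stack";
	default:	return "live object (reference count)";
	}
}

int
main(void)
{
	printf("%s\n", refstate(RefStack));	// what stackalloc stores
	printf("%s\n", refstate(0));		// a freshly allocated block
	return 0;
}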
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "malloc.h"
void*
stackalloc(uint32 n)
{
	void *v;
	int32 *ref;

	v = alloc(n);
	//printf("stackalloc %d = %p\n", n, v);
	ref = nil;
	findobj(v, nil, nil, &ref);
	*ref = RefStack;
	return v;
}

void
stackfree(void *v)
{
	//printf("stackfree %p\n", v);
	free(v);
}

void*
mal(uint32 n)
{
	return alloc(n);
}

void
sys·mal(uint32 n, uint8 *ret)
{
	ret = alloc(n);
	FLUSH(&ret);
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "malloc.h"
// A PageMap maps page numbers to void* pointers.
// The AMD64 has 64-bit addresses and 4096-byte pages, so
// the page numbers are 52 bits. We use a four-level radix tree,
// with 13 bits for each level. This requires 32 kB per level or
// 128 kB for a table with one entry. Moving to three levels of 18 bits
// would require 3 MB for a table with one entry, which seems too expensive.
// This is easy to change.
// It may be that a balanced tree would be better anyway.
// Return the entry for page number pn in m.
void*
pmlookup(PageMap *m, uintptr pn)
{
	int32 i, x;
	void **v;

	v = &m->level0[0];
	for(i=0; i<PMLevels; i++) {
		// Pick off top PMLevelBits bits as index and shift up.
		x = (pn >> (PMBits - PMLevelBits)) & PMLevelMask;
		pn <<= PMLevelBits;

		// Walk down using index.
		v = v[x];
		if(v == nil)
			return nil;
	}
	return v;
}
// Set the entry for page number pn in m to value.
// Return the old value.
void*
pminsert(PageMap *m, uintptr pn, void *value)
{
	int32 i, x;
	void **v, **l;

	l = nil;	// shut up 6c
	v = &m->level0[0];
	for(i=0; i<PMLevels; i++) {
		// Pick off top PMLevelBits bits as index and shift up.
		x = (pn >> (PMBits - PMLevelBits)) & PMLevelMask;
		pn <<= PMLevelBits;

		// Walk down using index, but remember location of pointer.
		l = &v[x];
		v = *l;

		// Allocate new level if needed.
		if(v == nil && i < PMLevels-1) {
			v = trivalloc(PMLevelSize * sizeof v[0]);
			*l = v;
		}
	}

	// Record new value and return old.
	*l = value;
	return v;
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
	"allocator";
	"rand";
	"syscall"
)

var footprint int64;
var allocated int64;

func bigger() {
	if footprint < allocator.footprint {
		footprint = allocator.footprint;
		println("Footprint", footprint, " for ", allocated);
		if footprint > 1e9 {
			panicln("too big");
		}
	}
}

// Prime the data structures by allocating one of
// each block in order. After this, there should be
// little reason to ask for more memory from the OS.
func prime() {
	for i := 0; i < 16; i++ {
		b := allocator.malloc(1<<uint(i));
		allocator.free(b);
	}
	for i := 0; i < 256; i++ {
		b := allocator.malloc(i<<12);
		allocator.free(b);
	}
}

func main() {
	// prime();
	var blocks [1] struct { base *byte; siz int; };
	for i := 0; i < 1 << 20; i++ {
		if i%(1<<10) == 0 {
			println(i);
		}
		b := rand.rand() % len(blocks);
		if blocks[b].base != nil {
			// println("Free", blocks[b].siz, blocks[b].base);
			allocator.free(blocks[b].base);
			blocks[b].base = nil;
			allocated -= int64(blocks[b].siz);
			continue
		}
		siz := rand.rand() >> (11 + rand.urand32() % 20);
		base := allocator.malloc(siz);
		ptr := uint64(syscall.BytePtr(base))+uint64(siz/2);
		obj, size, ref, ok := allocator.find(ptr);
		if obj != base || *ref != 0 || !ok {
			panicln("find", siz, obj, ref, ok);
		}
		blocks[b].base = base;
		blocks[b].siz = siz;
		allocated += int64(siz);
		// println("Alloc", siz, base);
		allocator.memset(base, 0xbb, siz);
		bigger();
	}
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
	"allocator"
)

var footprint int64

func bigger() {
	if footprint < allocator.footprint {
		footprint = allocator.footprint;
		println("Footprint", footprint);
	}
}

func main() {
	for i := 0; i < 1<<16; i++ {
		for j := 1; j <= 1<<22; j<<=1 {
			if i == 0 {
				println("First alloc:", j);
			}
			b := allocator.malloc(j);
			allocator.free(b);
			bigger();
		}
		if i%(1<<10) == 0 {
			println(i);
		}
		if i == 0 {
			println("Primed", i);
			allocator.frozen = true;
		}
	}
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import "allocator"
func main() {
	allocator.testsizetoclass()
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Trivial base allocator.
#include "malloc.h"
// TODO: The call to sys·mmap should be a call to an assembly
// function sys·mmapnew that takes only a size parameter.
enum
{
	PROT_NONE = 0x00,
	PROT_READ = 0x01,
	PROT_WRITE = 0x02,
	PROT_EXEC = 0x04,

	MAP_FILE = 0x0000,
	MAP_SHARED = 0x0001,
	MAP_PRIVATE = 0x0002,
	MAP_FIXED = 0x0010,
	MAP_ANON = 0x1000,
};

// Allocate and return zeroed memory.
// Simple allocator for small things like Span structures,
// and also used to grab large amounts of memory for
// the real allocator to hand out.
enum
{
	Round = 15,
};

void*
trivalloc(int32 size)
{
	static byte *p;
	static int32 n;
	byte *v;
	uint64 oldfoot;

	if(allocator·frozen)
		throw("allocator frozen");

	//prints("Newmem: ");
	//sys·printint(size);
	//prints("\n");

	oldfoot = allocator·footprint;
	if(size < 4096) {	// TODO: Tune constant.
		size = (size + Round) & ~Round;
		if(size > n) {
			n = 1<<20;	// TODO: Tune constant.
			p = sys·mmap(nil, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, 0, 0);
			allocator·footprint += n;
		}
		v = p;
		p += size;
		goto out;
	}

	if(size & PageMask)
		size += (1<<PageShift) - (size & PageMask);
	v = sys·mmap(nil, size, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, 0, 0);
	allocator·footprint += size;

out:
	if((oldfoot>>24) != (allocator·footprint>>24))
		printf("memory footprint = %D MB for %D MB\n", allocator·footprint>>20, allocator·allocated>>20);
	if(allocator·footprint >= 2LL<<30) {
		prints("out of memory\n");
		sys·exit(1);
	}
	return v;
}
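The two rounding rules in trivalloc are easy to check by hand: requests under 4096 bytes are rounded up to a multiple of 16 bytes with (size + Round) & ~Round, where Round = 15, and larger requests are padded out to a whole 4096-byte page. A standalone sketch in standard C (example sizes chosen arbitrarily) reproduces both computations:

#include <stdio.h>

enum {
	Round = 15,
	PageShift = 12,
	PageMask = (1<<PageShift) - 1,
};

int
main(void)
{
	// Small request: round up to the next multiple of Round+1 = 16 bytes.
	int small = 50;
	int smallrounded = (small + Round) & ~Round;	// 50 -> 64

	// Large request: pad to a whole 4096-byte page.
	int large = 5000;
	int largerounded = large;
	if(largerounded & PageMask)
		largerounded += (1<<PageShift) - (largerounded & PageMask);	// 5000 -> 8192

	printf("%d -> %d, %d -> %d\n", small, smallrounded, large, largerounded);
	return 0;
}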