Commit 564eab89 authored by Russ Cox

runtime: add GODEBUG=sbrk=1 to bypass memory allocator (and GC)

To reduce lock contention in this mode, this change makes the persistent
allocation state per-P, which means at most 64 kB of overhead times
$GOMAXPROCS, which should be completely tolerable.

Change-Id: I34ca95e77d7e67130e30822e5a4aff6772b1a1c5
Reviewed-on: https://go-review.googlesource.com/7740
Reviewed-by: Rick Hudson <rlh@golang.org>
parent 01af7270
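For context, GODEBUG is a comma-separated list of name=value options read once at process startup, so the new mode is enabled from the environment; a hypothetical invocation (the binary name is illustrative):

	GODEBUG=sbrk=1 ./myprogram

Every allocation is then served by persistentalloc: nothing is ever freed, and the collector is bypassed along with the allocator.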
@@ -483,16 +483,23 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
 	if gcphase == _GCmarktermination {
 		throw("mallocgc called with gcphase == _GCmarktermination")
 	}
-	shouldhelpgc := false
 	if size == 0 {
 		return unsafe.Pointer(&zerobase)
 	}
-	dataSize := size
 	if flags&flagNoScan == 0 && typ == nil {
 		throw("malloc missing type")
 	}
+
+	if debug.sbrk != 0 {
+		align := uintptr(16)
+		if typ != nil {
+			align = uintptr(typ.align)
+		}
+		return persistentalloc(size, align, &memstats.other_sys)
+	}
+
 	// Set mp.mallocing to keep from being preempted by GC.
 	mp := acquirem()
 	if mp.mallocing != 0 {
@@ -500,6 +507,8 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
 	}
 	mp.mallocing = 1

+	shouldhelpgc := false
+	dataSize := size
 	c := gomcache()
 	var s *mspan
 	var x unsafe.Pointer
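The new branch turns mallocgc into a pointer-bumping allocator: pick an alignment (16 bytes by default, the type's own alignment when it is known) and hand the request to persistentalloc. A minimal self-contained sketch of the same bump-allocation idea, with illustrative names (bumpAlloc, chunkSize) that are not from the runtime:

	package main

	import (
		"fmt"
		"unsafe"
	)

	// chunkSize is illustrative; the commit message cites at most
	// 64 kB of overhead per P, so a 64 kB chunk is assumed here.
	const chunkSize = 64 << 10

	// bumpAlloc hands out aligned regions from a chunk and never
	// frees them, the same discipline persistentalloc follows.
	type bumpAlloc struct {
		base []byte
		off  uintptr
	}

	// roundUp rounds n up to a multiple of align (a power of two).
	func roundUp(n, align uintptr) uintptr {
		return (n + align - 1) &^ (align - 1)
	}

	func (b *bumpAlloc) alloc(size, align uintptr) unsafe.Pointer {
		// Alignment here is relative to the chunk base, a
		// simplification; the runtime aligns absolute addresses.
		b.off = roundUp(b.off, align)
		if b.base == nil || b.off+size > uintptr(len(b.base)) {
			// Out of room: grab a fresh chunk (the runtime uses
			// sysAlloc; a plain make is the portable stand-in).
			b.base = make([]byte, chunkSize)
			b.off = 0
		}
		p := unsafe.Pointer(&b.base[b.off])
		b.off += size
		return p
	}

	func main() {
		var b bumpAlloc
		fmt.Println(b.alloc(24, 16), b.alloc(8, 8))
	}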
@@ -761,12 +770,16 @@ func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
 	mProf_Malloc(x, size)
 }

-var persistent struct {
-	lock mutex
+type persistentAlloc struct {
 	base unsafe.Pointer
 	off  uintptr
 }

+var globalAlloc struct {
+	mutex
+	persistentAlloc
+}
+
 // Wrapper around sysAlloc that can allocate small chunks.
 // There is no associated free operation.
 // Intended for things like function/type/debug-related persistent data.
@@ -795,19 +808,31 @@ func persistentalloc(size, align uintptr, stat *uint64) unsafe.Pointer {
 		return sysAlloc(size, stat)
 	}

-	lock(&persistent.lock)
+	mp := acquirem()
+	var persistent *persistentAlloc
+	if mp != nil && mp.p != nil {
+		persistent = &mp.p.palloc
+	} else {
+		lock(&globalAlloc.mutex)
+		persistent = &globalAlloc.persistentAlloc
+	}
 	persistent.off = round(persistent.off, align)
 	if persistent.off+size > chunk || persistent.base == nil {
 		persistent.base = sysAlloc(chunk, &memstats.other_sys)
 		if persistent.base == nil {
-			unlock(&persistent.lock)
+			if persistent == &globalAlloc.persistentAlloc {
+				unlock(&globalAlloc.mutex)
+			}
 			throw("runtime: cannot allocate memory")
 		}
 		persistent.off = 0
 	}
 	p := add(persistent.base, persistent.off)
 	persistent.off += size
-	unlock(&persistent.lock)
+	releasem(mp)
+	if persistent == &globalAlloc.persistentAlloc {
+		unlock(&globalAlloc.mutex)
+	}

 	if stat != &memstats.other_sys {
 		xadd64(stat, int64(size))
...
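Two details carry the contention fix: acquirem pins the current goroutine to its M (and hence to that M's P), so &mp.p.palloc can be bumped with no lock at all, since only one goroutine runs on a P at a time; only when no P is available does the code fall back to globalAlloc, whose embedded mutex must be taken. A rough standalone sketch of that per-worker-fast-path with locked-fallback shape (all names are illustrative):

	package main

	import (
		"fmt"
		"sync"
	)

	// perWorkerState mirrors the shape of persistentAlloc: data that
	// is only ever touched by its owning worker needs no lock.
	type perWorkerState struct {
		bytes uintptr
	}

	var global struct {
		sync.Mutex     // embedded, like globalAlloc's mutex
		perWorkerState // embedded, like globalAlloc's persistentAlloc
	}

	// account adds n to the worker's own state when one exists,
	// otherwise to the locked global fallback.
	func account(worker *perWorkerState, n uintptr) {
		state := worker
		if state == nil {
			global.Lock()
			state = &global.perWorkerState
		}
		state.bytes += n
		if state == &global.perWorkerState {
			global.Unlock()
		}
	}

	func main() {
		w := &perWorkerState{}
		account(w, 64)   // lock-free: owner-only access
		account(nil, 32) // falls back to the locked global
		fmt.Println(w.bytes, global.bytes)
	}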
@@ -254,6 +254,11 @@ func runfinq() {
 // If a finalizer must run for a long time, it should do so by starting
 // a new goroutine.
 func SetFinalizer(obj interface{}, finalizer interface{}) {
+	if debug.sbrk != 0 {
+		// debug.sbrk never frees memory, so no finalizers run
+		// (and we don't have the data structures to record them).
+		return
+	}
 	e := (*eface)(unsafe.Pointer(&obj))
 	etyp := e._type
 	if etyp == nil {
...
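A consequence worth noting: under sbrk=1, SetFinalizer is silently a no-op. A small program where the difference is observable (the sleep is illustrative; finalizer scheduling is never guaranteed in any mode):

	package main

	import (
		"fmt"
		"runtime"
		"time"
	)

	func main() {
		p := new(int)
		runtime.SetFinalizer(p, func(*int) { fmt.Println("finalized") })
		p = nil
		runtime.GC()
		time.Sleep(100 * time.Millisecond)
		// Normal run: usually prints "finalized".
		// GODEBUG=sbrk=1: never prints, per the early return above.
	}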
@@ -316,6 +316,7 @@ var debug struct {
 	schedtrace  int32
 	wbshadow    int32
 	gccheckmark int32
+	sbrk        int32
 }

 var dbgvars = []dbgVar{
@@ -329,6 +330,7 @@ var dbgvars = []dbgVar{
 	{"schedtrace", &debug.schedtrace},
 	{"wbshadow", &debug.wbshadow},
 	{"gccheckmark", &debug.gccheckmark},
+	{"sbrk", &debug.sbrk},
 }

 func parsedebugvars() {
...
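The two hunks above are all the wiring GODEBUG=sbrk=1 needs: one int32 field plus one table entry, filled in by parsedebugvars at startup. A rough model of that parsing loop (the real runtime avoids packages like strings and strconv, so this is an approximation, not its code):

	package main

	import (
		"fmt"
		"os"
		"strconv"
		"strings"
	)

	// parseDebugVars models parsedebugvars: GODEBUG is a comma-separated
	// list of name=value pairs, matched against a fixed variable table.
	func parseDebugVars(vars map[string]*int32) {
		for _, field := range strings.Split(os.Getenv("GODEBUG"), ",") {
			name, value, ok := strings.Cut(field, "=")
			if !ok {
				continue
			}
			if p, found := vars[name]; found {
				if n, err := strconv.Atoi(value); err == nil {
					*p = int32(n)
				}
			}
		}
	}

	func main() {
		var sbrk int32
		parseDebugVars(map[string]*int32{"sbrk": &sbrk})
		fmt.Println("sbrk =", sbrk) // prints 1 under GODEBUG=sbrk=1
	}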
@@ -369,6 +369,8 @@ type p struct {
 	tracebuf *traceBuf

+	palloc persistentAlloc // per-P to avoid mutex
+
 	pad [64]byte
 }
...