Commit 3965d750 authored by Russ Cox

runtime: factor out bitmap, finalizer code from malloc/mgc

The code in mfinal.go is moved from malloc*.go and mgc*.go
and is substantially unchanged.

The code in mbitmap.go is also moved from those files, but
cleaned up so that it can be called from those files (in most cases
the code being moved was not already a standalone function).
I also renamed the constants and wrote comments describing
the format. The result is a significant cleanup and isolation of
the bitmap code, but, roughly speaking, it should be treated
and reviewed as new code.

The other files changed only as much as necessary to support
this code movement.

This CL does NOT change the semantics of the heap or type
bitmaps at all, although there are now some obvious opportunities
to do so in followup CLs.
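
Roughly, the renamed constants keep the same two-bit-per-word type
encoding that the old _Bits* names described. As a minimal sketch of
how such a packed mask is read (illustrative only, not the exact
mbitmap.go code; the 2-bit width mirrors the old _BitsPerPointer):

    const (
        typeDead      = 0 // no more pointers in this object
        typeScalar    = 1 // scalar word
        typePointer   = 2 // pointer word
        typeBitsWidth = 2 // bits of type information per heap word
        typeMask      = 1<<typeBitsWidth - 1
    )

    // typeBitsEntry extracts the type bits for word i of a packed mask,
    // the same way dumpbv and makeheapobjbv below walk their bitmaps.
    func typeBitsEntry(mask []byte, i uintptr) uint8 {
        return (mask[i*typeBitsWidth/8] >> (i * typeBitsWidth % 8)) & typeMask
    }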

Change-Id: I41b8d5de87ad1d3cd322709931ab25e659dbb21d
Reviewed-on: https://go-review.googlesource.com/2991
Reviewed-by: Keith Randall <khr@golang.org>
parent fd880f8d
@@ -50,7 +50,7 @@ func TestGCInfo(t *testing.T) {
func verifyGCInfo(t *testing.T, name string, p interface{}, mask0 []byte) {
mask := runtime.GCMask(p)
if len(mask) > len(mask0) {
mask0 = append(mask0, BitsDead)
mask0 = append(mask0, typeDead)
mask = mask[:len(mask0)]
}
if bytes.Compare(mask, mask0) != 0 {
@@ -60,11 +60,11 @@ func verifyGCInfo(t *testing.T, name string, p interface{}, mask0 []byte) {
}
func nonStackInfo(mask []byte) []byte {
// BitsDead is replaced with BitsScalar everywhere except stacks.
// typeDead is replaced with typeScalar everywhere except stacks.
mask1 := make([]byte, len(mask))
for i, v := range mask {
if v == BitsDead {
v = BitsScalar
if v == typeDead {
v = typeScalar
}
mask1[i] = v
}
@@ -79,9 +79,9 @@ func escape(p interface{}) interface{} {
}
const (
BitsDead = iota
BitsScalar
BitsPointer
typeDead = iota
typeScalar
typePointer
)
const (
@@ -100,7 +100,7 @@ type ScalarPtr struct {
y *int
}
var infoScalarPtr = []byte{BitsScalar, BitsPointer, BitsScalar, BitsPointer, BitsScalar, BitsPointer}
var infoScalarPtr = []byte{typeScalar, typePointer, typeScalar, typePointer, typeScalar, typePointer}
type PtrScalar struct {
q *int
@@ -111,7 +111,7 @@ type PtrScalar struct {
y int
}
var infoPtrScalar = []byte{BitsPointer, BitsScalar, BitsPointer, BitsScalar, BitsPointer, BitsScalar}
var infoPtrScalar = []byte{typePointer, typeScalar, typePointer, typeScalar, typePointer, typeScalar}
type BigStruct struct {
q *int
@@ -128,27 +128,27 @@ func infoBigStruct() []byte {
switch runtime.GOARCH {
case "386", "arm":
return []byte{
BitsPointer, // q *int
BitsScalar, BitsScalar, BitsScalar, BitsScalar, BitsScalar, // w byte; e [17]byte
BitsPointer, BitsDead, BitsDead, // r []byte
BitsScalar, BitsScalar, BitsScalar, BitsScalar, // t int; y uint16; u uint64
BitsPointer, BitsDead, // i string
typePointer, // q *int
typeScalar, typeScalar, typeScalar, typeScalar, typeScalar, // w byte; e [17]byte
typePointer, typeDead, typeDead, // r []byte
typeScalar, typeScalar, typeScalar, typeScalar, // t int; y uint16; u uint64
typePointer, typeDead, // i string
}
case "amd64", "ppc64", "ppc64le":
return []byte{
BitsPointer, // q *int
BitsScalar, BitsScalar, BitsScalar, // w byte; e [17]byte
BitsPointer, BitsDead, BitsDead, // r []byte
BitsScalar, BitsScalar, BitsScalar, // t int; y uint16; u uint64
BitsPointer, BitsDead, // i string
typePointer, // q *int
typeScalar, typeScalar, typeScalar, // w byte; e [17]byte
typePointer, typeDead, typeDead, // r []byte
typeScalar, typeScalar, typeScalar, // t int; y uint16; u uint64
typePointer, typeDead, // i string
}
case "amd64p32":
return []byte{
BitsPointer, // q *int
BitsScalar, BitsScalar, BitsScalar, BitsScalar, BitsScalar, // w byte; e [17]byte
BitsPointer, BitsDead, BitsDead, // r []byte
BitsScalar, BitsScalar, BitsDead, BitsScalar, BitsScalar, // t int; y uint16; u uint64
BitsPointer, BitsDead, // i string
typePointer, // q *int
typeScalar, typeScalar, typeScalar, typeScalar, typeScalar, // w byte; e [17]byte
typePointer, typeDead, typeDead, // r []byte
typeScalar, typeScalar, typeDead, typeScalar, typeScalar, // t int; y uint16; u uint64
typePointer, typeDead, // i string
}
default:
panic("unknown arch")
@@ -183,8 +183,8 @@ var (
dataEface interface{} = 42
dataIface Iface = IfaceImpl(42)
infoString = []byte{BitsPointer, BitsDead}
infoSlice = []byte{BitsPointer, BitsDead, BitsDead}
infoEface = []byte{BitsPointer, BitsPointer}
infoIface = []byte{BitsPointer, BitsPointer}
infoString = []byte{typePointer, typeDead}
infoSlice = []byte{typePointer, typeDead, typeDead}
infoEface = []byte{typePointer, typePointer}
infoIface = []byte{typePointer, typePointer}
)
@@ -219,18 +219,18 @@ type childInfo struct {
// dump kinds & offsets of interesting fields in bv
func dumpbv(cbv *bitvector, offset uintptr) {
bv := gobv(*cbv)
for i := uintptr(0); i < uintptr(bv.n); i += bitsPerPointer {
switch bv.bytedata[i/8] >> (i % 8) & 3 {
for i := uintptr(0); i < uintptr(bv.n); i += typeBitsWidth {
switch bv.bytedata[i/8] >> (i % 8) & typeMask {
default:
throw("unexpected pointer bits")
case _BitsDead:
// BitsDead has already been processed in makeheapobjbv.
case typeDead:
// typeDead has already been processed in makeheapobjbv.
// We should only see it in stack maps, in which case we should continue processing.
case _BitsScalar:
case typeScalar:
// ok
case _BitsPointer:
case typePointer:
dumpint(fieldKindPtr)
dumpint(uint64(offset + i/_BitsPerPointer*ptrSize))
dumpint(uint64(offset + i/typeBitsWidth*ptrSize))
}
}
}
@@ -260,7 +260,7 @@ func dumpframe(s *stkframe, arg unsafe.Pointer) bool {
var bv bitvector
if stkmap != nil && stkmap.n > 0 {
bv = stackmapdata(stkmap, pcdata)
dumpbvtypes(&bv, unsafe.Pointer(s.varp-uintptr(bv.n/_BitsPerPointer*ptrSize)))
dumpbvtypes(&bv, unsafe.Pointer(s.varp-uintptr(bv.n/typeBitsWidth*ptrSize)))
} else {
bv.n = -1
}
@@ -308,7 +308,7 @@ func dumpframe(s *stkframe, arg unsafe.Pointer) bool {
} else if stkmap.n > 0 {
// Locals bitmap information, scan just the pointers in
// locals.
dumpbv(&bv, s.varp-uintptr(bv.n)/_BitsPerPointer*ptrSize-s.sp)
dumpbv(&bv, s.varp-uintptr(bv.n)/typeBitsWidth*ptrSize-s.sp)
}
dumpint(fieldKindEol)
@@ -701,29 +701,28 @@ func dumpbvtypes(bv *bitvector, base unsafe.Pointer) {
func makeheapobjbv(p uintptr, size uintptr) bitvector {
// Extend the temp buffer if necessary.
nptr := size / ptrSize
if uintptr(len(tmpbuf)) < nptr*_BitsPerPointer/8+1 {
if uintptr(len(tmpbuf)) < nptr*typeBitsWidth/8+1 {
if tmpbuf != nil {
sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
}
n := nptr*_BitsPerPointer/8 + 1
n := nptr*typeBitsWidth/8 + 1
p := sysAlloc(n, &memstats.other_sys)
if p == nil {
throw("heapdump: out of memory")
}
tmpbuf = (*[1 << 30]byte)(p)[:n]
}
// Copy and compact the bitmap.
var i uintptr
for i = 0; i < nptr; i++ {
off := (p + i*ptrSize - mheap_.arena_start) / ptrSize
bitp := (*uint8)(unsafe.Pointer(mheap_.arena_start - off/wordsPerBitmapByte - 1))
shift := uint8((off % wordsPerBitmapByte) * gcBits)
bits := (*bitp >> (shift + 2)) & _BitsMask
if bits == _BitsDead {
break // end of heap object
// Convert heap bitmap to type bitmap.
i := uintptr(0)
hbits := heapBitsForAddr(p)
for ; i < nptr; i++ {
bits := hbits.typeBits()
if bits == typeDead {
break // end of object
}
tmpbuf[i*_BitsPerPointer/8] &^= (_BitsMask << ((i * _BitsPerPointer) % 8))
tmpbuf[i*_BitsPerPointer/8] |= bits << ((i * _BitsPerPointer) % 8)
hbits = hbits.next()
tmpbuf[i*typeBitsWidth/8] &^= (typeMask << ((i * typeBitsWidth) % 8))
tmpbuf[i*typeBitsWidth/8] |= bits << ((i * typeBitsWidth) % 8)
}
return bitvector{int32(i * _BitsPerPointer), &tmpbuf[0]}
return bitvector{int32(i * typeBitsWidth), &tmpbuf[0]}
}
@@ -350,8 +350,6 @@ func largeAlloc(size uintptr, flag uint32) *mspan {
throw("out of memory")
}
s.limit = uintptr(s.start)<<_PageShift + size
v := unsafe.Pointer(uintptr(s.start) << _PageShift)
// setup for mark sweep
markspan(v, 0, 0, true)
heapBitsForSpan(s.base()).initSpan(s.layout())
return s
}
@@ -405,6 +405,19 @@ type mspan struct {
specials *special // linked list of special records sorted by offset.
}
func (s *mspan) base() uintptr {
return uintptr(s.start << _PageShift)
}
func (s *mspan) layout() (size, n, total uintptr) {
total = s.npages << _PageShift
size = s.elemsize
if size > 0 {
n = total / size
}
return
}
// Every MSpan is in one doubly-linked list,
// either one of the MHeap's free lists or one of the
// MCentral's span lists. We use empty MSpan structures as list heads.
@@ -211,11 +211,11 @@ func typedmemmove(typ *_type, dst, src unsafe.Pointer) {
}
systemstack(func() {
mask := loadPtrMask(typ)
mask := typeBitmapInHeapBitmapFormat(typ)
nptr := typ.size / ptrSize
for i := uintptr(0); i < nptr; i += 2 {
bits := mask[i/2]
if (bits>>2)&_BitsMask == _BitsPointer {
if (bits>>2)&typeMask == typePointer {
writebarrierptr((*uintptr)(dst), *(*uintptr)(src))
} else {
*(*uintptr)(dst) = *(*uintptr)(src)
@@ -227,7 +227,7 @@ func typedmemmove(typ *_type, dst, src unsafe.Pointer) {
break
}
bits >>= 4
if (bits>>2)&_BitsMask == _BitsPointer {
if (bits>>2)&typeMask == typePointer {
writebarrierptr((*uintptr)(dst), *(*uintptr)(src))
} else {
*(*uintptr)(dst) = *(*uintptr)(src)
@@ -262,11 +262,11 @@ func reflect_typedmemmovepartial(typ *_type, dst, src unsafe.Pointer, off, size
off += frag
}
mask := loadPtrMask(typ)
mask := typeBitmapInHeapBitmapFormat(typ)
nptr := (off + size) / ptrSize
for i := uintptr(off / ptrSize); i < nptr; i++ {
bits := mask[i/2] >> ((i & 1) << 2)
if (bits>>2)&_BitsMask == _BitsPointer {
if (bits>>2)&typeMask == typePointer {
writebarrierptr((*uintptr)(dst), *(*uintptr)(src))
} else {
*(*uintptr)(dst) = *(*uintptr)(src)
@@ -295,14 +295,14 @@ func callwritebarrier(typ *_type, frame unsafe.Pointer, framesize, retoffset uin
}
systemstack(func() {
mask := loadPtrMask(typ)
mask := typeBitmapInHeapBitmapFormat(typ)
// retoffset is known to be pointer-aligned (at least).
// TODO(rsc): The noescape call should be unnecessary.
dst := add(noescape(frame), retoffset)
nptr := framesize / ptrSize
for i := uintptr(retoffset / ptrSize); i < nptr; i++ {
bits := mask[i/2] >> ((i & 1) << 2)
if (bits>>2)&_BitsMask == _BitsPointer {
if (bits>>2)&typeMask == typePointer {
writebarrierptr_nostore((*uintptr)(dst), *(*uintptr)(dst))
}
// TODO(rsc): The noescape call should be unnecessary.
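// A rough sketch (hypothetical helper, not in this CL) of how the loops above
// read a mask in the "heap bitmap format" returned by typeBitmapInHeapBitmapFormat:
// one 4-bit nibble per heap word, with the two type bits
// (typeDead/typeScalar/typePointer) stored in the nibble's top half.
func heapFormatTypeBits(mask []byte, i uintptr) uint8 {
	const typeMask = 3                  // two type bits per word
	bits := mask[i/2] >> ((i & 1) << 2) // low nibble for even i, high nibble for odd i
	return (bits >> 2) & typeMask       // skip the two low (boundary/mark) bits
}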
@@ -12,8 +12,6 @@
package runtime
import "unsafe"
// Initialize a single central free list.
func mCentral_Init(c *mcentral, sizeclass int32) {
c.sizeclass = sizeclass
@@ -167,7 +165,7 @@ func mCentral_FreeSpan(c *mcentral, s *mspan, n int32, start gclinkptr, end gcli
s.needzero = 1
s.freelist = 0
unlock(&c.lock)
unmarkspan(uintptr(s.start)<<_PageShift, s.npages<<_PageShift)
heapBitsForSpan(s.base()).clearSpan(s.layout())
mHeap_Free(&mheap_, s, 0)
return true
}
@@ -198,6 +196,6 @@ func mCentral_Grow(c *mcentral) *mspan {
}
tail.ptr().next = 0
s.freelist = head
markspan(unsafe.Pointer(uintptr(s.start)<<_PageShift), size, n, size*n < s.npages<<_PageShift)
heapBitsForSpan(s.base()).initSpan(s.layout())
return s
}
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Garbage collector (GC)
package runtime
const (
// Four bits per word (see #defines below).
gcBits = 4
wordsPerBitmapByte = 8 / gcBits
)
const (
// GC type info programs.
// The programs allow storing the type info required for GC in a compact form.
// Most importantly, arrays take O(1) space instead of O(n).
// The program grammar is:
//
// Program = {Block} "insEnd"
// Block = Data | Array
// Data = "insData" DataSize DataBlock
// DataSize = int // size of the DataBlock in bit pairs, 1 byte
// DataBlock = binary // dense GC mask (2 bits per word) of size ]DataSize/4[ bytes
// Array = "insArray" ArrayLen Block "insArrayEnd"
// ArrayLen = int // length of the array, 8 bytes (4 bytes for 32-bit arch)
//
// Each instruction (insData, insArray, etc) is 1 byte.
// For example, for type struct { x []byte; y [20]struct{ z int; w *byte }; }
// the program looks as:
//
// insData 3 (BitsPointer BitsScalar BitsScalar)
// insArray 20 insData 2 (BitsScalar BitsPointer) insArrayEnd insEnd
//
// Total size of the program is 17 bytes (13 bytes on 32-bits).
// The corresponding GC mask would take 43 bytes (it would be repeated
// because the type has odd number of words).
insData = 1 + iota
insArray
insArrayEnd
insEnd
)
const (
// Pointer map
_BitsPerPointer = 2
_BitsMask = (1 << _BitsPerPointer) - 1
_PointersPerByte = 8 / _BitsPerPointer
// If you change these, also change scanblock.
// scanblock does "if(bits == BitsScalar || bits == BitsDead)" as "if(bits <= BitsScalar)".
_BitsDead = 0
_BitsScalar = 1 // 01
_BitsPointer = 2 // 10
_BitsCheckMarkXor = 1 // 10
_BitsScalarMarked = _BitsScalar ^ _BitsCheckMarkXor // 00
_BitsPointerMarked = _BitsPointer ^ _BitsCheckMarkXor // 11
// 64 bytes cover objects of size 1024/512 on 64/32 bits, respectively.
_MaxGCMask = 65536 // TODO(rsc): change back to 64
)
// Bits in per-word bitmap.
// #defines because we shift the values beyond 32 bits.
//
// Each word in the bitmap describes wordsPerBitmapWord words
// of heap memory. There are 4 bitmap bits dedicated to each heap word,
// so on a 64-bit system there is one bitmap word per 16 heap words.
//
// The bitmap starts at mheap.arena_start and extends *backward* from
// there. On a 64-bit system the off'th word in the arena is tracked by
// the off/16+1'th word before mheap.arena_start. (On a 32-bit system,
// the only difference is that the divisor is 8.)
const (
bitBoundary = 1 // boundary of an object
bitMarked = 2 // marked object
bitMask = bitBoundary | bitMarked
bitPtrMask = _BitsMask << 2
)
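// As a rough sketch (assumed names: arenaStart stands in for
// mheap_.arena_start, ptrSize for the pointer word size), the addressing
// described in the comment above works out to:
func heapBitsLocation(p, arenaStart, ptrSize uintptr) (bitp uintptr, shift uint8) {
	const gcBits = 4                      // four bitmap bits per heap word
	const wordsPerBitmapByte = 8 / gcBits // two heap words share one bitmap byte
	off := (p - arenaStart) / ptrSize     // word index of p within the arena
	bitp = arenaStart - off/wordsPerBitmapByte - 1     // the bitmap grows backward from arena_start
	shift = uint8((off % wordsPerBitmapByte) * gcBits) // selects the low or high nibble
	return
}
// The two type bits for that word are then (*(*byte)(unsafe.Pointer(bitp)) >> (shift + 2)) & _BitsMask,
// exactly as the loop removed from makeheapobjbv read them.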
@@ -296,9 +296,9 @@ func stackfree(stk stack) {
var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real
var mapnames = []string{
_BitsDead: "---",
_BitsScalar: "scalar",
_BitsPointer: "ptr",
typeDead: "---",
typeScalar: "scalar",
typePointer: "ptr",
}
// Stack frame layout
@@ -371,7 +371,7 @@ func adjustpointers(scanp unsafe.Pointer, cbv *bitvector, adjinfo *adjustinfo, f
minp := adjinfo.old.lo
maxp := adjinfo.old.hi
delta := adjinfo.delta
num := uintptr(bv.n / _BitsPerPointer)
num := uintptr(bv.n) / typeBitsWidth
for i := uintptr(0); i < num; i++ {
if stackDebug >= 4 {
print(" ", add(scanp, i*ptrSize), ":", mapnames[ptrbits(&bv, i)], ":", hex(*(*uintptr)(add(scanp, i*ptrSize))), " # ", i, " ", bv.bytedata[i/4], "\n")
@@ -379,13 +379,13 @@ func adjustpointers(scanp unsafe.Pointer, cbv *bitvector, adjinfo *adjustinfo, f
switch ptrbits(&bv, i) {
default:
throw("unexpected pointer bits")
case _BitsDead:
case typeDead:
if debug.gcdead != 0 {
*(*unsafe.Pointer)(add(scanp, i*ptrSize)) = unsafe.Pointer(uintptr(poisonStack))
}
case _BitsScalar:
case typeScalar:
// ok
case _BitsPointer:
case typePointer:
p := *(*unsafe.Pointer)(add(scanp, i*ptrSize))
up := uintptr(p)
if f != nil && 0 < up && up < _PageSize && debug.invalidptr != 0 || up == poisonStack {
@@ -453,7 +453,7 @@ func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
throw("bad symbol table")
}
bv = stackmapdata(stackmap, pcdata)
size = (uintptr(bv.n) * ptrSize) / _BitsPerPointer
size = (uintptr(bv.n) / typeBitsWidth) * ptrSize
if stackDebug >= 3 {
print(" locals ", pcdata, "/", stackmap.n, " ", size/ptrSize, " words ", bv.bytedata, "\n")
}
@@ -22,7 +22,7 @@ type _type struct {
// (no indirection), 4 bits per word.
// If (kind&KindGCProg)!=0, then gc[1] points to a compiler-generated
// read-only GC program; and gc[0] points to BSS space for sparse GC bitmap.
// For huge _types (>MaxGCMask), runtime unrolls the program directly into
// For huge _types (>maxGCMask), runtime unrolls the program directly into
// GC bitmap and gc[0] is not used. For moderately-sized _types, runtime
// unrolls the program into gc[0] space on first use. The first byte of gc[0]
// (gc[0][0]) contains 'unroll' flag saying whether the program is already