Commit a9b37ae0 authored by Austin Clements

runtime: fully initialize span in alloc_m

Currently, several important fields of a heap span are set by
heapBits.initSpan, which happens after the span has already been
published and returned from the locked region of alloc_m. In
particular, allocBits is set very late, which makes mspan.isFree
unsafe even with the heap locked, because it tries to access
allocBits.
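
To make the hazard concrete, here is a minimal, self-contained model
(simplified stand-in types, not the actual runtime code) of an
isFree-style check against a span that was published before its
allocation bitmap was assigned:

package main

// Simplified stand-ins for the runtime's types; the names only loosely
// mirror mspan and gcBits.
type gcBits [8]uint8

type span struct {
	freeindex uintptr
	nelems    uintptr
	allocBits *gcBits // still nil if the span was published too early
}

// isFree mirrors the shape of mspan.isFree: it reads allocBits, so it
// faults on a span whose bitmap has not been set yet.
func (s *span) isFree(index uintptr) bool {
	if index < s.freeindex {
		return false
	}
	byteIdx, mask := index/8, uint8(1)<<(index%8)
	return s.allocBits[byteIdx]&mask == 0
}

func main() {
	s := &span{nelems: 4} // "published" but not yet fully initialized
	_ = s.isFree(0)       // panics: nil pointer dereference via allocBits
}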

This CL fixes this by populating these fields in alloc_m. The next CL
builds on this to only publish the span once it is fully initialized.
Together, they'll make it safe to check allocBits even if there is a
race with alloc_m.
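
The ordering being established is the usual initialize-then-publish
pattern. A hypothetical, simplified sketch (made-up names and types,
not the runtime's real helpers or data structures):

package main

import "sync"

type gcBits [8]uint8

type span struct {
	nelems     uintptr
	allocBits  *gcBits
	gcmarkBits *gcBits
}

var (
	heapLock sync.Mutex // stand-in for h.lock
	inUse    []*span    // stand-in for the shared structures a span is published to
)

func allocSpan(nelems uintptr) *span {
	// Allocate and assign the bitmaps while the span is still private ...
	s := &span{
		nelems:     nelems,
		allocBits:  new(gcBits),
		gcmarkBits: new(gcBits),
	}
	// ... and only then make it reachable from shared state.
	heapLock.Lock()
	inUse = append(inUse, s)
	heapLock.Unlock()
	return s
}

func main() {
	_ = allocSpan(64)
}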

For #10958, #24543, but a good fix in general.

Change-Id: I7fde90023af0f497e826b637efa4d19c32840c08
Reviewed-on: https://go-review.googlesource.com/c/go/+/203285
Run-TryBot: Austin Clements <austin@google.com>
Reviewed-by: Cherry Zhang <cherryyz@google.com>
Reviewed-by: Michael Knyszek <mknyszek@google.com>
parent d5caea77
@@ -792,29 +792,19 @@ func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
 // words to pointer/scan.
 // Otherwise, it initializes all words to scalar/dead.
 func (h heapBits) initSpan(s *mspan) {
-	size, n, total := s.layout()
-
-	// Init the markbit structures
-	s.freeindex = 0
-	s.allocCache = ^uint64(0) // all 1s indicating all free.
-	s.nelems = n
-	s.allocBits = nil
-	s.gcmarkBits = nil
-	s.gcmarkBits = newMarkBits(s.nelems)
-	s.allocBits = newAllocBits(s.nelems)
-
 	// Clear bits corresponding to objects.
-	nw := total / sys.PtrSize
+	nw := (s.npages << _PageShift) / sys.PtrSize
 	if nw%wordsPerBitmapByte != 0 {
 		throw("initSpan: unaligned length")
 	}
 	if h.shift != 0 {
 		throw("initSpan: unaligned base")
 	}
+	isPtrs := sys.PtrSize == 8 && s.elemsize == sys.PtrSize
 	for nw > 0 {
 		hNext, anw := h.forwardOrBoundary(nw)
 		nbyte := anw / wordsPerBitmapByte
-		if sys.PtrSize == 8 && size == sys.PtrSize {
+		if isPtrs {
 			bitp := h.bitp
 			for i := uintptr(0); i < nbyte; i++ {
 				*bitp = bitPointerAll | bitScanAll
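For the hunk above: s.layout() returned (size, n, total), where total is
the span's byte length and size its element size, which is why the deleted
locals can be replaced by expressions on the span itself. A small
self-contained sketch of that relationship (layout reproduced only
approximately, and a page shift of 13 assumed for _PageShift):

package main

import "fmt"

const pageShift = 13 // assumption: 8 KiB runtime pages

type span struct {
	npages   uintptr
	elemsize uintptr
}

// layout approximates what mspan.layout computes: (size, n, total).
func (s *span) layout() (size, n, total uintptr) {
	total = s.npages << pageShift
	size = s.elemsize
	if size > 0 {
		n = total / size
	}
	return
}

func main() {
	s := &span{npages: 1, elemsize: 8}
	size, _, total := s.layout()
	fmt.Println(total == s.npages<<pageShift) // true: total needs only s.npages
	fmt.Println(size == s.elemsize)           // true: size is just s.elemsize
}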
@@ -1012,6 +1012,23 @@ func (h *mheap) alloc_m(npage uintptr, spanclass spanClass, large bool) *mspan {
 		h.reclaim(npage)
 	}
 
+	// Compute size information.
+	nbytes := npage << _PageShift
+	var elemSize, nelems uintptr
+	if sizeclass := spanclass.sizeclass(); sizeclass == 0 {
+		elemSize = nbytes
+		nelems = 1
+	} else {
+		elemSize = uintptr(class_to_size[sizeclass])
+		nelems = nbytes / elemSize
+	}
+
+	// Allocate mark and allocation bits before we take the heap
+	// lock. We'll drop these on the floor if we fail to allocate
+	// the span, but in that case we'll panic soon.
+	gcmarkBits := newMarkBits(nelems)
+	allocBits := newAllocBits(nelems)
+
 	lock(&h.lock)
 	// transfer stats from cache to global
 	memstats.heap_scan += uint64(_g_.m.mcache.local_scan)
@@ -1028,14 +1045,13 @@ func (h *mheap) alloc_m(npage uintptr, spanclass spanClass, large bool) *mspan {
 		s.state = mSpanInUse
 		s.allocCount = 0
 		s.spanclass = spanclass
+		s.elemsize = elemSize
 		if sizeclass := spanclass.sizeclass(); sizeclass == 0 {
-			s.elemsize = s.npages << _PageShift
 			s.divShift = 0
 			s.divMul = 0
 			s.divShift2 = 0
 			s.baseMask = 0
 		} else {
-			s.elemsize = uintptr(class_to_size[sizeclass])
 			m := &class_to_divmagic[sizeclass]
 			s.divShift = m.shift
 			s.divMul = m.mul
@@ -1043,6 +1059,13 @@ func (h *mheap) alloc_m(npage uintptr, spanclass spanClass, large bool) *mspan {
 			s.baseMask = m.baseMask
 		}
 
+		// Initialize mark and allocation structures.
+		s.freeindex = 0
+		s.allocCache = ^uint64(0) // all 1s indicating all free.
+		s.nelems = nelems
+		s.gcmarkBits = gcmarkBits
+		s.allocBits = allocBits
+
 		// Mark in-use span in arena page bitmap.
 		arena, pageIdx, pageMask := pageIndexOf(s.base())
 		arena.pageInUse[pageIdx] |= pageMask