Commit e1ddf050 authored by Michael Anthony Knyszek, committed by Michael Knyszek

runtime: count scavenged bits for new allocation for new page allocator

This change makes it so that the new page allocator returns the number
of pages that are scavenged in a new allocation so that mheap can update
memstats appropriately.

The accounting could be embedded into pageAlloc, but that would make
the new allocator more difficult to test.

Updates #35112.

Change-Id: I0f94f563d7af2458e6d534f589d2e7dd6af26d12
Reviewed-on: https://go-review.googlesource.com/c/go/+/195698
Reviewed-by: Austin Clements <austin@google.com>
parent 73317080
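The shape of the change: (*pageAlloc).alloc now reports, alongside the base address, how many bytes in the returned range were previously scavenged, so the caller can fold that into its memory statistics. Below is a minimal, self-contained sketch of that pattern; toyAllocator, heapBase, and heapReleased are illustrative stand-ins, not code from this CL or the runtime.

package main

import "fmt"

const (
	pageSize = 8192
	heapBase = 0x10000 // arbitrary nonzero base so that address 0 can mean "failure"
)

// toyAllocator stands in for pageAlloc: it hands out pages in order from a
// fixed region and remembers which pages are currently scavenged.
type toyAllocator struct {
	next      uintptr // index of the next free page
	scavenged []bool  // scavenged[i] reports whether page i is scavenged
}

// alloc returns the base address of an npages-page allocation and, mirroring
// the new (*pageAlloc).alloc signature, the number of scavenged bytes inside
// the returned range. A zero base address means the allocation failed.
func (a *toyAllocator) alloc(npages uintptr) (base, scav uintptr) {
	if a.next+npages > uintptr(len(a.scavenged)) {
		return 0, 0
	}
	base = heapBase + a.next*pageSize
	for i := a.next; i < a.next+npages; i++ {
		if a.scavenged[i] {
			scav += pageSize
			a.scavenged[i] = false // the page is being handed out again
		}
	}
	a.next += npages
	return base, scav
}

func main() {
	// Pages 0, 1, and 3 start out scavenged (returned to the OS).
	a := &toyAllocator{scavenged: []bool{true, true, false, true}}
	heapReleased := uintptr(3 * pageSize)

	base, scav := a.alloc(3) // takes pages 0-2; two of them were scavenged
	if base == 0 {
		panic("out of memory")
	}
	// This is the accounting the caller can now do: scavenged memory that
	// is handed back out no longer counts as released to the OS.
	heapReleased -= scav
	fmt.Printf("base=%#x scav=%d heapReleased=%d\n", base, scav, heapReleased)
}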
@@ -751,9 +751,10 @@ type PallocBits pallocBits
 func (b *PallocBits) Find(npages uintptr, searchIdx uint) (uint, uint) {
 	return (*pallocBits)(b).find(npages, searchIdx)
 }
-func (b *PallocBits) AllocRange(i, n uint) { (*pallocBits)(b).allocRange(i, n) }
-func (b *PallocBits) Free(i, n uint)       { (*pallocBits)(b).free(i, n) }
-func (b *PallocBits) Summarize() PallocSum { return PallocSum((*pallocBits)(b).summarize()) }
+func (b *PallocBits) AllocRange(i, n uint)       { (*pallocBits)(b).allocRange(i, n) }
+func (b *PallocBits) Free(i, n uint)             { (*pallocBits)(b).free(i, n) }
+func (b *PallocBits) Summarize() PallocSum       { return PallocSum((*pallocBits)(b).summarize()) }
+func (b *PallocBits) PopcntRange(i, n uint) uint { return (*pageBits)(b).popcntRange(i, n) }

 // SummarizeSlow is a slow but more obviously correct implementation
 // of (*pallocBits).summarize. Used for testing.
@@ -853,8 +854,12 @@ type ChunkIdx chunkIdx
 // not in the heap, so is PageAlloc.
 type PageAlloc pageAlloc

-func (p *PageAlloc) Alloc(npages uintptr) uintptr { return (*pageAlloc)(p).alloc(npages) }
-func (p *PageAlloc) Free(base, npages uintptr)    { (*pageAlloc)(p).free(base, npages) }
+func (p *PageAlloc) Alloc(npages uintptr) (uintptr, uintptr) {
+	return (*pageAlloc)(p).alloc(npages)
+}
+func (p *PageAlloc) Free(base, npages uintptr) {
+	(*pageAlloc)(p).free(base, npages)
+}
 func (p *PageAlloc) Bounds() (ChunkIdx, ChunkIdx) {
 	return ChunkIdx((*pageAlloc)(p).start), ChunkIdx((*pageAlloc)(p).end)
 }
......
@@ -467,24 +467,33 @@ func (s *pageAlloc) update(base, npages uintptr, contig, alloc bool) {
 // allocated. It also updates the summaries to reflect the newly-updated
 // bitmap.
 //
+// Returns the amount of scavenged memory in bytes present in the
+// allocated range.
+//
 // s.mheapLock must be held.
-func (s *pageAlloc) allocRange(base, npages uintptr) {
+func (s *pageAlloc) allocRange(base, npages uintptr) uintptr {
 	limit := base + npages*pageSize - 1
 	sc, ec := chunkIndex(base), chunkIndex(limit)
 	si, ei := chunkPageIndex(base), chunkPageIndex(limit)

+	scav := uint(0)
 	if sc == ec {
 		// The range doesn't cross any chunk boundaries.
+		scav += s.chunks[sc].scavenged.popcntRange(si, ei+1-si)
 		s.chunks[sc].allocRange(si, ei+1-si)
 	} else {
 		// The range crosses at least one chunk boundary.
+		scav += s.chunks[sc].scavenged.popcntRange(si, pallocChunkPages-si)
 		s.chunks[sc].allocRange(si, pallocChunkPages-si)
 		for c := sc + 1; c < ec; c++ {
+			scav += s.chunks[c].scavenged.popcntRange(0, pallocChunkPages)
 			s.chunks[c].allocAll()
 		}
+		scav += s.chunks[ec].scavenged.popcntRange(0, ei+1)
 		s.chunks[ec].allocRange(0, ei+1)
 	}
 	s.update(base, npages, true, true)
+	return uintptr(scav) * pageSize
 }

 // find searches for the first (address-ordered) contiguous free region of
@@ -714,21 +723,23 @@ nextLevel:
 }

 // alloc allocates npages worth of memory from the page heap, returning the base
-// address for the allocation.
+// address for the allocation and the amount of scavenged memory in bytes
+// contained in the region [base address, base address + npages*pageSize).
 //
-// Returns 0 on failure.
+// Returns a 0 base address on failure, in which case other returned values
+// should be ignored.
 //
 // s.mheapLock must be held.
-func (s *pageAlloc) alloc(npages uintptr) uintptr {
+func (s *pageAlloc) alloc(npages uintptr) (addr uintptr, scav uintptr) {
 	// If the searchAddr refers to a region which has a higher address than
 	// any known chunk, then we know we're out of memory.
 	if chunkIndex(s.searchAddr) >= s.end {
-		return 0
+		return 0, 0
 	}

 	// If npages has a chance of fitting in the chunk where the searchAddr is,
 	// search it directly.
-	var addr, searchAddr uintptr
+	searchAddr := uintptr(0)
 	if pallocChunkPages-chunkPageIndex(s.searchAddr) >= uint(npages) {
 		// npages is guaranteed to be no greater than pallocChunkPages here.
 		i := chunkIndex(s.searchAddr)
@@ -756,11 +767,11 @@ func (s *pageAlloc) alloc(npages uintptr) uintptr {
 			// accommodate npages.
 			s.searchAddr = maxSearchAddr
 		}
-		return 0
+		return 0, 0
 	}
 Found:
 	// Go ahead and actually mark the bits now that we have an address.
-	s.allocRange(addr, npages)
+	scav = s.allocRange(addr, npages)

 	// If we found a higher (linearized) searchAddr, we know that all the
 	// heap memory before that searchAddr in a linear address space is
@@ -768,7 +779,7 @@ Found:
 	if s.compareSearchAddrTo(searchAddr) > 0 {
 		s.searchAddr = searchAddr
 	}
-	return addr
+	return addr, scav
 }

 // free returns npages worth of memory starting at base back to the page heap.
......
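The scavenged-byte count in allocRange above comes from splitting [base, base+npages*pageSize) on chunk boundaries and popcounting the scavenged bitmap over each piece: a possibly partial first chunk, any number of whole middle chunks, and a possibly partial last chunk. The sketch below replays that index arithmetic in a self-contained program; the 512-page chunk size matches pallocChunkPages, but the flat chunks slice, the zero-based addresses, and the slow per-bit popcount are simplifications rather than the runtime's real address mapping.

package main

import "fmt"

const (
	pageSize         = 8192
	pallocChunkPages = 512 // pages per chunk, matching the runtime constant
)

// chunk is a simplified stand-in for a chunk's metadata: just the scavenged bitmap.
type chunk struct {
	scavenged [pallocChunkPages / 64]uint64
}

// popcntRange counts the set bits of the scavenged bitmap in [i, i+n),
// the slow and obvious way.
func (c *chunk) popcntRange(i, n uint) (s uint) {
	for b := i; b < i+n; b++ {
		s += uint(c.scavenged[b/64]>>(b%64)) & 1
	}
	return
}

// scavengedBytes mirrors the summation pattern of (*pageAlloc).allocRange:
// a possibly partial first chunk, whole middle chunks, and a possibly
// partial last chunk. Addresses start at 0 here for simplicity, unlike the
// runtime's chunkIndex/chunkPageIndex, which account for the heap's base.
func scavengedBytes(chunks []chunk, base, npages uintptr) uintptr {
	limit := base + npages*pageSize - 1
	sc, ec := base/(pallocChunkPages*pageSize), limit/(pallocChunkPages*pageSize)
	si := uint(base/pageSize) % pallocChunkPages
	ei := uint(limit/pageSize) % pallocChunkPages

	scav := uint(0)
	if sc == ec {
		// The range doesn't cross any chunk boundaries.
		scav += chunks[sc].popcntRange(si, ei+1-si)
	} else {
		// The range crosses at least one chunk boundary.
		scav += chunks[sc].popcntRange(si, pallocChunkPages-si)
		for c := sc + 1; c < ec; c++ {
			scav += chunks[c].popcntRange(0, pallocChunkPages)
		}
		scav += chunks[ec].popcntRange(0, ei+1)
	}
	return uintptr(scav) * pageSize
}

func main() {
	// Two chunks: all of chunk 0 and the first 8 pages of chunk 1 are scavenged.
	chunks := make([]chunk, 2)
	for i := range chunks[0].scavenged {
		chunks[0].scavenged[i] = ^uint64(0)
	}
	chunks[1].scavenged[0] = 0xff

	// Allocate 520 pages starting 500 pages in: the last 12 pages of chunk 0
	// and the first 508 pages of chunk 1, of which 12+8 = 20 are scavenged.
	scav := scavengedBytes(chunks, 500*pageSize, 520)
	fmt.Println(scav/pageSize, "scavenged pages in the allocated range") // 20
}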
@@ -88,6 +88,26 @@ func (b *pageBits) clearAll() {
 	}
 }

+// popcntRange counts the number of set bits in the
+// range [i, i+n).
+func (b *pageBits) popcntRange(i, n uint) (s uint) {
+	if n == 1 {
+		return uint((b[i/64] >> (i % 64)) & 1)
+	}
+	_ = b[i/64]
+	j := i + n - 1
+	if i/64 == j/64 {
+		return uint(bits.OnesCount64((b[i/64] >> (i % 64)) & ((1 << n) - 1)))
+	}
+	_ = b[j/64]
+	s += uint(bits.OnesCount64(b[i/64] >> (i % 64)))
+	for k := i/64 + 1; k < j/64; k++ {
+		s += uint(bits.OnesCount64(b[k]))
+	}
+	s += uint(bits.OnesCount64(b[j/64] & ((1 << (j%64 + 1)) - 1)))
+	return
+}
+
 // pallocBits is a bitmap that tracks page allocations for at most one
 // palloc chunk.
 //
......
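popcntRange above applies the same first/middle/last decomposition at word granularity: the first word is shifted so bit i%64 lands at bit 0, whole 64-bit middle words go straight to bits.OnesCount64, and the last word is masked to its low j%64+1 bits, where j = i+n-1. The self-contained check below compares that mask logic against a naive per-bit loop; the sample bitmap and ranges are arbitrary and not taken from this CL, and the original's n == 1 fast path and bounds-check hints are omitted.

package main

import (
	"fmt"
	"math/bits"
)

// popcntRange counts the set bits of b in the bit range [i, i+n), using the
// same word-at-a-time strategy as (*pageBits).popcntRange.
func popcntRange(b []uint64, i, n uint) (s uint) {
	j := i + n - 1 // index of the last bit in the range
	if i/64 == j/64 {
		// The whole range lives in one word: shift bit i down to bit 0
		// and mask to the low n bits before popcounting.
		return uint(bits.OnesCount64((b[i/64] >> (i % 64)) & ((1 << n) - 1)))
	}
	// First partial word: everything at or above bit i%64.
	s += uint(bits.OnesCount64(b[i/64] >> (i % 64)))
	// Whole middle words.
	for k := i/64 + 1; k < j/64; k++ {
		s += uint(bits.OnesCount64(b[k]))
	}
	// Last partial word: the low j%64+1 bits.
	s += uint(bits.OnesCount64(b[j/64] & ((1 << (j%64 + 1)) - 1)))
	return
}

// popcntRangeSlow is the obvious per-bit reference.
func popcntRangeSlow(b []uint64, i, n uint) (s uint) {
	for k := i; k < i+n; k++ {
		s += uint(b[k/64]>>(k%64)) & 1
	}
	return
}

func main() {
	b := []uint64{0xdeadbeefcafef00d, 0x0123456789abcdef, 0xffffffffffffffff}
	for _, r := range [][2]uint{{0, 1}, {3, 7}, {60, 10}, {1, 130}, {64, 64}} {
		fast, slow := popcntRange(b, r[0], r[1]), popcntRangeSlow(b, r[0], r[1])
		fmt.Printf("[%d, %d): fast=%d slow=%d\n", r[0], r[0]+r[1], fast, slow)
	}
}

The table-driven test in the next hunk exercises the real implementation in the same spirit, against hand-computed counts over whole-chunk bitmaps.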
@@ -113,6 +113,98 @@ func checkPallocSum(t *testing.T, got, want PallocSum) {
 	}
 }

+func TestMallocBitsPopcntRange(t *testing.T) {
+	type test struct {
+		i, n uint // bit range to popcnt over.
+		want uint // expected popcnt result on that range.
+	}
+	tests := map[string]struct {
+		init  []BitRange // bit ranges to set to 1 in the bitmap.
+		tests []test     // a set of popcnt tests to run over the bitmap.
+	}{
+		"None": {
+			tests: []test{
+				{0, 1, 0},
+				{5, 3, 0},
+				{2, 11, 0},
+				{PallocChunkPages/4 + 1, PallocChunkPages / 2, 0},
+				{0, PallocChunkPages, 0},
+			},
+		},
+		"All": {
+			init: []BitRange{{0, PallocChunkPages}},
+			tests: []test{
+				{0, 1, 1},
+				{5, 3, 3},
+				{2, 11, 11},
+				{PallocChunkPages/4 + 1, PallocChunkPages / 2, PallocChunkPages / 2},
+				{0, PallocChunkPages, PallocChunkPages},
+			},
+		},
+		"Half": {
+			init: []BitRange{{PallocChunkPages / 2, PallocChunkPages / 2}},
+			tests: []test{
+				{0, 1, 0},
+				{5, 3, 0},
+				{2, 11, 0},
+				{PallocChunkPages/2 - 1, 1, 0},
+				{PallocChunkPages / 2, 1, 1},
+				{PallocChunkPages/2 + 10, 1, 1},
+				{PallocChunkPages/2 - 1, 2, 1},
+				{PallocChunkPages / 4, PallocChunkPages / 4, 0},
+				{PallocChunkPages / 4, PallocChunkPages/4 + 1, 1},
+				{PallocChunkPages/4 + 1, PallocChunkPages / 2, PallocChunkPages/4 + 1},
+				{0, PallocChunkPages, PallocChunkPages / 2},
+			},
+		},
+		"OddBound": {
+			init: []BitRange{{0, 111}},
+			tests: []test{
+				{0, 1, 1},
+				{5, 3, 3},
+				{2, 11, 11},
+				{110, 2, 1},
+				{99, 50, 12},
+				{110, 1, 1},
+				{111, 1, 0},
+				{99, 1, 1},
+				{120, 1, 0},
+				{PallocChunkPages / 2, PallocChunkPages / 2, 0},
+				{0, PallocChunkPages, 111},
+			},
+		},
+		"Scattered": {
+			init: []BitRange{
+				{1, 3}, {5, 1}, {7, 1}, {10, 2}, {13, 1}, {15, 4},
+				{21, 1}, {23, 1}, {26, 2}, {30, 5}, {36, 2}, {40, 3},
+				{44, 6}, {51, 1}, {53, 2}, {58, 3}, {63, 1}, {67, 2},
+				{71, 10}, {84, 1}, {89, 7}, {99, 2}, {103, 1}, {107, 2},
+				{111, 1}, {113, 1}, {115, 1}, {118, 1}, {120, 2}, {125, 5},
+			},
+			tests: []test{
+				{0, 11, 6},
+				{0, 64, 39},
+				{13, 64, 40},
+				{64, 64, 34},
+				{0, 128, 73},
+				{1, 128, 74},
+				{0, PallocChunkPages, 75},
+			},
+		},
+	}
+	for name, v := range tests {
+		v := v
+		t.Run(name, func(t *testing.T) {
+			b := makePallocBits(v.init)
+			for _, h := range v.tests {
+				if got := b.PopcntRange(h.i, h.n); got != h.want {
+					t.Errorf("bad popcnt (i=%d, n=%d): got %d, want %d", h.i, h.n, got, h.want)
+				}
+			}
+		})
+	}
+}
+
 // Ensures computing bit summaries works as expected by generating random
 // bitmaps and checking against a reference implementation.
 func TestPallocBitsSummarizeRandom(t *testing.T) {
......