Commit 31c4e099 authored by Michael Anthony Knyszek, committed by Michael Knyszek

runtime: ensure free and unscavenged spans may be backed by huge pages

This change adds a new sysHugePage function to provide the equivalent of
Linux's madvise(MADV_HUGEPAGE) support to the runtime. It then uses
sysHugePage to mark a newly-coalesced free span as backable by huge
pages to make the freeHugePages approximation a bit more accurate.

The problem being solved here is that if a large free span is composed
of many small spans which were coalesced together, then there's a chance
that they have had madvise(MADV_NOHUGEPAGE) called on them at some point,
which makes freeHugePages less accurate.

For #30333.

Change-Id: Idd4b02567619fc8d45647d9abd18da42f96f0522
Reviewed-on: https://go-review.googlesource.com/c/go/+/173338
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
parent 5c15ed64
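
For context, the madvise(MADV_HUGEPAGE) hint that the new sysHugePage wraps can be demonstrated outside the runtime. The sketch below is illustration only: it uses golang.org/x/sys/unix rather than the runtime's internal madvise wrapper, and assumes a Linux kernel with transparent huge pages available (MADV_HUGEPAGE fails with EINVAL otherwise).

```go
// Illustration of the syscall-level hint behind sysHugePage: tell the kernel
// that an anonymous mapping may be backed by transparent huge pages.
// This is not runtime code; it uses golang.org/x/sys/unix for clarity.
package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	const size = 4 << 20 // 4 MiB, enough to cover whole 2 MiB huge pages

	// Anonymous private mapping, roughly analogous to heap memory the
	// runtime obtains with mmap.
	mem, err := unix.Mmap(-1, 0, size, unix.PROT_READ|unix.PROT_WRITE,
		unix.MAP_ANON|unix.MAP_PRIVATE)
	if err != nil {
		log.Fatalf("mmap: %v", err)
	}
	defer unix.Munmap(mem)

	// MADV_HUGEPAGE marks the range as eligible for transparent huge pages,
	// undoing any earlier MADV_NOHUGEPAGE hint on the same range.
	if err := unix.Madvise(mem, unix.MADV_HUGEPAGE); err != nil {
		log.Fatalf("madvise(MADV_HUGEPAGE): %v", err)
	}
	fmt.Println("range marked as huge-page-backable")
}
```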
@@ -35,6 +35,9 @@ func sysUnused(v unsafe.Pointer, n uintptr) {
 func sysUsed(v unsafe.Pointer, n uintptr) {
 }
 
+func sysHugePage(v unsafe.Pointer, n uintptr) {
+}
+
 // Don't split the stack as this function may be invoked without a valid G,
 // which prevents us from allocating more stack.
 //go:nosplit
...
@@ -29,6 +29,9 @@ func sysUnused(v unsafe.Pointer, n uintptr) {
 func sysUsed(v unsafe.Pointer, n uintptr) {
 }
 
+func sysHugePage(v unsafe.Pointer, n uintptr) {
+}
+
 // Don't split the stack as this function may be invoked without a valid G,
 // which prevents us from allocating more stack.
 //go:nosplit
...
@@ -33,6 +33,9 @@ func sysUsed(v unsafe.Pointer, n uintptr) {
 	madvise(v, n, _MADV_FREE_REUSE)
 }
 
+func sysHugePage(v unsafe.Pointer, n uintptr) {
+}
+
 // Don't split the stack as this function may be invoked without a valid G,
 // which prevents us from allocating more stack.
 //go:nosplit
...
@@ -26,6 +26,9 @@ func sysUnused(v unsafe.Pointer, n uintptr) {
 func sysUsed(v unsafe.Pointer, n uintptr) {
 }
 
+func sysHugePage(v unsafe.Pointer, n uintptr) {
+}
+
 // Don't split the stack as this function may be invoked without a valid G,
 // which prevents us from allocating more stack.
 //go:nosplit
...
@@ -117,16 +117,19 @@ func sysUnused(v unsafe.Pointer, n uintptr) {
 }
 
 func sysUsed(v unsafe.Pointer, n uintptr) {
-	if physHugePageSize != 0 {
-		// Partially undo the NOHUGEPAGE marks from sysUnused
-		// for whole huge pages between v and v+n. This may
-		// leave huge pages off at the end points v and v+n
-		// even though allocations may cover these entire huge
-		// pages. We could detect this and undo NOHUGEPAGE on
-		// the end points as well, but it's probably not worth
-		// the cost because when neighboring allocations are
-		// freed sysUnused will just set NOHUGEPAGE again.
+	// Partially undo the NOHUGEPAGE marks from sysUnused
+	// for whole huge pages between v and v+n. This may
+	// leave huge pages off at the end points v and v+n
+	// even though allocations may cover these entire huge
+	// pages. We could detect this and undo NOHUGEPAGE on
+	// the end points as well, but it's probably not worth
+	// the cost because when neighboring allocations are
+	// freed sysUnused will just set NOHUGEPAGE again.
+	sysHugePage(v, n)
+}
+
+func sysHugePage(v unsafe.Pointer, n uintptr) {
+	if physHugePageSize != 0 {
 		// Round v up to a huge page boundary.
 		beg := (uintptr(v) + (physHugePageSize - 1)) &^ (physHugePageSize - 1)
 		// Round v+n down to a huge page boundary.
...
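
The hunk above is truncated after the second rounding step. The key detail is which bytes actually get the hint: only whole huge pages strictly inside [v, v+n). A standalone sketch of that arithmetic follows; hugePageInterior is a hypothetical helper for illustration, not a runtime function.

```go
package main

import "fmt"

// hugePageInterior returns the largest huge-page-aligned subrange of
// [addr, addr+n), i.e. the region a MADV_HUGEPAGE hint would cover.
// ok is false when no whole huge page fits inside the range.
// hugePageSize must be a power of two (commonly 2 MiB on x86-64 Linux).
func hugePageInterior(addr, n, hugePageSize uintptr) (beg, end uintptr, ok bool) {
	beg = (addr + hugePageSize - 1) &^ (hugePageSize - 1) // round addr up
	end = (addr + n) &^ (hugePageSize - 1)                // round addr+n down
	if beg >= end {
		return 0, 0, false
	}
	return beg, end, true
}

func main() {
	const hp = 2 << 20 // assume 2 MiB huge pages
	// A 5 MiB range starting at 1 MiB contains two whole 2 MiB pages,
	// at 2 MiB and 4 MiB; the partial pages at both ends are skipped.
	beg, end, ok := hugePageInterior(1<<20, 5<<20, hp)
	fmt.Printf("ok=%v beg=%#x end=%#x\n", ok, beg, end) // ok=true beg=0x200000 end=0x600000
}
```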
@@ -173,6 +173,9 @@ func sysUnused(v unsafe.Pointer, n uintptr) {
 func sysUsed(v unsafe.Pointer, n uintptr) {
 }
 
+func sysHugePage(v unsafe.Pointer, n uintptr) {
+}
+
 func sysMap(v unsafe.Pointer, n uintptr, sysStat *uint64) {
 	// sysReserve has already allocated all heap memory,
 	// but has not adjusted stats.
...
@@ -81,6 +81,9 @@ func sysUsed(v unsafe.Pointer, n uintptr) {
 	}
 }
 
+func sysHugePage(v unsafe.Pointer, n uintptr) {
+}
+
 // Don't split the stack as this function may be invoked without a valid G,
 // which prevents us from allocating more stack.
 //go:nosplit
...
@@ -502,6 +502,8 @@ func (h *mheap) coalesce(s *mspan) {
 		h.free.insert(other)
 	}
 
+	hpBefore := s.hugePages()
+
 	// Coalesce with earlier, later spans.
 	if before := spanOf(s.base() - 1); before != nil && before.state == mSpanFree {
 		if s.scavenged == before.scavenged {
@@ -519,6 +521,18 @@ func (h *mheap) coalesce(s *mspan) {
 			realign(s, after, after)
 		}
 	}
+
+	if !s.scavenged && s.hugePages() > hpBefore {
+		// If s has grown such that it now may contain more huge pages than it
+		// did before, then mark the whole region as huge-page-backable.
+		//
+		// Otherwise, on systems where we break up huge pages (like Linux)
+		// s may not be backed by huge pages because it could be made up of
+		// pieces which are broken up in the underlying VMA. The primary issue
+		// with this is that it can lead to a poor estimate of the amount of
+		// free memory backed by huge pages for determining the scavenging rate.
+		sysHugePage(unsafe.Pointer(s.base()), s.npages*pageSize)
+	}
 }
 
 // hugePages returns the number of aligned physical huge pages in the memory
...
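
The mspan.hugePages method referenced above (its body is not shown in this hunk) counts the aligned huge pages a span covers, which is what makes the hpBefore comparison meaningful: coalescing two unaligned neighbors can expose a whole aligned huge page that neither span contained on its own. A hedged sketch of that counting and the scenario it guards, using a hypothetical alignedHugePages helper rather than the real method:

```go
package main

import "fmt"

// alignedHugePages counts the whole, aligned huge pages inside [base, base+size).
// This mirrors the quantity coalesce compares before and after merging; the
// real mspan.hugePages is not shown in this diff, so treat this as a sketch.
func alignedHugePages(base, size, hugePageSize uintptr) uintptr {
	start := (base + hugePageSize - 1) &^ (hugePageSize - 1) // round base up
	end := (base + size) &^ (hugePageSize - 1)               // round base+size down
	if start >= end {
		return 0
	}
	return (end - start) / hugePageSize
}

func main() {
	const hp = 2 << 20 // assume 2 MiB huge pages

	// Two adjacent 2 MiB spans, neither aligned to a huge page boundary:
	// each contains zero whole huge pages on its own.
	a := alignedHugePages(1<<20, 2<<20, hp) // [1 MiB, 3 MiB) -> 0
	b := alignedHugePages(3<<20, 2<<20, hp) // [3 MiB, 5 MiB) -> 0

	// The coalesced 4 MiB span covers one whole aligned huge page at 2 MiB,
	// so hugePages() grows past hpBefore and sysHugePage re-marks the region.
	merged := alignedHugePages(1<<20, 4<<20, hp) // [1 MiB, 5 MiB) -> 1

	fmt.Println(a, b, merged) // 0 0 1
}
```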