Commit d8a8e1f0 authored by Kirill A. Shutemov, committed by Linus Torvalds

thp, vmstat: implement HZP_ALLOC and HZP_ALLOC_FAILED events

hzp_alloc is incremented every time a huge zero page is successfully
	allocated. It includes allocations which were dropped due to a
	race with another allocation. Note, it doesn't count every map
	of the huge zero page, only its allocation.

hzp_alloc_failed is incremented if the kernel fails to allocate the huge
	zero page and falls back to using small pages.
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@linux.intel.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 97ae1749
@@ -197,6 +197,14 @@ thp_split is incremented every time a huge page is split into base
	pages. This can happen for a variety of reasons but a common
	reason is that a huge page is old and is being reclaimed.

thp_zero_page_alloc is incremented every time a huge zero page is
	successfully allocated. It includes allocations which were
	dropped due to a race with another allocation. Note, it doesn't
	count every map of the huge zero page, only its allocation.

thp_zero_page_alloc_failed is incremented if the kernel fails to allocate
	the huge zero page and falls back to using small pages.

As the system ages, allocating huge pages may be expensive as the
	system uses memory compaction to copy data around memory to free a
	huge page for use. There are some counters in /proc/vmstat to help
......
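For reference, a minimal userspace sketch (not part of the patch) of how the two new counters can be read once this change is applied; it assumes the usual "name value" per-line format of /proc/vmstat and uses the counter names added in the hunk above:

/*
 * Example only: print the two new THP zero page counters from /proc/vmstat.
 * Assumes the standard "name value" per-line format of that file.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/vmstat", "r");
	char name[64];
	unsigned long long val;

	if (!f) {
		perror("fopen /proc/vmstat");
		return 1;
	}
	while (fscanf(f, "%63s %llu", name, &val) == 2) {
		if (!strcmp(name, "thp_zero_page_alloc") ||
		    !strcmp(name, "thp_zero_page_alloc_failed"))
			printf("%s = %llu\n", name, val);
	}
	fclose(f);
	return 0;
}

Both counters only ever increase, so sampling them before and after a workload gives the number of huge zero page allocations (and fallbacks) during that interval.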
@@ -58,6 +58,8 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
		THP_COLLAPSE_ALLOC,
		THP_COLLAPSE_ALLOC_FAILED,
		THP_SPLIT,
		THP_ZERO_PAGE_ALLOC,
		THP_ZERO_PAGE_ALLOC_FAILED,
#endif
		NR_VM_EVENT_ITEMS
};
......
@@ -184,8 +184,11 @@ static unsigned long get_huge_zero_page(void)
	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
			HPAGE_PMD_ORDER);
	if (!zero_page) {
		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
		return 0;
	}
	count_vm_event(THP_ZERO_PAGE_ALLOC);
	preempt_disable();
	if (cmpxchg(&huge_zero_pfn, 0, page_to_pfn(zero_page))) {
		preempt_enable();
......
@@ -801,6 +801,8 @@ const char * const vmstat_text[] = {
	"thp_collapse_alloc",
	"thp_collapse_alloc_failed",
	"thp_split",
	"thp_zero_page_alloc",
	"thp_zero_page_alloc_failed",
#endif
#endif /* CONFIG_VM_EVENTS_COUNTERS */
......
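Taken together, the hunks above follow the usual three-step pattern for a new vm event counter: an enum value in vm_event_item.h, a name string at the matching position in vmstat_text[], and a count_vm_event() call at the site being instrumented. A condensed, hypothetical sketch of that pattern (THP_FOO and "thp_foo" are made-up names, not part of this patch):

	/* include/linux/vm_event_item.h: new event, before NR_VM_EVENT_ITEMS */
	THP_FOO,

	/* mm/vmstat.c: name at the same relative position in vmstat_text[] */
	"thp_foo",

	/* at the code path being counted */
	count_vm_event(THP_FOO);

The enum order and the vmstat_text[] order have to stay in sync, which is why both hunks in this patch add their entries immediately after the thp_split slot.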