Commit 8449d21f authored by David Rientjes, committed by Linus Torvalds

mm, thp: fix mlock statistics

NR_MLOCK is only accounted in single-page units: there is no logic to
handle transparent hugepages.  This patch adjusts the statistics by the
appropriate number of pages so that the correct amount of memory is
reflected.
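
The per-page adjustment comes from hpage_nr_pages().  For reference, a
sketch of its behavior as defined in include/linux/huge_mm.h at the time
(HPAGE_PMD_NR is 512 with 4KB base pages and 2MB huge pages, e.g. on
x86_64):

	/* number of base pages a (possibly huge) page accounts for */
	static inline int hpage_nr_pages(struct page *page)
	{
		if (unlikely(PageTransHuge(page)))
			return HPAGE_PMD_NR;
		return 1;
	}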

Currently:

		$ grep Mlocked /proc/meminfo
		Mlocked:           19636 kB

	#define MAP_SIZE	(4UL << 30)	/* 4GB; UL avoids 32-bit int shift overflow */

	void *ptr = mmap(NULL, MAP_SIZE, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	mlock(ptr, MAP_SIZE);

		$ grep Mlocked /proc/meminfo
		Mlocked:           29844 kB

	munlock(ptr, MAP_SIZE);

		$ grep Mlocked /proc/meminfo
		Mlocked:           19636 kB
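
(The unpatched delta, 29844 - 19636 = 10208 kB, is in the ballpark of
what single-page accounting predicts for a THP-backed region: one 4KB
page per 2MB huge page, i.e. (4GB / 2MB) * 4KB = 8192 kB, with the
remainder presumably coming from parts of the mapping backed by regular
pages.)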

And with this patch:

		$ grep Mlocked /proc/meminfo
		Mlocked:           19636 kB

	mlock(ptr, MAP_SIZE);

		$ grep Mlocked /proc/meminfo
		Mlocked:         4213664 kB

	munlock(ptr, MAP_SIZE);

		$ grep Mlocked /proc/meminfo
		Mlocked:           19636 kB
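
(With the fix the delta is 4213664 - 19636 = 4194028 kB, i.e.
essentially the full 4GB mapping.)

For convenience, a self-contained version of the reproducer above; a
minimal sketch, assuming transparent hugepages are enabled
(/sys/kernel/mm/transparent_hugepage/enabled) and RLIMIT_MEMLOCK permits
a 4GB lock:

	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/mman.h>

	#define MAP_SIZE	(4UL << 30)	/* 4GB */

	int main(void)
	{
		void *ptr = mmap(NULL, MAP_SIZE, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (ptr == MAP_FAILED) {
			perror("mmap");
			return 1;
		}
		/* mlock() faults the region in; with THP enabled most of
		 * it should be backed by 2MB huge pages. */
		if (mlock(ptr, MAP_SIZE)) {
			perror("mlock");
			return 1;
		}
		system("grep Mlocked /proc/meminfo");
		munlock(ptr, MAP_SIZE);
		system("grep Mlocked /proc/meminfo");
		return 0;
	}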
Signed-off-by: David Rientjes <rientjes@google.com>
Reported-by: Hugh Dickins <hughd@google.com>
Acked-by: Hugh Dickins <hughd@google.com>
Reviewed-by: Andrea Arcangeli <aarcange@redhat.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Michel Lespinasse <walken@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b676b293
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -180,7 +180,8 @@ static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
 		return 0;
 
 	if (!TestSetPageMlocked(page)) {
-		inc_zone_page_state(page, NR_MLOCK);
+		mod_zone_page_state(page_zone(page), NR_MLOCK,
+				    hpage_nr_pages(page));
 		count_vm_event(UNEVICTABLE_PGMLOCKED);
 	}
 	return 1;
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -56,7 +56,8 @@ void clear_page_mlock(struct page *page)
 	if (!TestClearPageMlocked(page))
 		return;
 
-	dec_zone_page_state(page, NR_MLOCK);
+	mod_zone_page_state(page_zone(page), NR_MLOCK,
+			    -hpage_nr_pages(page));
 	count_vm_event(UNEVICTABLE_PGCLEARED);
 	if (!isolate_lru_page(page)) {
 		putback_lru_page(page);
@@ -78,7 +79,8 @@ void mlock_vma_page(struct page *page)
 	BUG_ON(!PageLocked(page));
 
 	if (!TestSetPageMlocked(page)) {
-		inc_zone_page_state(page, NR_MLOCK);
+		mod_zone_page_state(page_zone(page), NR_MLOCK,
+				    hpage_nr_pages(page));
 		count_vm_event(UNEVICTABLE_PGMLOCKED);
 		if (!isolate_lru_page(page))
 			putback_lru_page(page);
@@ -105,7 +107,8 @@ void munlock_vma_page(struct page *page)
 	BUG_ON(!PageLocked(page));
 
 	if (TestClearPageMlocked(page)) {
-		dec_zone_page_state(page, NR_MLOCK);
+		mod_zone_page_state(page_zone(page), NR_MLOCK,
+				    -hpage_nr_pages(page));
 		if (!isolate_lru_page(page)) {
 			int ret = SWAP_AGAIN;
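
All four call sites follow the same pattern: the single-page
inc_zone_page_state()/dec_zone_page_state() becomes mod_zone_page_state()
with a signed delta of +/-hpage_nr_pages(page).  For reference, the
relevant declarations (a sketch of the vmstat API as declared in
include/linux/vmstat.h):

	void inc_zone_page_state(struct page *page, enum zone_stat_item item);
	void dec_zone_page_state(struct page *page, enum zone_stat_item item);
	void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
				 int delta);

The signed delta lets a THP head page account for all of its base pages
in one update, so Mlocked moves by the true size of the locked region
instead of by 4KB per huge page.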