Commit 56873f43 authored by Wang, Yalin, committed by Linus Torvalds

mm: add KPF_ZERO_PAGE flag for /proc/kpageflags

Add a KPF_ZERO_PAGE flag for the zero page, so that userspace processes can
detect the zero page in /proc/kpageflags and do memory analysis more
accurately. (A minimal userspace sketch of this detection follows the commit
metadata below.)
Signed-off-by: Yalin Wang <yalin.wang@sonymobile.com>
Acked-by: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 1d148e21
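
As a quick illustration of the userspace side described in the commit message, here is a
minimal sketch (not part of the patch) that checks whether one virtual page of the calling
process is backed by the zero page. It combines /proc/self/pagemap (bit 63 = present,
bits 0-54 = PFN, per Documentation/vm/pagemap.txt) with /proc/kpageflags; the KPF_* values
are hard-coded to the kernel's bit numbers, and reading PFNs and kpageflags generally
requires root.

/*
 * Sketch only: map one read-only anonymous page, read it so it faults in
 * the shared zero page, then look the page up in /proc/self/pagemap and
 * /proc/kpageflags.  Run as root.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#define KPF_THP         22
#define KPF_ZERO_PAGE   24      /* bit added by this patch */

/* Read 64-bit entry number 'index' from a /proc pseudo-file. */
static uint64_t read_u64(const char *path, uint64_t index)
{
        uint64_t val = 0;
        int fd = open(path, O_RDONLY);

        if (fd < 0)
                return 0;
        if (pread(fd, &val, sizeof(val), index * sizeof(val)) != (ssize_t)sizeof(val))
                val = 0;
        close(fd);
        return val;
}

int main(void)
{
        long psize = sysconf(_SC_PAGESIZE);
        volatile char *buf = mmap(NULL, psize, PROT_READ,
                                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (buf == MAP_FAILED)
                return 1;
        (void)buf[0];   /* read fault -> expect the shared zero page */

        uint64_t pm = read_u64("/proc/self/pagemap", (uintptr_t)buf / psize);
        if (!(pm & (1ULL << 63))) {     /* bit 63: page present */
                puts("page not present");
                return 0;
        }

        uint64_t pfn = pm & ((1ULL << 55) - 1); /* bits 0-54: PFN */
        uint64_t kpf = read_u64("/proc/kpageflags", pfn);

        printf("pfn %llu: zero_page=%d thp=%d\n", (unsigned long long)pfn,
               !!(kpf & (1ULL << KPF_ZERO_PAGE)),
               !!(kpf & (1ULL << KPF_THP)));
        return 0;
}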
@@ -62,6 +62,8 @@ There are three components to pagemap:
 20. NOPAGE
 21. KSM
 22. THP
+23. BALLOON
+24. ZERO_PAGE
 
 Short descriptions to the page flags:
@@ -102,6 +104,12 @@ Short descriptions to the page flags:
 22. THP
     contiguous pages which construct transparent hugepages
 
+23. BALLOON
+    balloon compaction page
+
+24. ZERO_PAGE
+    zero page for pfn_zero or huge_zero page
+
 [IO related page flags]
 1. ERROR     IO error occurred
 3. UPTODATE  page has up-to-date data
@@ -5,6 +5,7 @@
 #include <linux/ksm.h>
 #include <linux/mm.h>
 #include <linux/mmzone.h>
+#include <linux/huge_mm.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/hugetlb.h>
@@ -121,9 +122,18 @@ u64 stable_page_flags(struct page *page)
         * just checks PG_head/PG_tail, so we need to check PageLRU/PageAnon
         * to make sure a given page is a thp, not a non-huge compound page.
         */
-       else if (PageTransCompound(page) && (PageLRU(compound_head(page)) ||
-                                            PageAnon(compound_head(page))))
-               u |= 1 << KPF_THP;
+       else if (PageTransCompound(page)) {
+               struct page *head = compound_head(page);
+
+               if (PageLRU(head) || PageAnon(head))
+                       u |= 1 << KPF_THP;
+               else if (is_huge_zero_page(head)) {
+                       u |= 1 << KPF_ZERO_PAGE;
+                       u |= 1 << KPF_THP;
+               }
+       } else if (is_zero_pfn(page_to_pfn(page)))
+               u |= 1 << KPF_ZERO_PAGE;
+
 
        /*
         * Caveats on high order pages: page->_count will only be set
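
For consumers of /proc/kpageflags, the hunk above means the huge zero page now reports both
KPF_THP and KPF_ZERO_PAGE, while the ordinary 4K zero page reports KPF_ZERO_PAGE alone. A
small sketch (not part of the patch; the constants mirror the UAPI header) of how userspace
can tell the cases apart:

#include <stdint.h>

#define KPF_THP         22
#define KPF_ZERO_PAGE   24

/* Classify a raw /proc/kpageflags word according to the hunk above. */
static const char *classify(uint64_t kpf)
{
        int thp  = !!(kpf & (1ULL << KPF_THP));
        int zero = !!(kpf & (1ULL << KPF_ZERO_PAGE));

        if (thp && zero)
                return "huge zero page";        /* is_huge_zero_page() branch */
        if (zero)
                return "zero page";             /* is_zero_pfn() branch */
        if (thp)
                return "transparent hugepage";
        return "something else";
}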
@@ -157,6 +157,13 @@ static inline int hpage_nr_pages(struct page *page)
 extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long addr, pmd_t pmd, pmd_t *pmdp);
 
+extern struct page *huge_zero_page;
+
+static inline bool is_huge_zero_page(struct page *page)
+{
+       return ACCESS_ONCE(huge_zero_page) == page;
+}
+
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
 #define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
 #define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
@@ -206,6 +213,11 @@ static inline int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_str
        return 0;
 }
 
+static inline bool is_huge_zero_page(struct page *page)
+{
+       return false;
+}
+
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 #endif /* _LINUX_HUGE_MM_H */
@@ -32,6 +32,7 @@
 #define KPF_KSM                 21
 #define KPF_THP                 22
 #define KPF_BALLOON             23
+#define KPF_ZERO_PAGE           24
 
 
 #endif /* _UAPILINUX_KERNEL_PAGE_FLAGS_H */
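
Since the constant lives in the exported UAPI header, userspace does not have to hard-code
bit 24. A tiny sketch, assuming kernel headers that already contain this change are
installed:

#include <linux/kernel-page-flags.h>    /* provides KPF_ZERO_PAGE */
#include <stdbool.h>
#include <stdint.h>

/* True if a /proc/kpageflags word has the zero-page bit set. */
static bool kpf_is_zero_page(uint64_t kpf)
{
        return kpf & (1ULL << KPF_ZERO_PAGE);
}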
@@ -171,12 +171,7 @@ static int start_khugepaged(void)
 }
 
 static atomic_t huge_zero_refcount;
-static struct page *huge_zero_page __read_mostly;
-
-static inline bool is_huge_zero_page(struct page *page)
-{
-       return ACCESS_ONCE(huge_zero_page) == page;
-}
+struct page *huge_zero_page __read_mostly;
 
 static inline bool is_huge_zero_pmd(pmd_t pmd)
 {
@@ -133,6 +133,7 @@ static const char * const page_flag_names[] = {
        [KPF_KSM]               = "x:ksm",
        [KPF_THP]               = "t:thp",
        [KPF_BALLOON]           = "o:balloon",
+       [KPF_ZERO_PAGE]         = "z:zero_page",
 
        [KPF_RESERVED]          = "r:reserved",
        [KPF_MLOCKED]           = "m:mlocked",
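
The entry added here follows page-types' usual "letter:longname" convention: the character
before the colon is the one-letter code used in compact flag strings, and the text after it
is the long name. A rough, illustrative sketch of that rendering (not copied from the
tool; the table is abbreviated):

#include <stdio.h>
#include <stdint.h>

/* Abbreviated name table; indices are the KPF_* bit numbers. */
static const char * const names[] = {
        [22] = "t:thp",
        [23] = "o:balloon",
        [24] = "z:zero_page",   /* entry added by this patch */
};

/* Print one letter per known bit that is set, '_' for known-but-clear bits. */
static void print_compact(uint64_t kpf)
{
        for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
                if (!names[i])
                        continue;       /* bits without a name are skipped here */
                putchar((kpf >> i) & 1 ? names[i][0] : '_');
        }
        putchar('\n');
}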