Commit 7e1f049e authored by Naoya Horiguchi, committed by Linus Torvalds

mm: hugetlb: cleanup using page_huge_active()

Now that we have easy access to a hugepage's activeness via page_huge_active(), the
existing helpers that derive the same information can be cleaned up.

[akpm@linux-foundation.org: s/PageHugeActive/page_huge_active/]
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Hugh Dickins <hughd@google.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent bcc54222
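
The gist of the cleanup shown in the diff below: activeness is now recorded as an explicit per-hugepage flag and queried with page_huge_active(), so the removed is_hugepage_active() no longer has to re-derive it from the tail-page, hwpoison, and refcount state on every call. The following toy userspace C sketch contrasts the two styles; the struct and names are illustrative only and this is not kernel code.

/* toy_active.c: contrast the removed derived check with the new flag check.
 * All types and names here are illustrative only; this is not kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct toy_hugepage {
        int  refcount;          /* models page_count()   */
        bool hwpoison;          /* models PageHWPoison() */
        bool is_tail;           /* models PageTail()     */
        bool active;            /* models the flag behind page_huge_active() */
};

/* Old style (like the removed is_hugepage_active()): derive activeness
 * from other page state each time. */
static bool old_is_active(const struct toy_hugepage *p)
{
        if (p->is_tail)
                return false;
        if (p->hwpoison)
                return false;
        return p->refcount > 0;
}

/* New style: read a flag that is maintained where hugepages are
 * put on and taken off the active list. */
static bool new_is_active(const struct toy_hugepage *p)
{
        return p->active;
}

int main(void)
{
        struct toy_hugepage in_use   = { .refcount = 2, .active = true };
        struct toy_hugepage poisoned = { .refcount = 1, .hwpoison = true };

        printf("in-use:   old=%d new=%d\n", old_is_active(&in_use), new_is_active(&in_use));
        printf("poisoned: old=%d new=%d\n", old_is_active(&poisoned), new_is_active(&poisoned));
        return 0;
}

In the kernel the real flag is set and cleared by the hugetlb code as pages move on and off the active list (see the parent commit), which is what makes the one-line query safe.
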
@@ -84,7 +84,6 @@ void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
 int dequeue_hwpoisoned_huge_page(struct page *page);
 bool isolate_huge_page(struct page *page, struct list_head *list);
 void putback_active_hugepage(struct page *page);
-bool is_hugepage_active(struct page *page);
 void free_huge_page(struct page *page);
 
 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
@@ -152,7 +151,6 @@ static inline bool isolate_huge_page(struct page *page, struct list_head *list)
        return false;
 }
 #define putback_active_hugepage(p)     do {} while (0)
-#define is_hugepage_active(x)  false
 
 static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                        unsigned long address, unsigned long end, pgprot_t newprot)
@@ -470,11 +470,18 @@ static inline void ClearPageCompound(struct page *page)
 #ifdef CONFIG_HUGETLB_PAGE
 int PageHuge(struct page *page);
 int PageHeadHuge(struct page *page);
+bool page_huge_active(struct page *page);
 #else
 TESTPAGEFLAG_FALSE(Huge)
 TESTPAGEFLAG_FALSE(HeadHuge)
+
+static inline bool page_huge_active(struct page *page)
+{
+       return 0;
+}
 #endif
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 /*
  * PageHuge() only returns true for hugetlbfs pages, but not for
@@ -3896,20 +3896,6 @@ follow_huge_pud(struct mm_struct *mm, unsigned long address,
 
 #ifdef CONFIG_MEMORY_FAILURE
 
-/* Should be called in hugetlb_lock */
-static int is_hugepage_on_freelist(struct page *hpage)
-{
-       struct page *page;
-       struct page *tmp;
-       struct hstate *h = page_hstate(hpage);
-       int nid = page_to_nid(hpage);
-
-       list_for_each_entry_safe(page, tmp, &h->hugepage_freelists[nid], lru)
-               if (page == hpage)
-                       return 1;
-       return 0;
-}
-
 /*
  * This function is called from memory failure code.
  * Assume the caller holds page lock of the head page.
@@ -3921,7 +3907,11 @@ int dequeue_hwpoisoned_huge_page(struct page *hpage)
        int ret = -EBUSY;
 
        spin_lock(&hugetlb_lock);
-       if (is_hugepage_on_freelist(hpage)) {
+       /*
+        * Just checking !page_huge_active is not enough, because that could be
+        * an isolated/hwpoisoned hugepage (which have >0 refcount).
+        */
+       if (!page_huge_active(hpage) && !page_count(hpage)) {
                /*
                 * Hwpoisoned hugepage isn't linked to activelist or freelist,
                 * but dangling hpage->lru can trigger list-debug warnings
@@ -3965,25 +3955,3 @@ void putback_active_hugepage(struct page *page)
        spin_unlock(&hugetlb_lock);
        put_page(page);
 }
-
-bool is_hugepage_active(struct page *page)
-{
-       VM_BUG_ON_PAGE(!PageHuge(page), page);
-       /*
-        * This function can be called for a tail page because the caller,
-        * scan_movable_pages, scans through a given pfn-range which typically
-        * covers one memory block. In systems using gigantic hugepage (1GB
-        * for x86_64,) a hugepage is larger than a memory block, and we don't
-        * support migrating such large hugepages for now, so return false
-        * when called for tail pages.
-        */
-       if (PageTail(page))
-               return false;
-       /*
-        * Refcount of a hwpoisoned hugepages is 1, but they are not active,
-        * so we should return false for them.
-        */
-       if (unlikely(PageHWPoison(page)))
-               return false;
-       return page_count(page) > 0;
-}
@@ -1373,7 +1373,7 @@ static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
                if (PageLRU(page))
                        return pfn;
                if (PageHuge(page)) {
-                       if (is_hugepage_active(page))
+                       if (page_huge_active(page))
                                return pfn;
                        else
                                pfn = round_up(pfn + 1,
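
The comment added in dequeue_hwpoisoned_huge_page() above notes that !page_huge_active() alone would also match isolated or hwpoisoned hugepages, which still hold a reference; hence the extra !page_count() test. A toy userspace model of that two-part check (all names illustrative, not kernel code):

/* toy_dequeue.c: an isolated or hwpoisoned hugepage is inactive but still
 * referenced, so !active alone is not enough to treat it as free.
 * Illustrative only; this is not kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct toy_hugepage {
        bool active;            /* models page_huge_active() */
        int  refcount;          /* models page_count()       */
};

static bool can_dequeue(const struct toy_hugepage *p)
{
        return !p->active && p->refcount == 0;
}

int main(void)
{
        struct toy_hugepage free_page     = { .active = false, .refcount = 0 };
        struct toy_hugepage isolated_page = { .active = false, .refcount = 1 };
        struct toy_hugepage in_use_page   = { .active = true,  .refcount = 2 };

        printf("free:     %s\n", can_dequeue(&free_page)     ? "dequeue" : "skip");
        printf("isolated: %s\n", can_dequeue(&isolated_page) ? "dequeue" : "skip");
        printf("in use:   %s\n", can_dequeue(&in_use_page)   ? "dequeue" : "skip");
        return 0;
}

Only the genuinely free page passes both tests; the isolated page is skipped even though it is not marked active.
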