Commit 77fe7f13 authored by Mel Gorman, committed by Linus Torvalds

mm/page_alloc: check high-order pages for corruption during PCP operations

Eric Dumazet pointed out that commit 44042b44 ("mm/page_alloc: allow
high-order pages to be stored on the per-cpu lists") only checks the
head page during PCP refill and allocation operations.  This was an
oversight and all pages should be checked.  This will incur a small
performance penalty but it's necessary for correctness.

Link: https://lkml.kernel.org/r/20220310092456.GJ15701@techsingularity.net
Fixes: 44042b44 ("mm/page_alloc: allow high-order pages to be stored on the per-cpu lists")
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Reported-by: Eric Dumazet <edumazet@google.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Wei Xu <weixugc@google.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 3313204c
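
The core of the change is easiest to see in isolation before reading the diff. Here is a minimal, self-contained userspace C sketch of the before/after behaviour. It is not kernel code: page_bad() and the one-field struct page are hypothetical stand-ins for the kernel's check_new_page() and struct page, used only for illustration.

#include <stdbool.h>
#include <stdio.h>

struct page { int bad; };	/* toy stand-in for the kernel's struct page */

/* Hypothetical stand-in for check_new_page(): reports a corrupted page. */
static bool page_bad(struct page *p)
{
	return p->bad != 0;
}

/* Before the fix: only the head page of a high-order block was checked. */
static bool check_head_only(struct page *page, unsigned int order)
{
	(void)order;	/* the order was effectively ignored */
	return page_bad(page);
}

/* After the fix: all 1 << order constituent pages are checked, mirroring
 * the loop in check_new_pages() in the diff below. */
static bool check_all_pages(struct page *page, unsigned int order)
{
	unsigned int i;

	for (i = 0; i < (1u << order); i++) {
		if (page_bad(page + i))
			return true;
	}
	return false;
}

int main(void)
{
	struct page block[8] = { 0 };	/* an order-3 block: 8 pages */

	block[5].bad = 1;	/* corrupt a tail page */
	printf("head-only check: %d\n", check_head_only(block, 3));	/* 0: misses it */
	printf("all-pages check: %d\n", check_all_pages(block, 3));	/* 1: catches it */
	return 0;
}

For an order-3 block this means eight checks instead of one, which is the small performance penalty the commit message accepts for correctness.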
mm/page_alloc.c
@@ -2291,23 +2291,36 @@ static inline int check_new_page(struct page *page)
 	return 1;
 }
 
+static bool check_new_pages(struct page *page, unsigned int order)
+{
+	int i;
+	for (i = 0; i < (1 << order); i++) {
+		struct page *p = page + i;
+
+		if (unlikely(check_new_page(p)))
+			return true;
+	}
+
+	return false;
+}
+
 #ifdef CONFIG_DEBUG_VM
 /*
  * With DEBUG_VM enabled, order-0 pages are checked for expected state when
  * being allocated from pcp lists. With debug_pagealloc also enabled, they are
  * also checked when pcp lists are refilled from the free lists.
  */
-static inline bool check_pcp_refill(struct page *page)
+static inline bool check_pcp_refill(struct page *page, unsigned int order)
 {
 	if (debug_pagealloc_enabled_static())
-		return check_new_page(page);
+		return check_new_pages(page, order);
 	else
 		return false;
 }
 
-static inline bool check_new_pcp(struct page *page)
+static inline bool check_new_pcp(struct page *page, unsigned int order)
 {
-	return check_new_page(page);
+	return check_new_pages(page, order);
 }
 #else
 /*
@@ -2315,32 +2328,19 @@ static inline bool check_new_pcp(struct page *page)
  * when pcp lists are being refilled from the free lists. With debug_pagealloc
  * enabled, they are also checked when being allocated from the pcp lists.
  */
-static inline bool check_pcp_refill(struct page *page)
+static inline bool check_pcp_refill(struct page *page, unsigned int order)
 {
-	return check_new_page(page);
+	return check_new_pages(page, order);
 }
-static inline bool check_new_pcp(struct page *page)
+static inline bool check_new_pcp(struct page *page, unsigned int order)
 {
 	if (debug_pagealloc_enabled_static())
-		return check_new_page(page);
+		return check_new_pages(page, order);
 	else
 		return false;
 }
 #endif /* CONFIG_DEBUG_VM */
 
-static bool check_new_pages(struct page *page, unsigned int order)
-{
-	int i;
-	for (i = 0; i < (1 << order); i++) {
-		struct page *p = page + i;
-
-		if (unlikely(check_new_page(p)))
-			return true;
-	}
-
-	return false;
-}
-
 inline void post_alloc_hook(struct page *page, unsigned int order,
 							gfp_t gfp_flags)
 {
@@ -2982,7 +2982,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 		if (unlikely(page == NULL))
 			break;
 
-		if (unlikely(check_pcp_refill(page)))
+		if (unlikely(check_pcp_refill(page, order)))
 			continue;
 
 		/*
@@ -3600,7 +3600,7 @@ struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order,
 		page = list_first_entry(list, struct page, lru);
 		list_del(&page->lru);
 		pcp->count -= 1 << order;
-	} while (check_new_pcp(page));
+	} while (check_new_pcp(page, order));
 
 	return page;
 }
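
The two comment blocks in the patch encode when these checks actually run. As a hedged sketch of that policy (CONFIG_DEBUG_VM is the real build option; checks_at_refill(), checks_at_alloc() and debug_pagealloc_on are hypothetical stand-ins, the last for debug_pagealloc_enabled_static()):

#include <stdbool.h>

/* Hypothetical stand-in for the kernel's debug_pagealloc_enabled_static(). */
static bool debug_pagealloc_on;

#ifdef CONFIG_DEBUG_VM
/* DEBUG_VM builds: pages are checked when allocated from the pcp lists,
 * and also at pcp refill when debug_pagealloc is enabled. */
static bool checks_at_refill(void) { return debug_pagealloc_on; }
static bool checks_at_alloc(void)  { return true; }
#else
/* Non-DEBUG_VM builds: pages are checked once at pcp refill, and also
 * at allocation when debug_pagealloc is enabled. */
static bool checks_at_refill(void) { return true; }
static bool checks_at_alloc(void)  { return debug_pagealloc_on; }
#endif

Whichever side runs, after this patch it validates all 1 << order pages via check_new_pages() rather than only the head page.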