Commit 5556cfe8 authored by Vlastimil Babka, committed by Linus Torvalds

mm, page_owner: fix off-by-one error in __set_page_owner_handle()

Patch series "followups to debug_pagealloc improvements through
page_owner", v3.

These are follow-ups to [1], which has meanwhile made it to Linus.  Patches 1
and 3 are based on Kirill's review, patch 2 on a KASAN request [2].  It
would be nice if all of this made it to 5.4 with [1] already there (or
at least patch 1).

This patch (of 3):

As noted by Kirill, commit 7e2f2a0c ("mm, page_owner: record page
owner for each subpage") introduced an off-by-one error in
__set_page_owner_handle() when looking up page_ext for subpages.  As a
result, the page_owner info for the head page is set twice, while for the
last tail page it is not set at all.
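Reduced to a sketch, the buggy loop had this shape (condensed from the
pre-fix __set_page_owner_handle(); the page_owner field assignments are
elided):

	for (i = 0; i < (1 << order); i++) {
		/* ... fill in the page_owner fields for subpage i ... */
		__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
		__set_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags);

		/*
		 * Off by one: this looks up the entry for subpage i again,
		 * not subpage i + 1, so the next iteration rewrites it.
		 * Entry 0 is written twice; entry (1 << order) - 1 never.
		 */
		page_ext = lookup_page_ext(page + i);
	}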

Fix this and also make the code more efficient by advancing the page_ext
pointer we already have, instead of calling lookup_page_ext() for each
subpage.  Since the full size of struct page_ext is not known at compile
time, we can't use a simple page_ext++ statement, so introduce a
page_ext_next() inline function for that.
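The pitfall is that C pointer arithmetic scales by the pointee's
compile-time size.  A two-line illustration (not kernel code; curr is a
struct page_ext pointer):

	struct page_ext *wrong = curr + 1;	/* steps only sizeof(struct
						   page_ext) bytes, skipping the
						   feature data appended at boot */
	struct page_ext *right = (void *)curr + page_ext_size;
						/* what page_ext_next() does */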

Link: http://lkml.kernel.org/r/20190930122916.14969-2-vbabka@suse.cz
Fixes: 7e2f2a0c ("mm, page_owner: record page owner for each subpage")
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reported-by: Kirill A. Shutemov <kirill@shutemov.name>
Reported-by: Miles Chen <miles.chen@mediatek.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Walter Wu <walter-zh.wu@mediatek.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 2abd839a
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -36,6 +36,7 @@ struct page_ext {
 	unsigned long flags;
 };
 
+extern unsigned long page_ext_size;
 extern void pgdat_page_ext_init(struct pglist_data *pgdat);
 
 #ifdef CONFIG_SPARSEMEM
@@ -52,6 +53,13 @@ static inline void page_ext_init(void)
 
 struct page_ext *lookup_page_ext(const struct page *page);
 
+static inline struct page_ext *page_ext_next(struct page_ext *curr)
+{
+	void *next = curr;
+	next += page_ext_size;
+	return next;
+}
+
 #else /* !CONFIG_PAGE_EXTENSION */
 struct page_ext;
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -67,8 +67,9 @@ static struct page_ext_operations *page_ext_ops[] = {
 #endif
 };
 
+unsigned long page_ext_size = sizeof(struct page_ext);
+
 static unsigned long total_usage;
-static unsigned long extra_mem;
 
 static bool __init invoke_need_callbacks(void)
 {
@@ -78,9 +79,8 @@ static bool __init invoke_need_callbacks(void)
 	for (i = 0; i < entries; i++) {
 		if (page_ext_ops[i]->need && page_ext_ops[i]->need()) {
-			page_ext_ops[i]->offset = sizeof(struct page_ext) +
-						extra_mem;
-			extra_mem += page_ext_ops[i]->size;
+			page_ext_ops[i]->offset = page_ext_size;
+			page_ext_size += page_ext_ops[i]->size;
 			need = true;
 		}
 	}
@@ -99,14 +99,9 @@ static void __init invoke_init_callbacks(void)
 	}
 }
 
-static unsigned long get_entry_size(void)
-{
-	return sizeof(struct page_ext) + extra_mem;
-}
-
 static inline struct page_ext *get_entry(void *base, unsigned long index)
 {
-	return base + get_entry_size() * index;
+	return base + page_ext_size * index;
 }
 
 #if !defined(CONFIG_SPARSEMEM)
@@ -156,7 +151,7 @@ static int __init alloc_node_page_ext(int nid)
 		!IS_ALIGNED(node_end_pfn(nid), MAX_ORDER_NR_PAGES))
 		nr_pages += MAX_ORDER_NR_PAGES;
 
-	table_size = get_entry_size() * nr_pages;
+	table_size = page_ext_size * nr_pages;
 
 	base = memblock_alloc_try_nid(
 			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
@@ -234,7 +229,7 @@ static int __meminit init_section_page_ext(unsigned long pfn, int nid)
 	if (section->page_ext)
 		return 0;
 
-	table_size = get_entry_size() * PAGES_PER_SECTION;
+	table_size = page_ext_size * PAGES_PER_SECTION;
 	base = alloc_page_ext(table_size, nid);
 
 	/*
@@ -254,7 +249,7 @@ static int __meminit init_section_page_ext(unsigned long pfn, int nid)
 	 * we need to apply a mask.
 	 */
 	pfn &= PAGE_SECTION_MASK;
-	section->page_ext = (void *)base - get_entry_size() * pfn;
+	section->page_ext = (void *)base - page_ext_size * pfn;
 	total_usage += table_size;
 	return 0;
 }
@@ -267,7 +262,7 @@ static void free_page_ext(void *addr)
 		struct page *page = virt_to_page(addr);
 		size_t table_size;
 
-		table_size = get_entry_size() * PAGES_PER_SECTION;
+		table_size = page_ext_size * PAGES_PER_SECTION;
 
 		BUG_ON(PageReserved(page));
 		kmemleak_free(addr);
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -156,10 +156,10 @@ void __reset_page_owner(struct page *page, unsigned int order)
 	handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
 #endif
 
+	page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		return;
 	for (i = 0; i < (1 << order); i++) {
-		page_ext = lookup_page_ext(page + i);
-		if (unlikely(!page_ext))
-			continue;
 		__clear_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags);
 #ifdef CONFIG_DEBUG_PAGEALLOC
 		if (debug_pagealloc_enabled()) {
@@ -167,6 +167,7 @@ void __reset_page_owner(struct page *page, unsigned int order)
 			page_owner->free_handle = handle;
 		}
 #endif
+		page_ext = page_ext_next(page_ext);
 	}
 }
 
@@ -186,7 +187,7 @@ static inline void __set_page_owner_handle(struct page *page,
 		__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
 		__set_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags);
 
-		page_ext = lookup_page_ext(page + i);
+		page_ext = page_ext_next(page_ext);
 	}
 }
 
@@ -224,12 +225,10 @@ void __split_page_owner(struct page *page, unsigned int order)
 	if (unlikely(!page_ext))
 		return;
 
-	page_owner = get_page_owner(page_ext);
-	page_owner->order = 0;
-	for (i = 1; i < (1 << order); i++) {
-		page_ext = lookup_page_ext(page + i);
+	for (i = 0; i < (1 << order); i++) {
 		page_owner = get_page_owner(page_ext);
 		page_owner->order = 0;
+		page_ext = page_ext_next(page_ext);
 	}
 }
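As background for the page_ext_size bookkeeping above: at boot, each
page_ext client whose ->need() callback returns true is assigned the
current running size as its byte offset into the per-page record, and the
record then grows by that client's ->size.  A self-contained sketch of the
same accounting (feature names and sizes are made up; this is not kernel
code):

	#include <stdio.h>

	struct ext_ops {
		const char *name;
		unsigned long size;	/* extra bytes per page */
		unsigned long offset;	/* filled in below */
	};

	int main(void)
	{
		/* Stand-in for sizeof(struct page_ext); the real base
		 * struct holds just the flags word. */
		unsigned long page_ext_size = sizeof(unsigned long);

		/* Hypothetical clients with illustrative sizes. */
		struct ext_ops ops[] = {
			{ "page_owner", 48, 0 },
			{ "other_feature", 16, 0 },
		};

		/* Mirrors invoke_need_callbacks(): each enabled client
		 * is placed at the current end of the record, and the
		 * record grows to make room for it. */
		for (unsigned long i = 0; i < sizeof(ops) / sizeof(ops[0]); i++) {
			ops[i].offset = page_ext_size;
			page_ext_size += ops[i].size;
		}

		for (unsigned long i = 0; i < sizeof(ops) / sizeof(ops[0]); i++)
			printf("%-14s offset %lu\n", ops[i].name, ops[i].offset);
		printf("entry stride (page_ext_size): %lu bytes\n", page_ext_size);
		return 0;
	}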