Commit a0132ac0 authored by Dave Hansen, committed by Linus Torvalds

mm/slub.c: do not VM_BUG_ON_PAGE() for temporary on-stack pages

Commit 309381fe ("mm: dump page when hitting a VM_BUG_ON using
VM_BUG_ON_PAGE") added a bunch of VM_BUG_ON_PAGE() calls.

But, most of the ones in the slub code are for _temporary_ 'struct
page's which are declared on the stack and likely have lots of gunk in
them.  Dumping their contents out will just confuse folks looking at
bad_page() output.  Plus, if we try to page_to_pfn() on them or
something, we'll probably oops anyway.

Turn them back into VM_BUG_ON()s.
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Sasha Levin <sasha.levin@oracle.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent ba3253c7
@@ -1559,7 +1559,7 @@ static inline void *acquire_slab(struct kmem_cache *s,
 		new.freelist = freelist;
 	}
-	VM_BUG_ON_PAGE(new.frozen, &new);
+	VM_BUG_ON(new.frozen);
 	new.frozen = 1;
 	if (!__cmpxchg_double_slab(s, page,
@@ -1812,7 +1812,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
 			set_freepointer(s, freelist, prior);
 			new.counters = counters;
 			new.inuse--;
-			VM_BUG_ON_PAGE(!new.frozen, &new);
+			VM_BUG_ON(!new.frozen);
 		} while (!__cmpxchg_double_slab(s, page,
 			prior, counters,
@@ -1840,7 +1840,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
 	old.freelist = page->freelist;
 	old.counters = page->counters;
-	VM_BUG_ON_PAGE(!old.frozen, &old);
+	VM_BUG_ON(!old.frozen);
 	/* Determine target state of the slab */
 	new.counters = old.counters;
@@ -1952,7 +1952,7 @@ static void unfreeze_partials(struct kmem_cache *s,
 			old.freelist = page->freelist;
 			old.counters = page->counters;
-			VM_BUG_ON_PAGE(!old.frozen, &old);
+			VM_BUG_ON(!old.frozen);
 			new.counters = old.counters;
 			new.freelist = old.freelist;
@@ -2225,7 +2225,7 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
 		counters = page->counters;
 		new.counters = counters;
-		VM_BUG_ON_PAGE(!new.frozen, &new);
+		VM_BUG_ON(!new.frozen);
 		new.inuse = page->objects;
 		new.frozen = freelist != NULL;
@@ -2319,7 +2319,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	 * page is pointing to the page from which the objects are obtained.
 	 * That page must be frozen for per cpu allocations to work.
 	 */
-	VM_BUG_ON_PAGE(!c->page->frozen, c->page);
+	VM_BUG_ON(!c->page->frozen);
 	c->freelist = get_freepointer(s, freelist);
 	c->tid = next_tid(c->tid);
 	local_irq_restore(flags);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment