Commit dcce284a authored by Benjamin Herrenschmidt, committed by Linus Torvalds

mm: Extend gfp masking to the page allocator

The page allocator also needs the masking of gfp flags during boot,
so this moves it out of slab/slub and uses it with the page allocator
as well.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 9729a6eb
...@@ -99,7 +99,7 @@ struct vm_area_struct; ...@@ -99,7 +99,7 @@ struct vm_area_struct;
__GFP_NORETRY|__GFP_NOMEMALLOC) __GFP_NORETRY|__GFP_NOMEMALLOC)
/* Control slab gfp mask during early boot */ /* Control slab gfp mask during early boot */
#define SLAB_GFP_BOOT_MASK __GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS) #define GFP_BOOT_MASK __GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS)
/* Control allocation constraints */ /* Control allocation constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE) #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
...@@ -348,4 +348,11 @@ static inline void oom_killer_enable(void) ...@@ -348,4 +348,11 @@ static inline void oom_killer_enable(void)
oom_killer_disabled = false; oom_killer_disabled = false;
} }
/*
 * Mask ANDed into the gfp flags of every page/slab allocation
 * (see __alloc_pages_nodemask, __cache_alloc, slab_alloc).
 * Initialized to GFP_BOOT_MASK so early-boot allocations cannot
 * use __GFP_WAIT/__GFP_IO/__GFP_FS while interrupts are still
 * disabled; widened to __GFP_BITS_MASK from start_kernel() once
 * interrupts are enabled.
 */
extern gfp_t gfp_allowed_mask;

/* Set the global allocation-flag mask (called from start_kernel()). */
static inline void set_gfp_allowed_mask(gfp_t mask)
{
gfp_allowed_mask = mask;
}
#endif /* __LINUX_GFP_H */ #endif /* __LINUX_GFP_H */
...@@ -642,6 +642,10 @@ asmlinkage void __init start_kernel(void) ...@@ -642,6 +642,10 @@ asmlinkage void __init start_kernel(void)
"enabled early\n"); "enabled early\n");
early_boot_irqs_on(); early_boot_irqs_on();
local_irq_enable(); local_irq_enable();
/* Interrupts are enabled now so all GFP allocations are safe. */
set_gfp_allowed_mask(__GFP_BITS_MASK);
kmem_cache_init_late(); kmem_cache_init_late();
/* /*
......
...@@ -73,6 +73,7 @@ unsigned long totalram_pages __read_mostly; ...@@ -73,6 +73,7 @@ unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly; unsigned long totalreserve_pages __read_mostly;
unsigned long highest_memmap_pfn __read_mostly; unsigned long highest_memmap_pfn __read_mostly;
int percpu_pagelist_fraction; int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
int pageblock_order __read_mostly; int pageblock_order __read_mostly;
...@@ -1863,6 +1864,8 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, ...@@ -1863,6 +1864,8 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
struct page *page; struct page *page;
int migratetype = allocflags_to_migratetype(gfp_mask); int migratetype = allocflags_to_migratetype(gfp_mask);
gfp_mask &= gfp_allowed_mask;
lockdep_trace_alloc(gfp_mask); lockdep_trace_alloc(gfp_mask);
might_sleep_if(gfp_mask & __GFP_WAIT); might_sleep_if(gfp_mask & __GFP_WAIT);
......
...@@ -304,12 +304,6 @@ struct kmem_list3 { ...@@ -304,12 +304,6 @@ struct kmem_list3 {
int free_touched; /* updated without locking */ int free_touched; /* updated without locking */
}; };
/*
* The slab allocator is initialized with interrupts disabled. Therefore, make
* sure early boot allocations don't accidentally enable interrupts.
*/
static gfp_t slab_gfp_mask __read_mostly = SLAB_GFP_BOOT_MASK;
/* /*
* Need this for bootstrapping a per node allocator. * Need this for bootstrapping a per node allocator.
*/ */
...@@ -1559,11 +1553,6 @@ void __init kmem_cache_init_late(void) ...@@ -1559,11 +1553,6 @@ void __init kmem_cache_init_late(void)
{ {
struct kmem_cache *cachep; struct kmem_cache *cachep;
/*
* Interrupts are enabled now so all GFP allocations are safe.
*/
slab_gfp_mask = __GFP_BITS_MASK;
/* 6) resize the head arrays to their final sizes */ /* 6) resize the head arrays to their final sizes */
mutex_lock(&cache_chain_mutex); mutex_lock(&cache_chain_mutex);
list_for_each_entry(cachep, &cache_chain, next) list_for_each_entry(cachep, &cache_chain, next)
...@@ -3307,7 +3296,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, ...@@ -3307,7 +3296,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
unsigned long save_flags; unsigned long save_flags;
void *ptr; void *ptr;
flags &= slab_gfp_mask; flags &= gfp_allowed_mask;
lockdep_trace_alloc(flags); lockdep_trace_alloc(flags);
...@@ -3392,7 +3381,7 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller) ...@@ -3392,7 +3381,7 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
unsigned long save_flags; unsigned long save_flags;
void *objp; void *objp;
flags &= slab_gfp_mask; flags &= gfp_allowed_mask;
lockdep_trace_alloc(flags); lockdep_trace_alloc(flags);
......
...@@ -179,12 +179,6 @@ static enum { ...@@ -179,12 +179,6 @@ static enum {
SYSFS /* Sysfs up */ SYSFS /* Sysfs up */
} slab_state = DOWN; } slab_state = DOWN;
/*
* The slab allocator is initialized with interrupts disabled. Therefore, make
* sure early boot allocations don't accidentally enable interrupts.
*/
static gfp_t slab_gfp_mask __read_mostly = SLAB_GFP_BOOT_MASK;
/* A list of all slab caches on the system */ /* A list of all slab caches on the system */
static DECLARE_RWSEM(slub_lock); static DECLARE_RWSEM(slub_lock);
static LIST_HEAD(slab_caches); static LIST_HEAD(slab_caches);
...@@ -1692,7 +1686,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s, ...@@ -1692,7 +1686,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
unsigned long flags; unsigned long flags;
unsigned int objsize; unsigned int objsize;
gfpflags &= slab_gfp_mask; gfpflags &= gfp_allowed_mask;
lockdep_trace_alloc(gfpflags); lockdep_trace_alloc(gfpflags);
might_sleep_if(gfpflags & __GFP_WAIT); might_sleep_if(gfpflags & __GFP_WAIT);
...@@ -3220,10 +3214,6 @@ void __init kmem_cache_init(void) ...@@ -3220,10 +3214,6 @@ void __init kmem_cache_init(void)
void __init kmem_cache_init_late(void) void __init kmem_cache_init_late(void)
{ {
/*
* Interrupts are enabled now so all GFP allocations are safe.
*/
slab_gfp_mask = __GFP_BITS_MASK;
} }
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment