Commit 01b34d16 authored by Matthew Wilcox (Oracle), committed by Vlastimil Babka

mm/slub: Convert pfmemalloc_match() to take a struct slab

Preparatory work for the mass conversion. Use the new slab_test_pfmemalloc()
helper. As it doesn't do VM_BUG_ON(!PageSlab()), we no longer need the
pfmemalloc_match_unsafe() variant.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Roman Gushchin <guro@fb.com>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
parent 4020b4a2
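Note: the slab_test_pfmemalloc() helper this commit switches to was added
earlier in the struct slab conversion series, in mm/slab.h. A minimal sketch
of its shape, assuming (as elsewhere in the series) that the pfmemalloc state
rides on the underlying folio's "active" page flag; this is a reconstruction
for context, not part of this commit's diff:

/*
 * Sketch, not from this commit: slab pages record whether they were
 * allocated from pfmemalloc reserves so that network-based swap keeps
 * working; the state is carried on the folio's PG_active bit.
 */
static inline bool slab_test_pfmemalloc(const struct slab *slab)
{
	return folio_test_active(slab_folio(slab));
}

Because it takes a struct slab, which by construction refers to a slab page,
it needs no VM_BUG_ON(!PageSlab()) assertion, which is what made a separate
"unsafe" variant necessary for the lockless check in ___slab_alloc().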
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2128,7 +2128,7 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
 static inline void put_cpu_partial(struct kmem_cache *s, struct page *page,
 				   int drain) { }
 #endif
-static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
+static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags);
 
 /*
  * Try to allocate a partial slab from a specific node.
@@ -2154,7 +2154,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 	list_for_each_entry_safe(page, page2, &n->partial, slab_list) {
 		void *t;
 
-		if (!pfmemalloc_match(page, gfpflags))
+		if (!pfmemalloc_match(page_slab(page), gfpflags))
 			continue;
 
 		t = acquire_slab(s, n, page, object == NULL);
@@ -2832,22 +2832,9 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
 #endif
 }
 
-static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
+static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags)
 {
-	if (unlikely(PageSlabPfmemalloc(page)))
-		return gfp_pfmemalloc_allowed(gfpflags);
-
-	return true;
-}
-
-/*
- * A variant of pfmemalloc_match() that tests page flags without asserting
- * PageSlab. Intended for opportunistic checks before taking a lock and
- * rechecking that nobody else freed the page under us.
- */
-static inline bool pfmemalloc_match_unsafe(struct page *page, gfp_t gfpflags)
-{
-	if (unlikely(__PageSlabPfmemalloc(page)))
+	if (unlikely(slab_test_pfmemalloc(slab)))
 		return gfp_pfmemalloc_allowed(gfpflags);
 
 	return true;
@@ -2949,7 +2936,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	 * PFMEMALLOC but right now, we are losing the pfmemalloc
 	 * information when the page leaves the per-cpu allocator
 	 */
-	if (unlikely(!pfmemalloc_match_unsafe(page, gfpflags)))
+	if (unlikely(!pfmemalloc_match(page_slab(page), gfpflags)))
		goto deactivate_slab;
 
 	/* must check again c->page in case we got preempted and it changed */
@@ -3061,7 +3048,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		}
 	}
 
-	if (unlikely(!pfmemalloc_match(page, gfpflags)))
+	if (unlikely(!pfmemalloc_match(page_slab(page), gfpflags)))
 		/*
 		 * For !pfmemalloc_match() case we don't load freelist so that
 		 * we don't make further mismatched allocations easier.
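The rule that both call sites now share is simple: a slab allocated from
pfmemalloc reserves may only satisfy requests that are themselves allowed to
dip into reserves; any other slab matches unconditionally. A self-contained
toy model of that rule, in illustrative userspace C with invented names, not
kernel code:

#include <stdbool.h>
#include <stdio.h>

/* Toy model only: all names below are invented for illustration. */
struct toy_slab {
	bool pfmemalloc;	/* allocated from emergency reserves? */
};

/* Stand-in for gfp_pfmemalloc_allowed(): may this request consume
 * memory reserves? */
static bool toy_request_may_use_reserves(bool memalloc_context)
{
	return memalloc_context;
}

/* Mirrors the structure of pfmemalloc_match(): a reserve-backed slab
 * only serves requests entitled to use reserves; any other slab
 * matches unconditionally. */
static bool toy_pfmemalloc_match(const struct toy_slab *slab,
				 bool memalloc_context)
{
	if (slab->pfmemalloc)
		return toy_request_may_use_reserves(memalloc_context);
	return true;
}

int main(void)
{
	struct toy_slab reserve = { .pfmemalloc = true };
	struct toy_slab normal = { .pfmemalloc = false };

	printf("reserve slab, normal request: %d\n",
	       toy_pfmemalloc_match(&reserve, false));	/* 0: mismatch */
	printf("normal slab, normal request:  %d\n",
	       toy_pfmemalloc_match(&normal, false));	/* 1: match */
	return 0;
}

Run, the first check prints 0: a request not entitled to reserves must not be
served from a reserve-backed slab, which is why ___slab_alloc() deactivates
such a slab instead of loading its freelist. The second prints 1.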