Commit 23910c50 authored by Pekka Enberg

Merge branch 'slub/cleanups' into slab/next

* Fix a merge conflict in mm/slub.c::acquire_slab() due to commit 02d7633f
  ("slub: fix a memory leak in get_partial_node()").

Conflicts:
	mm/slub.c
Signed-off-by: Pekka Enberg <penberg@kernel.org>
parents f8f5701b 57d437d2
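
The hunks below do not show the full body of acquire_slab(), so here is a sketch of how the resolved function plausibly reads after this merge, pieced together from the visible hunks. The local declarations and the if/else handling of the `mode` argument (zapping the freelist when the slab is taken as the per-cpu slab, per the 02d7633f leak fix) sit in the collapsed region and are reconstructed assumptions, not text taken from this page:

	static inline void *acquire_slab(struct kmem_cache *s,
			struct kmem_cache_node *n, struct page *page,
			int mode)
	{
		void *freelist;
		unsigned long counters;
		struct page new;

		/*
		 * Zap the freelist and set the frozen bit.
		 * The old freelist is the list of objects for the
		 * per cpu allocation list.
		 */
		freelist = page->freelist;
		counters = page->counters;
		new.counters = counters;
		if (mode) {
			/* Assumed from 02d7633f: take the whole freelist when the
			 * slab becomes the cpu slab, so objects are not leaked. */
			new.inuse = page->objects;
			new.freelist = NULL;
		} else {
			new.freelist = freelist;
		}

		VM_BUG_ON(new.frozen);
		new.frozen = 1;

		/* Single attempt: on failure the slab stays on the partial list. */
		if (!__cmpxchg_double_slab(s, page,
				freelist, counters,
				new.freelist, new.counters,
				"acquire_slab"))
			return NULL;

		remove_partial(n, page);
		WARN_ON(!freelist);
		return freelist;
	}

Note how the retry loop of the old code becomes a single attempt: on cmpxchg failure the function now bails out with NULL and leaves the slab on the partial list.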
@@ -48,7 +48,6 @@ struct kmem_cache_cpu {
 	unsigned long tid;	/* Globally unique transaction id */
 	struct page *page;	/* The slab from which we are allocating */
 	struct page *partial;	/* Partially allocated frozen slabs */
-	int node;		/* The node of the page (or -1 for debug) */
 #ifdef CONFIG_SLUB_STATS
 	unsigned stat[NR_SLUB_STAT_ITEMS];
 #endif
...
@@ -1490,12 +1490,12 @@ static inline void remove_partial(struct kmem_cache_node *n,
 }
 
 /*
- * Lock slab, remove from the partial list and put the object into the
- * per cpu freelist.
+ * Remove slab from the partial list, freeze it and
+ * return the pointer to the freelist.
  *
  * Returns a list of objects or NULL if it fails.
  *
- * Must hold list_lock.
+ * Must hold list_lock since we modify the partial list.
  */
 static inline void *acquire_slab(struct kmem_cache *s,
 		struct kmem_cache_node *n, struct page *page,
@@ -1510,7 +1510,6 @@ static inline void *acquire_slab(struct kmem_cache *s,
 	 * The old freelist is the list of objects for the
 	 * per cpu allocation list.
 	 */
-	do {
 	freelist = page->freelist;
 	counters = page->counters;
 	new.counters = counters;
@@ -1524,12 +1523,14 @@ static inline void *acquire_slab(struct kmem_cache *s,
 
 	VM_BUG_ON(new.frozen);
 	new.frozen = 1;
 
-	} while (!__cmpxchg_double_slab(s, page,
+	if (!__cmpxchg_double_slab(s, page,
 			freelist, counters,
 			new.freelist, new.counters,
-			"lock and freeze"));
+			"acquire_slab"))
+		return NULL;
 
 	remove_partial(n, page);
+	WARN_ON(!freelist);
 	return freelist;
 }
@@ -1563,7 +1564,6 @@ static void *get_partial_node(struct kmem_cache *s,
 		if (!object) {
 			c->page = page;
-			c->node = page_to_nid(page);
 			stat(s, ALLOC_FROM_PARTIAL);
 			object = t;
 			available = page->objects - page->inuse;
@@ -1731,14 +1731,12 @@ void init_kmem_cache_cpus(struct kmem_cache *s)
 /*
  * Remove the cpu slab
  */
-static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
+static void deactivate_slab(struct kmem_cache *s, struct page *page, void *freelist)
 {
 	enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
-	struct page *page = c->page;
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
 	int lock = 0;
 	enum slab_modes l = M_NONE, m = M_NONE;
-	void *freelist;
 	void *nextfree;
 	int tail = DEACTIVATE_TO_HEAD;
 	struct page new;
@@ -1749,11 +1747,6 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 		tail = DEACTIVATE_TO_TAIL;
 	}
 
-	c->tid = next_tid(c->tid);
-	c->page = NULL;
-	freelist = c->freelist;
-	c->freelist = NULL;
-
 	/*
 	 * Stage one: Free all available per cpu objects back
 	 * to the page freelist while it is still frozen. Leave the
@@ -2011,7 +2004,11 @@ int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 {
 	stat(s, CPUSLAB_FLUSH);
-	deactivate_slab(s, c);
+	deactivate_slab(s, c->page, c->freelist);
+
+	c->tid = next_tid(c->tid);
+	c->page = NULL;
+	c->freelist = NULL;
 }
 
 /*
@@ -2055,10 +2052,10 @@ static void flush_all(struct kmem_cache *s)
  * Check if the objects in a per cpu structure fit numa
  * locality expectations.
  */
-static inline int node_match(struct kmem_cache_cpu *c, int node)
+static inline int node_match(struct page *page, int node)
 {
 #ifdef CONFIG_NUMA
-	if (node != NUMA_NO_NODE && c->node != node)
+	if (node != NUMA_NO_NODE && page_to_nid(page) != node)
 		return 0;
 #endif
 	return 1;
@@ -2130,10 +2127,16 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
 static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
 			int node, struct kmem_cache_cpu **pc)
 {
-	void *object;
-	struct kmem_cache_cpu *c;
-	struct page *page = new_slab(s, flags, node);
+	void *freelist;
+	struct kmem_cache_cpu *c = *pc;
+	struct page *page;
+
+	freelist = get_partial(s, flags, node, c);
 
+	if (freelist)
+		return freelist;
+
+	page = new_slab(s, flags, node);
 	if (page) {
 		c = __this_cpu_ptr(s->cpu_slab);
 		if (c->page)
@@ -2143,17 +2146,16 @@ static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
 		 * No other reference to the page yet so we can
 		 * muck around with it freely without cmpxchg
 		 */
-		object = page->freelist;
+		freelist = page->freelist;
 		page->freelist = NULL;
 
 		stat(s, ALLOC_SLAB);
-		c->node = page_to_nid(page);
 		c->page = page;
 		*pc = c;
 	} else
-		object = NULL;
+		freelist = NULL;
 
-	return object;
+	return freelist;
 }
 
 /*
@@ -2173,6 +2175,7 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
 	do {
 		freelist = page->freelist;
 		counters = page->counters;
+
 		new.counters = counters;
 		VM_BUG_ON(!new.frozen);
@@ -2206,7 +2209,8 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 			  unsigned long addr, struct kmem_cache_cpu *c)
 {
-	void **object;
+	void *freelist;
+	struct page *page;
 	unsigned long flags;
 
 	local_irq_save(flags);
@@ -2219,25 +2223,29 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	c = this_cpu_ptr(s->cpu_slab);
 #endif
 
-	if (!c->page)
+	page = c->page;
+	if (!page)
 		goto new_slab;
 redo:
-	if (unlikely(!node_match(c, node))) {
+
+	if (unlikely(!node_match(page, node))) {
 		stat(s, ALLOC_NODE_MISMATCH);
-		deactivate_slab(s, c);
+		deactivate_slab(s, page, c->freelist);
+		c->page = NULL;
+		c->freelist = NULL;
 		goto new_slab;
 	}
 
 	/* must check again c->freelist in case of cpu migration or IRQ */
-	object = c->freelist;
-	if (object)
+	freelist = c->freelist;
+	if (freelist)
 		goto load_freelist;
 
 	stat(s, ALLOC_SLOWPATH);
 
-	object = get_freelist(s, c->page);
+	freelist = get_freelist(s, page);
 
-	if (!object) {
+	if (!freelist) {
 		c->page = NULL;
 		stat(s, DEACTIVATE_BYPASS);
 		goto new_slab;
@@ -2246,50 +2254,50 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	stat(s, ALLOC_REFILL);
 
 load_freelist:
-	c->freelist = get_freepointer(s, object);
+	/*
+	 * freelist is pointing to the list of objects to be used.
+	 * page is pointing to the page from which the objects are obtained.
+	 * That page must be frozen for per cpu allocations to work.
+	 */
+	VM_BUG_ON(!c->page->frozen);
+	c->freelist = get_freepointer(s, freelist);
 	c->tid = next_tid(c->tid);
 	local_irq_restore(flags);
-	return object;
+	return freelist;
 
 new_slab:
 
 	if (c->partial) {
-		c->page = c->partial;
-		c->partial = c->page->next;
-		c->node = page_to_nid(c->page);
+		page = c->page = c->partial;
+		c->partial = page->next;
 		stat(s, CPU_PARTIAL_ALLOC);
 		c->freelist = NULL;
 		goto redo;
 	}
 
-	/* Then do expensive stuff like retrieving pages from the partial lists */
-	object = get_partial(s, gfpflags, node, c);
-
-	if (unlikely(!object)) {
-
-		object = new_slab_objects(s, gfpflags, node, &c);
+	freelist = new_slab_objects(s, gfpflags, node, &c);
 
-		if (unlikely(!object)) {
-			if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
-				slab_out_of_memory(s, gfpflags, node);
+	if (unlikely(!freelist)) {
+		if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
+			slab_out_of_memory(s, gfpflags, node);
 
-			local_irq_restore(flags);
-			return NULL;
-		}
+		local_irq_restore(flags);
+		return NULL;
 	}
 
+	page = c->page;
 	if (likely(!kmem_cache_debug(s)))
 		goto load_freelist;
 
 	/* Only entered in the debug case */
-	if (!alloc_debug_processing(s, c->page, object, addr))
+	if (!alloc_debug_processing(s, page, freelist, addr))
 		goto new_slab;	/* Slab failed checks. Next slab needed */
 
-	c->freelist = get_freepointer(s, object);
-	deactivate_slab(s, c);
-	c->node = NUMA_NO_NODE;
+	deactivate_slab(s, page, get_freepointer(s, freelist));
+	c->page = NULL;
+	c->freelist = NULL;
 	local_irq_restore(flags);
-	return object;
+	return freelist;
 }
 
 /*
@@ -2307,6 +2315,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 {
 	void **object;
 	struct kmem_cache_cpu *c;
+	struct page *page;
 	unsigned long tid;
 
 	if (slab_pre_alloc_hook(s, gfpflags))
@@ -2332,7 +2341,8 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	barrier();
 
 	object = c->freelist;
-	if (unlikely(!object || !node_match(c, node)))
+	page = c->page;
+	if (unlikely(!object || !node_match(page, node)))
 
 		object = __slab_alloc(s, gfpflags, node, addr, c);
@@ -4500,13 +4510,14 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 		for_each_possible_cpu(cpu) {
 			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
-			int node = ACCESS_ONCE(c->node);
+			int node;
 			struct page *page;
 
-			if (node < 0)
-				continue;
 			page = ACCESS_ONCE(c->page);
-			if (page) {
+			if (!page)
+				continue;
+
+			node = page_to_nid(page);
 			if (flags & SO_TOTAL)
 				x = page->objects;
 			else if (flags & SO_OBJECTS)
@@ -4516,14 +4527,14 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 			total += x;
 			nodes[node] += x;
-			}
-			page = c->partial;
+
+			page = ACCESS_ONCE(c->partial);
 			if (page) {
 				x = page->pobjects;
 				total += x;
 				nodes[node] += x;
 			}
 			per_cpu[node]++;
 		}
 	}
...