Commit 633b0764 authored by Joonsoo Kim, committed by Pekka Enberg

slub: correct to calculate num of acquired objects in get_partial_node()

There is a subtle bug when calculating the number of acquired objects.

Currently, we calculate "available = page->objects - page->inuse"
after acquire_slab() is called in get_partial_node().

In acquire_slab() with mode = 1, we always set new.inuse = page->objects.
So,

	acquire_slab(s, n, page, object == NULL);

	if (!object) {
		c->page = page;
		stat(s, ALLOC_FROM_PARTIAL);
		object = t;
		available = page->objects - page->inuse;

		!!! available is always 0 !!!
	...

Therefore, "available > s->cpu_partial / 2" is always false and
we always go to the second iteration.
This patch corrects the problem.
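
For illustration only (not part of the patch), here is a minimal standalone C
sketch of the ordering issue; struct fake_page, buggy_acquire() and
fixed_acquire() are hypothetical stand-ins for the kernel's struct page and
acquire_slab():

	#include <stdio.h>

	/* Hypothetical stand-in for the relevant fields of struct page. */
	struct fake_page {
		unsigned int inuse;	/* objects already allocated from the slab */
		unsigned int objects;	/* total objects in the slab */
	};

	/* Mimics acquire_slab() with mode = 1: the whole slab is frozen. */
	static void buggy_acquire(struct fake_page *page)
	{
		page->inuse = page->objects;
	}

	/* Patched behaviour: report the free count before inuse is overwritten. */
	static void fixed_acquire(struct fake_page *page, int *objects)
	{
		*objects = page->objects - page->inuse;
		page->inuse = page->objects;
	}

	int main(void)
	{
		struct fake_page a = { .inuse = 3, .objects = 16 };
		struct fake_page b = { .inuse = 3, .objects = 16 };
		int objects;

		buggy_acquire(&a);
		/* Old get_partial_node() computed the count after the acquire: */
		printf("buggy available = %u\n", a.objects - a.inuse);	/* prints 0 */

		fixed_acquire(&b, &objects);
		printf("fixed available = %d\n", objects);		/* prints 13 */
		return 0;
	}

With the buggy ordering the comparison against s->cpu_partial / 2 can never
succeed, so the loop keeps pulling partial slabs.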

After that, we no longer need the return value of put_cpu_partial(),
so remove it.
Reviewed-by: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
parent 7d557b3c
@@ -1493,7 +1493,7 @@ static inline void remove_partial(struct kmem_cache_node *n,
  */
 static inline void *acquire_slab(struct kmem_cache *s,
 		struct kmem_cache_node *n, struct page *page,
-		int mode)
+		int mode, int *objects)
 {
 	void *freelist;
 	unsigned long counters;
@@ -1507,6 +1507,7 @@ static inline void *acquire_slab(struct kmem_cache *s,
 	freelist = page->freelist;
 	counters = page->counters;
 	new.counters = counters;
+	*objects = new.objects - new.inuse;
 	if (mode) {
 		new.inuse = page->objects;
 		new.freelist = NULL;
@@ -1528,7 +1529,7 @@ static inline void *acquire_slab(struct kmem_cache *s,
 	return freelist;
 }
 
-static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
+static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
 
 /*
@@ -1539,6 +1540,8 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 {
 	struct page *page, *page2;
 	void *object = NULL;
+	int available = 0;
+	int objects;
 
 	/*
 	 * Racy check. If we mistakenly see no partial slabs then we
@@ -1552,22 +1555,21 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 	spin_lock(&n->list_lock);
 	list_for_each_entry_safe(page, page2, &n->partial, lru) {
 		void *t;
-		int available;
 
 		if (!pfmemalloc_match(page, flags))
 			continue;
 
-		t = acquire_slab(s, n, page, object == NULL);
+		t = acquire_slab(s, n, page, object == NULL, &objects);
 		if (!t)
 			break;
 
+		available += objects;
 		if (!object) {
 			c->page = page;
 			stat(s, ALLOC_FROM_PARTIAL);
 			object = t;
-			available = page->objects - page->inuse;
 		} else {
-			available = put_cpu_partial(s, page, 0);
+			put_cpu_partial(s, page, 0);
 			stat(s, CPU_PARTIAL_NODE);
 		}
 		if (kmem_cache_debug(s) || available > s->cpu_partial / 2)
@@ -1946,7 +1948,7 @@ static void unfreeze_partials(struct kmem_cache *s,
  * If we did not find a slot then simply move all the partials to the
  * per node partial list.
  */
-static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
+static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 {
 	struct page *oldpage;
 	int pages;
@@ -1984,7 +1986,6 @@ static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 		page->next = oldpage;
 
 	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
-	return pobjects;
 }
 
 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)