Commit c65c1877 authored by Peter Zijlstra, committed by Pekka Enberg

slub: use lockdep_assert_held

Instead of using comments in an attempt at getting the locking right,
use proper assertions that actively warn you if you got it wrong.

Also add extra braces in a few sites to comply with coding-style.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
parent 8afb1474
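
As a standalone sketch of the pattern this patch applies (the structure and helper names below are hypothetical, not from slub.c): a locking rule that previously lived only in a comment becomes an active check, so lockdep warns at runtime when a caller forgets to take the lock.

#include <linux/lockdep.h>
#include <linux/spinlock.h>
#include <linux/list.h>

/* Hypothetical per-node structure, for illustration only. */
struct demo_node {
	spinlock_t		list_lock;
	struct list_head	full;
};

/*
 * Before: "list_lock must be held" was only a comment.
 * After: the requirement is asserted, so a missing lock triggers a
 * lockdep warning instead of silent list corruption.
 */
static void demo_add_full(struct demo_node *n, struct list_head *entry)
{
	lockdep_assert_held(&n->list_lock);
	list_add(entry, &n->full);
}

/* Correct usage: the caller takes list_lock around the helper. */
static void demo_caller(struct demo_node *n, struct list_head *entry)
{
	unsigned long flags;

	spin_lock_irqsave(&n->list_lock, flags);
	demo_add_full(n, entry);
	spin_unlock_irqrestore(&n->list_lock, flags);
}

lockdep_assert_held() compiles away when CONFIG_LOCKDEP is disabled, so the check costs nothing in production builds.
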
mm/slub.c
@@ -985,23 +985,22 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
 
 /*
  * Tracking of fully allocated slabs for debugging purposes.
- *
- * list_lock must be held.
  */
 static void add_full(struct kmem_cache *s,
 	struct kmem_cache_node *n, struct page *page)
 {
+	lockdep_assert_held(&n->list_lock);
+
 	if (!(s->flags & SLAB_STORE_USER))
 		return;
 
 	list_add(&page->lru, &n->full);
 }
 
-/*
- * list_lock must be held.
- */
-static void remove_full(struct kmem_cache *s, struct page *page)
+static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
 {
+	lockdep_assert_held(&n->list_lock);
+
 	if (!(s->flags & SLAB_STORE_USER))
 		return;
 
@@ -1250,7 +1249,8 @@ static inline int check_object(struct kmem_cache *s, struct page *page,
 			void *object, u8 val) { return 1; }
 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
 					struct page *page) {}
-static inline void remove_full(struct kmem_cache *s, struct page *page) {}
+static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
+					struct page *page) {}
 static inline unsigned long kmem_cache_flags(unsigned long object_size,
 			unsigned long flags, const char *name,
 			void (*ctor)(void *))
@@ -1504,12 +1504,12 @@ static void discard_slab(struct kmem_cache *s, struct page *page)
 
 /*
  * Management of partially allocated slabs.
- *
- * list_lock must be held.
  */
 static inline void add_partial(struct kmem_cache_node *n,
 				struct page *page, int tail)
 {
+	lockdep_assert_held(&n->list_lock);
+
 	n->nr_partial++;
 	if (tail == DEACTIVATE_TO_TAIL)
 		list_add_tail(&page->lru, &n->partial);
@@ -1517,12 +1517,11 @@ static inline void add_partial(struct kmem_cache_node *n,
 		list_add(&page->lru, &n->partial);
 }
 
-/*
- * list_lock must be held.
- */
 static inline void remove_partial(struct kmem_cache_node *n,
 					struct page *page)
 {
+	lockdep_assert_held(&n->list_lock);
+
 	list_del(&page->lru);
 	n->nr_partial--;
 }
@@ -1532,8 +1531,6 @@ static inline void remove_partial(struct kmem_cache_node *n,
  * return the pointer to the freelist.
  *
  * Returns a list of objects or NULL if it fails.
- *
- * Must hold list_lock since we modify the partial list.
  */
 static inline void *acquire_slab(struct kmem_cache *s,
 		struct kmem_cache_node *n, struct page *page,
@@ -1543,6 +1540,8 @@ static inline void *acquire_slab(struct kmem_cache *s,
 	unsigned long counters;
 	struct page new;
 
+	lockdep_assert_held(&n->list_lock);
+
 	/*
 	 * Zap the freelist and set the frozen bit.
 	 * The old freelist is the list of objects for the
@@ -1887,7 +1886,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
 
 			else if (l == M_FULL)
 
-				remove_full(s, page);
+				remove_full(s, n, page);
 
 			if (m == M_PARTIAL) {
 
@@ -2541,7 +2540,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		new.inuse--;
 		if ((!new.inuse || !prior) && !was_frozen) {
 
-			if (kmem_cache_has_cpu_partial(s) && !prior)
+			if (kmem_cache_has_cpu_partial(s) && !prior) {
 
 				/*
 				 * Slab was on no list before and will be
@@ -2551,7 +2550,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 				 */
 				new.frozen = 1;
 
-			else { /* Needs to be taken off a list */
+			} else { /* Needs to be taken off a list */
 
 				n = get_node(s, page_to_nid(page));
 				/*
@@ -2600,7 +2599,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	 */
 	if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
 		if (kmem_cache_debug(s))
-			remove_full(s, page);
+			remove_full(s, n, page);
 		add_partial(n, page, DEACTIVATE_TO_TAIL);
 		stat(s, FREE_ADD_PARTIAL);
 	}
@@ -2614,9 +2613,10 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		 */
 		remove_partial(n, page);
 		stat(s, FREE_REMOVE_PARTIAL);
-	} else
+	} else {
 		/* Slab must be on the full list */
-		remove_full(s, page);
+		remove_full(s, n, page);
+	}
 
 	spin_unlock_irqrestore(&n->list_lock, flags);
 	stat(s, FREE_SLAB);