Commit 41bec7c3 authored by Vlastimil Babka

mm/slub: remove slab_lock() usage for debug operations

All alloc and free operations on debug caches are now serialized by
n->list_lock, so we can remove slab_lock() usage in validate_slab()
and list_slab_objects() as those also happen under n->list_lock.

Note the usage in list_slab_objects() could happen even on non-debug
caches, but only during cache shutdown time, so there should not be any
parallel freeing activity anymore. Except for buggy slab users, but in
that case the slab_lock() would not help against the common cmpxchg
based fast paths (in non-debug caches) anyway.
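
For reference, the serialization this relies on can be seen in the caller,
validate_slab_node(). A simplified sketch (adapted from mm/slub.c; the n->full
list walk and error reporting are omitted) shows that the whole validation walk
already runs under n->list_lock, the same lock that now covers the alloc/free
slowpaths of debug caches:

static int validate_slab_node(struct kmem_cache *s,
			      struct kmem_cache_node *n,
			      unsigned long *obj_map)
{
	unsigned long count = 0;
	struct slab *slab;
	unsigned long flags;

	/* Serializes the walk against debug-cache alloc/free slowpaths. */
	spin_lock_irqsave(&n->list_lock, flags);

	list_for_each_entry(slab, &n->partial, slab_list) {
		validate_slab(s, slab, obj_map);	/* no slab_lock() needed */
		count++;
	}
	/* ... n->full is walked the same way when debugging is enabled ... */

	spin_unlock_irqrestore(&n->list_lock, flags);
	return count;
}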

Also adjust documentation comments accordingly.
Suggested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Acked-by: David Rientjes <rientjes@google.com>
parent c7323a5a
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -50,7 +50,7 @@
  * 1. slab_mutex (Global Mutex)
  * 2. node->list_lock (Spinlock)
  * 3. kmem_cache->cpu_slab->lock (Local lock)
- * 4. slab_lock(slab) (Only on some arches or for debugging)
+ * 4. slab_lock(slab) (Only on some arches)
  * 5. object_map_lock (Only for debugging)
  *
  * slab_mutex
@@ -64,8 +64,9 @@
  * The slab_lock is a wrapper around the page lock, thus it is a bit
  * spinlock.
  *
- * The slab_lock is only used for debugging and on arches that do not
- * have the ability to do a cmpxchg_double. It only protects:
+ * The slab_lock is only used on arches that do not have the ability
+ * to do a cmpxchg_double. It only protects:
+ *
  * A. slab->freelist -> List of free objects in a slab
  * B. slab->inuse -> Number of objects in use
  * C. slab->objects -> Number of objects in slab
@@ -94,6 +95,9 @@
  * allocating a long series of objects that fill up slabs does not require
  * the list lock.
  *
+ * For debug caches, all allocations are forced to go through a list_lock
+ * protected region to serialize against concurrent validation.
+ *
  * cpu_slab->lock local lock
  *
  * This locks protect slowpath manipulation of all kmem_cache_cpu fields
@@ -4369,7 +4373,6 @@ static void list_slab_objects(struct kmem_cache *s, struct slab *slab,
 	void *p;
 
 	slab_err(s, slab, text, s->name);
-	slab_lock(slab, &flags);
 
 	map = get_map(s, slab);
 	for_each_object(p, s, addr, slab->objects) {
@@ -4380,7 +4383,6 @@ static void list_slab_objects(struct kmem_cache *s, struct slab *slab,
 		}
 	}
 	put_map(map);
-	slab_unlock(slab, &flags);
 #endif
 }
@@ -5108,12 +5110,9 @@ static void validate_slab(struct kmem_cache *s, struct slab *slab,
 			  unsigned long *obj_map)
 {
 	void *p;
 	void *addr = slab_address(slab);
-	unsigned long flags;
-
-	slab_lock(slab, &flags);
 	if (!check_slab(s, slab) || !on_freelist(s, slab, NULL))
-		goto unlock;
+		return;
 
 	/* Now we know that a valid freelist exists */
 	__fill_map(obj_map, s, slab);
@@ -5124,8 +5123,6 @@ static void validate_slab(struct kmem_cache *s, struct slab *slab,
 		if (!check_object(s, slab, p, val))
 			break;
 	}
-unlock:
-	slab_unlock(slab, &flags);
 }
 
 static int validate_slab_node(struct kmem_cache *s,
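
To illustrate the "For debug caches ..." comment added above: on a debug cache,
a free now performs its consistency checks and freelist update inside the same
n->list_lock region that the validation walk takes. A hypothetical, minimal
sketch of that pattern (the two helpers here are illustrative placeholders, not
the exact mm/slub.c functions):

static void free_to_debug_slab(struct kmem_cache *s,
			       struct kmem_cache_node *n,
			       struct slab *slab, void *object)
{
	unsigned long flags;

	spin_lock_irqsave(&n->list_lock, flags);
	/*
	 * Both the check and the freelist update happen with n->list_lock
	 * held, so a concurrent validate_slab() can never observe a
	 * half-updated slab; a per-slab slab_lock() would add nothing.
	 */
	if (object_checks_pass(s, slab, object))	/* illustrative helper */
		put_on_freelist(s, slab, object);	/* illustrative helper */
	spin_unlock_irqrestore(&n->list_lock, flags);
}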