Commit d3f661d6 authored by Christoph Lameter, committed by Pekka Enberg

slub: Get rid of slab_free_hook_irq()

The following patch will make the fastpaths lockless, so they will no longer
require interrupts to be disabled. Calling the free hook with irqs disabled
will then no longer be possible.

Move the slab_free_hook_irq() logic into slab_free_hook(). Disable
interrupts only if features that require the callbacks to run with
interrupts off are selected, and re-enable them once the calls have been made.
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
parent 1a757fe5
@@ -807,14 +807,24 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void
 static inline void slab_free_hook(struct kmem_cache *s, void *x)
 {
 	kmemleak_free_recursive(x, s->flags);
-}
 
-static inline void slab_free_hook_irq(struct kmem_cache *s, void *object)
-{
-	kmemcheck_slab_free(s, object, s->objsize);
-	debug_check_no_locks_freed(object, s->objsize);
-	if (!(s->flags & SLAB_DEBUG_OBJECTS))
-		debug_check_no_obj_freed(object, s->objsize);
+	/*
+	 * Trouble is that we may no longer disable interupts in the fast path
+	 * So in order to make the debug calls that expect irqs to be
+	 * disabled we need to disable interrupts temporarily.
+	 */
+#if defined(CONFIG_KMEMCHECK) || defined(CONFIG_LOCKDEP)
+	{
+		unsigned long flags;
+
+		local_irq_save(flags);
+		kmemcheck_slab_free(s, x, s->objsize);
+		debug_check_no_locks_freed(x, s->objsize);
+		if (!(s->flags & SLAB_DEBUG_OBJECTS))
+			debug_check_no_obj_freed(x, s->objsize);
+		local_irq_restore(flags);
+	}
+#endif
 }
 
 /*
@@ -1101,9 +1111,6 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
 
 static inline void slab_free_hook(struct kmem_cache *s, void *x) {}
 
-static inline void slab_free_hook_irq(struct kmem_cache *s,
-							void *object) {}
-
 #endif /* CONFIG_SLUB_DEBUG */
 
 /*
@@ -1909,8 +1916,6 @@ static __always_inline void slab_free(struct kmem_cache *s,
 	local_irq_save(flags);
 	c = __this_cpu_ptr(s->cpu_slab);
 
-	slab_free_hook_irq(s, x);
-
 	if (likely(page == c->page && c->node != NUMA_NO_NODE)) {
 		set_freepointer(s, object, c->freelist);
 		c->freelist = object;
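
The consolidated hook keeps the unconditional kmemleak callback and wraps only the debug callbacks that still expect interrupts to be off in a local_irq_save()/local_irq_restore() pair, which is compiled in only when CONFIG_KMEMCHECK or CONFIG_LOCKDEP is selected. Below is a minimal user-space sketch of that pattern, not kernel code; the names free_hook(), fake_irq_save(), fake_irq_restore(), debug_check_object() and CONFIG_FAKE_DEBUG are stand-ins for illustration only.

#include <stdio.h>

/* Stand-in for CONFIG_KMEMCHECK / CONFIG_LOCKDEP; undefine it to build the hook without the irq-off section. */
#define CONFIG_FAKE_DEBUG 1

static unsigned long fake_irq_save(void)
{
	puts("irqs off");
	return 1;	/* pretend this is the saved flags word */
}

static void fake_irq_restore(unsigned long flags)
{
	(void)flags;
	puts("irqs back on");
}

/* Debug callback that, like the kmemcheck/lockdep hooks, expects to run with irqs off. */
static void debug_check_object(void *x)
{
	printf("debug check on %p (expects irqs off)\n", x);
}

/* Analogue of the consolidated slab_free_hook(): one hook with a conditional irq-off section. */
static void free_hook(void *x)
{
	/* Unconditional bookkeeping, like kmemleak_free_recursive(). */
	printf("always-on bookkeeping for %p\n", x);

#if defined(CONFIG_FAKE_DEBUG)
	{
		unsigned long flags = fake_irq_save();

		debug_check_object(x);
		fake_irq_restore(flags);
	}
#endif
}

int main(void)
{
	int obj = 0;

	free_hook(&obj);
	return 0;
}

Building with CONFIG_FAKE_DEBUG undefined drops the whole irq-off block at compile time, mirroring how the real hook avoids any interrupt toggling when neither debug feature is configured.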