Commit 3c0f396a authored by Linus Torvalds

Merge tag 'slab-for-6.0-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab

Pull slab fixes from Vlastimil Babka:

 - Fix a possible use-after-free in SLUB's kmem_cache removal,
   introduced in this cycle, by Feng Tang.

 - WQ_MEM_RECLAIM dependency fix for the workqueue-based cpu slab
   flushing introduced in 5.15, by Maurizio Lombardi.

 - Add missing KASAN hooks in two kmalloc entry paths, by Peter
   Collingbourne.

 - A BUG_ON() removal in SLUB's kmem_cache creation when allocation
   fails (the allocation is too small to plausibly fail in practice;
   syzbot triggered it with fault injection), by Chao Yu.

* tag 'slab-for-6.0-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab:
  mm: slub: fix flush_cpu_slab()/__free_slab() invocations in task context.
  mm/slab_common: fix possible double free of kmem_cache
  kasan: call kasan_malloc() from __kmalloc_*track_caller()
  mm/slub: fix to return errno if kmalloc() fails
parents c69cf88c e45cc288
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -475,6 +475,7 @@ void slab_kmem_cache_release(struct kmem_cache *s)
 void kmem_cache_destroy(struct kmem_cache *s)
 {
 	int refcnt;
+	bool rcu_set;
 
 	if (unlikely(!s) || !kasan_check_byte(s))
 		return;
@@ -482,6 +483,8 @@ void kmem_cache_destroy(struct kmem_cache *s)
 	cpus_read_lock();
 	mutex_lock(&slab_mutex);
 
+	rcu_set = s->flags & SLAB_TYPESAFE_BY_RCU;
+
 	refcnt = --s->refcount;
 	if (refcnt)
 		goto out_unlock;
@@ -492,7 +495,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
 out_unlock:
 	mutex_unlock(&slab_mutex);
 	cpus_read_unlock();
-	if (!refcnt && !(s->flags & SLAB_TYPESAFE_BY_RCU))
+	if (!refcnt && !rcu_set)
 		kmem_cache_release(s);
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
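
The mm/slab_common.c change above fixes the use-after-free: for a
SLAB_TYPESAFE_BY_RCU cache the final kmem_cache_release() runs
asynchronously from RCU-delayed work, so by the time kmem_cache_destroy()
re-read s->flags after dropping slab_mutex, the cache may already have
been freed, and a stale read could even trigger a second release. The fix
snapshots the flag into rcu_set while the mutex still pins s. Below is a
minimal userspace sketch of the same "snapshot before release" pattern;
all names in it are hypothetical, not kernel code.

    #include <stdbool.h>
    #include <stdlib.h>

    struct cache {
            unsigned int flags;     /* CACHE_DEFERRED_FREE stands in for
                                       SLAB_TYPESAFE_BY_RCU */
            int refcount;
    };

    #define CACHE_DEFERRED_FREE 0x1

    static void cache_release(struct cache *c)
    {
            free(c);
    }

    static void cache_destroy(struct cache *c)
    {
            /*
             * Snapshot the flag while we still hold a reference: once the
             * refcount drops, a deferred-release path may free c at any time.
             */
            bool deferred = c->flags & CACHE_DEFERRED_FREE;

            if (--c->refcount)
                    return;

            /*
             * The buggy variant tested c->flags here. If the deferred path
             * had already freed c, that read is a use-after-free, and a
             * stale zero would make us release c a second time.
             */
            if (!deferred)
                    cache_release(c);
    }

    int main(void)
    {
            struct cache *c = calloc(1, sizeof(*c));

            c->refcount = 1;
            cache_destroy(c);       /* no deferred flag: freed exactly once */
            return 0;
    }
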
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -310,6 +310,11 @@ static inline void stat(const struct kmem_cache *s, enum stat_item si)
  */
 static nodemask_t slab_nodes;
 
+/*
+ * Workqueue used for flush_cpu_slab().
+ */
+static struct workqueue_struct *flushwq;
+
 /********************************************************************
  *			Core slab cache functions
  *******************************************************************/
...@@ -2730,7 +2735,7 @@ static void flush_all_cpus_locked(struct kmem_cache *s) ...@@ -2730,7 +2735,7 @@ static void flush_all_cpus_locked(struct kmem_cache *s)
INIT_WORK(&sfw->work, flush_cpu_slab); INIT_WORK(&sfw->work, flush_cpu_slab);
sfw->skip = false; sfw->skip = false;
sfw->s = s; sfw->s = s;
schedule_work_on(cpu, &sfw->work); queue_work_on(cpu, flushwq, &sfw->work);
} }
for_each_online_cpu(cpu) { for_each_online_cpu(cpu) {
@@ -4858,6 +4863,8 @@ void __init kmem_cache_init(void)
 
 void __init kmem_cache_init_late(void)
 {
+	flushwq = alloc_workqueue("slub_flushwq", WQ_MEM_RECLAIM, 0);
+	WARN_ON(!flushwq);
 }
 
 struct kmem_cache *
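
Together, the three mm/slub.c hunks above fix the WQ_MEM_RECLAIM
dependency: flush_all_cpus_locked() can be reached from the memory-reclaim
path, and work placed on the default system workqueue via
schedule_work_on() is not guaranteed to run there, since under memory
pressure no new kworker threads can be spawned and system_wq has no
rescuer. A queue allocated with WQ_MEM_RECLAIM keeps a dedicated rescuer
thread, so its items always make forward progress. A minimal module-style
sketch of the pattern; my_wq and my_work_fn are hypothetical names:

    #include <linux/module.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *my_wq;

    static void my_work_fn(struct work_struct *work)
    {
            /* Work that must make progress even under memory pressure. */
    }

    static DECLARE_WORK(my_work, my_work_fn);

    static int __init my_wq_init(void)
    {
            /* WQ_MEM_RECLAIM attaches a rescuer thread to the queue. */
            my_wq = alloc_workqueue("my_wq", WQ_MEM_RECLAIM, 0);
            if (!my_wq)
                    return -ENOMEM;

            queue_work(my_wq, &my_work);
            return 0;
    }

    static void __exit my_wq_exit(void)
    {
            destroy_workqueue(my_wq);       /* drains pending work first */
    }

    module_init(my_wq_init);
    module_exit(my_wq_exit);
    MODULE_LICENSE("GPL");
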
@@ -4926,6 +4933,8 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 	/* Honor the call site pointer we received. */
 	trace_kmalloc(caller, ret, s, size, s->size, gfpflags);
 
+	ret = kasan_kmalloc(s, ret, size, gfpflags);
+
 	return ret;
 }
 EXPORT_SYMBOL(__kmalloc_track_caller);
@@ -4957,6 +4966,8 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	/* Honor the call site pointer we received. */
 	trace_kmalloc_node(caller, ret, s, size, s->size, gfpflags, node);
 
+	ret = kasan_kmalloc(s, ret, size, gfpflags);
+
 	return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node_track_caller);
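
The two hunks above restore the KASAN contract for the *_track_caller
entry paths (used by helpers such as kstrdup() and kmemdup()): every
allocation must be passed through kasan_kmalloc(), which unpoisons exactly
the requested size, poisons the redzone behind it, and, in the tag-based
modes, may return a re-tagged pointer, which is why the return value is
assigned back to ret. A hypothetical illustration of what the missing hook
cost (kasan_hook_demo() is not part of the patch):

    #include <linux/slab.h>
    #include <linux/string.h>

    static void kasan_hook_demo(void)
    {
            /* kstrdup() allocates via kmalloc_track_caller(). */
            char *p = kstrdup("ab", GFP_KERNEL);    /* 3 bytes requested */

            if (!p)
                    return;
            /*
             * One-byte write past the requested size. With the
             * kasan_kmalloc() hook in place, KASAN can flag this overflow;
             * without it, the object carried no redzone information at all.
             */
            p[3] = 'x';
            kfree(p);
    }
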
@@ -5890,7 +5901,8 @@ static char *create_unique_id(struct kmem_cache *s)
 	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
 	char *p = name;
 
-	BUG_ON(!name);
+	if (!name)
+		return ERR_PTR(-ENOMEM);
 
 	*p++ = ':';
 	/*
@@ -5948,6 +5960,8 @@ static int sysfs_slab_add(struct kmem_cache *s)
 		 * for the symlinks.
 		 */
 		name = create_unique_id(s);
+		if (IS_ERR(name))
+			return PTR_ERR(name);
 	}
 
 	s->kobj.kset = kset;
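
The final two hunks replace the BUG_ON() with the kernel's usual
error-pointer idiom: create_unique_id() encodes -ENOMEM into the returned
pointer with ERR_PTR(), and its caller sysfs_slab_add() decodes it with
IS_ERR()/PTR_ERR(). A self-contained sketch of the idiom; my_make_name()
and my_caller() are hypothetical stand-ins:

    #include <linux/err.h>
    #include <linux/slab.h>

    static char *my_make_name(gfp_t flags)
    {
            char *name = kmalloc(32, flags);

            if (!name)
                    return ERR_PTR(-ENOMEM);  /* encode errno in the pointer */
            return name;
    }

    static int my_caller(void)
    {
            char *name = my_make_name(GFP_KERNEL);

            if (IS_ERR(name))
                    return PTR_ERR(name);     /* decode back to -ENOMEM */
            /* ... use name ... */
            kfree(name);
            return 0;
    }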