Commit 6f3dd2c3 authored by Vlastimil Babka

mm/slub: fix bulk alloc and free stats

The SLUB sysfs stats enabled by CONFIG_SLUB_STATS have two deficiencies
identified wrt bulk alloc/free operations:

- Bulk allocations from the cpu freelist are not counted. Add the
  ALLOC_FASTPATH counter there.

- Bulk fastpath freeing will count a list of multiple objects with a
  single FREE_FASTPATH inc. Add a stat_add() variant to count them all.

Reviewed-by: Chengming Zhou <zhouchengming@bytedance.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
parent ecf9a253
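
With CONFIG_SLUB_STATS enabled, each of these counters is exposed as a file
under /sys/kernel/slab/<cache>/ (the first field is the total across cpus,
followed by per-cpu values). A minimal userspace sketch for watching the two
counters this commit fixes; the cache name "kmalloc-64" is only an example:

/* Hypothetical helper: dump alloc_fastpath and free_fastpath for one
 * SLUB cache. Requires a kernel built with CONFIG_SLUB_STATS. */
#include <stdio.h>

static void print_stat(const char *cache, const char *stat)
{
	char path[256], line[512];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/kernel/slab/%s/%s", cache, stat);
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return;
	}
	/* Format is "<total> C0=<n> C1=<n> ..." */
	if (fgets(line, sizeof(line), f))
		printf("%s/%s: %s", cache, stat, line);
	fclose(f);
}

int main(void)
{
	print_stat("kmalloc-64", "alloc_fastpath");
	print_stat("kmalloc-64", "free_fastpath");
	return 0;
}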
@@ -396,6 +396,14 @@ static inline void stat(const struct kmem_cache *s, enum stat_item si)
 #endif
 }
 
+static inline
+void stat_add(const struct kmem_cache *s, enum stat_item si, int v)
+{
+#ifdef CONFIG_SLUB_STATS
+	raw_cpu_add(s->cpu_slab->stat[si], v);
+#endif
+}
+
 /*
  * The slab lists for all objects.
  */
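
For reference, the pre-existing stat() helper named in the hunk header bumps a
counter by exactly one via raw_cpu_inc(); stat_add() generalizes it to an
arbitrary delta so a batch of objects can be accounted with a single per-cpu
add. Its (unchanged) body, from the same file:

static inline void stat(const struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
	raw_cpu_inc(s->cpu_slab->stat[si]);
#endif
}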
@@ -4268,7 +4276,7 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
 		local_unlock(&s->cpu_slab->lock);
 	}
-	stat(s, FREE_FASTPATH);
+	stat_add(s, FREE_FASTPATH, cnt);
 }
 #else /* CONFIG_SLUB_TINY */
 static void do_slab_free(struct kmem_cache *s,
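
Here cnt is the length of the detached freelist being returned in one fastpath
operation, so the old single stat() call undercounted bulk frees by cnt - 1
events. A toy userspace model (not kernel code; single-cpu, names simplified)
of the difference:

/* Toy model of the FREE_FASTPATH accounting fix. */
#include <stdio.h>

enum stat_item { FREE_FASTPATH, NR_STATS };

static unsigned int stats[NR_STATS];	/* stands in for s->cpu_slab->stat */

static void stat_inc(enum stat_item si)        { stats[si]++; }
static void stat_add(enum stat_item si, int v) { stats[si] += v; }

int main(void)
{
	int cnt = 8;	/* objects freed in one bulk operation */

	stat_inc(FREE_FASTPATH);	/* old: one event per batch */
	printf("old: %u\n", stats[FREE_FASTPATH]);

	stats[FREE_FASTPATH] = 0;
	stat_add(FREE_FASTPATH, cnt);	/* fixed: one event per object */
	printf("new: %u\n", stats[FREE_FASTPATH]);
	return 0;
}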
@@ -4545,6 +4553,7 @@ static inline int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
 		c->freelist = get_freepointer(s, object);
 		p[i] = object;
 		maybe_wipe_obj_freeptr(s, p[i]);
+		stat(s, ALLOC_FASTPATH);
 	}
 	c->tid = next_tid(c->tid);
 	local_unlock_irqrestore(&s->cpu_slab->lock, irqflags);
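
This hunk counts one ALLOC_FASTPATH event per object taken off the cpu
freelist in __kmem_cache_alloc_bulk(). For context, a minimal (illustrative,
untested) module skeleton exercising the bulk APIs whose accounting the commit
fixes; the cache name and sizes are arbitrary:

/* Sketch of a module that drives kmem_cache_alloc_bulk()/free_bulk(). */
#include <linux/module.h>
#include <linux/slab.h>

static struct kmem_cache *demo_cache;	/* hypothetical demo cache */

static int __init bulk_demo_init(void)
{
	void *objs[16];
	int n;

	demo_cache = kmem_cache_create("bulk_demo", 64, 0, 0, NULL);
	if (!demo_cache)
		return -ENOMEM;

	/* One bulk alloc of 16 objects should now add 16 to alloc_fastpath
	 * (when served from the cpu freelist), and the matching bulk free
	 * should add 16 to free_fastpath. */
	n = kmem_cache_alloc_bulk(demo_cache, GFP_KERNEL, ARRAY_SIZE(objs), objs);
	if (n)
		kmem_cache_free_bulk(demo_cache, n, objs);

	return 0;
}

static void __exit bulk_demo_exit(void)
{
	kmem_cache_destroy(demo_cache);
}

module_init(bulk_demo_init);
module_exit(bulk_demo_exit);
MODULE_LICENSE("GPL");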