Commit 46c99e26 authored by Peng Zhang, committed by Andrew Morton

radix tree test suite: align kmem_cache_alloc_bulk() with kernel behavior.

When kmem_cache_alloc_bulk() fails to allocate, leave the freed pointers
in the array.  This enables a more accurate simulation of the kernel's
behavior and allows for testing potential double-free scenarios.
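
As a rough illustration (not part of this patch), here is a caller-side sketch
of the behavior this enables.  It assumes the test suite's kmem_cache_create()
and kmem_cache_set_non_kernel() shims and a GFP_ATOMIC definition that does not
include __GFP_DIRECT_RECLAIM; treat the names and header as assumptions:

	#include <assert.h>
	#include <stddef.h>
	/* kmem_cache_* declarations come from the test suite's slab shim. */

	static void bulk_alloc_failure_sketch(void)
	{
		struct kmem_cache *cache;
		void *objs[8] = { NULL };

		cache = kmem_cache_create("demo", 64, 0, 0, NULL);

		/* Permit only 4 non-sleeping allocations, so an 8-object
		 * bulk request must fail part-way through. */
		kmem_cache_set_non_kernel(cache, 4);

		/* The failed bulk allocation returns 0 after freeing the
		 * objects it did manage to get, but their stale pointers
		 * are left in objs[], mirroring kernel behavior. */
		assert(kmem_cache_alloc_bulk(cache, GFP_ATOMIC, 8, objs) == 0);

		/* A buggy caller that now calls kmem_cache_free(cache,
		 * objs[0]) performs a double free the suite can detect. */
	}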

Link: https://lkml.kernel.org/r/20231027033845.90608-5-zhangpeng.00@bytedance.com
Signed-off-by: Peng Zhang <zhangpeng.00@bytedance.com>
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Mateusz Guzik <mjguzik@gmail.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Michael S. Tsirkin <mst@redhat.com>
Cc: Mike Christie <michael.christie@oracle.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent fd32e4e9
--- a/tools/testing/radix-tree/linux.c
+++ b/tools/testing/radix-tree/linux.c
@@ -93,13 +93,9 @@ void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
 	return p;
 }
 
-void kmem_cache_free_locked(struct kmem_cache *cachep, void *objp)
+void __kmem_cache_free_locked(struct kmem_cache *cachep, void *objp)
 {
 	assert(objp);
-	uatomic_dec(&nr_allocated);
-	uatomic_dec(&cachep->nr_allocated);
-	if (kmalloc_verbose)
-		printf("Freeing %p to slab\n", objp);
 	if (cachep->nr_objs > 10 || cachep->align) {
 		memset(objp, POISON_FREE, cachep->size);
 		free(objp);
@@ -111,6 +107,15 @@ void kmem_cache_free_locked(struct kmem_cache *cachep, void *objp)
 	}
 }
 
+void kmem_cache_free_locked(struct kmem_cache *cachep, void *objp)
+{
+	uatomic_dec(&nr_allocated);
+	uatomic_dec(&cachep->nr_allocated);
+	if (kmalloc_verbose)
+		printf("Freeing %p to slab\n", objp);
+	__kmem_cache_free_locked(cachep, objp);
+}
+
 void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 {
 	pthread_mutex_lock(&cachep->lock);
@@ -141,18 +146,17 @@ int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
 	if (kmalloc_verbose)
 		pr_debug("Bulk alloc %lu\n", size);
 
-	if (!(gfp & __GFP_DIRECT_RECLAIM)) {
-		if (cachep->non_kernel < size)
-			return 0;
-
-		cachep->non_kernel -= size;
-	}
-
 	pthread_mutex_lock(&cachep->lock);
 	if (cachep->nr_objs >= size) {
 		struct radix_tree_node *node;
 
 		for (i = 0; i < size; i++) {
+			if (!(gfp & __GFP_DIRECT_RECLAIM)) {
+				if (!cachep->non_kernel)
+					break;
+				cachep->non_kernel--;
+			}
+
 			node = cachep->objs;
 			cachep->nr_objs--;
 			cachep->objs = node->parent;
@@ -163,11 +167,19 @@ int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
 	} else {
 		pthread_mutex_unlock(&cachep->lock);
 		for (i = 0; i < size; i++) {
+			if (!(gfp & __GFP_DIRECT_RECLAIM)) {
+				if (!cachep->non_kernel)
+					break;
+				cachep->non_kernel--;
+			}
+
 			if (cachep->align) {
 				posix_memalign(&p[i], cachep->align,
 					       cachep->size);
 			} else {
 				p[i] = malloc(cachep->size);
+				if (!p[i])
+					break;
 			}
 			if (cachep->ctor)
 				cachep->ctor(p[i]);
@@ -176,6 +188,15 @@ int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
 		}
 	}
 
+	if (i < size) {
+		size = i;
+		pthread_mutex_lock(&cachep->lock);
+		for (i = 0; i < size; i++)
+			__kmem_cache_free_locked(cachep, p[i]);
+		pthread_mutex_unlock(&cachep->lock);
+		return 0;
+	}
+
 	for (i = 0; i < size; i++) {
 		uatomic_inc(&nr_allocated);
 		uatomic_inc(&cachep->nr_allocated);