Commit 617f8e4d authored by Sidhartha Kumar, committed by Andrew Morton

maple_tree: add test to replicate low memory race conditions

Add new callback fields to the userspace implementation of struct
kmem_cache.  These allow a callback function to be executed in order to
further test low-memory scenarios where node allocation is retried.

The callback can help test race conditions by calling a user-supplied
function when a simulated low-memory event occurs.
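
As a rough sketch of the intended usage (illustrative only and not part of
this patch; second_writer() and setup_race() are placeholder names), a test
can combine the new hooks with the existing mt_set_non_kernel() helper:

	/* Placeholder callback: runs on the allocation attempt that follows a
	 * simulated allocation failure, i.e. while the primary writer is
	 * retrying in the mas_nomem() path.
	 */
	static void second_writer(void *private)
	{
		struct maple_tree *mt = private;

		/* modify @mt here to race with the retrying writer */
		(void)mt;
	}

	static void setup_race(struct maple_tree *mt)
	{
		mt_set_non_kernel(0);		/* fail the next non-reclaim allocation */
		mt_set_private(mt);		/* argument handed to the callback */
		mt_set_callback(second_writer);	/* fired when the failed allocation is retried */
	}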

Link: https://lkml.kernel.org/r/20240812190543.71967-2-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent e1b8b883
@@ -7005,6 +7005,19 @@ void mt_set_non_kernel(unsigned int val)
	kmem_cache_set_non_kernel(maple_node_cache, val);
}

extern void kmem_cache_set_callback(struct kmem_cache *cachep,
		void (*callback)(void *));
void mt_set_callback(void (*callback)(void *))
{
	kmem_cache_set_callback(maple_node_cache, callback);
}

extern void kmem_cache_set_private(struct kmem_cache *cachep, void *private);
void mt_set_private(void *private)
{
	kmem_cache_set_private(maple_node_cache, private);
}

extern unsigned long kmem_cache_get_alloc(struct kmem_cache *);
unsigned long mt_get_alloc_size(void)
{
@@ -36224,6 +36224,65 @@ static noinline void __init check_mtree_dup(struct maple_tree *mt)
extern void test_kmem_cache_bulk(void);

/* callback function used for check_nomem_writer_race() */
static void writer2(void *maple_tree)
{
	struct maple_tree *mt = (struct maple_tree *)maple_tree;
	MA_STATE(mas, mt, 6, 10);

	mtree_lock(mas.tree);
	mas_store(&mas, xa_mk_value(0xC));
	mas_destroy(&mas);
	mtree_unlock(mas.tree);
}

/*
 * check_nomem_writer_race() - test a possible race in the mas_nomem() path
 * @mt: The tree to build.
 *
 * There is a possible race condition in low memory conditions when mas_nomem()
 * gives up its lock. A second writer can change the entry that the primary
 * writer executing the mas_nomem() path is modifying. This test recreates this
 * scenario to ensure we are handling it correctly.
 */
static void check_nomem_writer_race(struct maple_tree *mt)
{
	MA_STATE(mas, mt, 0, 5);

	mt_set_non_kernel(0);
	/* setup root with 2 values with NULL in between */
	mtree_store_range(mt, 0, 5, xa_mk_value(0xA), GFP_KERNEL);
	mtree_store_range(mt, 6, 10, NULL, GFP_KERNEL);
	mtree_store_range(mt, 11, 15, xa_mk_value(0xB), GFP_KERNEL);

	/* setup writer 2 that will trigger the race condition */
	mt_set_private(mt);
	mt_set_callback(writer2);

	mtree_lock(mt);
	/* erase 0-5 */
	mas_erase(&mas);

	/* index 6-10 should retain the value from writer 2 */
	check_load(mt, 6, xa_mk_value(0xC));
	mtree_unlock(mt);

	/* test for the same race but with mas_store_gfp() */
	mtree_store_range(mt, 0, 5, xa_mk_value(0xA), GFP_KERNEL);
	mtree_store_range(mt, 6, 10, NULL, GFP_KERNEL);

	mas_set_range(&mas, 0, 5);
	mtree_lock(mt);
	mas_store_gfp(&mas, NULL, GFP_KERNEL);

	/* ensure write made by writer 2 is retained */
	check_load(mt, 6, xa_mk_value(0xC));

	mt_set_private(NULL);
	mt_set_callback(NULL);
	mtree_unlock(mt);
}
void farmer_tests(void)
{
	struct maple_node *node;

@@ -36257,6 +36316,10 @@ void farmer_tests(void)
	check_dfs_preorder(&tree);
	mtree_destroy(&tree);

	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_USE_RCU);
	check_nomem_writer_race(&tree);
	mtree_destroy(&tree);

	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
	check_prealloc(&tree);
	mtree_destroy(&tree);
@@ -26,8 +26,21 @@ struct kmem_cache {
	unsigned int non_kernel;
	unsigned long nr_allocated;
	unsigned long nr_tallocated;
	bool exec_callback;
	void (*callback)(void *);
	void *private;
};

void kmem_cache_set_callback(struct kmem_cache *cachep, void (*callback)(void *))
{
	cachep->callback = callback;
}

void kmem_cache_set_private(struct kmem_cache *cachep, void *private)
{
	cachep->private = private;
}

void kmem_cache_set_non_kernel(struct kmem_cache *cachep, unsigned int val)
{
	cachep->non_kernel = val;
@@ -58,9 +71,17 @@ void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
{
	void *p;

	if (cachep->exec_callback) {
		if (cachep->callback)
			cachep->callback(cachep->private);
		cachep->exec_callback = false;
	}

	if (!(gfp & __GFP_DIRECT_RECLAIM)) {
		if (!cachep->non_kernel) {
			cachep->exec_callback = true;
			return NULL;
		}

		cachep->non_kernel--;
	}
@@ -223,6 +244,9 @@ kmem_cache_create(const char *name, unsigned int size, unsigned int align,
	ret->objs = NULL;
	ret->ctor = ctor;
	ret->non_kernel = 0;
	ret->exec_callback = false;
	ret->callback = NULL;
	ret->private = NULL;
	return ret;
}