Commit 446e1867 authored by Peng Zhang, committed by Andrew Morton

maple_tree: update check_forking() and bench_forking()

Updated check_forking() and bench_forking() to use __mt_dup() to duplicate
the maple tree, instead of building the new tree entry by entry.

Link: https://lkml.kernel.org/r/20231027033845.90608-9-zhangpeng.00@bytedance.com
Signed-off-by: Peng Zhang <zhangpeng.00@bytedance.com>
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Mateusz Guzik <mjguzik@gmail.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Michael S. Tsirkin <mst@redhat.com>
Cc: Mike Christie <michael.christie@oracle.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent f670fa1c
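
The heart of the patch, condensed from the new (+) lines in the diff below: instead of pre-allocating nodes with mas_expected_entries() and replaying every entry through mas_store() under rcu_read_lock(), each simulated fork now clones the whole source tree with a single __mt_dup() call and then overwrites each entry in the copy, the pattern dup_mmap() is meant to use in this series. A condensed sketch, not a verbatim copy of the file (declarations and teardown elided; mt, newmt, mas, newmas and the two rwsems are set up exactly as in check_forking() below):

	down_write(&mt_lock);
	down_write_nested(&newmt_lock, SINGLE_DEPTH_NESTING);

	/* One bulk duplication instead of nr_entries individual stores. */
	ret = __mt_dup(&mt, &newmt, GFP_KERNEL);
	if (ret) {
		pr_err("OOM!");
		BUG_ON(1);
	}

	/* Walk the copy and store over every entry, standing in for the
	 * per-VMA fix-ups a real fork would perform.
	 */
	mas_set(&newmas, 0);
	mas_for_each(&newmas, val, ULONG_MAX)
		mas_store(&newmas, val);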
lib/test_maple_tree.c
@@ -1834,47 +1834,48 @@ static noinline void __init bench_mas_prev(struct maple_tree *mt)
 }
 #endif
 
 /* check_forking - simulate the kernel forking sequence with the tree. */
-static noinline void __init check_forking(struct maple_tree *mt)
+static noinline void __init check_forking(void)
 {
-	struct maple_tree newmt;
-	int i, nr_entries = 134;
+	struct maple_tree mt, newmt;
+	int i, nr_entries = 134, ret;
 	void *val;
-	MA_STATE(mas, mt, 0, 0);
-	MA_STATE(newmas, mt, 0, 0);
-	struct rw_semaphore newmt_lock;
+	MA_STATE(mas, &mt, 0, 0);
+	MA_STATE(newmas, &newmt, 0, 0);
+	struct rw_semaphore mt_lock, newmt_lock;
 
+	init_rwsem(&mt_lock);
 	init_rwsem(&newmt_lock);
 
-	for (i = 0; i <= nr_entries; i++)
-		mtree_store_range(mt, i*10, i*10 + 5,
-				  xa_mk_value(i), GFP_KERNEL);
+	mt_init_flags(&mt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
+	mt_set_external_lock(&mt, &mt_lock);
 
-	mt_set_non_kernel(99999);
 	mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
 	mt_set_external_lock(&newmt, &newmt_lock);
-	newmas.tree = &newmt;
-	mas_reset(&newmas);
-	mas_reset(&mas);
-	down_write(&newmt_lock);
-	mas.index = 0;
-	mas.last = 0;
-	if (mas_expected_entries(&newmas, nr_entries)) {
+
+	down_write(&mt_lock);
+	for (i = 0; i <= nr_entries; i++) {
+		mas_set_range(&mas, i*10, i*10 + 5);
+		mas_store_gfp(&mas, xa_mk_value(i), GFP_KERNEL);
+	}
+
+	down_write_nested(&newmt_lock, SINGLE_DEPTH_NESTING);
+	ret = __mt_dup(&mt, &newmt, GFP_KERNEL);
+	if (ret) {
 		pr_err("OOM!");
 		BUG_ON(1);
 	}
-	rcu_read_lock();
-	mas_for_each(&mas, val, ULONG_MAX) {
-		newmas.index = mas.index;
-		newmas.last = mas.last;
+
+	mas_set(&newmas, 0);
+	mas_for_each(&newmas, val, ULONG_MAX)
 		mas_store(&newmas, val);
-	}
-	rcu_read_unlock();
+
 	mas_destroy(&newmas);
+	mas_destroy(&mas);
 	mt_validate(&newmt);
-	mt_set_non_kernel(0);
 	__mt_destroy(&newmt);
+	__mt_destroy(&mt);
 	up_write(&newmt_lock);
+	up_write(&mt_lock);
 }
 
 static noinline void __init check_iteration(struct maple_tree *mt)
@@ -1977,49 +1978,51 @@ static noinline void __init check_mas_store_gfp(struct maple_tree *mt)
 }
 
 #if defined(BENCH_FORK)
-static noinline void __init bench_forking(struct maple_tree *mt)
+static noinline void __init bench_forking(void)
 {
-	struct maple_tree newmt;
-	int i, nr_entries = 134, nr_fork = 80000;
+	struct maple_tree mt, newmt;
+	int i, nr_entries = 134, nr_fork = 80000, ret;
 	void *val;
-	MA_STATE(mas, mt, 0, 0);
-	MA_STATE(newmas, mt, 0, 0);
-	struct rw_semaphore newmt_lock;
+	MA_STATE(mas, &mt, 0, 0);
+	MA_STATE(newmas, &newmt, 0, 0);
+	struct rw_semaphore mt_lock, newmt_lock;
 
+	init_rwsem(&mt_lock);
 	init_rwsem(&newmt_lock);
-	mt_set_external_lock(&newmt, &newmt_lock);
 
-	for (i = 0; i <= nr_entries; i++)
-		mtree_store_range(mt, i*10, i*10 + 5,
-				  xa_mk_value(i), GFP_KERNEL);
+	mt_init_flags(&mt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
+	mt_set_external_lock(&mt, &mt_lock);
+
+	down_write(&mt_lock);
+	for (i = 0; i <= nr_entries; i++) {
+		mas_set_range(&mas, i*10, i*10 + 5);
+		mas_store_gfp(&mas, xa_mk_value(i), GFP_KERNEL);
+	}
 
 	for (i = 0; i < nr_fork; i++) {
-		mt_set_non_kernel(99999);
-		mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE);
-		newmas.tree = &newmt;
-		mas_reset(&newmas);
-		mas_reset(&mas);
-		mas.index = 0;
-		mas.last = 0;
-		rcu_read_lock();
-		down_write(&newmt_lock);
-		if (mas_expected_entries(&newmas, nr_entries)) {
-			printk("OOM!");
+		mt_init_flags(&newmt,
+			      MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
+		mt_set_external_lock(&newmt, &newmt_lock);
+
+		down_write_nested(&newmt_lock, SINGLE_DEPTH_NESTING);
+		ret = __mt_dup(&mt, &newmt, GFP_KERNEL);
+		if (ret) {
+			pr_err("OOM!");
 			BUG_ON(1);
 		}
 
-		mas_for_each(&mas, val, ULONG_MAX) {
-			newmas.index = mas.index;
-			newmas.last = mas.last;
+		mas_set(&newmas, 0);
+		mas_for_each(&newmas, val, ULONG_MAX)
 			mas_store(&newmas, val);
-		}
+
 		mas_destroy(&newmas);
-		rcu_read_unlock();
 		mt_validate(&newmt);
-		mt_set_non_kernel(0);
 		__mt_destroy(&newmt);
 		up_write(&newmt_lock);
 	}
+
+	mas_destroy(&mas);
+	__mt_destroy(&mt);
+	up_write(&mt_lock);
 }
 #endif
@@ -3615,9 +3618,7 @@ static int __init maple_tree_seed(void)
 #endif
 #if defined(BENCH_FORK)
 #define BENCH
-	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
-	bench_forking(&tree);
-	mtree_destroy(&tree);
+	bench_forking();
 	goto skip;
 #endif
 #if defined(BENCH_MT_FOR_EACH)
@@ -3650,9 +3651,7 @@ static int __init maple_tree_seed(void)
 	check_iteration(&tree);
 	mtree_destroy(&tree);
 
-	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
-	check_forking(&tree);
-	mtree_destroy(&tree);
+	check_forking();
 
 	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
 	check_mas_store_gfp(&tree);

tools/include/linux/rwsem.h
@@ -37,4 +37,8 @@ static inline int up_write(struct rw_semaphore *sem)
 {
 	return pthread_rwlock_unlock(&sem->lock);
 }
+
+#define down_read_nested(sem, subclass)		down_read(sem)
+#define down_write_nested(sem, subclass)	down_write(sem)
+
 #endif /* _TOOLS_RWSEM_H */
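
The rwsem.h stubs are what keep the userspace build compiling: lib/test_maple_tree.c is also built as a userspace test program (the tools build, where this pthread-based shim stands in for kernel rwsems), and there is no lockdep in that environment, so the _nested variants can simply drop the subclass argument. In the kernel, down_write_nested(..., SINGLE_DEPTH_NESTING) tells lockdep that taking a second write lock of the same lock class is an intentional, ordered nesting rather than a self-deadlock. A sketch of the locking pattern the updated tests rely on (assuming SINGLE_DEPTH_NESTING comes from the lockdep headers, or their tools stubs):

	struct rw_semaphore mt_lock, newmt_lock;

	init_rwsem(&mt_lock);
	init_rwsem(&newmt_lock);

	down_write(&mt_lock);		/* lock the source tree first */
	/* Second rwsem of the same class: annotate the nesting for
	 * lockdep; in the userspace shim this is plain down_write().
	 */
	down_write_nested(&newmt_lock, SINGLE_DEPTH_NESTING);

	/* ... __mt_dup(&mt, &newmt, GFP_KERNEL) and per-entry fix-ups ... */

	up_write(&newmt_lock);
	up_write(&mt_lock);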