Commit 446e1867 authored by Peng Zhang, committed by Andrew Morton

maple_tree: update check_forking() and bench_forking()

Updated check_forking() and bench_forking() to use __mt_dup() to duplicate
maple tree.

Link: https://lkml.kernel.org/r/20231027033845.90608-9-zhangpeng.00@bytedance.com
Signed-off-by: Peng Zhang <zhangpeng.00@bytedance.com>
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Mateusz Guzik <mjguzik@gmail.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Michael S. Tsirkin <mst@redhat.com>
Cc: Mike Christie <michael.christie@oracle.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent f670fa1c
...@@ -1834,47 +1834,48 @@ static noinline void __init bench_mas_prev(struct maple_tree *mt) ...@@ -1834,47 +1834,48 @@ static noinline void __init bench_mas_prev(struct maple_tree *mt)
} }
#endif #endif
/* check_forking - simulate the kernel forking sequence with the tree. */ /* check_forking - simulate the kernel forking sequence with the tree. */
static noinline void __init check_forking(struct maple_tree *mt) static noinline void __init check_forking(void)
{ {
struct maple_tree mt, newmt;
struct maple_tree newmt; int i, nr_entries = 134, ret;
int i, nr_entries = 134;
void *val; void *val;
MA_STATE(mas, mt, 0, 0); MA_STATE(mas, &mt, 0, 0);
MA_STATE(newmas, mt, 0, 0); MA_STATE(newmas, &newmt, 0, 0);
struct rw_semaphore newmt_lock; struct rw_semaphore mt_lock, newmt_lock;
init_rwsem(&mt_lock);
init_rwsem(&newmt_lock); init_rwsem(&newmt_lock);
for (i = 0; i <= nr_entries; i++) mt_init_flags(&mt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
mtree_store_range(mt, i*10, i*10 + 5, mt_set_external_lock(&mt, &mt_lock);
xa_mk_value(i), GFP_KERNEL);
mt_set_non_kernel(99999);
mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN); mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
mt_set_external_lock(&newmt, &newmt_lock); mt_set_external_lock(&newmt, &newmt_lock);
newmas.tree = &newmt;
mas_reset(&newmas); down_write(&mt_lock);
mas_reset(&mas); for (i = 0; i <= nr_entries; i++) {
down_write(&newmt_lock); mas_set_range(&mas, i*10, i*10 + 5);
mas.index = 0; mas_store_gfp(&mas, xa_mk_value(i), GFP_KERNEL);
mas.last = 0; }
if (mas_expected_entries(&newmas, nr_entries)) {
down_write_nested(&newmt_lock, SINGLE_DEPTH_NESTING);
ret = __mt_dup(&mt, &newmt, GFP_KERNEL);
if (ret) {
pr_err("OOM!"); pr_err("OOM!");
BUG_ON(1); BUG_ON(1);
} }
rcu_read_lock();
mas_for_each(&mas, val, ULONG_MAX) { mas_set(&newmas, 0);
newmas.index = mas.index; mas_for_each(&newmas, val, ULONG_MAX)
newmas.last = mas.last;
mas_store(&newmas, val); mas_store(&newmas, val);
}
rcu_read_unlock();
mas_destroy(&newmas); mas_destroy(&newmas);
mas_destroy(&mas);
mt_validate(&newmt); mt_validate(&newmt);
mt_set_non_kernel(0);
__mt_destroy(&newmt); __mt_destroy(&newmt);
__mt_destroy(&mt);
up_write(&newmt_lock); up_write(&newmt_lock);
up_write(&mt_lock);
} }
static noinline void __init check_iteration(struct maple_tree *mt) static noinline void __init check_iteration(struct maple_tree *mt)
...@@ -1977,49 +1978,51 @@ static noinline void __init check_mas_store_gfp(struct maple_tree *mt) ...@@ -1977,49 +1978,51 @@ static noinline void __init check_mas_store_gfp(struct maple_tree *mt)
} }
#if defined(BENCH_FORK) #if defined(BENCH_FORK)
static noinline void __init bench_forking(struct maple_tree *mt) static noinline void __init bench_forking(void)
{ {
struct maple_tree mt, newmt;
struct maple_tree newmt; int i, nr_entries = 134, nr_fork = 80000, ret;
int i, nr_entries = 134, nr_fork = 80000;
void *val; void *val;
MA_STATE(mas, mt, 0, 0); MA_STATE(mas, &mt, 0, 0);
MA_STATE(newmas, mt, 0, 0); MA_STATE(newmas, &newmt, 0, 0);
struct rw_semaphore newmt_lock; struct rw_semaphore mt_lock, newmt_lock;
init_rwsem(&mt_lock);
init_rwsem(&newmt_lock); init_rwsem(&newmt_lock);
mt_set_external_lock(&newmt, &newmt_lock);
for (i = 0; i <= nr_entries; i++) mt_init_flags(&mt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
mtree_store_range(mt, i*10, i*10 + 5, mt_set_external_lock(&mt, &mt_lock);
xa_mk_value(i), GFP_KERNEL);
down_write(&mt_lock);
for (i = 0; i <= nr_entries; i++) {
mas_set_range(&mas, i*10, i*10 + 5);
mas_store_gfp(&mas, xa_mk_value(i), GFP_KERNEL);
}
for (i = 0; i < nr_fork; i++) { for (i = 0; i < nr_fork; i++) {
mt_set_non_kernel(99999); mt_init_flags(&newmt,
mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE); MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
newmas.tree = &newmt; mt_set_external_lock(&newmt, &newmt_lock);
mas_reset(&newmas);
mas_reset(&mas); down_write_nested(&newmt_lock, SINGLE_DEPTH_NESTING);
mas.index = 0; ret = __mt_dup(&mt, &newmt, GFP_KERNEL);
mas.last = 0; if (ret) {
rcu_read_lock(); pr_err("OOM!");
down_write(&newmt_lock);
if (mas_expected_entries(&newmas, nr_entries)) {
printk("OOM!");
BUG_ON(1); BUG_ON(1);
} }
mas_for_each(&mas, val, ULONG_MAX) {
newmas.index = mas.index; mas_set(&newmas, 0);
newmas.last = mas.last; mas_for_each(&newmas, val, ULONG_MAX)
mas_store(&newmas, val); mas_store(&newmas, val);
}
mas_destroy(&newmas); mas_destroy(&newmas);
rcu_read_unlock();
mt_validate(&newmt); mt_validate(&newmt);
mt_set_non_kernel(0);
__mt_destroy(&newmt); __mt_destroy(&newmt);
up_write(&newmt_lock); up_write(&newmt_lock);
} }
mas_destroy(&mas);
__mt_destroy(&mt);
up_write(&mt_lock);
} }
#endif #endif
...@@ -3615,9 +3618,7 @@ static int __init maple_tree_seed(void) ...@@ -3615,9 +3618,7 @@ static int __init maple_tree_seed(void)
#endif #endif
#if defined(BENCH_FORK) #if defined(BENCH_FORK)
#define BENCH #define BENCH
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); bench_forking();
bench_forking(&tree);
mtree_destroy(&tree);
goto skip; goto skip;
#endif #endif
#if defined(BENCH_MT_FOR_EACH) #if defined(BENCH_MT_FOR_EACH)
...@@ -3650,9 +3651,7 @@ static int __init maple_tree_seed(void) ...@@ -3650,9 +3651,7 @@ static int __init maple_tree_seed(void)
check_iteration(&tree); check_iteration(&tree);
mtree_destroy(&tree); mtree_destroy(&tree);
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); check_forking();
check_forking(&tree);
mtree_destroy(&tree);
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
check_mas_store_gfp(&tree); check_mas_store_gfp(&tree);
......
...@@ -37,4 +37,8 @@ static inline int up_write(struct rw_semaphore *sem) ...@@ -37,4 +37,8 @@ static inline int up_write(struct rw_semaphore *sem)
{ {
return pthread_rwlock_unlock(&sem->lock); return pthread_rwlock_unlock(&sem->lock);
} }
#define down_read_nested(sem, subclass) down_read(sem)
#define down_write_nested(sem, subclass) down_write(sem)
#endif /* _TOOLS_RWSEM_H */ #endif /* _TOOLS_RWSEM_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.