Commit 24662dec authored by Liam R. Howlett, committed by Andrew Morton

maple_tree: don't find node end in mtree_lookup_walk()

Since the pivots are now reliably set, the optimized loop no longer needs to
find the node end.  The redundant dead-node check can also be avoided: there
is no danger of using the wrong pivot, because in the case of a dead node the
result is thrown out by the later check anyway.
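
For context, the resulting walk looks roughly like the sketch below.  It is
condensed from the diff in this commit plus the unchanged tail of the loop
(the slot read and the "later check" the paragraph above refers to); treat it
as an illustration of the shape of the code, not the exact upstream body:

	do {
		node = mte_to_node(next);
		type = mte_node_type(next);
		pivots = ma_pivots(node, type);
		end = mt_pivots[type];	/* per-type pivot count, no ma_data_end() */
		offset = 0;
		do {
			if (pivots[offset] >= mas->index)
				break;
		} while (++offset < end);

		slots = ma_slots(node, type);
		next = mt_slot(mas->tree, slots, offset);
		/*
		 * The later check: if the node died during the walk, the
		 * result is discarded here, so reading a possibly stale
		 * pivot above was harmless.
		 */
		if (unlikely(ma_dead_node(node)))
			goto dead_node;
	} while (!ma_is_leaf(type));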

This patch also adds a benchmark test for the function to the maple tree
test framework.  The benchmark shows an average performance increase of
5.98% over 3 runs with this commit.
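
The benchmark (see the lib/test_maple_tree.c hunks below) populates the tree
with 250 small ranges, then issues 550 million mtree_load() calls on a fixed
index.  It is presumably run like the other BENCH_* cases: uncomment the
define at the top of lib/test_maple_tree.c so maple_tree_seed() executes it:

	/* lib/test_maple_tree.c: enable the benchmark run in maple_tree_seed() */
	#define BENCH_LOAD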

Link: https://lkml.kernel.org/r/20231101171629.3612299-12-Liam.Howlett@oracle.com
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Peng Zhang <zhangpeng.00@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 0de56e38
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -3742,23 +3742,17 @@ static inline void *mtree_lookup_walk(struct ma_state *mas)
 	enum maple_type type;
 	void __rcu **slots;
 	unsigned char end;
-	unsigned long max;
 
 	next = mas->node;
-	max = ULONG_MAX;
 	do {
-		offset = 0;
 		node = mte_to_node(next);
 		type = mte_node_type(next);
 		pivots = ma_pivots(node, type);
-		end = ma_data_end(node, type, pivots, max);
-		if (unlikely(ma_dead_node(node)))
-			goto dead_node;
+		end = mt_pivots[type];
+		offset = 0;
 		do {
-			if (pivots[offset] >= mas->index) {
-				max = pivots[offset];
+			if (pivots[offset] >= mas->index)
 				break;
-			}
 		} while (++offset < end);
 
 		slots = ma_slots(node, type);
...
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -43,6 +43,7 @@ atomic_t maple_tree_tests_passed;
 /* #define BENCH_NODE_STORE */
 /* #define BENCH_AWALK */
 /* #define BENCH_WALK */
+/* #define BENCH_LOAD */
 /* #define BENCH_MT_FOR_EACH */
 /* #define BENCH_FORK */
 /* #define BENCH_MAS_FOR_EACH */
@@ -1754,6 +1755,19 @@ static noinline void __init bench_walk(struct maple_tree *mt)
 }
 #endif
 
+#if defined(BENCH_LOAD)
+static noinline void __init bench_load(struct maple_tree *mt)
+{
+	int i, max = 2500, count = 550000000;
+
+	for (i = 0; i < max; i += 10)
+		mtree_store_range(mt, i, i + 5, xa_mk_value(i), GFP_KERNEL);
+
+	for (i = 0; i < count; i++)
+		mtree_load(mt, 1470);
+}
+#endif
+
 #if defined(BENCH_MT_FOR_EACH)
 static noinline void __init bench_mt_for_each(struct maple_tree *mt)
 {
@@ -3623,6 +3637,13 @@ static int __init maple_tree_seed(void)
 	mtree_destroy(&tree);
 	goto skip;
 #endif
+#if defined(BENCH_LOAD)
+#define BENCH
+	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+	bench_load(&tree);
+	mtree_destroy(&tree);
+	goto skip;
+#endif
 #if defined(BENCH_FORK)
 #define BENCH
 	bench_forking();
...