maple_tree: don't find node end in mtree_lookup_walk()
Since the pivot being set is now reliable, the optimized loop no longer
needs to find the node end.  The redundant check for a dead node can also
be avoided as there is no danger of using the wrong pivot since the
results will be thrown out in the case of a dead node by the later check.

This patch also adds a benchmark test for the function to the maple tree
test framework.  The benchmark shows an average performance increase of
5.98% over 3 runs with this commit.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Liam R. Howlett <[email protected]>
Cc: Peng Zhang <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
howlett authored and akpm00 committed Dec 12, 2023
1 parent 0de56e3 commit 24662de
Showing 2 changed files with 24 additions and 9 deletions.
lib/maple_tree.c (12 changes: 3 additions & 9 deletions)
@@ -3742,23 +3742,17 @@ static inline void *mtree_lookup_walk(struct ma_state *mas)
         enum maple_type type;
         void __rcu **slots;
         unsigned char end;
-        unsigned long max;
 
         next = mas->node;
-        max = ULONG_MAX;
         do {
-                offset = 0;
                 node = mte_to_node(next);
                 type = mte_node_type(next);
                 pivots = ma_pivots(node, type);
-                end = ma_data_end(node, type, pivots, max);
-                if (unlikely(ma_dead_node(node)))
-                        goto dead_node;
+                end = mt_pivots[type];
+                offset = 0;
                 do {
-                        if (pivots[offset] >= mas->index) {
-                                max = pivots[offset];
+                        if (pivots[offset] >= mas->index)
                                 break;
-                        }
                 } while (++offset < end);
 
                 slots = ma_slots(node, type);
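
For orientation, here is a rough sketch of how the whole of mtree_lookup_walk() reads with this change applied. It is hand-reconstructed rather than copied from the kernel tree, so treat the parts not shown in the hunk above (the slot read, the leaf test, and the placement of the dead-node recheck that the commit message calls "the later check") as assumptions:

static inline void *mtree_lookup_walk(struct ma_state *mas)
{
        unsigned long *pivots;
        unsigned char offset;
        struct maple_node *node;
        struct maple_enode *next;
        enum maple_type type;
        void __rcu **slots;
        unsigned char end;

        next = mas->node;
        do {
                /* Decode the encoded node and grab its pivot array. */
                node = mte_to_node(next);
                type = mte_node_type(next);
                pivots = ma_pivots(node, type);

                /*
                 * Scan up to the maximum pivot count for this node type
                 * instead of computing the node end with ma_data_end():
                 * unset pivots are now reliably written, so the first
                 * pivot >= mas->index picks the slot to descend into.
                 */
                end = mt_pivots[type];
                offset = 0;
                do {
                        if (pivots[offset] >= mas->index)
                                break;
                } while (++offset < end);

                slots = ma_slots(node, type);
                next = mt_slot(mas->tree, slots, offset);

                /*
                 * Assumed placement of the "later check": if the node died
                 * while it was being read, whatever pivot/slot was picked
                 * above is thrown away here.
                 */
                if (unlikely(ma_dead_node(node)))
                        goto dead_node;
        } while (!ma_is_leaf(type));

        return (void *)next;

dead_node:
        mas_reset(mas);
        return NULL;
}

The point of the change is visible in the inner loop: scanning to mt_pivots[type] trades a few possibly-extra pivot comparisons for not calling ma_data_end() and not validating the node twice, and any value read from a node that died mid-walk is discarded when the later recheck fails.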
lib/test_maple_tree.c (21 changes: 21 additions & 0 deletions)
@@ -43,6 +43,7 @@ atomic_t maple_tree_tests_passed;
 /* #define BENCH_NODE_STORE */
 /* #define BENCH_AWALK */
 /* #define BENCH_WALK */
+/* #define BENCH_LOAD */
 /* #define BENCH_MT_FOR_EACH */
 /* #define BENCH_FORK */
 /* #define BENCH_MAS_FOR_EACH */
@@ -1754,6 +1755,19 @@ static noinline void __init bench_walk(struct maple_tree *mt)
 }
 #endif
 
+#if defined(BENCH_LOAD)
+static noinline void __init bench_load(struct maple_tree *mt)
+{
+        int i, max = 2500, count = 550000000;
+
+        for (i = 0; i < max; i += 10)
+                mtree_store_range(mt, i, i + 5, xa_mk_value(i), GFP_KERNEL);
+
+        for (i = 0; i < count; i++)
+                mtree_load(mt, 1470);
+}
+#endif
+
 #if defined(BENCH_MT_FOR_EACH)
 static noinline void __init bench_mt_for_each(struct maple_tree *mt)
 {
@@ -3623,6 +3637,13 @@ static int __init maple_tree_seed(void)
         mtree_destroy(&tree);
         goto skip;
 #endif
+#if defined(BENCH_LOAD)
+#define BENCH
+        mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+        bench_load(&tree);
+        mtree_destroy(&tree);
+        goto skip;
+#endif
 #if defined(BENCH_FORK)
 #define BENCH
         bench_forking();
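
The benchmark ships disabled: BENCH_LOAD stays commented out at the top of lib/test_maple_tree.c, so running it means flipping that define before building the test (the file is normally built through the kernel's maple tree test config, CONFIG_TEST_MAPLE_TREE; that build path is an assumption about the usual setup rather than something stated in this commit):

        /* lib/test_maple_tree.c: uncomment the stock define to enable the benchmark. */
        #define BENCH_LOAD

With BENCH_LOAD defined, maple_tree_seed() initializes an MT_FLAGS_ALLOC_RANGE tree, calls bench_load() (250 stored ranges, then 550 million mtree_load() lookups of index 1470), destroys the tree, and jumps to skip, bypassing the rest of the test suite just like the other BENCH_* blocks.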
