Commit 3aef2cad authored by Davidlohr Bueso, committed by Arnaldo Carvalho de Melo

tools: Update rbtree implementation

There have been a number of changes in the kernel's rbtree
implementation, including loose lockless searching guarantees and
rb_root_cached, which later patches will use as an optimization.
Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Link: http://lkml.kernel.org/r/20181206191819.30182-2-dave@stgolabs.net
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent 95420d33
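
As background for the diff below: rb_root_cached pairs a plain rb_root with a pointer to the leftmost (smallest) node, so that finding the first node becomes an O(1) lookup. The following is a minimal usage sketch only; struct event, its timestamp key, and the helper names are hypothetical, while rb_root_cached, RB_ROOT_CACHED, rb_insert_color_cached(), rb_first_cached() and rb_erase_cached() are the identifiers this patch introduces into the tools headers.

#include <linux/rbtree.h>
#include <linux/types.h>

/* Hypothetical user type, keyed by timestamp. */
struct event {
        struct rb_node  rb_node;
        u64             timestamp;
};

static struct rb_root_cached events = RB_ROOT_CACHED;

static void event_insert(struct event *ev)
{
        struct rb_node **p = &events.rb_root.rb_node, *parent = NULL;
        bool leftmost = true;

        /* Standard descent; track whether we stayed on the left spine. */
        while (*p) {
                struct event *e = rb_entry(*p, struct event, rb_node);

                parent = *p;
                if (ev->timestamp < e->timestamp) {
                        p = &(*p)->rb_left;
                } else {
                        p = &(*p)->rb_right;
                        leftmost = false;
                }
        }

        rb_link_node(&ev->rb_node, parent, p);
        /* The bool tells the tree whether rb_leftmost must be updated. */
        rb_insert_color_cached(&ev->rb_node, &events, leftmost);
}

/* O(1), courtesy of the cached leftmost pointer. */
static struct event *event_oldest(void)
{
        struct rb_node *node = rb_first_cached(&events);

        return node ? rb_entry(node, struct event, rb_node) : NULL;
}

static void event_remove(struct event *ev)
{
        rb_erase_cached(&ev->rb_node, &events);
}

The only difference from a plain rbtree user is tracking whether the descent stayed on the left spine and passing that bool to rb_insert_color_cached().
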
@@ -43,13 +43,28 @@ struct rb_root {
 	struct rb_node *rb_node;
 };
 
+/*
+ * Leftmost-cached rbtrees.
+ *
+ * We do not cache the rightmost node based on footprint
+ * size vs number of potential users that could benefit
+ * from O(1) rb_last(). Just not worth it, users that want
+ * this feature can always implement the logic explicitly.
+ * Furthermore, users that want to cache both pointers may
+ * find it a bit asymmetric, but that's ok.
+ */
+struct rb_root_cached {
+	struct rb_root rb_root;
+	struct rb_node *rb_leftmost;
+};
+
 #define rb_parent(r)   ((struct rb_node *)((r)->__rb_parent_color & ~3))
 
 #define RB_ROOT	(struct rb_root) { NULL, }
+#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL }
 #define	rb_entry(ptr, type, member) container_of(ptr, type, member)
 
-#define RB_EMPTY_ROOT(root)  ((root)->rb_node == NULL)
+#define RB_EMPTY_ROOT(root)  (READ_ONCE((root)->rb_node) == NULL)
 
 /* 'empty' nodes are nodes that are known not to be inserted in an rbtree */
 #define RB_EMPTY_NODE(node)  \
@@ -68,6 +83,12 @@ extern struct rb_node *rb_prev(const struct rb_node *);
 extern struct rb_node *rb_first(const struct rb_root *);
 extern struct rb_node *rb_last(const struct rb_root *);
 
+extern void rb_insert_color_cached(struct rb_node *,
+				   struct rb_root_cached *, bool);
+extern void rb_erase_cached(struct rb_node *node, struct rb_root_cached *);
+/* Same as rb_first(), but O(1) */
+#define rb_first_cached(root) (root)->rb_leftmost
+
 /* Postorder iteration - always visit the parent after its children */
 extern struct rb_node *rb_first_postorder(const struct rb_root *);
 extern struct rb_node *rb_next_postorder(const struct rb_node *);
@@ -75,6 +96,8 @@ extern struct rb_node *rb_next_postorder(const struct rb_node *);
 /* Fast replacement of a single node without remove/rebalance/add/rebalance */
 extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
 			    struct rb_root *root);
+extern void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new,
+				   struct rb_root_cached *root);
 
 static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
 				struct rb_node **rb_link)
@@ -90,12 +113,29 @@ static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
 		____ptr ? rb_entry(____ptr, type, member) : NULL; \
 	})
 
-/*
- * Handy for checking that we are not deleting an entry that is
- * already in a list, found in block/{blk-throttle,cfq-iosched}.c,
- * probably should be moved to lib/rbtree.c...
+/**
+ * rbtree_postorder_for_each_entry_safe - iterate in post-order over rb_root of
+ * given type allowing the backing memory of @pos to be invalidated
+ *
+ * @pos:    the 'type *' to use as a loop cursor.
+ * @n:      another 'type *' to use as temporary storage
+ * @root:   'rb_root *' of the rbtree.
+ * @field:  the name of the rb_node field within 'type'.
+ *
+ * rbtree_postorder_for_each_entry_safe() provides a similar guarantee as
+ * list_for_each_entry_safe() and allows the iteration to continue independent
+ * of changes to @pos by the body of the loop.
+ *
+ * Note, however, that it cannot handle other modifications that re-order the
+ * rbtree it is iterating over. This includes calling rb_erase() on @pos, as
+ * rb_erase() may rebalance the tree, causing us to miss some nodes.
  */
+#define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \
+	for (pos = rb_entry_safe(rb_first_postorder(root), typeof(*pos), field); \
+	     pos && ({ n = rb_entry_safe(rb_next_postorder(&pos->field), \
+			typeof(*pos), field); 1; }); \
+	     pos = n)
+
 static inline void rb_erase_init(struct rb_node *n, struct rb_root *root)
 {
 	rb_erase(n, root);
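
The hunks above update the plain rbtree header; the remaining hunks touch the augmented-rbtree header (rbtree_augmented.h, per the include guard at the end of the diff). As an aside, the post-order iterator documented above is the usual way to tear a tree down in one pass. A sketch follows, reusing the hypothetical struct event from the earlier example and assuming the entries were heap-allocated:

#include <stdlib.h>
#include <linux/rbtree.h>

/* Free every entry of the tree in one pass. Post-order visits children
 * before their parent, and @n is fetched before the loop body runs, so
 * freeing @pos is safe as long as nothing rebalances the tree meanwhile. */
static void event_tree_destroy(struct rb_root_cached *events)
{
        struct event *pos, *n;

        rbtree_postorder_for_each_entry_safe(pos, n, &events->rb_root, rb_node)
                free(pos);

        *events = RB_ROOT_CACHED;       /* also clears the cached leftmost */
}
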
@@ -44,7 +44,9 @@ struct rb_augment_callbacks {
 	void (*rotate)(struct rb_node *old, struct rb_node *new);
 };
 
-extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
+extern void __rb_insert_augmented(struct rb_node *node,
+				  struct rb_root *root,
+				  bool newleft, struct rb_node **leftmost,
 	void (*augment_rotate)(struct rb_node *old, struct rb_node *new));
 
 /*
  * Fixup the rbtree and update the augmented information when rebalancing.
@@ -60,7 +62,16 @@ static inline void
 rb_insert_augmented(struct rb_node *node, struct rb_root *root,
 		    const struct rb_augment_callbacks *augment)
 {
-	__rb_insert_augmented(node, root, augment->rotate);
+	__rb_insert_augmented(node, root, false, NULL, augment->rotate);
+}
+
+static inline void
+rb_insert_augmented_cached(struct rb_node *node,
+			   struct rb_root_cached *root, bool newleft,
+			   const struct rb_augment_callbacks *augment)
+{
+	__rb_insert_augmented(node, &root->rb_root,
+			      newleft, &root->rb_leftmost, augment->rotate);
 }
 
 #define RB_DECLARE_CALLBACKS(rbstatic, rbname, rbstruct, rbfield,	\
@@ -93,7 +104,9 @@ rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
 	old->rbaugmented = rbcompute(old);				\
 }									\
 rbstatic const struct rb_augment_callbacks rbname = {			\
-	rbname ## _propagate, rbname ## _copy, rbname ## _rotate	\
+	.propagate = rbname ## _propagate,				\
+	.copy = rbname ## _copy,					\
+	.rotate = rbname ## _rotate					\
 };
@@ -126,11 +139,11 @@ __rb_change_child(struct rb_node *old, struct rb_node *new,
 {
 	if (parent) {
 		if (parent->rb_left == old)
-			parent->rb_left = new;
+			WRITE_ONCE(parent->rb_left, new);
 		else
-			parent->rb_right = new;
+			WRITE_ONCE(parent->rb_right, new);
 	} else
-		root->rb_node = new;
+		WRITE_ONCE(root->rb_node, new);
 }
 
 extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
@@ -138,12 +151,17 @@ extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
 static __always_inline struct rb_node *
 __rb_erase_augmented(struct rb_node *node, struct rb_root *root,
+		     struct rb_node **leftmost,
 		     const struct rb_augment_callbacks *augment)
 {
-	struct rb_node *child = node->rb_right, *tmp = node->rb_left;
+	struct rb_node *child = node->rb_right;
+	struct rb_node *tmp = node->rb_left;
 	struct rb_node *parent, *rebalance;
 	unsigned long pc;
 
+	if (leftmost && node == *leftmost)
+		*leftmost = rb_next(node);
+
 	if (!tmp) {
 		/*
 		 * Case 1: node to erase has no more than 1 child (easy!)
@@ -170,6 +188,7 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root,
 		tmp = parent;
 	} else {
 		struct rb_node *successor = child, *child2;
+
 		tmp = child->rb_left;
 		if (!tmp) {
 			/*
@@ -183,6 +202,7 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root,
 			 */
 			parent = successor;
 			child2 = successor->rb_right;
+
 			augment->copy(node, successor);
 		} else {
 			/*
@@ -204,19 +224,23 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root,
 				successor = tmp;
 				tmp = tmp->rb_left;
 			} while (tmp);
-			parent->rb_left = child2 = successor->rb_right;
-			successor->rb_right = child;
+			child2 = successor->rb_right;
+			WRITE_ONCE(parent->rb_left, child2);
+			WRITE_ONCE(successor->rb_right, child);
 			rb_set_parent(child, successor);
+
 			augment->copy(node, successor);
 			augment->propagate(parent, successor);
 		}
 
-		successor->rb_left = tmp = node->rb_left;
+		tmp = node->rb_left;
+		WRITE_ONCE(successor->rb_left, tmp);
 		rb_set_parent(tmp, successor);
 
 		pc = node->__rb_parent_color;
 		tmp = __rb_parent(pc);
 		__rb_change_child(node, successor, tmp, root);
+
 		if (child2) {
 			successor->__rb_parent_color = pc;
 			rb_set_parent_color(child2, parent, RB_BLACK);
@@ -237,9 +261,21 @@ static __always_inline void
 rb_erase_augmented(struct rb_node *node, struct rb_root *root,
 		   const struct rb_augment_callbacks *augment)
 {
-	struct rb_node *rebalance = __rb_erase_augmented(node, root, augment);
+	struct rb_node *rebalance = __rb_erase_augmented(node, root,
+							 NULL, augment);
 	if (rebalance)
 		__rb_erase_color(rebalance, root, augment->rotate);
 }
 
+static __always_inline void
+rb_erase_augmented_cached(struct rb_node *node, struct rb_root_cached *root,
+			  const struct rb_augment_callbacks *augment)
+{
+	struct rb_node *rebalance = __rb_erase_augmented(node, &root->rb_root,
+							 &root->rb_leftmost,
+							 augment);
+	if (rebalance)
+		__rb_erase_color(rebalance, &root->rb_root, augment->rotate);
+}
+
 #endif	/* _TOOLS_LINUX_RBTREE_AUGMENTED_H */
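
Finally, a hedged sketch of how an augmented-tree user might adopt the new cached entry points. The callbacks instance (my_callbacks here) stands in for whatever the user already defines, for instance via RB_DECLARE_CALLBACKS(); struct range and the helper names are made up, and maintenance of the augmented per-node data during the descent is elided for brevity.

#include <linux/rbtree_augmented.h>
#include <linux/types.h>

/* Hypothetical augmented node type; the augmented field and the callbacks
 * that maintain it are assumed to be defined elsewhere by the user. */
struct range {
        struct rb_node  rb;
        u64             start;
};

extern const struct rb_augment_callbacks my_callbacks;

static void range_insert(struct range *new, struct rb_root_cached *root)
{
        struct rb_node **link = &root->rb_root.rb_node, *parent = NULL;
        bool leftmost = true;

        while (*link) {
                struct range *r = rb_entry(*link, struct range, rb);

                parent = *link;
                if (new->start < r->start) {
                        link = &parent->rb_left;
                } else {
                        link = &parent->rb_right;
                        leftmost = false;
                }
        }

        rb_link_node(&new->rb, parent, link);
        /* newleft tells __rb_insert_augmented() whether to update rb_leftmost. */
        rb_insert_augmented_cached(&new->rb, root, leftmost, &my_callbacks);
}

static void range_remove(struct range *r, struct rb_root_cached *root)
{
        rb_erase_augmented_cached(&r->rb, root, &my_callbacks);
}
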