Commit ccf5a109 authored by Kent Overstreet; committed by Kent Overstreet

bcachefs: bch2_btree_iter_peek_prev()

Last of the basic operations for iterating forwards and backwards over
the btree: we now have
 - peek(),	returns key >= iter->pos
 - next(),	returns key >  iter->pos
 - peek_prev(),	returns key <= iter->pos
 - prev(),	returns key < iter->pos
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 6cc3535d
......@@ -686,6 +686,13 @@ static inline struct bkey_s_c __btree_iter_peek(struct btree_iter *iter,
bch2_btree_node_iter_peek(&l->iter, l->b));
}
/*
 * Step the level-0 node iterator back one key and return it unpacked,
 * stashing the unpacked key in iter->k.  Returns a null bkey_s_c when
 * there is no previous key in this node.
 */
static inline struct bkey_s_c __btree_iter_prev(struct btree_iter *iter,
						struct btree_iter_level *l)
{
	struct bkey_packed *prev_key;

	prev_key = bch2_btree_node_iter_prev(&l->iter, l->b);
	return __btree_iter_unpack(iter, l, &iter->k, prev_key);
}
static inline bool btree_iter_advance_to_pos(struct btree_iter *iter,
struct btree_iter_level *l,
int max_advance)
......@@ -1446,51 +1453,79 @@ struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
return k;
}
struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
/**
 * bch2_btree_iter_peek_prev: returns first key less than or equal to
 * iterator's current position
 */
/*
 * NOTE(review): this region is a unified-diff excerpt whose +/- markers were
 * lost in extraction -- removed (old) and added (new) lines are interleaved
 * below, so it does not compile as-is.  The annotations mark the apparent
 * old-vs-new split; verify against the upstream commit before relying on it.
 */
struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
{
struct btree_iter_level *l = &iter->l[0];
struct bkey_packed *p;	/* presumably from the removed code -- the new loop uses only k */
struct bkey_s_c k;
int ret;
bch2_btree_iter_checks(iter, BTREE_ITER_KEYS);	/* debug-mode sanity assertions */
/* apparent OLD (removed) fast-path: use a forward peek to revalidate the iterator */
if (unlikely(iter->uptodate != BTREE_ITER_UPTODATE)) {
k = bch2_btree_iter_peek(iter);
if (IS_ERR(k.k))
return k;
}
/* apparent NEW fast-path: iterator already positioned, reuse cached key */
if (iter->uptodate == BTREE_ITER_UPTODATE)
return btree_iter_peek_uptodate(iter);
while (1) {
/* apparent OLD loop body: step the node iterator back one key */
p = bch2_btree_node_iter_prev(&l->iter, l->b);
if (likely(p))
break;
/* apparent OLD: leaf exhausted backwards -- walk to predecessor leaf by hand */
iter->pos = l->b->data->min_key;
if (!bkey_cmp(iter->pos, POS_MIN))
return bkey_s_c_null;	/* already at POS_MIN: nothing precedes it */
bch2_btree_iter_set_pos(iter,
btree_type_predecessor(iter->btree_id, iter->pos));
/* apparent NEW loop body: (re)traverse to the leaf covering iter->pos */
ret = bch2_btree_iter_traverse(iter);
if (unlikely(ret))
return bkey_s_c_err(ret);
p = bch2_btree_node_iter_peek(&l->iter, l->b);
if (p)
/* NEW: try the key at the current position first ... */
k = __btree_iter_peek(iter, l);
if (!k.k ||
bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
/* ... and step backwards when that key starts past iter->pos */
k = __btree_iter_prev(iter, l);
if (likely(k.k))
break;
}
k = __btree_iter_unpack(iter, l, &iter->k, p);	/* apparent OLD epilogue */
/* NEW: nothing found in this leaf -- move to the previous leaf, or give up */
if (!btree_iter_set_pos_to_prev_leaf(iter))
return bkey_s_c_null;
}
/* result must satisfy the peek_prev contract: key starts at or before iter->pos */
EBUG_ON(bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0);
iter->pos = bkey_start_pos(k.k);
iter->uptodate = BTREE_ITER_UPTODATE;	/* cache the result for the fast path */
return k;
}
/**
 * bch2_btree_iter_prev: returns first key less than iterator's current
 * position
 */
struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
{
struct btree_iter_level *l = &iter->l[0];
struct bkey_s_c k;
bch2_btree_iter_checks(iter, BTREE_ITER_KEYS);	/* debug-mode sanity assertions */
/* slow path: iterator not positioned -- back pos up by one and re-peek */
if (unlikely(iter->uptodate != BTREE_ITER_UPTODATE)) {
/*
 * XXX: when we just need to relock we should be able to avoid
 * calling traverse, but we need to kill BTREE_ITER_NEED_PEEK
 * for that to work
 */
iter->pos = btree_type_predecessor(iter->btree_id,
iter->pos);
iter->uptodate = BTREE_ITER_NEED_TRAVERSE;
/* peek_prev at the predecessor pos yields a key strictly less than the original pos */
return bch2_btree_iter_peek_prev(iter);
}
/* fast path: step the cached node iterator back one key */
k = __btree_iter_prev(iter, l);
if (unlikely(!k.k))
/*
 * NOTE(review): bch2_btree_iter_peek() here looks suspect for a
 * backwards iterator -- a forward peek can return a key >= pos,
 * contradicting the "first key less than pos" contract documented
 * above, and it bypasses the EBUG_ON below.  This may be a
 * diff-extraction artifact or a latent bug; verify against the
 * upstream commit.
 */
return btree_iter_set_pos_to_prev_leaf(iter)
? bch2_btree_iter_peek(iter)
: bkey_s_c_null;
/* prev contract is strict: returned key must start before iter->pos */
EBUG_ON(bkey_cmp(bkey_start_pos(k.k), iter->pos) >= 0);
iter->pos = bkey_start_pos(k.k);
return k;
}
static inline struct bkey_s_c
__bch2_btree_iter_peek_slot_extents(struct btree_iter *iter)
{
......
......@@ -151,6 +151,8 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *, unsigned);
struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_next(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment