Commit 2dac0eae authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Iterator debug code improvements

More aggressively checking iterator invariants, and fixing the resulting
bugs. Also greatly simplifying iter_next() and iter_next_slot() - they
were hyper optimized before, but the optimizations were getting too
brittle.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 3186c80f
......@@ -1665,6 +1665,7 @@ struct bkey_packed *bch2_btree_node_iter_prev_all(struct btree_node_iter *iter,
struct bset_tree *t;
unsigned end = 0;
if (btree_keys_expensive_checks(b))
bch2_btree_node_iter_verify(iter, b);
for_each_bset(b, t) {
......@@ -1700,6 +1701,7 @@ struct bkey_packed *bch2_btree_node_iter_prev_all(struct btree_node_iter *iter,
iter->data[0].k = __btree_node_key_to_offset(b, prev);
iter->data[0].end = end;
if (btree_keys_expensive_checks(b))
bch2_btree_node_iter_verify(iter, b);
return prev;
}
......
This diff is collapsed.
......@@ -96,10 +96,10 @@ __trans_next_iter_with_node(struct btree_trans *trans, struct btree *b,
(_iter)->idx + 1))
#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_btree_iter_verify(struct btree_iter *, struct btree *);
void bch2_btree_trans_verify_iters(struct btree_trans *, struct btree *);
void bch2_btree_trans_verify_locks(struct btree_trans *);
#else
static inline void bch2_btree_iter_verify(struct btree_iter *iter,
static inline void bch2_btree_trans_verify_iters(struct btree_trans *trans,
struct btree *b) {}
static inline void bch2_btree_trans_verify_locks(struct btree_trans *iter) {}
#endif
......@@ -154,7 +154,7 @@ bch2_btree_iter_traverse(struct btree_iter *iter)
int bch2_btree_iter_traverse_all(struct btree_trans *);
struct btree *bch2_btree_iter_peek_node(struct btree_iter *);
struct btree *bch2_btree_iter_next_node(struct btree_iter *, unsigned);
struct btree *bch2_btree_iter_next_node(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_next(struct btree_iter *);
......@@ -231,7 +231,7 @@ static inline int bch2_trans_cond_resched(struct btree_trans *trans)
_start, _locks_want, _depth, _flags), \
_b = bch2_btree_iter_peek_node(_iter); \
(_b); \
(_b) = bch2_btree_iter_next_node(_iter, _depth))
(_b) = bch2_btree_iter_next_node(_iter))
#define for_each_btree_node(_trans, _iter, _btree_id, _start, \
_flags, _b) \
......
......@@ -238,9 +238,10 @@ struct btree_iter {
u16 flags;
u8 idx;
enum btree_iter_uptodate uptodate:4;
enum btree_id btree_id:4;
enum btree_iter_uptodate uptodate:4;
unsigned level:4,
min_depth:4,
locks_want:4,
nodes_locked:4,
nodes_intent_locked:4;
......
......@@ -1557,7 +1557,7 @@ bch2_btree_insert_keys_interior(struct btree_update *as, struct btree *b,
trans_for_each_iter_with_node(iter->trans, b, linked)
bch2_btree_node_iter_peek(&linked->l[b->c.level].iter, b);
bch2_btree_iter_verify(iter, b);
bch2_btree_trans_verify_iters(iter->trans, b);
}
/**
......@@ -1827,7 +1827,7 @@ void __bch2_foreground_maybe_merge(struct bch_fs *c,
bch2_btree_iter_node_replace(iter, n);
bch2_btree_iter_verify(iter, n);
bch2_btree_trans_verify_iters(trans, n);
bch2_btree_node_free_inmem(c, b, iter);
bch2_btree_node_free_inmem(c, m, iter);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment