Commit dcf141b9 authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Fix spurious transaction restarts

The check for whether locking a btree node would deadlock was wrong - we
have to check that interior nodes are locked before their descendants,
but this check was wrong when considering cached vs. non-cached
iterators.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent a301dc38
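
Below is a minimal, hedged C sketch of the lock-ordering check this commit
fixes. It is not the bcachefs code: struct sketch_iter, highest_locked_level()
and lock_order_violated() are invented for illustration, standing in for the
iterator state and the __fls()-based check in __bch2_btree_node_lock().

/*
 * Illustrative sketch only -- not bcachefs code.  All names below are
 * invented; in the kernel the state lives in struct btree_iter and the
 * check uses __fls(linked->nodes_locked).
 */
#include <stdbool.h>

struct sketch_iter {
	unsigned	btree_id;	/* which btree this iterator walks */
	bool		is_cached;	/* key-cache iterator vs. regular btree iterator */
	unsigned	nodes_locked;	/* bitmask: bit n set == node at level n locked */
};

/* Index of the highest set bit, standing in for the kernel's __fls(). */
static unsigned highest_locked_level(unsigned nodes_locked)
{
	return 31 - (unsigned) __builtin_clz(nodes_locked);
}

/*
 * Interior nodes must be locked before their descendants: if @linked only
 * holds locks below the @level that @iter is about to lock, taking the lock
 * would violate the ordering.  The fix adds the is_cached comparison so that
 * cached and non-cached iterators on the same btree are no longer compared
 * against each other and no longer cause spurious restarts.
 */
static bool lock_order_violated(const struct sketch_iter *linked,
				const struct sketch_iter *iter,
				unsigned level)
{
	return linked->nodes_locked &&
	       linked->btree_id == iter->btree_id &&
	       linked->is_cached == iter->is_cached &&
	       level > highest_locked_level(linked->nodes_locked);
}

In the actual change, the first hunk below adds the cached vs. non-cached
comparison to the deadlock check, and the second hunk adds the
btree_iter_is_cached() helper it uses.
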
@@ -244,6 +244,7 @@ bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
 		 * we're about to lock, it must have the ancestors locked too:
 		 */
 		if (linked->btree_id == iter->btree_id &&
+		    btree_iter_is_cached(linked) == btree_iter_is_cached(iter) &&
 		    level > __fls(linked->nodes_locked)) {
 			if (!(trans->nounlock)) {
 				linked->locks_want =
@@ -283,6 +283,11 @@ btree_iter_type(const struct btree_iter *iter)
 	return iter->flags & BTREE_ITER_TYPE;
 }
 
+static inline bool btree_iter_is_cached(const struct btree_iter *iter)
+{
+	return btree_iter_type(iter) == BTREE_ITER_CACHED;
+}
+
 static inline struct btree_iter_level *iter_l(struct btree_iter *iter)
 {
 	return iter->l + iter->level;