Commit 647d7b60 authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Fix an assertion in the btree node merge path

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 94c1f4ad
@@ -69,26 +69,6 @@ void __bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
 			&b->lock.state.counter);
 }
 
-/*
- * Lock a btree node if we already have it locked on one of our linked
- * iterators:
- */
-static inline bool btree_node_lock_increment(struct btree_iter *iter,
-					     struct btree *b, unsigned level,
-					     enum btree_node_locked_type want)
-{
-	struct btree_iter *linked;
-
-	for_each_linked_btree_iter(iter, linked)
-		if (linked->l[level].b == b &&
-		    btree_node_locked_type(linked, level) >= want) {
-			six_lock_increment(&b->lock, (enum six_lock_type) want);
-			return true;
-		}
-
-	return false;
-}
-
 bool __bch2_btree_node_relock(struct btree_iter *iter, unsigned level)
 {
 	struct btree *b = btree_iter_node(iter, level);
@@ -190,34 +170,12 @@ bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
 	struct btree_iter *linked;
 	bool ret = true;
 
-	/* Can't have children locked before ancestors: */
-	EBUG_ON(iter->nodes_locked && level > __ffs(iter->nodes_locked));
-
-	/*
-	 * Can't hold any read locks while we block taking an intent lock - see
-	 * below for reasoning, and we should have already dropped any read
-	 * locks in the current iterator
-	 */
-	EBUG_ON(type == SIX_LOCK_intent &&
-		iter->nodes_locked != iter->nodes_intent_locked);
-
-	if (btree_node_lock_increment(iter, b, level, (enum btree_node_locked_type) type))
-		return true;
-
-	/*
-	 * Must lock btree nodes in key order - this case happens when locking
-	 * the prev sibling in btree node merging:
-	 */
-	if (iter->nodes_locked &&
-	    __ffs(iter->nodes_locked) <= level &&
-	    __btree_iter_cmp(iter->btree_id, pos, iter))
-		return false;
-
-	for_each_linked_btree_iter(iter, linked) {
+	/* Check if it's safe to block: */
+	for_each_btree_iter(iter, linked) {
 		if (!linked->nodes_locked)
 			continue;
 
-		/* We have to lock btree nodes in key order: */
+		/* Must lock btree nodes in key order: */
 		if (__btree_iter_cmp(iter->btree_id, pos, linked) < 0)
 			ret = false;
 
@@ -252,9 +210,10 @@ bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
 		if (linked->btree_id == iter->btree_id &&
 		    level > __fls(linked->nodes_locked)) {
 			if (may_drop_locks) {
-				linked->locks_want = max_t(unsigned,
-							   linked->locks_want,
-							   iter->locks_want);
+				linked->locks_want =
+					max(level + 1, max_t(unsigned,
+							     linked->locks_want,
+							     iter->locks_want));
 				btree_iter_get_locks(linked, true);
 			}
 			ret = false;
......
@@ -147,6 +147,26 @@ static inline void btree_node_lock_type(struct bch_fs *c, struct btree *b,
 		__btree_node_lock_type(c, b, type);
 }
 
+/*
+ * Lock a btree node if we already have it locked on one of our linked
+ * iterators:
+ */
+static inline bool btree_node_lock_increment(struct btree_iter *iter,
+					     struct btree *b, unsigned level,
+					     enum btree_node_locked_type want)
+{
+	struct btree_iter *linked;
+
+	for_each_linked_btree_iter(iter, linked)
+		if (linked->l[level].b == b &&
+		    btree_node_locked_type(linked, level) >= want) {
+			six_lock_increment(&b->lock, want);
+			return true;
+		}
+
+	return false;
+}
+
 bool __bch2_btree_node_lock(struct btree *, struct bpos, unsigned,
 			    struct btree_iter *, enum six_lock_type, bool);
 
@@ -159,6 +179,7 @@ static inline bool btree_node_lock(struct btree *b, struct bpos pos,
 	EBUG_ON(level >= BTREE_MAX_DEPTH);
 
 	return likely(six_trylock_type(&b->lock, type)) ||
+		btree_node_lock_increment(iter, b, level, type) ||
 		__bch2_btree_node_lock(b, pos, level, iter,
 				       type, may_drop_locks);
 }
......
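For context, the change above moves btree_node_lock_increment() into the header so that btree_node_lock() can first try to piggyback on a lock already held by a linked iterator before falling into the blocking path in __bch2_btree_node_lock(). Below is a minimal, self-contained sketch of that piggybacking idea; the toy_* names are illustrative stand-ins and not part of the bcachefs API:

#include <stdbool.h>

enum toy_lock_type { TOY_UNLOCKED = 0, TOY_READ = 1, TOY_INTENT = 2 };

struct toy_node {
	int lock_count;                /* number of holds on this node's lock */
};

struct toy_iter {
	struct toy_iter *next_linked;  /* other iterators in the same transaction */
	struct toy_node *node;         /* node this iterator currently points at */
	enum toy_lock_type locked_as;  /* how strongly this iterator holds it */
};

/*
 * If a linked iterator already holds @b at least as strongly as @want,
 * take another reference on the existing hold instead of locking again,
 * so we never have to block (and never have to drop locks) for a node
 * the transaction effectively owns already.
 */
static bool toy_lock_increment(struct toy_iter *iter, struct toy_node *b,
			       enum toy_lock_type want)
{
	struct toy_iter *linked;

	for (linked = iter->next_linked; linked; linked = linked->next_linked)
		if (linked->node == b && linked->locked_as >= want) {
			b->lock_count++;   /* share the existing hold */
			return true;
		}

	return false;
}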