Commit 5d8c9d94 authored by Kent Overstreet

bcachefs: bch2_btree_path_upgrade() checks nodes_locked, not uptodate

In the key cache fill path, we use path_upgrade() on a path that isn't
uptodate yet but should be locked.

This change makes bch2_btree_path_upgrade() slightly looser so we can
use it in key cache upgrade, instead of the __ version.

Also, make the related assert - that path->uptodate implies nodes_locked
- slightly clearer.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent f2d9823f
...@@ -840,20 +840,19 @@ int __bch2_trans_mutex_lock(struct btree_trans *trans, ...@@ -840,20 +840,19 @@ int __bch2_trans_mutex_lock(struct btree_trans *trans,
void bch2_btree_path_verify_locks(struct btree_path *path) void bch2_btree_path_verify_locks(struct btree_path *path)
{ {
unsigned l;
/* /*
* A path may be uptodate and yet have nothing locked if and only if * A path may be uptodate and yet have nothing locked if and only if
* there is no node at path->level, which generally means we were * there is no node at path->level, which generally means we were
* iterating over all nodes and got to the end of the btree * iterating over all nodes and got to the end of the btree
*/ */
if (!path->nodes_locked) { BUG_ON(path->uptodate == BTREE_ITER_UPTODATE &&
BUG_ON(path->uptodate == BTREE_ITER_UPTODATE && btree_path_node(path, path->level) &&
btree_path_node(path, path->level)); !path->nodes_locked);
if (!path->nodes_locked)
return; return;
}
for (l = 0; l < BTREE_MAX_DEPTH; l++) { for (unsigned l = 0; l < BTREE_MAX_DEPTH; l++) {
int want = btree_lock_want(path, l); int want = btree_lock_want(path, l);
int have = btree_node_locked_type(path, l); int have = btree_node_locked_type(path, l);
......
...@@ -364,14 +364,14 @@ static inline int bch2_btree_path_upgrade(struct btree_trans *trans, ...@@ -364,14 +364,14 @@ static inline int bch2_btree_path_upgrade(struct btree_trans *trans,
struct btree_path *path, struct btree_path *path,
unsigned new_locks_want) unsigned new_locks_want)
{ {
struct get_locks_fail f; struct get_locks_fail f = {};
unsigned old_locks_want = path->locks_want; unsigned old_locks_want = path->locks_want;
new_locks_want = min(new_locks_want, BTREE_MAX_DEPTH); new_locks_want = min(new_locks_want, BTREE_MAX_DEPTH);
if (path->locks_want < new_locks_want if (path->locks_want < new_locks_want
? __bch2_btree_path_upgrade(trans, path, new_locks_want, &f) ? __bch2_btree_path_upgrade(trans, path, new_locks_want, &f)
: path->uptodate == BTREE_ITER_UPTODATE) : path->nodes_locked)
return 0; return 0;
trace_and_count(trans->c, trans_restart_upgrade, trans, _THIS_IP_, path, trace_and_count(trans->c, trans_restart_upgrade, trans, _THIS_IP_, path,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment