Commit ecab6be7 authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: bch2_foreground_maybe_merge() now correctly reports lock restarts

This means that btree node splits don't have to automatically trigger a
transaction restart.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 54ca47e1
...@@ -1527,16 +1527,16 @@ int bch2_btree_split_leaf(struct bch_fs *c, struct btree_iter *iter, ...@@ -1527,16 +1527,16 @@ int bch2_btree_split_leaf(struct bch_fs *c, struct btree_iter *iter,
bch2_btree_update_done(as); bch2_btree_update_done(as);
for (l = iter->level + 1; btree_iter_node(iter, l) && !ret; l++) for (l = iter->level + 1; btree_iter_node(iter, l) && !ret; l++)
bch2_foreground_maybe_merge(c, iter, l, flags); ret = bch2_foreground_maybe_merge(c, iter, l, flags);
return ret; return ret;
} }
void __bch2_foreground_maybe_merge(struct bch_fs *c, int __bch2_foreground_maybe_merge(struct bch_fs *c,
struct btree_iter *iter, struct btree_iter *iter,
unsigned level, unsigned level,
unsigned flags, unsigned flags,
enum btree_node_sibling sib) enum btree_node_sibling sib)
{ {
struct btree_trans *trans = iter->trans; struct btree_trans *trans = iter->trans;
struct btree_iter *sib_iter = NULL; struct btree_iter *sib_iter = NULL;
...@@ -1547,10 +1547,7 @@ void __bch2_foreground_maybe_merge(struct bch_fs *c, ...@@ -1547,10 +1547,7 @@ void __bch2_foreground_maybe_merge(struct bch_fs *c,
struct btree *b, *m, *n, *prev, *next, *parent; struct btree *b, *m, *n, *prev, *next, *parent;
struct bpos sib_pos; struct bpos sib_pos;
size_t sib_u64s; size_t sib_u64s;
int ret = 0; int ret = 0, ret2 = 0;
if (trans->nounlock)
return;
BUG_ON(!btree_node_locked(iter, level)); BUG_ON(!btree_node_locked(iter, level));
retry: retry:
...@@ -1689,17 +1686,16 @@ void __bch2_foreground_maybe_merge(struct bch_fs *c, ...@@ -1689,17 +1686,16 @@ void __bch2_foreground_maybe_merge(struct bch_fs *c,
* split path, and downgrading to read locks in there is potentially * split path, and downgrading to read locks in there is potentially
* confusing: * confusing:
*/ */
return; return ret ?: ret2;
err: err:
bch2_trans_iter_put(trans, sib_iter); bch2_trans_iter_put(trans, sib_iter);
sib_iter = NULL; sib_iter = NULL;
if (ret == -EINTR && bch2_trans_relock(trans)) { if (ret == -EINTR && bch2_trans_relock(trans))
ret = 0;
goto retry; goto retry;
}
if (ret == -EINTR && !(flags & BTREE_INSERT_NOUNLOCK)) { if (ret == -EINTR && !(flags & BTREE_INSERT_NOUNLOCK)) {
ret2 = ret;
ret = bch2_btree_iter_traverse_all(trans); ret = bch2_btree_iter_traverse_all(trans);
if (!ret) if (!ret)
goto retry; goto retry;
......
...@@ -132,10 +132,10 @@ void bch2_btree_insert_node(struct btree_update *, struct btree *, ...@@ -132,10 +132,10 @@ void bch2_btree_insert_node(struct btree_update *, struct btree *,
unsigned); unsigned);
int bch2_btree_split_leaf(struct bch_fs *, struct btree_iter *, unsigned); int bch2_btree_split_leaf(struct bch_fs *, struct btree_iter *, unsigned);
void __bch2_foreground_maybe_merge(struct bch_fs *, struct btree_iter *, int __bch2_foreground_maybe_merge(struct bch_fs *, struct btree_iter *,
unsigned, unsigned, enum btree_node_sibling); unsigned, unsigned, enum btree_node_sibling);
static inline void bch2_foreground_maybe_merge_sibling(struct bch_fs *c, static inline int bch2_foreground_maybe_merge_sibling(struct bch_fs *c,
struct btree_iter *iter, struct btree_iter *iter,
unsigned level, unsigned flags, unsigned level, unsigned flags,
enum btree_node_sibling sib) enum btree_node_sibling sib)
...@@ -143,27 +143,27 @@ static inline void bch2_foreground_maybe_merge_sibling(struct bch_fs *c, ...@@ -143,27 +143,27 @@ static inline void bch2_foreground_maybe_merge_sibling(struct bch_fs *c,
struct btree *b; struct btree *b;
if (iter->uptodate >= BTREE_ITER_NEED_TRAVERSE) if (iter->uptodate >= BTREE_ITER_NEED_TRAVERSE)
return; return 0;
if (!bch2_btree_node_relock(iter, level)) if (!bch2_btree_node_relock(iter, level))
return; return 0;
b = iter->l[level].b; b = iter->l[level].b;
if (b->sib_u64s[sib] > c->btree_foreground_merge_threshold) if (b->sib_u64s[sib] > c->btree_foreground_merge_threshold)
return; return 0;
__bch2_foreground_maybe_merge(c, iter, level, flags, sib); return __bch2_foreground_maybe_merge(c, iter, level, flags, sib);
} }
static inline void bch2_foreground_maybe_merge(struct bch_fs *c, static inline int bch2_foreground_maybe_merge(struct bch_fs *c,
struct btree_iter *iter, struct btree_iter *iter,
unsigned level, unsigned level,
unsigned flags) unsigned flags)
{ {
bch2_foreground_maybe_merge_sibling(c, iter, level, flags, return bch2_foreground_maybe_merge_sibling(c, iter, level, flags,
btree_prev_sib); btree_prev_sib) ?:
bch2_foreground_maybe_merge_sibling(c, iter, level, flags, bch2_foreground_maybe_merge_sibling(c, iter, level, flags,
btree_next_sib); btree_next_sib);
} }
void bch2_btree_set_root_for_read(struct bch_fs *, struct btree *); void bch2_btree_set_root_for_read(struct bch_fs *, struct btree *);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment