Commit ef035f42 authored by Kent Overstreet

bcachefs: Separate out flush_new_cached_update()

This separates out the slowpath of bch2_trans_update_by_path_trace()
into a new non-inlined helper.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent b0c5b15c
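
The refactor follows the common kernel pattern of pulling a rarely-taken branch out of a hot function into a noinline helper, so the fast path stays small and the cold code (and its stack usage) is not inlined into every caller. Here the helper also needs to call back into bch2_trans_update_by_path_trace() with the newly acquired btree path, which is why the patch adds a forward declaration. A minimal, self-contained sketch of that shape, using hypothetical names rather than the bcachefs API:

/*
 * Sketch only: the main function is forward-declared so the noinline
 * slowpath helper can re-enter it, while the common case stays in the
 * small, inline-friendly caller.
 */
static int do_update(int key, unsigned int flags);

static __attribute__((noinline)) int do_update_slowpath(int key, unsigned int flags)
{
        /* Rare case: redo the update without the slowpath-triggering flag. */
        return do_update(key, flags & ~1u);
}

static int do_update(int key, unsigned int flags)
{
        if (flags & 1)                  /* rarely taken */
                return do_update_slowpath(key, flags);

        return 0;                       /* common, fast case */
}

int main(void)
{
        return do_update(42, 1);
}
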
@@ -1406,6 +1406,37 @@ static int need_whiteout_for_snapshot(struct btree_trans *trans,
 	return ret;
 }
 
+static int __must_check
+bch2_trans_update_by_path_trace(struct btree_trans *trans, struct btree_path *path,
+				struct bkey_i *k, enum btree_update_flags flags,
+				unsigned long ip);
+
+static noinline int flush_new_cached_update(struct btree_trans *trans,
+					    struct btree_path *path,
+					    struct btree_insert_entry *i,
+					    enum btree_update_flags flags,
+					    unsigned long ip)
+{
+	struct btree_path *btree_path;
+	int ret;
+
+	i->key_cache_already_flushed = true;
+	i->flags |= BTREE_TRIGGER_NORUN;
+
+	btree_path = bch2_path_get(trans, path->btree_id, path->pos, 1, 0,
+				   BTREE_ITER_INTENT);
+
+	ret = bch2_btree_path_traverse(trans, btree_path, 0);
+	if (ret)
+		goto err;
+
+	btree_path_set_should_be_locked(btree_path);
+	ret = bch2_trans_update_by_path_trace(trans, btree_path, i->k, flags, ip);
+err:
+	bch2_path_put(trans, btree_path, true);
+	return ret;
+}
+
 static int __must_check
 bch2_trans_update_by_path_trace(struct btree_trans *trans, struct btree_path *path,
 				struct bkey_i *k, enum btree_update_flags flags,
@@ -1413,7 +1444,6 @@ bch2_trans_update_by_path_trace(struct btree_trans *trans, struct btree_path *pa
 {
 	struct bch_fs *c = trans->c;
 	struct btree_insert_entry *i, n;
-	int ret = 0;
 
 	BUG_ON(!path->should_be_locked);
 
@@ -1484,26 +1514,10 @@ bch2_trans_update_by_path_trace(struct btree_trans *trans, struct btree_path *pa
 	 */
 	if (path->cached &&
 	    bkey_deleted(&i->old_k) &&
-	    !(flags & BTREE_UPDATE_NO_KEY_CACHE_COHERENCY)) {
-		struct btree_path *btree_path;
-
-		i->key_cache_already_flushed = true;
-		i->flags |= BTREE_TRIGGER_NORUN;
-
-		btree_path = bch2_path_get(trans, path->btree_id, path->pos,
-					   1, 0, BTREE_ITER_INTENT);
-
-		ret = bch2_btree_path_traverse(trans, btree_path, 0);
-		if (ret)
-			goto err;
-
-		btree_path_set_should_be_locked(btree_path);
-		ret = bch2_trans_update_by_path_trace(trans, btree_path, k, flags, ip);
-err:
-		bch2_path_put(trans, btree_path, true);
-	}
-
-	return ret;
+	    !(flags & BTREE_UPDATE_NO_KEY_CACHE_COHERENCY))
+		return flush_new_cached_update(trans, path, i, flags, ip);
+
+	return 0;
 }
 
 static int __must_check
...