Commit 53b1c6f4 authored by Kent Overstreet

bcachefs: Don't use key cache during fsck

The btree key cache mainly helps with lock contention, at the cost of
additional memory overhead. During some fsck passes the memory overhead
really matters, but fsck is single threaded so lock contention isn't an
issue - so skipping the key cache during fsck will help with
performance.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent b32f9a57
...@@ -352,7 +352,8 @@ static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bc ...@@ -352,7 +352,8 @@ static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bc
u64 bp_offset = 0; u64 bp_offset = 0;
ret = bch2_get_next_backpointer(trans, POS(ca->dev_idx, b), -1, ret = bch2_get_next_backpointer(trans, POS(ca->dev_idx, b), -1,
&bp_offset, &bp); &bp_offset, &bp,
BTREE_ITER_NOPRESERVE);
if (ret) { if (ret) {
ob = ERR_PTR(ret); ob = ERR_PTR(ret);
goto err; goto err;
......
...@@ -300,7 +300,8 @@ int bch2_bucket_backpointer_mod_nowritebuffer(struct btree_trans *trans, ...@@ -300,7 +300,8 @@ int bch2_bucket_backpointer_mod_nowritebuffer(struct btree_trans *trans,
int bch2_get_next_backpointer(struct btree_trans *trans, int bch2_get_next_backpointer(struct btree_trans *trans,
struct bpos bucket, int gen, struct bpos bucket, int gen,
u64 *bp_offset, u64 *bp_offset,
struct bch_backpointer *dst) struct bch_backpointer *dst,
unsigned iter_flags)
{ {
struct bch_fs *c = trans->c; struct bch_fs *c = trans->c;
struct bpos bp_pos, bp_end_pos; struct bpos bp_pos, bp_end_pos;
...@@ -921,7 +922,7 @@ static int check_one_backpointer(struct btree_trans *trans, ...@@ -921,7 +922,7 @@ static int check_one_backpointer(struct btree_trans *trans,
struct printbuf buf = PRINTBUF; struct printbuf buf = PRINTBUF;
int ret; int ret;
ret = bch2_get_next_backpointer(trans, bucket, -1, bp_offset, &bp); ret = bch2_get_next_backpointer(trans, bucket, -1, bp_offset, &bp, 0);
if (ret || *bp_offset == U64_MAX) if (ret || *bp_offset == U64_MAX)
return ret; return ret;
......
...@@ -118,7 +118,7 @@ static inline void bch2_extent_ptr_to_bp(struct bch_fs *c, ...@@ -118,7 +118,7 @@ static inline void bch2_extent_ptr_to_bp(struct bch_fs *c,
} }
int bch2_get_next_backpointer(struct btree_trans *, struct bpos, int, int bch2_get_next_backpointer(struct btree_trans *, struct bpos, int,
u64 *, struct bch_backpointer *); u64 *, struct bch_backpointer *, unsigned);
struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *, struct btree_iter *, struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *, struct btree_iter *,
struct bpos, u64, struct bch_backpointer); struct bpos, u64, struct bch_backpointer);
struct btree *bch2_backpointer_get_node(struct btree_trans *, struct btree_iter *, struct btree *bch2_backpointer_get_node(struct btree_trans *, struct btree_iter *,
......
...@@ -841,7 +841,8 @@ static int ec_stripe_update_extent(struct btree_trans *trans, ...@@ -841,7 +841,8 @@ static int ec_stripe_update_extent(struct btree_trans *trans,
struct bkey_i *n; struct bkey_i *n;
int ret, dev, block; int ret, dev, block;
ret = bch2_get_next_backpointer(trans, bucket, gen, bp_offset, &bp); ret = bch2_get_next_backpointer(trans, bucket, gen,
bp_offset, &bp, BTREE_ITER_CACHED);
if (ret) if (ret)
return ret; return ret;
if (*bp_offset == U64_MAX) if (*bp_offset == U64_MAX)
......
...@@ -608,7 +608,8 @@ static int verify_bucket_evacuated(struct btree_trans *trans, struct bpos bucket ...@@ -608,7 +608,8 @@ static int verify_bucket_evacuated(struct btree_trans *trans, struct bpos bucket
bch2_trans_begin(trans); bch2_trans_begin(trans);
ret = bch2_get_next_backpointer(trans, bucket, gen, ret = bch2_get_next_backpointer(trans, bucket, gen,
&bp_offset, &bp); &bp_offset, &bp,
BTREE_ITER_CACHED);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
continue; continue;
if (ret) if (ret)
...@@ -681,7 +682,8 @@ int __bch2_evacuate_bucket(struct moving_context *ctxt, ...@@ -681,7 +682,8 @@ int __bch2_evacuate_bucket(struct moving_context *ctxt,
bch2_trans_begin(&trans); bch2_trans_begin(&trans);
ret = bch2_get_next_backpointer(&trans, bucket, gen, ret = bch2_get_next_backpointer(&trans, bucket, gen,
&bp_offset, &bp); &bp_offset, &bp,
BTREE_ITER_CACHED);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
continue; continue;
if (ret) if (ret)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment