Commit 2f528663 authored by Kent Overstreet

bcachefs: moving_context->stats is allowed to be NULL

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent e84face6
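
The change itself is mechanical: moving_context->stats is treated as an optional pointer, so every path that dereferences ctxt->stats (or m->ctxt->stats) first checks it for NULL. A minimal sketch of the guarded-accounting idiom, using hypothetical stand-in names rather than the actual bcachefs declarations:

#include <linux/atomic.h>
#include <linux/types.h>

/* Sketch only: illustrative stand-ins, not the bcachefs definitions. */
struct move_stats_sketch {
        atomic64_t keys_moved;
        atomic64_t sectors_moved;
};

struct moving_ctxt_sketch {
        struct move_stats_sketch *stats;        /* may be NULL: caller opted out of accounting */
};

static void account_extent_move(struct moving_ctxt_sketch *ctxt, u64 sectors)
{
        /* Guard every dereference, since stats is optional. */
        if (ctxt->stats) {
                atomic64_inc(&ctxt->stats->keys_moved);
                atomic64_add(sectors, &ctxt->stats->sectors_moved);
        }
}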
@@ -273,7 +273,7 @@ static int __bch2_data_update_index_update(struct btree_trans *trans,
                 }
                 continue;
 nomatch:
-                if (m->ctxt) {
+                if (m->ctxt && m->ctxt->stats) {
                         BUG_ON(k.k->p.offset <= iter.pos.offset);
                         atomic64_inc(&m->ctxt->stats->keys_raced);
                         atomic64_add(k.k->p.offset - iter.pos.offset,
@@ -303,12 +303,6 @@ static int bch2_move_extent(struct btree_trans *trans,
         if (ret && ret != -BCH_ERR_unwritten_extent_update)
                 goto err_free_pages;
 
-        io->write.ctxt = ctxt;
-        io->write.op.end_io = move_write_done;
-
-        atomic64_inc(&ctxt->stats->keys_moved);
-        atomic64_add(k.k->size, &ctxt->stats->sectors_moved);
-
         if (ret == -BCH_ERR_unwritten_extent_update) {
                 bch2_update_unwritten_extent(trans, &io->write);
                 move_free(io);
@@ -317,6 +311,14 @@ static int bch2_move_extent(struct btree_trans *trans,
 
         BUG_ON(ret);
 
+        io->write.ctxt = ctxt;
+        io->write.op.end_io = move_write_done;
+
+        if (ctxt->stats) {
+                atomic64_inc(&ctxt->stats->keys_moved);
+                atomic64_add(k.k->size, &ctxt->stats->sectors_moved);
+        }
+
         this_cpu_add(c->counters[BCH_COUNTER_io_move], k.k->size);
         this_cpu_add(c->counters[BCH_COUNTER_move_extent_read], k.k->size);
         trace_move_extent_read(k.k);
@@ -468,9 +470,11 @@ static int __bch2_move_data(struct moving_context *ctxt,
         bch2_bkey_buf_init(&sk);
         bch2_trans_init(&trans, c, 0, 0);
 
+        if (ctxt->stats) {
         ctxt->stats->data_type = BCH_DATA_user;
         ctxt->stats->btree_id = btree_id;
         ctxt->stats->pos = start;
+        }
 
         bch2_trans_iter_init(&trans, &iter, btree_id, start,
                              BTREE_ITER_PREFETCH|
@@ -495,6 +499,7 @@ static int __bch2_move_data(struct moving_context *ctxt,
                 if (bkey_ge(bkey_start_pos(k.k), end))
                         break;
 
+                if (ctxt->stats)
                 ctxt->stats->pos = iter.pos;
 
                 if (!bkey_extent_is_direct_data(k.k))
@@ -535,6 +540,7 @@ static int __bch2_move_data(struct moving_context *ctxt,
                 if (ctxt->rate)
                         bch2_ratelimit_increment(ctxt->rate, k.k->size);
 next:
+                if (ctxt->stats)
                 atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
 next_nondata:
                 bch2_btree_iter_advance(&iter);
@@ -759,6 +765,7 @@ int __bch2_evacuate_bucket(struct btree_trans *trans,
 
                         if (ctxt->rate)
                                 bch2_ratelimit_increment(ctxt->rate, k.k->size);
+                        if (ctxt->stats)
                         atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
                 } else {
                         struct btree *b;
@@ -786,9 +793,11 @@ int __bch2_evacuate_bucket(struct btree_trans *trans,
                         if (ctxt->rate)
                                 bch2_ratelimit_increment(ctxt->rate,
                                                          c->opts.btree_node_size >> 9);
+                        if (ctxt->stats) {
                         atomic64_add(c->opts.btree_node_size >> 9, &ctxt->stats->sectors_seen);
                         atomic64_add(c->opts.btree_node_size >> 9, &ctxt->stats->sectors_moved);
+                        }
                 }
 next:
                 bp_offset++;
         }