Commit 2f528663 authored by Kent Overstreet

bcachefs: moving_context->stats is allowed to be NULL

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent e84face6
@@ -273,7 +273,7 @@ static int __bch2_data_update_index_update(struct btree_trans *trans,
 		}
 		continue;
 nomatch:
-		if (m->ctxt) {
+		if (m->ctxt && m->ctxt->stats) {
 			BUG_ON(k.k->p.offset <= iter.pos.offset);
 			atomic64_inc(&m->ctxt->stats->keys_raced);
 			atomic64_add(k.k->p.offset - iter.pos.offset,
......
@@ -303,12 +303,6 @@ static int bch2_move_extent(struct btree_trans *trans,
 	if (ret && ret != -BCH_ERR_unwritten_extent_update)
 		goto err_free_pages;
-	io->write.ctxt = ctxt;
-	io->write.op.end_io = move_write_done;
-
-	atomic64_inc(&ctxt->stats->keys_moved);
-	atomic64_add(k.k->size, &ctxt->stats->sectors_moved);
-
 	if (ret == -BCH_ERR_unwritten_extent_update) {
 		bch2_update_unwritten_extent(trans, &io->write);
 		move_free(io);
@@ -317,6 +311,14 @@ static int bch2_move_extent(struct btree_trans *trans,
 	BUG_ON(ret);
+	io->write.ctxt = ctxt;
+	io->write.op.end_io = move_write_done;
+
+	if (ctxt->stats) {
+		atomic64_inc(&ctxt->stats->keys_moved);
+		atomic64_add(k.k->size, &ctxt->stats->sectors_moved);
+	}
+
 	this_cpu_add(c->counters[BCH_COUNTER_io_move], k.k->size);
 	this_cpu_add(c->counters[BCH_COUNTER_move_extent_read], k.k->size);
 	trace_move_extent_read(k.k);
@@ -468,9 +470,11 @@ static int __bch2_move_data(struct moving_context *ctxt,
 	bch2_bkey_buf_init(&sk);
 	bch2_trans_init(&trans, c, 0, 0);
-	ctxt->stats->data_type	= BCH_DATA_user;
-	ctxt->stats->btree_id	= btree_id;
-	ctxt->stats->pos	= start;
+	if (ctxt->stats) {
+		ctxt->stats->data_type	= BCH_DATA_user;
+		ctxt->stats->btree_id	= btree_id;
+		ctxt->stats->pos	= start;
+	}
 	bch2_trans_iter_init(&trans, &iter, btree_id, start,
 			     BTREE_ITER_PREFETCH|
@@ -495,7 +499,8 @@ static int __bch2_move_data(struct moving_context *ctxt,
 		if (bkey_ge(bkey_start_pos(k.k), end))
 			break;
-		ctxt->stats->pos = iter.pos;
+		if (ctxt->stats)
+			ctxt->stats->pos = iter.pos;
 		if (!bkey_extent_is_direct_data(k.k))
 			goto next_nondata;
@@ -535,7 +540,8 @@ static int __bch2_move_data(struct moving_context *ctxt,
 		if (ctxt->rate)
 			bch2_ratelimit_increment(ctxt->rate, k.k->size);
 next:
-		atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
+		if (ctxt->stats)
+			atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
 next_nondata:
 		bch2_btree_iter_advance(&iter);
 	}
@@ -759,7 +765,8 @@ int __bch2_evacuate_bucket(struct btree_trans *trans,
 			if (ctxt->rate)
 				bch2_ratelimit_increment(ctxt->rate, k.k->size);
-			atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
+			if (ctxt->stats)
+				atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
 		} else {
 			struct btree *b;
@@ -786,8 +793,10 @@ int __bch2_evacuate_bucket(struct btree_trans *trans,
 			if (ctxt->rate)
 				bch2_ratelimit_increment(ctxt->rate,
 						c->opts.btree_node_size >> 9);
-			atomic64_add(c->opts.btree_node_size >> 9, &ctxt->stats->sectors_seen);
-			atomic64_add(c->opts.btree_node_size >> 9, &ctxt->stats->sectors_moved);
+			if (ctxt->stats) {
+				atomic64_add(c->opts.btree_node_size >> 9, &ctxt->stats->sectors_seen);
+				atomic64_add(c->opts.btree_node_size >> 9, &ctxt->stats->sectors_moved);
+			}
 		}
 next:
 		bp_offset++;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment