Commit 4dc34ae1 authored by Coly Li, committed by Jens Axboe

bcache: improve multithreaded bch_sectors_dirty_init()

Commit b144e45f ("bcache: make bch_sectors_dirty_init() to be
multithreaded") made bch_sectors_dirty_init() much faster when
counting dirty sectors by iterating all dirty keys in the btree.
But it isn't in ideal shape yet; it can still be improved.

This patch does the following changes to improve current parallel dirty
keys iteration on the btree,
- Add a read lock on the root node while multiple threads iterate the
  btree, to prevent the root node from being split by I/Os from other
  registered bcache devices.
- Remove local variable "char name[32]" and generate kernel thread name
  string directly when calling kthread_run().
- Allocate "struct bch_dirty_init_state state" directly on stack and
  avoid the unnecessary dynamic memory allocation for it.
- Decrease BCH_DIRTY_INIT_THRD_MAX from 64 to 12 which is enough indeed.
- Increase &state->started to count created kernel thread after it
  succeeds to create.
- When waiting for all dirty key counting threads to finish, use
  wait_event() to replace wait_event_interruptible().

With the above changes, the code is more clear, and some potential error
conditions are avoided.

Fixes: b144e45f ("bcache: make bch_sectors_dirty_init() to be multithreaded")
Signed-off-by: Coly Li <colyli@suse.de>
Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/r/20220524102336.10684-3-colyli@suse.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 62253644
...@@ -948,10 +948,10 @@ void bch_sectors_dirty_init(struct bcache_device *d) ...@@ -948,10 +948,10 @@ void bch_sectors_dirty_init(struct bcache_device *d)
struct btree_iter iter; struct btree_iter iter;
struct sectors_dirty_init op; struct sectors_dirty_init op;
struct cache_set *c = d->c; struct cache_set *c = d->c;
struct bch_dirty_init_state *state; struct bch_dirty_init_state state;
char name[32];
/* Just count root keys if no leaf node */ /* Just count root keys if no leaf node */
rw_lock(0, c->root, c->root->level);
if (c->root->level == 0) { if (c->root->level == 0) {
bch_btree_op_init(&op.op, -1); bch_btree_op_init(&op.op, -1);
op.inode = d->id; op.inode = d->id;
...@@ -961,54 +961,42 @@ void bch_sectors_dirty_init(struct bcache_device *d) ...@@ -961,54 +961,42 @@ void bch_sectors_dirty_init(struct bcache_device *d)
for_each_key_filter(&c->root->keys, for_each_key_filter(&c->root->keys,
k, &iter, bch_ptr_invalid) k, &iter, bch_ptr_invalid)
sectors_dirty_init_fn(&op.op, c->root, k); sectors_dirty_init_fn(&op.op, c->root, k);
rw_unlock(0, c->root);
return; return;
} }
state = kzalloc(sizeof(struct bch_dirty_init_state), GFP_KERNEL); state.c = c;
if (!state) { state.d = d;
pr_warn("sectors dirty init failed: cannot allocate memory\n"); state.total_threads = bch_btre_dirty_init_thread_nr();
return; state.key_idx = 0;
} spin_lock_init(&state.idx_lock);
atomic_set(&state.started, 0);
state->c = c; atomic_set(&state.enough, 0);
state->d = d; init_waitqueue_head(&state.wait);
state->total_threads = bch_btre_dirty_init_thread_nr();
state->key_idx = 0; for (i = 0; i < state.total_threads; i++) {
spin_lock_init(&state->idx_lock); /* Fetch latest state.enough earlier */
atomic_set(&state->started, 0);
atomic_set(&state->enough, 0);
init_waitqueue_head(&state->wait);
for (i = 0; i < state->total_threads; i++) {
/* Fetch latest state->enough earlier */
smp_mb__before_atomic(); smp_mb__before_atomic();
if (atomic_read(&state->enough)) if (atomic_read(&state.enough))
break; break;
state->infos[i].state = state; state.infos[i].state = &state;
atomic_inc(&state->started); state.infos[i].thread =
snprintf(name, sizeof(name), "bch_dirty_init[%d]", i); kthread_run(bch_dirty_init_thread, &state.infos[i],
"bch_dirtcnt[%d]", i);
state->infos[i].thread = if (IS_ERR(state.infos[i].thread)) {
kthread_run(bch_dirty_init_thread,
&state->infos[i],
name);
if (IS_ERR(state->infos[i].thread)) {
pr_err("fails to run thread bch_dirty_init[%d]\n", i); pr_err("fails to run thread bch_dirty_init[%d]\n", i);
for (--i; i >= 0; i--) for (--i; i >= 0; i--)
kthread_stop(state->infos[i].thread); kthread_stop(state.infos[i].thread);
goto out; goto out;
} }
atomic_inc(&state.started);
} }
/*
* Must wait for all threads to stop.
*/
wait_event_interruptible(state->wait,
atomic_read(&state->started) == 0);
out: out:
kfree(state); /* Must wait for all threads to stop. */
wait_event(state.wait, atomic_read(&state.started) == 0);
rw_unlock(0, c->root);
} }
void bch_cached_dev_writeback_init(struct cached_dev *dc) void bch_cached_dev_writeback_init(struct cached_dev *dc)
......
...@@ -20,7 +20,7 @@ ...@@ -20,7 +20,7 @@
#define BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID 57 #define BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID 57
#define BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH 64 #define BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH 64
#define BCH_DIRTY_INIT_THRD_MAX 64 #define BCH_DIRTY_INIT_THRD_MAX 12
/* /*
* 14 (16384ths) is chosen here as something that each backing device * 14 (16384ths) is chosen here as something that each backing device
* should be a reasonable fraction of the share, and not to blow up * should be a reasonable fraction of the share, and not to blow up
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment