Commit 161f73c2 authored by Kent Overstreet

bcachefs: Split out btree_write_submit_wq

Split the workqueues for btree read completions and btree write
submissions; we don't want concurrency control on btree read
completions, but we do want concurrency control on write submissions,
else blocking in submit_bio() will cause a ton of kworkers to be
allocated.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 83a7eefe
...@@ -790,7 +790,8 @@ struct bch_fs { ...@@ -790,7 +790,8 @@ struct bch_fs {
/* BTREE CACHE */ /* BTREE CACHE */
struct bio_set btree_bio; struct bio_set btree_bio;
struct workqueue_struct *io_complete_wq; struct workqueue_struct *btree_read_complete_wq;
struct workqueue_struct *btree_write_submit_wq;
struct btree_root btree_roots_known[BTREE_ID_NR]; struct btree_root btree_roots_known[BTREE_ID_NR];
DARRAY(struct btree_root) btree_roots_extra; DARRAY(struct btree_root) btree_roots_extra;
......
...@@ -1389,7 +1389,7 @@ static void btree_node_read_endio(struct bio *bio) ...@@ -1389,7 +1389,7 @@ static void btree_node_read_endio(struct bio *bio)
bch2_latency_acct(ca, rb->start_time, READ); bch2_latency_acct(ca, rb->start_time, READ);
} }
queue_work(c->io_complete_wq, &rb->work); queue_work(c->btree_read_complete_wq, &rb->work);
} }
struct btree_node_read_all { struct btree_node_read_all {
...@@ -1656,7 +1656,7 @@ static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool ...@@ -1656,7 +1656,7 @@ static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool
btree_node_read_all_replicas_done(&ra->cl.work); btree_node_read_all_replicas_done(&ra->cl.work);
} else { } else {
continue_at(&ra->cl, btree_node_read_all_replicas_done, continue_at(&ra->cl, btree_node_read_all_replicas_done,
c->io_complete_wq); c->btree_read_complete_wq);
} }
return 0; return 0;
...@@ -1737,7 +1737,7 @@ void bch2_btree_node_read(struct btree_trans *trans, struct btree *b, ...@@ -1737,7 +1737,7 @@ void bch2_btree_node_read(struct btree_trans *trans, struct btree *b,
if (sync) if (sync)
btree_node_read_work(&rb->work); btree_node_read_work(&rb->work);
else else
queue_work(c->io_complete_wq, &rb->work); queue_work(c->btree_read_complete_wq, &rb->work);
} }
} }
...@@ -2229,7 +2229,7 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags) ...@@ -2229,7 +2229,7 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
atomic64_add(bytes_to_write, &c->btree_write_stats[type].bytes); atomic64_add(bytes_to_write, &c->btree_write_stats[type].bytes);
INIT_WORK(&wbio->work, btree_write_submit); INIT_WORK(&wbio->work, btree_write_submit);
queue_work(c->io_complete_wq, &wbio->work); queue_work(c->btree_write_submit_wq, &wbio->work);
return; return;
err: err:
set_btree_node_noevict(b); set_btree_node_noevict(b);
......
...@@ -582,8 +582,10 @@ static void __bch2_fs_free(struct bch_fs *c) ...@@ -582,8 +582,10 @@ static void __bch2_fs_free(struct bch_fs *c)
if (c->write_ref_wq) if (c->write_ref_wq)
destroy_workqueue(c->write_ref_wq); destroy_workqueue(c->write_ref_wq);
if (c->io_complete_wq) if (c->btree_write_submit_wq)
destroy_workqueue(c->io_complete_wq); destroy_workqueue(c->btree_write_submit_wq);
if (c->btree_read_complete_wq)
destroy_workqueue(c->btree_read_complete_wq);
if (c->copygc_wq) if (c->copygc_wq)
destroy_workqueue(c->copygc_wq); destroy_workqueue(c->copygc_wq);
if (c->btree_io_complete_wq) if (c->btree_io_complete_wq)
...@@ -878,8 +880,10 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts) ...@@ -878,8 +880,10 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM, 1)) || WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM, 1)) ||
!(c->copygc_wq = alloc_workqueue("bcachefs_copygc", !(c->copygc_wq = alloc_workqueue("bcachefs_copygc",
WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 1)) || WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 1)) ||
!(c->io_complete_wq = alloc_workqueue("bcachefs_io", !(c->btree_read_complete_wq = alloc_workqueue("bcachefs_btree_read_complete",
WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM, 512)) || WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM, 512)) ||
!(c->btree_write_submit_wq = alloc_workqueue("bcachefs_btree_write_sumit",
WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM, 1)) ||
!(c->write_ref_wq = alloc_workqueue("bcachefs_write_ref", !(c->write_ref_wq = alloc_workqueue("bcachefs_write_ref",
WQ_FREEZABLE, 0)) || WQ_FREEZABLE, 0)) ||
#ifndef BCH_WRITE_REF_DEBUG #ifndef BCH_WRITE_REF_DEBUG
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment