Commit 9ace606e authored by Kent Overstreet's avatar Kent Overstreet Committed by Kent Overstreet

bcachefs: Don't block on reclaim_lock from journal_res_get

When we're doing btree updates from journal flush, blocking on the
reclaim lock becomes a locking inversion.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 03d5eaed
...@@ -390,7 +390,10 @@ static int __journal_res_get(struct journal *j, struct journal_res *res, ...@@ -390,7 +390,10 @@ static int __journal_res_get(struct journal *j, struct journal_res *res,
goto retry; goto retry;
} }
bch2_journal_reclaim_work(&j->reclaim_work.work); if (mutex_trylock(&j->reclaim_lock)) {
bch2_journal_reclaim(j);
mutex_unlock(&j->reclaim_lock);
}
} }
ret = -EAGAIN; ret = -EAGAIN;
......
...@@ -433,7 +433,7 @@ static void journal_flush_pins(struct journal *j, u64 seq_to_flush, ...@@ -433,7 +433,7 @@ static void journal_flush_pins(struct journal *j, u64 seq_to_flush,
} }
/** /**
* bch2_journal_reclaim_work - free up journal buckets * bch2_journal_reclaim - free up journal buckets
* *
* Background journal reclaim writes out btree nodes. It should be run * Background journal reclaim writes out btree nodes. It should be run
* early enough so that we never completely run out of journal buckets. * early enough so that we never completely run out of journal buckets.
...@@ -450,18 +450,17 @@ static void journal_flush_pins(struct journal *j, u64 seq_to_flush, ...@@ -450,18 +450,17 @@ static void journal_flush_pins(struct journal *j, u64 seq_to_flush,
* 512 journal entries or 25% of all journal buckets, then * 512 journal entries or 25% of all journal buckets, then
* journal_next_bucket() should not stall. * journal_next_bucket() should not stall.
*/ */
void bch2_journal_reclaim_work(struct work_struct *work) void bch2_journal_reclaim(struct journal *j)
{ {
struct bch_fs *c = container_of(to_delayed_work(work), struct bch_fs *c = container_of(j, struct bch_fs, journal);
struct bch_fs, journal.reclaim_work);
struct journal *j = &c->journal;
struct bch_dev *ca; struct bch_dev *ca;
unsigned iter, bucket_to_flush, min_nr = 0; unsigned iter, bucket_to_flush, min_nr = 0;
u64 seq_to_flush = 0; u64 seq_to_flush = 0;
lockdep_assert_held(&j->reclaim_lock);
bch2_journal_do_discards(j); bch2_journal_do_discards(j);
mutex_lock(&j->reclaim_lock);
spin_lock(&j->lock); spin_lock(&j->lock);
for_each_rw_member(ca, c, iter) { for_each_rw_member(ca, c, iter) {
...@@ -493,13 +492,21 @@ void bch2_journal_reclaim_work(struct work_struct *work) ...@@ -493,13 +492,21 @@ void bch2_journal_reclaim_work(struct work_struct *work)
journal_flush_pins(j, seq_to_flush, min_nr); journal_flush_pins(j, seq_to_flush, min_nr);
mutex_unlock(&j->reclaim_lock);
if (!test_bit(BCH_FS_RO, &c->flags)) if (!test_bit(BCH_FS_RO, &c->flags))
queue_delayed_work(c->journal_reclaim_wq, &j->reclaim_work, queue_delayed_work(c->journal_reclaim_wq, &j->reclaim_work,
msecs_to_jiffies(j->reclaim_delay_ms)); msecs_to_jiffies(j->reclaim_delay_ms));
} }
/*
 * bch2_journal_reclaim_work - delayed-work callback that runs journal reclaim.
 *
 * Blocking on j->reclaim_lock is fine here (unlike the journal_res_get
 * path, which only mutex_trylock()s it to avoid a locking inversion);
 * bch2_journal_reclaim() asserts the lock is held.
 */
void bch2_journal_reclaim_work(struct work_struct *work)
{
	struct journal *j = container_of(to_delayed_work(work),
				struct journal, reclaim_work);

	mutex_lock(&j->reclaim_lock);
	bch2_journal_reclaim(j);
	mutex_unlock(&j->reclaim_lock);
}
static int journal_flush_done(struct journal *j, u64 seq_to_flush) static int journal_flush_done(struct journal *j, u64 seq_to_flush)
{ {
int ret; int ret;
......
...@@ -42,6 +42,7 @@ void bch2_journal_pin_add_if_older(struct journal *, ...@@ -42,6 +42,7 @@ void bch2_journal_pin_add_if_older(struct journal *,
void bch2_journal_pin_flush(struct journal *, struct journal_entry_pin *); void bch2_journal_pin_flush(struct journal *, struct journal_entry_pin *);
void bch2_journal_do_discards(struct journal *); void bch2_journal_do_discards(struct journal *);
void bch2_journal_reclaim(struct journal *);
void bch2_journal_reclaim_work(struct work_struct *); void bch2_journal_reclaim_work(struct work_struct *);
void bch2_journal_flush_pins(struct journal *, u64); void bch2_journal_flush_pins(struct journal *, u64);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment