Commit a14a68b7 authored by Dongsheng Yang, committed by Jens Axboe

bcache: allow allocator to invalidate bucket in gc

Currently, if gc is running when the allocator finds free_inc
empty, the allocator has to wait for gc to finish. Until then,
I/O is blocked.

But in fact some buckets are already reclaimable before gc starts,
and gc will never mark that kind of bucket unreclaimable.

So these buckets can be put into free_inc while gc is running,
to avoid blocking I/O.
Signed-off-by: Dongsheng Yang <dongsheng.yang@easystack.cn>
Signed-off-by: Mingzhe Zou <mingzhe.zou@easystack.cn>
Signed-off-by: Coly Li <colyli@suse.de>
Link: https://lore.kernel.org/r/20240528120914.28705-2-colyli@suse.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent e993db2d
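
In short, the patch snapshots which buckets are already reclaimable at the moment gc starts (a new reclaimable_in_gc bit per bucket), so the allocator can keep invalidating those buckets while gc runs instead of stalling. Below is a minimal, self-contained userspace sketch of that idea; all names and types here are simplified stand-ins for illustration, not the kernel's actual code.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures (hypothetical names). */
struct bucket {
	bool pinned;            /* stands in for atomic_read(&b->pin)      */
	bool mark_reclaimable;  /* stands in for the GC_MARK() check       */
	bool reclaimable_in_gc; /* the new per-bucket snapshot bit         */
};

struct cache_set {
	bool gc_mark_valid;     /* cleared while gc is rewriting the marks */
};

/* Mirrors the reworked bch_can_invalidate_bucket(): a bucket may be
 * invalidated if the gc marks are currently valid, or if it was
 * snapshotted as reclaimable when the current gc run started. */
static bool can_invalidate(struct cache_set *c, struct bucket *b)
{
	return (c->gc_mark_valid || b->reclaimable_in_gc) &&
	       (b->mark_reclaimable && !b->pinned);
}

/* Mirrors btree_gc_start(): snapshot reclaimability for every bucket,
 * then invalidate the marks for the duration of the gc run. */
static void gc_start(struct cache_set *c, struct bucket *bs, int n)
{
	for (int i = 0; i < n; i++)
		if (can_invalidate(c, &bs[i]))
			bs[i].reclaimable_in_gc = true;
	c->gc_mark_valid = false;
}

int main(void)
{
	struct cache_set c = { .gc_mark_valid = true };
	struct bucket bs[2] = {
		{ .mark_reclaimable = true },                 /* free before gc */
		{ .mark_reclaimable = true, .pinned = true }, /* pinned bucket  */
	};

	gc_start(&c, bs, 2);

	/* During gc the first bucket can still feed free_inc; the pinned
	 * one cannot, and neither could anything before this patch. */
	printf("bucket 0: %d, bucket 1: %d\n",
	       can_invalidate(&c, &bs[0]), can_invalidate(&c, &bs[1]));
	return 0;
}
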
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -129,12 +129,9 @@ static inline bool can_inc_bucket_gen(struct bucket *b)
 
 bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b)
 {
-	BUG_ON(!ca->set->gc_mark_valid);
-
-	return (!GC_MARK(b) ||
-		GC_MARK(b) == GC_MARK_RECLAIMABLE) &&
-		!atomic_read(&b->pin) &&
-		can_inc_bucket_gen(b);
+	return (ca->set->gc_mark_valid || b->reclaimable_in_gc) &&
+	       ((!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE) &&
+	       !atomic_read(&b->pin) && can_inc_bucket_gen(b));
 }
 
 void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
@@ -148,6 +145,7 @@ void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
 	bch_inc_gen(ca, b);
 	b->prio = INITIAL_PRIO;
 	atomic_inc(&b->pin);
+	b->reclaimable_in_gc = 0;
 }
 
 static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
@@ -352,8 +350,7 @@ static int bch_allocator_thread(void *arg)
 		 */
 
 retry_invalidate:
-		allocator_wait(ca, ca->set->gc_mark_valid &&
-			       !ca->invalidate_needs_gc);
+		allocator_wait(ca, !ca->invalidate_needs_gc);
 		invalidate_buckets(ca);
 
 		/*
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -200,6 +200,7 @@ struct bucket {
 	uint8_t		gen;
 	uint8_t		last_gc;	/* Most out of date gen in the btree */
 	uint16_t	gc_mark;	/* Bitfield used by GC. See below for field */
+	uint16_t	reclaimable_in_gc:1;
 };
 
 /*
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -1741,18 +1741,20 @@ static void btree_gc_start(struct cache_set *c)
 
 	mutex_lock(&c->bucket_lock);
-	c->gc_mark_valid = 0;
 	c->gc_done = ZERO_KEY;
 
 	ca = c->cache;
 	for_each_bucket(b, ca) {
 		b->last_gc = b->gen;
+		if (bch_can_invalidate_bucket(ca, b))
+			b->reclaimable_in_gc = 1;
+
 		if (!atomic_read(&b->pin)) {
 			SET_GC_MARK(b, 0);
 			SET_GC_SECTORS_USED(b, 0);
 		}
 	}
 
+	c->gc_mark_valid = 0;
 	mutex_unlock(&c->bucket_lock);
 }
@@ -1809,6 +1811,9 @@ static void bch_btree_gc_finish(struct cache_set *c)
 	for_each_bucket(b, ca) {
 		c->need_gc = max(c->need_gc, bucket_gc_gen(b));
 
+		if (b->reclaimable_in_gc)
+			b->reclaimable_in_gc = 0;
+
 		if (atomic_read(&b->pin))
 			continue;
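
To close the loop on the earlier sketch: when gc finishes, every snapshot bit is dropped and gc_mark_valid is restored, so reclaimable_in_gc only ever matters for the window of a single gc run. A hedged continuation of the same simplified simulation (same stand-in types as before, not the kernel code):

/* Mirrors bch_btree_gc_finish() in the sketch above: the snapshot bits
 * are cleared and the freshly computed gc marks become authoritative. */
static void gc_finish(struct cache_set *c, struct bucket *bs, int n)
{
	for (int i = 0; i < n; i++)
		bs[i].reclaimable_in_gc = false;
	c->gc_mark_valid = true;
}

Note that the patch also clears the bit in __bch_invalidate_one_bucket() once a bucket is actually consumed, so a stale snapshot cannot be used to invalidate the same bucket twice.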