Commit 5c25c4fc authored by Tang Junhui, committed by Jens Axboe

bcache: finish incremental GC

In the GC thread we record the latest GC key in gc_done, which is intended
to be used for incremental GC, but the current code does not implement it.
When GC runs, front side I/O is blocked until GC finishes, which can take
a long time if there are many btree nodes.
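
For reference, the existing code already threads this key through the
btree walk; condensed from btree_gc_recurse() (a sketch, not a verbatim
quote of the source):

	/* resume the btree walk from the last recorded GC key */
	bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
	/* ... GC one node ... */
	/* record how far we got, so a restarted pass can skip it */
	bkey_copy_key(&b->c->gc_done, &last->b->key);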

This patch implements incremental GC. The main idea is: when there are
front side I/Os, after GCing some number of nodes (100), we stop the GC,
release the lock on the btree node, and go process the front side I/Os
for a while (100 ms), then go back to GC again.
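
Condensed, the retry loop in bch_btree_gc() then looks roughly like this
(a sketch of the hunks below; the real loop also syncs writes,
reschedules, and checks CACHE_SET_IO_DISABLE):

	do {
		ret = btree_root(gc_root, c, &op, &writes, &stats);
		if (ret == -EAGAIN)
			/* let front side I/Os run for a while */
			schedule_timeout_interruptible(
				msecs_to_jiffies(GC_SLEEP_MS));
	} while (ret == -EAGAIN);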

With this patch, I/Os are no longer blocked for the whole time GC is
running, and the I/O rate no longer visibly drops to zero during GC.

Patch v2: Rename some variable and macro names as Coly suggested.
Signed-off-by: Tang Junhui <tang.junhui@zte.com.cn>
Signed-off-by: Coly Li <colyli@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 99a27d59
drivers/md/bcache/bcache.h
@@ -474,6 +474,7 @@ struct cache {
 struct gc_stat {
 	size_t			nodes;
+	size_t			nodes_pre;
 	size_t			key_bytes;
 	size_t			nkeys;
@@ -603,6 +604,10 @@ struct cache_set {
 	 * rescale; when it hits 0 we rescale all the bucket priorities.
 	 */
 	atomic_t		rescale;
+	/*
+	 * used for GC, identify if any front side I/Os is inflight
+	 */
+	atomic_t		search_inflight;
 	/*
 	 * When we invalidate buckets, we use both the priority and the amount
 	 * of good data to determine which buckets to reuse first - to weight
drivers/md/bcache/btree.c
@@ -90,6 +90,8 @@
 #define MAX_NEED_GC		64
 #define MAX_SAVE_PRIO		72
+#define MIN_GC_NODES		100
+#define GC_SLEEP_MS		100

 #define PTR_DIRTY_BIT		(((uint64_t) 1 << 36))
@@ -1585,6 +1587,13 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op,
 		memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
 		r->b = NULL;

+		if (atomic_read(&b->c->search_inflight) &&
+		    gc->nodes >= gc->nodes_pre + MIN_GC_NODES) {
+			gc->nodes_pre = gc->nodes;
+			ret = -EAGAIN;
+			break;
+		}
+
 		if (need_resched()) {
 			ret = -EAGAIN;
 			break;
@@ -1753,7 +1762,10 @@ static void bch_btree_gc(struct cache_set *c)
 		closure_sync(&writes);
 		cond_resched();

-		if (ret && ret != -EAGAIN)
+		if (ret == -EAGAIN)
+			schedule_timeout_interruptible(msecs_to_jiffies
+						       (GC_SLEEP_MS));
+		else if (ret)
 			pr_warn("gc failed!");
 	} while (ret && !test_bit(CACHE_SET_IO_DISABLE, &c->flags));
drivers/md/bcache/request.c
@@ -701,6 +701,8 @@ static void search_free(struct closure *cl)
 {
 	struct search *s = container_of(cl, struct search, cl);

+	atomic_dec(&s->d->c->search_inflight);
+
 	if (s->iop.bio)
 		bio_put(s->iop.bio);
@@ -718,6 +720,7 @@ static inline struct search *search_alloc(struct bio *bio,
 	closure_init(&s->cl, NULL);
 	do_bio_hook(s, bio, request_endio);
+	atomic_inc(&d->c->search_inflight);

 	s->orig_bio = bio;
 	s->cache_miss = NULL;