Commit 7a671d8e authored by Coly Li, committed by Jens Axboe

bcache: option to automatically run gc thread after writeback

The option gc_after_writeback is disabled by default, because garbage
collection discards clean data from the SSD, which drops cached data and
can lower the read hit rate.

Echoing 1 into /sys/fs/bcache/<UUID>/internal/gc_after_writeback enables
this option: the gc thread is woken up once writeback completes and all
cached data is clean.

This option is helpful for people who care more about write performance.
Under a heavy write workload, all cached data becomes clean only when the
writeback thread cleans it during I/O idle time. In that situation, a
following gc run may shrink the bcache B+ tree and discard more clean
data, which can help future write requests.

If you are not sure whether this is helpful for your own workload,
please leave it disabled (the default).
Signed-off-by: Coly Li <colyli@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent cb07ad63
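For readers who want to try the option, here is a minimal userspace sketch
(not part of the patch; the UUID is a placeholder for a real cache set
UUID) with the same effect as echoing 1 into the sysfs file:

#include <stdio.h>

int main(void)
{
	/* Placeholder UUID; substitute your cache set's UUID. */
	const char *path = "/sys/fs/bcache/"
		"00000000-0000-0000-0000-000000000000"
		"/internal/gc_after_writeback";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fputs("1\n", f);	/* same effect as: echo 1 > <path> */
	return fclose(f) ? 1 : 0;
}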
@@ -626,6 +626,20 @@ struct cache_set {
/* Where in the btree gc currently is */
struct bkey gc_done;
/*
* For automatic garbage collection after writeback completed, this
* variable is used as bit fields:
* - 0000 0001b (BCH_ENABLE_AUTO_GC): enable gc after writeback
* - 0000 0010b (BCH_DO_AUTO_GC): do gc after writeback
* This is an optimization for the write requests that follow once
* writeback finishes, at the cost of a lower read hit rate, because
* clean data on the cache is discarded. Unless the user explicitly
* sets it via sysfs, it won't be enabled.
*/
#define BCH_ENABLE_AUTO_GC 1
#define BCH_DO_AUTO_GC 2
uint8_t gc_after_writeback;
/*
* The allocation code needs gc_mark in struct bucket to be correct, but
* it's not while a gc is in progress. Protected by bucket_lock.
...
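To make the two-bit protocol concrete, here is a standalone userspace
sketch (the defines mirror the patch; the printf stands in for waking the
gc thread):

#include <stdint.h>
#include <stdio.h>

#define BCH_ENABLE_AUTO_GC 1	/* user opted in via sysfs */
#define BCH_DO_AUTO_GC     2	/* writeback decided gc should run */

int main(void)
{
	uint8_t gc_after_writeback = 0;

	gc_after_writeback = BCH_ENABLE_AUTO_GC;	/* sysfs: echo 1 */
	gc_after_writeback |= BCH_DO_AUTO_GC;		/* threshold crossed */

	/* gc is triggered only when both bits are set ... */
	if (gc_after_writeback == (BCH_ENABLE_AUTO_GC | BCH_DO_AUTO_GC)) {
		/* ... and BCH_DO_AUTO_GC is cleared so gc fires only once */
		gc_after_writeback &= ~BCH_DO_AUTO_GC;
		printf("wake up gc\n");
	}
	return 0;
}

Clearing BCH_DO_AUTO_GC before waking gc means each writeback completion
triggers at most one gc run.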
@@ -128,6 +128,7 @@ rw_attribute(expensive_debug_checks);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
rw_attribute(gc_after_writeback);
rw_attribute(size);

static ssize_t bch_snprint_string_list(char *buf,
@@ -693,6 +694,7 @@ SHOW(__bch_cache_set)
sysfs_printf(gc_always_rewrite, "%i", c->gc_always_rewrite);
sysfs_printf(btree_shrinker_disabled, "%i", c->shrinker_disabled);
sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled);
sysfs_printf(gc_after_writeback, "%i", c->gc_after_writeback);
sysfs_printf(io_disable, "%i",
test_bit(CACHE_SET_IO_DISABLE, &c->flags));
@@ -793,6 +795,12 @@ STORE(__bch_cache_set)
sysfs_strtoul(gc_always_rewrite, c->gc_always_rewrite);
sysfs_strtoul(btree_shrinker_disabled, c->shrinker_disabled);
sysfs_strtoul(copy_gc_enabled, c->copy_gc_enabled);
/*
* Writing gc_after_writeback here may overwrite an already set
* BCH_DO_AUTO_GC; that doesn't matter, because the flag will be
* set again at the next opportunity.
*/
sysfs_strtoul_clamp(gc_after_writeback, c->gc_after_writeback, 0, 1);
return size;
}
@@ -873,6 +881,7 @@ static struct attribute *bch_cache_set_internal_files[] = {
&sysfs_gc_always_rewrite,
&sysfs_btree_shrinker_disabled,
&sysfs_copy_gc_enabled,
&sysfs_gc_after_writeback,
&sysfs_io_disable,
NULL
};
...
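sysfs_strtoul_clamp() is an existing bcache sysfs helper; a rough
userspace sketch of the parse-and-clamp behavior it relies on (the names
here are illustrative, not the kernel implementation) shows why any
written value ends up as 0 or 1:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for the clamp step of
 * sysfs_strtoul_clamp(attr, var, 0, 1): parse an unsigned
 * integer and clamp it into [lo, hi]. */
static unsigned long parse_clamped(const char *buf, unsigned long lo,
				   unsigned long hi)
{
	unsigned long v = strtoul(buf, NULL, 10);

	if (v < lo)
		v = lo;
	if (v > hi)
		v = hi;
	return v;
}

int main(void)
{
	printf("%lu\n", parse_clamped("7", 0, 1));	/* clamped to 1 */
	printf("%lu\n", parse_clamped("0", 0, 1));	/* stays 0 */
	return 0;
}

Because the whole gc_after_writeback byte is assigned, a sysfs write that
lands while BCH_DO_AUTO_GC happens to be set clears that bit, which the
comment above notes is harmless.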
@@ -17,6 +17,15 @@
#include <linux/sched/clock.h>
#include <trace/events/bcache.h>
static void update_gc_after_writeback(struct cache_set *c)
{
if (c->gc_after_writeback != (BCH_ENABLE_AUTO_GC) ||
c->gc_stats.in_use < BCH_AUTO_GC_DIRTY_THRESHOLD)
return;
c->gc_after_writeback |= BCH_DO_AUTO_GC;
}
/* Rate limiting */
static uint64_t __calc_target_rate(struct cached_dev *dc)
{
@@ -191,6 +200,7 @@ static void update_writeback_rate(struct work_struct *work)
if (!set_at_max_writeback_rate(c, dc)) {
down_read(&dc->writeback_lock);
__update_writeback_rate(dc);
update_gc_after_writeback(c);
up_read(&dc->writeback_lock);
}
}
@@ -689,6 +699,23 @@ static int bch_writeback_thread(void *arg)
up_write(&dc->writeback_lock);
break;
}
/*
* When the dirty data rate is high (e.g. 50%+), there might
* be heavy bucket fragmentation after writeback finishes,
* which hurts subsequent write performance. If users really
* care about write performance they may set BCH_ENABLE_AUTO_GC
* via sysfs; then, when BCH_DO_AUTO_GC is set, the garbage
* collection thread will be woken up here. After moving gc
* finishes, the shrunk btree and the discarded free bucket
* space on the SSD may help subsequent write requests.
*/
if (c->gc_after_writeback ==
(BCH_ENABLE_AUTO_GC|BCH_DO_AUTO_GC)) {
c->gc_after_writeback &= ~BCH_DO_AUTO_GC;
force_wake_up_gc(c);
}
}
up_write(&dc->writeback_lock);
...
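Putting the two halves together, here is a userspace sketch of the round
trip between update_gc_after_writeback() and the writeback thread (the
in_use percentages are made-up inputs; the bool return stands in for
force_wake_up_gc()):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BCH_ENABLE_AUTO_GC	1
#define BCH_DO_AUTO_GC		2
#define BCH_AUTO_GC_DIRTY_THRESHOLD 50	/* percent of cache in use */

static uint8_t gc_after_writeback = BCH_ENABLE_AUTO_GC; /* user enabled */

/* Mirrors update_gc_after_writeback(): arm BCH_DO_AUTO_GC only if the
 * user enabled the feature and the cache is dirty enough. */
static void update_gc(unsigned int in_use)
{
	if (gc_after_writeback != BCH_ENABLE_AUTO_GC ||
	    in_use < BCH_AUTO_GC_DIRTY_THRESHOLD)
		return;
	gc_after_writeback |= BCH_DO_AUTO_GC;
}

/* Mirrors the writeback-thread check: fire gc once, then disarm. */
static bool writeback_done(void)
{
	if (gc_after_writeback == (BCH_ENABLE_AUTO_GC | BCH_DO_AUTO_GC)) {
		gc_after_writeback &= ~BCH_DO_AUTO_GC;
		return true;	/* would call force_wake_up_gc(c) */
	}
	return false;
}

int main(void)
{
	update_gc(30);	/* below threshold: DO stays clear */
	printf("gc after light load: %s\n", writeback_done() ? "yes" : "no");
	update_gc(60);	/* above threshold: DO is armed */
	printf("gc after heavy load: %s\n", writeback_done() ? "yes" : "no");
	return 0;
}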
@@ -11,6 +11,8 @@
#define WRITEBACK_RATE_UPDATE_SECS_MAX 60
#define WRITEBACK_RATE_UPDATE_SECS_DEFAULT 5
#define BCH_AUTO_GC_DIRTY_THRESHOLD 50
/*
* 14 (16384ths) is chosen here as something that each backing device
* should be a reasonable fraction of the share, and not to blow up
...
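The threshold is a percentage of cache space in use. A hypothetical
illustration (the bucket counts are invented, and in_use_percent() only
approximates how bcache derives gc_stats.in_use):

#include <stdio.h>

#define BCH_AUTO_GC_DIRTY_THRESHOLD 50

/* Rough approximation: percentage of buckets currently in use. */
static unsigned int in_use_percent(unsigned long nbuckets,
				   unsigned long avail)
{
	return (unsigned int)((nbuckets - avail) * 100 / nbuckets);
}

int main(void)
{
	unsigned int in_use = in_use_percent(1000, 400);	/* 60% */

	printf("in_use=%u%% -> %s\n", in_use,
	       in_use >= BCH_AUTO_GC_DIRTY_THRESHOLD ? "arm gc" : "skip");
	return 0;
}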