Commit 5e6926da authored by Kent Overstreet

bcache: Convert writeback to a kthread

This simplifies the writeback flow control quite a bit: previously, writeback
was conceptually two coroutines, refill_dirty() and read_dirty(). Running it
out of a single kthread makes the code much more straightforward.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
parent 72a44517
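
As background, here is a rough, illustrative sketch of the kernel kthread idiom the commit switches writeback to. All names below are placeholders, not the patch's actual code; the full writeback loop itself is not shown in this excerpt.

/*
 * Illustrative sketch (not part of this patch): the standard kthread
 * pattern that writeback is converted to.
 */
#include <linux/kthread.h>
#include <linux/sched.h>

static int sketch_writeback_thread(void *arg)
{
	while (!kthread_should_stop()) {
		/* ... do one pass of writeback work ... */

		/* sleep until woken (cf. bch_writeback_queue()) or told to stop */
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			break;
		}
		schedule();
	}
	return 0;
}

static struct task_struct *sketch_start(void *data)
{
	/*
	 * Created here, later woken with wake_up_process() and torn down
	 * with kthread_stop(), which waits for the thread to exit.
	 */
	return kthread_create(sketch_writeback_thread, data, "bcache_writeback");
}

The hunks below plug into exactly these three points: thread creation (presumably in bch_cached_dev_writeback_init(), which now returns int so creation failure can be reported), wake_up_process() from the new bch_writeback_queue(), and kthread_stop() from cached_dev_free().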
@@ -509,7 +509,7 @@ struct cached_dev {
 	/* Limit number of writeback bios in flight */
 	struct semaphore	in_flight;
-	struct closure_with_timer writeback;
+	struct task_struct	*writeback_thread;
 	struct keybuf		writeback_keys;
@@ -1038,7 +1038,11 @@ static inline void bkey_init(struct bkey *k)
 #define KEY_START(k)		(KEY_OFFSET(k) - KEY_SIZE(k))
 #define START_KEY(k)		KEY(KEY_INODE(k), KEY_START(k), 0)
-#define MAX_KEY			KEY(~(~0 << 20), ((uint64_t) ~0) >> 1, 0)
+#define MAX_KEY_INODE		(~(~0 << 20))
+#define MAX_KEY_OFFSET		(((uint64_t) ~0) >> 1)
+#define MAX_KEY			KEY(MAX_KEY_INODE, MAX_KEY_OFFSET, 0)
 #define ZERO_KEY		KEY(0, 0, 0)
 /*
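
A quick check of the arithmetic behind the new named constants (an illustrative userspace snippet, not part of the patch): MAX_KEY_INODE is a 20-bit all-ones value and MAX_KEY_OFFSET is a 63-bit all-ones value, exactly what the old inline MAX_KEY expression encoded.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* ~(~0 << 20) == 2^20 - 1: inode numbers occupy 20 bits */
	assert(~(~0 << 20) == 0xFFFFF);
	/* ((uint64_t) ~0) >> 1 == 2^63 - 1: offsets occupy 63 bits */
	assert((((uint64_t) ~0) >> 1) == UINT64_MAX / 2);
	return 0;
}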
@@ -1214,8 +1218,6 @@ int bch_cache_allocator_init(struct cache *ca);
 void bch_debug_exit(void);
 int bch_debug_init(struct kobject *);
-void bch_writeback_exit(void);
-int bch_writeback_init(void);
 void bch_request_exit(void);
 int bch_request_init(void);
 void bch_btree_exit(void);
@@ -1029,6 +1029,7 @@ static void cached_dev_free(struct closure *cl)
 	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
 	cancel_delayed_work_sync(&dc->writeback_rate_update);
+	kthread_stop(dc->writeback_thread);
 	mutex_lock(&bch_register_lock);
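
For context on the line added above: kthread_stop() sets the thread's should-stop flag, wakes the thread, and blocks until its thread function returns, so the writeback thread is guaranteed to be gone before the rest of the teardown in cached_dev_free() proceeds.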
@@ -2006,7 +2007,6 @@ static struct notifier_block reboot = {
 static void bcache_exit(void)
 {
 	bch_debug_exit();
-	bch_writeback_exit();
 	bch_request_exit();
 	bch_btree_exit();
 	if (bcache_kobj)
@@ -2039,7 +2039,6 @@ static int __init bcache_init(void)
 	    sysfs_create_files(bcache_kobj, files) ||
 	    bch_btree_init() ||
 	    bch_request_init() ||
-	    bch_writeback_init() ||
 	    bch_debug_init(bcache_kobj))
 		goto err;
This diff is collapsed.
@@ -56,11 +56,30 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
 	       in_use <= CUTOFF_WRITEBACK;
 }
+static inline void bch_writeback_queue(struct cached_dev *dc)
+{
+	wake_up_process(dc->writeback_thread);
+}
+
+static inline void bch_writeback_add(struct cached_dev *dc)
+{
+	if (!atomic_read(&dc->has_dirty) &&
+	    !atomic_xchg(&dc->has_dirty, 1)) {
+		atomic_inc(&dc->count);
+
+		if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
+			SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
+			/* XXX: should do this synchronously */
+			bch_write_bdev_super(dc, NULL);
+		}
+
+		bch_writeback_queue(dc);
+	}
+}
+
 void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);
-void bch_writeback_queue(struct cached_dev *);
-void bch_writeback_add(struct cached_dev *);
 void bch_sectors_dirty_init(struct cached_dev *dc);
-void bch_cached_dev_writeback_init(struct cached_dev *);
+int bch_cached_dev_writeback_init(struct cached_dev *);
 #endif
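
An aside on the pattern in the new bch_writeback_add(): the cheap atomic_read() skips the heavier atomic_xchg() on the common already-dirty path, and the xchg guarantees that exactly one caller performs the one-time transition work. A minimal userspace analogue using C11 atomics (illustrative only, not bcache code):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int has_dirty;

static bool mark_dirty_once(void)
{
	/* cheap load first: skip the exchange on the common, already-dirty path */
	if (atomic_load(&has_dirty))
		return false;

	/* only the caller that flips 0 -> 1 does the one-time setup */
	if (atomic_exchange(&has_dirty, 1))
		return false;

	/* ... one-time work: persist the dirty state, wake the writeback thread ... */
	return true;
}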