Commit 0878ae2d authored by Jens Axboe

Merge branch 'bcache-for-3.11' of git://evilpiepirate.org/~kent/linux-bcache into for-3.11/drivers

Kent writes:

Hey Jens - I've been busy torture testing and chasing bugs, here's the
fruits of my labors. These are all fairly small fixes, some of them
quite important.
parents d0e3d023 79826c35
drivers/md/bcache/alloc.c

@@ -63,6 +63,7 @@
 #include "bcache.h"
 #include "btree.h"
 
+#include <linux/freezer.h>
 #include <linux/kthread.h>
 #include <linux/random.h>
 #include <trace/events/bcache.h>
@@ -363,11 +364,10 @@ do { \
                        break;                                          \
                                                                        \
                        mutex_unlock(&(ca)->set->bucket_lock);          \
-                       if (test_bit(CACHE_SET_STOPPING_2, &ca->set->flags)) { \
-                               closure_put(&ca->set->cl);              \
+                       if (kthread_should_stop())                      \
                                return 0;                               \
-                       }                                               \
                                                                        \
+                       try_to_freeze();                                \
                        schedule();                                     \
                        mutex_lock(&(ca)->set->bucket_lock);            \
                }                                                       \
@@ -547,14 +547,12 @@ int bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
 int bch_cache_allocator_start(struct cache *ca)
 {
-       ca->alloc_thread = kthread_create(bch_allocator_thread,
-                                         ca, "bcache_allocator");
-       if (IS_ERR(ca->alloc_thread))
-               return PTR_ERR(ca->alloc_thread);
-
-       closure_get(&ca->set->cl);
-       wake_up_process(ca->alloc_thread);
-
+       struct task_struct *k = kthread_run(bch_allocator_thread,
+                                           ca, "bcache_allocator");
+       if (IS_ERR(k))
+               return PTR_ERR(k);
+
+       ca->alloc_thread = k;
        return 0;
 }
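With these hunks the allocator becomes an ordinary kernel thread: created and started with kthread_run(), asked to exit via kthread_should_stop(), made freezable with try_to_freeze(), and (in the super.c hunks below) torn down with kthread_stop(), instead of holding a closure reference on the cache set and watching the CACHE_SET_STOPPING_2 flag. For reference, a minimal self-contained sketch of that kthread lifecycle; illustrative only, not bcache code, and the example_* names are invented:

#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>

static struct task_struct *example_task;

static int example_thread(void *data)
{
        set_freezable();

        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);

                /* kthread_stop() sets this flag and wakes the thread. */
                if (kthread_should_stop()) {
                        __set_current_state(TASK_RUNNING);
                        return 0;
                }

                try_to_freeze();
                schedule();
        }
}

static int __init example_init(void)
{
        /* kthread_run() = kthread_create() + wake_up_process() */
        struct task_struct *k = kthread_run(example_thread, NULL, "example");

        if (IS_ERR(k))
                return PTR_ERR(k);

        example_task = k;
        return 0;
}

static void __exit example_exit(void)
{
        /* Blocks until example_thread() returns. */
        kthread_stop(example_task);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");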
drivers/md/bcache/bcache.h

@@ -434,6 +434,7 @@ struct bcache_device {
 	/* If nonzero, we're detaching/unregistering from cache set */
 	atomic_t detaching;
+	int flush_done;
 
 	uint64_t nr_stripes;
 	unsigned stripe_size_bits;
@@ -663,13 +664,9 @@ struct gc_stat {
  * CACHE_SET_STOPPING always gets set first when we're closing down a cache set;
  * we'll continue to run normally for awhile with CACHE_SET_STOPPING set (i.e.
  * flushing dirty data).
- *
- * CACHE_SET_STOPPING_2 gets set at the last phase, when it's time to shut down
- * the allocation thread.
  */
 #define CACHE_SET_UNREGISTERING		0
 #define CACHE_SET_STOPPING		1
-#define CACHE_SET_STOPPING_2		2
 
 struct cache_set {
 	struct closure cl;
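These two header changes pair with the super.c hunks further down: the new flush_done flag is set by cached_dev_flush() once the device has been unlinked, so that bcache_device_detach() does not unlink it a second time, and CACHE_SET_STOPPING_2 can go away because cache_set_flush() now stops the allocator kthreads directly with kthread_stop() rather than signalling them through a second stopping flag.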
drivers/md/bcache/btree.c

@@ -1410,8 +1410,10 @@ static void btree_gc_start(struct cache_set *c)
 	for_each_cache(ca, c, i)
 		for_each_bucket(b, ca) {
 			b->gc_gen = b->gen;
-			if (!atomic_read(&b->pin))
+			if (!atomic_read(&b->pin)) {
 				SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
+				SET_GC_SECTORS_USED(b, 0);
+			}
 		}
 
 	mutex_unlock(&c->bucket_lock);
drivers/md/bcache/closure.c

@@ -66,16 +66,18 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
 	} else {
 		struct closure *parent = cl->parent;
 		struct closure_waitlist *wait = closure_waitlist(cl);
+		closure_fn *destructor = cl->fn;
 
 		closure_debug_destroy(cl);
 
+		smp_mb();
 		atomic_set(&cl->remaining, -1);
 
 		if (wait)
			closure_wake_up(wait);
 
-		if (cl->fn)
-			cl->fn(cl);
+		if (destructor)
+			destructor(cl);
 
 		if (parent)
 			closure_put(parent);
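The point of this closure.c change: cl->fn is read into a local before remaining is set to -1, because that store (together with waking the waitlist) is what tells waiters the closure is finished, and a waiter may reuse or free the closure immediately afterwards; the added smp_mb() orders the preceding stores, since atomic_set() by itself implies no barrier. A rough userspace sketch of the same read-before-publish rule, with invented names and C11 atomics standing in for the kernel primitives:

#include <stdatomic.h>

struct obj {
        void (*fn)(struct obj *);       /* optional destructor/continuation */
        atomic_int remaining;
};

void last_put(struct obj *o)
{
        /* Read everything still needed from *o before publishing completion. */
        void (*destructor)(struct obj *) = o->fn;

        /*
         * After this store, a waiter that sees -1 may take ownership of *o
         * and reuse or free it, so o->fn must not be read past this point.
         */
        atomic_store_explicit(&o->remaining, -1, memory_order_release);

        /* Use only the local copy; the destructor owns any remaining cleanup. */
        if (destructor)
                destructor(o);
}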
drivers/md/bcache/journal.c

@@ -184,9 +184,14 @@ int bch_journal_read(struct cache_set *c, struct list_head *list,
 		pr_debug("starting binary search, l %u r %u", l, r);
 
 		while (l + 1 < r) {
+			seq = list_entry(list->prev, struct journal_replay,
+					 list)->j.seq;
+
 			m = (l + r) >> 1;
+			read_bucket(m);
 
-			if (read_bucket(m))
+			if (seq != list_entry(list->prev, struct journal_replay,
+					      list)->j.seq)
 				l = m;
 			else
 				r = m;
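In this binary search over journal buckets, the decision no longer keys off read_bucket()'s return value alone: the loop records the newest sequence number currently on the replay list, reads bucket m, and moves l up only if that read appended an entry with a newer seq, i.e. only if bucket m actually held journal entries newer than anything found so far.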
drivers/md/bcache/request.c

@@ -488,6 +488,12 @@ static void bch_insert_data_loop(struct closure *cl)
 		bch_queue_gc(op->c);
 	}
 
+	/*
+	 * Journal writes are marked REQ_FLUSH; if the original write was a
+	 * flush, it'll wait on the journal write.
+	 */
+	bio->bi_rw &= ~(REQ_FLUSH|REQ_FUA);
+
 	do {
 		unsigned i;
 		struct bkey *k;
@@ -710,7 +716,7 @@ static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
 	s->task = current;
 	s->orig_bio = bio;
 	s->write = (bio->bi_rw & REQ_WRITE) != 0;
-	s->op.flush_journal = (bio->bi_rw & REQ_FLUSH) != 0;
+	s->op.flush_journal = (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
 	s->op.skip = (bio->bi_rw & REQ_DISCARD) != 0;
 	s->recoverable = 1;
 	s->start_time = jiffies;
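These two request.c hunks belong together: flush_journal is now set for REQ_FUA as well as REQ_FLUSH, and both bits are then cleared from the data bio before it is split and submitted, since, per the new comment, the journal write is itself issued with REQ_FLUSH and provides the ordering the caller asked for. The blk_queue_flush() call added to bcache_device_init() in super.c below is what advertises flush/FUA support on the bcache block device in the first place.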
drivers/md/bcache/super.c

@@ -16,6 +16,7 @@
 #include <linux/buffer_head.h>
 #include <linux/debugfs.h>
 #include <linux/genhd.h>
+#include <linux/kthread.h>
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/reboot.h>
@@ -706,6 +707,7 @@ static void bcache_device_detach(struct bcache_device *d)
 		atomic_set(&d->detaching, 0);
 	}
 
+	if (!d->flush_done)
 		bcache_device_unlink(d);
 
 	d->c->devices[d->id] = NULL;
@@ -806,6 +808,8 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
 	set_bit(QUEUE_FLAG_NONROT, &d->disk->queue->queue_flags);
 	set_bit(QUEUE_FLAG_DISCARD, &d->disk->queue->queue_flags);
 
+	blk_queue_flush(q, REQ_FLUSH|REQ_FUA);
+
 	return 0;
 }
@@ -1053,6 +1057,14 @@ static void cached_dev_flush(struct closure *cl)
 	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
 	struct bcache_device *d = &dc->disk;
 
+	mutex_lock(&bch_register_lock);
+	d->flush_done = 1;
+
+	if (d->c)
+		bcache_device_unlink(d);
+
+	mutex_unlock(&bch_register_lock);
+
 	bch_cache_accounting_destroy(&dc->accounting);
 	kobject_del(&d->kobj);
@@ -1318,11 +1330,9 @@ static void cache_set_free(struct closure *cl)
 static void cache_set_flush(struct closure *cl)
 {
 	struct cache_set *c = container_of(cl, struct cache_set, caching);
+	struct cache *ca;
 	struct btree *b;
+	unsigned i;
 
-	/* Shut down allocator threads */
-	set_bit(CACHE_SET_STOPPING_2, &c->flags);
-	wake_up_allocators(c);
-
 	bch_cache_accounting_destroy(&c->accounting);
@@ -1337,24 +1347,32 @@ static void cache_set_flush(struct closure *cl)
 		if (btree_node_dirty(b))
 			bch_btree_node_write(b, NULL);
 
+	for_each_cache(ca, c, i)
+		if (ca->alloc_thread)
+			kthread_stop(ca->alloc_thread);
+
 	closure_return(cl);
 }
 
 static void __cache_set_unregister(struct closure *cl)
 {
 	struct cache_set *c = container_of(cl, struct cache_set, caching);
-	struct cached_dev *dc, *t;
+	struct cached_dev *dc;
 	size_t i;
 
 	mutex_lock(&bch_register_lock);
 
-	if (test_bit(CACHE_SET_UNREGISTERING, &c->flags))
-		list_for_each_entry_safe(dc, t, &c->cached_devs, list)
-			bch_cached_dev_detach(dc);
-
 	for (i = 0; i < c->nr_uuids; i++)
-		if (c->devices[i] && UUID_FLASH_ONLY(&c->uuids[i]))
-			bcache_device_stop(c->devices[i]);
+		if (c->devices[i]) {
+			if (!UUID_FLASH_ONLY(&c->uuids[i]) &&
+			    test_bit(CACHE_SET_UNREGISTERING, &c->flags)) {
+				dc = container_of(c->devices[i],
						  struct cached_dev, disk);
+				bch_cached_dev_detach(dc);
+			} else {
+				bcache_device_stop(c->devices[i]);
+			}
+		}
 
 	mutex_unlock(&bch_register_lock);
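The reworked __cache_set_unregister() now walks the attached devices one by one: when the cache set is being unregistered, backing devices are detached (recovered from the bcache_device with container_of()) while flash-only volumes are stopped; when the set is only stopping, every device is stopped. cache_set_flush() also gains the loop that stops the per-cache allocator kthreads once the btree has been written out, which is what let CACHE_SET_STOPPING_2 and the closure reference formerly taken in bch_cache_allocator_start() go away.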
drivers/md/bcache/sysfs.c

@@ -232,6 +232,8 @@ STORE(__cached_dev)
 			bch_uuid_write(dc->disk.c);
 		}
 
 		env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
+		if (!env)
+			return -ENOMEM;
+
 		add_uevent_var(env, "DRIVER=bcache");
 		add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid),
 		add_uevent_var(env, "CACHED_LABEL=%s", buf);