Commit 96d2e8b5 authored by Jens Axboe

Merge branch 'bcache-for-3.14' of git://evilpiepirate.org/~kent/linux-bcache into for-linus

parents 556ee818 e3b4825b
...@@ -210,7 +210,9 @@ BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2); ...@@ -210,7 +210,9 @@ BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2);
#define GC_MARK_RECLAIMABLE 0 #define GC_MARK_RECLAIMABLE 0
#define GC_MARK_DIRTY 1 #define GC_MARK_DIRTY 1
#define GC_MARK_METADATA 2 #define GC_MARK_METADATA 2
BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 13); #define GC_SECTORS_USED_SIZE 13
#define MAX_GC_SECTORS_USED (~(~0ULL << GC_SECTORS_USED_SIZE))
BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, GC_SECTORS_USED_SIZE);
BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1); BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);
#include "journal.h" #include "journal.h"
......
...@@ -23,7 +23,7 @@ void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set) ...@@ -23,7 +23,7 @@ void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set)
for (k = i->start; k < bset_bkey_last(i); k = next) { for (k = i->start; k < bset_bkey_last(i); k = next) {
next = bkey_next(k); next = bkey_next(k);
printk(KERN_ERR "block %u key %zi/%u: ", set, printk(KERN_ERR "block %u key %li/%u: ", set,
(uint64_t *) k - i->d, i->keys); (uint64_t *) k - i->d, i->keys);
if (b->ops->key_dump) if (b->ops->key_dump)
...@@ -1185,9 +1185,12 @@ static void __btree_sort(struct btree_keys *b, struct btree_iter *iter, ...@@ -1185,9 +1185,12 @@ static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOIO, struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOIO,
order); order);
if (!out) { if (!out) {
struct page *outp;
BUG_ON(order > state->page_order); BUG_ON(order > state->page_order);
out = page_address(mempool_alloc(state->pool, GFP_NOIO)); outp = mempool_alloc(state->pool, GFP_NOIO);
out = page_address(outp);
used_mempool = true; used_mempool = true;
order = state->page_order; order = state->page_order;
} }
......
...@@ -1167,7 +1167,7 @@ uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k) ...@@ -1167,7 +1167,7 @@ uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k)
/* guard against overflow */ /* guard against overflow */
SET_GC_SECTORS_USED(g, min_t(unsigned, SET_GC_SECTORS_USED(g, min_t(unsigned,
GC_SECTORS_USED(g) + KEY_SIZE(k), GC_SECTORS_USED(g) + KEY_SIZE(k),
(1 << 14) - 1)); MAX_GC_SECTORS_USED));
BUG_ON(!GC_SECTORS_USED(g)); BUG_ON(!GC_SECTORS_USED(g));
} }
...@@ -1805,7 +1805,7 @@ static bool btree_insert_key(struct btree *b, struct bkey *k, ...@@ -1805,7 +1805,7 @@ static bool btree_insert_key(struct btree *b, struct bkey *k,
static size_t insert_u64s_remaining(struct btree *b) static size_t insert_u64s_remaining(struct btree *b)
{ {
ssize_t ret = bch_btree_keys_u64s_remaining(&b->keys); long ret = bch_btree_keys_u64s_remaining(&b->keys);
/* /*
* Might land in the middle of an existing extent and have to split it * Might land in the middle of an existing extent and have to split it
......
...@@ -194,7 +194,7 @@ static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k) ...@@ -194,7 +194,7 @@ static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k)
mutex_unlock(&b->c->bucket_lock); mutex_unlock(&b->c->bucket_lock);
bch_extent_to_text(buf, sizeof(buf), k); bch_extent_to_text(buf, sizeof(buf), k);
btree_bug(b, btree_bug(b,
"inconsistent btree pointer %s: bucket %li pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i", "inconsistent btree pointer %s: bucket %zi pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin), buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen); g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
return true; return true;
......
...@@ -353,14 +353,14 @@ static void bch_data_insert_start(struct closure *cl) ...@@ -353,14 +353,14 @@ static void bch_data_insert_start(struct closure *cl)
struct data_insert_op *op = container_of(cl, struct data_insert_op, cl); struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
struct bio *bio = op->bio, *n; struct bio *bio = op->bio, *n;
if (op->bypass)
return bch_data_invalidate(cl);
if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) { if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
set_gc_sectors(op->c); set_gc_sectors(op->c);
wake_up_gc(op->c); wake_up_gc(op->c);
} }
if (op->bypass)
return bch_data_invalidate(cl);
/* /*
* Journal writes are marked REQ_FLUSH; if the original write was a * Journal writes are marked REQ_FLUSH; if the original write was a
* flush, it'll wait on the journal write. * flush, it'll wait on the journal write.
......
...@@ -416,7 +416,7 @@ static int btree_bset_stats(struct btree_op *b_op, struct btree *b) ...@@ -416,7 +416,7 @@ static int btree_bset_stats(struct btree_op *b_op, struct btree *b)
return MAP_CONTINUE; return MAP_CONTINUE;
} }
int bch_bset_print_stats(struct cache_set *c, char *buf) static int bch_bset_print_stats(struct cache_set *c, char *buf)
{ {
struct bset_stats_op op; struct bset_stats_op op;
int ret; int ret;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment