Commit 280481d0 authored by Kent Overstreet

bcache: Debug code improvements

Couple changes:
 * Consolidate bch_check_keys() and bch_check_key_order(), and move the
   checks that only check_key_order() could do to bch_btree_iter_next().

 * Get rid of CONFIG_BCACHE_EDEBUG - now, all that code is compiled in
   when CONFIG_BCACHE_DEBUG is enabled, and there's now a sysfs file to
   flip on the EDEBUG checks at runtime.

 * Dropped an old, not terribly useful check in rw_unlock(), and
   refactored/improved some of the other debug code.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
parent e58ff155
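The core pattern of the change, as a minimal sketch (simplified and illustrative, not the actual bcache code): the flag lives in struct cache_set, and a helper macro reads it when CONFIG_BCACHE_DEBUG is built in but collapses to a constant 0 otherwise, so non-debug builds compile the checks out entirely. The flag is exported through sysfs (see the sysfs.c hunk below), so on a debug build it can presumably be flipped at runtime via a file such as /sys/fs/bcache/<set-uuid>/internal/expensive_debug_checks (path assumed from the attribute being added to bch_cache_set_internal_files).

	/* Sketch of the runtime-toggle pattern; names mirror the diff but are simplified. */
	struct cache_set {
		unsigned		expensive_debug_checks:1;
		/* ... other fields elided ... */
	};

	#ifdef CONFIG_BCACHE_DEBUG
	#define expensive_debug_checks(c)	((c)->expensive_debug_checks)
	#else
	#define expensive_debug_checks(c)	0	/* checks become dead code */
	#endif

	static void example_expensive_check(struct cache_set *c)
	{
		if (expensive_debug_checks(c)) {
			/* walk freelists, verify key ordering, etc. */
		}
	}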
@@ -13,15 +13,8 @@ config BCACHE_DEBUG
 	---help---
 	Don't select this option unless you're a developer
 
-	Enables extra debugging tools (primarily a fuzz tester)
+	Enables extra debugging tools, allows expensive runtime checks to be
+	turned on.
 
-config BCACHE_EDEBUG
-	bool "Extended runtime checks"
-	depends on BCACHE
-	---help---
-	Don't select this option unless you're a developer
-
-	Enables extra runtime checks which significantly affect performance
 
 config BCACHE_CLOSURES_DEBUG
 	bool "Debug closures"
@@ -398,8 +398,7 @@ long bch_bucket_alloc(struct cache *ca, unsigned watermark, bool wait)
 out:
 	wake_up_process(ca->alloc_thread);
 
-#ifdef CONFIG_BCACHE_EDEBUG
-	{
+	if (expensive_debug_checks(ca->set)) {
 		size_t iter;
 		long i;
 
@@ -413,7 +412,7 @@ long bch_bucket_alloc(struct cache *ca, unsigned watermark, bool wait)
 		fifo_for_each(i, &ca->unused, iter)
 			BUG_ON(i == r);
 	}
-#endif
 
 	b = ca->buckets + r;
 
 	BUG_ON(atomic_read(&b->pin) != 1);
@@ -690,6 +690,7 @@ struct cache_set {
 	unsigned short		journal_delay_ms;
 	unsigned		verify:1;
 	unsigned		key_merging_disabled:1;
+	unsigned		expensive_debug_checks:1;
 	unsigned		gc_always_rewrite:1;
 	unsigned		shrinker_disabled:1;
 	unsigned		copy_gc_enabled:1;
@@ -698,15 +699,6 @@ struct cache_set {
 	struct hlist_head	bucket_hash[1 << BUCKET_HASH_BITS];
 };
 
-static inline bool key_merging_disabled(struct cache_set *c)
-{
-#ifdef CONFIG_BCACHE_DEBUG
-	return c->key_merging_disabled;
-#else
-	return 0;
-#endif
-}
-
 struct bbio {
 	unsigned		submit_time_us;
 	union {
@@ -106,6 +106,43 @@ bool __bch_ptr_invalid(struct cache_set *c, int level, const struct bkey *k)
 	return true;
 }
 
+static bool ptr_bad_expensive_checks(struct btree *b, const struct bkey *k,
+				     unsigned ptr)
+{
+	struct bucket *g = PTR_BUCKET(b->c, k, ptr);
+	char buf[80];
+
+	if (mutex_trylock(&b->c->bucket_lock)) {
+		if (b->level) {
+			if (KEY_DIRTY(k) ||
+			    g->prio != BTREE_PRIO ||
+			    (b->c->gc_mark_valid &&
+			     GC_MARK(g) != GC_MARK_METADATA))
+				goto err;
+		} else {
+			if (g->prio == BTREE_PRIO)
+				goto err;
+
+			if (KEY_DIRTY(k) &&
+			    b->c->gc_mark_valid &&
+			    GC_MARK(g) != GC_MARK_DIRTY)
+				goto err;
+		}
+		mutex_unlock(&b->c->bucket_lock);
+	}
+
+	return false;
+err:
+	mutex_unlock(&b->c->bucket_lock);
+	bch_bkey_to_text(buf, sizeof(buf), k);
+	btree_bug(b,
+"inconsistent pointer %s: bucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
+		  buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin),
+		  g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
+	return true;
+}
+
 bool bch_ptr_bad(struct btree *b, const struct bkey *k)
 {
 	struct bucket *g;
@@ -133,46 +170,12 @@ bool bch_ptr_bad(struct btree *b, const struct bkey *k)
 		if (stale)
 			return true;
 
-#ifdef CONFIG_BCACHE_EDEBUG
-		if (!mutex_trylock(&b->c->bucket_lock))
-			continue;
-
-		if (b->level) {
-			if (KEY_DIRTY(k) ||
-			    g->prio != BTREE_PRIO ||
-			    (b->c->gc_mark_valid &&
-			     GC_MARK(g) != GC_MARK_METADATA))
-				goto bug;
-		} else {
-			if (g->prio == BTREE_PRIO)
-				goto bug;
-
-			if (KEY_DIRTY(k) &&
-			    b->c->gc_mark_valid &&
-			    GC_MARK(g) != GC_MARK_DIRTY)
-				goto bug;
-		}
-		mutex_unlock(&b->c->bucket_lock);
-#endif
+		if (expensive_debug_checks(b->c) &&
+		    ptr_bad_expensive_checks(b, k, i))
+			return true;
 	}
 
 	return false;
-
-#ifdef CONFIG_BCACHE_EDEBUG
-bug:
-	mutex_unlock(&b->c->bucket_lock);
-	{
-		char buf[80];
-
-		bch_bkey_to_text(buf, sizeof(buf), k);
-		btree_bug(b,
-"inconsistent pointer %s: bucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
-			  buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
-			  g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
-	}
-	return true;
-#endif
 }
 
 /* Key/pointer manipulation */
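The new ptr_bad_expensive_checks() above takes bucket_lock with mutex_trylock() and simply skips validation when the lock is contended, presumably to avoid blocking or deadlocking on a path that is sensitive to lock ordering; missing an occasional check is acceptable for debug code. A minimal sketch of that idiom (validate_bucket_state() is a hypothetical helper, not part of the patch):

	static bool validate_bucket_state(struct btree *b)
	{
		bool bad = false;

		if (!mutex_trylock(&b->c->bucket_lock))
			return false;	/* lock contended: skip the check this time */

		/* ... inspect bucket prios / GC marks while the lock is held ... */

		mutex_unlock(&b->c->bucket_lock);
		return bad;
	}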
@@ -821,7 +824,7 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
 	} else
 		i = bset_search_write_set(b, t, search);
 
-#ifdef CONFIG_BCACHE_EDEBUG
+	if (expensive_debug_checks(b->c)) {
 	BUG_ON(bset_written(b, t) &&
 	       i.l != t->data->start &&
 	       bkey_cmp(tree_to_prev_bkey(t,
@@ -830,7 +833,7 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
 
 	BUG_ON(i.r != end(t->data) &&
 	       bkey_cmp(i.r, search) <= 0);
-#endif
+	}
 
 	while (likely(i.l != i.r) &&
 	       bkey_cmp(i.l, search) <= 0)
@@ -877,6 +880,10 @@ struct bkey *__bch_btree_iter_init(struct btree *b, struct btree_iter *iter,
 	iter->size = ARRAY_SIZE(iter->data);
 	iter->used = 0;
 
+#ifdef CONFIG_BCACHE_DEBUG
+	iter->b = b;
+#endif
+
 	for (; start <= &b->sets[b->nsets]; start++) {
 		ret = bch_bset_search(b, start, search);
 		bch_btree_iter_push(iter, ret, end(start->data));
@@ -891,6 +898,8 @@ struct bkey *bch_btree_iter_next(struct btree_iter *iter)
 	struct bkey *ret = NULL;
 
 	if (!btree_iter_end(iter)) {
+		bch_btree_iter_next_check(iter);
+
 		ret = iter->data->k;
 		iter->data->k = bkey_next(iter->data->k);
 
@@ -1002,7 +1011,6 @@ static void btree_mergesort(struct btree *b, struct bset *out,
 	out->keys = last ? (uint64_t *) bkey_next(last) - out->d : 0;
 
 	pr_debug("sorted %i keys", out->keys);
-
-	bch_check_key_order(b, out);
 }
 
 static void __btree_sort(struct btree *b, struct btree_iter *iter,
@@ -1063,15 +1071,15 @@ static void __btree_sort(struct btree *b, struct btree_iter *iter,
 
 void bch_btree_sort_partial(struct btree *b, unsigned start)
 {
-	size_t oldsize = 0, order = b->page_order, keys = 0;
+	size_t order = b->page_order, keys = 0;
 	struct btree_iter iter;
+	int oldsize = bch_count_data(b);
+
 	__bch_btree_iter_init(b, &iter, NULL, &b->sets[start]);
 
 	BUG_ON(b->sets[b->nsets].data == write_block(b) &&
 	       (b->sets[b->nsets].size || b->nsets));
 
-	if (b->written)
-		oldsize = bch_count_data(b);
-
 	if (start) {
 		unsigned i;
 
@@ -1087,7 +1095,7 @@ void bch_btree_sort_partial(struct btree *b, unsigned start)
 
 	__btree_sort(b, &iter, start, order, false);
 
-	EBUG_ON(b->written && bch_count_data(b) != oldsize);
+	EBUG_ON(b->written && oldsize >= 0 && bch_count_data(b) != oldsize);
 }
 
 void bch_btree_sort_and_fix_extents(struct btree *b, struct btree_iter *iter)
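Moving the ordering check into bch_btree_iter_next() (above) means every traversal validates keys incrementally instead of relying on separate full passes over a bset. A generic sketch of that "validate while iterating" shape (int_iter and its function are illustrative only, not bcache code):

	struct int_iter {
		const int *pos, *end;
	};

	static int int_iter_next(struct int_iter *it)
	{
		int v = *it->pos++;

	#ifdef CONFIG_BCACHE_DEBUG
		/* the element we will return next must not sort before this one */
		if (it->pos < it->end && *it->pos < v)
			panic("key skipped backwards\n");
	#endif

		return v;
	}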
@@ -148,6 +148,9 @@
 
 struct btree_iter {
 	size_t size, used;
+#ifdef CONFIG_BCACHE_DEBUG
+	struct btree *b;
+#endif
 	struct btree_iter_set {
 		struct bkey *k, *end;
 	} data[MAX_BSETS];
@@ -216,6 +216,10 @@ static void bch_btree_node_read_done(struct btree *b)
 	iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
 	iter->used = 0;
 
+#ifdef CONFIG_BCACHE_DEBUG
+	iter->b = b;
+#endif
+
 	if (!i->seq)
 		goto err;
 
@@ -454,7 +458,7 @@ void bch_btree_node_write(struct btree *b, struct closure *parent)
 	BUG_ON(b->written >= btree_blocks(b));
 	BUG_ON(b->written && !i->keys);
 	BUG_ON(b->sets->data->seq != i->seq);
-	bch_check_key_order(b, i);
+	bch_check_keys(b, "writing");
 
 	cancel_delayed_work(&b->work);
 
@@ -1917,7 +1921,7 @@ static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
 				  struct bkey *replace_key)
 {
 	bool ret = false;
-	unsigned oldsize = bch_count_data(b);
+	int oldsize = bch_count_data(b);
 
 	while (!bch_keylist_empty(insert_keys)) {
 		struct bset *i = write_block(b);
@@ -259,14 +259,6 @@ static inline void rw_lock(bool w, struct btree *b, int level)
 
 static inline void rw_unlock(bool w, struct btree *b)
 {
-#ifdef CONFIG_BCACHE_EDEBUG
-	unsigned i;
-
-	if (w && b->key.ptr[0])
-		for (i = 0; i <= b->nsets; i++)
-			bch_check_key_order(b, b->sets[i].data);
-#endif
-
 	if (w)
 		b->seq++;
 	(w ? up_write : up_read)(&b->lock);
@@ -76,29 +76,17 @@ int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k)
 	return out - buf;
 }
 
-int bch_btree_to_text(char *buf, size_t size, const struct btree *b)
-{
-	return scnprintf(buf, size, "%zu level %i/%i",
-			 PTR_BUCKET_NR(b->c, &b->key, 0),
-			 b->level, b->c->root ? b->c->root->level : -1);
-}
-
-#if defined(CONFIG_BCACHE_DEBUG) || defined(CONFIG_BCACHE_EDEBUG)
-
-static bool skipped_backwards(struct btree *b, struct bkey *k)
-{
-	return bkey_cmp(k, (!b->level)
-			? &START_KEY(bkey_next(k))
-			: bkey_next(k)) > 0;
-}
+#ifdef CONFIG_BCACHE_DEBUG
 
 static void dump_bset(struct btree *b, struct bset *i)
 {
-	struct bkey *k;
+	struct bkey *k, *next;
 	unsigned j;
 	char buf[80];
 
-	for (k = i->start; k < end(i); k = bkey_next(k)) {
+	for (k = i->start; k < end(i); k = next) {
+		next = bkey_next(k);
+
 		bch_bkey_to_text(buf, sizeof(buf), k);
 		printk(KERN_ERR "block %zu key %zi/%u: %s", index(i, b),
 		       (uint64_t *) k - i->d, i->keys, buf);
@@ -114,15 +102,21 @@ static void dump_bset(struct btree *b, struct bset *i)
 
 		printk(" %s\n", bch_ptr_status(b->c, k));
 
-		if (bkey_next(k) < end(i) &&
-		    skipped_backwards(b, k))
+		if (next < end(i) &&
+		    bkey_cmp(k, !b->level ? &START_KEY(next) : next) > 0)
 			printk(KERN_ERR "Key skipped backwards\n");
 	}
 }
 
-#endif
-
-#ifdef CONFIG_BCACHE_DEBUG
+static void bch_dump_bucket(struct btree *b)
+{
+	unsigned i;
+
+	console_lock();
+	for (i = 0; i <= b->nsets; i++)
+		dump_bset(b, b->sets[i].data);
+	console_unlock();
+}
 
 void bch_btree_verify(struct btree *b, struct bset *new)
 {
@@ -211,11 +205,7 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
 	bio_put(check);
 }
 
-#endif
-
-#ifdef CONFIG_BCACHE_EDEBUG
-
-unsigned bch_count_data(struct btree *b)
+int __bch_count_data(struct btree *b)
 {
 	unsigned ret = 0;
 	struct btree_iter iter;
@@ -227,72 +217,60 @@ unsigned bch_count_data(struct btree *b)
 	return ret;
 }
 
-static void vdump_bucket_and_panic(struct btree *b, const char *fmt,
-				   va_list args)
-{
-	unsigned i;
-	char buf[80];
-
-	console_lock();
-
-	for (i = 0; i <= b->nsets; i++)
-		dump_bset(b, b->sets[i].data);
-
-	vprintk(fmt, args);
-
-	console_unlock();
-
-	bch_btree_to_text(buf, sizeof(buf), b);
-	panic("at %s\n", buf);
-}
-
-void bch_check_key_order_msg(struct btree *b, struct bset *i,
-			     const char *fmt, ...)
-{
-	struct bkey *k;
-
-	if (!i->keys)
-		return;
-
-	for (k = i->start; bkey_next(k) < end(i); k = bkey_next(k))
-		if (skipped_backwards(b, k)) {
-			va_list args;
-			va_start(args, fmt);
-
-			vdump_bucket_and_panic(b, fmt, args);
-			va_end(args);
-		}
-}
-
-void bch_check_keys(struct btree *b, const char *fmt, ...)
-{
-	va_list args;
-	struct bkey *k, *p = NULL;
-	struct btree_iter iter;
-
-	if (b->level)
-		return;
-
-	for_each_key(b, k, &iter) {
-		if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0) {
-			printk(KERN_ERR "Keys out of order:\n");
-			goto bug;
-		}
-
-		if (bch_ptr_invalid(b, k))
-			continue;
-
-		if (p && bkey_cmp(p, &START_KEY(k)) > 0) {
-			printk(KERN_ERR "Overlapping keys:\n");
-			goto bug;
-		}
-		p = k;
-	}
-	return;
-bug:
-	va_start(args, fmt);
-	vdump_bucket_and_panic(b, fmt, args);
-	va_end(args);
-}
+void __bch_check_keys(struct btree *b, const char *fmt, ...)
+{
+	va_list args;
+	struct bkey *k, *p = NULL;
+	struct btree_iter iter;
+	const char *err;
+
+	for_each_key(b, k, &iter) {
+		if (!b->level) {
+			err = "Keys out of order";
+			if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0)
+				goto bug;
+
+			if (bch_ptr_invalid(b, k))
+				continue;
+
+			err = "Overlapping keys";
+			if (p && bkey_cmp(p, &START_KEY(k)) > 0)
+				goto bug;
+		} else {
+			if (bch_ptr_bad(b, k))
+				continue;
+
+			err = "Duplicate keys";
+			if (p && !bkey_cmp(p, k))
+				goto bug;
+		}
+		p = k;
+	}
+
+	err = "Key larger than btree node key";
+	if (p && bkey_cmp(p, &b->key) > 0)
+		goto bug;
+
+	return;
+bug:
+	bch_dump_bucket(b);
+
+	va_start(args, fmt);
+	vprintk(fmt, args);
+	va_end(args);
+
+	panic("bcache error: %s:\n", err);
+}
+
+void bch_btree_iter_next_check(struct btree_iter *iter)
+{
+	struct bkey *k = iter->data->k, *next = bkey_next(k);
+
+	if (next < iter->data->end &&
+	    bkey_cmp(k, iter->b->level ? next : &START_KEY(next)) > 0) {
+		bch_dump_bucket(iter->b);
+		panic("Key skipped backwards\n");
+	}
+}
 
 #endif
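The rewritten __bch_check_keys() funnels every failure through one bug: label: each check just names its failure in err and jumps, and a single exit path dumps the node and panics. A self-contained sketch of the idiom (struct widget and check_widget() are hypothetical, not bcache code):

	struct widget {
		int lo, hi;
	};

	static void check_widget(struct widget *w)
	{
		const char *err;

		err = "lo past hi";
		if (w->lo > w->hi)
			goto bug;

		err = "empty range";
		if (w->lo == w->hi)
			goto bug;

		return;
	bug:
		/* one exit path: dump state and say which invariant failed */
		panic("widget check failed: %s (lo=%i hi=%i)\n", err, w->lo, w->hi);
	}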
@@ -4,40 +4,42 @@
 /* Btree/bkey debug printing */
 
 int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k);
-int bch_btree_to_text(char *buf, size_t size, const struct btree *b);
-
-#ifdef CONFIG_BCACHE_EDEBUG
-
-unsigned bch_count_data(struct btree *);
-void bch_check_key_order_msg(struct btree *, struct bset *, const char *, ...);
-void bch_check_keys(struct btree *, const char *, ...);
-
-#define bch_check_key_order(b, i)			\
-	bch_check_key_order_msg(b, i, "keys out of order")
-#define EBUG_ON(cond)					BUG_ON(cond)
-
-#else /* EDEBUG */
-
-#define bch_count_data(b)				0
-#define bch_check_key_order(b, i)			do {} while (0)
-#define bch_check_key_order_msg(b, i, ...)		do {} while (0)
-#define bch_check_keys(b, ...)				do {} while (0)
-#define EBUG_ON(cond)					do {} while (0)
-
-#endif
 
 #ifdef CONFIG_BCACHE_DEBUG
 
 void bch_btree_verify(struct btree *, struct bset *);
 void bch_data_verify(struct cached_dev *, struct bio *);
+int __bch_count_data(struct btree *);
+void __bch_check_keys(struct btree *, const char *, ...);
+void bch_btree_iter_next_check(struct btree_iter *);
+
+#define EBUG_ON(cond)			BUG_ON(cond)
+#define expensive_debug_checks(c)	((c)->expensive_debug_checks)
+#define key_merging_disabled(c)		((c)->key_merging_disabled)
 
 #else /* DEBUG */
 
 static inline void bch_btree_verify(struct btree *b, struct bset *i) {}
-static inline void bch_data_verify(struct cached_dev *dc, struct bio *bio) {};
+static inline void bch_data_verify(struct cached_dev *dc, struct bio *bio) {}
+static inline int __bch_count_data(struct btree *b) { return -1; }
+static inline void __bch_check_keys(struct btree *b, const char *fmt, ...) {}
+static inline void bch_btree_iter_next_check(struct btree_iter *iter) {}
+
+#define EBUG_ON(cond)			do { if (cond); } while (0)
+#define expensive_debug_checks(c)	0
+#define key_merging_disabled(c)		0
 
 #endif
 
+#define bch_count_data(b)						\
+	(expensive_debug_checks((b)->c) ? __bch_count_data(b) : -1)
+
+#define bch_check_keys(b, ...)						\
+do {									\
+	if (expensive_debug_checks((b)->c))				\
+		__bch_check_keys(b, __VA_ARGS__);			\
+} while (0)
+
 #ifdef CONFIG_DEBUG_FS
 void bch_debug_init_cache_set(struct cache_set *);
 #else
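With these wrappers, one call site serves all three configurations: with CONFIG_BCACHE_DEBUG disabled, expensive_debug_checks() is the constant 0, so the checks are dead code and the empty inlines are never referenced; with it enabled, a call costs a single branch on the bitfield until the sysfs flag is set, at which point the full verification runs. The non-debug EBUG_ON() form "do { if (cond); } while (0)" presumably keeps the condition parsed so variables used only in assertions do not trigger unused warnings. A caller-side sketch (example_caller() is illustrative; bch_count_data(), bch_check_keys() and EBUG_ON() are the macros above):

	static void example_caller(struct btree *b)
	{
		int oldsize = bch_count_data(b);	/* -1 unless expensive checks are on */

		/* ... insert or sort keys ... */

		bch_check_keys(b, "after modifying the node");

		/* parsed in every configuration; only ever fires in debug builds */
		EBUG_ON(oldsize >= 0 && bch_count_data(b) != oldsize);
	}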
@@ -102,6 +102,7 @@ rw_attribute(io_error_halflife);
 rw_attribute(verify);
 rw_attribute(key_merging_disabled);
 rw_attribute(gc_always_rewrite);
+rw_attribute(expensive_debug_checks);
 rw_attribute(freelist_percent);
 rw_attribute(cache_replacement_policy);
 rw_attribute(btree_shrinker_disabled);
@@ -517,6 +518,8 @@ SHOW(__bch_cache_set)
 	sysfs_print(active_journal_entries,	fifo_used(&c->journal.pin));
 	sysfs_printf(verify,			"%i", c->verify);
 	sysfs_printf(key_merging_disabled,	"%i", c->key_merging_disabled);
+	sysfs_printf(expensive_debug_checks,
+		     "%i", c->expensive_debug_checks);
 	sysfs_printf(gc_always_rewrite,		"%i", c->gc_always_rewrite);
 	sysfs_printf(btree_shrinker_disabled,	"%i", c->shrinker_disabled);
 	sysfs_printf(copy_gc_enabled,		"%i", c->copy_gc_enabled);
@@ -599,6 +602,7 @@ STORE(__bch_cache_set)
 	sysfs_strtoul(journal_delay_ms,		c->journal_delay_ms);
 	sysfs_strtoul(verify,			c->verify);
 	sysfs_strtoul(key_merging_disabled,	c->key_merging_disabled);
+	sysfs_strtoul(expensive_debug_checks,	c->expensive_debug_checks);
 	sysfs_strtoul(gc_always_rewrite,	c->gc_always_rewrite);
 	sysfs_strtoul(btree_shrinker_disabled,	c->shrinker_disabled);
 	sysfs_strtoul(copy_gc_enabled,		c->copy_gc_enabled);
@@ -674,6 +678,7 @@ static struct attribute *bch_cache_set_internal_files[] = {
 #ifdef CONFIG_BCACHE_DEBUG
 	&sysfs_verify,
 	&sysfs_key_merging_disabled,
+	&sysfs_expensive_debug_checks,
 #endif
 	&sysfs_gc_always_rewrite,
 	&sysfs_btree_shrinker_disabled,
@@ -15,12 +15,12 @@
 
 struct closure;
 
-#ifdef CONFIG_BCACHE_EDEBUG
+#ifdef CONFIG_BCACHE_DEBUG
 
 #define atomic_dec_bug(v)	BUG_ON(atomic_dec_return(v) < 0)
 #define atomic_inc_bug(v, i)	BUG_ON(atomic_inc_return(v) <= i)
 
-#else /* EDEBUG */
+#else /* DEBUG */
 
 #define atomic_dec_bug(v)	atomic_dec(v)
 #define atomic_inc_bug(v, i)	atomic_inc(v)