Commit 29364f34 authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Drop sysfs interface to debug parameters

It's not used much anymore; the module parameter interface is better.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 2f33ece9
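
The module parameter interface referred to above: bcachefs generates its debug knobs from the BCH_DEBUG_PARAMS() x-macro list, and the module expands that same list into writable parameters under /sys/module/bcachefs/parameters/. A minimal sketch of that expansion (not part of this diff; the exact code in bcachefs.c may differ):

#include <linux/module.h>

/* Each x-macro entry becomes one global bool plus a writable module
 * parameter named after it, carrying its description string. */
#define BCH_DEBUG_PARAM(name, description)			\
	bool bch2_##name;					\
	module_param_named(name, bch2_##name, bool, 0644);	\
	MODULE_PARM_DESC(name, description);
BCH_DEBUG_PARAMS()
#undef BCH_DEBUG_PARAM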
@@ -74,7 +74,7 @@ static inline void bch2_wake_allocator(struct bch_dev *ca)
static inline void verify_not_on_freelist(struct bch_fs *c, struct bch_dev *ca,
size_t bucket)
{
-if (expensive_debug_checks(c)) {
+if (bch2_expensive_debug_checks) {
size_t iter;
long i;
unsigned j;
......
@@ -295,6 +295,16 @@ do { \
#define BCH_DEBUG_PARAMS() BCH_DEBUG_PARAMS_ALWAYS()
#endif
+#define BCH_DEBUG_PARAM(name, description) extern bool bch2_##name;
+BCH_DEBUG_PARAMS()
+#undef BCH_DEBUG_PARAM
+
+#ifndef CONFIG_BCACHEFS_DEBUG
+#define BCH_DEBUG_PARAM(name, description) static const bool bch2_##name;
+BCH_DEBUG_PARAMS_DEBUG()
+#undef BCH_DEBUG_PARAM
+#endif
+
#define BCH_TIME_STATS() \
x(btree_node_mem_alloc) \
x(btree_node_split) \
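
A note on the #ifndef CONFIG_BCACHEFS_DEBUG branch added above: declaring each debug-only knob as a zero-initialized static const bool makes it a compile-time constant false, so every if (bch2_...) guard compiles away entirely in non-debug builds. A standalone sketch of the effect (hypothetical function, real knob name):

#include <stdbool.h>
#include <stddef.h>

/* Stand-in for the generated declaration in non-debug builds:
 * const and zero-initialized, hence constant false. */
static const bool bch2_expensive_debug_checks;

static void verify_bucket(size_t bucket)
{
	(void)bucket;
	if (bch2_expensive_debug_checks) {
		/* expensive consistency checks would go here; the
		 * compiler can prove this branch dead and drop it */
	}
}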
@@ -726,7 +736,7 @@ struct bch_fs {
struct bio_set bio_read_split;
struct bio_set bio_write;
struct mutex bio_bounce_pages_lock;
-mempool_t bio_bounce_pages;
+mempool_t bio_bounce_pages;
struct rhashtable promote_table;
mempool_t compression_bounce[2];
@@ -831,10 +841,6 @@ struct bch_fs {
unsigned copy_gc_enabled:1;
bool promote_whole_extents;
-#define BCH_DEBUG_PARAM(name, description) bool name;
-BCH_DEBUG_PARAMS_ALL()
-#undef BCH_DEBUG_PARAM
-
struct bch2_time_stats times[BCH_TIME_STAT_NR];
};
......
@@ -236,7 +236,7 @@ enum merge_result bch2_bkey_merge(struct bch_fs *c,
const struct bkey_ops *ops = &bch2_bkey_ops[l.k->type];
enum merge_result ret;
-if (key_merging_disabled(c) ||
+if (bch2_key_merging_disabled ||
!ops->key_merge ||
l.k->type != r.k->type ||
bversion_cmp(l.k->version, r.k->version) ||
......
@@ -376,15 +376,13 @@ static void bset_aux_tree_verify(struct btree *b)
#endif
}
-void bch2_btree_keys_init(struct btree *b, bool *expensive_debug_checks)
+void bch2_btree_keys_init(struct btree *b)
{
unsigned i;
b->nsets = 0;
memset(&b->nr, 0, sizeof(b->nr));
-#ifdef CONFIG_BCACHEFS_DEBUG
-b->expensive_debug_checks = expensive_debug_checks;
-#endif
for (i = 0; i < MAX_BSETS; i++)
b->set[i].data_offset = U16_MAX;
@@ -510,7 +508,7 @@ static void bch2_bset_verify_rw_aux_tree(struct btree *b,
struct bkey_packed *k = btree_bkey_first(b, t);
unsigned j = 0;
-if (!btree_keys_expensive_checks(b))
+if (!bch2_expensive_debug_checks)
return;
BUG_ON(bset_has_ro_aux_tree(t));
@@ -910,7 +908,7 @@ struct bkey_packed *bch2_bkey_prev_filter(struct btree *b,
k = p;
}
-if (btree_keys_expensive_checks(b)) {
+if (bch2_expensive_debug_checks) {
BUG_ON(ret >= orig_k);
for (i = ret
@@ -1333,7 +1331,7 @@ struct bkey_packed *bch2_bset_search_linear(struct btree *b,
bkey_iter_pos_cmp(b, m, search) < 0)
m = bkey_next_skip_noops(m, btree_bkey_last(b, t));
-if (btree_keys_expensive_checks(b)) {
+if (bch2_expensive_debug_checks) {
struct bkey_packed *prev = bch2_bkey_prev_all(b, t, m);
BUG_ON(prev &&
@@ -1589,7 +1587,7 @@ static inline void __bch2_btree_node_iter_advance(struct btree_node_iter *iter,
void bch2_btree_node_iter_advance(struct btree_node_iter *iter,
struct btree *b)
{
-if (btree_keys_expensive_checks(b)) {
+if (bch2_expensive_debug_checks) {
bch2_btree_node_iter_verify(iter, b);
bch2_btree_node_iter_next_check(iter, b);
}
@@ -1608,7 +1606,7 @@ struct bkey_packed *bch2_btree_node_iter_prev_all(struct btree_node_iter *iter,
struct bset_tree *t;
unsigned end = 0;
-if (btree_keys_expensive_checks(b))
+if (bch2_expensive_debug_checks)
bch2_btree_node_iter_verify(iter, b);
for_each_bset(b, t) {
@@ -1644,7 +1642,7 @@ struct bkey_packed *bch2_btree_node_iter_prev_all(struct btree_node_iter *iter,
iter->data[0].k = __btree_node_key_to_offset(b, prev);
iter->data[0].end = end;
-if (btree_keys_expensive_checks(b))
+if (bch2_expensive_debug_checks)
bch2_btree_node_iter_verify(iter, b);
return prev;
}
......
@@ -5,7 +5,7 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include "bcachefs_format.h"
#include "bcachefs.h"
#include "bkey.h"
#include "bkey_methods.h"
#include "btree_types.h"
@@ -147,17 +147,6 @@
* first key in that range of bytes again.
*/
-extern bool bch2_expensive_debug_checks;
-
-static inline bool btree_keys_expensive_checks(const struct btree *b)
-{
-#ifdef CONFIG_BCACHEFS_DEBUG
-return bch2_expensive_debug_checks || *b->expensive_debug_checks;
-#else
-return false;
-#endif
-}
-
enum bset_aux_tree_type {
BSET_NO_AUX_TREE,
BSET_RO_AUX_TREE,
@@ -228,7 +217,7 @@ __bkey_unpack_key_format_checked(const struct btree *b,
compiled_unpack_fn unpack_fn = b->aux_data;
unpack_fn(dst, src);
-if (btree_keys_expensive_checks(b)) {
+if (bch2_expensive_debug_checks) {
struct bkey dst2 = __bch2_bkey_unpack_key(&b->format, src);
BUG_ON(memcmp(dst, &dst2, sizeof(*dst)));
@@ -366,7 +355,7 @@ static inline struct bset *bset_next_set(struct btree *b,
return ((void *) i) + round_up(vstruct_bytes(i), block_bytes);
}
-void bch2_btree_keys_init(struct btree *, bool *);
+void bch2_btree_keys_init(struct btree *);
void bch2_bset_init_first(struct btree *, struct bset *);
void bch2_bset_init_next(struct bch_fs *, struct btree *,
@@ -669,7 +658,7 @@ static inline void bch2_verify_insert_pos(struct btree *b,
static inline void bch2_verify_btree_nr_keys(struct btree *b)
{
-if (btree_keys_expensive_checks(b))
+if (bch2_expensive_debug_checks)
__bch2_verify_btree_nr_keys(b);
}
......
@@ -212,7 +212,7 @@ static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush)
* - unless btree verify mode is enabled, since it runs out of
* the post write cleanup:
*/
-if (verify_btree_ondisk(c))
+if (bch2_verify_btree_ondisk)
bch2_btree_node_write(c, b, SIX_LOCK_intent);
else
__bch2_btree_node_write(c, b, SIX_LOCK_read);
@@ -255,7 +255,7 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
unsigned long freed = 0;
unsigned i, flags;
-if (btree_shrinker_disabled(c))
+if (bch2_btree_shrinker_disabled)
return SHRINK_STOP;
/* Return -1 if we can't do anything right now */
@@ -342,7 +342,7 @@ static unsigned long bch2_btree_cache_count(struct shrinker *shrink,
btree_cache.shrink);
struct btree_cache *bc = &c->btree_cache;
-if (btree_shrinker_disabled(c))
+if (bch2_btree_shrinker_disabled)
return 0;
return btree_cache_can_free(bc) * btree_pages(c);
@@ -591,7 +591,7 @@ struct btree *bch2_btree_node_mem_alloc(struct bch_fs *c)
b->sib_u64s[0] = 0;
b->sib_u64s[1] = 0;
b->whiteout_u64s = 0;
-bch2_btree_keys_init(b, &c->expensive_debug_checks);
+bch2_btree_keys_init(b);
bch2_time_stats_update(&c->times[BCH_TIME_btree_node_mem_alloc],
start_time);
......
@@ -101,7 +101,7 @@ static int bch2_gc_mark_key(struct bch_fs *c, struct bkey_s_c k,
int ret = 0;
if (initial) {
-BUG_ON(journal_seq_verify(c) &&
+BUG_ON(bch2_journal_seq_verify &&
k.k->version.lo > journal_cur_seq(&c->journal));
/* XXX change to fsck check */
@@ -209,7 +209,7 @@ static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id,
struct btree_iter *iter;
struct btree *b;
unsigned depth = metadata_only ? 1
-: expensive_debug_checks(c) ? 0
+: bch2_expensive_debug_checks ? 0
: !btree_node_type_needs_gc(btree_id) ? 1
: 0;
u8 max_stale = 0;
@@ -236,8 +236,8 @@ static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id,
BTREE_INSERT_USE_RESERVE|
BTREE_INSERT_NOWAIT|
BTREE_INSERT_GC_LOCK_HELD);
-else if (!btree_gc_rewrite_disabled(c) &&
-(btree_gc_always_rewrite(c) || max_stale > 16))
+else if (!bch2_btree_gc_rewrite_disabled &&
+(bch2_btree_gc_always_rewrite || max_stale > 16))
bch2_btree_node_rewrite(c, iter,
b->data->keys.seq,
BTREE_INSERT_NOWAIT|
@@ -328,7 +328,7 @@ static int bch2_gc_btree_init(struct bch_fs *c,
{
struct btree *b;
unsigned target_depth = metadata_only ? 1
-: expensive_debug_checks(c) ? 0
+: bch2_expensive_debug_checks ? 0
: !btree_node_type_needs_gc(btree_id) ? 1
: 0;
u8 max_stale = 0;
@@ -835,7 +835,7 @@ int bch2_gc(struct bch_fs *c, struct journal_keys *journal_keys,
out:
if (!ret &&
(test_bit(BCH_FS_FIXED_GENS, &c->flags) ||
-(!iter && test_restart_gc(c)))) {
+(!iter && bch2_test_restart_gc))) {
/*
* XXX: make sure gens we fixed got saved
*/
......
@@ -1044,7 +1044,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct btree *b, bool have_retry
const char *invalid = bch2_bkey_val_invalid(c, u.s_c);
if (invalid ||
-(inject_invalid_keys(c) &&
+(bch2_inject_invalid_keys &&
!bversion_cmp(u.k->version, MAX_VERSION))) {
char buf[160];
......
@@ -487,7 +487,7 @@ static void bch2_btree_iter_verify_level(struct btree_iter *iter,
char buf1[100], buf2[100];
const char *msg;
-if (!debug_check_iterators(iter->trans->c))
+if (!bch2_debug_check_iterators)
return;
if (btree_iter_type(iter) == BTREE_ITER_CACHED) {
@@ -583,7 +583,7 @@ void bch2_btree_trans_verify_iters(struct btree_trans *trans, struct btree *b)
{
struct btree_iter *iter;
-if (!debug_check_iterators(trans->c))
+if (!bch2_debug_check_iterators)
return;
trans_for_each_iter_with_node(trans, b, iter)
@@ -755,7 +755,7 @@ void bch2_btree_node_iter_fix(struct btree_iter *iter,
__bch2_btree_node_iter_fix(iter, b, node_iter, t,
where, clobber_u64s, new_u64s);
-if (debug_check_iterators(iter->trans->c))
+if (bch2_debug_check_iterators)
bch2_btree_node_iter_verify(node_iter, b);
}
@@ -785,7 +785,7 @@ static inline struct bkey_s_c __btree_iter_unpack(struct btree_iter *iter,
ret = bkey_disassemble(l->b, k, u);
-if (debug_check_bkeys(iter->trans->c))
+if (bch2_debug_check_bkeys)
bch2_bkey_debugcheck(iter->trans->c, l->b, ret);
return ret;
@@ -1566,13 +1566,13 @@ static inline struct bkey_s_c btree_iter_peek_uptodate(struct btree_iter *iter)
ret.v = bkeyp_val(&l->b->format, _k);
-if (debug_check_iterators(iter->trans->c)) {
+if (bch2_debug_check_iterators) {
struct bkey k = bkey_unpack_key(l->b, _k);
BUG_ON(memcmp(&k, &iter->k, sizeof(k)));
}
-if (debug_check_bkeys(iter->trans->c))
+if (bch2_debug_check_bkeys)
bch2_bkey_debugcheck(iter->trans->c, l->b, ret);
}
......
@@ -130,10 +130,6 @@ struct btree {
struct btree_write writes[2];
-#ifdef CONFIG_BCACHEFS_DEBUG
-bool *expensive_debug_checks;
-#endif
-
/* Key/pointer for this btree node */
__BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
};
......
@@ -220,7 +220,7 @@ static inline void btree_insert_entry_checks(struct btree_trans *trans,
struct bch_fs *c = trans->c;
BUG_ON(bkey_cmp(insert->k.p, iter->pos));
-BUG_ON(debug_check_bkeys(c) &&
+BUG_ON(bch2_debug_check_bkeys &&
bch2_bkey_invalid(c, bkey_i_to_s_c(insert),
__btree_node_type(iter->level, iter->btree_id)));
}
@@ -440,10 +440,10 @@ bch2_trans_commit_write_locked(struct btree_trans *trans,
*/
if (!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY)) {
-if (journal_seq_verify(c))
+if (bch2_journal_seq_verify)
trans_for_each_update2(trans, i)
i->k->k.version.lo = trans->journal_res.seq;
-else if (inject_invalid_keys(c))
+else if (bch2_inject_invalid_keys)
trans_for_each_update2(trans, i)
i->k->k.version = MAX_VERSION;
}
......
@@ -54,7 +54,7 @@ void __bch2_btree_verify(struct bch_fs *c, struct btree *b)
v->written = 0;
v->c.level = b->c.level;
v->c.btree_id = b->c.btree_id;
-bch2_btree_keys_init(v, &c->expensive_debug_checks);
+bch2_btree_keys_init(v);
if (bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key),
NULL, &pick) <= 0)
......
@@ -8,44 +8,15 @@ struct bio;
struct btree;
struct bch_fs;
-#define BCH_DEBUG_PARAM(name, description) extern bool bch2_##name;
-BCH_DEBUG_PARAMS()
-#undef BCH_DEBUG_PARAM
-
-#define BCH_DEBUG_PARAM(name, description) \
-static inline bool name(struct bch_fs *c) \
-{ return bch2_##name || c->name; }
-BCH_DEBUG_PARAMS_ALWAYS()
-#undef BCH_DEBUG_PARAM
-
#ifdef CONFIG_BCACHEFS_DEBUG
-#define BCH_DEBUG_PARAM(name, description) \
-static inline bool name(struct bch_fs *c) \
-{ return bch2_##name || c->name; }
-BCH_DEBUG_PARAMS_DEBUG()
-#undef BCH_DEBUG_PARAM
-
void __bch2_btree_verify(struct bch_fs *, struct btree *);
-
-#define bypass_torture_test(d) ((d)->bypass_torture_test)
-
-#else /* DEBUG */
-
-#define BCH_DEBUG_PARAM(name, description) \
-static inline bool name(struct bch_fs *c) { return false; }
-BCH_DEBUG_PARAMS_DEBUG()
-#undef BCH_DEBUG_PARAM
-
+#else
static inline void __bch2_btree_verify(struct bch_fs *c, struct btree *b) {}
-
-#define bypass_torture_test(d) 0
-
#endif
static inline void bch2_btree_verify(struct bch_fs *c, struct btree *b)
{
-if (verify_btree_ondisk(c))
+if (bch2_verify_btree_ondisk)
__bch2_btree_verify(c, b);
}
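
The generated wrappers deleted from this header are the crux of the patch: each one OR'd the global module parameter with a per-filesystem field that sysfs could toggle. A self-contained sketch of the before/after semantics, using verify_btree_ondisk as the example (types reduced to the relevant field):

#include <stdbool.h>

bool bch2_verify_btree_ondisk;		/* global module parameter */

struct bch_fs {
	bool verify_btree_ondisk;	/* old per-fs sysfs knob, now gone */
};

/* Before: the wrapper consulted both sources. */
static inline bool verify_btree_ondisk(const struct bch_fs *c)
{
	return bch2_verify_btree_ondisk || c->verify_btree_ondisk;
}

/* After: call sites read the global directly, so the setting can no
 * longer be toggled per mounted filesystem. */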
......
@@ -88,7 +88,7 @@ static inline bool ptr_better(struct bch_fs *c,
return bch2_rand_range(l1 + l2) > l1;
}
-if (force_reconstruct_read(c))
+if (bch2_force_reconstruct_read)
return p1.idx > p2.idx;
return p1.idx < p2.idx;
@@ -136,7 +136,7 @@ int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
!bch2_dev_is_readable(ca))
p.idx++;
-if (force_reconstruct_read(c) &&
+if (bch2_force_reconstruct_read &&
!p.idx && p.has_ec)
p.idx++;
......
@@ -208,12 +208,6 @@ read_attribute(io_timers_write);
write_attribute(perf_test);
#endif /* CONFIG_BCACHEFS_TESTS */
-
-#define BCH_DEBUG_PARAM(name, description) \
-rw_attribute(name);
-BCH_DEBUG_PARAMS()
-#undef BCH_DEBUG_PARAM
-
#define x(_name) \
static struct attribute sysfs_time_stat_##_name = \
{ .name = #_name, .mode = S_IRUGO };
@@ -414,10 +408,6 @@ SHOW(bch2_fs)
return out.pos - buf;
}
-#define BCH_DEBUG_PARAM(name, description) sysfs_print(name, c->name);
-BCH_DEBUG_PARAMS()
-#undef BCH_DEBUG_PARAM
-
return 0;
}
@@ -462,10 +452,6 @@ STORE(bch2_fs)
/* Debugging: */
-#define BCH_DEBUG_PARAM(name, description) sysfs_strtoul(name, c->name);
-BCH_DEBUG_PARAMS()
-#undef BCH_DEBUG_PARAM
-
if (!test_bit(BCH_FS_STARTED, &c->flags))
return -EPERM;
@@ -590,11 +576,6 @@ struct attribute *bch2_fs_internal_files[] = {
&sysfs_io_timers_write,
&sysfs_internal_uuid,
-
-#define BCH_DEBUG_PARAM(name, description) &sysfs_##name,
-BCH_DEBUG_PARAMS()
-#undef BCH_DEBUG_PARAM
-
NULL
};
......