Commit b6fb4269 authored by Kent Overstreet

bcachefs: for_each_bset() declares loop iter

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent e2f48c48
...@@ -103,8 +103,6 @@ void bch2_dump_bset(struct bch_fs *c, struct btree *b, ...@@ -103,8 +103,6 @@ void bch2_dump_bset(struct bch_fs *c, struct btree *b,
void bch2_dump_btree_node(struct bch_fs *c, struct btree *b) void bch2_dump_btree_node(struct bch_fs *c, struct btree *b)
{ {
struct bset_tree *t;
console_lock(); console_lock();
for_each_bset(b, t) for_each_bset(b, t)
bch2_dump_bset(c, b, bset(b, t), t - b->set); bch2_dump_bset(c, b, bset(b, t), t - b->set);
...@@ -136,7 +134,6 @@ void bch2_dump_btree_node_iter(struct btree *b, ...@@ -136,7 +134,6 @@ void bch2_dump_btree_node_iter(struct btree *b,
struct btree_nr_keys bch2_btree_node_count_keys(struct btree *b) struct btree_nr_keys bch2_btree_node_count_keys(struct btree *b)
{ {
struct bset_tree *t;
struct bkey_packed *k; struct bkey_packed *k;
struct btree_nr_keys nr = {}; struct btree_nr_keys nr = {};
...@@ -198,7 +195,6 @@ void bch2_btree_node_iter_verify(struct btree_node_iter *iter, ...@@ -198,7 +195,6 @@ void bch2_btree_node_iter_verify(struct btree_node_iter *iter,
{ {
struct btree_node_iter_set *set, *s2; struct btree_node_iter_set *set, *s2;
struct bkey_packed *k, *p; struct bkey_packed *k, *p;
struct bset_tree *t;
if (bch2_btree_node_iter_end(iter)) if (bch2_btree_node_iter_end(iter))
return; return;
...@@ -213,12 +209,14 @@ void bch2_btree_node_iter_verify(struct btree_node_iter *iter, ...@@ -213,12 +209,14 @@ void bch2_btree_node_iter_verify(struct btree_node_iter *iter,
/* Verify that set->end is correct: */ /* Verify that set->end is correct: */
btree_node_iter_for_each(iter, set) { btree_node_iter_for_each(iter, set) {
for_each_bset(b, t) for_each_bset(b, t)
if (set->end == t->end_offset) if (set->end == t->end_offset) {
BUG_ON(set->k < btree_bkey_first_offset(t) ||
set->k >= t->end_offset);
goto found; goto found;
}
BUG(); BUG();
found: found:
BUG_ON(set->k < btree_bkey_first_offset(t) || do {} while (0);
set->k >= t->end_offset);
} }
/* Verify iterator is sorted: */ /* Verify iterator is sorted: */
...@@ -377,11 +375,9 @@ static struct bkey_float *bkey_float(const struct btree *b, ...@@ -377,11 +375,9 @@ static struct bkey_float *bkey_float(const struct btree *b,
return ro_aux_tree_base(b, t)->f + idx; return ro_aux_tree_base(b, t)->f + idx;
} }
static void bset_aux_tree_verify(const struct btree *b) static void bset_aux_tree_verify(struct btree *b)
{ {
#ifdef CONFIG_BCACHEFS_DEBUG #ifdef CONFIG_BCACHEFS_DEBUG
const struct bset_tree *t;
for_each_bset(b, t) { for_each_bset(b, t) {
if (t->aux_data_offset == U16_MAX) if (t->aux_data_offset == U16_MAX)
continue; continue;
...@@ -685,20 +681,20 @@ static __always_inline void make_bfloat(struct btree *b, struct bset_tree *t, ...@@ -685,20 +681,20 @@ static __always_inline void make_bfloat(struct btree *b, struct bset_tree *t,
} }
/* bytes remaining - only valid for last bset: */ /* bytes remaining - only valid for last bset: */
static unsigned __bset_tree_capacity(const struct btree *b, const struct bset_tree *t) static unsigned __bset_tree_capacity(struct btree *b, const struct bset_tree *t)
{ {
bset_aux_tree_verify(b); bset_aux_tree_verify(b);
return btree_aux_data_bytes(b) - t->aux_data_offset * sizeof(u64); return btree_aux_data_bytes(b) - t->aux_data_offset * sizeof(u64);
} }
static unsigned bset_ro_tree_capacity(const struct btree *b, const struct bset_tree *t) static unsigned bset_ro_tree_capacity(struct btree *b, const struct bset_tree *t)
{ {
return __bset_tree_capacity(b, t) / return __bset_tree_capacity(b, t) /
(sizeof(struct bkey_float) + sizeof(u8)); (sizeof(struct bkey_float) + sizeof(u8));
} }
static unsigned bset_rw_tree_capacity(const struct btree *b, const struct bset_tree *t) static unsigned bset_rw_tree_capacity(struct btree *b, const struct bset_tree *t)
{ {
return __bset_tree_capacity(b, t) / sizeof(struct rw_aux_tree); return __bset_tree_capacity(b, t) / sizeof(struct rw_aux_tree);
} }
...@@ -1374,8 +1370,6 @@ void bch2_btree_node_iter_init(struct btree_node_iter *iter, ...@@ -1374,8 +1370,6 @@ void bch2_btree_node_iter_init(struct btree_node_iter *iter,
void bch2_btree_node_iter_init_from_start(struct btree_node_iter *iter, void bch2_btree_node_iter_init_from_start(struct btree_node_iter *iter,
struct btree *b) struct btree *b)
{ {
struct bset_tree *t;
memset(iter, 0, sizeof(*iter)); memset(iter, 0, sizeof(*iter));
for_each_bset(b, t) for_each_bset(b, t)
...@@ -1481,7 +1475,6 @@ struct bkey_packed *bch2_btree_node_iter_prev_all(struct btree_node_iter *iter, ...@@ -1481,7 +1475,6 @@ struct bkey_packed *bch2_btree_node_iter_prev_all(struct btree_node_iter *iter,
{ {
struct bkey_packed *k, *prev = NULL; struct bkey_packed *k, *prev = NULL;
struct btree_node_iter_set *set; struct btree_node_iter_set *set;
struct bset_tree *t;
unsigned end = 0; unsigned end = 0;
if (bch2_expensive_debug_checks) if (bch2_expensive_debug_checks)
...@@ -1550,9 +1543,7 @@ struct bkey_s_c bch2_btree_node_iter_peek_unpack(struct btree_node_iter *iter, ...@@ -1550,9 +1543,7 @@ struct bkey_s_c bch2_btree_node_iter_peek_unpack(struct btree_node_iter *iter,
void bch2_btree_keys_stats(const struct btree *b, struct bset_stats *stats) void bch2_btree_keys_stats(const struct btree *b, struct bset_stats *stats)
{ {
const struct bset_tree *t; for_each_bset_c(b, t) {
for_each_bset(b, t) {
enum bset_aux_tree_type type = bset_aux_tree_type(t); enum bset_aux_tree_type type = bset_aux_tree_type(t);
size_t j; size_t j;
......
...@@ -206,7 +206,10 @@ static inline size_t btree_aux_data_u64s(const struct btree *b) ...@@ -206,7 +206,10 @@ static inline size_t btree_aux_data_u64s(const struct btree *b)
} }
#define for_each_bset(_b, _t) \ #define for_each_bset(_b, _t) \
for (_t = (_b)->set; _t < (_b)->set + (_b)->nsets; _t++) for (struct bset_tree *_t = (_b)->set; _t < (_b)->set + (_b)->nsets; _t++)
#define for_each_bset_c(_b, _t) \
for (const struct bset_tree *_t = (_b)->set; _t < (_b)->set + (_b)->nsets; _t++)
#define bset_tree_for_each_key(_b, _t, _k) \ #define bset_tree_for_each_key(_b, _t, _k) \
for (_k = btree_bkey_first(_b, _t); \ for (_k = btree_bkey_first(_b, _t); \
...@@ -294,7 +297,6 @@ static inline struct bset_tree * ...@@ -294,7 +297,6 @@ static inline struct bset_tree *
bch2_bkey_to_bset_inlined(struct btree *b, struct bkey_packed *k) bch2_bkey_to_bset_inlined(struct btree *b, struct bkey_packed *k)
{ {
unsigned offset = __btree_node_key_to_offset(b, k); unsigned offset = __btree_node_key_to_offset(b, k);
struct bset_tree *t;
for_each_bset(b, t) for_each_bset(b, t)
if (offset <= t->end_offset) { if (offset <= t->end_offset) {
......
...@@ -881,7 +881,6 @@ static struct btree *__bch2_btree_node_get(struct btree_trans *trans, struct btr ...@@ -881,7 +881,6 @@ static struct btree *__bch2_btree_node_get(struct btree_trans *trans, struct btr
struct bch_fs *c = trans->c; struct bch_fs *c = trans->c;
struct btree_cache *bc = &c->btree_cache; struct btree_cache *bc = &c->btree_cache;
struct btree *b; struct btree *b;
struct bset_tree *t;
bool need_relock = false; bool need_relock = false;
int ret; int ret;
...@@ -1001,7 +1000,6 @@ struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_path * ...@@ -1001,7 +1000,6 @@ struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_path *
{ {
struct bch_fs *c = trans->c; struct bch_fs *c = trans->c;
struct btree *b; struct btree *b;
struct bset_tree *t;
int ret; int ret;
EBUG_ON(level >= BTREE_MAX_DEPTH); EBUG_ON(level >= BTREE_MAX_DEPTH);
...@@ -1078,7 +1076,6 @@ struct btree *bch2_btree_node_get_noiter(struct btree_trans *trans, ...@@ -1078,7 +1076,6 @@ struct btree *bch2_btree_node_get_noiter(struct btree_trans *trans,
struct bch_fs *c = trans->c; struct bch_fs *c = trans->c;
struct btree_cache *bc = &c->btree_cache; struct btree_cache *bc = &c->btree_cache;
struct btree *b; struct btree *b;
struct bset_tree *t;
int ret; int ret;
EBUG_ON(level >= BTREE_MAX_DEPTH); EBUG_ON(level >= BTREE_MAX_DEPTH);
......
...@@ -229,7 +229,6 @@ static bool should_compact_bset(struct btree *b, struct bset_tree *t, ...@@ -229,7 +229,6 @@ static bool should_compact_bset(struct btree *b, struct bset_tree *t,
static bool bch2_drop_whiteouts(struct btree *b, enum compact_mode mode) static bool bch2_drop_whiteouts(struct btree *b, enum compact_mode mode)
{ {
struct bset_tree *t;
bool ret = false; bool ret = false;
for_each_bset(b, t) { for_each_bset(b, t) {
...@@ -451,8 +450,6 @@ static bool btree_node_compact(struct bch_fs *c, struct btree *b) ...@@ -451,8 +450,6 @@ static bool btree_node_compact(struct bch_fs *c, struct btree *b)
void bch2_btree_build_aux_trees(struct btree *b) void bch2_btree_build_aux_trees(struct btree *b)
{ {
struct bset_tree *t;
for_each_bset(b, t) for_each_bset(b, t)
bch2_bset_build_aux_tree(b, t, bch2_bset_build_aux_tree(b, t,
!bset_written(b, bset(b, t)) && !bset_written(b, bset(b, t)) &&
...@@ -637,8 +634,6 @@ static int __btree_err(int ret, ...@@ -637,8 +634,6 @@ static int __btree_err(int ret,
__cold __cold
void bch2_btree_node_drop_keys_outside_node(struct btree *b) void bch2_btree_node_drop_keys_outside_node(struct btree *b)
{ {
struct bset_tree *t;
for_each_bset(b, t) { for_each_bset(b, t) {
struct bset *i = bset(b, t); struct bset *i = bset(b, t);
struct bkey_packed *k; struct bkey_packed *k;
...@@ -1987,7 +1982,6 @@ static void btree_write_submit(struct work_struct *work) ...@@ -1987,7 +1982,6 @@ static void btree_write_submit(struct work_struct *work)
void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags) void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
{ {
struct btree_write_bio *wbio; struct btree_write_bio *wbio;
struct bset_tree *t;
struct bset *i; struct bset *i;
struct btree_node *bn = NULL; struct btree_node *bn = NULL;
struct btree_node_entry *bne = NULL; struct btree_node_entry *bne = NULL;
...@@ -2244,7 +2238,6 @@ bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b) ...@@ -2244,7 +2238,6 @@ bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
{ {
bool invalidated_iter = false; bool invalidated_iter = false;
struct btree_node_entry *bne; struct btree_node_entry *bne;
struct bset_tree *t;
if (!btree_node_just_written(b)) if (!btree_node_just_written(b))
return false; return false;
......
...@@ -81,8 +81,6 @@ static inline bool should_compact_bset_lazy(struct btree *b, ...@@ -81,8 +81,6 @@ static inline bool should_compact_bset_lazy(struct btree *b,
static inline bool bch2_maybe_compact_whiteouts(struct bch_fs *c, struct btree *b) static inline bool bch2_maybe_compact_whiteouts(struct bch_fs *c, struct btree *b)
{ {
struct bset_tree *t;
for_each_bset(b, t) for_each_bset(b, t)
if (should_compact_bset_lazy(b, t)) if (should_compact_bset_lazy(b, t))
return bch2_compact_whiteouts(c, b, COMPACT_LAZY); return bch2_compact_whiteouts(c, b, COMPACT_LAZY);
......
...@@ -160,7 +160,6 @@ int bch2_btree_node_check_topology(struct btree_trans *trans, struct btree *b) ...@@ -160,7 +160,6 @@ int bch2_btree_node_check_topology(struct btree_trans *trans, struct btree *b)
static void __bch2_btree_calc_format(struct bkey_format_state *s, struct btree *b) static void __bch2_btree_calc_format(struct bkey_format_state *s, struct btree *b)
{ {
struct bkey_packed *k; struct bkey_packed *k;
struct bset_tree *t;
struct bkey uk; struct bkey uk;
for_each_bset(b, t) for_each_bset(b, t)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment