Commit 0390ea8a authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Drop bkey noops

Bkey noops were introduced to deal with trimming inline data extents in
place in the btree: if the u64s field of a bkey was 0, that u64 was a
noop and we'd start looking for the next bkey immediately after it.

But extent handling has been lifted above the btree - we no longer
modify existing extents in place in the btree, and the compatibility
code for old style extent btree nodes is gone, so we can completely
drop this code.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 7c8b166e
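For context on the mechanism being removed: a noop arose when an inline data
extent was trimmed in place, shrinking the key's u64s field and leaving the
freed trailing u64s zeroed. A bkey whose u64s field read 0 occupied exactly
one u64, so iteration had to step over each zeroed word. Below is a hedged,
hypothetical sketch of that trim - not code from this commit; the struct
layout and helper names are simplified for illustration, and kernel-style
u8/u64 types plus memset() are assumed:

	/* Hypothetical illustration only: simplified packed-key header */
	struct bkey_packed_sketch {
		u8	u64s;	/* total size of this key, in u64s; 0 == noop */
	};

	static void old_trim_inline_extent(struct bkey_packed_sketch *k,
					   u8 new_u64s)
	{
		u8 freed = k->u64s - new_u64s;

		k->u64s = new_u64s;
		/* each zeroed u64 after the shrunken key reads as a noop,
		 * which bkey_next_skip_noops() then stepped over one u64
		 * at a time */
		memset((u64 *) k + new_u64s, 0, freed * sizeof(u64));
	}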
@@ -41,16 +41,6 @@ struct bkey_s {
 
 #define bkey_next(_k)		vstruct_next(_k)
 
-static inline struct bkey_packed *bkey_next_skip_noops(struct bkey_packed *k,
-						       struct bkey_packed *end)
-{
-	k = bkey_next(k);
-
-	while (k != end && !k->u64s)
-		k = (void *) ((u64 *) k + 1);
-
-	return k;
-}
-
 #define bkey_val_u64s(_k)	((_k)->u64s - BKEY_U64s)
 
 static inline size_t bkey_val_bytes(const struct bkey *k)
...
@@ -45,7 +45,7 @@ static inline void sort_iter_advance(struct sort_iter *iter, sort_cmp_fn cmp)
 
 	BUG_ON(!iter->used);
 
-	i->k = bkey_next_skip_noops(i->k, i->end);
+	i->k = bkey_next(i->k);
 
 	BUG_ON(i->k > i->end);
...
@@ -66,7 +66,7 @@ void bch2_dump_bset(struct bch_fs *c, struct btree *b,
 	for (_k = i->start;
 	     _k < vstruct_last(i);
 	     _k = _n) {
-		_n = bkey_next_skip_noops(_k, vstruct_last(i));
+		_n = bkey_next(_k);
 
 		k = bkey_disassemble(b, _k, &uk);
 		if (c)
@@ -532,7 +532,7 @@ static void bch2_bset_verify_rw_aux_tree(struct btree *b,
 			       rw_aux_tree(b, t)[j - 1].offset);
 		}
 
-		k = bkey_next_skip_noops(k, btree_bkey_last(b, t));
+		k = bkey_next(k);
 		BUG_ON(k >= btree_bkey_last(b, t));
 	}
 }
@@ -747,7 +747,7 @@ static noinline void __build_ro_aux_tree(struct btree *b, struct bset_tree *t)
 	/* First we figure out where the first key in each cacheline is */
 	eytzinger1_for_each(j, t->size) {
 		while (bkey_to_cacheline(b, t, k) < cacheline)
-			prev = k, k = bkey_next_skip_noops(k, btree_bkey_last(b, t));
+			prev = k, k = bkey_next(k);
 
 		if (k >= btree_bkey_last(b, t)) {
 			/* XXX: this path sucks */
@@ -764,7 +764,7 @@ static noinline void __build_ro_aux_tree(struct btree *b, struct bset_tree *t)
 	}
 
 	while (k != btree_bkey_last(b, t))
-		prev = k, k = bkey_next_skip_noops(k, btree_bkey_last(b, t));
+		prev = k, k = bkey_next(k);
 
 	t->max_key = bkey_unpack_pos(b, prev);
 
@@ -899,7 +899,7 @@ struct bkey_packed *bch2_bkey_prev_filter(struct btree *b,
 	struct bkey_packed *p, *i, *ret = NULL, *orig_k = k;
 
 	while ((p = __bkey_prev(b, t, k)) && !ret) {
-		for (i = p; i != k; i = bkey_next_skip_noops(i, k))
+		for (i = p; i != k; i = bkey_next(i))
 			if (i->type >= min_key_type)
 				ret = i;
 
@@ -910,10 +910,10 @@ struct bkey_packed *bch2_bkey_prev_filter(struct btree *b,
 		BUG_ON(ret >= orig_k);
 
 		for (i = ret
-			     ? bkey_next_skip_noops(ret, orig_k)
+			     ? bkey_next(ret)
 			     : btree_bkey_first(b, t);
 		     i != orig_k;
-		     i = bkey_next_skip_noops(i, orig_k))
+		     i = bkey_next(i))
 			BUG_ON(i->type >= min_key_type);
 	}
 
@@ -948,7 +948,7 @@ static void ro_aux_tree_fix_invalidated_key(struct btree *b,
 	/* signal to make_bfloat() that they're uninitialized: */
 	min_key.u64s = max_key.u64s = 0;
 
-	if (bkey_next_skip_noops(k, btree_bkey_last(b, t)) == btree_bkey_last(b, t)) {
+	if (bkey_next(k) == btree_bkey_last(b, t)) {
 		t->max_key = bkey_unpack_pos(b, k);
 
 		for (j = 1; j < t->size; j = j * 2 + 1)
@@ -1072,7 +1072,7 @@ static void bch2_bset_fix_lookup_table(struct btree *b,
 		struct bkey_packed *k = start;
 
 		while (1) {
-			k = bkey_next_skip_noops(k, end);
+			k = bkey_next(k);
 			if (k == end)
 				break;
 
@@ -1322,12 +1322,12 @@ struct bkey_packed *bch2_bset_search_linear(struct btree *b,
 		while (m != btree_bkey_last(b, t) &&
 		       bkey_iter_cmp_p_or_unp(b, m,
 					lossy_packed_search, search) < 0)
-			m = bkey_next_skip_noops(m, btree_bkey_last(b, t));
+			m = bkey_next(m);
 
 	if (!packed_search)
 		while (m != btree_bkey_last(b, t) &&
 		       bkey_iter_pos_cmp(b, m, search) < 0)
-			m = bkey_next_skip_noops(m, btree_bkey_last(b, t));
+			m = bkey_next(m);
 
 	if (bch2_expensive_debug_checks) {
 		struct bkey_packed *prev = bch2_bkey_prev_all(b, t, m);
@@ -1561,10 +1561,6 @@ static inline void __bch2_btree_node_iter_advance(struct btree_node_iter *iter,
 
 	EBUG_ON(iter->data->k > iter->data->end);
 
-	while (!__btree_node_iter_set_end(iter, 0) &&
-	       !__bch2_btree_node_iter_peek_all(iter, b)->u64s)
-		iter->data->k++;
-
 	if (unlikely(__btree_node_iter_set_end(iter, 0))) {
 		bch2_btree_node_iter_set_drop(iter, iter->data);
 		return;
...
@@ -305,7 +305,7 @@ static inline struct bkey_s __bkey_disassemble(struct btree *b,
 
 #define bset_tree_for_each_key(_b, _t, _k)			\
 	for (_k = btree_bkey_first(_b, _t);			\
 	     _k != btree_bkey_last(_b, _t);			\
-	     _k = bkey_next_skip_noops(_k, btree_bkey_last(_b, _t)))
+	     _k = bkey_next(_k))
 
 static inline bool bset_has_ro_aux_tree(struct bset_tree *t)
 {
...
@@ -1373,7 +1373,7 @@ static void bch2_coalesce_nodes(struct bch_fs *c, struct btree_iter *iter,
 		     k < vstruct_last(s2) &&
 		     vstruct_blocks_plus(n1->data, c->block_bits,
 					 u64s + k->u64s) <= blocks;
-		     k = bkey_next_skip_noops(k, vstruct_last(s2))) {
+		     k = bkey_next(k)) {
 			last = k;
 			u64s += k->u64s;
 		}
...
@@ -32,9 +32,9 @@ static void verify_no_dups(struct btree *b,
 	if (start == end)
 		return;
 
-	for (p = start, k = bkey_next_skip_noops(start, end);
+	for (p = start, k = bkey_next(start);
 	     k != end;
-	     p = k, k = bkey_next_skip_noops(k, end)) {
+	     p = k, k = bkey_next(k)) {
 		struct bkey l = bkey_unpack_key(b, p);
 		struct bkey r = bkey_unpack_key(b, k);
@@ -47,9 +47,7 @@ static void set_needs_whiteout(struct bset *i, int v)
 {
 	struct bkey_packed *k;
 
-	for (k = i->start;
-	     k != vstruct_last(i);
-	     k = bkey_next_skip_noops(k, vstruct_last(i)))
+	for (k = i->start; k != vstruct_last(i); k = bkey_next(k))
 		k->needs_whiteout = v;
 }
@@ -213,7 +211,7 @@ static bool bch2_drop_whiteouts(struct btree *b, enum compact_mode mode)
 		out = i->start;
 
 		for (k = start; k != end; k = n) {
-			n = bkey_next_skip_noops(k, end);
+			n = bkey_next(k);
 
 			if (!bkey_deleted(k)) {
 				bkey_copy(out, k);
@@ -754,7 +752,7 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
 		}
 
 		prev = k;
-		k = bkey_next_skip_noops(k, vstruct_last(i));
+		k = bkey_next(k);
 	}
 fsck_err:
 	return ret;
@@ -947,7 +945,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 			bp.v->mem_ptr = 0;
 		}
 
-		k = bkey_next_skip_noops(k, vstruct_last(i));
+		k = bkey_next(k);
 	}
 
 	bch2_bset_build_aux_tree(b, b->set, false);
...
@@ -1119,7 +1119,7 @@ static struct btree *__btree_split_node(struct btree_update *as,
 	 */
 	k = set1->start;
 	while (1) {
-		struct bkey_packed *n = bkey_next_skip_noops(k, vstruct_last(set1));
+		struct bkey_packed *n = bkey_next(k);
 
 		if (n == vstruct_last(set1))
 			break;
@@ -1216,7 +1216,7 @@ static void btree_split_insert_keys(struct btree_update *as, struct btree *b,
 	i = btree_bset_first(b);
 	src = dst = i->start;
 	while (src != vstruct_last(i)) {
-		n = bkey_next_skip_noops(src, vstruct_last(i));
+		n = bkey_next(src);
 
 		if (!bkey_deleted(src)) {
 			memmove_u64s_down(dst, src, src->u64s);
 			dst = bkey_next(dst);
...
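With noops gone, a live bkey always has a nonzero u64s field, so the plain
bkey_next() advance (vstruct_next(): step forward by the key's own length)
is sufficient everywhere and no longer needs an end pointer to bound a skip
loop. A minimal sketch of that advance, reusing the simplified header struct
from the sketch above - again hypothetical, not the real bkey_packed, which
also carries format and type bits:

	static inline struct bkey_packed_sketch *
	sketch_bkey_next(struct bkey_packed_sketch *k)
	{
		/* u64s is nonzero for every live key, so a single
		 * unconditional step always lands on the next key */
		return (struct bkey_packed_sketch *) ((u64 *) k + k->u64s);
	}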