Commit 811d2bcd authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Drop typechecking from bkey_cmp_packed()

This only did anything in two places, and those can just be replaced
with bkey_cmp_left_packed().
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 73e7470b
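For context, the dropped bkey_cmp_packed() macro used the bkey_packed_typecheck()/bkey_lr_packed_typecheck() helpers removed in this diff (built, via type_is(), on the compiler's __builtin_types_compatible_p()) to pick a comparison path at compile time depending on whether each argument was a struct bkey * or a struct bkey_packed *. Below is a minimal standalone sketch of that static-dispatch technique, not bcachefs code: the toy_* names are hypothetical stand-ins, and it assumes GCC/Clang extensions (typeof, statement expressions).

#include <stdio.h>

struct toy_key        { int pos; };	/* stands in for struct bkey        */
struct toy_key_packed { int pos; };	/* stands in for struct bkey_packed */

/* Compile-time test: is _k statically a pointer to the packed type? */
#define toy_is_packed(_k)						\
	__builtin_types_compatible_p(typeof(_k), struct toy_key_packed *)

/*
 * Static dispatch in the spirit of the dropped macro: the switch value
 * is a compile-time constant, so only one branch is ever taken, and the
 * explicit casts keep every branch well-typed whatever the arguments.
 */
#define toy_cmp(_l, _r)							\
({									\
	int _cmp = 0;							\
									\
	switch (toy_is_packed(_l) + (toy_is_packed(_r) << 1)) {		\
	case 0:	/* neither packed */					\
		_cmp = ((struct toy_key *) (_l))->pos -			\
		       ((struct toy_key *) (_r))->pos;			\
		break;							\
	case 1:	/* left packed */					\
		_cmp = ((struct toy_key_packed *) (_l))->pos -		\
		       ((struct toy_key *) (_r))->pos;			\
		break;							\
	case 2:	/* right packed */					\
		_cmp = ((struct toy_key *) (_l))->pos -			\
		       ((struct toy_key_packed *) (_r))->pos;		\
		break;							\
	case 3:	/* both packed */					\
		_cmp = ((struct toy_key_packed *) (_l))->pos -		\
		       ((struct toy_key_packed *) (_r))->pos;		\
		break;							\
	}								\
	_cmp;								\
})

int main(void)
{
	struct toy_key        a = { .pos = 1 };
	struct toy_key_packed p = { .pos = 2 };

	/* Mixed argument types compile because dispatch happens statically. */
	printf("%d %d\n", toy_cmp(&a, &p), toy_cmp(&p, &p));	/* prints: -1 0 */
	return 0;
}

Since only two call sites actually passed a struct bkey *, the diff below rewrites those as bkey_cmp_left_packed(b, k, &insert->k.p), and bch2_bkey_cmp_packed() becomes an ordinary function over two packed keys.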
@@ -413,7 +413,7 @@ static bool bkey_packed_successor(struct bkey_packed *out,
 		if ((*p & mask) != mask) {
 			*p += 1ULL << offset;
-			EBUG_ON(bkey_cmp_packed(b, out, &k) <= 0);
+			EBUG_ON(bch2_bkey_cmp_packed(b, out, &k) <= 0);
 			return true;
 		}
@@ -1057,9 +1057,9 @@ int __bch2_bkey_cmp_left_packed_format_checked(const struct btree *b,
 }
 
 __pure __flatten
-int __bch2_bkey_cmp_packed(const struct bkey_packed *l,
-			   const struct bkey_packed *r,
-			   const struct btree *b)
+int bch2_bkey_cmp_packed(const struct btree *b,
+			 const struct bkey_packed *l,
+			 const struct bkey_packed *r)
 {
 	struct bkey unpacked;
@@ -75,13 +75,6 @@ static inline void set_bkey_val_bytes(struct bkey *k, unsigned bytes)
 #define bkey_whiteout(_k)						\
 	((_k)->type == KEY_TYPE_deleted || (_k)->type == KEY_TYPE_discard)
 
-#define bkey_packed_typecheck(_k)					\
-({									\
-	BUILD_BUG_ON(!type_is(_k, struct bkey *) &&			\
-		     !type_is(_k, struct bkey_packed *));		\
-	type_is(_k, struct bkey_packed *);				\
-})
-
 enum bkey_lr_packed {
 	BKEY_PACKED_BOTH,
 	BKEY_PACKED_RIGHT,
@@ -89,9 +82,6 @@ enum bkey_lr_packed {
 	BKEY_PACKED_NONE,
 };
 
-#define bkey_lr_packed_typecheck(_l, _r)				\
-	(!bkey_packed_typecheck(_l) + ((!bkey_packed_typecheck(_r)) << 1))
-
 #define bkey_lr_packed(_l, _r)						\
 	((_l)->format + ((_r)->format << 1))
@@ -140,9 +130,9 @@ int __bch2_bkey_cmp_left_packed_format_checked(const struct btree *,
 					      const struct bpos *);
 
 __pure
-int __bch2_bkey_cmp_packed(const struct bkey_packed *,
-			   const struct bkey_packed *,
-			   const struct btree *);
+int bch2_bkey_cmp_packed(const struct btree *,
+			 const struct bkey_packed *,
+			 const struct bkey_packed *);
 
 __pure
 int __bch2_bkey_cmp_left_packed(const struct btree *,
@@ -168,37 +158,6 @@ static inline int bkey_cmp_left_packed_byval(const struct btree *b,
 	return bkey_cmp_left_packed(b, l, &r);
 }
 
-/*
- * If @_l or @_r are struct bkey * (not bkey_packed *), uses type information to
- * skip dispatching on k->format:
- */
-#define bkey_cmp_packed(_b, _l, _r)					\
-({									\
-	int _cmp;							\
-									\
-	switch (bkey_lr_packed_typecheck(_l, _r)) {			\
-	case BKEY_PACKED_NONE:						\
-		_cmp = bkey_cmp(((struct bkey *) (_l))->p,		\
-				((struct bkey *) (_r))->p);		\
-		break;							\
-	case BKEY_PACKED_LEFT:						\
-		_cmp = bkey_cmp_left_packed((_b),			\
-				(struct bkey_packed *) (_l),		\
-				&((struct bkey *) (_r))->p);		\
-		break;							\
-	case BKEY_PACKED_RIGHT:						\
-		_cmp = -bkey_cmp_left_packed((_b),			\
-				(struct bkey_packed *) (_r),		\
-				&((struct bkey *) (_l))->p);		\
-		break;							\
-	case BKEY_PACKED_BOTH:						\
-		_cmp = __bch2_bkey_cmp_packed((void *) (_l),		\
-					      (void *) (_r), (_b));	\
-		break;							\
-	}								\
-	_cmp;								\
-})
-
 #if 1
 static __always_inline int bkey_cmp(struct bpos l, struct bpos r)
 {
@@ -86,7 +86,7 @@ static inline int key_sort_fix_overlapping_cmp(struct btree *b,
 					       struct bkey_packed *l,
 					       struct bkey_packed *r)
 {
-	return bkey_cmp_packed(b, l, r) ?:
+	return bch2_bkey_cmp_packed(b, l, r) ?:
 		cmp_int((unsigned long) l, (unsigned long) r);
 }
@@ -98,7 +98,7 @@ static inline bool should_drop_next_key(struct sort_iter *iter)
	 * and should be dropped.
	 */
	return iter->used >= 2 &&
-		!bkey_cmp_packed(iter->b,
+		!bch2_bkey_cmp_packed(iter->b,
				 iter->data[0].k,
				 iter->data[1].k);
 }
@@ -223,7 +223,7 @@ static inline int sort_keys_cmp(struct btree *b,
				struct bkey_packed *l,
				struct bkey_packed *r)
 {
-	return bkey_cmp_packed(b, l, r) ?:
+	return bch2_bkey_cmp_packed(b, l, r) ?:
		(int) bkey_deleted(r) - (int) bkey_deleted(l) ?:
		(int) l->needs_whiteout - (int) r->needs_whiteout;
 }
@@ -245,7 +245,7 @@ unsigned bch2_sort_keys(struct bkey_packed *dst,
			continue;
 
		while ((next = sort_iter_peek(iter)) &&
-		       !bkey_cmp_packed(iter->b, in, next)) {
+		       !bch2_bkey_cmp_packed(iter->b, in, next)) {
			BUG_ON(in->needs_whiteout &&
			       next->needs_whiteout);
			needs_whiteout |= in->needs_whiteout;
@@ -406,7 +406,7 @@ static inline int sort_extents_cmp(struct btree *b,
				   struct bkey_packed *l,
				   struct bkey_packed *r)
 {
-	return bkey_cmp_packed(b, l, r) ?:
+	return bch2_bkey_cmp_packed(b, l, r) ?:
		(int) bkey_deleted(l) - (int) bkey_deleted(r);
 }
@@ -481,7 +481,7 @@ static inline int bkey_iter_cmp(const struct btree *b,
				const struct bkey_packed *l,
				const struct bkey_packed *r)
 {
-	return bkey_cmp_packed(b, l, r)
+	return bch2_bkey_cmp_packed(b, l, r)
		?: (int) bkey_deleted(r) - (int) bkey_deleted(l)
		?: cmp_int(l, r);
 }
@@ -42,7 +42,7 @@ static void verify_no_dups(struct btree *b,
		BUG_ON(extents
		       ? bkey_cmp(l.p, bkey_start_pos(&r)) > 0
		       : bkey_cmp(l.p, bkey_start_pos(&r)) >= 0);
-		//BUG_ON(bkey_cmp_packed(&b->format, p, k) >= 0);
+		//BUG_ON(bch2_bkey_cmp_packed(&b->format, p, k) >= 0);
	}
 #endif
 }
@@ -102,14 +102,14 @@ static void sort_bkey_ptrs(const struct btree *bt,
			break;
 
		for (b = a; c = 2 * b + 1, (d = c + 1) < n;)
-			b = bkey_cmp_packed(bt,
+			b = bch2_bkey_cmp_packed(bt,
					    ptrs[c],
					    ptrs[d]) >= 0 ? c : d;
		if (d == n)
			b = c;
 
		while (b != a &&
-		       bkey_cmp_packed(bt,
+		       bch2_bkey_cmp_packed(bt,
				       ptrs[a],
				       ptrs[b]) >= 0)
			b = (b - 1) / 2;
@@ -1313,7 +1313,7 @@ bch2_btree_insert_keys_interior(struct btree_update *as, struct btree *b,
	 * the node the iterator points to:
	 */
	while ((k = bch2_btree_node_iter_prev_all(&node_iter, b)) &&
-	       (bkey_cmp_packed(b, k, &insert->k) >= 0))
+	       (bkey_cmp_left_packed(b, k, &insert->k.p) >= 0))
		;
 
	for_each_keylist_key(keys, insert)
@@ -72,7 +72,7 @@ bool bch2_btree_bset_insert_key(struct btree_iter *iter,
	EBUG_ON(iter->flags & BTREE_ITER_IS_EXTENTS);
 
	k = bch2_btree_node_iter_peek_all(node_iter, b);
-	if (k && bkey_cmp_packed(b, k, &insert->k))
+	if (k && bkey_cmp_left_packed(b, k, &insert->k.p))
		k = NULL;
 
	/* @k is the key being overwritten/deleted, if any: */