Commit 0423fb71 — authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Keep a sorted list of btree iterators

This will be used to make other operations on btree iterators within a
transaction more efficient, and enable some other improvements to how we
manage btree iterators.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 877da05f
This diff is collapsed.
......@@ -71,6 +71,36 @@ __trans_next_iter(struct btree_trans *trans, unsigned idx)
(_iter); \
_iter = __trans_next_iter((_trans), (_iter)->idx + 1))
/*
 * Walk forward through the transaction's iterators in sorted order:
 * with @iter == NULL, return the first sorted iterator; otherwise return
 * the one after @iter; NULL once the end of the sorted list is reached.
 */
static inline struct btree_iter *next_btree_iter(struct btree_trans *trans, struct btree_iter *iter)
{
	unsigned next;

	if (iter)
		next = iter->sorted_idx + 1;
	else
		next = 0;

	EBUG_ON(next > trans->nr_sorted);

	if (next >= trans->nr_sorted)
		return NULL;

	return &trans->iters[trans->sorted[next]];
}
/*
 * Walk backward through the transaction's iterators in sorted order:
 * with @iter == NULL, return the last sorted iterator; otherwise return
 * the one before @iter; NULL once the start of the sorted list is passed.
 */
static inline struct btree_iter *prev_btree_iter(struct btree_trans *trans, struct btree_iter *iter)
{
	unsigned end = iter ? iter->sorted_idx : trans->nr_sorted;

	if (!end)
		return NULL;

	return &trans->iters[trans->sorted[end - 1]];
}
/*
 * Iterate over a transaction's btree iterators in sorted (lock) order.
 *
 * Fixes vs. original: the body referenced the caller's variable `trans`
 * directly instead of the macro parameter `(_trans)` (unhygienic — only
 * worked when the argument happened to be named "trans"), and it loaded
 * sorted[_i] before testing the bound.  The bounds test now short-circuits
 * so sorted[] is only read for in-range _i; the `, true` keeps the
 * condition's value coming from the comparison, not the assignment.
 */
#define trans_for_each_iter_inorder(_trans, _iter, _i)			\
	for (_i = 0;							\
	     (_i) < (_trans)->nr_sorted &&				\
	     ((_iter) = (_trans)->iters + (_trans)->sorted[_i], true);	\
	     _i++)
/*
 * Iterate over a transaction's btree iterators in reverse sorted order.
 * @_i must be a signed type: the loop terminates by decrementing past 0.
 *
 * Fixes vs. original: the body referenced the caller's variable `trans`
 * directly instead of the macro parameter `(_trans)` (unhygienic), and it
 * evaluated `(_iter) = ... sorted[_i]` BEFORE the `_i >= 0` test — on an
 * empty transaction (nr_sorted == 0) that read sorted[-1], out of bounds.
 * The bounds test now short-circuits ahead of the array access.
 */
#define trans_for_each_iter_inorder_reverse(_trans, _iter, _i)		\
	for (_i = (_trans)->nr_sorted - 1;				\
	     (_i) >= 0 &&						\
	     ((_iter) = (_trans)->iters + (_trans)->sorted[_i], true);	\
	     --_i)
static inline bool __iter_has_node(const struct btree_iter *iter,
const struct btree *b)
{
......@@ -191,19 +221,14 @@ static inline void bch2_btree_iter_set_pos_to_extent_start(struct btree_iter *it
iter->pos = bkey_start_pos(&iter->k);
}
/*
 * NOTE(review): this region is a collapsed diff — the removed and added
 * versions of these helpers appear interleaved below, so it is not valid C
 * as-is.  Comments mark which lines belong to which version.
 */
/* OLD signature (removed): */
static inline struct btree_iter *btree_iter_child(struct btree_iter *iter)
/* NEW (added): map an iterator-table index to its iterator; U8_MAX means "no iterator". */
static inline struct btree_iter *idx_to_btree_iter(struct btree_trans *trans, unsigned idx)
{
/* OLD body (removed): open-coded the U8_MAX-sentinel lookup via iter->trans: */
return iter->child_idx == U8_MAX ? NULL
: iter->trans->iters + iter->child_idx;
/* NEW body (added): */
return idx != U8_MAX ? trans->iters + idx : NULL;
}
/* Sort order for locking btree iterators: */
/* OLD signature (removed): */
static inline int btree_iter_lock_cmp(const struct btree_iter *l,
const struct btree_iter *r)
/* NEW (added): btree_iter_child() now delegates to idx_to_btree_iter(). */
static inline struct btree_iter *btree_iter_child(struct btree_iter *iter)
{
/* OLD body (removed): compare by btree id, then cached-ness, then position: */
return cmp_int(l->btree_id, r->btree_id) ?:
-cmp_int(btree_iter_is_cached(l), btree_iter_is_cached(r)) ?:
bkey_cmp(l->real_pos, r->real_pos);
/* NEW body (added): */
return idx_to_btree_iter(iter->trans, iter->child_idx);
}
/*
......
......@@ -246,6 +246,7 @@ struct btree_iter {
u8 idx;
u8 child_idx;
u8 sorted_idx;
/* btree_iter_copy starts here: */
u16 flags;
......@@ -379,11 +380,13 @@ struct btree_trans {
unsigned long ip;
int srcu_idx;
u8 nr_sorted;
u8 nr_updates;
bool used_mempool:1;
bool error:1;
bool in_traverse_all:1;
bool restarted:1;
bool iters_sorted:1;
/*
* For when bch2_trans_update notices we'll be splitting a compressed
* extent:
......@@ -398,6 +401,7 @@ struct btree_trans {
unsigned mem_bytes;
void *mem;
u8 sorted[BTREE_ITER_MAX + 8];
struct btree_iter *iters;
struct btree_insert_entry *updates;
......
......@@ -593,6 +593,20 @@ static inline void memmove_u64s_down(void *dst, const void *src,
__memmove_u64s_down(dst, src, u64s);
}
/*
 * Unchecked "move down" of @u64s 64-bit words from @src to @dst.
 * "Down" means @dst is at or below @src, so a forward (ascending) copy is
 * safe even when the regions overlap.
 * NOTE(review): overlap-safety assumes memcpy_u64s_small copies in
 * ascending address order — confirm against its definition.
 */
static inline void __memmove_u64s_down_small(void *dst, const void *src,
				      unsigned u64s)
{
	memcpy_u64s_small(dst, src, u64s);
}
static inline void memmove_u64s_down_small(void *dst, const void *src,
unsigned u64s)
{
EBUG_ON(dst > src);
__memmove_u64s_down_small(dst, src, u64s);
}
static inline void __memmove_u64s_up_small(void *_dst, const void *_src,
unsigned u64s)
{
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.