Commit c96f108b authored by Kent Overstreet

bcachefs: Optimize bch2_trans_iter_init()

When flags & btree_id are constants, we can constant fold the entire
calculation of the actual iterator flags - and the whole thing becomes
small enough to inline.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 4d868d18
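
The pattern at work here is worth spelling out before reading the diff. Below is a minimal, self-contained sketch of it; this is not bcachefs code, and every name and flag value in it is invented for illustration. A pure flags computation lives in an inline helper, and a __builtin_constant_p() test (a GCC/Clang builtin) picks the inline path only when the compiler can prove both arguments are compile-time constants:

/* Toy demo of the constant-fold dispatch pattern; build: cc -O2 demo.c */
#include <stdio.h>

#define FLAG_EXTENTS	(1U << 0)
#define FLAG_SNAPSHOTS	(1U << 1)
#define FLAG_CACHED	(1U << 2)

/* Pure function of its arguments: constant inputs fold to a constant result. */
static inline unsigned compute_flags(unsigned btree_id, unsigned flags)
{
	if (btree_id == 0)		/* pretend id 0 is an extents btree */
		flags |= FLAG_EXTENTS;
	if (!(flags & FLAG_CACHED))
		flags |= FLAG_SNAPSHOTS;
	return flags;
}

/* Out-of-line path: one copy of the full computation for variable arguments. */
static void iter_init_outlined(unsigned btree_id, unsigned flags)
{
	printf("outlined: flags=%u\n", compute_flags(btree_id, flags));
}

static inline void iter_init(unsigned btree_id, unsigned flags)
{
	if (__builtin_constant_p(btree_id) && __builtin_constant_p(flags))
		/* All of compute_flags() folds at compile time (with -O2 and
		 * literal arguments), so the inlined body stays small. */
		printf("inlined: flags=%u\n", compute_flags(btree_id, flags));
	else
		iter_init_outlined(btree_id, flags);
}

int main(int argc, char **argv)
{
	(void) argv;

	iter_init(3, FLAG_CACHED);	/* literal args: inline path */
	iter_init((unsigned) argc, 0);	/* runtime arg: outlined path */
	return 0;
}

In the commit itself, the same split appears as the inline bch2_trans_iter_init() in btree_iter.h versus bch2_trans_iter_init_outlined() in btree_iter.c.
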
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -2623,68 +2623,22 @@ void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter)
 	iter->key_cache_path = NULL;
 }
 
-static inline void __bch2_trans_iter_init(struct btree_trans *trans,
+static inline void bch2_trans_iter_init_inlined(struct btree_trans *trans,
 					  struct btree_iter *iter,
-					  enum btree_id btree_id, struct bpos pos,
-					  unsigned locks_want,
-					  unsigned depth,
+					  unsigned btree_id, struct bpos pos,
 					  unsigned flags)
 {
-	if (unlikely(trans->restarted))
-		panic("bch2_trans_iter_init(): in transaction restart, %s by %pS\n",
-		      bch2_err_str(trans->restarted),
-		      (void *) trans->last_restarted_ip);
-
-	if (flags & BTREE_ITER_ALL_LEVELS)
-		flags |= BTREE_ITER_ALL_SNAPSHOTS|__BTREE_ITER_ALL_SNAPSHOTS;
-
-	if (!(flags & (BTREE_ITER_ALL_SNAPSHOTS|BTREE_ITER_NOT_EXTENTS)) &&
-	    btree_node_type_is_extents(btree_id))
-		flags |= BTREE_ITER_IS_EXTENTS;
-
-	if (!(flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
-	    !btree_type_has_snapshots(btree_id))
-		flags &= ~BTREE_ITER_ALL_SNAPSHOTS;
-
-	if (!(flags & BTREE_ITER_ALL_SNAPSHOTS) &&
-	    btree_type_has_snapshots(btree_id))
-		flags |= BTREE_ITER_FILTER_SNAPSHOTS;
-
-	if (trans->journal_replay_not_finished)
-		flags |= BTREE_ITER_WITH_JOURNAL;
-
-	iter->trans		= trans;
-	iter->path		= NULL;
-	iter->update_path	= NULL;
-	iter->key_cache_path	= NULL;
-	iter->btree_id		= btree_id;
-	iter->min_depth		= depth;
-	iter->flags		= flags;
-	iter->snapshot		= pos.snapshot;
-	iter->pos		= pos;
-	iter->k.type		= KEY_TYPE_deleted;
-	iter->k.p		= pos;
-	iter->k.size		= 0;
-	iter->journal_idx	= 0;
-	iter->journal_pos	= POS_MIN;
-
-	iter->path = bch2_path_get(trans, btree_id, iter->pos,
-				   locks_want, depth, flags);
+	bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
+				    bch2_btree_iter_flags(trans, btree_id, flags));
 }
 
-void bch2_trans_iter_init(struct btree_trans *trans,
+void bch2_trans_iter_init_outlined(struct btree_trans *trans,
 			  struct btree_iter *iter,
-			  unsigned btree_id, struct bpos pos,
+			  enum btree_id btree_id, struct bpos pos,
 			  unsigned flags)
 {
-	if (!btree_id_cached(trans->c, btree_id)) {
-		flags &= ~BTREE_ITER_CACHED;
-		flags &= ~BTREE_ITER_WITH_KEY_CACHE;
-	} else if (!(flags & BTREE_ITER_CACHED))
-		flags |= BTREE_ITER_WITH_KEY_CACHE;
-
-	__bch2_trans_iter_init(trans, iter, btree_id, pos,
-			       0, 0, flags);
+	bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
+				    bch2_btree_iter_flags(trans, btree_id, flags));
 }
 
 void bch2_trans_node_iter_init(struct btree_trans *trans,
@@ -2695,11 +2649,15 @@ void bch2_trans_node_iter_init(struct btree_trans *trans,
 			       unsigned depth,
 			       unsigned flags)
 {
-	__bch2_trans_iter_init(trans, iter, btree_id, pos, locks_want, depth,
-			       BTREE_ITER_NOT_EXTENTS|
-			       __BTREE_ITER_ALL_SNAPSHOTS|
-			       BTREE_ITER_ALL_SNAPSHOTS|
-			       flags);
+	flags |= BTREE_ITER_NOT_EXTENTS;
+	flags |= __BTREE_ITER_ALL_SNAPSHOTS;
+	flags |= BTREE_ITER_ALL_SNAPSHOTS;
+
+	bch2_trans_iter_init_common(trans, iter, btree_id, pos, locks_want, depth,
+				    __bch2_btree_iter_flags(trans, btree_id, flags));
+
+	iter->min_depth	= depth;
+
 	BUG_ON(iter->path->locks_want	 < min(locks_want, BTREE_MAX_DEPTH));
 	BUG_ON(iter->path->level	!= depth);
 	BUG_ON(iter->min_depth		!= depth);
--- a/fs/bcachefs/btree_iter.h
+++ b/fs/bcachefs/btree_iter.h
@@ -315,8 +315,80 @@ static inline void bch2_btree_iter_set_snapshot(struct btree_iter *iter, u32 sna
 }
 
 void bch2_trans_iter_exit(struct btree_trans *, struct btree_iter *);
-void bch2_trans_iter_init(struct btree_trans *, struct btree_iter *,
-			  unsigned, struct bpos, unsigned);
+
+static inline unsigned __bch2_btree_iter_flags(struct btree_trans *trans,
+					       unsigned btree_id,
+					       unsigned flags)
+{
+	if (flags & BTREE_ITER_ALL_LEVELS)
+		flags |= BTREE_ITER_ALL_SNAPSHOTS|__BTREE_ITER_ALL_SNAPSHOTS;
+
+	if (!(flags & (BTREE_ITER_ALL_SNAPSHOTS|BTREE_ITER_NOT_EXTENTS)) &&
+	    btree_node_type_is_extents(btree_id))
+		flags |= BTREE_ITER_IS_EXTENTS;
+
+	if (!(flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
+	    !btree_type_has_snapshots(btree_id))
+		flags &= ~BTREE_ITER_ALL_SNAPSHOTS;
+
+	if (!(flags & BTREE_ITER_ALL_SNAPSHOTS) &&
+	    btree_type_has_snapshots(btree_id))
+		flags |= BTREE_ITER_FILTER_SNAPSHOTS;
+
+	if (trans->journal_replay_not_finished)
+		flags |= BTREE_ITER_WITH_JOURNAL;
+
+	return flags;
+}
+
+static inline unsigned bch2_btree_iter_flags(struct btree_trans *trans,
+					     unsigned btree_id,
+					     unsigned flags)
+{
+	if (!btree_id_cached(trans->c, btree_id)) {
+		flags &= ~BTREE_ITER_CACHED;
+		flags &= ~BTREE_ITER_WITH_KEY_CACHE;
+	} else if (!(flags & BTREE_ITER_CACHED))
+		flags |= BTREE_ITER_WITH_KEY_CACHE;
+
+	return __bch2_btree_iter_flags(trans, btree_id, flags);
+}
+
+static inline void bch2_trans_iter_init_common(struct btree_trans *trans,
+					       struct btree_iter *iter,
+					       unsigned btree_id, struct bpos pos,
+					       unsigned locks_want,
+					       unsigned depth,
+					       unsigned flags)
+{
+	memset(iter, 0, sizeof(*iter));
+
+	iter->trans	= trans;
+	iter->btree_id	= btree_id;
+	iter->flags	= flags;
+	iter->snapshot	= pos.snapshot;
+	iter->pos	= pos;
+	iter->k.p	= pos;
+
+	iter->path = bch2_path_get(trans, btree_id, iter->pos,
+				   locks_want, depth, flags);
+}
+
+void bch2_trans_iter_init_outlined(struct btree_trans *, struct btree_iter *,
+				   enum btree_id, struct bpos, unsigned);
+
+static inline void bch2_trans_iter_init(struct btree_trans *trans,
+					struct btree_iter *iter,
+					unsigned btree_id, struct bpos pos,
+					unsigned flags)
+{
+	if (__builtin_constant_p(btree_id) &&
+	    __builtin_constant_p(flags))
+		bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
+				bch2_btree_iter_flags(trans, btree_id, flags));
+	else
+		bch2_trans_iter_init_outlined(trans, iter, btree_id, pos, flags);
+}
+
 void bch2_trans_node_iter_init(struct btree_trans *, struct btree_iter *,
 			       enum btree_id, struct bpos,
 			       unsigned, unsigned, unsigned);
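
To make the win concrete, consider a hypothetical call site built against the interface above. BTREE_ID_inodes, POS() and BTREE_ITER_CACHED are existing bcachefs identifiers, but trans, iter and inode_nr are assumed locals and the call itself is invented for illustration:

	struct btree_iter iter;

	/* Both btree_id and flags are literals, so bch2_trans_iter_init()
	 * takes the inline path. */
	bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes,
			     POS(0, inode_nr), BTREE_ITER_CACHED);

	/*
	 * In bch2_btree_iter_flags(), every test that depends only on
	 * btree_id and flags (the extents and snapshots checks) folds at
	 * compile time; only the btree_id_cached() and
	 * trans->journal_replay_not_finished tests still read runtime
	 * state, so the inlined body stays small.
	 */
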
--- a/fs/bcachefs/btree_types.h
+++ b/fs/bcachefs/btree_types.h
@@ -278,7 +278,6 @@ struct btree_iter {
 	unsigned		snapshot;
 
 	struct bpos		pos;
-	struct bpos		pos_after_commit;
 	/*
 	 * Current unpacked key - so that bch2_btree_iter_next()/
 	 * bch2_btree_iter_next_slot() can correctly advance pos.