Commit 78cf784e authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Further reduce iter->trans usage

This is prep work for splitting btree_path out from btree_iter -
btree_path will not have a pointer to btree_trans.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
parent 05046a96
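
The change below is mechanical but broad: every locking helper that previously reached through iter->trans now takes trans as an explicit argument, and call sites thread it down. A minimal standalone sketch of the before/after shape (all types and names here are illustrative stand-ins, not the real bcachefs definitions):

/* Illustrative stand-in types; not the real bcachefs definitions. */
#include <stdbool.h>
#include <stdio.h>

struct btree_trans { bool restarted; };

struct btree_iter {
        struct btree_trans *trans;      /* back-pointer slated for removal */
        unsigned level;
};

/* Before: the helper has a hidden dependency on iter->trans. */
static bool relock_before(struct btree_iter *iter, unsigned level)
{
        (void) level;
        return !iter->trans->restarted;
}

/* After: trans is explicit, so a future btree_path (with no trans
 * pointer at all) can be passed where iter is passed today. */
static bool relock_after(struct btree_trans *trans,
                         struct btree_iter *iter, unsigned level)
{
        (void) iter; (void) level;
        return !trans->restarted;
}

int main(void)
{
        struct btree_trans trans = { .restarted = false };
        struct btree_iter iter = { .trans = &trans, .level = 0 };

        printf("%d %d\n", relock_before(&iter, 0),
               relock_after(&trans, &iter, 0));
        return 0;
}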
@@ -639,6 +639,7 @@ struct btree *bch2_btree_node_mem_alloc(struct bch_fs *c)
 /* Slowpath, don't want it inlined into btree_iter_traverse() */
 static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
+                                                   struct btree_trans *trans,
                                                    struct btree_iter *iter,
                                                    const struct bkey_i *k,
                                                    enum btree_id btree_id,
@@ -655,8 +656,8 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
         /*
          * Parent node must be locked, else we could read in a btree node that's
          * been freed:
          */
-        if (iter && !bch2_btree_node_relock(iter, level + 1)) {
-                btree_trans_restart(iter->trans);
+        if (trans && !bch2_btree_node_relock(trans, iter, level + 1)) {
+                btree_trans_restart(trans);
                 return ERR_PTR(-EINTR);
         }
@@ -687,23 +688,23 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
                 six_unlock_intent(&b->c.lock);

         /* Unlock before doing IO: */
-        if (iter && sync)
-                bch2_trans_unlock(iter->trans);
+        if (trans && sync)
+                bch2_trans_unlock(trans);

         bch2_btree_node_read(c, b, sync);

         if (!sync)
                 return NULL;

-        if (iter &&
-            (!bch2_trans_relock(iter->trans) ||
-             !bch2_btree_iter_relock_intent(iter))) {
-                BUG_ON(!iter->trans->restarted);
+        if (trans &&
+            (!bch2_trans_relock(trans) ||
+             !bch2_btree_iter_relock_intent(trans, iter))) {
+                BUG_ON(!trans->restarted);
                 return ERR_PTR(-EINTR);
         }

         if (!six_relock_type(&b->c.lock, lock_type, seq)) {
-                btree_trans_restart(iter->trans);
+                btree_trans_restart(trans);
                 return ERR_PTR(-EINTR);
         }
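
The hunk above keeps two invariants already visible in the original: transaction locks are dropped before synchronous IO, and a failed relock afterwards surfaces as -EINTR with the transaction marked restarted. A compilable sketch of that control flow, using hypothetical stand-in helpers rather than the real API:

#include <stdbool.h>
#include <errno.h>
#include <stddef.h>

struct trans_sketch { bool restarted; };

/* Stubs standing in for bch2_trans_unlock()/bch2_trans_relock(). */
static void sketch_unlock(struct trans_sketch *t) { (void) t; }
static bool sketch_relock(struct trans_sketch *t) { return !t->restarted; }
static void sketch_read_node(bool sync) { (void) sync; /* IO here */ }

int fill_sketch(struct trans_sketch *trans, bool sync)
{
        /* Never hold transaction locks across blocking IO. */
        if (trans && sync)
                sketch_unlock(trans);

        sketch_read_node(sync);

        if (!sync)
                return 0;       /* async: caller keeps going */

        /* Relock after IO; failure means the transaction must restart. */
        if (trans && !sketch_relock(trans)) {
                trans->restarted = true;
                return -EINTR;
        }
        return 0;
}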
@@ -786,7 +787,7 @@ struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_iter *
                  * else we could read in a btree node from disk that's been
                  * freed:
                  */
-                b = bch2_btree_node_fill(c, iter, k, iter->btree_id,
+                b = bch2_btree_node_fill(c, trans, iter, k, iter->btree_id,
                                          level, lock_type, true);

                 /* We raced and found the btree node in the cache */
@@ -828,7 +829,7 @@ struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_iter *
                 if (btree_node_read_locked(iter, level + 1))
                         btree_node_unlock(iter, level + 1);

-                if (!btree_node_lock(b, k->k.p, level, iter, lock_type,
+                if (!btree_node_lock(trans, iter, b, k->k.p, level, lock_type,
                                      lock_node_check_fn, (void *) k, trace_ip)) {
                         if (!trans->restarted)
                                 goto retry;
@@ -839,7 +840,7 @@ struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_iter *
                            b->c.level != level ||
                            race_fault())) {
                         six_unlock_type(&b->c.lock, lock_type);
-                        if (bch2_btree_node_relock(iter, level + 1))
+                        if (bch2_btree_node_relock(trans, iter, level + 1))
                                 goto retry;

                         trace_trans_restart_btree_node_reused(trans->ip,
@@ -863,9 +864,9 @@ struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_iter *
                  * should_be_locked is not set on this iterator yet, so we need
                  * to relock it specifically:
                  */
-                if (iter &&
+                if (trans &&
                     (!bch2_trans_relock(trans) ||
-                     !bch2_btree_iter_relock_intent(iter))) {
+                     !bch2_btree_iter_relock_intent(trans, iter))) {
                         BUG_ON(!trans->restarted);
                         return ERR_PTR(-EINTR);
                 }
@@ -924,7 +925,7 @@ struct btree *bch2_btree_node_get_noiter(struct bch_fs *c,
                 if (nofill)
                         goto out;

-                b = bch2_btree_node_fill(c, NULL, k, btree_id,
+                b = bch2_btree_node_fill(c, NULL, NULL, k, btree_id,
                                          level, SIX_LOCK_read, true);

                 /* We raced and found the btree node in the cache */
@@ -982,21 +983,24 @@ struct btree *bch2_btree_node_get_noiter(struct bch_fs *c,
         return b;
 }

-int bch2_btree_node_prefetch(struct bch_fs *c, struct btree_iter *iter,
+int bch2_btree_node_prefetch(struct bch_fs *c,
+                             struct btree_trans *trans,
+                             struct btree_iter *iter,
                              const struct bkey_i *k,
                              enum btree_id btree_id, unsigned level)
 {
         struct btree_cache *bc = &c->btree_cache;
         struct btree *b;

-        BUG_ON(iter && !btree_node_locked(iter, level + 1));
+        BUG_ON(trans && !btree_node_locked(iter, level + 1));
         BUG_ON(level >= BTREE_MAX_DEPTH);

         b = btree_cache_find(bc, k);
         if (b)
                 return 0;

-        b = bch2_btree_node_fill(c, iter, k, btree_id, level, SIX_LOCK_read, false);
+        b = bch2_btree_node_fill(c, trans, iter, k, btree_id,
+                                 level, SIX_LOCK_read, false);
         return PTR_ERR_OR_ZERO(b);
 }
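
Note that bch2_btree_node_prefetch() keeps both trans and iter nullable: bch2_btree_node_get_noiter() above and the journal-replay prefetch at the end of this diff pass NULL, NULL, so every use is guarded (the BUG_ON only applies when a transaction context exists). A small sketch of that optional-context convention, with stand-in names:

#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>

struct trans_sketch { bool restarted; };
struct iter_sketch { bool node_locked; };

/* trans and iter are optional context; every use is guarded on NULL,
 * mirroring the "trans && ..." checks above. When trans is non-NULL,
 * an iter is expected too (as in the real BUG_ON). */
int prefetch_sketch(struct trans_sketch *trans, struct iter_sketch *iter)
{
        assert(!trans || iter->node_locked);

        if (trans && trans->restarted)
                return -EINTR;  /* transaction must be restarted first */
        return 0;
}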
@@ -27,8 +27,9 @@ struct btree *bch2_btree_node_get(struct btree_trans *, struct btree_iter *,
 struct btree *bch2_btree_node_get_noiter(struct bch_fs *, const struct bkey_i *,
                                          enum btree_id, unsigned, bool);

-int bch2_btree_node_prefetch(struct bch_fs *, struct btree_iter *,
-                             const struct bkey_i *, enum btree_id, unsigned);
+int bch2_btree_node_prefetch(struct bch_fs *, struct btree_trans *,
+                             struct btree_iter *, const struct bkey_i *,
+                             enum btree_id, unsigned);

 void bch2_btree_node_evict(struct bch_fs *, const struct bkey_i *);
[diff collapsed: changes to one file are not shown]
@@ -141,7 +141,7 @@ void bch2_btree_node_iter_fix(struct btree_trans *trans, struct btree_iter *,
                               struct btree *, struct btree_node_iter *,
                               struct bkey_packed *, unsigned, unsigned);

-bool bch2_btree_iter_relock_intent(struct btree_iter *);
+bool bch2_btree_iter_relock_intent(struct btree_trans *, struct btree_iter *);

 bool bch2_trans_relock(struct btree_trans *);
 void bch2_trans_unlock(struct btree_trans *);
@@ -154,15 +154,17 @@ static inline int btree_trans_restart(struct btree_trans *trans)
         return -EINTR;
 }

-bool __bch2_btree_iter_upgrade(struct btree_iter *, unsigned);
+bool __bch2_btree_iter_upgrade(struct btree_trans *,
+                               struct btree_iter *, unsigned);

-static inline bool bch2_btree_iter_upgrade(struct btree_iter *iter,
+static inline bool bch2_btree_iter_upgrade(struct btree_trans *trans,
+                                           struct btree_iter *iter,
                                            unsigned new_locks_want)
 {
         new_locks_want = min(new_locks_want, BTREE_MAX_DEPTH);

         return iter->locks_want < new_locks_want
-                ? __bch2_btree_iter_upgrade(iter, new_locks_want)
+                ? __bch2_btree_iter_upgrade(trans, iter, new_locks_want)
                 : iter->uptodate <= BTREE_ITER_NEED_PEEK;
 }
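
bch2_btree_iter_upgrade() keeps the usual split between a cheap inline fast path and an out-of-line slow path; only the signatures change in this commit. A standalone model of that split, with stand-in names and simplified locking:

#include <stdbool.h>

#define SKETCH_MAX_DEPTH 4U

struct iter_sketch {
        unsigned locks_want;    /* levels we want intent-locked */
};

/* Out-of-line slow path: would actually take stronger locks. */
static bool upgrade_slowpath(struct iter_sketch *iter, unsigned want)
{
        iter->locks_want = want;
        return true;
}

/* Inline fast path: the common "already strong enough" case costs
 * only a comparison, mirroring bch2_btree_iter_upgrade() above. */
static inline bool upgrade_sketch(struct iter_sketch *iter, unsigned want)
{
        if (want > SKETCH_MAX_DEPTH)
                want = SKETCH_MAX_DEPTH;  /* cf. min(..., BTREE_MAX_DEPTH) */

        return iter->locks_want < want
                ? upgrade_slowpath(iter, want)
                : true;
}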
@@ -213,7 +213,7 @@ static int btree_key_cache_fill(struct btree_trans *trans,
         if (ret)
                 goto err;

-        if (!bch2_btree_node_relock(ck_iter, 0)) {
+        if (!bch2_btree_node_relock(trans, ck_iter, 0)) {
                 trace_transaction_restart_ip(trans->ip, _THIS_IP_);
                 ret = btree_trans_restart(trans);
                 goto err;
@@ -266,9 +266,8 @@ static int bkey_cached_check_fn(struct six_lock *lock, void *p)
 }

 __flatten
-int bch2_btree_iter_traverse_cached(struct btree_iter *iter)
+int bch2_btree_iter_traverse_cached(struct btree_trans *trans, struct btree_iter *iter)
 {
-        struct btree_trans *trans = iter->trans;
         struct bch_fs *c = trans->c;
         struct bkey_cached *ck;
         int ret = 0;
@@ -277,7 +276,7 @@ int bch2_btree_iter_traverse_cached(struct btree_iter *iter)
         iter->l[1].b = NULL;

-        if (bch2_btree_node_relock(iter, 0)) {
+        if (bch2_btree_node_relock(trans, iter, 0)) {
                 ck = (void *) iter->l[0].b;
                 goto fill;
         }
@@ -302,7 +301,7 @@ int bch2_btree_iter_traverse_cached(struct btree_iter *iter)
         } else {
                 enum six_lock_type lock_want = __btree_lock_want(iter, 0);

-                if (!btree_node_lock((void *) ck, iter->pos, 0, iter, lock_want,
+                if (!btree_node_lock(trans, iter, (void *) ck, iter->pos, 0, lock_want,
                                      bkey_cached_check_fn, iter, _THIS_IP_)) {
                         if (!trans->restarted)
                                 goto retry;
@@ -326,7 +325,7 @@ int bch2_btree_iter_traverse_cached(struct btree_iter *iter)
 fill:
         if (!ck->valid && !(iter->flags & BTREE_ITER_CACHED_NOFILL)) {
                 if (!iter->locks_want &&
-                    !!__bch2_btree_iter_upgrade(iter, 1)) {
+                    !!__bch2_btree_iter_upgrade(trans, iter, 1)) {
                         trace_transaction_restart_ip(trans->ip, _THIS_IP_);
                         BUG_ON(!trans->restarted);
                         ret = -EINTR;
@@ -344,7 +343,7 @@ int bch2_btree_iter_traverse_cached(struct btree_iter *iter)
         iter->uptodate = BTREE_ITER_NEED_PEEK;

         if ((iter->flags & BTREE_ITER_INTENT) &&
-            !bch2_btree_iter_upgrade(iter, 1)) {
+            !bch2_btree_iter_upgrade(trans, iter, 1)) {
                 BUG_ON(!trans->restarted);
                 ret = -EINTR;
         }
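
The traverse path above leans on the same retry/restart idiom as the rest of the file: a failed lock either loops back to retry or, once the transaction is flagged restarted, unwinds with -EINTR so the whole transaction can be retried. Reduced to a compilable sketch with stand-in names:

#include <stdbool.h>
#include <errno.h>

struct trans_sketch { bool restarted; };

/* Stand-in lock attempt: fails once to model losing a race. */
static bool lock_sketch(int attempt)
{
        return attempt > 0;
}

int traverse_sketch(struct trans_sketch *trans)
{
        int attempt = 0;
retry:
        if (!lock_sketch(attempt++)) {
                if (!trans->restarted)
                        goto retry;     /* transient failure: try again */
                return -EINTR;          /* restart requested: unwind */
        }
        return 0;
}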
@@ -26,7 +26,7 @@ int bch2_btree_key_cache_journal_flush(struct journal *,
 struct bkey_cached *
 bch2_btree_key_cache_find(struct bch_fs *, enum btree_id, struct bpos);

-int bch2_btree_iter_traverse_cached(struct btree_iter *);
+int bch2_btree_iter_traverse_cached(struct btree_trans *, struct btree_iter *);

 bool bch2_btree_insert_key_cached(struct btree_trans *,
                                   struct btree_iter *, struct bkey_i *);
@@ -166,40 +166,38 @@ static inline bool btree_node_lock_increment(struct btree_trans *trans,
         return false;
 }

-bool __bch2_btree_node_lock(struct btree *, struct bpos, unsigned,
-                            struct btree_iter *, enum six_lock_type,
-                            six_lock_should_sleep_fn, void *,
-                            unsigned long);
+bool __bch2_btree_node_lock(struct btree_trans *, struct btree_iter *,
+                            struct btree *, struct bpos, unsigned,
+                            enum six_lock_type, six_lock_should_sleep_fn,
+                            void *, unsigned long);

-static inline bool btree_node_lock(struct btree *b,
-                                   struct bpos pos, unsigned level,
+static inline bool btree_node_lock(struct btree_trans *trans,
                                    struct btree_iter *iter,
+                                   struct btree *b, struct bpos pos, unsigned level,
                                    enum six_lock_type type,
                                    six_lock_should_sleep_fn should_sleep_fn, void *p,
                                    unsigned long ip)
 {
-        struct btree_trans *trans = iter->trans;
-
         EBUG_ON(level >= BTREE_MAX_DEPTH);
         EBUG_ON(!(trans->iters_linked & (1ULL << iter->idx)));

         return likely(six_trylock_type(&b->c.lock, type)) ||
                 btree_node_lock_increment(trans, b, level, type) ||
-                __bch2_btree_node_lock(b, pos, level, iter, type,
+                __bch2_btree_node_lock(trans, iter, b, pos, level, type,
                                        should_sleep_fn, p, ip);
 }

-bool __bch2_btree_node_relock(struct btree_iter *, unsigned);
+bool __bch2_btree_node_relock(struct btree_trans *, struct btree_iter *, unsigned);

-static inline bool bch2_btree_node_relock(struct btree_iter *iter,
-                                          unsigned level)
+static inline bool bch2_btree_node_relock(struct btree_trans *trans,
+                                          struct btree_iter *iter, unsigned level)
 {
         EBUG_ON(btree_node_locked(iter, level) &&
                 btree_node_locked_type(iter, level) !=
                 __btree_lock_want(iter, level));

         return likely(btree_node_locked(iter, level)) ||
-                __bch2_btree_node_relock(iter, level);
+                __bch2_btree_node_relock(trans, iter, level);
 }

 /*
@@ -224,8 +222,7 @@ bch2_btree_node_unlock_write_inlined(struct btree_trans *trans, struct btree_ite
 void bch2_btree_node_unlock_write(struct btree_trans *,
                                   struct btree_iter *, struct btree *);

-void __bch2_btree_node_lock_write(struct btree_trans *,
-                                  struct btree_iter *, struct btree *);
+void __bch2_btree_node_lock_write(struct btree_trans *, struct btree *);

 static inline void bch2_btree_node_lock_write(struct btree_trans *trans,
                                               struct btree_iter *iter,
@@ -233,9 +230,10 @@ static inline void bch2_btree_node_lock_write(struct btree_trans *trans,
 {
         EBUG_ON(iter->l[b->c.level].b != b);
         EBUG_ON(iter->l[b->c.level].lock_seq != b->c.lock.state.seq);
+        EBUG_ON(!btree_node_intent_locked(iter, b->c.level));

         if (unlikely(!six_trylock_write(&b->c.lock)))
-                __bch2_btree_node_lock_write(trans, iter, b);
+                __bch2_btree_node_lock_write(trans, b);
 }

 #endif /* _BCACHEFS_BTREE_LOCKING_H */
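
btree_node_lock() above is a three-stage ladder: an uncontended trylock first, then btree_node_lock_increment() to piggyback on a lock this transaction already holds, and only then the blocking __bch2_btree_node_lock() slow path. A simplified standalone model of that ladder (stand-in types; the real six locks are far richer):

#include <stdbool.h>
#include <stddef.h>

struct node_sketch { bool locked; int holders; };
struct trans_sketch { struct node_sketch *held; };

static bool trylock_sketch(struct node_sketch *n)
{
        if (n->locked)
                return false;
        n->locked = true;
        return true;
}

/* Transaction already holds this node: just bump the hold count. */
static bool increment_sketch(struct trans_sketch *t, struct node_sketch *n)
{
        if (t->held != n)
                return false;
        n->holders++;
        return true;
}

/* Blocking slow path; the real one may sleep or restart the trans. */
static bool slowpath_sketch(struct trans_sketch *t, struct node_sketch *n)
{
        (void) t;
        n->locked = true;
        return true;
}

static inline bool node_lock_sketch(struct trans_sketch *t,
                                    struct node_sketch *n)
{
        return trylock_sketch(n) ||
               increment_sketch(t, n) ||
               slowpath_sketch(t, n);
}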
@@ -937,7 +937,7 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_iter *iter,
          * XXX: figure out how far we might need to split,
          * instead of locking/reserving all the way to the root:
          */
-        if (!bch2_btree_iter_upgrade(iter, U8_MAX)) {
+        if (!bch2_btree_iter_upgrade(trans, iter, U8_MAX)) {
                 trace_trans_restart_iter_upgrade(trans->ip, _RET_IP_,
                                                  iter->btree_id,
                                                  &iter->real_pos);
@@ -132,7 +132,7 @@ static inline int bch2_foreground_maybe_merge_sibling(struct btree_trans *trans,
         if (iter->uptodate >= BTREE_ITER_NEED_TRAVERSE)
                 return 0;

-        if (!bch2_btree_node_relock(iter, level))
+        if (!bch2_btree_node_relock(trans, iter, level))
                 return 0;

         b = iter->l[level].b;
@@ -561,7 +561,7 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans,
          */
         trans_for_each_iter(trans, iter)
                 if (iter->nodes_locked != iter->nodes_intent_locked &&
-                    !bch2_btree_iter_upgrade(iter, 1)) {
+                    !bch2_btree_iter_upgrade(trans, iter, 1)) {
                         trace_trans_restart_upgrade(trans->ip, trace_ip,
                                                     iter->btree_id,
                                                     &iter->real_pos);
@@ -783,7 +783,8 @@ int __bch2_trans_commit(struct btree_trans *trans)
         trans_for_each_update(trans, i) {
                 BUG_ON(!i->iter->should_be_locked);

-                if (unlikely(!bch2_btree_iter_upgrade(i->iter, i->level + 1))) {
+                if (unlikely(!bch2_btree_iter_upgrade(trans, i->iter,
+                                                      i->level + 1))) {
                         trace_trans_restart_upgrade(trans->ip, _RET_IP_,
                                                     i->iter->btree_id,
                                                     &i->iter->pos);
@@ -326,8 +326,8 @@ static void btree_and_journal_iter_prefetch(struct bch_fs *c, struct btree *b,
                (k = bch2_btree_and_journal_iter_peek(&iter)).k) {
                 bch2_bkey_buf_reassemble(&tmp, c, k);

-                bch2_btree_node_prefetch(c, NULL, tmp.k,
-                                         b->c.btree_id, b->c.level - 1);
+                bch2_btree_node_prefetch(c, NULL, NULL, tmp.k,
+                                         b->c.btree_id, b->c.level - 1);

                 bch2_btree_and_journal_iter_advance(&iter);
                 i++;