Commit 6e075b54 authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: bch2_btree_iter_relock_intent()

This adds a new helper for btree_cache.c that does what we want while
the iterator is still being traversed - and also eliminates some
unnecessary transaction restarts.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
parent a6eba44b
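
For context, the pattern these hunks implement looks roughly like the sketch below. This is a minimal illustration mirroring the btree_cache.c changes, not code from the patch; example_wait_and_relock is a hypothetical name, and only functions that appear in the diff itself are assumed to exist:

/*
 * Hypothetical sketch: drop locks around a blocking operation, then
 * retake the transaction's locks and the in-flight iterator's intent
 * locks.  bch2_btree_iter_relock() would spuriously fail here because
 * the iterator isn't pointed at a valid btree node yet;
 * bch2_btree_iter_relock_intent() only walks the intent-locked levels,
 * so it avoids an unnecessary transaction restart.
 */
static struct btree *example_wait_and_relock(struct btree_trans *trans,
					     struct btree_iter *iter,
					     struct btree *b,
					     enum six_lock_type lock_type)
{
	u32 seq = b->c.lock.state.seq;

	six_unlock_type(&b->c.lock, lock_type);
	bch2_trans_unlock(trans);

	bch2_btree_node_wait_on_read(b);	/* may block */

	if (iter &&
	    (!bch2_trans_relock(trans) ||
	     !bch2_btree_iter_relock_intent(iter)))
		return ERR_PTR(-EINTR);	/* genuine failure: restart the transaction */

	if (!six_relock_type(&b->c.lock, lock_type, seq))
		return ERR_PTR(-EINTR);	/* the real code retries here; simplified */

	return b;
}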
fs/bcachefs/btree_cache.c

@@ -693,14 +693,9 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
 	if (!sync)
 		return NULL;
 
-	/*
-	 * XXX: this will probably always fail because btree_iter_relock()
-	 * currently fails for iterators that aren't pointed at a valid btree
-	 * node
-	 */
 	if (iter &&
 	    (!bch2_trans_relock(iter->trans) ||
-	     !bch2_btree_iter_relock(iter, _THIS_IP_)))
+	     !bch2_btree_iter_relock_intent(iter)))
 		return ERR_PTR(-EINTR);
 
 	if (!six_relock_type(&b->c.lock, lock_type, seq))
@@ -760,11 +755,12 @@ static inline void btree_check_header(struct bch_fs *c, struct btree *b)
  * The btree node will have either a read or a write lock held, depending on
  * the @write parameter.
  */
-struct btree *bch2_btree_node_get(struct bch_fs *c, struct btree_iter *iter,
+struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_iter *iter,
 				  const struct bkey_i *k, unsigned level,
 				  enum six_lock_type lock_type,
 				  unsigned long trace_ip)
 {
+	struct bch_fs *c = trans->c;
 	struct btree_cache *bc = &c->btree_cache;
 	struct btree *b;
 	struct bset_tree *t;
@@ -838,7 +834,7 @@ struct btree *bch2_btree_node_get(struct bch_fs *c, struct btree_iter *iter,
 		if (bch2_btree_node_relock(iter, level + 1))
 			goto retry;
 
-		trace_trans_restart_btree_node_reused(iter->trans->ip,
+		trace_trans_restart_btree_node_reused(trans->ip,
 						      trace_ip,
 						      iter->btree_id,
 						      &iter->real_pos);
@@ -850,18 +846,17 @@ struct btree *bch2_btree_node_get(struct bch_fs *c, struct btree_iter *iter,
 			u32 seq = b->c.lock.state.seq;
 
 			six_unlock_type(&b->c.lock, lock_type);
-			bch2_trans_unlock(iter->trans);
+			bch2_trans_unlock(trans);
 
 			bch2_btree_node_wait_on_read(b);
 
 			/*
-			 * XXX: check if this always fails - btree_iter_relock()
-			 * currently fails for iterators that aren't pointed at a valid
-			 * btree node
+			 * should_be_locked is not set on this iterator yet, so we need
+			 * to relock it specifically:
 			 */
 			if (iter &&
-			    (!bch2_trans_relock(iter->trans) ||
-			     !bch2_btree_iter_relock(iter, _THIS_IP_)))
+			    (!bch2_trans_relock(trans) ||
+			     !bch2_btree_iter_relock_intent(iter)))
 				return ERR_PTR(-EINTR);
 
 			if (!six_relock_type(&b->c.lock, lock_type, seq))
fs/bcachefs/btree_cache.h

@@ -20,7 +20,7 @@ int bch2_btree_cache_cannibalize_lock(struct bch_fs *, struct closure *);
 struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *);
 struct btree *bch2_btree_node_mem_alloc(struct bch_fs *);
 
-struct btree *bch2_btree_node_get(struct bch_fs *, struct btree_iter *,
+struct btree *bch2_btree_node_get(struct btree_trans *, struct btree_iter *,
 				  const struct bkey_i *, unsigned,
 				  enum six_lock_type, unsigned long);
 
fs/bcachefs/btree_iter.c

@@ -205,7 +205,6 @@ static inline bool btree_iter_get_locks(struct btree_iter *iter, bool upgrade,
 					   is_btree_node(iter, l)
 					   ? iter->l[l].b->c.lock.state.seq
 					   : 0);
-
 			fail_idx = l;
 			btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
 		}
@@ -382,6 +381,34 @@ void bch2_btree_trans_verify_locks(struct btree_trans *trans)
 static inline void bch2_btree_iter_verify_locks(struct btree_iter *iter) {}
 #endif
 
+/*
+ * Only for btree_cache.c - only relocks intent locks
+ */
+bool bch2_btree_iter_relock_intent(struct btree_iter *iter)
+{
+	unsigned l;
+
+	for (l = iter->level;
+	     l < iter->locks_want && btree_iter_node(iter, l);
+	     l++) {
+		if (!bch2_btree_node_relock(iter, l)) {
+			trace_node_relock_fail(iter->trans->ip, _RET_IP_,
+					       iter->btree_id, &iter->real_pos,
+					       l, iter->l[l].lock_seq,
+					       is_btree_node(iter, l)
+					       ? 0
+					       : (unsigned long) iter->l[l].b,
+					       is_btree_node(iter, l)
+					       ? iter->l[l].b->c.lock.state.seq
+					       : 0);
+			btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
+			return false;
+		}
+	}
+
+	return true;
+}
+
 __flatten
 bool bch2_btree_iter_relock(struct btree_iter *iter, unsigned long trace_ip)
 {
@@ -1172,7 +1199,8 @@ static noinline void btree_node_mem_ptr_set(struct btree_iter *iter,
 static __always_inline int btree_iter_down(struct btree_iter *iter,
 					   unsigned long trace_ip)
 {
-	struct bch_fs *c = iter->trans->c;
+	struct btree_trans *trans = iter->trans;
+	struct bch_fs *c = trans->c;
 	struct btree_iter_level *l = &iter->l[iter->level];
 	struct btree *b;
 	unsigned level = iter->level - 1;
@@ -1186,7 +1214,7 @@ static __always_inline int btree_iter_down(struct btree_iter *iter,
 	bch2_bkey_buf_unpack(&tmp, c, l->b,
 			     bch2_btree_node_iter_peek(&l->iter, l->b));
 
-	b = bch2_btree_node_get(c, iter, tmp.k, level, lock_type, trace_ip);
+	b = bch2_btree_node_get(trans, iter, tmp.k, level, lock_type, trace_ip);
 	ret = PTR_ERR_OR_ZERO(b);
 	if (unlikely(ret))
 		goto err;
fs/bcachefs/btree_iter.h

@@ -111,6 +111,7 @@ void bch2_btree_node_iter_fix(struct btree_iter *, struct btree *,
 			      struct btree_node_iter *, struct bkey_packed *,
 			      unsigned, unsigned);
 
+bool bch2_btree_iter_relock_intent(struct btree_iter *);
 bool bch2_btree_iter_relock(struct btree_iter *, unsigned long);
 bool bch2_trans_relock(struct btree_trans *);
 void bch2_trans_unlock(struct btree_trans *);
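The -EINTR returned when relocking fails is not a real error: it propagates up to the top of the transaction, which then restarts. Below is a minimal caller-side sketch of that convention, assuming the usual bch2_trans_begin() restart loop (do_btree_op is a hypothetical stand-in for any transactional btree operation):

	int ret;
retry:
	ret = do_btree_op(&trans);
	if (ret == -EINTR) {
		/* locks couldn't be retaken; reset and retry from the top */
		bch2_trans_begin(&trans);
		goto retry;
	}

Fewer spurious relock failures from bch2_btree_iter_relock_intent() therefore mean fewer trips around this loop, which is where the eliminated transaction restarts come from.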