Commit b5fd7566 authored by Kent Overstreet

bcachefs: drop_locks_do()

Add a new helper for the common pattern of:
 - trans_unlock()
 - do something
 - trans_relock()
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 19c304be
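
To make the new helper easier to follow, here is a small standalone sketch (editorial, not part of the commit) that mirrors the macro against stub lock functions. The names struct trans, trans_unlock(), trans_relock() and blocking_work() are invented for illustration; the only real dependencies are the GCC/Clang statement-expression and "a ?: b" extensions that the actual macro also relies on.

#include <stdio.h>

struct trans {
        int locked;
};

/* Stand-ins for bch2_trans_unlock()/bch2_trans_relock(); the real ones
 * drop and re-take btree node locks, and relock can fail with a
 * transaction-restart error. */
static void trans_unlock(struct trans *trans)
{
        trans->locked = 0;
}

static int trans_relock(struct trans *trans)
{
        trans->locked = 1;
        return 0;               /* the real helper may return an error */
}

/* Same shape as the helper added by this commit: unlock, evaluate _do,
 * and if it evaluated to 0 fall through to relocking via GNU "?:". */
#define drop_locks_do(_trans, _do)                              \
({                                                              \
        trans_unlock(_trans);                                   \
        _do ?: trans_relock(_trans);                            \
})

/* Hypothetical blocking operation, standing in for schedule(),
 * mutex_lock(), down_read(), closure_sync(), ... */
static void blocking_work(void)
{
}

int main(void)
{
        struct trans trans = { .locked = 1 };

        /* Void operations are wrapped as "(expr, 0)", matching call sites
         * like drop_locks_do(trans, (schedule(), 0)) in the diff below. */
        int ret = drop_locks_do(&trans, (blocking_work(), 0));

        printf("ret=%d locked=%d\n", ret, trans.locked);
        return ret;
}

As in the commit, a void operation is passed as "(expr, 0)" so the macro sees 0 and proceeds to relock, while an int-returning call such as bch2_fs_read_write_early() can be passed directly: a nonzero result short-circuits the "?:" and is returned without attempting the relock.
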
@@ -41,13 +41,10 @@ static struct btree_path *btree_path_alloc(struct btree_trans *, struct btree_pa
  */
 static inline int bch2_trans_cond_resched(struct btree_trans *trans)
 {
-	if (need_resched() || race_fault()) {
-		bch2_trans_unlock(trans);
-		schedule();
-		return bch2_trans_relock(trans);
-	} else {
+	if (need_resched() || race_fault())
+		return drop_locks_do(trans, (schedule(), 0));
+	else
 		return 0;
-	}
 }
 
 static inline int __btree_path_cmp(const struct btree_path *l,
@@ -854,6 +854,11 @@ __bch2_btree_iter_peek_upto_and_restart(struct btree_trans *trans,
 	     !((_ret) = bkey_err(_k)) && (_k).k;			\
 	     bch2_btree_iter_advance(&(_iter)))
 
+#define drop_locks_do(_trans, _do)					\
+({									\
+	bch2_trans_unlock(_trans);					\
+	_do ?: bch2_trans_relock(_trans);				\
+})
+
 /* new multiple iterator interface: */
 
 void bch2_trans_updates_to_text(struct printbuf *, struct btree_trans *);
@@ -736,11 +736,8 @@ bool bch2_trans_locked(struct btree_trans *trans)
 int __bch2_trans_mutex_lock(struct btree_trans *trans,
 			    struct mutex *lock)
 {
-	int ret;
-
-	bch2_trans_unlock(trans);
-	mutex_lock(lock);
-	ret = bch2_trans_relock(trans);
+	int ret = drop_locks_do(trans, (mutex_lock(lock), 0));
+
 	if (ret)
 		mutex_unlock(lock);
 	return ret;
@@ -1083,9 +1083,7 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
 	if (flags & BTREE_INSERT_GC_LOCK_HELD)
 		lockdep_assert_held(&c->gc_lock);
 	else if (!down_read_trylock(&c->gc_lock)) {
-		bch2_trans_unlock(trans);
-		down_read(&c->gc_lock);
-		ret = bch2_trans_relock(trans);
+		ret = drop_locks_do(trans, (down_read(&c->gc_lock), 0));
 		if (ret) {
 			up_read(&c->gc_lock);
 			return ERR_PTR(ret);
@@ -2256,9 +2254,7 @@ int bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *ite
 	if (btree_ptr_hash_val(new_key) != b->hash_val) {
 		ret = bch2_btree_cache_cannibalize_lock(c, &cl);
 		if (ret) {
-			bch2_trans_unlock(trans);
-			closure_sync(&cl);
-			ret = bch2_trans_relock(trans);
+			ret = drop_locks_do(trans, (closure_sync(&cl), 0));
 			if (ret)
 				return ret;
 		}
@@ -316,25 +316,11 @@ static noinline int
 bch2_trans_journal_preres_get_cold(struct btree_trans *trans, unsigned flags,
 				   unsigned long trace_ip)
 {
-	struct bch_fs *c = trans->c;
-	int ret;
-
-	bch2_trans_unlock(trans);
-
-	ret = bch2_journal_preres_get(&c->journal,
-			&trans->journal_preres,
-			trans->journal_preres_u64s,
-			(flags & JOURNAL_WATERMARK_MASK));
-	if (ret)
-		return ret;
-
-	ret = bch2_trans_relock(trans);
-	if (ret) {
-		trace_and_count(c, trans_restart_journal_preres_get, trans, trace_ip, 0);
-		return ret;
-	}
-
-	return 0;
+	return drop_locks_do(trans,
+		bch2_journal_preres_get(&trans->c->journal,
					&trans->journal_preres,
					trans->journal_preres_u64s,
					(flags & JOURNAL_WATERMARK_MASK)));
 }
 
 static __always_inline int bch2_trans_journal_res_get(struct btree_trans *trans,
@@ -1053,10 +1039,7 @@ bch2_trans_commit_get_rw_cold(struct btree_trans *trans, unsigned flags)
 	    test_bit(BCH_FS_STARTED, &c->flags))
 		return -BCH_ERR_erofs_trans_commit;
 
-	bch2_trans_unlock(trans);
-
-	ret = bch2_fs_read_write_early(c) ?:
-		bch2_trans_relock(trans);
+	ret = drop_locks_do(trans, bch2_fs_read_write_early(c));
 	if (ret)
 		return ret;