Commit ccb7b08f authored by Kent Overstreet

bcachefs: trans_for_each_path() no longer uses path->idx

path->idx is now a code smell: we should be using path_idx_t, since it's
stable across btree path reallocation.

This is also a bit faster: the loop now reuses a single counter, instead of
refetching path->idx from each path it visits.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 4c5289e6
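Before the diff, a minimal self-contained sketch of the iteration shape this commit adopts. This is not bcachefs code — toy_trans, toy_path, toy_next_path, and MAX_PATHS are hypothetical stand-ins — but the loop mirrors the new three-argument trans_for_each_path(): the caller owns the counter, instead of refetching path->idx from each path it visits (the real helpers scan the trans->paths_allocated bitmap; see the find_next_bit() call in the removed code below).

/* sketch.c — illustrative only; build with: cc -std=c99 sketch.c */
#include <stdbool.h>
#include <stdio.h>

#define MAX_PATHS	64

struct toy_path {
	bool	allocated;
	int	value;
};

struct toy_trans {
	struct toy_path paths[MAX_PATHS];
};

/* Analogue of __trans_next_path(): advance *idx to the next live slot. */
static struct toy_path *toy_next_path(struct toy_trans *trans, unsigned *idx)
{
	while (*idx < MAX_PATHS && !trans->paths[*idx].allocated)
		(*idx)++;
	return *idx < MAX_PATHS ? &trans->paths[*idx] : NULL;
}

/* Same shape as the new trans_for_each_path(): the counter lives in the
 * caller; iteration starts at 1, as in the macros below. */
#define toy_for_each_path(_trans, _path, _iter)			\
	for (_iter = 1;						\
	     (_path = toy_next_path((_trans), &(_iter)));	\
	     (_iter)++)

int main(void)
{
	struct toy_trans trans = {0};
	struct toy_path *path;
	unsigned i;

	trans.paths[2] = (struct toy_path) { .allocated = true, .value = 42 };
	trans.paths[5] = (struct toy_path) { .allocated = true, .value = 7 };

	toy_for_each_path(&trans, path, i)
		printf("slot %u holds %d\n", i, path->value);
	return 0;
}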
@@ -241,8 +241,9 @@ static void bch2_btree_path_verify(struct btree_trans *trans,
 void bch2_trans_verify_paths(struct btree_trans *trans)
 {
 	struct btree_path *path;
+	unsigned iter;
 
-	trans_for_each_path(trans, path)
+	trans_for_each_path(trans, path, iter)
 		bch2_btree_path_verify(trans, path);
 }
@@ -962,7 +963,8 @@ static int bch2_btree_path_traverse_all(struct btree_trans *trans)
 	struct bch_fs *c = trans->c;
 	struct btree_path *path;
 	unsigned long trace_ip = _RET_IP_;
-	int i, ret = 0;
+	unsigned i;
+	int ret = 0;
 
 	if (trans->in_traverse_all)
 		return -BCH_ERR_transaction_restart_in_traverse_all;
@@ -972,7 +974,7 @@ static int bch2_btree_path_traverse_all(struct btree_trans *trans)
 	trans->restarted = 0;
 	trans->last_restarted_ip = 0;
 
-	trans_for_each_path(trans, path)
+	trans_for_each_path(trans, path, i)
 		path->should_be_locked = false;
 
 	btree_trans_sort_paths(trans);
@@ -2530,9 +2532,9 @@ static void btree_trans_verify_sorted_refs(struct btree_trans *trans)
 	BUG_ON(trans->nr_sorted != bitmap_weight(trans->paths_allocated, BTREE_ITER_MAX) - 1);
 
-	trans_for_each_path(trans, path) {
+	trans_for_each_path(trans, path, i) {
 		BUG_ON(path->sorted_idx >= trans->nr_sorted);
-		BUG_ON(trans->sorted[path->sorted_idx] != path->idx);
+		BUG_ON(trans->sorted[path->sorted_idx] != i);
 	}
 
 	for (i = 0; i < trans->nr_sorted; i++) {
@@ -2774,8 +2776,9 @@ void bch2_trans_srcu_unlock(struct btree_trans *trans)
 	if (trans->srcu_held) {
 		struct bch_fs *c = trans->c;
 		struct btree_path *path;
+		unsigned i;
 
-		trans_for_each_path(trans, path)
+		trans_for_each_path(trans, path, i)
 			if (path->cached && !btree_node_locked(path, 0))
 				path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_srcu_reset);
@@ -2807,6 +2810,7 @@ static void bch2_trans_srcu_lock(struct btree_trans *trans)
 u32 bch2_trans_begin(struct btree_trans *trans)
 {
 	struct btree_path *path;
+	unsigned i;
 	u64 now;
 
 	bch2_trans_reset_updates(trans);
@@ -2815,7 +2819,7 @@ u32 bch2_trans_begin(struct btree_trans *trans)
 	trans->mem_top = 0;
 	trans->journal_entries = NULL;
 
-	trans_for_each_path(trans, path) {
+	trans_for_each_path(trans, path, i) {
 		path->should_be_locked = false;
 
 		/*
@@ -2832,7 +2836,7 @@ u32 bch2_trans_begin(struct btree_trans *trans)
 		 * iterators if we do that
 		 */
 		if (!path->ref && !path->preserve)
-			__bch2_path_free(trans, path->idx);
+			__bch2_path_free(trans, i);
 		else
 			path->preserve = false;
 	}
@@ -2972,14 +2976,15 @@ static void check_btree_paths_leaked(struct btree_trans *trans)
 #ifdef CONFIG_BCACHEFS_DEBUG
 	struct bch_fs *c = trans->c;
 	struct btree_path *path;
+	unsigned i;
 
-	trans_for_each_path(trans, path)
+	trans_for_each_path(trans, path, i)
 		if (path->ref)
 			goto leaked;
 	return;
 leaked:
 	bch_err(c, "btree paths leaked from %s!", trans->fn);
-	trans_for_each_path(trans, path)
+	trans_for_each_path(trans, path, i)
 		if (path->ref)
 			printk(KERN_ERR " btree %s %pS\n",
 			       bch2_btree_id_str(path->btree_id),
@@ -63,22 +63,6 @@ static inline void btree_trans_sort_paths(struct btree_trans *trans)
 		__bch2_btree_trans_sort_paths(trans);
 }
 
-static inline struct btree_path *
-__trans_next_path(struct btree_trans *trans, unsigned idx)
-{
-	idx = find_next_bit(trans->paths_allocated, BTREE_ITER_MAX, idx);
-	if (idx == BTREE_ITER_MAX)
-		return NULL;
-
-	EBUG_ON(idx > BTREE_ITER_MAX);
-	EBUG_ON(trans->paths[idx].idx != idx);
-	return &trans->paths[idx];
-}
-
-#define trans_for_each_path(_trans, _path)				\
-	for (_path = __trans_next_path((_trans), 1);			\
-	     (_path);							\
-	     _path = __trans_next_path((_trans), (_path)->idx + 1))
-
 static inline struct btree_path *
 __trans_next_path_safe(struct btree_trans *trans, unsigned *idx)
 {
@@ -102,6 +86,19 @@ __trans_next_path_safe(struct btree_trans *trans, unsigned *idx)
 #define trans_for_each_path_safe(_trans, _path, _idx)			\
 	trans_for_each_path_safe_from(_trans, _path, _idx, 1)
 
+static inline struct btree_path *
+__trans_next_path(struct btree_trans *trans, unsigned *idx)
+{
+	struct btree_path *path = __trans_next_path_safe(trans, idx);
+
+	EBUG_ON(path && path->idx != *idx);
+	return path;
+}
+
+#define trans_for_each_path(_trans, _path, _iter)			\
+	for (_iter = 1;							\
+	     (_path = __trans_next_path((_trans), &_iter));		\
+	     _iter++)
+
 static inline struct btree_path *next_btree_path(struct btree_trans *trans, struct btree_path *path)
 {
 	unsigned idx = path ? path->sorted_idx + 1 : 0;
@@ -156,10 +153,11 @@ static inline struct btree_path *
 __trans_next_path_with_node(struct btree_trans *trans, struct btree *b,
 			    unsigned idx)
 {
-	struct btree_path *path = __trans_next_path(trans, idx);
+	struct btree_path *path = __trans_next_path(trans, &idx);
 
-	while (path && !__path_has_node(path, b))
-		path = __trans_next_path(trans, path->idx + 1);
+	while ((path = __trans_next_path(trans, &idx)) &&
+	       !__path_has_node(path, b))
+		idx++;
 
 	return path;
 }
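The commit message's point about stability across btree path reallocation is easiest to see in a standalone sketch (again hypothetical names, not bcachefs code): if the loop body may reallocate the array backing the paths, a pointer held across iterations dangles, while a plain index stays valid and re-derives a fresh pointer each time around.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct toy_path {
	int	value;
};

struct toy_trans {
	struct toy_path	*paths;
	unsigned	nr;
};

/* Grow the backing array: every element pointer held before this call is
 * invalidated, but plain indexes remain meaningful. (Error handling is
 * elided for the sketch.) */
static void toy_grow_paths(struct toy_trans *trans, unsigned new_nr)
{
	trans->paths = realloc(trans->paths, new_nr * sizeof(*trans->paths));
	memset(trans->paths + trans->nr, 0,
	       (new_nr - trans->nr) * sizeof(*trans->paths));
	trans->nr = new_nr;
}

int main(void)
{
	struct toy_trans trans = { .paths = NULL, .nr = 0 };

	toy_grow_paths(&trans, 4);
	trans.paths[1].value = 10;
	trans.paths[3].value = 30;

	/* Index-based loop: the pointer is re-derived from the index, so the
	 * body growing the array mid-loop is safe. */
	for (unsigned i = 0; i < 4; i++) {
		if (i == 1)
			toy_grow_paths(&trans, 64);
		printf("paths[%u].value = %d\n", i, trans.paths[i].value);
	}

	free(trans.paths);
	return 0;
}

This is the shape trans_for_each_path_safe() already had; the rewritten trans_for_each_path() above now shares it via __trans_next_path_safe().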
@@ -690,8 +690,9 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans,
 		}
 	} else {
 		struct btree_path *path2;
+		unsigned i;
 
 evict:
-		trans_for_each_path(trans, path2)
+		trans_for_each_path(trans, path2, i)
 			if (path2 != path)
 				__bch2_btree_path_unlock(trans, path2);
@@ -32,13 +32,14 @@ struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *trans,
 {
 	struct btree_path *path;
 	struct six_lock_count ret;
+	unsigned i;
 
 	memset(&ret, 0, sizeof(ret));
 
 	if (IS_ERR_OR_NULL(b))
 		return ret;
 
-	trans_for_each_path(trans, path)
+	trans_for_each_path(trans, path, i)
 		if (path != skip && &path->l[level].b->c == b) {
 			int t = btree_node_locked_type(path, level);
@@ -417,7 +418,7 @@ void bch2_btree_node_lock_write_nofail(struct btree_trans *trans,
 			       struct btree_bkey_cached_common *b)
 {
 	struct btree_path *linked;
-	unsigned i;
+	unsigned i, iter;
 	int ret;
 
 	/*
@@ -431,7 +432,7 @@ void bch2_btree_node_lock_write_nofail(struct btree_trans *trans,
 	 * already taken are no longer needed:
 	 */
-	trans_for_each_path(trans, linked) {
+	trans_for_each_path(trans, linked, iter) {
 		if (!linked->nodes_locked)
 			continue;
@@ -643,8 +644,6 @@ bool __bch2_btree_path_upgrade(struct btree_trans *trans,
 			       unsigned new_locks_want,
 			       struct get_locks_fail *f)
 {
-	struct btree_path *linked;
-
 	if (bch2_btree_path_upgrade_noupgrade_sibs(trans, path, new_locks_want, f))
 		return true;
@@ -667,8 +666,11 @@ bool __bch2_btree_path_upgrade(struct btree_trans *trans,
 	 * before interior nodes - now that's handled by
 	 * bch2_btree_path_traverse_all().
 	 */
-	if (!path->cached && !trans->in_traverse_all)
-		trans_for_each_path(trans, linked)
+	if (!path->cached && !trans->in_traverse_all) {
+		struct btree_path *linked;
+		unsigned i;
+
+		trans_for_each_path(trans, linked, i)
 			if (linked != path &&
 			    linked->cached == path->cached &&
 			    linked->btree_id == path->btree_id &&
@@ -676,6 +678,7 @@ bool __bch2_btree_path_upgrade(struct btree_trans *trans,
 				linked->locks_want = new_locks_want;
 				btree_path_get_locks(trans, linked, true, NULL);
 			}
+	}
 
 	return false;
 }
@@ -716,22 +719,24 @@ void __bch2_btree_path_downgrade(struct btree_trans *trans,
 void bch2_trans_downgrade(struct btree_trans *trans)
 {
 	struct btree_path *path;
+	unsigned i;
 
 	if (trans->restarted)
 		return;
 
-	trans_for_each_path(trans, path)
+	trans_for_each_path(trans, path, i)
 		bch2_btree_path_downgrade(trans, path);
 }
 
 int bch2_trans_relock(struct btree_trans *trans)
 {
 	struct btree_path *path;
+	unsigned i;
 
 	if (unlikely(trans->restarted))
 		return -((int) trans->restarted);
 
-	trans_for_each_path(trans, path)
+	trans_for_each_path(trans, path, i)
 		if (path->should_be_locked &&
 		    !bch2_btree_path_relock_norestart(trans, path, _RET_IP_)) {
 			trace_and_count(trans->c, trans_restart_relock, trans, _RET_IP_, path);
@@ -743,11 +748,12 @@ int bch2_trans_relock(struct btree_trans *trans)
 int bch2_trans_relock_notrace(struct btree_trans *trans)
 {
 	struct btree_path *path;
+	unsigned i;
 
 	if (unlikely(trans->restarted))
 		return -((int) trans->restarted);
 
-	trans_for_each_path(trans, path)
+	trans_for_each_path(trans, path, i)
 		if (path->should_be_locked &&
 		    !bch2_btree_path_relock_norestart(trans, path, _RET_IP_)) {
 			return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
@@ -758,16 +764,18 @@ int bch2_trans_relock_notrace(struct btree_trans *trans)
 void bch2_trans_unlock_noassert(struct btree_trans *trans)
 {
 	struct btree_path *path;
+	unsigned i;
 
-	trans_for_each_path(trans, path)
+	trans_for_each_path(trans, path, i)
 		__bch2_btree_path_unlock(trans, path);
 }
 
 void bch2_trans_unlock(struct btree_trans *trans)
 {
 	struct btree_path *path;
+	unsigned i;
 
-	trans_for_each_path(trans, path)
+	trans_for_each_path(trans, path, i)
 		__bch2_btree_path_unlock(trans, path);
 }
@@ -780,8 +788,9 @@ void bch2_trans_unlock_long(struct btree_trans *trans)
 bool bch2_trans_locked(struct btree_trans *trans)
 {
 	struct btree_path *path;
+	unsigned i;
 
-	trans_for_each_path(trans, path)
+	trans_for_each_path(trans, path, i)
 		if (path->nodes_locked)
 			return true;
 	return false;
@@ -827,8 +836,9 @@ void bch2_btree_path_verify_locks(struct btree_path *path)
 void bch2_trans_verify_locks(struct btree_trans *trans)
 {
 	struct btree_path *path;
+	unsigned i;
 
-	trans_for_each_path(trans, path)
+	trans_for_each_path(trans, path, i)
 		bch2_btree_path_verify_locks(path);
 }
@@ -242,8 +242,9 @@ static inline bool btree_node_lock_increment(struct btree_trans *trans,
 					     enum btree_node_locked_type want)
 {
 	struct btree_path *path;
+	unsigned i;
 
-	trans_for_each_path(trans, path)
+	trans_for_each_path(trans, path, i)
 		if (&path->l[level].b->c == b &&
 		    btree_node_locked_type(path, level) >= want) {
 			six_lock_increment(&b->lock, (enum six_lock_type) want);
@@ -190,7 +190,7 @@ static void bch2_btree_node_free_inmem(struct btree_trans *trans,
 					struct btree *b)
 {
 	struct bch_fs *c = trans->c;
-	unsigned level = b->c.level;
+	unsigned i, level = b->c.level;
 
 	bch2_btree_node_lock_write_nofail(trans, path, &b->c);
 	bch2_btree_node_hash_remove(&c->btree_cache, b);
@@ -198,7 +198,7 @@ static void bch2_btree_node_free_inmem(struct btree_trans *trans,
 	six_unlock_write(&b->c.lock);
 	mark_btree_node_locked_noreset(path, level, BTREE_NODE_INTENT_LOCKED);
 
-	trans_for_each_path(trans, path)
+	trans_for_each_path(trans, path, i)
 		if (path->l[level].b == b) {
 			btree_node_unlock(trans, path, level);
 			path->l[level].b = ERR_PTR(-BCH_ERR_no_btree_node_init);
@@ -212,7 +212,7 @@ static void bch2_btree_node_free_never_used(struct btree_update *as,
 	struct bch_fs *c = as->c;
 	struct prealloc_nodes *p = &as->prealloc_nodes[b->c.lock.readers != NULL];
 	struct btree_path *path;
-	unsigned level = b->c.level;
+	unsigned i, level = b->c.level;
 
 	BUG_ON(!list_empty(&b->write_blocked));
 	BUG_ON(b->will_make_reachable != (1UL|(unsigned long) as));
@@ -235,7 +235,7 @@ static void bch2_btree_node_free_never_used(struct btree_update *as,
 	six_unlock_intent(&b->c.lock);
 
-	trans_for_each_path(trans, path)
+	trans_for_each_path(trans, path, i)
 		if (path->l[level].b == b) {
 			btree_node_unlock(trans, path, level);
 			path->l[level].b = ERR_PTR(-BCH_ERR_no_btree_node_init);