Commit 07f383c7 authored by Kent Overstreet

bcachefs: btree_iter -> btree_path_idx_t

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 788cc25d
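The conversion below replaces the btree_path pointers stored in struct btree_iter (path, update_path, key_cache_path) with btree_path_idx_t indices into trans->paths. A minimal sketch of why indices are preferable to pointers here, presumably so the paths array can later be reallocated or resized without invalidating iterators; this is not bcachefs code, and all names in it (slot, container, get_slot) are hypothetical:

	#include <stdint.h>
	#include <stdlib.h>

	typedef uint16_t slot_idx_t;

	struct slot {
		int	value;
	};

	struct container {
		struct slot	*slots;	/* may be realloc'd to grow */
		size_t		nr;
	};

	/* An index stays valid across realloc; a stored pointer would dangle. */
	static inline struct slot *get_slot(struct container *c, slot_idx_t idx)
	{
		return c->slots + idx;
	}

	int main(void)
	{
		struct container c = { calloc(8, sizeof(struct slot)), 8 };
		slot_idx_t idx = 3;

		get_slot(&c, idx)->value = 42;

		/* Growing the array may move it; idx still resolves correctly. */
		c.slots = realloc(c.slots, 64 * sizeof(struct slot));
		c.nr = 64;

		int v = get_slot(&c, idx)->value;	/* still 42 */
		free(c.slots);
		return v == 42 ? 0 : 1;
	}

The trade-off is one extra addition per access, which is what the btree_iter_path() helper added in this commit encapsulates.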
@@ -862,8 +862,9 @@ static struct bkey_s_c bch2_get_key_or_hole(struct btree_iter *iter, struct bpos
 		bch2_trans_copy_iter(&iter2, iter);
 
-		if (!bpos_eq(iter->path->l[0].b->key.k.p, SPOS_MAX))
-			end = bkey_min(end, bpos_nosnap_successor(iter->path->l[0].b->key.k.p));
+		struct btree_path *path = btree_iter_path(iter->trans, iter);
+		if (!bpos_eq(path->l[0].b->key.k.p, SPOS_MAX))
+			end = bkey_min(end, bpos_nosnap_successor(path->l[0].b->key.k.p));
 
 		end = bkey_min(end, POS(iter->pos.inode, iter->pos.offset + U32_MAX - 1));
@@ -367,7 +367,7 @@ static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bc
 	if (!ob)
 		set_btree_iter_dontneed(&iter);
 err:
-	if (iter.trans && iter.path)
+	if (iter.path)
 		set_btree_iter_dontneed(&iter);
 	bch2_trans_iter_exit(trans, &iter);
 	printbuf_exit(&buf);
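Note the dropped iter.trans check above: with indices, 0 stands in for NULL (see iter->update_path = 0 in the next file), so presence tests reduce to a truth test on the index. A standalone sketch of that sentinel convention, assuming slot 0 of the paths array is reserved; demo_iter and demo_has_path are hypothetical names, not bcachefs API:

	#include <assert.h>
	#include <stdbool.h>
	#include <stdint.h>

	typedef uint16_t path_idx_t;

	struct demo_iter {
		path_idx_t	path;		/* 0 == "no path", as NULL was before */
		path_idx_t	update_path;
		path_idx_t	key_cache_path;
	};

	static bool demo_has_path(const struct demo_iter *iter)
	{
		return iter->path != 0;	/* replaces the iter.trans && iter.path test */
	}

	int main(void)
	{
		struct demo_iter iter = { 0, 0, 0 };

		assert(!demo_has_path(&iter));
		iter.path = 7;		/* some allocated slot; 0 stays reserved */
		assert(demo_has_path(&iter));
		return 0;
	}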
[diff for one file collapsed, not shown]
@@ -359,10 +359,12 @@ static inline void __bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpo
 static inline void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
 {
+	struct btree_trans *trans = iter->trans;
+
 	if (unlikely(iter->update_path))
-		bch2_path_put(iter->trans, iter->update_path->idx,
+		bch2_path_put(trans, iter->update_path,
 			      iter->flags & BTREE_ITER_INTENT);
-	iter->update_path = NULL;
+	iter->update_path = 0;
 
 	if (!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS))
 		new_pos.snapshot = iter->snapshot;
@@ -431,8 +433,8 @@ static inline void bch2_trans_iter_init_common(struct btree_trans *trans,
 					  unsigned long ip)
 {
 	iter->trans		= trans;
-	iter->update_path	= NULL;
-	iter->key_cache_path	= NULL;
+	iter->update_path	= 0;
+	iter->key_cache_path	= 0;
 	iter->btree_id		= btree_id;
 	iter->min_depth		= 0;
 	iter->flags		= flags;
@@ -443,7 +445,7 @@ static inline void bch2_trans_iter_init_common(struct btree_trans *trans,
 #ifdef CONFIG_BCACHEFS_DEBUG
 	iter->ip_allocated = ip;
 #endif
-	iter->path = trans->paths + bch2_path_get(trans, btree_id, iter->pos,
+	iter->path = bch2_path_get(trans, btree_id, iter->pos,
 					locks_want, depth, flags, ip);
 }
@@ -471,8 +473,10 @@ void bch2_trans_copy_iter(struct btree_iter *, struct btree_iter *);
 static inline void set_btree_iter_dontneed(struct btree_iter *iter)
 {
-	if (!iter->trans->restarted)
-		iter->path->preserve = false;
+	struct btree_trans *trans = iter->trans;
+
+	if (!trans->restarted)
+		btree_iter_path(trans, iter)->preserve = false;
 }
 
 void *__bch2_trans_kmalloc(struct btree_trans *, size_t);
@@ -630,7 +630,7 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans,
 	if (ret)
 		goto out;
 
-	ck = (void *) c_iter.path->l[0].b;
+	ck = (void *) btree_iter_path(trans, &c_iter)->l[0].b;
 	if (!ck)
 		goto out;
@@ -680,7 +680,8 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans,
 	bch2_journal_pin_drop(j, &ck->journal);
 
-	BUG_ON(!btree_node_locked(c_iter.path, 0));
+	struct btree_path *path = btree_iter_path(trans, &c_iter);
+	BUG_ON(!btree_node_locked(path, 0));
 
 	if (!evict) {
 		if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
@@ -691,17 +692,17 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans,
 		struct btree_path *path2;
 evict:
 		trans_for_each_path(trans, path2)
-			if (path2 != c_iter.path)
+			if (path2 != path)
 				__bch2_btree_path_unlock(trans, path2);
 
-		bch2_btree_node_lock_write_nofail(trans, c_iter.path, &ck->c);
+		bch2_btree_node_lock_write_nofail(trans, path, &ck->c);
 
 		if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
 			clear_bit(BKEY_CACHED_DIRTY, &ck->flags);
 			atomic_long_dec(&c->btree_key_cache.nr_dirty);
 		}
 
-		mark_btree_node_locked_noreset(c_iter.path, 0, BTREE_NODE_UNLOCKED);
+		mark_btree_node_locked_noreset(path, 0, BTREE_NODE_UNLOCKED);
 		bkey_cached_evict(&c->btree_key_cache, ck);
 		bkey_cached_free_fast(&c->btree_key_cache, ck);
 	}
@@ -225,8 +225,8 @@ enum btree_path_uptodate {
 typedef u16 btree_path_idx_t;
 
 struct btree_path {
-	u8			idx;
-	u8			sorted_idx;
+	btree_path_idx_t	idx;
+	btree_path_idx_t	sorted_idx;
 	u8			ref;
 	u8			intent_ref;
@@ -282,9 +282,9 @@ static inline unsigned long btree_path_ip_allocated(struct btree_path *path)
  */
 struct btree_iter {
 	struct btree_trans	*trans;
-	struct btree_path	*path;
-	struct btree_path	*update_path;
-	struct btree_path	*key_cache_path;
+	btree_path_idx_t	path;
+	btree_path_idx_t	update_path;
+	btree_path_idx_t	key_cache_path;
 
 	enum btree_id		btree_id:8;
 	u8			min_depth;
@@ -410,7 +410,7 @@ struct btree_trans {
 	 * extent:
 	 */
 	unsigned		extra_journal_res;
-	u8			nr_max_paths;
+	btree_path_idx_t	nr_max_paths;
 	u16			journal_entries_u64s;
 	u16			journal_entries_size;
@@ -437,6 +437,18 @@ struct btree_trans {
 	struct replicas_delta_list *fs_usage_deltas;
 };
 
+static inline struct btree_path *btree_iter_path(struct btree_trans *trans, struct btree_iter *iter)
+{
+	return trans->paths + iter->path;
+}
+
+static inline struct btree_path *btree_iter_key_cache_path(struct btree_trans *trans, struct btree_iter *iter)
+{
+	return iter->key_cache_path
+		? trans->paths + iter->key_cache_path
+		: NULL;
+}
+
 #define BCH_BTREE_WRITE_TYPES()						\
 	x(initial,		0)					\
 	x(init_next_bset,	1)					\
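The two helpers just added are how iterators resolve their indices to struct btree_path pointers from here on. A self-contained sketch of the same accessor pattern, with hypothetical names (demo_trans, demo_iter, demo_iter_path); only the base-plus-index shape mirrors the real btree_iter_path():

	#include <stdint.h>
	#include <stdio.h>

	typedef uint16_t path_idx_t;

	struct demo_path { int level; };

	struct demo_trans {
		struct demo_path *paths;
	};

	struct demo_iter {
		path_idx_t path;
	};

	/* Same shape as btree_iter_path(): base + index, computed on demand. */
	static inline struct demo_path *demo_iter_path(struct demo_trans *trans,
						       struct demo_iter *iter)
	{
		return trans->paths + iter->path;
	}

	int main(void)
	{
		struct demo_path paths[4] = { { 0 }, { 1 }, { 2 }, { 3 } };
		struct demo_trans trans = { paths };
		struct demo_iter iter = { 2 };

		printf("level = %d\n", demo_iter_path(&trans, &iter)->level);
		return 0;
	}

The key design point is that the pointer is never stored, only materialized at the point of use, so it can never go stale across operations that move the array.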
@@ -266,7 +266,7 @@ int bch2_trans_update_extent_overwrite(struct btree_trans *trans,
 		bch2_cut_front(new.k->p, update);
 
-		ret = bch2_trans_update_by_path(trans, iter->path, update,
+		ret = bch2_trans_update_by_path(trans, btree_iter_path(trans, iter), update,
						BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|
						flags, _RET_IP_);
 		if (ret)
@@ -462,37 +462,37 @@ static noinline int bch2_trans_update_get_key_cache(struct btree_trans *trans,
 						    struct btree_iter *iter,
 						    struct btree_path *path)
 {
-	if (!iter->key_cache_path ||
-	    !iter->key_cache_path->should_be_locked ||
-	    !bpos_eq(iter->key_cache_path->pos, iter->pos)) {
+	struct btree_path *key_cache_path = btree_iter_key_cache_path(trans, iter);
+
+	if (!key_cache_path ||
+	    !key_cache_path->should_be_locked ||
+	    !bpos_eq(key_cache_path->pos, iter->pos)) {
 		struct bkey_cached *ck;
 		int ret;
 
 		if (!iter->key_cache_path)
-			iter->key_cache_path = trans->paths +
+			iter->key_cache_path =
 				bch2_path_get(trans, path->btree_id, path->pos, 1, 0,
 					      BTREE_ITER_INTENT|
 					      BTREE_ITER_CACHED, _THIS_IP_);
 
-		iter->key_cache_path = trans->paths +
-			bch2_btree_path_set_pos(trans, iter->key_cache_path->idx, path->pos,
+		iter->key_cache_path =
+			bch2_btree_path_set_pos(trans, iter->key_cache_path, path->pos,
 						iter->flags & BTREE_ITER_INTENT,
 						_THIS_IP_);
 
-		ret = bch2_btree_path_traverse(trans, iter->key_cache_path->idx,
-					       BTREE_ITER_CACHED);
+		ret = bch2_btree_path_traverse(trans, iter->key_cache_path, BTREE_ITER_CACHED);
 		if (unlikely(ret))
 			return ret;
 
-		ck = (void *) iter->key_cache_path->l[0].b;
+		ck = (void *) trans->paths[iter->key_cache_path].l[0].b;
 
 		if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
 			trace_and_count(trans->c, trans_restart_key_cache_raced, trans, _RET_IP_);
 			return btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_raced);
 		}
 
-		btree_path_set_should_be_locked(iter->key_cache_path);
+		btree_path_set_should_be_locked(trans->paths + iter->key_cache_path);
 	}
 
 	return 0;
@@ -501,7 +501,7 @@ static noinline int bch2_trans_update_get_key_cache(struct btree_trans *trans,
 int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter *iter,
 				   struct bkey_i *k, enum btree_update_flags flags)
 {
-	struct btree_path *path = iter->update_path ?: iter->path;
+	struct btree_path *path = trans->paths + (iter->update_path ?: iter->path);
 	int ret;
 
 	if (iter->flags & BTREE_ITER_IS_EXTENTS)
@@ -529,7 +529,7 @@ int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter
 		if (ret)
 			return ret;
 
-		path = iter->key_cache_path;
+		path = trans->paths + iter->key_cache_path;
 	}
 
 	return bch2_trans_update_by_path(trans, path, k, flags, _RET_IP_);
@@ -1951,9 +1951,9 @@ int bch2_btree_node_rewrite(struct btree_trans *trans,
 	flags |= BCH_TRANS_COMMIT_no_enospc;
 
-	parent = btree_node_parent(iter->path, b);
-	as = bch2_btree_update_start(trans, iter->path, b->c.level,
-				     false, flags);
+	struct btree_path *path = btree_iter_path(trans, iter);
+	parent = btree_node_parent(path, b);
+	as = bch2_btree_update_start(trans, path, b->c.level, false, flags);
 	ret = PTR_ERR_OR_ZERO(as);
 	if (ret)
 		goto out;
@@ -1975,20 +1975,20 @@ int bch2_btree_node_rewrite(struct btree_trans *trans,
 	if (parent) {
 		bch2_keylist_add(&as->parent_keys, &n->key);
-		ret = bch2_btree_insert_node(as, trans, iter->path, parent,
-					     &as->parent_keys, flags);
+		ret = bch2_btree_insert_node(as, trans, btree_iter_path(trans, iter),
+					     parent, &as->parent_keys, flags);
 		if (ret)
 			goto err;
 	} else {
-		bch2_btree_set_root(as, trans, iter->path, n);
+		bch2_btree_set_root(as, trans, btree_iter_path(trans, iter), n);
 	}
 
 	bch2_btree_update_get_open_buckets(as, n);
 	bch2_btree_node_write(c, n, SIX_LOCK_intent, 0);
 
-	bch2_btree_node_free_inmem(trans, iter->path, b);
+	bch2_btree_node_free_inmem(trans, btree_iter_path(trans, iter), b);
 
-	bch2_trans_node_add(trans, iter->path, n);
+	bch2_trans_node_add(trans, trans->paths + iter->path, n);
 	six_unlock_intent(&n->c.lock);
 
 	bch2_btree_update_done(as, trans);
@@ -2165,18 +2165,19 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans,
 		BUG_ON(ret);
 	}
 
-	parent = btree_node_parent(iter->path, b);
+	parent = btree_node_parent(btree_iter_path(trans, iter), b);
 	if (parent) {
 		bch2_trans_copy_iter(&iter2, iter);
 
-		iter2.path = trans->paths + bch2_btree_path_make_mut(trans, iter2.path->idx,
+		iter2.path = bch2_btree_path_make_mut(trans, iter2.path,
 				iter2.flags & BTREE_ITER_INTENT,
 				_THIS_IP_);
 
-		BUG_ON(iter2.path->level != b->c.level);
-		BUG_ON(!bpos_eq(iter2.path->pos, new_key->k.p));
+		struct btree_path *path2 = btree_iter_path(trans, &iter2);
+		BUG_ON(path2->level != b->c.level);
+		BUG_ON(!bpos_eq(path2->pos, new_key->k.p));
 
-		btree_path_set_level_up(trans, iter2.path);
+		btree_path_set_level_up(trans, path2);
 
 		trans->paths_sorted = false;
@@ -2203,7 +2204,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans,
 	if (ret)
 		goto err;
 
-	bch2_btree_node_lock_write_nofail(trans, iter->path, &b->c);
+	bch2_btree_node_lock_write_nofail(trans, btree_iter_path(trans, iter), &b->c);
 
 	if (new_hash) {
 		mutex_lock(&c->btree_cache.lock);
@@ -2218,7 +2219,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans,
 		bkey_copy(&b->key, new_key);
 	}
 
-	bch2_btree_node_unlock_write(trans, iter->path, b);
+	bch2_btree_node_unlock_write(trans, btree_iter_path(trans, iter), b);
 out:
 	bch2_trans_iter_exit(trans, &iter2);
 	return ret;
@@ -2237,7 +2238,7 @@ int bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *ite
 {
 	struct bch_fs *c = trans->c;
 	struct btree *new_hash = NULL;
-	struct btree_path *path = iter->path;
+	struct btree_path *path = btree_iter_path(trans, iter);
 	struct closure cl;
 	int ret = 0;
@@ -2295,7 +2296,7 @@ int bch2_btree_node_update_key_get_iter(struct btree_trans *trans,
 		goto out;
 
 	/* has node been freed? */
-	if (iter.path->l[b->c.level].b != b) {
+	if (btree_iter_path(trans, &iter)->l[b->c.level].b != b) {
 		/* node has been freed: */
 		BUG_ON(!btree_node_dying(b));
 		goto out;
@@ -106,7 +106,9 @@ static noinline int wb_flush_one_slowpath(struct btree_trans *trans,
 					  struct btree_iter *iter,
 					  struct btree_write_buffered_key *wb)
 {
-	bch2_btree_node_unlock_write(trans, iter->path, iter->path->l[0].b);
+	struct btree_path *path = btree_iter_path(trans, iter);
+
+	bch2_btree_node_unlock_write(trans, path, path->l[0].b);
 
 	trans->journal_res.seq = wb->journal_seq;
@@ -139,10 +141,10 @@ static inline int wb_flush_one(struct btree_trans *trans, struct btree_iter *ite
 	 * We can't clone a path that has write locks: unshare it now, before
 	 * set_pos and traverse():
 	 */
-	if (iter->path->ref > 1)
-		iter->path = trans->paths + __bch2_btree_path_make_mut(trans, iter->path->idx, true, _THIS_IP_);
+	if (btree_iter_path(trans, iter)->ref > 1)
+		iter->path = __bch2_btree_path_make_mut(trans, iter->path, true, _THIS_IP_);
 
-	path = iter->path;
+	path = btree_iter_path(trans, iter);
 
 	if (!*write_locked) {
 		ret = bch2_btree_node_lock_write(trans, path, &path->l[0].b->c);
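The wb_flush_one hunk above shows the discipline the index conversion enforces: a cached struct btree_path * must be re-derived after any call, such as __bch2_btree_path_make_mut(), that can replace or move the path. A standalone sketch of that unshare-then-re-derive pattern, with hypothetical names (demo_trans, demo_path, demo_unshare), not bcachefs code:

	#include <stdint.h>

	typedef uint16_t path_idx_t;

	struct demo_path { int ref; int pos; };

	struct demo_trans {
		struct demo_path *paths;
		path_idx_t	  nr;
	};

	/* Clone a shared slot into a fresh one and return the new index;
	 * any pointer into trans->paths taken before this call is stale. */
	static path_idx_t demo_unshare(struct demo_trans *trans, path_idx_t idx)
	{
		if (trans->paths[idx].ref <= 1)
			return idx;

		trans->paths[idx].ref--;
		trans->paths[trans->nr] = trans->paths[idx];
		trans->paths[trans->nr].ref = 1;
		return trans->nr++;
	}

	int main(void)
	{
		struct demo_path slots[8] = { { 0, 0 }, { 2, 10 } };
		struct demo_trans trans = { slots, 2 };
		path_idx_t idx = 1;

		idx = demo_unshare(&trans, idx);	/* may return a new slot */

		/* Re-derive the pointer only after the index is final: */
		struct demo_path *path = trans.paths + idx;
		return path->pos == 10 ? 0 : 1;
	}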
@@ -300,7 +302,7 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
 		}
 
 		if (write_locked) {
-			struct btree_path *path = iter.path;
+			struct btree_path *path = btree_iter_path(trans, &iter);
 
 			if (path->btree_id != i->btree ||
 			    bpos_gt(k->k.k.p, path->l[0].b->key.k.p)) {
@@ -316,7 +318,7 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
 		}
 
 		bch2_btree_iter_set_pos(&iter, k->k.k.p);
-		iter.path->preserve = false;
+		btree_iter_path(trans, &iter)->preserve = false;
 
 		do {
 			if (race_fault()) {
@@ -338,8 +340,10 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
 			break;
 	}
 
-	if (write_locked)
-		bch2_btree_node_unlock_write(trans, iter.path, iter.path->l[0].b);
+	if (write_locked) {
+		struct btree_path *path = btree_iter_path(trans, &iter);
+		bch2_btree_node_unlock_write(trans, path, path->l[0].b);
+	}
 	bch2_trans_iter_exit(trans, &iter);
 
 	if (ret)
@@ -460,7 +460,8 @@ static ssize_t bch2_read_bfloat_failed(struct file *file, char __user *buf,
 		for_each_btree_key(trans, iter, i->id, i->from,
 				   BTREE_ITER_PREFETCH|
 				   BTREE_ITER_ALL_SNAPSHOTS, k, ({
-			struct btree_path_level *l = &iter.path->l[0];
+			struct btree_path_level *l =
+				&btree_iter_path(trans, &iter)->l[0];
 			struct bkey_packed *_k =
 				bch2_btree_node_iter_peek(&l->iter, l->b);
@@ -1668,7 +1668,7 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
 		goto err;
 	}
 
-	BUG_ON(!iter->path->should_be_locked);
+	BUG_ON(!btree_iter_path(trans, iter)->should_be_locked);
 
 	i = walk_inode(trans, dir, equiv, k.k->type == KEY_TYPE_whiteout);
 	ret = PTR_ERR_OR_ZERO(i);