Commit 67e0dd8f authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: btree_path

This splits btree_iter into two components: btree_iter is now the
externally visible component, and it points to a btree_path which is now
reference counted.

This means we no longer have to clone iterators up front if they might
be mutated - btree_path can be shared by multiple iterators, and cloned
if an iterator would mutate a shared btree_path. This will help us use
iterators more efficiently, as well as slimming down the main long lived
state in btree_trans, and significantly cleans up the logic for iterator
lifetimes.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 8f54337d
......@@ -219,7 +219,7 @@ struct posix_acl *bch2_get_acl(struct mnt_idmap *idmap,
struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct bch_hash_info hash = bch2_hash_info_init(c, &inode->ei_inode);
struct btree_trans trans;
struct btree_iter *iter;
struct btree_iter iter = { NULL };
struct bkey_s_c_xattr xattr;
struct posix_acl *acl = NULL;
struct bkey_s_c k;
......@@ -229,20 +229,19 @@ struct posix_acl *bch2_get_acl(struct mnt_idmap *idmap,
retry:
bch2_trans_begin(&trans);
iter = bch2_hash_lookup(&trans, bch2_xattr_hash_desc,
ret = bch2_hash_lookup(&trans, &iter, bch2_xattr_hash_desc,
&hash, inode->v.i_ino,
&X_SEARCH(acl_to_xattr_type(type), "", 0),
0);
if (IS_ERR(iter)) {
if (PTR_ERR(iter) == -EINTR)
if (ret) {
if (ret == -EINTR)
goto retry;
if (PTR_ERR(iter) != -ENOENT)
acl = ERR_CAST(iter);
if (ret != -ENOENT)
acl = ERR_PTR(ret);
goto out;
}
k = bch2_btree_iter_peek_slot(iter);
k = bch2_btree_iter_peek_slot(&iter);
ret = bkey_err(k);
if (ret) {
acl = ERR_PTR(ret);
......@@ -255,8 +254,8 @@ struct posix_acl *bch2_get_acl(struct mnt_idmap *idmap,
if (!IS_ERR(acl))
set_cached_acl(&inode->v, type, acl);
bch2_trans_iter_put(&trans, iter);
out:
bch2_trans_iter_exit(&trans, &iter);
bch2_trans_exit(&trans);
return acl;
}
......@@ -298,7 +297,7 @@ int bch2_set_acl(struct mnt_idmap *idmap,
struct bch_inode_info *inode = to_bch_ei(dentry->d_inode);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct btree_trans trans;
struct btree_iter *inode_iter;
struct btree_iter inode_iter = { NULL };
struct bch_inode_unpacked inode_u;
struct bch_hash_info hash_info;
struct posix_acl *acl;
......@@ -311,9 +310,8 @@ int bch2_set_acl(struct mnt_idmap *idmap,
bch2_trans_begin(&trans);
acl = _acl;
inode_iter = bch2_inode_peek(&trans, &inode_u, inode->v.i_ino,
BTREE_ITER_INTENT);
ret = PTR_ERR_OR_ZERO(inode_iter);
ret = bch2_inode_peek(&trans, &inode_iter, &inode_u, inode->v.i_ino,
BTREE_ITER_INTENT);
if (ret)
goto btree_err;
......@@ -334,11 +332,11 @@ int bch2_set_acl(struct mnt_idmap *idmap,
inode_u.bi_ctime = bch2_current_time(c);
inode_u.bi_mode = mode;
ret = bch2_inode_write(&trans, inode_iter, &inode_u) ?:
ret = bch2_inode_write(&trans, &inode_iter, &inode_u) ?:
bch2_trans_commit(&trans, NULL,
&inode->ei_journal_seq, 0);
btree_err:
bch2_trans_iter_put(&trans, inode_iter);
bch2_trans_iter_exit(&trans, &inode_iter);
if (ret == -EINTR)
goto retry;
......@@ -362,22 +360,21 @@ int bch2_acl_chmod(struct btree_trans *trans,
struct posix_acl **new_acl)
{
struct bch_hash_info hash_info = bch2_hash_info_init(trans->c, inode);
struct btree_iter *iter;
struct btree_iter iter;
struct bkey_s_c_xattr xattr;
struct bkey_i_xattr *new;
struct posix_acl *acl;
struct bkey_s_c k;
int ret;
iter = bch2_hash_lookup(trans, bch2_xattr_hash_desc,
ret = bch2_hash_lookup(trans, &iter, bch2_xattr_hash_desc,
&hash_info, inode->bi_inum,
&X_SEARCH(KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS, "", 0),
BTREE_ITER_INTENT);
ret = PTR_ERR_OR_ZERO(iter);
if (ret)
return ret == -ENOENT ? 0 : ret;
k = bch2_btree_iter_peek_slot(iter);
k = bch2_btree_iter_peek_slot(&iter);
xattr = bkey_s_c_to_xattr(k);
if (ret)
goto err;
......@@ -398,12 +395,12 @@ int bch2_acl_chmod(struct btree_trans *trans,
goto err;
}
new->k.p = iter->pos;
ret = bch2_trans_update(trans, iter, &new->k_i, 0);
new->k.p = iter.pos;
ret = bch2_trans_update(trans, &iter, &new->k_i, 0);
*new_acl = acl;
acl = NULL;
err:
bch2_trans_iter_put(trans, iter);
bch2_trans_iter_exit(trans, &iter);
if (!IS_ERR_OR_NULL(acl))
kfree(acl);
return ret;
......
......@@ -353,32 +353,32 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
int bch2_alloc_write(struct bch_fs *c, unsigned flags)
{
struct btree_trans trans;
struct btree_iter *iter;
struct btree_iter iter;
struct bch_dev *ca;
unsigned i;
int ret = 0;
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_alloc, POS_MIN,
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
bch2_trans_iter_init(&trans, &iter, BTREE_ID_alloc, POS_MIN,
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
for_each_member_device(ca, c, i) {
bch2_btree_iter_set_pos(iter,
bch2_btree_iter_set_pos(&iter,
POS(ca->dev_idx, ca->mi.first_bucket));
while (iter->pos.offset < ca->mi.nbuckets) {
while (iter.pos.offset < ca->mi.nbuckets) {
bch2_trans_cond_resched(&trans);
ret = bch2_alloc_write_key(&trans, iter, flags);
ret = bch2_alloc_write_key(&trans, &iter, flags);
if (ret) {
percpu_ref_put(&ca->ref);
goto err;
}
bch2_btree_iter_advance(iter);
bch2_btree_iter_advance(&iter);
}
}
err:
bch2_trans_iter_put(&trans, iter);
bch2_trans_iter_exit(&trans, &iter);
bch2_trans_exit(&trans);
return ret;
}
......@@ -390,18 +390,18 @@ int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
{
struct bch_fs *c = trans->c;
struct bch_dev *ca = bch_dev_bkey_exists(c, dev);
struct btree_iter *iter;
struct btree_iter iter;
struct bucket *g;
struct bkey_alloc_buf *a;
struct bkey_alloc_unpacked u;
u64 *time, now;
int ret = 0;
iter = bch2_trans_get_iter(trans, BTREE_ID_alloc, POS(dev, bucket_nr),
BTREE_ITER_CACHED|
BTREE_ITER_CACHED_NOFILL|
BTREE_ITER_INTENT);
ret = bch2_btree_iter_traverse(iter);
bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS(dev, bucket_nr),
BTREE_ITER_CACHED|
BTREE_ITER_CACHED_NOFILL|
BTREE_ITER_INTENT);
ret = bch2_btree_iter_traverse(&iter);
if (ret)
goto out;
......@@ -412,7 +412,7 @@ int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
percpu_down_read(&c->mark_lock);
g = bucket(ca, bucket_nr);
u = alloc_mem_to_key(iter, g, READ_ONCE(g->mark));
u = alloc_mem_to_key(&iter, g, READ_ONCE(g->mark));
percpu_up_read(&c->mark_lock);
time = rw == READ ? &u.read_time : &u.write_time;
......@@ -423,10 +423,10 @@ int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
*time = now;
bch2_alloc_pack(c, a, u);
ret = bch2_trans_update(trans, iter, &a->k, 0) ?:
ret = bch2_trans_update(trans, &iter, &a->k, 0) ?:
bch2_trans_commit(trans, NULL, NULL, 0);
out:
bch2_trans_iter_put(trans, iter);
bch2_trans_iter_exit(trans, &iter);
return ret;
}
......@@ -695,27 +695,28 @@ static int bucket_invalidate_btree(struct btree_trans *trans,
struct bkey_alloc_unpacked u;
struct bucket *g;
struct bucket_mark m;
struct btree_iter *iter =
bch2_trans_get_iter(trans, BTREE_ID_alloc,
POS(ca->dev_idx, b),
BTREE_ITER_CACHED|
BTREE_ITER_CACHED_NOFILL|
BTREE_ITER_INTENT);
struct btree_iter iter;
int ret;
bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
POS(ca->dev_idx, b),
BTREE_ITER_CACHED|
BTREE_ITER_CACHED_NOFILL|
BTREE_ITER_INTENT);
a = bch2_trans_kmalloc(trans, sizeof(*a));
ret = PTR_ERR_OR_ZERO(a);
if (ret)
goto err;
ret = bch2_btree_iter_traverse(iter);
ret = bch2_btree_iter_traverse(&iter);
if (ret)
goto err;
percpu_down_read(&c->mark_lock);
g = bucket(ca, b);
m = READ_ONCE(g->mark);
u = alloc_mem_to_key(iter, g, m);
u = alloc_mem_to_key(&iter, g, m);
percpu_up_read(&c->mark_lock);
u.gen++;
......@@ -726,10 +727,10 @@ static int bucket_invalidate_btree(struct btree_trans *trans,
u.write_time = atomic64_read(&c->io_clock[WRITE].now);
bch2_alloc_pack(c, a, u);
ret = bch2_trans_update(trans, iter, &a->k,
ret = bch2_trans_update(trans, &iter, &a->k,
BTREE_TRIGGER_BUCKET_INVALIDATE);
err:
bch2_trans_iter_put(trans, iter);
bch2_trans_iter_exit(trans, &iter);
return ret;
}
......
......@@ -558,8 +558,8 @@ struct journal_keys {
u64 journal_seq_base;
};
struct btree_iter_buf {
struct btree_iter *iter;
struct btree_path_buf {
struct btree_path *path;
};
#define REPLICAS_DELTA_LIST_MAX (1U << 16)
......@@ -667,9 +667,9 @@ struct bch_fs {
/* btree_iter.c: */
struct mutex btree_trans_lock;
struct list_head btree_trans_list;
mempool_t btree_iters_pool;
mempool_t btree_paths_pool;
mempool_t btree_trans_mem_pool;
struct btree_iter_buf __percpu *btree_iters_bufs;
struct btree_path_buf __percpu *btree_paths_bufs;
struct srcu_struct btree_trans_barrier;
......
......@@ -185,9 +185,11 @@ void bch2_btree_node_iter_verify(struct btree_node_iter *iter,
return;
/* Verify no duplicates: */
btree_node_iter_for_each(iter, set)
btree_node_iter_for_each(iter, set) {
BUG_ON(set->k > set->end);
btree_node_iter_for_each(iter, s2)
BUG_ON(set != s2 && set->end == s2->end);
}
/* Verify that set->end is correct: */
btree_node_iter_for_each(iter, set) {
......
......@@ -641,7 +641,7 @@ struct btree *bch2_btree_node_mem_alloc(struct bch_fs *c)
/* Slowpath, don't want it inlined into btree_iter_traverse() */
static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
struct btree_trans *trans,
struct btree_iter *iter,
struct btree_path *path,
const struct bkey_i *k,
enum btree_id btree_id,
unsigned level,
......@@ -657,7 +657,7 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
* Parent node must be locked, else we could read in a btree node that's
* been freed:
*/
if (trans && !bch2_btree_node_relock(trans, iter, level + 1)) {
if (trans && !bch2_btree_node_relock(trans, path, level + 1)) {
btree_trans_restart(trans);
return ERR_PTR(-EINTR);
}
......@@ -699,7 +699,7 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
if (trans &&
(!bch2_trans_relock(trans) ||
!bch2_btree_iter_relock_intent(trans, iter))) {
!bch2_btree_path_relock_intent(trans, path))) {
BUG_ON(!trans->restarted);
return ERR_PTR(-EINTR);
}
......@@ -763,7 +763,7 @@ static inline void btree_check_header(struct bch_fs *c, struct btree *b)
* The btree node will have either a read or a write lock held, depending on
* the @write parameter.
*/
struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_iter *iter,
struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_path *path,
const struct bkey_i *k, unsigned level,
enum six_lock_type lock_type,
unsigned long trace_ip)
......@@ -788,7 +788,7 @@ struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_iter *
* else we could read in a btree node from disk that's been
* freed:
*/
b = bch2_btree_node_fill(c, trans, iter, k, iter->btree_id,
b = bch2_btree_node_fill(c, trans, path, k, path->btree_id,
level, lock_type, true);
/* We raced and found the btree node in the cache */
......@@ -827,10 +827,10 @@ struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_iter *
* the parent was modified, when the pointer to the node we want
* was removed - and we'll bail out:
*/
if (btree_node_read_locked(iter, level + 1))
btree_node_unlock(iter, level + 1);
if (btree_node_read_locked(path, level + 1))
btree_node_unlock(path, level + 1);
if (!btree_node_lock(trans, iter, b, k->k.p, level, lock_type,
if (!btree_node_lock(trans, path, b, k->k.p, level, lock_type,
lock_node_check_fn, (void *) k, trace_ip)) {
if (!trans->restarted)
goto retry;
......@@ -841,13 +841,13 @@ struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_iter *
b->c.level != level ||
race_fault())) {
six_unlock_type(&b->c.lock, lock_type);
if (bch2_btree_node_relock(trans, iter, level + 1))
if (bch2_btree_node_relock(trans, path, level + 1))
goto retry;
trace_trans_restart_btree_node_reused(trans->ip,
trace_ip,
iter->btree_id,
&iter->real_pos);
path->btree_id,
&path->pos);
btree_trans_restart(trans);
return ERR_PTR(-EINTR);
}
......@@ -862,12 +862,12 @@ struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_iter *
bch2_btree_node_wait_on_read(b);
/*
* should_be_locked is not set on this iterator yet, so we need
* to relock it specifically:
* should_be_locked is not set on this path yet, so we need to
* relock it specifically:
*/
if (trans &&
(!bch2_trans_relock(trans) ||
!bch2_btree_iter_relock_intent(trans, iter))) {
!bch2_btree_path_relock_intent(trans, path))) {
BUG_ON(!trans->restarted);
return ERR_PTR(-EINTR);
}
......@@ -895,7 +895,7 @@ struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_iter *
return ERR_PTR(-EIO);
}
EBUG_ON(b->c.btree_id != iter->btree_id);
EBUG_ON(b->c.btree_id != path->btree_id);
EBUG_ON(BTREE_NODE_LEVEL(b->data) != level);
btree_check_header(c, b);
......@@ -986,21 +986,21 @@ struct btree *bch2_btree_node_get_noiter(struct bch_fs *c,
int bch2_btree_node_prefetch(struct bch_fs *c,
struct btree_trans *trans,
struct btree_iter *iter,
struct btree_path *path,
const struct bkey_i *k,
enum btree_id btree_id, unsigned level)
{
struct btree_cache *bc = &c->btree_cache;
struct btree *b;
BUG_ON(trans && !btree_node_locked(iter, level + 1));
BUG_ON(trans && !btree_node_locked(path, level + 1));
BUG_ON(level >= BTREE_MAX_DEPTH);
b = btree_cache_find(bc, k);
if (b)
return 0;
b = bch2_btree_node_fill(c, trans, iter, k, btree_id,
b = bch2_btree_node_fill(c, trans, path, k, btree_id,
level, SIX_LOCK_read, false);
return PTR_ERR_OR_ZERO(b);
}
......
......@@ -20,16 +20,15 @@ int bch2_btree_cache_cannibalize_lock(struct bch_fs *, struct closure *);
struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *);
struct btree *bch2_btree_node_mem_alloc(struct bch_fs *);
struct btree *bch2_btree_node_get(struct btree_trans *, struct btree_iter *,
struct btree *bch2_btree_node_get(struct btree_trans *, struct btree_path *,
const struct bkey_i *, unsigned,
enum six_lock_type, unsigned long);
struct btree *bch2_btree_node_get_noiter(struct bch_fs *, const struct bkey_i *,
enum btree_id, unsigned, bool);
int bch2_btree_node_prefetch(struct bch_fs *, struct btree_trans *,
struct btree_iter *, const struct bkey_i *,
enum btree_id, unsigned);
int bch2_btree_node_prefetch(struct bch_fs *, struct btree_trans *, struct btree_path *,
const struct bkey_i *, enum btree_id, unsigned);
void bch2_btree_node_evict(struct bch_fs *, const struct bkey_i *);
......
......@@ -775,7 +775,7 @@ static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id,
bool initial, bool metadata_only)
{
struct btree_trans trans;
struct btree_iter *iter;
struct btree_iter iter;
struct btree *b;
unsigned depth = metadata_only ? 1
: bch2_expensive_debug_checks ? 0
......@@ -800,13 +800,13 @@ static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id,
if (!initial) {
if (max_stale > 64)
bch2_btree_node_rewrite(&trans, iter,
bch2_btree_node_rewrite(&trans, &iter,
b->data->keys.seq,
BTREE_INSERT_NOWAIT|
BTREE_INSERT_GC_LOCK_HELD);
else if (!bch2_btree_gc_rewrite_disabled &&
(bch2_btree_gc_always_rewrite || max_stale > 16))
bch2_btree_node_rewrite(&trans, iter,
bch2_btree_node_rewrite(&trans, &iter,
b->data->keys.seq,
BTREE_INSERT_NOWAIT|
BTREE_INSERT_GC_LOCK_HELD);
......@@ -814,7 +814,7 @@ static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id,
bch2_trans_cond_resched(&trans);
}
bch2_trans_iter_put(&trans, iter);
bch2_trans_iter_exit(&trans, &iter);
ret = bch2_trans_exit(&trans) ?: ret;
if (ret)
......@@ -1414,7 +1414,7 @@ static int bch2_gc_reflink_done(struct bch_fs *c, bool initial,
bool metadata_only)
{
struct btree_trans trans;
struct btree_iter *iter;
struct btree_iter iter;
struct bkey_s_c k;
struct reflink_gc *r;
size_t idx = 0;
......@@ -1480,7 +1480,7 @@ static int bch2_gc_reflink_done(struct bch_fs *c, bool initial,
}
}
fsck_err:
bch2_trans_iter_put(&trans, iter);
bch2_trans_iter_exit(&trans, &iter);
bch2_trans_exit(&trans);
out:
genradix_free(&c->reflink_gc_table);
......@@ -1512,7 +1512,7 @@ static int bch2_gc_reflink_start(struct bch_fs *c, bool initial,
bool metadata_only)
{
struct btree_trans trans;
struct btree_iter *iter;
struct btree_iter iter;
struct bkey_s_c k;
struct reflink_gc *r;
int ret;
......@@ -1547,7 +1547,7 @@ static int bch2_gc_reflink_start(struct bch_fs *c, bool initial,
r->size = k.k->size;
r->refcount = 0;
}
bch2_trans_iter_put(&trans, iter);
bch2_trans_iter_exit(&trans, &iter);
bch2_trans_exit(&trans);
return 0;
......@@ -1722,7 +1722,7 @@ static bool gc_btree_gens_key(struct bch_fs *c, struct bkey_s_c k)
static int bch2_gc_btree_gens(struct bch_fs *c, enum btree_id btree_id)
{
struct btree_trans trans;
struct btree_iter *iter;
struct btree_iter iter;
struct bkey_s_c k;
struct bkey_buf sk;
int ret = 0, commit_err = 0;
......@@ -1730,13 +1730,13 @@ static int bch2_gc_btree_gens(struct bch_fs *c, enum btree_id btree_id)
bch2_bkey_buf_init(&sk);
bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, btree_id, POS_MIN,
BTREE_ITER_PREFETCH|
BTREE_ITER_NOT_EXTENTS|
BTREE_ITER_ALL_SNAPSHOTS);
bch2_trans_iter_init(&trans, &iter, btree_id, POS_MIN,
BTREE_ITER_PREFETCH|
BTREE_ITER_NOT_EXTENTS|
BTREE_ITER_ALL_SNAPSHOTS);
while ((bch2_trans_begin(&trans),
k = bch2_btree_iter_peek(iter)).k) {
k = bch2_btree_iter_peek(&iter)).k) {
ret = bkey_err(k);
if (ret == -EINTR)
......@@ -1744,7 +1744,7 @@ static int bch2_gc_btree_gens(struct bch_fs *c, enum btree_id btree_id)
if (ret)
break;
c->gc_gens_pos = iter->pos;
c->gc_gens_pos = iter.pos;
if (gc_btree_gens_key(c, k) && !commit_err) {
bch2_bkey_buf_reassemble(&sk, c, k);
......@@ -1752,7 +1752,7 @@ static int bch2_gc_btree_gens(struct bch_fs *c, enum btree_id btree_id)
commit_err =
bch2_trans_update(&trans, iter, sk.k, 0) ?:
bch2_trans_update(&trans, &iter, sk.k, 0) ?:
bch2_trans_commit(&trans, NULL, NULL,
BTREE_INSERT_NOWAIT|
BTREE_INSERT_NOFAIL);
......@@ -1762,9 +1762,9 @@ static int bch2_gc_btree_gens(struct bch_fs *c, enum btree_id btree_id)
}
}
bch2_btree_iter_advance(iter);
bch2_btree_iter_advance(&iter);
}
bch2_trans_iter_put(&trans, iter);
bch2_trans_iter_exit(&trans, &iter);
bch2_trans_exit(&trans);
bch2_bkey_buf_exit(&sk, c);
......
This source diff could not be displayed because it is too large. You can view the blob instead.
This diff is collapsed.
......@@ -197,23 +197,23 @@ btree_key_cache_create(struct btree_key_cache *c,
}
static int btree_key_cache_fill(struct btree_trans *trans,
struct btree_iter *ck_iter,
struct btree_path *ck_path,
struct bkey_cached *ck)
{
struct btree_iter *iter;
struct btree_iter iter;
struct bkey_s_c k;
unsigned new_u64s = 0;
struct bkey_i *new_k = NULL;
int ret;
iter = bch2_trans_get_iter(trans, ck->key.btree_id,
ck->key.pos, BTREE_ITER_SLOTS);
k = bch2_btree_iter_peek_slot(iter);
bch2_trans_iter_init(trans, &iter, ck->key.btree_id,
ck->key.pos, BTREE_ITER_SLOTS);
k = bch2_btree_iter_peek_slot(&iter);
ret = bkey_err(k);
if (ret)
goto err;
if (!bch2_btree_node_relock(trans, ck_iter, 0)) {
if (!bch2_btree_node_relock(trans, ck_path, 0)) {
trace_transaction_restart_ip(trans->ip, _THIS_IP_);
ret = btree_trans_restart(trans);
goto err;
......@@ -238,7 +238,7 @@ static int btree_key_cache_fill(struct btree_trans *trans,
* XXX: not allowed to be holding read locks when we take a write lock,
* currently
*/
bch2_btree_node_lock_write(trans, ck_iter, ck_iter->l[0].b);
bch2_btree_node_lock_write(trans, ck_path, ck_path->l[0].b);
if (new_k) {
kfree(ck->k);
ck->u64s = new_u64s;
......@@ -247,62 +247,64 @@ static int btree_key_cache_fill(struct btree_trans *trans,
bkey_reassemble(ck->k, k);
ck->valid = true;
bch2_btree_node_unlock_write(trans, ck_iter, ck_iter->l[0].b);
bch2_btree_node_unlock_write(trans, ck_path, ck_path->l[0].b);
/* We're not likely to need this iterator again: */
set_btree_iter_dontneed(trans, iter);
set_btree_iter_dontneed(&iter);
err:
bch2_trans_iter_put(trans, iter);
bch2_trans_iter_exit(trans, &iter);
return ret;
}
static int bkey_cached_check_fn(struct six_lock *lock, void *p)
{
struct bkey_cached *ck = container_of(lock, struct bkey_cached, c.lock);
const struct btree_iter *iter = p;
const struct btree_path *path = p;
return ck->key.btree_id == iter->btree_id &&
!bpos_cmp(ck->key.pos, iter->pos) ? 0 : -1;
return ck->key.btree_id == path->btree_id &&
!bpos_cmp(ck->key.pos, path->pos) ? 0 : -1;
}
__flatten
int bch2_btree_iter_traverse_cached(struct btree_trans *trans, struct btree_iter *iter)
int bch2_btree_path_traverse_cached(struct btree_trans *trans, struct btree_path *path,
unsigned flags)
{
struct bch_fs *c = trans->c;
struct bkey_cached *ck;
int ret = 0;
BUG_ON(iter->level);
BUG_ON(path->level);
iter->l[1].b = NULL;
path->l[1].b = NULL;
if (bch2_btree_node_relock(trans, iter, 0)) {
ck = (void *) iter->l[0].b;
if (bch2_btree_node_relock(trans, path, 0)) {
ck = (void *) path->l[0].b;
goto fill;
}
retry:
ck = bch2_btree_key_cache_find(c, iter->btree_id, iter->pos);
ck = bch2_btree_key_cache_find(c, path->btree_id, path->pos);
if (!ck) {
if (iter->flags & BTREE_ITER_CACHED_NOCREATE) {
iter->l[0].b = NULL;
if (flags & BTREE_ITER_CACHED_NOCREATE) {
path->l[0].b = NULL;
return 0;
}
ck = btree_key_cache_create(&c->btree_key_cache,
iter->btree_id, iter->pos);
path->btree_id, path->pos);
ret = PTR_ERR_OR_ZERO(ck);
if (ret)
goto err;
if (!ck)
goto retry;
mark_btree_node_locked(iter, 0, SIX_LOCK_intent);
iter->locks_want = 1;
mark_btree_node_locked(path, 0, SIX_LOCK_intent);
path->locks_want = 1;
} else {
enum six_lock_type lock_want = __btree_lock_want(iter, 0);
enum six_lock_type lock_want = __btree_lock_want(path, 0);
if (!btree_node_lock(trans, iter, (void *) ck, iter->pos, 0, lock_want,
bkey_cached_check_fn, iter, _THIS_IP_)) {
if (!btree_node_lock(trans, path, (void *) ck, path->pos, 0,
lock_want,
bkey_cached_check_fn, path, _THIS_IP_)) {
if (!trans->restarted)
goto retry;
......@@ -311,28 +313,27 @@ int bch2_btree_iter_traverse_cached(struct btree_trans *trans, struct btree_iter
goto err;
}
if (ck->key.btree_id != iter->btree_id ||
bpos_cmp(ck->key.pos, iter->pos)) {
if (ck->key.btree_id != path->btree_id ||
bpos_cmp(ck->key.pos, path->pos)) {
six_unlock_type(&ck->c.lock, lock_want);
goto retry;
}
mark_btree_node_locked(iter, 0, lock_want);
mark_btree_node_locked(path, 0, lock_want);
}
iter->l[0].lock_seq = ck->c.lock.state.seq;
iter->l[0].b = (void *) ck;
path->l[0].lock_seq = ck->c.lock.state.seq;
path->l[0].b = (void *) ck;
fill:
if (!ck->valid && !(iter->flags & BTREE_ITER_CACHED_NOFILL)) {
if (!iter->locks_want &&
!!__bch2_btree_iter_upgrade(trans, iter, 1)) {
if (!ck->valid && !(flags & BTREE_ITER_CACHED_NOFILL)) {
if (!path->locks_want &&
!__bch2_btree_path_upgrade(trans, path, 1)) {
trace_transaction_restart_ip(trans->ip, _THIS_IP_);
BUG_ON(!trans->restarted);
ret = -EINTR;
ret = btree_trans_restart(trans);
goto err;
}
ret = btree_key_cache_fill(trans, iter, ck);
ret = btree_key_cache_fill(trans, path, ck);
if (ret)
goto err;
}
......@@ -340,22 +341,14 @@ int bch2_btree_iter_traverse_cached(struct btree_trans *trans, struct btree_iter
if (!test_bit(BKEY_CACHED_ACCESSED, &ck->flags))
set_bit(BKEY_CACHED_ACCESSED, &ck->flags);
iter->uptodate = BTREE_ITER_UPTODATE;
if ((iter->flags & BTREE_ITER_INTENT) &&
!bch2_btree_iter_upgrade(trans, iter, 1)) {
BUG_ON(!trans->restarted);
ret = -EINTR;
}
BUG_ON(!ret && !btree_node_locked(iter, 0));
path->uptodate = BTREE_ITER_UPTODATE;
BUG_ON(btree_node_locked_type(path, 0) != btree_lock_want(path, 0));
return ret;
err:
if (ret != -EINTR) {
btree_node_unlock(iter, 0);
iter->flags |= BTREE_ITER_ERROR;
iter->l[0].b = BTREE_ITER_NO_NODE_ERROR;
btree_node_unlock(path, 0);
path->l[0].b = BTREE_ITER_NO_NODE_ERROR;
}
return ret;
}
......@@ -368,23 +361,23 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans,
{
struct bch_fs *c = trans->c;
struct journal *j = &c->journal;
struct btree_iter *c_iter = NULL, *b_iter = NULL;
struct btree_iter c_iter, b_iter;
struct bkey_cached *ck = NULL;
int ret;
b_iter = bch2_trans_get_iter(trans, key.btree_id, key.pos,
BTREE_ITER_SLOTS|
BTREE_ITER_INTENT);
c_iter = bch2_trans_get_iter(trans, key.btree_id, key.pos,
BTREE_ITER_CACHED|
BTREE_ITER_CACHED_NOFILL|
BTREE_ITER_CACHED_NOCREATE|
BTREE_ITER_INTENT);
ret = bch2_btree_iter_traverse(c_iter);
bch2_trans_iter_init(trans, &b_iter, key.btree_id, key.pos,
BTREE_ITER_SLOTS|
BTREE_ITER_INTENT);
bch2_trans_iter_init(trans, &c_iter, key.btree_id, key.pos,
BTREE_ITER_CACHED|
BTREE_ITER_CACHED_NOFILL|
BTREE_ITER_CACHED_NOCREATE|
BTREE_ITER_INTENT);
ret = bch2_btree_iter_traverse(&c_iter);
if (ret)
goto out;
ck = (void *) c_iter->l[0].b;
ck = (void *) c_iter.path->l[0].b;
if (!ck ||
(journal_seq && ck->journal.seq != journal_seq))
goto out;
......@@ -400,8 +393,8 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans,
* allocator/copygc depend on journal reclaim making progress, we need
* to be using alloc reserves:
* */
ret = bch2_btree_iter_traverse(b_iter) ?:
bch2_trans_update(trans, b_iter, ck->k,
ret = bch2_btree_iter_traverse(&b_iter) ?:
bch2_trans_update(trans, &b_iter, ck->k,
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|
BTREE_TRIGGER_NORUN) ?:
bch2_trans_commit(trans, NULL, NULL,
......@@ -423,7 +416,7 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans,
bch2_journal_pin_drop(j, &ck->journal);
bch2_journal_preres_put(j, &ck->res);
BUG_ON(!btree_node_locked(c_iter, 0));
BUG_ON(!btree_node_locked(c_iter.path, 0));
if (!evict) {
if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
......@@ -432,10 +425,10 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans,
}
} else {
evict:
BUG_ON(!btree_node_intent_locked(c_iter, 0));
BUG_ON(!btree_node_intent_locked(c_iter.path, 0));
mark_btree_node_unlocked(c_iter, 0);
c_iter->l[0].b = NULL;
mark_btree_node_unlocked(c_iter.path, 0);
c_iter.path->l[0].b = NULL;
six_lock_write(&ck->c.lock, NULL, NULL);
......@@ -451,8 +444,8 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans,
mutex_unlock(&c->btree_key_cache.lock);
}
out:
bch2_trans_iter_put(trans, b_iter);
bch2_trans_iter_put(trans, c_iter);
bch2_trans_iter_exit(trans, &b_iter);
bch2_trans_iter_exit(trans, &c_iter);
return ret;
}
......@@ -503,11 +496,11 @@ int bch2_btree_key_cache_flush(struct btree_trans *trans,
}
bool bch2_btree_insert_key_cached(struct btree_trans *trans,
struct btree_iter *iter,
struct btree_path *path,
struct bkey_i *insert)
{
struct bch_fs *c = trans->c;
struct bkey_cached *ck = (void *) iter->l[0].b;
struct bkey_cached *ck = (void *) path->l[0].b;
bool kick_reclaim = false;
BUG_ON(insert->u64s > ck->u64s);
......
......@@ -26,10 +26,11 @@ int bch2_btree_key_cache_journal_flush(struct journal *,
struct bkey_cached *
bch2_btree_key_cache_find(struct bch_fs *, enum btree_id, struct bpos);
int bch2_btree_iter_traverse_cached(struct btree_trans *, struct btree_iter *);
int bch2_btree_path_traverse_cached(struct btree_trans *, struct btree_path *,
unsigned);
bool bch2_btree_insert_key_cached(struct btree_trans *,
struct btree_iter *, struct bkey_i *);
struct btree_path *, struct bkey_i *);
int bch2_btree_key_cache_flush(struct btree_trans *,
enum btree_id, struct bpos);
#ifdef CONFIG_BCACHEFS_DEBUG
......
......@@ -20,7 +20,7 @@ enum btree_node_locked_type {
BTREE_NODE_INTENT_LOCKED = SIX_LOCK_intent,
};
static inline int btree_node_locked_type(struct btree_iter *iter,
static inline int btree_node_locked_type(struct btree_path *path,
unsigned level)
{
/*
......@@ -29,35 +29,35 @@ static inline int btree_node_locked_type(struct btree_iter *iter,
* branches:
*/
return BTREE_NODE_UNLOCKED +
((iter->nodes_locked >> level) & 1) +
((iter->nodes_intent_locked >> level) & 1);
((path->nodes_locked >> level) & 1) +
((path->nodes_intent_locked >> level) & 1);
}
static inline bool btree_node_intent_locked(struct btree_iter *iter,
static inline bool btree_node_intent_locked(struct btree_path *path,
unsigned level)
{
return btree_node_locked_type(iter, level) == BTREE_NODE_INTENT_LOCKED;
return btree_node_locked_type(path, level) == BTREE_NODE_INTENT_LOCKED;
}
static inline bool btree_node_read_locked(struct btree_iter *iter,
static inline bool btree_node_read_locked(struct btree_path *path,
unsigned level)
{
return btree_node_locked_type(iter, level) == BTREE_NODE_READ_LOCKED;
return btree_node_locked_type(path, level) == BTREE_NODE_READ_LOCKED;
}
static inline bool btree_node_locked(struct btree_iter *iter, unsigned level)
static inline bool btree_node_locked(struct btree_path *path, unsigned level)
{
return iter->nodes_locked & (1 << level);
return path->nodes_locked & (1 << level);
}
static inline void mark_btree_node_unlocked(struct btree_iter *iter,
static inline void mark_btree_node_unlocked(struct btree_path *path,
unsigned level)
{
iter->nodes_locked &= ~(1 << level);
iter->nodes_intent_locked &= ~(1 << level);
path->nodes_locked &= ~(1 << level);
path->nodes_intent_locked &= ~(1 << level);
}
static inline void mark_btree_node_locked(struct btree_iter *iter,
static inline void mark_btree_node_locked(struct btree_path *path,
unsigned level,
enum six_lock_type type)
{
......@@ -65,52 +65,52 @@ static inline void mark_btree_node_locked(struct btree_iter *iter,
BUILD_BUG_ON(SIX_LOCK_read != 0);
BUILD_BUG_ON(SIX_LOCK_intent != 1);
iter->nodes_locked |= 1 << level;
iter->nodes_intent_locked |= type << level;
path->nodes_locked |= 1 << level;
path->nodes_intent_locked |= type << level;
}
static inline void mark_btree_node_intent_locked(struct btree_iter *iter,
static inline void mark_btree_node_intent_locked(struct btree_path *path,
unsigned level)
{
mark_btree_node_locked(iter, level, SIX_LOCK_intent);
mark_btree_node_locked(path, level, SIX_LOCK_intent);
}
static inline enum six_lock_type __btree_lock_want(struct btree_iter *iter, int level)
static inline enum six_lock_type __btree_lock_want(struct btree_path *path, int level)
{
return level < iter->locks_want
return level < path->locks_want
? SIX_LOCK_intent
: SIX_LOCK_read;
}
static inline enum btree_node_locked_type
btree_lock_want(struct btree_iter *iter, int level)
btree_lock_want(struct btree_path *path, int level)
{
if (level < iter->level)
if (level < path->level)
return BTREE_NODE_UNLOCKED;
if (level < iter->locks_want)
if (level < path->locks_want)
return BTREE_NODE_INTENT_LOCKED;
if (level == iter->level)
if (level == path->level)
return BTREE_NODE_READ_LOCKED;
return BTREE_NODE_UNLOCKED;
}
static inline void btree_node_unlock(struct btree_iter *iter, unsigned level)
static inline void btree_node_unlock(struct btree_path *path, unsigned level)
{
int lock_type = btree_node_locked_type(iter, level);
int lock_type = btree_node_locked_type(path, level);
EBUG_ON(level >= BTREE_MAX_DEPTH);
if (lock_type != BTREE_NODE_UNLOCKED)
six_unlock_type(&iter->l[level].b->c.lock, lock_type);
mark_btree_node_unlocked(iter, level);
six_unlock_type(&path->l[level].b->c.lock, lock_type);
mark_btree_node_unlocked(path, level);
}
static inline void __bch2_btree_iter_unlock(struct btree_iter *iter)
static inline void __bch2_btree_path_unlock(struct btree_path *path)
{
btree_iter_set_dirty(iter, BTREE_ITER_NEED_RELOCK);
btree_path_set_dirty(path, BTREE_ITER_NEED_RELOCK);
while (iter->nodes_locked)
btree_node_unlock(iter, __ffs(iter->nodes_locked));
while (path->nodes_locked)
btree_node_unlock(path, __ffs(path->nodes_locked));
}
static inline enum bch_time_stats lock_to_time_stat(enum six_lock_type type)
......@@ -154,11 +154,11 @@ static inline bool btree_node_lock_increment(struct btree_trans *trans,
struct btree *b, unsigned level,
enum btree_node_locked_type want)
{
struct btree_iter *iter;
struct btree_path *path;
trans_for_each_iter(trans, iter)
if (iter->l[level].b == b &&
btree_node_locked_type(iter, level) >= want) {
trans_for_each_path(trans, path)
if (path->l[level].b == b &&
btree_node_locked_type(path, level) >= want) {
six_lock_increment(&b->c.lock, want);
return true;
}
......@@ -166,38 +166,39 @@ static inline bool btree_node_lock_increment(struct btree_trans *trans,
return false;
}
bool __bch2_btree_node_lock(struct btree_trans *, struct btree_iter *,
bool __bch2_btree_node_lock(struct btree_trans *, struct btree_path *,
struct btree *, struct bpos, unsigned,
enum six_lock_type, six_lock_should_sleep_fn,
void *, unsigned long);
enum six_lock_type,
six_lock_should_sleep_fn, void *,
unsigned long);
static inline bool btree_node_lock(struct btree_trans *trans,
struct btree_iter *iter,
struct btree_path *path,
struct btree *b, struct bpos pos, unsigned level,
enum six_lock_type type,
six_lock_should_sleep_fn should_sleep_fn, void *p,
unsigned long ip)
{
EBUG_ON(level >= BTREE_MAX_DEPTH);
EBUG_ON(!(trans->iters_linked & (1ULL << iter->idx)));
EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));
return likely(six_trylock_type(&b->c.lock, type)) ||
btree_node_lock_increment(trans, b, level, type) ||
__bch2_btree_node_lock(trans, iter, b, pos, level, type,
__bch2_btree_node_lock(trans, path, b, pos, level, type,
should_sleep_fn, p, ip);
}
bool __bch2_btree_node_relock(struct btree_trans *, struct btree_iter *, unsigned);
bool __bch2_btree_node_relock(struct btree_trans *, struct btree_path *, unsigned);
static inline bool bch2_btree_node_relock(struct btree_trans *trans,
struct btree_iter *iter, unsigned level)
struct btree_path *path, unsigned level)
{
EBUG_ON(btree_node_locked(iter, level) &&
btree_node_locked_type(iter, level) !=
__btree_lock_want(iter, level));
EBUG_ON(btree_node_locked(path, level) &&
btree_node_locked_type(path, level) !=
__btree_lock_want(path, level));
return likely(btree_node_locked(iter, level)) ||
__bch2_btree_node_relock(trans, iter, level);
return likely(btree_node_locked(path, level)) ||
__bch2_btree_node_relock(trans, path, level);
}
/*
......@@ -205,32 +206,32 @@ static inline bool bch2_btree_node_relock(struct btree_trans *trans,
* succeed:
*/
static inline void
bch2_btree_node_unlock_write_inlined(struct btree_trans *trans, struct btree_iter *iter,
bch2_btree_node_unlock_write_inlined(struct btree_trans *trans, struct btree_path *path,
struct btree *b)
{
struct btree_iter *linked;
struct btree_path *linked;
EBUG_ON(iter->l[b->c.level].b != b);
EBUG_ON(iter->l[b->c.level].lock_seq + 1 != b->c.lock.state.seq);
EBUG_ON(path->l[b->c.level].b != b);
EBUG_ON(path->l[b->c.level].lock_seq + 1 != b->c.lock.state.seq);
trans_for_each_iter_with_node(trans, b, linked)
trans_for_each_path_with_node(trans, b, linked)
linked->l[b->c.level].lock_seq += 2;
six_unlock_write(&b->c.lock);
}
void bch2_btree_node_unlock_write(struct btree_trans *,
struct btree_iter *, struct btree *);
struct btree_path *, struct btree *);
void __bch2_btree_node_lock_write(struct btree_trans *, struct btree *);
static inline void bch2_btree_node_lock_write(struct btree_trans *trans,
struct btree_iter *iter,
struct btree_path *path,
struct btree *b)
{
EBUG_ON(iter->l[b->c.level].b != b);
EBUG_ON(iter->l[b->c.level].lock_seq != b->c.lock.state.seq);
EBUG_ON(!btree_node_intent_locked(iter, b->c.level));
EBUG_ON(path->l[b->c.level].b != b);
EBUG_ON(path->l[b->c.level].lock_seq != b->c.lock.state.seq);
EBUG_ON(!btree_node_intent_locked(path, b->c.level));
if (unlikely(!six_trylock_write(&b->c.lock)))
__bch2_btree_node_lock_write(trans, b);
......
......@@ -210,7 +210,7 @@ struct btree_node_iter {
#define __BTREE_ITER_ALL_SNAPSHOTS (1 << 11)
#define BTREE_ITER_ALL_SNAPSHOTS (1 << 12)
enum btree_iter_uptodate {
enum btree_path_uptodate {
BTREE_ITER_UPTODATE = 0,
BTREE_ITER_NEED_RELOCK = 1,
BTREE_ITER_NEED_TRAVERSE = 2,
......@@ -225,51 +225,66 @@ enum btree_iter_uptodate {
#define BTREE_ITER_NO_NODE_ERROR ((struct btree *) 7)
#define BTREE_ITER_NO_NODE_CACHED ((struct btree *) 8)
/*
* @pos - iterator's current position
* @level - current btree depth
* @locks_want - btree level below which we start taking intent locks
* @nodes_locked - bitmask indicating which nodes in @nodes are locked
* @nodes_intent_locked - bitmask indicating which locks are intent locks
*/
struct btree_iter {
struct btree_trans *trans;
unsigned long ip_allocated;
struct btree_path {
u8 idx;
u8 child_idx;
u8 sorted_idx;
u8 ref;
u8 intent_ref;
/* btree_iter_copy starts here: */
u16 flags;
/* When we're filtering by snapshot, the snapshot ID we're looking for: */
unsigned snapshot;
struct bpos pos;
struct bpos real_pos;
enum btree_id btree_id:4;
bool cached:1;
enum btree_iter_uptodate uptodate:2;
bool preserve:1;
enum btree_path_uptodate uptodate:2;
/*
* True if we've returned a key (and thus are expected to keep it
* locked), false after set_pos - for avoiding spurious transaction
* restarts in bch2_trans_relock():
* When true, failing to relock this path will cause the transaction to
* restart:
*/
bool should_be_locked:1;
unsigned level:4,
min_depth:4,
unsigned level:3,
locks_want:4,
nodes_locked:4,
nodes_intent_locked:4;
struct btree_iter_level {
struct btree_path_level {
struct btree *b;
struct btree_node_iter iter;
u32 lock_seq;
} l[BTREE_MAX_DEPTH];
#ifdef CONFIG_BCACHEFS_DEBUG
unsigned long ip_allocated;
#endif
};
static inline struct btree_path_level *path_l(struct btree_path *path)
{
return path->l + path->level;
}
/*
* @pos - iterator's current position
* @level - current btree depth
* @locks_want - btree level below which we start taking intent locks
* @nodes_locked - bitmask indicating which nodes in @nodes are locked
* @nodes_intent_locked - bitmask indicating which locks are intent locks
*/
struct btree_iter {
struct btree_trans *trans;
struct btree_path *path;
enum btree_id btree_id:4;
unsigned min_depth:4;
/* btree_iter_copy starts here: */
u16 flags;
/* When we're filtering by snapshot, the snapshot ID we're looking for: */
unsigned snapshot;
struct bpos pos;
struct bpos pos_after_commit;
/*
* Current unpacked key - so that bch2_btree_iter_next()/
* bch2_btree_iter_next_slot() can correctly advance pos.
......@@ -277,11 +292,6 @@ struct btree_iter {
struct bkey k;
};
static inline struct btree_iter_level *iter_l(struct btree_iter *iter)
{
return iter->l + iter->level;
}
struct btree_key_cache {
struct mutex lock;
struct rhashtable table;
......@@ -329,7 +339,7 @@ struct btree_insert_entry {
bool cached:1;
bool trans_triggers_run:1;
struct bkey_i *k;
struct btree_iter *iter;
struct btree_path *path;
unsigned long ip_allocated;
};
......@@ -354,7 +364,7 @@ struct btree_trans {
#ifdef CONFIG_BCACHEFS_DEBUG
struct list_head list;
struct btree *locking;
unsigned locking_iter_idx;
unsigned locking_path_idx;
struct bpos locking_pos;
u8 locking_btree_id;
u8 locking_level;
......@@ -369,23 +379,21 @@ struct btree_trans {
bool error:1;
bool in_traverse_all:1;
bool restarted:1;
bool iters_sorted:1;
bool paths_sorted:1;
/*
* For when bch2_trans_update notices we'll be splitting a compressed
* extent:
*/
unsigned extra_journal_res;
u64 iters_linked;
u64 iters_live;
u64 iters_touched;
u64 paths_allocated;
unsigned mem_top;
unsigned mem_bytes;
void *mem;
u8 sorted[BTREE_ITER_MAX + 8];
struct btree_iter *iters;
struct btree_path *paths;
struct btree_insert_entry *updates;
/* update path: */
......@@ -589,16 +597,6 @@ static inline bool btree_node_is_extents(struct btree *b)
return btree_node_type_is_extents(btree_node_type(b));
}
static inline enum btree_node_type btree_iter_key_type(struct btree_iter *iter)
{
return __btree_node_type(iter->level, iter->btree_id);
}
static inline bool btree_iter_is_extents(struct btree_iter *iter)
{
return btree_node_type_is_extents(btree_iter_key_type(iter));
}
#define BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS \
((1U << BKEY_TYPE_extents)| \
(1U << BKEY_TYPE_inodes)| \
......
......@@ -8,9 +8,9 @@
struct bch_fs;
struct btree;
void bch2_btree_node_lock_for_insert(struct btree_trans *, struct btree_iter *,
void bch2_btree_node_lock_for_insert(struct btree_trans *, struct btree_path *,
struct btree *);
bool bch2_btree_bset_insert_key(struct btree_trans *, struct btree_iter *,
bool bch2_btree_bset_insert_key(struct btree_trans *, struct btree_path *,
struct btree *, struct btree_node_iter *,
struct bkey_i *);
void bch2_btree_add_journal_pin(struct bch_fs *, struct btree *, u64);
......@@ -135,4 +135,13 @@ static inline int bch2_trans_commit(struct btree_trans *trans,
(_i) < (_trans)->updates + (_trans)->nr_updates; \
(_i)++)
struct bkey_i *__bch2_btree_trans_peek_updates(struct btree_iter *);
static inline struct bkey_i *btree_trans_peek_updates(struct btree_iter *iter)
{
return iter->flags & BTREE_ITER_WITH_UPDATES
? __bch2_btree_trans_peek_updates(iter)
: NULL;
}
#endif /* _BCACHEFS_BTREE_UPDATE_H */
This diff is collapsed.
......@@ -117,39 +117,39 @@ struct btree *__bch2_btree_node_alloc_replacement(struct btree_update *,
struct btree *,
struct bkey_format);
int bch2_btree_split_leaf(struct btree_trans *, struct btree_iter *, unsigned);
int bch2_btree_split_leaf(struct btree_trans *, struct btree_path *, unsigned);
int __bch2_foreground_maybe_merge(struct btree_trans *, struct btree_iter *,
int __bch2_foreground_maybe_merge(struct btree_trans *, struct btree_path *,
unsigned, unsigned, enum btree_node_sibling);
static inline int bch2_foreground_maybe_merge_sibling(struct btree_trans *trans,
struct btree_iter *iter,
struct btree_path *path,
unsigned level, unsigned flags,
enum btree_node_sibling sib)
{
struct btree *b;
if (iter->uptodate >= BTREE_ITER_NEED_TRAVERSE)
if (path->uptodate >= BTREE_ITER_NEED_TRAVERSE)
return 0;
if (!bch2_btree_node_relock(trans, iter, level))
if (!bch2_btree_node_relock(trans, path, level))
return 0;
b = iter->l[level].b;
b = path->l[level].b;
if (b->sib_u64s[sib] > trans->c->btree_foreground_merge_threshold)
return 0;
return __bch2_foreground_maybe_merge(trans, iter, level, flags, sib);
return __bch2_foreground_maybe_merge(trans, path, level, flags, sib);
}
static inline int bch2_foreground_maybe_merge(struct btree_trans *trans,
struct btree_iter *iter,
struct btree_path *path,
unsigned level,
unsigned flags)
{
return bch2_foreground_maybe_merge_sibling(trans, iter, level, flags,
return bch2_foreground_maybe_merge_sibling(trans, path, level, flags,
btree_prev_sib) ?:
bch2_foreground_maybe_merge_sibling(trans, iter, level, flags,
bch2_foreground_maybe_merge_sibling(trans, path, level, flags,
btree_next_sib);
}
......
This diff is collapsed.
This diff is collapsed.
......@@ -228,13 +228,13 @@ void bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *,
int bch2_mark_key(struct bch_fs *, struct bkey_s_c, unsigned);
int bch2_mark_update(struct btree_trans *, struct btree_iter *,
int bch2_mark_update(struct btree_trans *, struct btree_path *,
struct bkey_i *, unsigned);
int bch2_trans_mark_key(struct btree_trans *, struct bkey_s_c,
struct bkey_s_c, unsigned);
int bch2_trans_mark_update(struct btree_trans *, struct btree_iter *iter,
struct bkey_i *insert, unsigned);
int bch2_trans_mark_update(struct btree_trans *, struct btree_path *,
struct bkey_i *, unsigned);
void bch2_trans_fs_usage_apply(struct btree_trans *, struct replicas_delta_list *);
int bch2_trans_mark_metadata_bucket(struct btree_trans *, struct bch_dev *,
......
......@@ -243,7 +243,7 @@ static ssize_t bch2_read_btree(struct file *file, char __user *buf,
{
struct dump_iter *i = file->private_data;
struct btree_trans trans;
struct btree_iter *iter;
struct btree_iter iter;
struct bkey_s_c k;
int err;
......@@ -260,10 +260,10 @@ static ssize_t bch2_read_btree(struct file *file, char __user *buf,
bch2_trans_init(&trans, i->c, 0, 0);
iter = bch2_trans_get_iter(&trans, i->id, i->from,
BTREE_ITER_PREFETCH|
BTREE_ITER_ALL_SNAPSHOTS);
k = bch2_btree_iter_peek(iter);
bch2_trans_iter_init(&trans, &iter, i->id, i->from,
BTREE_ITER_PREFETCH|
BTREE_ITER_ALL_SNAPSHOTS);
k = bch2_btree_iter_peek(&iter);
while (k.k && !(err = bkey_err(k))) {
bch2_bkey_val_to_text(&PBUF(i->buf), i->c, k);
......@@ -272,8 +272,8 @@ static ssize_t bch2_read_btree(struct file *file, char __user *buf,
i->buf[i->bytes] = '\n';
i->bytes++;
k = bch2_btree_iter_next(iter);
i->from = iter->pos;
k = bch2_btree_iter_next(&iter);
i->from = iter.pos;
err = flush_buf(i);
if (err)
......@@ -282,7 +282,7 @@ static ssize_t bch2_read_btree(struct file *file, char __user *buf,
if (!i->size)
break;
}
bch2_trans_iter_put(&trans, iter);
bch2_trans_iter_exit(&trans, &iter);
bch2_trans_exit(&trans);
......@@ -301,7 +301,7 @@ static ssize_t bch2_read_btree_formats(struct file *file, char __user *buf,
{
struct dump_iter *i = file->private_data;
struct btree_trans trans;
struct btree_iter *iter;
struct btree_iter iter;
struct btree *b;
int err;
......@@ -336,7 +336,7 @@ static ssize_t bch2_read_btree_formats(struct file *file, char __user *buf,
if (!i->size)
break;
}
bch2_trans_iter_put(&trans, iter);
bch2_trans_iter_exit(&trans, &iter);
bch2_trans_exit(&trans);
......@@ -355,7 +355,7 @@ static ssize_t bch2_read_bfloat_failed(struct file *file, char __user *buf,
{
struct dump_iter *i = file->private_data;
struct btree_trans trans;
struct btree_iter *iter;
struct btree_iter iter;
struct bkey_s_c k;
struct btree *prev_node = NULL;
int err;
......@@ -373,11 +373,11 @@ static ssize_t bch2_read_bfloat_failed(struct file *file, char __user *buf,
bch2_trans_init(&trans, i->c, 0, 0);
iter = bch2_trans_get_iter(&trans, i->id, i->from, BTREE_ITER_PREFETCH);
bch2_trans_iter_init(&trans, &iter, i->id, i->from, BTREE_ITER_PREFETCH);
while ((k = bch2_btree_iter_peek(iter)).k &&
while ((k = bch2_btree_iter_peek(&iter)).k &&
!(err = bkey_err(k))) {
struct btree_iter_level *l = &iter->l[0];
struct btree_path_level *l = &iter.path->l[0];
struct bkey_packed *_k =
bch2_btree_node_iter_peek(&l->iter, l->b);
......@@ -396,8 +396,8 @@ static ssize_t bch2_read_bfloat_failed(struct file *file, char __user *buf,
if (err)
break;
bch2_btree_iter_advance(iter);
i->from = iter->pos;
bch2_btree_iter_advance(&iter);
i->from = iter.pos;
err = flush_buf(i);
if (err)
......
......@@ -183,7 +183,8 @@ int bch2_dirent_rename(struct btree_trans *trans,
const struct qstr *dst_name, u64 *dst_inum, u64 *dst_offset,
enum bch_rename_mode mode)
{
struct btree_iter *src_iter = NULL, *dst_iter = NULL;
struct btree_iter src_iter = { NULL };
struct btree_iter dst_iter = { NULL };
struct bkey_s_c old_src, old_dst;
struct bkey_i_dirent *new_src = NULL, *new_dst = NULL;
struct bpos dst_pos =
......@@ -199,17 +200,16 @@ int bch2_dirent_rename(struct btree_trans *trans,
* the target already exists - we're relying on the VFS
* to do that check for us for correctness:
*/
dst_iter = mode == BCH_RENAME
? bch2_hash_hole(trans, bch2_dirent_hash_desc,
ret = mode == BCH_RENAME
? bch2_hash_hole(trans, &dst_iter, bch2_dirent_hash_desc,
dst_hash, dst_dir, dst_name)
: bch2_hash_lookup(trans, bch2_dirent_hash_desc,
: bch2_hash_lookup(trans, &dst_iter, bch2_dirent_hash_desc,
dst_hash, dst_dir, dst_name,
BTREE_ITER_INTENT);
ret = PTR_ERR_OR_ZERO(dst_iter);
if (ret)
goto out;
old_dst = bch2_btree_iter_peek_slot(dst_iter);
old_dst = bch2_btree_iter_peek_slot(&dst_iter);
ret = bkey_err(old_dst);
if (ret)
goto out;
......@@ -217,17 +217,16 @@ int bch2_dirent_rename(struct btree_trans *trans,
if (mode != BCH_RENAME)
*dst_inum = le64_to_cpu(bkey_s_c_to_dirent(old_dst).v->d_inum);
if (mode != BCH_RENAME_EXCHANGE)
*src_offset = dst_iter->pos.offset;
*src_offset = dst_iter.pos.offset;
/* Lookup src: */
src_iter = bch2_hash_lookup(trans, bch2_dirent_hash_desc,
src_hash, src_dir, src_name,
BTREE_ITER_INTENT);
ret = PTR_ERR_OR_ZERO(src_iter);
ret = bch2_hash_lookup(trans, &src_iter, bch2_dirent_hash_desc,
src_hash, src_dir, src_name,
BTREE_ITER_INTENT);
if (ret)
goto out;
old_src = bch2_btree_iter_peek_slot(src_iter);
old_src = bch2_btree_iter_peek_slot(&src_iter);
ret = bkey_err(old_src);
if (ret)
goto out;
......@@ -241,7 +240,7 @@ int bch2_dirent_rename(struct btree_trans *trans,
goto out;
dirent_copy_target(new_dst, bkey_s_c_to_dirent(old_src));
new_dst->k.p = dst_iter->pos;
new_dst->k.p = dst_iter.pos;
/* Create new src key: */
if (mode == BCH_RENAME_EXCHANGE) {
......@@ -251,7 +250,7 @@ int bch2_dirent_rename(struct btree_trans *trans,
goto out;
dirent_copy_target(new_src, bkey_s_c_to_dirent(old_dst));
new_src->k.p = src_iter->pos;
new_src->k.p = src_iter.pos;
} else {
new_src = bch2_trans_kmalloc(trans, sizeof(struct bkey_i));
ret = PTR_ERR_OR_ZERO(new_src);
......@@ -259,10 +258,10 @@ int bch2_dirent_rename(struct btree_trans *trans,
goto out;
bkey_init(&new_src->k);
new_src->k.p = src_iter->pos;
new_src->k.p = src_iter.pos;
if (bkey_cmp(dst_pos, src_iter->pos) <= 0 &&
bkey_cmp(src_iter->pos, dst_iter->pos) < 0) {
if (bkey_cmp(dst_pos, src_iter.pos) <= 0 &&
bkey_cmp(src_iter.pos, dst_iter.pos) < 0) {
/*
* We have a hash collision for the new dst key,
* and new_src - the key we're deleting - is between
......@@ -275,8 +274,8 @@ int bch2_dirent_rename(struct btree_trans *trans,
* If we're not overwriting, we can just insert
* new_dst at the src position:
*/
new_dst->k.p = src_iter->pos;
bch2_trans_update(trans, src_iter,
new_dst->k.p = src_iter.pos;
bch2_trans_update(trans, &src_iter,
&new_dst->k_i, 0);
goto out_set_offset;
} else {
......@@ -290,7 +289,7 @@ int bch2_dirent_rename(struct btree_trans *trans,
} else {
/* Check if we need a whiteout to delete src: */
ret = bch2_hash_needs_whiteout(trans, bch2_dirent_hash_desc,
src_hash, src_iter);
src_hash, &src_iter);
if (ret < 0)
goto out;
......@@ -299,15 +298,15 @@ int bch2_dirent_rename(struct btree_trans *trans,
}
}
bch2_trans_update(trans, src_iter, &new_src->k_i, 0);
bch2_trans_update(trans, dst_iter, &new_dst->k_i, 0);
bch2_trans_update(trans, &src_iter, &new_src->k_i, 0);
bch2_trans_update(trans, &dst_iter, &new_dst->k_i, 0);
out_set_offset:
if (mode == BCH_RENAME_EXCHANGE)
*src_offset = new_src->k.p.offset;
*dst_offset = new_dst->k.p.offset;
out:
bch2_trans_iter_put(trans, src_iter);
bch2_trans_iter_put(trans, dst_iter);
bch2_trans_iter_exit(trans, &src_iter);
bch2_trans_iter_exit(trans, &dst_iter);
return ret;
}
......@@ -319,12 +318,13 @@ int bch2_dirent_delete_at(struct btree_trans *trans,
hash_info, iter);
}
struct btree_iter *
__bch2_dirent_lookup_trans(struct btree_trans *trans, u64 dir_inum,
const struct bch_hash_info *hash_info,
const struct qstr *name, unsigned flags)
int __bch2_dirent_lookup_trans(struct btree_trans *trans,
struct btree_iter *iter,
u64 dir_inum,
const struct bch_hash_info *hash_info,
const struct qstr *name, unsigned flags)
{
return bch2_hash_lookup(trans, bch2_dirent_hash_desc,
return bch2_hash_lookup(trans, iter, bch2_dirent_hash_desc,
hash_info, dir_inum, name, flags);
}
......@@ -333,26 +333,25 @@ u64 bch2_dirent_lookup(struct bch_fs *c, u64 dir_inum,
const struct qstr *name)
{
struct btree_trans trans;
struct btree_iter *iter;
struct btree_iter iter;
struct bkey_s_c k;
u64 inum = 0;
int ret = 0;
bch2_trans_init(&trans, c, 0, 0);
iter = __bch2_dirent_lookup_trans(&trans, dir_inum,
hash_info, name, 0);
ret = PTR_ERR_OR_ZERO(iter);
ret = __bch2_dirent_lookup_trans(&trans, &iter, dir_inum,
hash_info, name, 0);
if (ret)
goto out;
k = bch2_btree_iter_peek_slot(iter);
k = bch2_btree_iter_peek_slot(&iter);
ret = bkey_err(k);
if (ret)
goto out;
inum = le64_to_cpu(bkey_s_c_to_dirent(k).v->d_inum);
bch2_trans_iter_put(&trans, iter);
bch2_trans_iter_exit(&trans, &iter);
out:
BUG_ON(ret == -EINTR);
bch2_trans_exit(&trans);
......@@ -361,7 +360,7 @@ u64 bch2_dirent_lookup(struct bch_fs *c, u64 dir_inum,
int bch2_empty_dir_trans(struct btree_trans *trans, u64 dir_inum)
{
struct btree_iter *iter;
struct btree_iter iter;
struct bkey_s_c k;
int ret;
......@@ -375,7 +374,7 @@ int bch2_empty_dir_trans(struct btree_trans *trans, u64 dir_inum)
break;
}
}
bch2_trans_iter_put(trans, iter);
bch2_trans_iter_exit(trans, &iter);
return ret;
}
......@@ -383,7 +382,7 @@ int bch2_empty_dir_trans(struct btree_trans *trans, u64 dir_inum)
int bch2_readdir(struct bch_fs *c, u64 inum, struct dir_context *ctx)
{
struct btree_trans trans;
struct btree_iter *iter;
struct btree_iter iter;
struct bkey_s_c k;
struct bkey_s_c_dirent dirent;
int ret;
......@@ -412,7 +411,7 @@ int bch2_readdir(struct bch_fs *c, u64 inum, struct dir_context *ctx)
break;
ctx->pos = dirent.k->p.offset + 1;
}
bch2_trans_iter_put(&trans, iter);
bch2_trans_iter_exit(&trans, &iter);
ret = bch2_trans_exit(&trans) ?: ret;
......
......@@ -50,8 +50,7 @@ int bch2_dirent_rename(struct btree_trans *,
const struct qstr *, u64 *, u64 *,
enum bch_rename_mode);
struct btree_iter *
__bch2_dirent_lookup_trans(struct btree_trans *, u64,
int __bch2_dirent_lookup_trans(struct btree_trans *, struct btree_iter *, u64,
const struct bch_hash_info *,
const struct qstr *, unsigned);
u64 bch2_dirent_lookup(struct bch_fs *, u64, const struct bch_hash_info *,
......
......@@ -429,13 +429,14 @@ static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
static int get_stripe_key(struct bch_fs *c, u64 idx, struct ec_stripe_buf *stripe)
{
struct btree_trans trans;
struct btree_iter *iter;
struct btree_iter iter;
struct bkey_s_c k;
int ret;
bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_stripes, POS(0, idx), BTREE_ITER_SLOTS);
k = bch2_btree_iter_peek_slot(iter);
bch2_trans_iter_init(&trans, &iter, BTREE_ID_stripes,
POS(0, idx), BTREE_ITER_SLOTS);
k = bch2_btree_iter_peek_slot(&iter);
ret = bkey_err(k);
if (ret)
goto err;
......@@ -445,6 +446,7 @@ static int get_stripe_key(struct bch_fs *c, u64 idx, struct ec_stripe_buf *strip
}
bkey_reassemble(&stripe->key.k_i, k);
err:
bch2_trans_iter_exit(&trans, &iter);
bch2_trans_exit(&trans);
return ret;
}
......@@ -704,7 +706,7 @@ static int ec_stripe_bkey_insert(struct bch_fs *c,
struct disk_reservation *res)
{
struct btree_trans trans;
struct btree_iter *iter;
struct btree_iter iter;
struct bkey_s_c k;
struct bpos min_pos = POS(0, 1);
struct bpos start_pos = bpos_max(min_pos, POS(0, c->ec_stripe_hint));
......@@ -719,7 +721,7 @@ static int ec_stripe_bkey_insert(struct bch_fs *c,
if (bkey_cmp(k.k->p, POS(0, U32_MAX)) > 0) {
if (start_pos.offset) {
start_pos = min_pos;
bch2_btree_iter_set_pos(iter, start_pos);
bch2_btree_iter_set_pos(&iter, start_pos);
continue;
}
......@@ -733,19 +735,19 @@ static int ec_stripe_bkey_insert(struct bch_fs *c,
goto err;
found_slot:
start_pos = iter->pos;
start_pos = iter.pos;
ret = ec_stripe_mem_alloc(&trans, iter);
ret = ec_stripe_mem_alloc(&trans, &iter);
if (ret)
goto err;
stripe->k.p = iter->pos;
stripe->k.p = iter.pos;
ret = bch2_trans_update(&trans, iter, &stripe->k_i, 0) ?:
ret = bch2_trans_update(&trans, &iter, &stripe->k_i, 0) ?:
bch2_trans_commit(&trans, res, NULL,
BTREE_INSERT_NOFAIL);
err:
bch2_trans_iter_put(&trans, iter);
bch2_trans_iter_exit(&trans, &iter);
if (ret == -EINTR)
goto retry;
......@@ -759,15 +761,15 @@ static int ec_stripe_bkey_insert(struct bch_fs *c,
static int ec_stripe_bkey_update(struct btree_trans *trans,
struct bkey_i_stripe *new)
{
struct btree_iter *iter;
struct btree_iter iter;
struct bkey_s_c k;
const struct bch_stripe *existing;
unsigned i;
int ret;
iter = bch2_trans_get_iter(trans, BTREE_ID_stripes,
new->k.p, BTREE_ITER_INTENT);
k = bch2_btree_iter_peek_slot(iter);
bch2_trans_iter_init(trans, &iter, BTREE_ID_stripes,
new->k.p, BTREE_ITER_INTENT);
k = bch2_btree_iter_peek_slot(&iter);
ret = bkey_err(k);
if (ret)
goto err;
......@@ -790,9 +792,9 @@ static int ec_stripe_bkey_update(struct btree_trans *trans,
stripe_blockcount_set(&new->v, i,
stripe_blockcount_get(existing, i));
ret = bch2_trans_update(trans, iter, &new->k_i, 0);
ret = bch2_trans_update(trans, &iter, &new->k_i, 0);
err:
bch2_trans_iter_put(trans, iter);
bch2_trans_iter_exit(trans, &iter);
return ret;
}
......@@ -820,7 +822,7 @@ static int ec_stripe_update_ptrs(struct bch_fs *c,
struct bkey *pos)
{
struct btree_trans trans;
struct btree_iter *iter;
struct btree_iter iter;
struct bkey_s_c k;
struct bkey_s_extent e;
struct bkey_buf sk;
......@@ -832,23 +834,23 @@ static int ec_stripe_update_ptrs(struct bch_fs *c,
/* XXX this doesn't support the reflink btree */
iter = bch2_trans_get_iter(&trans, BTREE_ID_extents,
bkey_start_pos(pos),
BTREE_ITER_INTENT);
bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
bkey_start_pos(pos),
BTREE_ITER_INTENT);
while ((k = bch2_btree_iter_peek(iter)).k &&
while ((k = bch2_btree_iter_peek(&iter)).k &&
!(ret = bkey_err(k)) &&
bkey_cmp(bkey_start_pos(k.k), pos->p) < 0) {
struct bch_extent_ptr *ptr, *ec_ptr = NULL;
if (extent_has_stripe_ptr(k, s->key.k.p.offset)) {
bch2_btree_iter_advance(iter);
bch2_btree_iter_advance(&iter);
continue;
}
block = bkey_matches_stripe(&s->key.v, k);
if (block < 0) {
bch2_btree_iter_advance(iter);
bch2_btree_iter_advance(&iter);
continue;
}
......@@ -863,21 +865,21 @@ static int ec_stripe_update_ptrs(struct bch_fs *c,
extent_stripe_ptr_add(e, s, ec_ptr, block);
bch2_btree_iter_set_pos(iter, bkey_start_pos(&sk.k->k));
bch2_btree_iter_set_pos(&iter, bkey_start_pos(&sk.k->k));
next_pos = sk.k->k.p;
ret = bch2_btree_iter_traverse(iter) ?:
bch2_trans_update(&trans, iter, sk.k, 0) ?:
ret = bch2_btree_iter_traverse(&iter) ?:
bch2_trans_update(&trans, &iter, sk.k, 0) ?:
bch2_trans_commit(&trans, NULL, NULL,
BTREE_INSERT_NOFAIL);
if (!ret)
bch2_btree_iter_set_pos(iter, next_pos);
bch2_btree_iter_set_pos(&iter, next_pos);
if (ret == -EINTR)
ret = 0;
if (ret)
break;
}
bch2_trans_iter_put(&trans, iter);
bch2_trans_iter_exit(&trans, &iter);
bch2_trans_exit(&trans);
bch2_bkey_buf_exit(&sk, c);
......@@ -1598,7 +1600,7 @@ static int __bch2_stripe_write_key(struct btree_trans *trans,
int bch2_stripes_write(struct bch_fs *c, unsigned flags)
{
struct btree_trans trans;
struct btree_iter *iter;
struct btree_iter iter;
struct genradix_iter giter;
struct bkey_i_stripe *new_key;
struct stripe *m;
......@@ -1609,8 +1611,8 @@ int bch2_stripes_write(struct bch_fs *c, unsigned flags)
bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_stripes, POS_MIN,
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
bch2_trans_iter_init(&trans, &iter, BTREE_ID_stripes, POS_MIN,
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
genradix_for_each(&c->stripes[0], giter, m) {
if (!m->alive)
......@@ -1618,13 +1620,13 @@ int bch2_stripes_write(struct bch_fs *c, unsigned flags)
ret = __bch2_trans_do(&trans, NULL, NULL,
BTREE_INSERT_NOFAIL|flags,
__bch2_stripe_write_key(&trans, iter, m,
__bch2_stripe_write_key(&trans, &iter, m,
giter.pos, new_key));
if (ret)
break;
}
bch2_trans_iter_put(&trans, iter);
bch2_trans_iter_exit(&trans, &iter);
bch2_trans_exit(&trans);
......@@ -1659,19 +1661,19 @@ int bch2_stripes_read(struct bch_fs *c)
int bch2_ec_mem_alloc(struct bch_fs *c, bool gc)
{
struct btree_trans trans;
struct btree_iter *iter;
struct btree_iter iter;
struct bkey_s_c k;
size_t i, idx = 0;
int ret = 0;
bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_stripes, POS(0, U64_MAX), 0);
bch2_trans_iter_init(&trans, &iter, BTREE_ID_stripes, POS(0, U64_MAX), 0);
k = bch2_btree_iter_prev(iter);
k = bch2_btree_iter_prev(&iter);
if (!IS_ERR_OR_NULL(k.k))
idx = k.k->p.offset + 1;
bch2_trans_iter_put(&trans, iter);
bch2_trans_iter_exit(&trans, &iter);
ret = bch2_trans_exit(&trans);
if (ret)
return ret;
......
......@@ -58,7 +58,7 @@ static int count_iters_for_insert(struct btree_trans *trans,
u64 idx = le64_to_cpu(p.v->idx);
unsigned sectors = bpos_min(*end, p.k->p).offset -
bkey_start_offset(p.k);
struct btree_iter *iter;
struct btree_iter iter;
struct bkey_s_c r_k;
for_each_btree_key(trans, iter,
......@@ -83,8 +83,8 @@ static int count_iters_for_insert(struct btree_trans *trans,
break;
}
}
bch2_trans_iter_exit(trans, &iter);
bch2_trans_iter_put(trans, iter);
break;
}
}
......@@ -99,7 +99,7 @@ int bch2_extent_atomic_end(struct btree_trans *trans,
struct bkey_i *insert,
struct bpos *end)
{
struct btree_iter *copy;
struct btree_iter copy;
struct bkey_s_c k;
unsigned nr_iters = 0;
int ret;
......@@ -118,7 +118,7 @@ int bch2_extent_atomic_end(struct btree_trans *trans,
if (ret < 0)
return ret;
copy = bch2_trans_copy_iter(trans, iter);
bch2_trans_copy_iter(&copy, iter);
for_each_btree_key_continue(copy, 0, k, ret) {
unsigned offset = 0;
......@@ -149,7 +149,7 @@ int bch2_extent_atomic_end(struct btree_trans *trans,
break;
}
bch2_trans_iter_put(trans, copy);
bch2_trans_iter_exit(trans, &copy);
return ret < 0 ? ret : 0;
}
......
......@@ -615,7 +615,7 @@ bool bch2_check_range_allocated(struct bch_fs *c, struct bpos pos, u64 size,
unsigned nr_replicas, bool compressed)
{
struct btree_trans trans;
struct btree_iter *iter;
struct btree_iter iter;
struct bpos end = pos;
struct bkey_s_c k;
bool ret = true;
......@@ -636,7 +636,7 @@ bool bch2_check_range_allocated(struct bch_fs *c, struct bpos pos, u64 size,
break;
}
}
bch2_trans_iter_put(&trans, iter);
bch2_trans_iter_exit(&trans, &iter);
bch2_trans_exit(&trans);
......
......@@ -19,16 +19,15 @@ int bch2_create_trans(struct btree_trans *trans, u64 dir_inum,
struct posix_acl *acl)
{
struct bch_fs *c = trans->c;
struct btree_iter *dir_iter = NULL;
struct btree_iter *inode_iter = NULL;
struct btree_iter dir_iter = { NULL };
struct btree_iter inode_iter = { NULL };
struct bch_hash_info hash = bch2_hash_info_init(c, new_inode);
u64 now = bch2_current_time(c);
u64 cpu = raw_smp_processor_id();
u64 dir_offset = 0;
int ret;
dir_iter = bch2_inode_peek(trans, dir_u, dir_inum, BTREE_ITER_INTENT);
ret = PTR_ERR_OR_ZERO(dir_iter);
ret = bch2_inode_peek(trans, &dir_iter, dir_u, dir_inum, BTREE_ITER_INTENT);
if (ret)
goto err;
......@@ -37,8 +36,7 @@ int bch2_create_trans(struct btree_trans *trans, u64 dir_inum,
if (!name)
new_inode->bi_flags |= BCH_INODE_UNLINKED;
inode_iter = bch2_inode_create(trans, new_inode, U32_MAX, cpu);
ret = PTR_ERR_OR_ZERO(inode_iter);
ret = bch2_inode_create(trans, &inode_iter, new_inode, U32_MAX, cpu);
if (ret)
goto err;
......@@ -63,7 +61,7 @@ int bch2_create_trans(struct btree_trans *trans, u64 dir_inum,
if (S_ISDIR(new_inode->bi_mode))
dir_u->bi_nlink++;
ret = bch2_inode_write(trans, dir_iter, dir_u);
ret = bch2_inode_write(trans, &dir_iter, dir_u);
if (ret)
goto err;
......@@ -82,14 +80,14 @@ int bch2_create_trans(struct btree_trans *trans, u64 dir_inum,
}
/* XXX use bch2_btree_iter_set_snapshot() */
inode_iter->snapshot = U32_MAX;
bch2_btree_iter_set_pos(inode_iter, SPOS(0, new_inode->bi_inum, U32_MAX));
inode_iter.snapshot = U32_MAX;
bch2_btree_iter_set_pos(&inode_iter, SPOS(0, new_inode->bi_inum, U32_MAX));
ret = bch2_btree_iter_traverse(inode_iter) ?:
bch2_inode_write(trans, inode_iter, new_inode);
ret = bch2_btree_iter_traverse(&inode_iter) ?:
bch2_inode_write(trans, &inode_iter, new_inode);
err:
bch2_trans_iter_put(trans, inode_iter);
bch2_trans_iter_put(trans, dir_iter);
bch2_trans_iter_exit(trans, &inode_iter);
bch2_trans_iter_exit(trans, &dir_iter);
return ret;
}
......@@ -98,22 +96,21 @@ int bch2_link_trans(struct btree_trans *trans, u64 dir_inum,
struct bch_inode_unpacked *inode_u, const struct qstr *name)
{
struct bch_fs *c = trans->c;
struct btree_iter *dir_iter = NULL, *inode_iter = NULL;
struct btree_iter dir_iter = { NULL };
struct btree_iter inode_iter = { NULL };
struct bch_hash_info dir_hash;
u64 now = bch2_current_time(c);
u64 dir_offset = 0;
int ret;
inode_iter = bch2_inode_peek(trans, inode_u, inum, BTREE_ITER_INTENT);
ret = PTR_ERR_OR_ZERO(inode_iter);
ret = bch2_inode_peek(trans, &inode_iter, inode_u, inum, BTREE_ITER_INTENT);
if (ret)
goto err;
inode_u->bi_ctime = now;
bch2_inode_nlink_inc(inode_u);
dir_iter = bch2_inode_peek(trans, dir_u, dir_inum, 0);
ret = PTR_ERR_OR_ZERO(dir_iter);
ret = bch2_inode_peek(trans, &dir_iter, dir_u, dir_inum, 0);
if (ret)
goto err;
......@@ -133,11 +130,11 @@ int bch2_link_trans(struct btree_trans *trans, u64 dir_inum,
inode_u->bi_dir_offset = dir_offset;
}
ret = bch2_inode_write(trans, dir_iter, dir_u) ?:
bch2_inode_write(trans, inode_iter, inode_u);
ret = bch2_inode_write(trans, &dir_iter, dir_u) ?:
bch2_inode_write(trans, &inode_iter, inode_u);
err:
bch2_trans_iter_put(trans, dir_iter);
bch2_trans_iter_put(trans, inode_iter);
bch2_trans_iter_exit(trans, &dir_iter);
bch2_trans_iter_exit(trans, &inode_iter);
return ret;
}
......@@ -147,35 +144,33 @@ int bch2_unlink_trans(struct btree_trans *trans,
const struct qstr *name)
{
struct bch_fs *c = trans->c;
struct btree_iter *dir_iter = NULL, *dirent_iter = NULL,
*inode_iter = NULL;
struct btree_iter dir_iter = { NULL };
struct btree_iter dirent_iter = { NULL };
struct btree_iter inode_iter = { NULL };
struct bch_hash_info dir_hash;
u64 inum, now = bch2_current_time(c);
struct bkey_s_c k;
int ret;
dir_iter = bch2_inode_peek(trans, dir_u, dir_inum, BTREE_ITER_INTENT);
ret = PTR_ERR_OR_ZERO(dir_iter);
ret = bch2_inode_peek(trans, &dir_iter, dir_u, dir_inum, BTREE_ITER_INTENT);
if (ret)
goto err;
dir_hash = bch2_hash_info_init(c, dir_u);
dirent_iter = __bch2_dirent_lookup_trans(trans, dir_inum, &dir_hash,
name, BTREE_ITER_INTENT);
ret = PTR_ERR_OR_ZERO(dirent_iter);
ret = __bch2_dirent_lookup_trans(trans, &dirent_iter, dir_inum, &dir_hash,
name, BTREE_ITER_INTENT);
if (ret)
goto err;
k = bch2_btree_iter_peek_slot(dirent_iter);
k = bch2_btree_iter_peek_slot(&dirent_iter);
ret = bkey_err(k);
if (ret)
goto err;
inum = le64_to_cpu(bkey_s_c_to_dirent(k).v->d_inum);
inode_iter = bch2_inode_peek(trans, inode_u, inum, BTREE_ITER_INTENT);
ret = PTR_ERR_OR_ZERO(inode_iter);
ret = bch2_inode_peek(trans, &inode_iter, inode_u, inum, BTREE_ITER_INTENT);
if (ret)
goto err;
......@@ -192,13 +187,13 @@ int bch2_unlink_trans(struct btree_trans *trans,
ret = (S_ISDIR(inode_u->bi_mode)
? bch2_empty_dir_trans(trans, inum)
: 0) ?:
bch2_dirent_delete_at(trans, &dir_hash, dirent_iter) ?:
bch2_inode_write(trans, dir_iter, dir_u) ?:
bch2_inode_write(trans, inode_iter, inode_u);
bch2_dirent_delete_at(trans, &dir_hash, &dirent_iter) ?:
bch2_inode_write(trans, &dir_iter, dir_u) ?:
bch2_inode_write(trans, &inode_iter, inode_u);
err:
bch2_trans_iter_put(trans, inode_iter);
bch2_trans_iter_put(trans, dirent_iter);
bch2_trans_iter_put(trans, dir_iter);
bch2_trans_iter_exit(trans, &inode_iter);
bch2_trans_iter_exit(trans, &dirent_iter);
bch2_trans_iter_exit(trans, &dir_iter);
return ret;
}
......@@ -236,25 +231,25 @@ int bch2_rename_trans(struct btree_trans *trans,
enum bch_rename_mode mode)
{
struct bch_fs *c = trans->c;
struct btree_iter *src_dir_iter = NULL, *dst_dir_iter = NULL;
struct btree_iter *src_inode_iter = NULL, *dst_inode_iter = NULL;
struct btree_iter src_dir_iter = { NULL };
struct btree_iter dst_dir_iter = { NULL };
struct btree_iter src_inode_iter = { NULL };
struct btree_iter dst_inode_iter = { NULL };
struct bch_hash_info src_hash, dst_hash;
u64 src_inode, src_offset, dst_inode, dst_offset;
u64 now = bch2_current_time(c);
int ret;
src_dir_iter = bch2_inode_peek(trans, src_dir_u, src_dir,
BTREE_ITER_INTENT);
ret = PTR_ERR_OR_ZERO(src_dir_iter);
ret = bch2_inode_peek(trans, &src_dir_iter, src_dir_u, src_dir,
BTREE_ITER_INTENT);
if (ret)
goto err;
src_hash = bch2_hash_info_init(c, src_dir_u);
if (dst_dir != src_dir) {
dst_dir_iter = bch2_inode_peek(trans, dst_dir_u, dst_dir,
BTREE_ITER_INTENT);
ret = PTR_ERR_OR_ZERO(dst_dir_iter);
ret = bch2_inode_peek(trans, &dst_dir_iter, dst_dir_u, dst_dir,
BTREE_ITER_INTENT);
if (ret)
goto err;
......@@ -273,16 +268,14 @@ int bch2_rename_trans(struct btree_trans *trans,
if (ret)
goto err;
src_inode_iter = bch2_inode_peek(trans, src_inode_u, src_inode,
BTREE_ITER_INTENT);
ret = PTR_ERR_OR_ZERO(src_inode_iter);
ret = bch2_inode_peek(trans, &src_inode_iter, src_inode_u, src_inode,
BTREE_ITER_INTENT);
if (ret)
goto err;
if (dst_inode) {
dst_inode_iter = bch2_inode_peek(trans, dst_inode_u, dst_inode,
BTREE_ITER_INTENT);
ret = PTR_ERR_OR_ZERO(dst_inode_iter);
ret = bch2_inode_peek(trans, &dst_inode_iter, dst_inode_u, dst_inode,
BTREE_ITER_INTENT);
if (ret)
goto err;
}
......@@ -357,18 +350,18 @@ int bch2_rename_trans(struct btree_trans *trans,
if (dst_inode)
dst_inode_u->bi_ctime = now;
ret = bch2_inode_write(trans, src_dir_iter, src_dir_u) ?:
ret = bch2_inode_write(trans, &src_dir_iter, src_dir_u) ?:
(src_dir != dst_dir
? bch2_inode_write(trans, dst_dir_iter, dst_dir_u)
? bch2_inode_write(trans, &dst_dir_iter, dst_dir_u)
: 0 ) ?:
bch2_inode_write(trans, src_inode_iter, src_inode_u) ?:
bch2_inode_write(trans, &src_inode_iter, src_inode_u) ?:
(dst_inode
? bch2_inode_write(trans, dst_inode_iter, dst_inode_u)
? bch2_inode_write(trans, &dst_inode_iter, dst_inode_u)
: 0 );
err:
bch2_trans_iter_put(trans, dst_inode_iter);
bch2_trans_iter_put(trans, src_inode_iter);
bch2_trans_iter_put(trans, dst_dir_iter);
bch2_trans_iter_put(trans, src_dir_iter);
bch2_trans_iter_exit(trans, &dst_inode_iter);
bch2_trans_iter_exit(trans, &src_inode_iter);
bch2_trans_iter_exit(trans, &dst_dir_iter);
bch2_trans_iter_exit(trans, &src_dir_iter);
return ret;
}
......@@ -867,7 +867,7 @@ void bch2_readahead(struct readahead_control *ractl)
struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
struct btree_trans trans;
struct btree_iter *iter;
struct btree_iter iter;
struct page *page;
struct readpages_iter readpages_iter;
int ret;
......@@ -876,8 +876,8 @@ void bch2_readahead(struct readahead_control *ractl)
BUG_ON(ret);
bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_extents, POS_MIN,
BTREE_ITER_SLOTS);
bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents, POS_MIN,
BTREE_ITER_SLOTS);
bch2_pagecache_add_get(&inode->ei_pagecache_lock);
......@@ -898,13 +898,13 @@ void bch2_readahead(struct readahead_control *ractl)
rbio->bio.bi_end_io = bch2_readpages_end_io;
BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0));
bchfs_read(&trans, iter, rbio, inode->v.i_ino,
bchfs_read(&trans, &iter, rbio, inode->v.i_ino,
&readpages_iter);
}
bch2_pagecache_add_put(&inode->ei_pagecache_lock);
bch2_trans_iter_put(&trans, iter);
bch2_trans_iter_exit(&trans, &iter);
bch2_trans_exit(&trans);
kfree(readpages_iter.pages);
}
......@@ -913,7 +913,7 @@ static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio,
u64 inum, struct page *page)
{
struct btree_trans trans;
struct btree_iter *iter;
struct btree_iter iter;
bch2_page_state_create(page, __GFP_NOFAIL);
......@@ -923,12 +923,12 @@ static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio,
BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0));
bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_extents, POS_MIN,
BTREE_ITER_SLOTS);
bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents, POS_MIN,
BTREE_ITER_SLOTS);
bchfs_read(&trans, iter, rbio, inum, NULL);
bchfs_read(&trans, &iter, rbio, inum, NULL);
bch2_trans_iter_put(&trans, iter);
bch2_trans_iter_exit(&trans, &iter);
bch2_trans_exit(&trans);
}
......@@ -2146,7 +2146,7 @@ static inline int range_has_data(struct bch_fs *c,
struct bpos end)
{
struct btree_trans trans;
struct btree_iter *iter;
struct btree_iter iter;
struct bkey_s_c k;
int ret = 0;
......@@ -2161,7 +2161,7 @@ static inline int range_has_data(struct bch_fs *c,
break;
}
}
bch2_trans_iter_put(&trans, iter);
bch2_trans_iter_exit(&trans, &iter);
return bch2_trans_exit(&trans) ?: ret;
}
......@@ -2471,7 +2471,7 @@ static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
struct address_space *mapping = inode->v.i_mapping;
struct bkey_buf copy;
struct btree_trans trans;
struct btree_iter *src, *dst, *del;
struct btree_iter src, dst, del;
loff_t shift, new_size;
u64 src_start;
int ret = 0;
......@@ -2536,11 +2536,11 @@ static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
bch2_bkey_buf_init(&copy);
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
src = bch2_trans_get_iter(&trans, BTREE_ID_extents,
bch2_trans_iter_init(&trans, &src, BTREE_ID_extents,
POS(inode->v.i_ino, src_start >> 9),
BTREE_ITER_INTENT);
dst = bch2_trans_copy_iter(&trans, src);
del = bch2_trans_copy_iter(&trans, src);
bch2_trans_copy_iter(&dst, &src);
bch2_trans_copy_iter(&del, &src);
while (ret == 0 || ret == -EINTR) {
struct disk_reservation disk_res =
......@@ -2555,8 +2555,8 @@ static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
bch2_trans_begin(&trans);
k = insert
? bch2_btree_iter_peek_prev(src)
: bch2_btree_iter_peek(src);
? bch2_btree_iter_peek_prev(&src)
: bch2_btree_iter_peek(&src);
if ((ret = bkey_err(k)))
continue;
......@@ -2574,9 +2574,9 @@ static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
bch2_cut_front(move_pos, copy.k);
copy.k->k.p.offset += shift >> 9;
bch2_btree_iter_set_pos(dst, bkey_start_pos(&copy.k->k));
bch2_btree_iter_set_pos(&dst, bkey_start_pos(&copy.k->k));
ret = bch2_extent_atomic_end(&trans, dst, copy.k, &atomic_end);
ret = bch2_extent_atomic_end(&trans, &dst, copy.k, &atomic_end);
if (ret)
continue;
......@@ -2594,7 +2594,7 @@ static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
delete.k.p = copy.k->k.p;
delete.k.size = copy.k->k.size;
delete.k.p.offset -= shift >> 9;
bch2_btree_iter_set_pos(del, bkey_start_pos(&delete.k));
bch2_btree_iter_set_pos(&del, bkey_start_pos(&delete.k));
next_pos = insert ? bkey_start_pos(&delete.k) : delete.k.p;
......@@ -2615,20 +2615,20 @@ static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
BUG_ON(ret);
}
ret = bch2_btree_iter_traverse(del) ?:
bch2_trans_update(&trans, del, &delete, trigger_flags) ?:
bch2_trans_update(&trans, dst, copy.k, trigger_flags) ?:
ret = bch2_btree_iter_traverse(&del) ?:
bch2_trans_update(&trans, &del, &delete, trigger_flags) ?:
bch2_trans_update(&trans, &dst, copy.k, trigger_flags) ?:
bch2_trans_commit(&trans, &disk_res,
&inode->ei_journal_seq,
BTREE_INSERT_NOFAIL);
bch2_disk_reservation_put(c, &disk_res);
if (!ret)
bch2_btree_iter_set_pos(src, next_pos);
bch2_btree_iter_set_pos(&src, next_pos);
}
bch2_trans_iter_put(&trans, del);
bch2_trans_iter_put(&trans, dst);
bch2_trans_iter_put(&trans, src);
bch2_trans_iter_exit(&trans, &del);
bch2_trans_iter_exit(&trans, &dst);
bch2_trans_iter_exit(&trans, &src);
bch2_trans_exit(&trans);
bch2_bkey_buf_exit(&copy, c);
......@@ -2653,18 +2653,18 @@ static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
{
struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct btree_trans trans;
struct btree_iter *iter;
struct btree_iter iter;
struct bpos end_pos = POS(inode->v.i_ino, end_sector);
unsigned replicas = io_opts(c, &inode->ei_inode).data_replicas;
int ret = 0;
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 512);
iter = bch2_trans_get_iter(&trans, BTREE_ID_extents,
bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
POS(inode->v.i_ino, start_sector),
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
while (!ret && bkey_cmp(iter->pos, end_pos) < 0) {
while (!ret && bkey_cmp(iter.pos, end_pos) < 0) {
s64 i_sectors_delta = 0;
struct disk_reservation disk_res = { 0 };
struct quota_res quota_res = { 0 };
......@@ -2674,20 +2674,20 @@ static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
bch2_trans_begin(&trans);
k = bch2_btree_iter_peek_slot(iter);
k = bch2_btree_iter_peek_slot(&iter);
if ((ret = bkey_err(k)))
goto bkey_err;
/* already reserved */
if (k.k->type == KEY_TYPE_reservation &&
bkey_s_c_to_reservation(k).v->nr_replicas >= replicas) {
bch2_btree_iter_advance(iter);
bch2_btree_iter_advance(&iter);
continue;
}
if (bkey_extent_is_data(k.k) &&
!(mode & FALLOC_FL_ZERO_RANGE)) {
bch2_btree_iter_advance(iter);
bch2_btree_iter_advance(&iter);
continue;
}
......@@ -2696,7 +2696,7 @@ static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
reservation.k.p = k.k->p;
reservation.k.size = k.k->size;
bch2_cut_front(iter->pos, &reservation.k_i);
bch2_cut_front(iter.pos, &reservation.k_i);
bch2_cut_back(end_pos, &reservation.k_i);
sectors = reservation.k.size;
......@@ -2720,7 +2720,7 @@ static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
reservation.v.nr_replicas = disk_res.nr_replicas;
}
ret = bch2_extent_update(&trans, iter, &reservation.k_i,
ret = bch2_extent_update(&trans, &iter, &reservation.k_i,
&disk_res, &inode->ei_journal_seq,
0, &i_sectors_delta, true);
i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
......@@ -2730,7 +2730,7 @@ static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
if (ret == -EINTR)
ret = 0;
}
bch2_trans_iter_put(&trans, iter);
bch2_trans_iter_exit(&trans, &iter);
bch2_trans_exit(&trans);
return ret;
}
......@@ -3010,7 +3010,7 @@ static loff_t bch2_seek_data(struct file *file, u64 offset)
struct bch_inode_info *inode = file_bch_inode(file);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct btree_trans trans;
struct btree_iter *iter;
struct btree_iter iter;
struct bkey_s_c k;
u64 isize, next_data = MAX_LFS_FILESIZE;
int ret;
......@@ -3031,7 +3031,7 @@ static loff_t bch2_seek_data(struct file *file, u64 offset)
} else if (k.k->p.offset >> 9 > isize)
break;
}
bch2_trans_iter_put(&trans, iter);
bch2_trans_iter_exit(&trans, &iter);
ret = bch2_trans_exit(&trans) ?: ret;
if (ret)
......@@ -3106,7 +3106,7 @@ static loff_t bch2_seek_hole(struct file *file, u64 offset)
struct bch_inode_info *inode = file_bch_inode(file);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct btree_trans trans;
struct btree_iter *iter;
struct btree_iter iter;
struct bkey_s_c k;
u64 isize, next_hole = MAX_LFS_FILESIZE;
int ret;
......@@ -3135,7 +3135,7 @@ static loff_t bch2_seek_hole(struct file *file, u64 offset)
offset = max(offset, bkey_start_offset(k.k) << 9);
}
}
bch2_trans_iter_put(&trans, iter);
bch2_trans_iter_exit(&trans, &iter);
ret = bch2_trans_exit(&trans) ?: ret;
if (ret)
......
......@@ -142,7 +142,7 @@ int __must_check bch2_write_inode(struct bch_fs *c,
void *p, unsigned fields)
{
struct btree_trans trans;
struct btree_iter *iter;
struct btree_iter iter = { NULL };
struct bch_inode_unpacked inode_u;
int ret;
......@@ -150,11 +150,10 @@ int __must_check bch2_write_inode(struct bch_fs *c,
retry:
bch2_trans_begin(&trans);
iter = bch2_inode_peek(&trans, &inode_u, inode->v.i_ino,
BTREE_ITER_INTENT);
ret = PTR_ERR_OR_ZERO(iter) ?:
ret = bch2_inode_peek(&trans, &iter, &inode_u, inode->v.i_ino,
BTREE_ITER_INTENT) ?:
(set ? set(inode, &inode_u, p) : 0) ?:
bch2_inode_write(&trans, iter, &inode_u) ?:
bch2_inode_write(&trans, &iter, &inode_u) ?:
bch2_trans_commit(&trans, NULL,
&inode->ei_journal_seq,
BTREE_INSERT_NOFAIL);
......@@ -166,7 +165,7 @@ int __must_check bch2_write_inode(struct bch_fs *c,
if (!ret)
bch2_inode_update_after_write(c, inode, &inode_u, fields);
bch2_trans_iter_put(&trans, iter);
bch2_trans_iter_exit(&trans, &iter);
if (ret == -EINTR)
goto retry;
......@@ -687,7 +686,7 @@ int bch2_setattr_nonsize(struct mnt_idmap *idmap,
struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct bch_qid qid;
struct btree_trans trans;
struct btree_iter *inode_iter;
struct btree_iter inode_iter = { NULL };
struct bch_inode_unpacked inode_u;
struct posix_acl *acl = NULL;
int ret;
......@@ -713,9 +712,8 @@ int bch2_setattr_nonsize(struct mnt_idmap *idmap,
kfree(acl);
acl = NULL;
inode_iter = bch2_inode_peek(&trans, &inode_u, inode->v.i_ino,
BTREE_ITER_INTENT);
ret = PTR_ERR_OR_ZERO(inode_iter);
ret = bch2_inode_peek(&trans, &inode_iter, &inode_u, inode->v.i_ino,
BTREE_ITER_INTENT);
if (ret)
goto btree_err;
......@@ -727,12 +725,12 @@ int bch2_setattr_nonsize(struct mnt_idmap *idmap,
goto btree_err;
}
ret = bch2_inode_write(&trans, inode_iter, &inode_u) ?:
ret = bch2_inode_write(&trans, &inode_iter, &inode_u) ?:
bch2_trans_commit(&trans, NULL,
&inode->ei_journal_seq,
BTREE_INSERT_NOFAIL);
btree_err:
bch2_trans_iter_put(&trans, inode_iter);
bch2_trans_iter_exit(&trans, &inode_iter);
if (ret == -EINTR)
goto retry;
......@@ -883,7 +881,7 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
struct bch_fs *c = vinode->i_sb->s_fs_info;
struct bch_inode_info *ei = to_bch_ei(vinode);
struct btree_trans trans;
struct btree_iter *iter;
struct btree_iter iter;
struct bkey_s_c k;
struct bkey_buf cur, prev;
struct bpos end = POS(ei->v.i_ino, (start + len) >> 9);
......@@ -902,23 +900,23 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
bch2_bkey_buf_init(&prev);
bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_extents,
POS(ei->v.i_ino, start >> 9), 0);
bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
POS(ei->v.i_ino, start >> 9), 0);
retry:
bch2_trans_begin(&trans);
while ((k = bch2_btree_iter_peek(iter)).k &&
while ((k = bch2_btree_iter_peek(&iter)).k &&
!(ret = bkey_err(k)) &&
bkey_cmp(iter->pos, end) < 0) {
bkey_cmp(iter.pos, end) < 0) {
enum btree_id data_btree = BTREE_ID_extents;
if (!bkey_extent_is_data(k.k) &&
k.k->type != KEY_TYPE_reservation) {
bch2_btree_iter_advance(iter);
bch2_btree_iter_advance(&iter);
continue;
}
offset_into_extent = iter->pos.offset -
offset_into_extent = iter.pos.offset -
bkey_start_offset(k.k);
sectors = k.k->size - offset_into_extent;
......@@ -939,7 +937,7 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
offset_into_extent),
cur.k);
bch2_key_resize(&cur.k->k, sectors);
cur.k->k.p = iter->pos;
cur.k->k.p = iter.pos;
cur.k->k.p.offset += cur.k->k.size;
if (have_extent) {
......@@ -952,8 +950,8 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
bkey_copy(prev.k, cur.k);
have_extent = true;
bch2_btree_iter_set_pos(iter,
POS(iter->pos.inode, iter->pos.offset + sectors));
bch2_btree_iter_set_pos(&iter,
POS(iter.pos.inode, iter.pos.offset + sectors));
}
if (ret == -EINTR)
......@@ -963,7 +961,7 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
ret = bch2_fill_extent(c, info, bkey_i_to_s_c(prev.k),
FIEMAP_EXTENT_LAST);
bch2_trans_iter_put(&trans, iter);
bch2_trans_iter_exit(&trans, &iter);
ret = bch2_trans_exit(&trans) ?: ret;
bch2_bkey_buf_exit(&cur, c);
bch2_bkey_buf_exit(&prev, c);
......
This diff is collapsed.
......@@ -292,18 +292,18 @@ int bch2_inode_unpack(struct bkey_s_c_inode inode,
return 0;
}
struct btree_iter *bch2_inode_peek(struct btree_trans *trans,
struct bch_inode_unpacked *inode,
u64 inum, unsigned flags)
int bch2_inode_peek(struct btree_trans *trans,
struct btree_iter *iter,
struct bch_inode_unpacked *inode,
u64 inum, unsigned flags)
{
struct btree_iter *iter;
struct bkey_s_c k;
int ret;
if (trans->c->opts.inodes_use_key_cache)
flags |= BTREE_ITER_CACHED;
iter = bch2_trans_get_iter(trans, BTREE_ID_inodes, POS(0, inum), flags);
bch2_trans_iter_init(trans, iter, BTREE_ID_inodes, POS(0, inum), flags);
k = bch2_btree_iter_peek_slot(iter);
ret = bkey_err(k);
if (ret)
......@@ -317,10 +317,10 @@ struct btree_iter *bch2_inode_peek(struct btree_trans *trans,
if (ret)
goto err;
return iter;
return 0;
err:
bch2_trans_iter_put(trans, iter);
return ERR_PTR(ret);
bch2_trans_iter_exit(trans, iter);
return ret;
}
int bch2_inode_write(struct btree_trans *trans,
......@@ -482,12 +482,12 @@ static inline u32 bkey_generation(struct bkey_s_c k)
}
}
struct btree_iter *bch2_inode_create(struct btree_trans *trans,
struct bch_inode_unpacked *inode_u,
u32 snapshot, u64 cpu)
int bch2_inode_create(struct btree_trans *trans,
struct btree_iter *iter,
struct bch_inode_unpacked *inode_u,
u32 snapshot, u64 cpu)
{
struct bch_fs *c = trans->c;
struct btree_iter *iter = NULL;
struct bkey_s_c k;
u64 min, max, start, pos, *hint;
int ret = 0;
......@@ -513,9 +513,9 @@ struct btree_iter *bch2_inode_create(struct btree_trans *trans,
start = min;
pos = start;
iter = bch2_trans_get_iter(trans, BTREE_ID_inodes, POS(0, pos),
BTREE_ITER_ALL_SNAPSHOTS|
BTREE_ITER_INTENT);
bch2_trans_iter_init(trans, iter, BTREE_ID_inodes, POS(0, pos),
BTREE_ITER_ALL_SNAPSHOTS|
BTREE_ITER_INTENT);
again:
while ((k = bch2_btree_iter_peek(iter)).k &&
!(ret = bkey_err(k)) &&
......@@ -553,8 +553,8 @@ struct btree_iter *bch2_inode_create(struct btree_trans *trans,
ret = -ENOSPC;
if (ret) {
bch2_trans_iter_put(trans, iter);
return ERR_PTR(ret);
bch2_trans_iter_exit(trans, iter);
return ret;
}
/* Retry from start */
......@@ -566,8 +566,8 @@ struct btree_iter *bch2_inode_create(struct btree_trans *trans,
k = bch2_btree_iter_peek_slot(iter);
ret = bkey_err(k);
if (ret) {
bch2_trans_iter_put(trans, iter);
return ERR_PTR(ret);
bch2_trans_iter_exit(trans, iter);
return ret;
}
/* We may have raced while the iterator wasn't pointing at pos: */
......@@ -578,13 +578,13 @@ struct btree_iter *bch2_inode_create(struct btree_trans *trans,
*hint = k.k->p.offset;
inode_u->bi_inum = k.k->p.offset;
inode_u->bi_generation = bkey_generation(k);
return iter;
return 0;
}
int bch2_inode_rm(struct bch_fs *c, u64 inode_nr, bool cached)
{
struct btree_trans trans;
struct btree_iter *iter = NULL;
struct btree_iter iter = { NULL };
struct bkey_i_inode_generation delete;
struct bpos start = POS(inode_nr, 0);
struct bpos end = POS(inode_nr + 1, 0);
......@@ -617,9 +617,9 @@ int bch2_inode_rm(struct bch_fs *c, u64 inode_nr, bool cached)
retry:
bch2_trans_begin(&trans);
iter = bch2_trans_get_iter(&trans, BTREE_ID_inodes,
POS(0, inode_nr), iter_flags);
k = bch2_btree_iter_peek_slot(iter);
bch2_trans_iter_init(&trans, &iter, BTREE_ID_inodes,
POS(0, inode_nr), iter_flags);
k = bch2_btree_iter_peek_slot(&iter);
ret = bkey_err(k);
if (ret)
......@@ -636,14 +636,14 @@ int bch2_inode_rm(struct bch_fs *c, u64 inode_nr, bool cached)
bch2_inode_unpack(bkey_s_c_to_inode(k), &inode_u);
bkey_inode_generation_init(&delete.k_i);
delete.k.p = iter->pos;
delete.k.p = iter.pos;
delete.v.bi_generation = cpu_to_le32(inode_u.bi_generation + 1);
ret = bch2_trans_update(&trans, iter, &delete.k_i, 0) ?:
ret = bch2_trans_update(&trans, &iter, &delete.k_i, 0) ?:
bch2_trans_commit(&trans, NULL, NULL,
BTREE_INSERT_NOFAIL);
err:
bch2_trans_iter_put(&trans, iter);
bch2_trans_iter_exit(&trans, &iter);
if (ret == -EINTR)
goto retry;
......@@ -654,12 +654,11 @@ int bch2_inode_rm(struct bch_fs *c, u64 inode_nr, bool cached)
static int bch2_inode_find_by_inum_trans(struct btree_trans *trans, u64 inode_nr,
struct bch_inode_unpacked *inode)
{
struct btree_iter *iter;
struct btree_iter iter = { NULL };
int ret;
iter = bch2_inode_peek(trans, inode, inode_nr, 0);
ret = PTR_ERR_OR_ZERO(iter);
bch2_trans_iter_put(trans, iter);
ret = bch2_inode_peek(trans, &iter, inode, inode_nr, 0);
bch2_trans_iter_exit(trans, &iter);
return ret;
}
......
......@@ -57,8 +57,8 @@ int bch2_inode_unpack(struct bkey_s_c_inode, struct bch_inode_unpacked *);
void bch2_inode_unpacked_to_text(struct printbuf *, struct bch_inode_unpacked *);
struct btree_iter *bch2_inode_peek(struct btree_trans *,
struct bch_inode_unpacked *, u64, unsigned);
int bch2_inode_peek(struct btree_trans *, struct btree_iter *,
struct bch_inode_unpacked *, u64, unsigned);
int bch2_inode_write(struct btree_trans *, struct btree_iter *,
struct bch_inode_unpacked *);
......@@ -71,8 +71,8 @@ void bch2_inode_init(struct bch_fs *, struct bch_inode_unpacked *,
uid_t, gid_t, umode_t, dev_t,
struct bch_inode_unpacked *);
struct btree_iter *bch2_inode_create(struct btree_trans *,
struct bch_inode_unpacked *, u32, u64);
int bch2_inode_create(struct btree_trans *, struct btree_iter *,
struct bch_inode_unpacked *, u32, u64);
int bch2_inode_rm(struct bch_fs *, u64, bool);
......
This diff is collapsed.
......@@ -250,7 +250,7 @@ void bch2_blacklist_entries_gc(struct work_struct *work)
bch2_trans_init(&trans, c, 0, 0);
for (i = 0; i < BTREE_ID_NR; i++) {
struct btree_iter *iter;
struct btree_iter iter;
struct btree *b;
for_each_btree_node(&trans, iter, i, POS_MIN,
......@@ -259,7 +259,7 @@ void bch2_blacklist_entries_gc(struct work_struct *work)
bch2_trans_exit(&trans);
return;
}
bch2_trans_iter_free(&trans, iter);
bch2_trans_iter_exit(&trans, &iter);
}
ret = bch2_trans_exit(&trans);
......
......@@ -39,7 +39,7 @@ static int __bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags
enum btree_id btree_id)
{
struct btree_trans trans;
struct btree_iter *iter;
struct btree_iter iter;
struct bkey_s_c k;
struct bkey_buf sk;
int ret = 0;
......@@ -47,13 +47,13 @@ static int __bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags
bch2_bkey_buf_init(&sk);
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
iter = bch2_trans_get_iter(&trans, btree_id, POS_MIN,
BTREE_ITER_PREFETCH);
bch2_trans_iter_init(&trans, &iter, btree_id, POS_MIN,
BTREE_ITER_PREFETCH);
while ((k = bch2_btree_iter_peek(iter)).k &&
while ((k = bch2_btree_iter_peek(&iter)).k &&
!(ret = bkey_err(k))) {
if (!bch2_bkey_has_device(k, dev_idx)) {
bch2_btree_iter_advance(iter);
bch2_btree_iter_advance(&iter);
continue;
}
......@@ -71,10 +71,10 @@ static int __bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags
*/
bch2_extent_normalize(c, bkey_i_to_s(sk.k));
bch2_btree_iter_set_pos(iter, bkey_start_pos(&sk.k->k));
bch2_btree_iter_set_pos(&iter, bkey_start_pos(&sk.k->k));
ret = bch2_btree_iter_traverse(iter) ?:
bch2_trans_update(&trans, iter, sk.k, 0) ?:
ret = bch2_btree_iter_traverse(&iter) ?:
bch2_trans_update(&trans, &iter, sk.k, 0) ?:
bch2_trans_commit(&trans, NULL, NULL,
BTREE_INSERT_NOFAIL);
......@@ -88,7 +88,7 @@ static int __bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags
if (ret)
break;
}
bch2_trans_iter_put(&trans, iter);
bch2_trans_iter_exit(&trans, &iter);
ret = bch2_trans_exit(&trans) ?: ret;
bch2_bkey_buf_exit(&sk, c);
......@@ -107,7 +107,7 @@ static int bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
static int bch2_dev_metadata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
{
struct btree_trans trans;
struct btree_iter *iter;
struct btree_iter iter;
struct closure cl;
struct btree *b;
struct bkey_buf k;
......@@ -139,9 +139,9 @@ static int bch2_dev_metadata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
break;
}
ret = bch2_btree_node_update_key(&trans, iter, b, k.k, false);
ret = bch2_btree_node_update_key(&trans, &iter, b, k.k, false);
if (ret == -EINTR) {
b = bch2_btree_iter_peek_node(iter);
b = bch2_btree_iter_peek_node(&iter);
ret = 0;
goto retry;
}
......@@ -150,7 +150,7 @@ static int bch2_dev_metadata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
break;
}
}
bch2_trans_iter_free(&trans, iter);
bch2_trans_iter_exit(&trans, &iter);
if (ret)
goto err;
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment