Commit 50dc0f69 authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Require all btree iterators to be freed

We keep running into occasional bugs with btree transaction iterators
overflowing - this will make those bugs more visible.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 8d956c2f
......@@ -241,12 +241,12 @@ struct posix_acl *bch2_get_acl(struct mnt_idmap *idmap,
}
xattr = bkey_s_c_to_xattr(bch2_btree_iter_peek_slot(iter));
acl = bch2_acl_from_disk(xattr_val(xattr.v),
le16_to_cpu(xattr.v->x_val_len));
if (!IS_ERR(acl))
set_cached_acl(&inode->v, type, acl);
bch2_trans_iter_put(&trans, iter);
out:
bch2_trans_exit(&trans);
return acl;
......@@ -313,7 +313,7 @@ int bch2_set_acl(struct mnt_idmap *idmap,
if (type == ACL_TYPE_ACCESS) {
ret = posix_acl_update_mode(idmap, &inode->v, &mode, &acl);
if (ret)
goto err;
goto btree_err;
}
hash_info = bch2_hash_info_init(c, &inode_u);
......@@ -330,6 +330,8 @@ int bch2_set_acl(struct mnt_idmap *idmap,
&inode->ei_journal_seq,
BTREE_INSERT_NOUNLOCK);
btree_err:
bch2_trans_iter_put(&trans, inode_iter);
if (ret == -EINTR)
goto retry;
if (unlikely(ret))
......@@ -356,21 +358,22 @@ int bch2_acl_chmod(struct btree_trans *trans,
struct bkey_s_c_xattr xattr;
struct bkey_i_xattr *new;
struct posix_acl *acl;
int ret = 0;
int ret;
iter = bch2_hash_lookup(trans, bch2_xattr_hash_desc,
&hash_info, inode->bi_inum,
&X_SEARCH(KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS, "", 0),
BTREE_ITER_INTENT);
if (IS_ERR(iter))
return PTR_ERR(iter) != -ENOENT ? PTR_ERR(iter) : 0;
ret = PTR_ERR_OR_ZERO(iter);
if (ret)
return ret == -ENOENT ? 0 : ret;
xattr = bkey_s_c_to_xattr(bch2_btree_iter_peek_slot(iter));
acl = bch2_acl_from_disk(xattr_val(xattr.v),
le16_to_cpu(xattr.v->x_val_len));
if (IS_ERR_OR_NULL(acl))
return PTR_ERR(acl);
ret = PTR_ERR_OR_ZERO(acl);
if (ret || !acl)
goto err;
ret = __posix_acl_chmod(&acl, GFP_KERNEL, mode);
if (ret)
......@@ -387,6 +390,7 @@ int bch2_acl_chmod(struct btree_trans *trans,
*new_acl = acl;
acl = NULL;
err:
bch2_trans_iter_put(trans, iter);
kfree(acl);
return ret;
}
......
......@@ -385,7 +385,6 @@ int bch2_alloc_write(struct bch_fs *c, unsigned flags)
int ret = 0;
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_alloc, POS_MIN,
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
......@@ -405,6 +404,7 @@ int bch2_alloc_write(struct bch_fs *c, unsigned flags)
}
}
err:
bch2_trans_iter_put(&trans, iter);
bch2_trans_exit(&trans);
return ret;
}
......@@ -926,7 +926,6 @@ static int bch2_invalidate_buckets(struct bch_fs *c, struct bch_dev *ca)
int ret = 0;
bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_alloc,
POS(ca->dev_idx, 0),
BTREE_ITER_CACHED|
......@@ -942,6 +941,7 @@ static int bch2_invalidate_buckets(struct bch_fs *c, struct bch_dev *ca)
(!fifo_empty(&ca->free_inc)
? BTREE_INSERT_NOWAIT : 0));
bch2_trans_iter_put(&trans, iter);
bch2_trans_exit(&trans);
/* If we used NOWAIT, don't return the error: */
......
......@@ -456,6 +456,8 @@ static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id,
bch2_trans_cond_resched(&trans);
}
bch2_trans_iter_put(&trans, iter);
ret = bch2_trans_exit(&trans) ?: ret;
if (ret)
return ret;
......@@ -1212,6 +1214,7 @@ static int bch2_gc_btree_gens(struct bch_fs *c, enum btree_id btree_id)
bch2_btree_iter_next(iter);
}
bch2_trans_iter_put(&trans, iter);
bch2_trans_exit(&trans);
bch2_bkey_buf_exit(&sk, c);
......@@ -1509,6 +1512,7 @@ static int bch2_coalesce_btree(struct bch_fs *c, enum btree_id btree_id)
struct btree *b;
bool kthread = (current->flags & PF_KTHREAD) != 0;
unsigned i;
int ret = 0;
/* Sliding window of adjacent btree nodes */
struct btree *merge[GC_MERGE_NODES];
......@@ -1557,8 +1561,8 @@ static int bch2_coalesce_btree(struct bch_fs *c, enum btree_id btree_id)
lock_seq[0] = merge[0]->c.lock.state.seq;
if (kthread && kthread_should_stop()) {
bch2_trans_exit(&trans);
return -ESHUTDOWN;
ret = -ESHUTDOWN;
break;
}
bch2_trans_cond_resched(&trans);
......@@ -1573,7 +1577,9 @@ static int bch2_coalesce_btree(struct bch_fs *c, enum btree_id btree_id)
memset(merge + 1, 0,
(GC_MERGE_NODES - 1) * sizeof(merge[0]));
}
return bch2_trans_exit(&trans);
bch2_trans_iter_put(&trans, iter);
return bch2_trans_exit(&trans) ?: ret;
}
/**
......
......@@ -1208,6 +1208,7 @@ static void bch2_btree_node_write_error(struct bch_fs *c,
if (ret)
goto err;
out:
bch2_trans_iter_put(&trans, iter);
bch2_trans_exit(&trans);
bch2_bkey_buf_exit(&k, c);
bio_put(&wbio->wbio.bio);
......
......@@ -9,6 +9,7 @@
#include "btree_locking.h"
#include "btree_update.h"
#include "debug.h"
#include "error.h"
#include "extents.h"
#include "journal.h"
#include "trace.h"
......@@ -2116,6 +2117,7 @@ struct btree_iter *bch2_trans_get_node_iter(struct btree_trans *trans,
for (i = 0; i < ARRAY_SIZE(iter->l); i++)
iter->l[i].b = NULL;
iter->l[iter->level].b = BTREE_ITER_NO_NODE_INIT;
iter->ip_allocated = _RET_IP_;
return iter;
}
......@@ -2224,6 +2226,8 @@ void bch2_trans_reset(struct btree_trans *trans, unsigned flags)
(void *) &trans->fs_usage_deltas->memset_start);
}
bch2_trans_cond_resched(trans);
if (!(flags & TRANS_RESET_NOTRAVERSE))
bch2_btree_iter_traverse_all(trans);
}
......@@ -2290,6 +2294,19 @@ int bch2_trans_exit(struct btree_trans *trans)
bch2_trans_unlock(trans);
#ifdef CONFIG_BCACHEFS_DEBUG
if (trans->iters_live) {
struct btree_iter *iter;
bch_err(c, "btree iterators leaked!");
trans_for_each_iter(trans, iter)
if (btree_iter_live(trans, iter))
printk(KERN_ERR " btree %s allocated at %pS\n",
bch2_btree_ids[iter->btree_id],
(void *) iter->ip_allocated);
/* Be noisy about this: */
bch2_fatal_error(c);
}
mutex_lock(&trans->c->btree_trans_lock);
list_del(&trans->list);
mutex_unlock(&trans->c->btree_trans_lock);
......
......@@ -242,6 +242,8 @@ static ssize_t bch2_read_btree(struct file *file, char __user *buf,
if (!i->size)
break;
}
bch2_trans_iter_put(&trans, iter);
bch2_trans_exit(&trans);
return err < 0 ? err : i->ret;
......@@ -294,6 +296,8 @@ static ssize_t bch2_read_btree_formats(struct file *file, char __user *buf,
if (!i->size)
break;
}
bch2_trans_iter_put(&trans, iter);
bch2_trans_exit(&trans);
return err < 0 ? err : i->ret;
......
......@@ -321,6 +321,7 @@ u64 bch2_dirent_lookup(struct bch_fs *c, u64 dir_inum,
k = bch2_btree_iter_peek_slot(iter);
inum = le64_to_cpu(bkey_s_c_to_dirent(k).v->d_inum);
bch2_trans_iter_put(&trans, iter);
out:
bch2_trans_exit(&trans);
return inum;
......@@ -379,6 +380,8 @@ int bch2_readdir(struct bch_fs *c, u64 inum, struct dir_context *ctx)
break;
ctx->pos = dirent.k->p.offset + 1;
}
bch2_trans_iter_put(&trans, iter);
ret = bch2_trans_exit(&trans) ?: ret;
return ret;
......
......@@ -873,6 +873,7 @@ static int ec_stripe_update_ptrs(struct bch_fs *c,
if (ret)
break;
}
bch2_trans_iter_put(&trans, iter);
bch2_trans_exit(&trans);
bch2_bkey_buf_exit(&sk, c);
......@@ -1663,12 +1664,13 @@ int bch2_ec_mem_alloc(struct bch_fs *c, bool gc)
int ret = 0;
bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_stripes, POS(0, U64_MAX), 0);
k = bch2_btree_iter_prev(iter);
if (!IS_ERR_OR_NULL(k.k))
idx = k.k->p.offset + 1;
bch2_trans_iter_put(&trans, iter);
ret = bch2_trans_exit(&trans);
if (ret)
return ret;
......
......@@ -687,6 +687,8 @@ bool bch2_check_range_allocated(struct bch_fs *c, struct bpos pos, u64 size,
break;
}
}
bch2_trans_iter_put(&trans, iter);
bch2_trans_exit(&trans);
return ret;
......
......@@ -866,7 +866,6 @@ void bch2_readahead(struct readahead_control *ractl)
BUG_ON(ret);
bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_extents, POS_MIN,
BTREE_ITER_SLOTS);
......@@ -895,6 +894,7 @@ void bch2_readahead(struct readahead_control *ractl)
bch2_pagecache_add_put(&inode->ei_pagecache_lock);
bch2_trans_iter_put(&trans, iter);
bch2_trans_exit(&trans);
kfree(readpages_iter.pages);
}
......@@ -918,6 +918,7 @@ static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio,
bchfs_read(&trans, iter, rbio, inum, NULL);
bch2_trans_iter_put(&trans, iter);
bch2_trans_exit(&trans);
}
......@@ -2155,6 +2156,7 @@ static inline int range_has_data(struct bch_fs *c,
break;
}
}
bch2_trans_iter_put(&trans, iter);
return bch2_trans_exit(&trans) ?: ret;
}
......@@ -2325,6 +2327,7 @@ int bch2_truncate(struct bch_inode_info *inode, struct iattr *iattr)
bch2_trans_init(&trans, c, 0, 0);
iter = bch2_inode_peek(&trans, &inode_u, inode->v.i_ino, 0);
ret = PTR_ERR_OR_ZERO(iter);
bch2_trans_iter_put(&trans, iter);
bch2_trans_exit(&trans);
if (ret)
......@@ -2459,14 +2462,11 @@ static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
struct btree_iter *src, *dst, *del;
loff_t shift, new_size;
u64 src_start;
int ret;
int ret = 0;
if ((offset | len) & (block_bytes(c) - 1))
return -EINVAL;
bch2_bkey_buf_init(&copy);
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 256);
/*
* We need i_mutex to keep the page cache consistent with the extents
* btree, and the btree consistent with i_size - we don't need outside
......@@ -2522,13 +2522,15 @@ static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
goto err;
}
bch2_bkey_buf_init(&copy);
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 256);
src = bch2_trans_get_iter(&trans, BTREE_ID_extents,
POS(inode->v.i_ino, src_start >> 9),
BTREE_ITER_INTENT);
dst = bch2_trans_copy_iter(&trans, src);
del = bch2_trans_copy_iter(&trans, src);
while (1) {
while (ret == 0 || ret == -EINTR) {
struct disk_reservation disk_res =
bch2_disk_reservation_init(c, 0);
struct bkey_i delete;
......@@ -2542,7 +2544,7 @@ static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
? bch2_btree_iter_peek_prev(src)
: bch2_btree_iter_peek(src);
if ((ret = bkey_err(k)))
goto bkey_err;
continue;
if (!k.k || k.k->p.inode != inode->v.i_ino)
break;
......@@ -2562,7 +2564,7 @@ static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
ret = bch2_extent_atomic_end(dst, copy.k, &atomic_end);
if (ret)
goto bkey_err;
continue;
if (bkey_cmp(atomic_end, copy.k->k.p)) {
if (insert) {
......@@ -2605,18 +2607,18 @@ static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
&inode->ei_journal_seq,
BTREE_INSERT_NOFAIL);
bch2_disk_reservation_put(c, &disk_res);
bkey_err:
if (!ret)
bch2_btree_iter_set_pos(src, next_pos);
if (ret == -EINTR)
ret = 0;
if (ret)
goto err;
bch2_trans_cond_resched(&trans);
}
bch2_trans_unlock(&trans);
bch2_trans_iter_put(&trans, del);
bch2_trans_iter_put(&trans, dst);
bch2_trans_iter_put(&trans, src);
bch2_trans_exit(&trans);
bch2_bkey_buf_exit(&copy, c);
if (ret)
goto err;
if (!insert) {
i_size_write(&inode->v, new_size);
......@@ -2626,8 +2628,6 @@ static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
mutex_unlock(&inode->ei_update_lock);
}
err:
bch2_trans_exit(&trans);
bch2_bkey_buf_exit(&copy, c);
bch2_pagecache_block_put(&inode->ei_pagecache_lock);
inode_unlock(&inode->v);
return ret;
......@@ -2682,7 +2682,7 @@ static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
end_pos = POS(inode->v.i_ino, block_end >> 9);
while (bkey_cmp(iter->pos, end_pos) < 0) {
while (!ret && bkey_cmp(iter->pos, end_pos) < 0) {
s64 i_sectors_delta = 0;
struct disk_reservation disk_res = { 0 };
struct quota_res quota_res = { 0 };
......@@ -2746,9 +2746,11 @@ static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
bch2_disk_reservation_put(c, &disk_res);
if (ret == -EINTR)
ret = 0;
if (ret)
goto err;
}
bch2_trans_iter_put(&trans, iter);
if (ret)
goto err;
/*
* Do we need to extend the file?
......@@ -2770,6 +2772,7 @@ static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
ret = PTR_ERR_OR_ZERO(inode_iter);
} while (ret == -EINTR);
bch2_trans_iter_put(&trans, inode_iter);
bch2_trans_unlock(&trans);
if (ret)
......@@ -3015,6 +3018,7 @@ static loff_t bch2_seek_data(struct file *file, u64 offset)
} else if (k.k->p.offset >> 9 > isize)
break;
}
bch2_trans_iter_put(&trans, iter);
ret = bch2_trans_exit(&trans) ?: ret;
if (ret)
......@@ -3118,6 +3122,7 @@ static loff_t bch2_seek_hole(struct file *file, u64 offset)
offset = max(offset, bkey_start_offset(k.k) << 9);
}
}
bch2_trans_iter_put(&trans, iter);
ret = bch2_trans_exit(&trans) ?: ret;
if (ret)
......
......@@ -734,6 +734,8 @@ static int bch2_setattr_nonsize(struct mnt_idmap *idmap,
BTREE_INSERT_NOUNLOCK|
BTREE_INSERT_NOFAIL);
btree_err:
bch2_trans_iter_put(&trans, inode_iter);
if (ret == -EINTR)
goto retry;
if (unlikely(ret))
......@@ -961,6 +963,7 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
ret = bch2_fill_extent(c, info, bkey_i_to_s_c(prev.k),
FIEMAP_EXTENT_LAST);
bch2_trans_iter_put(&trans, iter);
ret = bch2_trans_exit(&trans) ?: ret;
bch2_bkey_buf_exit(&cur, c);
bch2_bkey_buf_exit(&prev, c);
......
......@@ -1485,11 +1485,12 @@ int bch2_fsck_walk_inodes_only(struct bch_fs *c)
BCH_INODE_I_SECTORS_DIRTY|
BCH_INODE_UNLINKED)) {
ret = check_inode(&trans, NULL, iter, inode, NULL);
BUG_ON(ret == -EINTR);
if (ret)
break;
}
}
bch2_trans_iter_put(&trans, iter);
BUG_ON(ret == -EINTR);
return bch2_trans_exit(&trans) ?: ret;
......
......@@ -620,6 +620,7 @@ int bch2_inode_rm(struct bch_fs *c, u64 inode_nr, bool cached)
ret = bch2_trans_commit(&trans, NULL, NULL,
BTREE_INSERT_NOFAIL);
bch2_trans_iter_put(&trans, iter);
err:
if (ret == -EINTR)
goto retry;
......
......@@ -414,6 +414,8 @@ int bch2_fpunch(struct bch_fs *c, u64 inum, u64 start, u64 end,
ret = bch2_fpunch_at(&trans, iter, POS(inum, end),
journal_seq, i_sectors_delta);
bch2_trans_iter_put(&trans, iter);
bch2_trans_exit(&trans);
if (ret == -EINTR)
......@@ -460,6 +462,7 @@ int bch2_write_index_default(struct bch_write_op *op)
bch2_keylist_pop_front(keys);
} while (!bch2_keylist_empty(keys));
bch2_trans_iter_put(&trans, iter);
bch2_trans_exit(&trans);
bch2_bkey_buf_exit(&sk, c);
......@@ -1659,6 +1662,7 @@ static void bch2_read_retry_nodecode(struct bch_fs *c, struct bch_read_bio *rbio
goto err;
out:
bch2_rbio_done(rbio);
bch2_trans_iter_put(&trans, iter);
bch2_trans_exit(&trans);
bch2_bkey_buf_exit(&sk, c);
return;
......@@ -2259,7 +2263,7 @@ void __bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
k = bch2_btree_iter_peek_slot(iter);
ret = bkey_err(k);
if (ret)
goto err;
break;
offset_into_extent = iter->pos.offset -
bkey_start_offset(k.k);
......@@ -2270,7 +2274,7 @@ void __bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
ret = bch2_read_indirect_extent(&trans, &data_btree,
&offset_into_extent, &sk);
if (ret)
goto err;
break;
k = bkey_i_to_s_c(sk.k);
......@@ -2295,12 +2299,8 @@ void __bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
ret = __bch2_read_extent(&trans, rbio, bvec_iter, iter->pos,
data_btree, k,
offset_into_extent, failed, flags);
switch (ret) {
case READ_RETRY:
goto retry;
case READ_ERR:
goto err;
};
if (ret)
break;
if (flags & BCH_READ_LAST_FRAGMENT)
break;
......@@ -2308,19 +2308,19 @@ void __bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
swap(bvec_iter.bi_size, bytes);
bio_advance_iter(&rbio->bio, &bvec_iter, bytes);
}
out:
bch2_trans_exit(&trans);
bch2_bkey_buf_exit(&sk, c);
return;
err:
if (ret == -EINTR)
bch2_trans_iter_put(&trans, iter);
if (ret == -EINTR || ret == READ_RETRY || ret == READ_RETRY_AVOID)
goto retry;
bch_err_inum_ratelimited(c, inode,
"read error %i from btree lookup", ret);
rbio->bio.bi_status = BLK_STS_IOERR;
bch2_rbio_done(rbio);
goto out;
if (ret) {
bch_err_inum_ratelimited(c, inode,
"read error %i from btree lookup", ret);
rbio->bio.bi_status = BLK_STS_IOERR;
bch2_rbio_done(rbio);
}
bch2_trans_exit(&trans);
bch2_bkey_buf_exit(&sk, c);
}
void bch2_fs_io_exit(struct bch_fs *c)
......
......@@ -88,6 +88,7 @@ static int __bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags
if (ret)
break;
}
bch2_trans_iter_put(&trans, iter);
ret = bch2_trans_exit(&trans) ?: ret;
bch2_bkey_buf_exit(&sk, c);
......@@ -135,20 +136,24 @@ static int bch2_dev_metadata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
dev_idx, flags, true);
if (ret) {
bch_err(c, "Cannot drop device without losing data");
goto err;
break;
}
ret = bch2_btree_node_update_key(c, iter, b, k.k);
if (ret == -EINTR) {
b = bch2_btree_iter_peek_node(iter);
ret = 0;
goto retry;
}
if (ret) {
bch_err(c, "Error updating btree node key: %i", ret);
goto err;
break;
}
}
bch2_trans_iter_free(&trans, iter);
if (ret)
goto err;
}
/* flush relevant btree updates */
......
......@@ -195,6 +195,7 @@ static int bch2_migrate_index_update(struct bch_write_op *op)
goto next;
}
out:
bch2_trans_iter_put(&trans, iter);
bch2_trans_exit(&trans);
bch2_bkey_buf_exit(&_insert, c);
bch2_bkey_buf_exit(&_new, c);
......@@ -641,6 +642,8 @@ static int __bch2_move_data(struct bch_fs *c,
bch2_trans_cond_resched(&trans);
}
out:
bch2_trans_iter_put(&trans, iter);
ret = bch2_trans_exit(&trans) ?: ret;
bch2_bkey_buf_exit(&sk, c);
......
......@@ -372,6 +372,7 @@ static int bch2_quota_init_type(struct bch_fs *c, enum quota_types type)
if (ret)
break;
}
bch2_trans_iter_put(&trans, iter);
return bch2_trans_exit(&trans) ?: ret;
}
......@@ -449,6 +450,8 @@ int bch2_fs_quota_read(struct bch_fs *c)
KEY_TYPE_QUOTA_NOCHECK);
}
}
bch2_trans_iter_put(&trans, iter);
return bch2_trans_exit(&trans) ?: ret;
}
......@@ -739,7 +742,9 @@ static int bch2_set_quota_trans(struct btree_trans *trans,
if (qdq->d_fieldmask & QC_INO_HARD)
new_quota->v.c[Q_INO].hardlimit = cpu_to_le64(qdq->d_ino_hardlimit);
return bch2_trans_update(trans, iter, &new_quota->k_i, 0);
ret = bch2_trans_update(trans, iter, &new_quota->k_i, 0);
bch2_trans_iter_put(trans, iter);
return ret;
}
static int bch2_set_quota(struct super_block *sb, struct kqid qid,
......
......@@ -223,20 +223,18 @@ s64 bch2_remap_range(struct bch_fs *c,
dst_iter = bch2_trans_get_iter(&trans, BTREE_ID_extents, dst_start,
BTREE_ITER_INTENT);
while (1) {
while (ret == 0 || ret == -EINTR) {
bch2_trans_begin(&trans);
trans.mem_top = 0;
if (fatal_signal_pending(current)) {
ret = -EINTR;
goto err;
break;
}
src_k = get_next_src(src_iter, src_end);
ret = bkey_err(src_k);
if (ret)
goto btree_err;
continue;
src_done = bpos_min(src_iter->pos, src_end).offset -
src_start.offset;
......@@ -245,8 +243,6 @@ s64 bch2_remap_range(struct bch_fs *c,
if (bkey_cmp(dst_iter->pos, dst_want) < 0) {
ret = bch2_fpunch_at(&trans, dst_iter, dst_want,
journal_seq, i_sectors_delta);
if (ret)
goto btree_err;
continue;
}
......@@ -265,7 +261,7 @@ s64 bch2_remap_range(struct bch_fs *c,
ret = bch2_make_extent_indirect(&trans, src_iter,
new_src.k);
if (ret)
goto btree_err;
continue;
BUG_ON(src_k.k->type != KEY_TYPE_reflink_p);
}
......@@ -294,20 +290,16 @@ s64 bch2_remap_range(struct bch_fs *c,
NULL, journal_seq,
new_i_size, i_sectors_delta);
if (ret)
goto btree_err;
continue;
dst_done = dst_iter->pos.offset - dst_start.offset;
src_want = POS(src_start.inode, src_start.offset + dst_done);
bch2_btree_iter_set_pos(src_iter, src_want);
btree_err:
if (ret == -EINTR)
ret = 0;
if (ret)
goto err;
}
bch2_trans_iter_put(&trans, dst_iter);
bch2_trans_iter_put(&trans, src_iter);
BUG_ON(bkey_cmp(dst_iter->pos, dst_end));
err:
BUG_ON(!ret && bkey_cmp(dst_iter->pos, dst_end));
BUG_ON(bkey_cmp(dst_iter->pos, dst_end) > 0);
dst_done = dst_iter->pos.offset - dst_start.offset;
......@@ -329,6 +321,8 @@ s64 bch2_remap_range(struct bch_fs *c,
ret2 = bch2_inode_write(&trans, inode_iter, &inode_u) ?:
bch2_trans_commit(&trans, NULL, journal_seq, 0);
}
bch2_trans_iter_put(&trans, inode_iter);
} while (ret2 == -EINTR);
ret = bch2_trans_exit(&trans) ?: ret;
......
......@@ -67,6 +67,7 @@ static int test_delete(struct bch_fs *c, u64 nr)
goto err;
}
err:
bch2_trans_iter_put(&trans, iter);
bch2_trans_exit(&trans);
return ret;
}
......@@ -106,6 +107,7 @@ static int test_delete_written(struct bch_fs *c, u64 nr)
goto err;
}
err:
bch2_trans_iter_put(&trans, iter);
bch2_trans_exit(&trans);
return ret;
}
......@@ -113,7 +115,7 @@ static int test_delete_written(struct bch_fs *c, u64 nr)
static int test_iterate(struct bch_fs *c, u64 nr)
{
struct btree_trans trans;
struct btree_iter *iter;
struct btree_iter *iter = NULL;
struct bkey_s_c k;
u64 i;
int ret = 0;
......@@ -159,6 +161,7 @@ static int test_iterate(struct bch_fs *c, u64 nr)
BUG_ON(i);
err:
bch2_trans_iter_put(&trans, iter);
bch2_trans_exit(&trans);
return ret;
}
......@@ -166,7 +169,7 @@ static int test_iterate(struct bch_fs *c, u64 nr)
static int test_iterate_extents(struct bch_fs *c, u64 nr)
{
struct btree_trans trans;
struct btree_iter *iter;
struct btree_iter *iter = NULL;
struct bkey_s_c k;
u64 i;
int ret = 0;
......@@ -213,6 +216,7 @@ static int test_iterate_extents(struct bch_fs *c, u64 nr)
BUG_ON(i);
err:
bch2_trans_iter_put(&trans, iter);
bch2_trans_exit(&trans);
return ret;
}
......@@ -257,7 +261,7 @@ static int test_iterate_slots(struct bch_fs *c, u64 nr)
BUG_ON(k.k->p.offset != i);
i += 2;
}
bch2_trans_iter_free(&trans, iter);
bch2_trans_iter_put(&trans, iter);
BUG_ON(i != nr * 2);
......@@ -274,6 +278,7 @@ static int test_iterate_slots(struct bch_fs *c, u64 nr)
if (i == nr * 2)
break;
}
bch2_trans_iter_put(&trans, iter);
err:
bch2_trans_exit(&trans);
return ret;
......@@ -318,7 +323,7 @@ static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
BUG_ON(k.k->size != 8);
i += 16;
}
bch2_trans_iter_free(&trans, iter);
bch2_trans_iter_put(&trans, iter);
BUG_ON(i != nr);
......@@ -337,6 +342,7 @@ static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
if (i == nr)
break;
}
bch2_trans_iter_put(&trans, iter);
err:
bch2_trans_exit(&trans);
return 0;
......@@ -362,6 +368,8 @@ static int test_peek_end(struct bch_fs *c, u64 nr)
k = bch2_btree_iter_peek(iter);
BUG_ON(k.k);
bch2_trans_iter_put(&trans, iter);
bch2_trans_exit(&trans);
return 0;
}
......@@ -382,6 +390,8 @@ static int test_peek_end_extents(struct bch_fs *c, u64 nr)
k = bch2_btree_iter_peek(iter);
BUG_ON(k.k);
bch2_trans_iter_put(&trans, iter);
bch2_trans_exit(&trans);
return 0;
}
......@@ -508,7 +518,7 @@ static int rand_lookup(struct bch_fs *c, u64 nr)
}
}
bch2_trans_iter_free(&trans, iter);
bch2_trans_iter_put(&trans, iter);
bch2_trans_exit(&trans);
return ret;
}
......@@ -549,7 +559,7 @@ static int rand_mixed(struct bch_fs *c, u64 nr)
}
}
bch2_trans_iter_free(&trans, iter);
bch2_trans_iter_put(&trans, iter);
bch2_trans_exit(&trans);
return ret;
}
......@@ -630,6 +640,8 @@ static int seq_insert(struct bch_fs *c, u64 nr)
if (++i == nr)
break;
}
bch2_trans_iter_put(&trans, iter);
bch2_trans_exit(&trans);
return ret;
}
......@@ -645,6 +657,8 @@ static int seq_lookup(struct bch_fs *c, u64 nr)
for_each_btree_key(&trans, iter, BTREE_ID_xattrs, POS_MIN, 0, k, ret)
;
bch2_trans_iter_put(&trans, iter);
bch2_trans_exit(&trans);
return ret;
}
......@@ -671,6 +685,8 @@ static int seq_overwrite(struct bch_fs *c, u64 nr)
break;
}
}
bch2_trans_iter_put(&trans, iter);
bch2_trans_exit(&trans);
return ret;
}
......
......@@ -133,12 +133,9 @@ int bch2_xattr_get(struct bch_fs *c, struct bch_inode_info *inode,
inode->v.i_ino,
&X_SEARCH(type, name, strlen(name)),
0);
if (IS_ERR(iter)) {
bch2_trans_exit(&trans);
BUG_ON(PTR_ERR(iter) == -EINTR);
return PTR_ERR(iter) == -ENOENT ? -ENODATA : PTR_ERR(iter);
}
ret = PTR_ERR_OR_ZERO(iter);
if (ret)
goto err;
xattr = bkey_s_c_to_xattr(bch2_btree_iter_peek_slot(iter));
ret = le16_to_cpu(xattr.v->x_val_len);
......@@ -148,9 +145,12 @@ int bch2_xattr_get(struct bch_fs *c, struct bch_inode_info *inode,
else
memcpy(buffer, xattr_val(xattr.v), ret);
}
bch2_trans_iter_put(&trans, iter);
err:
bch2_trans_exit(&trans);
return ret;
BUG_ON(ret == -EINTR);
return ret == -ENOENT ? -ENODATA : ret;
}
int bch2_xattr_set(struct btree_trans *trans, u64 inum,
......@@ -294,6 +294,8 @@ ssize_t bch2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
if (ret)
break;
}
bch2_trans_iter_put(&trans, iter);
ret = bch2_trans_exit(&trans) ?: ret;
if (ret)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment