Commit 8b3e9bd6 authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Always check for transaction restarts

On transaction restart iterators won't be locked anymore - make sure
we're always checking for errors.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
parent 67b07638
......@@ -222,6 +222,8 @@ struct posix_acl *bch2_get_acl(struct mnt_idmap *idmap,
struct btree_iter *iter;
struct bkey_s_c_xattr xattr;
struct posix_acl *acl = NULL;
struct bkey_s_c k;
int ret;
bch2_trans_init(&trans, c, 0, 0);
retry:
......@@ -240,7 +242,14 @@ struct posix_acl *bch2_get_acl(struct mnt_idmap *idmap,
goto out;
}
xattr = bkey_s_c_to_xattr(bch2_btree_iter_peek_slot(iter));
k = bch2_btree_iter_peek_slot(iter);
ret = bkey_err(k);
if (ret) {
acl = ERR_PTR(ret);
goto out;
}
xattr = bkey_s_c_to_xattr(k);
acl = bch2_acl_from_disk(xattr_val(xattr.v),
le16_to_cpu(xattr.v->x_val_len));
......@@ -358,6 +367,7 @@ int bch2_acl_chmod(struct btree_trans *trans,
struct bkey_s_c_xattr xattr;
struct bkey_i_xattr *new;
struct posix_acl *acl;
struct bkey_s_c k;
int ret;
iter = bch2_hash_lookup(trans, bch2_xattr_hash_desc,
......@@ -368,7 +378,11 @@ int bch2_acl_chmod(struct btree_trans *trans,
if (ret)
return ret == -ENOENT ? 0 : ret;
xattr = bkey_s_c_to_xattr(bch2_btree_iter_peek_slot(iter));
k = bch2_btree_iter_peek_slot(iter);
xattr = bkey_s_c_to_xattr(k);
if (ret)
goto err;
acl = bch2_acl_from_disk(xattr_val(xattr.v),
le16_to_cpu(xattr.v->x_val_len));
ret = PTR_ERR_OR_ZERO(acl);
......
......@@ -374,7 +374,7 @@ int bch2_alloc_write(struct bch_fs *c, unsigned flags)
percpu_ref_put(&ca->ref);
goto err;
}
bch2_btree_iter_next_slot(iter);
bch2_btree_iter_advance(iter);
}
}
err:
......
......@@ -973,9 +973,9 @@ struct btree *bch2_btree_node_get_noiter(struct bch_fs *c,
return b;
}
void bch2_btree_node_prefetch(struct bch_fs *c, struct btree_iter *iter,
const struct bkey_i *k,
enum btree_id btree_id, unsigned level)
int bch2_btree_node_prefetch(struct bch_fs *c, struct btree_iter *iter,
const struct bkey_i *k,
enum btree_id btree_id, unsigned level)
{
struct btree_cache *bc = &c->btree_cache;
struct btree *b;
......@@ -985,9 +985,10 @@ void bch2_btree_node_prefetch(struct bch_fs *c, struct btree_iter *iter,
b = btree_cache_find(bc, k);
if (b)
return;
return 0;
bch2_btree_node_fill(c, iter, k, btree_id, level, SIX_LOCK_read, false);
b = bch2_btree_node_fill(c, iter, k, btree_id, level, SIX_LOCK_read, false);
return PTR_ERR_OR_ZERO(b);
}
void bch2_btree_node_evict(struct bch_fs *c, const struct bkey_i *k)
......
......@@ -27,8 +27,8 @@ struct btree *bch2_btree_node_get(struct btree_trans *, struct btree_iter *,
struct btree *bch2_btree_node_get_noiter(struct bch_fs *, const struct bkey_i *,
enum btree_id, unsigned, bool);
void bch2_btree_node_prefetch(struct bch_fs *, struct btree_iter *,
const struct bkey_i *, enum btree_id, unsigned);
int bch2_btree_node_prefetch(struct bch_fs *, struct btree_iter *,
const struct bkey_i *, enum btree_id, unsigned);
void bch2_btree_node_evict(struct bch_fs *, const struct bkey_i *);
......
......@@ -1142,7 +1142,7 @@ static inline int btree_iter_lock_root(struct btree_iter *iter,
}
noinline
static void btree_iter_prefetch(struct btree_iter *iter)
static int btree_iter_prefetch(struct btree_iter *iter)
{
struct bch_fs *c = iter->trans->c;
struct btree_iter_level *l = &iter->l[iter->level];
......@@ -1153,10 +1153,11 @@ static void btree_iter_prefetch(struct btree_iter *iter)
? (iter->level > 1 ? 0 : 2)
: (iter->level > 1 ? 1 : 16);
bool was_locked = btree_node_locked(iter, iter->level);
int ret = 0;
bch2_bkey_buf_init(&tmp);
while (nr) {
while (nr && !ret) {
if (!bch2_btree_node_relock(iter, iter->level))
break;
......@@ -1166,14 +1167,15 @@ static void btree_iter_prefetch(struct btree_iter *iter)
break;
bch2_bkey_buf_unpack(&tmp, c, l->b, k);
bch2_btree_node_prefetch(c, iter, tmp.k, iter->btree_id,
iter->level - 1);
ret = bch2_btree_node_prefetch(c, iter, tmp.k, iter->btree_id,
iter->level - 1);
}
if (!was_locked)
btree_node_unlock(iter, iter->level);
bch2_bkey_buf_exit(&tmp, c);
return ret;
}
static noinline void btree_node_mem_ptr_set(struct btree_iter *iter,
......@@ -1228,7 +1230,7 @@ static __always_inline int btree_iter_down(struct btree_iter *iter,
btree_node_mem_ptr_set(iter, level + 1, b);
if (iter->flags & BTREE_ITER_PREFETCH)
btree_iter_prefetch(iter);
ret = btree_iter_prefetch(iter);
if (btree_node_read_locked(iter, level + 1))
btree_node_unlock(iter, level + 1);
......
......@@ -210,6 +210,9 @@ int bch2_dirent_rename(struct btree_trans *trans,
goto out;
old_dst = bch2_btree_iter_peek_slot(dst_iter);
ret = bkey_err(old_dst);
if (ret)
goto out;
if (mode != BCH_RENAME)
*dst_inum = le64_to_cpu(bkey_s_c_to_dirent(old_dst).v->d_inum);
......@@ -225,6 +228,10 @@ int bch2_dirent_rename(struct btree_trans *trans,
goto out;
old_src = bch2_btree_iter_peek_slot(src_iter);
ret = bkey_err(old_src);
if (ret)
goto out;
*src_inum = le64_to_cpu(bkey_s_c_to_dirent(old_src).v->d_inum);
/* Create new dst key: */
......@@ -329,20 +336,25 @@ u64 bch2_dirent_lookup(struct bch_fs *c, u64 dir_inum,
struct btree_iter *iter;
struct bkey_s_c k;
u64 inum = 0;
int ret = 0;
bch2_trans_init(&trans, c, 0, 0);
iter = __bch2_dirent_lookup_trans(&trans, dir_inum,
hash_info, name, 0);
if (IS_ERR(iter)) {
BUG_ON(PTR_ERR(iter) == -EINTR);
ret = PTR_ERR_OR_ZERO(iter);
if (ret)
goto out;
}
k = bch2_btree_iter_peek_slot(iter);
ret = bkey_err(k);
if (ret)
goto out;
inum = le64_to_cpu(bkey_s_c_to_dirent(k).v->d_inum);
bch2_trans_iter_put(&trans, iter);
out:
BUG_ON(ret == -EINTR);
bch2_trans_exit(&trans);
return inum;
}
......
......@@ -168,6 +168,10 @@ int bch2_unlink_trans(struct btree_trans *trans,
goto err;
k = bch2_btree_iter_peek_slot(dirent_iter);
ret = bkey_err(k);
if (ret)
goto err;
inum = le64_to_cpu(bkey_s_c_to_dirent(k).v->d_inum);
inode_iter = bch2_inode_peek(trans, inode_u, inum, BTREE_ITER_INTENT);
......
......@@ -2668,13 +2668,13 @@ static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
/* already reserved */
if (k.k->type == KEY_TYPE_reservation &&
bkey_s_c_to_reservation(k).v->nr_replicas >= replicas) {
bch2_btree_iter_next_slot(iter);
bch2_btree_iter_advance(iter);
continue;
}
if (bkey_extent_is_data(k.k) &&
!(mode & FALLOC_FL_ZERO_RANGE)) {
bch2_btree_iter_next_slot(iter);
bch2_btree_iter_advance(iter);
continue;
}
......
......@@ -519,7 +519,7 @@ struct btree_iter *bch2_inode_create(struct btree_trans *trans,
if (k.k->p.snapshot == snapshot &&
k.k->type != KEY_TYPE_inode &&
!bch2_btree_key_cache_find(c, BTREE_ID_inodes, SPOS(0, pos, snapshot))) {
bch2_btree_iter_next(iter);
bch2_btree_iter_advance(iter);
continue;
}
......
......@@ -245,8 +245,12 @@ int bch2_sum_sector_overwrites(struct btree_trans *trans,
* writing to, because i_size could be up to one block
* less:
*/
if (!bkey_cmp(old.k->p, new->k.p))
if (!bkey_cmp(old.k->p, new->k.p)) {
old = bch2_btree_iter_next(iter);
ret = bkey_err(old);
if (ret)
break;
}
if (old.k && !bkey_err(old) &&
old.k->p.inode == extent_iter->pos.inode &&
......
......@@ -190,7 +190,7 @@ int bch2_migrate_index_update(struct bch_write_op *op)
}
atomic_long_inc(&c->extent_migrate_raced);
trace_move_race(&new->k);
bch2_btree_iter_next_slot(iter);
bch2_btree_iter_advance(iter);
goto next;
}
out:
......
......@@ -192,8 +192,9 @@ static struct bkey_s_c get_next_src(struct btree_iter *iter, struct bpos end)
return k;
}
bch2_btree_iter_set_pos(iter, end);
return bkey_s_c_null;
if (bkey_cmp(iter->pos, end) >= 0)
bch2_btree_iter_set_pos(iter, end);
return ret ? bkey_s_c_err(ret) : bkey_s_c_null;
}
s64 bch2_remap_range(struct bch_fs *c,
......
......@@ -209,7 +209,7 @@ int bch2_hash_needs_whiteout(struct btree_trans *trans,
iter = bch2_trans_copy_iter(trans, start);
bch2_btree_iter_next_slot(iter);
bch2_btree_iter_advance(iter);
for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, k, ret) {
if (k.k->type != desc.key_type &&
......
......@@ -124,6 +124,7 @@ static int bch2_xattr_get_trans(struct btree_trans *trans, struct bch_inode_info
struct bch_hash_info hash = bch2_hash_info_init(trans->c, &inode->ei_inode);
struct btree_iter *iter;
struct bkey_s_c_xattr xattr;
struct bkey_s_c k;
int ret;
iter = bch2_hash_lookup(trans, bch2_xattr_hash_desc, &hash,
......@@ -134,7 +135,12 @@ static int bch2_xattr_get_trans(struct btree_trans *trans, struct bch_inode_info
if (ret)
goto err;
xattr = bkey_s_c_to_xattr(bch2_btree_iter_peek_slot(iter));
k = bch2_btree_iter_peek_slot(iter);
ret = bkey_err(k);
if (ret)
goto err;
xattr = bkey_s_c_to_xattr(k);
ret = le16_to_cpu(xattr.v->x_val_len);
if (buffer) {
if (ret > size)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment