Commit 0564b167 authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: convert bch2_btree_insert_at() usage to bch2_trans_commit()

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 94d290e4
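
The conversion pattern, in sketch form (a minimal sketch assembled from the call sites below; "pos", "k", "disk_res", "journal_seq" and "flags" are placeholders, not code from this commit):

	/* before: one-shot update through a stack-allocated iterator */
	struct btree_iter iter;

	bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, pos,
			     BTREE_ITER_INTENT);
	ret = bch2_btree_insert_at(c, disk_res, journal_seq, flags,
				   BTREE_INSERT_ENTRY(&iter, k));
	bch2_btree_iter_unlock(&iter);

	/* after: explicit transaction; the update is queued with
	 * bch2_trans_update(), then committed as one unit */
	struct btree_trans trans;
	struct btree_iter *iter;

	bch2_trans_init(&trans, c);
	iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, pos,
				   BTREE_ITER_INTENT);
	bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, k));
	ret = bch2_trans_commit(&trans, disk_res, journal_seq, flags);
	bch2_trans_exit(&trans);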
@@ -268,8 +268,8 @@ int bch2_set_acl_trans(struct btree_trans *trans,
 		if (IS_ERR(xattr))
 			return PTR_ERR(xattr);
 
-		ret = __bch2_hash_set(trans, bch2_xattr_hash_desc, hash_info,
-				      inode_u->bi_inum, &xattr->k_i, 0);
+		ret = bch2_hash_set(trans, bch2_xattr_hash_desc, hash_info,
+				    inode_u->bi_inum, &xattr->k_i, 0);
 	} else {
 		struct xattr_search_key search =
 			X_SEARCH(acl_to_xattr_type(type), "", 0);
...
@@ -310,10 +310,53 @@ int bch2_alloc_read(struct bch_fs *c, struct list_head *journal_replay_list)
 	return 0;
 }
 
+int bch2_alloc_replay_key(struct bch_fs *c, struct bkey_i *k)
+{
+	struct btree_trans trans;
+	struct btree_iter *iter;
+	struct bch_dev *ca;
+	int ret;
+
+	if (k->k.p.inode >= c->sb.nr_devices ||
+	    !c->devs[k->k.p.inode])
+		return 0;
+
+	ca = bch_dev_bkey_exists(c, k->k.p.inode);
+
+	if (k->k.p.offset >= ca->mi.nbuckets)
+		return 0;
+
+	bch2_trans_init(&trans, c);
+
+	iter = bch2_trans_get_iter(&trans, BTREE_ID_ALLOC, k->k.p,
+				   BTREE_ITER_INTENT);
+
+	ret = bch2_btree_iter_traverse(iter);
+	if (ret)
+		goto err;
+
+	/* check buckets_written with btree node locked: */
+	if (test_bit(k->k.p.offset, ca->buckets_written)) {
+		ret = 0;
+		goto err;
+	}
+
+	bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, k));
+
+	ret = bch2_trans_commit(&trans, NULL, NULL,
+				BTREE_INSERT_NOFAIL|
+				BTREE_INSERT_JOURNAL_REPLAY|
+				BTREE_INSERT_NOMARK);
+err:
+	bch2_trans_exit(&trans);
+	return ret;
+}
+
-static int __bch2_alloc_write_key(struct bch_fs *c, struct bch_dev *ca,
+static int __bch2_alloc_write_key(struct btree_trans *trans, struct bch_dev *ca,
 				  size_t b, struct btree_iter *iter,
 				  u64 *journal_seq, unsigned flags)
 {
+	struct bch_fs *c = trans->c;
 #if 0
 	__BKEY_PADDED(k, BKEY_ALLOC_VAL_U64s_MAX) alloc_key;
 #else
@@ -349,14 +392,15 @@ static int __bch2_alloc_write_key(struct bch_fs *c, struct bch_dev *ca,
 	bch2_btree_iter_cond_resched(iter);
 
-	ret = bch2_btree_insert_at(c, NULL, journal_seq,
+	bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, &a->k_i));
+
+	ret = bch2_trans_commit(trans, NULL, journal_seq,
 				BTREE_INSERT_NOCHECK_RW|
 				BTREE_INSERT_NOFAIL|
 				BTREE_INSERT_USE_RESERVE|
 				BTREE_INSERT_USE_ALLOC_RESERVE|
-				flags,
-				BTREE_INSERT_ENTRY(iter, &a->k_i));
+				flags);
 	if (ret)
 		return ret;
@@ -370,42 +414,6 @@ static int __bch2_alloc_write_key(struct bch_fs *c, struct bch_dev *ca,
 	return 0;
 }
 
-int bch2_alloc_replay_key(struct bch_fs *c, struct bkey_i *k)
-{
-	struct bch_dev *ca;
-	struct btree_iter iter;
-	int ret;
-
-	if (k->k.p.inode >= c->sb.nr_devices ||
-	    !c->devs[k->k.p.inode])
-		return 0;
-
-	ca = bch_dev_bkey_exists(c, k->k.p.inode);
-
-	if (k->k.p.offset >= ca->mi.nbuckets)
-		return 0;
-
-	bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, k->k.p,
-			     BTREE_ITER_INTENT);
-
-	ret = bch2_btree_iter_traverse(&iter);
-	if (ret)
-		goto err;
-
-	/* check buckets_written with btree node locked: */
-	ret = test_bit(k->k.p.offset, ca->buckets_written)
-		? 0
-		: bch2_btree_insert_at(c, NULL, NULL,
-				       BTREE_INSERT_NOFAIL|
-				       BTREE_INSERT_JOURNAL_REPLAY|
-				       BTREE_INSERT_NOMARK,
-				       BTREE_INSERT_ENTRY(&iter, k));
-err:
-	bch2_btree_iter_unlock(&iter);
-	return ret;
-}
-
 int bch2_alloc_write(struct bch_fs *c, bool nowait, bool *wrote)
 {
 	struct bch_dev *ca;
@@ -415,12 +423,15 @@ int bch2_alloc_write(struct bch_fs *c, bool nowait, bool *wrote)
 	*wrote = false;
 
 	for_each_rw_member(ca, c, i) {
-		struct btree_iter iter;
+		struct btree_trans trans;
+		struct btree_iter *iter;
 		struct bucket_array *buckets;
 		size_t b;
 
-		bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, POS_MIN,
-				     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+		bch2_trans_init(&trans, c);
+
+		iter = bch2_trans_get_iter(&trans, BTREE_ID_ALLOC, POS_MIN,
+					   BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
 
 		down_read(&ca->bucket_lock);
 		buckets = bucket_array(ca);
@@ -431,7 +442,7 @@ int bch2_alloc_write(struct bch_fs *c, bool nowait, bool *wrote)
 			if (!buckets->b[b].mark.dirty)
 				continue;
 
-			ret = __bch2_alloc_write_key(c, ca, b, &iter, NULL,
+			ret = __bch2_alloc_write_key(&trans, ca, b, iter, NULL,
 						     nowait
 						     ? BTREE_INSERT_NOWAIT
 						     : 0);
@@ -441,7 +452,8 @@ int bch2_alloc_write(struct bch_fs *c, bool nowait, bool *wrote)
 			*wrote = true;
 		}
 		up_read(&ca->bucket_lock);
-		bch2_btree_iter_unlock(&iter);
+
+		bch2_trans_exit(&trans);
 
 		if (ret) {
 			percpu_ref_put(&ca->io_ref);
@@ -887,7 +899,8 @@ static u64 bucket_journal_seq(struct bch_fs *c, struct bucket_mark m)
 	}
 }
 
-static int bch2_invalidate_one_bucket2(struct bch_fs *c, struct bch_dev *ca,
+static int bch2_invalidate_one_bucket2(struct btree_trans *trans,
+				       struct bch_dev *ca,
 				       struct btree_iter *iter,
 				       u64 *journal_seq, unsigned flags)
 {
@@ -897,6 +910,7 @@ static int bch2_invalidate_one_bucket2(struct bch_fs *c, struct bch_dev *ca,
 	/* hack: */
 	__BKEY_PADDED(k, 8) alloc_key;
 #endif
+	struct bch_fs *c = trans->c;
 	struct bkey_i_alloc *a;
 	struct bkey_alloc_unpacked u;
 	struct bucket_mark m;
@@ -959,6 +973,8 @@ static int bch2_invalidate_one_bucket2(struct bch_fs *c, struct bch_dev *ca,
 	a->k.p = iter->pos;
 	bch2_alloc_pack(a, u);
 
+	bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, &a->k_i));
+
 	/*
 	 * XXX:
 	 * when using deferred btree updates, we have journal reclaim doing
@@ -966,16 +982,15 @@ static int bch2_invalidate_one_bucket2(struct bch_fs *c, struct bch_dev *ca,
 	 * progress, and here the allocator is requiring space in the journal -
 	 * so we need a journal pre-reservation:
 	 */
-	ret = bch2_btree_insert_at(c, NULL,
+	ret = bch2_trans_commit(trans, NULL,
 			invalidating_cached_data ? journal_seq : NULL,
 			BTREE_INSERT_ATOMIC|
 			BTREE_INSERT_NOUNLOCK|
 			BTREE_INSERT_NOCHECK_RW|
 			BTREE_INSERT_NOFAIL|
 			BTREE_INSERT_USE_RESERVE|
 			BTREE_INSERT_USE_ALLOC_RESERVE|
-			flags,
-			BTREE_INSERT_ENTRY(iter, &a->k_i));
+			flags);
 	if (ret == -EINTR)
 		goto retry;
@@ -1049,23 +1064,27 @@ static bool bch2_invalidate_one_bucket(struct bch_fs *c, struct bch_dev *ca,
  */
 static int bch2_invalidate_buckets(struct bch_fs *c, struct bch_dev *ca)
 {
-	struct btree_iter iter;
+	struct btree_trans trans;
+	struct btree_iter *iter;
 	u64 journal_seq = 0;
 	int ret = 0;
 
-	bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, POS(ca->dev_idx, 0),
-			     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+	bch2_trans_init(&trans, c);
+
+	iter = bch2_trans_get_iter(&trans, BTREE_ID_ALLOC,
+				   POS(ca->dev_idx, 0),
+				   BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
 
 	/* Only use nowait if we've already invalidated at least one bucket: */
 	while (!ret &&
 	       !fifo_full(&ca->free_inc) &&
 	       ca->alloc_heap.used)
-		ret = bch2_invalidate_one_bucket2(c, ca, &iter, &journal_seq,
+		ret = bch2_invalidate_one_bucket2(&trans, ca, iter, &journal_seq,
 				BTREE_INSERT_GC_LOCK_HELD|
 				(!fifo_empty(&ca->free_inc)
 				 ? BTREE_INSERT_NOWAIT : 0));
 
-	bch2_btree_iter_unlock(&iter);
+	bch2_trans_exit(&trans);
 
 	/* If we used NOWAIT, don't return the error: */
 	if (!fifo_empty(&ca->free_inc))
...
@@ -21,8 +21,6 @@ void bch2_deferred_update_free(struct bch_fs *,
 struct deferred_update *
 bch2_deferred_update_alloc(struct bch_fs *, enum btree_id, unsigned);
 
-/* Normal update interface: */
-
 struct btree_insert {
 	struct bch_fs		*c;
 	struct disk_reservation	*disk_res;
@@ -35,8 +33,6 @@ struct btree_insert {
 	struct btree_insert_entry *entries;
 };
 
-int __bch2_btree_insert_at(struct btree_insert *);
-
 #define BTREE_INSERT_ENTRY(_iter, _k)					\
 	((struct btree_insert_entry) {					\
 		.iter		= (_iter),				\
@@ -50,30 +46,6 @@ int __bch2_btree_insert_at(struct btree_insert *);
 		.deferred	= true,					\
 	})
 
-/**
- * bch_btree_insert_at - insert one or more keys at iterator positions
- * @iter:	btree iterator
- * @insert_key:	key to insert
- * @disk_res:	disk reservation
- * @hook:	extent insert callback
- *
- * Return values:
- * -EINTR: locking changed, this function should be called again. Only returned
- *  if passed BTREE_INSERT_ATOMIC.
- * -EROFS: filesystem read only
- * -EIO: journal or btree node IO error
- */
-#define bch2_btree_insert_at(_c, _disk_res, _journal_seq, _flags, ...)	\
-	__bch2_btree_insert_at(&(struct btree_insert) {			\
-		.c		= (_c),					\
-		.disk_res	= (_disk_res),				\
-		.journal_seq	= (_journal_seq),			\
-		.flags		= (_flags),				\
-		.nr		= COUNT_ARGS(__VA_ARGS__),		\
-		.entries	= (struct btree_insert_entry[]) {	\
-			__VA_ARGS__					\
-		}})
-
 enum {
 	__BTREE_INSERT_ATOMIC,
 	__BTREE_INSERT_NOUNLOCK,
@@ -125,7 +97,7 @@ enum {
 #define BCH_HASH_SET_MUST_CREATE	(1 << __BCH_HASH_SET_MUST_CREATE)
 #define BCH_HASH_SET_MUST_REPLACE	(1 << __BCH_HASH_SET_MUST_REPLACE)
 
-int bch2_btree_delete_at(struct btree_iter *, unsigned);
+int bch2_btree_delete_at(struct btree_trans *, struct btree_iter *, unsigned);
 
 int bch2_btree_insert(struct bch_fs *, enum btree_id, struct bkey_i *,
		      struct disk_reservation *, u64 *, int flags);
@@ -138,8 +110,6 @@ int bch2_btree_node_rewrite(struct bch_fs *c, struct btree_iter *,
 int bch2_btree_node_update_key(struct bch_fs *, struct btree_iter *,
			       struct btree *, struct bkey_i_btree_ptr *);
 
-/* new transactional interface: */
-
 static inline void
 bch2_trans_update(struct btree_trans *trans,
		  struct btree_insert_entry entry)
...
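
The -EINTR convention from the deleted bch2_btree_insert_at() documentation carries over: with BTREE_INSERT_ATOMIC, bch2_trans_commit() returns -EINTR when locking changed and the commit should be retried. A minimal sketch of the retry shape used by converted call sites such as bch2_inode_rm() below; the key "k" and the iterator setup are placeholders, not code from this commit:

	do {
		bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &k));
		ret = bch2_trans_commit(&trans, NULL, NULL,
					BTREE_INSERT_ATOMIC|
					BTREE_INSERT_NOFAIL);
	} while (ret == -EINTR);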
@@ -631,7 +631,7 @@ static inline void btree_insert_entry_checks(struct bch_fs *c,
  * -EROFS: filesystem read only
  * -EIO: journal or btree node IO error
  */
-int __bch2_btree_insert_at(struct btree_insert *trans)
+static int __bch2_btree_insert_at(struct btree_insert *trans)
 {
 	struct bch_fs *c = trans->c;
 	struct btree_insert_entry *i;
@@ -847,17 +847,18 @@ int bch2_trans_commit(struct btree_trans *trans,
 	return __bch2_btree_insert_at(&insert);
 }
 
-int bch2_btree_delete_at(struct btree_iter *iter, unsigned flags)
+int bch2_btree_delete_at(struct btree_trans *trans,
+			 struct btree_iter *iter, unsigned flags)
 {
 	struct bkey_i k;
 
 	bkey_init(&k.k);
 	k.k.p = iter->pos;
 
-	return bch2_btree_insert_at(iter->c, NULL, NULL,
-				    BTREE_INSERT_NOFAIL|
-				    BTREE_INSERT_USE_RESERVE|flags,
-				    BTREE_INSERT_ENTRY(iter, &k));
+	bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, &k));
+	return bch2_trans_commit(trans, NULL, NULL,
+				 BTREE_INSERT_NOFAIL|
+				 BTREE_INSERT_USE_RESERVE|flags);
 }
 
 /**
@@ -872,14 +873,19 @@ int bch2_btree_insert(struct bch_fs *c, enum btree_id id,
		      struct disk_reservation *disk_res,
		      u64 *journal_seq, int flags)
 {
-	struct btree_iter iter;
+	struct btree_trans trans;
+	struct btree_iter *iter;
 	int ret;
 
-	bch2_btree_iter_init(&iter, c, id, bkey_start_pos(&k->k),
-			     BTREE_ITER_INTENT);
-	ret = bch2_btree_insert_at(c, disk_res, journal_seq, flags,
-				   BTREE_INSERT_ENTRY(&iter, k));
-	bch2_btree_iter_unlock(&iter);
+	bch2_trans_init(&trans, c);
+
+	iter = bch2_trans_get_iter(&trans, id, bkey_start_pos(&k->k),
+				   BTREE_ITER_INTENT);
+
+	bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, k));
+
+	ret = bch2_trans_commit(&trans, disk_res, journal_seq, flags);
+	bch2_trans_exit(&trans);
 
 	return ret;
 }
@@ -893,16 +899,18 @@ int bch2_btree_delete_range(struct bch_fs *c, enum btree_id id,
			    struct bpos start, struct bpos end,
			    u64 *journal_seq)
 {
-	struct btree_iter iter;
+	struct btree_trans trans;
+	struct btree_iter *iter;
 	struct bkey_s_c k;
 	int ret = 0;
 
-	bch2_btree_iter_init(&iter, c, id, start,
-			     BTREE_ITER_INTENT);
+	bch2_trans_init(&trans, c);
+
+	iter = bch2_trans_get_iter(&trans, id, start, BTREE_ITER_INTENT);
 
-	while ((k = bch2_btree_iter_peek(&iter)).k &&
+	while ((k = bch2_btree_iter_peek(iter)).k &&
	       !(ret = btree_iter_err(k)) &&
-	       bkey_cmp(iter.pos, end) < 0) {
+	       bkey_cmp(iter->pos, end) < 0) {
		unsigned max_sectors = KEY_SIZE_MAX & (~0 << c->block_bits);
		/* really shouldn't be using a bare, unpadded bkey_i */
		struct bkey_i delete;
@@ -919,24 +927,25 @@ int bch2_btree_delete_range(struct bch_fs *c, enum btree_id id,
		 * (bch2_btree_iter_peek() does guarantee that iter.pos >=
		 * bkey_start_pos(k.k)).
		 */
-		delete.k.p = iter.pos;
+		delete.k.p = iter->pos;
 
-		if (iter.flags & BTREE_ITER_IS_EXTENTS) {
+		if (iter->flags & BTREE_ITER_IS_EXTENTS) {
			/* create the biggest key we can */
			bch2_key_resize(&delete.k, max_sectors);
			bch2_cut_back(end, &delete.k);
-			bch2_extent_trim_atomic(&delete, &iter);
+			bch2_extent_trim_atomic(&delete, iter);
		}
 
-		ret = bch2_btree_insert_at(c, NULL, journal_seq,
-					   BTREE_INSERT_NOFAIL,
-					   BTREE_INSERT_ENTRY(&iter, &delete));
+		bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &delete));
+
+		ret = bch2_trans_commit(&trans, NULL, journal_seq,
+					BTREE_INSERT_NOFAIL);
		if (ret)
			break;
 
-		bch2_btree_iter_cond_resched(&iter);
+		bch2_btree_iter_cond_resched(iter);
	}
 
-	bch2_btree_iter_unlock(&iter);
+	bch2_trans_exit(&trans);
	return ret;
 }
@@ -151,8 +151,8 @@ int __bch2_dirent_create(struct btree_trans *trans,
	if (ret)
		return ret;
 
-	return __bch2_hash_set(trans, bch2_dirent_hash_desc, hash_info,
-			       dir_inum, &dirent->k_i, flags);
+	return bch2_hash_set(trans, bch2_dirent_hash_desc, hash_info,
+			     dir_inum, &dirent->k_i, flags);
 }
 
 int bch2_dirent_create(struct bch_fs *c, u64 dir_inum,
...
@@ -629,36 +629,12 @@ void bch2_stripes_heap_insert(struct bch_fs *c,
 /* stripe deletion */
 
-static void ec_stripe_delete(struct bch_fs *c, size_t idx)
+static int ec_stripe_delete(struct bch_fs *c, size_t idx)
 {
-	struct btree_iter iter;
-	struct bch_stripe *v = NULL;
-	struct bkey_s_c k;
-	struct bkey_i delete;
-	u64 journal_seq = 0;
-
-	bch2_btree_iter_init(&iter, c, BTREE_ID_EC,
-			     POS(0, idx),
-			     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
-	k = bch2_btree_iter_peek_slot(&iter);
-
-	if (btree_iter_err(k) || k.k->type != KEY_TYPE_stripe)
-		goto out;
-
-	v = kmalloc(bkey_val_bytes(k.k), GFP_KERNEL);
-	BUG_ON(!v);
-	memcpy(v, bkey_s_c_to_stripe(k).v, bkey_val_bytes(k.k));
-
-	bkey_init(&delete.k);
-	delete.k.p = iter.pos;
-
-	bch2_btree_insert_at(c, NULL, &journal_seq,
-			     BTREE_INSERT_NOFAIL|
-			     BTREE_INSERT_USE_RESERVE|
-			     BTREE_INSERT_NOUNLOCK,
-			     BTREE_INSERT_ENTRY(&iter, &delete));
-out:
-	bch2_btree_iter_unlock(&iter);
-	kfree(v);
+	return bch2_btree_delete_range(c, BTREE_ID_EC,
+				       POS(0, idx),
+				       POS(0, idx + 1),
+				       NULL);
 }
 
 static void ec_stripe_delete_work(struct work_struct *work)
@@ -690,39 +666,46 @@ static void ec_stripe_delete_work(struct work_struct *work)
 static int ec_stripe_bkey_insert(struct bch_fs *c,
				 struct bkey_i_stripe *stripe)
 {
-	struct btree_iter iter;
+	struct btree_trans trans;
+	struct btree_iter *iter;
	struct bkey_s_c k;
	int ret;
 
-	/* XXX: start pos hint */
+	bch2_trans_init(&trans, c);
 retry:
-	for_each_btree_key(&iter, c, BTREE_ID_EC, POS_MIN,
-			   BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k) {
-		if (bkey_cmp(k.k->p, POS(0, U32_MAX)) > 0) {
-			bch2_btree_iter_unlock(&iter);
-			return -ENOSPC;
-		}
+	bch2_trans_begin(&trans);
+
+	/* XXX: start pos hint */
+	iter = bch2_trans_get_iter(&trans, BTREE_ID_EC, POS_MIN,
+				   BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+
+	for_each_btree_key_continue(iter, BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k) {
+		if (bkey_cmp(k.k->p, POS(0, U32_MAX)) > 0)
+			break;
 
		if (bkey_deleted(k.k))
			goto found_slot;
	}
 
-	return bch2_btree_iter_unlock(&iter) ?: -ENOSPC;
+	ret = -ENOSPC;
+	goto out;
 found_slot:
-	ret = ec_stripe_mem_alloc(c, &iter);
+	ret = ec_stripe_mem_alloc(c, iter);
 
	if (ret == -EINTR)
		goto retry;
	if (ret)
		return ret;
 
-	stripe->k.p = iter.pos;
+	stripe->k.p = iter->pos;
 
-	ret = bch2_btree_insert_at(c, NULL, NULL,
-				   BTREE_INSERT_NOFAIL|
-				   BTREE_INSERT_USE_RESERVE,
-				   BTREE_INSERT_ENTRY(&iter, &stripe->k_i));
-	bch2_btree_iter_unlock(&iter);
+	bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &stripe->k_i));
+
+	ret = bch2_trans_commit(&trans, NULL, NULL,
+				BTREE_INSERT_NOFAIL|
+				BTREE_INSERT_USE_RESERVE);
+out:
+	bch2_trans_exit(&trans);
 
	return ret;
 }
@@ -749,23 +732,26 @@ static int ec_stripe_update_ptrs(struct bch_fs *c,
			      struct ec_stripe_buf *s,
			      struct bkey *pos)
 {
-	struct btree_iter iter;
+	struct btree_trans trans;
+	struct btree_iter *iter;
	struct bkey_s_c k;
	struct bkey_s_extent e;
	struct bch_extent_ptr *ptr;
	BKEY_PADDED(k) tmp;
	int ret = 0, dev, idx;
 
-	bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS,
-			     bkey_start_pos(pos),
-			     BTREE_ITER_INTENT);
+	bch2_trans_init(&trans, c);
+
+	iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
+				   bkey_start_pos(pos),
+				   BTREE_ITER_INTENT);
 
-	while ((k = bch2_btree_iter_peek(&iter)).k &&
-	       !btree_iter_err(k) &&
+	while ((k = bch2_btree_iter_peek(iter)).k &&
+	       !(ret = btree_iter_err(k)) &&
	       bkey_cmp(bkey_start_pos(k.k), pos->p) < 0) {
		idx = extent_matches_stripe(c, &s->key.v, k);
		if (idx < 0) {
-			bch2_btree_iter_next(&iter);
+			bch2_btree_iter_next(iter);
			continue;
		}
@@ -783,18 +769,21 @@ static int ec_stripe_update_ptrs(struct bch_fs *c,
		extent_stripe_ptr_add(e, s, ptr, idx);
 
-		ret = bch2_btree_insert_at(c, NULL, NULL,
-					   BTREE_INSERT_ATOMIC|
-					   BTREE_INSERT_NOFAIL|
-					   BTREE_INSERT_USE_RESERVE,
-					   BTREE_INSERT_ENTRY(&iter, &tmp.k));
+		bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &tmp.k));
+
+		ret = bch2_trans_commit(&trans, NULL, NULL,
+					BTREE_INSERT_ATOMIC|
+					BTREE_INSERT_NOFAIL|
+					BTREE_INSERT_USE_RESERVE);
		if (ret == -EINTR)
			ret = 0;
		if (ret)
			break;
	}
 
-	return bch2_btree_iter_unlock(&iter) ?: ret;
+	bch2_trans_exit(&trans);
+
+	return ret;
 }
 
 /*
@@ -1163,13 +1152,14 @@ void bch2_ec_stop_dev(struct bch_fs *c, struct bch_dev *ca)
	mutex_unlock(&c->ec_new_stripe_lock);
 }
 
-static int __bch2_stripe_write_key(struct bch_fs *c,
+static int __bch2_stripe_write_key(struct btree_trans *trans,
				   struct btree_iter *iter,
				   struct stripe *m,
				   size_t idx,
				   struct bkey_i_stripe *new_key,
				   unsigned flags)
 {
+	struct bch_fs *c = trans->c;
	struct bkey_s_c k;
	unsigned i;
	int ret;
@@ -1195,14 +1185,16 @@ static int __bch2_stripe_write_key(struct bch_fs *c,
	spin_unlock(&c->ec_stripes_heap_lock);
 
-	return bch2_btree_insert_at(c, NULL, NULL,
-				    BTREE_INSERT_NOFAIL|flags,
-				    BTREE_INSERT_ENTRY(iter, &new_key->k_i));
+	bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, &new_key->k_i));
+
+	return bch2_trans_commit(trans, NULL, NULL,
+				 BTREE_INSERT_NOFAIL|flags);
 }
 
 int bch2_stripes_write(struct bch_fs *c, bool *wrote)
 {
-	struct btree_iter iter;
+	struct btree_trans trans;
+	struct btree_iter *iter;
	struct genradix_iter giter;
	struct bkey_i_stripe *new_key;
	struct stripe *m;
@@ -1211,14 +1203,16 @@ int bch2_stripes_write(struct bch_fs *c, bool *wrote)
	new_key = kmalloc(255 * sizeof(u64), GFP_KERNEL);
	BUG_ON(!new_key);
 
-	bch2_btree_iter_init(&iter, c, BTREE_ID_EC, POS_MIN,
-			     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+	bch2_trans_init(&trans, c);
+
+	iter = bch2_trans_get_iter(&trans, BTREE_ID_EC, POS_MIN,
+				   BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
 
	genradix_for_each(&c->stripes[0], giter, m) {
		if (!m->dirty)
			continue;
 
-		ret = __bch2_stripe_write_key(c, &iter, m, giter.pos,
+		ret = __bch2_stripe_write_key(&trans, iter, m, giter.pos,
					      new_key, BTREE_INSERT_NOCHECK_RW);
 
		if (ret)
			break;
@@ -1226,7 +1220,7 @@ int bch2_stripes_write(struct bch_fs *c, bool *wrote)
		*wrote = true;
	}
 
-	bch2_btree_iter_unlock(&iter);
+	bch2_trans_exit(&trans);
 
	kfree(new_key);
...
@@ -152,7 +152,7 @@ static void hash_check_set_inode(struct hash_check *h, struct bch_fs *c,
 }
 
 static int hash_redo_key(const struct bch_hash_desc desc,
-			 struct hash_check *h, struct bch_fs *c,
+			 struct btree_trans *trans, struct hash_check *h,
			 struct btree_iter *k_iter, struct bkey_s_c k,
			 u64 hashed)
 {
@@ -165,15 +165,16 @@ static int hash_redo_key(const struct bch_hash_desc desc,
	bkey_reassemble(tmp, k);
 
-	ret = bch2_btree_delete_at(k_iter, 0);
+	ret = bch2_btree_delete_at(trans, k_iter, 0);
	if (ret)
		goto err;
 
	bch2_btree_iter_unlock(k_iter);
 
-	bch2_hash_set(desc, &h->info, c, k_iter->pos.inode, NULL, tmp,
-		      BTREE_INSERT_NOFAIL|
-		      BCH_HASH_SET_MUST_CREATE);
+	bch2_hash_set(trans, desc, &h->info, k_iter->pos.inode,
+		      tmp, BCH_HASH_SET_MUST_CREATE);
+	ret = bch2_trans_commit(trans, NULL, NULL,
+				BTREE_INSERT_NOFAIL);
 err:
	kfree(tmp);
	return ret;
@@ -272,9 +273,10 @@ static bool key_has_correct_hash(const struct bch_hash_desc desc,
 }
 
 static int hash_check_key(const struct bch_hash_desc desc,
-			  struct hash_check *h, struct bch_fs *c,
+			  struct btree_trans *trans, struct hash_check *h,
			  struct btree_iter *k_iter, struct bkey_s_c k)
 {
+	struct bch_fs *c = trans->c;
	char buf[200];
	u64 hashed;
	int ret = 0;
@@ -300,7 +302,7 @@ static int hash_check_key(const struct bch_hash_desc desc,
			hashed, h->chain->pos.offset,
			(bch2_bkey_val_to_text(&PBUF(buf), c,
					       k), buf))) {
-		ret = hash_redo_key(desc, h, c, k_iter, k, hashed);
+		ret = hash_redo_key(desc, trans, h, k_iter, k, hashed);
		if (ret) {
			bch_err(c, "hash_redo_key err %i", ret);
			return ret;
@@ -313,9 +315,10 @@ static int hash_check_key(const struct bch_hash_desc desc,
	return ret;
 }
 
-static int check_dirent_hash(struct hash_check *h, struct bch_fs *c,
+static int check_dirent_hash(struct btree_trans *trans, struct hash_check *h,
			     struct btree_iter *iter, struct bkey_s_c *k)
 {
+	struct bch_fs *c = trans->c;
	struct bkey_i_dirent *d = NULL;
	int ret = -EINVAL;
	char buf[200];
@@ -360,9 +363,9 @@ static int check_dirent_hash(struct hash_check *h, struct bch_fs *c,
	if (fsck_err(c, "dirent with junk at end, was %s (%zu) now %s (%u)",
		     buf, strlen(buf), d->v.d_name, len)) {
-		ret = bch2_btree_insert_at(c, NULL, NULL,
-					   BTREE_INSERT_NOFAIL,
-					   BTREE_INSERT_ENTRY(iter, &d->k_i));
+		bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, &d->k_i));
+
+		ret = bch2_trans_commit(trans, NULL, NULL, BTREE_INSERT_NOFAIL);
		if (ret)
			goto err;
k->k->p.offset, hash, h->chain->pos.offset, k->k->p.offset, hash, h->chain->pos.offset,
(bch2_bkey_val_to_text(&PBUF(buf), c, (bch2_bkey_val_to_text(&PBUF(buf), c,
*k), buf))) { *k), buf))) {
ret = hash_redo_key(bch2_dirent_hash_desc, ret = hash_redo_key(bch2_dirent_hash_desc, trans,
h, c, iter, *k, hash); h, iter, *k, hash);
if (ret) if (ret)
bch_err(c, "hash_redo_key err %i", ret); bch_err(c, "hash_redo_key err %i", ret);
else else
@@ -532,7 +535,7 @@ static int check_dirents(struct bch_fs *c)
				mode_to_type(w.inode.bi_mode),
				(bch2_bkey_val_to_text(&PBUF(buf), c,
						       k), buf))) {
-			ret = bch2_btree_delete_at(iter, 0);
+			ret = bch2_btree_delete_at(&trans, iter, 0);
			if (ret)
				goto err;
			continue;
@@ -541,7 +544,7 @@ static int check_dirents(struct bch_fs *c)
		if (w.first_this_inode && w.have_inode)
			hash_check_set_inode(&h, c, &w.inode);
 
-		ret = check_dirent_hash(&h, c, iter, &k);
+		ret = check_dirent_hash(&trans, &h, iter, &k);
		if (ret > 0) {
			ret = 0;
			continue;
@@ -623,9 +626,11 @@ static int check_dirents(struct bch_fs *c)
			bkey_reassemble(&n->k_i, d.s_c);
			n->v.d_type = mode_to_type(target.bi_mode);
 
-			ret = bch2_btree_insert_at(c, NULL, NULL,
-					BTREE_INSERT_NOFAIL,
-					BTREE_INSERT_ENTRY(iter, &n->k_i));
+			bch2_trans_update(&trans,
+				BTREE_INSERT_ENTRY(iter, &n->k_i));
+
+			ret = bch2_trans_commit(&trans, NULL, NULL,
+						BTREE_INSERT_NOFAIL);
			kfree(n);
			if (ret)
				goto err;
@@ -669,7 +674,7 @@ static int check_xattrs(struct bch_fs *c)
		if (fsck_err_on(!w.have_inode, c,
				"xattr for missing inode %llu",
				k.k->p.inode)) {
-			ret = bch2_btree_delete_at(iter, 0);
+			ret = bch2_btree_delete_at(&trans, iter, 0);
			if (ret)
				goto err;
			continue;
@@ -678,7 +683,7 @@ static int check_xattrs(struct bch_fs *c)
		if (w.first_this_inode && w.have_inode)
			hash_check_set_inode(&h, c, &w.inode);
 
-		ret = hash_check_key(bch2_xattr_hash_desc, &h, c, iter, k);
+		ret = hash_check_key(bch2_xattr_hash_desc, &trans, &h, iter, k);
		if (ret)
			goto fsck_err;
	}
@@ -1163,12 +1168,13 @@ static int check_inode_nlink(struct bch_fs *c,
	return ret;
 }
 
-static int check_inode(struct bch_fs *c,
+static int check_inode(struct btree_trans *trans,
		       struct bch_inode_unpacked *lostfound_inode,
		       struct btree_iter *iter,
		       struct bkey_s_c_inode inode,
		       struct nlink *link)
 {
+	struct bch_fs *c = trans->c;
	struct bch_inode_unpacked u;
	bool do_update = false;
	int ret = 0;
@@ -1259,10 +1265,10 @@ static int check_inode(struct bch_fs *c,
		struct bkey_inode_buf p;
 
		bch2_inode_pack(&p, &u);
+		bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, &p.inode.k_i));
 
-		ret = bch2_btree_insert_at(c, NULL, NULL,
-					   BTREE_INSERT_NOFAIL,
-					   BTREE_INSERT_ENTRY(iter, &p.inode.k_i));
+		ret = bch2_trans_commit(trans, NULL, NULL,
+					BTREE_INSERT_NOFAIL);
		if (ret && ret != -EINTR)
			bch_err(c, "error in fs gc: error %i "
				"updating inode", ret);
@@ -1277,25 +1283,29 @@ static int bch2_gc_walk_inodes(struct bch_fs *c,
			       nlink_table *links,
			       u64 range_start, u64 range_end)
 {
-	struct btree_iter iter;
+	struct btree_trans trans;
+	struct btree_iter *iter;
	struct bkey_s_c k;
	struct nlink *link, zero_links = { 0, 0 };
	struct genradix_iter nlinks_iter;
	int ret = 0, ret2 = 0;
	u64 nlinks_pos;
 
-	bch2_btree_iter_init(&iter, c, BTREE_ID_INODES, POS(range_start, 0), 0);
+	bch2_trans_init(&trans, c);
+
+	iter = bch2_trans_get_iter(&trans, BTREE_ID_INODES,
+				   POS(range_start, 0), 0);
	nlinks_iter = genradix_iter_init(links, 0);
 
-	while ((k = bch2_btree_iter_peek(&iter)).k &&
-	       !btree_iter_err(k)) {
+	while ((k = bch2_btree_iter_peek(iter)).k &&
+	       !(ret2 = btree_iter_err(k))) {
 peek_nlinks:	link = genradix_iter_peek(&nlinks_iter, links);
 
-		if (!link && (!k.k || iter.pos.inode >= range_end))
+		if (!link && (!k.k || iter->pos.inode >= range_end))
			break;
 
		nlinks_pos = range_start + nlinks_iter.pos;
-		if (iter.pos.inode > nlinks_pos) {
+		if (iter->pos.inode > nlinks_pos) {
			/* Should have been caught by dirents pass: */
			need_fsck_err_on(link && link->count, c,
				"missing inode %llu (nlink %u)",
@@ -1304,7 +1314,7 @@ peek_nlinks:	link = genradix_iter_peek(&nlinks_iter, links);
			goto peek_nlinks;
		}
 
-		if (iter.pos.inode < nlinks_pos || !link)
+		if (iter->pos.inode < nlinks_pos || !link)
			link = &zero_links;
 
		if (k.k && k.k->type == KEY_TYPE_inode) {
@@ -1312,9 +1322,9 @@ peek_nlinks:	link = genradix_iter_peek(&nlinks_iter, links);
			 * Avoid potential deadlocks with iter for
			 * truncate/rm/etc.:
			 */
-			bch2_btree_iter_unlock(&iter);
+			bch2_btree_iter_unlock(iter);
 
-			ret = check_inode(c, lostfound_inode, &iter,
+			ret = check_inode(&trans, lostfound_inode, iter,
					  bkey_s_c_to_inode(k), link);
			BUG_ON(ret == -EINTR);
			if (ret)
nlinks_pos, link->count); nlinks_pos, link->count);
} }
if (nlinks_pos == iter.pos.inode) if (nlinks_pos == iter->pos.inode)
genradix_iter_advance(&nlinks_iter, links); genradix_iter_advance(&nlinks_iter, links);
bch2_btree_iter_next(&iter); bch2_btree_iter_next(iter);
bch2_btree_iter_cond_resched(&iter); bch2_btree_iter_cond_resched(iter);
} }
fsck_err: fsck_err:
ret2 = bch2_btree_iter_unlock(&iter); bch2_trans_exit(&trans);
if (ret2) if (ret2)
bch_err(c, "error in fs gc: btree error %i while walking inodes", ret2); bch_err(c, "error in fs gc: btree error %i while walking inodes", ret2);
@@ -1379,12 +1390,18 @@ static int check_inode_nlinks(struct bch_fs *c,
 noinline_for_stack
 static int check_inodes_fast(struct bch_fs *c)
 {
-	struct btree_iter iter;
+	struct btree_trans trans;
+	struct btree_iter *iter;
	struct bkey_s_c k;
	struct bkey_s_c_inode inode;
	int ret = 0;
 
-	for_each_btree_key(&iter, c, BTREE_ID_INODES, POS_MIN, 0, k) {
+	bch2_trans_init(&trans, c);
+
+	iter = bch2_trans_get_iter(&trans, BTREE_ID_INODES,
+				   POS_MIN, 0);
+
+	for_each_btree_key_continue(iter, 0, k) {
		if (k.k->type != KEY_TYPE_inode)
			continue;
@@ -1394,14 +1411,19 @@ static int check_inodes_fast(struct bch_fs *c)
		    (BCH_INODE_I_SIZE_DIRTY|
		     BCH_INODE_I_SECTORS_DIRTY|
		     BCH_INODE_UNLINKED)) {
-			ret = check_inode(c, NULL, &iter, inode, NULL);
+			ret = check_inode(&trans, NULL, iter, inode, NULL);
			BUG_ON(ret == -EINTR);
			if (ret)
				break;
		}
	}
 
-	return bch2_btree_iter_unlock(&iter) ?: ret;
+	if (!ret)
+		ret = bch2_btree_iter_unlock(iter);
+
+	bch2_trans_exit(&trans);
+
+	return ret;
 }
 
 /*
...
@@ -367,7 +367,8 @@ int bch2_inode_create(struct bch_fs *c, struct bch_inode_unpacked *inode_u,
 int bch2_inode_rm(struct bch_fs *c, u64 inode_nr)
 {
-	struct btree_iter iter;
+	struct btree_trans trans;
+	struct btree_iter *iter;
	struct bkey_i_inode_generation delete;
	struct bpos start = POS(inode_nr, 0);
	struct bpos end = POS(inode_nr + 1, 0);
@@ -390,17 +391,17 @@ int bch2_inode_rm(struct bch_fs *c, u64 inode_nr)
	if (ret)
		return ret;
 
-	bch2_btree_iter_init(&iter, c, BTREE_ID_INODES, POS(inode_nr, 0),
-			     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+	bch2_trans_init(&trans, c);
+
+	iter = bch2_trans_get_iter(&trans, BTREE_ID_INODES, POS(inode_nr, 0),
+				   BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
	do {
-		struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter);
+		struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
		u32 bi_generation = 0;
 
		ret = btree_iter_err(k);
-		if (ret) {
-			bch2_btree_iter_unlock(&iter);
-			return ret;
-		}
+		if (ret)
+			break;
 
		bch2_fs_inconsistent_on(k.k->type != KEY_TYPE_inode, c,
					"inode %llu not found when deleting",
@@ -431,13 +432,15 @@ int bch2_inode_rm(struct bch_fs *c, u64 inode_nr)
			delete.v.bi_generation = cpu_to_le32(bi_generation);
		}
 
-		ret = bch2_btree_insert_at(c, NULL, NULL,
-					   BTREE_INSERT_ATOMIC|
-					   BTREE_INSERT_NOFAIL,
-					   BTREE_INSERT_ENTRY(&iter, &delete.k_i));
+		bch2_trans_update(&trans,
+				  BTREE_INSERT_ENTRY(iter, &delete.k_i));
+
+		ret = bch2_trans_commit(&trans, NULL, NULL,
+					BTREE_INSERT_ATOMIC|
+					BTREE_INSERT_NOFAIL);
	} while (ret == -EINTR);
 
-	bch2_btree_iter_unlock(&iter);
+	bch2_trans_exit(&trans);
	return ret;
 }
...
@@ -294,36 +294,43 @@ static void bch2_write_done(struct closure *cl)
 int bch2_write_index_default(struct bch_write_op *op)
 {
	struct bch_fs *c = op->c;
+	struct btree_trans trans;
+	struct btree_iter *iter;
	struct keylist *keys = &op->insert_keys;
-	struct btree_iter iter;
	int ret;
 
-	bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS,
-			     bkey_start_pos(&bch2_keylist_front(keys)->k),
-			     BTREE_ITER_INTENT);
+	BUG_ON(bch2_keylist_empty(keys));
+	bch2_verify_keylist_sorted(keys);
+
+	bch2_trans_init(&trans, c);
+
+	iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
+				   bkey_start_pos(&bch2_keylist_front(keys)->k),
+				   BTREE_ITER_INTENT);
 
	do {
		BKEY_PADDED(k) split;
 
		bkey_copy(&split.k, bch2_keylist_front(keys));
 
-		bch2_extent_trim_atomic(&split.k, &iter);
+		bch2_extent_trim_atomic(&split.k, iter);
 
-		ret = bch2_btree_insert_at(c, &op->res,
-					   op_journal_seq(op),
-					   BTREE_INSERT_NOFAIL|
-					   BTREE_INSERT_USE_RESERVE,
-					   BTREE_INSERT_ENTRY(&iter, &split.k));
+		bch2_trans_update(&trans,
+				  BTREE_INSERT_ENTRY(iter, &split.k));
+
+		ret = bch2_trans_commit(&trans, &op->res, op_journal_seq(op),
+					BTREE_INSERT_NOFAIL|
+					BTREE_INSERT_USE_RESERVE);
		if (ret)
			break;
 
-		if (bkey_cmp(iter.pos, bch2_keylist_front(keys)->k.p) < 0)
-			bch2_cut_front(iter.pos, bch2_keylist_front(keys));
+		if (bkey_cmp(iter->pos, bch2_keylist_front(keys)->k.p) < 0)
+			bch2_cut_front(iter->pos, bch2_keylist_front(keys));
		else
			bch2_keylist_pop_front(keys);
	} while (!bch2_keylist_empty(keys));
 
-	bch2_btree_iter_unlock(&iter);
+	bch2_trans_exit(&trans);
 
	return ret;
 }
@@ -1403,7 +1410,8 @@ static void bch2_rbio_error(struct bch_read_bio *rbio, int retry,
 static void bch2_rbio_narrow_crcs(struct bch_read_bio *rbio)
 {
	struct bch_fs *c = rbio->c;
-	struct btree_iter iter;
+	struct btree_trans trans;
+	struct btree_iter *iter;
	struct bkey_s_c k;
	struct bkey_i_extent *e;
	BKEY_PADDED(k) new;
@@ -1414,10 +1422,13 @@ static void bch2_rbio_narrow_crcs(struct bch_read_bio *rbio)
	if (rbio->pick.crc.compression_type)
		return;
 
-	bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, rbio->pos,
-			     BTREE_ITER_INTENT);
+	bch2_trans_init(&trans, c);
 retry:
-	k = bch2_btree_iter_peek(&iter);
+	bch2_trans_begin(&trans);
+
+	iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, rbio->pos,
+				   BTREE_ITER_INTENT);
+
+	k = bch2_btree_iter_peek(iter);
	if (IS_ERR_OR_NULL(k.k))
		goto out;
@@ -1453,15 +1464,15 @@ static void bch2_rbio_narrow_crcs(struct bch_read_bio *rbio)
	if (!bch2_extent_narrow_crcs(e, new_crc))
		goto out;
 
-	ret = bch2_btree_insert_at(c, NULL, NULL,
-				   BTREE_INSERT_ATOMIC|
-				   BTREE_INSERT_NOFAIL|
-				   BTREE_INSERT_NOWAIT,
-				   BTREE_INSERT_ENTRY(&iter, &e->k_i));
+	bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &e->k_i));
+	ret = bch2_trans_commit(&trans, NULL, NULL,
+				BTREE_INSERT_ATOMIC|
+				BTREE_INSERT_NOFAIL|
+				BTREE_INSERT_NOWAIT);
	if (ret == -EINTR)
		goto retry;
 out:
-	bch2_btree_iter_unlock(&iter);
+	bch2_trans_exit(&trans);
 }
 
 static bool should_narrow_crcs(struct bkey_s_c k,
...
@@ -825,6 +825,8 @@ int bch2_journal_read(struct bch_fs *c, struct list_head *list)
 static int bch2_extent_replay_key(struct bch_fs *c, struct bkey_i *k)
 {
+	struct btree_trans trans;
+	struct btree_iter *iter;
	/*
	 * We might cause compressed extents to be
	 * split, so we need to pass in a
@@ -833,20 +835,21 @@ static int bch2_extent_replay_key(struct bch_fs *c, struct bkey_i *k)
	struct disk_reservation disk_res =
		bch2_disk_reservation_init(c, 0);
	BKEY_PADDED(k) split;
-	struct btree_iter iter;
	int ret;
 
-	bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS,
-			     bkey_start_pos(&k->k),
-			     BTREE_ITER_INTENT);
+	bch2_trans_init(&trans, c);
+
+	iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
+				   bkey_start_pos(&k->k),
+				   BTREE_ITER_INTENT);
	do {
-		ret = bch2_btree_iter_traverse(&iter);
+		ret = bch2_btree_iter_traverse(iter);
		if (ret)
			break;
 
		bkey_copy(&split.k, k);
-		bch2_cut_front(iter.pos, &split.k);
-		bch2_extent_trim_atomic(&split.k, &iter);
+		bch2_cut_front(iter->pos, &split.k);
+		bch2_extent_trim_atomic(&split.k, iter);
 
		ret = bch2_disk_reservation_add(c, &disk_res,
						split.k.k.size *
@@ -854,13 +857,13 @@ static int bch2_extent_replay_key(struct bch_fs *c, struct bkey_i *k)
						BCH_DISK_RESERVATION_NOFAIL);
		BUG_ON(ret);
 
-		ret = bch2_btree_insert_at(c, &disk_res, NULL,
-					   BTREE_INSERT_ATOMIC|
-					   BTREE_INSERT_NOFAIL|
-					   BTREE_INSERT_JOURNAL_REPLAY,
-					   BTREE_INSERT_ENTRY(&iter, &split.k));
+		bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &split.k));
+		ret = bch2_trans_commit(&trans, &disk_res, NULL,
+					BTREE_INSERT_ATOMIC|
+					BTREE_INSERT_NOFAIL|
+					BTREE_INSERT_JOURNAL_REPLAY);
	} while ((!ret || ret == -EINTR) &&
-		 bkey_cmp(k->k.p, iter.pos));
+		 bkey_cmp(k->k.p, iter->pos));
 
	bch2_disk_reservation_put(c, &disk_res);
@@ -873,9 +876,9 @@ static int bch2_extent_replay_key(struct bch_fs *c, struct bkey_i *k)
	 * before journal replay finishes
	 */
	bch2_mark_key(c, bkey_i_to_s_c(k), false, -((s64) k->k.size),
-		      gc_pos_btree_node(iter.l[0].b),
+		      gc_pos_btree_node(iter->l[0].b),
		      NULL, 0, 0);
-	bch2_btree_iter_unlock(&iter);
+	bch2_trans_exit(&trans);
 
	return ret;
 }
...
@@ -36,25 +36,29 @@ static int drop_dev_ptrs(struct bch_fs *c, struct bkey_s k,
 static int bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
 {
+	struct btree_trans trans;
+	struct btree_iter *iter;
	struct bkey_s_c k;
	BKEY_PADDED(key) tmp;
-	struct btree_iter iter;
	int ret = 0;
 
+	bch2_trans_init(&trans, c);
+
+	iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
+				   POS_MIN, BTREE_ITER_PREFETCH);
+
	mutex_lock(&c->replicas_gc_lock);
	bch2_replicas_gc_start(c, (1 << BCH_DATA_USER)|(1 << BCH_DATA_CACHED));
 
-	bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS,
-			     POS_MIN, BTREE_ITER_PREFETCH);
-
-	while ((k = bch2_btree_iter_peek(&iter)).k &&
+	while ((k = bch2_btree_iter_peek(iter)).k &&
	       !(ret = btree_iter_err(k))) {
		if (!bkey_extent_is_data(k.k) ||
		    !bch2_extent_has_device(bkey_s_c_to_extent(k), dev_idx)) {
			ret = bch2_mark_bkey_replicas(c, k);
			if (ret)
				break;
-			bch2_btree_iter_next(&iter);
+			bch2_btree_iter_next(iter);
			continue;
		}
@@ -72,12 +76,14 @@ static int bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
		 */
		bch2_extent_normalize(c, bkey_i_to_s(&tmp.key));
 
-		iter.pos = bkey_start_pos(&tmp.key.k);
+		/* XXX not sketchy at all */
+		iter->pos = bkey_start_pos(&tmp.key.k);
+
+		bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &tmp.key));
 
-		ret = bch2_btree_insert_at(c, NULL, NULL,
-					   BTREE_INSERT_ATOMIC|
-					   BTREE_INSERT_NOFAIL,
-					   BTREE_INSERT_ENTRY(&iter, &tmp.key));
+		ret = bch2_trans_commit(&trans, NULL, NULL,
+					BTREE_INSERT_ATOMIC|
+					BTREE_INSERT_NOFAIL);
 
		/*
		 * don't want to leave ret == -EINTR, since if we raced and
@@ -90,11 +96,11 @@ static int bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
			break;
	}
 
-	bch2_btree_iter_unlock(&iter);
-
	bch2_replicas_gc_end(c, ret);
	mutex_unlock(&c->replicas_gc_lock);
 
+	bch2_trans_exit(&trans);
+
	return ret;
 }
...
@@ -54,18 +54,21 @@ struct moving_context {
 static int bch2_migrate_index_update(struct bch_write_op *op)
 {
	struct bch_fs *c = op->c;
+	struct btree_trans trans;
+	struct btree_iter *iter;
	struct migrate_write *m =
		container_of(op, struct migrate_write, op);
	struct keylist *keys = &op->insert_keys;
-	struct btree_iter iter;
	int ret = 0;
 
-	bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS,
-			     bkey_start_pos(&bch2_keylist_front(keys)->k),
-			     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+	bch2_trans_init(&trans, c);
+
+	iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
+				   bkey_start_pos(&bch2_keylist_front(keys)->k),
+				   BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
 
	while (1) {
-		struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter);
+		struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
		struct bkey_i_extent *insert, *new =
			bkey_i_to_extent(bch2_keylist_front(keys));
		BKEY_PADDED(k) _new, _insert;
@@ -74,10 +77,9 @@ static int bch2_migrate_index_update(struct bch_write_op *op)
		bool did_work = false;
		int nr;
 
-		if (btree_iter_err(k)) {
-			ret = bch2_btree_iter_unlock(&iter);
+		ret = btree_iter_err(k);
+		if (ret)
			break;
-		}
 
		if (bversion_cmp(k.k->version, new->k.version) ||
		    !bkey_extent_is_data(k.k) ||
@@ -96,7 +98,7 @@ static int bch2_migrate_index_update(struct bch_write_op *op)
		bkey_copy(&_new.k, bch2_keylist_front(keys));
		new = bkey_i_to_extent(&_new.k);
 
-		bch2_cut_front(iter.pos, &insert->k_i);
+		bch2_cut_front(iter->pos, &insert->k_i);
		bch2_cut_back(new->k.p, &insert->k);
		bch2_cut_back(insert->k.p, &new->k);
@@ -138,12 +140,6 @@ static int bch2_migrate_index_update(struct bch_write_op *op)
		if (insert->k.size < k.k->size &&
		    bch2_extent_is_compressed(k) &&
		    nr > 0) {
-			/*
-			 * can't call bch2_disk_reservation_add() with btree
-			 * locks held, at least not without a song and dance
-			 */
-			bch2_btree_iter_unlock(&iter);
-
			ret = bch2_disk_reservation_add(c, &op->res,
					keylist_sectors(keys) * nr, 0);
			if (ret)
@@ -153,13 +149,15 @@ static int bch2_migrate_index_update(struct bch_write_op *op)
			goto next;
		}
 
-		ret = bch2_btree_insert_at(c, &op->res,
+		bch2_trans_update(&trans,
+				  BTREE_INSERT_ENTRY(iter, &insert->k_i));
+
+		ret = bch2_trans_commit(&trans, &op->res,
				op_journal_seq(op),
				BTREE_INSERT_ATOMIC|
				BTREE_INSERT_NOFAIL|
				BTREE_INSERT_USE_RESERVE|
-				m->data_opts.btree_insert_flags,
-				BTREE_INSERT_ENTRY(&iter, &insert->k_i));
+				m->data_opts.btree_insert_flags);
		if (!ret)
			atomic_long_inc(&c->extent_migrate_done);
		if (ret == -EINTR)
@@ -167,25 +165,25 @@ static int bch2_migrate_index_update(struct bch_write_op *op)
		if (ret)
			break;
 next:
-		while (bkey_cmp(iter.pos, bch2_keylist_front(keys)->k.p) >= 0) {
+		while (bkey_cmp(iter->pos, bch2_keylist_front(keys)->k.p) >= 0) {
			bch2_keylist_pop_front(keys);
			if (bch2_keylist_empty(keys))
				goto out;
		}
 
-		bch2_cut_front(iter.pos, bch2_keylist_front(keys));
+		bch2_cut_front(iter->pos, bch2_keylist_front(keys));
		continue;
 nomatch:
		if (m->ctxt)
-			atomic64_add(k.k->p.offset - iter.pos.offset,
+			atomic64_add(k.k->p.offset - iter->pos.offset,
				     &m->ctxt->stats->sectors_raced);
		atomic_long_inc(&c->extent_migrate_raced);
		trace_move_race(&new->k);
-		bch2_btree_iter_next_slot(&iter);
+		bch2_btree_iter_next_slot(iter);
		goto next;
	}
 out:
-	bch2_btree_iter_unlock(&iter);
+	bch2_trans_exit(&trans);
	return ret;
 }
...
@@ -708,7 +708,8 @@ static int bch2_set_quota(struct super_block *sb, struct kqid qid,
			  struct qc_dqblk *qdq)
 {
	struct bch_fs *c = sb->s_fs_info;
-	struct btree_iter iter;
+	struct btree_trans trans;
+	struct btree_iter *iter;
	struct bkey_s_c k;
	struct bkey_i_quota new_quota;
	int ret;
@@ -719,9 +720,11 @@ static int bch2_set_quota(struct super_block *sb, struct kqid qid,
	bkey_quota_init(&new_quota.k_i);
	new_quota.k.p = POS(qid.type, from_kqid(&init_user_ns, qid));
 
-	bch2_btree_iter_init(&iter, c, BTREE_ID_QUOTAS, new_quota.k.p,
-			     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
-	k = bch2_btree_iter_peek_slot(&iter);
+	bch2_trans_init(&trans, c);
+
+	iter = bch2_trans_get_iter(&trans, BTREE_ID_QUOTAS, new_quota.k.p,
+				   BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+
+	k = bch2_btree_iter_peek_slot(iter);
 
	ret = btree_iter_err(k);
	if (unlikely(ret))
@@ -743,9 +746,11 @@ static int bch2_set_quota(struct super_block *sb, struct kqid qid,
	if (qdq->d_fieldmask & QC_INO_HARD)
		new_quota.v.c[Q_INO].hardlimit = cpu_to_le64(qdq->d_ino_hardlimit);
 
-	ret = bch2_btree_insert_at(c, NULL, NULL, 0,
-				   BTREE_INSERT_ENTRY(&iter, &new_quota.k_i));
-	bch2_btree_iter_unlock(&iter);
+	bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &new_quota.k_i));
+
+	ret = bch2_trans_commit(&trans, NULL, NULL, 0);
+	bch2_trans_exit(&trans);
 
	if (ret)
		return ret;
...
@@ -213,10 +213,10 @@ int bch2_hash_needs_whiteout(struct btree_trans *trans,
 }
 
 static __always_inline
-int __bch2_hash_set(struct btree_trans *trans,
-		    const struct bch_hash_desc desc,
-		    const struct bch_hash_info *info,
-		    u64 inode, struct bkey_i *insert, int flags)
+int bch2_hash_set(struct btree_trans *trans,
+		  const struct bch_hash_desc desc,
+		  const struct bch_hash_info *info,
+		  u64 inode, struct bkey_i *insert, int flags)
 {
 	struct btree_iter *iter, *slot = NULL;
 	struct bkey_s_c k;
@@ -267,17 +267,6 @@ int __bch2_hash_set(struct btree_trans *trans,
 	return 0;
 }
 
-static inline int bch2_hash_set(const struct bch_hash_desc desc,
-				const struct bch_hash_info *info,
-				struct bch_fs *c, u64 inode,
-				u64 *journal_seq,
-				struct bkey_i *insert, int flags)
-{
-	return bch2_trans_do(c, journal_seq, flags|BTREE_INSERT_ATOMIC,
-			     __bch2_hash_set(&trans, desc, info,
-					     inode, insert, flags));
-}
-
 static __always_inline
 int bch2_hash_delete_at(struct btree_trans *trans,
 			const struct bch_hash_desc desc,
...
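With the non-transactional wrapper deleted, bch2_hash_set() is now the transactional entry point, and callers that do not already hold a transaction are expected to wrap it themselves. A caller wanting the old one-shot behaviour could use bch2_trans_do(), exactly as the removed wrapper did; a sketch (example_hash_set is a hypothetical name, not part of this commit):

	/* mirrors the deleted wrapper, against the renamed function: */
	static inline int example_hash_set(const struct bch_hash_desc desc,
					   const struct bch_hash_info *info,
					   struct bch_fs *c, u64 inode,
					   u64 *journal_seq,
					   struct bkey_i *insert, int flags)
	{
		/* bch2_trans_do() declares and commits 'trans' itself: */
		return bch2_trans_do(c, journal_seq, flags|BTREE_INSERT_ATOMIC,
				     bch2_hash_set(&trans, desc, info,
						   inode, insert, flags));
	}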
@@ -28,57 +28,63 @@ static void delete_test_keys(struct bch_fs *c)
 static void test_delete(struct bch_fs *c, u64 nr)
 {
-	struct btree_iter iter;
+	struct btree_trans trans;
+	struct btree_iter *iter;
 	struct bkey_i_cookie k;
 	int ret;
 
 	bkey_cookie_init(&k.k_i);
 
-	bch2_btree_iter_init(&iter, c, BTREE_ID_DIRENTS, k.k.p,
-			     BTREE_ITER_INTENT);
+	bch2_trans_init(&trans, c);
+
+	iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS, k.k.p,
+				   BTREE_ITER_INTENT);
 
-	ret = bch2_btree_iter_traverse(&iter);
+	ret = bch2_btree_iter_traverse(iter);
 	BUG_ON(ret);
 
-	ret = bch2_btree_insert_at(c, NULL, NULL, 0,
-				   BTREE_INSERT_ENTRY(&iter, &k.k_i));
+	bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &k.k_i));
+	ret = bch2_trans_commit(&trans, NULL, NULL, 0);
 	BUG_ON(ret);
 
 	pr_info("deleting once");
-	ret = bch2_btree_delete_at(&iter, 0);
+	ret = bch2_btree_delete_at(&trans, iter, 0);
 	BUG_ON(ret);
 
 	pr_info("deleting twice");
-	ret = bch2_btree_delete_at(&iter, 0);
+	ret = bch2_btree_delete_at(&trans, iter, 0);
 	BUG_ON(ret);
 
-	bch2_btree_iter_unlock(&iter);
+	bch2_trans_exit(&trans);
 }
 
 static void test_delete_written(struct bch_fs *c, u64 nr)
 {
-	struct btree_iter iter;
+	struct btree_trans trans;
+	struct btree_iter *iter;
 	struct bkey_i_cookie k;
 	int ret;
 
 	bkey_cookie_init(&k.k_i);
 
-	bch2_btree_iter_init(&iter, c, BTREE_ID_DIRENTS, k.k.p,
-			     BTREE_ITER_INTENT);
+	bch2_trans_init(&trans, c);
+
+	iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS, k.k.p,
+				   BTREE_ITER_INTENT);
 
-	ret = bch2_btree_iter_traverse(&iter);
+	ret = bch2_btree_iter_traverse(iter);
 	BUG_ON(ret);
 
-	ret = bch2_btree_insert_at(c, NULL, NULL, 0,
-				   BTREE_INSERT_ENTRY(&iter, &k.k_i));
+	bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &k.k_i));
+	ret = bch2_trans_commit(&trans, NULL, NULL, 0);
 	BUG_ON(ret);
 
 	bch2_journal_flush_all_pins(&c->journal);
 
-	ret = bch2_btree_delete_at(&iter, 0);
+	ret = bch2_btree_delete_at(&trans, iter, 0);
 	BUG_ON(ret);
 
-	bch2_btree_iter_unlock(&iter);
+	bch2_trans_exit(&trans);
 }
 
 static void test_iterate(struct bch_fs *c, u64 nr)
@@ -415,26 +421,29 @@ static void rand_mixed(struct bch_fs *c, u64 nr)
 	u64 i;
 
 	for (i = 0; i < nr; i++) {
-		struct btree_iter iter;
+		struct btree_trans trans;
+		struct btree_iter *iter;
 		struct bkey_s_c k;
 
-		bch2_btree_iter_init(&iter, c, BTREE_ID_DIRENTS,
-				     POS(0, test_rand()), 0);
+		bch2_trans_init(&trans, c);
+
+		iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS,
+					   POS(0, test_rand()), 0);
 
-		k = bch2_btree_iter_peek(&iter);
+		k = bch2_btree_iter_peek(iter);
 
 		if (!(i & 3) && k.k) {
 			struct bkey_i_cookie k;
 
 			bkey_cookie_init(&k.k_i);
-			k.k.p = iter.pos;
+			k.k.p = iter->pos;
 
-			ret = bch2_btree_insert_at(c, NULL, NULL, 0,
-					BTREE_INSERT_ENTRY(&iter, &k.k_i));
+			bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &k.k_i));
+			ret = bch2_trans_commit(&trans, NULL, NULL, 0);
 			BUG_ON(ret);
 		}
 
-		bch2_btree_iter_unlock(&iter);
+		bch2_trans_exit(&trans);
 	}
 }
@@ -457,7 +466,8 @@ static void rand_delete(struct bch_fs *c, u64 nr)
 static void seq_insert(struct bch_fs *c, u64 nr)
 {
-	struct btree_iter iter;
+	struct btree_trans trans;
+	struct btree_iter *iter;
 	struct bkey_s_c k;
 	struct bkey_i_cookie insert;
 	int ret;
@@ -465,18 +475,22 @@ static void seq_insert(struct bch_fs *c, u64 nr)
 
 	bkey_cookie_init(&insert.k_i);
 
-	for_each_btree_key(&iter, c, BTREE_ID_DIRENTS, POS_MIN,
-			   BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k) {
-		insert.k.p = iter.pos;
+	bch2_trans_init(&trans, c);
+
+	iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS, POS_MIN,
+				   BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+
+	for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, k) {
+		insert.k.p = iter->pos;
 
-		ret = bch2_btree_insert_at(c, NULL, NULL, 0,
-				BTREE_INSERT_ENTRY(&iter, &insert.k_i));
+		bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &insert.k_i));
+		ret = bch2_trans_commit(&trans, NULL, NULL, 0);
 		BUG_ON(ret);
 
 		if (++i == nr)
 			break;
 	}
-	bch2_btree_iter_unlock(&iter);
+	bch2_trans_exit(&trans);
 }
 
 static void seq_lookup(struct bch_fs *c, u64 nr)
@@ -491,21 +505,26 @@ static void seq_lookup(struct bch_fs *c, u64 nr)
 static void seq_overwrite(struct bch_fs *c, u64 nr)
 {
-	struct btree_iter iter;
+	struct btree_trans trans;
+	struct btree_iter *iter;
 	struct bkey_s_c k;
 	int ret;
 
-	for_each_btree_key(&iter, c, BTREE_ID_DIRENTS, POS_MIN,
-			   BTREE_ITER_INTENT, k) {
+	bch2_trans_init(&trans, c);
+
+	iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS, POS_MIN,
+				   BTREE_ITER_INTENT);
+
+	for_each_btree_key_continue(iter, 0, k) {
 		struct bkey_i_cookie u;
 
 		bkey_reassemble(&u.k_i, k);
 
-		ret = bch2_btree_insert_at(c, NULL, NULL, 0,
-				BTREE_INSERT_ENTRY(&iter, &u.k_i));
+		bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &u.k_i));
+		ret = bch2_trans_commit(&trans, NULL, NULL, 0);
 		BUG_ON(ret);
 	}
-	bch2_btree_iter_unlock(&iter);
+	bch2_trans_exit(&trans);
 }
 
 static void seq_delete(struct bch_fs *c, u64 nr)
...
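The seq_insert() and seq_overwrite() hunks also show how plain iteration changes: for_each_btree_key(&iter, c, id, pos, flags, k), which initialized its own stack iterator, is replaced by getting an iterator from the transaction and continuing from it with for_each_btree_key_continue(iter, flags, k). The loop skeleton, condensed (example_rewrite_all is a hypothetical name; error handling reduced to BUG_ON() as in the tests):

	static void example_rewrite_all(struct bch_fs *c)
	{
		struct btree_trans trans;
		struct btree_iter *iter;
		struct bkey_s_c k;
		int ret;

		bch2_trans_init(&trans, c);

		iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS, POS_MIN,
					   BTREE_ITER_INTENT);

		for_each_btree_key_continue(iter, 0, k) {
			struct bkey_i_cookie u;

			bkey_reassemble(&u.k_i, k);

			/* one commit per key, as in seq_overwrite() above: */
			bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &u.k_i));
			ret = bch2_trans_commit(&trans, NULL, NULL, 0);
			BUG_ON(ret);
		}

		bch2_trans_exit(&trans);
	}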
@@ -180,7 +180,7 @@ int bch2_xattr_set(struct btree_trans *trans, u64 inum,
 		memcpy(xattr->v.x_name, name, namelen);
 		memcpy(xattr_val(&xattr->v), value, size);
 
-		ret = __bch2_hash_set(trans, bch2_xattr_hash_desc, hash_info,
+		ret = bch2_hash_set(trans, bch2_xattr_hash_desc, hash_info,
 				inum, &xattr->k_i,
 				(flags & XATTR_CREATE ? BCH_HASH_SET_MUST_CREATE : 0)|
 				(flags & XATTR_REPLACE ? BCH_HASH_SET_MUST_REPLACE : 0));
...