Commit 80eab7a7 authored by Kent Overstreet

bcachefs: for_each_btree_key() now declares loop iter

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent c47e8bfb
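
For context, the change is that for_each_btree_key() and its variants now declare the btree iterator and key inside the macro itself, so call sites drop their local struct btree_iter / struct bkey_s_c declarations and many collapse into a single bch2_trans_run() expression. A minimal before/after sketch of a caller, assuming the usual prefetching iteration over the alloc btree (example_read_alloc_* are hypothetical names, not functions touched by this patch):

/* Before this commit: the caller declared the iterator and the key. */
int example_read_alloc_old(struct bch_fs *c)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
				 BTREE_ITER_PREFETCH, k, ({
		/* per-key work; the loop body evaluates to an int error code */
		0;
	}));
	bch2_trans_put(trans);
	return ret;
}

/* After this commit: the macro declares iter and k itself; the names
 * passed in are simply used for those internal declarations. */
int example_read_alloc_new(struct bch_fs *c)
{
	return bch2_trans_run(c,
		for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
				   BTREE_ITER_PREFETCH, k, ({
			/* per-key work; still evaluates to an int */
			0;
		})));
}

Many of the conversions below additionally replace an explicit bch2_trans_get()/bch2_trans_put() pair with bch2_trans_run(), as in the sketch.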
......@@ -534,14 +534,8 @@ void bch2_bucket_gens_to_text(struct printbuf *out, struct bch_fs *c, struct bke
int bch2_bucket_gens_init(struct bch_fs *c)
{
struct btree_trans *trans = bch2_trans_get(c);
struct btree_iter iter;
struct bkey_s_c k;
struct bch_alloc_v4 a;
struct bkey_i_bucket_gens g;
bool have_bucket_gens_key = false;
unsigned offset;
struct bpos pos;
u8 gen;
int ret;
ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
......@@ -553,8 +547,10 @@ int bch2_bucket_gens_init(struct bch_fs *c)
if (!bch2_dev_bucket_exists(c, k.k->p))
continue;
gen = bch2_alloc_to_v4(k, &a)->gen;
pos = alloc_gens_pos(iter.pos, &offset);
struct bch_alloc_v4 a;
u8 gen = bch2_alloc_to_v4(k, &a)->gen;
unsigned offset;
struct bpos pos = alloc_gens_pos(iter.pos, &offset);
if (have_bucket_gens_key && bkey_cmp(iter.pos, pos)) {
ret = commit_do(trans, NULL, NULL,
......@@ -589,17 +585,11 @@ int bch2_bucket_gens_init(struct bch_fs *c)
int bch2_alloc_read(struct bch_fs *c)
{
struct btree_trans *trans = bch2_trans_get(c);
struct btree_iter iter;
struct bkey_s_c k;
struct bch_dev *ca;
int ret;
down_read(&c->gc_lock);
if (c->sb.version_upgrade_complete >= bcachefs_metadata_version_bucket_gens) {
const struct bch_bucket_gens *g;
u64 b;
ret = for_each_btree_key(trans, iter, BTREE_ID_bucket_gens, POS_MIN,
BTREE_ITER_PREFETCH, k, ({
u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
......@@ -608,7 +598,7 @@ int bch2_alloc_read(struct bch_fs *c)
if (k.k->type != KEY_TYPE_bucket_gens)
continue;
g = bkey_s_c_to_bucket_gens(k).v;
const struct bch_bucket_gens *g = bkey_s_c_to_bucket_gens(k).v;
/*
* Not a fsck error because this is checked/repaired by
......@@ -617,17 +607,15 @@ int bch2_alloc_read(struct bch_fs *c)
if (!bch2_dev_exists2(c, k.k->p.inode))
continue;
ca = bch_dev_bkey_exists(c, k.k->p.inode);
struct bch_dev *ca = bch_dev_bkey_exists(c, k.k->p.inode);
for (b = max_t(u64, ca->mi.first_bucket, start);
for (u64 b = max_t(u64, ca->mi.first_bucket, start);
b < min_t(u64, ca->mi.nbuckets, end);
b++)
*bucket_gen(ca, b) = g->gens[b & KEY_TYPE_BUCKET_GENS_MASK];
0;
}));
} else {
struct bch_alloc_v4 a;
ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
BTREE_ITER_PREFETCH, k, ({
/*
......@@ -637,8 +625,9 @@ int bch2_alloc_read(struct bch_fs *c)
if (!bch2_dev_bucket_exists(c, k.k->p))
continue;
ca = bch_dev_bkey_exists(c, k.k->p.inode);
struct bch_dev *ca = bch_dev_bkey_exists(c, k.k->p.inode);
struct bch_alloc_v4 a;
*bucket_gen(ca, k.k->p.offset) = bch2_alloc_to_v4(k, &a)->gen;
0;
}));
......@@ -1549,9 +1538,6 @@ static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
int bch2_check_alloc_to_lru_refs(struct bch_fs *c)
{
struct btree_iter iter;
struct bkey_s_c k;
int ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
POS_MIN, BTREE_ITER_PREFETCH, k,
......@@ -1680,8 +1666,6 @@ static int bch2_discard_one_bucket(struct btree_trans *trans,
static void bch2_do_discards_work(struct work_struct *work)
{
struct bch_fs *c = container_of(work, struct bch_fs, discard_work);
struct btree_iter iter;
struct bkey_s_c k;
u64 seen = 0, open = 0, need_journal_commit = 0, discarded = 0;
struct bpos discard_pos_done = POS_MAX;
int ret;
......@@ -1805,8 +1789,6 @@ static void bch2_do_invalidates_work(struct work_struct *work)
struct bch_fs *c = container_of(work, struct bch_fs, invalidate_work);
struct bch_dev *ca;
struct btree_trans *trans = bch2_trans_get(c);
struct btree_iter iter;
struct bkey_s_c k;
unsigned i;
int ret = 0;
......
......@@ -391,9 +391,6 @@ static int bch2_check_btree_backpointer(struct btree_trans *trans, struct btree_
/* verify that every backpointer has a corresponding alloc key */
int bch2_check_btree_backpointers(struct bch_fs *c)
{
struct btree_iter iter;
struct bkey_s_c k;
int ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter,
BTREE_ID_backpointers, POS_MIN, 0, k,
......@@ -821,8 +818,6 @@ static int bch2_check_backpointers_to_extents_pass(struct btree_trans *trans,
struct bbpos start,
struct bbpos end)
{
struct btree_iter iter;
struct bkey_s_c k;
struct bpos last_flushed_pos = SPOS_MAX;
return for_each_btree_key_commit(trans, iter, BTREE_ID_backpointers,
......
......@@ -1479,8 +1479,6 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
static int bch2_gc_alloc_done(struct bch_fs *c, bool metadata_only)
{
struct btree_trans *trans = bch2_trans_get(c);
struct btree_iter iter;
struct bkey_s_c k;
struct bch_dev *ca;
unsigned i;
int ret = 0;
......@@ -1507,8 +1505,6 @@ static int bch2_gc_alloc_start(struct bch_fs *c, bool metadata_only)
{
struct bch_dev *ca;
struct btree_trans *trans = bch2_trans_get(c);
struct btree_iter iter;
struct bkey_s_c k;
struct bucket *g;
struct bch_alloc_v4 a_convert;
const struct bch_alloc_v4 *a;
......@@ -1632,43 +1628,31 @@ static int bch2_gc_write_reflink_key(struct btree_trans *trans,
static int bch2_gc_reflink_done(struct bch_fs *c, bool metadata_only)
{
struct btree_trans *trans;
struct btree_iter iter;
struct bkey_s_c k;
size_t idx = 0;
int ret = 0;
if (metadata_only)
return 0;
trans = bch2_trans_get(c);
ret = for_each_btree_key_commit(trans, iter,
int ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter,
BTREE_ID_reflink, POS_MIN,
BTREE_ITER_PREFETCH, k,
NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
bch2_gc_write_reflink_key(trans, &iter, k, &idx));
bch2_gc_write_reflink_key(trans, &iter, k, &idx)));
c->reflink_gc_nr = 0;
bch2_trans_put(trans);
bch_err_fn(c, ret);
return ret;
}
static int bch2_gc_reflink_start(struct bch_fs *c,
bool metadata_only)
{
struct btree_iter iter;
struct bkey_s_c k;
struct reflink_gc *r;
int ret = 0;
if (metadata_only)
return 0;
c->reflink_gc_nr = 0;
ret = bch2_trans_run(c,
int ret = bch2_trans_run(c,
for_each_btree_key(trans, iter, BTREE_ID_reflink, POS_MIN,
BTREE_ITER_PREFETCH, k, ({
const __le64 *refcount = bkey_refcount_c(k);
......@@ -1676,8 +1660,8 @@ static int bch2_gc_reflink_start(struct bch_fs *c,
if (!refcount)
continue;
r = genradix_ptr_alloc(&c->reflink_gc_table, c->reflink_gc_nr++,
GFP_KERNEL);
struct reflink_gc *r = genradix_ptr_alloc(&c->reflink_gc_table,
c->reflink_gc_nr++, GFP_KERNEL);
if (!r) {
ret = -BCH_ERR_ENOMEM_gc_reflink_start;
break;
......@@ -1757,24 +1741,15 @@ static int bch2_gc_write_stripes_key(struct btree_trans *trans,
static int bch2_gc_stripes_done(struct bch_fs *c, bool metadata_only)
{
struct btree_trans *trans;
struct btree_iter iter;
struct bkey_s_c k;
int ret = 0;
if (metadata_only)
return 0;
trans = bch2_trans_get(c);
ret = for_each_btree_key_commit(trans, iter,
return bch2_trans_run(c,
for_each_btree_key_commit(trans, iter,
BTREE_ID_stripes, POS_MIN,
BTREE_ITER_PREFETCH, k,
NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
bch2_gc_write_stripes_key(trans, &iter, k));
bch2_trans_put(trans);
return ret;
bch2_gc_write_stripes_key(trans, &iter, k)));
}
static void bch2_gc_stripes_reset(struct bch_fs *c, bool metadata_only)
......@@ -1958,8 +1933,6 @@ static int bch2_alloc_write_oldest_gen(struct btree_trans *trans, struct btree_i
int bch2_gc_gens(struct bch_fs *c)
{
struct btree_trans *trans;
struct btree_iter iter;
struct bkey_s_c k;
struct bch_dev *ca;
u64 b, start_time = local_clock();
unsigned i;
......
......@@ -704,6 +704,8 @@ transaction_restart: \
#define for_each_btree_key_upto(_trans, _iter, _btree_id, \
_start, _end, _flags, _k, _do) \
({ \
struct btree_iter _iter; \
struct bkey_s_c _k; \
int _ret3 = 0; \
\
bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
......@@ -732,6 +734,8 @@ transaction_restart: \
#define for_each_btree_key_reverse(_trans, _iter, _btree_id, \
_start, _flags, _k, _do) \
({ \
struct btree_iter _iter; \
struct bkey_s_c _k; \
int _ret3 = 0; \
\
bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
......
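
Because _iter and _k now live inside the macro's statement expression, they go out of scope as soon as the loop finishes; call sites that used to read iter.pos after the loop (see the debug.c hunks below) now capture the position inside the loop body instead. A condensed sketch of the new macro shape, with restart handling and body evaluation elided (for_each_btree_key_sketch is a hypothetical name, not the real macro):

#define for_each_btree_key_sketch(_trans, _iter, _btree_id,		\
				   _start, _flags, _k, _do)		\
({									\
	/* the iterator and key are scoped to this expression,		\
	 * so the caller no longer declares them */			\
	struct btree_iter _iter;					\
	struct bkey_s_c _k;						\
	int _ret3 = 0;							\
									\
	bch2_trans_iter_init((_trans), &(_iter), (_btree_id),		\
			     (_start), (_flags));			\
	/* ... peek each key into _k, evaluate (_do),			\
	 * handle transaction restarts ... */				\
	bch2_trans_iter_exit((_trans), &(_iter));			\
	_ret3;								\
})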
......@@ -366,36 +366,23 @@ static ssize_t bch2_read_btree(struct file *file, char __user *buf,
size_t size, loff_t *ppos)
{
struct dump_iter *i = file->private_data;
struct btree_trans *trans;
struct btree_iter iter;
struct bkey_s_c k;
ssize_t ret;
i->ubuf = buf;
i->size = size;
i->ret = 0;
ret = flush_buf(i);
if (ret)
return ret;
trans = bch2_trans_get(i->c);
ret = for_each_btree_key(trans, iter, i->id, i->from,
return flush_buf(i) ?:
bch2_trans_run(i->c,
for_each_btree_key(trans, iter, i->id, i->from,
BTREE_ITER_PREFETCH|
BTREE_ITER_ALL_SNAPSHOTS, k, ({
bch2_bkey_val_to_text(&i->buf, i->c, k);
prt_newline(&i->buf);
bch2_trans_unlock(trans);
i->from = bpos_successor(iter.pos);
flush_buf(i);
}));
i->from = iter.pos;
bch2_trans_put(trans);
if (!ret)
ret = flush_buf(i);
return ret ?: i->ret;
}))) ?:
i->ret;
}
static const struct file_operations btree_debug_ops = {
......@@ -463,22 +450,14 @@ static ssize_t bch2_read_bfloat_failed(struct file *file, char __user *buf,
size_t size, loff_t *ppos)
{
struct dump_iter *i = file->private_data;
struct btree_trans *trans;
struct btree_iter iter;
struct bkey_s_c k;
ssize_t ret;
i->ubuf = buf;
i->size = size;
i->ret = 0;
ret = flush_buf(i);
if (ret)
return ret;
trans = bch2_trans_get(i->c);
ret = for_each_btree_key(trans, iter, i->id, i->from,
return flush_buf(i) ?:
bch2_trans_run(i->c,
for_each_btree_key(trans, iter, i->id, i->from,
BTREE_ITER_PREFETCH|
BTREE_ITER_ALL_SNAPSHOTS, k, ({
struct btree_path_level *l = &iter.path->l[0];
......@@ -492,16 +471,10 @@ static ssize_t bch2_read_bfloat_failed(struct file *file, char __user *buf,
bch2_bfloat_to_text(&i->buf, l->b, _k);
bch2_trans_unlock(trans);
i->from = bpos_successor(iter.pos);
flush_buf(i);
}));
i->from = iter.pos;
bch2_trans_put(trans);
if (!ret)
ret = flush_buf(i);
return ret ?: i->ret;
}))) ?:
i->ret;
}
static const struct file_operations bfloat_failed_debug_ops = {
......
......@@ -1826,14 +1826,7 @@ void bch2_fs_ec_flush(struct bch_fs *c)
int bch2_stripes_read(struct bch_fs *c)
{
struct btree_iter iter;
struct bkey_s_c k;
const struct bch_stripe *s;
struct stripe *m;
unsigned i;
int ret;
ret = bch2_trans_run(c,
int ret = bch2_trans_run(c,
for_each_btree_key(trans, iter, BTREE_ID_stripes, POS_MIN,
BTREE_ITER_PREFETCH, k, ({
if (k.k->type != KEY_TYPE_stripe)
......@@ -1843,16 +1836,16 @@ int bch2_stripes_read(struct bch_fs *c)
if (ret)
break;
s = bkey_s_c_to_stripe(k).v;
const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
m = genradix_ptr(&c->stripes, k.k->p.offset);
struct stripe *m = genradix_ptr(&c->stripes, k.k->p.offset);
m->sectors = le16_to_cpu(s->sectors);
m->algorithm = s->algorithm;
m->nr_blocks = s->nr_blocks;
m->nr_redundant = s->nr_redundant;
m->blocks_nonempty = 0;
for (i = 0; i < s->nr_blocks; i++)
for (unsigned i = 0; i < s->nr_blocks; i++)
m->blocks_nonempty += !!stripe_blockcount_get(s, i);
bch2_stripes_heap_insert(c, m, k.k->p.offset);
......
......@@ -27,8 +27,6 @@
static s64 bch2_count_inode_sectors(struct btree_trans *trans, u64 inum,
u32 snapshot)
{
struct btree_iter iter;
struct bkey_s_c k;
u64 sectors = 0;
int ret = for_each_btree_key_upto(trans, iter, BTREE_ID_extents,
......@@ -46,8 +44,6 @@ static s64 bch2_count_inode_sectors(struct btree_trans *trans, u64 inum,
static s64 bch2_count_subdirs(struct btree_trans *trans, u64 inum,
u32 snapshot)
{
struct btree_iter iter;
struct bkey_s_c k;
u64 subdirs = 0;
int ret = for_each_btree_key_upto(trans, iter, BTREE_ID_dirents,
......@@ -978,10 +974,8 @@ int bch2_check_inodes(struct bch_fs *c)
{
bool full = c->opts.fsck;
struct btree_trans *trans = bch2_trans_get(c);
struct btree_iter iter;
struct bch_inode_unpacked prev = { 0 };
struct snapshots_seen s;
struct bkey_s_c k;
int ret;
snapshots_seen_init(&s);
......@@ -1424,8 +1418,6 @@ int bch2_check_extents(struct bch_fs *c)
struct inode_walker w = inode_walker_init();
struct snapshots_seen s;
struct btree_trans *trans = bch2_trans_get(c);
struct btree_iter iter;
struct bkey_s_c k;
struct extent_ends extent_ends;
struct disk_reservation res = { 0 };
int ret = 0;
......@@ -1457,8 +1449,6 @@ int bch2_check_extents(struct bch_fs *c)
int bch2_check_indirect_extents(struct bch_fs *c)
{
struct btree_trans *trans = bch2_trans_get(c);
struct btree_iter iter;
struct bkey_s_c k;
struct disk_reservation res = { 0 };
int ret = 0;
......@@ -1827,8 +1817,6 @@ int bch2_check_dirents(struct bch_fs *c)
struct snapshots_seen s;
struct bch_hash_info hash_info;
struct btree_trans *trans = bch2_trans_get(c);
struct btree_iter iter;
struct bkey_s_c k;
int ret = 0;
snapshots_seen_init(&s);
......@@ -1892,8 +1880,6 @@ int bch2_check_xattrs(struct bch_fs *c)
{
struct inode_walker inode = inode_walker_init();
struct bch_hash_info hash_info;
struct btree_iter iter;
struct bkey_s_c k;
int ret = 0;
ret = bch2_trans_run(c,
......@@ -2220,10 +2206,6 @@ static int check_nlinks_find_hardlinks(struct bch_fs *c,
struct nlink_table *t,
u64 start, u64 *end)
{
struct btree_iter iter;
struct bkey_s_c k;
struct bch_inode_unpacked u;
int ret = bch2_trans_run(c,
for_each_btree_key(trans, iter, BTREE_ID_inodes,
POS(0, start),
......@@ -2234,6 +2216,7 @@ static int check_nlinks_find_hardlinks(struct bch_fs *c,
continue;
/* Should never fail, checked by bch2_inode_invalid: */
struct bch_inode_unpacked u;
BUG_ON(bch2_inode_unpack(k, &u));
/*
......@@ -2264,9 +2247,6 @@ static int check_nlinks_walk_dirents(struct bch_fs *c, struct nlink_table *links
u64 range_start, u64 range_end)
{
struct snapshots_seen s;
struct btree_iter iter;
struct bkey_s_c k;
struct bkey_s_c_dirent d;
snapshots_seen_init(&s);
......@@ -2279,16 +2259,14 @@ static int check_nlinks_walk_dirents(struct bch_fs *c, struct nlink_table *links
if (ret)
break;
switch (k.k->type) {
case KEY_TYPE_dirent:
d = bkey_s_c_to_dirent(k);
if (k.k->type == KEY_TYPE_dirent) {
struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
if (d.v->d_type != DT_DIR &&
d.v->d_type != DT_SUBVOL)
inc_link(c, &s, links, range_start, range_end,
le64_to_cpu(d.v->d_inum),
bch2_snapshot_equiv(c, d.k->p.snapshot));
break;
}
0;
})));
......@@ -2346,12 +2324,9 @@ static int check_nlinks_update_hardlinks(struct bch_fs *c,
struct nlink_table *links,
u64 range_start, u64 range_end)
{
struct btree_iter iter;
struct bkey_s_c k;
size_t idx = 0;
int ret = 0;
ret = bch2_trans_run(c,
int ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter, BTREE_ID_inodes,
POS(0, range_start),
BTREE_ITER_INTENT|BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
......@@ -2427,14 +2402,10 @@ static int fix_reflink_p_key(struct btree_trans *trans, struct btree_iter *iter,
int bch2_fix_reflink_p(struct bch_fs *c)
{
struct btree_iter iter;
struct bkey_s_c k;
int ret;
if (c->sb.version >= bcachefs_metadata_version_reflink_p_fix)
return 0;
ret = bch2_trans_run(c,
int ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter,
BTREE_ID_extents, POS_MIN,
BTREE_ITER_INTENT|BTREE_ITER_PREFETCH|
......
......@@ -1155,8 +1155,6 @@ static int may_delete_deleted_inode(struct btree_trans *trans,
int bch2_delete_dead_inodes(struct bch_fs *c)
{
struct btree_trans *trans = bch2_trans_get(c);
struct btree_iter iter;
struct bkey_s_c k;
bool need_another_pass;
int ret;
again:
......@@ -1200,6 +1198,5 @@ int bch2_delete_dead_inodes(struct bch_fs *c)
}
err:
bch2_trans_put(trans);
bch_err_fn(c, ret);
return ret;
}
......@@ -1166,9 +1166,7 @@ static void bch2_nocow_write_convert_unwritten(struct bch_write_op *op)
{
struct bch_fs *c = op->c;
struct btree_trans *trans = bch2_trans_get(c);
struct btree_iter iter;
struct bkey_i *orig;
struct bkey_s_c k;
int ret;
for_each_keylist_key(&op->insert_keys, orig) {
......
......@@ -54,11 +54,7 @@ static int resume_logged_op(struct btree_trans *trans, struct btree_iter *iter,
int bch2_resume_logged_ops(struct bch_fs *c)
{
struct btree_iter iter;
struct bkey_s_c k;
int ret;
ret = bch2_trans_run(c,
int ret = bch2_trans_run(c,
for_each_btree_key(trans, iter,
BTREE_ID_logged_ops, POS_MIN,
BTREE_ITER_PREFETCH, k,
......
......@@ -147,12 +147,8 @@ static int bch2_check_lru_key(struct btree_trans *trans,
int bch2_check_lrus(struct bch_fs *c)
{
struct btree_iter iter;
struct bkey_s_c k;
struct bpos last_flushed_pos = POS_MIN;
int ret = 0;
ret = bch2_trans_run(c,
int ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter,
BTREE_ID_lru, POS_MIN, BTREE_ITER_PREFETCH, k,
NULL, NULL, BCH_TRANS_COMMIT_no_enospc|BCH_TRANS_COMMIT_lazy_rw,
......
......@@ -79,8 +79,6 @@ static int bch2_dev_usrdata_drop_key(struct btree_trans *trans,
static int bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
{
struct btree_trans *trans = bch2_trans_get(c);
struct btree_iter iter;
struct bkey_s_c k;
enum btree_id id;
int ret = 0;
......
......@@ -372,9 +372,6 @@ struct bch_io_opts *bch2_move_get_io_opts(struct btree_trans *trans,
int ret = 0;
if (io_opts->cur_inum != extent_k.k->p.inode) {
struct btree_iter iter;
struct bkey_s_c k;
io_opts->d.nr = 0;
ret = for_each_btree_key(trans, iter, BTREE_ID_inodes, POS(0, extent_k.k->p.inode),
......
......@@ -145,8 +145,6 @@ static int bch2_copygc_get_buckets(struct moving_context *ctxt,
{
struct btree_trans *trans = ctxt->trans;
struct bch_fs *c = trans->c;
struct btree_iter iter;
struct bkey_s_c k;
size_t nr_to_get = max_t(size_t, 16U, buckets_in_flight->nr / 4);
size_t saw = 0, in_flight = 0, not_movable = 0, sectors = 0;
int ret;
......
......@@ -599,14 +599,9 @@ static int bch2_fs_quota_read_inode(struct btree_trans *trans,
int bch2_fs_quota_read(struct bch_fs *c)
{
struct bch_sb_field_quota *sb_quota;
struct btree_trans *trans;
struct btree_iter iter;
struct bkey_s_c k;
int ret;
mutex_lock(&c->sb_lock);
sb_quota = bch2_sb_get_or_create_quota(&c->disk_sb);
struct bch_sb_field_quota *sb_quota = bch2_sb_get_or_create_quota(&c->disk_sb);
if (!sb_quota) {
mutex_unlock(&c->sb_lock);
return -BCH_ERR_ENOSPC_sb_quota;
......@@ -615,17 +610,13 @@ int bch2_fs_quota_read(struct bch_fs *c)
bch2_sb_quota_read(c);
mutex_unlock(&c->sb_lock);
trans = bch2_trans_get(c);
ret = for_each_btree_key(trans, iter, BTREE_ID_quotas, POS_MIN,
int ret = bch2_trans_run(c,
for_each_btree_key(trans, iter, BTREE_ID_quotas, POS_MIN,
BTREE_ITER_PREFETCH, k,
__bch2_quota_set(c, k, NULL)) ?:
for_each_btree_key(trans, iter, BTREE_ID_inodes, POS_MIN,
BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
bch2_fs_quota_read_inode(trans, &iter, k));
bch2_trans_put(trans);
bch2_fs_quota_read_inode(trans, &iter, k)));
bch_err_fn(c, ret);
return ret;
}
......
......@@ -581,11 +581,7 @@ static int check_snapshot_tree(struct btree_trans *trans,
*/
int bch2_check_snapshot_trees(struct bch_fs *c)
{
struct btree_iter iter;
struct bkey_s_c k;
int ret;
ret = bch2_trans_run(c,
int ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter,
BTREE_ID_snapshot_trees, POS_MIN,
BTREE_ITER_PREFETCH, k,
......@@ -853,15 +849,11 @@ static int check_snapshot(struct btree_trans *trans,
int bch2_check_snapshots(struct bch_fs *c)
{
struct btree_iter iter;
struct bkey_s_c k;
int ret;
/*
* We iterate backwards as checking/fixing the depth field requires that
* the parent's depth already be correct:
*/
ret = bch2_trans_run(c,
int ret = bch2_trans_run(c,
for_each_btree_key_reverse_commit(trans, iter,
BTREE_ID_snapshots, POS_MAX,
BTREE_ITER_PREFETCH, k,
......@@ -1363,9 +1355,6 @@ static int bch2_fix_child_of_deleted_snapshot(struct btree_trans *trans,
int bch2_delete_dead_snapshots(struct bch_fs *c)
{
struct btree_trans *trans;
struct btree_iter iter;
struct bkey_s_c k;
struct bkey_s_c_snapshot snap;
snapshot_id_list deleted = { 0 };
snapshot_id_list deleted_interior = { 0 };
u32 id;
......@@ -1407,8 +1396,7 @@ int bch2_delete_dead_snapshots(struct bch_fs *c)
if (k.k->type != KEY_TYPE_snapshot)
continue;
snap = bkey_s_c_to_snapshot(k);
BCH_SNAPSHOT_DELETED(snap.v)
BCH_SNAPSHOT_DELETED(bkey_s_c_to_snapshot(k).v)
? snapshot_list_add(c, &deleted, k.k->p.offset)
: 0;
}));
......@@ -1673,11 +1661,7 @@ static int bch2_check_snapshot_needs_deletion(struct btree_trans *trans, struct
int bch2_snapshots_read(struct bch_fs *c)
{
struct btree_iter iter;
struct bkey_s_c k;
int ret = 0;
ret = bch2_trans_run(c,
int ret = bch2_trans_run(c,
for_each_btree_key(trans, iter, BTREE_ID_snapshots,
POS_MIN, 0, k,
bch2_mark_snapshot(trans, BTREE_ID_snapshots, 0, bkey_s_c_null, k, 0) ?:
......
......@@ -79,11 +79,7 @@ static int check_subvol(struct btree_trans *trans,
int bch2_check_subvols(struct bch_fs *c)
{
struct btree_iter iter;
struct bkey_s_c k;
int ret;
ret = bch2_trans_run(c,
int ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter,
BTREE_ID_subvolumes, POS_MIN, BTREE_ITER_PREFETCH, k,
NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
......@@ -224,8 +220,6 @@ static int bch2_subvolume_reparent(struct btree_trans *trans,
*/
static int bch2_subvolumes_reparent(struct btree_trans *trans, u32 subvolid_to_delete)
{
struct btree_iter iter;
struct bkey_s_c k;
struct bch_subvolume s;
return lockrestart_do(trans,
......
......@@ -256,8 +256,6 @@ static size_t bch2_btree_cache_size(struct bch_fs *c)
static int bch2_compression_stats_to_text(struct printbuf *out, struct bch_fs *c)
{
struct btree_trans *trans;
struct btree_iter iter;
struct bkey_s_c k;
enum btree_id id;
struct compression_type_stats {
u64 nr_extents;
......
......@@ -107,9 +107,6 @@ static int test_delete_written(struct bch_fs *c, u64 nr)
static int test_iterate(struct bch_fs *c, u64 nr)
{
struct btree_trans *trans = bch2_trans_get(c);
struct btree_iter iter = { NULL };
struct bkey_s_c k;
u64 i;
int ret = 0;
......@@ -127,49 +124,43 @@ static int test_iterate(struct bch_fs *c, u64 nr)
ret = bch2_btree_insert(c, BTREE_ID_xattrs, &ck.k_i, NULL, 0);
bch_err_msg(c, ret, "insert error");
if (ret)
goto err;
return ret;
}
pr_info("iterating forwards");
i = 0;
ret = for_each_btree_key_upto(trans, iter, BTREE_ID_xattrs,
ret = bch2_trans_run(c,
for_each_btree_key_upto(trans, iter, BTREE_ID_xattrs,
SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
0, k, ({
BUG_ON(k.k->p.offset != i++);
0;
}));
})));
bch_err_msg(c, ret, "error iterating forwards");
if (ret)
goto err;
return ret;
BUG_ON(i != nr);
pr_info("iterating backwards");
ret = for_each_btree_key_reverse(trans, iter, BTREE_ID_xattrs,
SPOS(0, U64_MAX, U32_MAX), 0, k,
({
ret = bch2_trans_run(c,
for_each_btree_key_reverse(trans, iter, BTREE_ID_xattrs,
SPOS(0, U64_MAX, U32_MAX), 0, k, ({
BUG_ON(k.k->p.offset != --i);
0;
}));
})));
bch_err_msg(c, ret, "error iterating backwards");
if (ret)
goto err;
return ret;
BUG_ON(i);
err:
bch2_trans_iter_exit(trans, &iter);
bch2_trans_put(trans);
return ret;
return 0;
}
static int test_iterate_extents(struct bch_fs *c, u64 nr)
{
struct btree_trans *trans = bch2_trans_get(c);
struct btree_iter iter = { NULL };
struct bkey_s_c k;
u64 i;
int ret = 0;
......@@ -188,51 +179,45 @@ static int test_iterate_extents(struct bch_fs *c, u64 nr)
ret = bch2_btree_insert(c, BTREE_ID_extents, &ck.k_i, NULL, 0);
bch_err_msg(c, ret, "insert error");
if (ret)
goto err;
return ret;
}
pr_info("iterating forwards");
i = 0;
ret = for_each_btree_key_upto(trans, iter, BTREE_ID_extents,
ret = bch2_trans_run(c,
for_each_btree_key_upto(trans, iter, BTREE_ID_extents,
SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
0, k, ({
BUG_ON(bkey_start_offset(k.k) != i);
i = k.k->p.offset;
0;
}));
})));
bch_err_msg(c, ret, "error iterating forwards");
if (ret)
goto err;
return ret;
BUG_ON(i != nr);
pr_info("iterating backwards");
ret = for_each_btree_key_reverse(trans, iter, BTREE_ID_extents,
SPOS(0, U64_MAX, U32_MAX), 0, k,
({
ret = bch2_trans_run(c,
for_each_btree_key_reverse(trans, iter, BTREE_ID_extents,
SPOS(0, U64_MAX, U32_MAX), 0, k, ({
BUG_ON(k.k->p.offset != i);
i = bkey_start_offset(k.k);
0;
}));
})));
bch_err_msg(c, ret, "error iterating backwards");
if (ret)
goto err;
return ret;
BUG_ON(i);
err:
bch2_trans_iter_exit(trans, &iter);
bch2_trans_put(trans);
return ret;
return 0;
}
static int test_iterate_slots(struct bch_fs *c, u64 nr)
{
struct btree_trans *trans = bch2_trans_get(c);
struct btree_iter iter = { NULL };
struct bkey_s_c k;
u64 i;
int ret = 0;
......@@ -250,31 +235,31 @@ static int test_iterate_slots(struct bch_fs *c, u64 nr)
ret = bch2_btree_insert(c, BTREE_ID_xattrs, &ck.k_i, NULL, 0);
bch_err_msg(c, ret, "insert error");
if (ret)
goto err;
return ret;
}
pr_info("iterating forwards");
i = 0;
ret = for_each_btree_key_upto(trans, iter, BTREE_ID_xattrs,
ret = bch2_trans_run(c,
for_each_btree_key_upto(trans, iter, BTREE_ID_xattrs,
SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
0, k, ({
BUG_ON(k.k->p.offset != i);
i += 2;
0;
}));
})));
bch_err_msg(c, ret, "error iterating forwards");
if (ret)
goto err;
return ret;
BUG_ON(i != nr * 2);
pr_info("iterating forwards by slots");
i = 0;
ret = for_each_btree_key_upto(trans, iter, BTREE_ID_xattrs,
ret = bch2_trans_run(c,
for_each_btree_key_upto(trans, iter, BTREE_ID_xattrs,
SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
BTREE_ITER_SLOTS, k, ({
if (i >= nr * 2)
......@@ -285,22 +270,13 @@ static int test_iterate_slots(struct bch_fs *c, u64 nr)
i++;
0;
}));
if (ret < 0) {
})));
bch_err_msg(c, ret, "error iterating forwards by slots");
goto err;
}
ret = 0;
err:
bch2_trans_put(trans);
return ret;
}
static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
{
struct btree_trans *trans = bch2_trans_get(c);
struct btree_iter iter = { NULL };
struct bkey_s_c k;
u64 i;
int ret = 0;
......@@ -319,32 +295,32 @@ static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
ret = bch2_btree_insert(c, BTREE_ID_extents, &ck.k_i, NULL, 0);
bch_err_msg(c, ret, "insert error");
if (ret)
goto err;
return ret;
}
pr_info("iterating forwards");
i = 0;
ret = for_each_btree_key_upto(trans, iter, BTREE_ID_extents,
ret = bch2_trans_run(c,
for_each_btree_key_upto(trans, iter, BTREE_ID_extents,
SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
0, k, ({
BUG_ON(bkey_start_offset(k.k) != i + 8);
BUG_ON(k.k->size != 8);
i += 16;
0;
}));
})));
bch_err_msg(c, ret, "error iterating forwards");
if (ret)
goto err;
return ret;
BUG_ON(i != nr);
pr_info("iterating forwards by slots");
i = 0;
ret = for_each_btree_key_upto(trans, iter, BTREE_ID_extents,
ret = bch2_trans_run(c,
for_each_btree_key_upto(trans, iter, BTREE_ID_extents,
SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
BTREE_ITER_SLOTS, k, ({
if (i == nr)
......@@ -355,14 +331,9 @@ static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
BUG_ON(k.k->size != 8);
i = k.k->p.offset;
0;
}));
})));
bch_err_msg(c, ret, "error iterating forwards by slots");
if (ret)
goto err;
ret = 0;
err:
bch2_trans_put(trans);
return 0;
return ret;
}
/*
......@@ -736,8 +707,6 @@ static int rand_delete(struct bch_fs *c, u64 nr)
static int seq_insert(struct bch_fs *c, u64 nr)
{
struct btree_iter iter;
struct bkey_s_c k;
struct bkey_i_cookie insert;
bkey_cookie_init(&insert.k_i);
......@@ -756,9 +725,6 @@ static int seq_insert(struct bch_fs *c, u64 nr)
static int seq_lookup(struct bch_fs *c, u64 nr)
{
struct btree_iter iter;
struct bkey_s_c k;
return bch2_trans_run(c,
for_each_btree_key_upto(trans, iter, BTREE_ID_xattrs,
SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
......@@ -768,9 +734,6 @@ static int seq_lookup(struct bch_fs *c, u64 nr)
static int seq_overwrite(struct bch_fs *c, u64 nr)
{
struct btree_iter iter;
struct bkey_s_c k;
return bch2_trans_run(c,
for_each_btree_key_commit(trans, iter, BTREE_ID_xattrs,
SPOS(0, 0, U32_MAX),
......