Commit 0beebd92 authored by Kent Overstreet

bcachefs: bkey_for_each_ptr() now declares loop iter

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 0bc64d7e
...@@ -1844,7 +1844,6 @@ static int gc_btree_gens_key(struct btree_trans *trans, ...@@ -1844,7 +1844,6 @@ static int gc_btree_gens_key(struct btree_trans *trans,
{ {
struct bch_fs *c = trans->c; struct bch_fs *c = trans->c;
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const struct bch_extent_ptr *ptr;
struct bkey_i *u; struct bkey_i *u;
int ret; int ret;
......
...@@ -934,7 +934,6 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca, ...@@ -934,7 +934,6 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
struct sort_iter *iter; struct sort_iter *iter;
struct btree_node *sorted; struct btree_node *sorted;
struct bkey_packed *k; struct bkey_packed *k;
struct bch_extent_ptr *ptr;
struct bset *i; struct bset *i;
bool used_mempool, blacklisted; bool used_mempool, blacklisted;
bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 && bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
...@@ -1896,7 +1895,6 @@ static int validate_bset_for_write(struct bch_fs *c, struct btree *b, ...@@ -1896,7 +1895,6 @@ static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
static void btree_write_submit(struct work_struct *work) static void btree_write_submit(struct work_struct *work)
{ {
struct btree_write_bio *wbio = container_of(work, struct btree_write_bio, work); struct btree_write_bio *wbio = container_of(work, struct btree_write_bio, work);
struct bch_extent_ptr *ptr;
BKEY_PADDED_ONSTACK(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp; BKEY_PADDED_ONSTACK(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
bkey_copy(&tmp.k, &wbio->key); bkey_copy(&tmp.k, &wbio->key);
......
...@@ -356,7 +356,6 @@ void bch2_data_update_exit(struct data_update *update) ...@@ -356,7 +356,6 @@ void bch2_data_update_exit(struct data_update *update)
struct bch_fs *c = update->op.c; struct bch_fs *c = update->op.c;
struct bkey_ptrs_c ptrs = struct bkey_ptrs_c ptrs =
bch2_bkey_ptrs_c(bkey_i_to_s_c(update->k.k)); bch2_bkey_ptrs_c(bkey_i_to_s_c(update->k.k));
const struct bch_extent_ptr *ptr;
bkey_for_each_ptr(ptrs, ptr) { bkey_for_each_ptr(ptrs, ptr) {
if (c->opts.nocow_enabled) if (c->opts.nocow_enabled)
...@@ -377,7 +376,6 @@ static void bch2_update_unwritten_extent(struct btree_trans *trans, ...@@ -377,7 +376,6 @@ static void bch2_update_unwritten_extent(struct btree_trans *trans,
struct bio *bio = &update->op.wbio.bio; struct bio *bio = &update->op.wbio.bio;
struct bkey_i_extent *e; struct bkey_i_extent *e;
struct write_point *wp; struct write_point *wp;
struct bch_extent_ptr *ptr;
struct closure cl; struct closure cl;
struct btree_iter iter; struct btree_iter iter;
struct bkey_s_c k; struct bkey_s_c k;
...@@ -509,7 +507,6 @@ int bch2_data_update_init(struct btree_trans *trans, ...@@ -509,7 +507,6 @@ int bch2_data_update_init(struct btree_trans *trans,
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const union bch_extent_entry *entry; const union bch_extent_entry *entry;
struct extent_ptr_decoded p; struct extent_ptr_decoded p;
const struct bch_extent_ptr *ptr;
unsigned i, reserve_sectors = k.k->size * data_opts.extra_replicas; unsigned i, reserve_sectors = k.k->size * data_opts.extra_replicas;
unsigned ptrs_locked = 0; unsigned ptrs_locked = 0;
int ret = 0; int ret = 0;
...@@ -655,7 +652,6 @@ int bch2_data_update_init(struct btree_trans *trans, ...@@ -655,7 +652,6 @@ int bch2_data_update_init(struct btree_trans *trans,
void bch2_data_update_opts_normalize(struct bkey_s_c k, struct data_update_opts *opts) void bch2_data_update_opts_normalize(struct bkey_s_c k, struct data_update_opts *opts)
{ {
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const struct bch_extent_ptr *ptr;
unsigned i = 0; unsigned i = 0;
bkey_for_each_ptr(ptrs, ptr) { bkey_for_each_ptr(ptrs, ptr) {
......
...@@ -161,7 +161,6 @@ static const struct bch_extent_ptr *bkey_matches_stripe(struct bch_stripe *s, ...@@ -161,7 +161,6 @@ static const struct bch_extent_ptr *bkey_matches_stripe(struct bch_stripe *s,
struct bkey_s_c k, unsigned *block) struct bkey_s_c k, unsigned *block)
{ {
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const struct bch_extent_ptr *ptr;
unsigned i, nr_data = s->nr_blocks - s->nr_redundant; unsigned i, nr_data = s->nr_blocks - s->nr_redundant;
bkey_for_each_ptr(ptrs, ptr) bkey_for_each_ptr(ptrs, ptr)
......
...@@ -843,7 +843,6 @@ void bch2_bkey_drop_device_noerror(struct bkey_s k, unsigned dev) ...@@ -843,7 +843,6 @@ void bch2_bkey_drop_device_noerror(struct bkey_s k, unsigned dev)
const struct bch_extent_ptr *bch2_bkey_has_device_c(struct bkey_s_c k, unsigned dev) const struct bch_extent_ptr *bch2_bkey_has_device_c(struct bkey_s_c k, unsigned dev)
{ {
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const struct bch_extent_ptr *ptr;
bkey_for_each_ptr(ptrs, ptr) bkey_for_each_ptr(ptrs, ptr)
if (ptr->dev == dev) if (ptr->dev == dev)
...@@ -855,7 +854,6 @@ const struct bch_extent_ptr *bch2_bkey_has_device_c(struct bkey_s_c k, unsigned ...@@ -855,7 +854,6 @@ const struct bch_extent_ptr *bch2_bkey_has_device_c(struct bkey_s_c k, unsigned
bool bch2_bkey_has_target(struct bch_fs *c, struct bkey_s_c k, unsigned target) bool bch2_bkey_has_target(struct bch_fs *c, struct bkey_s_c k, unsigned target)
{ {
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const struct bch_extent_ptr *ptr;
bkey_for_each_ptr(ptrs, ptr) bkey_for_each_ptr(ptrs, ptr)
if (bch2_dev_in_target(c, ptr->dev, target) && if (bch2_dev_in_target(c, ptr->dev, target) &&
...@@ -1065,7 +1063,6 @@ static int extent_ptr_invalid(struct bch_fs *c, ...@@ -1065,7 +1063,6 @@ static int extent_ptr_invalid(struct bch_fs *c,
struct printbuf *err) struct printbuf *err)
{ {
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const struct bch_extent_ptr *ptr2;
u64 bucket; u64 bucket;
u32 bucket_offset; u32 bucket_offset;
struct bch_dev *ca; struct bch_dev *ca;
...@@ -1307,7 +1304,6 @@ unsigned bch2_bkey_ptrs_need_rebalance(struct bch_fs *c, struct bkey_s_c k, ...@@ -1307,7 +1304,6 @@ unsigned bch2_bkey_ptrs_need_rebalance(struct bch_fs *c, struct bkey_s_c k,
} }
incompressible: incompressible:
if (target && bch2_target_accepts_data(c, BCH_DATA_user, target)) { if (target && bch2_target_accepts_data(c, BCH_DATA_user, target)) {
const struct bch_extent_ptr *ptr;
unsigned i = 0; unsigned i = 0;
bkey_for_each_ptr(ptrs, ptr) { bkey_for_each_ptr(ptrs, ptr) {
......
...@@ -300,7 +300,7 @@ static inline struct bkey_ptrs bch2_bkey_ptrs(struct bkey_s k) ...@@ -300,7 +300,7 @@ static inline struct bkey_ptrs bch2_bkey_ptrs(struct bkey_s k)
bkey_extent_entry_for_each_from(_p, _entry, _p.start) bkey_extent_entry_for_each_from(_p, _entry, _p.start)
#define __bkey_for_each_ptr(_start, _end, _ptr) \ #define __bkey_for_each_ptr(_start, _end, _ptr) \
for ((_ptr) = (_start); \ for (typeof(_start) (_ptr) = (_start); \
((_ptr) = __bkey_ptr_next(_ptr, _end)); \ ((_ptr) = __bkey_ptr_next(_ptr, _end)); \
(_ptr)++) (_ptr)++)
...@@ -547,7 +547,6 @@ static inline bool bkey_extent_is_allocation(const struct bkey *k) ...@@ -547,7 +547,6 @@ static inline bool bkey_extent_is_allocation(const struct bkey *k)
static inline bool bkey_extent_is_unwritten(struct bkey_s_c k) static inline bool bkey_extent_is_unwritten(struct bkey_s_c k)
{ {
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const struct bch_extent_ptr *ptr;
bkey_for_each_ptr(ptrs, ptr) bkey_for_each_ptr(ptrs, ptr)
if (ptr->unwritten) if (ptr->unwritten)
...@@ -565,7 +564,6 @@ static inline struct bch_devs_list bch2_bkey_devs(struct bkey_s_c k) ...@@ -565,7 +564,6 @@ static inline struct bch_devs_list bch2_bkey_devs(struct bkey_s_c k)
{ {
struct bch_devs_list ret = (struct bch_devs_list) { 0 }; struct bch_devs_list ret = (struct bch_devs_list) { 0 };
struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k); struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
const struct bch_extent_ptr *ptr;
bkey_for_each_ptr(p, ptr) bkey_for_each_ptr(p, ptr)
ret.data[ret.nr++] = ptr->dev; ret.data[ret.nr++] = ptr->dev;
...@@ -577,7 +575,6 @@ static inline struct bch_devs_list bch2_bkey_dirty_devs(struct bkey_s_c k) ...@@ -577,7 +575,6 @@ static inline struct bch_devs_list bch2_bkey_dirty_devs(struct bkey_s_c k)
{ {
struct bch_devs_list ret = (struct bch_devs_list) { 0 }; struct bch_devs_list ret = (struct bch_devs_list) { 0 };
struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k); struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
const struct bch_extent_ptr *ptr;
bkey_for_each_ptr(p, ptr) bkey_for_each_ptr(p, ptr)
if (!ptr->cached) if (!ptr->cached)
...@@ -590,7 +587,6 @@ static inline struct bch_devs_list bch2_bkey_cached_devs(struct bkey_s_c k) ...@@ -590,7 +587,6 @@ static inline struct bch_devs_list bch2_bkey_cached_devs(struct bkey_s_c k)
{ {
struct bch_devs_list ret = (struct bch_devs_list) { 0 }; struct bch_devs_list ret = (struct bch_devs_list) { 0 };
struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k); struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
const struct bch_extent_ptr *ptr;
bkey_for_each_ptr(p, ptr) bkey_for_each_ptr(p, ptr)
if (ptr->cached) if (ptr->cached)
......
...@@ -74,7 +74,6 @@ int bch2_extent_fallocate(struct btree_trans *trans, ...@@ -74,7 +74,6 @@ int bch2_extent_fallocate(struct btree_trans *trans,
struct bkey_i_extent *e; struct bkey_i_extent *e;
struct bch_devs_list devs_have; struct bch_devs_list devs_have;
struct write_point *wp; struct write_point *wp;
struct bch_extent_ptr *ptr;
devs_have.nr = 0; devs_have.nr = 0;
......
...@@ -396,16 +396,14 @@ void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c, ...@@ -396,16 +396,14 @@ void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
bool nocow) bool nocow)
{ {
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(k)); struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(k));
const struct bch_extent_ptr *ptr;
struct bch_write_bio *n; struct bch_write_bio *n;
struct bch_dev *ca;
BUG_ON(c->opts.nochanges); BUG_ON(c->opts.nochanges);
bkey_for_each_ptr(ptrs, ptr) { bkey_for_each_ptr(ptrs, ptr) {
BUG_ON(!bch2_dev_exists2(c, ptr->dev)); BUG_ON(!bch2_dev_exists2(c, ptr->dev));
ca = bch_dev_bkey_exists(c, ptr->dev); struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
if (to_entry(ptr + 1) < ptrs.end) { if (to_entry(ptr + 1) < ptrs.end) {
n = to_wbio(bio_alloc_clone(NULL, &wbio->bio, n = to_wbio(bio_alloc_clone(NULL, &wbio->bio,
...@@ -1108,7 +1106,6 @@ static bool bch2_extent_is_writeable(struct bch_write_op *op, ...@@ -1108,7 +1106,6 @@ static bool bch2_extent_is_writeable(struct bch_write_op *op,
static inline void bch2_nocow_write_unlock(struct bch_write_op *op) static inline void bch2_nocow_write_unlock(struct bch_write_op *op)
{ {
struct bch_fs *c = op->c; struct bch_fs *c = op->c;
const struct bch_extent_ptr *ptr;
struct bkey_i *k; struct bkey_i *k;
for_each_keylist_key(&op->insert_keys, k) { for_each_keylist_key(&op->insert_keys, k) {
...@@ -1127,25 +1124,20 @@ static int bch2_nocow_write_convert_one_unwritten(struct btree_trans *trans, ...@@ -1127,25 +1124,20 @@ static int bch2_nocow_write_convert_one_unwritten(struct btree_trans *trans,
struct bkey_s_c k, struct bkey_s_c k,
u64 new_i_size) u64 new_i_size)
{ {
struct bkey_i *new;
struct bkey_ptrs ptrs;
struct bch_extent_ptr *ptr;
int ret;
if (!bch2_extents_match(bkey_i_to_s_c(orig), k)) { if (!bch2_extents_match(bkey_i_to_s_c(orig), k)) {
/* trace this */ /* trace this */
return 0; return 0;
} }
new = bch2_bkey_make_mut_noupdate(trans, k); struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k);
ret = PTR_ERR_OR_ZERO(new); int ret = PTR_ERR_OR_ZERO(new);
if (ret) if (ret)
return ret; return ret;
bch2_cut_front(bkey_start_pos(&orig->k), new); bch2_cut_front(bkey_start_pos(&orig->k), new);
bch2_cut_back(orig->k.p, new); bch2_cut_back(orig->k.p, new);
ptrs = bch2_bkey_ptrs(bkey_i_to_s(new)); struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
bkey_for_each_ptr(ptrs, ptr) bkey_for_each_ptr(ptrs, ptr)
ptr->unwritten = 0; ptr->unwritten = 0;
...@@ -1225,8 +1217,6 @@ static void bch2_nocow_write(struct bch_write_op *op) ...@@ -1225,8 +1217,6 @@ static void bch2_nocow_write(struct bch_write_op *op)
struct btree_trans *trans; struct btree_trans *trans;
struct btree_iter iter; struct btree_iter iter;
struct bkey_s_c k; struct bkey_s_c k;
struct bkey_ptrs_c ptrs;
const struct bch_extent_ptr *ptr;
DARRAY_PREALLOCATED(struct bucket_to_lock, 3) buckets; DARRAY_PREALLOCATED(struct bucket_to_lock, 3) buckets;
u32 snapshot; u32 snapshot;
struct bucket_to_lock *stale_at; struct bucket_to_lock *stale_at;
...@@ -1269,7 +1259,7 @@ static void bch2_nocow_write(struct bch_write_op *op) ...@@ -1269,7 +1259,7 @@ static void bch2_nocow_write(struct bch_write_op *op)
break; break;
/* Get iorefs before dropping btree locks: */ /* Get iorefs before dropping btree locks: */
ptrs = bch2_bkey_ptrs_c(k); struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
bkey_for_each_ptr(ptrs, ptr) { bkey_for_each_ptr(ptrs, ptr) {
struct bpos b = PTR_BUCKET_POS(c, ptr); struct bpos b = PTR_BUCKET_POS(c, ptr);
struct nocow_lock_bucket *l = struct nocow_lock_bucket *l =
......
...@@ -1678,7 +1678,6 @@ static CLOSURE_CALLBACK(do_journal_write) ...@@ -1678,7 +1678,6 @@ static CLOSURE_CALLBACK(do_journal_write)
struct bch_fs *c = container_of(j, struct bch_fs, journal); struct bch_fs *c = container_of(j, struct bch_fs, journal);
struct bch_dev *ca; struct bch_dev *ca;
struct journal_buf *w = journal_last_unwritten_buf(j); struct journal_buf *w = journal_last_unwritten_buf(j);
struct bch_extent_ptr *ptr;
struct bio *bio; struct bio *bio;
unsigned sectors = vstruct_sectors(w->data, c->block_bits); unsigned sectors = vstruct_sectors(w->data, c->block_bits);
......
...@@ -695,9 +695,6 @@ int bch2_evacuate_bucket(struct moving_context *ctxt, ...@@ -695,9 +695,6 @@ int bch2_evacuate_bucket(struct moving_context *ctxt,
break; break;
if (!bp.level) { if (!bp.level) {
const struct bch_extent_ptr *ptr;
unsigned i = 0;
k = bch2_backpointer_get_key(trans, &iter, bp_pos, bp, 0); k = bch2_backpointer_get_key(trans, &iter, bp_pos, bp, 0);
ret = bkey_err(k); ret = bkey_err(k);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
...@@ -720,6 +717,7 @@ int bch2_evacuate_bucket(struct moving_context *ctxt, ...@@ -720,6 +717,7 @@ int bch2_evacuate_bucket(struct moving_context *ctxt,
data_opts.target = io_opts.background_target; data_opts.target = io_opts.background_target;
data_opts.rewrite_ptrs = 0; data_opts.rewrite_ptrs = 0;
unsigned i = 0;
bkey_for_each_ptr(bch2_bkey_ptrs_c(k), ptr) { bkey_for_each_ptr(bch2_bkey_ptrs_c(k), ptr) {
if (ptr->dev == bucket.inode) { if (ptr->dev == bucket.inode) {
data_opts.rewrite_ptrs |= 1U << i; data_opts.rewrite_ptrs |= 1U << i;
...@@ -890,7 +888,6 @@ static bool migrate_pred(struct bch_fs *c, void *arg, ...@@ -890,7 +888,6 @@ static bool migrate_pred(struct bch_fs *c, void *arg,
struct data_update_opts *data_opts) struct data_update_opts *data_opts)
{ {
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const struct bch_extent_ptr *ptr;
struct bch_ioctl_data *op = arg; struct bch_ioctl_data *op = arg;
unsigned i = 0; unsigned i = 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment