Commit 9f6bd307 authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Reduce iter->trans usage

Disfavoured, and should go away.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
parent 84841b0d
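
The sketch below is a minimal illustration of the pattern applied throughout this diff (the struct definitions are simplified and example_old/example_new are hypothetical, not code from this commit): callees stop recovering the transaction through the iterator's iter->trans back-pointer and instead take it as an explicit first parameter.

struct btree_trans;

struct btree_iter {
	struct btree_trans	*trans;	/* back-pointer this commit works toward removing */
};

/* Before: the callee reaches through the iterator for the transaction. */
static void example_old(struct btree_iter *iter)
{
	struct btree_trans *trans = iter->trans;	/* disfavoured */
	(void) trans;
}

/* After: the caller passes the transaction explicitly, ahead of the iterator. */
static void example_new(struct btree_trans *trans, struct btree_iter *iter)
{
	(void) trans;
	(void) iter;
}

Call sites change accordingly, e.g. bch2_btree_iter_reinit_node(iter, b) becomes bch2_btree_iter_reinit_node(trans, iter, b), as in the first hunk below.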
@@ -498,7 +498,7 @@ void bch2_btree_init_next(struct btree_trans *trans,
 	bch2_btree_build_aux_trees(b);
 
 	if (iter && reinit_iter)
-		bch2_btree_iter_reinit_node(iter, b);
+		bch2_btree_iter_reinit_node(trans, iter, b);
 }
 
 static void btree_pos_to_text(struct printbuf *out, struct bch_fs *c,
(This file's diff is collapsed and not shown.)
@@ -135,14 +135,13 @@ static inline void bch2_btree_trans_verify_iters(struct btree_trans *trans,
 static inline void bch2_btree_trans_verify_locks(struct btree_trans *iter) {}
 #endif
 
-void bch2_btree_iter_fix_key_modified(struct btree_iter *, struct btree *,
-				      struct bkey_packed *);
-void bch2_btree_node_iter_fix(struct btree_iter *, struct btree *,
-			      struct btree_node_iter *, struct bkey_packed *,
-			      unsigned, unsigned);
+void bch2_btree_iter_fix_key_modified(struct btree_trans *trans, struct btree_iter *,
+				      struct btree *, struct bkey_packed *);
+void bch2_btree_node_iter_fix(struct btree_trans *trans, struct btree_iter *,
+			      struct btree *, struct btree_node_iter *,
+			      struct bkey_packed *, unsigned, unsigned);
 
 bool bch2_btree_iter_relock_intent(struct btree_iter *);
-bool bch2_btree_iter_relock(struct btree_iter *, unsigned long);
 
 bool bch2_trans_relock(struct btree_trans *);
 void bch2_trans_unlock(struct btree_trans *);
@@ -179,10 +178,13 @@ static inline void bch2_btree_iter_downgrade(struct btree_iter *iter)
 
 void bch2_trans_downgrade(struct btree_trans *);
 
-void bch2_btree_iter_node_replace(struct btree_iter *, struct btree *);
-void bch2_btree_iter_node_drop(struct btree_iter *, struct btree *);
+void bch2_btree_iter_node_replace(struct btree_trans *trans,
+				  struct btree_iter *, struct btree *);
+void bch2_btree_iter_node_drop(struct btree_trans *,
+			       struct btree_iter *, struct btree *);
 
-void bch2_btree_iter_reinit_node(struct btree_iter *, struct btree *);
+void bch2_btree_iter_reinit_node(struct btree_trans *,
+				 struct btree_iter *, struct btree *);
 
 int __must_check bch2_btree_iter_traverse(struct btree_iter *);
@@ -226,9 +228,10 @@ static inline struct btree_iter *idx_to_btree_iter(struct btree_trans *trans, un
 	return idx != U8_MAX ? trans->iters + idx : NULL;
 }
 
-static inline struct btree_iter *btree_iter_child(struct btree_iter *iter)
+static inline struct btree_iter *btree_iter_child(struct btree_trans *trans,
+						  struct btree_iter *iter)
 {
-	return idx_to_btree_iter(iter->trans, iter->child_idx);
+	return idx_to_btree_iter(trans, iter->child_idx);
 }
 
 /*
@@ -319,7 +322,7 @@ bch2_trans_get_iter(struct btree_trans *trans, enum btree_id btree_id,
 }
 
 struct btree_iter *__bch2_trans_copy_iter(struct btree_trans *,
-			struct btree_iter *);
+					  struct btree_iter *);
 
 static inline struct btree_iter *
 bch2_trans_copy_iter(struct btree_trans *trans, struct btree_iter *src)
 {
@@ -238,7 +238,7 @@ static int btree_key_cache_fill(struct btree_trans *trans,
 	 * XXX: not allowed to be holding read locks when we take a write lock,
 	 * currently
 	 */
-	bch2_btree_node_lock_write(ck_iter->l[0].b, ck_iter);
+	bch2_btree_node_lock_write(trans, ck_iter, ck_iter->l[0].b);
 	if (new_k) {
 		kfree(ck->k);
 		ck->u64s = new_u64s;
@@ -247,7 +247,7 @@ static int btree_key_cache_fill(struct btree_trans *trans,
 	bkey_reassemble(ck->k, k);
 	ck->valid = true;
-	bch2_btree_node_unlock_write(ck_iter->l[0].b, ck_iter);
+	bch2_btree_node_unlock_write(trans, ck_iter, ck_iter->l[0].b);
 
 	/* We're not likely to need this iterator again: */
 	set_btree_iter_dontneed(trans, iter);
@@ -207,30 +207,35 @@ static inline bool bch2_btree_node_relock(struct btree_iter *iter,
  * succeed:
  */
 static inline void
-bch2_btree_node_unlock_write_inlined(struct btree *b, struct btree_iter *iter)
+bch2_btree_node_unlock_write_inlined(struct btree_trans *trans, struct btree_iter *iter,
+				     struct btree *b)
 {
 	struct btree_iter *linked;
 
 	EBUG_ON(iter->l[b->c.level].b != b);
 	EBUG_ON(iter->l[b->c.level].lock_seq + 1 != b->c.lock.state.seq);
 
-	trans_for_each_iter_with_node(iter->trans, b, linked)
+	trans_for_each_iter_with_node(trans, b, linked)
 		linked->l[b->c.level].lock_seq += 2;
 
 	six_unlock_write(&b->c.lock);
 }
 
-void bch2_btree_node_unlock_write(struct btree *, struct btree_iter *);
+void bch2_btree_node_unlock_write(struct btree_trans *,
+				  struct btree_iter *, struct btree *);
 
-void __bch2_btree_node_lock_write(struct btree *, struct btree_iter *);
+void __bch2_btree_node_lock_write(struct btree_trans *,
+				  struct btree_iter *, struct btree *);
 
-static inline void bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
+static inline void bch2_btree_node_lock_write(struct btree_trans *trans,
+					      struct btree_iter *iter,
+					      struct btree *b)
 {
 	EBUG_ON(iter->l[b->c.level].b != b);
 	EBUG_ON(iter->l[b->c.level].lock_seq != b->c.lock.state.seq);
 
 	if (unlikely(!six_trylock_write(&b->c.lock)))
-		__bch2_btree_node_lock_write(b, iter);
+		__bch2_btree_node_lock_write(trans, iter, b);
 }
 
 #endif /* _BCACHEFS_BTREE_LOCKING_H */
@@ -10,8 +10,9 @@ struct btree;
 
 void bch2_btree_node_lock_for_insert(struct btree_trans *, struct btree_iter *,
 				     struct btree *);
-bool bch2_btree_bset_insert_key(struct btree_iter *, struct btree *,
-				struct btree_node_iter *, struct bkey_i *);
+bool bch2_btree_bset_insert_key(struct btree_trans *, struct btree_iter *,
+				struct btree *, struct btree_node_iter *,
+				struct bkey_i *);
 void bch2_btree_add_journal_pin(struct bch_fs *, struct btree *, u64);
 
 enum btree_insert_flags {
(This file's diff is collapsed and not shown.)
@@ -113,24 +113,10 @@ struct btree_update {
 	u64			inline_keys[BKEY_BTREE_PTR_U64s_MAX * 3];
 };
 
-void bch2_btree_node_free_inmem(struct bch_fs *, struct btree *,
-				struct btree_iter *);
 void bch2_btree_node_free_never_inserted(struct bch_fs *, struct btree *);
 
 void bch2_btree_update_get_open_buckets(struct btree_update *, struct btree *);
 
-struct btree *__bch2_btree_node_alloc_replacement(struct btree_update *,
-						  struct btree *,
-						  struct bkey_format);
 void bch2_btree_update_done(struct btree_update *);
-struct btree_update *
-bch2_btree_update_start(struct btree_iter *, unsigned, unsigned, unsigned);
-
-void bch2_btree_interior_update_will_free_node(struct btree_update *,
-					       struct btree *);
-void bch2_btree_update_add_new_node(struct btree_update *, struct btree *);
 
 int bch2_btree_split_leaf(struct btree_trans *, struct btree_iter *, unsigned);
 
 int __bch2_foreground_maybe_merge(struct btree_trans *, struct btree_iter *,
@@ -42,14 +42,14 @@ inline void bch2_btree_node_lock_for_insert(struct btree_trans *trans,
 {
 	struct bch_fs *c = trans->c;
 
-	bch2_btree_node_lock_write(b, iter);
+	bch2_btree_node_lock_write(trans, iter, b);
 
 	if (btree_iter_type(iter) == BTREE_ITER_CACHED)
 		return;
 
 	if (unlikely(btree_node_just_written(b)) &&
 	    bch2_btree_post_write_cleanup(c, b))
-		bch2_btree_iter_reinit_node(iter, b);
+		bch2_btree_iter_reinit_node(trans, iter, b);
 
 	/*
 	 * If the last bset has been written, or if it's gotten too big - start
@@ -62,7 +62,8 @@ inline void bch2_btree_node_lock_for_insert(struct btree_trans *trans,
 /* Inserting into a given leaf node (last stage of insert): */
 
 /* Handle overwrites and do insert, for non extents: */
-bool bch2_btree_bset_insert_key(struct btree_iter *iter,
+bool bch2_btree_bset_insert_key(struct btree_trans *trans,
+				struct btree_iter *iter,
 				struct btree *b,
 				struct btree_node_iter *node_iter,
 				struct bkey_i *insert)
@@ -76,7 +77,7 @@ bool bch2_btree_bset_insert_key(struct btree_iter *iter,
 	EBUG_ON(bpos_cmp(insert->k.p, b->data->min_key) < 0);
 	EBUG_ON(bpos_cmp(insert->k.p, b->data->max_key) > 0);
 	EBUG_ON(insert->k.u64s >
-		bch_btree_keys_u64s_remaining(iter->trans->c, b));
+		bch_btree_keys_u64s_remaining(trans->c, b));
 	EBUG_ON(iter->flags & BTREE_ITER_IS_EXTENTS);
 
 	k = bch2_btree_node_iter_peek_all(node_iter, b);
@@ -96,7 +97,7 @@ bool bch2_btree_bset_insert_key(struct btree_iter *iter,
 		k->type = KEY_TYPE_deleted;
 
 		if (k->needs_whiteout)
-			push_whiteout(iter->trans->c, b, insert->k.p);
+			push_whiteout(trans->c, b, insert->k.p);
 		k->needs_whiteout = false;
 
 		if (k >= btree_bset_last(b)->start) {
@@ -104,7 +105,7 @@ bool bch2_btree_bset_insert_key(struct btree_iter *iter,
 			bch2_bset_delete(b, k, clobber_u64s);
 			goto fix_iter;
 		} else {
-			bch2_btree_iter_fix_key_modified(iter, b, k);
+			bch2_btree_iter_fix_key_modified(trans, iter, b, k);
 		}
 
 		return true;
@@ -122,7 +123,7 @@ bool bch2_btree_bset_insert_key(struct btree_iter *iter,
 			clobber_u64s = k->u64s;
 			goto overwrite;
 		} else {
-			bch2_btree_iter_fix_key_modified(iter, b, k);
+			bch2_btree_iter_fix_key_modified(trans, iter, b, k);
 		}
 	}
 
@@ -132,7 +133,7 @@ bool bch2_btree_bset_insert_key(struct btree_iter *iter,
 	new_u64s = k->u64s;
 fix_iter:
 	if (clobber_u64s != new_u64s)
-		bch2_btree_node_iter_fix(iter, b, node_iter, k,
+		bch2_btree_node_iter_fix(trans, iter, b, node_iter, k,
 					 clobber_u64s, new_u64s);
 	return true;
 }
@@ -190,7 +191,7 @@ static bool btree_insert_key_leaf(struct btree_trans *trans,
 	EBUG_ON(!iter->level &&
 		!test_bit(BCH_FS_BTREE_INTERIOR_REPLAY_DONE, &c->flags));
 
-	if (unlikely(!bch2_btree_bset_insert_key(iter, b,
+	if (unlikely(!bch2_btree_bset_insert_key(trans, iter, b,
 					&iter_l(iter)->iter, insert)))
 		return false;
 
@@ -212,7 +213,7 @@ static bool btree_insert_key_leaf(struct btree_trans *trans,
 
 	if (u64s_added > live_u64s_added &&
 	    bch2_maybe_compact_whiteouts(c, b))
-		bch2_btree_iter_reinit_node(iter, b);
+		bch2_btree_iter_reinit_node(trans, iter, b);
 
 	trace_btree_insert_key(c, b, insert);
 	return true;
@@ -610,8 +611,8 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans,
 
 	trans_for_each_update(trans, i)
 		if (!same_leaf_as_prev(trans, i))
-			bch2_btree_node_unlock_write_inlined(iter_l(i->iter)->b,
-							     i->iter);
+			bch2_btree_node_unlock_write_inlined(trans, i->iter,
+							iter_l(i->iter)->b);
 
 	if (!ret && trans->journal_pin)
 		bch2_journal_pin_add(&c->journal, trans->journal_res.seq,
@@ -1178,7 +1179,7 @@ int bch2_btree_delete_range_trans(struct btree_trans *trans, enum btree_id id,
 			bch2_key_resize(&delete.k, max_sectors);
 			bch2_cut_back(end, &delete);
 
-			ret = bch2_extent_trim_atomic(&delete, iter);
+			ret = bch2_extent_trim_atomic(trans, iter, &delete);
 			if (ret)
 				break;
 		}
@@ -552,19 +552,19 @@ static int __ec_stripe_mem_alloc(struct bch_fs *c, size_t idx, gfp_t gfp)
 	return 0;
 }
 
-static int ec_stripe_mem_alloc(struct bch_fs *c,
+static int ec_stripe_mem_alloc(struct btree_trans *trans,
 			       struct btree_iter *iter)
 {
 	size_t idx = iter->pos.offset;
 	int ret = 0;
 
-	if (!__ec_stripe_mem_alloc(c, idx, GFP_NOWAIT|__GFP_NOWARN))
+	if (!__ec_stripe_mem_alloc(trans->c, idx, GFP_NOWAIT|__GFP_NOWARN))
 		return ret;
 
-	bch2_trans_unlock(iter->trans);
+	bch2_trans_unlock(trans);
 	ret = -EINTR;
 
-	if (!__ec_stripe_mem_alloc(c, idx, GFP_KERNEL))
+	if (!__ec_stripe_mem_alloc(trans->c, idx, GFP_KERNEL))
 		return ret;
 
 	return -ENOMEM;
@@ -735,7 +735,7 @@ static int ec_stripe_bkey_insert(struct bch_fs *c,
 found_slot:
 	start_pos = iter->pos;
 
-	ret = ec_stripe_mem_alloc(c, iter);
+	ret = ec_stripe_mem_alloc(&trans, iter);
 	if (ret)
 		goto err;
 
@@ -94,11 +94,11 @@ static int count_iters_for_insert(struct btree_trans *trans,
 
 #define EXTENT_ITERS_MAX	(BTREE_ITER_MAX / 3)
 
-int bch2_extent_atomic_end(struct btree_iter *iter,
+int bch2_extent_atomic_end(struct btree_trans *trans,
+			   struct btree_iter *iter,
 			   struct bkey_i *insert,
 			   struct bpos *end)
 {
-	struct btree_trans *trans = iter->trans;
 	struct btree_iter *copy;
 	struct bkey_s_c k;
 	unsigned nr_iters = 0;
@@ -153,27 +153,17 @@ int bch2_extent_atomic_end(struct btree_iter *iter,
 	return ret < 0 ? ret : 0;
 }
 
-int bch2_extent_trim_atomic(struct bkey_i *k, struct btree_iter *iter)
+int bch2_extent_trim_atomic(struct btree_trans *trans,
+			    struct btree_iter *iter,
+			    struct bkey_i *k)
 {
 	struct bpos end;
 	int ret;
 
-	ret = bch2_extent_atomic_end(iter, k, &end);
+	ret = bch2_extent_atomic_end(trans, iter, k, &end);
 	if (ret)
 		return ret;
 
 	bch2_cut_back(end, k);
 	return 0;
 }
-
-int bch2_extent_is_atomic(struct bkey_i *k, struct btree_iter *iter)
-{
-	struct bpos end;
-	int ret;
-
-	ret = bch2_extent_atomic_end(iter, k, &end);
-	if (ret)
-		return ret;
-
-	return !bkey_cmp(end, k->k.p);
-}
@@ -4,9 +4,9 @@
 
 #include "bcachefs.h"
 
-int bch2_extent_atomic_end(struct btree_iter *, struct bkey_i *,
-			   struct bpos *);
-int bch2_extent_trim_atomic(struct bkey_i *, struct btree_iter *);
-int bch2_extent_is_atomic(struct bkey_i *, struct btree_iter *);
+int bch2_extent_atomic_end(struct btree_trans *, struct btree_iter *,
+			   struct bkey_i *, struct bpos *);
+int bch2_extent_trim_atomic(struct btree_trans *, struct btree_iter *,
+			    struct bkey_i *);
 
 #endif /* _BCACHEFS_EXTENT_UPDATE_H */
@@ -2576,7 +2576,7 @@ static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
 		copy.k->k.p.offset += shift >> 9;
 		bch2_btree_iter_set_pos(dst, bkey_start_pos(&copy.k->k));
 
-		ret = bch2_extent_atomic_end(dst, copy.k, &atomic_end);
+		ret = bch2_extent_atomic_end(&trans, dst, copy.k, &atomic_end);
 		if (ret)
 			continue;
 
@@ -280,7 +280,7 @@ int bch2_extent_update(struct btree_trans *trans,
 	s64 i_sectors_delta	= 0, disk_sectors_delta = 0;
 	int ret;
 
-	ret = bch2_extent_trim_atomic(k, iter);
+	ret = bch2_extent_trim_atomic(trans, iter, k);
 	if (ret)
 		return ret;
 