Commit 2c3b0fc3 authored by Kent Overstreet

bcachefs: trans->nr_paths

Start to plumb through dynamically growable btree_paths; this patch
replaces most BTREE_ITER_MAX references with trans->nr_paths.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 5cc6daf7
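
In miniature, the shape of the change is: the per-transaction path capacity stops being a compile-time constant and becomes a field, initialized from the size of the inline array, so a later patch can reallocate the arrays and grow it. The sketch below is illustrative only, not bcachefs code; the names in it (struct trans, trans_init, INLINE_PATHS) are invented for this example.

	#define BITS_PER_LONG	(8 * sizeof(unsigned long))
	#define INLINE_PATHS	64			/* hypothetical inline capacity */
	#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

	struct path { int level; };			/* stand-in for struct btree_path */

	struct trans {
		unsigned	nr_paths;		/* replaces the compile-time bound */
		unsigned long	*paths_allocated;	/* bitmap of live paths */
		struct path	*paths;

		/* inline storage, used until the arrays need to grow: */
		unsigned long	_paths_allocated[INLINE_PATHS / BITS_PER_LONG];
		struct path	_paths[INLINE_PATHS];
	};

	static void trans_init(struct trans *trans)
	{
		/* capacity is now read from the instance, not from a #define: */
		trans->nr_paths		= ARRAY_SIZE(trans->_paths);
		trans->paths_allocated	= trans->_paths_allocated;
		trans->paths		= trans->_paths;
	}

Every scan or bounds check that previously used BTREE_ITER_MAX now reads trans->nr_paths instead, which is exactly what the hunks below do.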
@@ -1475,7 +1475,7 @@ static void bch2_trans_update_max_paths(struct btree_trans *trans)
 {
 	struct btree_transaction_stats *s = btree_trans_stats(trans);
 	struct printbuf buf = PRINTBUF;
-	size_t nr = bitmap_weight(trans->paths_allocated, BTREE_ITER_MAX);
+	size_t nr = bitmap_weight(trans->paths_allocated, trans->nr_paths);

 	if (!s)
 		return;
@@ -1521,9 +1521,9 @@ static noinline void btree_path_overflow(struct btree_trans *trans)
 static inline btree_path_idx_t btree_path_alloc(struct btree_trans *trans,
 						btree_path_idx_t pos)
 {
-	btree_path_idx_t idx = find_first_zero_bit(trans->paths_allocated, BTREE_ITER_MAX);
+	btree_path_idx_t idx = find_first_zero_bit(trans->paths_allocated, trans->nr_paths);

-	if (unlikely(idx == BTREE_ITER_MAX))
+	if (unlikely(idx == trans->nr_paths))
 		btree_path_overflow(trans);

 	/*
@@ -2527,7 +2527,7 @@ static void btree_trans_verify_sorted_refs(struct btree_trans *trans)
 	struct btree_path *path;
 	unsigned i;

-	BUG_ON(trans->nr_sorted != bitmap_weight(trans->paths_allocated, BTREE_ITER_MAX) - 1);
+	BUG_ON(trans->nr_sorted != bitmap_weight(trans->paths_allocated, trans->nr_paths) - 1);

 	trans_for_each_path(trans, path, i) {
 		BUG_ON(path->sorted_idx >= trans->nr_sorted);
@@ -2933,6 +2933,7 @@ struct btree_trans *__bch2_trans_get(struct bch_fs *c, unsigned fn_idx)
 	trans->journal_replay_not_finished =
 		unlikely(!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags)) &&
 		atomic_inc_not_zero(&c->journal_keys.ref);
+	trans->nr_paths		= ARRAY_SIZE(trans->_paths);
 	trans->paths_allocated	= trans->_paths_allocated;
 	trans->sorted		= trans->_sorted;
 	trans->paths		= trans->_paths;
...
@@ -82,9 +82,25 @@ static inline unsigned long *trans_paths_allocated(struct btree_path *paths)
 static inline struct btree_path *
 __trans_next_path(struct btree_trans *trans, unsigned *idx)
 {
-	*idx = find_next_bit(trans->paths_allocated, BTREE_ITER_MAX, *idx);
+	unsigned long *w = trans->paths_allocated + *idx / BITS_PER_LONG;
+	/*
+	 * Open coded find_next_bit(), because
+	 *  - this is a fast path, we can't afford the function call
+	 *  - and we know that nr_paths is a multiple of BITS_PER_LONG
+	 */
+	while (*idx < trans->nr_paths) {
+		unsigned long v = *w >> (*idx & (BITS_PER_LONG - 1));
+		if (v) {
+			*idx += __ffs(v);
+			return trans->paths + *idx;
+		}
+
+		*idx += BITS_PER_LONG;
+		*idx &= ~(BITS_PER_LONG - 1);
+		w++;
+	}

-	return *idx < BTREE_ITER_MAX ? &trans->paths[*idx] : NULL;
+	return NULL;
 }

 /*
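
For readers puzzling over the open-coded loop, here is a standalone userspace sketch of the same word-at-a-time scan, checked against a known set of bits. It is illustrative only: my_next_bit() and the test harness are invented for this example, and __builtin_ctzl() (gcc/clang) stands in for the kernel's __ffs(); both require a nonzero argument.

	#include <assert.h>
	#include <stdio.h>

	#define BITS_PER_LONG	(8 * (unsigned)sizeof(unsigned long))

	/*
	 * Word-at-a-time scan for the first set bit at or after *idx,
	 * mirroring __trans_next_path() above: nr must be a multiple of
	 * BITS_PER_LONG. Returns the bit index, or -1 when none remain.
	 */
	static long my_next_bit(const unsigned long *bitmap, unsigned nr, unsigned *idx)
	{
		const unsigned long *w = bitmap + *idx / BITS_PER_LONG;

		while (*idx < nr) {
			/* shift so that bit *idx of the word lands at position 0 */
			unsigned long v = *w >> (*idx & (BITS_PER_LONG - 1));
			if (v) {
				*idx += __builtin_ctzl(v);	/* kernel: __ffs(v) */
				return *idx;
			}

			/* advance *idx to bit 0 of the next word */
			*idx += BITS_PER_LONG;
			*idx &= ~(BITS_PER_LONG - 1);
			w++;
		}
		return -1;
	}

	int main(void)
	{
		unsigned long bitmap[2] = { 0, 0 };
		unsigned nr = 2 * BITS_PER_LONG;
		unsigned expected[] = { 0, 5, BITS_PER_LONG + 3 };
		unsigned i, idx, found = 0;
		long b;

		for (i = 0; i < 3; i++)
			bitmap[expected[i] / BITS_PER_LONG] |= 1UL << (expected[i] % BITS_PER_LONG);

		idx = 0;
		while ((b = my_next_bit(bitmap, nr, &idx)) >= 0) {
			assert((unsigned)b == expected[found++]);
			idx++;		/* resume just past the bit we returned */
		}
		assert(found == 3);
		printf("open-coded scan found all %u bits in order\n", found);
		return 0;
	}

The subtle step is the pair of lines taken after a zero word: adding BITS_PER_LONG and then masking with ~(BITS_PER_LONG - 1) rounds *idx up to the next word boundary even when the scan started mid-word, so no bits are skipped and the loop terminates once *idx reaches the capacity.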
@@ -626,7 +642,7 @@ int __bch2_btree_trans_too_many_iters(struct btree_trans *);

 static inline int btree_trans_too_many_iters(struct btree_trans *trans)
 {
-	if (bitmap_weight(trans->paths_allocated, BTREE_ITER_MAX) > BTREE_ITER_MAX - 8)
+	if (bitmap_weight(trans->paths_allocated, trans->nr_paths) > BTREE_ITER_MAX - 8)
 		return __bch2_btree_trans_too_many_iters(trans);

 	return 0;
...
@@ -390,6 +390,7 @@ struct btree_trans {
 	unsigned		mem_bytes;

 	btree_path_idx_t	nr_sorted;
+	btree_path_idx_t	nr_paths;
 	btree_path_idx_t	nr_paths_max;
 	u8			fn_idx;
 	u8			nr_updates;
...
@@ -386,7 +386,7 @@ bch2_trans_update_by_path(struct btree_trans *trans, btree_path_idx_t path_idx,
 	struct btree_path *path = trans->paths + path_idx;

 	EBUG_ON(!path->should_be_locked);
-	EBUG_ON(trans->nr_updates >= BTREE_ITER_MAX);
+	EBUG_ON(trans->nr_updates >= trans->nr_paths);
 	EBUG_ON(!bpos_eq(k->k.p, path->pos));

 	n = (struct btree_insert_entry) {
...