Commit ee2c6ea7 authored by Kent Overstreet

bcachefs: btree_iter->ip_allocated

In debug mode, we now track where btree iterators and paths are
initialized/allocated - helpful in tracking down btree path overflows.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 6c36318c
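
Editor's note: the change threads an instruction-pointer argument through the path/iterator constructors. Callers pass _THIS_IP_ or _RET_IP_ and the address is stashed in the new ip_allocated fields, so a later debug dump can show which call site created a given path or iterator. As background, this is the standard kernel idiom of recording a code address and printing it symbolically with the %pS printk format. Below is a minimal, self-contained sketch of that idiom, not bcachefs code; the names demo_iter, demo_iter_init and demo_iter_report are invented for illustration.

	/*
	 * Illustrative sketch only: record an init site and report it later.
	 * _THIS_IP_ and _RET_IP_ come from <linux/instruction_pointer.h>
	 * (pulled in by <linux/kernel.h> on current trees); %pS prints the
	 * saved address as symbol+offset.
	 */
	#include <linux/kernel.h>
	#include <linux/printk.h>

	struct demo_iter {			/* hypothetical example type */
		unsigned long	ip_allocated;	/* code address that set this up */
	};

	static void demo_iter_init(struct demo_iter *iter)
	{
		/* record the caller of demo_iter_init(), like passing _RET_IP_ down */
		iter->ip_allocated = _RET_IP_;
	}

	static void demo_iter_report(struct demo_iter *iter)
	{
		/* e.g. from a "too many btree paths" style debug report */
		pr_info("iter allocated at %pS\n", (void *) iter->ip_allocated);
	}
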
@@ -24,6 +24,15 @@ static inline void btree_path_list_remove(struct btree_trans *, struct btree_pat
 static inline void btree_path_list_add(struct btree_trans *, struct btree_path *,
 				       struct btree_path *);
 
+static inline unsigned long btree_iter_ip_allocated(struct btree_iter *iter)
+{
+#ifdef CONFIG_BCACHEFS_DEBUG
+	return iter->ip_allocated;
+#else
+	return 0;
+#endif
+}
+
 static struct btree_path *btree_path_alloc(struct btree_trans *, struct btree_path *);
 
 /*
@@ -1221,7 +1230,8 @@ static struct btree_path *btree_path_clone(struct btree_trans *trans, struct btr
 
 __flatten
 struct btree_path *__bch2_btree_path_make_mut(struct btree_trans *trans,
-			 struct btree_path *path, bool intent)
+			 struct btree_path *path, bool intent,
+			 unsigned long ip)
 {
 	__btree_path_put(path, intent);
 	path = btree_path_clone(trans, path, intent);
@@ -1231,15 +1241,15 @@ struct btree_path *__bch2_btree_path_make_mut(struct btree_trans *trans,
 
 struct btree_path * __must_check
 __bch2_btree_path_set_pos(struct btree_trans *trans,
-		   struct btree_path *path, struct bpos new_pos,
-		   bool intent, int cmp)
+			  struct btree_path *path, struct bpos new_pos,
+			  bool intent, unsigned long ip, int cmp)
 {
 	unsigned level = path->level;
 
 	EBUG_ON(trans->restarted);
 	EBUG_ON(!path->ref);
 
-	path = bch2_btree_path_make_mut(trans, path, intent);
+	path = bch2_btree_path_make_mut(trans, path, intent, ip);
 
 	path->pos = new_pos;
 	trans->paths_sorted = false;
@@ -1524,7 +1534,7 @@ static inline struct btree_path *btree_path_alloc(struct btree_trans *trans,
 struct btree_path *bch2_path_get(struct btree_trans *trans,
 				 enum btree_id btree_id, struct bpos pos,
 				 unsigned locks_want, unsigned level,
-				 unsigned flags)
+				 unsigned flags, unsigned long ip)
 {
 	struct btree_path *path, *path_pos = NULL;
 	bool cached = flags & BTREE_ITER_CACHED;
@@ -1552,7 +1562,7 @@ struct btree_path *bch2_path_get(struct btree_trans *trans,
 	    path_pos->btree_id == btree_id &&
 	    path_pos->level == level) {
 		__btree_path_get(path_pos, intent);
-		path = bch2_btree_path_set_pos(trans, path_pos, pos, intent);
+		path = bch2_btree_path_set_pos(trans, path_pos, pos, intent, ip);
 	} else {
 		path = btree_path_alloc(trans, path_pos);
 		path_pos = NULL;
@@ -1569,7 +1579,7 @@ struct btree_path *bch2_path_get(struct btree_trans *trans,
 		for (i = 0; i < ARRAY_SIZE(path->l); i++)
 			path->l[i].b = ERR_PTR(-BCH_ERR_no_btree_node_init);
 #ifdef CONFIG_BCACHEFS_DEBUG
-		path->ip_allocated = _RET_IP_;
+		path->ip_allocated = ip;
 #endif
 		trans->paths_sorted = false;
 	}
@@ -1651,7 +1661,8 @@ bch2_btree_iter_traverse(struct btree_iter *iter)
 
 	iter->path = bch2_btree_path_set_pos(iter->trans, iter->path,
 					btree_iter_search_key(iter),
-					iter->flags & BTREE_ITER_INTENT);
+					iter->flags & BTREE_ITER_INTENT,
+					btree_iter_ip_allocated(iter));
 
 	ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
 	if (ret)
@@ -1686,7 +1697,8 @@ struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
 	iter->k.p = iter->pos = b->key.k.p;
 
 	iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
-					iter->flags & BTREE_ITER_INTENT);
+					iter->flags & BTREE_ITER_INTENT,
+					btree_iter_ip_allocated(iter));
 	btree_path_set_should_be_locked(iter->path);
 out:
 	bch2_btree_iter_verify_entry_exit(iter);
@@ -1740,7 +1752,8 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
 		 */
 		path = iter->path =
 			bch2_btree_path_set_pos(trans, path, bpos_successor(iter->pos),
-					   iter->flags & BTREE_ITER_INTENT);
+					   iter->flags & BTREE_ITER_INTENT,
+					   btree_iter_ip_allocated(iter));
 
 		btree_path_set_level_down(trans, path, iter->min_depth);
 
@@ -1755,7 +1768,8 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
 	iter->k.p = iter->pos = b->key.k.p;
 
 	iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
-					iter->flags & BTREE_ITER_INTENT);
+					iter->flags & BTREE_ITER_INTENT,
+					btree_iter_ip_allocated(iter));
 	btree_path_set_should_be_locked(iter->path);
 	BUG_ON(iter->path->uptodate);
 out:
@@ -1907,10 +1921,12 @@ struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos
 		iter->key_cache_path = bch2_path_get(trans, iter->btree_id, pos,
 						     iter->flags & BTREE_ITER_INTENT, 0,
 						     iter->flags|BTREE_ITER_CACHED|
-						     BTREE_ITER_CACHED_NOFILL);
+						     BTREE_ITER_CACHED_NOFILL,
+						     _THIS_IP_);
 
 	iter->key_cache_path = bch2_btree_path_set_pos(trans, iter->key_cache_path, pos,
-					iter->flags & BTREE_ITER_INTENT);
+					iter->flags & BTREE_ITER_INTENT,
+					btree_iter_ip_allocated(iter));
 
 	ret = bch2_btree_path_traverse(trans, iter->key_cache_path,
 				       iter->flags|BTREE_ITER_CACHED) ?:
@@ -1942,7 +1958,8 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp
 		struct btree_path_level *l;
 
 		iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
-					iter->flags & BTREE_ITER_INTENT);
+					iter->flags & BTREE_ITER_INTENT,
+					btree_iter_ip_allocated(iter));
 
 		ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
 		if (unlikely(ret)) {
@@ -2092,7 +2109,8 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
 
 			iter->update_path = bch2_btree_path_set_pos(trans,
 						iter->update_path, pos,
-						iter->flags & BTREE_ITER_INTENT);
+						iter->flags & BTREE_ITER_INTENT,
+						_THIS_IP_);
 			ret = bch2_btree_path_traverse(trans, iter->update_path, iter->flags);
 			if (unlikely(ret)) {
 				k = bkey_s_c_err(ret);
@@ -2124,7 +2142,8 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
 	iter->pos = iter_pos;
 
 	iter->path = bch2_btree_path_set_pos(trans, iter->path, k.k->p,
-				iter->flags & BTREE_ITER_INTENT);
+				iter->flags & BTREE_ITER_INTENT,
+				btree_iter_ip_allocated(iter));
 
 	btree_path_set_should_be_locked(iter->path);
 out_no_locked:
@@ -2170,7 +2189,8 @@ struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *iter)
 
 	while (1) {
 		iter->path = bch2_btree_path_set_pos(trans, iter->path, iter->pos,
-					iter->flags & BTREE_ITER_INTENT);
+					iter->flags & BTREE_ITER_INTENT,
+					btree_iter_ip_allocated(iter));
 
 		ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
 		if (unlikely(ret)) {
@@ -2283,7 +2303,8 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
 
 	while (1) {
 		iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
-					iter->flags & BTREE_ITER_INTENT);
+					iter->flags & BTREE_ITER_INTENT,
+					btree_iter_ip_allocated(iter));
 
 		ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
 		if (unlikely(ret)) {
@@ -2413,7 +2434,8 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
 
 	search_key = btree_iter_search_key(iter);
 	iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
-					iter->flags & BTREE_ITER_INTENT);
+					iter->flags & BTREE_ITER_INTENT,
+					btree_iter_ip_allocated(iter));
 
 	ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
 	if (unlikely(ret)) {
@@ -2678,7 +2700,8 @@ static inline void bch2_trans_iter_init_inlined(struct btree_trans *trans,
 			  unsigned flags)
 {
 	bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
-			       bch2_btree_iter_flags(trans, btree_id, flags));
+			       bch2_btree_iter_flags(trans, btree_id, flags),
+			       _RET_IP_);
 }
 
 void bch2_trans_iter_init_outlined(struct btree_trans *trans,
@@ -2687,7 +2710,8 @@ void bch2_trans_iter_init_outlined(struct btree_trans *trans,
 			  unsigned flags)
 {
 	bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
-			       bch2_btree_iter_flags(trans, btree_id, flags));
+			       bch2_btree_iter_flags(trans, btree_id, flags),
+			       _RET_IP_);
 }
 
 void bch2_trans_node_iter_init(struct btree_trans *trans,
@@ -2703,7 +2727,8 @@ void bch2_trans_node_iter_init(struct btree_trans *trans,
 		flags |= BTREE_ITER_ALL_SNAPSHOTS;
 
 	bch2_trans_iter_init_common(trans, iter, btree_id, pos, locks_want, depth,
-			       __bch2_btree_iter_flags(trans, btree_id, flags));
+			       __bch2_btree_iter_flags(trans, btree_id, flags),
+			       _RET_IP_);
 
 	iter->min_depth = depth;
@@ -144,39 +144,40 @@ __trans_next_path_with_node(struct btree_trans *trans, struct btree *b,
 	     _path = __trans_next_path_with_node((_trans), (_b),	\
 						 (_path)->idx + 1))
 
-struct btree_path *__bch2_btree_path_make_mut(struct btree_trans *,
-			 struct btree_path *, bool);
+struct btree_path *__bch2_btree_path_make_mut(struct btree_trans *, struct btree_path *,
+			 bool, unsigned long);
 
 static inline struct btree_path * __must_check
 bch2_btree_path_make_mut(struct btree_trans *trans,
-			 struct btree_path *path, bool intent)
+			 struct btree_path *path, bool intent,
+			 unsigned long ip)
 {
 	if (path->ref > 1 || path->preserve)
-		path = __bch2_btree_path_make_mut(trans, path, intent);
+		path = __bch2_btree_path_make_mut(trans, path, intent, ip);
 	path->should_be_locked = false;
 	return path;
 }
 
 struct btree_path * __must_check
 __bch2_btree_path_set_pos(struct btree_trans *, struct btree_path *,
-			struct bpos, bool, int);
+			struct bpos, bool, unsigned long, int);
 
 static inline struct btree_path * __must_check
 bch2_btree_path_set_pos(struct btree_trans *trans,
 		   struct btree_path *path, struct bpos new_pos,
-		   bool intent)
+		   bool intent, unsigned long ip)
 {
 	int cmp = bpos_cmp(new_pos, path->pos);
 
 	return cmp
-		? __bch2_btree_path_set_pos(trans, path, new_pos, intent, cmp)
+		? __bch2_btree_path_set_pos(trans, path, new_pos, intent, ip, cmp)
 		: path;
 }
 
 int __must_check bch2_btree_path_traverse(struct btree_trans *,
 					  struct btree_path *, unsigned);
 struct btree_path *bch2_path_get(struct btree_trans *, enum btree_id, struct bpos,
-				 unsigned, unsigned, unsigned);
+				 unsigned, unsigned, unsigned, unsigned long);
 struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *, struct bkey *);
 
 struct bkey_i *bch2_btree_journal_peek_slot(struct btree_trans *,
@@ -359,7 +360,8 @@ static inline void bch2_trans_iter_init_common(struct btree_trans *trans,
 					       unsigned btree_id, struct bpos pos,
 					       unsigned locks_want,
 					       unsigned depth,
-					       unsigned flags)
+					       unsigned flags,
+					       unsigned long ip)
 {
 	memset(iter, 0, sizeof(*iter));
 	iter->trans = trans;
@@ -369,8 +371,11 @@ static inline void bch2_trans_iter_init_common(struct btree_trans *trans,
 	iter->pos = pos;
 	iter->k.p = pos;
 
+#ifdef CONFIG_BCACHEFS_DEBUG
+	iter->ip_allocated = ip;
+#endif
 	iter->path = bch2_path_get(trans, btree_id, iter->pos,
-				   locks_want, depth, flags);
+				   locks_want, depth, flags, ip);
 }
 
 void bch2_trans_iter_init_outlined(struct btree_trans *, struct btree_iter *,
@@ -384,7 +389,8 @@ static inline void bch2_trans_iter_init(struct btree_trans *trans,
 	if (__builtin_constant_p(btree_id) &&
 	    __builtin_constant_p(flags))
 		bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
-				bch2_btree_iter_flags(trans, btree_id, flags));
+				bch2_btree_iter_flags(trans, btree_id, flags),
+				_THIS_IP_);
 	else
 		bch2_trans_iter_init_outlined(trans, iter, btree_id, pos, flags);
 }
@@ -289,6 +289,9 @@ struct btree_iter {
 	/* BTREE_ITER_WITH_JOURNAL: */
 	size_t journal_idx;
 	struct bpos journal_pos;
+#ifdef CONFIG_BCACHEFS_DEBUG
+	unsigned long ip_allocated;
+#endif
 };
 
 struct btree_key_cache_freelist {
@@ -36,8 +36,9 @@ static struct btree_path *get_unlocked_mut_path(struct btree_trans *trans,
 	struct btree_path *path;
 
 	path = bch2_path_get(trans, btree_id, pos, level + 1, level,
-			     BTREE_ITER_NOPRESERVE|BTREE_ITER_INTENT);
-	path = bch2_btree_path_make_mut(trans, path, true);
+			     BTREE_ITER_NOPRESERVE|
+			     BTREE_ITER_INTENT, _RET_IP_);
+	path = bch2_btree_path_make_mut(trans, path, true, _RET_IP_);
 	bch2_btree_path_downgrade(trans, path);
 	__bch2_btree_path_unlock(trans, path);
 	return path;
@@ -1780,7 +1781,7 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
 		: bpos_successor(b->data->max_key);
 
 	sib_path = bch2_path_get(trans, path->btree_id, sib_pos,
-				 U8_MAX, level, BTREE_ITER_INTENT);
+				 U8_MAX, level, BTREE_ITER_INTENT, _THIS_IP_);
 	ret = bch2_btree_path_traverse(trans, sib_path, false);
 	if (ret)
 		goto err;
@@ -2093,7 +2094,8 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans,
 		bch2_trans_copy_iter(&iter2, iter);
 
 		iter2.path = bch2_btree_path_make_mut(trans, iter2.path,
-				iter2.flags & BTREE_ITER_INTENT);
+				iter2.flags & BTREE_ITER_INTENT,
+				_THIS_IP_);
 
 		BUG_ON(iter2.path->level != b->c.level);
 		BUG_ON(!bpos_eq(iter2.path->pos, new_key->k.p));
@@ -1452,7 +1452,7 @@ static noinline int flush_new_cached_update(struct btree_trans *trans,
 	i->flags |= BTREE_TRIGGER_NORUN;
 
 	btree_path = bch2_path_get(trans, path->btree_id, path->pos, 1, 0,
-				   BTREE_ITER_INTENT);
+				   BTREE_ITER_INTENT, _THIS_IP_);
 	ret = bch2_btree_path_traverse(trans, btree_path, 0);
 	if (ret)
@@ -1590,11 +1590,13 @@ int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter
 		if (!iter->key_cache_path)
 			iter->key_cache_path =
 				bch2_path_get(trans, path->btree_id, path->pos, 1, 0,
-					      BTREE_ITER_INTENT|BTREE_ITER_CACHED);
+					      BTREE_ITER_INTENT|
+					      BTREE_ITER_CACHED, _THIS_IP_);
 
 		iter->key_cache_path =
 			bch2_btree_path_set_pos(trans, iter->key_cache_path, path->pos,
-						iter->flags & BTREE_ITER_INTENT);
+						iter->flags & BTREE_ITER_INTENT,
+						_THIS_IP_);
 
 		ret = bch2_btree_path_traverse(trans, iter->key_cache_path,
 					       BTREE_ITER_CACHED);
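
Editor's note: the btree_iter_ip_allocated() helper added at the top of btree_iter.c also illustrates the usual pattern for a config-gated debug field: the struct member and the stores are guarded by #ifdef CONFIG_BCACHEFS_DEBUG, and the accessor returns 0 when the option is off, so callers never need their own ifdefs. A small generic sketch of that pattern, with an invented config symbol and type names (not bcachefs code):

	/* Sketch: debug-only field with an always-available accessor. */
	struct example_obj {
		int		data;
	#ifdef CONFIG_EXAMPLE_DEBUG		/* hypothetical config option */
		unsigned long	ip_allocated;	/* only present in debug builds */
	#endif
	};

	static inline unsigned long example_obj_ip_allocated(const struct example_obj *o)
	{
	#ifdef CONFIG_EXAMPLE_DEBUG
		return o->ip_allocated;
	#else
		return 0;			/* callers stay ifdef-free */
	#endif
	}
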