Commit a564c9fa authored by Kent Overstreet

bcachefs: Include btree_trans in more tracepoints

This gives us more context information - e.g. which codepath is invoking
btree node reads.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent d9e14a4e
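
The conversion below is mechanical but wide: helpers that used to take a struct bch_fs * now take a struct btree_trans *, derive the filesystem pointer locally via trans->c, and pass the transaction to trace_and_count(), whose event class records trans->fn, the function that started the transaction. A minimal sketch of the before/after shape (example_helper and some_event are hypothetical names, not part of this commit):

        /* Before: the event can only identify the filesystem */
        void example_helper(struct bch_fs *c)
        {
                trace_and_count(c, some_event, c);
        }

        /* After: the transaction is threaded through; the filesystem pointer
         * is derived from it, and the event also records trans->fn, i.e.
         * which codepath reached this point.
         */
        void example_helper(struct btree_trans *trans)
        {
                struct bch_fs *c = trans->c;

                trace_and_count(c, some_event, trans);
        }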
@@ -500,19 +500,21 @@ void bch2_fs_btree_cache_init_early(struct btree_cache *bc)
  * cannibalize_bucket() will take. This means every time we unlock the root of
  * the btree, we need to release this lock if we have it held.
  */
-void bch2_btree_cache_cannibalize_unlock(struct bch_fs *c)
+void bch2_btree_cache_cannibalize_unlock(struct btree_trans *trans)
 {
+        struct bch_fs *c = trans->c;
         struct btree_cache *bc = &c->btree_cache;

         if (bc->alloc_lock == current) {
-                trace_and_count(c, btree_cache_cannibalize_unlock, c);
+                trace_and_count(c, btree_cache_cannibalize_unlock, trans);
                 bc->alloc_lock = NULL;
                 closure_wake_up(&bc->alloc_wait);
         }
 }

-int bch2_btree_cache_cannibalize_lock(struct bch_fs *c, struct closure *cl)
+int bch2_btree_cache_cannibalize_lock(struct btree_trans *trans, struct closure *cl)
 {
+        struct bch_fs *c = trans->c;
         struct btree_cache *bc = &c->btree_cache;
         struct task_struct *old;
@@ -521,7 +523,7 @@ int bch2_btree_cache_cannibalize_lock(struct bch_fs *c, struct closure *cl)
                 goto success;

         if (!cl) {
-                trace_and_count(c, btree_cache_cannibalize_lock_fail, c);
+                trace_and_count(c, btree_cache_cannibalize_lock_fail, trans);
                 return -BCH_ERR_ENOMEM_btree_cache_cannibalize_lock;
         }
@@ -535,11 +537,11 @@ int bch2_btree_cache_cannibalize_lock(struct bch_fs *c, struct closure *cl)
                         goto success;
         }

-        trace_and_count(c, btree_cache_cannibalize_lock_fail, c);
+        trace_and_count(c, btree_cache_cannibalize_lock_fail, trans);
         return -BCH_ERR_btree_cache_cannibalize_lock_blocked;

 success:
-        trace_and_count(c, btree_cache_cannibalize_lock, c);
+        trace_and_count(c, btree_cache_cannibalize_lock, trans);
         return 0;
 }
@@ -673,7 +675,7 @@ struct btree *bch2_btree_node_mem_alloc(struct btree_trans *trans, bool pcpu_rea
                 mutex_unlock(&bc->lock);

-                trace_and_count(c, btree_cache_cannibalize, c);
+                trace_and_count(c, btree_cache_cannibalize, trans);
                 goto out;
         }
@@ -749,7 +751,7 @@ static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans,
         if (path && sync)
                 bch2_trans_unlock_noassert(trans);

-        bch2_btree_node_read(c, b, sync);
+        bch2_btree_node_read(trans, b, sync);

         if (!sync)
                 return NULL;
@@ -1039,7 +1041,7 @@ struct btree *bch2_btree_node_get_noiter(struct btree_trans *trans,
                 goto retry;

         if (IS_ERR(b) &&
-            !bch2_btree_cache_cannibalize_lock(c, NULL))
+            !bch2_btree_cache_cannibalize_lock(trans, NULL))
                 goto retry;

         if (IS_ERR(b))
@@ -1087,7 +1089,7 @@ struct btree *bch2_btree_node_get_noiter(struct btree_trans *trans,
         EBUG_ON(BTREE_NODE_LEVEL(b->data) != level);
         btree_check_header(c, b);
 out:
-        bch2_btree_cache_cannibalize_unlock(c);
+        bch2_btree_cache_cannibalize_unlock(trans);
         return b;
 }
...
@@ -17,8 +17,8 @@ int __bch2_btree_node_hash_insert(struct btree_cache *, struct btree *);
 int bch2_btree_node_hash_insert(struct btree_cache *, struct btree *,
                                 unsigned, enum btree_id);

-void bch2_btree_cache_cannibalize_unlock(struct bch_fs *);
-int bch2_btree_cache_cannibalize_lock(struct bch_fs *, struct closure *);
+void bch2_btree_cache_cannibalize_unlock(struct btree_trans *);
+int bch2_btree_cache_cannibalize_lock(struct btree_trans *, struct closure *);

 struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *);
 struct btree *bch2_btree_node_mem_alloc(struct btree_trans *, bool);
...
@@ -1575,16 +1575,17 @@ static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool
         return 0;
 }

-void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
+void bch2_btree_node_read(struct btree_trans *trans, struct btree *b,
                           bool sync)
 {
+        struct bch_fs *c = trans->c;
         struct extent_ptr_decoded pick;
         struct btree_read_bio *rb;
         struct bch_dev *ca;
         struct bio *bio;
         int ret;

-        trace_and_count(c, btree_node_read, c, b);
+        trace_and_count(c, btree_node_read, trans, b);

         if (bch2_verify_all_btree_replicas &&
             !btree_node_read_all_replicas(c, b, sync))
@@ -1663,12 +1664,12 @@ static int __bch2_btree_root_read(struct btree_trans *trans, enum btree_id id,
         closure_init_stack(&cl);

         do {
-                ret = bch2_btree_cache_cannibalize_lock(c, &cl);
+                ret = bch2_btree_cache_cannibalize_lock(trans, &cl);
                 closure_sync(&cl);
         } while (ret);

         b = bch2_btree_node_mem_alloc(trans, level != 0);
-        bch2_btree_cache_cannibalize_unlock(c);
+        bch2_btree_cache_cannibalize_unlock(trans);

         BUG_ON(IS_ERR(b));
@@ -1677,7 +1678,7 @@ static int __bch2_btree_root_read(struct btree_trans *trans, enum btree_id id,
         set_btree_node_read_in_flight(b);

-        bch2_btree_node_read(c, b, true);
+        bch2_btree_node_read(trans, b, true);

         if (btree_node_read_error(b)) {
                 bch2_btree_node_hash_remove(&c->btree_cache, b);
...
@@ -130,7 +130,7 @@ void bch2_btree_init_next(struct btree_trans *, struct btree *);
 int bch2_btree_node_read_done(struct bch_fs *, struct bch_dev *,
                               struct btree *, bool, bool *);
-void bch2_btree_node_read(struct bch_fs *, struct btree *, bool);
+void bch2_btree_node_read(struct btree_trans *, struct btree *, bool);
 int bch2_btree_root_read(struct bch_fs *, enum btree_id,
                          const struct bkey_i *, unsigned);
...
@@ -977,7 +977,7 @@ static int bch2_btree_path_traverse_all(struct btree_trans *trans)
                 closure_init_stack(&cl);

                 do {
-                        ret = bch2_btree_cache_cannibalize_lock(c, &cl);
+                        ret = bch2_btree_cache_cannibalize_lock(trans, &cl);
                         closure_sync(&cl);
                 } while (ret);
         }
@@ -1013,7 +1013,7 @@ static int bch2_btree_path_traverse_all(struct btree_trans *trans)
          * then failed to relock a path - that's fine.
          */
 err:
-        bch2_btree_cache_cannibalize_unlock(c);
+        bch2_btree_cache_cannibalize_unlock(trans);

         trans->in_traverse_all = false;
...
@@ -164,9 +164,11 @@ static bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *b,
 /* Btree node freeing/allocation: */

-static void __btree_node_free(struct bch_fs *c, struct btree *b)
+static void __btree_node_free(struct btree_trans *trans, struct btree *b)
 {
-        trace_and_count(c, btree_node_free, c, b);
+        struct bch_fs *c = trans->c;
+
+        trace_and_count(c, btree_node_free, trans, b);

         BUG_ON(btree_node_write_blocked(b));
         BUG_ON(btree_node_dirty(b));
@@ -192,7 +194,7 @@ static void bch2_btree_node_free_inmem(struct btree_trans *trans,
         bch2_btree_node_lock_write_nofail(trans, path, &b->c);
         bch2_btree_node_hash_remove(&c->btree_cache, b);
-        __btree_node_free(c, b);
+        __btree_node_free(trans, b);
         six_unlock_write(&b->c.lock);
         mark_btree_node_locked_noreset(path, level, BTREE_NODE_INTENT_LOCKED);
@@ -363,7 +365,7 @@ static struct btree *bch2_btree_node_alloc(struct btree_update *as,
         ret = bch2_btree_node_hash_insert(&c->btree_cache, b, level, as->btree_id);
         BUG_ON(ret);

-        trace_and_count(c, btree_node_alloc, c, b);
+        trace_and_count(c, btree_node_alloc, trans, b);
         bch2_increment_clock(c, btree_sectors(c), WRITE);
         return b;
 }
@@ -453,7 +455,7 @@ static void bch2_btree_reserve_put(struct btree_update *as, struct btree_trans *
                         btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent);
                         btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_write);
-                        __btree_node_free(c, b);
+                        __btree_node_free(trans, b);
                         six_unlock_write(&b->c.lock);
                         six_unlock_intent(&b->c.lock);
                 }
@@ -466,7 +468,6 @@ static int bch2_btree_reserve_get(struct btree_trans *trans,
                                   unsigned flags,
                                   struct closure *cl)
 {
-        struct bch_fs *c = as->c;
         struct btree *b;
         unsigned interior;
         int ret = 0;
@@ -477,7 +478,7 @@ static int bch2_btree_reserve_get(struct btree_trans *trans,
          * Protects reaping from the btree node cache and using the btree node
          * open bucket reserve:
          */
-        ret = bch2_btree_cache_cannibalize_lock(c, cl);
+        ret = bch2_btree_cache_cannibalize_lock(trans, cl);
         if (ret)
                 return ret;
@@ -496,7 +497,7 @@ static int bch2_btree_reserve_get(struct btree_trans *trans,
                 }
         }
 err:
-        bch2_btree_cache_cannibalize_unlock(c);
+        bch2_btree_cache_cannibalize_unlock(trans);
         return ret;
 }
@@ -1223,7 +1224,7 @@ static void bch2_btree_set_root(struct btree_update *as,
         struct bch_fs *c = as->c;
         struct btree *old;

-        trace_and_count(c, btree_node_set_root, c, b);
+        trace_and_count(c, btree_node_set_root, trans, b);

         old = btree_node_root(c, b);
@@ -1489,7 +1490,7 @@ static int btree_split(struct btree_update *as, struct btree_trans *trans,
         if (b->nr.live_u64s > BTREE_SPLIT_THRESHOLD(c)) {
                 struct btree *n[2];

-                trace_and_count(c, btree_node_split, c, b);
+                trace_and_count(c, btree_node_split, trans, b);

                 n[0] = n1 = bch2_btree_node_alloc(as, trans, b->c.level);
                 n[1] = n2 = bch2_btree_node_alloc(as, trans, b->c.level);
@@ -1547,7 +1548,7 @@ static int btree_split(struct btree_update *as, struct btree_trans *trans,
                         btree_split_insert_keys(as, trans, path, n3, &as->parent_keys);
                 }
         } else {
-                trace_and_count(c, btree_node_compact, c, b);
+                trace_and_count(c, btree_node_compact, trans, b);

                 n1 = bch2_btree_node_alloc_replacement(as, trans, b);
@@ -1867,7 +1868,7 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
         if (ret)
                 goto err;

-        trace_and_count(c, btree_node_merge, c, b);
+        trace_and_count(c, btree_node_merge, trans, b);

         bch2_btree_interior_update_will_free_node(as, b);
         bch2_btree_interior_update_will_free_node(as, m);
@@ -1970,7 +1971,7 @@ int bch2_btree_node_rewrite(struct btree_trans *trans,
         mark_btree_node_locked(trans, new_path, n->c.level, BTREE_NODE_INTENT_LOCKED);
         bch2_btree_path_level_init(trans, new_path, n);

-        trace_and_count(c, btree_node_rewrite, c, b);
+        trace_and_count(c, btree_node_rewrite, trans, b);

         if (parent) {
                 bch2_keylist_add(&as->parent_keys, &n->key);
@@ -2252,7 +2253,7 @@ int bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *ite
          * btree_iter_traverse():
          */
         if (btree_ptr_hash_val(new_key) != b->hash_val) {
-                ret = bch2_btree_cache_cannibalize_lock(c, &cl);
+                ret = bch2_btree_cache_cannibalize_lock(trans, &cl);
                 if (ret) {
                         ret = drop_locks_do(trans, (closure_sync(&cl), 0));
                         if (ret)
@@ -2276,7 +2277,7 @@ int bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *ite
                 six_unlock_intent(&new_hash->c.lock);
         }
         closure_sync(&cl);
-        bch2_btree_cache_cannibalize_unlock(c);
+        bch2_btree_cache_cannibalize_unlock(trans);
         return ret;
 }
@@ -2337,12 +2338,12 @@ static int __bch2_btree_root_alloc(struct btree_trans *trans, enum btree_id id)
         closure_init_stack(&cl);

         do {
-                ret = bch2_btree_cache_cannibalize_lock(c, &cl);
+                ret = bch2_btree_cache_cannibalize_lock(trans, &cl);
                 closure_sync(&cl);
         } while (ret);

         b = bch2_btree_node_mem_alloc(trans, false);
-        bch2_btree_cache_cannibalize_unlock(c);
+        bch2_btree_cache_cannibalize_unlock(trans);

         set_btree_node_fake(b);
         set_btree_node_need_rewrite(b);
...
@@ -72,7 +72,7 @@ DECLARE_EVENT_CLASS(trans_str,
                   __entry->trans_fn, (void *) __entry->caller_ip, __get_str(str))
 );

-DECLARE_EVENT_CLASS(btree_node,
+DECLARE_EVENT_CLASS(btree_node_nofs,
         TP_PROTO(struct bch_fs *c, struct btree *b),
         TP_ARGS(c, b),
@@ -97,6 +97,33 @@ DECLARE_EVENT_CLASS(btree_node,
                   __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
 );

+DECLARE_EVENT_CLASS(btree_node,
+        TP_PROTO(struct btree_trans *trans, struct btree *b),
+        TP_ARGS(trans, b),
+
+        TP_STRUCT__entry(
+                __field(dev_t,          dev                     )
+                __array(char,           trans_fn, 32            )
+                __field(u8,             level                   )
+                __field(u8,             btree_id                )
+                TRACE_BPOS_entries(pos)
+        ),
+
+        TP_fast_assign(
+                __entry->dev            = trans->c->dev;
+                strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+                __entry->level          = b->c.level;
+                __entry->btree_id       = b->c.btree_id;
+                TRACE_BPOS_assign(pos, b->key.k.p);
+        ),
+
+        TP_printk("%d,%d %s %u %s %llu:%llu:%u",
+                  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->trans_fn,
+                  __entry->level,
+                  bch2_btree_id_str(__entry->btree_id),
+                  __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
+);
+
 DECLARE_EVENT_CLASS(bch_fs,
         TP_PROTO(struct bch_fs *c),
         TP_ARGS(c),
@@ -112,6 +139,23 @@ DECLARE_EVENT_CLASS(bch_fs,
         TP_printk("%d,%d", MAJOR(__entry->dev), MINOR(__entry->dev))
 );

+DECLARE_EVENT_CLASS(btree_trans,
+        TP_PROTO(struct btree_trans *trans),
+        TP_ARGS(trans),
+
+        TP_STRUCT__entry(
+                __field(dev_t,          dev                     )
+                __array(char,           trans_fn, 32            )
+        ),
+
+        TP_fast_assign(
+                __entry->dev            = trans->c->dev;
+                strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+        ),
+
+        TP_printk("%d,%d %s", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->trans_fn)
+);
+
 DECLARE_EVENT_CLASS(bio,
         TP_PROTO(struct bio *bio),
         TP_ARGS(bio),
@@ -330,36 +374,36 @@ TRACE_EVENT(btree_cache_scan,
                   __entry->nr_to_scan, __entry->can_free, __entry->ret)
 );

-DEFINE_EVENT(btree_node, btree_cache_reap,
+DEFINE_EVENT(btree_node_nofs, btree_cache_reap,
         TP_PROTO(struct bch_fs *c, struct btree *b),
         TP_ARGS(c, b)
 );

-DEFINE_EVENT(bch_fs, btree_cache_cannibalize_lock_fail,
-        TP_PROTO(struct bch_fs *c),
-        TP_ARGS(c)
+DEFINE_EVENT(btree_trans, btree_cache_cannibalize_lock_fail,
+        TP_PROTO(struct btree_trans *trans),
+        TP_ARGS(trans)
 );

-DEFINE_EVENT(bch_fs, btree_cache_cannibalize_lock,
-        TP_PROTO(struct bch_fs *c),
-        TP_ARGS(c)
+DEFINE_EVENT(btree_trans, btree_cache_cannibalize_lock,
+        TP_PROTO(struct btree_trans *trans),
+        TP_ARGS(trans)
 );

-DEFINE_EVENT(bch_fs, btree_cache_cannibalize,
-        TP_PROTO(struct bch_fs *c),
-        TP_ARGS(c)
+DEFINE_EVENT(btree_trans, btree_cache_cannibalize,
+        TP_PROTO(struct btree_trans *trans),
+        TP_ARGS(trans)
 );

-DEFINE_EVENT(bch_fs, btree_cache_cannibalize_unlock,
-        TP_PROTO(struct bch_fs *c),
-        TP_ARGS(c)
+DEFINE_EVENT(btree_trans, btree_cache_cannibalize_unlock,
+        TP_PROTO(struct btree_trans *trans),
+        TP_ARGS(trans)
 );

 /* Btree */

 DEFINE_EVENT(btree_node, btree_node_read,
-        TP_PROTO(struct bch_fs *c, struct btree *b),
-        TP_ARGS(c, b)
+        TP_PROTO(struct btree_trans *trans, struct btree *b),
+        TP_ARGS(trans, b)
 );

 TRACE_EVENT(btree_node_write,
@@ -383,13 +427,13 @@ TRACE_EVENT(btree_node_write,
 );

 DEFINE_EVENT(btree_node, btree_node_alloc,
-        TP_PROTO(struct bch_fs *c, struct btree *b),
-        TP_ARGS(c, b)
+        TP_PROTO(struct btree_trans *trans, struct btree *b),
+        TP_ARGS(trans, b)
 );

 DEFINE_EVENT(btree_node, btree_node_free,
-        TP_PROTO(struct bch_fs *c, struct btree *b),
-        TP_ARGS(c, b)
+        TP_PROTO(struct btree_trans *trans, struct btree *b),
+        TP_ARGS(trans, b)
 );

 TRACE_EVENT(btree_reserve_get_fail,
@@ -421,28 +465,28 @@ TRACE_EVENT(btree_reserve_get_fail,
 );

 DEFINE_EVENT(btree_node, btree_node_compact,
-        TP_PROTO(struct bch_fs *c, struct btree *b),
-        TP_ARGS(c, b)
+        TP_PROTO(struct btree_trans *trans, struct btree *b),
+        TP_ARGS(trans, b)
 );

 DEFINE_EVENT(btree_node, btree_node_merge,
-        TP_PROTO(struct bch_fs *c, struct btree *b),
-        TP_ARGS(c, b)
+        TP_PROTO(struct btree_trans *trans, struct btree *b),
+        TP_ARGS(trans, b)
 );

 DEFINE_EVENT(btree_node, btree_node_split,
-        TP_PROTO(struct bch_fs *c, struct btree *b),
-        TP_ARGS(c, b)
+        TP_PROTO(struct btree_trans *trans, struct btree *b),
+        TP_ARGS(trans, b)
 );

 DEFINE_EVENT(btree_node, btree_node_rewrite,
-        TP_PROTO(struct bch_fs *c, struct btree *b),
-        TP_ARGS(c, b)
+        TP_PROTO(struct btree_trans *trans, struct btree *b),
+        TP_ARGS(trans, b)
 );

 DEFINE_EVENT(btree_node, btree_node_set_root,
-        TP_PROTO(struct bch_fs *c, struct btree *b),
-        TP_ARGS(c, b)
+        TP_PROTO(struct btree_trans *trans, struct btree *b),
+        TP_ARGS(trans, b)
 );

 TRACE_EVENT(btree_path_relock_fail,
...
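
With the new classes, every cannibalize and btree node event carries the transaction's entry function right after the device numbers. Illustrative output, with made-up values, following the TP_printk formats above (btree_trans class: "%d,%d %s"; btree_node class: "%d,%d %s %u %s %llu:%llu:%u"):

        btree_cache_cannibalize_lock: 254,0 bch2_btree_node_rewrite
        btree_node_read: 254,0 bch2_gc 1 extents 4096:128:4294967295

The first pair is the device major/minor, followed by trans->fn; btree_node events additionally print the node's level, btree ID, and position (inode:offset:snapshot).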