Commit e96f8f18 authored by Linus Torvalds

Merge branch 'for-linus-4.10' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs

Pull btrfs fixes from Chris Mason:
 "These are all over the place.

  The tracepoint part of the pull fixes a crash and adds a little more
  information to two tracepoints, while the rest are good old fashioned
  fixes"

* 'for-linus-4.10' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs:
  btrfs: make tracepoint format strings more compact
  Btrfs: add truncated_len for ordered extent tracepoints
  Btrfs: add 'inode' for extent map tracepoint
  btrfs: fix crash when tracepoint arguments are freed by wq callbacks
  Btrfs: adjust outstanding_extents counter properly when dio write is split
  Btrfs: fix lockdep warning about log_mutex
  Btrfs: use down_read_nested to make lockdep silent
  btrfs: fix locking when we put back a delayed ref that's too new
  btrfs: fix error handling when run_delayed_extent_op fails
  btrfs: return the actual error value from btrfs_uuid_tree_iterate
parents 04e39627 0bf70aeb
......@@ -273,6 +273,8 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
unsigned long flags;
while (1) {
void *wtag;
spin_lock_irqsave(lock, flags);
if (list_empty(list))
break;
......@@ -299,11 +301,13 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
spin_unlock_irqrestore(lock, flags);
/*
* we don't want to call the ordered free functions
* with the lock held though
* We don't want to call the ordered free functions with the
* lock held though. Save the work as tag for the trace event,
* because the callback could free the structure.
*/
wtag = work;
work->ordered_free(work);
trace_btrfs_all_work_done(work);
trace_btrfs_all_work_done(wq->fs_info, wtag);
}
spin_unlock_irqrestore(lock, flags);
}
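
The crash fix above hinges on one detail: ordered_free() may free the btrfs_work structure, so the code saves the pointer value in a plain void * beforehand and hands only that opaque tag to the tracepoint. A minimal userspace sketch of the same pattern, with invented names (record_done_event() stands in for trace_btrfs_all_work_done()):

#include <stdio.h>
#include <stdlib.h>

struct work_item {
    void (*free_fn)(struct work_item *w);    /* may free the item */
};

static void free_work(struct work_item *w)
{
    free(w);
}

/* Stand-in for the tracepoint: it only records the pointer value and
 * never dereferences it, so it is safe even after the item is freed. */
static void record_done_event(void *tag)
{
    printf("work done: %p\n", tag);
}

static void run_and_trace(struct work_item *w)
{
    void *tag = w;            /* save the address while it is still valid */

    w->free_fn(w);            /* the callback may free w here */
    record_done_event(tag);   /* uses tag only as an opaque value */
}

int main(void)
{
    struct work_item *w = malloc(sizeof(*w));

    if (!w)
        return 1;
    w->free_fn = free_work;
    run_and_trace(w);
    return 0;
}
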
......@@ -311,6 +315,7 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
static void normal_work_helper(struct btrfs_work *work)
{
struct __btrfs_workqueue *wq;
void *wtag;
int need_order = 0;
/*
......@@ -324,6 +329,8 @@ static void normal_work_helper(struct btrfs_work *work)
if (work->ordered_func)
need_order = 1;
wq = work->wq;
/* Safe for tracepoints in case work gets freed by the callback */
wtag = work;
trace_btrfs_work_sched(work);
thresh_exec_hook(wq);
......@@ -333,7 +340,7 @@ static void normal_work_helper(struct btrfs_work *work)
run_ordered_work(wq);
}
if (!need_order)
trace_btrfs_all_work_done(work);
trace_btrfs_all_work_done(wq->fs_info, wtag);
}
void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func,
......
......@@ -2522,11 +2522,11 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
if (ref && ref->seq &&
btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
spin_unlock(&locked_ref->lock);
btrfs_delayed_ref_unlock(locked_ref);
spin_lock(&delayed_refs->lock);
locked_ref->processing = 0;
delayed_refs->num_heads_ready++;
spin_unlock(&delayed_refs->lock);
btrfs_delayed_ref_unlock(locked_ref);
locked_ref = NULL;
cond_resched();
count++;
......@@ -2572,7 +2572,10 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
*/
if (must_insert_reserved)
locked_ref->must_insert_reserved = 1;
spin_lock(&delayed_refs->lock);
locked_ref->processing = 0;
delayed_refs->num_heads_ready++;
spin_unlock(&delayed_refs->lock);
btrfs_debug(fs_info,
"run_delayed_extent_op returned %d",
ret);
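
Both extent-tree.c hunks apply the same rule: when a locked delayed-ref head has to be handed back, either because the ref is too new or because run_delayed_extent_op() failed, its processing flag and the shared num_heads_ready counter are restored under delayed_refs->lock, and only then is the head unlocked, so another worker can never pick up a half-reset head. A rough userspace analogue of that ordering, using pthread locks and invented names:

#include <pthread.h>

/* Invented stand-ins for delayed_refs and a locked ref head. */
struct ref_root {
    pthread_mutex_t lock;          /* plays the role of delayed_refs->lock */
    int num_heads_ready;
};

struct ref_head {
    pthread_mutex_t mutex;         /* per-head lock, held while processing */
    int processing;
};

/* Put a head back so another worker can claim it: shared state is restored
 * under the shared lock *before* the per-head lock is dropped. */
void put_back_head(struct ref_root *root, struct ref_head *head)
{
    pthread_mutex_lock(&root->lock);
    head->processing = 0;
    root->num_heads_ready++;
    pthread_mutex_unlock(&root->lock);

    pthread_mutex_unlock(&head->mutex);  /* like btrfs_delayed_ref_unlock() */
}
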
......@@ -7384,7 +7387,8 @@ btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
spin_unlock(&cluster->refill_lock);
down_read(&used_bg->data_rwsem);
/* We should only have one-level nested. */
down_read_nested(&used_bg->data_rwsem, SINGLE_DEPTH_NESTING);
spin_lock(&cluster->refill_lock);
if (used_bg == cluster->block_group)
......
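
down_read_nested() does not change the locking behaviour here, it only annotates it: taking a second data_rwsem while one is already held is a deliberate, single-level nesting of the same lock class, which plain down_read() would make lockdep report as possible recursion. A kernel-style fragment of the annotation with hypothetical names (not the btrfs code itself):

#include <linux/rwsem.h>
#include <linux/lockdep.h>

struct group {
    struct rw_semaphore data_rwsem;
};

/* Hypothetical helper: cur->data_rwsem is taken first, then the rwsem of a
 * second object of the same lock class.  SINGLE_DEPTH_NESTING tells lockdep
 * this is intentional and never goes deeper than one level. */
static void lock_both_groups(struct group *cur, struct group *other)
{
    down_read(&cur->data_rwsem);
    down_read_nested(&other->data_rwsem, SINGLE_DEPTH_NESTING);

    /* ... work with both groups ... */

    up_read(&other->data_rwsem);
    up_read(&cur->data_rwsem);
}
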
......@@ -7059,7 +7059,7 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
write_unlock(&em_tree->lock);
out:
trace_btrfs_get_extent(root, em);
trace_btrfs_get_extent(root, inode, em);
btrfs_free_path(path);
if (trans) {
......@@ -7623,11 +7623,18 @@ static void adjust_dio_outstanding_extents(struct inode *inode,
* within our reservation, otherwise we need to adjust our inode
* counter appropriately.
*/
if (dio_data->outstanding_extents) {
if (dio_data->outstanding_extents >= num_extents) {
dio_data->outstanding_extents -= num_extents;
} else {
/*
* If dio write length has been split due to no large enough
* contiguous space, we need to compensate our inode counter
* appropriately.
*/
u64 num_needed = num_extents - dio_data->outstanding_extents;
spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->outstanding_extents += num_extents;
BTRFS_I(inode)->outstanding_extents += num_needed;
spin_unlock(&BTRFS_I(inode)->lock);
}
}
......
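
The outstanding_extents change is easiest to see with numbers: if a split DIO write ends up needing num_extents = 4 while only dio_data->outstanding_extents = 1 was reserved, the old test "if (dio_data->outstanding_extents)" still took the subtract path and wrapped the unsigned counter (1 - 4); the new ">=" test instead charges the shortfall of 3 to the inode's own counter. A small standalone sketch of the arithmetic, with invented types (the real code takes BTRFS_I(inode)->lock around the inode update):

#include <stdio.h>

struct dio_data_s {
    unsigned long long outstanding_extents;  /* reserved for this DIO */
};

struct inode_s {
    unsigned long long outstanding_extents;  /* per-inode counter */
};

static void adjust(struct dio_data_s *dio, struct inode_s *inode,
                   unsigned long long num_extents)
{
    if (dio->outstanding_extents >= num_extents) {
        dio->outstanding_extents -= num_extents;
    } else {
        /* The write was split into more extents than were reserved up
         * front; add the shortfall to the inode instead of underflowing. */
        unsigned long long num_needed =
            num_extents - dio->outstanding_extents;

        inode->outstanding_extents += num_needed;
    }
}

int main(void)
{
    struct dio_data_s dio = { .outstanding_extents = 1 };
    struct inode_s inode = { .outstanding_extents = 0 };

    adjust(&dio, &inode, 4);
    printf("dio=%llu inode=%llu\n",
           dio.outstanding_extents, inode.outstanding_extents);
    return 0;
}
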
......@@ -37,6 +37,7 @@
*/
#define LOG_INODE_ALL 0
#define LOG_INODE_EXISTS 1
#define LOG_OTHER_INODE 2
/*
* directory trouble cases
......@@ -4641,7 +4642,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
if (S_ISDIR(inode->i_mode) ||
(!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
&BTRFS_I(inode)->runtime_flags) &&
inode_only == LOG_INODE_EXISTS))
inode_only >= LOG_INODE_EXISTS))
max_key.type = BTRFS_XATTR_ITEM_KEY;
else
max_key.type = (u8)-1;
......@@ -4665,7 +4666,13 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
return ret;
}
mutex_lock(&BTRFS_I(inode)->log_mutex);
if (inode_only == LOG_OTHER_INODE) {
inode_only = LOG_INODE_EXISTS;
mutex_lock_nested(&BTRFS_I(inode)->log_mutex,
SINGLE_DEPTH_NESTING);
} else {
mutex_lock(&BTRFS_I(inode)->log_mutex);
}
/*
* a brute force approach to making sure we get the most uptodate
......@@ -4817,7 +4824,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
* unpin it.
*/
err = btrfs_log_inode(trans, root, other_inode,
LOG_INODE_EXISTS,
LOG_OTHER_INODE,
0, LLONG_MAX, ctx);
iput(other_inode);
if (err)
......
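
The log_mutex change removes a lockdep false positive of the same kind: while logging one inode, btrfs_log_inode() may have to log a second inode whose log_mutex belongs to the same lock class. The new LOG_OTHER_INODE mode tells the inner call to take that mutex with mutex_lock_nested() and then behave as LOG_INODE_EXISTS. A hedged, kernel-style illustration with made-up names (not the actual tree-log code):

#include <linux/mutex.h>
#include <linux/lockdep.h>

#define MODE_NORMAL 0
#define MODE_OTHER  1   /* second object of the same lock class */

struct logged_obj {
    struct mutex log_mutex;
};

/* Hypothetical logger: when invoked for a second object while the first
 * object's log_mutex is still held, the caller passes MODE_OTHER so the
 * acquisition is annotated as a deliberate one-level nesting. */
static void log_obj(struct logged_obj *obj, int mode)
{
    if (mode == MODE_OTHER)
        mutex_lock_nested(&obj->log_mutex, SINGLE_DEPTH_NESTING);
    else
        mutex_lock(&obj->log_mutex);

    /* ... copy items into the log tree ... */

    mutex_unlock(&obj->log_mutex);
}
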
......@@ -352,7 +352,5 @@ int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info,
out:
btrfs_free_path(path);
if (ret)
btrfs_warn(fs_info, "btrfs_uuid_tree_iterate failed %d", ret);
return 0;
return ret;
}
......@@ -130,8 +130,8 @@ DECLARE_EVENT_CLASS(btrfs__inode,
BTRFS_I(inode)->root->root_key.objectid;
),
TP_printk_btrfs("root = %llu(%s), gen = %llu, ino = %lu, blocks = %llu, "
"disk_i_size = %llu, last_trans = %llu, logged_trans = %llu",
TP_printk_btrfs("root=%llu(%s) gen=%llu ino=%lu blocks=%llu "
"disk_i_size=%llu last_trans=%llu logged_trans=%llu",
show_root_type(__entry->root_objectid),
(unsigned long long)__entry->generation,
(unsigned long)__entry->ino,
......@@ -184,14 +184,16 @@ DEFINE_EVENT(btrfs__inode, btrfs_inode_evict,
TRACE_EVENT_CONDITION(btrfs_get_extent,
TP_PROTO(struct btrfs_root *root, struct extent_map *map),
TP_PROTO(struct btrfs_root *root, struct inode *inode,
struct extent_map *map),
TP_ARGS(root, map),
TP_ARGS(root, inode, map),
TP_CONDITION(map),
TP_STRUCT__entry_btrfs(
__field( u64, root_objectid )
__field( u64, ino )
__field( u64, start )
__field( u64, len )
__field( u64, orig_start )
......@@ -204,7 +206,8 @@ TRACE_EVENT_CONDITION(btrfs_get_extent,
TP_fast_assign_btrfs(root->fs_info,
__entry->root_objectid = root->root_key.objectid;
__entry->start = map->start;
__entry->ino = btrfs_ino(inode);
__entry->start = map->start;
__entry->len = map->len;
__entry->orig_start = map->orig_start;
__entry->block_start = map->block_start;
......@@ -214,11 +217,12 @@ TRACE_EVENT_CONDITION(btrfs_get_extent,
__entry->compress_type = map->compress_type;
),
TP_printk_btrfs("root = %llu(%s), start = %llu, len = %llu, "
"orig_start = %llu, block_start = %llu(%s), "
"block_len = %llu, flags = %s, refs = %u, "
"compress_type = %u",
TP_printk_btrfs("root=%llu(%s) ino=%llu start=%llu len=%llu "
"orig_start=%llu block_start=%llu(%s) "
"block_len=%llu flags=%s refs=%u "
"compress_type=%u",
show_root_type(__entry->root_objectid),
(unsigned long long)__entry->ino,
(unsigned long long)__entry->start,
(unsigned long long)__entry->len,
(unsigned long long)__entry->orig_start,
......@@ -259,6 +263,7 @@ DECLARE_EVENT_CLASS(btrfs__ordered_extent,
__field( int, compress_type )
__field( int, refs )
__field( u64, root_objectid )
__field( u64, truncated_len )
),
TP_fast_assign_btrfs(btrfs_sb(inode->i_sb),
......@@ -273,18 +278,21 @@ DECLARE_EVENT_CLASS(btrfs__ordered_extent,
__entry->refs = atomic_read(&ordered->refs);
__entry->root_objectid =
BTRFS_I(inode)->root->root_key.objectid;
__entry->truncated_len = ordered->truncated_len;
),
TP_printk_btrfs("root = %llu(%s), ino = %llu, file_offset = %llu, "
"start = %llu, len = %llu, disk_len = %llu, "
"bytes_left = %llu, flags = %s, compress_type = %d, "
"refs = %d",
TP_printk_btrfs("root=%llu(%s) ino=%llu file_offset=%llu "
"start=%llu len=%llu disk_len=%llu "
"truncated_len=%llu "
"bytes_left=%llu flags=%s compress_type=%d "
"refs=%d",
show_root_type(__entry->root_objectid),
(unsigned long long)__entry->ino,
(unsigned long long)__entry->file_offset,
(unsigned long long)__entry->start,
(unsigned long long)__entry->len,
(unsigned long long)__entry->disk_len,
(unsigned long long)__entry->truncated_len,
(unsigned long long)__entry->bytes_left,
show_ordered_flags(__entry->flags),
__entry->compress_type, __entry->refs)
......@@ -354,10 +362,10 @@ DECLARE_EVENT_CLASS(btrfs__writepage,
BTRFS_I(inode)->root->root_key.objectid;
),
TP_printk_btrfs("root = %llu(%s), ino = %lu, page_index = %lu, "
"nr_to_write = %ld, pages_skipped = %ld, range_start = %llu, "
"range_end = %llu, for_kupdate = %d, "
"for_reclaim = %d, range_cyclic = %d, writeback_index = %lu",
TP_printk_btrfs("root=%llu(%s) ino=%lu page_index=%lu "
"nr_to_write=%ld pages_skipped=%ld range_start=%llu "
"range_end=%llu for_kupdate=%d "
"for_reclaim=%d range_cyclic=%d writeback_index=%lu",
show_root_type(__entry->root_objectid),
(unsigned long)__entry->ino, __entry->index,
__entry->nr_to_write, __entry->pages_skipped,
......@@ -400,8 +408,8 @@ TRACE_EVENT(btrfs_writepage_end_io_hook,
BTRFS_I(page->mapping->host)->root->root_key.objectid;
),
TP_printk_btrfs("root = %llu(%s), ino = %lu, page_index = %lu, start = %llu, "
"end = %llu, uptodate = %d",
TP_printk_btrfs("root=%llu(%s) ino=%lu page_index=%lu start=%llu "
"end=%llu uptodate=%d",
show_root_type(__entry->root_objectid),
(unsigned long)__entry->ino, (unsigned long)__entry->index,
(unsigned long long)__entry->start,
......@@ -433,7 +441,7 @@ TRACE_EVENT(btrfs_sync_file,
BTRFS_I(inode)->root->root_key.objectid;
),
TP_printk_btrfs("root = %llu(%s), ino = %ld, parent = %ld, datasync = %d",
TP_printk_btrfs("root=%llu(%s) ino=%ld parent=%ld datasync=%d",
show_root_type(__entry->root_objectid),
(unsigned long)__entry->ino, (unsigned long)__entry->parent,
__entry->datasync)
......@@ -484,9 +492,9 @@ TRACE_EVENT(btrfs_add_block_group,
__entry->create = create;
),
TP_printk("%pU: block_group offset = %llu, size = %llu, "
"flags = %llu(%s), bytes_used = %llu, bytes_super = %llu, "
"create = %d", __entry->fsid,
TP_printk("%pU: block_group offset=%llu size=%llu "
"flags=%llu(%s) bytes_used=%llu bytes_super=%llu "
"create=%d", __entry->fsid,
(unsigned long long)__entry->offset,
(unsigned long long)__entry->size,
(unsigned long long)__entry->flags,
......@@ -535,9 +543,9 @@ DECLARE_EVENT_CLASS(btrfs_delayed_tree_ref,
__entry->seq = ref->seq;
),
TP_printk_btrfs("bytenr = %llu, num_bytes = %llu, action = %s, "
"parent = %llu(%s), ref_root = %llu(%s), level = %d, "
"type = %s, seq = %llu",
TP_printk_btrfs("bytenr=%llu num_bytes=%llu action=%s "
"parent=%llu(%s) ref_root=%llu(%s) level=%d "
"type=%s seq=%llu",
(unsigned long long)__entry->bytenr,
(unsigned long long)__entry->num_bytes,
show_ref_action(__entry->action),
......@@ -600,9 +608,9 @@ DECLARE_EVENT_CLASS(btrfs_delayed_data_ref,
__entry->seq = ref->seq;
),
TP_printk_btrfs("bytenr = %llu, num_bytes = %llu, action = %s, "
"parent = %llu(%s), ref_root = %llu(%s), owner = %llu, "
"offset = %llu, type = %s, seq = %llu",
TP_printk_btrfs("bytenr=%llu num_bytes=%llu action=%s "
"parent=%llu(%s) ref_root=%llu(%s) owner=%llu "
"offset=%llu type=%s seq=%llu",
(unsigned long long)__entry->bytenr,
(unsigned long long)__entry->num_bytes,
show_ref_action(__entry->action),
......@@ -657,7 +665,7 @@ DECLARE_EVENT_CLASS(btrfs_delayed_ref_head,
__entry->is_data = head_ref->is_data;
),
TP_printk_btrfs("bytenr = %llu, num_bytes = %llu, action = %s, is_data = %d",
TP_printk_btrfs("bytenr=%llu num_bytes=%llu action=%s is_data=%d",
(unsigned long long)__entry->bytenr,
(unsigned long long)__entry->num_bytes,
show_ref_action(__entry->action),
......@@ -721,8 +729,8 @@ DECLARE_EVENT_CLASS(btrfs__chunk,
__entry->root_objectid = fs_info->chunk_root->root_key.objectid;
),
TP_printk_btrfs("root = %llu(%s), offset = %llu, size = %llu, "
"num_stripes = %d, sub_stripes = %d, type = %s",
TP_printk_btrfs("root=%llu(%s) offset=%llu size=%llu "
"num_stripes=%d sub_stripes=%d type=%s",
show_root_type(__entry->root_objectid),
(unsigned long long)__entry->offset,
(unsigned long long)__entry->size,
......@@ -771,8 +779,8 @@ TRACE_EVENT(btrfs_cow_block,
__entry->cow_level = btrfs_header_level(cow);
),
TP_printk_btrfs("root = %llu(%s), refs = %d, orig_buf = %llu "
"(orig_level = %d), cow_buf = %llu (cow_level = %d)",
TP_printk_btrfs("root=%llu(%s) refs=%d orig_buf=%llu "
"(orig_level=%d) cow_buf=%llu (cow_level=%d)",
show_root_type(__entry->root_objectid),
__entry->refs,
(unsigned long long)__entry->buf_start,
......@@ -836,7 +844,7 @@ TRACE_EVENT(btrfs_trigger_flush,
__assign_str(reason, reason)
),
TP_printk("%pU: %s: flush = %d(%s), flags = %llu(%s), bytes = %llu",
TP_printk("%pU: %s: flush=%d(%s) flags=%llu(%s) bytes=%llu",
__entry->fsid, __get_str(reason), __entry->flush,
show_flush_action(__entry->flush),
(unsigned long long)__entry->flags,
......@@ -879,8 +887,8 @@ TRACE_EVENT(btrfs_flush_space,
__entry->ret = ret;
),
TP_printk("%pU: state = %d(%s), flags = %llu(%s), num_bytes = %llu, "
"orig_bytes = %llu, ret = %d", __entry->fsid, __entry->state,
TP_printk("%pU: state=%d(%s) flags=%llu(%s) num_bytes=%llu "
"orig_bytes=%llu ret=%d", __entry->fsid, __entry->state,
show_flush_state(__entry->state),
(unsigned long long)__entry->flags,
__print_flags((unsigned long)__entry->flags, "|",
......@@ -905,7 +913,7 @@ DECLARE_EVENT_CLASS(btrfs__reserved_extent,
__entry->len = len;
),
TP_printk_btrfs("root = %llu(%s), start = %llu, len = %llu",
TP_printk_btrfs("root=%llu(%s) start=%llu len=%llu",
show_root_type(BTRFS_EXTENT_TREE_OBJECTID),
(unsigned long long)__entry->start,
(unsigned long long)__entry->len)
......@@ -944,7 +952,7 @@ TRACE_EVENT(find_free_extent,
__entry->data = data;
),
TP_printk_btrfs("root = %Lu(%s), len = %Lu, empty_size = %Lu, flags = %Lu(%s)",
TP_printk_btrfs("root=%Lu(%s) len=%Lu empty_size=%Lu flags=%Lu(%s)",
show_root_type(BTRFS_EXTENT_TREE_OBJECTID),
__entry->num_bytes, __entry->empty_size, __entry->data,
__print_flags((unsigned long)__entry->data, "|",
......@@ -973,8 +981,8 @@ DECLARE_EVENT_CLASS(btrfs__reserve_extent,
__entry->len = len;
),
TP_printk_btrfs("root = %Lu(%s), block_group = %Lu, flags = %Lu(%s), "
"start = %Lu, len = %Lu",
TP_printk_btrfs("root=%Lu(%s) block_group=%Lu flags=%Lu(%s) "
"start=%Lu len=%Lu",
show_root_type(BTRFS_EXTENT_TREE_OBJECTID),
__entry->bg_objectid,
__entry->flags, __print_flags((unsigned long)__entry->flags,
......@@ -1025,8 +1033,8 @@ TRACE_EVENT(btrfs_find_cluster,
__entry->min_bytes = min_bytes;
),
TP_printk_btrfs("block_group = %Lu, flags = %Lu(%s), start = %Lu, len = %Lu,"
" empty_size = %Lu, min_bytes = %Lu", __entry->bg_objectid,
TP_printk_btrfs("block_group=%Lu flags=%Lu(%s) start=%Lu len=%Lu "
"empty_size=%Lu min_bytes=%Lu", __entry->bg_objectid,
__entry->flags,
__print_flags((unsigned long)__entry->flags, "|",
BTRFS_GROUP_FLAGS), __entry->start,
......@@ -1047,7 +1055,7 @@ TRACE_EVENT(btrfs_failed_cluster_setup,
__entry->bg_objectid = block_group->key.objectid;
),
TP_printk_btrfs("block_group = %Lu", __entry->bg_objectid)
TP_printk_btrfs("block_group=%Lu", __entry->bg_objectid)
);
TRACE_EVENT(btrfs_setup_cluster,
......@@ -1075,8 +1083,8 @@ TRACE_EVENT(btrfs_setup_cluster,
__entry->bitmap = bitmap;
),
TP_printk_btrfs("block_group = %Lu, flags = %Lu(%s), window_start = %Lu, "
"size = %Lu, max_size = %Lu, bitmap = %d",
TP_printk_btrfs("block_group=%Lu flags=%Lu(%s) window_start=%Lu "
"size=%Lu max_size=%Lu bitmap=%d",
__entry->bg_objectid,
__entry->flags,
__print_flags((unsigned long)__entry->flags, "|",
......@@ -1103,7 +1111,7 @@ TRACE_EVENT(alloc_extent_state,
__entry->ip = IP
),
TP_printk("state=%p; mask = %s; caller = %pS", __entry->state,
TP_printk("state=%p mask=%s caller=%pS", __entry->state,
show_gfp_flags(__entry->mask), (void *)__entry->ip)
);
......@@ -1123,7 +1131,7 @@ TRACE_EVENT(free_extent_state,
__entry->ip = IP
),
TP_printk(" state=%p; caller = %pS", __entry->state,
TP_printk("state=%p caller=%pS", __entry->state,
(void *)__entry->ip)
);
......@@ -1151,28 +1159,32 @@ DECLARE_EVENT_CLASS(btrfs__work,
__entry->normal_work = &work->normal_work;
),
TP_printk_btrfs("work=%p (normal_work=%p), wq=%p, func=%pf, ordered_func=%p,"
" ordered_free=%p",
TP_printk_btrfs("work=%p (normal_work=%p) wq=%p func=%pf ordered_func=%p "
"ordered_free=%p",
__entry->work, __entry->normal_work, __entry->wq,
__entry->func, __entry->ordered_func, __entry->ordered_free)
);
/* For situations that the work is freed */
/*
* For situations when the work is freed, we pass fs_info and a tag that
* matches the address of the work structure so it can be paired with the
* scheduling event.
*/
DECLARE_EVENT_CLASS(btrfs__work__done,
TP_PROTO(struct btrfs_work *work),
TP_PROTO(struct btrfs_fs_info *fs_info, void *wtag),
TP_ARGS(work),
TP_ARGS(fs_info, wtag),
TP_STRUCT__entry_btrfs(
__field( void *, work )
__field( void *, wtag )
),
TP_fast_assign_btrfs(btrfs_work_owner(work),
__entry->work = work;
TP_fast_assign_btrfs(fs_info,
__entry->wtag = wtag;
),
TP_printk_btrfs("work->%p", __entry->work)
TP_printk_btrfs("work->%p", __entry->wtag)
);
DEFINE_EVENT(btrfs__work, btrfs_work_queued,
......@@ -1191,9 +1203,9 @@ DEFINE_EVENT(btrfs__work, btrfs_work_sched,
DEFINE_EVENT(btrfs__work__done, btrfs_all_work_done,
TP_PROTO(struct btrfs_work *work),
TP_PROTO(struct btrfs_fs_info *fs_info, void *wtag),
TP_ARGS(work)
TP_ARGS(fs_info, wtag)
);
DEFINE_EVENT(btrfs__work, btrfs_ordered_sched,
......@@ -1221,7 +1233,7 @@ DECLARE_EVENT_CLASS(btrfs__workqueue,
__entry->high = high;
),
TP_printk_btrfs("name=%s%s, wq=%p", __get_str(name),
TP_printk_btrfs("name=%s%s wq=%p", __get_str(name),
__print_flags(__entry->high, "",
{(WQ_HIGHPRI), "-high"}),
__entry->wq)
......@@ -1276,7 +1288,7 @@ DECLARE_EVENT_CLASS(btrfs__qgroup_data_map,
__entry->free_reserved = free_reserved;
),
TP_printk_btrfs("rootid=%llu, ino=%lu, free_reserved=%llu",
TP_printk_btrfs("rootid=%llu ino=%lu free_reserved=%llu",
__entry->rootid, __entry->ino, __entry->free_reserved)
);
......@@ -1323,7 +1335,7 @@ DECLARE_EVENT_CLASS(btrfs__qgroup_rsv_data,
__entry->op = op;
),
TP_printk_btrfs("root=%llu, ino=%lu, start=%llu, len=%llu, reserved=%llu, op=%s",
TP_printk_btrfs("root=%llu ino=%lu start=%llu len=%llu reserved=%llu op=%s",
__entry->rootid, __entry->ino, __entry->start, __entry->len,
__entry->reserved,
__print_flags((unsigned long)__entry->op, "",
......@@ -1361,7 +1373,7 @@ DECLARE_EVENT_CLASS(btrfs__qgroup_delayed_ref,
__entry->reserved = reserved;
),
TP_printk_btrfs("root=%llu, reserved=%llu, op=free",
TP_printk_btrfs("root=%llu reserved=%llu op=free",
__entry->ref_root, __entry->reserved)
);
......@@ -1388,7 +1400,7 @@ DECLARE_EVENT_CLASS(btrfs_qgroup_extent,
__entry->num_bytes = rec->num_bytes;
),
TP_printk_btrfs("bytenr = %llu, num_bytes = %llu",
TP_printk_btrfs("bytenr=%llu num_bytes=%llu",
(unsigned long long)__entry->bytenr,
(unsigned long long)__entry->num_bytes)
);
......@@ -1430,8 +1442,8 @@ TRACE_EVENT(btrfs_qgroup_account_extent,
__entry->nr_new_roots = nr_new_roots;
),
TP_printk_btrfs("bytenr = %llu, num_bytes = %llu, nr_old_roots = %llu, "
"nr_new_roots = %llu",
TP_printk_btrfs("bytenr=%llu num_bytes=%llu nr_old_roots=%llu "
"nr_new_roots=%llu",
__entry->bytenr,
__entry->num_bytes,
__entry->nr_old_roots,
......@@ -1457,7 +1469,7 @@ TRACE_EVENT(qgroup_update_counters,
__entry->cur_new_count = cur_new_count;
),
TP_printk_btrfs("qgid = %llu, cur_old_count = %llu, cur_new_count = %llu",
TP_printk_btrfs("qgid=%llu cur_old_count=%llu cur_new_count=%llu",
__entry->qgid,
__entry->cur_old_count,
__entry->cur_new_count)
......