Commit 64166699 authored by Tejun Heo

workqueue: temporarily remove workqueue tracing

Strip tracing code from workqueue and remove workqueue tracing.  This
is a temporary measure until concurrency managed workqueue is complete.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
parent a62428c0
include/trace/events/workqueue.h (deleted)

#undef TRACE_SYSTEM
#define TRACE_SYSTEM workqueue

#if !defined(_TRACE_WORKQUEUE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_WORKQUEUE_H

#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/tracepoint.h>

DECLARE_EVENT_CLASS(workqueue,

        TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),

        TP_ARGS(wq_thread, work),

        TP_STRUCT__entry(
                __array(char, thread_comm, TASK_COMM_LEN)
                __field(pid_t, thread_pid)
                __field(work_func_t, func)
        ),

        TP_fast_assign(
                memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN);
                __entry->thread_pid = wq_thread->pid;
                __entry->func = work->func;
        ),

        TP_printk("thread=%s:%d func=%pf", __entry->thread_comm,
                  __entry->thread_pid, __entry->func)
);

DEFINE_EVENT(workqueue, workqueue_insertion,
        TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
        TP_ARGS(wq_thread, work)
);

DEFINE_EVENT(workqueue, workqueue_execution,
        TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
        TP_ARGS(wq_thread, work)
);

/* Trace the creation of one workqueue thread on a cpu */
TRACE_EVENT(workqueue_creation,

        TP_PROTO(struct task_struct *wq_thread, int cpu),

        TP_ARGS(wq_thread, cpu),

        TP_STRUCT__entry(
                __array(char, thread_comm, TASK_COMM_LEN)
                __field(pid_t, thread_pid)
                __field(int, cpu)
        ),

        TP_fast_assign(
                memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN);
                __entry->thread_pid = wq_thread->pid;
                __entry->cpu = cpu;
        ),

        TP_printk("thread=%s:%d cpu=%d", __entry->thread_comm,
                  __entry->thread_pid, __entry->cpu)
);

TRACE_EVENT(workqueue_destruction,

        TP_PROTO(struct task_struct *wq_thread),

        TP_ARGS(wq_thread),

        TP_STRUCT__entry(
                __array(char, thread_comm, TASK_COMM_LEN)
                __field(pid_t, thread_pid)
        ),

        TP_fast_assign(
                memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN);
                __entry->thread_pid = wq_thread->pid;
        ),

        TP_printk("thread=%s:%d", __entry->thread_comm, __entry->thread_pid)
);

#endif /* _TRACE_WORKQUEUE_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
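The deleted header uses the standard DECLARE_EVENT_CLASS()/DEFINE_EVENT() pattern: the class declares the record layout, assignment and print format once, and each DEFINE_EVENT() instantiates a real tracepoint (workqueue_insertion, workqueue_execution) that reuses it, while TRACE_EVENT() defines a class and a single event in one step. As a purely hypothetical sketch (no such event existed in this file), a further event reusing the same class would need nothing beyond its own name and the shared prototype:

DEFINE_EVENT(workqueue, workqueue_requeue,      /* hypothetical event name */
        TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
        TP_ARGS(wq_thread, work)
);

Its trace_workqueue_requeue() call site and "thread=%s:%d func=%pf" output format would be generated from the class, which is why the two real DEFINE_EVENT()s above carry no TP_STRUCT__entry or TP_fast_assign of their own.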
kernel/trace/Kconfig

@@ -391,17 +391,6 @@ config KMEMTRACE
 
           If unsure, say N.
 
-config WORKQUEUE_TRACER
-        bool "Trace workqueues"
-        select GENERIC_TRACER
-        help
-          The workqueue tracer provides some statistical information
-          about each cpu workqueue thread such as the number of the
-          works inserted and executed since their creation. It can help
-          to evaluate the amount of work each of them has to perform.
-          For example it can help a developer to decide whether he should
-          choose a per-cpu workqueue instead of a singlethreaded one.
-
 config BLK_DEV_IO_TRACE
         bool "Support for tracing block IO actions"
         depends on SYSFS
...
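The help text removed above describes per-thread counts of works inserted and executed; such counts were derived from the workqueue_insertion and workqueue_execution events in the deleted header. The sketch below only illustrates that general technique (global rather than per-thread counters, for brevity) and is not the removed tracer's implementation. It assumes the 2.6.35-era register_trace_*() helpers, which take a probe callback plus an opaque data cookie (earlier kernels took only the probe), and assumes the code is built into the kernel, since these tracepoints were not exported to modules.

/*
 * Hypothetical sketch -- not code removed by this commit.  Counts how
 * many works have been queued and how many have run by attaching
 * probes to the two events above.  Probe signatures follow the
 * "data cookie" tracepoint ABI assumed for this kernel era.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/tracepoint.h>
#include <asm/atomic.h>
#include <trace/events/workqueue.h>

static atomic_t wq_works_inserted = ATOMIC_INIT(0);
static atomic_t wq_works_executed = ATOMIC_INIT(0);

static void probe_wq_insertion(void *ignore, struct task_struct *wq_thread,
                               struct work_struct *work)
{
        atomic_inc(&wq_works_inserted);
}

static void probe_wq_execution(void *ignore, struct task_struct *wq_thread,
                               struct work_struct *work)
{
        atomic_inc(&wq_works_executed);
}

/* Attach the probes; register_trace_*() is generated by DEFINE_EVENT(). */
static int __init wq_counters_init(void)
{
        int ret;

        ret = register_trace_workqueue_insertion(probe_wq_insertion, NULL);
        if (ret)
                return ret;
        ret = register_trace_workqueue_execution(probe_wq_execution, NULL);
        if (ret)
                unregister_trace_workqueue_insertion(probe_wq_insertion, NULL);
        return ret;
}
late_initcall(wq_counters_init);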
kernel/workqueue.c

@@ -33,8 +33,6 @@
 #include <linux/kallsyms.h>
 #include <linux/debug_locks.h>
 #include <linux/lockdep.h>
-#define CREATE_TRACE_POINTS
-#include <trace/events/workqueue.h>
 
 /*
  * Structure fields follow one of the following exclusion rules.
@@ -243,10 +241,10 @@ static inline void clear_wq_data(struct work_struct *work)
         atomic_long_set(&work->data, work_static(work));
 }
 
-static inline
-struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
+static inline struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
 {
-        return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
+        return (void *)(atomic_long_read(&work->data) &
+                        WORK_STRUCT_WQ_DATA_MASK);
 }
 
 /**
@@ -265,8 +263,6 @@ static void insert_work(struct cpu_workqueue_struct *cwq,
                         struct work_struct *work, struct list_head *head,
                         unsigned int extra_flags)
 {
-        trace_workqueue_insertion(cwq->thread, work);
-
         /* we own @work, set data and link */
         set_wq_data(work, cwq, extra_flags);
@@ -431,7 +427,6 @@ static void process_one_work(struct cpu_workqueue_struct *cwq,
         struct lockdep_map lockdep_map = work->lockdep_map;
 #endif
         /* claim and process */
-        trace_workqueue_execution(cwq->thread, work);
         debug_work_deactivate(work);
         cwq->current_work = work;
         list_del_init(&work->entry);
@@ -1017,8 +1012,6 @@ static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
                 return PTR_ERR(p);
 
         cwq->thread = p;
-        trace_workqueue_creation(cwq->thread, cpu);
-
         return 0;
 }
@@ -1123,7 +1116,6 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
          * checks list_empty(), and a "normal" queue_work() can't use
          * a dead CPU.
          */
-        trace_workqueue_destruction(cwq->thread);
         kthread_stop(cwq->thread);
         cwq->thread = NULL;
 }
...