Commit 14d0ee05 authored by Linus Torvalds

Merge tag 'trace-fixes-v3.10-rc3-v3' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing fixes from Steven Rostedt:
 "This contains 4 fixes.

  The first two fix a case where, with full RCU debugging enabled,
  enabling function tracing causes a live lock of the system.  This is
  due to the debug checks added to rcu_dereference_raw(), which is used
  by the function tracer.  These checks are themselves traced by the
  function tracer, and they add enough overhead that servicing an
  interrupt can take longer than the interval before the next interrupt
  fires, causing a live lock driven by the timer interrupt.

  Talking this over with Paul McKenney, we came up with a fix that adds
  a new rcu_dereference_raw_notrace() that does not perform these added
  checks, and lets the function tracer use that.

  The third commit fixes a compile failure when branch tracing is
  enabled: the trace_test_buffer() selftest helper was converted to take
  a trace buffer pointer, but the branch tracer selftest was never
  updated to match.

  The fourth patch fixes a bug caught by the RCU lockdep code where an
  rcu_read_lock() is performed while RCU is disabled (either going to or
  coming from idle, or in user space).  This happened in the irqsoff
  tracer, which calls task_uid().  The fix is to use current_uid(),
  which does not take RCU locks, when possible; luckily, that is always
  the case when the irqsoff tracer reaches this code."

* tag 'trace-fixes-v3.10-rc3-v3' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  tracing: Use current_uid() for critical time tracing
  tracing: Fix bad parameter passed in branch selftest
  ftrace: Use the rcu _notrace variants for rcu_dereference_raw() and friends
  rcu: Add _notrace variation of rcu_dereference_raw() and hlist_for_each_entry_rcu()
parents ea7f6656 f17a5194
include/linux/rculist.h
@@ -460,6 +460,26 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
 		pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
 			&(pos)->member)), typeof(*(pos)), member))
 
+/**
+ * hlist_for_each_entry_rcu_notrace - iterate over rcu list of given type (for tracing)
+ * @pos:	the type * to use as a loop cursor.
+ * @head:	the head for your list.
+ * @member:	the name of the hlist_node within the struct.
+ *
+ * This list-traversal primitive may safely run concurrently with
+ * the _rcu list-mutation primitives such as hlist_add_head_rcu()
+ * as long as the traversal is guarded by rcu_read_lock().
+ *
+ * This is the same as hlist_for_each_entry_rcu() except that it does
+ * not do any RCU debugging or tracing.
+ */
+#define hlist_for_each_entry_rcu_notrace(pos, head, member)			\
+	for (pos = hlist_entry_safe (rcu_dereference_raw_notrace(hlist_first_rcu(head)),\
+			typeof(*(pos)), member);				\
+		pos;								\
+		pos = hlist_entry_safe(rcu_dereference_raw_notrace(hlist_next_rcu(\
+			&(pos)->member)), typeof(*(pos)), member))
+
 /**
  * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type
  * @pos:	the type * to use as a loop cursor.
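
Below is a minimal sketch of the intended use of the new iterator; my_entry and my_lookup are invented names for illustration and are not part of this commit. The traversal looks exactly like hlist_for_each_entry_rcu(), but every pointer fetch goes through rcu_dereference_raw_notrace(), so no lockdep or RCU-debug helper runs inside the loop; the ftrace hash lookups further down rely on exactly that property.

struct my_entry {
	unsigned long		ip;
	struct hlist_node	node;
};

/* Hypothetical lookup that is safe to call from function-tracer context. */
static struct my_entry *my_lookup(struct hlist_head *head, unsigned long ip)
{
	struct my_entry *entry;

	hlist_for_each_entry_rcu_notrace(entry, head, node) {
		if (entry->ip == ip)
			return entry;
	}
	return NULL;
}
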
include/linux/rcupdate.h
@@ -640,6 +640,15 @@ static inline void rcu_preempt_sleep_check(void)
 
 #define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/
 
+/*
+ * The tracing infrastructure traces RCU (we want that), but unfortunately
+ * some of the RCU checks causes tracing to lock up the system.
+ *
+ * The tracing version of rcu_dereference_raw() must not call
+ * rcu_read_lock_held().
+ */
+#define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu)
+
 /**
  * rcu_access_index() - fetch RCU index with no dereferencing
  * @p: The index to read
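
A short sketch of how tracer-side code is expected to use the new macro; my_cfg, my_cfg_ptr and my_cfg_read are invented for illustration and are not in this commit. The dereference keeps the sparse annotation and dependency barrier of __rcu_dereference_check(), but, unlike rcu_dereference_raw() under full RCU debugging, it never calls rcu_read_lock_held(), so nothing traceable runs on this path.

struct my_cfg {
	int	val;
};

static struct my_cfg __rcu *my_cfg_ptr;	/* hypothetical RCU-protected pointer */

/* Hypothetical read side, callable from the function tracer itself. */
static int my_cfg_read(void)
{
	struct my_cfg *cfg = rcu_dereference_raw_notrace(my_cfg_ptr);

	return cfg ? cfg->val : 0;
}
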
kernel/trace/ftrace.c
@@ -120,22 +120,22 @@ static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
 
 /*
  * Traverse the ftrace_global_list, invoking all entries.  The reason that we
- * can use rcu_dereference_raw() is that elements removed from this list
+ * can use rcu_dereference_raw_notrace() is that elements removed from this list
  * are simply leaked, so there is no need to interact with a grace-period
- * mechanism.  The rcu_dereference_raw() calls are needed to handle
+ * mechanism.  The rcu_dereference_raw_notrace() calls are needed to handle
  * concurrent insertions into the ftrace_global_list.
  *
  * Silly Alpha and silly pointer-speculation compiler optimizations!
  */
 #define do_for_each_ftrace_op(op, list)			\
-	op = rcu_dereference_raw(list);			\
+	op = rcu_dereference_raw_notrace(list);			\
 	do
 
 /*
  * Optimized for just a single item in the list (as that is the normal case).
  */
 #define while_for_each_ftrace_op(op)				\
-	while (likely(op = rcu_dereference_raw((op)->next)) &&	\
+	while (likely(op = rcu_dereference_raw_notrace((op)->next)) &&	\
 	       unlikely((op) != &ftrace_list_end))
 
 static inline void ftrace_ops_init(struct ftrace_ops *ops)
@@ -779,7 +779,7 @@ ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
 	if (hlist_empty(hhd))
 		return NULL;
 
-	hlist_for_each_entry_rcu(rec, hhd, node) {
+	hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
 		if (rec->ip == ip)
 			return rec;
 	}
@@ -1165,7 +1165,7 @@ ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
 
 	hhd = &hash->buckets[key];
 
-	hlist_for_each_entry_rcu(entry, hhd, hlist) {
+	hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
 		if (entry->ip == ip)
 			return entry;
 	}
@@ -1422,8 +1422,8 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
 	struct ftrace_hash *notrace_hash;
 	int ret;
 
-	filter_hash = rcu_dereference_raw(ops->filter_hash);
-	notrace_hash = rcu_dereference_raw(ops->notrace_hash);
+	filter_hash = rcu_dereference_raw_notrace(ops->filter_hash);
+	notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash);
 
 	if ((ftrace_hash_empty(filter_hash) ||
 	     ftrace_lookup_ip(filter_hash, ip)) &&
@@ -2920,7 +2920,7 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
 	 * on the hash. rcu_read_lock is too dangerous here.
 	 */
 	preempt_disable_notrace();
-	hlist_for_each_entry_rcu(entry, hhd, node) {
+	hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
 		if (entry->ip == ip)
 			entry->ops->func(ip, parent_ip, &entry->data);
 	}
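
A compact sketch of how the two list-walking macros pair up at a call site; my_count_ops is invented for illustration, while ftrace_global_list, ftrace_ops_test() and ftrace_list_end are existing ftrace.c symbols named in the hunks above. The opening macro fetches the list head with rcu_dereference_raw_notrace() and starts a do-loop, and the closing macro advances the cursor the same way until it reaches ftrace_list_end, so the whole traversal stays free of lockdep calls.

/* Hypothetical caller, following the pattern used by the ftrace list walkers. */
static int my_count_ops(unsigned long ip)
{
	struct ftrace_ops *op;
	int cnt = 0;

	do_for_each_ftrace_op(op, ftrace_global_list) {
		if (ftrace_ops_test(op, ip))
			cnt++;
	} while_for_each_ftrace_op(op);

	return cnt;
}
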
kernel/trace/trace.c
@@ -843,7 +843,15 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 
 	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
 	max_data->pid = tsk->pid;
-	max_data->uid = task_uid(tsk);
+	/*
+	 * If tsk == current, then use current_uid(), as that does not use
+	 * RCU. The irq tracer can be called out of RCU scope.
+	 */
+	if (tsk == current)
+		max_data->uid = current_uid();
+	else
+		max_data->uid = task_uid(tsk);
+
 	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
 	max_data->policy = tsk->policy;
 	max_data->rt_priority = tsk->rt_priority;
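
For background on why the two accessors behave differently, here is a rough paraphrase of the credential helpers; the my_-prefixed macros below are illustrative only, and include/linux/cred.h has the real definitions. task_uid() enters an RCU read-side critical section, which RCU lockdep flags when RCU is not watching (for example on the idle and user-space transitions where the irqsoff tracer runs), while current_uid() reads the calling task's own credentials without taking RCU locks.

/* Approximate shape only; not the literal kernel definitions. */
#define my_task_uid(task)						\
({									\
	kuid_t ___uid;							\
	rcu_read_lock();	/* trips RCU lockdep if RCU is not watching */ \
	___uid = __task_cred(task)->uid;				\
	rcu_read_unlock();						\
	___uid;								\
})

#define my_current_uid()	(current_cred()->uid)	/* no RCU read-side section */
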
kernel/trace/trace_selftest.c
@@ -1159,7 +1159,7 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
 	/* stop the tracing. */
 	tracing_stop();
 	/* check the trace buffer */
-	ret = trace_test_buffer(tr, &count);
+	ret = trace_test_buffer(&tr->trace_buffer, &count);
 	trace->reset(tr);
 	tracing_start();
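
For reference, the old call stopped compiling because the selftest helper was converted, as part of the multi-buffer rework, to take the ring buffer rather than the trace_array. The assumed post-conversion prototype, paraphrased rather than quoted from this commit, is:

static int trace_test_buffer(struct trace_buffer *buf, unsigned long *count);
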