Commit fe4b04fa authored by Peter Zijlstra, committed by Ingo Molnar

perf: Cure task_oncpu_function_call() races

Oleg reported that on architectures with
__ARCH_WANT_INTERRUPTS_ON_CTXSW the IPI from
task_oncpu_function_call() can land before perf_event_task_sched_in()
and cause interesting situations for, e.g., perf_install_in_context().

This patch reworks the task_oncpu_function_call() interface into a
more usable primitive and reworks all its users to be more obviously
correct and to remove the races.

While looking at the code I also found a number of races against
perf_event_task_sched_out(), which can flip contexts between tasks,
so plug those too.
Reported-and-reviewed-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent b84defe6
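
To make the reported window concrete: on __ARCH_WANT_INTERRUPTS_ON_CTXSW
architectures interrupts stay enabled across the context switch, so the IPI
can be serviced in the middle of it. A minimal sketch of the interleaving
(an illustration built from the message and hunks below, not code from the
commit itself):

/*
 * CPU0: perf_install_in_context()     CPU1: switching to task t
 * -------------------------------     --------------------------
 * task_oncpu_function_call(t, f)
 *   task_curr(t) == true              rq->curr = t
 *   smp_call_function_single(cpu)     ... IRQ window during the
 *                                     switch: f(info) runs here,
 *                                     BEFORE
 *                                     perf_event_task_sched_in(t)
 *
 * The callback f() observes t as the current task while t's perf
 * context has not been scheduled in yet, so e.g. installing an
 * event can operate on a context that is not actually active.
 */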
@@ -2578,13 +2578,6 @@ static inline void inc_syscw(struct task_struct *tsk)
 #define TASK_SIZE_OF(tsk)	TASK_SIZE
 #endif
 
-/*
- * Call the function if the target task is executing on a CPU right now:
- */
-extern void task_oncpu_function_call(struct task_struct *p,
-				     void (*func) (void *info), void *info);
-
 #ifdef CONFIG_MM_OWNER
 extern void mm_update_next_owner(struct mm_struct *mm);
 extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
...
[This diff is collapsed.]
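
The collapsed diff carries the core of the rework. As a sketch, reconstructed
from the upstream commit (fe4b04fa, kernel/perf_event.c) rather than from
anything visible on this page: the replacement primitive repeats the
"is this task current here?" check inside the IPI handler itself and returns
an error code, instead of trusting the stale task_curr() observation made on
the sending CPU.

struct remote_function_call {
	struct task_struct	*p;
	int			(*func)(void *info);
	void			*info;
	int			ret;
};

static void remote_function(void *data)
{
	struct remote_function_call *tfc = data;
	struct task_struct *p = tfc->p;

	if (p) {
		tfc->ret = -EAGAIN;
		/* Re-validate on the target CPU: bail if @p moved away. */
		if (task_cpu(p) != smp_processor_id() || !task_curr(p))
			return;
	}

	tfc->ret = tfc->func(tfc->info);
}

static int
task_function_call(struct task_struct *p, int (*func) (void *info), void *info)
{
	struct remote_function_call data = {
		.p	= p,
		.func	= func,
		.info	= info,
		.ret	= -ESRCH, /* No such (running) process */
	};

	if (task_curr(p))
		smp_call_function_single(task_cpu(p), remote_function,
					 &data, 1);

	return data.ret;
}

Callers such as perf_install_in_context() then loop, roughly: retry on
-EAGAIN, and on -ESRCH take ctx->lock and manipulate the inactive context
directly, re-checking that it did not become active in the meantime.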
@@ -2265,27 +2265,6 @@ void kick_process(struct task_struct *p)
 EXPORT_SYMBOL_GPL(kick_process);
 #endif /* CONFIG_SMP */
 
-/**
- * task_oncpu_function_call - call a function on the cpu on which a task runs
- * @p:		the task to evaluate
- * @func:	the function to be called
- * @info:	the function call argument
- *
- * Calls the function @func when the task is currently running. This might
- * be on the current CPU, which just calls the function directly
- */
-void task_oncpu_function_call(struct task_struct *p,
-			      void (*func) (void *info), void *info)
-{
-	int cpu;
-
-	preempt_disable();
-	cpu = task_cpu(p);
-	if (task_curr(p))
-		smp_call_function_single(cpu, func, info, 1);
-	preempt_enable();
-}
-
 #ifdef CONFIG_SMP
 /*
  * ->cpus_allowed is protected by either TASK_WAKING or rq->lock held.
@@ -2776,9 +2755,12 @@ static inline void
 prepare_task_switch(struct rq *rq, struct task_struct *prev,
 		    struct task_struct *next)
 {
+	sched_info_switch(prev, next);
+	perf_event_task_sched_out(prev, next);
 	fire_sched_out_preempt_notifiers(prev, next);
 	prepare_lock_switch(rq, next);
 	prepare_arch_switch(next);
+	trace_sched_switch(prev, next);
 }
 
 /**
@@ -2911,7 +2893,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
 	struct mm_struct *mm, *oldmm;
 
 	prepare_task_switch(rq, prev, next);
-	trace_sched_switch(prev, next);
+
 	mm = next->mm;
 	oldmm = prev->active_mm;
 	/*
@@ -3989,9 +3971,6 @@ asmlinkage void __sched schedule(void)
 		rq->skip_clock_update = 0;
 
 	if (likely(prev != next)) {
-		sched_info_switch(prev, next);
-		perf_event_task_sched_out(prev, next);
-
 		rq->nr_switches++;
 		rq->curr = next;
 		++*switch_count;
...
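
Putting the three kernel/sched.c hunks together: the switch-out hooks that
used to be split between schedule() and context_switch() are consolidated
into prepare_task_switch(), so they all run from a single, well-defined
point in the switch. The resulting function, assembled from the visible
hunks:

static inline void
prepare_task_switch(struct rq *rq, struct task_struct *prev,
		    struct task_struct *next)
{
	sched_info_switch(prev, next);		/* moved from schedule() */
	perf_event_task_sched_out(prev, next);	/* moved from schedule() */
	fire_sched_out_preempt_notifiers(prev, next);
	prepare_lock_switch(rq, next);
	prepare_arch_switch(next);
	trace_sched_switch(prev, next);		/* moved from context_switch() */
}

Note that the two moved calls sit before prepare_lock_switch(), so they
still execute with rq->lock held and interrupts disabled even on
__ARCH_WANT_INTERRUPTS_ON_CTXSW architectures.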