Commit fafe870f authored by Frederic Weisbecker, committed by Ingo Molnar

context_tracking: Inherit TIF_NOHZ through forks instead of context switches

TIF_NOHZ is used by context_tracking to force the syscall slow path
on every task in order to track userspace roundtrips. As such,
it must be set on all running tasks.
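
For context, a simplified sketch (not part of this patch) of how the
flag takes effect on x86 at the time: _TIF_NOHZ is included in the
syscall-entry work mask, so a task carrying the flag is diverted into
the tracing slow path, where the context tracking hooks run around the
syscall. The other mask members are elided here:

	/* Sketch, assuming the x86 entry code of that era: _TIF_NOHZ
	 * is part of the work mask tested on syscall entry, so setting
	 * it forces the slow path where user_exit()/user_enter() run. */
	#define _TIF_WORK_SYSCALL_ENTRY				\
		(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT |	\
		 /* ... */ _TIF_NOHZ)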

It is currently inherited explicitly on every context switch. There
is no need to do that in this fast path, though: the flag can simply
be set once and for all, on all tasks, whether they are running or
not.

Let's do this by setting the flag on the init task at early boot,
and letting it propagate to every other task through fork inheritance.

While at it, mark context_tracking_cpu_set() as init code; it is
only needed at early boot time.
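
The fork inheritance relied on here is a side effect of how a child's
thread_info is set up: dup_task_struct() copies the parent's
thread_info wholesale, flags included. A simplified sketch of that
helper as it looked in include/linux/sched.h at the time:

	static inline void setup_thread_stack(struct task_struct *p,
					      struct task_struct *org)
	{
		/* Copy the whole thread_info, TIF_NOHZ included. */
		*task_thread_info(p) = *task_thread_info(org);
		task_thread_info(p)->task = p;
	}
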
Suggested-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Chris Metcalf <cmetcalf@ezchip.com>
Cc: Dave Jones <davej@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Mike Galbraith <umgwanakikbuti@gmail.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1430928266-24888-3-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent aed5ed47
include/linux/context_tracking.h
@@ -14,8 +14,6 @@ extern void context_tracking_enter(enum ctx_state state);
 extern void context_tracking_exit(enum ctx_state state);
 extern void context_tracking_user_enter(void);
 extern void context_tracking_user_exit(void);
-extern void __context_tracking_task_switch(struct task_struct *prev,
-					   struct task_struct *next);
 
 static inline void user_enter(void)
 {
@@ -51,19 +49,11 @@ static inline void exception_exit(enum ctx_state prev_ctx)
 	}
 }
 
-static inline void context_tracking_task_switch(struct task_struct *prev,
-						struct task_struct *next)
-{
-	if (context_tracking_is_enabled())
-		__context_tracking_task_switch(prev, next);
-}
 #else
 static inline void user_enter(void) { }
 static inline void user_exit(void) { }
 static inline enum ctx_state exception_enter(void) { return 0; }
 static inline void exception_exit(enum ctx_state prev_ctx) { }
-static inline void context_tracking_task_switch(struct task_struct *prev,
-						struct task_struct *next) { }
 #endif /* !CONFIG_CONTEXT_TRACKING */
include/linux/sched.h
@@ -2532,6 +2532,9 @@ static inline unsigned long wait_task_inactive(struct task_struct *p,
 }
 #endif
 
+#define tasklist_empty() \
+	list_empty(&init_task.tasks)
+
 #define next_task(p) \
 	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
kernel/context_tracking.c
@@ -30,14 +30,6 @@ EXPORT_SYMBOL_GPL(context_tracking_enabled);
 DEFINE_PER_CPU(struct context_tracking, context_tracking);
 EXPORT_SYMBOL_GPL(context_tracking);
 
-void context_tracking_cpu_set(int cpu)
-{
-	if (!per_cpu(context_tracking.active, cpu)) {
-		per_cpu(context_tracking.active, cpu) = true;
-		static_key_slow_inc(&context_tracking_enabled);
-	}
-}
-
 static bool context_tracking_recursion_enter(void)
 {
 	int recursion;
@@ -193,24 +185,26 @@ void context_tracking_user_exit(void)
 }
 NOKPROBE_SYMBOL(context_tracking_user_exit);
 
-/**
- * __context_tracking_task_switch - context switch the syscall callbacks
- * @prev: the task that is being switched out
- * @next: the task that is being switched in
- *
- * The context tracking uses the syscall slow path to implement its user-kernel
- * boundaries probes on syscalls. This way it doesn't impact the syscall fast
- * path on CPUs that don't do context tracking.
- *
- * But we need to clear the flag on the previous task because it may later
- * migrate to some CPU that doesn't do the context tracking. As such the TIF
- * flag may not be desired there.
- */
-void __context_tracking_task_switch(struct task_struct *prev,
-				    struct task_struct *next)
+void __init context_tracking_cpu_set(int cpu)
 {
-	clear_tsk_thread_flag(prev, TIF_NOHZ);
-	set_tsk_thread_flag(next, TIF_NOHZ);
+	static __initdata bool initialized = false;
+
+	if (!per_cpu(context_tracking.active, cpu)) {
+		per_cpu(context_tracking.active, cpu) = true;
+		static_key_slow_inc(&context_tracking_enabled);
+	}
+
+	if (initialized)
+		return;
+
+	/*
+	 * Set TIF_NOHZ to init/0 and let it propagate to all tasks through fork
+	 * This assumes that init is the only task at this early boot stage.
+	 */
+	set_tsk_thread_flag(&init_task, TIF_NOHZ);
+	WARN_ON_ONCE(!tasklist_empty());
+
+	initialized = true;
 }
 
 #ifdef CONFIG_CONTEXT_TRACKING_FORCE
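
The static_key_slow_inc() above is what arms the checks everywhere
else: each context tracking hook is guarded by a static-key test, so
the machinery costs a patched no-op while tracking is disabled. A
simplified sketch of the guard as defined in
include/linux/context_tracking_state.h at the time:

	static inline bool context_tracking_is_enabled(void)
	{
		/* Static key: patched at runtime, no-op when disabled. */
		return static_key_false(&context_tracking_enabled);
	}
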
kernel/sched/core.c
@@ -2332,7 +2332,6 @@ context_switch(struct rq *rq, struct task_struct *prev,
 	 */
	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
 
-	context_tracking_task_switch(prev, next);
 	/* Here we just switch the register state and the stack. */
 	switch_to(prev, next, prev);
 	barrier();