Commit b30f0e3f authored by Frederic Weisbecker, committed by Ingo Molnar

sched/preempt: Optimize preemption operations on __schedule() callers

__schedule() disables preemption and some of its callers
(the preempt_schedule*() family) also set PREEMPT_ACTIVE.

So we have two preempt_count() modifications that could be performed
at once.

Let's remove the preemption disablement from __schedule() and pull
this responsibility to its callers in order to optimize preempt_count()
operations in a single place.
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1431441711-29753-5-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 90b62b51
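
For illustration only (not part of the commit): a minimal stand-alone C sketch of the counter arithmetic that the new preempt_active_enter()/preempt_active_exit() helpers perform. The constants and the plain global counter below are placeholders for this example, not the kernel's actual definitions.

/*
 * Sketch of the combined preempt_count update introduced by this patch:
 * previously a preemption point cost two updates on entry (PREEMPT_ACTIVE
 * in the caller, plus preempt_disable() inside __schedule()) and two on
 * exit; folding both into one add/sub pair halves the preempt_count traffic.
 * The values below are illustrative placeholders, not the kernel's.
 */
#include <stdio.h>

#define PREEMPT_DISABLE_OFFSET  1           /* assumed: one "disable" unit  */
#define PREEMPT_ACTIVE          0x00100000  /* assumed placeholder value    */

static int preempt_count;  /* stand-in for the per-task/per-CPU counter */

static void preempt_active_enter(void)
{
	/* one combined update instead of two separate ones */
	preempt_count += PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET;
}

static void preempt_active_exit(void)
{
	preempt_count -= PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET;
}

int main(void)
{
	preempt_active_enter();
	printf("count while __schedule() runs: %#x\n", preempt_count);
	preempt_active_exit();
	printf("count afterward:               %#x\n", preempt_count);
	return 0;
}

With this scheme each preemption point touches preempt_count exactly once on entry and once on exit, which is the optimization the commit message describes.
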
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -137,6 +137,18 @@ extern void preempt_count_sub(int val);
 #define preempt_count_inc() preempt_count_add(1)
 #define preempt_count_dec() preempt_count_sub(1)
 
+#define preempt_active_enter() \
+do { \
+	preempt_count_add(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \
+	barrier(); \
+} while (0)
+
+#define preempt_active_exit() \
+do { \
+	barrier(); \
+	preempt_count_sub(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \
+} while (0)
+
 #ifdef CONFIG_PREEMPT_COUNT
 
 #define preempt_disable() \
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2773,9 +2773,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev)
  *      - return from syscall or exception to user-space
  *      - return from interrupt-handler to user-space
  *
- * WARNING: all callers must re-check need_resched() afterward and reschedule
- * accordingly in case an event triggered the need for rescheduling (such as
- * an interrupt waking up a task) while preemption was disabled in __schedule().
+ * WARNING: must be called with preemption disabled!
  */
 static void __sched __schedule(void)
 {
@@ -2784,7 +2782,6 @@ static void __sched __schedule(void)
 	struct rq *rq;
 	int cpu;
 
-	preempt_disable();
 	cpu = smp_processor_id();
 	rq = cpu_rq(cpu);
 	rcu_note_context_switch();
@@ -2848,8 +2845,6 @@ static void __sched __schedule(void)
 	raw_spin_unlock_irq(&rq->lock);
 
 	post_schedule(rq);
-
-	sched_preempt_enable_no_resched();
 }
 
 static inline void sched_submit_work(struct task_struct *tsk)
@@ -2870,7 +2865,9 @@ asmlinkage __visible void __sched schedule(void)
 
 	sched_submit_work(tsk);
 	do {
+		preempt_disable();
 		__schedule();
+		sched_preempt_enable_no_resched();
 	} while (need_resched());
 }
 EXPORT_SYMBOL(schedule);
@@ -2909,15 +2906,14 @@ void __sched schedule_preempt_disabled(void)
 static void __sched notrace preempt_schedule_common(void)
 {
 	do {
-		__preempt_count_add(PREEMPT_ACTIVE);
+		preempt_active_enter();
 		__schedule();
-		__preempt_count_sub(PREEMPT_ACTIVE);
+		preempt_active_exit();
 
 		/*
 		 * Check again in case we missed a preemption opportunity
 		 * between schedule and now.
 		 */
-		barrier();
 	} while (need_resched());
 }
 
@@ -2964,7 +2960,7 @@ asmlinkage __visible void __sched notrace preempt_schedule_context(void)
 		return;
 
 	do {
-		__preempt_count_add(PREEMPT_ACTIVE);
+		preempt_active_enter();
 		/*
 		 * Needs preempt disabled in case user_exit() is traced
 		 * and the tracer calls preempt_enable_notrace() causing
@@ -2974,8 +2970,7 @@ asmlinkage __visible void __sched notrace preempt_schedule_context(void)
 		__schedule();
 		exception_exit(prev_ctx);
 
-		__preempt_count_sub(PREEMPT_ACTIVE);
-		barrier();
+		preempt_active_exit();
 	} while (need_resched());
 }
 EXPORT_SYMBOL_GPL(preempt_schedule_context);
@@ -2999,17 +2994,11 @@ asmlinkage __visible void __sched preempt_schedule_irq(void)
 	prev_state = exception_enter();
 
 	do {
-		__preempt_count_add(PREEMPT_ACTIVE);
+		preempt_active_enter();
 		local_irq_enable();
 		__schedule();
 		local_irq_disable();
-		__preempt_count_sub(PREEMPT_ACTIVE);
-
-		/*
-		 * Check again in case we missed a preemption opportunity
-		 * between schedule and now.
-		 */
-		barrier();
+		preempt_active_exit();
 	} while (need_resched());
 
 	exception_exit(prev_state);