Commit bdb43806 authored by Peter Zijlstra, committed by Ingo Molnar

sched: Extract the basic add/sub preempt_count modifiers

Rewrite the preempt_count macros to extract the three basic
preempt_count value modifiers:

  __preempt_count_add()
  __preempt_count_sub()

and the new:

  __preempt_count_dec_and_test()

And since we're at it anyway, replace the unconventional
$op_preempt_count names with the more conventional preempt_count_$op.

Since these basic operators are equivalent to the previous _notrace()
variants, do away with the _notrace() versions.
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/n/tip-ewbpdbupy9xpsjhg960zwbv8@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 01028747
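
For orientation before the per-file hunks, here is a condensed sketch of the layering this patch introduces, assembled from the changes below. It is illustrative rather than a drop-in header: it omits the _sub/_dec counterparts and the !CONFIG_PREEMPT fallbacks, and it assumes preempt_count_ptr(), should_resched() and preempt_schedule() from the surrounding headers.

/* asm-generic/preempt.h side: the raw, never-traced modifiers. */
static __always_inline void __preempt_count_add(int val)
{
        *preempt_count_ptr() += val;
}

static __always_inline bool __preempt_count_dec_and_test(void)
{
        /* Decrement and report whether the count dropped to zero. */
        return !--*preempt_count_ptr();
}

/*
 * linux/preempt.h side: traced wrappers when debugging/tracing is enabled,
 * otherwise the wrappers collapse to the raw operations above.
 */
#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
extern void preempt_count_add(int val);
#define preempt_count_dec_and_test() \
        ({ preempt_count_sub(1); should_resched(); })
#else
#define preempt_count_add(val)        __preempt_count_add(val)
#define preempt_count_dec_and_test() __preempt_count_dec_and_test()
#endif

/* preempt_enable() now folds the decrement and the resched check into one test. */
#define preempt_enable() \
do { \
        barrier(); \
        if (unlikely(preempt_count_dec_and_test())) \
                preempt_schedule(); \
} while (0)

The _notrace() variants can go away because the bare __preempt_count_*() operations are exactly what those used to be.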
@@ -124,7 +124,7 @@ void *kmap_coherent(struct page *page, unsigned long addr)
        BUG_ON(Page_dcache_dirty(page));
-       inc_preempt_count();
+       pagefault_disable();
        idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
#ifdef CONFIG_MIPS_MT_SMTC
        idx += FIX_N_COLOURS * smp_processor_id() +
@@ -193,8 +193,7 @@ void kunmap_coherent(void)
        write_c0_entryhi(old_ctx);
        EXIT_CRITICAL(flags);
#endif
-       dec_preempt_count();
-       preempt_check_resched();
+       pagefault_enable();
}

void copy_user_highpage(struct page *to, struct page *from,
@@ -88,7 +88,7 @@ static inline void conditional_sti(struct pt_regs *regs)
static inline void preempt_conditional_sti(struct pt_regs *regs)
{
-       inc_preempt_count();
+       preempt_count_inc();
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_enable();
}
@@ -103,7 +103,7 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
{
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_disable();
-       dec_preempt_count();
+       preempt_count_dec();
}

static int __kprobes
@@ -65,4 +65,39 @@ static __always_inline bool test_preempt_need_resched(void)
        return !(*preempt_count_ptr() & PREEMPT_NEED_RESCHED);
}

+/*
+ * The various preempt_count add/sub methods
+ */
+static __always_inline void __preempt_count_add(int val)
+{
+       *preempt_count_ptr() += val;
+}
+
+static __always_inline void __preempt_count_sub(int val)
+{
+       *preempt_count_ptr() -= val;
+}
+
+static __always_inline bool __preempt_count_dec_and_test(void)
+{
+       return !--*preempt_count_ptr();
+}
+
+/*
+ * Returns true when we need to resched -- even if we can not.
+ */
+static __always_inline bool need_resched(void)
+{
+       return unlikely(test_preempt_need_resched());
+}
+
+/*
+ * Returns true when we need to resched and can (barring IRQ state).
+ */
+static __always_inline bool should_resched(void)
+{
+       return unlikely(!*preempt_count_ptr());
+}
+
#endif /* __ASM_PREEMPT_H */
@@ -33,7 +33,7 @@ extern void rcu_nmi_exit(void);
#define __irq_enter() \
        do { \
                account_irq_enter_time(current); \
-               add_preempt_count(HARDIRQ_OFFSET); \
+               preempt_count_add(HARDIRQ_OFFSET); \
                trace_hardirq_enter(); \
        } while (0)
@@ -49,7 +49,7 @@ extern void irq_enter(void);
        do { \
                trace_hardirq_exit(); \
                account_irq_exit_time(current); \
-               sub_preempt_count(HARDIRQ_OFFSET); \
+               preempt_count_sub(HARDIRQ_OFFSET); \
        } while (0)

/*
@@ -62,7 +62,7 @@ extern void irq_exit(void);
                lockdep_off(); \
                ftrace_nmi_enter(); \
                BUG_ON(in_nmi()); \
-               add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \
+               preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \
                rcu_nmi_enter(); \
                trace_hardirq_enter(); \
        } while (0)
@@ -72,7 +72,7 @@ extern void irq_exit(void);
                trace_hardirq_exit(); \
                rcu_nmi_exit(); \
                BUG_ON(!in_nmi()); \
-               sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \
+               preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \
                ftrace_nmi_exit(); \
                lockdep_on(); \
        } while (0)
@@ -18,97 +18,86 @@
#include <asm/preempt.h>

#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
-extern void add_preempt_count(int val);
-extern void sub_preempt_count(int val);
+extern void preempt_count_add(int val);
+extern void preempt_count_sub(int val);
+#define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(); })
#else
-# define add_preempt_count(val) do { *preempt_count_ptr() += (val); } while (0)
-# define sub_preempt_count(val) do { *preempt_count_ptr() -= (val); } while (0)
+#define preempt_count_add(val)  __preempt_count_add(val)
+#define preempt_count_sub(val)  __preempt_count_sub(val)
+#define preempt_count_dec_and_test() __preempt_count_dec_and_test()
#endif

-#define inc_preempt_count() add_preempt_count(1)
-#define dec_preempt_count() sub_preempt_count(1)
-
-#ifdef CONFIG_PREEMPT
-asmlinkage void preempt_schedule(void);
-
-#define preempt_check_resched() \
-do { \
-       if (unlikely(!*preempt_count_ptr())) \
-               preempt_schedule(); \
-} while (0)
-
-#ifdef CONFIG_CONTEXT_TRACKING
-void preempt_schedule_context(void);
-
-#define preempt_check_resched_context() \
-do { \
-       if (unlikely(!*preempt_count_ptr())) \
-               preempt_schedule_context(); \
-} while (0)
-#else
-#define preempt_check_resched_context() preempt_check_resched()
-#endif /* CONFIG_CONTEXT_TRACKING */
-
-#else /* !CONFIG_PREEMPT */
-#define preempt_check_resched()         do { } while (0)
-#define preempt_check_resched_context() do { } while (0)
-#endif /* CONFIG_PREEMPT */
+#define __preempt_count_inc() __preempt_count_add(1)
+#define __preempt_count_dec() __preempt_count_sub(1)
+
+#define preempt_count_inc() preempt_count_add(1)
+#define preempt_count_dec() preempt_count_sub(1)

#ifdef CONFIG_PREEMPT_COUNT

#define preempt_disable() \
do { \
-       inc_preempt_count(); \
+       preempt_count_inc(); \
        barrier(); \
} while (0)

#define sched_preempt_enable_no_resched() \
do { \
        barrier(); \
-       dec_preempt_count(); \
+       preempt_count_dec(); \
} while (0)

#define preempt_enable_no_resched() sched_preempt_enable_no_resched()

+#ifdef CONFIG_PREEMPT
+asmlinkage void preempt_schedule(void);
+
#define preempt_enable() \
do { \
-       preempt_enable_no_resched(); \
-       preempt_check_resched(); \
+       barrier(); \
+       if (unlikely(preempt_count_dec_and_test())) \
+               preempt_schedule(); \
} while (0)

-/* For debugging and tracer internals only! */
-#define add_preempt_count_notrace(val) \
-       do { *preempt_count_ptr() += (val); } while (0)
-#define sub_preempt_count_notrace(val) \
-       do { *preempt_count_ptr() -= (val); } while (0)
-#define inc_preempt_count_notrace() add_preempt_count_notrace(1)
-#define dec_preempt_count_notrace() sub_preempt_count_notrace(1)
+#define preempt_check_resched() \
+do { \
+       if (should_resched()) \
+               preempt_schedule(); \
+} while (0)
+
+#else
+#define preempt_enable() preempt_enable_no_resched()
+#define preempt_check_resched() do { } while (0)
+#endif

#define preempt_disable_notrace() \
do { \
-       inc_preempt_count_notrace(); \
+       __preempt_count_inc(); \
        barrier(); \
} while (0)

#define preempt_enable_no_resched_notrace() \
do { \
        barrier(); \
-       dec_preempt_count_notrace(); \
+       __preempt_count_dec(); \
} while (0)

-/* preempt_check_resched is OK to trace */
+#ifdef CONFIG_PREEMPT
+
+#ifdef CONFIG_CONTEXT_TRACKING
+asmlinkage void preempt_schedule_context(void);
+#else
+#define preempt_schedule_context() preempt_schedule()
+#endif
+
#define preempt_enable_notrace() \
do { \
-       preempt_enable_no_resched_notrace(); \
-       preempt_check_resched_context(); \
+       barrier(); \
+       if (unlikely(__preempt_count_dec_and_test())) \
+               preempt_schedule_context(); \
} while (0)
+
+#else
+#define preempt_enable_notrace() preempt_enable_no_resched_notrace()
+#endif

#else /* !CONFIG_PREEMPT_COUNT */
@@ -118,10 +107,11 @@ do { \
 * that can cause faults and scheduling migrate into our preempt-protected
 * region.
 */
#define preempt_disable()                       barrier()
#define sched_preempt_enable_no_resched()       barrier()
#define preempt_enable_no_resched()             barrier()
#define preempt_enable()                        barrier()
+#define preempt_check_resched()                 do { } while (0)

#define preempt_disable_notrace()               barrier()
#define preempt_enable_no_resched_notrace()     barrier()
@@ -2409,11 +2409,6 @@ static inline int signal_pending_state(long state, struct task_struct *p)
        return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}

-static inline int need_resched(void)
-{
-       return unlikely(test_preempt_need_resched());
-}

/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe. The return
@@ -15,7 +15,7 @@
 */
static inline void pagefault_disable(void)
{
-       inc_preempt_count();
+       preempt_count_inc();
        /*
         * make sure to have issued the store before a pagefault
         * can hit.
@@ -30,11 +30,7 @@ static inline void pagefault_enable(void)
         * the pagefault handler again.
         */
        barrier();
-       dec_preempt_count();
-       /*
-        * make sure we do..
-        */
-       barrier();
+       preempt_count_dec();
        preempt_check_resched();
}
@@ -111,7 +111,7 @@ void context_tracking_user_enter(void)
 * instead of preempt_schedule() to exit user context if needed before
 * calling the scheduler.
 */
-void __sched notrace preempt_schedule_context(void)
+asmlinkage void __sched notrace preempt_schedule_context(void)
{
        enum ctx_state prev_ctx;
@@ -2219,7 +2219,7 @@ notrace unsigned long get_parent_ip(unsigned long addr)
#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
                                defined(CONFIG_PREEMPT_TRACER))
-void __kprobes add_preempt_count(int val)
+void __kprobes preempt_count_add(int val)
{
#ifdef CONFIG_DEBUG_PREEMPT
        /*
@@ -2228,7 +2228,7 @@ void __kprobes add_preempt_count(int val)
        if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
                return;
#endif
-       add_preempt_count_notrace(val);
+       __preempt_count_add(val);
#ifdef CONFIG_DEBUG_PREEMPT
        /*
         * Spinlock count overflowing soon?
@@ -2239,9 +2239,9 @@ void __kprobes add_preempt_count(int val)
        if (preempt_count() == val)
                trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
-EXPORT_SYMBOL(add_preempt_count);
+EXPORT_SYMBOL(preempt_count_add);

-void __kprobes sub_preempt_count(int val)
+void __kprobes preempt_count_sub(int val)
{
#ifdef CONFIG_DEBUG_PREEMPT
        /*
@@ -2259,9 +2259,9 @@ void __kprobes sub_preempt_count(int val)
        if (preempt_count() == val)
                trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
-       sub_preempt_count_notrace(val);
+       __preempt_count_sub(val);
}
-EXPORT_SYMBOL(sub_preempt_count);
+EXPORT_SYMBOL(preempt_count_sub);

#endif
@@ -2525,9 +2525,9 @@ asmlinkage void __sched notrace preempt_schedule(void)
                return;

        do {
-               add_preempt_count_notrace(PREEMPT_ACTIVE);
+               __preempt_count_add(PREEMPT_ACTIVE);
                __schedule();
-               sub_preempt_count_notrace(PREEMPT_ACTIVE);
+               __preempt_count_sub(PREEMPT_ACTIVE);

                /*
                 * Check again in case we missed a preemption opportunity
@@ -2554,11 +2554,11 @@ asmlinkage void __sched preempt_schedule_irq(void)
        prev_state = exception_enter();

        do {
-               add_preempt_count(PREEMPT_ACTIVE);
+               __preempt_count_add(PREEMPT_ACTIVE);
                local_irq_enable();
                __schedule();
                local_irq_disable();
-               sub_preempt_count(PREEMPT_ACTIVE);
+               __preempt_count_sub(PREEMPT_ACTIVE);

                /*
                 * Check again in case we missed a preemption opportunity
@@ -3798,16 +3798,11 @@ SYSCALL_DEFINE0(sched_yield)
        return 0;
}

-static inline int should_resched(void)
-{
-       return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
-}

static void __cond_resched(void)
{
-       add_preempt_count(PREEMPT_ACTIVE);
+       __preempt_count_add(PREEMPT_ACTIVE);
        __schedule();
-       sub_preempt_count(PREEMPT_ACTIVE);
+       __preempt_count_sub(PREEMPT_ACTIVE);
}

int __sched _cond_resched(void)
@@ -100,13 +100,13 @@ static void __local_bh_disable(unsigned long ip, unsigned int cnt)
        raw_local_irq_save(flags);
        /*
-        * The preempt tracer hooks into add_preempt_count and will break
+        * The preempt tracer hooks into preempt_count_add and will break
         * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
         * is set and before current->softirq_enabled is cleared.
         * We must manually increment preempt_count here and manually
         * call the trace_preempt_off later.
         */
-       add_preempt_count_notrace(cnt);
+       __preempt_count_add(cnt);
        /*
         * Were softirqs turned off above:
         */
@@ -120,7 +120,7 @@ static void __local_bh_disable(unsigned long ip, unsigned int cnt)
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
-       add_preempt_count(cnt);
+       preempt_count_add(cnt);
        barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */
@@ -139,7 +139,7 @@ static void __local_bh_enable(unsigned int cnt)
        if (softirq_count() == cnt)
                trace_softirqs_on(_RET_IP_);
-       sub_preempt_count(cnt);
+       preempt_count_sub(cnt);
}

/*
@@ -169,12 +169,12 @@ static inline void _local_bh_enable_ip(unsigned long ip)
         * Keep preemption disabled until we are done with
         * softirq processing:
         */
-       sub_preempt_count(SOFTIRQ_DISABLE_OFFSET - 1);
+       preempt_count_sub(SOFTIRQ_DISABLE_OFFSET - 1);

        if (unlikely(!in_interrupt() && local_softirq_pending()))
                do_softirq();

-       dec_preempt_count();
+       preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_enable();
#endif
@@ -360,7 +360,7 @@ void irq_exit(void)
        account_irq_exit_time(current);
        trace_hardirq_exit();
-       sub_preempt_count(HARDIRQ_OFFSET);
+       preempt_count_sub(HARDIRQ_OFFSET);
        if (!in_interrupt() && local_softirq_pending())
                invoke_softirq();