Commit bdd4e85d authored by Frederic Weisbecker

sched: Isolate preempt counting in its own config option

Create a new CONFIG_PREEMPT_COUNT that handles the inc/dec
of the preempt count offset independently, so that the offset
can be updated by preempt_disable() and preempt_enable()
even without CONFIG_PREEMPT being set.

This prepares for making CONFIG_DEBUG_SPINLOCK_SLEEP work
with !CONFIG_PREEMPT, where it currently doesn't detect
code that sleeps inside explicitly preemption-disabled
sections.
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
parent 2da8c8bc
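
For illustration, below is a minimal sketch (not part of this commit) of the bug class the change prepares CONFIG_DEBUG_SPINLOCK_SLEEP to catch on !CONFIG_PREEMPT kernels. The function and lock names are hypothetical; only preempt_disable()/preempt_enable() and the might_sleep() check at the start of mutex_lock() are real kernel interfaces.

	#include <linux/mutex.h>
	#include <linux/preempt.h>

	static DEFINE_MUTEX(example_lock);	/* hypothetical lock */

	static void example_buggy_path(void)	/* hypothetical function */
	{
		preempt_disable();
		/*
		 * mutex_lock() may sleep and begins with a might_sleep()
		 * check. With CONFIG_PREEMPT_COUNT=y, preempt_disable()
		 * above has raised preempt_count(), so the debug check can
		 * flag this sleep even on a !CONFIG_PREEMPT kernel. Without
		 * PREEMPT_COUNT the counter stays at zero and the bug goes
		 * undetected.
		 */
		mutex_lock(&example_lock);	/* BUG: may sleep with preemption off */
		mutex_unlock(&example_lock);
		preempt_enable();
	}

With CONFIG_PREEMPT=y this was already detectable, since preemption support maintained the count; CONFIG_PREEMPT_COUNT decouples that bookkeeping from the preemption model itself.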
include/linux/bit_spinlock.h
@@ -88,7 +88,7 @@ static inline int bit_spin_is_locked(int bitnum, unsigned long *addr)
 {
 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
 	return test_bit(bitnum, addr);
-#elif defined CONFIG_PREEMPT
+#elif defined CONFIG_PREEMPT_COUNT
 	return preempt_count();
 #else
 	return 1;
include/linux/hardirq.h
@@ -93,7 +93,7 @@
  */
 #define in_nmi()	(preempt_count() & NMI_MASK)
 
-#if defined(CONFIG_PREEMPT)
+#if defined(CONFIG_PREEMPT_COUNT)
 # define PREEMPT_CHECK_OFFSET 1
 #else
 # define PREEMPT_CHECK_OFFSET 0
@@ -115,7 +115,7 @@
 #define in_atomic_preempt_off() \
 		((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
 # define preemptible()	(preempt_count() == 0 && !irqs_disabled())
 # define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
 #else
include/linux/pagemap.h
@@ -134,7 +134,7 @@ static inline int page_cache_get_speculative(struct page *page)
 	VM_BUG_ON(in_interrupt());
 
 #if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
-# ifdef CONFIG_PREEMPT
+# ifdef CONFIG_PREEMPT_COUNT
 	VM_BUG_ON(!in_atomic());
 # endif
 	/*
@@ -172,7 +172,7 @@ static inline int page_cache_add_speculative(struct page *page, int count)
 	VM_BUG_ON(in_interrupt());
 
 #if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
-# ifdef CONFIG_PREEMPT
+# ifdef CONFIG_PREEMPT_COUNT
 	VM_BUG_ON(!in_atomic());
 # endif
 	VM_BUG_ON(page_count(page) == 0);
include/linux/preempt.h
@@ -27,6 +27,21 @@
 
 asmlinkage void preempt_schedule(void);
 
+#define preempt_check_resched() \
+do { \
+	if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
+		preempt_schedule(); \
+} while (0)
+
+#else /* !CONFIG_PREEMPT */
+
+#define preempt_check_resched()		do { } while (0)
+
+#endif /* CONFIG_PREEMPT */
+
+#ifdef CONFIG_PREEMPT_COUNT
+
 #define preempt_disable() \
 do { \
 	inc_preempt_count(); \
@@ -39,12 +54,6 @@ do { \
 	dec_preempt_count(); \
 } while (0)
 
-#define preempt_check_resched() \
-do { \
-	if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
-		preempt_schedule(); \
-} while (0)
-
 #define preempt_enable() \
 do { \
 	preempt_enable_no_resched(); \
@@ -80,18 +89,17 @@ do { \
 	preempt_check_resched(); \
 } while (0)
 
-#else
+#else /* !CONFIG_PREEMPT_COUNT */
 
 #define preempt_disable()		do { } while (0)
 #define preempt_enable_no_resched()	do { } while (0)
 #define preempt_enable()		do { } while (0)
-#define preempt_check_resched()		do { } while (0)
 
 #define preempt_disable_notrace()		do { } while (0)
 #define preempt_enable_no_resched_notrace()	do { } while (0)
 #define preempt_enable_notrace()		do { } while (0)
 
-#endif
+#endif /* CONFIG_PREEMPT_COUNT */
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
include/linux/rcupdate.h
@@ -239,7 +239,7 @@ extern int rcu_read_lock_bh_held(void);
  * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
  * and while lockdep is disabled.
  */
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
 static inline int rcu_read_lock_sched_held(void)
 {
 	int lockdep_opinion = 0;
@@ -250,12 +250,12 @@ static inline int rcu_read_lock_sched_held(void)
 		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
 	return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
 }
-#else /* #ifdef CONFIG_PREEMPT */
+#else /* #ifdef CONFIG_PREEMPT_COUNT */
 static inline int rcu_read_lock_sched_held(void)
 {
 	return 1;
 }
-#endif /* #else #ifdef CONFIG_PREEMPT */
+#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */
 
 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
@@ -276,17 +276,17 @@ static inline int rcu_read_lock_bh_held(void)
 	return 1;
 }
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
 static inline int rcu_read_lock_sched_held(void)
 {
 	return preempt_count() != 0 || irqs_disabled();
 }
-#else /* #ifdef CONFIG_PREEMPT */
+#else /* #ifdef CONFIG_PREEMPT_COUNT */
 static inline int rcu_read_lock_sched_held(void)
 {
 	return 1;
 }
-#endif /* #else #ifdef CONFIG_PREEMPT */
+#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */
 
 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
include/linux/sched.h
@@ -2502,7 +2502,7 @@ extern int _cond_resched(void);
 
 extern int __cond_resched_lock(spinlock_t *lock);
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
 #define PREEMPT_LOCK_OFFSET	PREEMPT_OFFSET
 #else
 #define PREEMPT_LOCK_OFFSET	0
kernel/Kconfig.preempt
@@ -35,6 +35,7 @@ config PREEMPT_VOLUNTARY
 
 config PREEMPT
 	bool "Preemptible Kernel (Low-Latency Desktop)"
+	select PREEMPT_COUNT
 	help
 	  This option reduces the latency of the kernel by making
 	  all kernel code (that is not executing in a critical section)
@@ -52,3 +53,5 @@ config PREEMPT
 
 endchoice
 
+config PREEMPT_COUNT
+	bool
\ No newline at end of file
kernel/sched.c
@@ -2843,7 +2843,7 @@ void sched_fork(struct task_struct *p)
 #if defined(CONFIG_SMP)
 	p->on_cpu = 0;
 #endif
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
 	/* Want to start with kernel preemption disabled. */
 	task_thread_info(p)->preempt_count = 1;
 #endif