Commit 8128f55a authored by Linus Torvalds


Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86: Remove excessive early_res debug output
  softlockup: Stop spurious softlockup messages due to overflow
  rcu: Fix local_irq_disable() CONFIG_PROVE_RCU=y false positives
  rcu: Fix tracepoints & lockdep false positive
  rcu: Make rcu_read_lock_bh_held() allow for disabled BH
parents 50da5670 c26f91a3
@@ -123,22 +123,11 @@ static inline int rcu_read_lock_held(void)
         return lock_is_held(&rcu_lock_map);
 }
 
-/**
- * rcu_read_lock_bh_held - might we be in RCU-bh read-side critical section?
- *
- * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in
- * an RCU-bh read-side critical section. In absence of CONFIG_PROVE_LOCKING,
- * this assumes we are in an RCU-bh read-side critical section unless it can
- * prove otherwise.
- *
- * Check rcu_scheduler_active to prevent false positives during boot.
+/*
+ * rcu_read_lock_bh_held() is defined out of line to avoid #include-file
+ * hell.
  */
-static inline int rcu_read_lock_bh_held(void)
-{
-        if (!debug_lockdep_rcu_enabled())
-                return 1;
-        return lock_is_held(&rcu_bh_lock_map);
-}
+extern int rcu_read_lock_bh_held(void);
 
 /**
  * rcu_read_lock_sched_held - might we be in RCU-sched read-side critical section?
@@ -160,7 +149,7 @@ static inline int rcu_read_lock_sched_held(void)
                 return 1;
         if (debug_locks)
                 lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
-        return lockdep_opinion || preempt_count() != 0;
+        return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
 }
 #else /* #ifdef CONFIG_PREEMPT */
 static inline int rcu_read_lock_sched_held(void)
@@ -191,7 +180,7 @@ static inline int rcu_read_lock_bh_held(void)
 #ifdef CONFIG_PREEMPT
 static inline int rcu_read_lock_sched_held(void)
 {
-        return !rcu_scheduler_active || preempt_count() != 0;
+        return !rcu_scheduler_active || preempt_count() != 0 || irqs_disabled();
 }
 #else /* #ifdef CONFIG_PREEMPT */
 static inline int rcu_read_lock_sched_held(void)
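The new irqs_disabled() term matters because disabling interrupts on a CPU also blocks RCU-sched grace periods, so such code is a legitimate RCU-sched reader even without an explicit rcu_read_lock_sched(). A minimal sketch of that pattern, with hypothetical names that are not taken from this diff:

/* Illustration only: an RCU-sched reader that relies on disabled
 * interrupts; the struct and variable names are invented. */
#include <linux/irqflags.h>
#include <linux/rcupdate.h>

struct foo {
        int val;
};
static struct foo *foo_ptr;             /* assumed updated via RCU-sched */

static int foo_read_irqs_off(void)
{
        struct foo *p;
        unsigned long flags;
        int val = -1;

        local_irq_save(flags);                  /* blocks RCU-sched grace periods */
        p = rcu_dereference_sched(foo_ptr);
        if (p)
                val = p->val;
        local_irq_restore(flags);
        return val;
}

Before this change, rcu_read_lock_sched_held() could return 0 in such an interrupts-off region under CONFIG_PROVE_RCU, so the rcu_dereference_sched() above produced a false-positive lockdep splat.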
@@ -49,7 +49,7 @@ struct tracepoint {
                 void **it_func;                                         \
                                                                         \
                 rcu_read_lock_sched_notrace();                          \
-                it_func = rcu_dereference((tp)->funcs);                 \
+                it_func = rcu_dereference_sched((tp)->funcs);           \
                 if (it_func) {                                          \
                         do {                                            \
                                 ((void(*)(proto))(*it_func))(args);     \
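This one-token change matters because the CONFIG_PROVE_RCU checks are flavor-specific: rcu_dereference() asserts the plain rcu_read_lock() flavor, while the tracepoint iterator is protected by rcu_read_lock_sched_notrace(), so only the sched-flavor check can be satisfied here. A rough sketch of the pairing, using invented names for illustration:

/* Illustration only: each rcu_dereference*() variant pairs with the
 * matching read-side marker under CONFIG_PROVE_RCU. */
#include <linux/rcupdate.h>

struct cfg {
        int id;
};
static struct cfg *cfg_ptr;             /* assumed RCU-protected */

static int read_cfg_both_flavors(void)
{
        struct cfg *p;
        int id = -1;

        rcu_read_lock();                        /* plain flavor ... */
        p = rcu_dereference(cfg_ptr);           /* ... checked against rcu_read_lock_held() */
        if (p)
                id = p->id;
        rcu_read_unlock();

        rcu_read_lock_sched();                  /* sched flavor ... */
        p = rcu_dereference_sched(cfg_ptr);     /* ... checked against rcu_read_lock_sched_held() */
        if (p)
                id = p->id;
        rcu_read_unlock_sched();

        return id;
}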
@@ -45,6 +45,7 @@
 #include <linux/mutex.h>
 #include <linux/module.h>
 #include <linux/kernel_stat.h>
+#include <linux/hardirq.h>
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 static struct lock_class_key rcu_lock_key;
@@ -66,6 +67,28 @@ EXPORT_SYMBOL_GPL(rcu_sched_lock_map);
 int rcu_scheduler_active __read_mostly;
 EXPORT_SYMBOL_GPL(rcu_scheduler_active);
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+
+/**
+ * rcu_read_lock_bh_held - might we be in RCU-bh read-side critical section?
+ *
+ * Check for bottom half being disabled, which covers both the
+ * CONFIG_PROVE_RCU and not cases. Note that if someone uses
+ * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
+ * will show the situation.
+ *
+ * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
+ */
+int rcu_read_lock_bh_held(void)
+{
+        if (!debug_lockdep_rcu_enabled())
+                return 1;
+        return in_softirq();
+}
+EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
+
+#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
 /*
  * This function is invoked towards the end of the scheduler's initialization
  * process. Before this is called, the idle task might contain
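Because local_bh_disable() raises the softirq count in preempt_count(), in_softirq() is nonzero both inside softirq handlers and in any region with bottom halves disabled, which is exactly what the out-of-line check above relies on. A sketch of two reader patterns that both satisfy it; the names are hypothetical and not part of this commit:

/* Illustration only: two RCU-bh reader patterns that
 * rcu_read_lock_bh_held() accepts, since both run with BH disabled. */
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct item {
        int val;
};
static struct item *item_ptr;           /* assumed RCU-bh protected */
static DEFINE_SPINLOCK(item_lock);

static int read_with_rcu_bh(void)
{
        struct item *p;
        int val = 0;

        rcu_read_lock_bh();                     /* disables BH on this CPU */
        p = rcu_dereference_bh(item_ptr);       /* check passes: in_softirq() is nonzero */
        if (p)
                val = p->val;
        rcu_read_unlock_bh();
        return val;
}

static int read_with_spin_lock_bh(void)
{
        struct item *p;
        int val = 0;

        spin_lock_bh(&item_lock);               /* also disables BH */
        p = rcu_dereference_bh(item_ptr);       /* check passes here too */
        if (p)
                val = p->val;
        spin_unlock_bh(&item_lock);
        return val;
}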
@@ -155,11 +155,11 @@ void softlockup_tick(void)
          * Wake up the high-prio watchdog task twice per
          * threshold timespan.
          */
-        if (now > touch_ts + softlockup_thresh/2)
+        if (time_after(now - softlockup_thresh/2, touch_ts))
                 wake_up_process(per_cpu(softlockup_watchdog, this_cpu));
 
         /* Warn about unreasonable delays: */
-        if (now <= (touch_ts + softlockup_thresh))
+        if (time_before_eq(now - softlockup_thresh, touch_ts))
                 return;
 
         per_cpu(softlockup_print_ts, this_cpu) = touch_ts;
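The switch to time_after()/time_before_eq() avoids an unsigned overflow: the old form computes touch_ts + softlockup_thresh, which can wrap when touch_ts is large, while time_after() compares via a signed difference and stays correct across a wrap. A standalone userspace sketch of the difference; the after() macro below mirrors the kernel's time_after() subtraction trick and the values are invented for illustration:

/* Demonstrates why "now > touch_ts + thresh" misfires near the top of
 * the unsigned range, while the wrap-safe comparison does not. */
#include <stdio.h>
#include <limits.h>

/* Same idea as the kernel's time_after(a, b): true if a is after b,
 * even across an unsigned wrap. */
#define after(a, b) ((long)((b) - (a)) < 0)

int main(void)
{
        unsigned long touch_ts = ULONG_MAX - 5; /* timestamp close to wrap */
        unsigned long thresh = 60;
        unsigned long now = touch_ts + 3;       /* only 3 ticks later */

        /* Naive check: touch_ts + thresh wraps to a small value, so this
         * wrongly claims the threshold has already been exceeded. */
        printf("naive:     %d\n", now > touch_ts + thresh);

        /* Wrap-safe check, matching the fixed code: compares now - thresh
         * against touch_ts and correctly reports "not yet". */
        printf("wrap-safe: %d\n", after(now - thresh, touch_ts));

        return 0;
}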
@@ -180,19 +180,12 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
         end_aligned = end & ~(BITS_PER_LONG - 1);
 
         if (end_aligned <= start_aligned) {
-#if 1
-                printk(KERN_DEBUG " %lx - %lx\n", start, end);
-#endif
                 for (i = start; i < end; i++)
                         __free_pages_bootmem(pfn_to_page(i), 0);
 
                 return;
         }
 
-#if 1
-        printk(KERN_DEBUG " %lx %lx - %lx %lx\n",
-                start, start_aligned, end_aligned, end);
-#endif
         for (i = start; i < start_aligned; i++)
                 __free_pages_bootmem(pfn_to_page(i), 0);
@@ -428,9 +421,6 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
 {
 #ifdef CONFIG_NO_BOOTMEM
         free_early(physaddr, physaddr + size);
-#if 0
-        printk(KERN_DEBUG "free %lx %lx\n", physaddr, size);
-#endif
 #else
         unsigned long start, end;
@@ -456,9 +446,6 @@ void __init free_bootmem(unsigned long addr, unsigned long size)
 {
 #ifdef CONFIG_NO_BOOTMEM
         free_early(addr, addr + size);
-#if 0
-        printk(KERN_DEBUG "free %lx %lx\n", addr, size);
-#endif
 #else
         unsigned long start, end;