Commit 1ba4b8cb authored by Linus Torvalds

Merge branch 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  rcu: Start RCU kthreads in TASK_INTERRUPTIBLE state
  rcu: Remove waitqueue usage for cpu, node, and boost kthreads
  rcu: Avoid acquiring rcu_node locks in timer functions
  atomic: Add atomic_or()
  Documentation: Add statistics about nested locks
  rcu: Decrease memory-barrier usage based on semi-formal proof
  rcu: Make rcu_enter_nohz() pay attention to nesting
  rcu: Don't do reschedule unless in irq
  rcu: Remove old memory barriers from rcu_process_callbacks()
  rcu: Add memory barriers
  rcu: Fix unpaired rcu_irq_enter() from locking selftests
parents c4a227d8 cc3ce517
@@ -99,18 +99,11 @@ o "qp" indicates that RCU still expects a quiescent state from
 o	"dt" is the current value of the dyntick counter that is incremented
 	when entering or leaving dynticks idle state, either by the
-	scheduler or by irq. The number after the "/" is the interrupt
-	nesting depth when in dyntick-idle state, or one greater than
-	the interrupt-nesting depth otherwise.
-
-	This field is displayed only for CONFIG_NO_HZ kernels.
-
-o	"dn" is the current value of the dyntick counter that is incremented
-	when entering or leaving dynticks idle state via NMI. If both
-	the "dt" and "dn" values are even, then this CPU is in dynticks
-	idle mode and may be ignored by RCU. If either of these two
-	counters is odd, then RCU must be alert to the possibility of
-	an RCU read-side critical section running on this CPU.
+	scheduler or by irq. This number is even if the CPU is in
+	dyntick idle mode and odd otherwise. The number after the first
+	"/" is the interrupt nesting depth when in dyntick-idle state,
+	or one greater than the interrupt-nesting depth otherwise.
+	The number after the second "/" is the NMI nesting depth.
 
 	This field is displayed only for CONFIG_NO_HZ kernels.
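The even/odd convention is the core of the dyntick-idle handshake: the counter is incremented on every transition into or out of dyntick-idle, so an even value means the CPU is idle and can be ignored by RCU, while an odd value means RCU must watch it for read-side critical sections. A toy userspace model of the protocol (illustrative only; the names and C11 atomics are mine, not the kernel's):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int dynticks = 1;		/* odd: the CPU starts out active */

static void enter_dyntick_idle(void)	/* odd -> even */
{
	atomic_fetch_add(&dynticks, 1);
}

static void exit_dyntick_idle(void)	/* even -> odd */
{
	atomic_fetch_add(&dynticks, 1);
}

static bool rcu_must_watch_cpu(void)
{
	/* An odd value means a read-side critical section may be running. */
	return atomic_load(&dynticks) & 0x1;
}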
...
@@ -12,8 +12,9 @@ Because things like lock contention can severely impact performance.
 - HOW
 
 Lockdep already has hooks in the lock functions and maps lock instances to
-lock classes. We build on that. The graph below shows the relation between
-the lock functions and the various hooks therein.
+lock classes. We build on that (see Documentation/lockdep-design.txt).
+The graph below shows the relation between the lock functions and the various
+hooks therein.
 
         __acquire
             |
@@ -128,6 +129,37 @@ points are the points we're contending with.
 
 The integer part of the time values is in us.
 
+Dealing with nested locks, subclasses may appear:
+
+32..............................................................................................................
+33
+34              &rq->lock:      13128      13128      0.43     190.53   103881.26      97454    3453404      0.00     401.11  13224683.11
+35              ---------
+36              &rq->lock        645      [<ffffffff8103bfc4>] task_rq_lock+0x43/0x75
+37              &rq->lock        297      [<ffffffff8104ba65>] try_to_wake_up+0x127/0x25a
+38              &rq->lock        360      [<ffffffff8103c4c5>] select_task_rq_fair+0x1f0/0x74a
+39              &rq->lock        428      [<ffffffff81045f98>] scheduler_tick+0x46/0x1fb
+40              ---------
+41              &rq->lock         77      [<ffffffff8103bfc4>] task_rq_lock+0x43/0x75
+42              &rq->lock        174      [<ffffffff8104ba65>] try_to_wake_up+0x127/0x25a
+43              &rq->lock       4715      [<ffffffff8103ed4b>] double_rq_lock+0x42/0x54
+44              &rq->lock        893      [<ffffffff81340524>] schedule+0x157/0x7b8
+45
+46..............................................................................................................
+47
+48            &rq->lock/1:      11526      11488      0.33     388.73   136294.31      21461      38404      0.00      37.93    109388.53
+49            -----------
+50            &rq->lock/1      11526      [<ffffffff8103ed58>] double_rq_lock+0x4f/0x54
+51            -----------
+52            &rq->lock/1       5645      [<ffffffff8103ed4b>] double_rq_lock+0x42/0x54
+53            &rq->lock/1       1224      [<ffffffff81340524>] schedule+0x157/0x7b8
+54            &rq->lock/1       4336      [<ffffffff8103ed58>] double_rq_lock+0x4f/0x54
+55            &rq->lock/1        181      [<ffffffff8104ba65>] try_to_wake_up+0x127/0x25a
+
+Line 48 shows statistics for the second subclass (/1) of the &rq->lock
+class (subclasses start at 0); in this case, as line 50 suggests,
+double_rq_lock actually acquires a nested lock of two spinlocks.
+
 View the top contending locks:
 
 # grep : /proc/lock_stat | head
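The "/1" subclass exists because the second acquisition of a lock from an already-held class must be annotated so lockdep can tell the two apart. A simplified sketch of the pattern behind double_rq_lock (the double_lock helper and bare spinlock_t arguments are illustrative; the scheduler's real function operates on struct rq):

#include <linux/spinlock.h>

/* Take two locks of the same lock class in address order, annotating the
 * second acquisition as subclass 1 (SINGLE_DEPTH_NESTING) so that lockdep
 * and lock_stat account it separately: the "/1" rows shown above. */
static void double_lock(spinlock_t *a, spinlock_t *b)
{
	if (a < b) {
		spin_lock(a);
		spin_lock_nested(b, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock(b);
		spin_lock_nested(a, SINGLE_DEPTH_NESTING);
	}
}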
...
@@ -34,4 +34,17 @@ static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
 }
 #endif
 
+#ifndef CONFIG_ARCH_HAS_ATOMIC_OR
+static inline void atomic_or(int i, atomic_t *v)
+{
+	int old;
+	int new;
+
+	do {
+		old = atomic_read(v);
+		new = old | i;
+	} while (atomic_cmpxchg(v, old, new) != old);
+}
+#endif /* #ifndef CONFIG_ARCH_HAS_ATOMIC_OR */
+
 #endif /* _LINUX_ATOMIC_H */
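The generic fallback above is a standard compare-and-swap retry loop: read the current value, OR in the new bits, and let atomic_cmpxchg() install the result only if no other CPU modified the word in between, retrying otherwise. A minimal usage sketch (the flag names are hypothetical):

/* Hypothetical caller: atomically set bits in a shared flags word. */
#define FLAG_PENDING	0x1
#define FLAG_URGENT	0x4

static atomic_t flags = ATOMIC_INIT(0);

static void mark_urgent(void)
{
	/* Like flags |= FLAG_PENDING | FLAG_URGENT, but safe against
	 * concurrent updaters on other CPUs. */
	atomic_or(FLAG_PENDING | FLAG_URGENT, &flags);
}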
This diff is collapsed.
@@ -84,11 +84,9 @@
  * Dynticks per-CPU state.
  */
 struct rcu_dynticks {
-	int dynticks_nesting;	/* Track nesting level, sort of. */
-	int dynticks;		/* Even value for dynticks-idle, else odd. */
-	int dynticks_nmi;	/* Even value for either dynticks-idle or */
-				/* not in nmi handler, else odd. So this */
-				/* remains even for nmi from irq handler. */
+	int dynticks_nesting;	/* Track irq/process nesting level. */
+	int dynticks_nmi_nesting; /* Track NMI nesting level. */
+	atomic_t dynticks;	/* Even value for dynticks-idle, else odd. */
 };
 
 /* RCU's kthread states for tracing. */
@@ -121,7 +119,9 @@ struct rcu_node {
 				/* elements that need to drain to allow the */
 				/* current expedited grace period to */
 				/* complete (only for TREE_PREEMPT_RCU). */
-	unsigned long wakemask;	/* CPUs whose kthread needs to be awakened. */
+	atomic_t wakemask;	/* CPUs whose kthread needs to be awakened. */
+				/* Since this has meaning only for leaf */
+				/* rcu_node structures, 32 bits suffices. */
 	unsigned long qsmaskinit;
 				/* Per-GP initial value for qsmask & expmask. */
 	unsigned long grpmask;	/* Mask to apply to parent qsmask. */
@@ -159,9 +159,6 @@ struct rcu_node {
 	struct task_struct *boost_kthread_task;
 				/* kthread that takes care of priority */
 				/* boosting for this rcu_node structure. */
-	wait_queue_head_t boost_wq;
-				/* Wait queue on which to park the boost */
-				/* kthread. */
 	unsigned int boost_kthread_status;
 				/* State of boost_kthread_task for tracing. */
 	unsigned long n_tasks_boosted;
@@ -188,9 +185,6 @@ struct rcu_node {
 				/* kthread that takes care of this rcu_node */
 				/* structure, for example, awakening the */
 				/* per-CPU kthreads as needed. */
-	wait_queue_head_t node_wq;
-				/* Wait queue on which to park the per-node */
-				/* kthread. */
 	unsigned int node_kthread_status;
 				/* State of node_kthread_task for tracing. */
 } ____cacheline_internodealigned_in_smp;
@@ -284,7 +278,6 @@ struct rcu_data {
 	/* 3) dynticks interface. */
 	struct rcu_dynticks *dynticks;	/* Shared per-CPU dynticks state. */
 	int dynticks_snap;		/* Per-GP tracking for dynticks. */
-	int dynticks_nmi_snap;		/* Per-GP tracking for dynticks_nmi. */
 #endif /* #ifdef CONFIG_NO_HZ */
 
 /* 4) reasons this CPU needed to be kicked by force_quiescent_state */
@@ -337,6 +330,16 @@ struct rcu_data {
 						/*  scheduling clock irq */
 						/*  before ratting on them. */
 
+#define rcu_wait(cond)							\
+do {									\
+	for (;;) {							\
+		set_current_state(TASK_INTERRUPTIBLE);			\
+		if (cond)						\
+			break;						\
+		schedule();						\
+	}								\
+	__set_current_state(TASK_RUNNING);				\
+} while (0)
+
 /*
  * RCU global state, including node hierarchy. This hierarchy is
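The rcu_wait() macro added here is the waitqueue-free parking primitive the series switches the RCU kthreads to: the sleeper publishes TASK_INTERRUPTIBLE before testing cond, so a waker that makes the condition true and then calls wake_up_process() cannot lose the wakeup, and no wait_queue_head_t needs to exist at all. A sketch of the sleeper/waker pairing (worker_kthread, work_ready, and kick_worker are hypothetical, not from this commit):

/* Hypothetical kthread parked with rcu_wait() and kicked with
 * wake_up_process(), mirroring the boost-kthread usage further down. */
static struct task_struct *worker_task;
static int work_ready;

static int worker_kthread(void *unused)
{
	for (;;) {
		rcu_wait(work_ready);	/* sleeps until the flag is set */
		work_ready = 0;
		/* ... process the queued work here ... */
	}
	return 0;
}

static void kick_worker(void)
{
	work_ready = 1;			/* make the condition true first */
	wake_up_process(worker_task);	/* then wake the parked kthread */
}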
@@ -446,7 +449,6 @@ static void __cpuinit rcu_preempt_init_percpu_data(int cpu);
 static void rcu_preempt_send_cbs_to_online(void);
 static void __init __rcu_init_preempt(void);
 static void rcu_needs_cpu_flush(void);
-static void __init rcu_init_boost_waitqueue(struct rcu_node *rnp);
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
 					  cpumask_var_t cm);
...
@@ -1196,8 +1196,7 @@ static int rcu_boost_kthread(void *arg)
 	for (;;) {
 		rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
-		wait_event_interruptible(rnp->boost_wq, rnp->boost_tasks ||
-							rnp->exp_tasks);
+		rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
 		rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
 		more2boost = rcu_boost(rnp);
 		if (more2boost)
@@ -1274,14 +1273,6 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
 	rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
 }
 
-/*
- * Initialize the RCU-boost waitqueue.
- */
-static void __init rcu_init_boost_waitqueue(struct rcu_node *rnp)
-{
-	init_waitqueue_head(&rnp->boost_wq);
-}
-
 /*
  * Create an RCU-boost kthread for the specified node if one does not
  * already exist. We only create this kthread for preemptible RCU.
@@ -1304,9 +1295,9 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 	if (IS_ERR(t))
 		return PTR_ERR(t);
 	raw_spin_lock_irqsave(&rnp->lock, flags);
+	set_task_state(t, TASK_INTERRUPTIBLE);
 	rnp->boost_kthread_task = t;
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
-	wake_up_process(t);
 	sp.sched_priority = RCU_KTHREAD_PRIO;
 	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
 	return 0;
@@ -1328,10 +1319,6 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
 {
 }
 
-static void __init rcu_init_boost_waitqueue(struct rcu_node *rnp)
-{
-}
-
 static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 						 struct rcu_node *rnp,
 						 int rnp_index)
@@ -1520,7 +1507,6 @@ int rcu_needs_cpu(int cpu)
 {
 	int c = 0;
 	int snap;
-	int snap_nmi;
 	int thatcpu;
 
 	/* Check for being in the holdoff period. */
@@ -1531,10 +1517,10 @@ int rcu_needs_cpu(int cpu)
 	for_each_online_cpu(thatcpu) {
 		if (thatcpu == cpu)
 			continue;
-		snap = per_cpu(rcu_dynticks, thatcpu).dynticks;
-		snap_nmi = per_cpu(rcu_dynticks, thatcpu).dynticks_nmi;
+		snap = atomic_add_return(0, &per_cpu(rcu_dynticks,
+						     thatcpu).dynticks);
 		smp_mb(); /* Order sampling of snap with end of grace period. */
-		if (((snap & 0x1) != 0) || ((snap_nmi & 0x1) != 0)) {
+		if ((snap & 0x1) != 0) {
 			per_cpu(rcu_dyntick_drain, cpu) = 0;
 			per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
 			return rcu_needs_cpu_quick_check(cpu);
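Sampling the remote counter with atomic_add_return(0, ...) rather than a plain read is deliberate: atomic_add_return() implies a full memory barrier before and after the operation on all architectures, so adding zero returns the counter value as a fully ordered load, which atomic_read() alone would not guarantee. The idiom as a standalone sketch (the helper name is hypothetical):

/* Fully ordered read of an atomic_t: the add of zero is a no-op, but
 * atomic_add_return() acts as a full barrier on both sides. */
static inline int atomic_read_ordered(atomic_t *v)
{
	return atomic_add_return(0, v);
}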
...
@@ -69,10 +69,10 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
 		   rdp->passed_quiesc, rdp->passed_quiesc_completed,
 		   rdp->qs_pending);
 #ifdef CONFIG_NO_HZ
-	seq_printf(m, " dt=%d/%d dn=%d df=%lu",
-		   rdp->dynticks->dynticks,
+	seq_printf(m, " dt=%d/%d/%d df=%lu",
+		   atomic_read(&rdp->dynticks->dynticks),
 		   rdp->dynticks->dynticks_nesting,
-		   rdp->dynticks->dynticks_nmi,
+		   rdp->dynticks->dynticks_nmi_nesting,
 		   rdp->dynticks_fqs);
 #endif /* #ifdef CONFIG_NO_HZ */
 	seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi);
@@ -141,9 +141,9 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
 		   rdp->qs_pending);
 #ifdef CONFIG_NO_HZ
 	seq_printf(m, ",%d,%d,%d,%lu",
-		   rdp->dynticks->dynticks,
+		   atomic_read(&rdp->dynticks->dynticks),
 		   rdp->dynticks->dynticks_nesting,
-		   rdp->dynticks->dynticks_nmi,
+		   rdp->dynticks->dynticks_nmi_nesting,
 		   rdp->dynticks_fqs);
 #endif /* #ifdef CONFIG_NO_HZ */
 	seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi);
@@ -167,7 +167,7 @@ static int show_rcudata_csv(struct seq_file *m, void *unused)
 {
 	seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pqc\",\"pq\",");
 #ifdef CONFIG_NO_HZ
-	seq_puts(m, "\"dt\",\"dt nesting\",\"dn\",\"df\",");
+	seq_puts(m, "\"dt\",\"dt nesting\",\"dt NMI nesting\",\"df\",");
 #endif /* #ifdef CONFIG_NO_HZ */
 	seq_puts(m, "\"of\",\"ri\",\"ql\",\"b\",\"ci\",\"co\",\"ca\"\n");
 #ifdef CONFIG_TREE_PREEMPT_RCU
...
@@ -144,7 +144,7 @@ static void init_shared_classes(void)
 
 #define HARDIRQ_ENTER()				\
 	local_irq_disable();			\
-	irq_enter();				\
+	__irq_enter();				\
 	WARN_ON(!in_irq());
 
 #define HARDIRQ_EXIT()				\
...