Commit cf6ace16 authored by Linus Torvalds

Merge branch 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  signal: align __lock_task_sighand() irq disabling and RCU
  softirq,rcu: Inform RCU of irq_exit() activity
  sched: Add irq_{enter,exit}() to scheduler_ipi()
  rcu: protect __rcu_read_unlock() against scheduler-using irq handlers
  rcu: Streamline code produced by __rcu_read_unlock()
  rcu: Fix RCU_BOOST race handling current->rcu_read_unlock_special
  rcu: decrease rcu_report_exp_rnp coupling with scheduler
parents acc11eab d1e9ae47
@@ -1260,6 +1260,9 @@ struct task_struct {
 #ifdef CONFIG_PREEMPT_RCU
 	int rcu_read_lock_nesting;
 	char rcu_read_unlock_special;
+#if defined(CONFIG_RCU_BOOST) && defined(CONFIG_TREE_PREEMPT_RCU)
+	int rcu_boosted;
+#endif /* #if defined(CONFIG_RCU_BOOST) && defined(CONFIG_TREE_PREEMPT_RCU) */
 	struct list_head rcu_node_entry;
 #endif /* #ifdef CONFIG_PREEMPT_RCU */
 #ifdef CONFIG_TREE_PREEMPT_RCU
...
@@ -68,6 +68,7 @@ struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state);
 DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
 static struct rcu_state *rcu_state = &rcu_preempt_state;
 
+static void rcu_read_unlock_special(struct task_struct *t);
 static int rcu_preempted_readers_exp(struct rcu_node *rnp);
 
 /*
@@ -147,7 +148,7 @@ static void rcu_preempt_note_context_switch(int cpu)
 	struct rcu_data *rdp;
 	struct rcu_node *rnp;
 
-	if (t->rcu_read_lock_nesting &&
+	if (t->rcu_read_lock_nesting > 0 &&
 	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
 
 		/* Possibly blocking in an RCU read-side critical section. */
@@ -190,6 +191,14 @@ static void rcu_preempt_note_context_switch(int cpu)
 			rnp->gp_tasks = &t->rcu_node_entry;
 		}
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	} else if (t->rcu_read_lock_nesting < 0 &&
+		   t->rcu_read_unlock_special) {
+
+		/*
+		 * Complete exit from RCU read-side critical section on
+		 * behalf of preempted instance of __rcu_read_unlock().
+		 */
+		rcu_read_unlock_special(t);
 	}
 
 	/*
@@ -284,7 +293,7 @@ static struct list_head *rcu_next_node_entry(struct task_struct *t,
  * notify RCU core processing or task having blocked during the RCU
  * read-side critical section.
  */
-static void rcu_read_unlock_special(struct task_struct *t)
+static noinline void rcu_read_unlock_special(struct task_struct *t)
 {
 	int empty;
 	int empty_exp;
@@ -309,7 +318,7 @@ static void rcu_read_unlock_special(struct task_struct *t)
 	}
 
 	/* Hardware IRQ handlers cannot block. */
-	if (in_irq()) {
+	if (in_irq() || in_serving_softirq()) {
 		local_irq_restore(flags);
 		return;
 	}
@@ -342,6 +351,11 @@ static void rcu_read_unlock_special(struct task_struct *t)
 #ifdef CONFIG_RCU_BOOST
 		if (&t->rcu_node_entry == rnp->boost_tasks)
 			rnp->boost_tasks = np;
+		/* Snapshot and clear ->rcu_boosted with rcu_node lock held. */
+		if (t->rcu_boosted) {
+			special |= RCU_READ_UNLOCK_BOOSTED;
+			t->rcu_boosted = 0;
+		}
 #endif /* #ifdef CONFIG_RCU_BOOST */
 		t->rcu_blocked_node = NULL;
 
@@ -358,7 +372,6 @@ static void rcu_read_unlock_special(struct task_struct *t)
 #ifdef CONFIG_RCU_BOOST
 		/* Unboost if we were boosted. */
 		if (special & RCU_READ_UNLOCK_BOOSTED) {
-			t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BOOSTED;
 			rt_mutex_unlock(t->rcu_boost_mutex);
 			t->rcu_boost_mutex = NULL;
 		}
@@ -387,13 +400,22 @@ void __rcu_read_unlock(void)
 	struct task_struct *t = current;
 
 	barrier();  /* needed if we ever invoke rcu_read_unlock in rcutree.c */
-	--t->rcu_read_lock_nesting;
-	barrier();  /* decrement before load of ->rcu_read_unlock_special */
-	if (t->rcu_read_lock_nesting == 0 &&
-	    unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
-		rcu_read_unlock_special(t);
+	if (t->rcu_read_lock_nesting != 1)
+		--t->rcu_read_lock_nesting;
+	else {
+		t->rcu_read_lock_nesting = INT_MIN;
+		barrier();  /* assign before ->rcu_read_unlock_special load */
+		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
+			rcu_read_unlock_special(t);
+		barrier();  /* ->rcu_read_unlock_special load before assign */
+		t->rcu_read_lock_nesting = 0;
+	}
 #ifdef CONFIG_PROVE_LOCKING
-	WARN_ON_ONCE(ACCESS_ONCE(t->rcu_read_lock_nesting) < 0);
+	{
+		int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
+
+		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
+	}
 #endif /* #ifdef CONFIG_PROVE_LOCKING */
 }
 EXPORT_SYMBOL_GPL(__rcu_read_unlock);
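
For context, a minimal reader-side sketch (struct foo, gbl_foo and read_foo_a are hypothetical names, not part of this commit): the rework above only matters on the outermost rcu_read_unlock(), where the INT_MIN sentinel keeps a scheduler-using irq handler that interrupts the unlock from seeing a half-completed exit.

#include <linux/rcupdate.h>

/* Hypothetical RCU-protected structure and global pointer. */
struct foo {
	int a;
};
static struct foo __rcu *gbl_foo;

static int read_foo_a(void)
{
	struct foo *p;
	int a = -1;

	rcu_read_lock();		/* ->rcu_read_lock_nesting goes 0 -> 1 */
	p = rcu_dereference(gbl_foo);
	if (p)
		a = p->a;
	/*
	 * Outermost unlock: the slow path (rcu_read_unlock_special()) runs
	 * only if this reader was preempted/blocked, was priority boosted,
	 * or owes RCU a quiescent state.
	 */
	rcu_read_unlock();
	return a;
}

If an irq handler enters the scheduler between the nesting update and the special-flag check, rcu_preempt_note_context_switch() (see the hunk above that tests rcu_read_lock_nesting < 0) now completes the exit on the reader's behalf.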
@@ -589,7 +611,8 @@ static void rcu_preempt_check_callbacks(int cpu)
 		rcu_preempt_qs(cpu);
 		return;
 	}
-	if (per_cpu(rcu_preempt_data, cpu).qs_pending)
+	if (t->rcu_read_lock_nesting > 0 &&
+	    per_cpu(rcu_preempt_data, cpu).qs_pending)
 		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
 }
@@ -695,9 +718,12 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
 
 	raw_spin_lock_irqsave(&rnp->lock, flags);
 	for (;;) {
-		if (!sync_rcu_preempt_exp_done(rnp))
+		if (!sync_rcu_preempt_exp_done(rnp)) {
+			raw_spin_unlock_irqrestore(&rnp->lock, flags);
 			break;
+		}
 		if (rnp->parent == NULL) {
+			raw_spin_unlock_irqrestore(&rnp->lock, flags);
 			wake_up(&sync_rcu_preempt_exp_wq);
 			break;
 		}
@@ -707,7 +733,6 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
 		raw_spin_lock(&rnp->lock); /* irqs already disabled */
 		rnp->expmask &= ~mask;
 	}
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 }
 
 /*
@@ -1174,7 +1199,7 @@ static int rcu_boost(struct rcu_node *rnp)
 	t = container_of(tb, struct task_struct, rcu_node_entry);
 	rt_mutex_init_proxy_locked(&mtx, t);
 	t->rcu_boost_mutex = &mtx;
-	t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BOOSTED;
+	t->rcu_boosted = 1;
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	rt_mutex_lock(&mtx);  /* Side effect: boosts task t's priority. */
 	rt_mutex_unlock(&mtx);  /* Keep lockdep happy. */
...
@@ -2544,13 +2544,9 @@ static int ttwu_remote(struct task_struct *p, int wake_flags)
 }
 
 #ifdef CONFIG_SMP
-static void sched_ttwu_pending(void)
+static void sched_ttwu_do_pending(struct task_struct *list)
 {
 	struct rq *rq = this_rq();
-	struct task_struct *list = xchg(&rq->wake_list, NULL);
-
-	if (!list)
-		return;
 
 	raw_spin_lock(&rq->lock);
@@ -2563,9 +2559,45 @@ static void sched_ttwu_pending(void)
 	raw_spin_unlock(&rq->lock);
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+
+static void sched_ttwu_pending(void)
+{
+	struct rq *rq = this_rq();
+	struct task_struct *list = xchg(&rq->wake_list, NULL);
+
+	if (!list)
+		return;
+
+	sched_ttwu_do_pending(list);
+}
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
 void scheduler_ipi(void)
 {
-	sched_ttwu_pending();
+	struct rq *rq = this_rq();
+	struct task_struct *list = xchg(&rq->wake_list, NULL);
+
+	if (!list)
+		return;
+
+	/*
+	 * Not all reschedule IPI handlers call irq_enter/irq_exit, since
+	 * traditionally all their work was done from the interrupt return
+	 * path. Now that we actually do some work, we need to make sure
+	 * we do call them.
+	 *
+	 * Some archs already do call them, luckily irq_enter/exit nest
+	 * properly.
+	 *
+	 * Arguably we should visit all archs and update all handlers,
+	 * however a fair share of IPIs are still resched only so this would
+	 * somewhat pessimize the simple resched case.
+	 */
+	irq_enter();
+	sched_ttwu_do_pending(list);
+	irq_exit();
 }
 
 static void ttwu_queue_remote(struct task_struct *p, int cpu)
...
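
The comment in the hunk above is the heart of this change: an IPI handler that now does real work must be bracketed by irq_enter()/irq_exit() so that RCU and softirq accounting see the interrupt. A hedged sketch of how an arch-level reschedule-IPI entry point interacts with this (example_resched_ipi_handler is hypothetical; irq_enter(), irq_exit() and scheduler_ipi() are the real kernel interfaces):

#include <linux/hardirq.h>
#include <linux/sched.h>

/*
 * Hypothetical arch-level reschedule-IPI entry point.  Archs that
 * already bracket their handler like this keep working, because the
 * irq_enter()/irq_exit() pair that scheduler_ipi() now opens around
 * its wake-list processing simply nests inside this one.
 */
static void example_resched_ipi_handler(void)
{
	irq_enter();		/* mark hardirq context for RCU/softirq */
	scheduler_ipi();	/* may process the remote wake list */
	irq_exit();		/* run any softirqs raised by the wakeups */
}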
@@ -1178,18 +1178,25 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
 {
 	struct sighand_struct *sighand;
 
-	rcu_read_lock();
 	for (;;) {
+		local_irq_save(*flags);
+		rcu_read_lock();
 		sighand = rcu_dereference(tsk->sighand);
-		if (unlikely(sighand == NULL))
+		if (unlikely(sighand == NULL)) {
+			rcu_read_unlock();
+			local_irq_restore(*flags);
 			break;
+		}
 
-		spin_lock_irqsave(&sighand->siglock, *flags);
-		if (likely(sighand == tsk->sighand))
+		spin_lock(&sighand->siglock);
+		if (likely(sighand == tsk->sighand)) {
+			rcu_read_unlock();
 			break;
-		spin_unlock_irqrestore(&sighand->siglock, *flags);
+		}
+		spin_unlock(&sighand->siglock);
+		rcu_read_unlock();
+		local_irq_restore(*flags);
 	}
-	rcu_read_unlock();
 
 	return sighand;
 }
...
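
On success __lock_task_sighand() still returns with irqs disabled, the saved state in *flags and ->siglock held; the change only keeps the RCU read-side critical section strictly inside the irq-disabled region. A minimal caller sketch (example_task_has_pending is hypothetical; lock_task_sighand()/unlock_task_sighand() are the usual inline wrappers around this function):

#include <linux/sched.h>
#include <linux/signal.h>

static bool example_task_has_pending(struct task_struct *tsk)
{
	unsigned long flags;
	bool pending = false;

	/* Returns NULL if the task is already dead (->sighand gone). */
	if (lock_task_sighand(tsk, &flags)) {
		pending = !sigisemptyset(&tsk->pending.signal);
		unlock_task_sighand(tsk, &flags);	/* drops ->siglock, restores irqs */
	}
	return pending;
}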
@@ -315,16 +315,24 @@ static inline void invoke_softirq(void)
 {
 	if (!force_irqthreads)
 		__do_softirq();
-	else
+	else {
+		__local_bh_disable((unsigned long)__builtin_return_address(0),
+				SOFTIRQ_OFFSET);
 		wakeup_softirqd();
+		__local_bh_enable(SOFTIRQ_OFFSET);
+	}
 }
 #else
 static inline void invoke_softirq(void)
 {
 	if (!force_irqthreads)
 		do_softirq();
-	else
+	else {
+		__local_bh_disable((unsigned long)__builtin_return_address(0),
+				SOFTIRQ_OFFSET);
 		wakeup_softirqd();
+		__local_bh_enable(SOFTIRQ_OFFSET);
+	}
 }
 #endif
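
The __local_bh_disable()/__local_bh_enable() pair above is internal to softirq.c: it marks the punted work as softirq-in-progress so the in_serving_softirq() test added to rcu_read_unlock_special() earlier in this merge does the right thing when the softirq is handed to ksoftirqd. The loose public-API analogue is local_bh_disable()/local_bh_enable(), sketched here with a hypothetical per-CPU counter (example_count and example_bump_count are not from this commit):

#include <linux/interrupt.h>
#include <linux/percpu.h>

/* Hypothetical per-CPU state shared with a softirq handler. */
static DEFINE_PER_CPU(unsigned long, example_count);

static void example_bump_count(void)
{
	local_bh_disable();		/* keep softirqs off this CPU */
	__this_cpu_inc(example_count);
	local_bh_enable();		/* pending softirqs may run here */
}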