Commit 90d3ac15 authored by David S. Miller

Merge commit '317f3941'

Conflicts:
	arch/sparc/kernel/smp_32.c

With merge conflict help from Daniel Hellstrom.
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 9fafbd80 317f3941
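
The common theme of the merged series: every architecture's reschedule IPI handler, which used to be an empty stub that relied on the interrupt return path to trigger a reschedule, now calls the new scheduler_ipi() hook declared in <linux/sched.h>. A minimal, illustrative sketch of the resulting handler shape (the handler name and wiring are placeholders, not any particular architecture's code):

/*
 * Illustrative sketch only: shows the pattern the per-arch hunks below
 * apply.  The handler name is hypothetical; each architecture registers
 * its reschedule IPI in its own way.
 */
#include <linux/interrupt.h>
#include <linux/sched.h>	/* declares scheduler_ipi() */

static irqreturn_t example_resched_ipi(int irq, void *dev_id)
{
	/* Previously an empty body: the reschedule happened implicitly on
	 * the interrupt return path.  Now the scheduler is notified
	 * explicitly, which also lets it finish queued remote wakeups. */
	scheduler_ipi();
	return IRQ_HANDLED;
}
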
@@ -585,8 +585,7 @@ handle_ipi(struct pt_regs *regs)
 	switch (which) {
 	case IPI_RESCHEDULE:
-		/* Reschedule callback.  Everything to be done
-		   is done by the interrupt return path.  */
+		scheduler_ipi();
 		break;
 	case IPI_CALL_FUNC:
......
@@ -560,10 +560,7 @@ asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
 		break;
 	case IPI_RESCHEDULE:
-		/*
-		 * nothing more to do - eveything is
-		 * done on the interrupt return path
-		 */
+		scheduler_ipi();
 		break;
 	case IPI_CALL_FUNC:
......
@@ -177,6 +177,9 @@ static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
 	while (msg_queue->count) {
 		msg = &msg_queue->ipi_message[msg_queue->head];
 		switch (msg->type) {
+		case BFIN_IPI_RESCHEDULE:
+			scheduler_ipi();
+			break;
 		case BFIN_IPI_CALL_FUNC:
 			spin_unlock_irqrestore(&msg_queue->lock, flags);
 			ipi_call_function(cpu, msg);
......
@@ -342,15 +342,18 @@ irqreturn_t crisv32_ipi_interrupt(int irq, void *dev_id)
 	ipi = REG_RD(intr_vect, irq_regs[smp_processor_id()], rw_ipi);
+	if (ipi.vector & IPI_SCHEDULE) {
+		scheduler_ipi();
+	}
 	if (ipi.vector & IPI_CALL) {
 		func(info);
 	}
 	if (ipi.vector & IPI_FLUSH_TLB) {
 		if (flush_mm == FLUSH_ALL)
 			__flush_tlb_all();
 		else if (flush_vma == FLUSH_ALL)
 			__flush_tlb_mm(flush_mm);
 		else
 			__flush_tlb_page(flush_vma, flush_addr);
 	}
......
@@ -31,6 +31,7 @@
 #include <linux/irq.h>
 #include <linux/ratelimit.h>
 #include <linux/acpi.h>
+#include <linux/sched.h>
 #include <asm/delay.h>
 #include <asm/intrinsics.h>
@@ -496,6 +497,7 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
 			smp_local_flush_tlb();
 			kstat_incr_irqs_this_cpu(irq, desc);
 		} else if (unlikely(IS_RESCHEDULE(vector))) {
+			scheduler_ipi();
 			kstat_incr_irqs_this_cpu(irq, desc);
 		} else {
 			ia64_setreg(_IA64_REG_CR_TPR, vector);
......
@@ -92,6 +92,8 @@ static unsigned short saved_irq_cnt;
 static int xen_slab_ready;
 #ifdef CONFIG_SMP
+#include <linux/sched.h>
 /* Dummy stub. Though we may check XEN_RESCHEDULE_VECTOR before __do_IRQ,
  * it ends up to issue several memory accesses upon percpu data and
  * thus adds unnecessary traffic to other paths.
@@ -99,7 +101,13 @@ static int xen_slab_ready;
 static irqreturn_t
 xen_dummy_handler(int irq, void *dev_id)
 {
+	return IRQ_HANDLED;
+}
+static irqreturn_t
+xen_resched_handler(int irq, void *dev_id)
+{
+	scheduler_ipi();
 	return IRQ_HANDLED;
 }
@@ -110,7 +118,7 @@ static struct irqaction xen_ipi_irqaction = {
 };
 static struct irqaction xen_resched_irqaction = {
-	.handler =	xen_dummy_handler,
+	.handler =	xen_resched_handler,
 	.flags =	IRQF_DISABLED,
 	.name =		"resched"
 };
......
@@ -122,8 +122,6 @@ void smp_send_reschedule(int cpu_id)
  *
  * Description:  This routine executes on CPU which received
  *               'RESCHEDULE_IPI'.
- *               Rescheduling is processed at the exit of interrupt
- *               operation.
  *
  * Born on Date: 2002.02.05
  *
@@ -138,7 +136,7 @@ void smp_send_reschedule(int cpu_id)
  *==========================================================================*/
 void smp_reschedule_interrupt(void)
 {
-	/* nothing to do */
+	scheduler_ipi();
 }
 /*==========================================================================*
......
@@ -44,6 +44,8 @@ static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
 	if (action & SMP_CALL_FUNCTION)
 		smp_call_function_interrupt();
+	if (action & SMP_RESCHEDULE_YOURSELF)
+		scheduler_ipi();
 	/* Check if we've been told to flush the icache */
 	if (action & SMP_ICACHE_FLUSH)
......
@@ -929,7 +929,7 @@ static void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
 static void ipi_resched_interrupt(void)
 {
-	/* Return from interrupt should be enough to cause scheduler check */
+	scheduler_ipi();
 }
 static void ipi_call_interrupt(void)
......
@@ -309,6 +309,8 @@ static void ipi_call_dispatch(void)
 static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
 {
+	scheduler_ipi();
 	return IRQ_HANDLED;
 }
......
@@ -55,6 +55,8 @@ void titan_mailbox_irq(void)
 		if (status & 0x2)
 			smp_call_function_interrupt();
+		if (status & 0x4)
+			scheduler_ipi();
 		break;
 	case 1:
@@ -63,6 +65,8 @@ void titan_mailbox_irq(void)
 		if (status & 0x2)
 			smp_call_function_interrupt();
+		if (status & 0x4)
+			scheduler_ipi();
 		break;
 	}
 }
......
@@ -147,8 +147,10 @@ static void ip27_do_irq_mask0(void)
 #ifdef CONFIG_SMP
 	if (pend0 & (1UL << CPU_RESCHED_A_IRQ)) {
 		LOCAL_HUB_CLR_INTR(CPU_RESCHED_A_IRQ);
+		scheduler_ipi();
 	} else if (pend0 & (1UL << CPU_RESCHED_B_IRQ)) {
 		LOCAL_HUB_CLR_INTR(CPU_RESCHED_B_IRQ);
+		scheduler_ipi();
 	} else if (pend0 & (1UL << CPU_CALL_A_IRQ)) {
 		LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ);
 		smp_call_function_interrupt();
......
@@ -20,6 +20,7 @@
 #include <linux/delay.h>
 #include <linux/smp.h>
 #include <linux/kernel_stat.h>
+#include <linux/sched.h>
 #include <asm/mmu_context.h>
 #include <asm/io.h>
@@ -189,10 +190,8 @@ void bcm1480_mailbox_interrupt(void)
 	/* Clear the mailbox to clear the interrupt */
 	__raw_writeq(((u64)action)<<48, mailbox_0_clear_regs[cpu]);
-	/*
-	 * Nothing to do for SMP_RESCHEDULE_YOURSELF; returning from the
-	 * interrupt will do the reschedule for us
-	 */
+	if (action & SMP_RESCHEDULE_YOURSELF)
+		scheduler_ipi();
 	if (action & SMP_CALL_FUNCTION)
 		smp_call_function_interrupt();
......
@@ -21,6 +21,7 @@
 #include <linux/interrupt.h>
 #include <linux/smp.h>
 #include <linux/kernel_stat.h>
+#include <linux/sched.h>
 #include <asm/mmu_context.h>
 #include <asm/io.h>
@@ -177,10 +178,8 @@ void sb1250_mailbox_interrupt(void)
 	/* Clear the mailbox to clear the interrupt */
 	____raw_writeq(((u64)action) << 48, mailbox_clear_regs[cpu]);
-	/*
-	 * Nothing to do for SMP_RESCHEDULE_YOURSELF; returning from the
-	 * interrupt will do the reschedule for us
-	 */
+	if (action & SMP_RESCHEDULE_YOURSELF)
+		scheduler_ipi();
 	if (action & SMP_CALL_FUNCTION)
 		smp_call_function_interrupt();
......
@@ -494,14 +494,11 @@ void smp_send_stop(void)
  * @irq: The interrupt number.
  * @dev_id: The device ID.
  *
- * We need do nothing here, since the scheduling will be effected on our way
- * back through entry.S.
- *
  * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
  */
 static irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id)
 {
-	/* do nothing */
+	scheduler_ipi();
 	return IRQ_HANDLED;
 }
......
@@ -155,10 +155,7 @@ ipi_interrupt(int irq, void *dev_id)
 			case IPI_RESCHEDULE:
 				smp_debug(100, KERN_DEBUG "CPU%d IPI_RESCHEDULE\n", this_cpu);
-				/*
-				 * Reschedule callback.  Everything to be
-				 * done is done by the interrupt return path.
-				 */
+				scheduler_ipi();
 				break;
 			case IPI_CALL_FUNC:
......
@@ -116,7 +116,7 @@ void smp_message_recv(int msg)
 		generic_smp_call_function_interrupt();
 		break;
 	case PPC_MSG_RESCHEDULE:
-		/* we notice need_resched on exit */
+		scheduler_ipi();
 		break;
 	case PPC_MSG_CALL_FUNC_SINGLE:
 		generic_smp_call_function_single_interrupt();
@@ -146,7 +146,7 @@ static irqreturn_t call_function_action(int irq, void *data)
 static irqreturn_t reschedule_action(int irq, void *data)
 {
-	/* we just need the return path side effect of checking need_resched */
+	scheduler_ipi();
 	return IRQ_HANDLED;
 }
......
@@ -165,12 +165,12 @@ static void do_ext_call_interrupt(unsigned int ext_int_code,
 	kstat_cpu(smp_processor_id()).irqs[EXTINT_IPI]++;
 	/*
 	 * handle bit signal external calls
-	 *
-	 * For the ec_schedule signal we have to do nothing. All the work
-	 * is done automatically when we return from the interrupt.
 	 */
 	bits = xchg(&S390_lowcore.ext_call_fast, 0);
+	if (test_bit(ec_schedule, &bits))
+		scheduler_ipi();
 	if (test_bit(ec_call_function, &bits))
 		generic_smp_call_function_interrupt();
......
@@ -20,6 +20,7 @@
 #include <linux/module.h>
 #include <linux/cpu.h>
 #include <linux/interrupt.h>
+#include <linux/sched.h>
 #include <asm/atomic.h>
 #include <asm/processor.h>
 #include <asm/system.h>
@@ -323,6 +324,7 @@ void smp_message_recv(unsigned int msg)
 		generic_smp_call_function_interrupt();
 		break;
 	case SMP_MSG_RESCHEDULE:
+		scheduler_ipi();
 		break;
 	case SMP_MSG_FUNCTION_SINGLE:
 		generic_smp_call_function_single_interrupt();
......
@@ -156,11 +156,11 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 void smp_resched_interrupt(void)
 {
+	irq_enter();
+	scheduler_ipi();
 	local_cpu_data().irq_resched_count++;
-	/*
-	 * do nothing, since it all was about calling re-schedule
-	 * routine called by interrupt return code.
-	 */
+	irq_exit();
+	/* re-schedule routine called by interrupt return code. */
 }
 void smp_call_function_single_interrupt(void)
......
@@ -1368,6 +1368,7 @@ void smp_send_reschedule(int cpu)
 void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
 {
 	clear_softint(1 << irq);
+	scheduler_ipi();
 }
 /* This is a nop because we capture all other cpus
......
@@ -189,12 +189,8 @@ void flush_icache_range(unsigned long start, unsigned long end)
 /* Called when smp_send_reschedule() triggers IRQ_RESCHEDULE. */
 static irqreturn_t handle_reschedule_ipi(int irq, void *token)
 {
-	/*
-	 * Nothing to do here; when we return from interrupt, the
-	 * rescheduling will occur there. But do bump the interrupt
-	 * profiler count in the meantime.
-	 */
 	__get_cpu_var(irq_stat).irq_resched_count++;
+	scheduler_ipi();
 	return IRQ_HANDLED;
 }
......
@@ -173,7 +173,7 @@ void IPI_handler(int cpu)
 			break;
 		case 'R':
-			set_tsk_need_resched(current);
+			scheduler_ipi();
 			break;
 		case 'S':
......
@@ -194,14 +194,13 @@ static void native_stop_other_cpus(int wait)
 }
 /*
- * Reschedule call back. Nothing to do,
- * all the work is done automatically when
- * we return from the interrupt.
+ * Reschedule call back.
  */
 void smp_reschedule_interrupt(struct pt_regs *regs)
 {
 	ack_APIC_irq();
 	inc_irq_stat(irq_resched_count);
+	scheduler_ipi();
 	/*
 	 * KVM uses this interrupt to force a cpu out of guest mode
 	 */
......
@@ -46,13 +46,12 @@ static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
 static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
 /*
- * Reschedule call back. Nothing to do,
- * all the work is done automatically when
- * we return from the interrupt.
+ * Reschedule call back.
  */
 static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
 {
 	inc_irq_stat(irq_resched_count);
+	scheduler_ipi();
 	return IRQ_HANDLED;
 }
......
@@ -51,7 +51,7 @@ struct mutex {
 	spinlock_t		wait_lock;
 	struct list_head	wait_list;
 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
-	struct thread_info	*owner;
+	struct task_struct	*owner;
 #endif
 #ifdef CONFIG_DEBUG_MUTEXES
 	const char		*name;
......
@@ -360,7 +360,7 @@ extern signed long schedule_timeout_interruptible(signed long timeout);
 extern signed long schedule_timeout_killable(signed long timeout);
 extern signed long schedule_timeout_uninterruptible(signed long timeout);
 asmlinkage void schedule(void);
-extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
+extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
 struct nsproxy;
 struct user_namespace;
@@ -1048,8 +1048,12 @@ struct sched_domain;
 #define WF_FORK		0x02		/* child wakeup after fork */
 #define ENQUEUE_WAKEUP		1
-#define ENQUEUE_WAKING		2
-#define ENQUEUE_HEAD		4
+#define ENQUEUE_HEAD		2
+#ifdef CONFIG_SMP
+#define ENQUEUE_WAKING		4	/* sched_class::task_waking was called */
+#else
+#define ENQUEUE_WAKING		0
+#endif
 #define DEQUEUE_SLEEP		1
@@ -1067,12 +1071,11 @@ struct sched_class {
 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
 #ifdef CONFIG_SMP
-	int  (*select_task_rq)(struct rq *rq, struct task_struct *p,
-			       int sd_flag, int flags);
+	int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
 	void (*post_schedule) (struct rq *this_rq);
-	void (*task_waking) (struct rq *this_rq, struct task_struct *task);
+	void (*task_waking) (struct task_struct *task);
 	void (*task_woken) (struct rq *this_rq, struct task_struct *task);
 	void (*set_cpus_allowed)(struct task_struct *p,
@@ -1200,10 +1203,10 @@ struct task_struct {
 	int lock_depth;		/* BKL lock depth */
 #ifdef CONFIG_SMP
-#ifdef __ARCH_WANT_UNLOCKED_CTXSW
-	int oncpu;
-#endif
+	struct task_struct *wake_entry;
+	int on_cpu;
 #endif
+	int on_rq;
 	int prio, static_prio, normal_prio;
 	unsigned int rt_priority;
@@ -1274,6 +1277,7 @@ struct task_struct {
 	/* Revert to default priority/policy when forking */
 	unsigned sched_reset_on_fork:1;
+	unsigned sched_contributes_to_load:1;
 	pid_t pid;
 	pid_t tgid;
@@ -2192,8 +2196,10 @@ extern void set_task_comm(struct task_struct *tsk, char *from);
 extern char *get_task_comm(char *to, struct task_struct *tsk);
 #ifdef CONFIG_SMP
+void scheduler_ipi(void);
 extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
 #else
+static inline void scheduler_ipi(void) { }
 static inline unsigned long wait_task_inactive(struct task_struct *p,
 					       long match_state)
 {
......
@@ -827,6 +827,11 @@ config SCHED_AUTOGROUP
 	  desktop applications.  Task group autogeneration is currently based
 	  upon task session.
+config SCHED_TTWU_QUEUE
+	bool
+	depends on !SPARC32
+	default y
 config MM_OWNER
 	bool
......
@@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lock)
 		return;
 	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
-	DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
+	DEBUG_LOCKS_WARN_ON(lock->owner != current);
 	DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
 	mutex_clear_owner(lock);
 }
......
@@ -29,7 +29,7 @@ extern void debug_mutex_init(struct mutex *lock, const char *name,
 static inline void mutex_set_owner(struct mutex *lock)
 {
-	lock->owner = current_thread_info();
+	lock->owner = current;
 }
 static inline void mutex_clear_owner(struct mutex *lock)
......
@@ -160,7 +160,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	 */
 	for (;;) {
-		struct thread_info *owner;
+		struct task_struct *owner;
 		/*
 		 * If we own the BKL, then don't spin. The owner of
......
@@ -19,7 +19,7 @@
 #ifdef CONFIG_SMP
 static inline void mutex_set_owner(struct mutex *lock)
 {
-	lock->owner = current_thread_info();
+	lock->owner = current;
 }
 static inline void mutex_clear_owner(struct mutex *lock)
......
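
The mutex hunks above switch owner tracking from struct thread_info to struct task_struct, so the optimistic-spinning path can consult the new p->on_cpu flag directly. A hedged sketch of the spin-on-owner idea, with a hypothetical function name and with the owner-lifetime handling of the real mutex_spin_on_owner() omitted:

/*
 * Sketch only: approximates owner-based adaptive spinning with a
 * task_struct owner.  Real code must also guard against the owner task
 * going away while we peek at it; that part is omitted here.
 */
static int spin_on_owner_sketch(struct mutex *lock, struct task_struct *owner)
{
	while (lock->owner == owner) {
		if (!owner->on_cpu)	/* owner was preempted: stop spinning */
			return 0;
		if (need_resched())	/* we should yield the CPU ourselves */
			return 0;
		cpu_relax();		/* polite busy-wait between polls */
	}
	return 1;			/* owner changed or released the lock */
}
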
This diff is collapsed.
@@ -152,7 +152,7 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
 	read_lock_irqsave(&tasklist_lock, flags);
 	do_each_thread(g, p) {
-		if (!p->se.on_rq || task_cpu(p) != rq_cpu)
+		if (!p->on_rq || task_cpu(p) != rq_cpu)
 			continue;
 		print_task(m, rq, p);
......
@@ -358,6 +358,10 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
 	}
 	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
+#ifndef CONFIG_64BIT
+	smp_wmb();
+	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
+#endif
 }
 /*
@@ -1372,12 +1376,25 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 #ifdef CONFIG_SMP
-static void task_waking_fair(struct rq *rq, struct task_struct *p)
+static void task_waking_fair(struct task_struct *p)
 {
 	struct sched_entity *se = &p->se;
 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
+	u64 min_vruntime;
-	se->vruntime -= cfs_rq->min_vruntime;
+#ifndef CONFIG_64BIT
+	u64 min_vruntime_copy;
+	do {
+		min_vruntime_copy = cfs_rq->min_vruntime_copy;
+		smp_rmb();
+		min_vruntime = cfs_rq->min_vruntime;
+	} while (min_vruntime != min_vruntime_copy);
+#else
+	min_vruntime = cfs_rq->min_vruntime;
+#endif
+	se->vruntime -= min_vruntime;
 }
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -1657,7 +1674,7 @@ static int select_idle_sibling(struct task_struct *p, int target)
  * preempt must be disabled.
  */
 static int
-select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_flags)
+select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
 {
 	struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
 	int cpu = smp_processor_id();
@@ -1789,10 +1806,7 @@ wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
 	 * This is especially important for buddies when the leftmost
 	 * task is higher priority than the buddy.
 	 */
-	if (unlikely(se->load.weight != NICE_0_LOAD))
-		gran = calc_delta_fair(gran, se);
-	return gran;
+	return calc_delta_fair(gran, se);
 }
 /*
......
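
The min_vruntime_copy additions above give 32-bit kernels a way to read the 64-bit cfs_rq->min_vruntime consistently without holding the runqueue lock: the updater publishes a second copy after an smp_wmb(), and task_waking_fair() retries until both reads agree. A standalone sketch of the same retry protocol, with illustrative names (only the smp_wmb()/smp_rmb() pairing mirrors the actual hunk):

/*
 * Sketch of the lock-free 64-bit read pattern used above.  The struct and
 * function names are placeholders, not kernel code.
 */
struct split_u64 {
	u64 value;	/* updated by the writer under its own lock */
	u64 copy;	/* published after the value, for lockless readers */
};

static void split_u64_update(struct split_u64 *s, u64 v)
{
	s->value = v;
	smp_wmb();		/* order the value before the copy */
	s->copy = v;
}

static u64 split_u64_read(const struct split_u64 *s)
{
	u64 copy, val;

	do {			/* retry until we observe a consistent pair */
		copy = s->copy;
		smp_rmb();	/* pairs with smp_wmb() in the updater */
		val = s->value;
	} while (val != copy);

	return val;
}
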
@@ -64,3 +64,9 @@ SCHED_FEAT(OWNER_SPIN, 1)
  * Decrement CPU power based on irq activity
  */
 SCHED_FEAT(NONIRQ_POWER, 1)
+
+/*
+ * Queue remote wakeups on the target CPU and process them
+ * using the scheduler IPI. Reduces rq->lock contention/bounces.
+ */
+SCHED_FEAT(TTWU_QUEUE, 1)
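
The new TTWU_QUEUE feature, together with the wake_entry and on_cpu fields added to task_struct, lets a waker queue a remote wakeup on the target CPU and send it the reschedule IPI; the target then completes the wakeup from scheduler_ipi() using only its local rq->lock. The core implementation presumably sits in the collapsed diff above, so the following is only a conceptual sketch in portable C11, with every name hypothetical:

/*
 * Conceptual sketch of "queue the wakeup, let the IPI finish it".
 * Not kernel code: the names and the C11 atomics stand in for the per-CPU
 * list and primitives the scheduler actually uses.
 */
#include <stdatomic.h>
#include <stddef.h>

struct task {
	struct task *wake_entry;	/* mirrors the new task_struct field */
};

struct wake_list {
	_Atomic(struct task *) head;	/* one such list per CPU */
};

/* Waker side: lock-free push; only the first entry triggers an IPI, so a
 * burst of remote wakeups costs a single interrupt. */
static void queue_remote_wakeup(struct wake_list *wl, struct task *p,
				void (*send_resched_ipi)(void))
{
	struct task *old = atomic_load(&wl->head);

	do {
		p->wake_entry = old;
	} while (!atomic_compare_exchange_weak(&wl->head, &old, p));

	if (old == NULL)
		send_resched_ipi();
}

/* Target CPU, called from its reschedule IPI: detach the whole list and
 * finish each wakeup locally, avoiding remote rq->lock acquisition. */
static void process_wake_list(struct wake_list *wl,
			      void (*finish_wakeup)(struct task *))
{
	struct task *p = atomic_exchange(&wl->head, NULL);

	while (p) {
		struct task *next = p->wake_entry;

		finish_wakeup(p);
		p = next;
	}
}
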
@@ -7,7 +7,7 @@
 #ifdef CONFIG_SMP
 static int
-select_task_rq_idle(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
+select_task_rq_idle(struct task_struct *p, int sd_flag, int flags)
 {
 	return task_cpu(p); /* IDLE tasks as never migrated */
 }
......
@@ -977,13 +977,23 @@ static void yield_task_rt(struct rq *rq)
 static int find_lowest_rq(struct task_struct *task);
 static int
-select_task_rq_rt(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
+select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
 {
+	struct task_struct *curr;
+	struct rq *rq;
+	int cpu;
 	if (sd_flag != SD_BALANCE_WAKE)
 		return smp_processor_id();
+	cpu = task_cpu(p);
+	rq = cpu_rq(cpu);
+	rcu_read_lock();
+	curr = ACCESS_ONCE(rq->curr); /* unlocked access */
 	/*
-	 * If the current task is an RT task, then
+	 * If the current task on @p's runqueue is an RT task, then
 	 * try to see if we can wake this RT task up on another
 	 * runqueue. Otherwise simply start this RT task
 	 * on its current runqueue.
@@ -997,21 +1007,25 @@ select_task_rq_rt(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
 	 * lock?
 	 *
 	 * For equal prio tasks, we just let the scheduler sort it out.
+	 *
+	 * Otherwise, just let it ride on the affined RQ and the
+	 * post-schedule router will push the preempted task away
+	 *
+	 * This test is optimistic, if we get it wrong the load-balancer
+	 * will have to sort it out.
 	 */
-	if (unlikely(rt_task(rq->curr)) &&
-	    (rq->curr->rt.nr_cpus_allowed < 2 ||
-	     rq->curr->prio < p->prio) &&
+	if (curr && unlikely(rt_task(curr)) &&
+	    (curr->rt.nr_cpus_allowed < 2 ||
+	     curr->prio < p->prio) &&
 	    (p->rt.nr_cpus_allowed > 1)) {
-		int cpu = find_lowest_rq(p);
+		int target = find_lowest_rq(p);
-		return (cpu == -1) ? task_cpu(p) : cpu;
+		if (target != -1)
+			cpu = target;
 	}
+	rcu_read_unlock();
-	/*
-	 * Otherwise, just let it ride on the affined RQ and the
-	 * post-schedule router will push the preempted task away
-	 */
-	return task_cpu(p);
+	return cpu;
 }
 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
@@ -1136,7 +1150,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 	 * The previous task needs to be made eligible for pushing
 	 * if it is still active
 	 */
-	if (p->se.on_rq && p->rt.nr_cpus_allowed > 1)
+	if (on_rt_rq(&p->rt) && p->rt.nr_cpus_allowed > 1)
 		enqueue_pushable_task(rq, p);
 }
@@ -1287,7 +1301,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 				     !cpumask_test_cpu(lowest_rq->cpu,
 						       &task->cpus_allowed) ||
 				     task_running(rq, task) ||
-				     !task->se.on_rq)) {
+				     !task->on_rq)) {
 			raw_spin_unlock(&lowest_rq->lock);
 			lowest_rq = NULL;
@@ -1321,7 +1335,7 @@ static struct task_struct *pick_next_pushable_task(struct rq *rq)
 	BUG_ON(task_current(rq, p));
 	BUG_ON(p->rt.nr_cpus_allowed <= 1);
-	BUG_ON(!p->se.on_rq);
+	BUG_ON(!p->on_rq);
 	BUG_ON(!rt_task(p));
 	return p;
@@ -1467,7 +1481,7 @@ static int pull_rt_task(struct rq *this_rq)
 		 */
 		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
 			WARN_ON(p == src_rq->curr);
-			WARN_ON(!p->se.on_rq);
+			WARN_ON(!p->on_rq);
 			/*
 			 * There's a chance that p is higher in priority
@@ -1538,7 +1552,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
 	 * Update the migration status of the RQ if we have an RT task
 	 * which is running AND changing its weight value.
 	 */
-	if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
+	if (p->on_rq && (weight != p->rt.nr_cpus_allowed)) {
 		struct rq *rq = task_rq(p);
 		if (!task_current(rq, p)) {
@@ -1608,7 +1622,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p)
 	 * we may need to handle the pulling of RT tasks
 	 * now.
 	 */
-	if (p->se.on_rq && !rq->rt.rt_nr_running)
+	if (p->on_rq && !rq->rt.rt_nr_running)
 		pull_rt_task(rq);
 }
@@ -1638,7 +1652,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
 	 * If that current running task is also an RT task
 	 * then see if we can move to another run queue.
 	 */
-	if (p->se.on_rq && rq->curr != p) {
+	if (p->on_rq && rq->curr != p) {
 #ifdef CONFIG_SMP
 		if (rq->rt.overloaded && push_rt_task(rq) &&
 		    /* Don't resched if we changed runqueues */
@@ -1657,7 +1671,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
 static void
 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
 {
-	if (!p->se.on_rq)
+	if (!p->on_rq)
 		return;
 	if (rq->curr == p) {
......
@@ -9,8 +9,7 @@
 #ifdef CONFIG_SMP
 static int
-select_task_rq_stop(struct rq *rq, struct task_struct *p,
-		    int sd_flag, int flags)
+select_task_rq_stop(struct task_struct *p, int sd_flag, int flags)
 {
 	return task_cpu(p); /* stop tasks as never migrate */
 }
@@ -26,7 +25,7 @@ static struct task_struct *pick_next_task_stop(struct rq *rq)
 {
 	struct task_struct *stop = rq->stop;
-	if (stop && stop->se.on_rq)
+	if (stop && stop->on_rq)
 		return stop;
 	return NULL;
......