Commit 0e67de54 authored by Paul Mackerras's avatar Paul Mackerras

PPC32: make the idle loop able to be platform-specific.

parent 1decdfc7
...@@ -23,13 +23,13 @@ obj-y := entry.o traps.o irq.o idle.o time.o misc.o \ ...@@ -23,13 +23,13 @@ obj-y := entry.o traps.o irq.o idle.o time.o misc.o \
process.o signal.o ptrace.o align.o \ process.o signal.o ptrace.o align.o \
semaphore.o syscalls.o setup.o \ semaphore.o syscalls.o setup.o \
cputable.o ppc_htab.o cputable.o ppc_htab.o
obj-$(CONFIG_6xx) += l2cr.o obj-$(CONFIG_6xx) += l2cr.o ppc6xx_idle.o
obj-$(CONFIG_ALL_PPC) += ppc6xx_idle.o
obj-$(CONFIG_MODULES) += ppc_ksyms.o obj-$(CONFIG_MODULES) += ppc_ksyms.o
obj-$(CONFIG_PCI) += pci.o obj-$(CONFIG_PCI) += pci.o
ifneq ($(CONFIG_PPC_ISERIES),y) ifneq ($(CONFIG_PPC_ISERIES),y)
obj-$(CONFIG_PCI) += pci-dma.o obj-$(CONFIG_PCI) += pci-dma.o
endif endif
obj-$(CONFIG_PPCBUG_NVRAM) += prep_nvram.o
obj-$(CONFIG_KGDB) += ppc-stub.o obj-$(CONFIG_KGDB) += ppc-stub.o
obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_TAU) += temp.o obj-$(CONFIG_TAU) += temp.o
......
/*
* BK Id: %F% %I% %G% %U% %#%
*/
/*
* Idle task for iSeries.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/cache.h>
#include <asm/cputable.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/iSeries/LparData.h>
#include <asm/iSeries/HvCall.h>
#include <asm/hardirq.h>
static void yield_shared_processor(void);
static void run_light_on(int on);
extern unsigned long yield_count;
/*
 * Idle loop body for iSeries partitions, installed as ppc_md.idle.
 *
 * While there is nothing to run, this turns off the partition's
 * "run light", yields the (possibly shared) processor back to the
 * hypervisor, and then spins at low SMT priority until work arrives.
 * It always returns to its caller (cpu_idle()), which loops.
 */
void iSeries_idle(void)
{
	if (!need_resched()) {
		/* Turn off the run light */
		run_light_on(0);
		yield_shared_processor();
		HMT_low();	/* drop SMT thread priority while idle */
#ifdef CONFIG_SMP
		/*
		 * Advertise that this CPU polls need_resched(), so other
		 * CPUs can skip the wakeup IPI when queueing work here.
		 */
		set_thread_flag(TIF_POLLING_NRFLAG);
		while (!need_resched())
			barrier();
		clear_thread_flag(TIF_POLLING_NRFLAG);
#endif
	}
	if (need_resched()) {
		/* Work is pending: relight the run latch and schedule it */
		run_light_on(1);
		schedule();
		return;
	}
}
extern void fake_interrupt(void);
extern u64 get_tb64(void);
static void run_light_on(int on)
{
unsigned long CTRL;
CTRL = mfspr(CTRLF);
CTRL = on? (CTRL | RUNLATCH): (CTRL & ~RUNLATCH);
mtspr(CTRLT, CTRL);
}
/*
 * Yield the physical processor back to the hypervisor for up to one
 * jiffy when this logical CPU is idle.
 *
 * Only acts when the partition runs on a shared processor
 * (paca->xLpPaca.xSharedProc); otherwise the body reduces to the
 * irq disable/enable poll at the top.  The irq toggles are the
 * mechanism by which pending LpEvents, IPIs and decrementer
 * interrupts get noticed and delivered.
 */
static void yield_shared_processor(void)
{
	struct Paca *paca;
	u64 tb;

	/* Poll for I/O events */
	local_irq_disable();
	local_irq_enable();

	/* The per-CPU Paca pointer is kept in SPRG1 */
	paca = (struct Paca *)mfspr(SPRG1);
	if ( paca->xLpPaca.xSharedProc ) {
		HvCall_setEnabledInterrupts( HvCall_MaskIPI |
			HvCall_MaskLpEvent |
			HvCall_MaskLpProd |
			HvCall_MaskTimeout );
		/*
		 * Check here for any of the above pending...
		 * IPI and Decrementers are indicated in ItLpPaca
		 * LpEvents are indicated on the LpQueue
		 *
		 * Disabling/enabling will check for LpEvents, IPIs
		 * and decrementers
		 */
		local_irq_disable();
		local_irq_enable();

		++yield_count;
		/* Get current tb value */
		tb = get_tb64();
		/* Compute future tb value when yield will expire */
		tb += tb_ticks_per_jiffy;

		/* Hand the processor back until 'tb', or until an event */
		HvCall_yieldProcessor( HvCall_YieldTimed, tb );

		/* Check here for any of the above pending or timeout expired*/
		local_irq_disable();
		/*
		 * The decrementer stops during the yield. Just force
		 * a fake decrementer now and the timer_interrupt
		 * code will straighten it all out
		 */
		paca->xLpPaca.xDecrInt = 1;
		local_irq_enable();
	}
}
...@@ -2,7 +2,8 @@ ...@@ -2,7 +2,8 @@
* Idle daemon for PowerPC. Idle daemon will handle any action * Idle daemon for PowerPC. Idle daemon will handle any action
* that needs to be taken when the system becomes idle. * that needs to be taken when the system becomes idle.
* *
* Written by Cort Dougan (cort@cs.nmt.edu) * Written by Cort Dougan (cort@cs.nmt.edu). Subsequently hacked
* on by Tom Rini, Armin Kuster, Paul Mackerras and others.
* *
* This program is free software; you can redistribute it and/or * This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License * modify it under the terms of the GNU General Public License
...@@ -29,175 +30,39 @@ ...@@ -29,175 +30,39 @@
#include <asm/mmu.h> #include <asm/mmu.h>
#include <asm/cache.h> #include <asm/cache.h>
#include <asm/cputable.h> #include <asm/cputable.h>
#ifdef CONFIG_PPC_ISERIES #include <asm/machdep.h>
#include <asm/time.h>
#include <asm/iSeries/LparData.h>
#include <asm/iSeries/HvCall.h>
#include <asm/hardirq.h>
static void yield_shared_processor(void);
static void run_light_on(int on);
extern unsigned long yield_count;
#else /* CONFIG_PPC_ISERIES */
#define run_light_on(x) do { } while (0)
#endif /* CONFIG_PPC_ISERIES */
void power_save(void);
unsigned long zero_paged_on;
unsigned long powersave_nap;
void default_idle(void) void default_idle(void)
{ {
int do_power_save = 0; void (*powersave)(void);
/* Check if CPU can powersave */ powersave = ppc_md.power_save;
if (cur_cpu_spec[smp_processor_id()]->cpu_features &
(CPU_FTR_CAN_DOZE | CPU_FTR_CAN_NAP))
do_power_save = 1;
#ifdef CONFIG_PPC_ISERIES if (!need_resched()) {
if (!current->need_resched) { if (powersave != NULL)
/* Turn off the run light */ powersave();
run_light_on(0);
yield_shared_processor();
}
HMT_low();
#endif
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
if (!do_power_save) { else {
if (!need_resched()) {
set_thread_flag(TIF_POLLING_NRFLAG); set_thread_flag(TIF_POLLING_NRFLAG);
while (!test_thread_flag(TIF_NEED_RESCHED)) while (!need_resched())
barrier(); barrier();
clear_thread_flag(TIF_POLLING_NRFLAG); clear_thread_flag(TIF_POLLING_NRFLAG);
} }
}
#endif #endif
if (do_power_save && !need_resched())
power_save();
if (need_resched()) {
run_light_on(1);
schedule();
} }
#ifdef CONFIG_PPC_ISERIES if (need_resched())
else { schedule();
run_light_on(0);
yield_shared_processor();
HMT_low();
}
#endif /* CONFIG_PPC_ISERIES */
} }
/* /*
* SMP entry into the idle task - calls the same thing as the * The body of the idle task.
* non-smp versions. -- Cort
*/ */
int cpu_idle(void) int cpu_idle(void)
{ {
for (;;) for (;;)
default_idle(); if (ppc_md.idle != NULL)
ppc_md.idle();
else
default_idle();
return 0; return 0;
} }
void power_save(void)
{
unsigned long hid0;
int nap = powersave_nap;
/* 7450 has no DOZE mode mode, we return if powersave_nap
* isn't enabled
*/
if (!(nap || (cur_cpu_spec[smp_processor_id()]->cpu_features & CPU_FTR_CAN_DOZE)))
return;
/*
* Disable interrupts to prevent a lost wakeup
* when going to sleep. This is necessary even with
* RTLinux since we are not guaranteed an interrupt
* didn't come in and is waiting for a local_irq_enable() before
* emulating one. This way, we really do hard disable.
*
* We assume that we're sti-ed when we come in here. We
* are in the idle loop so if we're cli-ed then it's a bug
* anyway.
* -- Cort
*/
_nmask_and_or_msr(MSR_EE, 0);
if (!need_resched())
{
asm("mfspr %0,1008" : "=r" (hid0) :);
hid0 &= ~(HID0_NAP | HID0_SLEEP | HID0_DOZE);
hid0 |= (powersave_nap? HID0_NAP: HID0_DOZE) | HID0_DPM;
asm("mtspr 1008,%0" : : "r" (hid0));
/* set the POW bit in the MSR, and enable interrupts
* so we wake up sometime! */
_nmask_and_or_msr(0, MSR_POW | MSR_EE);
}
_nmask_and_or_msr(0, MSR_EE);
}
#ifdef CONFIG_PPC_ISERIES
extern void fake_interrupt(void);
extern u64 get_tb64(void);
void run_light_on(int on)
{
unsigned long CTRL;
CTRL = mfspr(CTRLF);
CTRL = on? (CTRL | RUNLATCH): (CTRL & ~RUNLATCH);
mtspr(CTRLT, CTRL);
}
void yield_shared_processor(void)
{
struct Paca *paca;
u64 tb;
/* Poll for I/O events */
local_irq_disable();
local_irq_enable();
paca = (struct Paca *)mfspr(SPRG1);
if ( paca->xLpPaca.xSharedProc ) {
HvCall_setEnabledInterrupts( HvCall_MaskIPI |
HvCall_MaskLpEvent |
HvCall_MaskLpProd |
HvCall_MaskTimeout );
/*
* Check here for any of the above pending...
* IPI and Decrementers are indicated in ItLpPaca
* LpEvents are indicated on the LpQueue
*
* Disabling/enabling will check for LpEvents, IPIs
* and decrementers
*/
local_irq_disable();
local_irq_enable();
++yield_count;
/* Get current tb value */
tb = get_tb64();
/* Compute future tb value when yield will expire */
tb += tb_ticks_per_jiffy;
HvCall_yieldProcessor( HvCall_YieldTimed, tb );
/* Check here for any of the above pending or timeout expired*/
local_irq_disable();
/*
* The decrementer stops during the yield. Just force
* a fake decrementer now and the timer_interrupt
* code will straighten it all out
*/
paca->xLpPaca.xDecrInt = 1;
local_irq_enable();
}
}
#endif /* CONFIG_PPC_ISERIES */
/*
* BK Id: %F% %I% %G% %U% %#%
*/
/*
* power_save() rountine for classic PPC CPUs.
*
* Written by Cort Dougan (cort@cs.nmt.edu)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/stringify.h>
#include <asm/cputable.h>
#include <asm/current.h>
#include <asm/processor.h>
/* Tunable: nonzero selects HID0_NAP instead of HID0_DOZE in ppc6xx_idle(). */
unsigned long powersave_nap = 0;

/*
 * Raw encoding of the AltiVec "dssall" instruction (stop all data
 * streams), spelled numerically so pre-AltiVec assemblers still build
 * this file.  Emitted via __stringify() inside inline asm below.
 */
#define DSSALL .long (0x1f<<26)+(0x10<<21)+(0x336<<1)
/*
 * power_save hook (ppc_md.power_save) for classic 6xx-family CPUs:
 * drop into the DOZE or NAP low-power state until the next interrupt.
 *
 * NAP is used when powersave_nap is set, DOZE otherwise.  Interrupts
 * are hard-disabled around the check of need_resched() so a wakeup
 * cannot be lost between the check and setting MSR[POW].
 */
void
ppc6xx_idle(void)
{
	unsigned long hid0;
	int nap = powersave_nap;

	/*
	 * The 7450 has no DOZE mode, so bail out unless NAP has been
	 * enabled via powersave_nap.
	 */
	if (!(nap || (cur_cpu_spec[smp_processor_id()]->cpu_features
		      & CPU_FTR_CAN_DOZE)))
		return;

	/*
	 * Disable interrupts to prevent a lost wakeup
	 * when going to sleep. This is necessary even with
	 * RTLinux since we are not guaranteed an interrupt
	 * didn't come in and is waiting for a __sti() before
	 * emulating one. This way, we really do hard disable.
	 *
	 * We assume that we're sti-ed when we come in here. We
	 * are in the idle loop so if we're cli-ed then it's a bug
	 * anyway.
	 * -- Cort
	 */
	_nmask_and_or_msr(MSR_EE, 0);
	if (!need_resched()) {
		/* SPR 1008 is HID0; select the power-save mode bits */
		__asm__ __volatile__("mfspr %0,1008":"=r"(hid0):);
		hid0 &= ~(HID0_NAP | HID0_SLEEP | HID0_DOZE);
		hid0 |= (powersave_nap ? HID0_NAP : HID0_DOZE) | HID0_DPM;
		__asm__ __volatile__("mtspr 1008,%0"::"r"(hid0));

		/*
		 * Flush pending data streams first; this instruction is
		 * assumed to exist on all AltiVec-capable CPUs.  The
		 * __ftr_fixup section entry makes it a no-op elsewhere.
		 */
		__asm__ __volatile__("98: " __stringify(DSSALL) "\n"
				     " sync\n"
				     "99:\n"
				     ".section __ftr_fixup,\"a\"\n"
				     " .long %0\n"
				     " .long %1\n"
				     " .long 98b\n"
				     " .long 99b\n"
				     ".previous"::"i"
				     (CPU_FTR_ALTIVEC), "i"(CPU_FTR_ALTIVEC));
		/* set the POW bit in the MSR, and enable interrupts
		 * so we wake up sometime! */
		_nmask_and_or_msr(0, MSR_POW | MSR_EE);
	}
	/* Back awake (or never slept): ensure interrupts are enabled */
	_nmask_and_or_msr(0, MSR_EE);
}
...@@ -54,6 +54,8 @@ extern void xmon_map_scc(void); ...@@ -54,6 +54,8 @@ extern void xmon_map_scc(void);
extern void kgdb_map_scc(void); extern void kgdb_map_scc(void);
#endif #endif
extern void ppc6xx_idle(void);
extern boot_infos_t *boot_infos; extern boot_infos_t *boot_infos;
char saved_command_line[256]; char saved_command_line[256];
unsigned char aux_device_present; unsigned char aux_device_present;
...@@ -528,6 +530,10 @@ machine_init(unsigned long r3, unsigned long r4, unsigned long r5, ...@@ -528,6 +530,10 @@ machine_init(unsigned long r3, unsigned long r4, unsigned long r5,
strcpy(cmd_line, CONFIG_CMDLINE); strcpy(cmd_line, CONFIG_CMDLINE);
#endif /* CONFIG_CMDLINE */ #endif /* CONFIG_CMDLINE */
#if defined(CONFIG_6xx) || defined(CONFIG_ALL_PPC)
ppc_md.power_save = ppc6xx_idle;
#endif
platform_init(r3, r4, r5, r6, r7); platform_init(r3, r4, r5, r6, r7);
if (ppc_md.progress) if (ppc_md.progress)
......
...@@ -57,6 +57,7 @@ static void setup_iSeries_cache_sizes( void ); ...@@ -57,6 +57,7 @@ static void setup_iSeries_cache_sizes( void );
extern void iSeries_pci_Initialize(void); extern void iSeries_pci_Initialize(void);
static int iSeries_show_cpuinfo(struct seq_file *m); static int iSeries_show_cpuinfo(struct seq_file *m);
static int iSeries_show_percpuinfo(struct seq_file *m, int i); static int iSeries_show_percpuinfo(struct seq_file *m, int i);
extern void iSeries_idle(void);
extern struct pci_ops iSeries_pci_ops; extern struct pci_ops iSeries_pci_ops;
/* Global Variables */ /* Global Variables */
...@@ -537,6 +538,8 @@ iSeries_setup_arch(void) ...@@ -537,6 +538,8 @@ iSeries_setup_arch(void)
mf_init(); mf_init();
viopath_init(); viopath_init();
*/ */
ppc_md.idle = iSeries_idle;
} }
/* /*
......
...@@ -37,6 +37,9 @@ struct machdep_calls { ...@@ -37,6 +37,9 @@ struct machdep_calls {
void (*power_off)(void); void (*power_off)(void);
void (*halt)(void); void (*halt)(void);
void (*idle)(void);
void (*power_save)(void);
long (*time_init)(void); /* Optional, may be NULL */ long (*time_init)(void); /* Optional, may be NULL */
int (*set_rtc_time)(unsigned long nowtime); int (*set_rtc_time)(unsigned long nowtime);
unsigned long (*get_rtc_time)(void); unsigned long (*get_rtc_time)(void);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment