Commit ed0bc98f authored by Nicholas Piggin, committed by Michael Ellerman

powerpc/64s: Reimplement power4_idle code in C

This implements the tricky tracing and soft irq handling bits in C,
leaving the low level bit to asm.

A functional difference is that this redirects the interrupt exit to
a return stub to execute blr, rather than the lr address itself. This
is probably barely measurable on real hardware, but it keeps the link
stack balanced.
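
To illustrate the return-stub idea, here is a minimal user-space C sketch; fake_regs, idle_nap_return and fixup_nap are hypothetical stand-ins for the kernel's pt_regs, power4_idle_nap_return and power4_fixup_nap, not kernel code:

    #include <stdio.h>

    /* Hypothetical stand-in for the saved register state (pt_regs):
     * only the two fields this patch cares about are modelled. */
    struct fake_regs {
            unsigned long nip;      /* address the interrupt exit resumes at */
            unsigned long link;     /* caller's saved link register (LR) */
    };

    /* Stand-in for power4_idle_nap_return: a stub whose only job is
     * to return, i.e. the blr that the old code never executed. */
    static void idle_nap_return(void)
    {
    }

    static void fixup_nap(struct fake_regs *regs)
    {
            /* Old behaviour: resume at the LR value itself, the
             * "equivalent of a blr" without executing one, leaving the
             * bl that entered the idle function unmatched on the
             * hardware link stack:
             *
             *      regs->nip = regs->link;
             *
             * New behaviour: route the exit through a real return, so
             * every bl is paired with a blr and the return-address
             * predictor stays balanced. */
            regs->nip = (unsigned long)idle_nap_return;
    }

    int main(void)
    {
            struct fake_regs regs = { .nip = 0, .link = 0x1234 };

            fixup_nap(&regs);
            printf("resume at %#lx (return stub), not %#lx (saved LR)\n",
                   regs.nip, regs.link);
            return 0;
    }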

Tested with QEMU.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
[mpe: Move power4_fixup_nap back into exceptions-64s.S]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20190711022404.18132-1-npiggin@gmail.com
parent 30e813cf
@@ -412,6 +412,9 @@ static inline unsigned long get_clean_sp(unsigned long sp, int is_32)
 extern unsigned long isa300_idle_stop_noloss(unsigned long psscr_val);
 extern unsigned long isa300_idle_stop_mayloss(unsigned long psscr_val);
 extern unsigned long isa206_idle_insn_mayloss(unsigned long type);
+#ifdef CONFIG_PPC_970_NAP
+extern void power4_idle_nap(void);
+#endif
 extern unsigned long cpuidle_disable;
 enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF};
@@ -62,8 +62,7 @@ obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o
 obj-$(CONFIG_PPC_BARRIER_NOSPEC) += security.o
 obj-$(CONFIG_PPC64)            += vdso64/
 obj-$(CONFIG_ALTIVEC)          += vecemu.o
-obj-$(CONFIG_PPC_970_NAP)      += idle_power4.o
-obj-$(CONFIG_PPC_P7_NAP)       += idle_book3s.o
+obj-$(CONFIG_PPC_BOOK3S_IDLE)  += idle_book3s.o
 procfs-y                       := proc_powerpc.o
 obj-$(CONFIG_PROC_FS)          += $(procfs-y)
 rtaspci-$(CONFIG_PPC64)-$(CONFIG_PCI) := rtas_pci.o
@@ -2208,11 +2208,20 @@ __end_interrupts:
 DEFINE_FIXED_SYMBOL(__end_interrupts)

 #ifdef CONFIG_PPC_970_NAP
+	/*
+	 * Called by exception entry code if _TLF_NAPPING was set, this clears
+	 * the NAPPING flag, and redirects the exception exit to
+	 * power4_idle_nap_return.
+	 */
+	.globl power4_fixup_nap
 EXC_COMMON_BEGIN(power4_fixup_nap)
 	andc	r9,r9,r10
 	std	r9,TI_LOCAL_FLAGS(r11)
-	ld	r10,_LINK(r1)		/* make idle task do the */
-	std	r10,_NIP(r1)		/* equivalent of a blr */
+	LOAD_REG_ADDR(r10, power4_idle_nap_return)
+	std	r10,_NIP(r1)
 	blr
+
+power4_idle_nap_return:
+	blr
 #endif
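
For readers less familiar with the asm: andc r9,r9,r10 is an AND with complement, i.e. it clears the _TLF_NAPPING bit (held in r10) out of the thread-local flags (r9) before storing them back. A small runnable C equivalent; fake_thread_info and the flag value are illustrative stand-ins only:

    #include <assert.h>

    #define FAKE_TLF_NAPPING        (1UL << 0)      /* illustrative value */

    struct fake_thread_info {
            unsigned long local_flags;      /* TI_LOCAL_FLAGS */
    };

    /* C equivalent of "andc r9,r9,r10; std r9,TI_LOCAL_FLAGS(r11)":
     * clear the napping flag so later exceptions exit normally. */
    static void clear_napping(struct fake_thread_info *ti)
    {
            ti->local_flags &= ~FAKE_TLF_NAPPING;
    }

    int main(void)
    {
            struct fake_thread_info ti = { .local_flags = FAKE_TLF_NAPPING };

            clear_napping(&ti);
            assert(!(ti.local_flags & FAKE_TLF_NAPPING));
            return 0;
    }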
@@ -77,6 +77,31 @@ void arch_cpu_idle(void)

 int powersave_nap;

+#ifdef CONFIG_PPC_970_NAP
+void power4_idle(void)
+{
+	if (!cpu_has_feature(CPU_FTR_CAN_NAP))
+		return;
+
+	if (!powersave_nap)
+		return;
+
+	if (!prep_irq_for_idle())
+		return;
+
+	if (cpu_has_feature(CPU_FTR_ALTIVEC))
+		asm volatile("DSSALL ; sync" ::: "memory");
+
+	power4_idle_nap();
+
+	/*
+	 * power4_idle_nap returns to our caller with interrupts enabled
+	 * (soft and hard). Our caller can cope with either interrupts
+	 * disabled or enabled upon return.
+	 */
+}
+#endif
+
 #ifdef CONFIG_SYSCTL
 /*
  * Register the sysctl to set/clear powersave_nap.
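
The "tricky tracing and soft irq handling bits" now live in prep_irq_for_idle() rather than being open-coded; the deleted idle_power4.S further down labels the corresponding asm sequence "similar to prep_irq_for_idle()". Below is a hedged user-space sketch of what that sequence does, with names taken from the old asm; fake_paca and the flag values are illustrative, not the kernel's implementation:

    #include <stdbool.h>

    #define FAKE_PACA_IRQ_HARD_DIS  0x01    /* illustrative values */
    #define FAKE_IRQS_ENABLED       0x00

    struct fake_paca {
            unsigned char irq_happened;     /* PACAIRQHAPPENED */
            unsigned char irq_soft_mask;    /* PACAIRQSOFTMASK */
    };

    static bool prep_irq_for_idle_sketch(struct fake_paca *paca)
    {
            /* Step 1 (mfmsr/rldicl/rotldi/mtmsrd in the old asm):
             * hard-disable interrupts so nothing fires before nap is
             * actually entered. */

            /* Step 2 (lbz r0,PACAIRQHAPPENED(r13); bne 2f): if an
             * interrupt arrived while soft-disabled, record that we are
             * now hard disabled and refuse to enter the low power
             * state. */
            if (paca->irq_happened) {
                    paca->irq_happened |= FAKE_PACA_IRQ_HARD_DIS;
                    return false;
            }

            /* Step 3 (bl trace_hardirqs_on): tell the irq tracer that
             * interrupts are effectively on, because idle responds to
             * them. */

            /* Step 4 (li/stb to PACAIRQSOFTMASK): mark interrupts
             * soft-enabled; the mtmsrd that enters nap hard-enables
             * them as a side effect. */
            paca->irq_soft_mask = FAKE_IRQS_ENABLED;
            return true;
    }

    int main(void)
    {
            struct fake_paca paca = { .irq_happened = 0, .irq_soft_mask = 1 };

            return prep_irq_for_idle_sketch(&paca) ? 0 : 1;
    }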
@@ -15,7 +15,9 @@
 #include <asm/asm-offsets.h>
 #include <asm/ppc-opcode.h>
 #include <asm/cpuidle.h>
+#include <asm/thread_info.h> /* TLF_NAPPING */

+#ifdef CONFIG_PPC_P7_NAP
 /*
  * Desired PSSCR in r3
  *
@@ -181,4 +183,22 @@ _GLOBAL(isa206_idle_insn_mayloss)
 	bne	2f
 	IDLE_STATE_ENTER_SEQ_NORET(PPC_SLEEP)
 2:	IDLE_STATE_ENTER_SEQ_NORET(PPC_WINKLE)
+#endif
+
+#ifdef CONFIG_PPC_970_NAP
+_GLOBAL(power4_idle_nap)
+	LOAD_REG_IMMEDIATE(r7, MSR_KERNEL|MSR_EE|MSR_POW)
+	ld	r9,PACA_THREAD_INFO(r13)
+	ld	r8,TI_LOCAL_FLAGS(r9)
+	ori	r8,r8,_TLF_NAPPING
+	std	r8,TI_LOCAL_FLAGS(r9)
+	/*
+	 * NAPPING bit is set, from this point onward power4_fixup_nap
+	 * will cause exceptions to return to power4_idle_nap_return.
+	 */
+1:	sync
+	isync
+	mtmsrd	r7
+	isync
+	b	1b
+#endif
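
Why the final "b 1b" loops forever: mtmsrd r7 sets MSR[POW] together with MSR[EE], which stops the core until an interrupt arrives. Because _TLF_NAPPING is already set, the wakeup interrupt's exit goes through power4_fixup_nap and resumes at power4_idle_nap_return rather than here, so the loop is never left normally; the backward branch simply re-enters nap should execution ever resume inline. What follows is the old all-asm implementation, idle_power4.S (the source of the idle_power4.o rule dropped from the Makefile above), deleted by this patch.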
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * This file contains the power_save function for 970-family CPUs.
- */
-#include <linux/threads.h>
-#include <asm/processor.h>
-#include <asm/page.h>
-#include <asm/cputable.h>
-#include <asm/thread_info.h>
-#include <asm/ppc_asm.h>
-#include <asm/asm-offsets.h>
-#include <asm/irqflags.h>
-#include <asm/hw_irq.h>
-#include <asm/feature-fixups.h>
-
-#undef DEBUG
-
-	.text
-
-_GLOBAL(power4_idle)
-BEGIN_FTR_SECTION
-	blr
-END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP)
-	/* Now check if user or arch enabled NAP mode */
-	LOAD_REG_ADDRBASE(r3,powersave_nap)
-	lwz	r4,ADDROFF(powersave_nap)(r3)
-	cmpwi	0,r4,0
-	beqlr
-
-	/* This sequence is similar to prep_irq_for_idle() */
-
-	/* Hard disable interrupts */
-	mfmsr	r7
-	rldicl	r0,r7,48,1
-	rotldi	r0,r0,16
-	mtmsrd	r0,1
-
-	/* Check if something happened while soft-disabled */
-	lbz	r0,PACAIRQHAPPENED(r13)
-	cmpwi	cr0,r0,0
-	bne-	2f
-
-	/*
-	 * Soft-enable interrupts. This will make power4_fixup_nap return
-	 * to our caller with interrupts enabled (soft and hard). The caller
-	 * can cope with either interrupts disabled or enabled upon return.
-	 */
-#ifdef CONFIG_TRACE_IRQFLAGS
-	/* Tell the tracer interrupts are on, because idle responds to them. */
-	mflr	r0
-	std	r0,16(r1)
-	stdu	r1,-128(r1)
-	bl	trace_hardirqs_on
-	addi	r1,r1,128
-	ld	r0,16(r1)
-	mtlr	r0
-	mfmsr	r7
-#endif /* CONFIG_TRACE_IRQFLAGS */
-
-	li	r0,IRQS_ENABLED
-	stb	r0,PACAIRQSOFTMASK(r13)	/* we'll hard-enable shortly */
-
-BEGIN_FTR_SECTION
-	DSSALL
-	sync
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-	ld	r9, PACA_THREAD_INFO(r13)
-	ld	r8,TI_LOCAL_FLAGS(r9)	/* set napping bit */
-	ori	r8,r8,_TLF_NAPPING	/* so when we take an exception */
-	std	r8,TI_LOCAL_FLAGS(r9)	/* it will return to our caller */
-	ori	r7,r7,MSR_EE
-	oris	r7,r7,MSR_POW@h
-1:	sync
-	isync
-	mtmsrd	r7
-	isync
-	b	1b
-
-2:	/* Return if an interrupt had happened while soft disabled */
-	/* Set the HARD_DIS flag because interrupts are now hard disabled */
-	ori	r0,r0,PACA_IRQ_HARD_DIS
-	stb	r0,PACAIRQHAPPENED(r13)
-	blr
@@ -177,6 +177,10 @@ config PPC_970_NAP
 config PPC_P7_NAP
 	bool

+config PPC_BOOK3S_IDLE
+	def_bool y
+	depends on (PPC_970_NAP || PPC_P7_NAP)
+
 config PPC_INDIRECT_PIO
 	bool
 	select GENERIC_IOMAP
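
Note on the Kconfig change: def_bool y together with the depends line makes PPC_BOOK3S_IDLE a hidden symbol that is automatically enabled whenever either PPC_970_NAP or PPC_P7_NAP is selected. That is what lets the Makefile hunk above replace the two per-feature object rules with the single obj-$(CONFIG_PPC_BOOK3S_IDLE) += idle_book3s.o line covering both implementations.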