Commit bdc7c970 authored by Michael Ellerman

Merge branch 'topic/ppc-kvm' into next

Merge our topic branch shared with KVM. In particular this includes the
rewrite of the idle code into C.
parents 7ae3f6e1 e9cef018
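
The centrepiece of this merge is the conversion of the POWER8/POWER9 idle
entry/exit paths from assembly to C. As a minimal sketch only (not the
kernel's exact code), the new shape looks roughly like this, using the
isa300_* stub names declared in the processor.h hunk further down:

    /*
     * Sketch: the C idle code picks a stub based on whether the requested
     * stop level can lose state, and saves/restores SPRs in C around it.
     */
    static unsigned long idle_stop_sketch(unsigned long psscr)
    {
            unsigned long srr1;

            if (!(psscr & (PSSCR_EC | PSSCR_ESL))) {
                    /* shallow stop: hardware preserves all state */
                    srr1 = isa300_idle_stop_noloss(psscr);
            } else {
                    /* deep stop: the asm stub saves GPRs on the stack;
                     * additional SPRs are saved here in C beforehand */
                    srr1 = isa300_idle_stop_mayloss(psscr);
                    /* on wakeup, restore SPRs if SRR1 reports state loss */
            }
            return srr1;
    }
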
@@ -56,3 +56,35 @@ POWER9. Loads and stores to the watchpoint locations will not be
 trapped in GDB. The watchpoint is remembered, so if the guest is
 migrated back to the POWER8 host, it will start working again.
+
+Force enabling the DAWR
+=============================
+Kernels (since ~v5.2) have an option to force enable the DAWR via:
+
+  echo Y > /sys/kernel/debug/powerpc/dawr_enable_dangerous
+
+This enables the DAWR even on POWER9.
+
+This is a dangerous setting, USE AT YOUR OWN RISK.
+
+Some users may not care about a bad user crashing their box
+(ie. single user/desktop systems) and really want the DAWR. This
+allows them to force enable DAWR.
+
+This flag can also be used to disable DAWR access. Once this is
+cleared, all DAWR access should be cleared immediately and your
+machine once again safe from crashing.
+
+Userspace may get confused by toggling this. If DAWR is force
+enabled/disabled between getting the number of breakpoints (via
+PTRACE_GETHWDBGINFO) and setting the breakpoint, userspace will get an
+inconsistent view of what's available. Similarly for guests.
+
+For the DAWR to be enabled in a KVM guest, the DAWR needs to be force
+enabled in the host AND the guest. For this reason, this won't work on
+POWERVM as it doesn't allow the HCALL to work. Writes of 'Y' to the
+dawr_enable_dangerous file will fail if the hypervisor doesn't support
+writing the DAWR.
+
+To double check the DAWR is working, run this kernel selftest:
+  tools/testing/selftests/powerpc/ptrace/ptrace-hwbreak.c
+Any errors/failures/skips mean something is wrong.
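
The two-step ptrace flow the note above warns about looks roughly like this
from userspace. This is an illustrative sketch using the uapi names from
<asm/ptrace.h>, not the selftest's exact code:

    #include <sys/types.h>
    #include <sys/ptrace.h>
    #include <asm/ptrace.h> /* struct ppc_debug_info, struct ppc_hw_breakpoint */

    /* Set a ranged DAWR watchpoint on a traced child, if one is available. */
    static int set_dawr_watch(pid_t child, unsigned long addr, unsigned long len)
    {
            struct ppc_debug_info dbginfo;
            struct ppc_hw_breakpoint bp = { 0 };

            /* step 1: ask what breakpoint hardware the kernel will offer */
            if (ptrace(PPC_PTRACE_GETHWDBGINFO, child, NULL, &dbginfo))
                    return -1;
            if (!(dbginfo.features & PPC_DEBUG_FEATURE_DATA_BP_DAWR))
                    return -1; /* no DAWR: P9 without force enable, or pre-P8 */

            /* step 2: install a read/write watchpoint over [addr, addr+len) */
            bp.version = 1;
            bp.trigger_type = PPC_BREAKPOINT_TRIGGER_RW;
            bp.addr_mode = PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE;
            bp.condition_mode = PPC_BREAKPOINT_CONDITION_NONE;
            bp.addr = addr;
            bp.addr2 = addr + len;
            return ptrace(PPC_PTRACE_SETHWDEBUG, child, NULL, &bp);
    }

If dawr_enable_dangerous is toggled between step 1 and step 2, the two calls
disagree, which is exactly the inconsistent view described above.
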
@@ -27,10 +27,11 @@
  * the THREAD_WINKLE_BITS are set, which indicate which threads have not
  * yet woken from the winkle state.
  */
-#define PNV_CORE_IDLE_LOCK_BIT			0x10000000
+#define NR_PNV_CORE_IDLE_LOCK_BIT		28
+#define PNV_CORE_IDLE_LOCK_BIT			(1ULL << NR_PNV_CORE_IDLE_LOCK_BIT)
 
+#define PNV_CORE_IDLE_WINKLE_COUNT_SHIFT	16
 #define PNV_CORE_IDLE_WINKLE_COUNT		0x00010000
-#define PNV_CORE_IDLE_WINKLE_COUNT_ALL_BIT	0x00080000
 #define PNV_CORE_IDLE_WINKLE_COUNT_BITS		0x000F0000
 #define PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT	8
 #define PNV_CORE_IDLE_THREAD_WINKLE_BITS	0x0000FF00
@@ -68,16 +69,6 @@
 #define ERR_DEEP_STATE_ESL_MISMATCH	-2
 
 #ifndef __ASSEMBLY__
-/* Additional SPRs that need to be saved/restored during stop */
-struct stop_sprs {
-	u64 pid;
-	u64 ldbar;
-	u64 fscr;
-	u64 hfscr;
-	u64 mmcr1;
-	u64 mmcr2;
-	u64 mmcra;
-};
 
 #define PNV_IDLE_NAME_LEN	16
 struct pnv_idle_states_t {
@@ -92,10 +83,6 @@ struct pnv_idle_states_t {
 extern struct pnv_idle_states_t *pnv_idle_states;
 extern int nr_pnv_idle_states;
-extern u32 pnv_fastsleep_workaround_at_entry[];
-extern u32 pnv_fastsleep_workaround_at_exit[];
-
-extern u64 pnv_first_deep_stop_state;
 
 unsigned long pnv_cpu_offline(unsigned int cpu);
 int validate_psscr_val_mask(u64 *psscr_val, u64 *psscr_mask, u32 flags);
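
Turning the lock from a mask constant into a bit number
(NR_PNV_CORE_IDLE_LOCK_BIT) lets the new C code use the kernel's generic
bitops on the shared per-core word. A sketch of the locking pattern,
assuming the idle_state word introduced in the paca.h hunk below:

    /* Sketch: take/release the per-core idle lock with generic bitops. */
    static void core_idle_lock_sketch(unsigned long *state)
    {
            while (test_and_set_bit_lock(NR_PNV_CORE_IDLE_LOCK_BIT, state))
                    barrier(); /* spin until the owner clears the bit */
    }

    static void core_idle_unlock_sketch(unsigned long *state)
    {
            clear_bit_unlock(NR_PNV_CORE_IDLE_LOCK_BIT, state);
    }
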
...
@@ -90,10 +90,18 @@ static inline void hw_breakpoint_disable(void)
 extern void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs);
 int hw_breakpoint_handler(struct die_args *args);
 
+extern int set_dawr(struct arch_hw_breakpoint *brk);
+extern bool dawr_force_enable;
+static inline bool dawr_enabled(void)
+{
+	return dawr_force_enable;
+}
+
 #else	/* CONFIG_HAVE_HW_BREAKPOINT */
 static inline void hw_breakpoint_disable(void) { }
 static inline void thread_change_pc(struct task_struct *tsk,
 					struct pt_regs *regs) { }
+static inline bool dawr_enabled(void) { return false; }
 #endif	/* CONFIG_HAVE_HW_BREAKPOINT */
 #endif	/* __KERNEL__ */
 #endif	/* _PPC_BOOK3S_64_HW_BREAKPOINT_H */
@@ -186,8 +186,8 @@
 #define OPAL_XIVE_FREE_IRQ			140
 #define OPAL_XIVE_SYNC				141
 #define OPAL_XIVE_DUMP				142
-#define OPAL_XIVE_RESERVED3			143
-#define OPAL_XIVE_RESERVED4			144
+#define OPAL_XIVE_GET_QUEUE_STATE		143
+#define OPAL_XIVE_SET_QUEUE_STATE		144
 #define OPAL_SIGNAL_SYSTEM_RESET		145
 #define OPAL_NPU_INIT_CONTEXT			146
 #define OPAL_NPU_DESTROY_CONTEXT		147
@@ -210,7 +210,8 @@
 #define OPAL_PCI_GET_PBCQ_TUNNEL_BAR		164
 #define OPAL_PCI_SET_PBCQ_TUNNEL_BAR		165
 #define OPAL_NX_COPROC_INIT			167
-#define OPAL_LAST				167
+#define OPAL_XIVE_GET_VP_STATE			170
+#define OPAL_LAST				170
 
 #define QUIESCE_HOLD			1 /* Spin all calls at entry */
 #define QUIESCE_REJECT			2 /* Fail all calls with OPAL_BUSY */
...
@@ -279,6 +279,13 @@ int64_t opal_xive_allocate_irq(uint32_t chip_id);
 int64_t opal_xive_free_irq(uint32_t girq);
 int64_t opal_xive_sync(uint32_t type, uint32_t id);
 int64_t opal_xive_dump(uint32_t type, uint32_t id);
+int64_t opal_xive_get_queue_state(uint64_t vp, uint32_t prio,
+				  __be32 *out_qtoggle,
+				  __be32 *out_qindex);
+int64_t opal_xive_set_queue_state(uint64_t vp, uint32_t prio,
+				  uint32_t qtoggle,
+				  uint32_t qindex);
+int64_t opal_xive_get_vp_state(uint64_t vp, __be64 *out_w01);
 int64_t opal_pci_set_p2p(uint64_t phb_init, uint64_t phb_target,
 			uint64_t desc, uint16_t pe_number);
...
@@ -173,7 +173,6 @@ struct paca_struct {
 	u8 irq_happened;		/* irq happened while soft-disabled */
 	u8 io_sync;			/* writel() needs spin_unlock sync */
 	u8 irq_work_pending;		/* IRQ_WORK interrupt while soft-disable */
-	u8 nap_state_lost;		/* NV GPR values lost in power7_idle */
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	u8 pmcregs_in_use;		/* pseries puts this in lppaca */
 #endif
@@ -183,23 +182,28 @@ struct paca_struct {
 #endif
 
 #ifdef CONFIG_PPC_POWERNV
-	/* Per-core mask tracking idle threads and a lock bit-[L][TTTTTTTT] */
-	u32 *core_idle_state_ptr;
-	u8 thread_idle_state;		/* PNV_THREAD_RUNNING/NAP/SLEEP */
-	/* Mask to indicate thread id in core */
-	u8 thread_mask;
+	/* PowerNV idle fields */
+	/* PNV_CORE_IDLE_* bits, all siblings work on thread 0 paca */
+	unsigned long idle_state;
+	union {
+		/* P7/P8 specific fields */
+		struct {
+			/* PNV_THREAD_RUNNING/NAP/SLEEP */
+			u8 thread_idle_state;
 			/* Mask to denote subcore sibling threads */
 			u8 subcore_sibling_mask;
-	/* Flag to request this thread not to stop */
-	atomic_t dont_stop;
+		};
+
+		/* P9 specific fields */
+		struct {
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 			/* The PSSCR value that the kernel requested before going to stop */
 			u64 requested_psscr;
-
-	/*
-	 * Save area for additional SPRs that need to be
-	 * saved/restored during cpuidle stop.
-	 */
-	struct stop_sprs stop_sprs;
+
+			/* Flag to request this thread not to stop */
+			atomic_t dont_stop;
+#endif
+		};
+	};
 #endif
 
 #ifdef CONFIG_PPC_BOOK3S_64
...
@@ -414,14 +414,17 @@ static inline unsigned long get_clean_sp(unsigned long sp, int is_32)
 }
 #endif
 
+/* asm stubs */
+extern unsigned long isa300_idle_stop_noloss(unsigned long psscr_val);
+extern unsigned long isa300_idle_stop_mayloss(unsigned long psscr_val);
+extern unsigned long isa206_idle_insn_mayloss(unsigned long type);
+
 extern unsigned long cpuidle_disable;
 enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF};
 
 extern int powersave_nap;	/* set if nap mode can be used in idle loop */
-extern unsigned long power7_idle_insn(unsigned long type); /* PNV_THREAD_NAP/etc*/
 extern void power7_idle_type(unsigned long type);
-extern unsigned long power9_idle_stop(unsigned long psscr_val);
-extern unsigned long power9_offline_stop(unsigned long psscr_val);
 extern void power9_idle_type(unsigned long stop_psscr_val,
 			     unsigned long stop_psscr_mask);
...
@@ -168,6 +168,7 @@
 #define PSSCR_ESL		0x00200000 /* Enable State Loss */
 #define PSSCR_SD		0x00400000 /* Status Disable */
 #define PSSCR_PLS		0xf000000000000000 /* Power-saving Level Status */
+#define PSSCR_PLS_SHIFT	60
 #define PSSCR_GUEST_VIS	0xf0000000000003ffUL /* Guest-visible PSSCR fields */
 #define PSSCR_FAKE_SUSPEND	0x00000400 /* Fake-suspend bit (P9 DD2.2) */
 #define PSSCR_FAKE_SUSPEND_LG	10	   /* Fake-suspend bit position */
@@ -758,10 +759,9 @@
 #define SRR1_WAKERESET		0x00100000 /* System reset */
 #define SRR1_WAKEHDBELL	0x000c0000 /* Hypervisor doorbell on P8 */
 #define SRR1_WAKESTATE		0x00030000 /* Powersave exit mask [46:47] */
-#define SRR1_WS_DEEPEST	0x00030000 /* Some resources not maintained,
-					  * may not be recoverable */
-#define SRR1_WS_DEEPER		0x00020000 /* Some resources not maintained */
-#define SRR1_WS_DEEP		0x00010000 /* All resources maintained */
+#define SRR1_WS_HVLOSS		0x00030000 /* HV resources not maintained */
+#define SRR1_WS_GPRLOSS	0x00020000 /* GPRs not maintained */
+#define SRR1_WS_NOLOSS		0x00010000 /* All resources maintained */
 #define SRR1_PROGTM		0x00200000 /* TM Bad Thing */
 #define SRR1_PROGFPE		0x00100000 /* Floating Point Enabled */
 #define SRR1_PROGILL		0x00080000 /* Illegal instruction */
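
The SRR1_WS_* values are unchanged; only the names change, to say what was
actually lost rather than how deep the state was. A sketch of how a wakeup
path can classify state loss with the renamed macros (illustrative only):

    /* Sketch: classify the powersave wakeup by state loss. */
    static int srr1_state_loss_sketch(unsigned long srr1)
    {
            switch (srr1 & SRR1_WAKESTATE) {
            case SRR1_WS_HVLOSS:
                    return 2; /* even hypervisor resources were lost */
            case SRR1_WS_GPRLOSS:
                    return 1; /* GPRs lost; restore from the save area */
            default:
                    return 0; /* SRR1_WS_NOLOSS: everything maintained */
            }
    }
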
...
@@ -109,12 +109,26 @@ extern int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
 extern void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio);
 
 extern void xive_native_sync_source(u32 hw_irq);
+extern void xive_native_sync_queue(u32 hw_irq);
 extern bool is_xive_irq(struct irq_chip *chip);
 extern int xive_native_enable_vp(u32 vp_id, bool single_escalation);
 extern int xive_native_disable_vp(u32 vp_id);
 extern int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id);
 extern bool xive_native_has_single_escalation(void);
+
+extern int xive_native_get_queue_info(u32 vp_id, uint32_t prio,
+				      u64 *out_qpage,
+				      u64 *out_qsize,
+				      u64 *out_qeoi_page,
+				      u32 *out_escalate_irq,
+				      u64 *out_qflags);
+
+extern int xive_native_get_queue_state(u32 vp_id, uint32_t prio, u32 *qtoggle,
+				       u32 *qindex);
+extern int xive_native_set_queue_state(u32 vp_id, uint32_t prio, u32 qtoggle,
+				       u32 qindex);
+extern int xive_native_get_vp_state(u32 vp_id, u64 *out_state);
+
 #else
 static inline bool xive_enabled(void) { return false; }
...
@@ -271,7 +271,6 @@ int main(void)
 	OFFSET(ACCOUNT_USER_TIME, paca_struct, accounting.utime);
 	OFFSET(ACCOUNT_SYSTEM_TIME, paca_struct, accounting.stime);
 	OFFSET(PACA_TRAP_SAVE, paca_struct, trap_save);
-	OFFSET(PACA_NAPSTATELOST, paca_struct, nap_state_lost);
 	OFFSET(PACA_SPRG_VDSO, paca_struct, sprg_vdso);
 #else /* CONFIG_PPC64 */
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
@@ -773,23 +772,6 @@ int main(void)
 	OFFSET(VCPU_TIMING_LAST_ENTER_TBL, kvm_vcpu, arch.timing_last_enter.tv32.tbl);
 #endif
 
-#ifdef CONFIG_PPC_POWERNV
-	OFFSET(PACA_CORE_IDLE_STATE_PTR, paca_struct, core_idle_state_ptr);
-	OFFSET(PACA_THREAD_IDLE_STATE, paca_struct, thread_idle_state);
-	OFFSET(PACA_THREAD_MASK, paca_struct, thread_mask);
-	OFFSET(PACA_SUBCORE_SIBLING_MASK, paca_struct, subcore_sibling_mask);
-	OFFSET(PACA_REQ_PSSCR, paca_struct, requested_psscr);
-	OFFSET(PACA_DONT_STOP, paca_struct, dont_stop);
-#define STOP_SPR(x, f)	OFFSET(x, paca_struct, stop_sprs.f)
-	STOP_SPR(STOP_PID, pid);
-	STOP_SPR(STOP_LDBAR, ldbar);
-	STOP_SPR(STOP_FSCR, fscr);
-	STOP_SPR(STOP_HFSCR, hfscr);
-	STOP_SPR(STOP_MMCR1, mmcr1);
-	STOP_SPR(STOP_MMCR2, mmcr2);
-	STOP_SPR(STOP_MMCRA, mmcra);
-#endif
-
 	DEFINE(PPC_DBELL_SERVER, PPC_DBELL_SERVER);
 	DEFINE(PPC_DBELL_MSGTYPE, PPC_DBELL_MSGTYPE);
...
@@ -121,7 +121,9 @@ EXC_VIRT_NONE(0x4000, 0x100)
 	mfspr	r10,SPRN_SRR1 ;					\
 	rlwinm.	r10,r10,47-31,30,31 ;				\
 	beq-	1f ;						\
-	cmpwi	cr3,r10,2 ;					\
+	cmpwi	cr1,r10,2 ;					\
+	mfspr	r3,SPRN_SRR1 ;					\
+	bltlr	cr1 ;	/* no state loss, return to idle caller */ \
 	BRANCH_TO_C000(r10, system_reset_idle_common) ;	\
 1:								\
 	KVMTEST_PR(n) ;						\
@@ -145,8 +147,11 @@ TRAMP_KVM(PACA_EXNMI, 0x100)
 #ifdef CONFIG_PPC_P7_NAP
 EXC_COMMON_BEGIN(system_reset_idle_common)
-	mfspr	r12,SPRN_SRR1
-	b	pnv_powersave_wakeup
+	/*
+	 * This must be a direct branch (without linker branch stub) because
+	 * we can not use TOC at this point as r2 may not be restored yet.
+	 */
+	b	idle_return_gpr_loss
 #endif
 
 /*
@@ -429,17 +434,17 @@ EXC_COMMON_BEGIN(machine_check_idle_common)
 	 * Then decrement MCE nesting after finishing with the stack.
 	 */
 	ld	r3,_MSR(r1)
+	ld	r4,_LINK(r1)
 
 	lhz	r11,PACA_IN_MCE(r13)
 	subi	r11,r11,1
 	sth	r11,PACA_IN_MCE(r13)
 
-	/* Turn off the RI bit because SRR1 is used by idle wakeup code. */
-	/* Recoverability could be improved by reducing the use of SRR1. */
-	li	r11,0
-	mtmsrd	r11,1
-
-	b	pnv_powersave_wakeup_mce
+	mtlr	r4
+	rlwinm	r10,r3,47-31,30,31
+	cmpwi	cr1,r10,2
+	bltlr	cr1	/* no state loss, return to idle caller */
+	b	idle_return_gpr_loss
 #endif
 
 /*
 * Handle machine check early in real mode. We come here with
...
@@ -29,11 +29,15 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/smp.h>
+#include <linux/debugfs.h>
+#include <linux/init.h>
 
 #include <asm/hw_breakpoint.h>
 #include <asm/processor.h>
 #include <asm/sstep.h>
 #include <asm/debug.h>
+#include <asm/debugfs.h>
+#include <asm/hvcall.h>
 #include <linux/uaccess.h>
 
 /*
@@ -174,7 +178,7 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
 	if (!ppc_breakpoint_available())
 		return -ENODEV;
 	length_max = 8; /* DABR */
-	if (cpu_has_feature(CPU_FTR_DAWR)) {
+	if (dawr_enabled()) {
 		length_max = 512 ; /* 64 doublewords */
 		/* DAWR region can't cross 512 boundary */
 		if ((attr->bp_addr >> 9) !=
@@ -376,3 +380,59 @@ void hw_breakpoint_pmu_read(struct perf_event *bp)
 {
 	/* TODO */
 }
+
+bool dawr_force_enable;
+EXPORT_SYMBOL_GPL(dawr_force_enable);
+
+static ssize_t dawr_write_file_bool(struct file *file,
+				    const char __user *user_buf,
+				    size_t count, loff_t *ppos)
+{
+	struct arch_hw_breakpoint null_brk = {0, 0, 0};
+	size_t rc;
+
+	/* Send error to user if the hypervisor won't allow us to write DAWR */
+	if ((!dawr_force_enable) &&
+	    (firmware_has_feature(FW_FEATURE_LPAR)) &&
+	    (set_dawr(&null_brk) != H_SUCCESS))
+		return -1;
+
+	rc = debugfs_write_file_bool(file, user_buf, count, ppos);
+	if (rc)
+		return rc;
+
+	/* If we are clearing, make sure all CPUs have the DAWR cleared */
+	if (!dawr_force_enable)
+		smp_call_function((smp_call_func_t)set_dawr, &null_brk, 0);
+
+	return rc;
+}
+
+static const struct file_operations dawr_enable_fops = {
+	.read =		debugfs_read_file_bool,
+	.write =	dawr_write_file_bool,
+	.open =		simple_open,
+	.llseek =	default_llseek,
+};
+
+static int __init dawr_force_setup(void)
+{
+	dawr_force_enable = false;
+
+	if (cpu_has_feature(CPU_FTR_DAWR)) {
+		/* Don't setup sysfs file for user control on P8 */
+		dawr_force_enable = true;
+		return 0;
+	}
+
+	if (PVR_VER(mfspr(SPRN_PVR)) == PVR_POWER9) {
+		/* Turn DAWR off by default, but allow admin to turn it on */
+		dawr_force_enable = false;
+		debugfs_create_file_unsafe("dawr_enable_dangerous", 0600,
+					   powerpc_debugfs_root,
+					   &dawr_force_enable,
+					   &dawr_enable_fops);
+	}
+	return 0;
+}
+arch_initcall(dawr_force_setup);
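
One subtlety in dawr_write_file_bool() above: set_dawr() takes a struct
arch_hw_breakpoint *, so it is cast to smp_call_func_t (void (*)(void *))
to run on every CPU when the flag is cleared. A cast-free sketch of the
same broadcast, for illustration only:

    /* Sketch: clear the DAWR on all CPUs without the function-pointer cast. */
    static void set_dawr_cb(void *brk)
    {
            set_dawr(brk); /* brk is really a struct arch_hw_breakpoint * */
    }

    /* ... then: smp_call_function(set_dawr_cb, &null_brk, 0); */
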
... (diff collapsed)
@@ -67,6 +67,7 @@
 #include <asm/cpu_has_feature.h>
 #include <asm/asm-prototypes.h>
 #include <asm/stacktrace.h>
+#include <asm/hw_breakpoint.h>
 
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
@@ -784,7 +785,7 @@ static inline int set_dabr(struct arch_hw_breakpoint *brk)
 	return __set_dabr(dabr, dabrx);
 }
 
-static inline int set_dawr(struct arch_hw_breakpoint *brk)
+int set_dawr(struct arch_hw_breakpoint *brk)
 {
 	unsigned long dawr, dawrx, mrd;
 
@@ -816,7 +817,7 @@ void __set_breakpoint(struct arch_hw_breakpoint *brk)
 {
 	memcpy(this_cpu_ptr(&current_brk), brk, sizeof(*brk));
 
-	if (cpu_has_feature(CPU_FTR_DAWR))
+	if (dawr_enabled())
 		// Power8 or later
 		set_dawr(brk);
 	else if (!cpu_has_feature(CPU_FTR_ARCH_207S))
@@ -830,8 +831,8 @@ void __set_breakpoint(struct arch_hw_breakpoint *brk)
 /* Check if we have DAWR or DABR hardware */
 bool ppc_breakpoint_available(void)
 {
-	if (cpu_has_feature(CPU_FTR_DAWR))
-		return true; /* POWER8 DAWR */
+	if (dawr_enabled())
+		return true; /* POWER8 DAWR or POWER9 forced DAWR */
 	if (cpu_has_feature(CPU_FTR_ARCH_207S))
 		return false; /* POWER9 with DAWR disabled */
 	/* DABR: Everything but POWER8 and POWER9 */
...
@@ -43,6 +43,7 @@
 #include <asm/tm.h>
 #include <asm/asm-prototypes.h>
 #include <asm/debug.h>
+#include <asm/hw_breakpoint.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/syscalls.h>
@@ -3088,7 +3089,7 @@ long arch_ptrace(struct task_struct *child, long request,
 		dbginfo.sizeof_condition = 0;
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 		dbginfo.features = PPC_DEBUG_FEATURE_DATA_BP_RANGE;
-		if (cpu_has_feature(CPU_FTR_DAWR))
+		if (dawr_enabled())
 			dbginfo.features |= PPC_DEBUG_FEATURE_DATA_BP_DAWR;
 #else
 		dbginfo.features = 0;
...
@@ -401,8 +401,8 @@ void __init check_for_initrd(void)
 
 #ifdef CONFIG_SMP
 
-int threads_per_core, threads_per_subcore, threads_shift;
-cpumask_t threads_core_mask;
+int threads_per_core, threads_per_subcore, threads_shift __read_mostly;
+cpumask_t threads_core_mask __read_mostly;
 EXPORT_SYMBOL_GPL(threads_per_core);
 EXPORT_SYMBOL_GPL(threads_per_subcore);
 EXPORT_SYMBOL_GPL(threads_shift);
...
@@ -74,6 +74,7 @@
 #include <asm/opal.h>
 #include <asm/xics.h>
 #include <asm/xive.h>
+#include <asm/hw_breakpoint.h>
 
 #include "book3s.h"
 
@@ -3374,7 +3375,7 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
 	mtspr(SPRN_PURR, vcpu->arch.purr);
 	mtspr(SPRN_SPURR, vcpu->arch.spurr);
 
-	if (cpu_has_feature(CPU_FTR_DAWR)) {
+	if (dawr_enabled()) {
 		mtspr(SPRN_DAWR, vcpu->arch.dawr);
 		mtspr(SPRN_DAWRX, vcpu->arch.dawrx);
 	}
...
@@ -35,6 +35,7 @@
 #include <asm/thread_info.h>
 #include <asm/asm-compat.h>
 #include <asm/feature-fixups.h>
+#include <asm/cpuidle.h>
 
 /* Sign-extend HDEC if not on POWER9 */
 #define EXTEND_HDEC(reg)			\
@@ -45,6 +46,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
 /* Values in HSTATE_NAPPING(r13) */
 #define NAPPING_CEDE	1
 #define NAPPING_NOVCPU	2
+#define NAPPING_UNSPLIT	3
 
 /* Stack frame offsets for kvmppc_hv_entry */
 #define SFS			208
@@ -290,17 +292,19 @@ kvm_novcpu_exit:
 	b	kvmhv_switch_to_host
 
 /*
- * We come in here when wakened from nap mode.
- * Relocation is off and most register values are lost.
- * r13 points to the PACA.
+ * We come in here when wakened from Linux offline idle code.
+ * Relocation is off
  * r3 contains the SRR1 wakeup value, SRR1 is trashed.
  */
-	.globl	kvm_start_guest
-kvm_start_guest:
-	/* Set runlatch bit the minute you wake up from nap */
-	mfspr	r0, SPRN_CTRLF
-	ori	r0, r0, 1
-	mtspr	SPRN_CTRLT, r0
+_GLOBAL(idle_kvm_start_guest)
+	ld	r4,PACAEMERGSP(r13)
+	mfcr	r5
+	mflr	r0
+	std	r1,0(r4)
+	std	r5,8(r4)
+	std	r0,16(r4)
+	subi	r1,r4,STACK_FRAME_OVERHEAD
+	SAVE_NVGPRS(r1)
 
 	/*
 	 * Could avoid this and pass it through in r3. For now,
@@ -308,27 +312,23 @@ kvm_start_guest:
 	 */
 	mtspr	SPRN_SRR1,r3
 
-	ld	r2,PACATOC(r13)
-
 	li	r0,0
 	stb	r0,PACA_FTRACE_ENABLED(r13)
 
 	li	r0,KVM_HWTHREAD_IN_KVM
 	stb	r0,HSTATE_HWTHREAD_STATE(r13)
 
-	/* NV GPR values from power7_idle() will no longer be valid */
-	li	r0,1
-	stb	r0,PACA_NAPSTATELOST(r13)
-
-	/* were we napping due to cede? */
+	/* kvm cede / napping does not come through here */
 	lbz	r0,HSTATE_NAPPING(r13)
-	cmpwi	r0,NAPPING_CEDE
-	beq	kvm_end_cede
-	cmpwi	r0,NAPPING_NOVCPU
-	beq	kvm_novcpu_wakeup
-
-	ld	r1,PACAEMERGSP(r13)
-	subi	r1,r1,STACK_FRAME_OVERHEAD
+	twnei	r0,0
+
+	b	1f
+
+kvm_unsplit_wakeup:
+	li	r0, 0
+	stb	r0, HSTATE_NAPPING(r13)
+
+1:
 
 	/*
 	 * We weren't napping due to cede, so this must be a secondary
@@ -437,19 +437,25 @@ kvm_no_guest:
 	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
 	cmpwi	r3, 0
 	bne	54f
 
-	/*
-	 * We jump to pnv_wakeup_loss, which will return to the caller
-	 * of power7_nap in the powernv cpu offline loop.  The value we
-	 * put in r3 becomes the return value for power7_nap. pnv_wakeup_loss
-	 * requires SRR1 in r12.
-	 */
+	/*
+	 * Jump to idle_return_gpr_loss, which returns to the
+	 * idle_kvm_start_guest caller.
+	 */
 	li	r3, LPCR_PECE0
 	mfspr	r4, SPRN_LPCR
 	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
 	mtspr	SPRN_LPCR, r4
-	li	r3, 0		/* set up r3 for return */
-	mfspr	r12,SPRN_SRR1
-	b	pnv_wakeup_loss
+	mfspr	r3,SPRN_SRR1
+	REST_NVGPRS(r1)
+	addi	r1, r1, STACK_FRAME_OVERHEAD
+	ld	r0, 16(r1)
+	ld	r5, 8(r1)
+	ld	r1, 0(r1)
+	mtlr	r0
+	mtcr	r5
+	blr
 
 53:	HMT_LOW
 	ld	r5, HSTATE_KVM_VCORE(r13)
@@ -534,6 +540,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 	lbz	r0, KVM_SPLIT_DO_NAP(r3)
 	cmpwi	r0, 0
 	beq	57f
+	li	r3, NAPPING_UNSPLIT
+	stb	r3, HSTATE_NAPPING(r13)
 	li	r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
 	mfspr	r5, SPRN_LPCR
 	rlwimi	r5, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
@@ -822,18 +830,21 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 	mtspr	SPRN_IAMR, r5
 	mtspr	SPRN_PSPB, r6
 	mtspr	SPRN_FSCR, r7
-	ld	r5, VCPU_DAWR(r4)
-	ld	r6, VCPU_DAWRX(r4)
-	ld	r7, VCPU_CIABR(r4)
-	ld	r8, VCPU_TAR(r4)
 	/*
 	 * Handle broken DAWR case by not writing it. This means we
 	 * can still store the DAWR register for migration.
 	 */
-BEGIN_FTR_SECTION
+	LOAD_REG_ADDR(r5, dawr_force_enable)
+	lbz	r5, 0(r5)
+	cmpdi	r5, 0
+	beq	1f
+	ld	r5, VCPU_DAWR(r4)
+	ld	r6, VCPU_DAWRX(r4)
 	mtspr	SPRN_DAWR, r5
 	mtspr	SPRN_DAWRX, r6
-END_FTR_SECTION_IFSET(CPU_FTR_DAWR)
+1:
+	ld	r7, VCPU_CIABR(r4)
+	ld	r8, VCPU_TAR(r4)
 	mtspr	SPRN_CIABR, r7
 	mtspr	SPRN_TAR, r8
 	ld	r5, VCPU_IC(r4)
@@ -2513,11 +2524,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 	blr
 
 2:
-BEGIN_FTR_SECTION
-	/* POWER9 with disabled DAWR */
+	LOAD_REG_ADDR(r11, dawr_force_enable)
+	lbz	r11, 0(r11)
+	cmpdi	r11, 0
 	li	r3, H_HARDWARE
-	blr
-END_FTR_SECTION_IFCLR(CPU_FTR_DAWR)
+	beqlr
 	/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
 	rlwimi	r5, r4, 5, DAWRX_DR | DAWRX_DW
 	rlwimi	r5, r4, 2, DAWRX_WT
@@ -2654,6 +2665,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 	lis	r3, LPCR_PECEDP@h	/* Do wake on privileged doorbell */
 
+	/* Go back to host stack */
+	ld	r1, HSTATE_HOST_R1(r13)
+
 	/*
 	 * Take a nap until a decrementer or external or doobell interrupt
 	 * occurs, with PECE1 and PECE0 set in LPCR.
@@ -2682,26 +2696,42 @@ BEGIN_FTR_SECTION
 	 * requested level = 0 (just stop dispatching)
 	 */
 	lis	r3, (PSSCR_EC | PSSCR_ESL)@h
-	mtspr	SPRN_PSSCR, r3
 	/* Set LPCR_PECE_HVEE bit to enable wakeup by HV interrupts */
 	li	r4, LPCR_PECE_HVEE@higher
 	sldi	r4, r4, 32
 	or	r5, r5, r4
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+FTR_SECTION_ELSE
+	li	r3, PNV_THREAD_NAP
+ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
 	mtspr	SPRN_LPCR,r5
 	isync
-	li	r0, 0
-	std	r0, HSTATE_SCRATCH0(r13)
-	ptesync
-	ld	r0, HSTATE_SCRATCH0(r13)
-1:	cmpd	r0, r0
-	bne	1b
+
 BEGIN_FTR_SECTION
-	nap
+	bl	isa300_idle_stop_mayloss
 FTR_SECTION_ELSE
-	PPC_STOP
-ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
-	b	.
+	bl	isa206_idle_insn_mayloss
+ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
+
+	mfspr	r0, SPRN_CTRLF
+	ori	r0, r0, 1
+	mtspr	SPRN_CTRLT, r0
+
+	mtspr	SPRN_SRR1, r3
+
+	li	r0, 0
+	stb	r0, PACA_FTRACE_ENABLED(r13)
+
+	li	r0, KVM_HWTHREAD_IN_KVM
+	stb	r0, HSTATE_HWTHREAD_STATE(r13)
+
+	lbz	r0, HSTATE_NAPPING(r13)
+	cmpwi	r0, NAPPING_CEDE
+	beq	kvm_end_cede
+	cmpwi	r0, NAPPING_NOVCPU
+	beq	kvm_novcpu_wakeup
+	cmpwi	r0, NAPPING_UNSPLIT
+	beq	kvm_unsplit_wakeup
+	twi	31,0,0 /* Nap state must not be zero */
 
 33:	mr	r4, r3
 	li	r3, 0
@@ -2709,12 +2739,11 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
 	b	34f
 
 kvm_end_cede:
+	/* Woken by external or decrementer interrupt */
+
 	/* get vcpu pointer */
 	ld	r4, HSTATE_KVM_VCPU(r13)
 
-	/* Woken by external or decrementer interrupt */
-	ld	r1, HSTATE_HOST_R1(r13)
-
 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
 	addi	r3, r4, VCPU_TB_RMINTR
 	bl	kvmhv_accumulate_time
...
... (diff collapsed)
@@ -262,6 +262,9 @@ OPAL_CALL(opal_xive_get_vp_info,		OPAL_XIVE_GET_VP_INFO);
 OPAL_CALL(opal_xive_set_vp_info,		OPAL_XIVE_SET_VP_INFO);
 OPAL_CALL(opal_xive_sync,			OPAL_XIVE_SYNC);
 OPAL_CALL(opal_xive_dump,			OPAL_XIVE_DUMP);
+OPAL_CALL(opal_xive_get_queue_state,		OPAL_XIVE_GET_QUEUE_STATE);
+OPAL_CALL(opal_xive_set_queue_state,		OPAL_XIVE_SET_QUEUE_STATE);
+OPAL_CALL(opal_xive_get_vp_state,		OPAL_XIVE_GET_VP_STATE);
 OPAL_CALL(opal_signal_system_reset,		OPAL_SIGNAL_SYSTEM_RESET);
 OPAL_CALL(opal_npu_init_context,		OPAL_NPU_INIT_CONTEXT);
 OPAL_CALL(opal_npu_destroy_context,		OPAL_NPU_DESTROY_CONTEXT);
...
@@ -183,7 +183,7 @@ static void unsplit_core(void)
 	cpu = smp_processor_id();
 	if (cpu_thread_in_core(cpu) != 0) {
 		while (mfspr(SPRN_HID0) & mask)
-			power7_idle_insn(PNV_THREAD_NAP);
+			power7_idle_type(PNV_THREAD_NAP);
 
 		per_cpu(split_state, cpu).step = SYNC_STEP_UNSPLIT;
 		return;
...
@@ -437,6 +437,12 @@ void xive_native_sync_source(u32 hw_irq)
 }
 EXPORT_SYMBOL_GPL(xive_native_sync_source);
 
+void xive_native_sync_queue(u32 hw_irq)
+{
+	opal_xive_sync(XIVE_SYNC_QUEUE, hw_irq);
+}
+EXPORT_SYMBOL_GPL(xive_native_sync_queue);
+
 static const struct xive_ops xive_native_ops = {
 	.populate_irq_data	= xive_native_populate_irq_data,
 	.configure_irq		= xive_native_configure_irq,
@@ -711,3 +717,96 @@ bool xive_native_has_single_escalation(void)
 	return xive_has_single_esc;
 }
 EXPORT_SYMBOL_GPL(xive_native_has_single_escalation);
+
+int xive_native_get_queue_info(u32 vp_id, u32 prio,
+			       u64 *out_qpage,
+			       u64 *out_qsize,
+			       u64 *out_qeoi_page,
+			       u32 *out_escalate_irq,
+			       u64 *out_qflags)
+{
+	__be64 qpage;
+	__be64 qsize;
+	__be64 qeoi_page;
+	__be32 escalate_irq;
+	__be64 qflags;
+	s64 rc;
+
+	rc = opal_xive_get_queue_info(vp_id, prio, &qpage, &qsize,
+				      &qeoi_page, &escalate_irq, &qflags);
+	if (rc) {
+		pr_err("OPAL failed to get queue info for VCPU %d/%d : %lld\n",
+		       vp_id, prio, rc);
+		return -EIO;
+	}
+
+	if (out_qpage)
+		*out_qpage = be64_to_cpu(qpage);
+	if (out_qsize)
+		*out_qsize = be32_to_cpu(qsize);
+	if (out_qeoi_page)
+		*out_qeoi_page = be64_to_cpu(qeoi_page);
+	if (out_escalate_irq)
+		*out_escalate_irq = be32_to_cpu(escalate_irq);
+	if (out_qflags)
+		*out_qflags = be64_to_cpu(qflags);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xive_native_get_queue_info);
+
+int xive_native_get_queue_state(u32 vp_id, u32 prio, u32 *qtoggle, u32 *qindex)
+{
+	__be32 opal_qtoggle;
+	__be32 opal_qindex;
+	s64 rc;
+
+	rc = opal_xive_get_queue_state(vp_id, prio, &opal_qtoggle,
+				       &opal_qindex);
+	if (rc) {
+		pr_err("OPAL failed to get queue state for VCPU %d/%d : %lld\n",
+		       vp_id, prio, rc);
+		return -EIO;
+	}
+
+	if (qtoggle)
+		*qtoggle = be32_to_cpu(opal_qtoggle);
+	if (qindex)
+		*qindex = be32_to_cpu(opal_qindex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xive_native_get_queue_state);
+
+int xive_native_set_queue_state(u32 vp_id, u32 prio, u32 qtoggle, u32 qindex)
+{
+	s64 rc;
+
+	rc = opal_xive_set_queue_state(vp_id, prio, qtoggle, qindex);
+	if (rc) {
+		pr_err("OPAL failed to set queue state for VCPU %d/%d : %lld\n",
+		       vp_id, prio, rc);
+		return -EIO;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xive_native_set_queue_state);
+
+int xive_native_get_vp_state(u32 vp_id, u64 *out_state)
+{
+	__be64 state;
+	s64 rc;
+
+	rc = opal_xive_get_vp_state(vp_id, &state);
+	if (rc) {
+		pr_err("OPAL failed to get vp state for VCPU %d : %lld\n",
+		       vp_id, rc);
+		return -EIO;
+	}
+
+	if (out_state)
+		*out_state = be64_to_cpu(state);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xive_native_get_vp_state);
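
These exports exist so KVM can checkpoint XIVE interrupt state for guest
migration. A hedged sketch of the expected save-side call pattern follows;
the function name is illustrative, not the actual KVM consumer:

    /* Sketch: quiesce a queue, then capture its toggle/index for migration. */
    static int save_queue_state_sketch(u32 vp_id, u32 prio, u32 eq_hw_irq,
                                       u32 *qtoggle, u32 *qindex)
    {
            /* flush in-flight interrupts so the captured state is stable */
            xive_native_sync_queue(eq_hw_irq);
            return xive_native_get_queue_state(vp_id, prio, qtoggle, qindex);
    }

On the restore side, xive_native_set_queue_state() and
xive_native_get_vp_state() play the matching roles.
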
@@ -2431,7 +2431,6 @@ static void dump_one_paca(int cpu)
 	DUMP(p, irq_happened, "%#-*x");
 	DUMP(p, io_sync, "%#-*x");
 	DUMP(p, irq_work_pending, "%#-*x");
-	DUMP(p, nap_state_lost, "%#-*x");
 	DUMP(p, sprg_vdso, "%#-*llx");
 
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
@@ -2439,20 +2438,17 @@ static void dump_one_paca(int cpu)
 #endif
 
 #ifdef CONFIG_PPC_POWERNV
-	DUMP(p, core_idle_state_ptr, "%-*px");
-	DUMP(p, thread_idle_state, "%#-*x");
-	DUMP(p, thread_mask, "%#-*x");
-	DUMP(p, subcore_sibling_mask, "%#-*x");
-	DUMP(p, requested_psscr, "%#-*llx");
-	DUMP(p, stop_sprs.pid, "%#-*llx");
-	DUMP(p, stop_sprs.ldbar, "%#-*llx");
-	DUMP(p, stop_sprs.fscr, "%#-*llx");
-	DUMP(p, stop_sprs.hfscr, "%#-*llx");
-	DUMP(p, stop_sprs.mmcr1, "%#-*llx");
-	DUMP(p, stop_sprs.mmcr2, "%#-*llx");
-	DUMP(p, stop_sprs.mmcra, "%#-*llx");
-	DUMP(p, dont_stop.counter, "%#-*x");
+	DUMP(p, idle_state, "%#-*lx");
+	if (!early_cpu_has_feature(CPU_FTR_ARCH_300)) {
+		DUMP(p, thread_idle_state, "%#-*x");
+		DUMP(p, subcore_sibling_mask, "%#-*x");
+	} else {
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+		DUMP(p, requested_psscr, "%#-*llx");
+		DUMP(p, dont_stop.counter, "%#-*x");
+#endif
+	}
 #endif
 
 	DUMP(p, accounting.utime, "%#-*lx");
 	DUMP(p, accounting.stime, "%#-*lx");
...