Commit a878957a authored by Paul Mackerras

Merge remote-tracking branch 'remotes/powerpc/topic/ppc-kvm' into kvm-ppc-next

This merges in the ppc-kvm topic branch from the powerpc tree to get
patches which touch both general powerpc code and KVM code, one of
which is a prerequisite for following patches.
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
parents 44b198ae c1fe190c
@@ -56,3 +56,35 @@ POWER9. Loads and stores to the watchpoint locations will not be
trapped in GDB. The watchpoint is remembered, so if the guest is
migrated back to the POWER8 host, it will start working again.

Force enabling the DAWR
=======================
Kernels (since ~v5.2) have an option to force enable the DAWR via:
echo Y > /sys/kernel/debug/powerpc/dawr_enable_dangerous
This enables the DAWR even on POWER9.
This is a dangerous setting, USE AT YOUR OWN RISK.
Some users may not care about a bad user crashing their box
(i.e. single user/desktop systems) and really want the DAWR. This
allows them to force enable the DAWR.

This flag can also be used to disable DAWR access. Once it is
cleared, all DAWR access is cleared immediately and your machine is
once again safe from crashing.
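As a sketch of the workflow (paths as above; requires root and a
debugfs mount at /sys/kernel/debug):

  # read back the current state (Y or N)
  cat /sys/kernel/debug/powerpc/dawr_enable_dangerous
  # force enable, then later disable again
  echo Y > /sys/kernel/debug/powerpc/dawr_enable_dangerous
  echo N > /sys/kernel/debug/powerpc/dawr_enable_dangerous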
Userspace may get confused by toggling this. If DAWR is force
enabled/disabled between getting the number of breakpoints (via
PTRACE_GETHWDBGINFO) and setting the breakpoint, userspace will get an
inconsistent view of what's available. Similarly for guests.
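For example, a minimal userspace check for DAWR support looks roughly
like this (dawr_available() is an illustrative name, not a real API;
the snapshot it returns can go stale if the flag is toggled later):

  #include <sys/types.h>
  #include <sys/ptrace.h>
  #include <asm/ptrace.h>	/* struct ppc_debug_info, PPC_PTRACE_GETHWDBGINFO */

  /* Query a traced child once; toggling dawr_enable_dangerous after
   * this call can make the snapshot stale - the inconsistency above. */
  static int dawr_available(pid_t pid)
  {
  	struct ppc_debug_info info;

  	if (ptrace(PPC_PTRACE_GETHWDBGINFO, pid, NULL, &info) == -1)
  		return 0;
  	return !!(info.features & PPC_DEBUG_FEATURE_DATA_BP_DAWR);
  }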
For the DAWR to be enabled in a KVM guest, the DAWR needs to be force
enabled in the host AND the guest. For this reason, this won't work on
POWERVM as it doesn't allow the HCALL to work. Writes of 'Y' to the
dawr_enable_dangerous file will fail if the hypervisor doesn't support
writing the DAWR.
To double check the DAWR is working, run this kernel selftest:
tools/testing/selftests/powerpc/ptrace/ptrace-hwbreak.c
Any errors/failures/skips mean something is wrong.
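One way to build and run it from a kernel source tree (the exact make
invocation may vary between trees):

  cd tools/testing/selftests/powerpc/ptrace
  make
  ./ptrace-hwbreak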
@@ -90,10 +90,18 @@ static inline void hw_breakpoint_disable(void)
extern void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs);
int hw_breakpoint_handler(struct die_args *args);
extern int set_dawr(struct arch_hw_breakpoint *brk);
extern bool dawr_force_enable;
static inline bool dawr_enabled(void)
{
return dawr_force_enable;
}
#else /* CONFIG_HAVE_HW_BREAKPOINT */
static inline void hw_breakpoint_disable(void) { }
static inline void thread_change_pc(struct task_struct *tsk,
struct pt_regs *regs) { }
static inline bool dawr_enabled(void) { return false; }
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif /* __KERNEL__ */
#endif /* _PPC_BOOK3S_64_HW_BREAKPOINT_H */
@@ -186,8 +186,8 @@
#define OPAL_XIVE_FREE_IRQ 140
#define OPAL_XIVE_SYNC 141
#define OPAL_XIVE_DUMP 142
- #define OPAL_XIVE_RESERVED3 143
- #define OPAL_XIVE_RESERVED4 144
+ #define OPAL_XIVE_GET_QUEUE_STATE 143
+ #define OPAL_XIVE_SET_QUEUE_STATE 144
#define OPAL_SIGNAL_SYSTEM_RESET 145
#define OPAL_NPU_INIT_CONTEXT 146
#define OPAL_NPU_DESTROY_CONTEXT 147
@@ -210,7 +210,8 @@
#define OPAL_PCI_GET_PBCQ_TUNNEL_BAR 164
#define OPAL_PCI_SET_PBCQ_TUNNEL_BAR 165
#define OPAL_NX_COPROC_INIT 167
+ #define OPAL_XIVE_GET_VP_STATE 170
- #define OPAL_LAST 167
+ #define OPAL_LAST 170
#define QUIESCE_HOLD 1 /* Spin all calls at entry */
#define QUIESCE_REJECT 2 /* Fail all calls with OPAL_BUSY */
...
@@ -279,6 +279,13 @@ int64_t opal_xive_allocate_irq(uint32_t chip_id);
int64_t opal_xive_free_irq(uint32_t girq);
int64_t opal_xive_sync(uint32_t type, uint32_t id);
int64_t opal_xive_dump(uint32_t type, uint32_t id);
int64_t opal_xive_get_queue_state(uint64_t vp, uint32_t prio,
__be32 *out_qtoggle,
__be32 *out_qindex);
int64_t opal_xive_set_queue_state(uint64_t vp, uint32_t prio,
uint32_t qtoggle,
uint32_t qindex);
int64_t opal_xive_get_vp_state(uint64_t vp, __be64 *out_w01);
int64_t opal_pci_set_p2p(uint64_t phb_init, uint64_t phb_target,
uint64_t desc, uint16_t pe_number);
...
@@ -109,12 +109,26 @@ extern int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
extern void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio);
extern void xive_native_sync_source(u32 hw_irq);
extern void xive_native_sync_queue(u32 hw_irq);
extern bool is_xive_irq(struct irq_chip *chip);
extern int xive_native_enable_vp(u32 vp_id, bool single_escalation);
extern int xive_native_disable_vp(u32 vp_id);
extern int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id);
extern bool xive_native_has_single_escalation(void);
extern int xive_native_get_queue_info(u32 vp_id, uint32_t prio,
u64 *out_qpage,
u64 *out_qsize,
u64 *out_qeoi_page,
u32 *out_escalate_irq,
u64 *out_qflags);
extern int xive_native_get_queue_state(u32 vp_id, uint32_t prio, u32 *qtoggle,
u32 *qindex);
extern int xive_native_set_queue_state(u32 vp_id, uint32_t prio, u32 qtoggle,
u32 qindex);
extern int xive_native_get_vp_state(u32 vp_id, u64 *out_state);
#else
static inline bool xive_enabled(void) { return false; }
...
@@ -29,11 +29,15 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <asm/hw_breakpoint.h>
#include <asm/processor.h>
#include <asm/sstep.h>
#include <asm/debug.h>
#include <asm/debugfs.h>
#include <asm/hvcall.h>
#include <linux/uaccess.h>

/*
@@ -174,7 +178,7 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
if (!ppc_breakpoint_available())
return -ENODEV;
length_max = 8; /* DABR */
- if (cpu_has_feature(CPU_FTR_DAWR)) {
+ if (dawr_enabled()) {
length_max = 512; /* 64 doublewords */
/* DAWR region can't cross a 512-byte boundary */
if ((attr->bp_addr >> 9) !=
@@ -376,3 +380,59 @@ void hw_breakpoint_pmu_read(struct perf_event *bp)
{
/* TODO */
}

bool dawr_force_enable;
EXPORT_SYMBOL_GPL(dawr_force_enable);
static ssize_t dawr_write_file_bool(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct arch_hw_breakpoint null_brk = {0, 0, 0};
size_t rc;
/* Send an error to the user if the hypervisor won't allow us to write the DAWR */
if ((!dawr_force_enable) &&
(firmware_has_feature(FW_FEATURE_LPAR)) &&
(set_dawr(&null_brk) != H_SUCCESS))
return -EPERM;
rc = debugfs_write_file_bool(file, user_buf, count, ppos);
if (rc)
return rc;
/* If we are clearing, make sure all CPUs have the DAWR cleared */
if (!dawr_force_enable)
smp_call_function((smp_call_func_t)set_dawr, &null_brk, 0);
return rc;
}
static const struct file_operations dawr_enable_fops = {
.read = debugfs_read_file_bool,
.write = dawr_write_file_bool,
.open = simple_open,
.llseek = default_llseek,
};
static int __init dawr_force_setup(void)
{
dawr_force_enable = false;
if (cpu_has_feature(CPU_FTR_DAWR)) {
/* Don't set up the debugfs file for user control on P8 */
dawr_force_enable = true;
return 0;
}
if (PVR_VER(mfspr(SPRN_PVR)) == PVR_POWER9) {
/* Turn DAWR off by default, but allow admin to turn it on */
dawr_force_enable = false;
debugfs_create_file_unsafe("dawr_enable_dangerous", 0600,
powerpc_debugfs_root,
&dawr_force_enable,
&dawr_enable_fops);
}
return 0;
}
arch_initcall(dawr_force_setup);
@@ -67,6 +67,7 @@
#include <asm/cpu_has_feature.h>
#include <asm/asm-prototypes.h>
#include <asm/stacktrace.h>
#include <asm/hw_breakpoint.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
@@ -784,7 +785,7 @@ static inline int set_dabr(struct arch_hw_breakpoint *brk)
return __set_dabr(dabr, dabrx);
}
- static inline int set_dawr(struct arch_hw_breakpoint *brk)
+ int set_dawr(struct arch_hw_breakpoint *brk)
{
unsigned long dawr, dawrx, mrd;
@@ -816,7 +817,7 @@ void __set_breakpoint(struct arch_hw_breakpoint *brk)
{
memcpy(this_cpu_ptr(&current_brk), brk, sizeof(*brk));
- if (cpu_has_feature(CPU_FTR_DAWR))
+ if (dawr_enabled())
// Power8 or later
set_dawr(brk);
else if (!cpu_has_feature(CPU_FTR_ARCH_207S))
@@ -830,8 +831,8 @@ void __set_breakpoint(struct arch_hw_breakpoint *brk)
/* Check if we have DAWR or DABR hardware */
bool ppc_breakpoint_available(void)
{
- if (cpu_has_feature(CPU_FTR_DAWR))
- return true; /* POWER8 DAWR */
+ if (dawr_enabled())
+ return true; /* POWER8 DAWR or POWER9 forced DAWR */
if (cpu_has_feature(CPU_FTR_ARCH_207S))
return false; /* POWER9 with DAWR disabled */
/* DABR: Everything but POWER8 and POWER9 */
...
@@ -43,6 +43,7 @@
#include <asm/tm.h>
#include <asm/asm-prototypes.h>
#include <asm/debug.h>
#include <asm/hw_breakpoint.h>
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
@@ -3088,7 +3089,7 @@ long arch_ptrace(struct task_struct *child, long request,
dbginfo.sizeof_condition = 0;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
dbginfo.features = PPC_DEBUG_FEATURE_DATA_BP_RANGE;
- if (cpu_has_feature(CPU_FTR_DAWR))
+ if (dawr_enabled())
dbginfo.features |= PPC_DEBUG_FEATURE_DATA_BP_DAWR;
#else
dbginfo.features = 0;
...
@@ -74,6 +74,7 @@
#include <asm/opal.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/hw_breakpoint.h>
#include "book3s.h" #include "book3s.h"
...@@ -3410,7 +3411,7 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit, ...@@ -3410,7 +3411,7 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
mtspr(SPRN_PURR, vcpu->arch.purr); mtspr(SPRN_PURR, vcpu->arch.purr);
mtspr(SPRN_SPURR, vcpu->arch.spurr); mtspr(SPRN_SPURR, vcpu->arch.spurr);
if (cpu_has_feature(CPU_FTR_DAWR)) { if (dawr_enabled()) {
mtspr(SPRN_DAWR, vcpu->arch.dawr); mtspr(SPRN_DAWR, vcpu->arch.dawr);
mtspr(SPRN_DAWRX, vcpu->arch.dawrx); mtspr(SPRN_DAWRX, vcpu->arch.dawrx);
} }
......
@@ -794,18 +794,21 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
mtspr SPRN_IAMR, r5
mtspr SPRN_PSPB, r6
mtspr SPRN_FSCR, r7
- ld r5, VCPU_DAWR(r4)
- ld r6, VCPU_DAWRX(r4)
- ld r7, VCPU_CIABR(r4)
- ld r8, VCPU_TAR(r4)
/*
 * Handle broken DAWR case by not writing it. This means we
 * can still store the DAWR register for migration.
 */
- BEGIN_FTR_SECTION
+ LOAD_REG_ADDR(r5, dawr_force_enable)
+ lbz r5, 0(r5)
+ cmpdi r5, 0
+ beq 1f
+ ld r5, VCPU_DAWR(r4)
+ ld r6, VCPU_DAWRX(r4)
mtspr SPRN_DAWR, r5
mtspr SPRN_DAWRX, r6
- END_FTR_SECTION_IFSET(CPU_FTR_DAWR)
+ 1:
+ ld r7, VCPU_CIABR(r4)
+ ld r8, VCPU_TAR(r4)
mtspr SPRN_CIABR, r7
mtspr SPRN_TAR, r8
@@ -2499,11 +2502,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
blr

2:
- BEGIN_FTR_SECTION
- /* POWER9 with disabled DAWR */
+ LOAD_REG_ADDR(r11, dawr_force_enable)
+ lbz r11, 0(r11)
+ cmpdi r11, 0
li r3, H_HARDWARE
- blr
- END_FTR_SECTION_IFCLR(CPU_FTR_DAWR)
+ beqlr
/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW
rlwimi r5, r4, 2, DAWRX_WT
...
@@ -260,6 +260,9 @@ OPAL_CALL(opal_xive_get_vp_info, OPAL_XIVE_GET_VP_INFO);
OPAL_CALL(opal_xive_set_vp_info, OPAL_XIVE_SET_VP_INFO);
OPAL_CALL(opal_xive_sync, OPAL_XIVE_SYNC);
OPAL_CALL(opal_xive_dump, OPAL_XIVE_DUMP);
OPAL_CALL(opal_xive_get_queue_state, OPAL_XIVE_GET_QUEUE_STATE);
OPAL_CALL(opal_xive_set_queue_state, OPAL_XIVE_SET_QUEUE_STATE);
OPAL_CALL(opal_xive_get_vp_state, OPAL_XIVE_GET_VP_STATE);
OPAL_CALL(opal_signal_system_reset, OPAL_SIGNAL_SYSTEM_RESET);
OPAL_CALL(opal_npu_init_context, OPAL_NPU_INIT_CONTEXT);
OPAL_CALL(opal_npu_destroy_context, OPAL_NPU_DESTROY_CONTEXT);
...
@@ -437,6 +437,12 @@ void xive_native_sync_source(u32 hw_irq)
}
EXPORT_SYMBOL_GPL(xive_native_sync_source);
void xive_native_sync_queue(u32 hw_irq)
{
opal_xive_sync(XIVE_SYNC_QUEUE, hw_irq);
}
EXPORT_SYMBOL_GPL(xive_native_sync_queue);
static const struct xive_ops xive_native_ops = {
.populate_irq_data = xive_native_populate_irq_data,
.configure_irq = xive_native_configure_irq,
@@ -711,3 +717,96 @@ bool xive_native_has_single_escalation(void)
return xive_has_single_esc;
}
EXPORT_SYMBOL_GPL(xive_native_has_single_escalation);
int xive_native_get_queue_info(u32 vp_id, u32 prio,
u64 *out_qpage,
u64 *out_qsize,
u64 *out_qeoi_page,
u32 *out_escalate_irq,
u64 *out_qflags)
{
__be64 qpage;
__be64 qsize;
__be64 qeoi_page;
__be32 escalate_irq;
__be64 qflags;
s64 rc;
rc = opal_xive_get_queue_info(vp_id, prio, &qpage, &qsize,
&qeoi_page, &escalate_irq, &qflags);
if (rc) {
pr_err("OPAL failed to get queue info for VCPU %d/%d : %lld\n",
vp_id, prio, rc);
return -EIO;
}
if (out_qpage)
*out_qpage = be64_to_cpu(qpage);
if (out_qsize)
*out_qsize = be64_to_cpu(qsize);
if (out_qeoi_page)
*out_qeoi_page = be64_to_cpu(qeoi_page);
if (out_escalate_irq)
*out_escalate_irq = be32_to_cpu(escalate_irq);
if (out_qflags)
*out_qflags = be64_to_cpu(qflags);
return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_queue_info);
int xive_native_get_queue_state(u32 vp_id, u32 prio, u32 *qtoggle, u32 *qindex)
{
__be32 opal_qtoggle;
__be32 opal_qindex;
s64 rc;
rc = opal_xive_get_queue_state(vp_id, prio, &opal_qtoggle,
&opal_qindex);
if (rc) {
pr_err("OPAL failed to get queue state for VCPU %d/%d : %lld\n",
vp_id, prio, rc);
return -EIO;
}
if (qtoggle)
*qtoggle = be32_to_cpu(opal_qtoggle);
if (qindex)
*qindex = be32_to_cpu(opal_qindex);
return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_queue_state);
int xive_native_set_queue_state(u32 vp_id, u32 prio, u32 qtoggle, u32 qindex)
{
s64 rc;
rc = opal_xive_set_queue_state(vp_id, prio, qtoggle, qindex);
if (rc) {
pr_err("OPAL failed to set queue state for VCPU %d/%d : %lld\n",
vp_id, prio, rc);
return -EIO;
}
return 0;
}
EXPORT_SYMBOL_GPL(xive_native_set_queue_state);
int xive_native_get_vp_state(u32 vp_id, u64 *out_state)
{
__be64 state;
s64 rc;
rc = opal_xive_get_vp_state(vp_id, &state);
if (rc) {
pr_err("OPAL failed to get vp state for VCPU %d : %lld\n",
vp_id, rc);
return -EIO;
}
if (out_state)
*out_state = be64_to_cpu(state);
return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_vp_state);
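As a usage sketch (caller names are hypothetical, not part of this
patch), a hypervisor such as KVM could pair these helpers to snapshot
and restore a single event queue around migration:

  /* Hypothetical caller: save/restore the priority-0 queue of one vCPU. */
  static int snapshot_queue(u32 vp_id, u32 hw_irq, u32 *qtoggle, u32 *qindex)
  {
  	/* Flush in-flight triggers so the queue state is stable first. */
  	xive_native_sync_queue(hw_irq);
  	return xive_native_get_queue_state(vp_id, 0, qtoggle, qindex);
  }

  static int restore_queue(u32 vp_id, u32 qtoggle, u32 qindex)
  {
  	return xive_native_set_queue_state(vp_id, 0, qtoggle, qindex);
  }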