Commit 460ba21d authored by Nicholas Piggin, committed by Michael Ellerman

KVM: PPC: Permit SRR1 flags in more injected interrupt types

The prefix architecture in ISA v3.1 introduces a prefixed bit in SRR1
for many types of synchronous interrupts, which is set when the
interrupt is caused by a prefixed instruction.

This requires KVM to be able to set this bit when injecting interrupts
into a guest. Plumb the SRR1 "flags" argument through the core_queue
APIs that were missing it. For now the flags are always set to 0, so
there is no change in behaviour.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
[mpe: Fixup kvmppc_core_queue_alignment() in booke.c]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20230330103224.3589928-2-npiggin@gmail.com
parent 43d05c61
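For context, a minimal sketch of how a converted caller might eventually use the new argument. It assumes the SRR1_PREFIXED bit definition from the powerpc headers; the reflect_dsi() helper and the way "prefixed" is detected are hypothetical illustrations, not part of this commit:

/*
 * Hypothetical sketch only (not part of this patch): once callers are
 * converted, a handler that knows the faulting instruction was prefixed
 * could propagate that fact into the guest's SRR1 through the new
 * srr1_flags argument rather than passing 0.
 */
static void reflect_dsi(struct kvm_vcpu *vcpu, ulong dar, ulong dsisr,
                        bool prefixed)
{
        ulong srr1_flags = prefixed ? SRR1_PREFIXED : 0;

        /* The extra srr1_flags argument is what this patch adds. */
        kvmppc_core_queue_data_storage(vcpu, srr1_flags, dar, dsisr);
}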
@@ -126,25 +126,34 @@ extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);
 extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
 extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
-extern void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags);
+extern void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu,
+                                            ulong srr1_flags);
 extern void kvmppc_core_queue_syscall(struct kvm_vcpu *vcpu);
-extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
-extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu);
-extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu);
-extern void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu,
+                                      ulong srr1_flags);
+extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu,
+                                        ulong srr1_flags);
+extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu,
+                                          ulong srr1_flags);
+extern void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu,
+                                          ulong srr1_flags);
 extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                        struct kvm_interrupt *irq);
 extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
-extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, ulong dear_flags,
+extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
+                                        ulong dear_flags,
                                         ulong esr_flags);
 extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
-                                           ulong dear_flags,
-                                           ulong esr_flags);
+                                           ulong srr1_flags,
+                                           ulong dar,
+                                           ulong dsisr);
 extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
-                                           ulong esr_flags);
+                                           ulong srr1_flags);
 extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
 extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);
@@ -188,10 +188,10 @@ void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
 }
 EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);
 
-void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags)
+void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong srr1_flags)
 {
         /* might as well deliver this straight away */
-        kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_MACHINE_CHECK, flags);
+        kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_MACHINE_CHECK, srr1_flags);
 }
 EXPORT_SYMBOL_GPL(kvmppc_core_queue_machine_check);
@@ -201,29 +201,29 @@ void kvmppc_core_queue_syscall(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL(kvmppc_core_queue_syscall);
 
-void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
+void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong srr1_flags)
 {
         /* might as well deliver this straight away */
-        kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
+        kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, srr1_flags);
 }
 EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);
 
-void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
+void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu, ulong srr1_flags)
 {
         /* might as well deliver this straight away */
-        kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, 0);
+        kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, srr1_flags);
 }
 
-void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
+void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu, ulong srr1_flags)
 {
         /* might as well deliver this straight away */
-        kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_ALTIVEC, 0);
+        kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_ALTIVEC, srr1_flags);
 }
 
-void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu)
+void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu, ulong srr1_flags)
 {
         /* might as well deliver this straight away */
-        kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_VSX, 0);
+        kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_VSX, srr1_flags);
 }
 
 void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
@@ -278,18 +278,18 @@ void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
         kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
 }
 
-void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar,
-                                    ulong flags)
+void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong srr1_flags,
+                                    ulong dar, ulong dsisr)
 {
         kvmppc_set_dar(vcpu, dar);
-        kvmppc_set_dsisr(vcpu, flags);
-        kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE, 0);
+        kvmppc_set_dsisr(vcpu, dsisr);
+        kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE, srr1_flags);
 }
 EXPORT_SYMBOL_GPL(kvmppc_core_queue_data_storage);
 
-void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags)
+void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong srr1_flags)
 {
-        kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_INST_STORAGE, flags);
+        kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_INST_STORAGE, srr1_flags);
 }
 EXPORT_SYMBOL_GPL(kvmppc_core_queue_inst_storage);
@@ -954,7 +954,7 @@ int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
         if (dsisr & DSISR_BADACCESS) {
                 /* Reflect to the guest as DSI */
                 pr_err("KVM: Got radix HV page fault with DSISR=%lx\n", dsisr);
-                kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
+                kvmppc_core_queue_data_storage(vcpu, 0, ea, dsisr);
                 return RESUME_GUEST;
         }
@@ -979,7 +979,7 @@ int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
                  * Bad address in guest page table tree, or other
                  * unusual error - reflect it to the guest as DSI.
                  */
-                kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
+                kvmppc_core_queue_data_storage(vcpu, 0, ea, dsisr);
                 return RESUME_GUEST;
         }
         return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, writing);
@@ -988,8 +988,8 @@ int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
         if (memslot->flags & KVM_MEM_READONLY) {
                 if (writing) {
                         /* give the guest a DSI */
-                        kvmppc_core_queue_data_storage(vcpu, ea, DSISR_ISSTORE |
-                                                       DSISR_PROTFAULT);
+                        kvmppc_core_queue_data_storage(vcpu, 0, ea,
+                                                       DSISR_ISSTORE | DSISR_PROTFAULT);
                         return RESUME_GUEST;
                 }
                 kvm_ro = true;
@@ -1739,7 +1739,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
                 }
                 if (!(vcpu->arch.fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT))) {
-                        kvmppc_core_queue_data_storage(vcpu,
+                        kvmppc_core_queue_data_storage(vcpu, 0,
                                 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
                         r = RESUME_GUEST;
                         break;
@@ -1757,7 +1757,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
                 } else if (err == -1 || err == -2) {
                         r = RESUME_PAGE_FAULT;
                 } else {
-                        kvmppc_core_queue_data_storage(vcpu,
+                        kvmppc_core_queue_data_storage(vcpu, 0,
                                 vcpu->arch.fault_dar, err);
                         r = RESUME_GUEST;
                 }
@@ -1560,7 +1560,7 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
         if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
                 if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS)) {
                         /* unusual error -> reflect to the guest as a DSI */
-                        kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
+                        kvmppc_core_queue_data_storage(vcpu, 0, ea, dsisr);
                         return RESUME_GUEST;
                 }
@@ -1570,7 +1570,7 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
         if (memslot->flags & KVM_MEM_READONLY) {
                 if (writing) {
                         /* Give the guest a DSI */
-                        kvmppc_core_queue_data_storage(vcpu, ea,
+                        kvmppc_core_queue_data_storage(vcpu, 0, ea,
                                                        DSISR_ISSTORE | DSISR_PROTFAULT);
                         return RESUME_GUEST;
                 }
@@ -759,7 +759,7 @@ static int kvmppc_handle_pagefault(struct kvm_vcpu *vcpu,
                 flags = DSISR_NOHPTE;
                 if (data) {
                         flags |= vcpu->arch.fault_dsisr & DSISR_ISSTORE;
-                        kvmppc_core_queue_data_storage(vcpu, eaddr, flags);
+                        kvmppc_core_queue_data_storage(vcpu, 0, eaddr, flags);
                 } else {
                         kvmppc_core_queue_inst_storage(vcpu, flags);
                 }
@@ -1236,7 +1236,7 @@ int kvmppc_handle_exit_pr(struct kvm_vcpu *vcpu, unsigned int exit_nr)
                         r = kvmppc_handle_pagefault(vcpu, dar, exit_nr);
                         srcu_read_unlock(&vcpu->kvm->srcu, idx);
                 } else {
-                        kvmppc_core_queue_data_storage(vcpu, dar, fault_dsisr);
+                        kvmppc_core_queue_data_storage(vcpu, 0, dar, fault_dsisr);
                         r = RESUME_GUEST;
                 }
                 break;
@@ -283,9 +283,10 @@ void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
         kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
 }
 
-void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
+void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong srr1_flags,
                                     ulong dear_flags, ulong esr_flags)
 {
+        WARN_ON_ONCE(srr1_flags);
         vcpu->arch.queued_dear = dear_flags;
         vcpu->arch.queued_esr = esr_flags;
         kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
@@ -316,14 +317,16 @@ void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
         kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
 }
 
-void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
+void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu, ulong srr1_flags)
 {
+        WARN_ON_ONCE(srr1_flags);
         kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
 }
 
 #ifdef CONFIG_ALTIVEC
-void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
+void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu, ulong srr1_flags)
 {
+        WARN_ON_ONCE(srr1_flags);
         kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
 }
 #endif
@@ -1225,7 +1228,7 @@ int kvmppc_handle_exit(struct kvm_vcpu *vcpu, unsigned int exit_nr)
 #endif
 
         case BOOKE_INTERRUPT_DATA_STORAGE:
-                kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
+                kvmppc_core_queue_data_storage(vcpu, 0, vcpu->arch.fault_dear,
                                                vcpu->arch.fault_esr);
                 kvmppc_account_exit(vcpu, DSI_EXITS);
                 r = RESUME_GUEST;
@@ -28,7 +28,7 @@
 static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
 {
         if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
-                kvmppc_core_queue_fpunavail(vcpu);
+                kvmppc_core_queue_fpunavail(vcpu, 0);
                 return true;
         }
@@ -40,7 +40,7 @@ static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
 static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
 {
         if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
-                kvmppc_core_queue_vsx_unavail(vcpu);
+                kvmppc_core_queue_vsx_unavail(vcpu, 0);
                 return true;
         }
@@ -52,7 +52,7 @@ static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
 static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
 {
         if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
-                kvmppc_core_queue_vec_unavail(vcpu);
+                kvmppc_core_queue_vec_unavail(vcpu, 0);
                 return true;
         }
@@ -321,7 +321,8 @@ int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu)
                 if (vcpu->mmio_is_write)
                         dsisr |= DSISR_ISSTORE;
 
-                kvmppc_core_queue_data_storage(vcpu, vcpu->arch.vaddr_accessed, dsisr);
+                kvmppc_core_queue_data_storage(vcpu, 0,
+                                vcpu->arch.vaddr_accessed, dsisr);
         } else {
                 /*
                  * BookE does not send a SIGBUS on a bad