Commit 6cd5c1db authored by Nicholas Piggin's avatar Nicholas Piggin Committed by Michael Ellerman

KVM: PPC: Book3S HV: Set SRR1[PREFIX] bit on injected interrupts

Pass the hypervisor (H)SRR1[PREFIX] indication through to synchronous
interrupts injected into the guest.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20230330103224.3589928-3-npiggin@gmail.com
parent 460ba21d
...@@ -954,7 +954,9 @@ int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu, ...@@ -954,7 +954,9 @@ int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
if (dsisr & DSISR_BADACCESS) { if (dsisr & DSISR_BADACCESS) {
/* Reflect to the guest as DSI */ /* Reflect to the guest as DSI */
pr_err("KVM: Got radix HV page fault with DSISR=%lx\n", dsisr); pr_err("KVM: Got radix HV page fault with DSISR=%lx\n", dsisr);
kvmppc_core_queue_data_storage(vcpu, 0, ea, dsisr); kvmppc_core_queue_data_storage(vcpu,
kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
ea, dsisr);
return RESUME_GUEST; return RESUME_GUEST;
} }
...@@ -979,7 +981,9 @@ int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu, ...@@ -979,7 +981,9 @@ int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
* Bad address in guest page table tree, or other * Bad address in guest page table tree, or other
* unusual error - reflect it to the guest as DSI. * unusual error - reflect it to the guest as DSI.
*/ */
kvmppc_core_queue_data_storage(vcpu, 0, ea, dsisr); kvmppc_core_queue_data_storage(vcpu,
kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
ea, dsisr);
return RESUME_GUEST; return RESUME_GUEST;
} }
return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, writing); return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, writing);
...@@ -988,8 +992,9 @@ int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu, ...@@ -988,8 +992,9 @@ int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
if (memslot->flags & KVM_MEM_READONLY) { if (memslot->flags & KVM_MEM_READONLY) {
if (writing) { if (writing) {
/* give the guest a DSI */ /* give the guest a DSI */
kvmppc_core_queue_data_storage(vcpu, 0, ea, kvmppc_core_queue_data_storage(vcpu,
DSISR_ISSTORE | DSISR_PROTFAULT); kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
ea, DSISR_ISSTORE | DSISR_PROTFAULT);
return RESUME_GUEST; return RESUME_GUEST;
} }
kvm_ro = true; kvm_ro = true;
......
...@@ -1428,7 +1428,8 @@ static int kvmppc_emulate_debug_inst(struct kvm_vcpu *vcpu) ...@@ -1428,7 +1428,8 @@ static int kvmppc_emulate_debug_inst(struct kvm_vcpu *vcpu)
vcpu->run->debug.arch.address = kvmppc_get_pc(vcpu); vcpu->run->debug.arch.address = kvmppc_get_pc(vcpu);
return RESUME_HOST; return RESUME_HOST;
} else { } else {
kvmppc_core_queue_program(vcpu, SRR1_PROGILL); kvmppc_core_queue_program(vcpu, SRR1_PROGILL |
(kvmppc_get_msr(vcpu) & SRR1_PREFIXED));
return RESUME_GUEST; return RESUME_GUEST;
} }
} }
...@@ -1630,7 +1631,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu, ...@@ -1630,7 +1631,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
* so that it knows that the machine check occurred. * so that it knows that the machine check occurred.
*/ */
if (!vcpu->kvm->arch.fwnmi_enabled) { if (!vcpu->kvm->arch.fwnmi_enabled) {
ulong flags = vcpu->arch.shregs.msr & 0x083c0000; ulong flags = (vcpu->arch.shregs.msr & 0x083c0000) |
(kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
kvmppc_core_queue_machine_check(vcpu, flags); kvmppc_core_queue_machine_check(vcpu, flags);
r = RESUME_GUEST; r = RESUME_GUEST;
break; break;
...@@ -1659,7 +1661,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu, ...@@ -1659,7 +1661,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
* as a result of a hypervisor emulation interrupt * as a result of a hypervisor emulation interrupt
* (e40) getting turned into a 700 by BML RTAS. * (e40) getting turned into a 700 by BML RTAS.
*/ */
flags = vcpu->arch.shregs.msr & 0x1f0000ull; flags = (vcpu->arch.shregs.msr & 0x1f0000ull) |
(kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
kvmppc_core_queue_program(vcpu, flags); kvmppc_core_queue_program(vcpu, flags);
r = RESUME_GUEST; r = RESUME_GUEST;
break; break;
...@@ -1739,7 +1742,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu, ...@@ -1739,7 +1742,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
} }
if (!(vcpu->arch.fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT))) { if (!(vcpu->arch.fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT))) {
kvmppc_core_queue_data_storage(vcpu, 0, kvmppc_core_queue_data_storage(vcpu,
kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
r = RESUME_GUEST; r = RESUME_GUEST;
break; break;
...@@ -1757,7 +1761,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu, ...@@ -1757,7 +1761,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
} else if (err == -1 || err == -2) { } else if (err == -1 || err == -2) {
r = RESUME_PAGE_FAULT; r = RESUME_PAGE_FAULT;
} else { } else {
kvmppc_core_queue_data_storage(vcpu, 0, kvmppc_core_queue_data_storage(vcpu,
kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
vcpu->arch.fault_dar, err); vcpu->arch.fault_dar, err);
r = RESUME_GUEST; r = RESUME_GUEST;
} }
...@@ -1785,7 +1790,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu, ...@@ -1785,7 +1790,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
if (!(vcpu->arch.fault_dsisr & SRR1_ISI_NOPT)) { if (!(vcpu->arch.fault_dsisr & SRR1_ISI_NOPT)) {
kvmppc_core_queue_inst_storage(vcpu, kvmppc_core_queue_inst_storage(vcpu,
vcpu->arch.fault_dsisr); vcpu->arch.fault_dsisr |
(kvmppc_get_msr(vcpu) & SRR1_PREFIXED));
r = RESUME_GUEST; r = RESUME_GUEST;
break; break;
} }
...@@ -1802,7 +1808,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu, ...@@ -1802,7 +1808,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
} else if (err == -1) { } else if (err == -1) {
r = RESUME_PAGE_FAULT; r = RESUME_PAGE_FAULT;
} else { } else {
kvmppc_core_queue_inst_storage(vcpu, err); kvmppc_core_queue_inst_storage(vcpu,
err | (kvmppc_get_msr(vcpu) & SRR1_PREFIXED));
r = RESUME_GUEST; r = RESUME_GUEST;
} }
break; break;
...@@ -1823,7 +1830,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu, ...@@ -1823,7 +1830,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) { if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
r = kvmppc_emulate_debug_inst(vcpu); r = kvmppc_emulate_debug_inst(vcpu);
} else { } else {
kvmppc_core_queue_program(vcpu, SRR1_PROGILL); kvmppc_core_queue_program(vcpu, SRR1_PROGILL |
(kvmppc_get_msr(vcpu) & SRR1_PREFIXED));
r = RESUME_GUEST; r = RESUME_GUEST;
} }
break; break;
...@@ -1864,7 +1872,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu, ...@@ -1864,7 +1872,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
r = kvmppc_tm_unavailable(vcpu); r = kvmppc_tm_unavailable(vcpu);
} }
if (r == EMULATE_FAIL) { if (r == EMULATE_FAIL) {
kvmppc_core_queue_program(vcpu, SRR1_PROGILL); kvmppc_core_queue_program(vcpu, SRR1_PROGILL |
(kvmppc_get_msr(vcpu) & SRR1_PREFIXED));
r = RESUME_GUEST; r = RESUME_GUEST;
} }
break; break;
......
...@@ -1560,7 +1560,9 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu, ...@@ -1560,7 +1560,9 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) { if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS)) { if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS)) {
/* unusual error -> reflect to the guest as a DSI */ /* unusual error -> reflect to the guest as a DSI */
kvmppc_core_queue_data_storage(vcpu, 0, ea, dsisr); kvmppc_core_queue_data_storage(vcpu,
kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
ea, dsisr);
return RESUME_GUEST; return RESUME_GUEST;
} }
...@@ -1570,8 +1572,9 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu, ...@@ -1570,8 +1572,9 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
if (memslot->flags & KVM_MEM_READONLY) { if (memslot->flags & KVM_MEM_READONLY) {
if (writing) { if (writing) {
/* Give the guest a DSI */ /* Give the guest a DSI */
kvmppc_core_queue_data_storage(vcpu, 0, ea, kvmppc_core_queue_data_storage(vcpu,
DSISR_ISSTORE | DSISR_PROTFAULT); kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
ea, DSISR_ISSTORE | DSISR_PROTFAULT);
return RESUME_GUEST; return RESUME_GUEST;
} }
kvm_ro = true; kvm_ro = true;
......
...@@ -28,7 +28,7 @@ ...@@ -28,7 +28,7 @@
static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu) static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
{ {
if (!(kvmppc_get_msr(vcpu) & MSR_FP)) { if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
kvmppc_core_queue_fpunavail(vcpu, 0); kvmppc_core_queue_fpunavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
return true; return true;
} }
...@@ -40,7 +40,7 @@ static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu) ...@@ -40,7 +40,7 @@ static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu) static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
{ {
if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) { if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
kvmppc_core_queue_vsx_unavail(vcpu, 0); kvmppc_core_queue_vsx_unavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
return true; return true;
} }
...@@ -52,7 +52,7 @@ static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu) ...@@ -52,7 +52,7 @@ static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu) static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
{ {
if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) { if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
kvmppc_core_queue_vec_unavail(vcpu, 0); kvmppc_core_queue_vec_unavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
return true; return true;
} }
......
...@@ -321,7 +321,8 @@ int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu) ...@@ -321,7 +321,8 @@ int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu)
if (vcpu->mmio_is_write) if (vcpu->mmio_is_write)
dsisr |= DSISR_ISSTORE; dsisr |= DSISR_ISSTORE;
kvmppc_core_queue_data_storage(vcpu, 0, kvmppc_core_queue_data_storage(vcpu,
kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
vcpu->arch.vaddr_accessed, dsisr); vcpu->arch.vaddr_accessed, dsisr);
} else { } else {
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment