Commit bd2be683 authored by Alexander Graf

KVM: PPC: Book3S: PR: Rework irq disabling

Today, we disable preemption while inside guest context, because we need
to expose to the world that we are not in a preemptible context. However,
during that time we already have interrupts disabled, which would indicate
that we are in a non-preemptible context.

The reason the irqs_disabled() checks fail for us, though, is that we
manually control hard IRQs and bypass the lazy EE framework entirely.
Let's stop doing that. Instead, let's always use lazy EE to indicate
when we want IRQs disabled, but do a special final switch that gets us
into an EE-disabled, but soft-enabled, state. That way, when we get back
out of guest state, we are immediately ready to process interrupts.

This simplifies the code drastically and reduces the time during which
we appear to be preempt-disabled.
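
To make the "EE disabled, but soft enabled" state concrete, here is a
minimal sketch of what the final switch amounts to on PPC64 (for
illustration only; the two paca fields are exactly the ones the new
kvmppc_lazy_ee_enable() helper below touches):

	/* Sketch: the special final switch described above (PPC64).
	 * Afterwards, irqs_disabled() reports false (soft enabled),
	 * while MSR.EE stays 0 (hard disabled) until the guest entry
	 * code hard-enables it. */
	local_paca->irq_happened = 0;	/* nothing left to replay */
	local_paca->soft_enabled = 1;	/* lazily "enabled" */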
Signed-off-by: Alexander Graf <agraf@suse.de>
parent 24afa37b
@@ -234,5 +234,15 @@ static inline void kvmppc_mmu_flush_icache(pfn_t pfn)
 	}
 }
 
+/* Please call after prepare_to_enter. This function puts the lazy ee state
+   back to normal mode, without actually enabling interrupts. */
+static inline void kvmppc_lazy_ee_enable(void)
+{
+#ifdef CONFIG_PPC64
+	/* Only need to enable IRQs by hard enabling them after this */
+	local_paca->irq_happened = 0;
+	local_paca->soft_enabled = 1;
+#endif
+}
+
 #endif /* __POWERPC_KVM_PPC_H__ */
@@ -52,8 +52,6 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
 #define MSR_USER32 MSR_USER
 #define MSR_USER64 MSR_USER
 #define HW_PAGE_SIZE PAGE_SIZE
-#define __hard_irq_disable local_irq_disable
-#define __hard_irq_enable local_irq_enable
 #endif
 
 void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
@@ -597,12 +595,10 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	run->exit_reason = KVM_EXIT_UNKNOWN;
 	run->ready_for_interrupt_injection = 1;
 
-	/* We get here with MSR.EE=0, so enable it to be a nice citizen */
-	__hard_irq_enable();
+	/* We get here with MSR.EE=1 */
 
 	trace_kvm_exit(exit_nr, vcpu);
 	kvm_guest_exit();
-	preempt_enable();
 
 	switch (exit_nr) {
 	case BOOK3S_INTERRUPT_INST_STORAGE:
@@ -854,7 +850,6 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		}
 	}
 
-	preempt_disable();
 	if (!(r & RESUME_HOST)) {
 		/* To avoid clobbering exit_reason, only check for signals if
 		 * we aren't already exiting to userspace for some other
@@ -866,14 +861,15 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		 * and if we really did time things so badly, then we just exit
 		 * again due to a host external interrupt.
 		 */
-		__hard_irq_disable();
+		local_irq_disable();
 		if (kvmppc_prepare_to_enter(vcpu)) {
-			/* local_irq_enable(); */
+			local_irq_enable();
 			run->exit_reason = KVM_EXIT_INTR;
 			r = -EINTR;
 		} else {
 			/* Going back to guest */
 			kvm_guest_enter();
+			kvmppc_lazy_ee_enable();
 		}
 	}
@@ -1066,8 +1062,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 #endif
 	ulong ext_msr;
 
-	preempt_disable();
-
 	/* Check if we can run the vcpu at all */
 	if (!vcpu->arch.sane) {
 		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
@@ -1081,9 +1075,9 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	 * really did time things so badly, then we just exit again due to
 	 * a host external interrupt.
 	 */
-	__hard_irq_disable();
+	local_irq_disable();
 	if (kvmppc_prepare_to_enter(vcpu)) {
-		__hard_irq_enable();
+		local_irq_enable();
 		kvm_run->exit_reason = KVM_EXIT_INTR;
 		ret = -EINTR;
 		goto out;
@@ -1122,7 +1116,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	if (vcpu->arch.shared->msr & MSR_FP)
 		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
 
-	kvm_guest_enter();
+	kvmppc_lazy_ee_enable();
 
 	ret = __kvmppc_vcpu_run(kvm_run, vcpu);
@@ -1157,7 +1151,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 
 out:
 	vcpu->mode = OUTSIDE_GUEST_MODE;
-	preempt_enable();
 	return ret;
 }
...
@@ -170,20 +170,21 @@ kvmppc_handler_skip_ins:
  * Call kvmppc_handler_trampoline_enter in real mode
  *
  * On entry, r4 contains the guest shadow MSR
+ * MSR.EE has to be 0 when calling this function
  */
 _GLOBAL(kvmppc_entry_trampoline)
 	mfmsr	r5
 	LOAD_REG_ADDR(r7, kvmppc_handler_trampoline_enter)
 	toreal(r7)
 
-	li	r9, MSR_RI
-	ori	r9, r9, MSR_EE
-	andc	r9, r5, r9	/* Clear EE and RI in MSR value */
 	li	r6, MSR_IR | MSR_DR
-	ori	r6, r6, MSR_EE
-	andc	r6, r5, r6	/* Clear EE, DR and IR in MSR value */
-	MTMSR_EERI(r9)		/* Clear EE and RI in MSR */
-	mtsrr0	r7		/* before we set srr0/1 */
+	andc	r6, r5, r6	/* Clear DR and IR in MSR value */
+
+	/*
+	 * Set EE in HOST_MSR so that it's enabled when we get into our
+	 * C exit handler function
+	 */
+	ori	r5, r5, MSR_EE
+	mtsrr0	r7
 	mtsrr1	r6
 	RFI
...
@@ -486,6 +486,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 		ret = -EINTR;
 		goto out;
 	}
+	kvmppc_lazy_ee_enable();
 
 	kvm_guest_enter();
@@ -955,6 +956,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		} else {
 			/* Going back to guest */
 			kvm_guest_enter();
+			kvmppc_lazy_ee_enable();
 		}
 	}
...
@@ -30,6 +30,7 @@
 #include <asm/kvm_ppc.h>
 #include <asm/tlbflush.h>
 #include <asm/cputhreads.h>
+#include <asm/irqflags.h>
 #include "timing.h"
 #include "../mm/mmu_decl.h"
@@ -93,6 +94,19 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
 			break;
 		}
 
+#ifdef CONFIG_PPC64
+		/* lazy EE magic */
+		hard_irq_disable();
+		if (lazy_irq_pending()) {
+			/* Got an interrupt in between, try again */
+			local_irq_enable();
+			local_irq_disable();
+			continue;
+		}
+
+		trace_hardirqs_on();
+#endif
+
 		/* Going into guest context! Yay! */
 		vcpu->mode = IN_GUEST_MODE;
 		smp_wmb();
...
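
Putting the hunks together, the reworked guest entry path boils down to
the following condensed sketch (assembled from the code above purely for
illustration; guest_entry_flow() is a hypothetical wrapper, not a
function this patch adds):

	static int guest_entry_flow(struct kvm_run *kvm_run,
				    struct kvm_vcpu *vcpu)
	{
		int ret;

		local_irq_disable();	/* lazy (soft) disable only */
		if (kvmppc_prepare_to_enter(vcpu)) {
			/* prepare_to_enter hard-disabled EE and found a
			 * pending signal, so bail out to userspace */
			local_irq_enable();
			kvm_run->exit_reason = KVM_EXIT_INTR;
			return -EINTR;
		}

		kvmppc_lazy_ee_enable();	/* soft on, EE still off */
		ret = __kvmppc_vcpu_run(kvm_run, vcpu);
		/* We return here with MSR.EE=1 (set by the trampoline),
		 * so no __hard_irq_enable()/preempt_enable() is needed. */
		return ret;
	}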