Commit 2267ea76 authored by Benjamin Herrenschmidt, committed by Paul Mackerras

KVM: PPC: Book3S HV: Don't use existing "prodded" flag for XIVE escalations

The prodded flag is only cleared at the beginning of H_CEDE,
so every time we have an escalation, we will cause the *next*
H_CEDE to return immediately.

Instead, use a dedicated "irq_pending" flag to indicate that
a guest interrupt is pending for the VCPU. We don't reuse the
existing exception bitmap, to avoid its expensive atomic ops.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
parent bf4159da
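
To make the failure mode concrete, here is a minimal, self-contained C sketch of the flag handshake. The field names mirror the diff below, but the control flow is a deliberately simplified toy model, not the actual KVM code:

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the relevant kvm_vcpu_arch flags; names mirror the diff,
 * everything else is simplified illustration, not kernel code. */
struct toy_vcpu {
	bool ceded;       /* vCPU executed H_CEDE and is idle */
	bool prodded;     /* set by H_PROD from another vCPU */
	bool irq_pending; /* new: set by the XIVE escalation handler */
};

/* Old behaviour: the escalation handler reused "prodded" to wake the vCPU. */
static void escalation_old(struct toy_vcpu *v) { v->prodded = true; }

/* New behaviour: a dedicated flag, consumed by the interrupt-pending check
 * and cleared on guest entry instead of by H_CEDE. */
static void escalation_new(struct toy_vcpu *v) { v->irq_pending = true; }

/* H_CEDE consumes "prodded" at its start: if it is set, the hcall returns
 * immediately. A leftover flag from an escalation therefore turns the
 * *next* H_CEDE into a no-op -- the bug this commit fixes. */
static bool h_cede_returns_immediately(struct toy_vcpu *v)
{
	bool leftover = v->prodded;

	v->prodded = false;
	return leftover;
}

int main(void)
{
	struct toy_vcpu v = { 0 };

	escalation_old(&v); /* escalation delivered while the guest runs */
	printf("old scheme, next H_CEDE spurious: %d\n",
	       h_cede_returns_immediately(&v)); /* prints 1: spurious return */

	escalation_new(&v);
	printf("new scheme, next H_CEDE spurious: %d\n",
	       h_cede_returns_immediately(&v)); /* prints 0: fixed */
	return 0;
}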
arch/powerpc/include/asm/kvm_host.h

@@ -709,6 +709,7 @@ struct kvm_vcpu_arch {
 	u8 ceded;
 	u8 prodded;
 	u8 doorbell_request;
+	u8 irq_pending; /* Used by XIVE to signal pending guest irqs */
 	u32 last_inst;

 	struct swait_queue_head *wqp;
...
arch/powerpc/kernel/asm-offsets.c

@@ -514,6 +514,7 @@ int main(void)
 	OFFSET(VCPU_PENDING_EXC, kvm_vcpu, arch.pending_exceptions);
 	OFFSET(VCPU_CEDED, kvm_vcpu, arch.ceded);
 	OFFSET(VCPU_PRODDED, kvm_vcpu, arch.prodded);
+	OFFSET(VCPU_IRQ_PENDING, kvm_vcpu, arch.irq_pending);
 	OFFSET(VCPU_DBELL_REQ, kvm_vcpu, arch.doorbell_request);
 	OFFSET(VCPU_MMCR, kvm_vcpu, arch.mmcr);
 	OFFSET(VCPU_PMC, kvm_vcpu, arch.pmc);
...
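
asm-offsets.c exists to export struct offsets to assembly: each OFFSET() entry becomes a #define in the generated include/generated/asm-offsets.h, which the real-mode assembly below uses. Roughly like this (the numeric value is made up; it depends on the actual struct layout):

/* Excerpt of the generated asm-offsets.h (value hypothetical) */
#define VCPU_IRQ_PENDING 2210 /* offsetof(struct kvm_vcpu, arch.irq_pending) */

This is what lets book3s_hv_rmhandlers.S address the new field in the hunk further below.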
arch/powerpc/kvm/book3s_hv.c

@@ -2999,7 +2999,7 @@ static inline bool xive_interrupt_pending(struct kvm_vcpu *vcpu)
 {
 	if (!xive_enabled())
 		return false;
-	return vcpu->arch.xive_saved_state.pipr <
+	return vcpu->arch.irq_pending || vcpu->arch.xive_saved_state.pipr <
 		vcpu->arch.xive_saved_state.cppr;
 }
 #else
...
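
Putting the hunk together, the resulting helper (reconstructed from the diff context, not copied from the tree) reads:

static inline bool xive_interrupt_pending(struct kvm_vcpu *vcpu)
{
	if (!xive_enabled())
		return false;
	return vcpu->arch.irq_pending || vcpu->arch.xive_saved_state.pipr <
		vcpu->arch.xive_saved_state.cppr;
}

That is, a wakeup is warranted either when the escalation handler has flagged irq_pending, or when the saved XIVE state shows a pending interrupt priority (PIPR) more favoured than the current processor priority (CPPR).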
arch/powerpc/kvm/book3s_hv_rmhandlers.S

@@ -1035,6 +1035,16 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
 	li	r9, 1
 	stw	r9, VCPU_XIVE_PUSHED(r4)
 	eieio
+
+	/*
+	 * We clear the irq_pending flag. There is a small chance of a
+	 * race vs. the escalation interrupt happening on another
+	 * processor setting it again, but the only consequence is to
+	 * cause a spurious wakeup on the next H_CEDE, which is not an
+	 * issue.
+	 */
+	li	r0,0
+	stb	r0, VCPU_IRQ_PENDING(r4)
 no_xive:
 #endif /* CONFIG_KVM_XICS */
...
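
For readers unfamiliar with PPC assembly, a rough C gloss of the two added instructions, assuming r4 holds the kvm_vcpu pointer on this path (as the surrounding VCPU_* accesses suggest):

/* li r0,0 ; stb r0, VCPU_IRQ_PENDING(r4) */
vcpu->arch.irq_pending = 0;

If an escalation fires on another CPU right after this clear, xive_esc_irq() simply sets the flag again; as the comment notes, the worst case is one spurious wakeup on the next H_CEDE.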
arch/powerpc/kvm/book3s_xive.c

@@ -84,8 +84,7 @@ static irqreturn_t xive_esc_irq(int irq, void *data)
 {
 	struct kvm_vcpu *vcpu = data;

-	/* We use the existing H_PROD mechanism to wake up the target */
-	vcpu->arch.prodded = 1;
+	vcpu->arch.irq_pending = 1;
 	smp_mb();
 	if (vcpu->arch.ceded)
 		kvmppc_fast_vcpu_kick(vcpu);
...
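
Reconstructed from the diff context, the escalation handler now looks like this (lines beyond the hunk are elided):

static irqreturn_t xive_esc_irq(int irq, void *data)
{
	struct kvm_vcpu *vcpu = data;

	vcpu->arch.irq_pending = 1;
	smp_mb();
	if (vcpu->arch.ceded)
		kvmppc_fast_vcpu_kick(vcpu);
	/* remainder of the handler continues past the hunk */
}

The smp_mb() orders the irq_pending store before the read of ceded, so a vCPU that ceded just before the escalation fired is still kicked rather than left sleeping.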