Commit 268f4ef9 authored by Nicholas Piggin, committed by Paul Mackerras

KVM: PPC: Book3S HV: Reuse kvmppc_inject_interrupt for async guest delivery

This consolidates the HV interrupt delivery logic into one place.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
parent 87a45e07
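
For context, the sequence this patch consolidates is the architected Book3S interrupt delivery: save the interrupted PC and MSR into SRR0/SRR1, redirect the PC to the interrupt vector, and switch to the guest's interrupt MSR. The following is a simplified, annotated sketch of that sequence (the function name deliver_sketch is invented for illustration; the patch's actual inject_interrupt() in the diff below additionally handles transactional-memory suspend state):

static void deliver_sketch(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
{
	unsigned long msr = kvmppc_get_msr(vcpu);

	/* SRR0 <- interrupted PC, so the guest handler can rfid back */
	kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
	/* SRR1 <- saved MSR bits plus interrupt-specific status flags */
	kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags);
	/* PC <- the architected vector offset, e.g. 0x500 for External */
	kvmppc_set_pc(vcpu, vec);
	/* MSR <- the guest's interrupt MSR (EE and translation off) */
	kvmppc_set_msr(vcpu, vcpu->arch.intr_msr);
}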
@@ -32,4 +32,7 @@ extern void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val);
 static inline void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val) {}
 #endif
 
+extern void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr);
+extern void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);
+
 #endif
@@ -133,7 +133,6 @@ static inline bool nesting_enabled(struct kvm *kvm)
 /* If set, the threads on each CPU core have to be in the same MMU mode */
 static bool no_mixing_hpt_and_radix;
 
-static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
 
 /*
@@ -338,39 +337,6 @@ static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
 	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
 }
 
-static void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
-{
-	unsigned long msr, pc, new_msr, new_pc;
-
-	msr = kvmppc_get_msr(vcpu);
-	pc = kvmppc_get_pc(vcpu);
-	new_msr = vcpu->arch.intr_msr;
-	new_pc = vec;
-
-	/* If transactional, change to suspend mode on IRQ delivery */
-	if (MSR_TM_TRANSACTIONAL(msr))
-		new_msr |= MSR_TS_S;
-	else
-		new_msr |= msr & MSR_TS_MASK;
-
-	kvmppc_set_srr0(vcpu, pc);
-	kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags);
-	kvmppc_set_pc(vcpu, new_pc);
-	kvmppc_set_msr(vcpu, new_msr);
-}
-
-static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
-{
-	/*
-	 * Check for illegal transactional state bit combination
-	 * and if we find it, force the TS field to a safe state.
-	 */
-	if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
-		msr &= ~MSR_TS_MASK;
-	vcpu->arch.shregs.msr = msr;
-	kvmppc_end_cede(vcpu);
-}
-
 static void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr)
 {
 	vcpu->arch.pvr = pvr;
@@ -2475,15 +2441,6 @@ static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
 	vcpu->arch.timer_running = 1;
 }
 
-static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
-{
-	vcpu->arch.ceded = 0;
-	if (vcpu->arch.timer_running) {
-		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
-		vcpu->arch.timer_running = 0;
-	}
-}
-
 extern int __kvmppc_vcore_entry(void);
 
 static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
...
@@ -755,6 +755,56 @@ void kvmhv_p9_restore_lpcr(struct kvm_split_mode *sip)
 	local_paca->kvm_hstate.kvm_split_mode = NULL;
 }
 
+static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.ceded = 0;
+	if (vcpu->arch.timer_running) {
+		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
+		vcpu->arch.timer_running = 0;
+	}
+}
+
+void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
+{
+	/*
+	 * Check for illegal transactional state bit combination
+	 * and if we find it, force the TS field to a safe state.
+	 */
+	if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
+		msr &= ~MSR_TS_MASK;
+	vcpu->arch.shregs.msr = msr;
+	kvmppc_end_cede(vcpu);
+}
+EXPORT_SYMBOL_GPL(kvmppc_set_msr_hv);
+
+static void inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
+{
+	unsigned long msr, pc, new_msr, new_pc;
+
+	msr = kvmppc_get_msr(vcpu);
+	pc = kvmppc_get_pc(vcpu);
+	new_msr = vcpu->arch.intr_msr;
+	new_pc = vec;
+
+	/* If transactional, change to suspend mode on IRQ delivery */
+	if (MSR_TM_TRANSACTIONAL(msr))
+		new_msr |= MSR_TS_S;
+	else
+		new_msr |= msr & MSR_TS_MASK;
+
+	kvmppc_set_srr0(vcpu, pc);
+	kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags);
+	kvmppc_set_pc(vcpu, new_pc);
+	vcpu->arch.shregs.msr = new_msr;
+}
+
+void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
+{
+	inject_interrupt(vcpu, vec, srr1_flags);
+	kvmppc_end_cede(vcpu);
+}
+EXPORT_SYMBOL_GPL(kvmppc_inject_interrupt_hv);
+
 /*
  * Is there a PRIV_DOORBELL pending for the guest (on POWER9)?
  * Can we inject a Decrementer or an External interrupt?
@@ -762,7 +812,6 @@ void kvmhv_p9_restore_lpcr(struct kvm_split_mode *sip)
 void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu)
 {
 	int ext;
-	unsigned long vec = 0;
 	unsigned long lpcr;
 
 	/* Insert EXTERNAL bit into LPCR at the MER bit position */
@@ -774,26 +823,16 @@ void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu)
 	if (vcpu->arch.shregs.msr & MSR_EE) {
 		if (ext) {
-			vec = BOOK3S_INTERRUPT_EXTERNAL;
+			inject_interrupt(vcpu, BOOK3S_INTERRUPT_EXTERNAL, 0);
 		} else {
 			long int dec = mfspr(SPRN_DEC);
 			if (!(lpcr & LPCR_LD))
 				dec = (int) dec;
 			if (dec < 0)
-				vec = BOOK3S_INTERRUPT_DECREMENTER;
+				inject_interrupt(vcpu,
+					BOOK3S_INTERRUPT_DECREMENTER, 0);
 		}
 	}
 
-	if (vec) {
-		unsigned long msr, old_msr = vcpu->arch.shregs.msr;
-
-		kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
-		kvmppc_set_srr1(vcpu, old_msr);
-		kvmppc_set_pc(vcpu, vec);
-		msr = vcpu->arch.intr_msr;
-		if (MSR_TM_ACTIVE(old_msr))
-			msr |= MSR_TS_S;
-		vcpu->arch.shregs.msr = msr;
-	}
-
 	if (vcpu->arch.doorbell_request) {
 		mtspr(SPRN_DPDES, 1);
...
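
The two EXPORT_SYMBOL_GPL lines make the consolidated helpers available to the rest of the KVM HV code. As a hypothetical illustration (not part of this commit), a caller emulating an illegal instruction could deliver a Program interrupt and wake a ceded vcpu in one call:

/*
 * Hypothetical caller, for illustration only: deliver a Program
 * interrupt (vector 0x700) with SRR1_PROGILL set in srr1_flags to
 * flag an illegal instruction. kvmppc_inject_interrupt_hv() also
 * ends any H_CEDE sleep so the vcpu runs its handler promptly.
 */
kvmppc_inject_interrupt_hv(vcpu, BOOK3S_INTERRUPT_PROGRAM, SRR1_PROGILL);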