Commit b3f15ec3 authored by Mark Rutland, committed by Marc Zyngier

kvm: arm/arm64: Fold VHE entry/exit work into kvm_vcpu_run_vhe()

With VHE, running a vCPU always requires the sequence:

1. kvm_arm_vhe_guest_enter();
2. kvm_vcpu_run_vhe();
3. kvm_arm_vhe_guest_exit()

... and as we invoke this from the shared arm/arm64 KVM code, 32-bit arm
has to provide stubs for all three functions.

To simplify the common code, and make it easier to make further
modifications to the arm64-specific portions in the near future, let's
fold kvm_arm_vhe_guest_enter() and kvm_arm_vhe_guest_exit() into
kvm_vcpu_run_vhe().

The 32-bit stubs for kvm_arm_vhe_guest_enter() and
kvm_arm_vhe_guest_exit() are removed, as they are no longer used. The
32-bit stub for kvm_vcpu_run_vhe() is left as-is.

There should be no functional change as a result of this patch.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20200210114757.2889-1-mark.rutland@arm.com
parent 51b25694
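
At a glance, the fold turns the three-call sequence above into a single entry point. A condensed sketch of the resulting function, for orientation only (the full version is in the arch/arm64/kvm/hyp/switch.c hunk below):

int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
{
	int ret;

	/* Entry work, formerly kvm_arm_vhe_guest_enter() */
	local_daif_mask();		/* also sets GIC_PRIO_PSR_I_SET */
	pmr_sync();			/* dsb so the redistributor forwards EL2 IRQs */

	ret = __kvm_vcpu_run_vhe(vcpu);	/* the old kvm_vcpu_run_vhe() body */

	/* Exit work, formerly kvm_arm_vhe_guest_exit() */
	local_daif_restore(DAIF_PROCCTX_NOIRQ);
	isb();				/* make trap reconfiguration visible */

	return ret;
}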
arch/arm/include/asm/kvm_host.h
@@ -394,9 +394,6 @@ static inline void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) {}
 static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
 static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
 
-static inline void kvm_arm_vhe_guest_enter(void) {}
-static inline void kvm_arm_vhe_guest_exit(void) {}
-
 #define KVM_BP_HARDEN_UNKNOWN		-1
 #define KVM_BP_HARDEN_WA_NEEDED	0
 #define KVM_BP_HARDEN_NOT_REQUIRED	1
arch/arm64/include/asm/kvm_host.h
@@ -628,38 +628,6 @@ static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
 static inline void kvm_clr_pmu_events(u32 clr) {}
 #endif
 
-static inline void kvm_arm_vhe_guest_enter(void)
-{
-	local_daif_mask();
-
-	/*
-	 * Having IRQs masked via PMR when entering the guest means the GIC
-	 * will not signal the CPU of interrupts of lower priority, and the
-	 * only way to get out will be via guest exceptions.
-	 * Naturally, we want to avoid this.
-	 *
-	 * local_daif_mask() already sets GIC_PRIO_PSR_I_SET, we just need a
-	 * dsb to ensure the redistributor forwards EL2 IRQs to the CPU.
-	 */
-	pmr_sync();
-}
-
-static inline void kvm_arm_vhe_guest_exit(void)
-{
-	/*
-	 * local_daif_restore() takes care to properly restore PSTATE.DAIF
-	 * and the GIC PMR if the host is using IRQ priorities.
-	 */
-	local_daif_restore(DAIF_PROCCTX_NOIRQ);
-
-	/*
-	 * When we exit from the guest we change a number of CPU configuration
-	 * parameters, such as traps. Make sure these changes take effect
-	 * before running the host or additional guests.
-	 */
-	isb();
-}
-
 #define KVM_BP_HARDEN_UNKNOWN		-1
 #define KVM_BP_HARDEN_WA_NEEDED	0
 #define KVM_BP_HARDEN_NOT_REQUIRED	1
arch/arm64/kvm/hyp/switch.c
@@ -617,7 +617,7 @@ static void __hyp_text __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
 }
 
 /* Switch to the guest for VHE systems running in EL2 */
-int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
+static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpu_context *host_ctxt;
 	struct kvm_cpu_context *guest_ctxt;
@@ -670,7 +670,42 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 
 	return exit_code;
 }
-NOKPROBE_SYMBOL(kvm_vcpu_run_vhe);
+NOKPROBE_SYMBOL(__kvm_vcpu_run_vhe);
+
+int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
+{
+	int ret;
+
+	local_daif_mask();
+
+	/*
+	 * Having IRQs masked via PMR when entering the guest means the GIC
+	 * will not signal the CPU of interrupts of lower priority, and the
+	 * only way to get out will be via guest exceptions.
+	 * Naturally, we want to avoid this.
+	 *
+	 * local_daif_mask() already sets GIC_PRIO_PSR_I_SET, we just need a
+	 * dsb to ensure the redistributor forwards EL2 IRQs to the CPU.
+	 */
+	pmr_sync();
+
+	ret = __kvm_vcpu_run_vhe(vcpu);
+
+	/*
+	 * local_daif_restore() takes care to properly restore PSTATE.DAIF
+	 * and the GIC PMR if the host is using IRQ priorities.
+	 */
+	local_daif_restore(DAIF_PROCCTX_NOIRQ);
+
+	/*
+	 * When we exit from the guest we change a number of CPU configuration
+	 * parameters, such as traps. Make sure these changes take effect
+	 * before running the host or additional guests.
+	 */
+	isb();
+
+	return ret;
+}
 
 /* Switch to the guest for legacy non-VHE systems */
 int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
virt/kvm/arm/arm.c
@@ -797,9 +797,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		guest_enter_irqoff();
 
 		if (has_vhe()) {
-			kvm_arm_vhe_guest_enter();
 			ret = kvm_vcpu_run_vhe(vcpu);
-			kvm_arm_vhe_guest_exit();
 		} else {
 			ret = kvm_call_hyp_ret(__kvm_vcpu_run_nvhe, vcpu);
 		}
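
With the entry/exit work folded in, the common run loop (final hunk above) makes a single call per mode. A condensed view for illustration; note that has_vhe() is compile-time false on 32-bit arm, which is why the remaining kvm_vcpu_run_vhe() stub there is never reached:

	guest_enter_irqoff();

	if (has_vhe())
		ret = kvm_vcpu_run_vhe(vcpu);	/* entry/exit work now internal */
	else
		ret = kvm_call_hyp_ret(__kvm_vcpu_run_nvhe, vcpu);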