Commit 688c50aa authored by Christoffer Dall, committed by Christoffer Dall

KVM: arm/arm64: Move timer save/restore out of the hyp code

As we are about to be lazy with saving and restoring the timer
registers, we prepare by moving all possible timer configuration logic
out of the hyp code.  All virtual timer registers can be programmed from
EL1, and since the arch timer is always a level-triggered interrupt, we
can safely do this with interrupts disabled in the host kernel on the
way to the guest, without taking vtimer interrupts in the host kernel
(yet).
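To make the level-triggered argument concrete, here is a condensed
sketch of the EL1-side restore helper this patch introduces (the full
timer_save_state/timer_restore_state pair appears in the arch timer
hunks below; the real helper additionally checks timer->enabled):

/*
 * Condensed sketch: restore the guest's virtual timer state from EL1.
 * Because the arch timer output is a level, writing an already-expired
 * CVAL here (with host interrupts disabled) merely leaves the interrupt
 * pending until the guest is entered; the host never takes it.
 */
static void timer_restore_state(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

	write_sysreg_el0(vtimer->cnt_cval, cntv_cval);
	isb();	/* order the CVAL write before (re-)enabling the timer */
	write_sysreg_el0(vtimer->cnt_ctl, cntv_ctl);
}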

The downside is that the cntvoff register can only be programmed from
hyp mode, so we jump into hyp mode and back to program it.  This is also
safe, because the host kernel doesn't use the virtual timer in the KVM
code.  It may add a small performance penalty, but only until subsequent
commits move this operation to vcpu load/put.
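Concretely, the hyp round trip for cntvoff amounts to the following
pair, condensed from the hunks below:

/* Hyp side: reassemble the two halves and program CNTVOFF_EL2. */
void __hyp_text __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high)
{
	u64 cntvoff = (u64)cntvoff_high << 32 | cntvoff_low;
	write_sysreg(cntvoff, cntvoff_el2);
}

/*
 * Host side: kvm_call_hyp passes arguments register by register (the
 * function address occupies r0/x0), so a 64-bit value must be split
 * into two 32-bit halves rather than passed directly.
 */
static void set_cntvoff(u64 cntvoff)
{
	kvm_call_hyp(__kvm_timer_set_cntvoff,
		     lower_32_bits(cntvoff), upper_32_bits(cntvoff));
}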
Signed-off-by: Christoffer Dall <cdall@linaro.org>
Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
parent f2a2129e
@@ -68,6 +68,8 @@ extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
 extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
 
+extern void __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high);
+
 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 
 extern void __init_stage2_translation(void);
...
@@ -98,8 +98,8 @@
 #define cntvoff_el2	CNTVOFF
 #define cnthctl_el2	CNTHCTL
 
-void __timer_save_state(struct kvm_vcpu *vcpu);
-void __timer_restore_state(struct kvm_vcpu *vcpu);
+void __timer_enable_traps(struct kvm_vcpu *vcpu);
+void __timer_disable_traps(struct kvm_vcpu *vcpu);
 
 void __vgic_v2_save_state(struct kvm_vcpu *vcpu);
 void __vgic_v2_restore_state(struct kvm_vcpu *vcpu);
...
@@ -174,7 +174,7 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 	__activate_vm(vcpu);
 
 	__vgic_restore_state(vcpu);
-	__timer_restore_state(vcpu);
+	__timer_enable_traps(vcpu);
 	__sysreg_restore_state(guest_ctxt);
 	__banked_restore_state(guest_ctxt);
@@ -191,7 +191,8 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 	__banked_save_state(guest_ctxt);
 	__sysreg_save_state(guest_ctxt);
-	__timer_save_state(vcpu);
+	__timer_disable_traps(vcpu);
+
 	__vgic_save_state(vcpu);
 
 	__deactivate_traps(vcpu);
@@ -237,7 +238,7 @@ void __hyp_text __noreturn __hyp_panic(int cause)
 	vcpu = (struct kvm_vcpu *)read_sysreg(HTPIDR);
 	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
 
-	__timer_save_state(vcpu);
+	__timer_disable_traps(vcpu);
 	__deactivate_traps(vcpu);
 	__deactivate_vm(vcpu);
 	__banked_restore_state(host_ctxt);
...
@@ -55,6 +55,8 @@ extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
 extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
 
+extern void __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high);
+
 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 
 extern u64 __vgic_v3_get_ich_vtr_el2(void);
...
@@ -129,8 +129,8 @@ void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
 void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
 int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu);
 
-void __timer_save_state(struct kvm_vcpu *vcpu);
-void __timer_restore_state(struct kvm_vcpu *vcpu);
+void __timer_enable_traps(struct kvm_vcpu *vcpu);
+void __timer_disable_traps(struct kvm_vcpu *vcpu);
 
 void __sysreg_save_host_state(struct kvm_cpu_context *ctxt);
 void __sysreg_restore_host_state(struct kvm_cpu_context *ctxt);
...
@@ -298,7 +298,7 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 	__activate_vm(vcpu);
 
 	__vgic_restore_state(vcpu);
-	__timer_restore_state(vcpu);
+	__timer_enable_traps(vcpu);
 
 	/*
 	 * We must restore the 32-bit state before the sysregs, thanks
@@ -368,7 +368,7 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 	__sysreg_save_guest_state(guest_ctxt);
 	__sysreg32_save_state(vcpu);
-	__timer_save_state(vcpu);
+	__timer_disable_traps(vcpu);
 	__vgic_save_state(vcpu);
 
 	__deactivate_traps(vcpu);
@@ -436,7 +436,7 @@ void __hyp_text __noreturn __hyp_panic(void)
 	vcpu = (struct kvm_vcpu *)read_sysreg(tpidr_el2);
 	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
 
-	__timer_save_state(vcpu);
+	__timer_disable_traps(vcpu);
 	__deactivate_traps(vcpu);
 	__deactivate_vm(vcpu);
 	__sysreg_restore_host_state(host_ctxt);
...
@@ -271,6 +271,20 @@ static void phys_timer_emulate(struct kvm_vcpu *vcpu,
 	soft_timer_start(&timer->phys_timer, kvm_timer_compute_delta(timer_ctx));
 }
 
+static void timer_save_state(struct kvm_vcpu *vcpu)
+{
+	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
+
+	if (timer->enabled) {
+		vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl);
+		vtimer->cnt_cval = read_sysreg_el0(cntv_cval);
+	}
+
+	/* Disable the virtual timer */
+	write_sysreg_el0(0, cntv_ctl);
+}
+
 /*
  * Schedule the background timer before calling kvm_vcpu_block, so that this
  * thread is removed from its waitqueue and made runnable when there's a timer
@@ -304,6 +318,18 @@ void kvm_timer_schedule(struct kvm_vcpu *vcpu)
 	soft_timer_start(&timer->bg_timer, kvm_timer_earliest_exp(vcpu));
 }
 
+static void timer_restore_state(struct kvm_vcpu *vcpu)
+{
+	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
+
+	if (timer->enabled) {
+		write_sysreg_el0(vtimer->cnt_cval, cntv_cval);
+		isb();
+		write_sysreg_el0(vtimer->cnt_ctl, cntv_ctl);
+	}
+}
+
 void kvm_timer_unschedule(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
@@ -311,6 +337,21 @@ void kvm_timer_unschedule(struct kvm_vcpu *vcpu)
 	soft_timer_cancel(&timer->bg_timer, &timer->expired);
 }
 
+static void set_cntvoff(u64 cntvoff)
+{
+	u32 low = lower_32_bits(cntvoff);
+	u32 high = upper_32_bits(cntvoff);
+
+	/*
+	 * Since kvm_call_hyp doesn't fully support the ARM PCS especially on
+	 * 32-bit systems, but rather passes register by register shifted one
+	 * place (we put the function address in r0/x0), we cannot simply pass
+	 * a 64-bit value as an argument, but have to split the value in two
+	 * 32-bit halves.
+	 */
+	kvm_call_hyp(__kvm_timer_set_cntvoff, low, high);
+}
+
 static void kvm_timer_flush_hwstate_vgic(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
@@ -414,6 +455,7 @@ static void kvm_timer_flush_hwstate_user(struct kvm_vcpu *vcpu)
 void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 
 	if (unlikely(!timer->enabled))
 		return;
@@ -427,6 +469,9 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
 		kvm_timer_flush_hwstate_user(vcpu);
 	else
 		kvm_timer_flush_hwstate_vgic(vcpu);
+
+	set_cntvoff(vtimer->cntvoff);
+	timer_restore_state(vcpu);
 }
 
 /**
@@ -446,6 +491,9 @@ void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
 	 */
 	soft_timer_cancel(&timer->phys_timer, NULL);
 
+	timer_save_state(vcpu);
+	set_cntvoff(0);
+
 	/*
 	 * The guest could have modified the timer registers or the timer
 	 * could have expired, update the timer state.
...
@@ -21,58 +21,48 @@
 
 #include <asm/kvm_hyp.h>
 
-/* vcpu is already in the HYP VA space */
-void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu)
+void __hyp_text __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high)
+{
+	u64 cntvoff = (u64)cntvoff_high << 32 | cntvoff_low;
+	write_sysreg(cntvoff, cntvoff_el2);
+}
+
+void __hyp_text enable_el1_phys_timer_access(void)
 {
-	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
-	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 	u64 val;
 
-	if (timer->enabled) {
-		vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl);
-		vtimer->cnt_cval = read_sysreg_el0(cntv_cval);
-	}
+	/* Allow physical timer/counter access for the host */
+	val = read_sysreg(cnthctl_el2);
+	val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
+	write_sysreg(val, cnthctl_el2);
+}
 
-	/* Disable the virtual timer */
-	write_sysreg_el0(0, cntv_ctl);
+void __hyp_text disable_el1_phys_timer_access(void)
+{
+	u64 val;
 
 	/*
-	 * We don't need to do this for VHE since the host kernel runs in EL2
-	 * with HCR_EL2.TGE ==1, which makes those bits have no impact.
+	 * Disallow physical timer access for the guest
+	 * Physical counter access is allowed
 	 */
-	if (!has_vhe()) {
-		/* Allow physical timer/counter access for the host */
-		val = read_sysreg(cnthctl_el2);
-		val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
-		write_sysreg(val, cnthctl_el2);
-	}
-
-	/* Clear cntvoff for the host */
-	write_sysreg(0, cntvoff_el2);
+	val = read_sysreg(cnthctl_el2);
+	val &= ~CNTHCTL_EL1PCEN;
+	val |= CNTHCTL_EL1PCTEN;
+	write_sysreg(val, cnthctl_el2);
 }
 
-void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu)
+void __hyp_text __timer_disable_traps(struct kvm_vcpu *vcpu)
 {
-	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
-	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
-	u64 val;
-
-	/* Those bits are already configured at boot on VHE-system */
-	if (!has_vhe()) {
-		/*
-		 * Disallow physical timer access for the guest
-		 * Physical counter access is allowed
-		 */
-		val = read_sysreg(cnthctl_el2);
-		val &= ~CNTHCTL_EL1PCEN;
-		val |= CNTHCTL_EL1PCTEN;
-		write_sysreg(val, cnthctl_el2);
-	}
+	/*
+	 * We don't need to do this for VHE since the host kernel runs in EL2
+	 * with HCR_EL2.TGE ==1, which makes those bits have no impact.
	 */
+	if (!has_vhe())
+		enable_el1_phys_timer_access();
+}
 
-	if (timer->enabled) {
-		write_sysreg(vtimer->cntvoff, cntvoff_el2);
-		write_sysreg_el0(vtimer->cnt_cval, cntv_cval);
-		isb();
-		write_sysreg_el0(vtimer->cnt_ctl, cntv_ctl);
-	}
+void __hyp_text __timer_enable_traps(struct kvm_vcpu *vcpu)
+{
+	if (!has_vhe())
+		disable_el1_phys_timer_access();
 }
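For reference, the net effect of this patch on timer handling around a
guest entry/exit, as a condensed call-flow sketch (host EL1 unless
marked hyp):

/*
 * kvm_timer_flush_hwstate()           entry path, host IRQs disabled
 *     set_cntvoff(vtimer->cntvoff)    hyp call: program CNTVOFF_EL2
 *     timer_restore_state()           program cntv_cval/cntv_ctl at EL1
 * __kvm_vcpu_run()                    hyp
 *     __timer_enable_traps()          !VHE: trap guest physical timer access
 *     ... guest runs ...
 *     __timer_disable_traps()         !VHE: re-allow host physical timer access
 * kvm_timer_sync_hwstate()            exit path
 *     timer_save_state()              snapshot vtimer state, then disable it
 *     set_cntvoff(0)                  hyp call: clear the offset for the host
 */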