Commit 94046732 authored by Marc Zyngier's avatar Marc Zyngier

KVM: arm64: timers: Correctly handle TGE flip with CNTPOFF_EL2

Contrary to common belief, HCR_EL2.TGE has a direct and immediate
effect on the way the EL0 physical counter is offset. Flipping
TGE from 1 to 0 while at EL2 immediately changes the way the counter
is compared to the CVAL limit.

This means that we cannot directly save/restore the guest's view of
CVAL, but that we instead must treat it as if CNTPOFF didn't exist.
Only in the world switch, once we figure out that we do have CNTPOFF,
can we apply the offset back and forth depending on the polarity of
TGE.

Fixes: 2b4825a8 ("KVM: arm64: timers: Use CNTPOFF_EL2 to offset the physical timer")
Reported-by: default avatarGanapatrao Kulkarni <gankulkarni@os.amperecomputing.com>
Tested-by: default avatarGanapatrao Kulkarni <gankulkarni@os.amperecomputing.com>
Signed-off-by: default avatarMarc Zyngier <maz@kernel.org>
parent 839d9035
...@@ -55,11 +55,6 @@ static struct irq_ops arch_timer_irq_ops = { ...@@ -55,11 +55,6 @@ static struct irq_ops arch_timer_irq_ops = {
.get_input_level = kvm_arch_timer_get_input_level, .get_input_level = kvm_arch_timer_get_input_level,
}; };
/*
 * True when running VHE on a CPU that implements FEAT_ECV's CNTPOFF_EL2
 * register (i.e. the physical counter offset is available in hardware).
 */
static bool has_cntpoff(void)
{
return (has_vhe() && cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF));
}
static int nr_timers(struct kvm_vcpu *vcpu) static int nr_timers(struct kvm_vcpu *vcpu)
{ {
if (!vcpu_has_nv(vcpu)) if (!vcpu_has_nv(vcpu))
...@@ -180,7 +175,7 @@ u64 kvm_phys_timer_read(void) ...@@ -180,7 +175,7 @@ u64 kvm_phys_timer_read(void)
return timecounter->cc->read(timecounter->cc); return timecounter->cc->read(timecounter->cc);
} }
static void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map) void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map)
{ {
if (vcpu_has_nv(vcpu)) { if (vcpu_has_nv(vcpu)) {
if (is_hyp_ctxt(vcpu)) { if (is_hyp_ctxt(vcpu)) {
...@@ -548,8 +543,7 @@ static void timer_save_state(struct arch_timer_context *ctx) ...@@ -548,8 +543,7 @@ static void timer_save_state(struct arch_timer_context *ctx)
timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTP_CTL)); timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTP_CTL));
cval = read_sysreg_el0(SYS_CNTP_CVAL); cval = read_sysreg_el0(SYS_CNTP_CVAL);
if (!has_cntpoff()) cval -= timer_get_offset(ctx);
cval -= timer_get_offset(ctx);
timer_set_cval(ctx, cval); timer_set_cval(ctx, cval);
...@@ -636,8 +630,7 @@ static void timer_restore_state(struct arch_timer_context *ctx) ...@@ -636,8 +630,7 @@ static void timer_restore_state(struct arch_timer_context *ctx)
cval = timer_get_cval(ctx); cval = timer_get_cval(ctx);
offset = timer_get_offset(ctx); offset = timer_get_offset(ctx);
set_cntpoff(offset); set_cntpoff(offset);
if (!has_cntpoff()) cval += offset;
cval += offset;
write_sysreg_el0(cval, SYS_CNTP_CVAL); write_sysreg_el0(cval, SYS_CNTP_CVAL);
isb(); isb();
write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTP_CTL); write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTP_CTL);
......
...@@ -39,6 +39,26 @@ static void __activate_traps(struct kvm_vcpu *vcpu) ...@@ -39,6 +39,26 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
___activate_traps(vcpu); ___activate_traps(vcpu);
if (has_cntpoff()) {
struct timer_map map;
get_timer_map(vcpu, &map);
/*
* We're entering the guest. Reload the correct
* values from memory now that TGE is clear.
*/
if (map.direct_ptimer == vcpu_ptimer(vcpu))
val = __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0);
if (map.direct_ptimer == vcpu_hptimer(vcpu))
val = __vcpu_sys_reg(vcpu, CNTHP_CVAL_EL2);
if (map.direct_ptimer) {
write_sysreg_el0(val, SYS_CNTP_CVAL);
isb();
}
}
val = read_sysreg(cpacr_el1); val = read_sysreg(cpacr_el1);
val |= CPACR_ELx_TTA; val |= CPACR_ELx_TTA;
val &= ~(CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN | val &= ~(CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN |
...@@ -77,6 +97,30 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu) ...@@ -77,6 +97,30 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2); write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
if (has_cntpoff()) {
struct timer_map map;
u64 val, offset;
get_timer_map(vcpu, &map);
/*
* We're exiting the guest. Save the latest CVAL value
* to memory and apply the offset now that TGE is set.
*/
val = read_sysreg_el0(SYS_CNTP_CVAL);
if (map.direct_ptimer == vcpu_ptimer(vcpu))
__vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = val;
if (map.direct_ptimer == vcpu_hptimer(vcpu))
__vcpu_sys_reg(vcpu, CNTHP_CVAL_EL2) = val;
offset = read_sysreg_s(SYS_CNTPOFF_EL2);
if (map.direct_ptimer && offset) {
write_sysreg_el0(val + offset, SYS_CNTP_CVAL);
isb();
}
}
/* /*
* ARM errata 1165522 and 1530923 require the actual execution of the * ARM errata 1165522 and 1530923 require the actual execution of the
* above before we can switch to the EL2/EL0 translation regime used by * above before we can switch to the EL2/EL0 translation regime used by
......
...@@ -82,6 +82,8 @@ struct timer_map { ...@@ -82,6 +82,8 @@ struct timer_map {
struct arch_timer_context *emul_ptimer; struct arch_timer_context *emul_ptimer;
}; };
void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map);
struct arch_timer_cpu { struct arch_timer_cpu {
struct arch_timer_context timers[NR_KVM_TIMERS]; struct arch_timer_context timers[NR_KVM_TIMERS];
...@@ -145,4 +147,9 @@ u64 timer_get_cval(struct arch_timer_context *ctxt); ...@@ -145,4 +147,9 @@ u64 timer_get_cval(struct arch_timer_context *ctxt);
void kvm_timer_cpu_up(void); void kvm_timer_cpu_up(void);
void kvm_timer_cpu_down(void); void kvm_timer_cpu_down(void);
/*
 * True when running VHE on a CPU that implements FEAT_ECV's CNTPOFF_EL2
 * register; exposed in the header so the world-switch code can use it.
 */
static inline bool has_cntpoff(void)
{
return (has_vhe() && cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF));
}
#endif #endif
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment