Commit fbb4aeec authored by Jintack Lim, committed by Marc Zyngier

KVM: arm/arm64: Abstract virtual timer context into separate structure

Abstract the virtual timer context into a separate structure and convert all
code that refers to the timer registers, IRQ state and so on. No change in
functionality.

This is about to become very handy when adding the EL1 physical timer.
Signed-off-by: Jintack Lim <jintack@cs.columbia.edu>
Acked-by: Christoffer Dall <christoffer.dall@linaro.org>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
parent 0bdbf3b0
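The shape of the change, condensed from the diff below (field alignment and the unchanged arch_timer_cpu members are abbreviated):

struct arch_timer_context {
        /* Registers: control register, timer value */
        u32                     cnt_ctl;
        u64                     cnt_cval;

        /* Timer IRQ */
        struct kvm_irq_level    irq;

        /* Active IRQ state caching */
        bool                    active_cleared_last;
};

struct arch_timer_cpu {
        struct arch_timer_context       vtimer;
        /* background timer, armed/enabled state, ... (unchanged) */
};

/* Accessor used throughout the timer and hyp code */
#define vcpu_vtimer(v)  (&(v)->arch.timer_cpu.vtimer)

Callers such as kvm_timer_should_fire() and the hyp save/restore paths then reach the virtual timer state through vcpu_vtimer(vcpu)->cnt_ctl and ->cnt_cval instead of touching vcpu->arch.timer_cpu fields directly.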
@@ -28,15 +28,20 @@ struct arch_timer_kvm {
 	u64	cntvoff;
 };
 
-struct arch_timer_cpu {
+struct arch_timer_context {
 	/* Registers: control register, timer value */
-	u32			cntv_ctl;	/* Saved/restored */
-	u64			cntv_cval;	/* Saved/restored */
+	u32			cnt_ctl;
+	u64			cnt_cval;
+
+	/* Timer IRQ */
+	struct kvm_irq_level	irq;
+
+	/* Active IRQ state caching */
+	bool			active_cleared_last;
+};
 
-	/*
-	 * Anything that is not used directly from assembly code goes
-	 * here.
-	 */
+struct arch_timer_cpu {
+	struct arch_timer_context	vtimer;
 
 	/* Background timer used when the guest is not running */
 	struct hrtimer			timer;
@@ -47,12 +52,6 @@ struct arch_timer_cpu {
 	/* Background timer active */
 	bool			armed;
 
-	/* Timer IRQ */
-	struct kvm_irq_level	irq;
-
-	/* Active IRQ state caching */
-	bool			active_cleared_last;
-
 	/* Is the timer enabled */
 	bool			enabled;
 };
@@ -77,4 +76,6 @@ void kvm_timer_unschedule(struct kvm_vcpu *vcpu);
 void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu);
 
 void kvm_timer_init_vhe(void);
+
+#define vcpu_vtimer(v)	(&(v)->arch.timer_cpu.vtimer)
 #endif
@@ -37,7 +37,7 @@ static u32 host_vtimer_irq_flags;
 
 void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
 {
-	vcpu->arch.timer_cpu.active_cleared_last = false;
+	vcpu_vtimer(vcpu)->active_cleared_last = false;
 }
 
 static u64 kvm_phys_timer_read(void)
@@ -102,7 +102,7 @@ static u64 kvm_timer_compute_delta(struct kvm_vcpu *vcpu)
 {
 	u64 cval, now;
 
-	cval = vcpu->arch.timer_cpu.cntv_cval;
+	cval = vcpu_vtimer(vcpu)->cnt_cval;
 	now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
 
 	if (now < cval) {
@@ -144,21 +144,21 @@ static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
 
 static bool kvm_timer_irq_can_fire(struct kvm_vcpu *vcpu)
 {
-	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 
-	return !(timer->cntv_ctl & ARCH_TIMER_CTRL_IT_MASK) &&
-		(timer->cntv_ctl & ARCH_TIMER_CTRL_ENABLE);
+	return !(vtimer->cnt_ctl & ARCH_TIMER_CTRL_IT_MASK) &&
+		(vtimer->cnt_ctl & ARCH_TIMER_CTRL_ENABLE);
 }
 
 bool kvm_timer_should_fire(struct kvm_vcpu *vcpu)
 {
-	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 	u64 cval, now;
 
 	if (!kvm_timer_irq_can_fire(vcpu))
 		return false;
 
-	cval = timer->cntv_cval;
+	cval = vtimer->cnt_cval;
 	now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
 
 	return cval <= now;
@@ -167,18 +167,18 @@ bool kvm_timer_should_fire(struct kvm_vcpu *vcpu)
 
 static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level)
 {
 	int ret;
-	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 
 	BUG_ON(!vgic_initialized(vcpu->kvm));
 
-	timer->active_cleared_last = false;
-	timer->irq.level = new_level;
-	trace_kvm_timer_update_irq(vcpu->vcpu_id, timer->irq.irq,
-				   timer->irq.level);
+	vtimer->active_cleared_last = false;
+	vtimer->irq.level = new_level;
+	trace_kvm_timer_update_irq(vcpu->vcpu_id, vtimer->irq.irq,
+				   vtimer->irq.level);
 	ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
-				  timer->irq.irq,
-				  timer->irq.level);
+				  vtimer->irq.irq,
+				  vtimer->irq.level);
 
 	WARN_ON(ret);
 }
@@ -189,18 +189,19 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level)
 static int kvm_timer_update_state(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 
 	/*
 	 * If userspace modified the timer registers via SET_ONE_REG before
-	 * the vgic was initialized, we mustn't set the timer->irq.level value
+	 * the vgic was initialized, we mustn't set the vtimer->irq.level value
 	 * because the guest would never see the interrupt. Instead wait
 	 * until we call this function from kvm_timer_flush_hwstate.
 	 */
 	if (!vgic_initialized(vcpu->kvm) || !timer->enabled)
 		return -ENODEV;
 
-	if (kvm_timer_should_fire(vcpu) != timer->irq.level)
-		kvm_timer_update_irq(vcpu, !timer->irq.level);
+	if (kvm_timer_should_fire(vcpu) != vtimer->irq.level)
+		kvm_timer_update_irq(vcpu, !vtimer->irq.level);
 
 	return 0;
 }
@@ -250,7 +251,7 @@ void kvm_timer_unschedule(struct kvm_vcpu *vcpu)
  */
 void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
 {
-	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 	bool phys_active;
 	int ret;
 
@@ -274,8 +275,8 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
 	 * to ensure that hardware interrupts from the timer triggers a guest
 	 * exit.
 	 */
-	phys_active = timer->irq.level ||
-			kvm_vgic_map_is_active(vcpu, timer->irq.irq);
+	phys_active = vtimer->irq.level ||
+			kvm_vgic_map_is_active(vcpu, vtimer->irq.irq);
 
 	/*
 	 * We want to avoid hitting the (re)distributor as much as
@@ -297,7 +298,7 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
 	 *   - cached value is "active clear"
 	 *   - value to be programmed is "active clear"
 	 */
-	if (timer->active_cleared_last && !phys_active)
+	if (vtimer->active_cleared_last && !phys_active)
 		return;
 
 	ret = irq_set_irqchip_state(host_vtimer_irq,
@@ -305,7 +306,7 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
 				    phys_active);
 	WARN_ON(ret);
 
-	timer->active_cleared_last = !phys_active;
+	vtimer->active_cleared_last = !phys_active;
 }
 
 /**
@@ -331,7 +332,7 @@ void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
 int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
 			 const struct kvm_irq_level *irq)
 {
-	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 
 	/*
 	 * The vcpu timer irq number cannot be determined in
@@ -339,7 +340,7 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
 	 * kvm_vcpu_set_target(). To handle this, we determine
 	 * vcpu timer irq number when the vcpu is reset.
 	 */
-	timer->irq.irq = irq->irq;
+	vtimer->irq.irq = irq->irq;
 
 	/*
 	 * The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8
@@ -347,7 +348,7 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
 	 * resets the timer to be disabled and unmasked and is compliant with
 	 * the ARMv7 architecture.
 	 */
-	timer->cntv_ctl = 0;
+	vtimer->cnt_ctl = 0;
 	kvm_timer_update_state(vcpu);
 
 	return 0;
@@ -369,17 +370,17 @@ static void kvm_timer_init_interrupt(void *info)
 
 int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
 {
-	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 
 	switch (regid) {
 	case KVM_REG_ARM_TIMER_CTL:
-		timer->cntv_ctl = value;
+		vtimer->cnt_ctl = value;
 		break;
 	case KVM_REG_ARM_TIMER_CNT:
 		vcpu->kvm->arch.timer.cntvoff = kvm_phys_timer_read() - value;
 		break;
 	case KVM_REG_ARM_TIMER_CVAL:
-		timer->cntv_cval = value;
+		vtimer->cnt_cval = value;
 		break;
 	default:
 		return -1;
@@ -391,15 +392,15 @@ int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
 
 u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
 {
-	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 
 	switch (regid) {
 	case KVM_REG_ARM_TIMER_CTL:
-		return timer->cntv_ctl;
+		return vtimer->cnt_ctl;
 	case KVM_REG_ARM_TIMER_CNT:
 		return kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
 	case KVM_REG_ARM_TIMER_CVAL:
-		return timer->cntv_cval;
+		return vtimer->cnt_cval;
 	}
 	return (u64)-1;
 }
@@ -463,14 +464,16 @@ int kvm_timer_hyp_init(void)
 void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 
 	timer_disarm(timer);
-	kvm_vgic_unmap_phys_irq(vcpu, timer->irq.irq);
+	kvm_vgic_unmap_phys_irq(vcpu, vtimer->irq.irq);
 }
 
 int kvm_timer_enable(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 	struct irq_desc *desc;
 	struct irq_data *data;
 	int phys_irq;
@@ -498,7 +501,7 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
 	 * Tell the VGIC that the virtual interrupt is tied to a
 	 * physical interrupt. We do that once per VCPU.
 	 */
-	ret = kvm_vgic_map_phys_irq(vcpu, timer->irq.irq, phys_irq);
+	ret = kvm_vgic_map_phys_irq(vcpu, vtimer->irq.irq, phys_irq);
 	if (ret)
 		return ret;
 
@@ -25,11 +25,12 @@
 void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 	u64 val;
 
 	if (timer->enabled) {
-		timer->cntv_ctl = read_sysreg_el0(cntv_ctl);
-		timer->cntv_cval = read_sysreg_el0(cntv_cval);
+		vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl);
+		vtimer->cnt_cval = read_sysreg_el0(cntv_cval);
 	}
 
 	/* Disable the virtual timer */
@@ -54,6 +55,7 @@ void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu)
 {
 	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
 	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 	u64 val;
 
 	/* Those bits are already configured at boot on VHE-system */
@@ -70,8 +72,8 @@ void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu)
 
 	if (timer->enabled) {
 		write_sysreg(kvm->arch.timer.cntvoff, cntvoff_el2);
-		write_sysreg_el0(timer->cntv_cval, cntv_cval);
+		write_sysreg_el0(vtimer->cnt_cval, cntv_cval);
 		isb();
-		write_sysreg_el0(timer->cntv_ctl, cntv_ctl);
+		write_sysreg_el0(vtimer->cnt_ctl, cntv_ctl);
 	}
 }