Commit 3df59d8d authored by Christoffer Dall, committed by Marc Zyngier

KVM: arm/arm64: Get rid of vcpu->arch.irq_lines

We currently have a separate read-modify-write of the HCR_EL2 on entry
to the guest for the sole purpose of setting the VF and VI bits, if set.
Since this is rarely the case (only when using the userspace IRQ chip
and interrupts are in flight), let's get rid of this operation and
instead modify the bits in the vcpu->arch.hcr[_el2] directly when
needed.
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Reviewed-by: Andrew Jones <drjones@redhat.com>
Reviewed-by: Julien Thierry <julien.thierry@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
parent 35a84dec
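For orientation, here is an editor's sketch (not part of the commit) of what the change buys on the entry path. Before, both __activate_traps and the vgic restore path had to OR the cached irq_lines value into the HCR register image on every guest entry; after, the VI/VF bits are maintained in vcpu->arch.hcr[_el2] itself and a single plain write suffices. The helper names guest_entry_old/guest_entry_new are hypothetical; write_sysreg and the field names follow the arm64 hunks below.

/* Hypothetical sketch of the entry-path simplification; these are
 * illustrative helpers, not functions from this commit. */
static void guest_entry_old(struct kvm_vcpu *vcpu)
{
	/* Read-modify-write: fold the cached IRQ/FIQ levels in every time. */
	write_sysreg(vcpu->arch.hcr_el2 | vcpu->arch.irq_lines, hcr_el2);
}

static void guest_entry_new(struct kvm_vcpu *vcpu)
{
	/* VI/VF already live in the shadow HCR value; one write suffices. */
	write_sysreg(vcpu->arch.hcr_el2, hcr_el2);
}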
@@ -92,14 +92,9 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
 	vcpu->arch.hcr = HCR_GUEST_MASK;
 }
 
-static inline unsigned long vcpu_get_hcr(const struct kvm_vcpu *vcpu)
+static inline unsigned long *vcpu_hcr(const struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.hcr;
-}
-
-static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
-{
-	vcpu->arch.hcr = hcr;
+	return (unsigned long *)&vcpu->arch.hcr;
 }
 
 static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)

@@ -155,9 +155,6 @@ struct kvm_vcpu_arch {
 	/* HYP trapping configuration */
 	u32 hcr;
 
-	/* Interrupt related fields */
-	u32 irq_lines;		/* IRQ and FIQ levels */
-
 	/* Exception Information */
 	struct kvm_vcpu_fault_info fault;

@@ -174,5 +174,5 @@ unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu)
  */
 void kvm_inject_vabt(struct kvm_vcpu *vcpu)
 {
-	vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) | HCR_VA);
+	*vcpu_hcr(vcpu) |= HCR_VA;
 }

@@ -44,7 +44,7 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu, u32 *fpexc_host)
 		isb();
 	}
 
-	write_sysreg(vcpu->arch.hcr | vcpu->arch.irq_lines, HCR);
+	write_sysreg(vcpu->arch.hcr, HCR);
 	/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
 	write_sysreg(HSTR_T(15), HSTR);
 	write_sysreg(HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11), HCPTR);

@@ -69,14 +69,9 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
 		vcpu->arch.hcr_el2 |= HCR_TID3;
 }
 
-static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
+static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.hcr_el2;
-}
-
-static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
-{
-	vcpu->arch.hcr_el2 = hcr;
+	return (unsigned long *)&vcpu->arch.hcr_el2;
 }
 
 static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
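Both architectures replace the vcpu_get_hcr()/vcpu_set_hcr() pair with a single pointer-returning vcpu_hcr(), and callers update the register image in place. The cast is size-safe on each architecture (unsigned long is 32 bits on arm, matching the u32 hcr field, and 64 bits on arm64, matching the u64 hcr_el2 field) and lets callers hand the pointer straight to the generic bitops. A condensed usage sketch follows; toggle_vm_traps() is an illustrative name, not a kernel function:

/* Sketch: read-modify-write updates through the new accessor, mirroring
 * the kvm_set_way_flush()/kvm_toggle_cache() hunks further down. */
static void toggle_vm_traps(struct kvm_vcpu *vcpu, bool trap)
{
	if (trap)
		*vcpu_hcr(vcpu) |= HCR_TVM;	/* trap VM-register accesses */
	else
		*vcpu_hcr(vcpu) &= ~HCR_TVM;	/* stop trapping them */
}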

@@ -272,9 +272,6 @@ struct kvm_vcpu_arch {
 	/* IO related fields */
 	struct kvm_decode mmio_decode;
 
-	/* Interrupt related fields */
-	u64 irq_lines;		/* IRQ and FIQ levels */
-
 	/* Cache some mmu pages needed inside spinlock regions */
 	struct kvm_mmu_memory_cache mmu_page_cache;

@@ -178,12 +178,6 @@ static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
 
 static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
 {
-	u64 val;
-
-	val = read_sysreg(hcr_el2);
-	val |= vcpu->arch.irq_lines;
-	write_sysreg(val, hcr_el2);
-
 	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
 		__vgic_v3_restore_state(vcpu);
 	else

@@ -167,7 +167,7 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
 static void pend_guest_serror(struct kvm_vcpu *vcpu, u64 esr)
 {
 	vcpu_set_vsesr(vcpu, esr);
-	vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) | HCR_VSE);
+	*vcpu_hcr(vcpu) |= HCR_VSE;
 }
 
 /**

@@ -420,7 +420,8 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
  */
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 {
-	return ((!!v->arch.irq_lines || kvm_vgic_vcpu_pending_irq(v))
+	bool irq_lines = *vcpu_hcr(v) & (HCR_VI | HCR_VF);
+	return ((irq_lines || kvm_vgic_vcpu_pending_irq(v))
 		&& !v->arch.power_off && !v->arch.pause);
 }

@@ -814,18 +815,18 @@ static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
 {
 	int bit_index;
 	bool set;
-	unsigned long *ptr;
+	unsigned long *hcr;
 
 	if (number == KVM_ARM_IRQ_CPU_IRQ)
 		bit_index = __ffs(HCR_VI);
 	else /* KVM_ARM_IRQ_CPU_FIQ */
 		bit_index = __ffs(HCR_VF);
 
-	ptr = (unsigned long *)&vcpu->arch.irq_lines;
+	hcr = vcpu_hcr(vcpu);
 	if (level)
-		set = test_and_set_bit(bit_index, ptr);
+		set = test_and_set_bit(bit_index, hcr);
 	else
-		set = test_and_clear_bit(bit_index, ptr);
+		set = test_and_clear_bit(bit_index, hcr);
 
 	/*
 	 * If we didn't change anything, no need to wake up or kick other CPUs
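Note the subtlety this hunk preserves: userspace can inject IRQ/FIQ levels from a thread other than the one running the vCPU, so the update must be atomic. The bit position of VI or VF inside the HCR word comes from __ffs() on the mask, the flip goes through test_and_set_bit()/test_and_clear_bit(), and the returned old value lets the caller skip the wakeup or kick when the level did not actually change. A condensed sketch of the pattern, with set_irq_line() as an illustrative name:

/* Sketch of the atomic line-update pattern above; returns true when the
 * line level actually changed and the vCPU therefore needs a kick. */
static bool set_irq_line(struct kvm_vcpu *vcpu, bool is_fiq, bool level)
{
	int bit = is_fiq ? __ffs(HCR_VF) : __ffs(HCR_VI);
	unsigned long *hcr = vcpu_hcr(vcpu);
	bool was_set;

	if (level)
		was_set = test_and_set_bit(bit, hcr);
	else
		was_set = test_and_clear_bit(bit, hcr);

	return was_set != level;	/* old level differs from new level */
}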

@@ -2035,7 +2035,7 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
  */
 void kvm_set_way_flush(struct kvm_vcpu *vcpu)
 {
-	unsigned long hcr = vcpu_get_hcr(vcpu);
+	unsigned long hcr = *vcpu_hcr(vcpu);
 
 	/*
 	 * If this is the first time we do a S/W operation
@@ -2050,7 +2050,7 @@ void kvm_set_way_flush(struct kvm_vcpu *vcpu)
 		trace_kvm_set_way_flush(*vcpu_pc(vcpu),
 					vcpu_has_cache_enabled(vcpu));
 		stage2_flush_vm(vcpu->kvm);
-		vcpu_set_hcr(vcpu, hcr | HCR_TVM);
+		*vcpu_hcr(vcpu) = hcr | HCR_TVM;
 	}
 }

@@ -2068,7 +2068,7 @@ void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
 
 	/* Caches are now on, stop trapping VM ops (until a S/W op) */
 	if (now_enabled)
-		vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) & ~HCR_TVM);
+		*vcpu_hcr(vcpu) &= ~HCR_TVM;
 
 	trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
 }