Commit 3ed0b512 authored by Marc Zyngier

KVM: arm64: nv: Compute NV view of idregs as a one-off

Now that we have a full copy of the idregs for each VM, there is
no point in repainting the sysregs on each access. Instead, we
can simply perform the transformation as a one-off and be done
with it.
Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
Signed-off-by: Marc Zyngier <maz@kernel.org>
parent 111903d1
@@ -306,6 +306,7 @@ struct kvm_arch {
	 * Atomic access to multiple idregs are guarded by kvm_arch.config_lock.
	 */
 #define IDREG_IDX(id)		(((sys_reg_CRm(id) - 1) << 3) | sys_reg_Op2(id))
+#define IDX_IDREG(idx)		sys_reg(3, 0, 0, ((idx) >> 3) + 1, (idx) & Op2_mask)
 #define IDREG(kvm, id)		((kvm)->arch.id_regs[IDREG_IDX(id)])
 #define KVM_ARM_ID_REG_NUM	(IDREG_IDX(sys_reg(3, 0, 0, 7, 7)) + 1)
	u64 id_regs[KVM_ARM_ID_REG_NUM];
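The new IDX_IDREG() macro is the inverse of the existing IDREG_IDX(): it maps an index into kvm->arch.id_regs[] back to the corresponding Op0=3/Op1=0/CRn=0 register encoding, which the init loop added later in this commit needs while walking the array. Below is a minimal standalone sketch of that round-trip; only the IDREG_IDX()/IDX_IDREG()/KVM_ARM_ID_REG_NUM lines are taken from the hunk above, and the sys_reg()/sys_reg_CRm()/sys_reg_Op2() helpers are simplified stand-ins whose bit positions are illustrative assumptions, not the kernel's sysreg.h definitions.

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's sysreg encoding helpers (assumed layout). */
#define Op2_mask	0x7U
#define sys_reg(op0, op1, crn, crm, op2) \
	((uint32_t)(op0) << 19 | (uint32_t)(op1) << 16 | \
	 (uint32_t)(crn) << 12 | (uint32_t)(crm) << 8 | (uint32_t)(op2) << 5)
#define sys_reg_CRm(id)	(((id) >> 8) & 0xfU)
#define sys_reg_Op2(id)	(((id) >> 5) & Op2_mask)

/* The macros from the hunk above: array index <-> ID-register encoding. */
#define IDREG_IDX(id)	(((sys_reg_CRm(id) - 1) << 3) | sys_reg_Op2(id))
#define IDX_IDREG(idx)	sys_reg(3, 0, 0, ((idx) >> 3) + 1, (idx) & Op2_mask)
#define KVM_ARM_ID_REG_NUM	(IDREG_IDX(sys_reg(3, 0, 0, 7, 7)) + 1)

int main(void)
{
	/* Every slot maps to an encoding and back to the same slot. */
	for (uint32_t i = 0; i < KVM_ARM_ID_REG_NUM; i++)
		assert(IDREG_IDX(IDX_IDREG(i)) == i);

	printf("%u ID-register slots round-trip cleanly\n",
	       (unsigned)KVM_ARM_ID_REG_NUM);
	return 0;
}
```

With CRm ranging over 1..7 and Op2 over 0..7, this is the 56-entry space that KVM_ARM_ID_REG_NUM accounts for.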
@@ -14,10 +14,6 @@ static inline bool vcpu_has_nv(const struct kvm_vcpu *vcpu)
 extern bool __check_nv_sr_forward(struct kvm_vcpu *vcpu);
 
-struct sys_reg_params;
-struct sys_reg_desc;
-
-void access_nested_id_reg(struct kvm_vcpu *v, struct sys_reg_params *p,
-			  const struct sys_reg_desc *r);
+int kvm_init_nv_sysregs(struct kvm *kvm);
 
 #endif /* __ARM64_KVM_NESTED_H */
@@ -669,6 +669,12 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
			return ret;
	}
 
+	if (vcpu_has_nv(vcpu)) {
+		ret = kvm_init_nv_sysregs(vcpu->kvm);
+		if (ret)
+			return ret;
+	}
+
	ret = kvm_timer_enable(vcpu);
	if (ret)
		return ret;
@@ -23,13 +23,9 @@
  * This list should get updated as new features get added to the NV
  * support, and new extension to the architecture.
  */
-void access_nested_id_reg(struct kvm_vcpu *v, struct sys_reg_params *p,
-			  const struct sys_reg_desc *r)
+static u64 limit_nv_id_reg(u32 id, u64 val)
 {
-	u32 id = reg_to_encoding(r);
-	u64 val, tmp;
-
-	val = p->regval;
+	u64 tmp;
 
	switch (id) {
	case SYS_ID_AA64ISAR0_EL1:
@@ -158,5 +154,17 @@ void access_nested_id_reg(struct kvm_vcpu *v, struct sys_reg_params *p,
		break;
	}
 
-	p->regval = val;
+	return val;
+}
+
+int kvm_init_nv_sysregs(struct kvm *kvm)
+{
+	mutex_lock(&kvm->arch.config_lock);
+
+	for (int i = 0; i < KVM_ARM_ID_REG_NUM; i++)
+		kvm->arch.id_regs[i] = limit_nv_id_reg(IDX_IDREG(i),
+						       kvm->arch.id_regs[i]);
+	mutex_unlock(&kvm->arch.config_lock);
+
+	return 0;
 }
@@ -1505,8 +1505,6 @@ static bool access_id_reg(struct kvm_vcpu *vcpu,
		return write_to_read_only(vcpu, p, r);
 
	p->regval = read_id_reg(vcpu, r);
-	if (vcpu_has_nv(vcpu))
-		access_nested_id_reg(vcpu, p, r);
 
	return true;
 }
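Taken together, the last two hunks are the point of the change: the NV limiting that used to run on every trapped ID-register read now runs once, before the vCPU first enters the guest, so the stored copy is already sanitised and the read path becomes a plain array load. The sketch below is a toy model of that before/after shape, not kernel code; limit_one(), init_nv_id_regs(), struct toy_vm and the mask value are hypothetical stand-ins for illustration only.

```c
#include <stdint.h>
#include <stdio.h>

#define NUM_ID_REGS 56	/* same count as KVM_ARM_ID_REG_NUM, for illustration */

/* Hypothetical stand-in for limit_nv_id_reg(): hide a field the NV guest may not see. */
static uint64_t limit_one(uint32_t idx, uint64_t val)
{
	(void)idx;
	return val & ~0xfULL;
}

struct toy_vm {
	uint64_t id_regs[NUM_ID_REGS];
};

/* Old shape: re-limit the stored value on every guest access. */
static uint64_t read_id_reg_old(const struct toy_vm *vm, uint32_t idx)
{
	return limit_one(idx, vm->id_regs[idx]);
}

/* New shape: limit every register once at init... */
static void init_nv_id_regs(struct toy_vm *vm)
{
	for (uint32_t i = 0; i < NUM_ID_REGS; i++)
		vm->id_regs[i] = limit_one(i, vm->id_regs[i]);
}

/* ...so reads become plain loads of the already-sanitised copy. */
static uint64_t read_id_reg_new(const struct toy_vm *vm, uint32_t idx)
{
	return vm->id_regs[idx];
}

int main(void)
{
	struct toy_vm vm = { .id_regs = { [0] = 0x112f } };

	printf("on-access limiting: %#llx\n",
	       (unsigned long long)read_id_reg_old(&vm, 0));
	init_nv_id_regs(&vm);
	printf("one-off limiting:   %#llx\n",
	       (unsigned long long)read_id_reg_new(&vm, 0));
	return 0;
}
```

In the commit itself, the one-off pass is kvm_init_nv_sysregs(), which walks kvm->arch.id_regs[] under kvm->arch.config_lock and is called from kvm_arch_vcpu_run_pid_change() when the vCPU has nested virt enabled.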