Commit 06394531 authored by Marc Zyngier's avatar Marc Zyngier

KVM: arm64: Generalise VM features into a set of flags

We currently deal with a set of booleans for VM features,
while they could be better represented as a set of flags
contained in an unsigned long, similarly to what we are
doing on the CPU side.
Signed-off-by: Marc Zyngier <maz@kernel.org>
[Oliver: Flag-ify the 'ran_once' boolean]
Signed-off-by: Oliver Upton <oupton@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20220311174001.605719-2-oupton@google.com
parent 9872e6bc
...@@ -122,7 +122,12 @@ struct kvm_arch { ...@@ -122,7 +122,12 @@ struct kvm_arch {
* should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is
* supported. * supported.
*/ */
bool return_nisv_io_abort_to_user; #define KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER 0
/* Memory Tagging Extension enabled for the guest */
#define KVM_ARCH_FLAG_MTE_ENABLED 1
/* At least one vCPU has ran in the VM */
#define KVM_ARCH_FLAG_HAS_RAN_ONCE 2
unsigned long flags;
/* /*
* VM-wide PMU filter, implemented as a bitmap and big enough for * VM-wide PMU filter, implemented as a bitmap and big enough for
...@@ -135,10 +140,6 @@ struct kvm_arch { ...@@ -135,10 +140,6 @@ struct kvm_arch {
u8 pfr0_csv2; u8 pfr0_csv2;
u8 pfr0_csv3; u8 pfr0_csv3;
/* Memory Tagging Extension enabled for the guest */
bool mte_enabled;
bool ran_once;
}; };
struct kvm_vcpu_fault_info { struct kvm_vcpu_fault_info {
...@@ -810,7 +811,9 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu); ...@@ -810,7 +811,9 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
#define kvm_arm_vcpu_sve_finalized(vcpu) \ #define kvm_arm_vcpu_sve_finalized(vcpu) \
((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED) ((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)
#define kvm_has_mte(kvm) (system_supports_mte() && (kvm)->arch.mte_enabled) #define kvm_has_mte(kvm) \
(system_supports_mte() && \
test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &(kvm)->arch.flags))
#define kvm_vcpu_has_pmu(vcpu) \ #define kvm_vcpu_has_pmu(vcpu) \
(test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features)) (test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))
......
...@@ -84,7 +84,8 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, ...@@ -84,7 +84,8 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
switch (cap->cap) { switch (cap->cap) {
case KVM_CAP_ARM_NISV_TO_USER: case KVM_CAP_ARM_NISV_TO_USER:
r = 0; r = 0;
kvm->arch.return_nisv_io_abort_to_user = true; set_bit(KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER,
&kvm->arch.flags);
break; break;
case KVM_CAP_ARM_MTE: case KVM_CAP_ARM_MTE:
mutex_lock(&kvm->lock); mutex_lock(&kvm->lock);
...@@ -92,7 +93,7 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, ...@@ -92,7 +93,7 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
r = -EINVAL; r = -EINVAL;
} else { } else {
r = 0; r = 0;
kvm->arch.mte_enabled = true; set_bit(KVM_ARCH_FLAG_MTE_ENABLED, &kvm->arch.flags);
} }
mutex_unlock(&kvm->lock); mutex_unlock(&kvm->lock);
break; break;
...@@ -559,7 +560,7 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu) ...@@ -559,7 +560,7 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
kvm_call_hyp_nvhe(__pkvm_vcpu_init_traps, vcpu); kvm_call_hyp_nvhe(__pkvm_vcpu_init_traps, vcpu);
mutex_lock(&kvm->lock); mutex_lock(&kvm->lock);
kvm->arch.ran_once = true; set_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags);
mutex_unlock(&kvm->lock); mutex_unlock(&kvm->lock);
return ret; return ret;
......
...@@ -135,7 +135,8 @@ int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa) ...@@ -135,7 +135,8 @@ int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
* volunteered to do so, and bail out otherwise. * volunteered to do so, and bail out otherwise.
*/ */
if (!kvm_vcpu_dabt_isvalid(vcpu)) { if (!kvm_vcpu_dabt_isvalid(vcpu)) {
if (vcpu->kvm->arch.return_nisv_io_abort_to_user) { if (test_bit(KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER,
&vcpu->kvm->arch.flags)) {
run->exit_reason = KVM_EXIT_ARM_NISV; run->exit_reason = KVM_EXIT_ARM_NISV;
run->arm_nisv.esr_iss = kvm_vcpu_dabt_iss_nisv_sanitized(vcpu); run->arm_nisv.esr_iss = kvm_vcpu_dabt_iss_nisv_sanitized(vcpu);
run->arm_nisv.fault_ipa = fault_ipa; run->arm_nisv.fault_ipa = fault_ipa;
......
...@@ -961,7 +961,7 @@ static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id) ...@@ -961,7 +961,7 @@ static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
list_for_each_entry(entry, &arm_pmus, entry) { list_for_each_entry(entry, &arm_pmus, entry) {
arm_pmu = entry->arm_pmu; arm_pmu = entry->arm_pmu;
if (arm_pmu->pmu.type == pmu_id) { if (arm_pmu->pmu.type == pmu_id) {
if (kvm->arch.ran_once || if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags) ||
(kvm->arch.pmu_filter && kvm->arch.arm_pmu != arm_pmu)) { (kvm->arch.pmu_filter && kvm->arch.arm_pmu != arm_pmu)) {
ret = -EBUSY; ret = -EBUSY;
break; break;
...@@ -1044,7 +1044,7 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) ...@@ -1044,7 +1044,7 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
mutex_lock(&kvm->lock); mutex_lock(&kvm->lock);
if (kvm->arch.ran_once) { if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags)) {
mutex_unlock(&kvm->lock); mutex_unlock(&kvm->lock);
return -EBUSY; return -EBUSY;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment