Commit d47dcb67 authored by Oliver Upton

Merge branch kvm-arm64/feature-flag-refactor into kvmarm/next

* kvm-arm64/feature-flag-refactor:
  : vCPU feature flag cleanup
  :
  : Clean up KVM's handling of vCPU feature flags to get rid of the
  : vCPU-scoped bitmaps and remove failure paths from kvm_reset_vcpu().
  KVM: arm64: Get rid of vCPU-scoped feature bitmap
  KVM: arm64: Remove unused return value from kvm_reset_vcpu()
  KVM: arm64: Hoist NV+SVE check into KVM_ARM_VCPU_INIT ioctl handler
  KVM: arm64: Prevent NV feature flag on systems w/o nested virt
  KVM: arm64: Hoist PAuth checks into KVM_ARM_VCPU_INIT ioctl
  KVM: arm64: Hoist SVE check into KVM_ARM_VCPU_INIT ioctl handler
  KVM: arm64: Hoist PMUv3 check into KVM_ARM_VCPU_INIT ioctl handler
  KVM: arm64: Add generic check for system-supported vCPU features
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
parents 054056bf 1de10b7d
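
For context, a minimal userspace sketch of the KVM_ARM_VCPU_INIT flow this series tightens. It is illustrative only and not part of the diff below; the helper name init_vcpu and the fd parameters are hypothetical, while the ioctls and struct kvm_vcpu_init are the standard UAPI from <linux/kvm.h>. With the checks hoisted into the ioctl handler, requesting an unsupported feature now fails the ioctl itself with -EINVAL instead of surfacing later from kvm_reset_vcpu(), and the first successful init records the VM-wide feature set that every later vCPU must match.

/* Illustrative sketch only; error handling trimmed. */
#include <stdbool.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int init_vcpu(int vm_fd, int vcpu_fd, bool want_sve)
{
        struct kvm_vcpu_init init;

        memset(&init, 0, sizeof(init));

        /* Let KVM pick the preferred target CPU for this host. */
        if (ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init) < 0)
                return -1;

        if (want_sve)
                init.features[0] |= 1U << KVM_ARM_VCPU_SVE;

        /*
         * After this series, feature validation happens here: asking for
         * SVE on a host without it fails with -EINVAL at ioctl time, and
         * the first vCPU to succeed fixes the VM-wide feature set.
         */
        return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
}

(A vCPU with SVE must still be finalized via KVM_ARM_VCPU_FINALIZE before it can run; that part is unchanged by this series.)
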
@@ -54,6 +54,11 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu);
 int kvm_inject_nested_sync(struct kvm_vcpu *vcpu, u64 esr_el2);
 int kvm_inject_nested_irq(struct kvm_vcpu *vcpu);
 
+static inline bool vcpu_has_feature(const struct kvm_vcpu *vcpu, int feature)
+{
+        return test_bit(feature, vcpu->kvm->arch.vcpu_features);
+}
+
 #if defined(__KVM_VHE_HYPERVISOR__) || defined(__KVM_NVHE_HYPERVISOR__)
 static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
 {
@@ -62,7 +67,7 @@ static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
 #else
 static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
 {
-        return test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features);
+        return vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
 }
 #endif
@@ -565,12 +570,6 @@ static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
                vcpu_set_flag((v), e);                                  \
        } while (0)
 
-static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
-{
-        return test_bit(feature, vcpu->arch.features);
-}
-
 static __always_inline void kvm_write_cptr_el2(u64 val)
 {
         if (has_vhe() || has_hvhe())
...
@@ -78,7 +78,7 @@ extern unsigned int __ro_after_init kvm_sve_max_vl;
 int __init kvm_arm_init_sve(void);
 
 u32 __attribute_const__ kvm_target_cpu(void);
-int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
+void kvm_reset_vcpu(struct kvm_vcpu *vcpu);
 void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);
 
 struct kvm_hyp_memcache {
@@ -574,9 +574,6 @@ struct kvm_vcpu_arch {
         /* Cache some mmu pages needed inside spinlock regions */
         struct kvm_mmu_memory_cache mmu_page_cache;
 
-        /* feature flags */
-        DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
-
         /* Virtual SError ESR to restore when HCR_EL2.VSE is set */
         u64 vsesr_el2;
...
@@ -2,13 +2,14 @@
 #ifndef __ARM64_KVM_NESTED_H
 #define __ARM64_KVM_NESTED_H
 
+#include <asm/kvm_emulate.h>
 #include <linux/kvm_host.h>
 
 static inline bool vcpu_has_nv(const struct kvm_vcpu *vcpu)
 {
         return (!__is_defined(__KVM_NVHE_HYPERVISOR__) &&
                 cpus_have_final_cap(ARM64_HAS_NESTED_VIRT) &&
-                test_bit(KVM_ARM_VCPU_HAS_EL2, vcpu->arch.features));
+                vcpu_has_feature(vcpu, KVM_ARM_VCPU_HAS_EL2));
 }
 
 extern bool __check_nv_sr_forward(struct kvm_vcpu *vcpu);
...
@@ -943,7 +943,7 @@ void kvm_timer_sync_user(struct kvm_vcpu *vcpu)
                unmask_vtimer_irq_user(vcpu);
 }
 
-int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
+void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
 {
         struct arch_timer_cpu *timer = vcpu_timer(vcpu);
         struct timer_map map;
@@ -987,8 +987,6 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
                soft_timer_cancel(&map.emul_vtimer->hrtimer);
        if (map.emul_ptimer)
                soft_timer_cancel(&map.emul_ptimer->hrtimer);
-
-        return 0;
 }
 
 static void timer_context_init(struct kvm_vcpu *vcpu, int timerid)
...
@@ -367,7 +367,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
         /* Force users to call KVM_ARM_VCPU_INIT */
         vcpu_clear_flag(vcpu, VCPU_INITIALIZED);
-        bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
 
         vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
@@ -1190,6 +1189,30 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
                return -EINVAL;
 }
 
+static unsigned long system_supported_vcpu_features(void)
+{
+        unsigned long features = KVM_VCPU_VALID_FEATURES;
+
+        if (!cpus_have_final_cap(ARM64_HAS_32BIT_EL1))
+                clear_bit(KVM_ARM_VCPU_EL1_32BIT, &features);
+
+        if (!kvm_arm_support_pmu_v3())
+                clear_bit(KVM_ARM_VCPU_PMU_V3, &features);
+
+        if (!system_supports_sve())
+                clear_bit(KVM_ARM_VCPU_SVE, &features);
+
+        if (!system_has_full_ptr_auth()) {
+                clear_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, &features);
+                clear_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, &features);
+        }
+
+        if (!cpus_have_final_cap(ARM64_HAS_NESTED_VIRT))
+                clear_bit(KVM_ARM_VCPU_HAS_EL2, &features);
+
+        return features;
+}
+
 static int kvm_vcpu_init_check_features(struct kvm_vcpu *vcpu,
                                         const struct kvm_vcpu_init *init)
 {
@@ -1204,12 +1227,25 @@ static int kvm_vcpu_init_check_features(struct kvm_vcpu *vcpu,
                        return -ENOENT;
        }
 
-        if (!test_bit(KVM_ARM_VCPU_EL1_32BIT, &features))
-                return 0;
+        if (features & ~system_supported_vcpu_features())
+                return -EINVAL;
 
-        if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1))
+        /*
+         * For now make sure that both address/generic pointer authentication
+         * features are requested by the userspace together.
+         */
+        if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, &features) !=
+            test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, &features))
+                return -EINVAL;
+
+        /* Disallow NV+SVE for the time being */
+        if (test_bit(KVM_ARM_VCPU_HAS_EL2, &features) &&
+            test_bit(KVM_ARM_VCPU_SVE, &features))
                 return -EINVAL;
 
+        if (!test_bit(KVM_ARM_VCPU_EL1_32BIT, &features))
+                return 0;
+
         /* MTE is incompatible with AArch32 */
         if (kvm_has_mte(vcpu->kvm))
                 return -EINVAL;
@@ -1226,7 +1262,8 @@ static bool kvm_vcpu_init_changed(struct kvm_vcpu *vcpu,
 {
         unsigned long features = init->features[0];
 
-        return !bitmap_equal(vcpu->arch.features, &features, KVM_VCPU_MAX_FEATURES);
+        return !bitmap_equal(vcpu->kvm->arch.vcpu_features, &features,
+                             KVM_VCPU_MAX_FEATURES);
 }
 
 static int __kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
@@ -1239,21 +1276,17 @@ static int __kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
         mutex_lock(&kvm->arch.config_lock);
 
         if (test_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags) &&
-            !bitmap_equal(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES))
+            kvm_vcpu_init_changed(vcpu, init))
                goto out_unlock;
 
-        bitmap_copy(vcpu->arch.features, &features, KVM_VCPU_MAX_FEATURES);
+        bitmap_copy(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES);
 
         /* Now we know what it is, we can reset it. */
-        ret = kvm_reset_vcpu(vcpu);
-        if (ret) {
-                bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
-                goto out_unlock;
-        }
-        bitmap_copy(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES);
+        kvm_reset_vcpu(vcpu);
+
         set_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags);
         vcpu_set_flag(vcpu, VCPU_INITIALIZED);
         ret = 0;
 out_unlock:
         mutex_unlock(&kvm->arch.config_lock);
         return ret;
@@ -1278,7 +1311,8 @@ static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
         if (kvm_vcpu_init_changed(vcpu, init))
                return -EINVAL;
 
-        return kvm_reset_vcpu(vcpu);
+        kvm_reset_vcpu(vcpu);
+        return 0;
 }
 
 static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
...
@@ -554,7 +554,7 @@ int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 {
         bool wants_02;
 
-        wants_02 = test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features);
+        wants_02 = vcpu_has_feature(vcpu, KVM_ARM_VCPU_PSCI_0_2);
 
         switch (val) {
         case KVM_ARM_PSCI_0_1:
...
@@ -73,11 +73,8 @@ int __init kvm_arm_init_sve(void)
         return 0;
 }
 
-static int kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu)
+static void kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu)
 {
-        if (!system_supports_sve())
-                return -EINVAL;
-
         vcpu->arch.sve_max_vl = kvm_sve_max_vl;
 
         /*
@@ -86,8 +83,6 @@ static int kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu)
          * kvm_arm_vcpu_finalize(), which freezes the configuration.
          */
         vcpu_set_flag(vcpu, GUEST_HAS_SVE);
-
-        return 0;
 }
 
 /*
@@ -170,20 +165,9 @@ static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)
                memset(vcpu->arch.sve_state, 0, vcpu_sve_state_size(vcpu));
 }
 
-static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
+static void kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
 {
-        /*
-         * For now make sure that both address/generic pointer authentication
-         * features are requested by the userspace together and the system
-         * supports these capabilities.
-         */
-        if (!test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
-            !test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features) ||
-            !system_has_full_ptr_auth())
-                return -EINVAL;
-
         vcpu_set_flag(vcpu, GUEST_HAS_PTRAUTH);
-
-        return 0;
 }
 
 /**
@@ -204,10 +188,9 @@ static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
  * disable preemption around the vcpu reset as we would otherwise race with
  * preempt notifiers which also call put/load.
  */
-int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
+void kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 {
         struct vcpu_reset_state reset_state;
-        int ret;
         bool loaded;
         u32 pstate;
@@ -224,29 +207,16 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
         if (loaded)
                kvm_arch_vcpu_put(vcpu);
 
-        /* Disallow NV+SVE for the time being */
-        if (vcpu_has_nv(vcpu) && vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE)) {
-                ret = -EINVAL;
-                goto out;
-        }
-
         if (!kvm_arm_vcpu_sve_finalized(vcpu)) {
-                if (test_bit(KVM_ARM_VCPU_SVE, vcpu->arch.features)) {
-                        ret = kvm_vcpu_enable_sve(vcpu);
-                        if (ret)
-                                goto out;
-                }
+                if (vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE))
+                        kvm_vcpu_enable_sve(vcpu);
         } else {
                 kvm_vcpu_reset_sve(vcpu);
         }
 
-        if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
-            test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features)) {
-                if (kvm_vcpu_enable_ptrauth(vcpu)) {
-                        ret = -EINVAL;
-                        goto out;
-                }
-        }
+        if (vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_ADDRESS) ||
+            vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_GENERIC))
+                kvm_vcpu_enable_ptrauth(vcpu);
 
         if (vcpu_el1_is_32bit(vcpu))
                pstate = VCPU_RESET_PSTATE_SVC;
@@ -255,11 +225,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
         else
                pstate = VCPU_RESET_PSTATE_EL1;
 
-        if (kvm_vcpu_has_pmu(vcpu) && !kvm_arm_support_pmu_v3()) {
-                ret = -EINVAL;
-                goto out;
-        }
-
         /* Reset core registers */
         memset(vcpu_gp_regs(vcpu), 0, sizeof(*vcpu_gp_regs(vcpu)));
         memset(&vcpu->arch.ctxt.fp_regs, 0, sizeof(vcpu->arch.ctxt.fp_regs));
@@ -294,12 +259,11 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
         }
 
         /* Reset timer */
-        ret = kvm_timer_vcpu_reset(vcpu);
-out:
+        kvm_timer_vcpu_reset(vcpu);
+
         if (loaded)
                kvm_arch_vcpu_load(vcpu, smp_processor_id());
         preempt_enable();
-        return ret;
 }
 
 u32 get_kvm_ipa_limit(void)
...
@@ -94,7 +94,7 @@ struct arch_timer_cpu {
 int __init kvm_timer_hyp_init(bool has_gic);
 int kvm_timer_enable(struct kvm_vcpu *vcpu);
-int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu);
+void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu);
 void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
 void kvm_timer_sync_user(struct kvm_vcpu *vcpu);
 bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu);
...
@@ -77,7 +77,7 @@ void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
 void kvm_vcpu_pmu_resync_el0(void);
 
 #define kvm_vcpu_has_pmu(vcpu)                                  \
-        (test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))
+        (vcpu_has_feature(vcpu, KVM_ARM_VCPU_PMU_V3))
 
 /*
  * Updates the vcpu's view of the pmu events for this cpu.
...
@@ -26,7 +26,7 @@ static inline int kvm_psci_version(struct kvm_vcpu *vcpu)
          * revisions. It is thus safe to return the latest, unless
          * userspace has instructed us otherwise.
          */
-        if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) {
+        if (vcpu_has_feature(vcpu, KVM_ARM_VCPU_PSCI_0_2)) {
                 if (vcpu->kvm->arch.psci_version)
                         return vcpu->kvm->arch.psci_version;
...