Commit d34b7648 authored by Oliver Upton

KVM: arm64: Only insert reserved ranges when SMCCC filter is used

The reserved ranges are only useful for preventing userspace from
adding a rule that intersects with functions we must handle in KVM. If
userspace never writes to the SMCCC filter then this is all just wasted
work/memory.

Insert reserved ranges on the first call to KVM_ARM_VM_SMCCC_FILTER.
Reviewed-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20231004234947.207507-3-oliver.upton@linux.dev
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
parent bb17fb31
...@@ -133,12 +133,10 @@ static bool kvm_smccc_test_fw_bmap(struct kvm_vcpu *vcpu, u32 func_id) ...@@ -133,12 +133,10 @@ static bool kvm_smccc_test_fw_bmap(struct kvm_vcpu *vcpu, u32 func_id)
ARM_SMCCC_SMC_64, \ ARM_SMCCC_SMC_64, \
0, ARM_SMCCC_FUNC_MASK) 0, ARM_SMCCC_FUNC_MASK)
static void init_smccc_filter(struct kvm *kvm) static int kvm_smccc_filter_insert_reserved(struct kvm *kvm)
{ {
int r; int r;
mt_init(&kvm->arch.smccc_filter);
/* /*
* Prevent userspace from handling any SMCCC calls in the architecture * Prevent userspace from handling any SMCCC calls in the architecture
* range, avoiding the risk of misrepresenting Spectre mitigation status * range, avoiding the risk of misrepresenting Spectre mitigation status
...@@ -148,14 +146,20 @@ static void init_smccc_filter(struct kvm *kvm) ...@@ -148,14 +146,20 @@ static void init_smccc_filter(struct kvm *kvm)
SMC32_ARCH_RANGE_BEGIN, SMC32_ARCH_RANGE_END, SMC32_ARCH_RANGE_BEGIN, SMC32_ARCH_RANGE_END,
xa_mk_value(KVM_SMCCC_FILTER_HANDLE), xa_mk_value(KVM_SMCCC_FILTER_HANDLE),
GFP_KERNEL_ACCOUNT); GFP_KERNEL_ACCOUNT);
WARN_ON_ONCE(r); if (r)
goto out_destroy;
r = mtree_insert_range(&kvm->arch.smccc_filter, r = mtree_insert_range(&kvm->arch.smccc_filter,
SMC64_ARCH_RANGE_BEGIN, SMC64_ARCH_RANGE_END, SMC64_ARCH_RANGE_BEGIN, SMC64_ARCH_RANGE_END,
xa_mk_value(KVM_SMCCC_FILTER_HANDLE), xa_mk_value(KVM_SMCCC_FILTER_HANDLE),
GFP_KERNEL_ACCOUNT); GFP_KERNEL_ACCOUNT);
WARN_ON_ONCE(r); if (r)
goto out_destroy;
return 0;
out_destroy:
mtree_destroy(&kvm->arch.smccc_filter);
return r;
} }
static bool kvm_smccc_filter_configured(struct kvm *kvm) static bool kvm_smccc_filter_configured(struct kvm *kvm)
...@@ -189,6 +193,12 @@ static int kvm_smccc_set_filter(struct kvm *kvm, struct kvm_smccc_filter __user ...@@ -189,6 +193,12 @@ static int kvm_smccc_set_filter(struct kvm *kvm, struct kvm_smccc_filter __user
goto out_unlock; goto out_unlock;
} }
if (!kvm_smccc_filter_configured(kvm)) {
r = kvm_smccc_filter_insert_reserved(kvm);
if (WARN_ON_ONCE(r))
goto out_unlock;
}
r = mtree_insert_range(&kvm->arch.smccc_filter, start, end, r = mtree_insert_range(&kvm->arch.smccc_filter, start, end,
xa_mk_value(filter.action), GFP_KERNEL_ACCOUNT); xa_mk_value(filter.action), GFP_KERNEL_ACCOUNT);
if (r) if (r)
...@@ -392,7 +402,7 @@ void kvm_arm_init_hypercalls(struct kvm *kvm) ...@@ -392,7 +402,7 @@ void kvm_arm_init_hypercalls(struct kvm *kvm)
smccc_feat->std_hyp_bmap = KVM_ARM_SMCCC_STD_HYP_FEATURES; smccc_feat->std_hyp_bmap = KVM_ARM_SMCCC_STD_HYP_FEATURES;
smccc_feat->vendor_hyp_bmap = KVM_ARM_SMCCC_VENDOR_HYP_FEATURES; smccc_feat->vendor_hyp_bmap = KVM_ARM_SMCCC_VENDOR_HYP_FEATURES;
init_smccc_filter(kvm); mt_init(&kvm->arch.smccc_filter);
} }
void kvm_arm_teardown_hypercalls(struct kvm *kvm) void kvm_arm_teardown_hypercalls(struct kvm *kvm)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment