Commit b4128000 authored by Vitaly Kuznetsov, committed by Paolo Bonzini

KVM: x86: hyper-v: Prepare to check access to Hyper-V MSRs

Introduce hv_check_msr_access() to check whether a particular MSR
should be accessible to the guest; this will be used with
KVM_CAP_HYPERV_ENFORCE_CPUID mode.

No functional change intended.
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Message-Id: <20210521095204.2161214-5-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 10d7bf1e
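
KVM_CAP_HYPERV_ENFORCE_CPUID is a per-vCPU capability enabled from userspace via the KVM_ENABLE_CAP ioctl. A minimal sketch of opting a vCPU into the enforcing mode this series targets (assuming an already-created vcpu_fd and a kernel that exposes the capability; the helper name is ours) might look like:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: ask KVM to start enforcing the Hyper-V features exposed through
 * the guest-visible CPUID. With args[0] == 0 the legacy "allow everything"
 * behaviour is kept, which is all hv_check_msr_access() implements so far. */
static int enable_hv_enforce_cpuid(int vcpu_fd)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_HYPERV_ENFORCE_CPUID,
		.args = { 1 },
	};

	return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
}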
@@ -1202,12 +1202,21 @@ void kvm_hv_invalidate_tsc_page(struct kvm *kvm)
 	mutex_unlock(&hv->hv_lock);
 }
 
+static bool hv_check_msr_access(struct kvm_vcpu_hv *hv_vcpu, u32 msr)
+{
+	return true;
+}
+
 static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
 			     bool host)
 {
 	struct kvm *kvm = vcpu->kvm;
 	struct kvm_hv *hv = to_kvm_hv(kvm);
 
+	if (unlikely(!host && !hv_check_msr_access(to_hv_vcpu(vcpu), msr)))
+		return 1;
+
 	switch (msr) {
 	case HV_X64_MSR_GUEST_OS_ID:
 		hv->hv_guest_os_id = data;
@@ -1336,6 +1345,9 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
 {
 	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
 
+	if (unlikely(!host && !hv_check_msr_access(hv_vcpu, msr)))
+		return 1;
+
 	switch (msr) {
 	case HV_X64_MSR_VP_INDEX: {
 		struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
@@ -1450,6 +1462,9 @@ static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
 	struct kvm *kvm = vcpu->kvm;
 	struct kvm_hv *hv = to_kvm_hv(kvm);
 
+	if (unlikely(!host && !hv_check_msr_access(to_hv_vcpu(vcpu), msr)))
+		return 1;
+
 	switch (msr) {
 	case HV_X64_MSR_GUEST_OS_ID:
 		data = hv->hv_guest_os_id;
@@ -1499,6 +1514,9 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
 	u64 data = 0;
 	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
 
+	if (unlikely(!host && !hv_check_msr_access(hv_vcpu, msr)))
+		return 1;
+
 	switch (msr) {
 	case HV_X64_MSR_VP_INDEX:
 		data = hv_vcpu->vp_index;
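
This patch is intentionally a no-op: hv_check_msr_access() always returns true, and hooking it into all four set/get paths now means the eventual policy only has to be expressed in one place. As a forward-looking sketch only (field and flag names such as enforce_cpuid and cpuid_cache.features_eax follow the direction of this series, and the MSR-to-feature mapping shown is illustrative, not part of this commit), the stub is expected to grow into something like:

static bool hv_check_msr_access(struct kvm_vcpu_hv *hv_vcpu, u32 msr)
{
	/* Legacy behaviour: no enforcement unless userspace opted in. */
	if (!hv_vcpu->enforce_cpuid)
		return true;

	/*
	 * Illustrative mapping: deny an MSR unless the corresponding
	 * feature bit was exposed in the guest-visible Hyper-V CPUID
	 * leaves cached by the parent commit.
	 */
	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
	case HV_X64_MSR_HYPERCALL:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_MSR_HYPERCALL_AVAILABLE;
	case HV_X64_MSR_VP_INDEX:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_MSR_VP_INDEX_AVAILABLE;
	default:
		return false;
	}
}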