Commit 05f04ae4 authored by Vitaly Kuznetsov, committed by Paolo Bonzini

KVM: x86: hyper-v: Introduce to_kvm_hv() helper

Spelling '&kvm->arch.hyperv' correctly is hard. Also, this makes the code
more consistent with vmx/svm where to_kvm_vmx()/to_kvm_svm() are already
being used.

Opportunistically change kvm_hv_msr_{get,set}_crash_{data,ctl}() to take
'kvm' instead of 'vcpu', as these MSRs are partition wide.

No functional change intended.
Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Message-Id: <20210126134816.1880136-9-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent f69b55ef
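The whole change revolves around one small accessor. As a quick orientation
before the diff, here is a self-contained C sketch of the pattern; the struct
layouts are simplified stand-ins rather than the real KVM definitions, and
only the shape of to_kvm_hv() mirrors the patch (its actual definition is in
the hyperv.h hunk below):

#include <assert.h>

struct kvm_hv {
	unsigned long long hv_guest_os_id;	/* stand-in field */
};

struct kvm_arch {
	struct kvm_hv hyperv;
};

struct kvm {
	struct kvm_arch arch;
};

/* One obvious spelling instead of open-coding '&kvm->arch.hyperv'. */
static inline struct kvm_hv *to_kvm_hv(struct kvm *kvm)
{
	return &kvm->arch.hyperv;
}

int main(void)
{
	struct kvm vm = { 0 };

	/* The helper yields the same address as the open-coded form,
	 * which is why the commit is "no functional change intended". */
	assert(to_kvm_hv(&vm) == &vm.arch.hyperv);
	return 0;
}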
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -283,8 +283,7 @@ static bool kvm_hv_is_syndbg_enabled(struct kvm_vcpu *vcpu)
 static int kvm_hv_syndbg_complete_userspace(struct kvm_vcpu *vcpu)
 {
-	struct kvm *kvm = vcpu->kvm;
-	struct kvm_hv *hv = &kvm->arch.hyperv;
+	struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
 
 	if (vcpu->run->hyperv.u.syndbg.msr == HV_X64_MSR_SYNDBG_CONTROL)
 		hv->hv_syndbg.control.status =
@@ -515,7 +514,7 @@ static void synic_init(struct kvm_vcpu_hv_synic *synic)
 static u64 get_time_ref_counter(struct kvm *kvm)
 {
-	struct kvm_hv *hv = &kvm->arch.hyperv;
+	struct kvm_hv *hv = to_kvm_hv(kvm);
 	struct kvm_vcpu *vcpu;
 	u64 tsc;
@@ -940,10 +939,9 @@ static bool kvm_hv_msr_partition_wide(u32 msr)
 	return r;
 }
 
-static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu,
-				     u32 index, u64 *pdata)
+static int kvm_hv_msr_get_crash_data(struct kvm *kvm, u32 index, u64 *pdata)
 {
-	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
+	struct kvm_hv *hv = to_kvm_hv(kvm);
 	size_t size = ARRAY_SIZE(hv->hv_crash_param);
 
 	if (WARN_ON_ONCE(index >= size))
@@ -953,41 +951,26 @@ static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
-static int kvm_hv_msr_get_crash_ctl(struct kvm_vcpu *vcpu, u64 *pdata)
+static int kvm_hv_msr_get_crash_ctl(struct kvm *kvm, u64 *pdata)
 {
-	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
+	struct kvm_hv *hv = to_kvm_hv(kvm);
 
 	*pdata = hv->hv_crash_ctl;
 	return 0;
 }
 
-static int kvm_hv_msr_set_crash_ctl(struct kvm_vcpu *vcpu, u64 data, bool host)
+static int kvm_hv_msr_set_crash_ctl(struct kvm *kvm, u64 data)
 {
-	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
-
-	if (host)
-		hv->hv_crash_ctl = data & HV_CRASH_CTL_CRASH_NOTIFY;
-
-	if (!host && (data & HV_CRASH_CTL_CRASH_NOTIFY)) {
-
-		vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
-			  hv->hv_crash_param[0],
-			  hv->hv_crash_param[1],
-			  hv->hv_crash_param[2],
-			  hv->hv_crash_param[3],
-			  hv->hv_crash_param[4]);
-
-		/* Send notification about crash to user space */
-		kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
-	}
+	struct kvm_hv *hv = to_kvm_hv(kvm);
+
+	hv->hv_crash_ctl = data & HV_CRASH_CTL_CRASH_NOTIFY;
 
 	return 0;
 }
 
-static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu,
-				     u32 index, u64 data)
+static int kvm_hv_msr_set_crash_data(struct kvm *kvm, u32 index, u64 data)
 {
-	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
+	struct kvm_hv *hv = to_kvm_hv(kvm);
 	size_t size = ARRAY_SIZE(hv->hv_crash_param);
 
 	if (WARN_ON_ONCE(index >= size))
@@ -1069,7 +1052,7 @@ static bool compute_tsc_page_parameters(struct pvclock_vcpu_time_info *hv_clock,
 void kvm_hv_setup_tsc_page(struct kvm *kvm,
 			   struct pvclock_vcpu_time_info *hv_clock)
 {
-	struct kvm_hv *hv = &kvm->arch.hyperv;
+	struct kvm_hv *hv = to_kvm_hv(kvm);
 	u32 tsc_seq;
 	u64 gfn;
@@ -1079,7 +1062,7 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
 	if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
 		return;
 
-	mutex_lock(&kvm->arch.hyperv.hv_lock);
+	mutex_lock(&hv->hv_lock);
 	if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
 		goto out_unlock;
@@ -1123,14 +1106,14 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
 	kvm_write_guest(kvm, gfn_to_gpa(gfn),
 			&hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence));
 out_unlock:
-	mutex_unlock(&kvm->arch.hyperv.hv_lock);
+	mutex_unlock(&hv->hv_lock);
 }
 
 static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
 			     bool host)
 {
 	struct kvm *kvm = vcpu->kvm;
-	struct kvm_hv *hv = &kvm->arch.hyperv;
+	struct kvm_hv *hv = to_kvm_hv(kvm);
 
 	switch (msr) {
 	case HV_X64_MSR_GUEST_OS_ID:
@@ -1186,11 +1169,25 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
 		kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
 		break;
 	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
-		return kvm_hv_msr_set_crash_data(vcpu,
+		return kvm_hv_msr_set_crash_data(kvm,
 						 msr - HV_X64_MSR_CRASH_P0,
 						 data);
 	case HV_X64_MSR_CRASH_CTL:
-		return kvm_hv_msr_set_crash_ctl(vcpu, data, host);
+		if (host)
+			return kvm_hv_msr_set_crash_ctl(kvm, data);
+
+		if (data & HV_CRASH_CTL_CRASH_NOTIFY) {
+			vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
+				   hv->hv_crash_param[0],
+				   hv->hv_crash_param[1],
+				   hv->hv_crash_param[2],
+				   hv->hv_crash_param[3],
+				   hv->hv_crash_param[4]);
+
+			/* Send notification about crash to user space */
+			kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
+		}
+		break;
 	case HV_X64_MSR_RESET:
 		if (data == 1) {
 			vcpu_debug(vcpu, "hyper-v reset requested\n");
@@ -1238,7 +1235,7 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
 	switch (msr) {
 	case HV_X64_MSR_VP_INDEX: {
-		struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
+		struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
 		int vcpu_idx = kvm_vcpu_get_idx(vcpu);
 		u32 new_vp_index = (u32)data;
@@ -1348,7 +1345,7 @@ static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
 {
 	u64 data = 0;
 	struct kvm *kvm = vcpu->kvm;
-	struct kvm_hv *hv = &kvm->arch.hyperv;
+	struct kvm_hv *hv = to_kvm_hv(kvm);
 
 	switch (msr) {
 	case HV_X64_MSR_GUEST_OS_ID:
@@ -1364,11 +1361,11 @@ static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
 		data = hv->hv_tsc_page;
 		break;
 	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
-		return kvm_hv_msr_get_crash_data(vcpu,
+		return kvm_hv_msr_get_crash_data(kvm,
 						 msr - HV_X64_MSR_CRASH_P0,
 						 pdata);
 	case HV_X64_MSR_CRASH_CTL:
-		return kvm_hv_msr_get_crash_ctl(vcpu, pdata);
+		return kvm_hv_msr_get_crash_ctl(kvm, pdata);
 	case HV_X64_MSR_RESET:
 		data = 0;
 		break;
@@ -1456,12 +1453,14 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
 int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
 {
+	struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
+
 	if (kvm_hv_msr_partition_wide(msr)) {
 		int r;
 
-		mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
+		mutex_lock(&hv->hv_lock);
 		r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
-		mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
+		mutex_unlock(&hv->hv_lock);
 		return r;
 	} else
 		return kvm_hv_set_msr(vcpu, msr, data, host);
@@ -1469,12 +1468,14 @@ int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
 int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
 {
+	struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
+
 	if (kvm_hv_msr_partition_wide(msr)) {
 		int r;
 
-		mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
+		mutex_lock(&hv->hv_lock);
 		r = kvm_hv_get_msr_pw(vcpu, msr, pdata, host);
-		mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
+		mutex_unlock(&hv->hv_lock);
 		return r;
 	} else
 		return kvm_hv_get_msr(vcpu, msr, pdata, host);
@@ -1484,7 +1485,7 @@ static __always_inline unsigned long *sparse_set_to_vcpu_mask(
 	struct kvm *kvm, u64 *sparse_banks, u64 valid_bank_mask,
 	u64 *vp_bitmap, unsigned long *vcpu_bitmap)
 {
-	struct kvm_hv *hv = &kvm->arch.hyperv;
+	struct kvm_hv *hv = to_kvm_hv(kvm);
 	struct kvm_vcpu *vcpu;
 	int i, bank, sbank = 0;
@@ -1686,7 +1687,7 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *current_vcpu, u64 ingpa, u64 outgpa,
 bool kvm_hv_hypercall_enabled(struct kvm *kvm)
 {
-	return READ_ONCE(kvm->arch.hyperv.hv_guest_os_id) != 0;
+	return to_kvm_hv(kvm)->hv_guest_os_id != 0;
 }
 
 static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
@@ -1716,6 +1717,7 @@ static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
 static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param)
 {
+	struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
 	struct eventfd_ctx *eventfd;
 
 	if (unlikely(!fast)) {
@@ -1744,7 +1746,7 @@ static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param)
 	/* the eventfd is protected by vcpu->kvm->srcu, but conn_to_evt isn't */
 	rcu_read_lock();
-	eventfd = idr_find(&vcpu->kvm->arch.hyperv.conn_to_evt, param);
+	eventfd = idr_find(&hv->conn_to_evt, param);
 	rcu_read_unlock();
 	if (!eventfd)
 		return HV_STATUS_INVALID_PORT_ID;
@@ -1903,23 +1905,26 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
 void kvm_hv_init_vm(struct kvm *kvm)
 {
-	mutex_init(&kvm->arch.hyperv.hv_lock);
-	idr_init(&kvm->arch.hyperv.conn_to_evt);
+	struct kvm_hv *hv = to_kvm_hv(kvm);
+
+	mutex_init(&hv->hv_lock);
+	idr_init(&hv->conn_to_evt);
 }
 
 void kvm_hv_destroy_vm(struct kvm *kvm)
 {
+	struct kvm_hv *hv = to_kvm_hv(kvm);
 	struct eventfd_ctx *eventfd;
 	int i;
 
-	idr_for_each_entry(&kvm->arch.hyperv.conn_to_evt, eventfd, i)
+	idr_for_each_entry(&hv->conn_to_evt, eventfd, i)
 		eventfd_ctx_put(eventfd);
-	idr_destroy(&kvm->arch.hyperv.conn_to_evt);
+	idr_destroy(&hv->conn_to_evt);
 }
 
 static int kvm_hv_eventfd_assign(struct kvm *kvm, u32 conn_id, int fd)
 {
-	struct kvm_hv *hv = &kvm->arch.hyperv;
+	struct kvm_hv *hv = to_kvm_hv(kvm);
 	struct eventfd_ctx *eventfd;
 	int ret;
@@ -1943,7 +1948,7 @@ static int kvm_hv_eventfd_assign(struct kvm *kvm, u32 conn_id, int fd)
 static int kvm_hv_eventfd_deassign(struct kvm *kvm, u32 conn_id)
 {
-	struct kvm_hv *hv = &kvm->arch.hyperv;
+	struct kvm_hv *hv = to_kvm_hv(kvm);
 	struct eventfd_ctx *eventfd;
 
 	mutex_lock(&hv->hv_lock);
--- a/arch/x86/kvm/hyperv.h
+++ b/arch/x86/kvm/hyperv.h
@@ -50,6 +50,11 @@
 /* Hyper-V HV_X64_MSR_SYNDBG_OPTIONS bits */
 #define HV_X64_SYNDBG_OPTION_USE_HCALLS		BIT(2)
 
+static inline struct kvm_hv *to_kvm_hv(struct kvm *kvm)
+{
+	return &kvm->arch.hyperv;
+}
+
 static inline struct kvm_vcpu_hv *to_hv_vcpu(struct kvm_vcpu *vcpu)
 {
 	return &vcpu->arch.hyperv;
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -50,6 +50,7 @@
 #include "capabilities.h"
 #include "cpuid.h"
 #include "evmcs.h"
+#include "hyperv.h"
 #include "irq.h"
 #include "kvm_cache_regs.h"
 #include "lapic.h"
@@ -552,7 +553,7 @@ static int hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu)
 {
 	struct hv_enlightened_vmcs *evmcs;
 	struct hv_partition_assist_pg **p_hv_pa_pg =
-			&vcpu->kvm->arch.hyperv.hv_pa_pg;
+			&to_kvm_hv(vcpu->kvm)->hv_pa_pg;
 	/*
 	 * Synthetic VM-Exit is not enabled in current code and so All
 	 * evmcs in singe VM shares same assist page.
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -10433,7 +10433,7 @@ void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
 void kvm_arch_free_vm(struct kvm *kvm)
 {
-	kfree(kvm->arch.hyperv.hv_pa_pg);
+	kfree(to_kvm_hv(kvm)->hv_pa_pg);
 	vfree(kvm);
 }
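As a closing illustration of the partition-wide point from the commit message,
here is a compilable sketch, with stand-in types and a pthread mutex in place
of the kernel's mutex API (none of this is the kernel's code): per-VM handlers
take 'struct kvm *' directly, and the common entry point caches the kvm_hv
pointer once and locks through it instead of spelling out
'&vcpu->kvm->arch.hyperv.hv_lock' twice.

#include <pthread.h>

struct kvm_hv {
	pthread_mutex_t hv_lock;	/* stand-in for the kernel's mutex */
	unsigned long long hv_crash_ctl;
};

struct kvm_arch { struct kvm_hv hyperv; };
struct kvm { struct kvm_arch arch; };
struct kvm_vcpu { struct kvm *kvm; };

static inline struct kvm_hv *to_kvm_hv(struct kvm *kvm)
{
	return &kvm->arch.hyperv;
}

/* Partition-wide (per-VM) state: the handler needs the VM, not a vCPU. */
static int set_msr_pw(struct kvm *kvm, unsigned long long data)
{
	to_kvm_hv(kvm)->hv_crash_ctl = data;	/* stand-in body */
	return 0;
}

static int set_msr_common(struct kvm_vcpu *vcpu, unsigned long long data)
{
	struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);	/* cached once */
	int r;

	pthread_mutex_lock(&hv->hv_lock);
	r = set_msr_pw(vcpu->kvm, data);
	pthread_mutex_unlock(&hv->hv_lock);
	return r;
}

int main(void)
{
	struct kvm vm = { .arch.hyperv.hv_lock = PTHREAD_MUTEX_INITIALIZER };
	struct kvm_vcpu vcpu = { .kvm = &vm };

	return set_msr_common(&vcpu, 1);	/* build with -pthread */
}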