Commit a3fe4599 authored by Paolo Bonzini, committed by Sasha Levin

KVM: x86: pass host_initiated to functions that read MSRs

[ Upstream commit 609e36d3 ]

SMBASE is only readable from SMM for the VCPU, but it must always be
accessible if userspace is accessing it.  Thus, all functions that
read MSRs are changed to accept a struct msr_data; the host_initiated
and index fields are pre-initialized, while the data field is filled
on return.
Reviewed-by: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
parent bd21c23d
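
Why the struct: a plain (u32 index, u64 *pdata) read path has no way to say who is asking. With struct msr_data, callers pre-initialize index and host_initiated and the callee fills data, so one handler can accept a host-initiated read that it must refuse from the guest. Below is a minimal, standalone C sketch of that convention. The struct layout mirrors the diff that follows, but the SMBASE index, value, and SMM check are illustrative stand-ins only; the SMBASE read itself is not part of this diff.

/*
 * Standalone sketch of the calling convention introduced by this patch.
 * The real struct msr_data lives in arch/x86/include/asm/kvm_host.h;
 * everything SMBASE-related here is a made-up stand-in for illustration.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct msr_data {
	bool host_initiated;	/* true when userspace (KVM_GET_MSRS) asks */
	uint32_t index;		/* which MSR to read, pre-initialized */
	uint64_t data;		/* filled in by the callee on success */
};

#define MSR_IA32_SMBASE	0x9e	/* illustrative index */

static bool vcpu_in_smm;	/* stand-in for the vcpu's SMM state */

/* Callee: consumes index/host_initiated, produces data; 0 on success. */
static int get_msr(struct msr_data *msr)
{
	switch (msr->index) {
	case MSR_IA32_SMBASE:
		/* A guest read outside SMM fails; a host read always works. */
		if (!msr->host_initiated && !vcpu_in_smm)
			return 1;
		msr->data = 0x30000;	/* made-up SMBASE value */
		return 0;
	default:
		return 1;
	}
}

int main(void)
{
	struct msr_data msr = {
		.host_initiated = true,		/* as in do_get_msr() */
		.index = MSR_IA32_SMBASE,
	};

	if (get_msr(&msr) == 0)
		printf("host read: SMBASE = 0x%llx\n",
		       (unsigned long long)msr.data);

	msr.host_initiated = false;		/* guest rdmsr path */
	if (get_msr(&msr) != 0)
		printf("guest read outside SMM fails (would #GP)\n");
	return 0;
}

In the diff below, compare do_get_msr() (host_initiated = true, ioctl path) with rdmsr_interception(), handle_rdmsr(), and emulator_get_msr() (host_initiated = false, guest paths).
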
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -718,7 +718,7 @@ struct kvm_x86_ops {
 	void (*vcpu_put)(struct kvm_vcpu *vcpu);
 
 	void (*update_db_bp_intercept)(struct kvm_vcpu *vcpu);
-	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
+	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
 	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
 	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
 	void (*get_segment)(struct kvm_vcpu *vcpu,
@@ -938,7 +938,7 @@ static inline int emulate_instruction(struct kvm_vcpu *vcpu,
 
 void kvm_enable_efer_bits(u64);
 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
-int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
+int kvm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
 int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
 
 struct x86_emulate_ctxt;
@@ -967,7 +967,7 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
 int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);
 
-int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
+int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
 int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
 
 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3069,42 +3069,42 @@ static u64 svm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
 		svm_scale_tsc(vcpu, host_tsc);
 }
 
-static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
+static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	switch (ecx) {
+	switch (msr_info->index) {
 	case MSR_IA32_TSC: {
-		*data = svm->vmcb->control.tsc_offset +
+		msr_info->data = svm->vmcb->control.tsc_offset +
 			svm_scale_tsc(vcpu, native_read_tsc());
 
 		break;
 	}
 	case MSR_STAR:
-		*data = svm->vmcb->save.star;
+		msr_info->data = svm->vmcb->save.star;
 		break;
 #ifdef CONFIG_X86_64
 	case MSR_LSTAR:
-		*data = svm->vmcb->save.lstar;
+		msr_info->data = svm->vmcb->save.lstar;
 		break;
 	case MSR_CSTAR:
-		*data = svm->vmcb->save.cstar;
+		msr_info->data = svm->vmcb->save.cstar;
 		break;
 	case MSR_KERNEL_GS_BASE:
-		*data = svm->vmcb->save.kernel_gs_base;
+		msr_info->data = svm->vmcb->save.kernel_gs_base;
 		break;
 	case MSR_SYSCALL_MASK:
-		*data = svm->vmcb->save.sfmask;
+		msr_info->data = svm->vmcb->save.sfmask;
 		break;
 #endif
 	case MSR_IA32_SYSENTER_CS:
-		*data = svm->vmcb->save.sysenter_cs;
+		msr_info->data = svm->vmcb->save.sysenter_cs;
 		break;
 	case MSR_IA32_SYSENTER_EIP:
-		*data = svm->sysenter_eip;
+		msr_info->data = svm->sysenter_eip;
 		break;
 	case MSR_IA32_SYSENTER_ESP:
-		*data = svm->sysenter_esp;
+		msr_info->data = svm->sysenter_esp;
 		break;
 	/*
 	 * Nobody will change the following 5 values in the VMCB so we can
@@ -3112,31 +3112,31 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
 	 * implemented.
 	 */
 	case MSR_IA32_DEBUGCTLMSR:
-		*data = svm->vmcb->save.dbgctl;
+		msr_info->data = svm->vmcb->save.dbgctl;
 		break;
 	case MSR_IA32_LASTBRANCHFROMIP:
-		*data = svm->vmcb->save.br_from;
+		msr_info->data = svm->vmcb->save.br_from;
 		break;
 	case MSR_IA32_LASTBRANCHTOIP:
-		*data = svm->vmcb->save.br_to;
+		msr_info->data = svm->vmcb->save.br_to;
 		break;
 	case MSR_IA32_LASTINTFROMIP:
-		*data = svm->vmcb->save.last_excp_from;
+		msr_info->data = svm->vmcb->save.last_excp_from;
 		break;
 	case MSR_IA32_LASTINTTOIP:
-		*data = svm->vmcb->save.last_excp_to;
+		msr_info->data = svm->vmcb->save.last_excp_to;
 		break;
 	case MSR_VM_HSAVE_PA:
-		*data = svm->nested.hsave_msr;
+		msr_info->data = svm->nested.hsave_msr;
 		break;
 	case MSR_VM_CR:
-		*data = svm->nested.vm_cr_msr;
+		msr_info->data = svm->nested.vm_cr_msr;
 		break;
 	case MSR_IA32_UCODE_REV:
-		*data = 0x01000065;
+		msr_info->data = 0x01000065;
 		break;
 	default:
-		return kvm_get_msr_common(vcpu, ecx, data);
+		return kvm_get_msr_common(vcpu, msr_info);
 	}
 	return 0;
 }
@@ -3144,16 +3144,20 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
 
 static int rdmsr_interception(struct vcpu_svm *svm)
 {
 	u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
-	u64 data;
+	struct msr_data msr_info;
 
-	if (svm_get_msr(&svm->vcpu, ecx, &data)) {
+	msr_info.index = ecx;
+	msr_info.host_initiated = false;
+	if (svm_get_msr(&svm->vcpu, &msr_info)) {
 		trace_kvm_msr_read_ex(ecx);
 		kvm_inject_gp(&svm->vcpu, 0);
 	} else {
-		trace_kvm_msr_read(ecx, data);
-		kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, data & 0xffffffff);
-		kvm_register_write(&svm->vcpu, VCPU_REGS_RDX, data >> 32);
+		trace_kvm_msr_read(ecx, msr_info.data);
+		kvm_register_write(&svm->vcpu, VCPU_REGS_RAX,
+				   msr_info.data & 0xffffffff);
+		kvm_register_write(&svm->vcpu, VCPU_REGS_RDX,
+				   msr_info.data >> 32);
 		svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
 		skip_emulated_instruction(&svm->vcpu);
 	}
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2640,76 +2640,69 @@ static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
  * Returns 0 on success, non-0 otherwise.
  * Assumes vcpu_load() was already called.
  */
-static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
+static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
-	u64 data;
 	struct shared_msr_entry *msr;
 
-	if (!pdata) {
-		printk(KERN_ERR "BUG: get_msr called with NULL pdata\n");
-		return -EINVAL;
-	}
-
-	switch (msr_index) {
+	switch (msr_info->index) {
 #ifdef CONFIG_X86_64
 	case MSR_FS_BASE:
-		data = vmcs_readl(GUEST_FS_BASE);
+		msr_info->data = vmcs_readl(GUEST_FS_BASE);
 		break;
 	case MSR_GS_BASE:
-		data = vmcs_readl(GUEST_GS_BASE);
+		msr_info->data = vmcs_readl(GUEST_GS_BASE);
 		break;
 	case MSR_KERNEL_GS_BASE:
 		vmx_load_host_state(to_vmx(vcpu));
-		data = to_vmx(vcpu)->msr_guest_kernel_gs_base;
+		msr_info->data = to_vmx(vcpu)->msr_guest_kernel_gs_base;
 		break;
 #endif
 	case MSR_EFER:
-		return kvm_get_msr_common(vcpu, msr_index, pdata);
+		return kvm_get_msr_common(vcpu, msr_info);
 	case MSR_IA32_TSC:
-		data = guest_read_tsc();
+		msr_info->data = guest_read_tsc();
 		break;
 	case MSR_IA32_SYSENTER_CS:
-		data = vmcs_read32(GUEST_SYSENTER_CS);
+		msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
 		break;
 	case MSR_IA32_SYSENTER_EIP:
-		data = vmcs_readl(GUEST_SYSENTER_EIP);
+		msr_info->data = vmcs_readl(GUEST_SYSENTER_EIP);
 		break;
 	case MSR_IA32_SYSENTER_ESP:
-		data = vmcs_readl(GUEST_SYSENTER_ESP);
+		msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
 		break;
 	case MSR_IA32_BNDCFGS:
 		if (!vmx_mpx_supported())
 			return 1;
-		data = vmcs_read64(GUEST_BNDCFGS);
+		msr_info->data = vmcs_read64(GUEST_BNDCFGS);
 		break;
 	case MSR_IA32_FEATURE_CONTROL:
 		if (!nested_vmx_allowed(vcpu))
 			return 1;
-		data = to_vmx(vcpu)->nested.msr_ia32_feature_control;
+		msr_info->data = to_vmx(vcpu)->nested.msr_ia32_feature_control;
 		break;
 	case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
 		if (!nested_vmx_allowed(vcpu))
 			return 1;
-		return vmx_get_vmx_msr(vcpu, msr_index, pdata);
+		return vmx_get_vmx_msr(vcpu, msr_info->index, &msr_info->data);
 	case MSR_IA32_XSS:
 		if (!vmx_xsaves_supported())
 			return 1;
-		data = vcpu->arch.ia32_xss;
+		msr_info->data = vcpu->arch.ia32_xss;
 		break;
 	case MSR_TSC_AUX:
 		if (!to_vmx(vcpu)->rdtscp_enabled)
 			return 1;
 		/* Otherwise falls through */
 	default:
-		msr = find_msr_entry(to_vmx(vcpu), msr_index);
+		msr = find_msr_entry(to_vmx(vcpu), msr_info->index);
 		if (msr) {
-			data = msr->data;
+			msr_info->data = msr->data;
 			break;
 		}
-		return kvm_get_msr_common(vcpu, msr_index, pdata);
+		return kvm_get_msr_common(vcpu, msr_info);
 	}
 
-	*pdata = data;
 	return 0;
 }
@@ -5496,19 +5489,21 @@ static int handle_cpuid(struct kvm_vcpu *vcpu)
 static int handle_rdmsr(struct kvm_vcpu *vcpu)
 {
 	u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
-	u64 data;
+	struct msr_data msr_info;
 
-	if (vmx_get_msr(vcpu, ecx, &data)) {
+	msr_info.index = ecx;
+	msr_info.host_initiated = false;
+	if (vmx_get_msr(vcpu, &msr_info)) {
 		trace_kvm_msr_read_ex(ecx);
 		kvm_inject_gp(vcpu, 0);
 		return 1;
 	}
 
-	trace_kvm_msr_read(ecx, data);
+	trace_kvm_msr_read(ecx, msr_info.data);
 
 	/* FIXME: handling of bits 32:63 of rax, rdx */
-	vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u;
-	vcpu->arch.regs[VCPU_REGS_RDX] = (data >> 32) & -1u;
+	vcpu->arch.regs[VCPU_REGS_RAX] = msr_info.data & -1u;
+	vcpu->arch.regs[VCPU_REGS_RDX] = (msr_info.data >> 32) & -1u;
 	skip_emulated_instruction(vcpu);
 	return 1;
 }
@@ -9038,6 +9033,7 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 	struct vmx_msr_entry e;
 
 	for (i = 0; i < count; i++) {
+		struct msr_data msr_info;
 		if (kvm_read_guest(vcpu->kvm,
 				   gpa + i * sizeof(e),
 				   &e, 2 * sizeof(u32))) {
@@ -9052,7 +9048,9 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 				__func__, i, e.index, e.reserved);
 			return -EINVAL;
 		}
-		if (kvm_get_msr(vcpu, e.index, &e.value)) {
+		msr_info.host_initiated = false;
+		msr_info.index = e.index;
+		if (kvm_get_msr(vcpu, &msr_info)) {
 			pr_warn_ratelimited(
 				"%s cannot read MSR (%u, 0x%x)\n",
 				__func__, i, e.index);
@@ -9061,10 +9059,10 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 		if (kvm_write_guest(vcpu->kvm,
 				    gpa + i * sizeof(e) +
 					offsetof(struct vmx_msr_entry, value),
-				    &e.value, sizeof(e.value))) {
+				    &msr_info.data, sizeof(msr_info.data))) {
 			pr_warn_ratelimited(
 				"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
-				__func__, i, e.index, e.value);
+				__func__, i, e.index, msr_info.data);
 			return -EINVAL;
 		}
 	}
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1045,6 +1045,21 @@ EXPORT_SYMBOL_GPL(kvm_set_msr);
 /*
  * Adapt set_msr() to msr_io()'s calling convention
  */
+static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
+{
+	struct msr_data msr;
+	int r;
+
+	msr.index = index;
+	msr.host_initiated = true;
+	r = kvm_get_msr(vcpu, &msr);
+	if (r)
+		return r;
+
+	*data = msr.data;
+	return 0;
+}
+
 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
 {
 	struct msr_data msr;
@@ -2374,9 +2389,9 @@ EXPORT_SYMBOL_GPL(kvm_set_msr_common);
  * Returns 0 on success, non-0 otherwise.
  * Assumes vcpu_load() was already called.
  */
-int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
+int kvm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 {
-	return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
+	return kvm_x86_ops->get_msr(vcpu, msr);
 }
 EXPORT_SYMBOL_GPL(kvm_get_msr);
 
@@ -2513,11 +2528,11 @@ static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 	return 0;
 }
 
-int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
+int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
 	u64 data;
 
-	switch (msr) {
+	switch (msr_info->index) {
 	case MSR_IA32_PLATFORM_ID:
 	case MSR_IA32_EBL_CR_POWERON:
 	case MSR_IA32_DEBUGCTLMSR:
@@ -2540,26 +2555,26 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 	case MSR_AMD64_NB_CFG:
 	case MSR_FAM10H_MMIO_CONF_BASE:
 	case MSR_AMD64_BU_CFG2:
-		data = 0;
+		msr_info->data = 0;
 		break;
 	case MSR_P6_PERFCTR0:
 	case MSR_P6_PERFCTR1:
 	case MSR_P6_EVNTSEL0:
 	case MSR_P6_EVNTSEL1:
-		if (kvm_pmu_msr(vcpu, msr))
-			return kvm_pmu_get_msr(vcpu, msr, pdata);
-		data = 0;
+		if (kvm_pmu_msr(vcpu, msr_info->index))
+			return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data);
+		msr_info->data = 0;
 		break;
 	case MSR_IA32_UCODE_REV:
-		data = 0x100000000ULL;
+		msr_info->data = 0x100000000ULL;
 		break;
 	case MSR_MTRRcap:
-		data = 0x500 | KVM_NR_VAR_MTRR;
+		msr_info->data = 0x500 | KVM_NR_VAR_MTRR;
 		break;
 	case 0x200 ... 0x2ff:
-		return get_msr_mtrr(vcpu, msr, pdata);
+		return get_msr_mtrr(vcpu, msr_info->index, &msr_info->data);
 	case 0xcd: /* fsb frequency */
-		data = 3;
+		msr_info->data = 3;
 		break;
 		/*
 		 * MSR_EBC_FREQUENCY_ID
@@ -2573,48 +2588,48 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 		 * multiplying by zero otherwise.
 		 */
 	case MSR_EBC_FREQUENCY_ID:
-		data = 1 << 24;
+		msr_info->data = 1 << 24;
 		break;
 	case MSR_IA32_APICBASE:
-		data = kvm_get_apic_base(vcpu);
+		msr_info->data = kvm_get_apic_base(vcpu);
 		break;
 	case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
-		return kvm_x2apic_msr_read(vcpu, msr, pdata);
+		return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data);
 		break;
 	case MSR_IA32_TSCDEADLINE:
-		data = kvm_get_lapic_tscdeadline_msr(vcpu);
+		msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu);
 		break;
 	case MSR_IA32_TSC_ADJUST:
-		data = (u64)vcpu->arch.ia32_tsc_adjust_msr;
+		msr_info->data = (u64)vcpu->arch.ia32_tsc_adjust_msr;
 		break;
 	case MSR_IA32_MISC_ENABLE:
-		data = vcpu->arch.ia32_misc_enable_msr;
+		msr_info->data = vcpu->arch.ia32_misc_enable_msr;
 		break;
 	case MSR_IA32_PERF_STATUS:
 		/* TSC increment by tick */
-		data = 1000ULL;
+		msr_info->data = 1000ULL;
 		/* CPU multiplier */
 		data |= (((uint64_t)4ULL) << 40);
 		break;
 	case MSR_EFER:
-		data = vcpu->arch.efer;
+		msr_info->data = vcpu->arch.efer;
 		break;
 	case MSR_KVM_WALL_CLOCK:
 	case MSR_KVM_WALL_CLOCK_NEW:
-		data = vcpu->kvm->arch.wall_clock;
+		msr_info->data = vcpu->kvm->arch.wall_clock;
 		break;
 	case MSR_KVM_SYSTEM_TIME:
 	case MSR_KVM_SYSTEM_TIME_NEW:
-		data = vcpu->arch.time;
+		msr_info->data = vcpu->arch.time;
 		break;
 	case MSR_KVM_ASYNC_PF_EN:
-		data = vcpu->arch.apf.msr_val;
+		msr_info->data = vcpu->arch.apf.msr_val;
 		break;
 	case MSR_KVM_STEAL_TIME:
-		data = vcpu->arch.st.msr_val;
+		msr_info->data = vcpu->arch.st.msr_val;
 		break;
 	case MSR_KVM_PV_EOI_EN:
-		data = vcpu->arch.pv_eoi.msr_val;
+		msr_info->data = vcpu->arch.pv_eoi.msr_val;
 		break;
 	case MSR_IA32_P5_MC_ADDR:
 	case MSR_IA32_P5_MC_TYPE:
@@ -2622,7 +2637,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 	case MSR_IA32_MCG_CTL:
 	case MSR_IA32_MCG_STATUS:
 	case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
-		return get_msr_mce(vcpu, msr, pdata);
+		return get_msr_mce(vcpu, msr_info->index, &msr_info->data);
 	case MSR_K7_CLK_CTL:
 		/*
 		 * Provide expected ramp-up count for K7. All other
@@ -2633,17 +2648,17 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 		 * type 6, model 8 and higher from exploding due to
 		 * the rdmsr failing.
 		 */
-		data = 0x20000000;
+		msr_info->data = 0x20000000;
 		break;
 	case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
-		if (kvm_hv_msr_partition_wide(msr)) {
+		if (kvm_hv_msr_partition_wide(msr_info->index)) {
 			int r;
 			mutex_lock(&vcpu->kvm->lock);
-			r = get_msr_hyperv_pw(vcpu, msr, pdata);
+			r = get_msr_hyperv_pw(vcpu, msr_info->index, &msr_info->data);
 			mutex_unlock(&vcpu->kvm->lock);
 			return r;
 		} else
-			return get_msr_hyperv(vcpu, msr, pdata);
+			return get_msr_hyperv(vcpu, msr_info->index, &msr_info->data);
 		break;
 	case MSR_IA32_BBL_CR_CTL3:
 		/* This legacy MSR exists but isn't fully documented in current
@@ -2656,31 +2671,30 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 		 * L2 cache control register 3: 64GB range, 256KB size,
 		 * enabled, latency 0x1, configured
 		 */
-		data = 0xbe702111;
+		msr_info->data = 0xbe702111;
 		break;
 	case MSR_AMD64_OSVW_ID_LENGTH:
 		if (!guest_cpuid_has_osvw(vcpu))
 			return 1;
-		data = vcpu->arch.osvw.length;
+		msr_info->data = vcpu->arch.osvw.length;
 		break;
 	case MSR_AMD64_OSVW_STATUS:
 		if (!guest_cpuid_has_osvw(vcpu))
 			return 1;
-		data = vcpu->arch.osvw.status;
+		msr_info->data = vcpu->arch.osvw.status;
 		break;
 	default:
-		if (kvm_pmu_msr(vcpu, msr))
-			return kvm_pmu_get_msr(vcpu, msr, pdata);
+		if (kvm_pmu_msr(vcpu, msr_info->index))
+			return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data);
 		if (!ignore_msrs) {
-			vcpu_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
+			vcpu_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr_info->index);
 			return 1;
 		} else {
-			vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr);
-			data = 0;
+			vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr_info->index);
+			msr_info->data = 0;
 		}
 		break;
 	}
-	*pdata = data;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_get_msr_common);
@@ -3453,7 +3467,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		break;
 	}
 	case KVM_GET_MSRS:
-		r = msr_io(vcpu, argp, kvm_get_msr, 1);
+		r = msr_io(vcpu, argp, do_get_msr, 1);
 		break;
 	case KVM_SET_MSRS:
 		r = msr_io(vcpu, argp, do_set_msr, 0);
@@ -4948,7 +4962,17 @@ static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector,
 static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
 			    u32 msr_index, u64 *pdata)
 {
-	return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata);
+	struct msr_data msr;
+	int r;
+
+	msr.index = msr_index;
+	msr.host_initiated = false;
+	r = kvm_get_msr(emul_to_vcpu(ctxt), &msr);
+	if (r)
+		return r;
+
+	*pdata = msr.data;
+	return 0;
 }
 
 static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,