Commit ba904635 authored by Will Auld, committed by Marcelo Tosatti

KVM: x86: Emulate IA32_TSC_ADJUST MSR

CPUID.7.0.EBX[1]=1 indicates IA32_TSC_ADJUST MSR 0x3b is supported

The basic design is to emulate the MSR by storing its value in a per-vcpu
location that services guest reads and writes, while also adding that value
to the vmcs tsc_offset. In this way the IA32_TSC_ADJUST value is included in
every guest read of the TSC, whether through rdmsr or rdtsc, as long as the
"use TSC counter offsetting" VM-execution control is enabled and the
IA32_TSC_ADJUST feature is exposed to the guest.
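
For illustration only (not part of the patch), the short C sketch below shows
the arithmetic this design relies on; host_tsc, vmcs_tsc_offset and
guest_adjust are hypothetical names and values chosen for the example.

/* Illustration only: how a guest IA32_TSC_ADJUST write folded into the
 * vmcs tsc_offset shows up in a guest rdtsc. Values are made up. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t host_tsc = 1000000;		/* what rdtsc returns on the host */
	int64_t vmcs_tsc_offset = -250000;	/* "use TSC offsetting" offset */
	int64_t guest_adjust = -4096;		/* guest write to IA32_TSC_ADJUST */

	/* The patch adds the guest's adjustment to the vmcs tsc_offset
	 * instead of keeping it in the real MSR ... */
	vmcs_tsc_offset += guest_adjust;

	/* ... so a guest rdtsc observes host TSC + the combined offset. */
	uint64_t guest_tsc = host_tsc + (uint64_t)vmcs_tsc_offset;

	printf("guest rdtsc -> %llu\n", (unsigned long long)guest_tsc);
	return 0;
}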

However, because hardware only returns TSC + IA32_TSC_ADJUST + vmcs tsc_offset
to a guest process when it executes an rdtsc (with the correct settings), the
value of our virtualized IA32_TSC_ADJUST must be stored in one of these three
locations. The argument against storing it in the actual MSR is performance:
the MSR is likely to be seldom used, while the save/restore would be required
on every transition. IA32_TSC_ADJUST was created precisely to avoid the
problems that come with writing the TSC itself, so writing the TSC directly is
not an option either.

The remaining option, described above as our solution, has the problem of
returning incorrect vmcs tsc_offset values (unless we intercept and fix them,
which is not done here). More problematic, however, is that storing the data
in the vmcs tsc_offset has a different semantic effect on the system than
using the actual MSR. This is illustrated by the following example:

The hypervisor sets IA32_TSC_ADJUST, then the guest sets it, and a guest
process performs an rdtsc. In this case the guest process will get
TSC + IA32_TSC_ADJUST_hypervisor + vmcs tsc_offset, where the tsc_offset now
includes IA32_TSC_ADJUST_guest. While the total system semantics change, the
semantics as seen by the guest do not, and hence this does not cause a
problem.
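
A worked numeric illustration of that example (all values made up):

    TSC (host)                          = 1000000
    IA32_TSC_ADJUST set by hypervisor   =  -50000
    vmcs tsc_offset before guest write  =  -20000
    guest writes IA32_TSC_ADJUST        =   -4096  (folded into tsc_offset: -24096)

    guest rdtsc = 1000000 + (-50000) + (-24096) = 925904

Whether the -4096 lives in the real MSR or in the vmcs tsc_offset, the guest
observes the same 4096-cycle shift relative to its own earlier reads; only the
host-side view of tsc_offset differs.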
Signed-off-by: Will Auld <will.auld@intel.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent 8fe8ab46

@@ -202,6 +202,7 @@
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
 #define X86_FEATURE_FSGSBASE	(9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
+#define X86_FEATURE_TSC_ADJUST	(9*32+ 1) /* TSC adjustment MSR 0x3b */
 #define X86_FEATURE_BMI1	(9*32+ 3) /* 1st group bit manipulation extensions */
 #define X86_FEATURE_HLE		(9*32+ 4) /* Hardware Lock Elision */
 #define X86_FEATURE_AVX2	(9*32+ 5) /* AVX2 instructions */

@@ -444,6 +444,7 @@ struct kvm_vcpu_arch {
 	s8 virtual_tsc_shift;
 	u32 virtual_tsc_mult;
 	u32 virtual_tsc_khz;
+	s64 ia32_tsc_adjust_msr;
 	atomic_t nmi_queued;	/* unprocessed asynchronous NMIs */
 	unsigned nmi_pending;	/* NMI queued after currently running handler */

@@ -711,6 +712,7 @@ struct kvm_x86_ops {
 	bool (*has_wbinvd_exit)(void);
 	void (*set_tsc_khz)(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale);
+	u64 (*read_tsc_offset)(struct kvm_vcpu *vcpu);
 	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
 	u64 (*compute_tsc_offset)(struct kvm_vcpu *vcpu, u64 target_tsc);

@@ -236,6 +236,7 @@
 #define MSR_IA32_EBL_CR_POWERON		0x0000002a
 #define MSR_EBC_FREQUENCY_ID		0x0000002c
 #define MSR_IA32_FEATURE_CONTROL	0x0000003a
+#define MSR_IA32_TSC_ADJUST		0x0000003b
 #define FEATURE_CONTROL_LOCKED		(1<<0)
 #define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX	(1<<1)

@@ -320,6 +320,8 @@ static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 		if (index == 0) {
 			entry->ebx &= kvm_supported_word9_x86_features;
 			cpuid_mask(&entry->ebx, 9);
+			// TSC_ADJUST is emulated
+			entry->ebx |= F(TSC_ADJUST);
 		} else
 			entry->ebx = 0;
 		entry->eax = 0;

@@ -28,6 +28,14 @@ static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
 	return best && (best->ecx & bit(X86_FEATURE_XSAVE));
 }
 
+static inline bool guest_cpuid_has_tsc_adjust(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpuid_entry2 *best;
+
+	best = kvm_find_cpuid_entry(vcpu, 7, 0);
+	return best && (best->ebx & bit(X86_FEATURE_TSC_ADJUST));
+}
+
 static inline bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best;

@@ -1009,6 +1009,13 @@ static void svm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
 	svm->tsc_ratio = ratio;
 }
 
+static u64 svm_read_tsc_offset(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	return svm->vmcb->control.tsc_offset;
+}
+
 static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);

@@ -4304,6 +4311,7 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.has_wbinvd_exit = svm_has_wbinvd_exit,
 	.set_tsc_khz = svm_set_tsc_khz,
+	.read_tsc_offset = svm_read_tsc_offset,
 	.write_tsc_offset = svm_write_tsc_offset,
 	.adjust_tsc_offset = svm_adjust_tsc_offset,
 	.compute_tsc_offset = svm_compute_tsc_offset,

@@ -1884,6 +1884,11 @@ static void vmx_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
 		WARN(1, "user requested TSC rate below hardware speed\n");
 }
 
+static u64 vmx_read_tsc_offset(struct kvm_vcpu *vcpu)
+{
+	return vmcs_read64(TSC_OFFSET);
+}
+
 /*
  * writes 'offset' into guest's timestamp counter offset register
  */

@@ -2266,6 +2271,9 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		}
 		ret = kvm_set_msr_common(vcpu, msr_info);
 		break;
+	case MSR_IA32_TSC_ADJUST:
+		ret = kvm_set_msr_common(vcpu, msr_info);
+		break;
 	case MSR_TSC_AUX:
 		if (!vmx->rdtscp_enabled)
 			return 1;

@@ -7345,6 +7353,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
 	.set_tsc_khz = vmx_set_tsc_khz,
+	.read_tsc_offset = vmx_read_tsc_offset,
 	.write_tsc_offset = vmx_write_tsc_offset,
 	.adjust_tsc_offset = vmx_adjust_tsc_offset,
 	.compute_tsc_offset = vmx_compute_tsc_offset,

@@ -831,6 +831,7 @@ static u32 msrs_to_save[] = {
 static unsigned num_msrs_to_save;
 
 static const u32 emulated_msrs[] = {
+	MSR_IA32_TSC_ADJUST,
 	MSR_IA32_TSCDEADLINE,
 	MSR_IA32_MISC_ENABLE,
 	MSR_IA32_MCG_STATUS,

@@ -1135,6 +1136,12 @@ void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
 #endif
 }
 
+static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset)
+{
+	u64 curr_offset = kvm_x86_ops->read_tsc_offset(vcpu);
+	vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset;
+}
+
 void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 {
 	struct kvm *kvm = vcpu->kvm;

@@ -1222,6 +1229,8 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 	vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
 	vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
+	if (guest_cpuid_has_tsc_adjust(vcpu) && !msr->host_initiated)
+		update_ia32_tsc_adjust_msr(vcpu, offset);
 	kvm_x86_ops->write_tsc_offset(vcpu, offset);
 	raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);

@@ -1918,6 +1927,15 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_IA32_TSCDEADLINE:
 		kvm_set_lapic_tscdeadline_msr(vcpu, data);
 		break;
+	case MSR_IA32_TSC_ADJUST:
+		if (guest_cpuid_has_tsc_adjust(vcpu)) {
+			if (!msr_info->host_initiated) {
+				u64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
+				kvm_x86_ops->adjust_tsc_offset(vcpu, adj, true);
+			}
+			vcpu->arch.ia32_tsc_adjust_msr = data;
+		}
+		break;
 	case MSR_IA32_MISC_ENABLE:
 		vcpu->arch.ia32_misc_enable_msr = data;
 		break;

@@ -2277,6 +2295,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 	case MSR_IA32_TSCDEADLINE:
 		data = kvm_get_lapic_tscdeadline_msr(vcpu);
 		break;
+	case MSR_IA32_TSC_ADJUST:
+		data = (u64)vcpu->arch.ia32_tsc_adjust_msr;
+		break;
 	case MSR_IA32_MISC_ENABLE:
 		data = vcpu->arch.ia32_misc_enable_msr;
 		break;

@@ -6607,6 +6628,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL))
 		goto fail_free_mce_banks;
 
+	vcpu->arch.ia32_tsc_adjust_msr = 0x0;
 	kvm_async_pf_hash_reset(vcpu);
 	kvm_pmu_init(vcpu);
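
Not part of the patch: a hedged user-space sketch of how one might exercise
the emulated MSR from inside a guest, assuming a Linux guest with the msr
driver loaded (/dev/cpu/0/msr, CONFIG_X86_MSR), root privileges, and the
process pinned to CPU 0 (e.g. with taskset -c 0). Writing IA32_TSC_ADJUST
really does shift the TSC, so treat this as an experiment only.

/* Illustration only: write IA32_TSC_ADJUST (0x3b) via the Linux msr driver
 * and observe the resulting shift in rdtsc. Not part of the patch. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <x86intrin.h>		/* __rdtsc() */

#define MSR_IA32_TSC_ADJUST 0x3b

int main(void)
{
	int fd = open("/dev/cpu/0/msr", O_RDWR);
	if (fd < 0) {
		perror("open /dev/cpu/0/msr");
		return 1;
	}

	uint64_t before = __rdtsc();
	int64_t adjust = -1000000;	/* arbitrary example value */

	/* For the msr driver, the file offset selects the MSR number. */
	if (pwrite(fd, &adjust, sizeof(adjust), MSR_IA32_TSC_ADJUST) != sizeof(adjust)) {
		perror("write IA32_TSC_ADJUST");
		return 1;
	}
	uint64_t after = __rdtsc();

	/* With the patch, the write lands in the vcpu's emulated MSR and is
	 * folded into the vmcs tsc_offset, so rdtsc jumps back accordingly. */
	printf("tsc before=%llu after=%llu (expect roughly a -1000000 shift)\n",
	       (unsigned long long)before, (unsigned long long)after);
	close(fd);
	return 0;
}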