Commit 5e3d394f authored by Xiaoyao Li, committed by Paolo Bonzini

KVM: VMX: Fix the spelling of CPU_BASED_USE_TSC_OFFSETTING

The misspelling was found by checkpatch.pl, so fix it.
Signed-off-by: Xiaoyao Li <xiaoyao.li@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 4e2a0bc5
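For context on the message above: checkpatch.pl's spelling check is driven by the word list in scripts/spelling.txt and can also be run over existing files with scripts/checkpatch.pl -f <file>, which is presumably how a typo that has been in the tree this long (rather than in a newly posted patch) was reported.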
@@ -20,7 +20,7 @@
  * Definitions of Primary Processor-Based VM-Execution Controls.
  */
 #define CPU_BASED_INTR_WINDOW_EXITING 0x00000004
-#define CPU_BASED_USE_TSC_OFFSETING 0x00000008
+#define CPU_BASED_USE_TSC_OFFSETTING 0x00000008
 #define CPU_BASED_HLT_EXITING 0x00000080
 #define CPU_BASED_INVLPG_EXITING 0x00000200
 #define CPU_BASED_MWAIT_EXITING 0x00000400
...
@@ -3230,7 +3230,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
 	}
 	enter_guest_mode(vcpu);
-	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
+	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING)
 		vcpu->arch.tsc_offset += vmcs12->tsc_offset;
 	if (prepare_vmcs02(vcpu, vmcs12, &exit_qual))
@@ -3294,7 +3294,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
 	 * 26.7 "VM-entry failures during or after loading guest state".
 	 */
 vmentry_fail_vmexit_guest_mode:
-	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
+	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING)
 		vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
 	leave_guest_mode(vcpu);
@@ -4209,7 +4209,7 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 	if (nested_cpu_has_preemption_timer(vmcs12))
 		hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);
-	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
+	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING)
 		vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
 	if (likely(!vmx->fail)) {
@@ -6016,7 +6016,7 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
 		CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
 	msrs->procbased_ctls_high &=
 		CPU_BASED_INTR_WINDOW_EXITING |
-		CPU_BASED_NMI_WINDOW_EXITING | CPU_BASED_USE_TSC_OFFSETING |
+		CPU_BASED_NMI_WINDOW_EXITING | CPU_BASED_USE_TSC_OFFSETTING |
 		CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
 		CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
 		CPU_BASED_CR3_STORE_EXITING |
...
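All of the nested-virtualization hunks above touch the same piece of bookkeeping: while L2 runs, vcpu->arch.tsc_offset holds L1's TSC offset plus vmcs12->tsc_offset whenever vmcs12 sets CPU_BASED_USE_TSC_OFFSETTING, so nested VM-entry adds the L2 component and nested VM-exit (or a failed entry) subtracts it again. Below is a minimal user-space sketch of that accumulation, not kernel code; the toy_* structs and helpers are invented purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define CPU_BASED_USE_TSC_OFFSETTING 0x00000008u

/* Illustrative stand-ins for vcpu->arch.tsc_offset and the vmcs12 fields. */
struct toy_vmcs12 {
	uint32_t cpu_based_vm_exec_control;
	uint64_t tsc_offset;
};

struct toy_vcpu {
	uint64_t tsc_offset;	/* offset in effect for the currently running level */
};

/* Mirrors the "+=" on nested VM-entry in the first hunk above. */
static void toy_nested_entry(struct toy_vcpu *vcpu, const struct toy_vmcs12 *vmcs12)
{
	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING)
		vcpu->tsc_offset += vmcs12->tsc_offset;
}

/* Mirrors the "-=" on nested VM-exit and on a failed VM-entry. */
static void toy_nested_exit(struct toy_vcpu *vcpu, const struct toy_vmcs12 *vmcs12)
{
	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING)
		vcpu->tsc_offset -= vmcs12->tsc_offset;
}

int main(void)
{
	struct toy_vcpu vcpu = { .tsc_offset = 1000 };		/* L1's offset */
	struct toy_vmcs12 vmcs12 = {
		.cpu_based_vm_exec_control = CPU_BASED_USE_TSC_OFFSETTING,
		.tsc_offset = 500,				/* extra offset for L2 */
	};

	toy_nested_entry(&vcpu, &vmcs12);
	printf("while L2 runs: %llu\n", (unsigned long long)vcpu.tsc_offset);	/* 1500 */

	/* Recovering L1's offset while L2 runs subtracts the L2 part,
	 * the same arithmetic vmx_read_l1_tsc_offset() uses below. */
	printf("L1's offset:   %llu\n",
	       (unsigned long long)(vcpu.tsc_offset - vmcs12.tsc_offset));	/* 1000 */

	toy_nested_exit(&vcpu, &vmcs12);
	printf("after VM-exit: %llu\n", (unsigned long long)vcpu.tsc_offset);	/* 1000 */
	return 0;
}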
@@ -1716,7 +1716,7 @@ static u64 vmx_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
 	if (is_guest_mode(vcpu) &&
-	    (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING))
+	    (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING))
 		return vcpu->arch.tsc_offset - vmcs12->tsc_offset;
 	return vcpu->arch.tsc_offset;
@@ -1734,7 +1734,7 @@ static u64 vmx_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 	 * to the newly set TSC to get L2's TSC.
 	 */
 	if (is_guest_mode(vcpu) &&
-	    (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING))
+	    (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING))
 		g_tsc_offset = vmcs12->tsc_offset;
 	trace_kvm_write_tsc_offset(vcpu->vcpu_id,
@@ -2353,7 +2353,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
 	      CPU_BASED_CR3_STORE_EXITING |
 	      CPU_BASED_UNCOND_IO_EXITING |
 	      CPU_BASED_MOV_DR_EXITING |
-	      CPU_BASED_USE_TSC_OFFSETING |
+	      CPU_BASED_USE_TSC_OFFSETTING |
 	      CPU_BASED_MWAIT_EXITING |
 	      CPU_BASED_MONITOR_EXITING |
 	      CPU_BASED_INVLPG_EXITING |
...
@@ -19,7 +19,7 @@
  * Definitions of Primary Processor-Based VM-Execution Controls.
  */
 #define CPU_BASED_INTR_WINDOW_EXITING 0x00000004
-#define CPU_BASED_USE_TSC_OFFSETING 0x00000008
+#define CPU_BASED_USE_TSC_OFFSETTING 0x00000008
 #define CPU_BASED_HLT_EXITING 0x00000080
 #define CPU_BASED_INVLPG_EXITING 0x00000200
 #define CPU_BASED_MWAIT_EXITING 0x00000400
...
@@ -98,7 +98,7 @@ static void l1_guest_code(struct vmx_pages *vmx_pages)
 	prepare_vmcs(vmx_pages, l2_guest_code,
 		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
 	control = vmreadz(CPU_BASED_VM_EXEC_CONTROL);
-	control |= CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_USE_TSC_OFFSETING;
+	control |= CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_USE_TSC_OFFSETTING;
 	vmwrite(CPU_BASED_VM_EXEC_CONTROL, control);
 	vmwrite(TSC_OFFSET, TSC_OFFSET_VALUE);
...
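The selftest hunk above enables the renamed control and programs a TSC offset for L2. Architecturally, when "use TSC offsetting" is set, RDTSC and RDTSCP executed in the guest return the host TSC value plus the VMCS TSC offset, which is what the test relies on. A minimal guest-side check under that assumption; the offset value and the rdtsc helper here are illustrative, not copied from the test:

#include <stdint.h>

/* Illustrative value only; the real test defines its own TSC_OFFSET_VALUE. */
#define TSC_OFFSET_VALUE (1ULL << 48)

static inline uint64_t rdtsc(void)
{
	uint32_t lo, hi;

	__asm__ __volatile__("rdtsc" : "=a"(lo), "=d"(hi));
	return ((uint64_t)hi << 32) | lo;
}

/*
 * With CPU_BASED_USE_TSC_OFFSETTING enabled and TSC_OFFSET programmed to
 * TSC_OFFSET_VALUE, an L2 guest should observe roughly host_tsc +
 * TSC_OFFSET_VALUE, so a reading below the offset means it was not applied.
 */
static inline int l2_sees_tsc_offset(void)
{
	return rdtsc() >= TSC_OFFSET_VALUE;
}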