Commit c992384b authored by Paolo Bonzini, committed by Radim Krčmář

KVM: vmx: speed up MSR bitmap merge

The bulk of the MSR bitmap is either immutable, or can be copied from
the L1 bitmap.  By initializing it at VMXON time, and copying the mutable
parts one long at a time on vmentry (rather than one bit), about 4000
clock cycles (30%) can be saved on a nested VMLAUNCH/VMRESUME.

The resulting for loop only has four iterations, so it is cheap enough
to reinitialize the MSR write bitmaps on every iteration, and it makes
the code simpler.

Suggested-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
parent 1f6e5b25
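
As an illustration of the arithmetic the new loop relies on (a standalone user-space sketch, not part of the patch; the helper name and the demo in main() are hypothetical): the x2APIC MSRs 0x800-0x8ff occupy 256 bits of the read-low bitmap, so on a 64-bit host the copy touches only four longs per vmentry, and the matching write-low words sit 0x800 bytes, i.e. 0x800 / sizeof(long) longs, further into the 4 KiB page.

	/* illustration only: layout constants restated from the patch */
	#include <stdio.h>
	#include <string.h>
	#include <limits.h>

	#define BITS_PER_LONG	(CHAR_BIT * sizeof(unsigned long))
	#define PAGE_SIZE	4096

	/* Copy L1's read-intercept bits for MSRs 0x800-0x8ff and keep all
	 * write intercepts set, one long at a time (4 iterations on 64-bit). */
	static void merge_x2apic_range(unsigned long *l0, const unsigned long *l1)
	{
		unsigned int msr;

		for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
			unsigned int word = msr / BITS_PER_LONG;

			l0[word] = l1[word];                     /* read-low bitmap  */
			l0[word + 0x800 / sizeof(long)] = ~0UL;  /* write-low bitmap */
		}
	}

	int main(void)
	{
		unsigned long l0[PAGE_SIZE / sizeof(unsigned long)];
		unsigned long l1[PAGE_SIZE / sizeof(unsigned long)] = { 0 };

		memset(l0, 0xff, sizeof(l0));	/* default: intercept every MSR */
		merge_x2apic_range(l0, l1);	/* reads of 0x800-0x8ff follow L1 */

		printf("TPR (0x808) read word: %#lx\n", l0[0x808 / BITS_PER_LONG]);
		return 0;
	}

In the patch itself, msr_bitmap_l0 is the page allocated and preset to 0xff in enter_vmx_operation(), and msr_bitmap_l1 is the kmap()ed L1 bitmap page.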
@@ -4972,11 +4972,6 @@ static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
 {
 	int f = sizeof(unsigned long);
 
-	if (!cpu_has_vmx_msr_bitmap()) {
-		WARN_ON(1);
-		return;
-	}
-
 	/*
 	 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
 	 * have the write-low and read-high bitmap offsets the wrong way round.

@@ -7177,6 +7172,7 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu)
 			(unsigned long *)__get_free_page(GFP_KERNEL);
 		if (!vmx->nested.msr_bitmap)
 			goto out_msr_bitmap;
+		memset(vmx->nested.msr_bitmap, 0xff, PAGE_SIZE);
 	}
 
 	vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);

@@ -9844,7 +9840,7 @@ static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
 	}
 }
 
-static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
+static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
 					       struct vmcs12 *vmcs12);
 
 static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,

@@ -9934,11 +9930,7 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
 				(unsigned long)(vmcs12->posted_intr_desc_addr &
 				(PAGE_SIZE - 1)));
 	}
-	if (cpu_has_vmx_msr_bitmap() &&
-	    nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS) &&
-	    nested_vmx_merge_msr_bitmap(vcpu, vmcs12))
-		;
-	else
+	if (!nested_vmx_prepare_msr_bitmap(vcpu, vmcs12))
 		vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
 				CPU_BASED_USE_MSR_BITMAPS);
 }

@@ -10006,7 +9998,7 @@ static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu,
  * Merge L0's and L1's MSR bitmap, return false to indicate that
  * we do not use the hardware.
  */
-static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
+static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
 					       struct vmcs12 *vmcs12)
 {
 	int msr;

@@ -10014,6 +10006,11 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
 	unsigned long *msr_bitmap_l1;
 	unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.msr_bitmap;
 
+	/* Nothing to do if the MSR bitmap is not in use. */
+	if (!cpu_has_vmx_msr_bitmap() ||
+	    !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
+		return false;
+
 	/* This shortcut is ok because we support only x2APIC MSRs so far. */
 	if (!nested_cpu_has_virt_x2apic_mode(vmcs12))
 		return false;

@@ -10021,21 +10018,31 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
 	page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->msr_bitmap);
 	if (is_error_page(page))
 		return false;
-	msr_bitmap_l1 = (unsigned long *)kmap(page);
 
-	memset(msr_bitmap_l0, 0xff, PAGE_SIZE);
-
-	if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
-		if (nested_cpu_has_apic_reg_virt(vmcs12))
-			for (msr = 0x800; msr <= 0x8ff; msr++)
-				nested_vmx_disable_intercept_for_msr(
-					msr_bitmap_l1, msr_bitmap_l0,
-					msr, MSR_TYPE_R);
+	msr_bitmap_l1 = (unsigned long *)kmap(page);
+	if (nested_cpu_has_apic_reg_virt(vmcs12)) {
+		/*
+		 * L0 need not intercept reads for MSRs between 0x800 and 0x8ff, it
+		 * just lets the processor take the value from the virtual-APIC page;
+		 * take those 256 bits directly from the L1 bitmap.
+		 */
+		for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
+			unsigned word = msr / BITS_PER_LONG;
+			msr_bitmap_l0[word] = msr_bitmap_l1[word];
+			msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
+		}
+	} else {
+		for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
+			unsigned word = msr / BITS_PER_LONG;
+			msr_bitmap_l0[word] = ~0;
+			msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
+		}
+	}
 
 	nested_vmx_disable_intercept_for_msr(
 		msr_bitmap_l1, msr_bitmap_l0,
 		APIC_BASE_MSR + (APIC_TASKPRI >> 4),
-		MSR_TYPE_R | MSR_TYPE_W);
+		MSR_TYPE_W);
 
 	if (nested_cpu_has_vid(vmcs12)) {
 		nested_vmx_disable_intercept_for_msr(

@@ -10047,7 +10054,6 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
 			APIC_BASE_MSR + (APIC_SELF_IPI >> 4),
 			MSR_TYPE_W);
 	}
-	}
 	kunmap(page);
 	kvm_release_page_clean(page);