Commit 08b297bb authored by Greg Kroah-Hartman

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Paolo writes:
  "KVM changes for 4.19-rc7

   x86 and PPC bugfixes, mostly introduced in 4.19-rc1."

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  kvm: nVMX: fix entry with pending interrupt if APICv is enabled
  KVM: VMX: hide flexpriority from guest when disabled at the module level
  KVM: VMX: check for existence of secondary exec controls before accessing
  KVM: PPC: Book3S HV: Avoid crash from THP collapse during radix page fault
  KVM: x86: fix L1TF's MMIO GFN calculation
  tools/kvm_stat: cut down decimal places in update interval dialog
  KVM: nVMX: Fix emulation of VM_ENTRY_LOAD_BNDCFGS
  KVM: x86: Do not use kvm_x86_ops->mpx_supported() directly
  KVM: nVMX: Do not expose MPX VMX controls when guest MPX disabled
  KVM: x86: never trap MSR_KERNEL_GS_BASE
parents 4fbeba43 cc906f07
@@ -646,6 +646,16 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	 */
 	local_irq_disable();
 	ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
+	/*
+	 * If the PTE disappeared temporarily due to a THP
+	 * collapse, just return and let the guest try again.
+	 */
+	if (!ptep) {
+		local_irq_enable();
+		if (page)
+			put_page(page);
+		return RESUME_GUEST;
+	}
 	pte = *ptep;
 	local_irq_enable();
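A note on the hunk above (my reading of the fix, not part of the patch): between the hva lookup and the dereference of ptep, a concurrent THP collapse can clear the page-table entry even though the backing memory is still present, so a NULL ptep here is not a hard error. Returning RESUME_GUEST after dropping the page reference simply re-enters the guest, which takes the fault again and retries the translation once the new (possibly huge) PTE is in place.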
@@ -249,6 +249,17 @@ static u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
  */
 static const u64 shadow_nonpresent_or_rsvd_mask_len = 5;
 
+/*
+ * In some cases, we need to preserve the GFN of a non-present or reserved
+ * SPTE when we usurp the upper five bits of the physical address space to
+ * defend against L1TF, e.g. for MMIO SPTEs.  To preserve the GFN, we'll
+ * shift bits of the GFN that overlap with shadow_nonpresent_or_rsvd_mask
+ * left into the reserved bits, i.e. the GFN in the SPTE will be split into
+ * high and low parts.  This mask covers the lower bits of the GFN.
+ */
+static u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;
+
 static void mmu_spte_set(u64 *sptep, u64 spte);
 static union kvm_mmu_page_role
 kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
@@ -357,9 +368,7 @@ static bool is_mmio_spte(u64 spte)
 
 static gfn_t get_mmio_spte_gfn(u64 spte)
 {
-	u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask |
-		   shadow_nonpresent_or_rsvd_mask;
-	u64 gpa = spte & ~mask;
+	u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;
 
 	gpa |= (spte >> shadow_nonpresent_or_rsvd_mask_len)
 	       & shadow_nonpresent_or_rsvd_mask;
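To make the GFN split described in the new comment concrete, here is a small standalone sketch, not kernel code: `PHYS_BITS`, `split_gpa()` and `unsplit_gpa()` are hypothetical stand-ins, and a 46-bit physical address width is assumed. It shows how GPA bits that overlap the usurped region are shifted left by `shadow_nonpresent_or_rsvd_mask_len` into the reserved bits when the SPTE is built, and how the fixed get_mmio_spte_gfn() logic shifts them back:

```c
/*
 * Standalone sketch (not kernel code) of splitting and recombining an
 * MMIO SPTE's GFN around the L1TF reserved-bit region.  Assumes a
 * hypothetical CPU with 46 physical address bits, so the top 5 usable
 * bits (45:41) are usurped for the L1TF defense and the GFN bits that
 * would land there are shifted left by 5 into bits 50:46.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT		12
#define MASK_LEN		5	/* shadow_nonpresent_or_rsvd_mask_len */
#define PHYS_BITS		46	/* assumed boot_cpu_data.x86_phys_bits */
#define GENMASK_ULL(h, l)	(((~0ULL) >> (63 - (h))) & ~((1ULL << (l)) - 1))

/* Bits 45:41 -- the usurped physical-address bits. */
static const uint64_t rsvd_mask = GENMASK_ULL(PHYS_BITS - 1, PHYS_BITS - MASK_LEN);
/* Bits 40:12 -- the GFN bits that stay in place. */
static const uint64_t lower_gfn_mask = GENMASK_ULL(PHYS_BITS - MASK_LEN - 1, PAGE_SHIFT);

/* Split: GPA bits overlapping rsvd_mask move left by MASK_LEN. */
static uint64_t split_gpa(uint64_t gpa)
{
	return (gpa & lower_gfn_mask) | ((gpa & rsvd_mask) << MASK_LEN);
}

/* Recombine, mirroring the fixed get_mmio_spte_gfn() arithmetic. */
static uint64_t unsplit_gpa(uint64_t spte)
{
	uint64_t gpa = spte & lower_gfn_mask;

	gpa |= (spte >> MASK_LEN) & rsvd_mask;
	return gpa;
}

int main(void)
{
	/* Arbitrary page-aligned GPA whose bit 41 overlaps the usurped region. */
	uint64_t gpa = 0x3f0000ab000ULL;

	printf("round trip ok: %d\n", unsplit_gpa(split_gpa(gpa)) == gpa);
	return 0;
}
```

In this assumed 46-bit layout, the old `spte & ~mask` cleared the usurped bits 45:41 but left the shifted copy of the high GFN bits at 50:46 in place, so the reconstructed GFN carried stray high bits; masking with the new lower-GFN mask first drops everything outside bits 40:12 before the high part is shifted back in.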
@@ -423,6 +432,8 @@ EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
 
 static void kvm_mmu_reset_all_pte_masks(void)
 {
+	u8 low_phys_bits;
+
 	shadow_user_mask = 0;
 	shadow_accessed_mask = 0;
 	shadow_dirty_mask = 0;
@@ -437,12 +448,17 @@ static void kvm_mmu_reset_all_pte_masks(void)
 	 * appropriate mask to guard against L1TF attacks.  Otherwise, it is
 	 * assumed that the CPU is not vulnerable to L1TF.
 	 */
+	low_phys_bits = boot_cpu_data.x86_phys_bits;
 	if (boot_cpu_data.x86_phys_bits <
-	    52 - shadow_nonpresent_or_rsvd_mask_len)
+	    52 - shadow_nonpresent_or_rsvd_mask_len) {
 		shadow_nonpresent_or_rsvd_mask =
 			rsvd_bits(boot_cpu_data.x86_phys_bits -
 				  shadow_nonpresent_or_rsvd_mask_len,
 				  boot_cpu_data.x86_phys_bits - 1);
+		low_phys_bits -= shadow_nonpresent_or_rsvd_mask_len;
+	}
+
+	shadow_nonpresent_or_rsvd_lower_gfn_mask =
+		GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);
 }
 
 static int is_cpuid_PSE36(void)
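As a worked example (assuming, as in the sketch above, a CPU reporting 46 physical address bits, which is below the 47-bit threshold of 52 - shadow_nonpresent_or_rsvd_mask_len): shadow_nonpresent_or_rsvd_mask becomes rsvd_bits(41, 45), low_phys_bits drops from 46 to 41, and shadow_nonpresent_or_rsvd_lower_gfn_mask becomes GENMASK_ULL(40, 12), i.e. exactly the GFN bits below the usurped region. On a CPU with 47 or more physical bits the branch is skipped, the L1TF mask stays zero, and the lower-GFN mask simply covers the whole GFN up to the physical-address limit.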
@@ -4698,7 +4698,7 @@ static void kvm_init_msr_list(void)
 		 */
 		switch (msrs_to_save[i]) {
 		case MSR_IA32_BNDCFGS:
-			if (!kvm_x86_ops->mpx_supported())
+			if (!kvm_mpx_supported())
 				continue;
 			break;
 		case MSR_TSC_AUX:
@@ -1325,7 +1325,7 @@ class Tui(object):
         msg = ''
         while True:
             self.screen.erase()
-            self.screen.addstr(0, 0, 'Set update interval (defaults to %fs).' %
+            self.screen.addstr(0, 0, 'Set update interval (defaults to %.1fs).' %
                                DELAY_DEFAULT, curses.A_BOLD)
             self.screen.addstr(4, 0, msg)
             self.screen.addstr(2, 0, 'Change delay from %.1fs to ' %