Commit 208a352a authored by Paolo Bonzini

Merge tag 'kvm-x86-vmx-6.11' of https://github.com/kvm-x86/linux into HEAD

KVM VMX changes for 6.11

 - Remove an unnecessary EPT TLB flush when enabling hardware.

 - Fix a series of bugs that cause KVM to fail to detect nested pending posted
   interrupts as valid wake events for a vCPU executing HLT in L2 (with
   HLT-exiting disabled by L1); the wake-event check is sketched below.

 - Misc cleanups
parents 1229cbef 45405155
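
For the second bullet above: the fix hinges on the x86 APIC rule that a pending vector is only a valid wake event if its priority class (bits 7:4 of the vector) is higher than the class of the virtual PPR. The snippet below is a minimal standalone sketch of that comparison, assuming a 256-bit posted-interrupt bitmap; it is not kernel code, and every identifier in it is invented for illustration. Only the (vector & 0xf0) > (vppr & 0xf0) test mirrors what vmx_has_nested_events() does in the diff that follows.

/*
 * Standalone illustration, NOT kernel code: model of the wake-event test.
 * highest_pending_vector() mimics scanning a 256-bit posted-interrupt
 * descriptor (PIR) for the highest pending vector; vector_wakes_vcpu()
 * applies the priority-class comparison.  All names and values are made up.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Highest set bit in a 256-bit bitmap, or -1 if no bit is set. */
static int highest_pending_vector(const uint64_t pir[4])
{
        for (int word = 3; word >= 0; word--) {
                if (pir[word])
                        return word * 64 + 63 - __builtin_clzll(pir[word]);
        }
        return -1;
}

/* A vector is a wake event only if its class (bits 7:4) beats the PPR's. */
static bool vector_wakes_vcpu(int vector, uint32_t vppr)
{
        return vector > 0 && (vector & 0xf0) > (vppr & 0xf0);
}

int main(void)
{
        uint64_t pir[4] = { 0, 0, 1ull << (0xa1 - 128), 0 }; /* vector 0xa1 pending */
        uint32_t vppr = 0x90;                                /* in-service class 0x9x */
        int vec = highest_pending_vector(pir);

        printf("vector 0x%x wakes the vCPU: %s\n", vec,
               vector_wakes_vcpu(vec, vppr) ? "yes" : "no");
        return 0;
}

With VPPR in class 0x9x and pending vector 0xa1 (class 0xa), the class comparison succeeds, so the vCPU must be woken even though the interrupt has not yet been injected.
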
@@ -85,7 +85,6 @@ KVM_X86_OP_OPTIONAL(update_cr8_intercept)
KVM_X86_OP(refresh_apicv_exec_ctrl)
KVM_X86_OP_OPTIONAL(hwapic_irr_update)
KVM_X86_OP_OPTIONAL(hwapic_isr_update)
KVM_X86_OP_OPTIONAL_RET0(guest_apic_has_interrupt)
KVM_X86_OP_OPTIONAL(load_eoi_exitmap)
KVM_X86_OP_OPTIONAL(set_virtual_apic_mode)
KVM_X86_OP_OPTIONAL(set_apic_access_page_addr)
......
@@ -1731,7 +1731,6 @@ struct kvm_x86_ops {
void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
void (*hwapic_isr_update)(int isr);
bool (*guest_apic_has_interrupt)(struct kvm_vcpu *vcpu);
void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu);
@@ -1837,7 +1836,7 @@ struct kvm_x86_nested_ops {
bool (*is_exception_vmexit)(struct kvm_vcpu *vcpu, u8 vector,
u32 error_code);
int (*check_events)(struct kvm_vcpu *vcpu);
bool (*has_events)(struct kvm_vcpu *vcpu);
bool (*has_events)(struct kvm_vcpu *vcpu, bool for_injection);
void (*triple_fault)(struct kvm_vcpu *vcpu);
int (*get_state)(struct kvm_vcpu *vcpu,
struct kvm_nested_state __user *user_kvm_nested_state,
......
@@ -97,7 +97,6 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
.required_apicv_inhibits = VMX_REQUIRED_APICV_INHIBITS,
.hwapic_irr_update = vmx_hwapic_irr_update,
.hwapic_isr_update = vmx_hwapic_isr_update,
.guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
.sync_pir_to_irr = vmx_sync_pir_to_irr,
.deliver_interrupt = vmx_deliver_interrupt,
.dy_apicv_has_pending_interrupt = pi_has_pending_interrupt,
......
@@ -12,6 +12,7 @@
#include "mmu.h"
#include "nested.h"
#include "pmu.h"
#include "posted_intr.h"
#include "sgx.h"
#include "trace.h"
#include "vmx.h"
@@ -3899,8 +3900,8 @@ static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
if (!pi_test_and_clear_on(vmx->nested.pi_desc))
return 0;
max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256);
if (max_irr != 256) {
max_irr = pi_find_highest_vector(vmx->nested.pi_desc);
if (max_irr > 0) {
vapic_page = vmx->nested.virtual_apic_map.hva;
if (!vapic_page)
goto mmio_needed;
@@ -4031,10 +4032,46 @@ static bool nested_vmx_preemption_timer_pending(struct kvm_vcpu *vcpu)
to_vmx(vcpu)->nested.preemption_timer_expired;
}
static bool vmx_has_nested_events(struct kvm_vcpu *vcpu)
static bool vmx_has_nested_events(struct kvm_vcpu *vcpu, bool for_injection)
{
return nested_vmx_preemption_timer_pending(vcpu) ||
to_vmx(vcpu)->nested.mtf_pending;
struct vcpu_vmx *vmx = to_vmx(vcpu);
void *vapic = vmx->nested.virtual_apic_map.hva;
int max_irr, vppr;
if (nested_vmx_preemption_timer_pending(vcpu) ||
vmx->nested.mtf_pending)
return true;
/*
* Virtual Interrupt Delivery doesn't require manual injection. Either
* the interrupt is already in GUEST_RVI and will be recognized by CPU
* at VM-Entry, or there is a KVM_REQ_EVENT pending and KVM will move
* the interrupt from the PIR to RVI prior to entering the guest.
*/
if (for_injection)
return false;
if (!nested_cpu_has_vid(get_vmcs12(vcpu)) ||
__vmx_interrupt_blocked(vcpu))
return false;
if (!vapic)
return false;
vppr = *((u32 *)(vapic + APIC_PROCPRI));
max_irr = vmx_get_rvi();
if ((max_irr & 0xf0) > (vppr & 0xf0))
return true;
if (vmx->nested.pi_pending && vmx->nested.pi_desc &&
pi_test_on(vmx->nested.pi_desc)) {
max_irr = pi_find_highest_vector(vmx->nested.pi_desc);
if (max_irr > 0 && (max_irr & 0xf0) > (vppr & 0xf0))
return true;
}
return false;
}
/*
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_POSTED_INTR_H
#define __KVM_X86_VMX_POSTED_INTR_H
#include <linux/find.h>
#include <asm/posted_intr.h>
void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu);
@@ -12,4 +14,12 @@ int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
uint32_t guest_irq, bool set);
void vmx_pi_start_assignment(struct kvm *kvm);
static inline int pi_find_highest_vector(struct pi_desc *pi_desc)
{
int vec;
vec = find_last_bit((unsigned long *)pi_desc->pir, 256);
return vec < 256 ? vec : -1;
}
#endif /* __KVM_X86_VMX_POSTED_INTR_H */
@@ -188,12 +188,13 @@ struct __packed vmcs12 {
};
/*
* VMCS12_REVISION is an arbitrary id that should be changed if the content or
* layout of struct vmcs12 is changed. MSR_IA32_VMX_BASIC returns this id, and
* VMPTRLD verifies that the VMCS region that L1 is loading contains this id.
* VMCS12_REVISION is KVM's arbitrary ID for the layout of struct vmcs12. KVM
* enumerates this value to L1 via MSR_IA32_VMX_BASIC, and checks the revision
* ID during nested VMPTRLD to verify that L1 is loading a VMCS that adheres
* to KVM's virtual CPU definition.
*
* IMPORTANT: Changing this value will break save/restore compatibility with
* older kvm releases.
* DO NOT change this value, as it will break save/restore compatibility with
* older KVM releases.
*/
#define VMCS12_REVISION 0x11e57ed0
@@ -206,7 +207,8 @@ struct __packed vmcs12 {
#define VMCS12_SIZE KVM_STATE_NESTED_VMX_VMCS_SIZE
/*
* For save/restore compatibility, the vmcs12 field offsets must not change.
* For save/restore compatibility, the vmcs12 field offsets must not change,
* although appending fields and/or filling gaps is obviously allowed.
*/
#define CHECK_OFFSET(field, loc) \
ASSERT_STRUCT_OFFSET(struct vmcs12, field, loc)
......
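
The offset rule described in the comment above is enforced at build time; below is a minimal standalone model of that pattern (not the kernel's CHECK_OFFSET/ASSERT_STRUCT_OFFSET macros; the struct, field names and offsets are made up for the example). Appending a field still compiles, while moving an existing one fails the assertion.

/*
 * Standalone sketch, not kernel code: pin a struct's field offsets at compile
 * time so the save/restore layout cannot change silently.  The struct and
 * offsets are invented purely for illustration.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct example_state {
        uint32_t revision_id;   /* expected at offset 0 */
        uint32_t abort;         /* expected at offset 4 */
        uint64_t launch_state;  /* expected at offset 8 */
        /* new fields may only be appended here */
};

#define CHECK_EXAMPLE_OFFSET(field, loc) \
        static_assert(offsetof(struct example_state, field) == (loc), \
                      "field moved: save/restore layout broken")

CHECK_EXAMPLE_OFFSET(revision_id, 0);
CHECK_EXAMPLE_OFFSET(abort, 4);
CHECK_EXAMPLE_OFFSET(launch_state, 8);

int main(void) { return 0; }

The vmcs12 code applies the same idea per field, which is why only appending new fields or filling existing gaps is permitted.
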
@@ -2868,9 +2868,6 @@ int vmx_hardware_enable(void)
return r;
}
if (enable_ept)
ept_sync_global();
return 0;
}
@@ -4142,26 +4139,6 @@ void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu)
}
}
bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
void *vapic_page;
u32 vppr;
int rvi;
if (WARN_ON_ONCE(!is_guest_mode(vcpu)) ||
!nested_cpu_has_vid(get_vmcs12(vcpu)) ||
WARN_ON_ONCE(!vmx->nested.virtual_apic_map.gfn))
return false;
rvi = vmx_get_rvi();
vapic_page = vmx->nested.virtual_apic_map.hva;
vppr = *((u32 *)(vapic_page + APIC_PROCPRI));
return ((rvi & 0xf0) > (vppr & 0xf0));
}
void vmx_msr_filter_changed(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -5086,14 +5063,19 @@ int vmx_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
return !vmx_nmi_blocked(vcpu);
}
bool __vmx_interrupt_blocked(struct kvm_vcpu *vcpu)
{
return !(vmx_get_rflags(vcpu) & X86_EFLAGS_IF) ||
(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
}
bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu)
{
if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
return false;
return !(vmx_get_rflags(vcpu) & X86_EFLAGS_IF) ||
(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
return __vmx_interrupt_blocked(vcpu);
}
int vmx_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
@@ -8610,9 +8592,9 @@ static void __vmx_exit(void)
static void vmx_exit(void)
{
kvm_exit();
__vmx_exit();
kvm_x86_vendor_exit();
__vmx_exit();
}
module_exit(vmx_exit);
......
@@ -406,6 +406,7 @@ u64 construct_eptp(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);
bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu);
void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
bool vmx_nmi_blocked(struct kvm_vcpu *vcpu);
bool __vmx_interrupt_blocked(struct kvm_vcpu *vcpu);
bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu);
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
......
@@ -48,7 +48,6 @@ void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
void vmx_apicv_pre_state_restore(struct kvm_vcpu *vcpu);
void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
void vmx_hwapic_isr_update(int max_isr);
bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu);
int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu);
void vmx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
int trig_mode, int vector);
......
@@ -10557,7 +10557,7 @@ static int kvm_check_and_inject_events(struct kvm_vcpu *vcpu,
if (is_guest_mode(vcpu) &&
kvm_x86_ops.nested_ops->has_events &&
kvm_x86_ops.nested_ops->has_events(vcpu))
kvm_x86_ops.nested_ops->has_events(vcpu, true))
*req_immediate_exit = true;
/*
@@ -11255,7 +11255,10 @@ static inline int vcpu_block(struct kvm_vcpu *vcpu)
* causes a spurious wakeup from HLT).
*/
if (is_guest_mode(vcpu)) {
if (kvm_check_nested_events(vcpu) < 0)
int r = kvm_check_nested_events(vcpu);
WARN_ON_ONCE(r == -EBUSY);
if (r < 0)
return 0;
}
@@ -13142,12 +13145,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
kvm_arch_free_memslot(kvm, old);
}
static inline bool kvm_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
{
return (is_guest_mode(vcpu) &&
static_call(kvm_x86_guest_apic_has_interrupt)(vcpu));
}
static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
{
if (!list_empty_careful(&vcpu->async_pf.done))
@@ -13181,9 +13178,7 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
if (kvm_test_request(KVM_REQ_UPDATE_PROTECTED_GUEST_STATE, vcpu))
return true;
if (kvm_arch_interrupt_allowed(vcpu) &&
(kvm_cpu_has_interrupt(vcpu) ||
kvm_guest_apic_has_interrupt(vcpu)))
if (kvm_arch_interrupt_allowed(vcpu) && kvm_cpu_has_interrupt(vcpu))
return true;
if (kvm_hv_has_stimer_pending(vcpu))
@@ -13191,7 +13186,7 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
if (is_guest_mode(vcpu) &&
kvm_x86_ops.nested_ops->has_events &&
kvm_x86_ops.nested_ops->has_events(vcpu))
kvm_x86_ops.nested_ops->has_events(vcpu, false))
return true;
if (kvm_xen_has_pending_events(vcpu))
......