Commit 9c9025ea authored by Sean Christopherson

KVM: x86: Plumb "force_immediate_exit" into kvm_entry() tracepoint

Annotate the kvm_entry() tracepoint with "immediate exit" when KVM is
forcing a VM-Exit immediately after VM-Enter, e.g. when KVM wants to
inject an event but needs to first complete some other operation.
Knowing that KVM is (or isn't) forcing an exit is useful information when
debugging issues related to event injection.
Suggested-by: Maxim Levitsky <mlevitsk@redhat.com>
Link: https://lore.kernel.org/r/20240110012705.506918-2-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
parent 2a5f091c
...@@ -1663,7 +1663,8 @@ struct kvm_x86_ops { ...@@ -1663,7 +1663,8 @@ struct kvm_x86_ops {
void (*flush_tlb_guest)(struct kvm_vcpu *vcpu); void (*flush_tlb_guest)(struct kvm_vcpu *vcpu);
int (*vcpu_pre_run)(struct kvm_vcpu *vcpu); int (*vcpu_pre_run)(struct kvm_vcpu *vcpu);
enum exit_fastpath_completion (*vcpu_run)(struct kvm_vcpu *vcpu); enum exit_fastpath_completion (*vcpu_run)(struct kvm_vcpu *vcpu,
bool force_immediate_exit);
int (*handle_exit)(struct kvm_vcpu *vcpu, int (*handle_exit)(struct kvm_vcpu *vcpu,
enum exit_fastpath_completion exit_fastpath); enum exit_fastpath_completion exit_fastpath);
int (*skip_emulated_instruction)(struct kvm_vcpu *vcpu); int (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
......
...@@ -4112,12 +4112,13 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_in ...@@ -4112,12 +4112,13 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_in
guest_state_exit_irqoff(); guest_state_exit_irqoff();
} }
static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu,
bool force_immediate_exit)
{ {
struct vcpu_svm *svm = to_svm(vcpu); struct vcpu_svm *svm = to_svm(vcpu);
bool spec_ctrl_intercepted = msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL); bool spec_ctrl_intercepted = msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL);
trace_kvm_entry(vcpu); trace_kvm_entry(vcpu, force_immediate_exit);
svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
......
...@@ -15,20 +15,23 @@ ...@@ -15,20 +15,23 @@
* Tracepoint for guest mode entry. * Tracepoint for guest mode entry.
*/ */
TRACE_EVENT(kvm_entry, TRACE_EVENT(kvm_entry,
TP_PROTO(struct kvm_vcpu *vcpu), TP_PROTO(struct kvm_vcpu *vcpu, bool force_immediate_exit),
TP_ARGS(vcpu), TP_ARGS(vcpu, force_immediate_exit),
TP_STRUCT__entry( TP_STRUCT__entry(
__field( unsigned int, vcpu_id ) __field( unsigned int, vcpu_id )
__field( unsigned long, rip ) __field( unsigned long, rip )
__field( bool, immediate_exit )
), ),
TP_fast_assign( TP_fast_assign(
__entry->vcpu_id = vcpu->vcpu_id; __entry->vcpu_id = vcpu->vcpu_id;
__entry->rip = kvm_rip_read(vcpu); __entry->rip = kvm_rip_read(vcpu);
__entry->immediate_exit = force_immediate_exit;
), ),
TP_printk("vcpu %u, rip 0x%lx", __entry->vcpu_id, __entry->rip) TP_printk("vcpu %u, rip 0x%lx%s", __entry->vcpu_id, __entry->rip,
__entry->immediate_exit ? "[immediate exit]" : "")
); );
/* /*
......
...@@ -7265,7 +7265,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu, ...@@ -7265,7 +7265,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
guest_state_exit_irqoff(); guest_state_exit_irqoff();
} }
static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu) static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
{ {
struct vcpu_vmx *vmx = to_vmx(vcpu); struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long cr3, cr4; unsigned long cr3, cr4;
...@@ -7292,7 +7292,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu) ...@@ -7292,7 +7292,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
return EXIT_FASTPATH_NONE; return EXIT_FASTPATH_NONE;
} }
trace_kvm_entry(vcpu); trace_kvm_entry(vcpu, force_immediate_exit);
if (vmx->ple_window_dirty) { if (vmx->ple_window_dirty) {
vmx->ple_window_dirty = false; vmx->ple_window_dirty = false;
......
...@@ -10956,7 +10956,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) ...@@ -10956,7 +10956,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
WARN_ON_ONCE((kvm_vcpu_apicv_activated(vcpu) != kvm_vcpu_apicv_active(vcpu)) && WARN_ON_ONCE((kvm_vcpu_apicv_activated(vcpu) != kvm_vcpu_apicv_active(vcpu)) &&
(kvm_get_apic_mode(vcpu) != LAPIC_MODE_DISABLED)); (kvm_get_apic_mode(vcpu) != LAPIC_MODE_DISABLED));
exit_fastpath = static_call(kvm_x86_vcpu_run)(vcpu); exit_fastpath = static_call(kvm_x86_vcpu_run)(vcpu, req_immediate_exit);
if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST)) if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST))
break; break;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment