Commit fde0451b authored by David Woodhouse, committed by Paolo Bonzini

KVM: x86/xen: Support per-vCPU event channel upcall via local APIC

Windows uses a per-vCPU vector, delivered via the local APIC much like
an MSI (with an associated EOI), unlike the traditional guest-wide
vector which is just magically asserted by Xen (and in the KVM case by
kvm_xen_has_interrupt() / kvm_cpu_get_extint()).

Now that the kernel is able to raise event channel events for itself,
being able to do so for Windows guests is also going to be useful.
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20220303154127.202856-15-dwmw2@infradead.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 28d1629f
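For context, a VMM would program the new attribute through the existing KVM_XEN_VCPU_SET_ATTR vCPU ioctl. A minimal sketch, not part of this patch: the helper name set_xen_upcall_vector and the vector value 0x90 are illustrative, the vCPU fd is assumed to come from KVM_CREATE_VCPU, and the attribute is only available when KVM_XEN_HVM_CONFIG_EVTCHN_SEND is supported.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Illustrative only: ask KVM to deliver this vCPU's Xen event channel
 * upcall through the local APIC using the given vector.
 */
static int set_xen_upcall_vector(int vcpu_fd, __u8 vector)
{
	struct kvm_xen_vcpu_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type = KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR;
	attr.u.vector = vector;	/* vectors below 0x10 are rejected with -EINVAL */

	return ioctl(vcpu_fd, KVM_XEN_VCPU_SET_ATTR, &attr);
}

Usage would be e.g. set_xen_upcall_vector(vcpu_fd, 0x90); setting the vector back to 0 disables the per-vCPU upcall again, since the delivery paths below only act when upcall_vector is non-zero.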
@@ -606,6 +606,7 @@ struct kvm_vcpu_hv {
 struct kvm_vcpu_xen {
 	u64 hypercall_rip;
 	u32 current_runstate;
+	u8 upcall_vector;
 	struct gfn_to_pfn_cache vcpu_info_cache;
 	struct gfn_to_pfn_cache vcpu_time_info_cache;
 	struct gfn_to_pfn_cache runstate_cache;
...
@@ -314,6 +314,22 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
 	mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
 }
 
+static void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *v)
+{
+	struct kvm_lapic_irq irq = { };
+	int r;
+
+	irq.dest_id = v->vcpu_id;
+	irq.vector = v->arch.xen.upcall_vector;
+	irq.dest_mode = APIC_DEST_PHYSICAL;
+	irq.shorthand = APIC_DEST_NOSHORT;
+	irq.delivery_mode = APIC_DM_FIXED;
+	irq.level = 1;
+
+	/* The fast version will always work for physical unicast */
+	WARN_ON_ONCE(!kvm_irq_delivery_to_apic_fast(v->kvm, NULL, &irq, &r, NULL));
+}
+
 /*
  * On event channel delivery, the vcpu_info may not have been accessible.
  * In that case, there are bits in vcpu->arch.xen.evtchn_pending_sel which
@@ -374,6 +390,10 @@ void kvm_xen_inject_pending_events(struct kvm_vcpu *v)
 	}
 	read_unlock_irqrestore(&gpc->lock, flags);
 
+	/* For the per-vCPU lapic vector, deliver it as MSI. */
+	if (v->arch.xen.upcall_vector)
+		kvm_xen_inject_vcpu_vector(v);
+
 	mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
 }
@@ -708,6 +728,15 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
 		r = 0;
 		break;
 
+	case KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR:
+		if (data->u.vector && data->u.vector < 0x10)
+			r = -EINVAL;
+		else {
+			vcpu->arch.xen.upcall_vector = data->u.vector;
+			r = 0;
+		}
+		break;
+
 	default:
 		break;
 	}
@@ -795,6 +824,11 @@ int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
 		r = 0;
 		break;
 
+	case KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR:
+		data->u.vector = vcpu->arch.xen.upcall_vector;
+		r = 0;
+		break;
+
 	default:
 		break;
 	}
@@ -1228,6 +1262,12 @@ int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm)
 				kick_vcpu = true;
 			}
 		}
+
+		/* For the per-vCPU lapic vector, deliver it as MSI. */
+		if (kick_vcpu && vcpu->arch.xen.upcall_vector) {
+			kvm_xen_inject_vcpu_vector(vcpu);
+			kick_vcpu = false;
+		}
 	}
 
  out_rcu:
...
@@ -1754,6 +1754,7 @@ struct kvm_xen_vcpu_attr {
 			__u32 priority;
 			__u64 expires_ns;
 		} timer;
+		__u8 vector;
 	} u;
 };
 
@@ -1767,6 +1768,7 @@ struct kvm_xen_vcpu_attr {
 /* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */
 #define KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID		0x6
 #define KVM_XEN_VCPU_ATTR_TYPE_TIMER		0x7
+#define KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR	0x8
 
 /* Secure Encrypted Virtualization command */
 enum sev_cmd_id {
...