Commit aaffcfd1 authored by Wanpeng Li, committed by Paolo Bonzini

KVM: X86: Implement PV IPIs in linux guest

Implement paravirtual APIC hooks to enable PV IPIs for KVM when the "send IPI"
hypercall is available.  The hypercall lets a guest send IPIs to multiple
destinations at once, with at most 128 destinations per hypercall in 64-bit
mode and 64 in 32-bit mode.

Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent d63bae07
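
To make the hypercall ABI concrete, here is a minimal, hypothetical sketch (not part of this patch) of how a 64-bit guest could invoke the new hypercall directly: the first two arguments carry the low and high 64-bit halves of the destination bitmap, the third is the lowest APIC ID covered by the bitmap, and the fourth is the APIC ICR value encoding the delivery mode and vector. The function name, the APIC IDs 16 and 18, and the vector 0xf2 are made up for illustration; kvm_hypercall4(), BIT(), APIC_DM_FIXED and KVM_HC_SEND_IPI are the kernel symbols used by the patch below.

#include <linux/bits.h>
#include <linux/kvm_para.h>
#include <asm/apicdef.h>

/* Hypothetical example: send a fixed-vector IPI to APIC IDs 16 and 18. */
static void example_pv_send_ipi(void)
{
	unsigned long ipi_bitmap_low = 0, ipi_bitmap_high = 0;
	unsigned long min_apic_id = 16;			/* lowest APIC ID in the bitmap */
	unsigned long icr = APIC_DM_FIXED | 0xf2;	/* delivery mode + vector */

	/* APIC IDs 16 and 18 map to bits 0 and 2 relative to min_apic_id. */
	ipi_bitmap_low |= BIT(16 - min_apic_id) | BIT(18 - min_apic_id);

	kvm_hypercall4(KVM_HC_SEND_IPI, ipi_bitmap_low, ipi_bitmap_high,
		       min_apic_id, icr);
}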
@@ -28,6 +28,7 @@
 #define KVM_FEATURE_PV_UNHALT		7
 #define KVM_FEATURE_PV_TLB_FLUSH	9
 #define KVM_FEATURE_ASYNC_PF_VMEXIT	10
+#define KVM_FEATURE_PV_SEND_IPI	11

 #define KVM_HINTS_REALTIME		0
...
@@ -454,6 +454,98 @@ static void __init sev_map_percpu_data(void)
 }

 #ifdef CONFIG_SMP
+#define KVM_IPI_CLUSTER_SIZE	(2 * BITS_PER_LONG)
+
+static void __send_ipi_mask(const struct cpumask *mask, int vector)
+{
+	unsigned long flags;
+	int cpu, apic_id, icr;
+	int min = 0, max = 0;
+#ifdef CONFIG_X86_64
+	__uint128_t ipi_bitmap = 0;
+#else
+	u64 ipi_bitmap = 0;
+#endif
+
+	if (cpumask_empty(mask))
+		return;
+
+	local_irq_save(flags);
+
+	switch (vector) {
+	default:
+		icr = APIC_DM_FIXED | vector;
+		break;
+	case NMI_VECTOR:
+		icr = APIC_DM_NMI;
+		break;
+	}
+
+	for_each_cpu(cpu, mask) {
+		apic_id = per_cpu(x86_cpu_to_apicid, cpu);
+		if (!ipi_bitmap) {
+			min = max = apic_id;
+		} else if (apic_id < min && max - apic_id < KVM_IPI_CLUSTER_SIZE) {
+			ipi_bitmap <<= min - apic_id;
+			min = apic_id;
+		} else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) {
+			max = apic_id < max ? max : apic_id;
+		} else {
+			kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
+				(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
+			min = max = apic_id;
+			ipi_bitmap = 0;
+		}
+		__set_bit(apic_id - min, (unsigned long *)&ipi_bitmap);
+	}
+
+	if (ipi_bitmap) {
+		kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
+			(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
+	}
+
+	local_irq_restore(flags);
+}
+
+static void kvm_send_ipi_mask(const struct cpumask *mask, int vector)
+{
+	__send_ipi_mask(mask, vector);
+}
+
+static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
+{
+	unsigned int this_cpu = smp_processor_id();
+	struct cpumask new_mask;
+	const struct cpumask *local_mask;
+
+	cpumask_copy(&new_mask, mask);
+	cpumask_clear_cpu(this_cpu, &new_mask);
+	local_mask = &new_mask;
+	__send_ipi_mask(local_mask, vector);
+}
+
+static void kvm_send_ipi_allbutself(int vector)
+{
+	kvm_send_ipi_mask_allbutself(cpu_online_mask, vector);
+}
+
+static void kvm_send_ipi_all(int vector)
+{
+	__send_ipi_mask(cpu_online_mask, vector);
+}
+
+/*
+ * Set the IPI entry points
+ */
+static void kvm_setup_pv_ipi(void)
+{
+	apic->send_IPI_mask = kvm_send_ipi_mask;
+	apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
+	apic->send_IPI_allbutself = kvm_send_ipi_allbutself;
+	apic->send_IPI_all = kvm_send_ipi_all;
+	pr_info("KVM setup pv IPIs\n");
+}
+
 static void __init kvm_smp_prepare_cpus(unsigned int max_cpus)
 {
 	native_smp_prepare_cpus(max_cpus);
@@ -626,6 +718,10 @@ static uint32_t __init kvm_detect(void)

 static void __init kvm_apic_init(void)
 {
+#if defined(CONFIG_SMP)
+	if (kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI))
+		kvm_setup_pv_ipi();
+#endif
 }

 static void __init kvm_init_platform(void)
...
@@ -27,6 +27,7 @@
 #define KVM_HC_MIPS_EXIT_VM		7
 #define KVM_HC_MIPS_CONSOLE_OUTPUT	8
 #define KVM_HC_CLOCK_PAIRING		9
+#define KVM_HC_SEND_IPI		10

 /*
  * hypercalls use architecture specific
...