Commit 023a6117 authored by Thomas Gleixner

x86/apic/x2apic: Simplify cluster management

The cluster management code creates a cluster mask per cpu, which requires that
all cluster masks are iterated and updated on every cpu online/offline
operation. Other information about the cluster is kept in separate per cpu
variables.

Create a data structure which holds all information about a cluster and
fill it in when the first CPU of a cluster comes online. If another CPU of
a cluster comes online, it just finds the pointer to the existing cluster
structure and reuses it.

That simplifies all usage sites and gets rid of a number of pointless
iterations over the online cpus to find the cpus which belong to the
cluster.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Juergen Gross <jgross@suse.com>
Tested-by: Yu Chen <yu.c.chen@intel.com>
Acked-by: Juergen Gross <jgross@suse.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: Alok Kataria <akataria@vmware.com>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Rui Zhang <rui.zhang@intel.com>
Cc: "K. Y. Srinivasan" <kys@microsoft.com>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Len Brown <lenb@kernel.org>
Link: https://lkml.kernel.org/r/20170913213153.992629420@linutronix.de
parent 72f48a38
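
Before the diff, here is a stand-alone user-space sketch of the bookkeeping scheme the commit message describes: one structure per cluster, allocated when the first CPU of a cluster shows up and simply reused by later siblings. It is an illustration only, assuming simplified stand-ins (plain arrays and a bitmask instead of the kernel's per-cpu variables and cpumasks, and a made-up logical_apicid() helper); the authoritative code is in the diff below.

/*
 * Simplified model of the per-cluster bookkeeping introduced by this patch.
 * Names mirror the kernel code, but the per-cpu machinery is replaced by
 * plain arrays; this is an illustration, not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 8

struct cluster_mask {
	unsigned int clusterid;
	unsigned long mask;		/* stand-in for struct cpumask */
};

/* stand-in for DEFINE_PER_CPU(struct cluster_mask *, cluster_masks) */
static struct cluster_mask *cluster_masks[NR_CPUS];

/* Hypothetical logical APIC ID: cluster in the upper 16 bits, as in x2apic
 * logical mode, with four CPUs per cluster. */
static unsigned int logical_apicid(unsigned int cpu)
{
	return ((cpu / 4) << 16) | (1u << (cpu % 4));
}

/* Mimics the new init_x2apic_ldr() flow: the first CPU of a cluster installs
 * the structure, later CPUs of the same cluster find and reuse it. */
static void cpu_online(unsigned int cpu)
{
	unsigned int cluster = logical_apicid(cpu) >> 16;
	struct cluster_mask *cmsk = NULL;
	unsigned int i;

	for (i = 0; i < NR_CPUS; i++) {
		if (cluster_masks[i] && cluster_masks[i]->clusterid == cluster) {
			cmsk = cluster_masks[i];	/* reuse existing cluster */
			break;
		}
	}
	if (!cmsk) {
		cmsk = calloc(1, sizeof(*cmsk));	/* first CPU of the cluster */
		cmsk->clusterid = cluster;
	}
	cluster_masks[cpu] = cmsk;
	cmsk->mask |= 1ul << cpu;
}

int main(void)
{
	unsigned int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		cpu_online(cpu);

	/* Every CPU now points at its cluster's single shared structure. */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %u: cluster %u, siblings 0x%lx\n",
		       cpu, cluster_masks[cpu]->clusterid,
		       cluster_masks[cpu]->mask);
	return 0;
}

With this scheme, a cpu online/offline operation only touches the one shared structure for its own cluster, instead of walking every online CPU to update sibling masks.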
arch/x86/kernel/apic/x2apic_cluster.c
@@ -10,20 +10,22 @@
 #include <asm/smp.h>
 #include "x2apic.h"
 
+struct cluster_mask {
+	unsigned int	clusterid;
+	int		node;
+	struct cpumask	mask;
+};
+
 static DEFINE_PER_CPU(u32, x86_cpu_to_logical_apicid);
-static DEFINE_PER_CPU(cpumask_var_t, cpus_in_cluster);
 static DEFINE_PER_CPU(cpumask_var_t, ipi_mask);
+static DEFINE_PER_CPU(struct cluster_mask *, cluster_masks);
+static struct cluster_mask *cluster_hotplug_mask;
 
 static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 {
 	return x2apic_enabled();
 }
 
-static inline u32 x2apic_cluster(int cpu)
-{
-	return per_cpu(x86_cpu_to_logical_apicid, cpu) >> 16;
-}
-
 static void x2apic_send_IPI(int cpu, int vector)
 {
 	u32 dest = per_cpu(x86_cpu_to_logical_apicid, cpu);
@@ -35,49 +37,34 @@ static void x2apic_send_IPI(int cpu, int vector)
 static void
 __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
 {
-	struct cpumask *cpus_in_cluster_ptr;
-	struct cpumask *ipi_mask_ptr;
-	unsigned int cpu, this_cpu;
+	unsigned int cpu, clustercpu;
+	struct cpumask *tmpmsk;
 	unsigned long flags;
 	u32 dest;
 
 	x2apic_wrmsr_fence();
-
 	local_irq_save(flags);
 
-	this_cpu = smp_processor_id();
+	tmpmsk = this_cpu_cpumask_var_ptr(ipi_mask);
+	cpumask_copy(tmpmsk, mask);
+	/* If IPI should not be sent to self, clear current CPU */
+	if (apic_dest != APIC_DEST_ALLINC)
+		cpumask_clear_cpu(smp_processor_id(), tmpmsk);
 
-	/*
-	 * We are to modify mask, so we need an own copy
-	 * and be sure it's manipulated with irq off.
-	 */
-	ipi_mask_ptr = this_cpu_cpumask_var_ptr(ipi_mask);
-	cpumask_copy(ipi_mask_ptr, mask);
-
-	/*
-	 * The idea is to send one IPI per cluster.
-	 */
-	for_each_cpu(cpu, ipi_mask_ptr) {
-		unsigned long i;
+	/* Collapse cpus in a cluster so a single IPI per cluster is sent */
+	for_each_cpu(cpu, tmpmsk) {
+		struct cluster_mask *cmsk = per_cpu(cluster_masks, cpu);
 
-		cpus_in_cluster_ptr = per_cpu(cpus_in_cluster, cpu);
 		dest = 0;
-
-		/* Collect cpus in cluster. */
-		for_each_cpu_and(i, ipi_mask_ptr, cpus_in_cluster_ptr) {
-			if (apic_dest == APIC_DEST_ALLINC || i != this_cpu)
-				dest |= per_cpu(x86_cpu_to_logical_apicid, i);
-		}
+		for_each_cpu_and(clustercpu, tmpmsk, &cmsk->mask)
+			dest |= per_cpu(x86_cpu_to_logical_apicid, clustercpu);
 
 		if (!dest)
 			continue;
 
 		__x2apic_send_IPI_dest(dest, vector, apic->dest_logical);
-		/*
-		 * Cluster sibling cpus should be discared now so
-		 * we would not send IPI them second time.
-		 */
-		cpumask_andnot(ipi_mask_ptr, ipi_mask_ptr, cpus_in_cluster_ptr);
+		/* Remove cluster CPUs from tmpmask */
+		cpumask_andnot(tmpmsk, tmpmsk, &cmsk->mask);
 	}
 
 	local_irq_restore(flags);
@@ -109,91 +96,100 @@ x2apic_cpu_mask_to_apicid(const struct cpumask *mask, struct irq_data *irqdata,
 			 unsigned int *apicid)
 {
 	struct cpumask *effmsk = irq_data_get_effective_affinity_mask(irqdata);
+	struct cluster_mask *cmsk;
 	unsigned int cpu;
 	u32 dest = 0;
-	u16 cluster;
 
 	cpu = cpumask_first(mask);
 	if (cpu >= nr_cpu_ids)
 		return -EINVAL;
 
-	dest = per_cpu(x86_cpu_to_logical_apicid, cpu);
-	cluster = x2apic_cluster(cpu);
-
+	cmsk = per_cpu(cluster_masks, cpu);
 	cpumask_clear(effmsk);
-	for_each_cpu(cpu, mask) {
-		if (cluster != x2apic_cluster(cpu))
-			continue;
+	for_each_cpu_and(cpu, &cmsk->mask, mask) {
 		dest |= per_cpu(x86_cpu_to_logical_apicid, cpu);
 		cpumask_set_cpu(cpu, effmsk);
 	}
 
 	*apicid = dest;
 	return 0;
 }
 
 static void init_x2apic_ldr(void)
 {
-	unsigned int this_cpu = smp_processor_id();
+	struct cluster_mask *cmsk = this_cpu_read(cluster_masks);
+	u32 cluster, apicid = apic_read(APIC_LDR);
 	unsigned int cpu;
 
-	per_cpu(x86_cpu_to_logical_apicid, this_cpu) = apic_read(APIC_LDR);
+	this_cpu_write(x86_cpu_to_logical_apicid, apicid);
+
+	if (cmsk)
+		goto update;
 
-	cpumask_set_cpu(this_cpu, per_cpu(cpus_in_cluster, this_cpu));
+	cluster = apicid >> 16;
 	for_each_online_cpu(cpu) {
-		if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu))
-			continue;
-		cpumask_set_cpu(this_cpu, per_cpu(cpus_in_cluster, cpu));
-		cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, this_cpu));
+		cmsk = per_cpu(cluster_masks, cpu);
+		/* Matching cluster found. Link and update it. */
+		if (cmsk && cmsk->clusterid == cluster)
+			goto update;
 	}
+	cmsk = cluster_hotplug_mask;
+	cluster_hotplug_mask = NULL;
+update:
+	this_cpu_write(cluster_masks, cmsk);
+	cpumask_set_cpu(smp_processor_id(), &cmsk->mask);
 }
 
-/*
- * At CPU state changes, update the x2apic cluster sibling info.
- */
-static int x2apic_prepare_cpu(unsigned int cpu)
+static int alloc_clustermask(unsigned int cpu, int node)
 {
-	if (!zalloc_cpumask_var(&per_cpu(cpus_in_cluster, cpu), GFP_KERNEL))
-		return -ENOMEM;
+	if (per_cpu(cluster_masks, cpu))
+		return 0;
+	/*
+	 * If a hotplug spare mask exists, check whether it's on the right
+	 * node. If not, free it and allocate a new one.
+	 */
+	if (cluster_hotplug_mask) {
+		if (cluster_hotplug_mask->node == node)
+			return 0;
+		kfree(cluster_hotplug_mask);
+	}
 
-	if (!zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL)) {
-		free_cpumask_var(per_cpu(cpus_in_cluster, cpu));
+	cluster_hotplug_mask = kzalloc_node(sizeof(*cluster_hotplug_mask),
+					    GFP_KERNEL, node);
+	if (!cluster_hotplug_mask)
 		return -ENOMEM;
-	}
+	cluster_hotplug_mask->node = node;
+	return 0;
+}
 
+static int x2apic_prepare_cpu(unsigned int cpu)
+{
+	if (alloc_clustermask(cpu, cpu_to_node(cpu)) < 0)
+		return -ENOMEM;
+	if (!zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL))
+		return -ENOMEM;
 	return 0;
 }
 
-static int x2apic_dead_cpu(unsigned int this_cpu)
+static int x2apic_dead_cpu(unsigned int dead_cpu)
 {
-	int cpu;
+	struct cluster_mask *cmsk = per_cpu(cluster_masks, dead_cpu);
 
-	for_each_online_cpu(cpu) {
-		if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu))
-			continue;
-		cpumask_clear_cpu(this_cpu, per_cpu(cpus_in_cluster, cpu));
-		cpumask_clear_cpu(cpu, per_cpu(cpus_in_cluster, this_cpu));
-	}
-	free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu));
-	free_cpumask_var(per_cpu(ipi_mask, this_cpu));
+	cpumask_clear_cpu(smp_processor_id(), &cmsk->mask);
+	free_cpumask_var(per_cpu(ipi_mask, dead_cpu));
 	return 0;
 }
 
 static int x2apic_cluster_probe(void)
 {
-	int cpu = smp_processor_id();
-	int ret;
-
 	if (!x2apic_mode)
 		return 0;
 
-	ret = cpuhp_setup_state(CPUHP_X2APIC_PREPARE, "x86/x2apic:prepare",
-				x2apic_prepare_cpu, x2apic_dead_cpu);
-	if (ret < 0) {
+	if (cpuhp_setup_state(CPUHP_X2APIC_PREPARE, "x86/x2apic:prepare",
+			      x2apic_prepare_cpu, x2apic_dead_cpu) < 0) {
 		pr_err("Failed to register X2APIC_PREPARE\n");
 		return 0;
 	}
-	cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, cpu));
+	init_x2apic_ldr();
 	return 1;
 }
@@ -208,6 +204,8 @@ static const struct cpumask *x2apic_cluster_target_cpus(void)
 static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
 					     const struct cpumask *mask)
 {
+	struct cluster_mask *cmsk = per_cpu(cluster_masks, cpu);
+
 	/*
 	 * To minimize vector pressure, default case of boot, device bringup
 	 * etc will use a single cpu for the interrupt destination.
@@ -220,7 +218,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
 	if (mask == x2apic_cluster_target_cpus())
 		cpumask_copy(retmask, cpumask_of(cpu));
 	else
-		cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
+		cpumask_and(retmask, mask, &cmsk->mask);
 }
 
 static struct apic apic_x2apic_cluster __ro_after_init = {