Commit baab1e84 authored by Thomas Gleixner

x86/apic: Remove unused callbacks

Now that the old vector allocator is gone, these APIC callbacks are unused. Remove
them.
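The removed cpu_mask_to_apicid()/vector_allocation_domain() callbacks derived an
interrupt destination from a cpumask; the new vector allocator targets a single
CPU at a time and only needs the calc_dest_apicid() style hook that stays in
place. A minimal, self-contained userspace sketch of that per-CPU shape follows;
the cpu_to_apicid[] table and its values are made up for illustration and are
not the kernel's per-cpu x86_cpu_to_apicid data.

/*
 * Illustrative sketch only: it mimics the calc_dest_apicid() style hook
 * kept by the new allocator -- one CPU in, one destination APIC ID out,
 * no cpumask walking and no -EINVAL path.
 */
#include <stdio.h>

#define NR_CPUS 4

/* Toy stand-in for the kernel's per-cpu x86_cpu_to_apicid variable. */
static const unsigned int cpu_to_apicid[NR_CPUS] = { 0, 2, 4, 6 };

static unsigned int calc_dest_apicid(unsigned int cpu)
{
	return cpu_to_apicid[cpu];
}

int main(void)
{
	for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %u -> apicid 0x%x\n", cpu, calc_dest_apicid(cpu));
	return 0;
}

The point is only the shape of the interface: with a single target CPU there is
no mask to scan and no error path, which is why the mask based callbacks removed
below are no longer needed.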
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Juergen Gross <jgross@suse.com>
Tested-by: Yu Chen <yu.c.chen@intel.com>
Acked-by: Juergen Gross <jgross@suse.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: Alok Kataria <akataria@vmware.com>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Rui Zhang <rui.zhang@intel.com>
Cc: "K. Y. Srinivasan" <kys@microsoft.com>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Len Brown <lenb@kernel.org>
Link: https://lkml.kernel.org/r/20170913213155.524662349@linutronix.de
parent 69cde000
@@ -11,64 +11,16 @@ u32 apic_default_calc_apicid(unsigned int cpu)
 	return per_cpu(x86_cpu_to_apicid, cpu);
 }
-int default_cpu_mask_to_apicid(const struct cpumask *msk, struct irq_data *irqd,
-			       unsigned int *apicid)
-{
-	unsigned int cpu = cpumask_first(msk);
-	if (cpu >= nr_cpu_ids)
-		return -EINVAL;
-	*apicid = per_cpu(x86_cpu_to_apicid, cpu);
-	irq_data_update_effective_affinity(irqd, cpumask_of(cpu));
-	return 0;
-}
 u32 apic_flat_calc_apicid(unsigned int cpu)
 {
 	return 1U << cpu;
 }
-int flat_cpu_mask_to_apicid(const struct cpumask *mask, struct irq_data *irqd,
-			    unsigned int *apicid)
-{
-	struct cpumask *effmsk = irq_data_get_effective_affinity_mask(irqd);
-	unsigned long cpu_mask = cpumask_bits(mask)[0] & APIC_ALL_CPUS;
-	if (!cpu_mask)
-		return -EINVAL;
-	*apicid = (unsigned int)cpu_mask;
-	cpumask_bits(effmsk)[0] = cpu_mask;
-	return 0;
-}
 bool default_check_apicid_used(physid_mask_t *map, int apicid)
 {
 	return physid_isset(apicid, *map);
 }
-void flat_vector_allocation_domain(int cpu, struct cpumask *retmask,
-				   const struct cpumask *mask)
-{
-	/*
-	 * Careful. Some cpus do not strictly honor the set of cpus
-	 * specified in the interrupt destination when using lowest
-	 * priority interrupt delivery mode.
-	 *
-	 * In particular there was a hyperthreading cpu observed to
-	 * deliver interrupts to the wrong hyperthread when only one
-	 * hyperthread was specified in the interrupt desitination.
-	 */
-	cpumask_clear(retmask);
-	cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
-}
-void default_vector_allocation_domain(int cpu, struct cpumask *retmask,
-				      const struct cpumask *mask)
-{
-	cpumask_copy(retmask, cpumask_of(cpu));
-}
 void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
 {
 	*retmap = *phys_map;
...
@@ -158,7 +158,6 @@ static struct apic apic_flat __ro_after_init = {
 	.dest_logical = APIC_DEST_LOGICAL,
 	.check_apicid_used = NULL,
-	.vector_allocation_domain = flat_vector_allocation_domain,
 	.init_apic_ldr = flat_init_apic_ldr,
 	.ioapic_phys_id_map = NULL,
@@ -171,7 +170,6 @@ static struct apic apic_flat __ro_after_init = {
 	.get_apic_id = flat_get_apic_id,
 	.set_apic_id = set_apic_id,
-	.cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
 	.calc_dest_apicid = apic_flat_calc_apicid,
 	.send_IPI = default_send_IPI_single,
@@ -253,7 +251,6 @@ static struct apic apic_physflat __ro_after_init = {
 	.dest_logical = 0,
 	.check_apicid_used = NULL,
-	.vector_allocation_domain = default_vector_allocation_domain,
 	/* not needed, but shouldn't hurt: */
 	.init_apic_ldr = flat_init_apic_ldr,
@@ -267,7 +264,6 @@ static struct apic apic_physflat __ro_after_init = {
 	.get_apic_id = flat_get_apic_id,
 	.set_apic_id = set_apic_id,
-	.cpu_mask_to_apicid = default_cpu_mask_to_apicid,
 	.calc_dest_apicid = apic_default_calc_apicid,
 	.send_IPI = default_send_IPI_single_phys,
...
@@ -83,14 +83,6 @@ static int noop_apic_id_registered(void)
 	return physid_isset(0, phys_cpu_present_map);
 }
-static void noop_vector_allocation_domain(int cpu, struct cpumask *retmask,
-					  const struct cpumask *mask)
-{
-	if (cpu != 0)
-		pr_warning("APIC: Vector allocated for non-BSP cpu\n");
-	cpumask_copy(retmask, cpumask_of(cpu));
-}
 static u32 noop_apic_read(u32 reg)
 {
 	WARN_ON_ONCE(boot_cpu_has(X86_FEATURE_APIC) && !disable_apic);
@@ -125,7 +117,6 @@ struct apic apic_noop __ro_after_init = {
 	.dest_logical = APIC_DEST_LOGICAL,
 	.check_apicid_used = default_check_apicid_used,
-	.vector_allocation_domain = noop_vector_allocation_domain,
 	.init_apic_ldr = noop_init_apic_ldr,
 	.ioapic_phys_id_map = default_ioapic_phys_id_map,
@@ -141,7 +132,6 @@ struct apic apic_noop __ro_after_init = {
 	.get_apic_id = noop_get_apic_id,
 	.set_apic_id = NULL,
-	.cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
 	.calc_dest_apicid = apic_flat_calc_apicid,
 	.send_IPI = noop_send_IPI,
...
@@ -253,7 +253,6 @@ static const struct apic apic_numachip1 __refconst = {
 	.dest_logical = 0,
 	.check_apicid_used = NULL,
-	.vector_allocation_domain = default_vector_allocation_domain,
 	.init_apic_ldr = flat_init_apic_ldr,
 	.ioapic_phys_id_map = NULL,
@@ -266,7 +265,6 @@ static const struct apic apic_numachip1 __refconst = {
 	.get_apic_id = numachip1_get_apic_id,
 	.set_apic_id = numachip1_set_apic_id,
-	.cpu_mask_to_apicid = default_cpu_mask_to_apicid,
 	.calc_dest_apicid = apic_default_calc_apicid,
 	.send_IPI = numachip_send_IPI_one,
@@ -304,7 +302,6 @@ static const struct apic apic_numachip2 __refconst = {
 	.dest_logical = 0,
 	.check_apicid_used = NULL,
-	.vector_allocation_domain = default_vector_allocation_domain,
 	.init_apic_ldr = flat_init_apic_ldr,
 	.ioapic_phys_id_map = NULL,
@@ -317,7 +314,6 @@ static const struct apic apic_numachip2 __refconst = {
 	.get_apic_id = numachip2_get_apic_id,
 	.set_apic_id = numachip2_set_apic_id,
-	.cpu_mask_to_apicid = default_cpu_mask_to_apicid,
 	.calc_dest_apicid = apic_default_calc_apicid,
 	.send_IPI = numachip_send_IPI_one,
...
@@ -158,7 +158,6 @@ static struct apic apic_bigsmp __ro_after_init = {
 	.dest_logical = 0,
 	.check_apicid_used = bigsmp_check_apicid_used,
-	.vector_allocation_domain = default_vector_allocation_domain,
 	.init_apic_ldr = bigsmp_init_apic_ldr,
 	.ioapic_phys_id_map = bigsmp_ioapic_phys_id_map,
@@ -171,7 +170,6 @@ static struct apic apic_bigsmp __ro_after_init = {
 	.get_apic_id = bigsmp_get_apic_id,
 	.set_apic_id = NULL,
-	.cpu_mask_to_apicid = default_cpu_mask_to_apicid,
 	.calc_dest_apicid = apic_default_calc_apicid,
 	.send_IPI = default_send_IPI_single_phys,
...
@@ -113,7 +113,6 @@ static struct apic apic_default __ro_after_init = {
 	.dest_logical = APIC_DEST_LOGICAL,
 	.check_apicid_used = default_check_apicid_used,
-	.vector_allocation_domain = flat_vector_allocation_domain,
 	.init_apic_ldr = default_init_apic_ldr,
 	.ioapic_phys_id_map = default_ioapic_phys_id_map,
@@ -126,7 +125,6 @@ static struct apic apic_default __ro_after_init = {
 	.get_apic_id = default_get_apic_id,
 	.set_apic_id = NULL,
-	.cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
 	.calc_dest_apicid = apic_flat_calc_apicid,
 	.send_IPI = default_send_IPI_single,
...
@@ -91,29 +91,6 @@ static void x2apic_send_IPI_all(int vector)
 	__x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLINC);
 }
-static int
-x2apic_cpu_mask_to_apicid(const struct cpumask *mask, struct irq_data *irqdata,
-			  unsigned int *apicid)
-{
-	struct cpumask *effmsk = irq_data_get_effective_affinity_mask(irqdata);
-	struct cluster_mask *cmsk;
-	unsigned int cpu;
-	u32 dest = 0;
-	cpu = cpumask_first(mask);
-	if (cpu >= nr_cpu_ids)
-		return -EINVAL;
-	cmsk = per_cpu(cluster_masks, cpu);
-	cpumask_clear(effmsk);
-	for_each_cpu_and(cpu, &cmsk->mask, mask) {
-		dest |= per_cpu(x86_cpu_to_logical_apicid, cpu);
-		cpumask_set_cpu(cpu, effmsk);
-	}
-	*apicid = dest;
-	return 0;
-}
 static u32 x2apic_calc_apicid(unsigned int cpu)
 {
 	return per_cpu(x86_cpu_to_logical_apicid, cpu);
@@ -198,29 +175,6 @@ static int x2apic_cluster_probe(void)
 	return 1;
 }
-/*
- * Each x2apic cluster is an allocation domain.
- */
-static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
-					     const struct cpumask *mask)
-{
-	struct cluster_mask *cmsk = per_cpu(cluster_masks, cpu);
-	/*
-	 * To minimize vector pressure, default case of boot, device bringup
-	 * etc will use a single cpu for the interrupt destination.
-	 *
-	 * On explicit migration requests coming from irqbalance etc,
-	 * interrupts will be routed to the x2apic cluster (cluster-id
-	 * derived from the first cpu in the mask) members specified
-	 * in the mask.
-	 */
-	if (cpumask_equal(mask, cpu_online_mask))
-		cpumask_copy(retmask, cpumask_of(cpu));
-	else
-		cpumask_and(retmask, mask, &cmsk->mask);
-}
 static struct apic apic_x2apic_cluster __ro_after_init = {
 	.name = "cluster x2apic",
@@ -236,7 +190,6 @@ static struct apic apic_x2apic_cluster __ro_after_init = {
 	.dest_logical = APIC_DEST_LOGICAL,
 	.check_apicid_used = NULL,
-	.vector_allocation_domain = cluster_vector_allocation_domain,
 	.init_apic_ldr = init_x2apic_ldr,
 	.ioapic_phys_id_map = NULL,
@@ -249,7 +202,6 @@ static struct apic apic_x2apic_cluster __ro_after_init = {
 	.get_apic_id = x2apic_get_apic_id,
 	.set_apic_id = x2apic_set_apic_id,
-	.cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
 	.calc_dest_apicid = x2apic_calc_apicid,
 	.send_IPI = x2apic_send_IPI,
...
@@ -151,7 +151,6 @@ static struct apic apic_x2apic_phys __ro_after_init = {
 	.dest_logical = 0,
 	.check_apicid_used = NULL,
-	.vector_allocation_domain = default_vector_allocation_domain,
 	.init_apic_ldr = init_x2apic_ldr,
 	.ioapic_phys_id_map = NULL,
@@ -164,7 +163,6 @@ static struct apic apic_x2apic_phys __ro_after_init = {
 	.get_apic_id = x2apic_get_apic_id,
 	.set_apic_id = x2apic_set_apic_id,
-	.cpu_mask_to_apicid = default_cpu_mask_to_apicid,
 	.calc_dest_apicid = apic_default_calc_apicid,
 	.send_IPI = x2apic_send_IPI,
...
@@ -525,18 +525,6 @@ static void uv_init_apic_ldr(void)
 {
 }
-static int
-uv_cpu_mask_to_apicid(const struct cpumask *mask, struct irq_data *irqdata,
-		      unsigned int *apicid)
-{
-	int ret = default_cpu_mask_to_apicid(mask, irqdata, apicid);
-	if (!ret)
-		*apicid |= uv_apicid_hibits;
-	return ret;
-}
 static u32 apic_uv_calc_apicid(unsigned int cpu)
 {
 	return apic_default_calc_apicid(cpu) | uv_apicid_hibits;
@@ -593,7 +581,6 @@ static struct apic apic_x2apic_uv_x __ro_after_init = {
 	.dest_logical = APIC_DEST_LOGICAL,
 	.check_apicid_used = NULL,
-	.vector_allocation_domain = default_vector_allocation_domain,
 	.init_apic_ldr = uv_init_apic_ldr,
 	.ioapic_phys_id_map = NULL,
@@ -606,7 +593,6 @@ static struct apic apic_x2apic_uv_x __ro_after_init = {
 	.get_apic_id = x2apic_get_apic_id,
 	.set_apic_id = set_apic_id,
-	.cpu_mask_to_apicid = uv_cpu_mask_to_apicid,
 	.calc_dest_apicid = apic_uv_calc_apicid,
 	.send_IPI = uv_send_IPI_one,
...
@@ -26,9 +26,6 @@
 #define TOPOLOGY_REGISTER_OFFSET 0x10
-/* Flag below is initialized once during vSMP PCI initialization. */
-static int irq_routing_comply = 1;
 #if defined CONFIG_PCI && defined CONFIG_PARAVIRT
 /*
  * Interrupt control on vSMPowered systems:
@@ -105,9 +102,6 @@ static void __init set_vsmp_pv_ops(void)
 	if (cap & ctl & BIT(8)) {
 		ctl &= ~BIT(8);
-		/* Interrupt routing set to ignore */
-		irq_routing_comply = 0;
 #ifdef CONFIG_PROC_FS
 		/* Don't let users change irq affinity via procfs */
 		no_irq_affinity = 1;
@@ -211,23 +205,10 @@ static int apicid_phys_pkg_id(int initial_apic_id, int index_msb)
 	return hard_smp_processor_id() >> index_msb;
 }
-/*
- * In vSMP, all cpus should be capable of handling interrupts, regardless of
- * the APIC used.
- */
-static void fill_vector_allocation_domain(int cpu, struct cpumask *retmask,
-					  const struct cpumask *mask)
-{
-	cpumask_setall(retmask);
-}
 static void vsmp_apic_post_init(void)
 {
 	/* need to update phys_pkg_id */
 	apic->phys_pkg_id = apicid_phys_pkg_id;
-	if (!irq_routing_comply)
-		apic->vector_allocation_domain = fill_vector_allocation_domain;
 }
 void __init vsmp_init(void)
...
@@ -164,7 +164,6 @@ static struct apic xen_pv_apic = {
 	/* .dest_logical - default_send_IPI_ use it but we use our own. */
 	.check_apicid_used = default_check_apicid_used, /* Used on 32-bit */
-	.vector_allocation_domain = flat_vector_allocation_domain,
 	.init_apic_ldr = xen_noop, /* setup_local_APIC calls it */
 	.ioapic_phys_id_map = default_ioapic_phys_id_map, /* Used on 32-bit */
@@ -177,7 +176,6 @@ static struct apic xen_pv_apic = {
 	.get_apic_id = xen_get_apic_id,
 	.set_apic_id = xen_set_apic_id, /* Can be NULL on 32-bit. */
-	.cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
 	.calc_dest_apicid = apic_flat_calc_apicid,
 #ifdef CONFIG_SMP
...