Commit 9df872fa authored by Jiang Liu, committed by Thomas Gleixner

genirq: Move field 'affinity' from irq_data into irq_common_data

The IRQ affinity mask is per-IRQ rather than per-irqchip, so move it into
struct irq_common_data.
Signed-off-by: Jiang Liu <jiang.liu@linux.intel.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Jason Cooper <jason@lakedaemon.net>
Cc: Kevin Cernekee <cernekee@gmail.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Link: http://lkml.kernel.org/r/1433303281-27688-1-git-send-email-jiang.liu@linux.intel.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent af7080e0
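The shape of the change is easiest to see before reading the hunks: previously the affinity cpumask lived in struct irq_data, which exists once per irqchip level in a stacked hierarchy, so callers such as the vector.c hunk below had to fetch the top-level irq_data to find the valid mask. After this commit the mask lives once in struct irq_common_data, which every irq_data in the hierarchy points to via ->common, so any level's irq_data can be handed to irq_data_get_affinity_mask(). The following is a minimal, self-contained user-space sketch of that layout; the trimmed struct members, the stand-in cpumask type, and the main() driver are illustrative only and are not the kernel's actual definitions.

/*
 * Simplified stand-alone illustration of the new layout: the affinity
 * mask is stored once per interrupt in irq_common_data, and every
 * irq_data in a stacked-irqchip hierarchy reaches it through ->common.
 * Types and members are stand-ins, not the real kernel headers.
 */
#include <stdio.h>

struct cpumask { unsigned long bits; };        /* stand-in for the kernel cpumask */

struct irq_common_data {
        void            *handler_data;
        struct cpumask  *affinity;             /* now lives here, once per IRQ */
};

struct irq_data {
        unsigned int            irq;
        struct irq_common_data  *common;       /* shared by every hierarchy level */
        void                    *chip_data;    /* still per-irqchip */
};

/* Mirrors the reworked accessor: one extra dereference through ->common. */
static struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
{
        return d->common->affinity;
}

int main(void)
{
        struct cpumask mask = { .bits = 0x3 };                  /* e.g. CPUs 0-1 */
        struct irq_common_data common = { .affinity = &mask };
        /* Parent and child irq_data of a stacked irqchip share the same common data. */
        struct irq_data parent = { .irq = 42, .common = &common };
        struct irq_data child  = { .irq = 42, .common = &common };

        printf("parent sees %#lx, child sees %#lx\n",
               irq_data_get_affinity_mask(&parent)->bits,
               irq_data_get_affinity_mask(&child)->bits);
        return 0;
}

Accordingly, the hunks below switch callers that poked desc->irq_data.affinity directly to desc->irq_common_data.affinity, and callers that went through irq_data to the accessor.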
@@ -489,10 +489,8 @@ static int apic_set_affinity(struct irq_data *irq_data,
 	err = assign_irq_vector(irq, data, dest);
 	if (err) {
-		struct irq_data *top = irq_get_irq_data(irq);
-
 		if (assign_irq_vector(irq, data,
-				      irq_data_get_affinity_mask(top)))
+				      irq_data_get_affinity_mask(irq_data)))
 			pr_err("Failed to recover vector for irq %d\n", irq);
 		return err;
 	}
@@ -110,8 +110,8 @@ enum {
 /*
  * Return value for chip->irq_set_affinity()
  *
- * IRQ_SET_MASK_OK	- OK, core updates irq_data.affinity
- * IRQ_SET_MASK_NOCPY	- OK, chip did update irq_data.affinity
+ * IRQ_SET_MASK_OK	- OK, core updates irq_common_data.affinity
+ * IRQ_SET_MASK_NOCPY	- OK, chip did update irq_common_data.affinity
  * IRQ_SET_MASK_OK_DONE	- Same as IRQ_SET_MASK_OK for core. Special code to
  *			  support stacked irqchips, which indicates skipping
  *			  all descendent irqchips.
@@ -131,6 +131,7 @@ struct irq_domain;
  *			Use accessor functions to deal with it
  * @node:		node index useful for balancing
  * @handler_data:	per-IRQ data for the irq_chip methods
+ * @affinity:		IRQ affinity on SMP
  */
 struct irq_common_data {
 	unsigned int		state_use_accessors;
@@ -138,6 +139,7 @@ struct irq_common_data {
 	unsigned int		node;
 #endif
 	void			*handler_data;
+	cpumask_var_t		affinity;
 };

 /**
@@ -154,7 +156,6 @@ struct irq_common_data {
  * @chip_data:		platform-specific per-chip private data for the chip
  *			methods, to allow shared chip implementations
  * @msi_desc:		MSI descriptor
- * @affinity:		IRQ affinity on SMP
  */
 struct irq_data {
 	u32			mask;
@@ -168,7 +169,6 @@ struct irq_data {
 #endif
 	void			*chip_data;
 	struct msi_desc		*msi_desc;
-	cpumask_var_t		affinity;
 };

 /*
@@ -684,12 +684,12 @@ static inline struct cpumask *irq_get_affinity_mask(int irq)
 {
 	struct irq_data *d = irq_get_irq_data(irq);

-	return d ? d->affinity : NULL;
+	return d ? d->common->affinity : NULL;
 }

 static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
 {
-	return d->affinity;
+	return d->common->affinity;
 }

 unsigned int arch_dynirq_lower_bound(unsigned int from);
@@ -38,12 +38,13 @@ static void __init init_irq_default_affinity(void)
 #ifdef CONFIG_SMP
 static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
 {
-	if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
+	if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity,
+				     gfp, node))
 		return -ENOMEM;

 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
-		free_cpumask_var(desc->irq_data.affinity);
+		free_cpumask_var(desc->irq_common_data.affinity);
 		return -ENOMEM;
 	}
 #endif
@@ -52,7 +53,7 @@ static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)

 static void desc_smp_init(struct irq_desc *desc, int node)
 {
-	cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
+	cpumask_copy(desc->irq_common_data.affinity, irq_default_affinity);
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	cpumask_clear(desc->pending_mask);
 #endif
@@ -124,7 +125,7 @@ static void free_masks(struct irq_desc *desc)
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	free_cpumask_var(desc->pending_mask);
 #endif
-	free_cpumask_var(desc->irq_data.affinity);
+	free_cpumask_var(desc->irq_common_data.affinity);
 }
 #else
 static inline void free_masks(struct irq_desc *desc) { }
@@ -192,7 +192,7 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
 	switch (ret) {
 	case IRQ_SET_MASK_OK:
 	case IRQ_SET_MASK_OK_DONE:
-		cpumask_copy(data->affinity, mask);
+		cpumask_copy(desc->irq_common_data.affinity, mask);
 	case IRQ_SET_MASK_OK_NOCOPY:
 		irq_set_thread_affinity(desc);
 		ret = 0;
@@ -304,7 +304,7 @@ static void irq_affinity_notify(struct work_struct *work)
 	if (irq_move_pending(&desc->irq_data))
 		irq_get_pending(cpumask, desc);
 	else
-		cpumask_copy(cpumask, desc->irq_data.affinity);
+		cpumask_copy(cpumask, desc->irq_common_data.affinity);
 	raw_spin_unlock_irqrestore(&desc->lock, flags);

 	notify->notify(notify, cpumask);
@@ -375,9 +375,9 @@ static int setup_affinity(struct irq_desc *desc, struct cpumask *mask)
 	 * one of the targets is online.
 	 */
 	if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
-		if (cpumask_intersects(desc->irq_data.affinity,
+		if (cpumask_intersects(desc->irq_common_data.affinity,
 				       cpu_online_mask))
-			set = desc->irq_data.affinity;
+			set = desc->irq_common_data.affinity;
 		else
 			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
 	}
@@ -829,8 +829,8 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
 	 * This code is triggered unconditionally. Check the affinity
 	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
 	 */
-	if (desc->irq_data.affinity)
-		cpumask_copy(mask, desc->irq_data.affinity);
+	if (desc->irq_common_data.affinity)
+		cpumask_copy(mask, desc->irq_common_data.affinity);
 	else
 		valid = false;
 	raw_spin_unlock_irq(&desc->lock);
@@ -39,7 +39,7 @@ static struct proc_dir_entry *root_irq_dir;
 static int show_irq_affinity(int type, struct seq_file *m, void *v)
 {
 	struct irq_desc *desc = irq_to_desc((long)m->private);
-	const struct cpumask *mask = desc->irq_data.affinity;
+	const struct cpumask *mask = desc->irq_common_data.affinity;

 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	if (irqd_is_setaffinity_pending(&desc->irq_data))