Commit fbf19803 authored by Thomas Gleixner

genirq: Add default affinity mask command line option

If we isolate CPUs, then we don't want random device interrupts on them. Even
without the user space irq balancer enabled we can end up with irqs on non-boot
CPUs, and chasing newly requested interrupts is a tedious task.

Allow the default irq affinity mask to be restricted from the command line.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Chris Metcalf <cmetcalf@ezchip.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Sebastian Siewior <bigeasy@linutronix.de>
Link: http://lkml.kernel.org/r/alpine.DEB.2.11.1602031948190.25254@nanos
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent c7c42ec2
Documentation/kernel-parameters.txt
@@ -1687,6 +1687,15 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 	ip=		[IP_PNP]
 			See Documentation/filesystems/nfs/nfsroot.txt.
 
+	irqaffinity=	[SMP] Set the default irq affinity mask
+			Format:
+			<cpu number>,...,<cpu number>
+			or
+			<cpu number>-<cpu number>
+			(must be a positive range in ascending order)
+			or a mixture
+			<cpu number>,...,<cpu number>-<cpu number>
+
 	irqfixup	[HW]
 			When an interrupt is not handled search all handlers
 			for it. Intended to get systems with badly broken
......
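For illustration only, here is how the new parameter might look on a real boot command line, combined with CPU isolation as described in the commit message. The CPU numbers are hypothetical; any list or range accepted by cpulist_parse() should work:

    isolcpus=4-7 irqaffinity=0-3
    irqaffinity=0,2,4-7

The first form keeps device interrupts on the housekeeping CPUs 0-3 while CPUs 4-7 are isolated; the second uses the mixed list/range format documented above. In either case the setup code below forces the boot CPU back into the mask, so a bogus mask cannot leave the system without a usable default.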
kernel/irq/irqdesc.c
@@ -24,10 +24,27 @@
 static struct lock_class_key irq_desc_lock_class;
 
 #if defined(CONFIG_SMP)
+static int __init irq_affinity_setup(char *str)
+{
+	zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
+	cpulist_parse(str, irq_default_affinity);
+	/*
+	 * Set at least the boot cpu. We don't want to end up with
+	 * bugreports caused by random command line masks
+	 */
+	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
+	return 1;
+}
+__setup("irqaffinity=", irq_affinity_setup);
+
 static void __init init_irq_default_affinity(void)
 {
-	alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
-	cpumask_setall(irq_default_affinity);
+#ifdef CONFIG_CPUMASK_OFFSTACK
+	if (!irq_default_affinity)
+		zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
+#endif
+	if (cpumask_empty(irq_default_affinity))
+		cpumask_setall(irq_default_affinity);
 }
 #else
 static void __init init_irq_default_affinity(void)
......
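A quick way to sanity-check the result after boot is to read the default affinity mask that the irq proc interface already exposes (/proc/irq/default_smp_affinity prints irq_default_affinity as a hex bitmask). On a hypothetical 8-CPU machine booted with irqaffinity=0-3 one would expect something like:

    $ cat /proc/irq/default_smp_affinity
    0f

Note that this only changes the default applied when an interrupt is requested; individual IRQs can still be retargeted afterwards via /proc/irq/<n>/smp_affinity.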