Commit 7f95ec9e authored by Yinghai Lu, committed by Ingo Molnar

x86: move kstat_irqs from kstat to irq_desc

based on Eric's patch ...

Fold it together with the dyn_array for irq_desc: kstat_irqs for nr_irq_desc
descriptors will be allocated all together if needed -- at that point nr_cpus
is known already.

v2: make sure systems without generic_hardirqs still work; they don't have irq_desc
v3: fix merging
v4: [mingo@elte.hu] fix typo

[ mingo@elte.hu ] irq: build fix

fix:

 arch/x86/xen/spinlock.c: In function 'xen_spin_lock_slow':
 arch/x86/xen/spinlock.c:90: error: 'struct kernel_stat' has no member named 'irqs'
Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 3060d6fe
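
Editor's note, before the diff: the heart of the change is that per-IRQ interrupt counts move out of the per-CPU struct kernel_stat and into each irq_desc, which now carries one counter per CPU. A rough userspace sketch of that layout (plain C, not kernel code; the model_* names and the CPU/IRQ counts are illustrative stand-ins):

#include <stdio.h>

#define MODEL_NR_CPUS 4   /* stand-in for nr_cpu_ids */
#define MODEL_NR_IRQS 8   /* stand-in for the number of irq descriptors */

/* After this commit, each descriptor owns its own per-CPU counters. */
struct model_irq_desc {
        unsigned int kstat_irqs[MODEL_NR_CPUS];
};

static struct model_irq_desc model_descs[MODEL_NR_IRQS];

/* Mirrors the shape of kstat_irqs_cpu(irq, cpu): one IRQ on one CPU. */
static unsigned int model_kstat_irqs_cpu(unsigned int irq, int cpu)
{
        return model_descs[irq].kstat_irqs[cpu];
}

/* Mirrors the shape of kstat_irqs(irq): sum one IRQ over all CPUs. */
static unsigned int model_kstat_irqs(unsigned int irq)
{
        unsigned int sum = 0;
        int cpu;

        for (cpu = 0; cpu < MODEL_NR_CPUS; cpu++)
                sum += model_kstat_irqs_cpu(irq, cpu);
        return sum;
}

int main(void)
{
        model_descs[3].kstat_irqs[0] = 10;      /* IRQ 3 hit CPU 0 ten times */
        model_descs[3].kstat_irqs[2] = 5;       /* ... and CPU 2 five times  */
        printf("irq 3 total: %u\n", model_kstat_irqs(3));       /* prints 15 */
        return 0;
}

In the hunks below the same idea appears with the real types: interrupt handlers bump kstat_irqs_this_cpu(desc), and readers that used kstat_cpu(cpu).irqs[irq] switch to kstat_irqs_cpu(irq, cpu).
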
@@ -526,7 +526,7 @@ static void do_irq_balance(void)
         if (package_index == i)
                 IRQ_DELTA(package_index, j) = 0;
         /* Determine the total count per processor per IRQ */
-        value_now = (unsigned long) kstat_cpu(i).irqs[j];
+        value_now = (unsigned long) kstat_irqs_cpu(j, i);
         /* Determine the activity per processor per IRQ */
         delta = value_now - LAST_CPU_IRQ(i, j);
...
@@ -280,7 +280,7 @@ int show_interrupts(struct seq_file *p, void *v)
         any_count = kstat_irqs(i);
 #else
         for_each_online_cpu(j)
-                any_count |= kstat_cpu(j).irqs[i];
+                any_count |= kstat_irqs_cpu(i, j);
 #endif
         action = desc->action;
         if (!action && !any_count)
@@ -290,7 +290,7 @@ int show_interrupts(struct seq_file *p, void *v)
         seq_printf(p, "%10u ", kstat_irqs(i));
 #else
         for_each_online_cpu(j)
-                seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+                seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
 #endif
         seq_printf(p, " %8s", desc->chip->name);
         seq_printf(p, "-%-8s", desc->name);
...
@@ -90,7 +90,7 @@ int show_interrupts(struct seq_file *p, void *v)
         any_count = kstat_irqs(i);
 #else
         for_each_online_cpu(j)
-                any_count |= kstat_cpu(j).irqs[i];
+                any_count |= kstat_irqs_cpu(i, j);
 #endif
         action = desc->action;
         if (!action && !any_count)
@@ -100,7 +100,7 @@ int show_interrupts(struct seq_file *p, void *v)
         seq_printf(p, "%10u ", kstat_irqs(i));
 #else
         for_each_online_cpu(j)
-                seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+                seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
 #endif
         seq_printf(p, " %8s", desc->chip->name);
         seq_printf(p, "-%-8s", desc->name);
...
@@ -633,7 +633,7 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id)
         /*
          * handle this 'virtual interrupt' as a Cobalt one now.
          */
-        kstat_cpu(smp_processor_id()).irqs[realirq]++;
+        kstat_irqs_this_cpu(desc)++;
         if (likely(desc->action != NULL))
                 handle_IRQ_event(realirq, desc->action);
...
@@ -241,7 +241,7 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enabl
                 ADD_STATS(taken_slow_spurious, !xen_test_irq_pending(irq));
         } while (!xen_test_irq_pending(irq)); /* check for spurious wakeups */
-        kstat_this_cpu.irqs[irq]++;
+        kstat_irqs_this_cpu(irq_to_desc(irq))++;
 out:
         raw_local_irq_restore(flags);
...
@@ -532,7 +532,7 @@ static int show_stat(struct seq_file *p, void *v)
                 steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal);
                 guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest);
                 for (j = 0; j < nr_irqs; j++) {
-                        unsigned int temp = kstat_cpu(i).irqs[j];
+                        unsigned int temp = kstat_irqs_cpu(j, i);
                         sum += temp;
                         per_irq_sum[j] += temp;
                 }
...
@@ -157,6 +157,11 @@ struct irq_desc {
 #ifdef CONFIG_HAVE_SPARSE_IRQ
         struct irq_desc *next;
         struct timer_rand_state *timer_rand_state;
+#endif
+#ifdef CONFIG_HAVE_DYN_ARRAY
+        unsigned int *kstat_irqs;
+#else
+        unsigned int kstat_irqs[NR_CPUS];
 #endif
         irq_flow_handler_t handle_irq;
         struct irq_chip *chip;
@@ -190,6 +195,8 @@ extern struct irq_desc *irq_to_desc(unsigned int irq);
 /* could be removed if we get rid of all irq_desc reference */
 extern struct irq_desc irq_desc[NR_IRQS];
 #endif
+#define kstat_irqs_this_cpu(DESC) \
+        ((DESC)->kstat_irqs[smp_processor_id()])
 /*
  * Migration helpers for obsolete names, they will go away:
...
@@ -28,10 +28,8 @@ struct cpu_usage_stat {
 struct kernel_stat {
         struct cpu_usage_stat cpustat;
-#ifdef CONFIG_HAVE_DYN_ARRAY
-        unsigned int *irqs;
-#else
-        unsigned int irqs[NR_IRQS];
+#ifndef CONFIG_GENERIC_HARDIRQS
+        unsigned int irqs[NR_IRQS];
 #endif
 };
@@ -43,15 +41,25 @@ DECLARE_PER_CPU(struct kernel_stat, kstat);
 extern unsigned long long nr_context_switches(void);
+#ifndef CONFIG_GENERIC_HARDIRQS
+static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
+{
+        return kstat_cpu(cpu).irqs[irq];
+}
+#else
+extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
+#endif
 /*
  * Number of interrupts per specific IRQ source, since bootup
  */
-static inline int kstat_irqs(int irq)
+static inline unsigned int kstat_irqs(unsigned int irq)
 {
-        int cpu, sum = 0;
+        unsigned int sum = 0;
+        int cpu;
         for_each_possible_cpu(cpu)
-                sum += kstat_cpu(cpu).irqs[irq];
+                sum += kstat_irqs_cpu(irq, cpu);
         return sum;
 }
...
@@ -312,14 +312,13 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
 {
         struct irqaction *action;
         irqreturn_t action_ret;
-        const unsigned int cpu = smp_processor_id();
         spin_lock(&desc->lock);
         if (unlikely(desc->status & IRQ_INPROGRESS))
                 goto out_unlock;
         desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
-        kstat_cpu(cpu).irqs[irq]++;
+        kstat_irqs_this_cpu(desc)++;
         action = desc->action;
         if (unlikely(!action || (desc->status & IRQ_DISABLED)))
@@ -351,7 +350,6 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
 void
 handle_level_irq(unsigned int irq, struct irq_desc *desc)
 {
-        unsigned int cpu = smp_processor_id();
         struct irqaction *action;
         irqreturn_t action_ret;
@@ -361,7 +359,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
         if (unlikely(desc->status & IRQ_INPROGRESS))
                 goto out_unlock;
         desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
-        kstat_cpu(cpu).irqs[irq]++;
+        kstat_irqs_this_cpu(desc)++;
         /*
          * If its disabled or no action available
@@ -399,7 +397,6 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
 void
 handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
 {
-        unsigned int cpu = smp_processor_id();
         struct irqaction *action;
         irqreturn_t action_ret;
@@ -409,7 +406,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
                 goto out;
         desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
-        kstat_cpu(cpu).irqs[irq]++;
+        kstat_irqs_this_cpu(desc)++;
         /*
          * If its disabled or no action available
@@ -458,8 +455,6 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
 void
 handle_edge_irq(unsigned int irq, struct irq_desc *desc)
 {
-        const unsigned int cpu = smp_processor_id();
         spin_lock(&desc->lock);
         desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
@@ -476,7 +471,7 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
                 goto out_unlock;
         }
-        kstat_cpu(cpu).irqs[irq]++;
+        kstat_irqs_this_cpu(desc)++;
         /* Start handling the irq */
         desc->chip->ack(irq);
@@ -531,7 +526,7 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
 {
         irqreturn_t action_ret;
-        kstat_this_cpu.irqs[irq]++;
+        kstat_irqs_this_cpu(desc)++;
         if (desc->chip->ack)
                 desc->chip->ack(irq);
...
@@ -37,7 +37,7 @@ void
 handle_bad_irq(unsigned int irq, struct irq_desc *desc)
 {
         print_irq_desc(irq, desc);
-        kstat_this_cpu.irqs[irq]++;
+        kstat_irqs_this_cpu(desc)++;
         ack_bad_irq(irq);
 }
@@ -80,17 +80,38 @@ static void init_one_irq_desc(struct irq_desc *desc)
 #endif
 }
-#ifdef CONFIG_HAVE_SPARSE_IRQ
-static int nr_irq_desc = 32;
-
-static int __init parse_nr_irq_desc(char *arg)
-{
-        if (arg)
-                nr_irq_desc = simple_strtoul(arg, NULL, 0);
-        return 0;
-}
-
-early_param("nr_irq_desc", parse_nr_irq_desc);
+extern int after_bootmem;
+extern void *__alloc_bootmem_nopanic(unsigned long size,
+                                     unsigned long align,
+                                     unsigned long goal);
+
+static void init_kstat_irqs(struct irq_desc *desc, int nr_desc, int nr)
+{
+        unsigned long bytes, total_bytes;
+        char *ptr;
+        int i;
+        unsigned long phys;
+
+        /* Compute how many bytes we need per irq and allocate them */
+        bytes = nr * sizeof(unsigned int);
+        total_bytes = bytes * nr_desc;
+        if (after_bootmem)
+                ptr = kzalloc(total_bytes, GFP_ATOMIC);
+        else
+                ptr = __alloc_bootmem_nopanic(total_bytes, PAGE_SIZE, 0);
+
+        if (!ptr)
+                panic(" can not allocate kstat_irqs\n");
+
+        phys = __pa(ptr);
+        printk(KERN_DEBUG "kstat_irqs ==> [%#lx - %#lx]\n", phys, phys + total_bytes);
+
+        for (i = 0; i < nr_desc; i++) {
+                desc[i].kstat_irqs = (unsigned int *)ptr;
+                ptr += bytes;
+        }
+}
 
 static void __init init_work(void *data)
 {
@@ -100,25 +121,44 @@ static void __init init_work(void *data)
         desc = *da->name;
-        for (i = 0; i < *da->nr; i++)
+        for (i = 0; i < *da->nr; i++) {
                 init_one_irq_desc(&desc[i]);
+#ifndef CONFIG_HAVE_SPARSE_IRQ
+                desc[i].irq = i;
+#endif
+        }
+#ifdef CONFIG_HAVE_SPARSE_IRQ
         for (i = 1; i < *da->nr; i++)
                 desc[i-1].next = &desc[i];
+#endif
+
+        /* init kstat_irqs, nr_cpu_ids is ready already */
+        init_kstat_irqs(desc, *da->nr, nr_cpu_ids);
 }
+#ifdef CONFIG_HAVE_SPARSE_IRQ
+static int nr_irq_desc = 32;
+
+static int __init parse_nr_irq_desc(char *arg)
+{
+        if (arg)
+                nr_irq_desc = simple_strtoul(arg, NULL, 0);
+        return 0;
+}
+
+early_param("nr_irq_desc", parse_nr_irq_desc);
 
 static struct irq_desc *sparse_irqs;
 DEFINE_DYN_ARRAY(sparse_irqs, sizeof(struct irq_desc), nr_irq_desc, PAGE_SIZE, init_work);
-extern int after_bootmem;
-extern void *__alloc_bootmem_nopanic(unsigned long size,
-                                     unsigned long align,
-                                     unsigned long goal);
 struct irq_desc *irq_to_desc(unsigned int irq)
 {
         struct irq_desc *desc, *desc_pri;
         int i;
         int count = 0;
+        unsigned long phys;
+        unsigned long total_bytes;
         BUG_ON(irq == -1U);
@@ -141,38 +181,34 @@ struct irq_desc *irq_to_desc(unsigned int irq)
          */
         printk(KERN_DEBUG "try to get more irq_desc %d\n", nr_irq_desc);
+        total_bytes = sizeof(struct irq_desc) * nr_irq_desc;
         if (after_bootmem)
-                desc = kzalloc(sizeof(struct irq_desc)*nr_irq_desc, GFP_ATOMIC);
+                desc = kzalloc(total_bytes, GFP_ATOMIC);
         else
-                desc = __alloc_bootmem_nopanic(sizeof(struct irq_desc)*nr_irq_desc, PAGE_SIZE, 0);
+                desc = __alloc_bootmem_nopanic(total_bytes, PAGE_SIZE, 0);
         if (!desc)
                 panic("please boot with nr_irq_desc= %d\n", count * 2);
+        phys = __pa(desc);
+        printk(KERN_DEBUG "irq_desc ==> [%#lx - %#lx]\n", phys, phys + total_bytes);
         for (i = 0; i < nr_irq_desc; i++)
                 init_one_irq_desc(&desc[i]);
         for (i = 1; i < nr_irq_desc; i++)
                 desc[i-1].next = &desc[i];
+        /* init kstat_irqs, nr_cpu_ids is ready already */
+        init_kstat_irqs(desc, nr_irq_desc, nr_cpu_ids);
         desc->irq = irq;
         desc_pri->next = desc;
         return desc;
 }
 #else
-static void __init init_work(void *data)
-{
-        struct dyn_array *da = data;
-        int i;
-        struct irq_desc *desc;
-
-        desc = *da->name;
-        for (i = 0; i < *da->nr; i++)
-                init_one_irq_desc(&desc[i]);
-}
 static struct irq_desc *irq_desc;
 DEFINE_DYN_ARRAY(irq_desc, sizeof(struct irq_desc), nr_irqs, PAGE_SIZE, init_work);
@@ -315,7 +351,7 @@ unsigned int __do_IRQ(unsigned int irq)
         struct irqaction *action;
         unsigned int status;
-        kstat_this_cpu.irqs[irq]++;
+        kstat_irqs_this_cpu(desc)++;
         if (CHECK_IRQ_PER_CPU(desc->status)) {
                 irqreturn_t action_ret;
@@ -415,3 +451,10 @@ void early_init_irq_lock_class(void)
 }
 #endif
+unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
+{
+        struct irq_desc *desc = irq_to_desc(irq);
+        return desc->kstat_irqs[cpu];
+}
+EXPORT_SYMBOL(kstat_irqs_cpu);
...
@@ -4048,11 +4048,8 @@ static inline void idle_balance(int cpu, struct rq *rq)
 #endif
 DEFINE_PER_CPU(struct kernel_stat, kstat);
-EXPORT_PER_CPU_SYMBOL(kstat);
-#ifdef CONFIG_HAVE_DYN_ARRAY
-DEFINE_PER_CPU_DYN_ARRAY_ADDR(per_cpu__kstat_irqs, per_cpu__kstat.irqs, sizeof(unsigned int), nr_irqs, sizeof(unsigned long), NULL);
-#endif
+EXPORT_PER_CPU_SYMBOL(kstat);
 /*
  * Return p->sum_exec_runtime plus any more ns on the sched_clock
...
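
Editor's note on the allocation side: the init_kstat_irqs() added above grabs one contiguous block of nr_desc * nr counters (kzalloc once bootmem is retired, __alloc_bootmem_nopanic before that) and hands each descriptor a slice of it. A minimal userspace sketch of that slicing, with calloc standing in for the kernel allocators and illustrative model_* names:

#include <stdio.h>
#include <stdlib.h>

struct model_irq_desc {
        unsigned int *kstat_irqs;
};

/*
 * One allocation of nr_desc * nr counters, carved into per-descriptor
 * slices of nr counters each -- the same layout init_kstat_irqs() sets up.
 */
static int model_init_kstat_irqs(struct model_irq_desc *desc, int nr_desc, int nr)
{
        size_t bytes = nr * sizeof(unsigned int);   /* per-descriptor slice */
        char *ptr = calloc(nr_desc, bytes);         /* zeroed, like kzalloc */
        int i;

        if (!ptr)
                return -1;
        for (i = 0; i < nr_desc; i++) {
                desc[i].kstat_irqs = (unsigned int *)ptr;
                ptr += bytes;
        }
        return 0;
}

int main(void)
{
        struct model_irq_desc descs[32];

        if (model_init_kstat_irqs(descs, 32, 8))    /* 32 descriptors, 8 CPUs */
                return 1;
        descs[5].kstat_irqs[2]++;                   /* count IRQ 5 on CPU 2 */
        printf("irq 5 on cpu 2: %u\n", descs[5].kstat_irqs[2]);
        return 0;
}

With 32 descriptors and 8 CPUs that is 32 * 8 * sizeof(unsigned int) bytes (1 KiB with 4-byte ints) in a single block, which is why the allocation is deferred until nr_cpu_ids is known.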