Commit 2cc21ef8 authored by Thomas Gleixner, committed by Ingo Molnar

genirq: remove sparse irq code

This code is not ready, but we need to rip it out instead of rebasing
as we would lose the APIC/IO_APIC unification otherwise.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent c6b7674f
@@ -111,9 +111,6 @@ struct irq_cfg;
struct irq_pin_list;
struct irq_cfg {
unsigned int irq;
#ifdef CONFIG_HAVE_SPARSE_IRQ
struct irq_cfg *next;
#endif
struct irq_pin_list *irq_2_pin;
cpumask_t domain;
cpumask_t old_domain;
@@ -151,15 +148,6 @@ static void init_one_irq_cfg(struct irq_cfg *cfg)
static struct irq_cfg *irq_cfgx;
#ifdef CONFIG_HAVE_SPARSE_IRQ
/*
* Protect the irq_cfgx_free freelist:
*/
static DEFINE_SPINLOCK(irq_cfg_lock);
static struct irq_cfg *irq_cfgx_free;
#endif
static void __init init_work(void *data)
{
struct dyn_array *da = data;
@@ -174,114 +162,7 @@ static void __init init_work(void *data)
legacy_count = ARRAY_SIZE(irq_cfg_legacy);
for (i = legacy_count; i < *da->nr; i++)
init_one_irq_cfg(&cfg[i]);
#ifdef CONFIG_HAVE_SPARSE_IRQ
for (i = 1; i < *da->nr; i++)
cfg[i-1].next = &cfg[i];
irq_cfgx_free = &irq_cfgx[legacy_count];
irq_cfgx[legacy_count - 1].next = NULL;
#endif
}
#ifdef CONFIG_HAVE_SPARSE_IRQ
/* needs to be bigger than the size of irq_cfg_legacy */
static int nr_irq_cfg = 32;
static int __init parse_nr_irq_cfg(char *arg)
{
if (arg) {
nr_irq_cfg = simple_strtoul(arg, NULL, 0);
if (nr_irq_cfg < 32)
nr_irq_cfg = 32;
}
return 0;
}
early_param("nr_irq_cfg", parse_nr_irq_cfg);
#define for_each_irq_cfg(irqX, cfg) \
for (cfg = irq_cfgx, irqX = cfg->irq; cfg; cfg = cfg->next, irqX = cfg ? cfg->irq : -1U)
DEFINE_DYN_ARRAY(irq_cfgx, sizeof(struct irq_cfg), nr_irq_cfg, PAGE_SIZE, init_work);
static struct irq_cfg *irq_cfg(unsigned int irq)
{
struct irq_cfg *cfg;
cfg = irq_cfgx;
while (cfg) {
if (cfg->irq == irq)
return cfg;
cfg = cfg->next;
}
return NULL;
}
static struct irq_cfg *irq_cfg_alloc(unsigned int irq)
{
struct irq_cfg *cfg, *cfg_pri;
unsigned long flags;
int count = 0;
int i;
cfg_pri = cfg = irq_cfgx;
while (cfg) {
if (cfg->irq == irq)
return cfg;
cfg_pri = cfg;
cfg = cfg->next;
count++;
}
spin_lock_irqsave(&irq_cfg_lock, flags);
if (!irq_cfgx_free) {
unsigned long phys;
unsigned long total_bytes;
/*
* we ran out of pre-allocated ones, allocate more
*/
printk(KERN_DEBUG "try to get more irq_cfg %d\n", nr_irq_cfg);
total_bytes = sizeof(struct irq_cfg) * nr_irq_cfg;
if (after_bootmem)
cfg = kzalloc(total_bytes, GFP_ATOMIC);
else
cfg = __alloc_bootmem_nopanic(total_bytes, PAGE_SIZE, 0);
if (!cfg)
panic("please boot with nr_irq_cfg= %d\n", count * 2);
phys = __pa(cfg);
printk(KERN_DEBUG "irq_cfg ==> [%#lx - %#lx]\n", phys, phys + total_bytes);
for (i = 0; i < nr_irq_cfg; i++)
init_one_irq_cfg(&cfg[i]);
for (i = 1; i < nr_irq_cfg; i++)
cfg[i-1].next = &cfg[i];
irq_cfgx_free = cfg;
}
cfg = irq_cfgx_free;
irq_cfgx_free = irq_cfgx_free->next;
cfg->next = NULL;
if (cfg_pri)
cfg_pri->next = cfg;
else
irq_cfgx = cfg;
cfg->irq = irq;
spin_unlock_irqrestore(&irq_cfg_lock, flags);
return cfg;
}
#else
#define for_each_irq_cfg(irq, cfg) \
for (irq = 0, cfg = &irq_cfgx[irq]; irq < nr_irqs; irq++, cfg = &irq_cfgx[irq])
@@ -290,17 +171,16 @@ DEFINE_DYN_ARRAY(irq_cfgx, sizeof(struct irq_cfg), nr_irqs, PAGE_SIZE, init_work
struct irq_cfg *irq_cfg(unsigned int irq)
{
if (irq < nr_irqs)
return &irq_cfgx[irq];
if (irq < nr_irqs)
return &irq_cfgx[irq];
return NULL;
return NULL;
}
struct irq_cfg *irq_cfg_alloc(unsigned int irq)
{
return irq_cfg(irq);
return irq_cfg(irq);
}
#endif
/*
* This is performance-critical, we want to do it O(1)
*
@@ -3068,9 +2948,7 @@ unsigned int create_irq_nr(unsigned int irq_want)
unsigned long flags;
struct irq_cfg *cfg_new;
#ifndef CONFIG_HAVE_SPARSE_IRQ
irq_want = nr_irqs - 1;
#endif
irq = 0;
spin_lock_irqsave(&vector_lock, flags);
......
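The irq_cfg hunks above drop the CONFIG_HAVE_SPARSE_IRQ variant, in which configurations lived on a singly linked list and new entries were popped from a pre-allocated freelist, making irq_cfg(irq) an O(n) walk instead of the O(1) array index that remains. For reference, a minimal userspace sketch of that lookup-or-allocate pattern follows; the pool size, the prepend-on-insert, and all names are assumptions for illustration, not the kernel code itself.

/*
 * Sketch of the removed sparse irq_cfg scheme: in-use entries sit on a
 * singly linked list keyed by irq number, and new entries come from a
 * statically pre-allocated freelist. Names and sizes are illustrative only.
 */
#include <stdio.h>

struct irq_cfg {
	unsigned int irq;
	struct irq_cfg *next;
	/* vector, domain, irq_2_pin etc. omitted for the sketch */
};

#define NR_PREALLOC 32

static struct irq_cfg pool[NR_PREALLOC];
static struct irq_cfg *cfg_list;	/* in-use entries, like irq_cfgx */
static struct irq_cfg *cfg_free;	/* freelist, like irq_cfgx_free */

static void cfg_pool_init(void)
{
	int i;

	for (i = 0; i < NR_PREALLOC - 1; i++)
		pool[i].next = &pool[i + 1];
	pool[NR_PREALLOC - 1].next = NULL;
	cfg_free = pool;
}

/* O(n) lookup: walk the list until the irq number matches */
static struct irq_cfg *cfg_lookup(unsigned int irq)
{
	struct irq_cfg *cfg;

	for (cfg = cfg_list; cfg; cfg = cfg->next)
		if (cfg->irq == irq)
			return cfg;
	return NULL;
}

/* lookup-or-allocate: reuse an existing entry or pop one off the freelist */
static struct irq_cfg *cfg_alloc(unsigned int irq)
{
	struct irq_cfg *cfg = cfg_lookup(irq);

	if (cfg)
		return cfg;
	if (!cfg_free)
		return NULL;	/* the removed kernel code grew the pool here */
	cfg = cfg_free;
	cfg_free = cfg_free->next;
	cfg->irq = irq;
	cfg->next = cfg_list;	/* the kernel code appended; prepending keeps the sketch short */
	cfg_list = cfg;
	return cfg;
}

int main(void)
{
	cfg_pool_init();
	cfg_alloc(9);
	cfg_alloc(4096);	/* sparse: irq numbers need not be contiguous */
	printf("irq 4096 -> %p\n", (void *)cfg_lookup(4096));
	return 0;
}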
@@ -272,20 +272,12 @@ int show_interrupts(struct seq_file *p, void *v)
struct irq_desc *desc = NULL;
int tail = 0;
#ifdef CONFIG_HAVE_SPARSE_IRQ
desc = (struct irq_desc *)v;
entries = -1U;
i = desc->irq;
if (!desc->next)
tail = 1;
#else
entries = nr_irqs - 1;
i = *(loff_t *) v;
if (i == nr_irqs)
tail = 1;
else
desc = irq_to_desc(i);
#endif
if (i == 0) {
seq_printf(p, " ");
......
@@ -77,20 +77,12 @@ int show_interrupts(struct seq_file *p, void *v)
struct irq_desc *desc = NULL;
int tail = 0;
#ifdef CONFIG_HAVE_SPARSE_IRQ
desc = (struct irq_desc *)v;
entries = -1U;
i = desc->irq;
if (!desc->next)
tail = 1;
#else
entries = nr_irqs - 1;
i = *(loff_t *) v;
if (i == nr_irqs)
tail = 1;
else
desc = irq_to_desc(i);
#endif
if (i == 0) {
seq_printf(p, " ");
......
@@ -558,8 +558,6 @@ struct timer_rand_state {
unsigned dont_count_entropy:1;
};
#ifndef CONFIG_HAVE_SPARSE_IRQ
#ifdef CONFIG_HAVE_DYN_ARRAY
static struct timer_rand_state **irq_timer_state;
DEFINE_DYN_ARRAY(irq_timer_state, sizeof(struct timer_rand_state *), nr_irqs, PAGE_SIZE, NULL);
@@ -583,33 +581,6 @@ static void set_timer_rand_state(unsigned int irq, struct timer_rand_state *stat
irq_timer_state[irq] = state;
}
#else
static struct timer_rand_state *get_timer_rand_state(unsigned int irq)
{
struct irq_desc *desc;
desc = irq_to_desc(irq);
if (!desc)
return NULL;
return desc->timer_rand_state;
}
static void set_timer_rand_state(unsigned int irq, struct timer_rand_state *state)
{
struct irq_desc *desc;
desc = irq_to_desc(irq);
if (!desc)
return;
desc->timer_rand_state = state;
}
#endif
static struct timer_rand_state input_timer_state;
/*
@@ -967,10 +938,8 @@ void rand_initialize_irq(int irq)
{
struct timer_rand_state *state;
#ifndef CONFIG_HAVE_SPARSE_IRQ
if (irq >= nr_irqs)
return;
#endif
state = get_timer_rand_state(irq);
......
@@ -82,18 +82,6 @@ void unmask_ht_irq(unsigned int irq)
write_ht_irq_msg(irq, &msg);
}
static unsigned int build_irq_for_pci_dev(struct pci_dev *dev)
{
unsigned int irq;
irq = dev->bus->number;
irq <<= 8;
irq |= dev->devfn;
irq <<= 12;
return irq;
}
/**
* __ht_create_irq - create an irq and attach it to a device.
* @dev: The hypertransport device to find the irq capability on.
@@ -110,7 +98,6 @@ int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update)
int max_irq;
int pos;
int irq;
unsigned int irq_want;
pos = pci_find_ht_capability(dev, HT_CAPTYPE_IRQ);
if (!pos)
@@ -138,12 +125,8 @@ int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update)
cfg->msg.address_lo = 0xffffffff;
cfg->msg.address_hi = 0xffffffff;
irq_want= build_irq_for_pci_dev(dev);
#ifdef CONFIG_HAVE_SPARSE_IRQ
irq = create_irq_nr(irq_want + idx);
#else
irq = create_irq();
#endif
if (irq <= 0) {
kfree(cfg);
return -EBUSY;
......
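The __ht_create_irq() hunks above also remove build_irq_for_pci_dev(), which packed the PCI bus number and devfn into the IRQ hint passed to create_irq_nr(irq_want + idx): bus in the upper bits, devfn below it, and the low 12 bits left free for the per-device index. A standalone sketch of that bit layout, with plain integers standing in for struct pci_dev and the example bus/devfn values assumed:

/*
 * Bit layout produced by the removed build_irq_for_pci_dev():
 *   bits 20 and up  PCI bus number
 *   bits 12..19     devfn (slot/function)
 *   bits  0..11     left free for the HT irq index added by the caller
 */
#include <stdio.h>

static unsigned int build_irq_hint(unsigned int bus, unsigned int devfn)
{
	unsigned int irq = bus;

	irq <<= 8;	/* make room for devfn */
	irq |= devfn;
	irq <<= 12;	/* make room for the per-device index */
	return irq;
}

int main(void)
{
	/* e.g. bus 0x02, device 3 function 0 -> devfn 0x18 */
	printf("irq hint: %#x\n", build_irq_hint(0x02, 0x18));
	return 0;
}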
@@ -19,78 +19,6 @@ struct irq_2_iommu {
u8 irte_mask;
};
#ifdef CONFIG_HAVE_SPARSE_IRQ
static struct irq_2_iommu *irq_2_iommuX;
/* fill one page ? */
static int nr_irq_2_iommu = 0x100;
static int irq_2_iommu_index;
DEFINE_DYN_ARRAY(irq_2_iommuX, sizeof(struct irq_2_iommu), nr_irq_2_iommu, PAGE_SIZE, NULL);
extern void *__alloc_bootmem_nopanic(unsigned long size,
unsigned long align,
unsigned long goal);
static struct irq_2_iommu *get_one_free_irq_2_iommu(int not_used)
{
struct irq_2_iommu *iommu;
unsigned long total_bytes;
if (irq_2_iommu_index >= nr_irq_2_iommu) {
/*
* we ran out of pre-allocated ones, allocate more
*/
printk(KERN_DEBUG "try to get more irq_2_iommu %d\n", nr_irq_2_iommu);
total_bytes = sizeof(struct irq_2_iommu)*nr_irq_2_iommu;
if (after_bootmem)
iommu = kzalloc(total_bytes, GFP_ATOMIC);
else
iommu = __alloc_bootmem_nopanic(total_bytes, PAGE_SIZE, 0);
if (!iommu)
panic("can not get more irq_2_iommu\n");
irq_2_iommuX = iommu;
irq_2_iommu_index = 0;
}
iommu = &irq_2_iommuX[irq_2_iommu_index];
irq_2_iommu_index++;
return iommu;
}
static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
struct irq_desc *desc;
desc = irq_to_desc(irq);
BUG_ON(!desc);
return desc->irq_2_iommu;
}
static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
struct irq_desc *desc;
struct irq_2_iommu *irq_iommu;
/*
* alloc irq desc if not allocated already.
*/
desc = irq_to_desc_alloc(irq);
irq_iommu = desc->irq_2_iommu;
if (!irq_iommu)
desc->irq_2_iommu = get_one_free_irq_2_iommu(irq);
return desc->irq_2_iommu;
}
#else /* !CONFIG_HAVE_SPARSE_IRQ */
#ifdef CONFIG_HAVE_DYN_ARRAY
static struct irq_2_iommu *irq_2_iommuX;
DEFINE_DYN_ARRAY(irq_2_iommuX, sizeof(struct irq_2_iommu), nr_irqs, PAGE_SIZE, NULL);
@@ -109,7 +37,6 @@ static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
return irq_2_iommu(irq);
}
#endif
static DEFINE_SPINLOCK(irq_2_ir_lock);
@@ -166,11 +93,9 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
if (!count)
return -1;
#ifndef CONFIG_HAVE_SPARSE_IRQ
/* protect irq_2_iommu_alloc later */
if (irq >= nr_irqs)
return -1;
#endif
/*
* start the IRTE search from index 0.
......
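The removed get_one_free_irq_2_iommu() above handed out entries sequentially from a pre-sized chunk (irq_2_iommu_index into irq_2_iommuX) and, when the chunk ran out, allocated a fresh one and reset the index; entries already handed out stayed referenced from their irq_desc, and the old chunk was simply abandoned. A userspace sketch of that chunked bump-allocation pattern, with calloc standing in for kzalloc/bootmem and all names assumed for illustration:

/*
 * Sketch of the removed chunked bump allocator: hand out slots from the
 * current chunk; when it runs dry, grab a new chunk and reset the index.
 * Previously returned pointers stay valid because old chunks are never
 * freed or reused, matching the behaviour of the removed code.
 */
#include <stdio.h>
#include <stdlib.h>

struct irq_2_iommu {
	void *iommu;
	unsigned int irte_index;
};

#define CHUNK_SLOTS 4	/* the removed code used 0x100; small here for demo */

static struct irq_2_iommu *chunk;
static int chunk_index = CHUNK_SLOTS;	/* force allocation on first use */

static struct irq_2_iommu *get_one_free_slot(void)
{
	if (chunk_index >= CHUNK_SLOTS) {
		chunk = calloc(CHUNK_SLOTS, sizeof(*chunk));
		if (!chunk)
			abort();	/* the removed code panicked here */
		chunk_index = 0;
	}
	return &chunk[chunk_index++];
}

int main(void)
{
	int i;

	for (i = 0; i < 10; i++)
		printf("slot %d at %p\n", i, (void *)get_one_free_slot());
	return 0;
}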
@@ -529,13 +529,10 @@ static int show_stat(struct seq_file *p, void *v)
softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq);
steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal);
guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest);
for_each_irq_desc(j, desc)
{
unsigned int temp;
sum += kstat_irqs_cpu(j, i);
temp = kstat_irqs_cpu(j, i);
sum += temp;
}
sum += arch_irq_stat_cpu(i);
}
sum += arch_irq_stat();
@@ -578,21 +575,13 @@ static int show_stat(struct seq_file *p, void *v)
seq_printf(p, "intr %llu", (unsigned long long)sum);
/* sum again ? it could be updated? */
for_each_irq_desc(j, desc)
{
for_each_irq_desc(j, desc) {
per_irq_sum = 0;
for_each_possible_cpu(i) {
unsigned int temp;
temp = kstat_irqs_cpu(j, i);
per_irq_sum += temp;
}
for_each_possible_cpu(i)
per_irq_sum += kstat_irqs_cpu(j, i);
#ifdef CONFIG_HAVE_SPARSE_IRQ
seq_printf(p, " %#x:%u", j, per_irq_sum);
#else
seq_printf(p, " %u", per_irq_sum);
#endif
}
seq_printf(p,
@@ -645,36 +634,14 @@ static const struct file_operations proc_stat_operations = {
*/
static void *int_seq_start(struct seq_file *f, loff_t *pos)
{
#ifdef CONFIG_HAVE_SPARSE_IRQ
struct irq_desc *desc;
int irq;
int count = *pos;
for_each_irq_desc(irq, desc) {
if (count-- == 0)
return desc;
}
return NULL;
#else
return (*pos <= nr_irqs) ? pos : NULL;
#endif
}
static void *int_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
#ifdef CONFIG_HAVE_SPARSE_IRQ
struct irq_desc *desc;
desc = ((struct irq_desc *)v)->next;
(*pos)++;
return desc;
#else
(*pos)++;
return (*pos <= nr_irqs) ? pos : NULL;
#endif
}
static void int_seq_stop(struct seq_file *f, void *v)
......
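In the removed sparse variant of int_seq_start()/int_seq_next() above, the seq_file position was resolved to the pos-th element of the irq_desc list by counting down while walking ->next, whereas the surviving variant just bounds-checks an integer index against nr_irqs. A small userspace sketch of that counted-walk cursor, with a plain linked list and assumed names:

/*
 * Sketch: seq_file-style start/next cursor over a linked list, where the
 * position is resolved by counting nodes (as the removed int_seq_start did).
 */
#include <stdio.h>

struct node {
	unsigned int irq;
	struct node *next;
};

static struct node *head;

static struct node *seq_start(long pos)
{
	struct node *n;

	for (n = head; n; n = n->next)
		if (pos-- == 0)
			return n;
	return NULL;	/* past the end of the list */
}

static struct node *seq_next(struct node *v, long *pos)
{
	(*pos)++;
	return v->next;
}

int main(void)
{
	struct node c = { 30, NULL }, b = { 16, &c }, a = { 0, &b };
	long pos = 0;
	struct node *v;

	head = &a;
	for (v = seq_start(pos); v; v = seq_next(v, &pos))
		printf("pos %ld -> irq %u\n", pos, v->irq);
	return 0;
}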
@@ -167,15 +167,8 @@ struct irq_2_iommu;
*/
struct irq_desc {
unsigned int irq;
#ifdef CONFIG_HAVE_SPARSE_IRQ
struct irq_desc *next;
struct timer_rand_state *timer_rand_state;
#endif
#ifdef CONFIG_HAVE_DYN_ARRAY
unsigned int *kstat_irqs;
#endif
#if defined(CONFIG_INTR_REMAP) && defined(CONFIG_HAVE_SPARSE_IRQ)
struct irq_2_iommu *irq_2_iommu;
#endif
irq_flow_handler_t handle_irq;
struct irq_chip *chip;
@@ -205,8 +198,6 @@ struct irq_desc {
} ____cacheline_internodealigned_in_smp;
#ifndef CONFIG_HAVE_SPARSE_IRQ
#ifndef CONFIG_HAVE_DYN_ARRAY
/* could be removed if we get rid of all irq_desc reference */
extern struct irq_desc irq_desc[NR_IRQS];
@@ -224,17 +215,6 @@ static inline struct irq_desc *irq_to_desc_alloc(unsigned int irq)
return irq_to_desc(irq);
}
#else
extern struct irq_desc *irq_to_desc(unsigned int irq);
extern struct irq_desc *irq_to_desc_alloc(unsigned int irq);
extern struct irq_desc *sparse_irqs;
#define for_each_irq_desc(irqX, desc) \
for (desc = sparse_irqs, irqX = desc->irq; desc; desc = desc->next, irqX = desc ? desc->irq : -1U)
#endif
#ifdef CONFIG_HAVE_DYN_ARRAY
#define kstat_irqs_this_cpu(DESC) \
((DESC)->kstat_irqs[smp_processor_id()])
......
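One detail of the removed for_each_irq_desc macro above (and of the for_each_irq_cfg macro removed earlier): the for-initializer reads desc->irq before the NULL test in the loop condition, so as written an empty list would be dereferenced; only the increment clause carries the guard. A guarded userspace sketch of the same iteration shape, with the extra head parameter and all names assumed for illustration:

/*
 * Illustration only: same list-walking macro shape as the removed
 * for_each_irq_desc, but with the head test applied in the initializer
 * as well, so a NULL head simply skips the body.
 */
#include <stdio.h>

struct irq_desc {
	unsigned int irq;
	struct irq_desc *next;
};

#define for_each_irq_desc_safe(irqX, desc, head)			\
	for ((desc) = (head), (irqX) = (desc) ? (desc)->irq : -1U;	\
	     (desc);							\
	     (desc) = (desc)->next, (irqX) = (desc) ? (desc)->irq : -1U)

int main(void)
{
	struct irq_desc b = { .irq = 16, .next = NULL };
	struct irq_desc a = { .irq = 0,  .next = &b };
	struct irq_desc *head = &a, *desc;
	unsigned int irq;

	for_each_irq_desc_safe(irq, desc, head)
		printf("irq %u\n", irq);

	head = NULL;	/* empty list: the guarded macro skips the body */
	for_each_irq_desc_safe(irq, desc, head)
		printf("never reached\n");
	return 0;
}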
@@ -111,15 +111,6 @@ static void init_kstat_irqs(struct irq_desc *desc, int nr_desc, int nr)
}
}
#ifdef CONFIG_HAVE_SPARSE_IRQ
/*
* Protect the sparse_irqs_free freelist:
*/
static DEFINE_SPINLOCK(sparse_irq_lock);
static struct irq_desc *sparse_irqs_free;
struct irq_desc *sparse_irqs;
#endif
static void __init init_work(void *data)
{
struct dyn_array *da = data;
@@ -130,121 +121,16 @@ static void __init init_work(void *data)
for (i = 0; i < *da->nr; i++) {
init_one_irq_desc(&desc[i]);
#ifndef CONFIG_HAVE_SPARSE_IRQ
desc[i].irq = i;
#endif
}
/* init kstat_irqs, nr_cpu_ids is ready already */
init_kstat_irqs(desc, *da->nr, nr_cpu_ids);
#ifdef CONFIG_HAVE_SPARSE_IRQ
for (i = 1; i < *da->nr; i++)
desc[i-1].next = &desc[i];
sparse_irqs_free = sparse_irqs;
sparse_irqs = NULL;
#endif
}
#ifdef CONFIG_HAVE_SPARSE_IRQ
static int nr_irq_desc = 32;
static int __init parse_nr_irq_desc(char *arg)
{
if (arg)
nr_irq_desc = simple_strtoul(arg, NULL, 0);
return 0;
}
early_param("nr_irq_desc", parse_nr_irq_desc);
DEFINE_DYN_ARRAY(sparse_irqs, sizeof(struct irq_desc), nr_irq_desc, PAGE_SIZE, init_work);
struct irq_desc *irq_to_desc(unsigned int irq)
{
struct irq_desc *desc;
desc = sparse_irqs;
while (desc) {
if (desc->irq == irq)
return desc;
desc = desc->next;
}
return NULL;
}
struct irq_desc *irq_to_desc_alloc(unsigned int irq)
{
struct irq_desc *desc, *desc_pri;
unsigned long flags;
int count = 0;
int i;
desc_pri = desc = sparse_irqs;
while (desc) {
if (desc->irq == irq)
return desc;
desc_pri = desc;
desc = desc->next;
count++;
}
spin_lock_irqsave(&sparse_irq_lock, flags);
/*
* we ran out of pre-allocated ones, allocate more
*/
if (!sparse_irqs_free) {
unsigned long phys;
unsigned long total_bytes;
printk(KERN_DEBUG "try to get more irq_desc %d\n", nr_irq_desc);
total_bytes = sizeof(struct irq_desc) * nr_irq_desc;
if (after_bootmem)
desc = kzalloc(total_bytes, GFP_ATOMIC);
else
desc = __alloc_bootmem_nopanic(total_bytes, PAGE_SIZE, 0);
if (!desc)
panic("please boot with nr_irq_desc= %d\n", count * 2);
phys = __pa(desc);
printk(KERN_DEBUG "irq_desc ==> [%#lx - %#lx]\n", phys, phys + total_bytes);
for (i = 0; i < nr_irq_desc; i++)
init_one_irq_desc(&desc[i]);
for (i = 1; i < nr_irq_desc; i++)
desc[i-1].next = &desc[i];
/* init kstat_irqs, nr_cpu_ids is ready already */
init_kstat_irqs(desc, nr_irq_desc, nr_cpu_ids);
sparse_irqs_free = desc;
}
desc = sparse_irqs_free;
sparse_irqs_free = sparse_irqs_free->next;
desc->next = NULL;
if (desc_pri)
desc_pri->next = desc;
else
sparse_irqs = desc;
desc->irq = irq;
spin_unlock_irqrestore(&sparse_irq_lock, flags);
return desc;
}
#else
struct irq_desc *irq_desc;
DEFINE_DYN_ARRAY(irq_desc, sizeof(struct irq_desc), nr_irqs, PAGE_SIZE, init_work);
#endif
#else
struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
......