Commit b9e5b4e6 authored by Benjamin Herrenschmidt, committed by Paul Mackerras

[POWERPC] Use the genirq framework

This adapts the generic powerpc interrupt handling code, and all of
the platforms except for the embedded 6xx machines, to use the new
genirq framework.
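
For context, the converted platforms now describe each PIC with a struct irq_chip
and let the generic flow handlers drive it. A minimal sketch of that pattern
(hypothetical foo_* names, not code from this patch):

    static void foo_mask(unsigned int irq)   { /* mask the source in HW */ }
    static void foo_unmask(unsigned int irq) { /* unmask the source in HW */ }
    static void foo_eoi(unsigned int irq)    { /* signal end-of-interrupt */ }

    static struct irq_chip foo_pic = {
        .typename = " FOO-PIC ",
        .mask     = foo_mask,
        .unmask   = foo_unmask,
        .eoi      = foo_eoi,
    };

    /* at init time, once per interrupt source */
    set_irq_chip_and_handler(irq, &foo_pic, handle_fasteoi_irq);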
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
parent 5a43a066
......@@ -62,28 +62,27 @@
#endif
int __irq_offset_value;
#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
#endif
static int ppc_spurious_interrupts;
#ifdef CONFIG_PPC32
#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
EXPORT_SYMBOL(__irq_offset_value);
atomic_t ppc_n_lost_interrupts;
#ifndef CONFIG_PPC_MERGE
#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
atomic_t ppc_n_lost_interrupts;
#endif
#ifdef CONFIG_TAU_INT
extern int tau_initialized;
extern int tau_interrupts(int);
#endif
#endif /* CONFIG_PPC32 */
#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE)
extern atomic_t ipi_recv;
extern atomic_t ipi_sent;
#endif
#endif /* CONFIG_PPC32 */
#ifdef CONFIG_PPC64
EXPORT_SYMBOL(irq_desc);
......@@ -219,15 +218,19 @@ void do_IRQ(struct pt_regs *regs)
curtp = current_thread_info();
irqtp = hardirq_ctx[smp_processor_id()];
if (curtp != irqtp) {
struct irq_desc *desc = irq_desc + irq;
void *handler = desc->handle_irq;
if (handler == NULL)
handler = &__do_IRQ;
irqtp->task = curtp->task;
irqtp->flags = 0;
call___do_IRQ(irq, regs, irqtp);
call_handle_irq(irq, desc, regs, irqtp, handler);
irqtp->task = NULL;
if (irqtp->flags)
set_bits(irqtp->flags, &curtp->flags);
} else
#endif
__do_IRQ(irq, regs);
generic_handle_irq(irq, regs);
} else if (irq != -2)
/* That's not SMP safe ... but who cares ? */
ppc_spurious_interrupts++;
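For reference, generic_handle_irq() in this kernel generation performs roughly the
same selection that the irq-stack path above open-codes (approximate sketch of the
generic helper, not part of this diff):

    static inline void generic_handle_irq(unsigned int irq, struct pt_regs *regs)
    {
        struct irq_desc *desc = irq_desc + irq;

        if (likely(desc->handle_irq))
            desc->handle_irq(irq, desc, regs);  /* genirq flow handler */
        else
            __do_IRQ(irq, regs);                /* legacy fallback */
    }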
......@@ -245,15 +248,6 @@ void do_IRQ(struct pt_regs *regs)
void __init init_IRQ(void)
{
#ifdef CONFIG_PPC64
static int once = 0;
if (once)
return;
once++;
#endif
ppc_md.init_IRQ();
#ifdef CONFIG_PPC64
irq_ctx_init();
......
......@@ -51,12 +51,14 @@ _GLOBAL(call_do_softirq)
mtlr r0
blr
_GLOBAL(call___do_IRQ)
_GLOBAL(call_handle_irq)
ld r8,0(r7)
mflr r0
std r0,16(r1)
stdu r1,THREAD_SIZE-112(r5)
mr r1,r5
bl .__do_IRQ
mtctr r8
stdu r1,THREAD_SIZE-112(r6)
mr r1,r6
bctrl
ld r1,0(r1)
ld r0,16(r1)
mtlr r0
......
......@@ -37,64 +37,51 @@
struct iic {
struct cbe_iic_thread_regs __iomem *regs;
u8 target_id;
u8 eoi_stack[16];
int eoi_ptr;
};
static DEFINE_PER_CPU(struct iic, iic);
void iic_local_enable(void)
static void iic_mask(unsigned int irq)
{
struct iic *iic = &__get_cpu_var(iic);
u64 tmp;
/*
* There seems to be a bug that is present in DD2.x CPUs
* and still only partially fixed in DD3.1.
* This bug causes a value written to the priority register
* not to make it there, resulting in a system hang unless we
* write it again.
* Masking with 0xf0 is done because the Cell BE does not
* implement the lower four bits of the interrupt priority,
* they always read back as zeroes, although future CPUs
* might implement different bits.
*/
do {
out_be64(&iic->regs->prio, 0xff);
tmp = in_be64(&iic->regs->prio);
} while ((tmp & 0xf0) != 0xf0);
}
void iic_local_disable(void)
{
out_be64(&__get_cpu_var(iic).regs->prio, 0x0);
}
static unsigned int iic_startup(unsigned int irq)
{
return 0;
}
static void iic_enable(unsigned int irq)
{
iic_local_enable();
}
static void iic_disable(unsigned int irq)
static void iic_unmask(unsigned int irq)
{
}
static void iic_end(unsigned int irq)
static void iic_eoi(unsigned int irq)
{
iic_local_enable();
struct iic *iic = &__get_cpu_var(iic);
out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]);
BUG_ON(iic->eoi_ptr < 0);
}
static struct hw_interrupt_type iic_pic = {
static struct irq_chip iic_chip = {
.typename = " CELL-IIC ",
.startup = iic_startup,
.enable = iic_enable,
.disable = iic_disable,
.end = iic_end,
.mask = iic_mask,
.unmask = iic_unmask,
.eoi = iic_eoi,
};
/* XXX All of this has to be reworked completely. We need to assign real
* interrupt numbers to the external interrupts and remove all the hard coded
* interrupt maps (rely on the device-tree whenever possible).
*
* Basically, my scheme is to define the "pendings" bits to be the HW interrupt
* number (ignoring the data and flags here). That means we can sort-of split
* external sources based on priority, and we can use request_irq() on pretty
* much anything.
*
For spider or axon, each has its own interrupt space. spider will just have
local "hardware" interrupts 0...xx * node stride. The node stride is not
* necessary (separate interrupt chips will have separate HW number space), but
will allow us to stay compatible with existing device-trees.
*
All of this little world will get a standard remapping scheme to map those HW
* numbers into the linux flat irq number space.
*/
static int iic_external_get_irq(struct cbe_iic_pending_bits pending)
{
int irq;
......@@ -118,9 +105,10 @@ static int iic_external_get_irq(struct cbe_iic_pending_bits pending)
*/
if (pending.class != 2)
break;
irq = IIC_EXT_OFFSET
+ spider_get_irq(node)
+ node * IIC_NODE_STRIDE;
/* TODO: We might want to silently ignore cascade interrupts
* when no cascade handler exists yet
*/
irq = IIC_EXT_CASCADE + node * IIC_NODE_STRIDE;
break;
case 0x01 ... 0x04:
case 0x07 ... 0x0a:
......@@ -152,6 +140,8 @@ int iic_get_irq(struct pt_regs *regs)
iic = &__get_cpu_var(iic);
*(unsigned long *) &pending =
in_be64((unsigned long __iomem *) &iic->regs->pending_destr);
iic->eoi_stack[++iic->eoi_ptr] = pending.prio;
BUG_ON(iic->eoi_ptr > 15);
irq = -1;
if (pending.flags & CBE_IIC_IRQ_VALID) {
......@@ -172,7 +162,7 @@ int iic_get_irq(struct pt_regs *regs)
/* hardcoded part to be compatible with older firmware */
static int setup_iic_hardcoded(void)
static int __init setup_iic_hardcoded(void)
{
struct device_node *np;
int nodeid, cpu;
......@@ -207,12 +197,13 @@ static int setup_iic_hardcoded(void)
printk(KERN_INFO "IIC for CPU %d at %lx\n", cpu, regs);
iic->regs = ioremap(regs, sizeof(struct cbe_iic_thread_regs));
iic->target_id = (nodeid << 4) + ((cpu & 1) ? 0xf : 0xe);
iic->eoi_stack[0] = 0xff;
}
return 0;
}
static int setup_iic(void)
static int __init setup_iic(void)
{
struct device_node *dn;
unsigned long *regs;
......@@ -248,11 +239,14 @@ static int setup_iic(void)
iic = &per_cpu(iic, np[0]);
iic->regs = ioremap(regs[0], sizeof(struct cbe_iic_thread_regs));
iic->target_id = ((np[0] & 2) << 3) + ((np[0] & 1) ? 0xf : 0xe);
iic->eoi_stack[0] = 0xff;
printk("IIC for CPU %d at %lx mapped to %p\n", np[0], regs[0], iic->regs);
iic = &per_cpu(iic, np[1]);
iic->regs = ioremap(regs[2], sizeof(struct cbe_iic_thread_regs));
iic->target_id = ((np[1] & 2) << 3) + ((np[1] & 1) ? 0xf : 0xe);
iic->eoi_stack[0] = 0xff;
printk("IIC for CPU %d at %lx mapped to %p\n", np[1], regs[2], iic->regs);
found++;
......@@ -304,10 +298,10 @@ static void iic_request_ipi(int ipi, const char *name)
int irq;
irq = iic_ipi_to_irq(ipi);
/* IPIs are marked IRQF_DISABLED as they must run with irqs
* disabled */
get_irq_desc(irq)->chip = &iic_pic;
get_irq_desc(irq)->status |= IRQ_PER_CPU;
set_irq_chip_and_handler(irq, &iic_chip, handle_percpu_irq);
request_irq(irq, iic_ipi_action, IRQF_DISABLED, name, NULL);
}
......@@ -321,20 +315,26 @@ void iic_request_IPIs(void)
}
#endif /* CONFIG_SMP */
static void iic_setup_spe_handlers(void)
static void __init iic_setup_builtin_handlers(void)
{
int be, isrc;
/* Assume two threads per BE are present */
/* XXX FIXME: Assume two threads per BE are present */
for (be=0; be < num_present_cpus() / 2; be++) {
int irq;
/* setup SPE chip and handlers */
for (isrc = 0; isrc < IIC_CLASS_STRIDE * 3; isrc++) {
int irq = IIC_NODE_STRIDE * be + IIC_SPE_OFFSET + isrc;
get_irq_desc(irq)->chip = &iic_pic;
irq = IIC_NODE_STRIDE * be + IIC_SPE_OFFSET + isrc;
set_irq_chip_and_handler(irq, &iic_chip, handle_fasteoi_irq);
}
/* setup cascade chip */
irq = IIC_EXT_CASCADE + be * IIC_NODE_STRIDE;
set_irq_chip_and_handler(irq, &iic_chip, handle_fasteoi_irq);
}
}
void iic_init_IRQ(void)
void __init iic_init_IRQ(void)
{
int cpu, irq_offset;
struct iic *iic;
......@@ -348,5 +348,6 @@ void iic_init_IRQ(void)
if (iic->regs)
out_be64(&iic->regs->prio, 0xff);
}
iic_setup_spe_handlers();
iic_setup_builtin_handlers();
}
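The eoi_stack added above links the two halves of the fast-EOI flow: iic_get_irq()
pushes the priority of the interrupt being taken, and iic_eoi() pops it back into
the priority register, so nested interrupts return to the level they preempted.
Condensed (paraphrasing the hunks above, not new code):

    /* iic_get_irq(): remember the priority we are now running at */
    iic->eoi_stack[++iic->eoi_ptr] = pending.prio;

    /* ... handle_fasteoi_irq() runs the action, then calls chip->eoi() ... */

    /* iic_eoi(): drop back to the priority of whatever was interrupted */
    out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]);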
......@@ -38,6 +38,7 @@
enum {
IIC_EXT_OFFSET = 0x00, /* Start of south bridge IRQs */
IIC_EXT_CASCADE = 0x20, /* There is no interrupt 32 on spider */
IIC_NUM_EXT = 0x40, /* Number of south bridge IRQs */
IIC_SPE_OFFSET = 0x40, /* Start of SPE interrupts */
IIC_CLASS_STRIDE = 0x10, /* SPE IRQs per class */
......@@ -51,13 +52,10 @@ extern int iic_get_irq(struct pt_regs *regs);
extern void iic_cause_IPI(int cpu, int mesg);
extern void iic_request_IPIs(void);
extern void iic_setup_cpu(void);
extern void iic_local_enable(void);
extern void iic_local_disable(void);
extern u8 iic_get_target_id(int cpu);
extern void spider_init_IRQ(void);
extern int spider_get_irq(int node);
#endif
#endif /* ASM_CELL_PIC_H */
......@@ -80,10 +80,14 @@ static void cell_progress(char *s, unsigned short hex)
printk("*** %04x : %s\n", hex, s ? s : "");
}
static void __init cell_init_irq(void)
{
iic_init_IRQ();
spider_init_IRQ();
}
static void __init cell_setup_arch(void)
{
ppc_md.init_IRQ = iic_init_IRQ;
ppc_md.get_irq = iic_get_irq;
#ifdef CONFIG_SPU_BASE
spu_priv1_ops = &spu_priv1_mmio_ops;
#endif
......@@ -109,7 +113,6 @@ static void __init cell_setup_arch(void)
/* Find and initialize PCI host bridges */
init_pci_config_tokens();
find_and_init_phbs();
spider_init_IRQ();
cbe_pervasive_init();
#ifdef CONFIG_DUMMY_CONSOLE
conswitchp = &dummy_con;
......@@ -174,6 +177,9 @@ define_machine(cell) {
.calibrate_decr = generic_calibrate_decr,
.check_legacy_ioport = cell_check_legacy_ioport,
.progress = cell_progress,
.init_IRQ = cell_init_irq,
.get_irq = iic_get_irq,
#ifdef CONFIG_KEXEC
.machine_kexec = default_machine_kexec,
.machine_kexec_prepare = default_machine_kexec_prepare,
......
......@@ -82,17 +82,20 @@ static void __iomem *spider_get_irq_config(int irq)
return pic + TIR_CFGA + 8 * spider_get_nr(irq);
}
static void spider_enable_irq(unsigned int irq)
static void spider_unmask_irq(unsigned int irq)
{
int nodeid = (irq / IIC_NODE_STRIDE) * 0x10;
void __iomem *cfg = spider_get_irq_config(irq);
irq = spider_get_nr(irq);
/* FIXME: Most of that is configuration and has nothing to do with enabling/disabling,
* besides, it's also partially bogus.
*/
out_be32(cfg, (in_be32(cfg) & ~0xf0)| 0x3107000eu | nodeid);
out_be32(cfg + 4, in_be32(cfg + 4) | 0x00020000u | irq);
}
static void spider_disable_irq(unsigned int irq)
static void spider_mask_irq(unsigned int irq)
{
void __iomem *cfg = spider_get_irq_config(irq);
irq = spider_get_nr(irq);
......@@ -100,39 +103,21 @@ static void spider_disable_irq(unsigned int irq)
out_be32(cfg, in_be32(cfg) & ~0x30000000u);
}
static unsigned int spider_startup_irq(unsigned int irq)
{
spider_enable_irq(irq);
return 0;
}
static void spider_shutdown_irq(unsigned int irq)
{
spider_disable_irq(irq);
}
static void spider_end_irq(unsigned int irq)
{
spider_enable_irq(irq);
}
static void spider_ack_irq(unsigned int irq)
{
spider_disable_irq(irq);
iic_local_enable();
/* Should reset edge detection logic but we don't configure any edge interrupt
* at the moment.
*/
}
static struct hw_interrupt_type spider_pic = {
static struct irq_chip spider_pic = {
.typename = " SPIDER ",
.startup = spider_startup_irq,
.shutdown = spider_shutdown_irq,
.enable = spider_enable_irq,
.disable = spider_disable_irq,
.unmask = spider_unmask_irq,
.mask = spider_mask_irq,
.ack = spider_ack_irq,
.end = spider_end_irq,
};
int spider_get_irq(int node)
static int spider_get_irq(int node)
{
unsigned long cs;
void __iomem *regs = spider_pics[node];
......@@ -145,95 +130,89 @@ int spider_get_irq(int node)
return cs;
}
static void spider_irq_cascade(unsigned int irq, struct irq_desc *desc,
struct pt_regs *regs)
{
int node = (int)(long)desc->handler_data;
int cascade_irq;
cascade_irq = spider_get_irq(node);
generic_handle_irq(cascade_irq, regs);
desc->chip->eoi(irq);
}
/* hardcoded part to be compatible with older firmware */
void spider_init_IRQ_hardcoded(void)
static void __init spider_init_one(int node, unsigned long addr)
{
int node;
long spiderpic;
long pics[] = { 0x24000008000, 0x34000008000 };
int n;
pr_debug("%s(%d): Using hardcoded defaults\n", __FUNCTION__, __LINE__);
for (node = 0; node < num_present_cpus()/2; node++) {
spiderpic = pics[node];
printk(KERN_DEBUG "SPIDER addr: %lx\n", spiderpic);
spider_pics[node] = ioremap(spiderpic, 0x800);
for (n = 0; n < IIC_NUM_EXT; n++) {
int irq = n + IIC_EXT_OFFSET + node * IIC_NODE_STRIDE;
get_irq_desc(irq)->chip = &spider_pic;
}
/* do not mask any interrupts because of level */
out_be32(spider_pics[node] + TIR_MSK, 0x0);
/* disable edge detection clear */
/* out_be32(spider_pics[node] + TIR_EDC, 0x0); */
/* enable interrupt packets to be output */
out_be32(spider_pics[node] + TIR_PIEN,
in_be32(spider_pics[node] + TIR_PIEN) | 0x1);
/* Enable the interrupt detection enable bit. Do this last! */
out_be32(spider_pics[node] + TIR_DEN,
in_be32(spider_pics[node] + TIR_DEN) | 0x1);
int n, irq;
spider_pics[node] = ioremap(addr, 0x800);
if (spider_pics[node] == NULL)
panic("spider_pic: can't map registers !");
printk(KERN_INFO "spider_pic: mapped for node %d, addr: 0x%lx mapped to %p\n",
node, addr, spider_pics[node]);
for (n = 0; n < IIC_NUM_EXT; n++) {
if (n == IIC_EXT_CASCADE)
continue;
irq = n + IIC_EXT_OFFSET + node * IIC_NODE_STRIDE;
set_irq_chip_and_handler(irq, &spider_pic, handle_level_irq);
get_irq_desc(irq)->status |= IRQ_LEVEL;
}
/* do not mask any interrupts because of level */
out_be32(spider_pics[node] + TIR_MSK, 0x0);
/* disable edge detection clear */
/* out_be32(spider_pics[node] + TIR_EDC, 0x0); */
/* enable interrupt packets to be output */
out_be32(spider_pics[node] + TIR_PIEN,
in_be32(spider_pics[node] + TIR_PIEN) | 0x1);
/* Hook up cascade */
irq = IIC_EXT_CASCADE + node * IIC_NODE_STRIDE;
set_irq_data(irq, (void *)(long)node);
set_irq_chained_handler(irq, spider_irq_cascade);
/* Enable the interrupt detection enable bit. Do this last! */
out_be32(spider_pics[node] + TIR_DEN,
in_be32(spider_pics[node] + TIR_DEN) | 0x1);
}
void spider_init_IRQ(void)
void __init spider_init_IRQ(void)
{
long spider_reg;
unsigned long *spider_reg;
struct device_node *dn;
char *compatible;
int n, node = 0;
int node = 0;
/* XXX node numbers are totally bogus. We _hope_ we get the device nodes in the right
* order here but that's definitely not guaranteed; we need to get the node from the
* device tree instead. There is currently no proper property for it (but our whole
* device-tree is bogus anyway) so all we can do is pray or maybe test the address
* and deduce the node-id
*/
for (dn = NULL; (dn = of_find_node_by_name(dn, "interrupt-controller"));) {
compatible = (char *)get_property(dn, "compatible", NULL);
if (!compatible)
continue;
if (strstr(compatible, "CBEA,platform-spider-pic"))
spider_reg = *(long *)get_property(dn,"reg", NULL);
else if (strstr(compatible, "sti,platform-spider-pic")) {
spider_init_IRQ_hardcoded();
return;
if (strstr(compatible, "CBEA,platform-spider-pic"))
spider_reg = (unsigned long *)get_property(dn, "reg", NULL);
else if (strstr(compatible, "sti,platform-spider-pic") && (node < 2)) {
static long hard_coded_pics[] = { 0x24000008000, 0x34000008000 };
spider_reg = &hard_coded_pics[node];
} else
continue;
if (!spider_reg)
printk("interrupt controller does not have reg property !\n");
n = prom_n_addr_cells(dn);
if ( n != 2)
printk("reg property with invalid number of elements \n");
spider_pics[node] = ioremap(spider_reg, 0x800);
printk("SPIDER addr: %lx with %i addr_cells mapped to %p\n",
spider_reg, n, spider_pics[node]);
for (n = 0; n < IIC_NUM_EXT; n++) {
int irq = n + IIC_EXT_OFFSET + node * IIC_NODE_STRIDE;
get_irq_desc(irq)->chip = &spider_pic;
}
/* do not mask any interrupts because of level */
out_be32(spider_pics[node] + TIR_MSK, 0x0);
/* disable edge detection clear */
/* out_be32(spider_pics[node] + TIR_EDC, 0x0); */
/* enable interrupt packets to be output */
out_be32(spider_pics[node] + TIR_PIEN,
in_be32(spider_pics[node] + TIR_PIEN) | 0x1);
/* Enable the interrupt detection enable bit. Do this last! */
out_be32(spider_pics[node] + TIR_DEN,
in_be32(spider_pics[node] + TIR_DEN) | 0x1);
if (spider_reg == NULL)
printk(KERN_ERR "spider_pic: No address for node %d\n", node);
spider_init_one(node, *spider_reg);
node++;
}
}
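The spider, CHRP and pSeries conversions all use the same genirq cascade idiom:
a chained handler on the parent PIC that polls the child and feeds it through
generic_handle_irq(). A generic sketch (hypothetical bar_* names, not code from
this patch):

    static void bar_cascade(unsigned int irq, struct irq_desc *desc,
                            struct pt_regs *regs)
    {
        void *pic = desc->handler_data;      /* set via set_irq_data() below */
        int virq = bar_get_child_irq(pic);   /* ask the downstream PIC */

        if (virq >= 0)
            generic_handle_irq(virq, regs);  /* run the child's flow handler */
        desc->chip->eoi(irq);                /* EOI the cascade on the parent */
    }

    /* at init time */
    set_irq_data(cascade_irq, pic_private_data);
    set_irq_chained_handler(cascade_irq, bar_cascade);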
......@@ -315,6 +315,21 @@ chrp_event_scan(unsigned long unused)
jiffies + event_scan_interval);
}
void chrp_8259_cascade(unsigned int irq, struct irq_desc *desc,
struct pt_regs *regs)
{
unsigned int max = 100;
while(max--) {
int irq = i8259_irq(regs);
if (max == 99)
desc->chip->eoi(irq);
if (irq < 0)
break;
generic_handle_irq(irq, regs);
};
}
/*
* Finds the open-pic node and sets up the mpic driver.
*/
......@@ -402,7 +417,7 @@ static void __init chrp_find_openpic(void)
}
mpic_init(chrp_mpic);
mpic_setup_cascade(NUM_ISA_INTERRUPTS, i8259_irq_cascade, NULL);
set_irq_chained_handler(NUM_ISA_INTERRUPTS, chrp_8259_cascade);
}
#if defined(CONFIG_VT) && defined(CONFIG_INPUT_ADBHID) && defined(XMON)
......
......@@ -297,13 +297,13 @@ static void iseries_end_IRQ(unsigned int irq)
(REAL_IRQ_TO_IDSEL(rirq) << 4) + REAL_IRQ_TO_FUNC(rirq));
}
static hw_irq_controller iSeries_IRQ_handler = {
.typename = "iSeries irq controller",
.startup = iseries_startup_IRQ,
.shutdown = iseries_shutdown_IRQ,
.enable = iseries_enable_IRQ,
.disable = iseries_disable_IRQ,
.end = iseries_end_IRQ
static struct irq_chip iseries_pic = {
.typename = "iSeries irq controller",
.startup = iseries_startup_IRQ,
.shutdown = iseries_shutdown_IRQ,
.unmask = iseries_enable_IRQ,
.mask = iseries_disable_IRQ,
.eoi = iseries_end_IRQ
};
/*
......@@ -322,8 +322,7 @@ int __init iSeries_allocate_IRQ(HvBusNumber bus,
realirq = (((((sub_bus << 8) + (bus - 1)) << 3) + (idsel - 1)) << 3)
+ function;
virtirq = virt_irq_create_mapping(realirq);
irq_desc[virtirq].chip = &iSeries_IRQ_handler;
set_irq_chip_and_handler(virtirq, &iseries_pic, handle_fasteoi_irq);
return virtirq;
}
......
......@@ -118,6 +118,21 @@ static void __init fwnmi_init(void)
fwnmi_active = 1;
}
void pSeries_8259_cascade(unsigned int irq, struct irq_desc *desc,
struct pt_regs *regs)
{
unsigned int max = 100;
while(max--) {
int cascade_irq = i8259_irq(regs);
if (max == 99)
desc->chip->eoi(irq);
if (cascade_irq < 0)
break;
generic_handle_irq(cascade_irq, regs);
};
}
static void __init pSeries_init_mpic(void)
{
unsigned int *addrp;
......@@ -140,7 +155,7 @@ static void __init pSeries_init_mpic(void)
i8259_init(intack, 0);
/* Hook cascade to mpic */
mpic_setup_cascade(NUM_ISA_INTERRUPTS, i8259_irq_cascade, NULL);
set_irq_chained_handler(NUM_ISA_INTERRUPTS, pSeries_8259_cascade);
}
static void __init pSeries_setup_mpic(void)
......@@ -201,10 +216,8 @@ static void __init pSeries_setup_arch(void)
/* Allocate the mpic now, so that find_and_init_phbs() can
* fill the ISUs */
pSeries_setup_mpic();
} else {
} else
ppc_md.init_IRQ = xics_init_IRQ;
ppc_md.get_irq = xics_get_irq;
}
#ifdef CONFIG_SMP
smp_init_pSeries();
......@@ -291,10 +304,7 @@ static void pSeries_mach_cpu_die(void)
{
local_irq_disable();
idle_task_exit();
/* Some hardware requires clearing the CPPR, while other hardware does not
* it is safe either way
*/
pSeriesLP_cppr_info(0, 0);
xics_teardown_cpu(0);
rtas_stop_self();
/* Should never get here... */
BUG();
......
......@@ -14,13 +14,12 @@
#include <linux/cache.h>
void xics_init_IRQ(void);
int xics_get_irq(struct pt_regs *);
void xics_setup_cpu(void);
void xics_teardown_cpu(int secondary);
void xics_cause_IPI(int cpu);
void xics_request_IPIs(void);
void xics_migrate_irqs_away(void);
extern void xics_init_IRQ(void);
extern void xics_setup_cpu(void);
extern void xics_teardown_cpu(int secondary);
extern void xics_cause_IPI(int cpu);
extern void xics_request_IPIs(void);
extern void xics_migrate_irqs_away(void);
/* first argument is ignored for now*/
void pSeriesLP_cppr_info(int n_cpu, u8 value);
......@@ -31,4 +30,8 @@ struct xics_ipi_struct {
extern struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned;
struct irq_desc;
extern void pSeries_8259_cascade(unsigned int irq, struct irq_desc *desc,
struct pt_regs *regs);
#endif /* _POWERPC_KERNEL_XICS_H */
......@@ -69,11 +69,6 @@ int i8259_irq(struct pt_regs *regs)
return irq + i8259_pic_irq_offset;
}
int i8259_irq_cascade(struct pt_regs *regs, void *unused)
{
return i8259_irq(regs);
}
static void i8259_mask_and_ack_irq(unsigned int irq_nr)
{
unsigned long flags;
......@@ -129,19 +124,11 @@ static void i8259_unmask_irq(unsigned int irq_nr)
spin_unlock_irqrestore(&i8259_lock, flags);
}
static void i8259_end_irq(unsigned int irq)
{
if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))
&& irq_desc[irq].action)
i8259_unmask_irq(irq);
}
struct hw_interrupt_type i8259_pic = {
.typename = " i8259 ",
.enable = i8259_unmask_irq,
.disable = i8259_mask_irq,
.ack = i8259_mask_and_ack_irq,
.end = i8259_end_irq,
static struct irq_chip i8259_pic = {
.typename = " i8259 ",
.mask = i8259_mask_irq,
.unmask = i8259_unmask_irq,
.mask_ack = i8259_mask_and_ack_irq,
};
static struct resource pic1_iores = {
......@@ -207,8 +194,11 @@ void __init i8259_init(unsigned long intack_addr, int offset)
spin_unlock_irqrestore(&i8259_lock, flags);
for (i = 0; i < NUM_ISA_INTERRUPTS; ++i)
irq_desc[offset + i].chip = &i8259_pic;
for (i = 0; i < NUM_ISA_INTERRUPTS; ++i) {
set_irq_chip_and_handler(offset + i, &i8259_pic,
handle_level_irq);
irq_desc[offset + i].status |= IRQ_LEVEL;
}
/* reserve our resources */
setup_irq(offset + 2, &i8259_irqaction);
......
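Dropping i8259_end_irq() is possible because handle_level_irq() now drives the chip:
it mask+acks on entry, runs the actions, and unmasks on exit unless the line was
disabled. Approximate shape of that flow (simplified, error paths omitted):

    static void level_irq_flow(unsigned int irq, struct irq_desc *desc,
                               struct pt_regs *regs)
    {
        desc->chip->mask_ack(irq);                  /* mask + ack on entry */
        handle_IRQ_event(irq, regs, desc->action);  /* run registered actions */
        if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
            desc->chip->unmask(irq);                /* the old end_irq() job */
    }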
......@@ -100,8 +100,8 @@ static inline u32 _mpic_cpu_read(struct mpic *mpic, unsigned int reg)
if (mpic->flags & MPIC_PRIMARY)
cpu = hard_smp_processor_id();
return _mpic_read(mpic->flags & MPIC_BIG_ENDIAN, mpic->cpuregs[cpu], reg);
return _mpic_read(mpic->flags & MPIC_BIG_ENDIAN,
mpic->cpuregs[cpu], reg);
}
static inline void _mpic_cpu_write(struct mpic *mpic, unsigned int reg, u32 value)
......@@ -378,14 +378,14 @@ static inline u32 mpic_physmask(u32 cpumask)
/* Get the mpic structure from the IPI number */
static inline struct mpic * mpic_from_ipi(unsigned int ipi)
{
return container_of(irq_desc[ipi].chip, struct mpic, hc_ipi);
return irq_desc[ipi].chip_data;
}
#endif
/* Get the mpic structure from the irq number */
static inline struct mpic * mpic_from_irq(unsigned int irq)
{
return container_of(irq_desc[irq].chip, struct mpic, hc_irq);
return irq_desc[irq].chip_data;
}
/* Send an EOI */
......@@ -410,7 +410,7 @@ static irqreturn_t mpic_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
*/
static void mpic_enable_irq(unsigned int irq)
static void mpic_unmask_irq(unsigned int irq)
{
unsigned int loops = 100000;
struct mpic *mpic = mpic_from_irq(irq);
......@@ -429,35 +429,9 @@ static void mpic_enable_irq(unsigned int irq)
break;
}
} while(mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK);
#ifdef CONFIG_MPIC_BROKEN_U3
if (mpic->flags & MPIC_BROKEN_U3) {
unsigned int src = irq - mpic->irq_offset;
if (mpic_is_ht_interrupt(mpic, src) &&
(irq_desc[irq].status & IRQ_LEVEL))
mpic_ht_end_irq(mpic, src);
}
#endif /* CONFIG_MPIC_BROKEN_U3 */
}
static unsigned int mpic_startup_irq(unsigned int irq)
{
#ifdef CONFIG_MPIC_BROKEN_U3
struct mpic *mpic = mpic_from_irq(irq);
unsigned int src = irq - mpic->irq_offset;
#endif /* CONFIG_MPIC_BROKEN_U3 */
mpic_enable_irq(irq);
#ifdef CONFIG_MPIC_BROKEN_U3
if (mpic_is_ht_interrupt(mpic, src))
mpic_startup_ht_interrupt(mpic, src, irq_desc[irq].status);
#endif /* CONFIG_MPIC_BROKEN_U3 */
return 0;
}
static void mpic_disable_irq(unsigned int irq)
static void mpic_mask_irq(unsigned int irq)
{
unsigned int loops = 100000;
struct mpic *mpic = mpic_from_irq(irq);
......@@ -478,23 +452,58 @@ static void mpic_disable_irq(unsigned int irq)
} while(!(mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK));
}
static void mpic_shutdown_irq(unsigned int irq)
static void mpic_end_irq(unsigned int irq)
{
struct mpic *mpic = mpic_from_irq(irq);
#ifdef DEBUG_IRQ
DBG("%s: end_irq: %d\n", mpic->name, irq);
#endif
/* We always EOI on end_irq() even for edge interrupts since that
* should only lower the priority, the MPIC should have properly
* latched another edge interrupt coming in anyway
*/
mpic_eoi(mpic);
}
#ifdef CONFIG_MPIC_BROKEN_U3
static void mpic_unmask_ht_irq(unsigned int irq)
{
struct mpic *mpic = mpic_from_irq(irq);
unsigned int src = irq - mpic->irq_offset;
if (mpic_is_ht_interrupt(mpic, src))
mpic_shutdown_ht_interrupt(mpic, src, irq_desc[irq].status);
mpic_unmask_irq(irq);
#endif /* CONFIG_MPIC_BROKEN_U3 */
if (irq_desc[irq].status & IRQ_LEVEL)
mpic_ht_end_irq(mpic, src);
}
static unsigned int mpic_startup_ht_irq(unsigned int irq)
{
struct mpic *mpic = mpic_from_irq(irq);
unsigned int src = irq - mpic->irq_offset;
mpic_disable_irq(irq);
mpic_unmask_irq(irq);
mpic_startup_ht_interrupt(mpic, src, irq_desc[irq].status);
return 0;
}
static void mpic_end_irq(unsigned int irq)
static void mpic_shutdown_ht_irq(unsigned int irq)
{
struct mpic *mpic = mpic_from_irq(irq);
unsigned int src = irq - mpic->irq_offset;
mpic_shutdown_ht_interrupt(mpic, src, irq_desc[irq].status);
mpic_mask_irq(irq);
}
static void mpic_end_ht_irq(unsigned int irq)
{
struct mpic *mpic = mpic_from_irq(irq);
unsigned int src = irq - mpic->irq_offset;
#ifdef DEBUG_IRQ
DBG("%s: end_irq: %d\n", mpic->name, irq);
......@@ -504,21 +513,16 @@ static void mpic_end_irq(unsigned int irq)
* latched another edge interrupt coming in anyway
*/
#ifdef CONFIG_MPIC_BROKEN_U3
if (mpic->flags & MPIC_BROKEN_U3) {
unsigned int src = irq - mpic->irq_offset;
if (mpic_is_ht_interrupt(mpic, src) &&
(irq_desc[irq].status & IRQ_LEVEL))
mpic_ht_end_irq(mpic, src);
}
#endif /* CONFIG_MPIC_BROKEN_U3 */
if (irq_desc[irq].status & IRQ_LEVEL)
mpic_ht_end_irq(mpic, src);
mpic_eoi(mpic);
}
#endif /* CONFIG_MPIC_BROKEN_U3 */
#ifdef CONFIG_SMP
static void mpic_enable_ipi(unsigned int irq)
static void mpic_unmask_ipi(unsigned int irq)
{
struct mpic *mpic = mpic_from_ipi(irq);
unsigned int src = irq - mpic->ipi_offset;
......@@ -527,7 +531,7 @@ static void mpic_enable_ipi(unsigned int irq)
mpic_ipi_write(src, mpic_ipi_read(src) & ~MPIC_VECPRI_MASK);
}
static void mpic_disable_ipi(unsigned int irq)
static void mpic_mask_ipi(unsigned int irq)
{
/* NEVER disable an IPI... that's just plain wrong! */
}
......@@ -560,6 +564,30 @@ static void mpic_set_affinity(unsigned int irq, cpumask_t cpumask)
mpic_physmask(cpus_addr(tmp)[0]));
}
static struct irq_chip mpic_irq_chip = {
.mask = mpic_mask_irq,
.unmask = mpic_unmask_irq,
.eoi = mpic_end_irq,
};
#ifdef CONFIG_SMP
static struct irq_chip mpic_ipi_chip = {
.mask = mpic_mask_ipi,
.unmask = mpic_unmask_ipi,
.eoi = mpic_end_ipi,
};
#endif /* CONFIG_SMP */
#ifdef CONFIG_MPIC_BROKEN_U3
static struct irq_chip mpic_irq_ht_chip = {
.startup = mpic_startup_ht_irq,
.shutdown = mpic_shutdown_ht_irq,
.mask = mpic_mask_irq,
.unmask = mpic_unmask_ht_irq,
.eoi = mpic_end_ht_irq,
};
#endif /* CONFIG_MPIC_BROKEN_U3 */
/*
* Exported functions
......@@ -589,19 +617,19 @@ struct mpic * __init mpic_alloc(unsigned long phys_addr,
memset(mpic, 0, sizeof(struct mpic));
mpic->name = name;
mpic->hc_irq = mpic_irq_chip;
mpic->hc_irq.typename = name;
mpic->hc_irq.startup = mpic_startup_irq;
mpic->hc_irq.shutdown = mpic_shutdown_irq;
mpic->hc_irq.enable = mpic_enable_irq;
mpic->hc_irq.disable = mpic_disable_irq;
mpic->hc_irq.end = mpic_end_irq;
if (flags & MPIC_PRIMARY)
mpic->hc_irq.set_affinity = mpic_set_affinity;
#ifdef CONFIG_MPIC_BROKEN_U3
mpic->hc_ht_irq = mpic_irq_ht_chip;
mpic->hc_ht_irq.typename = name;
if (flags & MPIC_PRIMARY)
mpic->hc_ht_irq.set_affinity = mpic_set_affinity;
#endif /* CONFIG_MPIC_BROKEN_U3 */
#ifdef CONFIG_SMP
mpic->hc_ipi.typename = name;
mpic->hc_ipi.enable = mpic_enable_ipi;
mpic->hc_ipi.disable = mpic_disable_ipi;
mpic->hc_ipi.end = mpic_end_ipi;
mpic->hc_ipi = mpic_ipi_chip;
#endif /* CONFIG_SMP */
mpic->flags = flags;
......@@ -697,28 +725,6 @@ void __init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
mpic->num_sources = isu_first + mpic->isu_size;
}
void __init mpic_setup_cascade(unsigned int irq, mpic_cascade_t handler,
void *data)
{
struct mpic *mpic = mpic_find(irq, NULL);
unsigned long flags;
/* Synchronization here is a bit dodgy, so don't try to replace cascade
* interrupts on the fly too often ... but normally it's set up at boot.
*/
spin_lock_irqsave(&mpic_lock, flags);
if (mpic->cascade)
mpic_disable_irq(mpic->cascade_vec + mpic->irq_offset);
mpic->cascade = NULL;
wmb();
mpic->cascade_vec = irq - mpic->irq_offset;
mpic->cascade_data = data;
wmb();
mpic->cascade = handler;
mpic_enable_irq(irq);
spin_unlock_irqrestore(&mpic_lock, flags);
}
void __init mpic_init(struct mpic *mpic)
{
int i;
......@@ -750,8 +756,10 @@ void __init mpic_init(struct mpic *mpic)
#ifdef CONFIG_SMP
if (!(mpic->flags & MPIC_PRIMARY))
continue;
irq_desc[mpic->ipi_offset+i].status |= IRQ_PER_CPU;
irq_desc[mpic->ipi_offset+i].chip = &mpic->hc_ipi;
set_irq_chip_data(mpic->ipi_offset+i, mpic);
set_irq_chip_and_handler(mpic->ipi_offset+i,
&mpic->hc_ipi,
handle_percpu_irq);
#endif /* CONFIG_SMP */
}
......@@ -763,7 +771,7 @@ void __init mpic_init(struct mpic *mpic)
/* Do the HT PIC fixups on U3 broken mpic */
DBG("MPIC flags: %x\n", mpic->flags);
if ((mpic->flags & MPIC_BROKEN_U3) && (mpic->flags & MPIC_PRIMARY))
mpic_scan_ht_pics(mpic);
mpic_scan_ht_pics(mpic);
#endif /* CONFIG_MPIC_BROKEN_U3 */
for (i = 0; i < mpic->num_sources; i++) {
......@@ -811,8 +819,17 @@ void __init mpic_init(struct mpic *mpic)
/* init linux descriptors */
if (i < mpic->irq_count) {
irq_desc[mpic->irq_offset+i].status = level ? IRQ_LEVEL : 0;
irq_desc[mpic->irq_offset+i].chip = &mpic->hc_irq;
struct irq_chip *chip = &mpic->hc_irq;
irq_desc[mpic->irq_offset+i].status |=
level ? IRQ_LEVEL : 0;
#ifdef CONFIG_MPIC_BROKEN_U3
if (mpic_is_ht_interrupt(mpic, i))
chip = &mpic->hc_ht_irq;
#endif /* CONFIG_MPIC_BROKEN_U3 */
set_irq_chip_data(mpic->irq_offset+i, mpic);
set_irq_chip_and_handler(mpic->irq_offset+i, chip,
handle_fasteoi_irq);
}
}
......@@ -986,14 +1003,6 @@ int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs)
#ifdef DEBUG_LOW
DBG("%s: get_one_irq(): %d\n", mpic->name, irq);
#endif
if (mpic->cascade && irq == mpic->cascade_vec) {
#ifdef DEBUG_LOW
DBG("%s: cascading ...\n", mpic->name);
#endif
irq = mpic->cascade(regs, mpic->cascade_data);
mpic_eoi(mpic);
return irq;
}
if (unlikely(irq == MPIC_VEC_SPURRIOUS))
return -1;
if (irq < MPIC_VEC_IPI_0) {
......
......@@ -4,11 +4,8 @@
#include <linux/irq.h>
extern struct hw_interrupt_type i8259_pic;
extern void i8259_init(unsigned long intack_addr, int offset);
extern int i8259_irq(struct pt_regs *regs);
extern int i8259_irq_cascade(struct pt_regs *regs, void *unused);
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_I8259_H */
......@@ -514,9 +514,12 @@ extern u64 ppc64_interrupt_controller;
#endif
#ifndef CONFIG_PPC_MERGE
#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
/* pedantic: these are long because they are used with set_bit --RR */
extern unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
#endif
extern atomic_t ppc_n_lost_interrupts;
#define virt_irq_create_mapping(x) (x)
......@@ -579,9 +582,8 @@ extern struct thread_info *softirq_ctx[NR_CPUS];
extern void irq_ctx_init(void);
extern void call_do_softirq(struct thread_info *tp);
extern int call___do_IRQ(int irq, struct pt_regs *regs,
struct thread_info *tp);
extern int call_handle_irq(int irq, void *p1, void *p2,
struct thread_info *tp, void *func);
#else
#define irq_ctx_init()
......
......@@ -114,9 +114,6 @@
#define MPIC_VEC_TIMER_1 248
#define MPIC_VEC_TIMER_0 247
/* Type definition of the cascade handler */
typedef int (*mpic_cascade_t)(struct pt_regs *regs, void *data);
#ifdef CONFIG_MPIC_BROKEN_U3
/* Fixup table entry */
struct mpic_irq_fixup
......@@ -133,9 +130,12 @@ struct mpic_irq_fixup
struct mpic
{
/* The "linux" controller struct */
hw_irq_controller hc_irq;
struct irq_chip hc_irq;
#ifdef CONFIG_MPIC_BROKEN_U3
struct irq_chip hc_ht_irq;
#endif
#ifdef CONFIG_SMP
hw_irq_controller hc_ipi;
struct irq_chip hc_ipi;
#endif
const char *name;
/* Flags */
......@@ -153,10 +153,6 @@ struct mpic
unsigned int num_sources;
/* Number of CPUs */
unsigned int num_cpus;
/* cascade handler */
mpic_cascade_t cascade;
void *cascade_data;
unsigned int cascade_vec;
/* senses array */
unsigned char *senses;
unsigned int senses_count;
......@@ -237,17 +233,6 @@ extern void mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
*/
extern void mpic_init(struct mpic *mpic);
/* Setup a cascade. Currently, only one cascade is supported this
* way, though you can always do a normal request_irq() and add
* other cascades this way. You should call this _after_ having
* added all the ISUs
*
* @irq_no: "linux" irq number of the cascade (that is offset'ed vector)
* @handler: cascade handler function
*/
extern void mpic_setup_cascade(unsigned int irq_no, mpic_cascade_t handler,
void *data);
/*
* All of the following functions must only be used after the
* ISUs have been assigned and the controller fully initialized
......