/*
 *  arch/ppc/kernel/irq.c
 *
 *  Derived from arch/i386/kernel/irq.c
 *    Copyright (C) 1992 Linus Torvalds
 *  Adapted from arch/i386 by Gary Thomas
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Updated and modified by Cort Dougan (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Cort Dougan
 *  Adapted for Power Macintosh by Paul Mackerras
 *    Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 * 
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/config.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/kallsyms.h>

#include <asm/uaccess.h>
#include <asm/bitops.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/iSeries/LparData.h>
#include <asm/machdep.h>
#include <asm/paca.h>

#ifdef CONFIG_SMP
extern void iSeries_smp_message_recv( struct pt_regs * );
#endif

static void register_irq_proc (unsigned int irq);

irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
	[0 ... NR_IRQS-1] = {
		.lock = SPIN_LOCK_UNLOCKED
	}
};

int __irq_offset_value;
int ppc_spurious_interrupts = 0;
unsigned long lpEvent_count = 0;

int
setup_irq(unsigned int irq, struct irqaction * new)
{
	int shared = 0;
	unsigned long flags;
	struct irqaction *old, **p;
	irq_desc_t *desc = get_irq_desc(irq);

	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, so we want to call it
		 * first, outside of the atomic block.  If the wrong
		 * driver is loaded this may clear the entropy pool
		 * without actually installing a new handler, but only
		 * the sysadmin can do that, so it is not a real
		 * problem.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	spin_lock_irqsave(&desc->lock,flags);
	p = &desc->action;
	if ((old = *p) != NULL) {
		/* Can't share interrupts unless both agree to */
		if (!(old->flags & new->flags & SA_SHIRQ)) {
			spin_unlock_irqrestore(&desc->lock,flags);
			return -EBUSY;
		}

		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
		shared = 1;
	}

	*p = new;

	if (!shared) {
		desc->depth = 0;
		desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT |
				  IRQ_WAITING | IRQ_INPROGRESS);
		if (desc->handler && desc->handler->startup)
			desc->handler->startup(irq);
		unmask_irq(irq);
	}
	spin_unlock_irqrestore(&desc->lock,flags);

	register_irq_proc(irq);
	return 0;
}

#ifdef CONFIG_SMP

inline void synchronize_irq(unsigned int irq)
{
	while (get_irq_desc(irq)->status & IRQ_INPROGRESS)
		cpu_relax();
}

EXPORT_SYMBOL(synchronize_irq);

#endif /* CONFIG_SMP */

/* XXX This could be promoted to a real free_irq() - Anton */
static int
do_free_irq(int irq, void* dev_id)
{
	irq_desc_t *desc = get_irq_desc(irq);
	struct irqaction **p;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock,flags);
	p = &desc->action;
	for (;;) {
		struct irqaction * action = *p;
		if (action) {
			struct irqaction **pp = p;
			p = &action->next;
			if (action->dev_id != dev_id)
				continue;

			/* Found it - now remove it from the list of entries */
			*pp = action->next;
			if (!desc->action) {
				desc->status |= IRQ_DISABLED;
				mask_irq(irq);
			}
			spin_unlock_irqrestore(&desc->lock,flags);

			/* Wait to make sure it's not being used on another CPU */
			synchronize_irq(irq);
			kfree(action);
			return 0;
		}
		printk("Trying to free free IRQ%d\n",irq);
		spin_unlock_irqrestore(&desc->lock,flags);
		break;
	}
	return -ENOENT;
}


int request_irq(unsigned int irq,
	irqreturn_t (*handler)(int, void *, struct pt_regs *),
	unsigned long irqflags, const char * devname, void *dev_id)
{
	struct irqaction *action;
	int retval;

	if (irq >= NR_IRQS)
		return -EINVAL;
	if (!handler)
		/* We could implement a real free_irq() here instead... */
		return do_free_irq(irq, dev_id);

	action = kmalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action) {
		printk(KERN_ERR "kmalloc() failed for irq %d !\n", irq);
		return -ENOMEM;
	}

	action->handler = handler;
	action->flags = irqflags;
	cpus_clear(action->mask);
	action->name = devname;
	action->dev_id = dev_id;
	action->next = NULL;

	retval = setup_irq(irq, action);
	if (retval)
		kfree(action);

	return retval;
}

EXPORT_SYMBOL(request_irq);

void free_irq(unsigned int irq, void *dev_id)
{
	request_irq(irq, NULL, 0, NULL, dev_id);
}

EXPORT_SYMBOL(free_irq);
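
/*
 * Illustrative usage sketch (the "foo" driver, its device structure
 * and IRQ number are hypothetical):
 *
 *	static irqreturn_t foo_interrupt(int irq, void *dev_id,
 *					 struct pt_regs *regs)
 *	{
 *		struct foo_device *dev = dev_id;
 *		...
 *		return IRQ_HANDLED;
 *	}
 *
 *	if (request_irq(dev->irq, foo_interrupt, SA_SHIRQ, "foo", dev))
 *		goto fail;
 *	...
 *	free_irq(dev->irq, dev);	-- dev_id picks the action to remove
 */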

/*
 * Generic enable/disable code: this just calls
 * down into the PIC-specific version for the actual
 * hardware disable after having gotten the irq
 * controller lock. 
 */
 
/**
 *	disable_irq_nosync - disable an irq without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Disables and enables are
 *	nested.  Unlike disable_irq(), this function does not ensure
 *	existing instances of the IRQ handler have completed before
 *	returning.
 *
 *	This function may be called from IRQ context.
 */
 
inline void disable_irq_nosync(unsigned int irq)
{
	irq_desc_t *desc = get_irq_desc(irq);
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	if (!desc->depth++) {
		if (!(desc->status & IRQ_PER_CPU))
			desc->status |= IRQ_DISABLED;
		mask_irq(irq);
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}

EXPORT_SYMBOL(disable_irq_nosync);

/**
 *	disable_irq - disable an irq and wait for completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Disables and enables are
 *	nested: for two disables you need two enables.  This
 *	function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need, you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
 
void disable_irq(unsigned int irq)
{
	irq_desc_t *desc = get_irq_desc(irq);
	disable_irq_nosync(irq);
	if (desc->action)
		synchronize_irq(irq);
}

EXPORT_SYMBOL(disable_irq);

/**
 *	enable_irq - enable interrupt handling on an irq
 *	@irq: Interrupt to enable
 *
 *	Re-enables the processing of interrupts on this IRQ line,
 *	provided no disable_irq() calls remain in effect.
 *
 *	This function may be called from IRQ context.
 */
 
void enable_irq(unsigned int irq)
{
	irq_desc_t *desc = get_irq_desc(irq);
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	switch (desc->depth) {
	case 1: {
		unsigned int status = desc->status & ~IRQ_DISABLED;
		desc->status = status;
		if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
			desc->status = status | IRQ_REPLAY;
			hw_resend_irq(desc->handler,irq);
		}
		unmask_irq(irq);
		/* fall-through */
	}
	default:
		desc->depth--;
		break;
	case 0:
		printk("enable_irq(%u) unbalanced from %p\n", irq,
		       __builtin_return_address(0));
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}

EXPORT_SYMBOL(enable_irq);
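
/*
 * The depth counter makes disables and enables nest.  An illustrative
 * sequence (the irq number is hypothetical):
 *
 *	disable_irq(dev->irq);		-- depth 0 -> 1, line masked
 *	disable_irq_nosync(dev->irq);	-- depth 1 -> 2, still masked
 *	enable_irq(dev->irq);		-- depth 2 -> 1, still masked
 *	enable_irq(dev->irq);		-- depth 1 -> 0, line unmasked
 *
 * One more enable_irq() here would trip the "unbalanced" warning above.
 */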

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction * action;
	irq_desc_t *desc;
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, "           ");
		for (j=0; j<NR_CPUS; j++) {
			if (cpu_online(j))
				seq_printf(p, "CPU%d       ",j);
		}
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		desc = get_irq_desc(i);
		spin_lock_irqsave(&desc->lock, flags);
		action = desc->action;
		if (!action || !action->handler)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifdef CONFIG_SMP
		for (j = 0; j < NR_CPUS; j++) {
			if (cpu_online(j))
				seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
		}
#else
		seq_printf(p, "%10u ", kstat_irqs(i));
#endif /* CONFIG_SMP */
		if (desc->handler)
			seq_printf(p, " %s ", desc->handler->typename);
		else
			seq_printf(p, "  None      ");
		seq_printf(p, "%s", (desc->status & IRQ_LEVEL) ? "Level " : "Edge  ");
		seq_printf(p, "    %s", action->name);
		for (action=action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);
		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&desc->lock, flags);
	} else if (i == NR_IRQS)
		seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts);
	return 0;
}
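
/*
 * Illustrative /proc/interrupts output built by show_interrupts()
 * above (the counts, device names and PIC type are invented):
 *
 *	           CPU0       CPU1
 *	 15:      31984      30023  XICS  Level     ide0
 *	 18:        704        722  XICS  Level     eth0
 *	BAD:          0
 */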

int handle_irq_event(int irq, struct pt_regs *regs, struct irqaction *action)
{
	int status = 0;
	int retval = 0;

	if (!(action->flags & SA_INTERRUPT))
		local_irq_enable();

	do {
		status |= action->flags;
		retval |= action->handler(irq, action->dev_id, regs);
		action = action->next;
	} while (action);
	if (status & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	local_irq_disable();
	return retval;
}

static void __report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
{
	struct irqaction *action;

	if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
		printk(KERN_ERR "irq event %d: bogus return value %x\n",
				irq, action_ret);
	} else {
		printk(KERN_ERR "irq %d: nobody cared!\n", irq);
	}
	dump_stack();
	printk(KERN_ERR "handlers:\n");
	action = desc->action;
	do {
		printk(KERN_ERR "[<%p>]", action->handler);
		print_symbol(" (%s)",
			(unsigned long)action->handler);
		printk("\n");
		action = action->next;
	} while (action);
}

static void report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
{
	static int count = 100;

	if (count) {
		count--;
		__report_bad_irq(irq, desc, action_ret);
	}
}

static int noirqdebug;

static int __init noirqdebug_setup(char *str)
{
	noirqdebug = 1;
	printk("IRQ lockup detection disabled\n");
	return 1;
}

__setup("noirqdebug", noirqdebug_setup);

/*
 * If 99,900 of the previous 100,000 interrupts have not been handled then
 * assume that the IRQ is stuck in some manner.  Drop a diagnostic and try to
 * turn the IRQ off.
 *
 * (The other 100-of-100,000 interrupts may have been a correctly-functioning
 *  device sharing an IRQ with the failing one)
 *
 * Called under desc->lock
 */
static void note_interrupt(int irq, irq_desc_t *desc, irqreturn_t action_ret)
{
	if (action_ret != IRQ_HANDLED) {
		desc->irqs_unhandled++;
		if (action_ret != IRQ_NONE)
			report_bad_irq(irq, desc, action_ret);
	}

	desc->irq_count++;
	if (desc->irq_count < 100000)
		return;

	desc->irq_count = 0;
	if (desc->irqs_unhandled > 99900) {
		/*
		 * The interrupt is stuck
		 */
		__report_bad_irq(irq, desc, action_ret);
		/*
		 * Now kill the IRQ
		 */
		printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
		desc->status |= IRQ_DISABLED;
		desc->handler->disable(irq);
	}
	desc->irqs_unhandled = 0;
}

/*
 * Eventually, this should take an array of interrupts and an array size
 * so it can dispatch multiple interrupts.
 */
void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq)
{
	int status;
	struct irqaction *action;
	int cpu = smp_processor_id();
	irq_desc_t *desc = get_irq_desc(irq);
	irqreturn_t action_ret;
#ifdef CONFIG_IRQSTACKS
	struct thread_info *curtp, *irqtp;
#endif

	kstat_cpu(cpu).irqs[irq]++;

	if (desc->status & IRQ_PER_CPU) {
		/* no locking required for CPU-local interrupts: */
		ack_irq(irq);
		action_ret = handle_irq_event(irq, regs, desc->action);
		desc->handler->end(irq);
		return;
	}

	spin_lock(&desc->lock);
	ack_irq(irq);
	/*
	 * REPLAY is when Linux resends an IRQ that was dropped earlier.
	 * WAITING is used by probe to mark irqs that are being tested.
	 */
	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
	status |= IRQ_PENDING; /* we _want_ to handle it */

	/*
	 * If the IRQ is disabled for whatever reason, we cannot
	 * use the action we have.
	 */
	action = NULL;
	if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
		action = desc->action;
		if (!action || !action->handler) {
			ppc_spurious_interrupts++;
			printk(KERN_DEBUG "Unhandled interrupt %x, disabled\n", irq);
			/* We can't call disable_irq here, it would deadlock */
			if (!desc->depth)
				desc->depth = 1;
			desc->status |= IRQ_DISABLED;
			/* This is not a real spurious interrupt, we
			 * have to eoi it, so we jump to out
			 */
			mask_irq(irq);
			goto out;
		}
		status &= ~IRQ_PENDING; /* we commit to handling */
		status |= IRQ_INPROGRESS; /* we are handling it */
	}
	desc->status = status;

	/*
	 * If there is no IRQ handler or it was disabled, exit early.
	 * Since we set PENDING, if another processor is handling
	 * a different instance of this same irq, the other processor
	 * will take care of it.
	 */
	if (unlikely(!action))
		goto out;

	/*
	 * Edge triggered interrupts need to remember
	 * pending events.
	 * This applies to any hw interrupts that allow a second
	 * instance of the same irq to arrive while we are in do_IRQ
	 * or in the handler. But the code here only handles the _second_
	 * instance of the irq, not the third or fourth. So it is mostly
	 * useful for irq hardware that does not mask cleanly in an
	 * SMP environment.
	 */
	for (;;) {
		spin_unlock(&desc->lock);

#ifdef CONFIG_IRQSTACKS
		/* Switch to the irq stack to handle this */
		curtp = current_thread_info();
		irqtp = hardirq_ctx[smp_processor_id()];
		if (curtp != irqtp) {
			irqtp->task = curtp->task;
			irqtp->flags = 0;
			action_ret = call_handle_irq_event(irq, regs, action, irqtp);
			irqtp->task = NULL;
			if (irqtp->flags)
				set_bits(irqtp->flags, &curtp->flags);
		} else
#endif
			action_ret = handle_irq_event(irq, regs, action);

		spin_lock(&desc->lock);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret);
		if (likely(!(desc->status & IRQ_PENDING)))
			break;
		desc->status &= ~IRQ_PENDING;
	}
out:
	desc->status &= ~IRQ_INPROGRESS;
	/*
	 * The ->end() handler has to deal with interrupts which got
	 * disabled while the handler was running.
	 */
	if (desc->handler) {
		if (desc->handler->end)
			desc->handler->end(irq);
		else if (desc->handler->enable)
			desc->handler->enable(irq);
	}
	spin_unlock(&desc->lock);
}

#ifdef CONFIG_PPC_ISERIES
int do_IRQ(struct pt_regs *regs)
{
	struct paca_struct *lpaca;
	struct ItLpQueue *lpq;

	irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* Debugging check for stack overflow: is there less than 4KB free? */
	{
		long sp;

		sp = __get_SP() & (THREAD_SIZE-1);

		if (unlikely(sp < (sizeof(struct thread_info) + 4096))) {
			printk("do_IRQ: stack overflow: %ld\n",
				sp - sizeof(struct thread_info));
			dump_stack();
		}
	}
#endif

	lpaca = get_paca();
#ifdef CONFIG_SMP
	if (lpaca->xLpPaca.xIntDword.xFields.xIpiCnt) {
		lpaca->xLpPaca.xIntDword.xFields.xIpiCnt = 0;
		iSeries_smp_message_recv(regs);
	}
#endif /* CONFIG_SMP */
	lpq = lpaca->lpQueuePtr;
	if (lpq && ItLpQueue_isLpIntPending(lpq))
		lpEvent_count += ItLpQueue_process(lpq, regs);

	irq_exit();

	if (lpaca->xLpPaca.xIntDword.xFields.xDecrInt) {
		lpaca->xLpPaca.xIntDword.xFields.xDecrInt = 0;
		/* Signal a fake decrementer interrupt */
		timer_interrupt(regs);
	}

	return 1; /* lets ret_from_int know we can do checks */
}

#else	/* CONFIG_PPC_ISERIES */

int do_IRQ(struct pt_regs *regs)
{
	int irq, first = 1;

	irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* Debugging check for stack overflow: is there less than 4KB free? */
	{
		long sp;

		sp = __get_SP() & (THREAD_SIZE-1);

		if (unlikely(sp < (sizeof(struct thread_info) + 4096))) {
			printk("do_IRQ: stack overflow: %ld\n",
				sp - sizeof(struct thread_info));
			dump_stack();
		}
	}
#endif

	/*
	 * Every arch is required to implement ppc_md.get_irq.
 * This function will either return an irq number or -1 to
 * indicate there are no more pending; -1 on the very first
 * pass through the loop means no IRQ was pending at all.
	 * The value -2 is for buggy hardware and means that this IRQ
	 * has already been handled. -- Tom
	 */
	while ((irq = ppc_md.get_irq(regs)) >= 0) {
		ppc_irq_dispatch_handler(regs, irq);
		first = 0;
	}
	if (irq != -2 && first)
		/* That's not SMP safe ... but who cares ? */
		ppc_spurious_interrupts++;

	irq_exit();

	return 1; /* lets ret_from_int know we can do checks */
}
#endif	/* CONFIG_PPC_ISERIES */

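/*
 * IRQ autoprobing is not implemented on this platform; the probe_irq_*
 * routines below are stubs (probing always finds nothing) kept so that
 * generic drivers which call them still work.
 */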
unsigned long probe_irq_on (void)
{
	return 0;
}

EXPORT_SYMBOL(probe_irq_on);

int probe_irq_off (unsigned long irqs)
{
	return 0;
}

EXPORT_SYMBOL(probe_irq_off);

unsigned int probe_irq_mask(unsigned long irqs)
{
	return 0;
}

void __init init_IRQ(void)
{
	static int once = 0;

	if (once)
		return;

	once++;

	ppc_md.init_IRQ();
	irq_ctx_init();
}

static struct proc_dir_entry * root_irq_dir;
static struct proc_dir_entry * irq_dir [NR_IRQS];
static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];

/* Protected by get_irq_desc(irq)->lock. */
#ifdef CONFIG_IRQ_ALL_CPUS
cpumask_t irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };
#else  /* CONFIG_IRQ_ALL_CPUS */
cpumask_t irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_NONE };
#endif /* CONFIG_IRQ_ALL_CPUS */

static int irq_affinity_read_proc (char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	int len = cpumask_scnprintf(page, count, irq_affinity[(long)data]);
	if (count - len < 2)
		return -EINVAL;
	len += sprintf(page + len, "\n");
	return len;
}

static int irq_affinity_write_proc (struct file *file, const char __user *buffer,
					unsigned long count, void *data)
{
	unsigned int irq = (long)data;
	irq_desc_t *desc = get_irq_desc(irq);
	int ret;
	cpumask_t new_value, tmp;

	if (!desc->handler->set_affinity)
		return -EIO;

	ret = cpumask_parse(buffer, count, new_value);
	if (ret != 0)
		return ret;

	/*
	 * We check for CPU_MASK_ALL in xics to send irqs to all cpus.
	 * In some cases CPU_MASK_ALL is smaller than the cpumask (eg
	 * NR_CPUS == 32 and cpumask is a long), so we mask it here to
	 * be consistent.
	 */
	cpus_and(new_value, new_value, CPU_MASK_ALL);

	/*
	 * Grab lock here so cpu_online_map can't change, and also
	 * protect irq_affinity[].
	 */
	spin_lock(&desc->lock);

	/*
	 * Do not allow disabling IRQs completely - it's too easy a
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	cpus_and(tmp, new_value, cpu_online_map);
	if (cpus_empty(tmp)) {
		ret = -EINVAL;
		goto out;
	}

	irq_affinity[irq] = new_value;
	desc->handler->set_affinity(irq, new_value);
	ret = count;

out:
	spin_unlock(&desc->lock);
	return ret;
}
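
/*
 * Illustrative use from userspace (the irq number is hypothetical);
 * cpumask_parse() reads the mask as hex:
 *
 *	# echo 3 > /proc/irq/19/smp_affinity	-- irq 19 -> cpus 0 and 1
 *	# cat /proc/irq/19/smp_affinity
 *	00000003
 */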

static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	int len = cpumask_scnprintf(page, count, *(cpumask_t *)data);
	if (count - len < 2)
		return -EINVAL;
	len += sprintf(page + len, "\n");
	return len;
}

static int prof_cpu_mask_write_proc (struct file *file, const char __user *buffer,
					unsigned long count, void *data)
{
	cpumask_t *mask = (cpumask_t *)data;
	unsigned long full_count = count, err;
	cpumask_t new_value;

	err = cpumask_parse(buffer, count, new_value);
	if (err)
		return err;

	*mask = new_value;

#ifdef CONFIG_PPC_ISERIES
	{
		unsigned i;
		for (i=0; i<NR_CPUS; ++i) {
			if (paca[i].prof_buffer && cpu_isset(i, new_value))
				paca[i].prof_enabled = 1;
			else
				paca[i].prof_enabled = 0;
		}
	}
#endif

	return full_count;
}

#define MAX_NAMELEN 10

static void register_irq_proc (unsigned int irq)
{
	struct proc_dir_entry *entry;
	char name [MAX_NAMELEN];

	if (!root_irq_dir || (irq_desc[irq].handler == NULL) || irq_dir[irq])
		return;

	memset(name, 0, MAX_NAMELEN);
	sprintf(name, "%d", irq);

	/* create /proc/irq/1234 */
	irq_dir[irq] = proc_mkdir(name, root_irq_dir);

	/* create /proc/irq/1234/smp_affinity */
	entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);

	if (entry) {
		entry->nlink = 1;
		entry->data = (void *)(long)irq;
		entry->read_proc = irq_affinity_read_proc;
		entry->write_proc = irq_affinity_write_proc;
	}

	smp_affinity_entry[irq] = entry;
}

cpumask_t prof_cpu_mask = CPU_MASK_ALL;

void init_irq_proc (void)
{
	struct proc_dir_entry *entry;
	int i;

	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", 0);

	/* create /proc/irq/prof_cpu_mask */
	entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);

	if (!entry)
		return;

	entry->nlink = 1;
	entry->data = (void *)&prof_cpu_mask;
	entry->read_proc = prof_cpu_mask_read_proc;
	entry->write_proc = prof_cpu_mask_write_proc;

	/*
	 * Create entries for all existing IRQs.
	 */
	for_each_irq(i) {
		if (get_irq_desc(i)->handler == NULL)
			continue;
		register_irq_proc(i);
	}
}

irqreturn_t no_action(int irq, void *dev, struct pt_regs *regs)
{
	return IRQ_NONE;
}

#ifndef CONFIG_PPC_ISERIES
/*
 * Virtual IRQ mapping code, used on systems with XICS interrupt controllers.
 */

#define UNDEFINED_IRQ 0xffffffff
unsigned int virt_irq_to_real_map[NR_IRQS];

/*
 * Don't use virtual irqs 0, 1, 2 for devices.
 * The pcnet32 driver considers interrupt numbers < 2 to be invalid,
 * and 2 is the XICS IPI interrupt.
 * We limit virtual irqs to 17 less than NR_IRQS so that when we
 * offset them by 16 (to reserve the first 16 for ISA interrupts)
 * we don't end up with an interrupt number >= NR_IRQS.
 */
#define MIN_VIRT_IRQ	3
#define MAX_VIRT_IRQ	(NR_IRQS - NUM_ISA_INTERRUPTS - 1)
#define NR_VIRT_IRQS	(MAX_VIRT_IRQ - MIN_VIRT_IRQ + 1)
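
/*
 * Worked example with hypothetical values: if NR_IRQS were 512 and
 * NUM_ISA_INTERRUPTS were 16, MAX_VIRT_IRQ would be 495 and
 * NR_VIRT_IRQS 493, so real_irq 1000 first hashes to
 * (1000 % 493) + 3 = 17; if slot 17 already maps a different real
 * irq, virt_irq_create_mapping() probes forward for a free slot.
 */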

void
virt_irq_init(void)
{
	int i;
	for (i = 0; i < NR_IRQS; i++)
		virt_irq_to_real_map[i] = UNDEFINED_IRQ;
}

/* Create a mapping for a real_irq if it doesn't already exist.
 * Return the virtual irq as a convenience.
 */
int virt_irq_create_mapping(unsigned int real_irq)
{
	unsigned int virq, first_virq;
	static int warned;

	if (naca->interrupt_controller == IC_OPEN_PIC)
		return real_irq;	/* no mapping for openpic (for now) */

	/* don't map interrupts < MIN_VIRT_IRQ */
	if (real_irq < MIN_VIRT_IRQ) {
		virt_irq_to_real_map[real_irq] = real_irq;
		return real_irq;
	}

	/* map to a number between MIN_VIRT_IRQ and MAX_VIRT_IRQ */
	virq = real_irq;
	if (virq > MAX_VIRT_IRQ)
		virq = (virq % NR_VIRT_IRQS) + MIN_VIRT_IRQ;

	/* search for this number or a free slot */
	first_virq = virq;
	while (virt_irq_to_real_map[virq] != UNDEFINED_IRQ) {
		if (virt_irq_to_real_map[virq] == real_irq)
			return virq;
		if (++virq > MAX_VIRT_IRQ)
			virq = MIN_VIRT_IRQ;
		if (virq == first_virq)
			goto nospace;	/* oops, no free slots */
	}

	virt_irq_to_real_map[virq] = real_irq;
	return virq;

 nospace:
	if (!warned) {
		printk(KERN_CRIT "Interrupt table is full\n");
		printk(KERN_CRIT "Increase NR_IRQS (currently %d) "
		       "in your kernel sources and rebuild.\n", NR_IRQS);
		warned = 1;
	}
	return NO_IRQ;
}

/*
 * In most cases will get a hit on the very first slot checked in the
 * virt_irq_to_real_map.  Only when there are a large number of
 * IRQs will this be expensive.
 */
unsigned int real_irq_to_virt_slowpath(unsigned int real_irq)
{
	unsigned int virq;
	unsigned int first_virq;

	virq = real_irq;

	if (virq > MAX_VIRT_IRQ)
		virq = (virq % NR_VIRT_IRQS) + MIN_VIRT_IRQ;

	first_virq = virq;

	do {
		if (virt_irq_to_real_map[virq] == real_irq)
			return virq;

		virq++;

		/* ">" rather than ">=" so the MAX_VIRT_IRQ slot is checked too */
		if (virq > MAX_VIRT_IRQ)
			virq = 0;

	} while (first_virq != virq);

	return NO_IRQ;

}

#endif /* CONFIG_PPC_ISERIES */

#ifdef CONFIG_IRQSTACKS
struct thread_info *softirq_ctx[NR_CPUS];
struct thread_info *hardirq_ctx[NR_CPUS];

void irq_ctx_init(void)
{
	struct thread_info *tp;
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
		tp = softirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = SOFTIRQ_OFFSET;

		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
		tp = hardirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = HARDIRQ_OFFSET;
	}
}

void do_softirq(void)
{
	unsigned long flags;
	struct thread_info *curtp, *irqtp;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		curtp = current_thread_info();
		irqtp = softirq_ctx[smp_processor_id()];
		irqtp->task = curtp->task;
		call_do_softirq(irqtp);
		irqtp->task = NULL;
	}

	local_irq_restore(flags);
}
EXPORT_SYMBOL(do_softirq);

#endif /* CONFIG_IRQSTACKS */