Commit d31d4e52 authored by Ingo Molnar's avatar Ingo Molnar Committed by Linus Torvalds

[PATCH] generic irq subsystem: x86 port

x86 port of generic hardirq handling.

akpm: (in response to build errors)

- remove APIC_MISMATCH_DEBUG altogether.  Just make it synonymous with
  CONFIG_X86_IO_APIC

- Move the definition of irq_mis_count over to io_apic.c
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 653853bc
...@@ -1194,6 +1194,13 @@ source "crypto/Kconfig" ...@@ -1194,6 +1194,13 @@ source "crypto/Kconfig"
source "lib/Kconfig" source "lib/Kconfig"
#
# Use the generic interrupt handling code in kernel/hardirq.c:
#
config GENERIC_HARDIRQS
bool
default y
config X86_SMP config X86_SMP
bool bool
depends on SMP && !X86_VOYAGER depends on SMP && !X86_VOYAGER
......
...@@ -47,6 +47,24 @@ int apic_verbosity; ...@@ -47,6 +47,24 @@ int apic_verbosity;
static void apic_pm_activate(void); static void apic_pm_activate(void);
/*
 * ack_bad_irq - acknowledge a hardware interrupt that arrived on an
 * illegal (unexpected/unregistered) vector.
 *
 * Called by the generic IRQ layer; each architecture must supply this
 * hook. @irq is the offending vector number, logged in hex below.
 */
void ack_bad_irq(unsigned int irq)
{
printk("unexpected IRQ trap at vector %02x\n", irq);
/*
 * Currently unexpected vectors happen only on SMP and APIC.
 * We _must_ ack these because every local APIC has only N
 * irq slots per priority level, and a 'hanging, unacked' IRQ
 * holds up an irq slot - in excessive cases (when multiple
 * unexpected vectors occur) that might lock up the APIC
 * completely.
 */
ack_APIC_irq();
}
void __init apic_intr_init(void) void __init apic_intr_init(void)
{ {
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
......
...@@ -76,9 +76,6 @@ EXPORT_SYMBOL_GPL(kernel_fpu_begin); ...@@ -76,9 +76,6 @@ EXPORT_SYMBOL_GPL(kernel_fpu_begin);
EXPORT_SYMBOL(__ioremap); EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(ioremap_nocache); EXPORT_SYMBOL(ioremap_nocache);
EXPORT_SYMBOL(iounmap); EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(enable_irq);
EXPORT_SYMBOL(disable_irq);
EXPORT_SYMBOL(disable_irq_nosync);
EXPORT_SYMBOL(probe_irq_mask); EXPORT_SYMBOL(probe_irq_mask);
EXPORT_SYMBOL(kernel_thread); EXPORT_SYMBOL(kernel_thread);
EXPORT_SYMBOL(pm_idle); EXPORT_SYMBOL(pm_idle);
...@@ -146,7 +143,6 @@ EXPORT_SYMBOL(__write_lock_failed); ...@@ -146,7 +143,6 @@ EXPORT_SYMBOL(__write_lock_failed);
EXPORT_SYMBOL(__read_lock_failed); EXPORT_SYMBOL(__read_lock_failed);
/* Global SMP stuff */ /* Global SMP stuff */
EXPORT_SYMBOL(synchronize_irq);
EXPORT_SYMBOL(smp_call_function); EXPORT_SYMBOL(smp_call_function);
/* TLB flushing */ /* TLB flushing */
......
...@@ -42,6 +42,8 @@ ...@@ -42,6 +42,8 @@
#include "io_ports.h" #include "io_ports.h"
atomic_t irq_mis_count;
static spinlock_t ioapic_lock = SPIN_LOCK_UNLOCKED; static spinlock_t ioapic_lock = SPIN_LOCK_UNLOCKED;
/* /*
...@@ -255,8 +257,6 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask) ...@@ -255,8 +257,6 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
# define Dprintk(x...) # define Dprintk(x...)
# endif # endif
extern cpumask_t irq_affinity[NR_IRQS];
cpumask_t __cacheline_aligned pending_irq_balance_cpumask[NR_IRQS]; cpumask_t __cacheline_aligned pending_irq_balance_cpumask[NR_IRQS];
#define IRQBALANCE_CHECK_ARCH -999 #define IRQBALANCE_CHECK_ARCH -999
...@@ -1879,9 +1879,7 @@ static void end_level_ioapic_irq (unsigned int irq) ...@@ -1879,9 +1879,7 @@ static void end_level_ioapic_irq (unsigned int irq)
ack_APIC_irq(); ack_APIC_irq();
if (!(v & (1 << (i & 0x1f)))) { if (!(v & (1 << (i & 0x1f)))) {
#ifdef APIC_MISMATCH_DEBUG
atomic_inc(&irq_mis_count); atomic_inc(&irq_mis_count);
#endif
spin_lock(&ioapic_lock); spin_lock(&ioapic_lock);
__mask_and_edge_IO_APIC_irq(irq); __mask_and_edge_IO_APIC_irq(irq);
__unmask_and_level_IO_APIC_irq(irq); __unmask_and_level_IO_APIC_irq(irq);
......
This diff is collapsed.
...@@ -344,7 +344,7 @@ void release_thread(struct task_struct *dead_task) ...@@ -344,7 +344,7 @@ void release_thread(struct task_struct *dead_task)
} }
} }
release_x86_irqs(dead_task); release_vm86_irqs(dead_task);
} }
/* /*
......
...@@ -741,7 +741,7 @@ static inline void free_vm86_irq(int irqnumber) ...@@ -741,7 +741,7 @@ static inline void free_vm86_irq(int irqnumber)
spin_unlock_irqrestore(&irqbits_lock, flags); spin_unlock_irqrestore(&irqbits_lock, flags);
} }
void release_x86_irqs(struct task_struct *task) void release_vm86_irqs(struct task_struct *task)
{ {
int i; int i;
for (i = FIRST_VM86_IRQ ; i <= LAST_VM86_IRQ; i++) for (i = FIRST_VM86_IRQ ; i <= LAST_VM86_IRQ; i++)
......
...@@ -14,48 +14,6 @@ typedef struct { ...@@ -14,48 +14,6 @@ typedef struct {
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ #include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
/* void ack_bad_irq(unsigned int irq);
* We put the hardirq and softirq counter into the preemption
* counter. The bitmask has the following meaning:
*
* - bits 0-7 are the preemption count (max preemption depth: 256)
* - bits 8-15 are the softirq count (max # of softirqs: 256)
* - bits 16-23 are the hardirq count (max # of hardirqs: 256)
*
* - ( bit 26 is the PREEMPT_ACTIVE flag. )
*
* PREEMPT_MASK: 0x000000ff
* SOFTIRQ_MASK: 0x0000ff00
* HARDIRQ_MASK: 0x00ff0000
*/
#define PREEMPT_BITS 8
#define SOFTIRQ_BITS 8
#define HARDIRQ_BITS 8
#define PREEMPT_SHIFT 0
#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
/*
* The hardirq mask has to be large enough to have
* space for potentially all IRQ sources in the system
* nesting on a single CPU:
*/
#if (1 << HARDIRQ_BITS) < NR_IRQS
# error HARDIRQ_BITS is too low!
#endif
#define nmi_enter() (irq_enter())
#define nmi_exit() (preempt_count() -= HARDIRQ_OFFSET)
#define irq_enter() (preempt_count() += HARDIRQ_OFFSET)
#define irq_exit() \
do { \
preempt_count() -= IRQ_EXIT_OFFSET; \
if (!in_interrupt() && softirq_pending(smp_processor_id())) \
do_softirq(); \
preempt_enable_no_resched(); \
} while (0)
#endif /* __ASM_HARDIRQ_H */ #endif /* __ASM_HARDIRQ_H */
...@@ -45,8 +45,6 @@ asmlinkage void thermal_interrupt(struct pt_regs); ...@@ -45,8 +45,6 @@ asmlinkage void thermal_interrupt(struct pt_regs);
#define platform_legacy_irq(irq) ((irq) < 16) #define platform_legacy_irq(irq) ((irq) < 16)
#endif #endif
void mask_irq(unsigned int irq);
void unmask_irq(unsigned int irq);
void disable_8259A_irq(unsigned int irq); void disable_8259A_irq(unsigned int irq);
void enable_8259A_irq(unsigned int irq); void enable_8259A_irq(unsigned int irq);
int i8259A_irq_pending(unsigned int irq); int i8259A_irq_pending(unsigned int irq);
......
...@@ -53,8 +53,6 @@ static inline void end_edge_ioapic_irq (unsigned int irq) { } ...@@ -53,8 +53,6 @@ static inline void end_edge_ioapic_irq (unsigned int irq) { }
#define end_edge_ioapic end_edge_ioapic_irq #define end_edge_ioapic end_edge_ioapic_irq
#endif #endif
#define APIC_MISMATCH_DEBUG
#define IO_APIC_BASE(idx) \ #define IO_APIC_BASE(idx) \
((volatile int *)(__fix_to_virt(FIX_IO_APIC_BASE_0 + idx) \ ((volatile int *)(__fix_to_virt(FIX_IO_APIC_BASE_0 + idx) \
+ (mp_ioapics[idx].mpc_apicaddr & ~PAGE_MASK))) + (mp_ioapics[idx].mpc_apicaddr & ~PAGE_MASK)))
......
...@@ -21,38 +21,17 @@ static __inline__ int irq_canonicalize(int irq) ...@@ -21,38 +21,17 @@ static __inline__ int irq_canonicalize(int irq)
return ((irq == 2) ? 9 : irq); return ((irq == 2) ? 9 : irq);
} }
extern void disable_irq(unsigned int); extern void release_vm86_irqs(struct task_struct *);
extern void disable_irq_nosync(unsigned int);
extern void enable_irq(unsigned int);
extern void release_x86_irqs(struct task_struct *);
extern int can_request_irq(unsigned int, unsigned long flags);
#ifdef CONFIG_X86_LOCAL_APIC #ifdef CONFIG_X86_LOCAL_APIC
#define ARCH_HAS_NMI_WATCHDOG /* See include/linux/nmi.h */ # define ARCH_HAS_NMI_WATCHDOG /* See include/linux/nmi.h */
#endif #endif
#ifdef CONFIG_4KSTACKS #ifdef CONFIG_4KSTACKS
/* extern void irq_ctx_init(int cpu);
* per-CPU IRQ handling contexts (thread information and stack) # define __ARCH_HAS_DO_SOFTIRQ
*/
union irq_ctx {
struct thread_info tinfo;
u32 stack[THREAD_SIZE/sizeof(u32)];
};
extern union irq_ctx *hardirq_ctx[NR_CPUS];
extern union irq_ctx *softirq_ctx[NR_CPUS];
extern void irq_ctx_init(int cpu);
#define __ARCH_HAS_DO_SOFTIRQ
#else #else
#define irq_ctx_init(cpu) do { ; } while (0) # define irq_ctx_init(cpu) do { } while (0)
#endif #endif
struct irqaction;
struct pt_regs;
asmlinkage int handle_IRQ_event(unsigned int, struct pt_regs *,
struct irqaction *);
#endif /* _ASM_IRQ_H */ #endif /* _ASM_IRQ_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment