Commit 45640282 authored by Anton Blanchard

ppc64: big IRQ lock removal

Remove the brlock-based global IRQ lock: __global_cli()/__global_sti(),
__global_save_flags()/__global_restore_flags(), irqs_running() and the
global_irq_holder bookkeeping all go away. synchronize_irq() becomes a
per-irq primitive that spins until IRQ_INPROGRESS clears for the given
line, and irq_enter()/irq_exit() and the bottom-half disable/enable
macros are expressed in terms of preempt_count() and IRQ_OFFSET rather
than the per-cpu interrupt counters.

parent ea8ca97c
@@ -165,6 +165,20 @@ setup_irq(unsigned int irq, struct irqaction * new)
return 0;
}
#ifdef CONFIG_SMP
inline void synchronize_irq(unsigned int irq)
{
while (irq_desc[irq].status & IRQ_INPROGRESS) {
barrier();
cpu_relax();
}
}
#endif /* CONFIG_SMP */
/* XXX Make this into free_irq() - Anton */
/* This could be promoted to a real free_irq() ... */
static int
do_free_irq(int irq, void* dev_id)
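For orientation, here is a minimal sketch of how a driver teardown path is
expected to use the new per-irq primitive. The device structure and function
names are hypothetical, not part of this patch; free_irq() below now performs
the wait itself via do_free_irq(), so the explicit call is shown only to make
the ordering visible:

/* Hypothetical driver teardown illustrating the new per-irq
 * synchronize_irq().  struct my_dev and my_dev_shutdown() are
 * illustrative only. */
struct my_dev {
	unsigned int irq;
};

static void my_dev_shutdown(struct my_dev *dev)
{
	disable_irq_nosync(dev->irq);	/* mask the line, don't wait       */
	synchronize_irq(dev->irq);	/* wait out any in-flight handler  */
	free_irq(dev->irq, dev);	/* safe: no handler can be running */
}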
@@ -192,11 +206,8 @@ do_free_irq(int irq, void* dev_id)
}
spin_unlock_irqrestore(&desc->lock,flags);
#ifdef CONFIG_SMP
/* Wait to make sure it's not being used on another CPU */
while (desc->status & IRQ_INPROGRESS)
barrier();
#endif
synchronize_irq(irq);
irq_kfree(action);
return 0;
}
@@ -293,12 +304,7 @@ void free_irq(unsigned int irq, void *dev_id)
void disable_irq(unsigned int irq)
{
disable_irq_nosync(irq);
if (!local_irq_count(smp_processor_id())) {
do {
barrier();
} while (irq_desc[irq].status & IRQ_INPROGRESS);
}
synchronize_irq(irq);
}
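One behavioural note, stated as a reading of the diff rather than anything in
the changelog: the removed code skipped the busy-wait when already inside a
hard interrupt (the local_irq_count() test), whereas the new disable_irq()
waits unconditionally, so a handler must not call it on its own line. A
hedged sketch of the safe pattern, with a hypothetical handler name and the
handler signature of this kernel era:

/* Illustrative only: from within the handler for this same line, use
 * the non-waiting variant, since synchronize_irq() would spin forever
 * on our own IRQ_INPROGRESS bit. */
static void my_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	disable_irq_nosync(irq);	/* ok: returns immediately        */
	/* disable_irq(irq) here would deadlock: it waits for us to exit */
}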
/**
@@ -568,7 +574,7 @@ int do_IRQ(struct pt_regs *regs)
struct ItLpQueue *lpq;
#endif
irq_enter(cpu);
irq_enter();
#ifdef CONFIG_PPC_ISERIES
lpaca = get_paca();
@@ -599,7 +605,7 @@ int do_IRQ(struct pt_regs *regs)
ppc_spurious_interrupts++;
#endif
irq_exit(cpu);
irq_exit();
#ifdef CONFIG_PPC_ISERIES
if (lpaca->xLpPaca.xIntDword.xFields.xDecrInt) {
@@ -643,160 +649,6 @@ void __init init_IRQ(void)
if(ppc_md.init_ras_IRQ) ppc_md.init_ras_IRQ();
}
#ifdef CONFIG_SMP
unsigned char global_irq_holder = NO_PROC_ID;
static void show(char * str)
{
int cpu = smp_processor_id();
int i;
printk("\n%s, CPU %d:\n", str, cpu);
printk("irq: %d [ ", irqs_running());
for (i = 0; i < NR_CPUS; i++) {
if (cpu_online(i))
printk("%u ", __brlock_array[i][BR_GLOBALIRQ_LOCK]);
}
printk("]\nbh: %d [ ",
(spin_is_locked(&global_bh_lock) ? 1 : 0));
for (i = 0; i < NR_CPUS; i++) {
if (cpu_online(i))
printk("%u ", local_bh_count(i));
}
printk("]\n");
}
#define MAXCOUNT 10000000
void synchronize_irq(void)
{
if (irqs_running()) {
cli();
sti();
}
}
static inline void get_irqlock(int cpu)
{
int count;
if ((unsigned char)cpu == global_irq_holder)
return;
count = MAXCOUNT;
again:
br_write_lock(BR_GLOBALIRQ_LOCK);
for (;;) {
spinlock_t *lock;
if (!irqs_running() &&
(local_bh_count(smp_processor_id()) || !spin_is_locked(&global_bh_lock)))
break;
br_write_unlock(BR_GLOBALIRQ_LOCK);
lock = &__br_write_locks[BR_GLOBALIRQ_LOCK].lock;
while (irqs_running() ||
spin_is_locked(lock) ||
(!local_bh_count(smp_processor_id()) && spin_is_locked(&global_bh_lock))) {
if (!--count) {
show("get_irqlock");
count = (~0 >> 1);
}
__sti();
barrier();
__cli();
}
goto again;
}
global_irq_holder = cpu;
}
/*
* A global "cli()" while in an interrupt context
* turns into just a local cli(). Interrupts
* should use spinlocks for the (very unlikely)
* case that they ever want to protect against
* each other.
*
* If we already have local interrupts disabled,
* this will not turn a local disable into a
* global one (problems with spinlocks: this makes
* save_flags+cli+sti usable inside a spinlock).
*/
void __global_cli(void)
{
unsigned long flags;
__save_flags(flags);
if (flags & (1UL << 15)) {
int cpu = smp_processor_id();
__cli();
if (!local_irq_count(cpu))
get_irqlock(cpu);
}
}
void __global_sti(void)
{
int cpu = smp_processor_id();
if (!local_irq_count(cpu))
release_irqlock(cpu);
__sti();
}
/*
* SMP flags value to restore to:
* 0 - global cli
* 1 - global sti
* 2 - local cli
* 3 - local sti
*/
unsigned long __global_save_flags(void)
{
int retval;
int local_enabled;
unsigned long flags;
__save_flags(flags);
local_enabled = (flags >> 15) & 1;
/* default to local */
retval = 2 + local_enabled;
/* check for global flags if we're not in an interrupt */
if (!local_irq_count(smp_processor_id())) {
if (local_enabled)
retval = 1;
if (global_irq_holder == (unsigned char) smp_processor_id())
retval = 0;
}
return retval;
}
void __global_restore_flags(unsigned long flags)
{
switch (flags) {
case 0:
__global_cli();
break;
case 1:
__global_sti();
break;
case 2:
__cli();
break;
case 3:
__sti();
break;
default:
printk("global_restore_flags: %016lx caller %p\n",
flags, __builtin_return_address(0));
}
}
#endif /* CONFIG_SMP */
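For readers skimming the deletion above, a tiny worked example of the API
being removed, using the 0-3 encoding documented in the comment before
__global_save_flags(). The sequence itself is illustrative, not patch
content:

/* Hypothetical sequence against the *removed* global-lock API. */
static void old_global_cli_example(void)
{
	unsigned long flags;

	save_flags(flags);	/* encodes 0-3 as documented above    */
	cli();			/* becomes __global_cli() on SMP      */
	/* ... critical section protected from all interrupts ...    */
	restore_flags(flags);	/* dispatches on the saved 0-3 value  */
}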
static struct proc_dir_entry * root_irq_dir;
static struct proc_dir_entry * irq_dir [NR_IRQS];
static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];
@@ -86,7 +86,6 @@ EXPORT_SYMBOL(disable_irq);
EXPORT_SYMBOL(disable_irq_nosync);
#ifdef CONFIG_SMP
EXPORT_SYMBOL(kernel_flag);
EXPORT_SYMBOL(synchronize_irq);
#endif /* CONFIG_SMP */
EXPORT_SYMBOL(register_ioctl32_conversion);
@@ -211,10 +210,6 @@ EXPORT_SYMBOL(flush_icache_user_range);
EXPORT_SYMBOL(flush_icache_page);
EXPORT_SYMBOL(flush_dcache_page);
#ifdef CONFIG_SMP
EXPORT_SYMBOL(__global_cli);
EXPORT_SYMBOL(__global_sti);
EXPORT_SYMBOL(__global_save_flags);
EXPORT_SYMBOL(__global_restore_flags);
#ifdef CONFIG_PPC_ISERIES
EXPORT_SYMBOL(__no_use_restore_flags);
EXPORT_SYMBOL(__no_use_save_flags);
@@ -255,7 +255,7 @@ int timer_interrupt(struct pt_regs * regs)
unsigned long cpu = lpaca->xPacaIndex;
struct ItLpQueue * lpq;
irq_enter(cpu);
irq_enter();
#ifndef CONFIG_PPC_ISERIES
if (!user_mode(regs))
@@ -291,7 +291,7 @@ int timer_interrupt(struct pt_regs * regs)
if (lpq && ItLpQueue_isLpIntPending(lpq))
lpEvent_count += ItLpQueue_process(lpq, regs);
irq_exit(cpu);
irq_exit();
if (softirq_pending(cpu))
do_softirq();
@@ -3,9 +3,6 @@
#define __ASM_HARDIRQ_H
/*
* Use a brlock for the global irq lock, based on sparc64.
* Anton Blanchard <anton@au1.ibm.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
@@ -13,88 +10,38 @@
*/
#include <linux/config.h>
#include <linux/brlock.h>
#include <linux/spinlock.h>
#include <asm/smp.h>
#include <linux/preempt.h>
typedef struct {
unsigned long __softirq_pending;
#ifndef CONFIG_SMP
unsigned int __local_irq_count;
#else
unsigned int __unused_on_SMP; /* We use brlocks on SMP */
#endif
unsigned int __local_bh_count;
unsigned int __syscall_count;
unsigned long idle_timestamp;
unsigned long __syscall_count;
struct task_struct * __ksoftirqd_task;
unsigned long idle_timestamp;
} ____cacheline_aligned irq_cpustat_t;
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
/* Note that local_irq_count() is replaced by ppc64 specific version for SMP */
#ifndef CONFIG_SMP
#define irq_enter(cpu) (local_irq_count(cpu)++)
#define irq_exit(cpu) (local_irq_count(cpu)--)
#else
#undef local_irq_count
#define local_irq_count(cpu) (__brlock_array[cpu][BR_GLOBALIRQ_LOCK])
#define irq_enter(cpu) br_read_lock(BR_GLOBALIRQ_LOCK)
#define irq_exit(cpu) br_read_unlock(BR_GLOBALIRQ_LOCK)
#endif
#define IRQ_OFFSET 64
/*
* Are we in an interrupt context? Either doing bottom half
* or hardware interrupt processing?
*/
#define in_interrupt() ({ int __cpu = smp_processor_id(); \
(local_irq_count(__cpu) + local_bh_count(__cpu) != 0); })
/* This tests only the local processors hw IRQ context disposition. */
#define in_irq() (local_irq_count(smp_processor_id()) != 0)
#ifndef CONFIG_SMP
#define hardirq_trylock(cpu) (local_irq_count(cpu) == 0)
#define hardirq_endlock(cpu) do { } while (0)
#define in_interrupt() \
((preempt_count() & ~PREEMPT_ACTIVE) >= IRQ_OFFSET)
#define synchronize_irq() barrier()
#define release_irqlock(cpu) do { } while (0)
#define in_irq in_interrupt
#else /* CONFIG_SMP */
#define hardirq_trylock() (!in_interrupt())
#define hardirq_endlock() do { } while (0)
static __inline__ int irqs_running(void)
{
int i;
for (i = 0; i < NR_CPUS; i++)
if (local_irq_count(i))
return 1;
return 0;
}
extern unsigned char global_irq_holder;
static inline void release_irqlock(int cpu)
{
/* if we didn't own the irq lock, just ignore... */
if(global_irq_holder == (unsigned char) cpu) {
global_irq_holder = NO_PROC_ID;
br_write_unlock(BR_GLOBALIRQ_LOCK);
}
}
static inline int hardirq_trylock(int cpu)
{
spinlock_t *lock = &__br_write_locks[BR_GLOBALIRQ_LOCK].lock;
return (!local_irq_count(cpu) && !spin_is_locked(lock));
}
#define hardirq_endlock(cpu) do { (void)(cpu); } while (0)
extern void synchronize_irq(void);
#define irq_enter() (preempt_count() += IRQ_OFFSET)
#define irq_exit() (preempt_count() -= IRQ_OFFSET)
#ifndef CONFIG_SMP
# define synchronize_irq(irq) barrier()
#else
extern void synchronize_irq(unsigned int irq);
#endif /* CONFIG_SMP */
#endif /* __KERNEL__ */
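A short sketch, not part of the patch, of how the preempt_count() layout
replaces the old per-cpu counters: hardirq entry adds IRQ_OFFSET (64), so a
single comparison serves for in_interrupt(). The demo function is assumed
context only:

/* Shows the arithmetic the new macros perform on preempt_count(). */
static void preempt_count_sketch(void)
{
	/* preempt_count() == 0: neither in irq nor bh context        */
	irq_enter();	/* preempt_count() += IRQ_OFFSET -> 64         */
	/* in_interrupt(): (64 & ~PREEMPT_ACTIVE) >= 64 -> true       */
	irq_exit();	/* preempt_count() -= IRQ_OFFSET -> 0          */
	/* in_interrupt() is false again                              */
}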
@@ -18,13 +18,10 @@ extern spinlock_t kernel_flag;
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task, cpu) \
#define release_kernel_lock(task) \
do { \
if (unlikely(task->lock_depth >= 0)) { \
if (unlikely(task->lock_depth >= 0)) \
spin_unlock(&kernel_flag); \
if (global_irq_holder == (cpu)) \
BUG(); \
} \
} while (0)
/*
@@ -8,20 +8,28 @@
* 2 of the License, or (at your option) any later version.
*/
#include <linux/preempt.h>
#include <asm/hardirq.h>
#define local_bh_disable() do { local_bh_count(smp_processor_id())++; barrier(); } while (0)
#define __local_bh_enable() do { barrier(); local_bh_count(smp_processor_id())--; } while (0)
#define local_bh_disable() \
do { preempt_count() += IRQ_OFFSET; barrier(); } while (0)
#define __local_bh_enable() \
do { barrier(); preempt_count() -= IRQ_OFFSET; } while (0)
#define local_bh_enable() \
do { \
barrier(); \
if (!--local_bh_count(smp_processor_id()) \
&& softirq_pending(smp_processor_id())) { \
if (unlikely((preempt_count() == IRQ_OFFSET) && \
softirq_pending(smp_processor_id()))) { \
__local_bh_enable(); \
do_softirq(); \
preempt_check_resched(); \
} else { \
__local_bh_enable(); \
preempt_check_resched(); \
} \
} while (0)
#define in_softirq() (local_bh_count(smp_processor_id()) != 0)
#define in_softirq() in_interrupt()
#endif /* __ASM_SOFTIRQ_H */
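Again as a sketch rather than patch content, the nesting behaviour of the
rewritten local_bh_enable(): only the outermost enable sees
preempt_count() == IRQ_OFFSET and may run softirqs. The call sequence and
function name are hypothetical:

/* Annotated with the counter values the new macros produce. */
static void bh_nesting_sketch(void)
{
	local_bh_disable();	/* preempt_count():  0 ->  64          */
	local_bh_disable();	/* preempt_count(): 64 -> 128          */
	local_bh_enable();	/* 128 != 64: just drop back to 64     */
	local_bh_enable();	/* 64 == 64: do_softirq() if pending   */
}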
@@ -101,23 +101,15 @@ extern void dump_regs(struct pt_regs *);
#ifndef CONFIG_SMP
/*
* Compatibility macros - they will be removed after some time.
*/
#define cli() __cli()
#define sti() __sti()
#define save_flags(flags) __save_flags(flags)
#define restore_flags(flags) __restore_flags(flags)
#define save_and_cli(flags) __save_and_cli(flags)
#else /* CONFIG_SMP */
extern void __global_cli(void);
extern void __global_sti(void);
extern unsigned long __global_save_flags(void);
extern void __global_restore_flags(unsigned long);
#define cli() __global_cli()
#define sti() __global_sti()
#define save_flags(x) ((x)=__global_save_flags())
#define restore_flags(x) __global_restore_flags(x)
#endif /* !CONFIG_SMP */
#define local_irq_disable() __cli()