Commit 7559047b authored by David S. Miller

SPARC64: First cut at converting to new 2.5.x IRQ/BH scheme.

parent 1961c213
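
The diff below removes sparc64's brlock-based global IRQ lock (global_cli()/global_sti(), local_irq_count()) and moves hard-IRQ and bottom-half accounting into the preempt count, matching the generic 2.5.x code. A rough user-space model of that accounting follows; IRQ_OFFSET and the threshold test come from the asm-sparc64/hardirq.h hunk further down, while the standalone counter and the printf scaffolding are purely illustrative and not kernel code.

#include <stdio.h>

#define IRQ_OFFSET 64		/* same value the new hardirq.h uses */

/* Stand-in for the per-task preempt counter (a global only for this demo). */
static unsigned int preempt_count;

static void preempt_disable(void) { preempt_count += 1; }
static void preempt_enable(void)  { preempt_count -= 1; }
static void irq_enter(void)       { preempt_count += IRQ_OFFSET; }
static void irq_exit(void)        { preempt_count -= IRQ_OFFSET; }

/* Any IRQ_OFFSET's worth of count means "in interrupt context"
 * (the real macro also masks out PREEMPT_ACTIVE first). */
static int in_interrupt(void)     { return preempt_count >= IRQ_OFFSET; }

int main(void)
{
	preempt_disable();		/* low bits: preemption nesting   */
	irq_enter();			/* +IRQ_OFFSET: hard-IRQ nesting  */
	printf("count=%u in_interrupt=%d\n", preempt_count, in_interrupt());
	irq_exit();
	preempt_enable();
	printf("count=%u in_interrupt=%d\n", preempt_count, in_interrupt());
	return 0;
}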
@@ -566,151 +566,36 @@ void free_irq(unsigned int irq, void *dev_id)
}
#ifdef CONFIG_SMP
/* Who has the global irq brlock */
unsigned char global_irq_holder = NO_PROC_ID;
static void show(char * str)
void synchronize_irq(unsigned int irq)
{
int cpu = smp_processor_id();
int i;
printk("\n%s, CPU %d:\n", str, cpu);
printk("irq: %d [ ", irqs_running());
for (i = 0; i < NR_CPUS; i++) {
if (!cpu_online(i))
continue;
printk("%u ", __brlock_array[i][BR_GLOBALIRQ_LOCK]);
}
printk("]\nbh: %d [ ",
(spin_is_locked(&global_bh_lock) ? 1 : 0));
for (i = 0; i < NR_CPUS; i++) {
if (!cpu_online(i))
continue;
printk("%u ", local_bh_count(i));
}
printk("]\n");
}
#define MAXCOUNT 100000000
struct ino_bucket *bucket = __bucket(irq);
#if 0
#define SYNC_OTHER_ULTRAS(x) udelay(x+1)
#else
#define SYNC_OTHER_ULTRAS(x) membar("#Sync");
#endif
void synchronize_irq(void)
{
if (irqs_running()) {
cli();
sti();
}
}
static inline void get_irqlock(int cpu)
{
int count;
if ((unsigned char)cpu == global_irq_holder)
return;
count = MAXCOUNT;
again:
br_write_lock(BR_GLOBALIRQ_LOCK);
for (;;) {
spinlock_t *lock;
if (!irqs_running() &&
(local_bh_count(smp_processor_id()) || !spin_is_locked(&global_bh_lock)))
break;
br_write_unlock(BR_GLOBALIRQ_LOCK);
lock = &__br_write_locks[BR_GLOBALIRQ_LOCK].lock;
while (irqs_running() ||
spin_is_locked(lock) ||
(!local_bh_count(smp_processor_id()) && spin_is_locked(&global_bh_lock))) {
if (!--count) {
show("get_irqlock");
count = (~0 >> 1);
}
local_irq_enable();
SYNC_OTHER_ULTRAS(cpu);
local_irq_disable();
/* The following is how I wish I could implement this.
* Unfortunately the ICLR registers are read-only, you can
* only write ICLR_foo values to them. To get the current
* IRQ status you would need to get at the IRQ diag registers
* in the PCI/SBUS controller and the layout of those vary
* from one controller to the next, sigh... -DaveM
*/
unsigned long iclr = bucket->iclr;
while (1) {
u32 tmp = upa_readl(iclr);
if (tmp == ICLR_TRANSMIT ||
tmp == ICLR_PENDING) {
cpu_relax();
continue;
}
goto again;
}
global_irq_holder = cpu;
}
void __global_cli(void)
{
unsigned long flags;
local_save_flags(flags);
if(flags == 0) {
int cpu;
local_irq_disable();
cpu = smp_processor_id();
if (! local_irq_count(cpu))
get_irqlock(cpu);
}
}
void __global_sti(void)
{
int cpu;
preempt_disable();
cpu = smp_processor_id();
if (! local_irq_count(cpu))
release_irqlock(cpu);
local_irq_enable();
preempt_enable();
}
unsigned long __global_save_flags(void)
{
unsigned long flags, local_enabled, retval;
local_save_flags(flags);
local_enabled = ((flags == 0) ? 1 : 0);
retval = 2 + local_enabled;
if (! local_irq_count(smp_processor_id())) {
if (local_enabled)
retval = 1;
if (global_irq_holder == (unsigned char) smp_processor_id())
retval = 0;
}
return retval;
}
void __global_restore_flags(unsigned long flags)
{
switch (flags) {
case 0:
__global_cli();
break;
case 1:
__global_sti();
break;
case 2:
local_irq_disable();
break;
case 3:
local_irq_enable();
break;
default:
{
unsigned long pc;
__asm__ __volatile__("mov %%i7, %0" : "=r" (pc));
printk("global_restore_flags: Bogon flags(%016lx) caller %016lx\n",
flags, pc);
}
}
#else
/* So we have to do this with a INPROGRESS bit just like x86. */
while (bucket->flags & IBF_INPROGRESS)
cpu_relax();
#endif
}
#endif /* CONFIG_SMP */
void catch_disabled_ivec(struct pt_regs *regs)
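
In the hunk above, the old cli()/sti()-based synchronize_irq() is replaced by one that takes an IRQ number and simply spins until the bucket's IBF_INPROGRESS bit clears (handler_irq() sets and clears that bit further down). Here is a minimal pthread model of that wait-for-the-running-handler idea; the flag, the fake handler and sched_yield() are illustrative stand-ins, not the real bucket->flags polling.

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

/* Models IBF_INPROGRESS: nonzero while a (fake) handler is running. */
static volatile int in_progress;

static void *fake_handler(void *arg)
{
	(void)arg;
	in_progress = 1;	/* handler_irq() sets IBF_INPROGRESS on entry  */
	/* ... interrupt handler body would run here ... */
	in_progress = 0;	/* ... and clears it again before returning    */
	return NULL;
}

/* Same shape as the new synchronize_irq(): busy-wait until no handler
 * for this interrupt is in flight any more. */
static void synchronize_irq_model(void)
{
	while (in_progress)
		sched_yield();	/* stands in for cpu_relax() */
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, fake_handler, NULL);
	synchronize_irq_model();	/* returns once no handler is running */
	pthread_join(&t, NULL);
	printf("no handler in progress; safe to tear the IRQ down\n");
	return 0;
}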
@@ -831,7 +716,7 @@ void handler_irq(int irq, struct pt_regs *regs)
clear_softint(1 << irq);
#endif
irq_enter(cpu, irq);
irq_enter();
kstat.irqs[cpu][irq]++;
#ifdef CONFIG_PCI
@@ -854,6 +739,8 @@ void handler_irq(int irq, struct pt_regs *regs)
nbp = __bucket(bp->irq_chain);
bp->irq_chain = 0;
bp->flags |= IBF_INPROGRESS;
if ((flags & IBF_ACTIVE) != 0) {
#ifdef CONFIG_PCI
if ((flags & IBF_DMA_SYNC) != 0) {
@@ -891,8 +778,10 @@ void handler_irq(int irq, struct pt_regs *regs)
}
} else
bp->pending = 1;
bp->flags &= ~IBF_INPROGRESS;
}
irq_exit(cpu, irq);
irq_exit();
}
#ifdef CONFIG_BLK_DEV_FD
@@ -904,16 +793,20 @@ void sparc_floppy_irq(int irq, void *dev_cookie, struct pt_regs *regs)
struct ino_bucket *bucket;
int cpu = smp_processor_id();
irq_enter(cpu, irq);
irq_enter();
kstat.irqs[cpu][irq]++;
*(irq_work(cpu, irq)) = 0;
bucket = get_ino_in_irqaction(action) + ivector_table;
bucket->flags |= IBF_INPROGRESS;
floppy_interrupt(irq, dev_cookie, regs);
upa_writel(ICLR_IDLE, bucket->iclr);
irq_exit(cpu, irq);
bucket->flags &= ~IBF_INPROGRESS;
irq_exit();
}
#endif
@@ -107,7 +107,7 @@ int prom_callback(long *args)
* administrator has done a switch-cpu inside obp. In either
* case, the cpu is marked as in-interrupt. Drop IRQ locks.
*/
irq_exit(smp_processor_id(), 0);
irq_exit();
save_and_cli(flags);
spin_unlock(&prom_entry_lock);
cons = console_drivers;
@@ -305,7 +305,7 @@ int prom_callback(long *args)
/*
* Restore in-interrupt status for a resume from obp.
*/
irq_enter(smp_processor_id(), 0);
irq_enter();
return 0;
}
@@ -1049,12 +1049,12 @@ void smp_percpu_timer_interrupt(struct pt_regs *regs)
regs->u_regs[UREG_RETPC]);
if (!--prof_counter(cpu)) {
if (cpu == boot_cpu_id) {
irq_enter(cpu, 0);
irq_enter();
kstat.irqs[cpu][0]++;
timer_tick_interrupt(regs);
irq_exit(cpu, 0);
irq_exit();
}
update_process_times(user);
@@ -130,14 +130,9 @@ EXPORT_SYMBOL(__write_unlock);
EXPORT_SYMBOL(kernel_flag);
/* Hard IRQ locking */
EXPORT_SYMBOL(global_irq_holder);
#ifdef CONFIG_SMP
EXPORT_SYMBOL(synchronize_irq);
#endif
EXPORT_SYMBOL(__global_cli);
EXPORT_SYMBOL(__global_sti);
EXPORT_SYMBOL(__global_save_flags);
EXPORT_SYMBOL(__global_restore_flags);
#if defined(CONFIG_MCOUNT)
extern void mcount(void);
@@ -16,85 +16,32 @@
typedef struct {
unsigned int __softirq_pending;
unsigned int __unused_1;
#ifndef CONFIG_SMP
unsigned int __local_irq_count;
#else
unsigned int __unused_on_SMP; /* DaveM says use brlock for SMP irq. KAO */
#endif
unsigned int __local_bh_count;
#warning DaveM kill SMP irq brlock... no longer needed...
unsigned int __unused_2;
unsigned int __unused_3;
unsigned int __syscall_count;
struct task_struct * __ksoftirqd_task;
} ____cacheline_aligned irq_cpustat_t;
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
/* Note that local_irq_count() is replaced by sparc64 specific version for SMP */
#ifndef CONFIG_SMP
#define irq_enter(cpu, irq) ((void)(irq), local_irq_count(cpu)++)
#define irq_exit(cpu, irq) ((void)(irq), local_irq_count(cpu)--)
#else
#undef local_irq_count
#define local_irq_count(cpu) (__brlock_array[cpu][BR_GLOBALIRQ_LOCK])
#define irq_enter(cpu, irq) br_read_lock(BR_GLOBALIRQ_LOCK)
#define irq_exit(cpu, irq) br_read_unlock(BR_GLOBALIRQ_LOCK)
#endif
/*
* Are we in an interrupt context? Either doing bottom half
* or hardware interrupt processing?
*/
#define in_interrupt() ((local_irq_count(smp_processor_id()) + \
local_bh_count(smp_processor_id())) != 0)
/* This tests only the local processors hw IRQ context disposition. */
#define in_irq() (local_irq_count(smp_processor_id()) != 0)
#ifndef CONFIG_SMP
#define hardirq_trylock(cpu) ((void)(cpu), local_irq_count(smp_processor_id()) == 0)
#define hardirq_endlock(cpu) do { (void)(cpu); } while(0)
#define synchronize_irq() barrier()
#define release_irqlock(cpu) do { } while (0)
#else /* (CONFIG_SMP) */
static __inline__ int irqs_running(void)
{
int i;
for (i = 0; i < NR_CPUS; i++) {
if (!cpu_online(i))
continue;
if (local_irq_count(i))
return 1;
}
return 0;
}
#define IRQ_OFFSET 64
extern unsigned char global_irq_holder;
#define in_interrupt() \
((preempt_count() & ~PREEMPT_ACTIVE) >= IRQ_OFFSET)
#define in_irq in_interrupt
static inline void release_irqlock(int cpu)
{
/* if we didn't own the irq lock, just ignore... */
if(global_irq_holder == (unsigned char) cpu) {
global_irq_holder = NO_PROC_ID;
br_write_unlock(BR_GLOBALIRQ_LOCK);
}
}
static inline int hardirq_trylock(int cpu)
{
spinlock_t *lock = &__br_write_locks[BR_GLOBALIRQ_LOCK].lock;
#define hardirq_trylock() (!in_interrupt())
#define hardirq_endlock() do { } while (0)
return (!local_irq_count(cpu) && !spin_is_locked(lock));
}
#define irq_enter() (preempt_count() += IRQ_OFFSET)
#define irq_exit() (preempt_count() -= IRQ_OFFSET)
#define hardirq_endlock(cpu) do { (void)(cpu); } while (0)
extern void synchronize_irq(void);
#endif /* CONFIG_SMP */
#ifndef CONFIG_SMP
# define synchronize_irq() barrier()
#else
extern void synchronize_irq(void);
#endif
#endif /* !(__SPARC64_HARDIRQ_H) */
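
The new in_interrupt() above masks PREEMPT_ACTIVE out of the preempt count before comparing against IRQ_OFFSET, so a task that is merely being kernel-preempted (which sets the PREEMPT_ACTIVE bit in the same counter) is not reported as interrupt context. A small model of that masking; the PREEMPT_ACTIVE value below is illustrative, not the actual sparc64 constant.

#include <stdio.h>

#define IRQ_OFFSET	64
#define PREEMPT_ACTIVE	0x04000000	/* illustrative bit, not the sparc64 value */

static int in_interrupt(unsigned int count)
{
	return (count & ~PREEMPT_ACTIVE) >= IRQ_OFFSET;
}

int main(void)
{
	printf("%d\n", in_interrupt(PREEMPT_ACTIVE));			/* 0: only being preempted */
	printf("%d\n", in_interrupt(IRQ_OFFSET));			/* 1: inside a hard IRQ    */
	printf("%d\n", in_interrupt(PREEMPT_ACTIVE | IRQ_OFFSET));	/* 1: both at once         */
	return 0;
}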
@@ -91,6 +91,7 @@ extern unsigned char dma_sync_reg_table_entry;
#define IBF_PCI 0x02 /* Indicates PSYCHO/SABRE/SCHIZO PCI interrupt. */
#define IBF_ACTIVE 0x04 /* This interrupt is active and has a handler. */
#define IBF_MULTI 0x08 /* On PCI, indicates shared bucket. */
#define IBF_INPROGRESS 0x10 /* IRQ is being serviced. */
#define NUM_IVECS 8192
extern struct ino_bucket ivector_table[NUM_IVECS];
@@ -24,13 +24,10 @@ extern spinlock_t kernel_flag;
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task, cpu) \
#define release_kernel_lock(task) \
do { \
if (unlikely(task->lock_depth >= 0)) { \
spin_unlock(&kernel_flag); \
release_irqlock(cpu); \
local_irq_enable(); \
} \
} while (0)
/*
@@ -6,21 +6,24 @@
#ifndef __SPARC64_SOFTIRQ_H
#define __SPARC64_SOFTIRQ_H
#include <asm/atomic.h>
#include <asm/preempt.h>
#include <asm/hardirq.h>
#include <asm/system.h> /* for membar() */
#define local_bh_disable() do { barrier(); preempt_disable(); local_bh_count(smp_processor_id())++; } while (0)
#define __local_bh_enable() do { local_bh_count(smp_processor_id())--; preempt_enable(); barrier(); } while (0)
#define local_bh_enable() \
do { if (!--local_bh_count(smp_processor_id()) && \
softirq_pending(smp_processor_id())) { \
do_softirq(); \
local_irq_enable(); \
} \
preempt_enable(); \
#define local_bh_disable() do { preempt_count() += IRQ_OFFSET; barrier(); } while (0)
#define __local_bh_enable() do { barrier(); preempt_count() -= IRQ_OFFSET; } while (0)
#define local_bh_enable() \
do { if (unlikely((preempt_count() == IRQ_OFFSET) && \
softirq_pending(smp_processor_id()))) { \
__local_bh_enable(); \
do_softirq(); \
preempt_check_resched(); \
} else { \
__local_bh_enable(); \
preempt_check_resched(); \
} \
} while (0)
#define in_softirq() (local_bh_count(smp_processor_id()) != 0)
#define in_softirq() in_interrupt()
#endif /* !(__SPARC64_SOFTIRQ_H) */
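
The new local_bh_disable()/local_bh_enable() above reuse the same counter: disabling bottom halves adds IRQ_OFFSET, and local_bh_enable() only runs do_softirq() when the count drops back to exactly IRQ_OFFSET (the outermost enable, with no hard IRQ stacked on top) and softirqs are pending. A hedged user-space model of that enable-path decision follows; do_softirq_model(), the pending flag and the counter are stand-ins for the kernel's softirq machinery.

#include <stdio.h>

#define IRQ_OFFSET 64

static unsigned int preempt_count;
static int softirqs_pending;		/* models softirq_pending(cpu) */

static void do_softirq_model(void)
{
	printf("running pending softirqs\n");
	softirqs_pending = 0;
}

static void local_bh_disable(void)
{
	preempt_count += IRQ_OFFSET;
}

static void local_bh_enable(void)
{
	if (preempt_count == IRQ_OFFSET && softirqs_pending) {
		/* Outermost enable, not inside a hard IRQ: drop the count,
		 * then process whatever softirqs were raised meanwhile. */
		preempt_count -= IRQ_OFFSET;
		do_softirq_model();
	} else {
		preempt_count -= IRQ_OFFSET;
	}
}

int main(void)
{
	softirqs_pending = 1;
	local_bh_disable();
	local_bh_disable();	/* nested disable */
	local_bh_enable();	/* inner enable: count still above IRQ_OFFSET, nothing runs */
	local_bh_enable();	/* outermost enable: softirqs run here */
	return 0;
}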
@@ -66,27 +66,14 @@ enum sparc_cpu {
#define local_irq_save(flags) ((flags) = read_pil_and_cli())
#define local_irq_restore(flags) setipl((flags))
/*
* Compatibility macros - they will be removed after some time.
*/
#ifndef CONFIG_SMP
#define cli() local_irq_disable()
#define sti() local_irq_enable()
#define save_flags(x) local_save_flags(x)
#define restore_flags(x) local_irq_restore(x)
#define save_and_cli(x) local_irq_save(x)
#else
#ifndef __ASSEMBLY__
extern void __global_cli(void);
extern void __global_sti(void);
extern unsigned long __global_save_flags(void);
extern void __global_restore_flags(unsigned long flags);
#endif
#define cli() __global_cli()
#define sti() __global_sti()
#define save_flags(x) ((x) = __global_save_flags())
#define restore_flags(flags) __global_restore_flags(flags)
#define save_and_cli(flags) do { save_flags(flags); cli(); } while(0)
#endif
#define nop() __asm__ __volatile__ ("nop")