Commit b457c5cd authored by David S. Miller

Merge bk://kernel.bkbits.net/wesolows/sparc32-2.6

into nuts.davemloft.net:/disk1/BK/sparc-2.6
parents 6e9aed30 1d0b87e8
@@ -49,9 +49,6 @@
#include <asm/pcic.h>
#include <asm/cacheflush.h>
-/* Used to protect the IRQ action lists */
-spinlock_t irq_action_lock = SPIN_LOCK_UNLOCKED;
#ifdef CONFIG_SMP
#define SMP_NOP2 "nop; nop;\n\t"
#define SMP_NOP3 "nop; nop; nop;\n\t"
@@ -159,10 +156,12 @@ struct irqaction static_irqaction[MAX_STATIC_ALLOC];
int static_irq_count;
struct irqaction *irq_action[NR_IRQS] = {
-NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL,
-NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL
+[0 ... (NR_IRQS-1)] = NULL
};
+/* Used to protect the IRQ action lists */
+spinlock_t irq_action_lock = SPIN_LOCK_UNLOCKED;
int show_interrupts(struct seq_file *p, void *v)
{
int i = *(loff_t *) v;
@@ -177,11 +176,11 @@ int show_interrupts(struct seq_file *p, void *v)
return show_sun4d_interrupts(p, v);
}
+spin_lock_irqsave(&irq_action_lock, flags);
if (i < NR_IRQS) {
-local_irq_save(flags);
action = *(i + irq_action);
if (!action)
-goto skip;
+goto out_unlock;
seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
seq_printf(p, "%10u ", kstat_irqs(i));
@@ -201,9 +200,9 @@ int show_interrupts(struct seq_file *p, void *v)
action->name);
}
seq_putc(p, '\n');
-skip:
-local_irq_restore(flags);
}
+out_unlock:
+spin_unlock_irqrestore(&irq_action_lock, flags);
return 0;
}
@@ -220,14 +219,18 @@ void free_irq(unsigned int irq, void *dev_id)
return sun4d_free_irq(irq, dev_id);
}
cpu_irq = irq & (NR_IRQS - 1);
-action = *(cpu_irq + irq_action);
if (cpu_irq > 14) { /* 14 irq levels on the sparc */
printk("Trying to free bogus IRQ %d\n", irq);
return;
}
+spin_lock_irqsave(&irq_action_lock, flags);
+action = *(cpu_irq + irq_action);
if (!action->handler) {
printk("Trying to free free IRQ%d\n",irq);
-return;
+goto out_unlock;
}
if (dev_id) {
for (; action; action = action->next) {
@@ -237,11 +240,11 @@ void free_irq(unsigned int irq, void *dev_id)
}
if (!action) {
printk("Trying to free free shared IRQ%d\n",irq);
-return;
+goto out_unlock;
}
} else if (action->flags & SA_SHIRQ) {
printk("Trying to free shared IRQ%d with NULL device ID\n", irq);
-return;
+goto out_unlock;
}
if (action->flags & SA_STATIC_ALLOC)
{
@@ -250,68 +253,31 @@ void free_irq(unsigned int irq, void *dev_id)
*/
printk("Attempt to free statically allocated IRQ%d (%s)\n",
irq, action->name);
-return;
+goto out_unlock;
}
-save_and_cli(flags);
if (action && tmp)
tmp->next = action->next;
else
*(cpu_irq + irq_action) = action->next;
+spin_unlock_irqrestore(&irq_action_lock, flags);
+synchronize_irq(irq);
+spin_lock_irqsave(&irq_action_lock, flags);
kfree(action);
if (!(*(cpu_irq + irq_action)))
disable_irq(irq);
-restore_flags(flags);
+out_unlock:
+spin_unlock_irqrestore(&irq_action_lock, flags);
}
EXPORT_SYMBOL(free_irq);
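The free_irq() rework above is the core of this merge: the irqaction is unlinked while irq_action_lock is held, the lock is dropped before synchronize_irq() so a handler still running on another CPU can finish without deadlocking on that same lock, and the lock is only retaken to kfree() the action and disable the now-empty line. A minimal kernel-style sketch of that unlock/synchronize/relock pattern, with hypothetical names (my_list_lock, my_entry, my_unlink) rather than the actual sparc32 code:

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/slab.h>

/* Hypothetical per-list lock and entry type, for illustration only. */
static spinlock_t my_list_lock = SPIN_LOCK_UNLOCKED;
struct my_entry { struct my_entry *next; };
static void my_unlink(struct my_entry *e);      /* assumed helper: detach e from the list */

static void my_free_entry(unsigned int irq, struct my_entry *e)
{
        unsigned long flags;

        spin_lock_irqsave(&my_list_lock, flags);
        my_unlink(e);                           /* handlers can no longer find e */
        spin_unlock_irqrestore(&my_list_lock, flags);

        /* Must not hold the list lock here: a handler on another CPU may
         * still be running inside e and may itself take my_list_lock. */
        synchronize_irq(irq);

        spin_lock_irqsave(&my_list_lock, flags);
        kfree(e);                               /* safe: no CPU can reference e now */
        spin_unlock_irqrestore(&my_list_lock, flags);
}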
-#ifdef CONFIG_SMP
-/* Who has the global irq brlock */
-unsigned char global_irq_holder = NO_PROC_ID;
-void smp_show_backtrace_all_cpus(void);
-void show_backtrace(void);
-#define VERBOSE_DEBUG_IRQLOCK
-#define MAXCOUNT 100000000
-static void show(char * str)
-{
-int cpu = smp_processor_id();
-int i;
-printk("\n%s, CPU %d:\n", str, cpu);
-printk("irq: %d [ ", irqs_running());
-for (i = 0; i < NR_CPUS; i++) {
-if (cpu_online(i))
-printk("%u ", __brlock_array[i][BR_GLOBALIRQ_LOCK]);
-}
-printk("]\nbh: %d [ ",
-(spin_is_locked(&global_bh_lock) ? 1 : 0));
-for (i = 0; i < NR_CPUS; i++) {
-if (cpu_online(i))
-printk("%u ", local_bh_count(i));
-}
-printk("]\n");
-#ifdef VERBOSE_DEBUG_IRQLOCK
-smp_show_backtrace_all_cpus();
-#else
-show_backtrace();
-#endif
-}
-/*
-* We have to allow irqs to arrive between local_irq_enable and local_irq_disable
-*/
-#define SYNC_OTHER_CORES(x) barrier()
/*
* This is called when we want to synchronize with
* interrupts. We may for example tell a device to
@@ -319,140 +285,13 @@ static void show(char * str)
* are no interrupts that are executing on another
* CPU we need to call this function.
*/
-void synchronize_irq(void)
+#ifdef CONFIG_SMP
-{
+void synchronize_irq(unsigned int irq)
-if (irqs_running()) {
-cli();
-sti();
-}
-}
-static inline void get_irqlock(int cpu)
-{
-int count;
-if ((unsigned char)cpu == global_irq_holder)
-return;
-count = MAXCOUNT;
-again:
-br_write_lock(BR_GLOBALIRQ_LOCK);
-for (;;) {
-spinlock_t *lock;
-if (!irqs_running() &&
-(local_bh_count(smp_processor_id()) || !spin_is_locked(&global_bh_lock)))
-break;
-br_write_unlock(BR_GLOBALIRQ_LOCK);
-lock = &__br_write_locks[BR_GLOBALIRQ_LOCK].lock;
-while (irqs_running() ||
-spin_is_locked(lock) ||
-(!local_bh_count(smp_processor_id()) && spin_is_locked(&global_bh_lock))) {
-if (!--count) {
-show("get_irqlock");
-count = (~0 >> 1);
-}
-local_irq_enable();
-SYNC_OTHER_CORES(cpu);
-local_irq_disable();
-}
-goto again;
-}
-global_irq_holder = cpu;
-}
-/*
-* A global "cli()" while in an interrupt context
-* turns into just a local cli(). Interrupts
-* should use spinlocks for the (very unlikely)
-* case that they ever want to protect against
-* each other.
-*
-* If we already have local interrupts disabled,
-* this will not turn a local disable into a
-* global one (problems with spinlocks: this makes
-* save_flags+cli+sti usable inside a spinlock).
-*/
-void __global_cli(void)
-{
-unsigned long flags;
-local_save_flags(flags);
-if ((flags & PSR_PIL) != PSR_PIL) {
-int cpu = smp_processor_id();
-local_irq_disable();
-if (!local_irq_count(cpu))
-get_irqlock(cpu);
-}
-}
-void __global_sti(void)
-{
-int cpu = smp_processor_id();
-if (!local_irq_count(cpu))
-release_irqlock(cpu);
-local_irq_enable();
-}
-/*
-* SMP flags value to restore to:
-* 0 - global cli
-* 1 - global sti
-* 2 - local cli
-* 3 - local sti
-*/
-unsigned long __global_save_flags(void)
-{
-unsigned long flags, retval;
-unsigned long local_enabled = 0;
-local_save_flags(flags);
-if ((flags & PSR_PIL) != PSR_PIL)
-local_enabled = 1;
-/* default to local */
-retval = 2 + local_enabled;
-/* check for global flags if we're not in an interrupt */
-if (!local_irq_count(smp_processor_id())) {
-if (local_enabled)
-retval = 1;
-if (global_irq_holder == (unsigned char) smp_processor_id())
-retval = 0;
-}
-return retval;
-}
-void __global_restore_flags(unsigned long flags)
{
-switch (flags) {
+printk("synchronize_irq says: implement me!\n");
-case 0:
+BUG();
-__global_cli();
-break;
-case 1:
-__global_sti();
-break;
-case 2:
-local_irq_disable();
-break;
-case 3:
-local_irq_enable();
-break;
-default:
-{
-unsigned long pc;
-__asm__ __volatile__("mov %%i7, %0" : "=r" (pc));
-printk("global_restore_flags: Bogon flags(%08lx) caller %08lx\n", flags, pc);
-}
-}
}
-#endif /* SMP */
+#endif /* CONFIG_SMP */
void unexpected_irq(int irq, void *dev_id, struct pt_regs * regs)
{
@@ -533,16 +372,24 @@ int request_fast_irq(unsigned int irq,
struct irqaction *action;
unsigned long flags;
unsigned int cpu_irq;
+int ret;
#ifdef CONFIG_SMP
struct tt_entry *trap_table;
extern struct tt_entry trapbase_cpu1, trapbase_cpu2, trapbase_cpu3;
#endif
cpu_irq = irq & (NR_IRQS - 1);
-if(cpu_irq > 14)
+if(cpu_irq > 14) {
-return -EINVAL;
+ret = -EINVAL;
-if(!handler)
+goto out;
-return -EINVAL;
+}
+if(!handler) {
+ret = -EINVAL;
+goto out;
+}
+spin_lock_irqsave(&irq_action_lock, flags);
action = *(cpu_irq + irq_action);
if(action) {
if(action->flags & SA_SHIRQ)
@@ -552,11 +399,10 @@ int request_fast_irq(unsigned int irq,
/* Anyway, someone already owns it so cannot be made fast. */
printk("request_fast_irq: Trying to register yet already owned.\n");
-return -EBUSY;
+ret = -EBUSY;
+goto out_unlock;
}
-spin_lock_irqsave(&irq_action_lock, flags);
/* If this is flagged as statically allocated then we use our
* private struct which is never freed.
*/
@@ -573,8 +419,8 @@ int request_fast_irq(unsigned int irq,
GFP_ATOMIC);
if (!action) {
-spin_unlock_irqrestore(&irq_action_lock, flags);
+ret = -ENOMEM;
-return -ENOMEM;
+goto out_unlock;
}
/* Dork with trap table if we get this far. */
@@ -610,8 +456,12 @@ int request_fast_irq(unsigned int irq,
*(cpu_irq + irq_action) = action;
enable_irq(irq);
+ret = 0;
+out_unlock:
spin_unlock_irqrestore(&irq_action_lock, flags);
-return 0;
+out:
+return ret;
}
int request_irq(unsigned int irq,
@@ -621,6 +471,7 @@ int request_irq(unsigned int irq,
struct irqaction * action, *tmp = NULL;
unsigned long flags;
unsigned int cpu_irq;
+int ret;
if (sparc_cpu_model == sun4d) {
extern int sun4d_request_irq(unsigned int,
@@ -629,28 +480,33 @@ int request_irq(unsigned int irq,
return sun4d_request_irq(irq, handler, irqflags, devname, dev_id);
}
cpu_irq = irq & (NR_IRQS - 1);
-if(cpu_irq > 14)
+if(cpu_irq > 14) {
-return -EINVAL;
+ret = -EINVAL;
+goto out;
+}
+if (!handler) {
+ret = -EINVAL;
+goto out;
+}
-if (!handler)
+spin_lock_irqsave(&irq_action_lock, flags);
-return -EINVAL;
action = *(cpu_irq + irq_action);
if (action) {
if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ)) {
for (tmp = action; tmp->next; tmp = tmp->next);
} else {
-return -EBUSY;
+ret = -EBUSY;
+goto out_unlock;
}
if ((action->flags & SA_INTERRUPT) ^ (irqflags & SA_INTERRUPT)) {
printk("Attempt to mix fast and slow interrupts on IRQ%d denied\n", irq);
-return -EBUSY;
+ret = -EBUSY;
+goto out_unlock;
}
action = NULL; /* Or else! */
}
-spin_lock_irqsave(&irq_action_lock, flags);
/* If this is flagged as statically allocated then we use our
* private struct which is never freed.
*/
@@ -658,7 +514,7 @@ int request_irq(unsigned int irq,
if (static_irq_count < MAX_STATIC_ALLOC)
action = &static_irqaction[static_irq_count++];
else
-printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n",irq, devname);
+printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n", irq, devname);
}
if (action == NULL)
@@ -666,8 +522,8 @@ int request_irq(unsigned int irq,
GFP_ATOMIC);
if (!action) {
-spin_unlock_irqrestore(&irq_action_lock, flags);
+ret = -ENOMEM;
-return -ENOMEM;
+goto out_unlock;
}
action->handler = handler;
@@ -683,8 +539,12 @@ int request_irq(unsigned int irq,
*(cpu_irq + irq_action) = action;
enable_irq(irq);
+ret = 0;
+out_unlock:
spin_unlock_irqrestore(&irq_action_lock, flags);
-return 0;
+out:
+return ret;
}
EXPORT_SYMBOL(request_irq);
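The request_fast_irq()/request_irq() hunks above all converge on the same shape: argument checks that need no locking bail out through the "out" label, everything after spin_lock_irqsave() bails out through "out_unlock", and a single ret variable carries the error code, so the lock is released on exactly one path. A condensed sketch of that single-exit idiom, reusing the hypothetical my_list_lock from the sketch earlier (already_claimed() is a made-up helper, not a kernel function):

#include <linux/errno.h>

static int my_request(unsigned int irq)
{
        unsigned long flags;
        int ret;

        if (irq > 14) {                 /* checks that need no locking */
                ret = -EINVAL;
                goto out;
        }

        spin_lock_irqsave(&my_list_lock, flags);
        if (already_claimed(irq)) {     /* hypothetical helper */
                ret = -EBUSY;
                goto out_unlock;        /* error paths still drop the lock */
        }
        /* ... allocate and install the handler here ... */
        ret = 0;
out_unlock:
        spin_unlock_irqrestore(&my_list_lock, flags);
out:
        return ret;
}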
...
@@ -940,8 +940,8 @@ static void pcic_load_profile_irq(int cpu, unsigned int limit)
printk("PCIC: unimplemented code: FILE=%s LINE=%d", __FILE__, __LINE__);
}
-/* We assume the caller is local cli()'d when these are called, or else
+/* We assume the caller has disabled local interrupts when these are called,
-* very bizarre behavior will result.
+* or else very bizarre behavior will result.
*/
static void pcic_disable_pil_irq(unsigned int pil)
{
...
@@ -346,7 +346,7 @@ void exit_thread(void)
#ifndef CONFIG_SMP
if(last_task_used_math == current) {
#else
-if(current->flags & PF_USEDFPU) {
+if(current_thread_info()->flags & _TIF_USEDFPU) {
#endif
/* Keep process from leaving FPU in a bogon state. */
put_psr(get_psr() | PSR_EF);
@@ -355,7 +355,7 @@ void exit_thread(void)
#ifndef CONFIG_SMP
last_task_used_math = NULL;
#else
-current->flags &= ~PF_USEDFPU;
+current_thread_info()->flags &= ~_TIF_USEDFPU;
#endif
}
}
@@ -369,7 +369,7 @@ void flush_thread(void)
#ifndef CONFIG_SMP
if(last_task_used_math == current) {
#else
-if(current->flags & PF_USEDFPU) {
+if(current_thread_info()->flags & _TIF_USEDFPU) {
#endif
/* Clean the fpu. */
put_psr(get_psr() | PSR_EF);
@@ -378,7 +378,7 @@ void flush_thread(void)
#ifndef CONFIG_SMP
last_task_used_math = NULL;
#else
-current->flags &= ~PF_USEDFPU;
+current_thread_info()->flags &= ~_TIF_USEDFPU;
#endif
}
@@ -459,13 +459,13 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
#ifndef CONFIG_SMP
if(last_task_used_math == current) {
#else
-if(current->flags & PF_USEDFPU) {
+if(current_thread_info()->flags & _TIF_USEDFPU) {
#endif
put_psr(get_psr() | PSR_EF);
fpsave(&p->thread.float_regs[0], &p->thread.fsr,
&p->thread.fpqueue[0], &p->thread.fpqdepth);
#ifdef CONFIG_SMP
-current->flags &= ~PF_USEDFPU;
+current_thread_info()->flags &= ~_TIF_USEDFPU;
#endif
}
@@ -597,13 +597,13 @@ int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs)
return 1;
}
#ifdef CONFIG_SMP
-if (current->flags & PF_USEDFPU) {
+if (current_thread_info()->flags & _TIF_USEDFPU) {
put_psr(get_psr() | PSR_EF);
fpsave(&current->thread.float_regs[0], &current->thread.fsr,
&current->thread.fpqueue[0], &current->thread.fpqdepth);
if (regs != NULL) {
regs->psr &= ~(PSR_EF);
-current->flags &= ~(PF_USEDFPU);
+current_thread_info()->flags &= ~(_TIF_USEDFPU);
}
}
#else
...
@@ -74,7 +74,7 @@ void prom_sync_me(void)
unsigned long prom_tbr, flags;
/* XXX Badly broken. FIX! - Anton */
-save_and_cli(flags);
+local_irq_save(flags);
__asm__ __volatile__("rd %%tbr, %0\n\t" : "=r" (prom_tbr));
__asm__ __volatile__("wr %0, 0x0, %%tbr\n\t"
"nop\n\t"
@@ -86,9 +86,9 @@ void prom_sync_me(void)
prom_printf("PROM SYNC COMMAND...\n");
show_free_areas();
if(current->pid != 0) {
-sti();
+local_irq_enable();
sys_sync();
-cli();
+local_irq_disable();
}
prom_printf("Returning to prom\n");
@@ -96,7 +96,7 @@ void prom_sync_me(void)
"nop\n\t"
"nop\n\t"
"nop\n\t" : : "r" (prom_tbr));
-restore_flags(flags);
+local_irq_restore(flags);
return;
}
...
@@ -30,6 +30,8 @@
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/hardirq.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
@@ -261,6 +263,9 @@ unsigned int prof_multiplier[NR_CPUS];
unsigned int prof_counter[NR_CPUS];
extern unsigned int lvl14_resolution;
+/* /proc/profile writes can call this, don't __init it please. */
+static spinlock_t prof_setup_lock = SPIN_LOCK_UNLOCKED;
int setup_profiling_timer(unsigned int multiplier)
{
int i;
@@ -270,14 +275,14 @@ int setup_profiling_timer(unsigned int multiplier)
if((!multiplier) || (lvl14_resolution / multiplier) < 500)
return -EINVAL;
-save_and_cli(flags);
+spin_lock_irqsave(&prof_setup_lock, flags);
for(i = 0; i < NR_CPUS; i++) {
if(cpu_present_map & (1 << i)) {
load_profile_irq(mid_xlate[i], lvl14_resolution / multiplier);
prof_multiplier[i] = multiplier;
}
}
-restore_flags(flags);
+spin_unlock_irqrestore(&prof_setup_lock, flags);
return 0;
}
...
@@ -68,7 +68,7 @@ static void sun4c_disable_irq(unsigned int irq_nr)
unsigned long flags;
unsigned char current_mask, new_mask;
-save_and_cli(flags);
+local_irq_save(flags);
irq_nr &= (NR_IRQS - 1);
current_mask = *interrupt_enable;
switch(irq_nr) {
@@ -85,11 +85,11 @@ static void sun4c_disable_irq(unsigned int irq_nr)
new_mask = ((current_mask) & (~(SUN4C_INT_E14)));
break;
default:
-restore_flags(flags);
+local_irq_restore(flags);
return;
}
*interrupt_enable = new_mask;
-restore_flags(flags);
+local_irq_restore(flags);
}
static void sun4c_enable_irq(unsigned int irq_nr)
@@ -97,7 +97,7 @@ static void sun4c_enable_irq(unsigned int irq_nr)
unsigned long flags;
unsigned char current_mask, new_mask;
-save_and_cli(flags);
+local_irq_save(flags);
irq_nr &= (NR_IRQS - 1);
current_mask = *interrupt_enable;
switch(irq_nr) {
@@ -114,11 +114,11 @@ static void sun4c_enable_irq(unsigned int irq_nr)
new_mask = ((current_mask) | SUN4C_INT_E14);
break;
default:
-restore_flags(flags);
+local_irq_restore(flags);
return;
}
*interrupt_enable = new_mask;
-restore_flags(flags);
+local_irq_restore(flags);
}
#define TIMER_IRQ 10 /* Also at level 14, but we ignore that one. */
...
@@ -38,6 +38,7 @@
#include <asm/pgtable.h>
#include <asm/sbus.h>
#include <asm/sbi.h>
+#include <asm/cacheflush.h>
/* If you trust current SCSI layer to handle different SCSI IRQs, enable this. I don't trust it... -jj */
/* #define DISTRIBUTE_IRQS */
@@ -54,6 +55,7 @@ unsigned char sbus_tid[32];
#endif
extern struct irqaction *irq_action[];
+extern spinlock_t irq_action_lock;
struct sbus_action {
struct irqaction *action;
@@ -77,30 +79,32 @@ int show_sun4d_interrupts(struct seq_file *p, void *v)
{
int i = *(loff_t *) v, j = 0, k = 0, sbusl;
struct irqaction * action;
+unsigned long flags;
#ifdef CONFIG_SMP
int x;
#endif
+spin_lock_irqsave(&irq_action_lock, flags);
if (i < NR_IRQS) {
sbusl = pil_to_sbus[i];
if (!sbusl) {
action = *(i + irq_action);
if (!action)
-goto out;
+goto out_unlock;
} else {
for (j = 0; j < nsbi; j++) {
for (k = 0; k < 4; k++)
if ((action = sbus_actions [(j << 5) + (sbusl << 2) + k].action))
goto found_it;
}
-goto out;
+goto out_unlock;
}
found_it: seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
seq_printf(p, "%10u ", kstat_irqs(i));
#else
for (x = 0; x < NR_CPUS; x++) {
-if (cpu_online)
+if (cpu_online(x))
seq_printf(p, "%10u ",
kstat_cpu(cpu_logical_map(x)).irqs[i]);
}
@@ -128,7 +132,8 @@ found_it: seq_printf(p, "%3d: ", i);
}
seq_putc(p, '\n');
}
-out:
+out_unlock:
+spin_unlock_irqrestore(&irq_action_lock, flags);
return 0;
}
@@ -138,6 +143,7 @@ void sun4d_free_irq(unsigned int irq, void *dev_id)
struct irqaction *tmp = NULL;
unsigned long flags;
+spin_lock_irqsave(&irq_action_lock, flags);
if (irq < 15)
actionp = irq + irq_action;
else
@@ -145,7 +151,7 @@ void sun4d_free_irq(unsigned int irq, void *dev_id)
action = *actionp;
if (!action) {
printk("Trying to free free IRQ%d\n",irq);
-return;
+goto out_unlock;
}
if (dev_id) {
for (; action; action = action->next) {
@@ -155,11 +161,11 @@ void sun4d_free_irq(unsigned int irq, void *dev_id)
}
if (!action) {
printk("Trying to free free shared IRQ%d\n",irq);
-return;
+goto out_unlock;
}
} else if (action->flags & SA_SHIRQ) {
printk("Trying to free shared IRQ%d with NULL device ID\n", irq);
-return;
+goto out_unlock;
}
if (action->flags & SA_STATIC_ALLOC)
{
@@ -168,21 +174,27 @@ void sun4d_free_irq(unsigned int irq, void *dev_id)
*/
printk("Attempt to free statically allocated IRQ%d (%s)\n",
irq, action->name);
-return;
+goto out_unlock;
}
-save_and_cli(flags);
if (action && tmp)
tmp->next = action->next;
else
*actionp = action->next;
+spin_unlock_irqrestore(&irq_action_lock, flags);
+synchronize_irq(irq);
+spin_lock_irqsave(&irq_action_lock, flags);
kfree(action);
if (!(*actionp))
disable_irq(irq);
-restore_flags(flags);
+out_unlock:
+spin_unlock_irqrestore(&irq_action_lock, flags);
}
extern void unexpected_irq(int, void *, struct pt_regs *);
@@ -268,12 +280,19 @@ int sun4d_request_irq(unsigned int irq,
{
struct irqaction *action, *tmp = NULL, **actionp;
unsigned long flags;
+int ret;
+if(irq > 14 && irq < (1 << 5)) {
+ret = -EINVAL;
+goto out;
+}
-if(irq > 14 && irq < (1 << 5))
+if (!handler) {
-return -EINVAL;
+ret = -EINVAL;
+goto out;
+}
-if (!handler)
+spin_lock_irqsave(&irq_action_lock, flags);
-return -EINVAL;
if (irq >= (1 << 5))
actionp = &(sbus_actions[irq - (1 << 5)].action);
@@ -285,17 +304,17 @@ int sun4d_request_irq(unsigned int irq,
if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ)) {
for (tmp = action; tmp->next; tmp = tmp->next);
} else {
-return -EBUSY;
+ret = -EBUSY;
+goto out_unlock;
}
if ((action->flags & SA_INTERRUPT) ^ (irqflags & SA_INTERRUPT)) {
printk("Attempt to mix fast and slow interrupts on IRQ%d denied\n", irq);
-return -EBUSY;
+ret = -EBUSY;
+goto out_unlock;
}
action = NULL; /* Or else! */
}
-save_and_cli(flags);
/* If this is flagged as statically allocated then we use our
* private struct which is never freed.
*/
@@ -303,16 +322,16 @@ int sun4d_request_irq(unsigned int irq,
if (static_irq_count < MAX_STATIC_ALLOC)
action = &static_irqaction[static_irq_count++];
else
-printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n",irq, devname);
+printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n", irq, devname);
}
if (action == NULL)
action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
-GFP_KERNEL);
+GFP_ATOMIC);
if (!action) {
-restore_flags(flags);
+ret = -ENOMEM;
-return -ENOMEM;
+goto out_unlock;
}
action->handler = handler;
@@ -328,8 +347,12 @@ int sun4d_request_irq(unsigned int irq,
*actionp = action;
enable_irq(irq);
-restore_flags(flags);
-return 0;
+ret = 0;
+out_unlock:
+spin_unlock_irqrestore(&irq_action_lock, flags);
+out:
+return ret;
}
static void sun4d_disable_irq(unsigned int irq)
...
@@ -37,6 +37,7 @@
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/sbus.h>
+#include <asm/cacheflush.h>
static unsigned long dummy;
@@ -116,12 +117,12 @@ static void sun4m_disable_irq(unsigned int irq_nr)
int cpu = smp_processor_id();
mask = sun4m_get_irqmask(irq_nr);
-save_and_cli(flags);
+local_irq_save(flags);
if (irq_nr > 15)
sun4m_interrupts->set = mask;
else
sun4m_interrupts->cpu_intregs[cpu].set = mask;
-restore_flags(flags);
+local_irq_restore(flags);
}
static void sun4m_enable_irq(unsigned int irq_nr)
@@ -135,16 +136,16 @@ static void sun4m_enable_irq(unsigned int irq_nr)
*/
if (irq_nr != 0x0b) {
mask = sun4m_get_irqmask(irq_nr);
-save_and_cli(flags);
+local_irq_save(flags);
if (irq_nr > 15)
sun4m_interrupts->clear = mask;
else
sun4m_interrupts->cpu_intregs[cpu].clear = mask;
-restore_flags(flags);
+local_irq_restore(flags);
} else {
-save_and_cli(flags);
+local_irq_save(flags);
sun4m_interrupts->clear = SUN4M_INT_FLOPPY;
-restore_flags(flags);
+local_irq_restore(flags);
}
}
@@ -167,8 +168,8 @@ static unsigned long cpu_pil_to_imask[16] = {
/*15*/ 0x00000000
};
-/* We assume the caller is local cli()'d when these are called, or else
+/* We assume the caller has disabled local interrupts when these are called,
-* very bizarre behavior will result.
+* or else very bizarre behavior will result.
*/
static void sun4m_disable_pil_irq(unsigned int pil)
{
...
@@ -260,7 +260,7 @@ void do_fpd_trap(struct pt_regs *regs, unsigned long pc, unsigned long npc,
} else {
fpload(&current->thread.float_regs[0], &current->thread.fsr);
}
-current->flags |= PF_USEDFPU;
+current_thread_info()->flags |= _TIF_USEDFPU;
#endif
}
@@ -291,7 +291,7 @@ void do_fpe_trap(struct pt_regs *regs, unsigned long pc, unsigned long npc,
#ifndef CONFIG_SMP
if(!fpt) {
#else
-if(!(fpt->flags & PF_USEDFPU)) {
+if(!(fpt->thread_info->flags & _TIF_USEDFPU)) {
#endif
fpsave(&fake_regs[0], &fake_fsr, &fake_queue[0], &fake_depth);
regs->psr &= ~PSR_EF;
@@ -334,7 +334,7 @@ void do_fpe_trap(struct pt_regs *regs, unsigned long pc, unsigned long npc,
/* nope, better SIGFPE the offending process... */
#ifdef CONFIG_SMP
-fpt->flags &= ~PF_USEDFPU;
+fpt->thread_info->flags &= ~_TIF_USEDFPU;
#endif
if(psr & PSR_PS) {
/* The first fsr store/load we tried trapped,
...
@@ -311,15 +311,19 @@ __asm__ __volatile__ ( \
store_common(dst_addr, size, src_val, errh); \
})
-/* XXX Need to capture/release other cpu's for SMP around this. */
+extern void smp_capture(void);
+extern void smp_release(void);
#define do_atomic(srcdest_reg, mem, errh) ({ \
unsigned long flags, tmp; \
\
-save_and_cli(flags); \
+smp_capture(); \
+local_irq_save(flags); \
tmp = *srcdest_reg; \
do_integer_load(srcdest_reg, 4, mem, 0, errh); \
store_common(mem, 4, &tmp, errh); \
-restore_flags(flags); \
+local_irq_restore(flags); \
+smp_release(); \
})
static inline void advance(struct pt_regs *regs)
...
@@ -441,13 +441,13 @@ asmlinkage void do_sun4c_fault(struct pt_regs *regs, int text_fault, int write,
_SUN4C_PAGE_VALID |
_SUN4C_PAGE_DIRTY);
-save_and_cli(flags);
+local_irq_save(flags);
if (sun4c_get_segmap(address) != invalid_segment) {
sun4c_put_pte(address, pte_val(*ptep));
-restore_flags(flags);
+local_irq_restore(flags);
return;
}
-restore_flags(flags);
+local_irq_restore(flags);
}
} else {
if ((pte_val(*ptep) & (_SUN4C_PAGE_READ|_SUN4C_PAGE_PRESENT))
@@ -457,13 +457,13 @@ asmlinkage void do_sun4c_fault(struct pt_regs *regs, int text_fault, int write,
*ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_ACCESSED |
_SUN4C_PAGE_VALID);
-save_and_cli(flags);
+local_irq_save(flags);
if (sun4c_get_segmap(address) != invalid_segment) {
sun4c_put_pte(address, pte_val(*ptep));
-restore_flags(flags);
+local_irq_restore(flags);
return;
}
-restore_flags(flags);
+local_irq_restore(flags);
}
}
}
...
@@ -836,7 +836,7 @@ static void sun4c_demap_context(struct sun4c_mmu_ring *crp, unsigned char ctx)
struct sun4c_mmu_entry *head = &crp->ringhd;
unsigned long flags;
-save_and_cli(flags);
+local_irq_save(flags);
if (head->next != head) {
struct sun4c_mmu_entry *entry = head->next;
int savectx = sun4c_get_context();
@@ -854,7 +854,7 @@ static void sun4c_demap_context(struct sun4c_mmu_ring *crp, unsigned char ctx)
} while (entry != head);
sun4c_set_context(savectx);
}
-restore_flags(flags);
+local_irq_restore(flags);
}
static int sun4c_user_taken_entries; /* This is how much we have. */
@@ -978,14 +978,14 @@ static void get_locked_segment(unsigned long addr)
struct sun4c_mmu_entry *stolen;
unsigned long flags;
-save_and_cli(flags);
+local_irq_save(flags);
addr &= SUN4C_REAL_PGDIR_MASK;
stolen = sun4c_user_strategy();
max_user_taken_entries--;
stolen->vaddr = addr;
flush_user_windows();
sun4c_kernel_map(stolen);
-restore_flags(flags);
+local_irq_restore(flags);
}
static void free_locked_segment(unsigned long addr)
@@ -994,7 +994,7 @@ static void free_locked_segment(unsigned long addr)
unsigned long flags;
unsigned char pseg;
-save_and_cli(flags);
+local_irq_save(flags);
addr &= SUN4C_REAL_PGDIR_MASK;
pseg = sun4c_get_segmap(addr);
entry = &mmu_entry_pool[pseg];
@@ -1004,7 +1004,7 @@ static void free_locked_segment(unsigned long addr)
sun4c_kernel_unmap(entry);
add_ring(&sun4c_ufree_ring, entry);
max_user_taken_entries++;
-restore_flags(flags);
+local_irq_restore(flags);
}
static inline void garbage_collect(int entry)
@@ -1123,7 +1123,7 @@ static char *sun4c_lockarea(char *vaddr, unsigned long size)
size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
scan = 0;
-save_and_cli(flags);
+local_irq_save(flags);
for (;;) {
scan = find_next_zero_bit(sun4c_iobuffer_map,
iobuffer_map_size, scan);
@@ -1157,12 +1157,12 @@ static char *sun4c_lockarea(char *vaddr, unsigned long size)
sun4c_put_pte(apage, pte);
vpage += PAGE_SIZE;
}
-restore_flags(flags);
+local_irq_restore(flags);
return (char *) ((base << PAGE_SHIFT) + sun4c_iobuffer_start +
(((unsigned long) vaddr) & ~PAGE_MASK));
abend:
-restore_flags(flags);
+local_irq_restore(flags);
printk("DMA vaddr=0x%p size=%08lx\n", vaddr, size);
panic("Out of iobuffer table");
return 0;
@@ -1178,7 +1178,7 @@ static void sun4c_unlockarea(char *vaddr, unsigned long size)
npages = (((unsigned long)vaddr & ~PAGE_MASK) +
size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
-save_and_cli(flags);
+local_irq_save(flags);
while (npages != 0) {
--npages;
@@ -1200,7 +1200,7 @@ static void sun4c_unlockarea(char *vaddr, unsigned long size)
sun4c_iobuffer_high -= SUN4C_REAL_PGDIR_SIZE;
free_locked_segment(sun4c_iobuffer_high);
}
-restore_flags(flags);
+local_irq_restore(flags);
}
/* Note the scsi code at init time passes to here buffers
@@ -1349,7 +1349,7 @@ static void sun4c_flush_cache_mm(struct mm_struct *mm)
struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
unsigned long flags;
-save_and_cli(flags);
+local_irq_save(flags);
if (head->next != head) {
struct sun4c_mmu_entry *entry = head->next;
int savectx = sun4c_get_context();
@@ -1366,7 +1366,7 @@ static void sun4c_flush_cache_mm(struct mm_struct *mm)
} while (entry != head);
sun4c_set_context(savectx);
}
-restore_flags(flags);
+local_irq_restore(flags);
}
}
}
@@ -1383,7 +1383,7 @@ static void sun4c_flush_cache_range(struct vm_area_struct *vma, unsigned long st
flush_user_windows();
-save_and_cli(flags);
+local_irq_save(flags);
/* All user segmap chains are ordered on entry->vaddr. */
for (entry = head->next;
(entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start);
@@ -1427,7 +1427,7 @@ static void sun4c_flush_cache_range(struct vm_area_struct *vma, unsigned long st
} while ((entry != head) && (entry->vaddr < end));
sun4c_set_context(octx);
}
-restore_flags(flags);
+local_irq_restore(flags);
}
}
@@ -1444,11 +1444,11 @@ static void sun4c_flush_cache_page(struct vm_area_struct *vma, unsigned long pag
unsigned long flags;
flush_user_windows();
-save_and_cli(flags);
+local_irq_save(flags);
sun4c_set_context(new_ctx);
sun4c_flush_page(page);
sun4c_set_context(octx);
-restore_flags(flags);
+local_irq_restore(flags);
}
}
@@ -1456,9 +1456,9 @@ static void sun4c_flush_page_to_ram(unsigned long page)
{
unsigned long flags;
-save_and_cli(flags);
+local_irq_save(flags);
sun4c_flush_page(page);
-restore_flags(flags);
+local_irq_restore(flags);
}
/* Sun4c cache is unified, both instructions and data live there, so
@@ -1479,7 +1479,7 @@ static void sun4c_flush_tlb_all(void)
unsigned long flags;
int savectx, ctx;
-save_and_cli(flags);
+local_irq_save(flags);
this_entry = sun4c_kernel_ring.ringhd.next;
savectx = sun4c_get_context();
flush_user_windows();
@@ -1494,7 +1494,7 @@ static void sun4c_flush_tlb_all(void)
this_entry = next_entry;
}
sun4c_set_context(savectx);
-restore_flags(flags);
+local_irq_restore(flags);
}
static void sun4c_flush_tlb_mm(struct mm_struct *mm)
@@ -1505,7 +1505,7 @@ static void sun4c_flush_tlb_mm(struct mm_struct *mm)
struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
unsigned long flags;
-save_and_cli(flags);
+local_irq_save(flags);
if (head->next != head) {
struct sun4c_mmu_entry *entry = head->next;
int savectx = sun4c_get_context();
@@ -1522,7 +1522,7 @@ static void sun4c_flush_tlb_mm(struct mm_struct *mm)
} while (entry != head);
sun4c_set_context(savectx);
}
-restore_flags(flags);
+local_irq_restore(flags);
}
}
@@ -1536,7 +1536,7 @@ static void sun4c_flush_tlb_range(struct vm_area_struct *vma, unsigned long star
struct sun4c_mmu_entry *entry;
unsigned long flags;
-save_and_cli(flags);
+local_irq_save(flags);
/* See commentary in sun4c_flush_cache_range(). */
for (entry = head->next;
(entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start);
@@ -1558,7 +1558,7 @@ static void sun4c_flush_tlb_range(struct vm_area_struct *vma, unsigned long star
} while ((entry != head) && (entry->vaddr < end));
sun4c_set_context(octx);
}
-restore_flags(flags);
+local_irq_restore(flags);
}
}
@@ -1571,13 +1571,13 @@ static void sun4c_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
int savectx = sun4c_get_context();
unsigned long flags;
-save_and_cli(flags);
+local_irq_save(flags);
sun4c_set_context(new_ctx);
page &= PAGE_MASK;
sun4c_flush_page(page);
sun4c_put_pte(page, 0);
sun4c_set_context(savectx);
-restore_flags(flags);
+local_irq_restore(flags);
}
}
@@ -1974,7 +1974,7 @@ void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, p
unsigned long flags;
int pseg;
-save_and_cli(flags);
+local_irq_save(flags);
address &= PAGE_MASK;
if ((pseg = sun4c_get_segmap(address)) == invalid_segment) {
struct sun4c_mmu_entry *entry = sun4c_user_strategy();
@@ -2010,7 +2010,7 @@ void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, p
#ifndef SUN4C_PRELOAD_PSEG
sun4c_put_pte(address, pte_val(pte));
#endif
-restore_flags(flags);
+local_irq_restore(flags);
return;
} else {
struct sun4c_mmu_entry *entry = &mmu_entry_pool[pseg];
@@ -2020,7 +2020,7 @@ void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, p
}
sun4c_put_pte(address, pte_val(pte));
-restore_flags(flags);
+local_irq_restore(flags);
}
extern void sparc_context_init(int);
...
@@ -15,15 +15,6 @@
/* entry.S is sensitive to the offsets of these fields */ /* XXX P3 Is it? */
typedef struct {
unsigned int __softirq_pending;
-unsigned int __unused_1;
-#ifndef CONFIG_SMP
-unsigned int WAS__local_irq_count;
-#else
-unsigned int __unused_on_SMP; /* DaveM says use brlock for SMP irq. KAO */
-#endif
-unsigned int WAS__local_bh_count;
-unsigned int __syscall_count;
-struct task_struct * __ksoftirqd_task;
} ____cacheline_aligned irq_cpustat_t;
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
@@ -86,11 +77,11 @@ typedef struct {
#define hardirq_trylock() (!in_interrupt())
#define hardirq_endlock() do { } while (0)
-#ifndef CONFIG_SMP
#define irq_enter() (preempt_count() += HARDIRQ_OFFSET)
#ifdef CONFIG_PREEMPT
-# define in_atomic() (preempt_count() != kernel_locked())
+#include <linux/smp_lock.h>
+# define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked())
# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
#else
# define in_atomic() (preempt_count() != 0)
@@ -104,63 +95,10 @@ do { \
preempt_enable_no_resched(); \
} while (0)
-#else
-/* Note that local_irq_count() is replaced by sparc64 specific version for SMP */
-/* XXX This is likely to be broken by the above preempt-based IRQs */
-#define irq_enter() br_read_lock(BR_GLOBALIRQ_LOCK)
-#undef local_irq_count
-#define local_irq_count(cpu) (__brlock_array[cpu][BR_GLOBALIRQ_LOCK])
-#define irq_exit() br_read_unlock(BR_GLOBALIRQ_LOCK)
-#endif
-#ifdef CONFIG_PREEMPT
-# define in_atomic() (preempt_count() != kernel_locked())
-#else
-# define in_atomic() (preempt_count() != 0)
-#endif
#ifndef CONFIG_SMP
+# define synchronize_irq(irq) barrier()
-#define synchronize_irq(irq) barrier()
+#else /* SMP */
-#else /* (CONFIG_SMP) */
-static __inline__ int irqs_running(void)
-{
-int i;
-for (i = 0; i < smp_num_cpus; i++)
-if (local_irq_count(cpu_logical_map(i)))
-return 1;
-return 0;
-}
-extern unsigned char global_irq_holder;
-static inline void release_irqlock(int cpu)
-{
-/* if we didn't own the irq lock, just ignore... */
-if(global_irq_holder == (unsigned char) cpu) {
-global_irq_holder = NO_PROC_ID;
-br_write_unlock(BR_GLOBALIRQ_LOCK);
-}
-}
-#if 0
-static inline int hardirq_trylock(int cpu)
-{
-spinlock_t *lock = &__br_write_locks[BR_GLOBALIRQ_LOCK].lock;
-return (!local_irq_count(cpu) && !spin_is_locked(lock));
-}
-#endif
extern void synchronize_irq(unsigned int irq);
-#endif /* SMP */
+#endif /* CONFIG_SMP */
-// extern void show_stack(unsigned long * esp);
#endif /* __SPARC_HARDIRQ_H */
@@ -53,6 +53,8 @@ struct thread_info {
/*
* macros/functions for gaining access to the thread information structure
+*
+* preempt_count needs to be 1 initially, until the scheduler is functional.
*/
#define INIT_THREAD_INFO(tsk) \
{ \
@@ -61,6 +63,7 @@ struct thread_info {
.exec_domain = &default_exec_domain, \
.flags = 0, \
.cpu = 0, \
+.preempt_count = 1, \
.restart_block = { \
.fn = do_no_restart_syscall, \
}, \
...