Commit b457c5cd authored by David S. Miller

Merge bk://kernel.bkbits.net/wesolows/sparc32-2.6
into nuts.davemloft.net:/disk1/BK/sparc-2.6
parents 6e9aed30 1d0b87e8
@@ -49,9 +49,6 @@
#include <asm/pcic.h>
#include <asm/cacheflush.h>
/* Used to protect the IRQ action lists */
spinlock_t irq_action_lock = SPIN_LOCK_UNLOCKED;
#ifdef CONFIG_SMP
#define SMP_NOP2 "nop; nop;\n\t"
#define SMP_NOP3 "nop; nop; nop;\n\t"
@@ -159,10 +156,12 @@ struct irqaction static_irqaction[MAX_STATIC_ALLOC];
int static_irq_count;
struct irqaction *irq_action[NR_IRQS] = {
NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL
[0 ... (NR_IRQS-1)] = NULL
};
/* Used to protect the IRQ action lists */
spinlock_t irq_action_lock = SPIN_LOCK_UNLOCKED;
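Note on this hunk: the sixteen hand-written NULLs become a single GCC range designator, and irq_action_lock moves next to the table it guards. A minimal standalone sketch of the range-designator extension (NR_IRQS pinned to 16 here purely for illustration):

#include <stddef.h>

#define NR_IRQS 16			/* illustrative value only */

struct irqaction;			/* opaque in this sketch */

static struct irqaction *irq_action[NR_IRQS] = {
	[0 ... (NR_IRQS - 1)] = NULL	/* GCC extension: fill a whole range */
};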
int show_interrupts(struct seq_file *p, void *v)
{
int i = *(loff_t *) v;
@@ -177,11 +176,11 @@ int show_interrupts(struct seq_file *p, void *v)
return show_sun4d_interrupts(p, v);
}
spin_lock_irqsave(&irq_action_lock, flags);
if (i < NR_IRQS) {
local_irq_save(flags);
action = *(i + irq_action);
if (!action)
goto skip;
goto out_unlock;
seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
seq_printf(p, "%10u ", kstat_irqs(i));
@@ -201,9 +200,9 @@ int show_interrupts(struct seq_file *p, void *v)
action->name);
}
seq_putc(p, '\n');
skip:
local_irq_restore(flags);
}
out_unlock:
spin_unlock_irqrestore(&irq_action_lock, flags);
return 0;
}
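The pattern in this hunk recurs through the whole series: the 2.4-era scheme, in which a driver-visible cli() could escalate into the machine-wide interrupt lock (the __global_cli()/brlock code deleted further down), gives way to a targeted spinlock around the shared irq_action table. A minimal before/after sketch, assuming 2.6-era spinlock headers:

#include <linux/spinlock.h>
#include <asm/system.h>

static spinlock_t example_lock = SPIN_LOCK_UNLOCKED;

/* Old shape: only keeps handlers off this CPU. */
static void walk_actions_up_only(void)
{
	unsigned long flags;

	local_irq_save(flags);
	/* ... read or modify the shared table ... */
	local_irq_restore(flags);
}

/* New shape: excludes other CPUs as well as local handlers. */
static void walk_actions_smp_safe(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* ... read or modify the shared table ... */
	spin_unlock_irqrestore(&example_lock, flags);
}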
@@ -220,14 +219,18 @@ void free_irq(unsigned int irq, void *dev_id)
return sun4d_free_irq(irq, dev_id);
}
cpu_irq = irq & (NR_IRQS - 1);
action = *(cpu_irq + irq_action);
if (cpu_irq > 14) { /* 14 irq levels on the sparc */
printk("Trying to free bogus IRQ %d\n", irq);
return;
}
spin_lock_irqsave(&irq_action_lock, flags);
action = *(cpu_irq + irq_action);
if (!action->handler) {
printk("Trying to free free IRQ%d\n",irq);
return;
goto out_unlock;
}
if (dev_id) {
for (; action; action = action->next) {
@@ -237,11 +240,11 @@ void free_irq(unsigned int irq, void *dev_id)
}
if (!action) {
printk("Trying to free free shared IRQ%d\n",irq);
return;
goto out_unlock;
}
} else if (action->flags & SA_SHIRQ) {
printk("Trying to free shared IRQ%d with NULL device ID\n", irq);
return;
goto out_unlock;
}
if (action->flags & SA_STATIC_ALLOC)
{
@@ -250,68 +253,31 @@ void free_irq(unsigned int irq, void *dev_id)
*/
printk("Attempt to free statically allocated IRQ%d (%s)\n",
irq, action->name);
return;
goto out_unlock;
}
save_and_cli(flags);
if (action && tmp)
tmp->next = action->next;
else
*(cpu_irq + irq_action) = action->next;
spin_unlock_irqrestore(&irq_action_lock, flags);
synchronize_irq(irq);
spin_lock_irqsave(&irq_action_lock, flags);
kfree(action);
if (!(*(cpu_irq + irq_action)))
disable_irq(irq);
restore_flags(flags);
out_unlock:
spin_unlock_irqrestore(&irq_action_lock, flags);
}
EXPORT_SYMBOL(free_irq);
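The shape of the rewritten free_irq() deserves a note: the action is unlinked while irq_action_lock is held, but the lock is dropped around synchronize_irq() and retaken for the kfree(). Waiting for a handler that may itself be spinning on irq_action_lock would deadlock, so the wait must happen unlocked; once unlinked, no new handler can find the action. Condensed control flow, names as in the hunk above:

spin_lock_irqsave(&irq_action_lock, flags);
/* ... unlink 'action' from irq_action[] ... */
spin_unlock_irqrestore(&irq_action_lock, flags);

synchronize_irq(irq);		/* wait for in-flight handlers, unlocked */

spin_lock_irqsave(&irq_action_lock, flags);
kfree(action);			/* unreachable by now, safe to free */
if (!(*(cpu_irq + irq_action)))
	disable_irq(irq);
spin_unlock_irqrestore(&irq_action_lock, flags);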
#ifdef CONFIG_SMP
/* Who has the global irq brlock */
unsigned char global_irq_holder = NO_PROC_ID;
void smp_show_backtrace_all_cpus(void);
void show_backtrace(void);
#define VERBOSE_DEBUG_IRQLOCK
#define MAXCOUNT 100000000
static void show(char * str)
{
int cpu = smp_processor_id();
int i;
printk("\n%s, CPU %d:\n", str, cpu);
printk("irq: %d [ ", irqs_running());
for (i = 0; i < NR_CPUS; i++) {
if (cpu_online(i))
printk("%u ", __brlock_array[i][BR_GLOBALIRQ_LOCK]);
}
printk("]\nbh: %d [ ",
(spin_is_locked(&global_bh_lock) ? 1 : 0));
for (i = 0; i < NR_CPUS; i++) {
if (cpu_online(i))
printk("%u ", local_bh_count(i));
}
printk("]\n");
#ifdef VERBOSE_DEBUG_IRQLOCK
smp_show_backtrace_all_cpus();
#else
show_backtrace();
#endif
}
/*
* We have to allow irqs to arrive between local_irq_enable and local_irq_disable
*/
#define SYNC_OTHER_CORES(x) barrier()
/*
* This is called when we want to synchronize with
* interrupts. We may for example tell a device to
@@ -319,140 +285,13 @@ static void show(char * str)
* are no interrupts that are executing on another
* CPU we need to call this function.
*/
void synchronize_irq(void)
{
if (irqs_running()) {
cli();
sti();
}
}
static inline void get_irqlock(int cpu)
{
int count;
if ((unsigned char)cpu == global_irq_holder)
return;
count = MAXCOUNT;
again:
br_write_lock(BR_GLOBALIRQ_LOCK);
for (;;) {
spinlock_t *lock;
if (!irqs_running() &&
(local_bh_count(smp_processor_id()) || !spin_is_locked(&global_bh_lock)))
break;
br_write_unlock(BR_GLOBALIRQ_LOCK);
lock = &__br_write_locks[BR_GLOBALIRQ_LOCK].lock;
while (irqs_running() ||
spin_is_locked(lock) ||
(!local_bh_count(smp_processor_id()) && spin_is_locked(&global_bh_lock))) {
if (!--count) {
show("get_irqlock");
count = (~0 >> 1);
}
local_irq_enable();
SYNC_OTHER_CORES(cpu);
local_irq_disable();
}
goto again;
}
global_irq_holder = cpu;
}
/*
* A global "cli()" while in an interrupt context
* turns into just a local cli(). Interrupts
* should use spinlocks for the (very unlikely)
* case that they ever want to protect against
* each other.
*
* If we already have local interrupts disabled,
* this will not turn a local disable into a
* global one (problems with spinlocks: this makes
* save_flags+cli+sti usable inside a spinlock).
*/
void __global_cli(void)
{
unsigned long flags;
local_save_flags(flags);
if ((flags & PSR_PIL) != PSR_PIL) {
int cpu = smp_processor_id();
local_irq_disable();
if (!local_irq_count(cpu))
get_irqlock(cpu);
}
}
void __global_sti(void)
{
int cpu = smp_processor_id();
if (!local_irq_count(cpu))
release_irqlock(cpu);
local_irq_enable();
}
/*
* SMP flags value to restore to:
* 0 - global cli
* 1 - global sti
* 2 - local cli
* 3 - local sti
*/
unsigned long __global_save_flags(void)
{
unsigned long flags, retval;
unsigned long local_enabled = 0;
local_save_flags(flags);
if ((flags & PSR_PIL) != PSR_PIL)
local_enabled = 1;
/* default to local */
retval = 2 + local_enabled;
/* check for global flags if we're not in an interrupt */
if (!local_irq_count(smp_processor_id())) {
if (local_enabled)
retval = 1;
if (global_irq_holder == (unsigned char) smp_processor_id())
retval = 0;
}
return retval;
}
void __global_restore_flags(unsigned long flags)
#ifdef CONFIG_SMP
void synchronize_irq(unsigned int irq)
{
switch (flags) {
case 0:
__global_cli();
break;
case 1:
__global_sti();
break;
case 2:
local_irq_disable();
break;
case 3:
local_irq_enable();
break;
default:
{
unsigned long pc;
__asm__ __volatile__("mov %%i7, %0" : "=r" (pc));
printk("global_restore_flags: Bogon flags(%08lx) caller %08lx\n", flags, pc);
}
}
printk("synchronize_irq says: implement me!\n");
BUG();
}
#endif /* CONFIG_SMP */
#endif /* SMP */
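Also visible here is an interface change: 2.4's synchronize_irq(void) drained all interrupts globally via a cli()/sti() pair, while 2.6's variant takes an IRQ number and waits only for that handler. The sparc32 SMP version is left as a printk-plus-BUG() stub for now, so it must simply never be reached on SMP; UP gets a barrier() in hardirq.h (see the header hunk near the end). The 2.6 contract, sketched:

/* Return only once no CPU is still inside a handler registered for
 * 'irq'. On a single CPU this is vacuously true on entry. */
extern void synchronize_irq(unsigned int irq);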
void unexpected_irq(int irq, void *dev_id, struct pt_regs * regs)
{
@@ -533,16 +372,24 @@ int request_fast_irq(unsigned int irq,
struct irqaction *action;
unsigned long flags;
unsigned int cpu_irq;
int ret;
#ifdef CONFIG_SMP
struct tt_entry *trap_table;
extern struct tt_entry trapbase_cpu1, trapbase_cpu2, trapbase_cpu3;
#endif
cpu_irq = irq & (NR_IRQS - 1);
if(cpu_irq > 14)
return -EINVAL;
if(!handler)
return -EINVAL;
if(cpu_irq > 14) {
ret = -EINVAL;
goto out;
}
if(!handler) {
ret = -EINVAL;
goto out;
}
spin_lock_irqsave(&irq_action_lock, flags);
action = *(cpu_irq + irq_action);
if(action) {
if(action->flags & SA_SHIRQ)
@@ -552,11 +399,10 @@ int request_fast_irq(unsigned int irq,
/* Anyway, someone already owns it so cannot be made fast. */
printk("request_fast_irq: Trying to register yet already owned.\n");
return -EBUSY;
ret = -EBUSY;
goto out_unlock;
}
spin_lock_irqsave(&irq_action_lock, flags);
/* If this is flagged as statically allocated then we use our
* private struct which is never freed.
*/
@@ -573,8 +419,8 @@ int request_fast_irq(unsigned int irq,
GFP_ATOMIC);
if (!action) {
spin_unlock_irqrestore(&irq_action_lock, flags);
return -ENOMEM;
ret = -ENOMEM;
goto out_unlock;
}
/* Dork with trap table if we get this far. */
@@ -610,8 +456,12 @@ int request_fast_irq(unsigned int irq,
*(cpu_irq + irq_action) = action;
enable_irq(irq);
ret = 0;
out_unlock:
spin_unlock_irqrestore(&irq_action_lock, flags);
return 0;
out:
return ret;
}
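request_fast_irq() now funnels every failure through one of two labels, leaving exactly one unlock site and one return site. The idiom in isolation (arg_ok and slot_busy are illustrative parameters, not kernel symbols):

#include <linux/errno.h>
#include <linux/spinlock.h>

static spinlock_t table_lock = SPIN_LOCK_UNLOCKED;

static int install(int arg_ok, int slot_busy)
{
	unsigned long flags;
	int ret;

	if (!arg_ok) {
		ret = -EINVAL;
		goto out;		/* lock not taken yet: plain exit */
	}

	spin_lock_irqsave(&table_lock, flags);
	if (slot_busy) {
		ret = -EBUSY;
		goto out_unlock;	/* every locked exit funnels here */
	}
	/* ... commit the change ... */
	ret = 0;
out_unlock:
	spin_unlock_irqrestore(&table_lock, flags);
out:
	return ret;
}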
int request_irq(unsigned int irq,
@@ -621,6 +471,7 @@ int request_irq(unsigned int irq,
struct irqaction * action, *tmp = NULL;
unsigned long flags;
unsigned int cpu_irq;
int ret;
if (sparc_cpu_model == sun4d) {
extern int sun4d_request_irq(unsigned int,
@@ -629,28 +480,33 @@ int request_irq(unsigned int irq,
return sun4d_request_irq(irq, handler, irqflags, devname, dev_id);
}
cpu_irq = irq & (NR_IRQS - 1);
if(cpu_irq > 14)
return -EINVAL;
if(cpu_irq > 14) {
ret = -EINVAL;
goto out;
}
if (!handler) {
ret = -EINVAL;
goto out;
}
if (!handler)
return -EINVAL;
spin_lock_irqsave(&irq_action_lock, flags);
action = *(cpu_irq + irq_action);
if (action) {
if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ)) {
for (tmp = action; tmp->next; tmp = tmp->next);
} else {
return -EBUSY;
ret = -EBUSY;
goto out_unlock;
}
if ((action->flags & SA_INTERRUPT) ^ (irqflags & SA_INTERRUPT)) {
printk("Attempt to mix fast and slow interrupts on IRQ%d denied\n", irq);
return -EBUSY;
ret = -EBUSY;
goto out_unlock;
}
action = NULL; /* Or else! */
}
spin_lock_irqsave(&irq_action_lock, flags);
/* If this is flagged as statically allocated then we use our
* private struct which is never freed.
*/
@@ -658,7 +514,7 @@ int request_irq(unsigned int irq,
if (static_irq_count < MAX_STATIC_ALLOC)
action = &static_irqaction[static_irq_count++];
else
printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n",irq, devname);
printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n", irq, devname);
}
if (action == NULL)
@@ -666,8 +522,8 @@ int request_irq(unsigned int irq,
GFP_ATOMIC);
if (!action) {
spin_unlock_irqrestore(&irq_action_lock, flags);
return -ENOMEM;
ret = -ENOMEM;
goto out_unlock;
}
action->handler = handler;
@@ -683,8 +539,12 @@ int request_irq(unsigned int irq,
*(cpu_irq + irq_action) = action;
enable_irq(irq);
ret = 0;
out_unlock:
spin_unlock_irqrestore(&irq_action_lock, flags);
return 0;
out:
return ret;
}
EXPORT_SYMBOL(request_irq);
......
@@ -940,8 +940,8 @@ static void pcic_load_profile_irq(int cpu, unsigned int limit)
printk("PCIC: unimplemented code: FILE=%s LINE=%d", __FILE__, __LINE__);
}
/* We assume the caller is local cli()'d when these are called, or else
* very bizarre behavior will result.
/* We assume the caller has disabled local interrupts when these are called,
* or else very bizarre behavior will result.
*/
static void pcic_disable_pil_irq(unsigned int pil)
{
......
@@ -346,7 +346,7 @@ void exit_thread(void)
#ifndef CONFIG_SMP
if(last_task_used_math == current) {
#else
if(current->flags & PF_USEDFPU) {
if(current_thread_info()->flags & _TIF_USEDFPU) {
#endif
/* Keep process from leaving FPU in a bogon state. */
put_psr(get_psr() | PSR_EF);
@@ -355,7 +355,7 @@ void exit_thread(void)
#ifndef CONFIG_SMP
last_task_used_math = NULL;
#else
current->flags &= ~PF_USEDFPU;
current_thread_info()->flags &= ~_TIF_USEDFPU;
#endif
}
}
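This hunk and the ones that follow in process.c (and later in traps.c) move the per-task FPU-in-use marker from task_struct.flags, where PF_USEDFPU lived, to thread_info.flags as _TIF_USEDFPU, following the 2.6 task_struct/thread_info split. The access pattern, reduced to a sketch:

#include <linux/sched.h>
#include <asm/thread_info.h>

static inline int fpu_in_use(void)
{
	/* old: current->flags & PF_USEDFPU */
	return (current_thread_info()->flags & _TIF_USEDFPU) != 0;
}

static inline void clear_fpu_in_use(void)
{
	current_thread_info()->flags &= ~_TIF_USEDFPU;
}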
@@ -369,7 +369,7 @@ void flush_thread(void)
#ifndef CONFIG_SMP
if(last_task_used_math == current) {
#else
if(current->flags & PF_USEDFPU) {
if(current_thread_info()->flags & _TIF_USEDFPU) {
#endif
/* Clean the fpu. */
put_psr(get_psr() | PSR_EF);
@@ -378,7 +378,7 @@ void flush_thread(void)
#ifndef CONFIG_SMP
last_task_used_math = NULL;
#else
current->flags &= ~PF_USEDFPU;
current_thread_info()->flags &= ~_TIF_USEDFPU;
#endif
}
@@ -459,13 +459,13 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
#ifndef CONFIG_SMP
if(last_task_used_math == current) {
#else
if(current->flags & PF_USEDFPU) {
if(current_thread_info()->flags & _TIF_USEDFPU) {
#endif
put_psr(get_psr() | PSR_EF);
fpsave(&p->thread.float_regs[0], &p->thread.fsr,
&p->thread.fpqueue[0], &p->thread.fpqdepth);
#ifdef CONFIG_SMP
current->flags &= ~PF_USEDFPU;
current_thread_info()->flags &= ~_TIF_USEDFPU;
#endif
}
@@ -597,13 +597,13 @@ int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs)
return 1;
}
#ifdef CONFIG_SMP
if (current->flags & PF_USEDFPU) {
if (current_thread_info()->flags & _TIF_USEDFPU) {
put_psr(get_psr() | PSR_EF);
fpsave(&current->thread.float_regs[0], &current->thread.fsr,
&current->thread.fpqueue[0], &current->thread.fpqdepth);
if (regs != NULL) {
regs->psr &= ~(PSR_EF);
current->flags &= ~(PF_USEDFPU);
current_thread_info()->flags &= ~(_TIF_USEDFPU);
}
}
#else
......
@@ -74,7 +74,7 @@ void prom_sync_me(void)
unsigned long prom_tbr, flags;
/* XXX Badly broken. FIX! - Anton */
save_and_cli(flags);
local_irq_save(flags);
__asm__ __volatile__("rd %%tbr, %0\n\t" : "=r" (prom_tbr));
__asm__ __volatile__("wr %0, 0x0, %%tbr\n\t"
"nop\n\t"
@@ -86,9 +86,9 @@ void prom_sync_me(void)
prom_printf("PROM SYNC COMMAND...\n");
show_free_areas();
if(current->pid != 0) {
sti();
local_irq_enable();
sys_sync();
cli();
local_irq_disable();
}
prom_printf("Returning to prom\n");
@@ -96,7 +96,7 @@ void prom_sync_me(void)
"nop\n\t"
"nop\n\t"
"nop\n\t" : : "r" (prom_tbr));
restore_flags(flags);
local_irq_restore(flags);
return;
}
......
@@ -30,6 +30,8 @@
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/hardirq.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
@@ -261,6 +263,9 @@ unsigned int prof_multiplier[NR_CPUS];
unsigned int prof_counter[NR_CPUS];
extern unsigned int lvl14_resolution;
/* /proc/profile writes can call this, don't __init it please. */
static spinlock_t prof_setup_lock = SPIN_LOCK_UNLOCKED;
int setup_profiling_timer(unsigned int multiplier)
{
int i;
@@ -270,14 +275,14 @@ int setup_profiling_timer(unsigned int multiplier)
if((!multiplier) || (lvl14_resolution / multiplier) < 500)
return -EINVAL;
save_and_cli(flags);
spin_lock_irqsave(&prof_setup_lock, flags);
for(i = 0; i < NR_CPUS; i++) {
if(cpu_present_map & (1 << i)) {
load_profile_irq(mid_xlate[i], lvl14_resolution / multiplier);
prof_multiplier[i] = multiplier;
}
}
restore_flags(flags);
spin_unlock_irqrestore(&prof_setup_lock, flags);
return 0;
}
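Two details worth noting in the setup_profiling_timer() hunk: the function gains a lock scoped to the one thing it protects instead of blacking out interrupts machine-wide, and the comment above it records why it must stay out of __init (writes to /proc/profile can call it long after boot). The per-resource lock shape, names from the hunk:

/* One small lock per independent resource: contention on the
 * profiling multiplier no longer stalls unrelated code paths. */
static spinlock_t prof_setup_lock = SPIN_LOCK_UNLOCKED;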
......
@@ -68,7 +68,7 @@ static void sun4c_disable_irq(unsigned int irq_nr)
unsigned long flags;
unsigned char current_mask, new_mask;
save_and_cli(flags);
local_irq_save(flags);
irq_nr &= (NR_IRQS - 1);
current_mask = *interrupt_enable;
switch(irq_nr) {
@@ -85,11 +85,11 @@ static void sun4c_disable_irq(unsigned int irq_nr)
new_mask = ((current_mask) & (~(SUN4C_INT_E14)));
break;
default:
restore_flags(flags);
local_irq_restore(flags);
return;
}
*interrupt_enable = new_mask;
restore_flags(flags);
local_irq_restore(flags);
}
static void sun4c_enable_irq(unsigned int irq_nr)
@@ -97,7 +97,7 @@ static void sun4c_enable_irq(unsigned int irq_nr)
unsigned long flags;
unsigned char current_mask, new_mask;
save_and_cli(flags);
local_irq_save(flags);
irq_nr &= (NR_IRQS - 1);
current_mask = *interrupt_enable;
switch(irq_nr) {
@@ -114,11 +114,11 @@ static void sun4c_enable_irq(unsigned int irq_nr)
new_mask = ((current_mask) | SUN4C_INT_E14);
break;
default:
restore_flags(flags);
local_irq_restore(flags);
return;
}
*interrupt_enable = new_mask;
restore_flags(flags);
local_irq_restore(flags);
}
#define TIMER_IRQ 10 /* Also at level 14, but we ignore that one. */
......
@@ -38,6 +38,7 @@
#include <asm/pgtable.h>
#include <asm/sbus.h>
#include <asm/sbi.h>
#include <asm/cacheflush.h>
/* If you trust current SCSI layer to handle different SCSI IRQs, enable this. I don't trust it... -jj */
/* #define DISTRIBUTE_IRQS */
@@ -54,6 +55,7 @@ unsigned char sbus_tid[32];
#endif
extern struct irqaction *irq_action[];
extern spinlock_t irq_action_lock;
struct sbus_action {
struct irqaction *action;
@@ -77,30 +79,32 @@ int show_sun4d_interrupts(struct seq_file *p, void *v)
{
int i = *(loff_t *) v, j = 0, k = 0, sbusl;
struct irqaction * action;
unsigned long flags;
#ifdef CONFIG_SMP
int x;
#endif
spin_lock_irqsave(&irq_action_lock, flags);
if (i < NR_IRQS) {
sbusl = pil_to_sbus[i];
if (!sbusl) {
action = *(i + irq_action);
if (!action)
goto out;
goto out_unlock;
} else {
for (j = 0; j < nsbi; j++) {
for (k = 0; k < 4; k++)
if ((action = sbus_actions [(j << 5) + (sbusl << 2) + k].action))
goto found_it;
}
goto out;
goto out_unlock;
}
found_it: seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
seq_printf(p, "%10u ", kstat_irqs(i));
#else
for (x = 0; x < NR_CPUS; x++) {
if (cpu_online)
if (cpu_online(x))
seq_printf(p, "%10u ",
kstat_cpu(cpu_logical_map(x)).irqs[i]);
}
@@ -128,7 +132,8 @@ found_it: seq_printf(p, "%3d: ", i);
}
seq_putc(p, '\n');
}
out:
out_unlock:
spin_unlock_irqrestore(&irq_action_lock, flags);
return 0;
}
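A genuine bug fix hides in this hunk as well: the old test `if (cpu_online)` evaluated the symbol cpu_online itself, which is unconditionally true, instead of asking whether CPU x is online; `if (cpu_online(x))` asks the intended question. A reduced user-space stand-in (cpu_online mocked) showing the failure class:

#include <stdio.h>

static int cpu_online(int cpu)		/* mock: only CPU 0 is online */
{
	return cpu == 0;
}

int main(void)
{
	int x = 3;

	if (cpu_online)			/* function address: always true */
		printf("old test fires even for offline CPU %d\n", x);
	if (cpu_online(x))		/* real per-CPU check */
		printf("new test fires only when %d is online\n", x);
	return 0;
}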
@@ -138,6 +143,7 @@ void sun4d_free_irq(unsigned int irq, void *dev_id)
struct irqaction *tmp = NULL;
unsigned long flags;
spin_lock_irqsave(&irq_action_lock, flags);
if (irq < 15)
actionp = irq + irq_action;
else
@@ -145,7 +151,7 @@ void sun4d_free_irq(unsigned int irq, void *dev_id)
action = *actionp;
if (!action) {
printk("Trying to free free IRQ%d\n",irq);
return;
goto out_unlock;
}
if (dev_id) {
for (; action; action = action->next) {
@@ -155,11 +161,11 @@ void sun4d_free_irq(unsigned int irq, void *dev_id)
}
if (!action) {
printk("Trying to free free shared IRQ%d\n",irq);
return;
goto out_unlock;
}
} else if (action->flags & SA_SHIRQ) {
printk("Trying to free shared IRQ%d with NULL device ID\n", irq);
return;
goto out_unlock;
}
if (action->flags & SA_STATIC_ALLOC)
{
@@ -168,21 +174,27 @@ void sun4d_free_irq(unsigned int irq, void *dev_id)
*/
printk("Attempt to free statically allocated IRQ%d (%s)\n",
irq, action->name);
return;
goto out_unlock;
}
save_and_cli(flags);
if (action && tmp)
tmp->next = action->next;
else
*actionp = action->next;
spin_unlock_irqrestore(&irq_action_lock, flags);
synchronize_irq(irq);
spin_lock_irqsave(&irq_action_lock, flags);
kfree(action);
if (!(*actionp))
disable_irq(irq);
restore_flags(flags);
out_unlock:
spin_unlock_irqrestore(&irq_action_lock, flags);
}
extern void unexpected_irq(int, void *, struct pt_regs *);
@@ -268,12 +280,19 @@ int sun4d_request_irq(unsigned int irq,
{
struct irqaction *action, *tmp = NULL, **actionp;
unsigned long flags;
int ret;
if(irq > 14 && irq < (1 << 5)) {
ret = -EINVAL;
goto out;
}
if(irq > 14 && irq < (1 << 5))
return -EINVAL;
if (!handler) {
ret = -EINVAL;
goto out;
}
if (!handler)
return -EINVAL;
spin_lock_irqsave(&irq_action_lock, flags);
if (irq >= (1 << 5))
actionp = &(sbus_actions[irq - (1 << 5)].action);
@@ -285,17 +304,17 @@ int sun4d_request_irq(unsigned int irq,
if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ)) {
for (tmp = action; tmp->next; tmp = tmp->next);
} else {
return -EBUSY;
ret = -EBUSY;
goto out_unlock;
}
if ((action->flags & SA_INTERRUPT) ^ (irqflags & SA_INTERRUPT)) {
printk("Attempt to mix fast and slow interrupts on IRQ%d denied\n", irq);
return -EBUSY;
ret = -EBUSY;
goto out_unlock;
}
action = NULL; /* Or else! */
}
save_and_cli(flags);
/* If this is flagged as statically allocated then we use our
* private struct which is never freed.
*/
@@ -303,16 +322,16 @@ int sun4d_request_irq(unsigned int irq,
if (static_irq_count < MAX_STATIC_ALLOC)
action = &static_irqaction[static_irq_count++];
else
printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n",irq, devname);
printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n", irq, devname);
}
if (action == NULL)
action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
GFP_KERNEL);
GFP_ATOMIC);
if (!action) {
restore_flags(flags);
return -ENOMEM;
ret = -ENOMEM;
goto out_unlock;
}
action->handler = handler;
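Paired with the locking change above: the allocation now happens with irq_action_lock held and interrupts off, so it may no longer sleep, hence GFP_KERNEL becomes GFP_ATOMIC. The rule in miniature:

#include <linux/interrupt.h>
#include <linux/slab.h>

/* Caller holds a spinlock with IRQs disabled: sleeping is illegal,
 * so GFP_ATOMIC (may fail, never blocks) is the only valid choice;
 * GFP_KERNEL could schedule() away while holding the lock. */
static struct irqaction *alloc_action_locked(void)	/* illustrative */
{
	return kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
}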
@@ -328,8 +347,12 @@ int sun4d_request_irq(unsigned int irq,
*actionp = action;
enable_irq(irq);
restore_flags(flags);
return 0;
ret = 0;
out_unlock:
spin_unlock_irqrestore(&irq_action_lock, flags);
out:
return ret;
}
static void sun4d_disable_irq(unsigned int irq)
......
@@ -37,6 +37,7 @@
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/sbus.h>
#include <asm/cacheflush.h>
static unsigned long dummy;
@@ -116,12 +117,12 @@ static void sun4m_disable_irq(unsigned int irq_nr)
int cpu = smp_processor_id();
mask = sun4m_get_irqmask(irq_nr);
save_and_cli(flags);
local_irq_save(flags);
if (irq_nr > 15)
sun4m_interrupts->set = mask;
else
sun4m_interrupts->cpu_intregs[cpu].set = mask;
restore_flags(flags);
local_irq_restore(flags);
}
static void sun4m_enable_irq(unsigned int irq_nr)
@@ -135,16 +136,16 @@ static void sun4m_enable_irq(unsigned int irq_nr)
*/
if (irq_nr != 0x0b) {
mask = sun4m_get_irqmask(irq_nr);
save_and_cli(flags);
local_irq_save(flags);
if (irq_nr > 15)
sun4m_interrupts->clear = mask;
else
sun4m_interrupts->cpu_intregs[cpu].clear = mask;
restore_flags(flags);
local_irq_restore(flags);
} else {
save_and_cli(flags);
local_irq_save(flags);
sun4m_interrupts->clear = SUN4M_INT_FLOPPY;
restore_flags(flags);
local_irq_restore(flags);
}
}
@@ -167,8 +168,8 @@ static unsigned long cpu_pil_to_imask[16] = {
/*15*/ 0x00000000
};
/* We assume the caller is local cli()'d when these are called, or else
* very bizarre behavior will result.
/* We assume the caller has disabled local interrupts when these are called,
* or else very bizarre behavior will result.
*/
static void sun4m_disable_pil_irq(unsigned int pil)
{
......
@@ -260,7 +260,7 @@ void do_fpd_trap(struct pt_regs *regs, unsigned long pc, unsigned long npc,
} else {
fpload(&current->thread.float_regs[0], &current->thread.fsr);
}
current->flags |= PF_USEDFPU;
current_thread_info()->flags |= _TIF_USEDFPU;
#endif
}
@@ -291,7 +291,7 @@ void do_fpe_trap(struct pt_regs *regs, unsigned long pc, unsigned long npc,
#ifndef CONFIG_SMP
if(!fpt) {
#else
if(!(fpt->flags & PF_USEDFPU)) {
if(!(fpt->thread_info->flags & _TIF_USEDFPU)) {
#endif
fpsave(&fake_regs[0], &fake_fsr, &fake_queue[0], &fake_depth);
regs->psr &= ~PSR_EF;
@@ -334,7 +334,7 @@ void do_fpe_trap(struct pt_regs *regs, unsigned long pc, unsigned long npc,
/* nope, better SIGFPE the offending process... */
#ifdef CONFIG_SMP
fpt->flags &= ~PF_USEDFPU;
fpt->thread_info->flags &= ~_TIF_USEDFPU;
#endif
if(psr & PSR_PS) {
/* The first fsr store/load we tried trapped,
......
@@ -311,15 +311,19 @@ __asm__ __volatile__ ( \
store_common(dst_addr, size, src_val, errh); \
})
/* XXX Need to capture/release other cpu's for SMP around this. */
extern void smp_capture(void);
extern void smp_release(void);
#define do_atomic(srcdest_reg, mem, errh) ({ \
unsigned long flags, tmp; \
\
save_and_cli(flags); \
smp_capture(); \
local_irq_save(flags); \
tmp = *srcdest_reg; \
do_integer_load(srcdest_reg, 4, mem, 0, errh); \
store_common(mem, 4, &tmp, errh); \
restore_flags(flags); \
local_irq_restore(flags); \
smp_release(); \
})
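The unaligned-trap fixup layers two exclusions around its non-atomic read-modify-write: smp_capture() (declared extern above; assumed to corral the other CPUs, addressing the old XXX note) and a local interrupt disable. They nest, and are released in reverse order. The shape, as an illustrative wrapper:

extern void smp_capture(void);
extern void smp_release(void);

static void emulated_rmw(void)		/* illustrative */
{
	unsigned long flags;

	smp_capture();			/* park the other CPUs first        */
	local_irq_save(flags);		/* then shut out local handlers     */
	/* ... emulate the atomic op with plain loads and stores ... */
	local_irq_restore(flags);
	smp_release();			/* widest exclusion released last   */
}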
static inline void advance(struct pt_regs *regs)
......
@@ -441,13 +441,13 @@ asmlinkage void do_sun4c_fault(struct pt_regs *regs, int text_fault, int write,
_SUN4C_PAGE_VALID |
_SUN4C_PAGE_DIRTY);
save_and_cli(flags);
local_irq_save(flags);
if (sun4c_get_segmap(address) != invalid_segment) {
sun4c_put_pte(address, pte_val(*ptep));
restore_flags(flags);
local_irq_restore(flags);
return;
}
restore_flags(flags);
local_irq_restore(flags);
}
} else {
if ((pte_val(*ptep) & (_SUN4C_PAGE_READ|_SUN4C_PAGE_PRESENT))
@@ -457,13 +457,13 @@ asmlinkage void do_sun4c_fault(struct pt_regs *regs, int text_fault, int write,
*ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_ACCESSED |
_SUN4C_PAGE_VALID);
save_and_cli(flags);
local_irq_save(flags);
if (sun4c_get_segmap(address) != invalid_segment) {
sun4c_put_pte(address, pte_val(*ptep));
restore_flags(flags);
local_irq_restore(flags);
return;
}
restore_flags(flags);
local_irq_restore(flags);
}
}
}
......
@@ -836,7 +836,7 @@ static void sun4c_demap_context(struct sun4c_mmu_ring *crp, unsigned char ctx)
struct sun4c_mmu_entry *head = &crp->ringhd;
unsigned long flags;
save_and_cli(flags);
local_irq_save(flags);
if (head->next != head) {
struct sun4c_mmu_entry *entry = head->next;
int savectx = sun4c_get_context();
@@ -854,7 +854,7 @@ static void sun4c_demap_context(struct sun4c_mmu_ring *crp, unsigned char ctx)
} while (entry != head);
sun4c_set_context(savectx);
}
restore_flags(flags);
local_irq_restore(flags);
}
static int sun4c_user_taken_entries; /* This is how much we have. */
@@ -978,14 +978,14 @@ static void get_locked_segment(unsigned long addr)
struct sun4c_mmu_entry *stolen;
unsigned long flags;
save_and_cli(flags);
local_irq_save(flags);
addr &= SUN4C_REAL_PGDIR_MASK;
stolen = sun4c_user_strategy();
max_user_taken_entries--;
stolen->vaddr = addr;
flush_user_windows();
sun4c_kernel_map(stolen);
restore_flags(flags);
local_irq_restore(flags);
}
static void free_locked_segment(unsigned long addr)
@@ -994,7 +994,7 @@ static void free_locked_segment(unsigned long addr)
unsigned long flags;
unsigned char pseg;
save_and_cli(flags);
local_irq_save(flags);
addr &= SUN4C_REAL_PGDIR_MASK;
pseg = sun4c_get_segmap(addr);
entry = &mmu_entry_pool[pseg];
@@ -1004,7 +1004,7 @@ static void free_locked_segment(unsigned long addr)
sun4c_kernel_unmap(entry);
add_ring(&sun4c_ufree_ring, entry);
max_user_taken_entries++;
restore_flags(flags);
local_irq_restore(flags);
}
static inline void garbage_collect(int entry)
@@ -1123,7 +1123,7 @@ static char *sun4c_lockarea(char *vaddr, unsigned long size)
size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
scan = 0;
save_and_cli(flags);
local_irq_save(flags);
for (;;) {
scan = find_next_zero_bit(sun4c_iobuffer_map,
iobuffer_map_size, scan);
@@ -1157,12 +1157,12 @@ static char *sun4c_lockarea(char *vaddr, unsigned long size)
sun4c_put_pte(apage, pte);
vpage += PAGE_SIZE;
}
restore_flags(flags);
local_irq_restore(flags);
return (char *) ((base << PAGE_SHIFT) + sun4c_iobuffer_start +
(((unsigned long) vaddr) & ~PAGE_MASK));
abend:
restore_flags(flags);
local_irq_restore(flags);
printk("DMA vaddr=0x%p size=%08lx\n", vaddr, size);
panic("Out of iobuffer table");
return 0;
@@ -1178,7 +1178,7 @@ static void sun4c_unlockarea(char *vaddr, unsigned long size)
npages = (((unsigned long)vaddr & ~PAGE_MASK) +
size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
save_and_cli(flags);
local_irq_save(flags);
while (npages != 0) {
--npages;
@@ -1200,7 +1200,7 @@ static void sun4c_unlockarea(char *vaddr, unsigned long size)
sun4c_iobuffer_high -= SUN4C_REAL_PGDIR_SIZE;
free_locked_segment(sun4c_iobuffer_high);
}
restore_flags(flags);
local_irq_restore(flags);
}
/* Note the scsi code at init time passes to here buffers
@@ -1349,7 +1349,7 @@ static void sun4c_flush_cache_mm(struct mm_struct *mm)
struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
unsigned long flags;
save_and_cli(flags);
local_irq_save(flags);
if (head->next != head) {
struct sun4c_mmu_entry *entry = head->next;
int savectx = sun4c_get_context();
@@ -1366,7 +1366,7 @@ static void sun4c_flush_cache_mm(struct mm_struct *mm)
} while (entry != head);
sun4c_set_context(savectx);
}
restore_flags(flags);
local_irq_restore(flags);
}
}
}
@@ -1383,7 +1383,7 @@ static void sun4c_flush_cache_range(struct vm_area_struct *vma, unsigned long st
flush_user_windows();
save_and_cli(flags);
local_irq_save(flags);
/* All user segmap chains are ordered on entry->vaddr. */
for (entry = head->next;
(entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start);
@@ -1427,7 +1427,7 @@ static void sun4c_flush_cache_range(struct vm_area_struct *vma, unsigned long st
} while ((entry != head) && (entry->vaddr < end));
sun4c_set_context(octx);
}
restore_flags(flags);
local_irq_restore(flags);
}
}
@@ -1444,11 +1444,11 @@ static void sun4c_flush_cache_page(struct vm_area_struct *vma, unsigned long pag
unsigned long flags;
flush_user_windows();
save_and_cli(flags);
local_irq_save(flags);
sun4c_set_context(new_ctx);
sun4c_flush_page(page);
sun4c_set_context(octx);
restore_flags(flags);
local_irq_restore(flags);
}
}
@@ -1456,9 +1456,9 @@ static void sun4c_flush_page_to_ram(unsigned long page)
{
unsigned long flags;
save_and_cli(flags);
local_irq_save(flags);
sun4c_flush_page(page);
restore_flags(flags);
local_irq_restore(flags);
}
/* Sun4c cache is unified, both instructions and data live there, so
@@ -1479,7 +1479,7 @@ static void sun4c_flush_tlb_all(void)
unsigned long flags;
int savectx, ctx;
save_and_cli(flags);
local_irq_save(flags);
this_entry = sun4c_kernel_ring.ringhd.next;
savectx = sun4c_get_context();
flush_user_windows();
@@ -1494,7 +1494,7 @@ static void sun4c_flush_tlb_all(void)
this_entry = next_entry;
}
sun4c_set_context(savectx);
restore_flags(flags);
local_irq_restore(flags);
}
static void sun4c_flush_tlb_mm(struct mm_struct *mm)
@@ -1505,7 +1505,7 @@ static void sun4c_flush_tlb_mm(struct mm_struct *mm)
struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
unsigned long flags;
save_and_cli(flags);
local_irq_save(flags);
if (head->next != head) {
struct sun4c_mmu_entry *entry = head->next;
int savectx = sun4c_get_context();
@@ -1522,7 +1522,7 @@ static void sun4c_flush_tlb_mm(struct mm_struct *mm)
} while (entry != head);
sun4c_set_context(savectx);
}
restore_flags(flags);
local_irq_restore(flags);
}
}
@@ -1536,7 +1536,7 @@ static void sun4c_flush_tlb_range(struct vm_area_struct *vma, unsigned long star
struct sun4c_mmu_entry *entry;
unsigned long flags;
save_and_cli(flags);
local_irq_save(flags);
/* See commentary in sun4c_flush_cache_range(). */
for (entry = head->next;
(entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start);
@@ -1558,7 +1558,7 @@ static void sun4c_flush_tlb_range(struct vm_area_struct *vma, unsigned long star
} while ((entry != head) && (entry->vaddr < end));
sun4c_set_context(octx);
}
restore_flags(flags);
local_irq_restore(flags);
}
}
@@ -1571,13 +1571,13 @@ static void sun4c_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
int savectx = sun4c_get_context();
unsigned long flags;
save_and_cli(flags);
local_irq_save(flags);
sun4c_set_context(new_ctx);
page &= PAGE_MASK;
sun4c_flush_page(page);
sun4c_put_pte(page, 0);
sun4c_set_context(savectx);
restore_flags(flags);
local_irq_restore(flags);
}
}
@@ -1974,7 +1974,7 @@ void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, p
unsigned long flags;
int pseg;
save_and_cli(flags);
local_irq_save(flags);
address &= PAGE_MASK;
if ((pseg = sun4c_get_segmap(address)) == invalid_segment) {
struct sun4c_mmu_entry *entry = sun4c_user_strategy();
@@ -2010,7 +2010,7 @@ void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, p
#ifndef SUN4C_PRELOAD_PSEG
sun4c_put_pte(address, pte_val(pte));
#endif
restore_flags(flags);
local_irq_restore(flags);
return;
} else {
struct sun4c_mmu_entry *entry = &mmu_entry_pool[pseg];
@@ -2020,7 +2020,7 @@ void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, p
}
sun4c_put_pte(address, pte_val(pte));
restore_flags(flags);
local_irq_restore(flags);
}
extern void sparc_context_init(int);
......
@@ -15,15 +15,6 @@
/* entry.S is sensitive to the offsets of these fields */ /* XXX P3 Is it? */
typedef struct {
unsigned int __softirq_pending;
unsigned int __unused_1;
#ifndef CONFIG_SMP
unsigned int WAS__local_irq_count;
#else
unsigned int __unused_on_SMP; /* DaveM says use brlock for SMP irq. KAO */
#endif
unsigned int WAS__local_bh_count;
unsigned int __syscall_count;
struct task_struct * __ksoftirqd_task;
} ____cacheline_aligned irq_cpustat_t;
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
@@ -86,11 +77,11 @@ typedef struct {
#define hardirq_trylock() (!in_interrupt())
#define hardirq_endlock() do { } while (0)
#ifndef CONFIG_SMP
#define irq_enter() (preempt_count() += HARDIRQ_OFFSET)
#ifdef CONFIG_PREEMPT
# define in_atomic() (preempt_count() != kernel_locked())
#include <linux/smp_lock.h>
# define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked())
# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
#else
# define in_atomic() (preempt_count() != 0)
@@ -104,63 +95,10 @@ do { \
preempt_enable_no_resched(); \
} while (0)
#else
/* Note that local_irq_count() is replaced by sparc64 specific version for SMP */
/* XXX This is likely to be broken by the above preempt-based IRQs */
#define irq_enter() br_read_lock(BR_GLOBALIRQ_LOCK)
#undef local_irq_count
#define local_irq_count(cpu) (__brlock_array[cpu][BR_GLOBALIRQ_LOCK])
#define irq_exit() br_read_unlock(BR_GLOBALIRQ_LOCK)
#endif
#ifdef CONFIG_PREEMPT
# define in_atomic() (preempt_count() != kernel_locked())
#else
# define in_atomic() (preempt_count() != 0)
#endif
#ifndef CONFIG_SMP
#define synchronize_irq(irq) barrier()
#else /* (CONFIG_SMP) */
static __inline__ int irqs_running(void)
{
int i;
for (i = 0; i < smp_num_cpus; i++)
if (local_irq_count(cpu_logical_map(i)))
return 1;
return 0;
}
extern unsigned char global_irq_holder;
static inline void release_irqlock(int cpu)
{
/* if we didn't own the irq lock, just ignore... */
if(global_irq_holder == (unsigned char) cpu) {
global_irq_holder = NO_PROC_ID;
br_write_unlock(BR_GLOBALIRQ_LOCK);
}
}
#if 0
static inline int hardirq_trylock(int cpu)
{
spinlock_t *lock = &__br_write_locks[BR_GLOBALIRQ_LOCK].lock;
return (!local_irq_count(cpu) && !spin_is_locked(lock));
}
#endif
# define synchronize_irq(irq) barrier()
#else /* SMP */
extern void synchronize_irq(unsigned int irq);
#endif /* CONFIG_SMP */
// extern void show_stack(unsigned long * esp);
#endif /* SMP */
#endif /* __SPARC_HARDIRQ_H */
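Net result of the hardirq.h surgery, once the brlock machinery (irqs_running, global_irq_holder, release_irqlock) is gone, is roughly this split:

#ifndef CONFIG_SMP
# define synchronize_irq(irq)	barrier()	/* one CPU: no handler can
						 * be running elsewhere */
#else
extern void synchronize_irq(unsigned int irq);	/* per-IRQ; irq.c stub */
#endif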
@@ -53,6 +53,8 @@ struct thread_info {
/*
* macros/functions for gaining access to the thread information structure
*
* preempt_count needs to be 1 initially, until the scheduler is functional.
*/
#define INIT_THREAD_INFO(tsk) \
{ \
@@ -61,6 +63,7 @@ struct thread_info {
.exec_domain = &default_exec_domain, \
.flags = 0, \
.cpu = 0, \
.preempt_count = 1, \
.restart_block = { \
.fn = do_no_restart_syscall, \
}, \
......
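The thread_info.h change is independent of the IRQ work: .preempt_count now starts at 1, so the boot task counts as atomic and cannot be preempted before the scheduler is functional, exactly as the added comment says; generic boot code lowers the count once scheduling is safe. An illustrative consequence, not code from this tree:

#include <linux/preempt.h>

static inline int early_boot_preemptible(void)	/* illustrative */
{
	/* preempt_count was preset to 1 by INIT_THREAD_INFO, so this
	 * stays false until the count is dropped later in boot */
	return preempt_count() == 0;
}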