Commit f9db97d7 authored by Linus Torvalds

Merge branch 'parisc-5.7-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux

Pull parisc updates from Helge Deller:
 "Some cleanups in arch_rw locking functions, improved interrupt
  handling in arch spinlocks, conversions to request_irq() and syscall
  table generation cleanups"

* 'parisc-5.7-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux:
  parisc: remove nargs from __SYSCALL
  parisc: Refactor alternative code to accept multiple conditions
  parisc: Rework arch_rw locking functions
  parisc: Improve interrupt handling in arch_spin_lock_flags()
  parisc: Replace setup_irq() by request_irq()
parents 12782fbe 106c9092
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -10,25 +10,34 @@
 static inline int arch_spin_is_locked(arch_spinlock_t *x)
 {
         volatile unsigned int *a = __ldcw_align(x);
-        smp_mb();
         return *a == 0;
 }
 
-#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)
+static inline void arch_spin_lock(arch_spinlock_t *x)
+{
+        volatile unsigned int *a;
+
+        a = __ldcw_align(x);
+        while (__ldcw(a) == 0)
+                while (*a == 0)
+                        cpu_relax();
+}
 
 static inline void arch_spin_lock_flags(arch_spinlock_t *x,
                                         unsigned long flags)
 {
         volatile unsigned int *a;
+        unsigned long flags_dis;
 
         a = __ldcw_align(x);
-        while (__ldcw(a) == 0)
+        while (__ldcw(a) == 0) {
+                local_save_flags(flags_dis);
+                local_irq_restore(flags);
                 while (*a == 0)
-                        if (flags & PSW_SM_I) {
-                                local_irq_enable();
-                                cpu_relax();
-                                local_irq_disable();
-                        } else
-                                cpu_relax();
+                        cpu_relax();
+                local_irq_restore(flags_dis);
+        }
 }
 #define arch_spin_lock_flags arch_spin_lock_flags
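
The point of the reworked arch_spin_lock_flags() is that a CPU spinning on a contended lock temporarily drops back to the caller's saved interrupt state (which may have interrupts enabled) instead of busy-waiting with interrupts hard-disabled, then disables them again before retrying the ldcw. Below is a minimal user-space sketch of that pattern, not the kernel code itself: save_flags()/restore_flags() are stand-ins for local_save_flags()/local_irq_restore(), and fake_ldcw() imitates ldcw's atomic read-and-zero semantics with an atomic exchange.

#include <stdatomic.h>
#include <stdio.h>

static int irqs_on = 1;                        /* stand-in for the PSW I-bit */

static unsigned long save_flags(void)         { return irqs_on; }
static void restore_flags(unsigned long f)    { irqs_on = (int)f; }

/* ldcw-style primitive: atomically read the word and zero it;
 * a non-zero return value means we took the lock. */
static int fake_ldcw(atomic_int *a)
{
        return atomic_exchange(a, 0);
}

static void lock_with_flags(atomic_int *a, unsigned long callers_flags)
{
        while (fake_ldcw(a) == 0) {
                unsigned long flags_dis = save_flags(); /* "irqs off" state */
                restore_flags(callers_flags);           /* maybe re-enable  */
                while (atomic_load(a) == 0)             /* spin politely    */
                        ;                               /* cpu_relax()      */
                restore_flags(flags_dis);               /* off again, retry */
        }
}

int main(void)
{
        atomic_int lock = 1;                            /* 1 == unlocked */
        lock_with_flags(&lock, save_flags());
        atomic_store(&lock, 1);                         /* unlock */
        puts("lock acquired and released");
        return 0;
}

The inner spin on atomic_load() mirrors the kernel's read-only wait on *a: the expensive exchange is only retried once the word looks free again.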
@@ -58,116 +67,93 @@ static inline int arch_spin_trylock(arch_spinlock_t *x)
 
 /*
  * Read-write spinlocks, allowing multiple readers but only one writer.
- * Linux rwlocks are unfair to writers; they can be starved for an indefinite
- * time by readers.  With care, they can also be taken in interrupt context.
+ * Unfair locking as Writers could be starved indefinitely by Reader(s)
  *
- * In the PA-RISC implementation, we have a spinlock and a counter.
- * Readers use the lock to serialise their access to the counter (which
- * records how many readers currently hold the lock).
- * Writers hold the spinlock, preventing any readers or other writers from
- * grabbing the rwlock.
+ * The spinlock itself is contained in @counter and access to it is
+ * serialized with @lock_mutex.
  */
 
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to grab the same read lock */
-static  __inline__ void arch_read_lock(arch_rwlock_t *rw)
+/* 1 - lock taken successfully */
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
+        int ret = 0;
         unsigned long flags;
-        local_irq_save(flags);
-        arch_spin_lock_flags(&rw->lock, flags);
-        rw->counter++;
-        arch_spin_unlock(&rw->lock);
-        local_irq_restore(flags);
-}
 
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to grab the same read lock */
-static  __inline__ void arch_read_unlock(arch_rwlock_t *rw)
-{
-        unsigned long flags;
         local_irq_save(flags);
-        arch_spin_lock_flags(&rw->lock, flags);
-        rw->counter--;
-        arch_spin_unlock(&rw->lock);
+        arch_spin_lock(&(rw->lock_mutex));
+
+        /*
+         * zero means writer holds the lock exclusively, deny Reader.
+         * Otherwise grant lock to first/subseq reader
+         */
+        if (rw->counter > 0) {
+                rw->counter--;
+                ret = 1;
+        }
+
+        arch_spin_unlock(&(rw->lock_mutex));
         local_irq_restore(flags);
+
+        return ret;
 }
 
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to grab the same read lock */
-static __inline__ int arch_read_trylock(arch_rwlock_t *rw)
+/* 1 - lock taken successfully */
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
+        int ret = 0;
         unsigned long flags;
- retry:
+
         local_irq_save(flags);
-        if (arch_spin_trylock(&rw->lock)) {
-                rw->counter++;
-                arch_spin_unlock(&rw->lock);
-                local_irq_restore(flags);
-                return 1;
+        arch_spin_lock(&(rw->lock_mutex));
+
+        /*
+         * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
+         * deny writer. Otherwise if unlocked grant to writer
+         * Hence the claim that Linux rwlocks are unfair to writers.
+         * (can be starved for an indefinite time by readers).
+         */
+        if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
+                rw->counter = 0;
+                ret = 1;
         }
-
+        arch_spin_unlock(&(rw->lock_mutex));
         local_irq_restore(flags);
-        /* If write-locked, we fail to acquire the lock */
-        if (rw->counter < 0)
-                return 0;
 
-        /* Wait until we have a realistic chance at the lock */
-        while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0)
-                cpu_relax();
+        return ret;
+}
 
-        goto retry;
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+        while (!arch_read_trylock(rw))
+                cpu_relax();
 }
 
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ void arch_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
+        while (!arch_write_trylock(rw))
+                cpu_relax();
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
         unsigned long flags;
-retry:
-        local_irq_save(flags);
-        arch_spin_lock_flags(&rw->lock, flags);
-
-        if (rw->counter != 0) {
-                arch_spin_unlock(&rw->lock);
-                local_irq_restore(flags);
-
-                while (rw->counter != 0)
-                        cpu_relax();
 
-                goto retry;
-        }
-
-        rw->counter = -1; /* mark as write-locked */
-        mb();
+        local_irq_save(flags);
+        arch_spin_lock(&(rw->lock_mutex));
+        rw->counter++;
+        arch_spin_unlock(&(rw->lock_mutex));
         local_irq_restore(flags);
 }
 
-static __inline__ void arch_write_unlock(arch_rwlock_t *rw)
-{
-        rw->counter = 0;
-        arch_spin_unlock(&rw->lock);
-}
-
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
         unsigned long flags;
-        int result = 0;
 
         local_irq_save(flags);
-        if (arch_spin_trylock(&rw->lock)) {
-                if (rw->counter == 0) {
-                        rw->counter = -1;
-                        result = 1;
-                } else {
-                        /* Read-locked.  Oh well. */
-                        arch_spin_unlock(&rw->lock);
-                }
-        }
+        arch_spin_lock(&(rw->lock_mutex));
+        rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
+        arch_spin_unlock(&(rw->lock_mutex));
         local_irq_restore(flags);
-
-        return result;
 }
 
 #endif /* __ASM_SPINLOCK_H */
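
To see the scheme in one place: the rwlock is now just a counter guarded by a plain spinlock. Readers decrement the counter while it is positive; a writer claims the lock only when the counter still holds the full "unlocked" value, setting it to zero. A user-space sketch of that logic follows; the struct, function names and the pthread mutex standing in for lock_mutex are illustrative, not the kernel API.

#include <pthread.h>
#include <stdio.h>

#define RW_UNLOCKED 0x01000000u         /* mirrors __ARCH_RW_LOCK_UNLOCKED__ */

struct sketch_rwlock {
        pthread_mutex_t lock_mutex;     /* serializes access to counter */
        unsigned int counter;
};

static int sketch_read_trylock(struct sketch_rwlock *rw)
{
        int ret = 0;

        pthread_mutex_lock(&rw->lock_mutex);
        if (rw->counter > 0) {          /* 0 means a writer holds it */
                rw->counter--;
                ret = 1;
        }
        pthread_mutex_unlock(&rw->lock_mutex);
        return ret;
}

static int sketch_write_trylock(struct sketch_rwlock *rw)
{
        int ret = 0;

        pthread_mutex_lock(&rw->lock_mutex);
        if (rw->counter == RW_UNLOCKED) { /* no readers and no writer */
                rw->counter = 0;
                ret = 1;
        }
        pthread_mutex_unlock(&rw->lock_mutex);
        return ret;
}

int main(void)
{
        struct sketch_rwlock rw = { PTHREAD_MUTEX_INITIALIZER, RW_UNLOCKED };

        printf("reader: %d\n", sketch_read_trylock(&rw));  /* 1: granted */
        printf("writer: %d\n", sketch_write_trylock(&rw)); /* 0: reader held */
        return 0;
}

Because a waiting writer never reserves the counter, newly arriving readers keep succeeding ahead of it, which is exactly the writer starvation the new comment warns about.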
--- a/arch/parisc/include/asm/spinlock_types.h
+++ b/arch/parisc/include/asm/spinlock_types.h
@@ -12,11 +12,19 @@ typedef struct {
 #endif
 } arch_spinlock_t;
 
+/* counter:
+ * Unlocked     : 0x0100_0000
+ * Read lock(s) : 0x00FF_FFFF to 0x01 (Multiple Readers decrement it)
+ * Write lock   : 0x0, but only if prior value is "unlocked" 0x0100_0000
+ */
 typedef struct {
-        arch_spinlock_t lock;
-        volatile int counter;
+        arch_spinlock_t         lock_mutex;
+        volatile unsigned int   counter;
 } arch_rwlock_t;
 
-#define __ARCH_RW_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED, 0 }
+#define __ARCH_RW_LOCK_UNLOCKED__       0x01000000
+#define __ARCH_RW_LOCK_UNLOCKED         { .lock_mutex = __ARCH_SPIN_LOCK_UNLOCKED, \
+                                        .counter = __ARCH_RW_LOCK_UNLOCKED__ }
 
 #endif
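
A quick sanity check of those constants: with __ARCH_RW_LOCK_UNLOCKED__ = 0x0100_0000, N active readers leave the counter at 0x0100_0000 - N, so up to 2^24 concurrent readers fit before the counter could collide with the writer value 0x0; and a writer's comparison against the full 0x0100_0000 succeeds only when no reader is outstanding.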
--- a/arch/parisc/kernel/alternative.c
+++ b/arch/parisc/kernel/alternative.c
@@ -25,6 +25,22 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
         struct alt_instr *entry;
         int index = 0, applied = 0;
         int num_cpus = num_online_cpus();
+        u32 cond_check;
+
+        cond_check = ALT_COND_ALWAYS |
+                ((num_cpus == 1) ? ALT_COND_NO_SMP : 0) |
+                ((cache_info.dc_size == 0) ? ALT_COND_NO_DCACHE : 0) |
+                ((cache_info.ic_size == 0) ? ALT_COND_NO_ICACHE : 0) |
+                (running_on_qemu ? ALT_COND_RUN_ON_QEMU : 0) |
+                ((split_tlb == 0) ? ALT_COND_NO_SPLIT_TLB : 0) |
+                /*
+                 * If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit
+                 * set (bit #61, big endian), we have to flush and sync every
+                 * time IO-PDIR is changed in Ike/Astro.
+                 */
+                (((boot_cpu_data.cpu_type > pcxw_) &&
+                  ((boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC) == 0))
+                        ? ALT_COND_NO_IOC_FDC : 0);
 
         for (entry = start; entry < end; entry++, index++) {
@@ -38,29 +54,14 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
                 WARN_ON(!cond);
-                if (cond != ALT_COND_ALWAYS && no_alternatives)
+                if ((cond & ALT_COND_ALWAYS) == 0 && no_alternatives)
                         continue;
 
                 pr_debug("Check %d: Cond 0x%x, Replace %02d instructions @ 0x%px with 0x%08x\n",
                         index, cond, len, from, replacement);
 
-                if ((cond & ALT_COND_NO_SMP) && (num_cpus != 1))
-                        continue;
-                if ((cond & ALT_COND_NO_DCACHE) && (cache_info.dc_size != 0))
-                        continue;
-                if ((cond & ALT_COND_NO_ICACHE) && (cache_info.ic_size != 0))
-                        continue;
-                if ((cond & ALT_COND_RUN_ON_QEMU) && !running_on_qemu)
-                        continue;
-
-                /*
-                 * If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit
-                 * set (bit #61, big endian), we have to flush and sync every
-                 * time IO-PDIR is changed in Ike/Astro.
-                 */
-                if ((cond & ALT_COND_NO_IOC_FDC) &&
-                    ((boot_cpu_data.cpu_type <= pcxw_) ||
-                     (boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC)))
+                /* Bounce out if none of the conditions are true. */
+                if ((cond & cond_check) == 0)
                         continue;
 
                 /* Want to replace pdtlb by a pdtlb,l instruction? */
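
The refactor folds the per-entry chain of condition tests into one precomputed mask: every condition that holds on this machine is OR'ed into cond_check once, and each alternative is then applied only if it shares a bit with that mask. A small stand-alone sketch of the idea follows; the flag names and values are made up for illustration, not the kernel's ALT_COND_* constants.

#include <stdio.h>

#define COND_ALWAYS   (1u << 0)
#define COND_NO_SMP   (1u << 1)
#define COND_ON_QEMU  (1u << 2)

/* Evaluate each runtime condition exactly once, up front. */
static unsigned int build_cond_check(int num_cpus, int on_qemu)
{
        return COND_ALWAYS |
               ((num_cpus == 1) ? COND_NO_SMP : 0) |
               (on_qemu ? COND_ON_QEMU : 0);
}

int main(void)
{
        unsigned int cond_check = build_cond_check(4, 0);
        unsigned int entries[] = { COND_ALWAYS, COND_NO_SMP, COND_ON_QEMU };

        for (unsigned int i = 0; i < 3; i++) {
                /* One AND replaces the old chain of if/continue tests. */
                if ((entries[i] & cond_check) == 0)
                        printf("entry %u: skipped\n", i);
                else
                        printf("entry %u: applied\n", i);
        }
        return 0;
}

This trades a handful of branches per entry for a single mask test, and it keeps the condition logic in one place when new ALT_COND_* bits (such as ALT_COND_NO_SPLIT_TLB above) are added.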
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -560,33 +560,23 @@ void do_cpu_irq_mask(struct pt_regs *regs)
         goto out;
 }
 
-static struct irqaction timer_action = {
-        .handler = timer_interrupt,
-        .name = "timer",
-        .flags = IRQF_TIMER | IRQF_PERCPU | IRQF_IRQPOLL,
-};
-
-#ifdef CONFIG_SMP
-static struct irqaction ipi_action = {
-        .handler = ipi_interrupt,
-        .name = "IPI",
-        .flags = IRQF_PERCPU,
-};
-#endif
-
 static void claim_cpu_irqs(void)
 {
+        unsigned long flags = IRQF_TIMER | IRQF_PERCPU | IRQF_IRQPOLL;
         int i;
+
         for (i = CPU_IRQ_BASE; i <= CPU_IRQ_MAX; i++) {
                 irq_set_chip_and_handler(i, &cpu_interrupt_type,
                                          handle_percpu_irq);
         }
 
         irq_set_handler(TIMER_IRQ, handle_percpu_irq);
-        setup_irq(TIMER_IRQ, &timer_action);
+        if (request_irq(TIMER_IRQ, timer_interrupt, flags, "timer", NULL))
+                pr_err("Failed to register timer interrupt\n");
#ifdef CONFIG_SMP
         irq_set_handler(IPI_IRQ, handle_percpu_irq);
-        setup_irq(IPI_IRQ, &ipi_action);
+        if (request_irq(IPI_IRQ, ipi_interrupt, IRQF_PERCPU, "IPI", NULL))
+                pr_err("Failed to register IPI interrupt\n");
#endif
 }
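
Both this hunk and the eisa.c one below follow the same mechanical conversion: the static struct irqaction plus setup_irq() pair becomes a single request_irq() call, which allocates the irqaction internally and can fail, so the converted code checks the return value and logs an error. A generic sketch of the shape of the change, with a hypothetical IRQ number, handler and name:

#include <linux/interrupt.h>
#include <linux/printk.h>

#define DEMO_IRQ 42     /* hypothetical interrupt line, illustration only */

static irqreturn_t demo_handler(int irq, void *dev_id)
{
        /* acknowledge and handle the device here */
        return IRQ_HANDLED;
}

static void demo_claim_irq(void)
{
        /* Before: static struct irqaction demo_action = { ... };
         *         setup_irq(DEMO_IRQ, &demo_action);
         * After:  one call; the irq core allocates the irqaction, and
         *         the return value reports failures such as -EBUSY.   */
        if (request_irq(DEMO_IRQ, demo_handler, IRQF_PERCPU, "demo", NULL))
                pr_err("Failed to request demo irq %d\n", DEMO_IRQ);
}

This is part of the tree-wide removal of setup_irq(), which existed mainly for callers that ran before the allocators were up; request_irq() covers these cases and drops the hand-rolled static state.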
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -935,7 +935,7 @@ ENTRY(lws_table)
 END(lws_table)
         /* End of lws table */
 
-#define __SYSCALL(nr, entry, nargs) ASM_ULONG_INSN entry
+#define __SYSCALL(nr, entry)        ASM_ULONG_INSN entry
         .align 8
 ENTRY(sys_call_table)
         .export sys_call_table,data
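
Since the macro body never used nargs, dropping the parameter changes nothing in the generated table: each __SYSCALL(nr, entry) still expands to ASM_ULONG_INSN entry, i.e. one pointer-sized slot per syscall (assuming the usual parisc definition of ASM_ULONG_INSN as .dword on 64-bit and .word on 32-bit builds). For example, __SYSCALL(0, sys_restart_syscall) becomes a single table entry holding the address of sys_restart_syscall.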
--- a/arch/parisc/kernel/syscalls/syscalltbl.sh
+++ b/arch/parisc/kernel/syscalls/syscalltbl.sh
@@ -13,10 +13,10 @@ emit() {
         t_entry="$3"
 
         while [ $t_nxt -lt $t_nr ]; do
-                printf "__SYSCALL(%s, sys_ni_syscall, )\n" "${t_nxt}"
+                printf "__SYSCALL(%s,sys_ni_syscall)\n" "${t_nxt}"
                 t_nxt=$((t_nxt+1))
         done
-        printf "__SYSCALL(%s, %s, )\n" "${t_nxt}" "${t_entry}"
+        printf "__SYSCALL(%s,%s)\n" "${t_nxt}" "${t_entry}"
 }
 
 grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
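
For a syscall.tbl line such as "0 common restart_syscall sys_restart_syscall" (an illustrative entry), the script now emits __SYSCALL(0,sys_restart_syscall), and any unassigned numbers before the next entry are padded with __SYSCALL(n,sys_ni_syscall). The generated lines feed directly into the two-argument __SYSCALL definition in syscall.S above, which is why the third field disappears from both at once.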
--- a/drivers/parisc/eisa.c
+++ b/drivers/parisc/eisa.c
@@ -243,11 +243,6 @@ static irqreturn_t dummy_irq2_handler(int _, void *dev)
         return IRQ_HANDLED;
 }
 
-static struct irqaction irq2_action = {
-        .handler = dummy_irq2_handler,
-        .name = "cascade",
-};
-
 static void init_eisa_pic(void)
 {
         unsigned long flags;
@@ -335,7 +330,8 @@ static int __init eisa_probe(struct parisc_device *dev)
         }
 
         /* Reserve IRQ2 */
-        setup_irq(2, &irq2_action);
+        if (request_irq(2, dummy_irq2_handler, 0, "cascade", NULL))
+                pr_err("Failed to request irq 2 (cascade)\n");
 
         for (i = 0; i < 16; i++) {
                 irq_set_chip_and_handler(i, &eisa_interrupt_type,
                                          handle_simple_irq);