Commit 7a39f522 authored by Al Viro, committed by Linus Torvalds

[PATCH] sparc32 rwlock fix

read_trylock() is broken on sparc32 (it doesn't build and, as it turns
out, didn't work right anyway).  Proposed fix:

 - make "writer holds lock" distinguishable from "reader tries to grab
   lock"

 - have __raw_read_trylock() try to acquire the mutex (the LSB of the
   lock word), terminating the spin if we see that a writer is holding
   it, then do the rest as we do in read_lock() (a rough C sketch of
   the idea follows this list).
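
   (Illustrative only: below is a rough C model of that scheme.  The
   names demo_rwlock_t / demo_read_trylock are made up, and gcc's
   __atomic builtins stand in for the real ldstub-based asm helpers;
   the real helpers also reuse the read_lock() spin loop rather than
   busy-waiting as this sketch does.)

        #include <stdint.h>

        typedef struct { volatile uint32_t lock; } demo_rwlock_t;

        /*
         * The low byte of ->lock (byte 3 on big-endian sparc) is a tiny
         * spinlock; the upper 24 bits hold the reader count.  A writer
         * that owns the lock stores ~0 into the whole word, which is
         * what lets a trylock tell "writer holds the lock" apart from
         * "someone briefly owns the byte".
         */
        static int demo_read_trylock(demo_rwlock_t *rw)
        {
                volatile uint8_t *byte = (volatile uint8_t *)&rw->lock + 3;

                for (;;) {
                        /* like ldstub: atomically set the byte to 0xff, fetch old value */
                        if (__atomic_exchange_n(byte, 0xff, __ATOMIC_ACQUIRE) == 0)
                                break;          /* got the byte mutex */
                        if (rw->lock == ~0U)
                                return 0;       /* a writer holds the lock: give up */
                        /* otherwise the byte holder is transient, try again */
                }
                /* word is now (count << 8) | 0xff; adding 1 bumps the reader
                 * count and clears the low byte, releasing the byte mutex */
                rw->lock = rw->lock + 1;
                return 1;
        }

   In the patch itself this logic lives in ___rw_read_try /
   ___rw_read_try_spin_on_wlock, with __raw_write_lock() and
   __raw_write_trylock() storing ~0 into the word once a writer owns
   the lock.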

Thanks to Ingo for discussion...
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 6d24c8dc
@@ -87,6 +87,7 @@ extern void ___set_bit(void);
 extern void ___clear_bit(void);
 extern void ___change_bit(void);
 extern void ___rw_read_enter(void);
+extern void ___rw_read_try(void);
 extern void ___rw_read_exit(void);
 extern void ___rw_write_enter(void);
@@ -104,8 +105,9 @@ extern unsigned _Urem(unsigned, unsigned);
 EXPORT_SYMBOL(sparc_cpu_model);
 EXPORT_SYMBOL(kernel_thread);
 #ifdef CONFIG_SMP
-// XXX find what uses (or used) these.
+// XXX find what uses (or used) these. AV: see asm/spinlock.h
 EXPORT_SYMBOL(___rw_read_enter);
+EXPORT_SYMBOL(___rw_read_try);
 EXPORT_SYMBOL(___rw_read_exit);
 EXPORT_SYMBOL(___rw_write_enter);
 #endif
@@ -25,6 +25,15 @@ ___rw_read_enter_spin_on_wlock:
         ldstub [%g1 + 3], %g2
         b ___rw_read_enter_spin_on_wlock
         ldub [%g1 + 3], %g2
+___rw_read_try_spin_on_wlock:
+        andcc %g2, 0xff, %g0
+        be,a ___rw_read_try
+        ldstub [%g1 + 3], %g2
+        xnorcc %g2, 0x0, %o0 /* if g2 is ~0, set o0 to 0 and bugger off */
+        bne,a ___rw_read_enter_spin_on_wlock
+        ld [%g1], %g2
+        retl
+        mov %g4, %o7
 ___rw_read_exit_spin_on_wlock:
         orcc %g2, 0x0, %g0
         be,a ___rw_read_exit
@@ -60,6 +69,17 @@ ___rw_read_exit:
         retl
         mov %g4, %o7
 
+        .globl ___rw_read_try
+___rw_read_try:
+        orcc %g2, 0x0, %g0
+        bne ___rw_read_try_spin_on_wlock
+        ld [%g1], %g2
+        add %g2, 1, %g2
+        st %g2, [%g1]
+        set 1, %o1
+        retl
+        mov %g4, %o7
+
         .globl ___rw_write_enter
 ___rw_write_enter:
         orcc %g2, 0x0, %g0
@@ -129,6 +129,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
         : /* no outputs */
         : "r" (lp)
         : "g2", "g4", "memory", "cc");
+        *(volatile __u32 *)&lp->lock = ~0U;
 }
 
 static inline int __raw_write_trylock(raw_rwlock_t *rw)
@@ -144,15 +145,40 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
                 val = rw->lock & ~0xff;
                 if (val)
                         ((volatile u8*)&rw->lock)[3] = 0;
+                else
+                        *(volatile u32*)&rw->lock = ~0U;
         }
         return (val == 0);
 }
 
+static inline int __read_trylock(raw_rwlock_t *rw)
+{
+        register raw_rwlock_t *lp asm("g1");
+        register int res asm("o0");
+        lp = rw;
+        __asm__ __volatile__(
+        "mov %%o7, %%g4\n\t"
+        "call ___rw_read_try\n\t"
+        " ldstub [%%g1 + 3], %%g2\n"
+        : "=r" (res)
+        : "r" (lp)
+        : "g2", "g4", "memory", "cc");
+        return res;
+}
+
+#define __raw_read_trylock(lock) \
+({ unsigned long flags; \
+        int res; \
+        local_irq_save(flags); \
+        res = __read_trylock(lock); \
+        local_irq_restore(flags); \
+        res; \
+})
+
 #define __raw_write_unlock(rw) do { (rw)->lock = 0; } while(0)
 
 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)
 
 #define _raw_spin_relax(lock) cpu_relax()
 #define _raw_read_relax(lock) cpu_relax()