Commit ffb70c92 authored by David S. Miller

[SPARC64]: Correct rwlock membars.

read_unlock should order all previous memory operations
before the atomic counter update to drop the lock.
The debugging version of write_unlock had a similar error.
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 221a5a6b
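
For readers coming to this from modern code: the contract being restored here is, in C11 terms, release semantics on the unlock path. A minimal sketch, assuming a hypothetical sketch_rwlock_t with a plain reader counter (names are illustrative, not the kernel's):

#include <stdatomic.h>

typedef struct { atomic_uint counter; } sketch_rwlock_t;  /* hypothetical */

/* Dropping a read lock must order every load and store inside the
 * critical section before the counter decrement that publishes the
 * lock as free -- exactly the ordering the missing membar failed
 * to provide. */
static inline void sketch_read_unlock(sketch_rwlock_t *rw)
{
        atomic_fetch_sub_explicit(&rw->counter, 1, memory_order_release);
}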
@@ -172,6 +172,7 @@ void _do_read_unlock (rwlock_t *rw, char *str)
 runlock_again:
 /* Spin trying to decrement the counter using casx. */
 __asm__ __volatile__(
+" membar #StoreLoad | #LoadLoad\n"
 " ldx [%0], %%g5\n"
 " sub %%g5, 1, %%g7\n"
 " casx [%0], %%g5, %%g7\n"
@@ -290,6 +291,7 @@ void _do_write_unlock(rwlock_t *rw)
 current->thread.smp_lock_count--;
 wlock_again:
 __asm__ __volatile__(
+" membar #StoreLoad | #LoadLoad\n"
 " mov 1, %%g3\n"
 " sllx %%g3, 63, %%g3\n"
 " ldx [%0], %%g5\n"
@@ -171,12 +171,13 @@ static void inline __read_unlock(rwlock_t *lock)
 unsigned long tmp1, tmp2;

 __asm__ __volatile__(
+" membar #StoreLoad | #LoadLoad\n"
 "1: lduw [%2], %0\n"
 " sub %0, 1, %1\n"
 " cas [%2], %0, %1\n"
 " cmp %0, %1\n"
 " bne,pn %%xcc, 1b\n"
-" membar #StoreLoad | #StoreStore"
+" nop"
 : "=&r" (tmp1), "=&r" (tmp2)
 : "r" (lock)
 : "memory");
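
Worth spelling out why the fast-path hunk also swaps the trailing membar for a nop: the old #StoreLoad | #StoreStore sat in the bne's delay slot, after the cas had already dropped the lock, so it ordered the unlock against later accesses rather than the critical-section accesses before it. A sketch of the two orderings using C11 fences (the broken/fixed names are mine, not the kernel's):

#include <stdatomic.h>

typedef struct { atomic_uint counter; } sketch_rwlock_t;  /* hypothetical */

/* Mirrors the OLD asm: a barrier after the decrement orders the unlock
 * against *later* accesses only -- the wrong direction. */
static inline void sketch_read_unlock_broken(sketch_rwlock_t *rw)
{
        atomic_fetch_sub_explicit(&rw->counter, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);        /* too late */
}

/* Mirrors the FIX: barrier first, then the decrement. */
static inline void sketch_read_unlock_fixed(sketch_rwlock_t *rw)
{
        atomic_thread_fence(memory_order_release);
        atomic_fetch_sub_explicit(&rw->counter, 1, memory_order_relaxed);
}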