Commit 9c897096 authored by Linus Torvalds

Merge branch 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking fixes from Thomas Gleixner:
 "Three patches to fix memory ordering issues on ALPHA and a comment to
  clarify the usage scope of a mutex internal function"

* 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking/xchg/alpha: Fix xchg() and cmpxchg() memory ordering bugs
  locking/xchg/alpha: Clean up barrier usage by using smp_mb() in place of __ASM__MB
  locking/xchg/alpha: Add unconditional memory barrier to cmpxchg()
  locking/mutex: Add comment to __mutex_owner() to deter usage
parents 297ea1b7 472e8c55
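
For context on why the cmpxchg() change matters: lock-free code sometimes dereferences the value returned by a failed cmpxchg(), and Alpha does not order address-dependent loads in hardware, so that dependent read is safe only if the failed operation still executes its trailing barrier. The following is an illustrative sketch of such a pattern, not code from this merge; the identifiers (node, head, publish_or_get) are made up, and it assumes the usual kernel cmpxchg()/pr_info() definitions.

/*
 * Illustrative sketch only; not part of this merge.  A publish pattern
 * whose reader side relies on cmpxchg() being fully ordered even when
 * it fails.  All identifiers here are hypothetical.
 */
struct node {
	int data;
};

static struct node *head;	/* shared, initially NULL */

static struct node *publish_or_get(struct node *new)
{
	/* cmpxchg() returns the previous value of 'head'. */
	struct node *old = cmpxchg(&head, NULL, new);

	if (old) {
		/*
		 * The cmpxchg() failed, and we now read through the
		 * pointer it returned.  On Alpha this address-dependent
		 * load is ordered only because the failed cmpxchg()
		 * still executes its trailing memory barrier, which is
		 * what the unconditional barrier below guarantees.
		 */
		pr_info("already published: %d\n", old->data);
		return old;
	}
	return new;	/* we published 'new' */
}
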
arch/alpha/include/asm/cmpxchg.h
@@ -6,7 +6,6 @@
  * Atomic exchange routines.
  */
 
-#define __ASM__MB
 #define ____xchg(type, args...) __xchg ## type ## _local(args)
 #define ____cmpxchg(type, args...) __cmpxchg ## type ## _local(args)
 #include <asm/xchg.h>
@@ -33,10 +32,6 @@
 	cmpxchg_local((ptr), (o), (n)); \
 })
 
-#ifdef CONFIG_SMP
-#undef __ASM__MB
-#define __ASM__MB	"\tmb\n"
-#endif
 #undef ____xchg
 #undef ____cmpxchg
 #define ____xchg(type, args...) __xchg ##type(args)
@@ -64,7 +59,6 @@
 	cmpxchg((ptr), (o), (n)); \
 })
 
-#undef __ASM__MB
 #undef ____cmpxchg
 
 #endif /* _ALPHA_CMPXCHG_H */
arch/alpha/include/asm/xchg.h
@@ -12,6 +12,10 @@
  * Atomic exchange.
  * Since it can be used to implement critical sections
  * it must clobber "memory" (also for interrupts in UP).
+ *
+ * The leading and the trailing memory barriers guarantee that these
+ * operations are fully ordered.
+ *
  */
 
 static inline unsigned long
@@ -19,6 +23,7 @@ ____xchg(_u8, volatile char *m, unsigned long val)
 {
 	unsigned long ret, tmp, addr64;
 
+	smp_mb();
 	__asm__ __volatile__(
 	" andnot %4,7,%3\n"
 	" insbl %1,%4,%1\n"
@@ -28,12 +33,12 @@ ____xchg(_u8, volatile char *m, unsigned long val)
 	" or %1,%2,%2\n"
 	" stq_c %2,0(%3)\n"
 	" beq %2,2f\n"
-	__ASM__MB
 	".subsection 2\n"
 	"2: br 1b\n"
 	".previous"
 	: "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
 	: "r" ((long)m), "1" (val) : "memory");
+	smp_mb();
 
 	return ret;
 }
@@ -43,6 +48,7 @@ ____xchg(_u16, volatile short *m, unsigned long val)
 {
 	unsigned long ret, tmp, addr64;
 
+	smp_mb();
 	__asm__ __volatile__(
 	" andnot %4,7,%3\n"
 	" inswl %1,%4,%1\n"
@@ -52,12 +58,12 @@ ____xchg(_u16, volatile short *m, unsigned long val)
 	" or %1,%2,%2\n"
 	" stq_c %2,0(%3)\n"
 	" beq %2,2f\n"
-	__ASM__MB
 	".subsection 2\n"
 	"2: br 1b\n"
 	".previous"
 	: "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
 	: "r" ((long)m), "1" (val) : "memory");
+	smp_mb();
 
 	return ret;
 }
@@ -67,17 +73,18 @@ ____xchg(_u32, volatile int *m, unsigned long val)
 {
 	unsigned long dummy;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"1: ldl_l %0,%4\n"
 	" bis $31,%3,%1\n"
 	" stl_c %1,%2\n"
 	" beq %1,2f\n"
-	__ASM__MB
 	".subsection 2\n"
 	"2: br 1b\n"
 	".previous"
 	: "=&r" (val), "=&r" (dummy), "=m" (*m)
 	: "rI" (val), "m" (*m) : "memory");
+	smp_mb();
 
 	return val;
 }
@@ -87,17 +94,18 @@ ____xchg(_u64, volatile long *m, unsigned long val)
 {
 	unsigned long dummy;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"1: ldq_l %0,%4\n"
 	" bis $31,%3,%1\n"
 	" stq_c %1,%2\n"
 	" beq %1,2f\n"
-	__ASM__MB
 	".subsection 2\n"
 	"2: br 1b\n"
 	".previous"
 	: "=&r" (val), "=&r" (dummy), "=m" (*m)
 	: "rI" (val), "m" (*m) : "memory");
+	smp_mb();
 
 	return val;
 }
@@ -128,10 +136,12 @@ ____xchg(, volatile void *ptr, unsigned long x, int size)
  * store NEW in MEM.  Return the initial value in MEM.  Success is
  * indicated by comparing RETURN with OLD.
  *
- * The memory barrier should be placed in SMP only when we actually
- * make the change. If we don't change anything (so if the returned
- * prev is equal to old) then we aren't acquiring anything new and
- * we don't need any memory barrier as far I can tell.
+ * The leading and the trailing memory barriers guarantee that these
+ * operations are fully ordered.
+ *
+ * The trailing memory barrier is placed in SMP unconditionally, in
+ * order to guarantee that dependency ordering is preserved when a
+ * dependency is headed by an unsuccessful operation.
  */
 
 static inline unsigned long
@@ -139,6 +149,7 @@ ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new)
 {
 	unsigned long prev, tmp, cmp, addr64;
 
+	smp_mb();
 	__asm__ __volatile__(
 	" andnot %5,7,%4\n"
 	" insbl %1,%5,%1\n"
@@ -150,13 +161,13 @@ ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new)
 	" or %1,%2,%2\n"
 	" stq_c %2,0(%4)\n"
 	" beq %2,3f\n"
-	__ASM__MB
 	"2:\n"
 	".subsection 2\n"
 	"3: br 1b\n"
 	".previous"
 	: "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
 	: "r" ((long)m), "Ir" (old), "1" (new) : "memory");
+	smp_mb();
 
 	return prev;
 }
@@ -166,6 +177,7 @@ ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new)
 {
 	unsigned long prev, tmp, cmp, addr64;
 
+	smp_mb();
 	__asm__ __volatile__(
 	" andnot %5,7,%4\n"
 	" inswl %1,%5,%1\n"
@@ -177,13 +189,13 @@ ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new)
 	" or %1,%2,%2\n"
 	" stq_c %2,0(%4)\n"
 	" beq %2,3f\n"
-	__ASM__MB
 	"2:\n"
 	".subsection 2\n"
 	"3: br 1b\n"
 	".previous"
 	: "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
 	: "r" ((long)m), "Ir" (old), "1" (new) : "memory");
+	smp_mb();
 
 	return prev;
 }
@@ -193,6 +205,7 @@ ____cmpxchg(_u32, volatile int *m, int old, int new)
 {
 	unsigned long prev, cmp;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"1: ldl_l %0,%5\n"
 	" cmpeq %0,%3,%1\n"
@@ -200,13 +213,13 @@ ____cmpxchg(_u32, volatile int *m, int old, int new)
 	" mov %4,%1\n"
 	" stl_c %1,%2\n"
 	" beq %1,3f\n"
-	__ASM__MB
 	"2:\n"
 	".subsection 2\n"
 	"3: br 1b\n"
 	".previous"
 	: "=&r"(prev), "=&r"(cmp), "=m"(*m)
 	: "r"((long) old), "r"(new), "m"(*m) : "memory");
+	smp_mb();
 
 	return prev;
 }
@@ -216,6 +229,7 @@ ____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new)
 {
 	unsigned long prev, cmp;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"1: ldq_l %0,%5\n"
 	" cmpeq %0,%3,%1\n"
@@ -223,13 +237,13 @@ ____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new)
 	" mov %4,%1\n"
 	" stq_c %1,%2\n"
 	" beq %1,3f\n"
-	__ASM__MB
 	"2:\n"
 	".subsection 2\n"
 	"3: br 1b\n"
 	".previous"
 	: "=&r"(prev), "=&r"(cmp), "=m"(*m)
 	: "r"((long) old), "r"(new), "m"(*m) : "memory");
+	smp_mb();
 
 	return prev;
 }
......
include/linux/mutex.h
@@ -66,6 +66,11 @@ struct mutex {
 #endif
 };
 
+/*
+ * Internal helper function; C doesn't allow us to hide it :/
+ *
+ * DO NOT USE (outside of mutex code).
+ */
 static inline struct task_struct *__mutex_owner(struct mutex *lock)
 {
 	return (struct task_struct *)(atomic_long_read(&lock->owner) & ~0x07);
......
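
Background on the new __mutex_owner() comment: the owner field packs state flags into the low three bits of the task_struct pointer, which is why the helper masks with ~0x07, and why reading it outside the mutex code is easy to get wrong. The sketch below only illustrates that packing; the flag names are assumptions for illustration, not taken from kernel/locking/mutex.c, and code outside the mutex implementation should stick to public helpers such as mutex_is_locked().

/*
 * Illustration only; flag names are assumed, not quoted from the
 * mutex implementation.  A task_struct pointer is at least 8-byte
 * aligned, so the low three bits of lock->owner are free to carry
 * state, which is why __mutex_owner() strips them off.
 */
#define EXAMPLE_FLAG_WAITERS	0x01	/* tasks are blocked on the lock */
#define EXAMPLE_FLAG_HANDOFF	0x02	/* hand the lock to the top waiter */
#define EXAMPLE_FLAG_PICKUP	0x04	/* handoff done, waiting for pickup */
#define EXAMPLE_FLAGS		0x07

static inline struct task_struct *example_owner(struct mutex *lock)
{
	/* Strip the flag bits to recover the owning task, if any. */
	return (struct task_struct *)(atomic_long_read(&lock->owner) &
				      ~EXAMPLE_FLAGS);
}
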