Commit e4a65e9d authored by Peter Zijlstra, committed by Ingo Molnar

arch,parisc: Convert smp_mb__*()

parisc fully relies on asm-generic/barrier.h, therefore its smp_mb()
is barrier() and the default implementation suffices.
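
For reference, a minimal standalone sketch of the fallback chain this
commit relies on. This is not the kernel header itself: the macro names
match asm-generic/barrier.h, but the #ifndef guards are simplified and
the userspace main() is purely illustrative.

	#include <stdio.h>

	/* Compiler-only barrier: forbids the compiler from reordering
	 * memory accesses across this point; emits no instruction. */
	#define barrier()	__asm__ __volatile__("" : : : "memory")

	/* Simplified fallbacks in the spirit of asm-generic/barrier.h:
	 * an arch that defines nothing, as parisc does, ends up with
	 * mb() == barrier() and smp_mb() == mb(). */
	#ifndef mb
	#define mb()		barrier()
	#endif
	#ifndef smp_mb
	#define smp_mb()	mb()
	#endif

	/* The generic defaults parisc now picks up in place of the
	 * removed smp_mb__{before,after}_atomic_{dec,inc}() and
	 * smp_mb__{before,after}_clear_bit() macros. */
	#ifndef smp_mb__before_atomic
	#define smp_mb__before_atomic()	smp_mb()
	#endif
	#ifndef smp_mb__after_atomic
	#define smp_mb__after_atomic()	smp_mb()
	#endif

	int main(void)
	{
		int flag = 0;

		flag = 1;
		smp_mb__before_atomic();	/* a compiler barrier here */
		flag++;				/* stand-in for an atomic op */
		smp_mb__after_atomic();
		printf("flag=%d\n", flag);
		return 0;
	}
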
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/n/tip-mxs4aubiyesi79v8xx53093q@git.kernel.org
Cc: Helge Deller <deller@gmx.de>
Cc: James E.J. Bottomley <jejb@parisc-linux.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-kernel@vger.kernel.org
Cc: linux-parisc@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 0f5c6f9e
arch/parisc/include/asm/atomic.h
@@ -7,6 +7,7 @@
 
 #include <linux/types.h>
 #include <asm/cmpxchg.h>
+#include <asm/barrier.h>
 
 /*
  * Atomic operations that C can't guarantee us.  Useful for
@@ -143,11 +144,6 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 
 #define ATOMIC_INIT(i)	{ (i) }
 
-#define smp_mb__before_atomic_dec()	smp_mb()
-#define smp_mb__after_atomic_dec()	smp_mb()
-#define smp_mb__before_atomic_inc()	smp_mb()
-#define smp_mb__after_atomic_inc()	smp_mb()
-
 #ifdef CONFIG_64BIT
 
 #define ATOMIC64_INIT(i)	{ (i) }
arch/parisc/include/asm/bitops.h
@@ -8,6 +8,7 @@
 #include <linux/compiler.h>
 #include <asm/types.h>		/* for BITS_PER_LONG/SHIFT_PER_LONG */
 #include <asm/byteorder.h>
+#include <asm/barrier.h>
 #include <linux/atomic.h>
 
 /*
@@ -19,9 +20,6 @@
 #define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1))
 
-#define smp_mb__before_clear_bit()	smp_mb()
-#define smp_mb__after_clear_bit()	smp_mb()
-
 /* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion
  * on use of volatile and __*_bit() (set/clear/change):
  *	*_bit() want use of volatile.