Commit 943aee0c authored by Graf Yang, committed by Mike Frysinger

Blackfin: SMP: make all barriers handle cache issues

When suspending/resuming, the common task freezing code will run in
parallel and freeze processes on each core.  This is because the code
uses the non-smp version of memory barriers (as well it should).

The Blackfin smp barrier logic at the moment contains the cache sync
logic, but the non-smp barriers do not.  This is incorrect as Rafael
summarized:
> ...
> The existing memory barriers are SMP barriers too, but they are more
> than _just_ SMP barriers.  At least that's how it is _supposed_ to be
> (eg. rmb() is supposed to be stronger than smp_rmb()).
> ...
> However, looking at the blackfin's definitions of SMP barriers I see
> that it uses extra stuff that should _also_ be used in the definitions
> of the mandatory barriers.
> ...

URL: http://lkml.org/lkml/2011/4/13/11
LKML-Reference: <BANLkTi=F-C-vwX4PGGfbkdTBw3OWL-twfg@mail.gmail.com>
Signed-off-by: Graf Yang <graf.yang@analog.com>
Signed-off-by: Mike Frysinger <vapier@gentoo.org>
parent 85f2e689
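To make the scenario above concrete, here is a minimal cross-core handoff sketch in the spirit of the freezer case the message describes. It is illustrative only (freeze_data and freeze_request are made-up names, not the actual freezer code); the point is that generic code like this legitimately uses the mandatory wmb()/rmb(), so on Blackfin SMP those barriers must also perform the L1 data cache sync that the smp_* variants already did:

/* Illustrative sketch only -- not code from this patch or from the freezer. */
static volatile int freeze_data;
static volatile int freeze_request;

void publisher(void)			/* runs on core 0 */
{
	freeze_data = 1;
	wmb();				/* must also mark the dirty cache line for other cores */
	freeze_request = 1;
}

void consumer(void)			/* runs on core 1 */
{
	while (!freeze_request)
		;			/* spin until the request becomes visible */
	rmb();				/* must also check/invalidate possibly stale lines */
	/* freeze_data is now guaranteed to read as 1 */
}

Before this patch, wmb() and rmb() on an SMP kernel with a non-coherent L1 data cache were only compiler barriers, so core 1 could keep reading stale cached data indefinitely.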
@@ -19,11 +19,11 @@
  * Force strict CPU ordering.
  */
 #define nop()  __asm__ __volatile__ ("nop;\n\t" : : )
-#define mb()   __asm__ __volatile__ (""   : : : "memory")
-#define rmb()  __asm__ __volatile__ (""   : : : "memory")
-#define wmb()  __asm__ __volatile__ (""   : : : "memory")
-#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
-#define read_barrier_depends()	do { } while(0)
+#define smp_mb()  mb()
+#define smp_rmb() rmb()
+#define smp_wmb() wmb()
+#define set_mb(var, value) do { var = value; mb(); } while (0)
+#define smp_read_barrier_depends()	read_barrier_depends()
 
 #ifdef CONFIG_SMP
 asmlinkage unsigned long __raw_xchg_1_asm(volatile void *ptr, unsigned long value);
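A side effect of this hunk is that set_mb() no longer goes through xchg(); it is now a plain store followed by the (now cache-aware) mb(). A small usage sketch, with a made-up flag name that is not from the patch:

/* Illustrative only -- 'wakeup_pending' is a hypothetical example. */
static volatile int wakeup_pending;

void signal_wakeup(void)
{
	/* Expands to: wakeup_pending = 1; mb();
	 * The store is issued first, then the full barrier keeps it ordered
	 * before any later accesses and, with __ARCH_SYNC_CORE_DCACHE,
	 * pushes it out of the local L1 data cache as well.
	 */
	set_mb(wakeup_pending, 1);
}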
@@ -37,16 +37,16 @@ asmlinkage unsigned long __raw_cmpxchg_4_asm(volatile void *ptr,
 					unsigned long new, unsigned long old);
 
 #ifdef __ARCH_SYNC_CORE_DCACHE
-# define smp_mb()	do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0)
-# define smp_rmb()	do { barrier(); smp_check_barrier(); } while (0)
-# define smp_wmb()	do { barrier(); smp_mark_barrier(); } while (0)
-#define smp_read_barrier_depends()	do { barrier(); smp_check_barrier(); } while (0)
-
+/* Force Core data cache coherence */
+# define mb()	do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0)
+# define rmb()	do { barrier(); smp_check_barrier(); } while (0)
+# define wmb()	do { barrier(); smp_mark_barrier(); } while (0)
+# define read_barrier_depends()	do { barrier(); smp_check_barrier(); } while (0)
 #else
-# define smp_mb()	barrier()
-# define smp_rmb()	barrier()
-# define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	barrier()
+# define mb()	barrier()
+# define rmb()	barrier()
+# define wmb()	barrier()
+# define read_barrier_depends()	do { } while (0)
 #endif
 
 static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
@@ -99,10 +99,10 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 
 #else /* !CONFIG_SMP */
 
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	do { } while(0)
+#define mb()	barrier()
+#define rmb()	barrier()
+#define wmb()	barrier()
+#define read_barrier_depends()	do { } while (0)
 
 struct __xchg_dummy {
 	unsigned long a[100];
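Putting the three hunks together, here is an informational sketch of what the barrier macros resolve to in each configuration after this commit (summary only, not part of the patch):

/*
 * Effective expansions after this commit (informational sketch):
 *
 * !CONFIG_SMP (UP build):
 *	mb()/rmb()/wmb()	-> barrier()
 *	read_barrier_depends()	-> no-op
 *	smp_*()			-> the corresponding mandatory barrier
 *
 * CONFIG_SMP, coherent L1 dcache (no __ARCH_SYNC_CORE_DCACHE):
 *	mb()/rmb()/wmb()	-> barrier()
 *	read_barrier_depends()	-> no-op
 *	smp_*()			-> the corresponding mandatory barrier
 *
 * CONFIG_SMP with __ARCH_SYNC_CORE_DCACHE:
 *	mb()	-> barrier(); smp_check_barrier(); smp_mark_barrier();
 *	rmb()	-> barrier(); smp_check_barrier();
 *	wmb()	-> barrier(); smp_mark_barrier();
 *	read_barrier_depends()	-> barrier(); smp_check_barrier();
 *	smp_*()	-> the corresponding mandatory barrier, so the SMP barriers
 *		   keep the cache handling they had before this commit
 */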