Commit e38f5b74 authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/vapier/blackfin

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/vapier/blackfin:
  Blackfin: SMP: fix cache flush loop
  Blackfin: time-ts: ack gptimer sooner to avoid missing short ints
  Blackfin: gptimers: fix thinko when disabling timers
  Blackfin: SMP: make all barriers handle cache issues
parents e6d28318 8d50de9e
...@@ -19,11 +19,11 @@ ...@@ -19,11 +19,11 @@
* Force strict CPU ordering. * Force strict CPU ordering.
*/ */
#define nop() __asm__ __volatile__ ("nop;\n\t" : : ) #define nop() __asm__ __volatile__ ("nop;\n\t" : : )
#define mb() __asm__ __volatile__ ("" : : : "memory") #define smp_mb() mb()
#define rmb() __asm__ __volatile__ ("" : : : "memory") #define smp_rmb() rmb()
#define wmb() __asm__ __volatile__ ("" : : : "memory") #define smp_wmb() wmb()
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0) #define set_mb(var, value) do { var = value; mb(); } while (0)
#define read_barrier_depends() do { } while(0) #define smp_read_barrier_depends() read_barrier_depends()
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
asmlinkage unsigned long __raw_xchg_1_asm(volatile void *ptr, unsigned long value); asmlinkage unsigned long __raw_xchg_1_asm(volatile void *ptr, unsigned long value);
...@@ -37,16 +37,16 @@ asmlinkage unsigned long __raw_cmpxchg_4_asm(volatile void *ptr, ...@@ -37,16 +37,16 @@ asmlinkage unsigned long __raw_cmpxchg_4_asm(volatile void *ptr,
unsigned long new, unsigned long old); unsigned long new, unsigned long old);
#ifdef __ARCH_SYNC_CORE_DCACHE #ifdef __ARCH_SYNC_CORE_DCACHE
# define smp_mb() do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0) /* Force Core data cache coherence */
# define smp_rmb() do { barrier(); smp_check_barrier(); } while (0) # define mb() do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0)
# define smp_wmb() do { barrier(); smp_mark_barrier(); } while (0) # define rmb() do { barrier(); smp_check_barrier(); } while (0)
#define smp_read_barrier_depends() do { barrier(); smp_check_barrier(); } while (0) # define wmb() do { barrier(); smp_mark_barrier(); } while (0)
# define read_barrier_depends() do { barrier(); smp_check_barrier(); } while (0)
#else #else
# define smp_mb() barrier() # define mb() barrier()
# define smp_rmb() barrier() # define rmb() barrier()
# define smp_wmb() barrier() # define wmb() barrier()
#define smp_read_barrier_depends() barrier() # define read_barrier_depends() do { } while (0)
#endif #endif
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
...@@ -99,10 +99,10 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, ...@@ -99,10 +99,10 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
#else /* !CONFIG_SMP */ #else /* !CONFIG_SMP */
#define smp_mb() barrier() #define mb() barrier()
#define smp_rmb() barrier() #define rmb() barrier()
#define smp_wmb() barrier() #define wmb() barrier()
#define smp_read_barrier_depends() do { } while(0) #define read_barrier_depends() do { } while (0)
struct __xchg_dummy { struct __xchg_dummy {
unsigned long a[100]; unsigned long a[100];
......
...@@ -268,7 +268,7 @@ void disable_gptimers(uint16_t mask) ...@@ -268,7 +268,7 @@ void disable_gptimers(uint16_t mask)
_disable_gptimers(mask); _disable_gptimers(mask);
for (i = 0; i < MAX_BLACKFIN_GPTIMERS; ++i) for (i = 0; i < MAX_BLACKFIN_GPTIMERS; ++i)
if (mask & (1 << i)) if (mask & (1 << i))
group_regs[BFIN_TIMER_OCTET(i)]->status |= trun_mask[i]; group_regs[BFIN_TIMER_OCTET(i)]->status = trun_mask[i];
SSYNC(); SSYNC();
} }
EXPORT_SYMBOL(disable_gptimers); EXPORT_SYMBOL(disable_gptimers);
......
...@@ -206,8 +206,14 @@ irqreturn_t bfin_gptmr0_interrupt(int irq, void *dev_id) ...@@ -206,8 +206,14 @@ irqreturn_t bfin_gptmr0_interrupt(int irq, void *dev_id)
{ {
struct clock_event_device *evt = dev_id; struct clock_event_device *evt = dev_id;
smp_mb(); smp_mb();
evt->event_handler(evt); /*
* We want to ACK before we handle so that we can handle smaller timer
* intervals. This way if the timer expires again while we're handling
* things, we're more likely to see that 2nd int rather than swallowing
* it by ACKing the int at the end of this handler.
*/
bfin_gptmr0_ack(); bfin_gptmr0_ack();
evt->event_handler(evt);
return IRQ_HANDLED; return IRQ_HANDLED;
} }
......
...@@ -109,10 +109,23 @@ static void ipi_flush_icache(void *info) ...@@ -109,10 +109,23 @@ static void ipi_flush_icache(void *info)
struct blackfin_flush_data *fdata = info; struct blackfin_flush_data *fdata = info;
/* Invalidate the memory holding the bounds of the flushed region. */ /* Invalidate the memory holding the bounds of the flushed region. */
invalidate_dcache_range((unsigned long)fdata, blackfin_dcache_invalidate_range((unsigned long)fdata,
(unsigned long)fdata + sizeof(*fdata)); (unsigned long)fdata + sizeof(*fdata));
/* Make sure all write buffers in the data side of the core
* are flushed before trying to invalidate the icache. This
* needs to be after the data flush and before the icache
* flush so that the SSYNC does the right thing in preventing
* the instruction prefetcher from hitting things in cached
* memory at the wrong time -- it runs much further ahead than
* the pipeline.
*/
SSYNC();
flush_icache_range(fdata->start, fdata->end); /* ipi_flush_icache is invoked by generic flush_icache_range,
* so call blackfin arch icache flush directly here.
*/
blackfin_icache_flush_range(fdata->start, fdata->end);
} }
static void ipi_call_function(unsigned int cpu, struct ipi_message *msg) static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment