Commit e38f5b74 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/vapier/blackfin

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/vapier/blackfin:
  Blackfin: SMP: fix cache flush loop
  Blackfin: time-ts: ack gptimer sooner to avoid missing short ints
  Blackfin: gptimers: fix thinko when disabling timers
  Blackfin: SMP: make all barriers handle cache issues
parents e6d28318 8d50de9e
@@ -19,11 +19,11 @@
  * Force strict CPU ordering.
  */
 #define nop()  __asm__ __volatile__ ("nop;\n\t" : : )
-#define mb()   __asm__ __volatile__ (""   : : : "memory")
-#define rmb()  __asm__ __volatile__ (""   : : : "memory")
-#define wmb()  __asm__ __volatile__ (""   : : : "memory")
-#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
-#define read_barrier_depends()  do { } while(0)
+#define smp_mb()   mb()
+#define smp_rmb()  rmb()
+#define smp_wmb()  wmb()
+#define set_mb(var, value) do { var = value; mb(); } while (0)
+#define smp_read_barrier_depends()  read_barrier_depends()
 
 #ifdef CONFIG_SMP
 asmlinkage unsigned long __raw_xchg_1_asm(volatile void *ptr, unsigned long value);
@@ -37,16 +37,16 @@ asmlinkage unsigned long __raw_cmpxchg_4_asm(volatile void *ptr,
           unsigned long new, unsigned long old);
 
 #ifdef __ARCH_SYNC_CORE_DCACHE
-# define smp_mb()  do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0)
-# define smp_rmb() do { barrier(); smp_check_barrier(); } while (0)
-# define smp_wmb() do { barrier(); smp_mark_barrier(); } while (0)
-#define smp_read_barrier_depends() do { barrier(); smp_check_barrier(); } while (0)
-
+/* Force Core data cache coherence */
+# define mb()  do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0)
+# define rmb() do { barrier(); smp_check_barrier(); } while (0)
+# define wmb() do { barrier(); smp_mark_barrier(); } while (0)
+# define read_barrier_depends() do { barrier(); smp_check_barrier(); } while (0)
 #else
-# define smp_mb()  barrier()
-# define smp_rmb() barrier()
-# define smp_wmb() barrier()
-#define smp_read_barrier_depends() barrier()
+# define mb()  barrier()
+# define rmb() barrier()
+# define wmb() barrier()
+# define read_barrier_depends() do { } while (0)
 #endif
 
 static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
@@ -99,10 +99,10 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 
 #else /* !CONFIG_SMP */
 
-#define smp_mb()  barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_read_barrier_depends() do { } while(0)
+#define mb()  barrier()
+#define rmb() barrier()
+#define wmb() barrier()
+#define read_barrier_depends() do { } while (0)
 
 struct __xchg_dummy {
 	unsigned long a[100];
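The point of the barrier rework in the three hunks above: on SMP Blackfin the data caches are not coherent between cores, so the full mb()/rmb()/wmb() barriers must do real work (smp_check_barrier() to invalidate stale lines, smp_mark_barrier() to publish) instead of being plain compiler barriers, and the smp_* variants now simply map onto them. set_mb() likewise drops the xchg() trick in favor of a plain store followed by a full mb(). Below is a minimal sketch of the producer/consumer pairing these macros exist to order; the barrier macros here are reduced to compiler barriers for the sketch and are not the Blackfin implementation.

#include <assert.h>

/* Stand-ins for the kernel macros, reduced to compiler barriers for
 * this sketch.  On SMP Blackfin, wmb() additionally marks the barrier
 * (smp_mark_barrier) and rmb() checks it (smp_check_barrier) so stale
 * dcache lines are invalidated before the read side proceeds. */
#define my_wmb() __asm__ __volatile__ ("" : : : "memory")
#define my_rmb() __asm__ __volatile__ ("" : : : "memory")

static int data;
static volatile int ready;

static void producer(void)      /* imagine this on core A */
{
	data = 42;
	my_wmb();               /* data must be visible before ready */
	ready = 1;
}

static void consumer(void)      /* imagine this on core B */
{
	while (!ready)
		;               /* spin until the flag is published */
	my_rmb();               /* no stale value of data past this point */
	assert(data == 42);
}

int main(void)
{
	producer();             /* single-threaded demo of the call order */
	consumer();
	return 0;
}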
@@ -268,7 +268,7 @@ void disable_gptimers(uint16_t mask)
 	_disable_gptimers(mask);
 	for (i = 0; i < MAX_BLACKFIN_GPTIMERS; ++i)
 		if (mask & (1 << i))
-			group_regs[BFIN_TIMER_OCTET(i)]->status |= trun_mask[i];
+			group_regs[BFIN_TIMER_OCTET(i)]->status = trun_mask[i];
 	SSYNC();
 }
 EXPORT_SYMBOL(disable_gptimers);
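The gptimers "thinko" fixed above: the timer status register is write-1-to-clear (W1C), so ORing in trun_mask[i] first reads back every pending bit and then writes them all as ones, acknowledging interrupts that belong to other timers in the same octet. A plain store writes a 1 only at the intended bit. A toy model of the W1C semantics (hypothetical register, not the actual Blackfin layout):

#include <assert.h>
#include <stdint.h>

static uint32_t pending;                       /* simulated status register */

static uint32_t reg_read(void)        { return pending; }
static void     reg_write(uint32_t v) { pending &= ~v; }  /* W1C: 1 clears */

int main(void)
{
	pending = 0x5;              /* timers 0 and 2 have fired */

	/* The "|=" bug is a read-modify-write: it writes back every
	 * pending bit, so acking timer 0 also swallows timer 2. */
	reg_write(reg_read() | 0x1);
	assert(pending == 0x0);     /* timer 2's interrupt was lost */

	pending = 0x5;

	/* The fix is a plain store: only the requested bit is written. */
	reg_write(0x1);
	assert(pending == 0x4);     /* timer 2 is still pending */
	return 0;
}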
@@ -206,8 +206,14 @@ irqreturn_t bfin_gptmr0_interrupt(int irq, void *dev_id)
 {
 	struct clock_event_device *evt = dev_id;
 
 	smp_mb();
-	evt->event_handler(evt);
+	/*
+	 * We want to ACK before we handle so that we can handle smaller timer
+	 * intervals.  This way if the timer expires again while we're handling
+	 * things, we're more likely to see that 2nd int rather than swallowing
+	 * it by ACKing the int at the end of this handler.
+	 */
 	bfin_gptmr0_ack();
+	evt->event_handler(evt);
 	return IRQ_HANDLED;
 }
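The reordering works because the ACK clears the interrupt latch: if the timer expires again while the handler body is still running and the ACK comes last, both events are cleared together and the second is never serviced. A toy latch model of that race (illustrative only, not the real hardware interface):

#include <assert.h>
#include <stdbool.h>

static bool latched;                      /* simulated interrupt latch */

static void timer_fires(void) { latched = true; }
static void ack(void)         { latched = false; }

int main(void)
{
	/* ACK-last: the mid-handler expiry is swallowed. */
	timer_fires();          /* first event enters the handler  */
	timer_fires();          /* expires again during handling   */
	ack();                  /* clears both events at once      */
	assert(!latched);       /* second interrupt lost           */

	/* ACK-first (the fix): the mid-handler expiry re-latches. */
	timer_fires();
	ack();                  /* clear the first event up front  */
	timer_fires();          /* second expiry sets the latch    */
	assert(latched);        /* still pending, will be serviced */
	return 0;
}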
@@ -109,10 +109,23 @@ static void ipi_flush_icache(void *info)
 	struct blackfin_flush_data *fdata = info;
 
 	/* Invalidate the memory holding the bounds of the flushed region. */
-	invalidate_dcache_range((unsigned long)fdata,
-			(unsigned long)fdata + sizeof(*fdata));
+	blackfin_dcache_invalidate_range((unsigned long)fdata,
+			(unsigned long)fdata + sizeof(*fdata));
+
+	/* Make sure all write buffers in the data side of the core
+	 * are flushed before trying to invalidate the icache.  This
+	 * needs to be after the data flush and before the icache
+	 * flush so that the SSYNC does the right thing in preventing
+	 * the instruction prefetcher from hitting things in cached
+	 * memory at the wrong time -- it runs much further ahead than
+	 * the pipeline.
+	 */
+	SSYNC();
 
-	flush_icache_range(fdata->start, fdata->end);
+	/* ipi_flush_icache is invoked by generic flush_icache_range,
+	 * so call blackfin arch icache flush directly here.
+	 */
+	blackfin_icache_flush_range(fdata->start, fdata->end);
 }
 
 static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
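The icache change above leans on a three-step ordering for making freshly written memory safe to execute on a core with split, non-coherent I/D caches: push the writes out through the data side, drain the core's write buffers (SSYNC on Blackfin) so the instruction prefetcher cannot run ahead of them, then invalidate the icache over the range so the next fetch reads memory. A skeleton of that pattern with hypothetical helper names (the real calls are the blackfin_* routines and SSYNC() in the hunk above):

/* No-op stubs standing in for the arch cache routines, so the sketch
 * compiles; these are hypothetical names, not a real kernel API. */
static void dcache_flush_range(unsigned long s, unsigned long e) { (void)s; (void)e; }
static void drain_write_buffers(void) { }          /* SSYNC equivalent */
static void icache_invalidate_range(unsigned long s, unsigned long e) { (void)s; (void)e; }

/* Required order when publishing code written via the data side. */
static void publish_code(unsigned long start, unsigned long end)
{
	dcache_flush_range(start, end);        /* 1: writes leave the dcache   */
	drain_write_buffers();                 /* 2: must finish before step 3 */
	icache_invalidate_range(start, end);   /* 3: next fetch reads memory   */
}

int main(void)
{
	publish_code(0x1000, 0x2000);          /* hypothetical address range */
	return 0;
}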