Commit 6b3087c6 authored by Graf Yang, committed by Bryan Wu

Blackfin arch: SMP supporting patchset: Blackfin header files and machine common code

The dual-core Blackfin BF561 processor can support SMP-like features.
https://docs.blackfin.uclinux.org/doku.php?id=linux-kernel:smp-like

This patch adds the SMP extensions to the Blackfin header files and the
machine common code.
Signed-off-by: Graf Yang <graf.yang@analog.com>
Signed-off-by: Bryan Wu <cooloney@kernel.org>
parent c51b4488
@@ -15,11 +15,80 @@
 */
#define ATOMIC_INIT(i)	{ (i) }
#define atomic_set(v, i) (((v)->counter) = i)
#ifdef CONFIG_SMP
#define atomic_read(v) __raw_uncached_fetch_asm(&(v)->counter)
asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr);
asmlinkage int __raw_atomic_update_asm(volatile int *ptr, int value);
asmlinkage int __raw_atomic_clear_asm(volatile int *ptr, int value);
asmlinkage int __raw_atomic_set_asm(volatile int *ptr, int value);
asmlinkage int __raw_atomic_xor_asm(volatile int *ptr, int value);
asmlinkage int __raw_atomic_test_asm(const volatile int *ptr, int value);
static inline void atomic_add(int i, atomic_t *v)
{
__raw_atomic_update_asm(&v->counter, i);
}
static inline void atomic_sub(int i, atomic_t *v)
{
__raw_atomic_update_asm(&v->counter, -i);
}
static inline int atomic_add_return(int i, atomic_t *v)
{
return __raw_atomic_update_asm(&v->counter, i);
}
static inline int atomic_sub_return(int i, atomic_t *v)
{
return __raw_atomic_update_asm(&v->counter, -i);
}
static inline void atomic_inc(volatile atomic_t *v)
{
__raw_atomic_update_asm(&v->counter, 1);
}
static inline void atomic_dec(volatile atomic_t *v)
{
__raw_atomic_update_asm(&v->counter, -1);
}
static inline void atomic_clear_mask(int mask, atomic_t *v)
{
__raw_atomic_clear_asm(&v->counter, mask);
}
static inline void atomic_set_mask(int mask, atomic_t *v)
{
__raw_atomic_set_asm(&v->counter, mask);
}
static inline int atomic_test_mask(int mask, atomic_t *v)
{
return __raw_atomic_test_asm(&v->counter, mask);
}
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
#else /* !CONFIG_SMP */
#define atomic_read(v)	((v)->counter)
-#define atomic_set(v, i)	(((v)->counter) = i)

-static __inline__ void atomic_add(int i, atomic_t * v)
static inline void atomic_add(int i, atomic_t *v)
{
	long flags;
@@ -28,7 +97,7 @@ static __inline__ void atomic_add(int i, atomic_t * v)
	local_irq_restore(flags);
}

-static __inline__ void atomic_sub(int i, atomic_t * v)
static inline void atomic_sub(int i, atomic_t *v)
{
	long flags;
@@ -38,7 +107,7 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
}

-static inline int atomic_add_return(int i, atomic_t * v)
static inline int atomic_add_return(int i, atomic_t *v)
{
	int __temp = 0;
	long flags;
@@ -52,8 +121,7 @@ static inline int atomic_add_return(int i, atomic_t * v)
	return __temp;
}

-#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
-static inline int atomic_sub_return(int i, atomic_t * v)
static inline int atomic_sub_return(int i, atomic_t *v)
{
	int __temp = 0;
	long flags;
@@ -66,7 +134,7 @@ static inline int atomic_sub_return(int i, atomic_t * v)
	return __temp;
}

-static __inline__ void atomic_inc(volatile atomic_t * v)
static inline void atomic_inc(volatile atomic_t *v)
{
	long flags;
@@ -75,20 +143,7 @@ static __inline__ void atomic_inc(volatile atomic_t * v)
	local_irq_restore(flags);
}

-#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
-#define atomic_add_unless(v, a, u)				\
-({								\
-	int c, old;						\
-	c = atomic_read(v);					\
-	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
-		c = old;					\
-	c != (u);						\
-})
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-
-static __inline__ void atomic_dec(volatile atomic_t * v)
static inline void atomic_dec(volatile atomic_t *v)
{
	long flags;
@@ -97,7 +152,7 @@ static __inline__ void atomic_dec(volatile atomic_t * v)
	local_irq_restore(flags);
}

-static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t * v)
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	long flags;
@@ -106,7 +161,7 @@ static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t * v)
	local_irq_restore(flags);
}

-static __inline__ void atomic_set_mask(unsigned int mask, atomic_t * v)
static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	long flags;
@@ -121,9 +176,25 @@ static __inline__ void atomic_set_mask(unsigned int mask, atomic_t * v)
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()
#endif /* !CONFIG_SMP */
#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic_inc_return(v) atomic_add_return(1,(v))
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_add_unless(v, a, u) \
({ \
int c, old; \
c = atomic_read(v); \
while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
c = old; \
c != (u); \
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
......
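The atomic_add_unless() macro added above is the classic compare-and-swap retry loop: re-read the counter until it either equals the forbidden value u or the cmpxchg lands. A minimal user-space sketch of the same pattern, using GCC's __sync_val_compare_and_swap builtin as a stand-in for the kernel's atomic_cmpxchg() (demo_add_unless is an illustrative name, not part of this patch):

#include <stdio.h>

/* Add "a" to *v unless *v == u; returns nonzero if the add happened.
 * Mirrors the atomic_add_unless() loop above. */
static int demo_add_unless(int *v, int a, int u)
{
	int c = *v;	/* plays the role of atomic_read() */
	int old;

	while (c != u &&
	       (old = __sync_val_compare_and_swap(v, c, c + a)) != c)
		c = old;	/* lost a race; retry with the fresh value */
	return c != u;
}

int main(void)
{
	int counter = 1;

	printf("%d\n", demo_add_unless(&counter, 1, 0));	/* 1, counter becomes 2 */
	counter = 0;
	printf("%d\n", demo_add_unless(&counter, 1, 0));	/* 0, counter stays 0 */
	return 0;
}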
@@ -47,6 +47,9 @@
# define DMA_UNCACHED_REGION (0)
#endif

extern void bfin_setup_caches(unsigned int cpu);
extern void bfin_setup_cpudata(unsigned int cpu);

extern unsigned long get_cclk(void);
extern unsigned long get_sclk(void);
extern unsigned long sclk_to_usecs(unsigned long sclk);
@@ -58,8 +61,6 @@ extern void dump_bfin_trace_buffer(void);

/* init functions only */
extern int init_arch_irq(void);
-extern void bfin_icache_init(void);
-extern void bfin_dcache_init(void);
extern void init_exception_vectors(void);
extern void program_IAR(void);
......
@@ -7,7 +7,6 @@
#include <linux/compiler.h>
#include <asm/byteorder.h>	/* swab32 */
-#include <asm/system.h>		/* save_flags */

#ifdef __KERNEL__
@@ -20,36 +19,71 @@
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffz.h>

-static __inline__ void set_bit(int nr, volatile unsigned long *addr)
-{
-	int *a = (int *)addr;
-	int mask;
-	unsigned long flags;
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	local_irq_save(flags);
-	*a |= mask;
-	local_irq_restore(flags);
-}
-
-static __inline__ void __set_bit(int nr, volatile unsigned long *addr)
-{
-	int *a = (int *)addr;
-	int mask;
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	*a |= mask;
-}
-
-/*
- * clear_bit() doesn't provide any barrier for the compiler.
- */
-#define smp_mb__before_clear_bit()	barrier()
-#define smp_mb__after_clear_bit()	barrier()
-
-static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
#ifdef CONFIG_SMP

#include <linux/linkage.h>

asmlinkage int __raw_bit_set_asm(volatile unsigned long *addr, int nr);
asmlinkage int __raw_bit_clear_asm(volatile unsigned long *addr, int nr);
asmlinkage int __raw_bit_toggle_asm(volatile unsigned long *addr, int nr);
asmlinkage int __raw_bit_test_set_asm(volatile unsigned long *addr, int nr);
asmlinkage int __raw_bit_test_clear_asm(volatile unsigned long *addr, int nr);
asmlinkage int __raw_bit_test_toggle_asm(volatile unsigned long *addr, int nr);
asmlinkage int __raw_bit_test_asm(const volatile unsigned long *addr, int nr);

static inline void set_bit(int nr, volatile unsigned long *addr)
{
	volatile unsigned long *a = addr + (nr >> 5);
	__raw_bit_set_asm(a, nr & 0x1f);
}

static inline void clear_bit(int nr, volatile unsigned long *addr)
{
	volatile unsigned long *a = addr + (nr >> 5);
	__raw_bit_clear_asm(a, nr & 0x1f);
}

static inline void change_bit(int nr, volatile unsigned long *addr)
{
	volatile unsigned long *a = addr + (nr >> 5);
	__raw_bit_toggle_asm(a, nr & 0x1f);
}

static inline int test_bit(int nr, const volatile unsigned long *addr)
{
	volatile const unsigned long *a = addr + (nr >> 5);
	return __raw_bit_test_asm(a, nr & 0x1f) != 0;
}

static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	volatile unsigned long *a = addr + (nr >> 5);
	return __raw_bit_test_set_asm(a, nr & 0x1f);
}

static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	volatile unsigned long *a = addr + (nr >> 5);
	return __raw_bit_test_clear_asm(a, nr & 0x1f);
}

static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	volatile unsigned long *a = addr + (nr >> 5);
	return __raw_bit_test_toggle_asm(a, nr & 0x1f);
}

#else /* !CONFIG_SMP */

#include <asm/system.h>		/* save_flags */

static inline void set_bit(int nr, volatile unsigned long *addr)
{
	int *a = (int *)addr;
	int mask;
@@ -57,21 +91,23 @@ static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
-	*a &= ~mask;
	*a |= mask;
	local_irq_restore(flags);
}

-static __inline__ void __clear_bit(int nr, volatile unsigned long *addr)
static inline void clear_bit(int nr, volatile unsigned long *addr)
{
	int *a = (int *)addr;
	int mask;
	unsigned long flags;
	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	*a &= ~mask;
	local_irq_restore(flags);
}

-static __inline__ void change_bit(int nr, volatile unsigned long *addr)
static inline void change_bit(int nr, volatile unsigned long *addr)
{
	int mask, flags;
	unsigned long *ADDR = (unsigned long *)addr;
@@ -83,17 +119,7 @@ static __inline__ void change_bit(int nr, volatile unsigned long *addr)
	local_irq_restore(flags);
}

-static __inline__ void __change_bit(int nr, volatile unsigned long *addr)
-{
-	int mask;
-	unsigned long *ADDR = (unsigned long *)addr;
-	ADDR += nr >> 5;
-	mask = 1 << (nr & 31);
-	*ADDR ^= mask;
-}
-
-static __inline__ int test_and_set_bit(int nr, void *addr)
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;
@@ -109,19 +135,23 @@ static __inline__ int test_and_set_bit(int nr, void *addr)
	return retval;
}

-static __inline__ int __test_and_set_bit(int nr, volatile unsigned long *addr)
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;
	unsigned long flags;
	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	retval = (mask & *a) != 0;
-	*a |= mask;
	*a &= ~mask;
	local_irq_restore(flags);
	return retval;
}

-static __inline__ int test_and_clear_bit(int nr, volatile unsigned long *addr)
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;
@@ -131,13 +161,50 @@ static __inline__ int test_and_clear_bit(int nr, volatile unsigned long *addr)
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	retval = (mask & *a) != 0;
-	*a &= ~mask;
	*a ^= mask;
	local_irq_restore(flags);
	return retval;
}

-static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long *addr)
#endif /* CONFIG_SMP */

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

static inline void __set_bit(int nr, volatile unsigned long *addr)
{
	int *a = (int *)addr;
	int mask;
	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a |= mask;
}

static inline void __clear_bit(int nr, volatile unsigned long *addr)
{
	int *a = (int *)addr;
	int mask;
	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a &= ~mask;
}

static inline void __change_bit(int nr, volatile unsigned long *addr)
{
	int mask;
	unsigned long *ADDR = (unsigned long *)addr;
	ADDR += nr >> 5;
	mask = 1 << (nr & 31);
	*ADDR ^= mask;
}

static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;
@@ -145,26 +212,23 @@ static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long *addr)
	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
-	*a &= ~mask;
	*a |= mask;
	return retval;
}

-static __inline__ int test_and_change_bit(int nr, volatile unsigned long *addr)
static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;
-	unsigned long flags;
	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
-	local_irq_save(flags);
	retval = (mask & *a) != 0;
-	*a ^= mask;
-	local_irq_restore(flags);
	*a &= ~mask;
	return retval;
}

-static __inline__ int __test_and_change_bit(int nr,
static inline int __test_and_change_bit(int nr,
					    volatile unsigned long *addr)
{
	int mask, retval;
@@ -177,16 +241,7 @@ static __inline__ int __test_and_change_bit(int nr,
	return retval;
}

-/*
- * This routine doesn't need to be atomic.
- */
-static __inline__ int __constant_test_bit(int nr, const void *addr)
-{
-	return ((1UL << (nr & 31)) &
-		(((const volatile unsigned int *)addr)[nr >> 5])) != 0;
-}
-
-static __inline__ int __test_bit(int nr, const void *addr)
static inline int __test_bit(int nr, const void *addr)
{
	int *a = (int *)addr;
	int mask;
@@ -196,10 +251,16 @@ static __inline__ int __test_bit(int nr, const void *addr)
	return ((mask & *a) != 0);
}

-#define test_bit(nr,addr) \
-(__builtin_constant_p(nr) ? \
- __constant_test_bit((nr),(addr)) : \
- __test_bit((nr),(addr)))
#ifndef CONFIG_SMP
/*
 * This routine doesn't need irq save and restore ops in UP
 * context.
 */
static inline int test_bit(int nr, const void *addr)
{
	return __test_bit(nr, addr);
}
#endif

#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/hweight.h>
......
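Every variant above, SMP and UP alike, uses the same address arithmetic: nr >> 5 selects the 32-bit word and nr & 0x1f the bit inside it. A small self-contained C illustration of that decomposition (not from the patch):

#include <stdio.h>

int main(void)
{
	unsigned long bitmap[4] = { 0 };	/* 128 bits, 32-bit longs assumed */
	int nr = 70;

	/* Same decomposition used by set_bit()/test_bit() above. */
	bitmap[nr >> 5] |= 1UL << (nr & 0x1f);

	printf("bit %d lives in word %d, mask 0x%08lx\n",
	       nr, nr >> 5, 1UL << (nr & 0x1f));	/* word 2, mask 0x00000040 */
	return 0;
}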
@@ -12,6 +12,11 @@
#define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
#define SMP_CACHE_BYTES	L1_CACHE_BYTES

#ifdef CONFIG_SMP
#define __cacheline_aligned
#else
#define ____cacheline_aligned

/*
 * Put cacheline_aliged data to L1 data memory
 */
@@ -21,9 +26,33 @@
		__section__(".data_l1.cacheline_aligned")))
#endif

#endif

/*
 * largest L1 which this arch supports
 */
#define L1_CACHE_SHIFT_MAX	5
#if defined(CONFIG_SMP) && \
!defined(CONFIG_BFIN_CACHE_COHERENT) && \
defined(CONFIG_BFIN_DCACHE)
#define __ARCH_SYNC_CORE_DCACHE
#ifndef __ASSEMBLY__
asmlinkage void __raw_smp_mark_barrier_asm(void);
asmlinkage void __raw_smp_check_barrier_asm(void);
static inline void smp_mark_barrier(void)
{
__raw_smp_mark_barrier_asm();
}
static inline void smp_check_barrier(void)
{
__raw_smp_check_barrier_asm();
}
void resync_core_dcache(void);
#endif
#endif
#endif
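The BF561's two L1 data caches are not hardware-coherent, which is what smp_mark_barrier()/smp_check_barrier() compensate for: the writing core marks that it has published data, and the reading core invalidates possibly stale lines before consuming it. A hedged sketch of the intended pairing, relying on the smp_wmb()/smp_rmb() mappings from the system.h hunk further down (struct msg, producer and consumer are illustrative):

#include <asm/system.h>

struct msg {
	int payload;
	int ready;
};

/* Writer core: publish the payload before raising the flag.  With
 * __ARCH_SYNC_CORE_DCACHE, smp_wmb() is barrier() + smp_mark_barrier(). */
static void producer(struct msg *m, int v)
{
	m->payload = v;
	smp_wmb();
	m->ready = 1;
}

/* Reader core: smp_rmb() is barrier() + smp_check_barrier(), which
 * invalidates stale dcache lines written by the other core. */
static int consumer(struct msg *m)
{
	while (!m->ready)
		cpu_relax();
	smp_rmb();
	return m->payload;
}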
@@ -35,6 +35,7 @@ extern void blackfin_icache_flush_range(unsigned long start_address, unsigned lo
extern void blackfin_dcache_flush_range(unsigned long start_address, unsigned long end_address);
extern void blackfin_dcache_invalidate_range(unsigned long start_address, unsigned long end_address);
extern void blackfin_dflush_page(void *page);
extern void blackfin_invalidate_entire_dcache(void);

#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
@@ -44,12 +45,20 @@ extern void blackfin_dflush_page(void *page);
#define flush_cache_vmap(start, end)		do { } while (0)
#define flush_cache_vunmap(start, end)		do { } while (0)

#ifdef CONFIG_SMP
#define flush_icache_range_others(start, end)	\
	smp_icache_flush_range_others((start), (end))
#else
#define flush_icache_range_others(start, end)	do { } while (0)
#endif

static inline void flush_icache_range(unsigned start, unsigned end)
{
#if defined(CONFIG_BFIN_DCACHE) && defined(CONFIG_BFIN_ICACHE)
# if defined(CONFIG_BFIN_WT)
	blackfin_icache_flush_range((start), (end));
	flush_icache_range_others(start, end);
# else
	blackfin_icache_dcache_flush_range((start), (end));
# endif
@@ -58,6 +67,7 @@ static inline void flush_icache_range(unsigned start, unsigned end)
# if defined(CONFIG_BFIN_ICACHE)
	blackfin_icache_flush_range((start), (end));
	flush_icache_range_others(start, end);
# endif
# if defined(CONFIG_BFIN_DCACHE)
	blackfin_dcache_flush_range((start), (end));
@@ -68,8 +78,10 @@ static inline void flush_icache_range(unsigned start, unsigned end)
#define copy_to_user_page(vma, page, vaddr, dst, src, len)		\
do { memcpy(dst, src, len);						\
-     flush_icache_range ((unsigned) (dst), (unsigned) (dst) + (len)); \
     flush_icache_range((unsigned) (dst), (unsigned) (dst) + (len));	\
     flush_icache_range_others((unsigned long) (dst), (unsigned long) (dst) + (len));\
} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len)	memcpy(dst, src, len)

#if defined(CONFIG_BFIN_DCACHE)
......
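The point of flush_icache_range_others() is that instructions freshly written by one core must also be purged from the other core's icache. A hedged sketch of a caller (install_stub is an illustrative helper, not part of this patch); note that flush_icache_range() itself now performs the cross-core flush on SMP, so callers stay unchanged:

#include <linux/types.h>
#include <linux/string.h>
#include <asm/cacheflush.h>

/* Copy a code stub into place and make it fetchable on every core. */
static void install_stub(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
}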
@@ -303,9 +303,14 @@
	RETI = [sp++];
	RETS = [sp++];

#ifdef CONFIG_SMP
	GET_PDA(p0, r0);
	r0 = [p0 + PDA_IRQFLAGS];
#else
	p0.h = _irq_flags;
	p0.l = _irq_flags;
	r0 = [p0];
#endif
	sti r0;

	sp += 4;	/* Skip Reserved */
@@ -352,4 +357,3 @@
	SYSCFG = [sp++];
	csync;
.endm
/*
* File: arch/blackfin/include/asm/cpu.h.
* Author: Philippe Gerum <rpm@xenomai.org>
*
* Copyright 2007 Analog Devices Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see the file COPYING, or write
* to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef __ASM_BLACKFIN_CPU_H
#define __ASM_BLACKFIN_CPU_H
#include <linux/percpu.h>
struct task_struct;
struct blackfin_cpudata {
struct cpu cpu;
struct task_struct *idle;
unsigned long cclk;
unsigned int imemctl;
unsigned int dmemctl;
unsigned long loops_per_jiffy;
unsigned long dcache_invld_count;
};
DECLARE_PER_CPU(struct blackfin_cpudata, cpu_data);
#endif
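cpu_data is ordinary per-CPU data, so it is reached with the per_cpu() accessor. A hedged sketch that walks the cores (dump_cpudata is an illustrative name, not part of this patch):

#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <asm/cpu.h>

static void dump_cpudata(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct blackfin_cpudata *d = &per_cpu(cpu_data, cpu);

		printk(KERN_INFO "cpu%u: cclk=%lu lpj=%lu\n",
		       cpu, d->cclk, d->loops_per_jiffy);
	}
}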
@@ -24,7 +24,8 @@ struct l1_scratch_task_info
};

/* A pointer to the structure in memory.  */
-#define L1_SCRATCH_TASK_INFO	((struct l1_scratch_task_info *)L1_SCRATCH_START)
#define L1_SCRATCH_TASK_INFO	((struct l1_scratch_task_info *)\
						get_l1_scratch_start())

#endif
......
/*
* include/asm-generic/mutex-dec.h
*
* Generic implementation of the mutex fastpath, based on atomic
* decrement/increment.
*/
#ifndef _ASM_GENERIC_MUTEX_DEC_H
#define _ASM_GENERIC_MUTEX_DEC_H
/**
* __mutex_fastpath_lock - try to take the lock by moving the count
* from 1 to a 0 value
* @count: pointer of type atomic_t
* @fail_fn: function to call if the original value was not 1
*
* Change the count from 1 to a value lower than 1, and call <fail_fn> if
* it wasn't 1 originally. This function MUST leave the value lower than
* 1 even when the "1" assertion wasn't true.
*/
static inline void
__mutex_fastpath_lock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
{
if (unlikely(atomic_dec_return(count) < 0))
fail_fn(count);
else
smp_mb();
}
/**
* __mutex_fastpath_lock_retval - try to take the lock by moving the count
* from 1 to a 0 value
* @count: pointer of type atomic_t
* @fail_fn: function to call if the original value was not 1
*
* Change the count from 1 to a value lower than 1, and call <fail_fn> if
* it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
* or anything the slow path function returns.
*/
static inline int
__mutex_fastpath_lock_retval(atomic_t *count, fastcall int (*fail_fn)(atomic_t *))
{
if (unlikely(atomic_dec_return(count) < 0))
return fail_fn(count);
else {
smp_mb();
return 0;
}
}
/**
* __mutex_fastpath_unlock - try to promote the count from 0 to 1
* @count: pointer of type atomic_t
* @fail_fn: function to call if the original value was not 0
*
* Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
* In the failure case, this function is allowed to either set the value to
* 1, or to set it to a value lower than 1.
*
* If the implementation sets it to a value of lower than 1, then the
* __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
* to return 0 otherwise.
*/
static inline void
__mutex_fastpath_unlock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
{
smp_mb();
if (unlikely(atomic_inc_return(count) <= 0))
fail_fn(count);
}
#define __mutex_slowpath_needs_to_unlock() 1
/**
* __mutex_fastpath_trylock - try to acquire the mutex, without waiting
*
* @count: pointer of type atomic_t
* @fail_fn: fallback function
*
* Change the count from 1 to a value lower than 1, and return 0 (failure)
* if it wasn't 1 originally, or return 1 (success) otherwise. This function
* MUST leave the value lower than 1 even when the "1" assertion wasn't true.
* Additionally, if the value was < 0 originally, this function must not leave
* it to 0 on failure.
*
* If the architecture has no effective trylock variant, it should call the
* <fail_fn> spinlock-based trylock variant unconditionally.
*/
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
/*
* We have two variants here. The cmpxchg based one is the best one
* because it never induce a false contention state. It is included
* here because architectures using the inc/dec algorithms over the
* xchg ones are much more likely to support cmpxchg natively.
*
* If not we fall back to the spinlock based variant - that is
* just as efficient (and simpler) as a 'destructive' probing of
* the mutex state would be.
*/
#ifdef __HAVE_ARCH_CMPXCHG
if (likely(atomic_cmpxchg(count, 1, 0) == 1)) {
smp_mb();
return 1;
}
return 0;
#else
return fail_fn(count);
#endif
}
#endif
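The contract spelled out in the comments above is: count == 1 unlocked, 0 locked, negative locked with waiters; the fastpath helpers only decide whether the slowpath must run. A simplified, illustrative sketch of how the generic mutex layer drives them (the real slowpath functions live in kernel/mutex.c and the hookup is generated code; the demo_* names are not from this patch):

#include <linux/mutex.h>

static void demo_mutex_lock(struct mutex *lock,
			    void (*slowpath)(atomic_t *))
{
	/* 1 -> 0 means an uncontended acquire; anything else falls back. */
	__mutex_fastpath_lock(&lock->count, slowpath);
}

static void demo_mutex_unlock(struct mutex *lock,
			      void (*slowpath)(atomic_t *))
{
	/* 0 -> 1 means no waiters; a result <= 0 means wake someone. */
	__mutex_fastpath_unlock(&lock->count, slowpath);
}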
@@ -6,4 +6,67 @@
 * implementation. (see asm-generic/mutex-xchg.h for details)
 */

#ifndef _ASM_MUTEX_H
#define _ASM_MUTEX_H

#ifndef CONFIG_SMP
#include <asm-generic/mutex-dec.h>
#else
static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
if (unlikely(atomic_dec_return(count) < 0))
fail_fn(count);
else
smp_mb();
}
static inline int
__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
{
if (unlikely(atomic_dec_return(count) < 0))
return fail_fn(count);
else {
smp_mb();
return 0;
}
}
static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
smp_mb();
if (unlikely(atomic_inc_return(count) <= 0))
fail_fn(count);
}
#define __mutex_slowpath_needs_to_unlock() 1
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
/*
* We have two variants here. The cmpxchg based one is the best one
* because it never induce a false contention state. It is included
* here because architectures using the inc/dec algorithms over the
* xchg ones are much more likely to support cmpxchg natively.
*
* If not we fall back to the spinlock based variant - that is
* just as efficient (and simpler) as a 'destructive' probing of
* the mutex state would be.
*/
#ifdef __HAVE_ARCH_CMPXCHG
if (likely(atomic_cmpxchg(count, 1, 0) == 1)) {
smp_mb();
return 1;
}
return 0;
#else
return fail_fn(count);
#endif
}
#endif
#endif
/*
* File: arch/blackfin/include/asm/pda.h
* Author: Philippe Gerum <rpm@xenomai.org>
*
* Copyright 2007 Analog Devices Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see the file COPYING, or write
* to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef _ASM_BLACKFIN_PDA_H
#define _ASM_BLACKFIN_PDA_H
#include <asm/mem_map.h>
#ifndef __ASSEMBLY__
struct blackfin_pda { /* Per-processor Data Area */
struct blackfin_pda *next;
unsigned long syscfg;
#ifdef CONFIG_SMP
unsigned long imask; /* Current IMASK value */
#endif
unsigned long *ipdt; /* Start of switchable I-CPLB table */
unsigned long *ipdt_swapcount; /* Number of swaps in ipdt */
unsigned long *dpdt; /* Start of switchable D-CPLB table */
unsigned long *dpdt_swapcount; /* Number of swaps in dpdt */
/*
* Single instructions can have multiple faults, which
* need to be handled by traps.c, in irq5. We store
* the exception cause to ensure we don't miss a
* double fault condition
*/
unsigned long ex_iptr;
unsigned long ex_optr;
unsigned long ex_buf[4];
unsigned long ex_imask; /* Saved imask from exception */
unsigned long *ex_stack; /* Exception stack space */
#ifdef ANOMALY_05000261
unsigned long last_cplb_fault_retx;
#endif
unsigned long dcplb_fault_addr;
unsigned long icplb_fault_addr;
unsigned long retx;
unsigned long seqstat;
};
extern struct blackfin_pda cpu_pda[];
void reserve_pda(void);
#endif /* __ASSEMBLY__ */
#endif /* _ASM_BLACKFIN_PDA_H */
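Unlike the DECLARE_PER_CPU data in cpu.h, cpu_pda[] is a plain array indexed by core id, so early assembly (via GET_PDA) and C code can reach it before the per-cpu machinery is initialized. A hedged C-side sketch (my_pda and core_dcplb_fault are illustrative; blackfin_core_id() is defined in the system.h hunk below):

#include <asm/pda.h>
#include <asm/system.h>

static struct blackfin_pda *my_pda(void)
{
	return &cpu_pda[blackfin_core_id()];
}

/* Example: read the saved DCPLB fault address of a given core. */
static unsigned long core_dcplb_fault(unsigned int cpu)
{
	return cpu_pda[cpu].dcplb_fault_addr;
}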
@@ -3,4 +3,14 @@
#include <asm-generic/percpu.h>
#ifdef CONFIG_MODULES
#define PERCPU_MODULE_RESERVE 8192
#else
#define PERCPU_MODULE_RESERVE 0
#endif
#define PERCPU_ENOUGH_ROOM \
(ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) + \
PERCPU_MODULE_RESERVE)
#endif /* __ARCH_BLACKFIN_PERCPU__ */
@@ -106,7 +106,8 @@ unsigned long get_wchan(struct task_struct *p);
	eip; })

#define	KSTK_ESP(tsk)	((tsk) == current ? rdusp() : (tsk)->thread.usp)

-#define cpu_relax()	barrier()
#define cpu_relax()	smp_mb()

/* Get the Silicon Revision of the chip */
static inline uint32_t __pure bfin_revid(void)
@@ -137,7 +138,11 @@ static inline uint32_t __pure bfin_revid(void)
static inline uint16_t __pure bfin_cpuid(void)
{
	return (bfin_read_CHIPID() & CHIPID_FAMILY) >> 12;
}

static inline uint32_t __pure bfin_dspid(void)
{
	return bfin_read_DSPID();
}

static inline uint32_t __pure bfin_compiled_revid(void)
......
#ifndef _ASM_BLACKFIN_RWLOCK_H
#define _ASM_BLACKFIN_RWLOCK_H
#define RW_LOCK_BIAS 0x01000000
#endif
/*
* File: arch/blackfin/include/asm/smp.h
* Author: Philippe Gerum <rpm@xenomai.org>
*
* Copyright 2007 Analog Devices Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see the file COPYING, or write
* to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef __ASM_BLACKFIN_SMP_H
#define __ASM_BLACKFIN_SMP_H
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/cache.h>
#include <asm/blackfin.h>
#include <mach/smp.h>
#define raw_smp_processor_id() blackfin_core_id()
struct corelock_slot {
int lock;
};
void smp_icache_flush_range_others(unsigned long start,
unsigned long end);
#endif /* !__ASM_BLACKFIN_SMP_H */
#ifndef __BFIN_SPINLOCK_H
#define __BFIN_SPINLOCK_H

-#error blackfin architecture does not support SMP spin lock yet
-#endif
#include <asm/atomic.h>

asmlinkage int __raw_spin_is_locked_asm(volatile int *ptr);
asmlinkage void __raw_spin_lock_asm(volatile int *ptr);
asmlinkage int __raw_spin_trylock_asm(volatile int *ptr);
asmlinkage void __raw_spin_unlock_asm(volatile int *ptr);
asmlinkage void __raw_read_lock_asm(volatile int *ptr);
asmlinkage int __raw_read_trylock_asm(volatile int *ptr);
asmlinkage void __raw_read_unlock_asm(volatile int *ptr);
asmlinkage void __raw_write_lock_asm(volatile int *ptr);
asmlinkage int __raw_write_trylock_asm(volatile int *ptr);
asmlinkage void __raw_write_unlock_asm(volatile int *ptr);
static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
{
return __raw_spin_is_locked_asm(&lock->lock);
}
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
__raw_spin_lock_asm(&lock->lock);
}
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
return __raw_spin_trylock_asm(&lock->lock);
}
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
__raw_spin_unlock_asm(&lock->lock);
}
static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
{
while (__raw_spin_is_locked(lock))
cpu_relax();
}
static inline int __raw_read_can_lock(raw_rwlock_t *rw)
{
return __raw_uncached_fetch_asm(&rw->lock) > 0;
}
static inline int __raw_write_can_lock(raw_rwlock_t *rw)
{
return __raw_uncached_fetch_asm(&rw->lock) == RW_LOCK_BIAS;
}
static inline void __raw_read_lock(raw_rwlock_t *rw)
{
__raw_read_lock_asm(&rw->lock);
}
static inline int __raw_read_trylock(raw_rwlock_t *rw)
{
return __raw_read_trylock_asm(&rw->lock);
}
static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
__raw_read_unlock_asm(&rw->lock);
}
static inline void __raw_write_lock(raw_rwlock_t *rw)
{
__raw_write_lock_asm(&rw->lock);
}
static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
return __raw_write_trylock_asm(&rw->lock);
}
static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
__raw_write_unlock_asm(&rw->lock);
}
#define _raw_spin_relax(lock) cpu_relax()
#define _raw_read_relax(lock) cpu_relax()
#define _raw_write_relax(lock) cpu_relax()
#endif /* !__BFIN_SPINLOCK_H */
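These wrappers slot directly under the generic spinlock layer; real code calls spin_lock()/spin_unlock(), which add preemption control around them. A hedged sketch built straight on the raw primitives (bump_counter is illustrative, not part of this patch):

#include <asm/spinlock.h>

static int counter;

static void bump_counter(raw_spinlock_t *lock)
{
	__raw_spin_lock(lock);		/* spins inside __raw_spin_lock_asm() */
	counter++;
	__raw_spin_unlock(lock);	/* releases via __raw_spin_unlock_asm() */
}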
#ifndef __ASM_SPINLOCK_TYPES_H
#define __ASM_SPINLOCK_TYPES_H
#ifndef __LINUX_SPINLOCK_TYPES_H
# error "please don't include this file directly"
#endif
#include <asm/rwlock.h>
typedef struct {
volatile unsigned int lock;
} raw_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
typedef struct {
volatile unsigned int lock;
} raw_rwlock_t;
#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
#endif
@@ -37,20 +37,16 @@
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <mach/anomaly.h>
#include <asm/pda.h>
#include <asm/processor.h>

/* Forward decl needed due to cdef inter dependencies */
static inline uint32_t __pure bfin_dspid(void);
#define blackfin_core_id() (bfin_dspid() & 0xff)

/*
 * Interrupt configuring macros.
 */
-extern unsigned long irq_flags;
-
-#define local_irq_enable() \
-	__asm__ __volatile__( \
-		"sti %0;" \
-		: \
-		: "d" (irq_flags) \
-	)

#define local_irq_disable() \
	do { \
		int __tmp_dummy; \
@@ -66,6 +62,18 @@ extern unsigned long irq_flags;
# define NOP_PAD_ANOMALY_05000244
#endif
#ifdef CONFIG_SMP
# define irq_flags cpu_pda[blackfin_core_id()].imask
#else
extern unsigned long irq_flags;
#endif
#define local_irq_enable() \
__asm__ __volatile__( \
"sti %0;" \
: \
: "d" (irq_flags) \
)
#define idle_with_irq_disabled() \
	__asm__ __volatile__( \
		NOP_PAD_ANOMALY_05000244 \
@@ -129,22 +137,85 @@ extern unsigned long irq_flags;
#define rmb()  asm volatile (""   : : :"memory")
#define wmb()  asm volatile (""   : : :"memory")
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
#define read_barrier_depends()		do { } while(0)

#ifdef CONFIG_SMP
-#define smp_mb()	mb()
-#define smp_rmb()	rmb()
-#define smp_wmb()	wmb()
-#define smp_read_barrier_depends()	read_barrier_depends()
asmlinkage unsigned long __raw_xchg_1_asm(volatile void *ptr, unsigned long value);
asmlinkage unsigned long __raw_xchg_2_asm(volatile void *ptr, unsigned long value);
asmlinkage unsigned long __raw_xchg_4_asm(volatile void *ptr, unsigned long value);
asmlinkage unsigned long __raw_cmpxchg_1_asm(volatile void *ptr,
unsigned long new, unsigned long old);
asmlinkage unsigned long __raw_cmpxchg_2_asm(volatile void *ptr,
unsigned long new, unsigned long old);
asmlinkage unsigned long __raw_cmpxchg_4_asm(volatile void *ptr,
unsigned long new, unsigned long old);
#ifdef __ARCH_SYNC_CORE_DCACHE
# define smp_mb() do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0)
# define smp_rmb() do { barrier(); smp_check_barrier(); } while (0)
# define smp_wmb() do { barrier(); smp_mark_barrier(); } while (0)
#else #else
# define smp_mb() barrier()
# define smp_rmb() barrier()
# define smp_wmb() barrier()
#endif
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
int size)
{
unsigned long tmp;
switch (size) {
case 1:
tmp = __raw_xchg_1_asm(ptr, x);
break;
case 2:
tmp = __raw_xchg_2_asm(ptr, x);
break;
case 4:
tmp = __raw_xchg_4_asm(ptr, x);
break;
}
return tmp;
}
/*
* Atomic compare and exchange. Compare OLD with MEM, if identical,
* store NEW in MEM. Return the initial value in MEM. Success is
* indicated by comparing RETURN with OLD.
*/
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
unsigned long new, int size)
{
unsigned long tmp;
switch (size) {
case 1:
tmp = __raw_cmpxchg_1_asm(ptr, new, old);
break;
case 2:
tmp = __raw_cmpxchg_2_asm(ptr, new, old);
break;
case 4:
tmp = __raw_cmpxchg_4_asm(ptr, new, old);
break;
}
return tmp;
}
#define cmpxchg(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
(unsigned long)(n), sizeof(*(ptr))))
#define smp_read_barrier_depends() smp_check_barrier()
#else /* !CONFIG_SMP */
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#endif
-#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

struct __xchg_dummy {
	unsigned long a[100];
@@ -194,9 +265,12 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
		(unsigned long)(n), sizeof(*(ptr))))

#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#ifndef CONFIG_SMP
#include <asm-generic/cmpxchg.h>
#endif
#endif /* !CONFIG_SMP */
#define xchg(ptr, x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
#define tas(ptr) ((void)xchg((ptr), 1))
#define prepare_to_switch()	do { } while(0)
......
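On SMP, xchg() now routes through the __raw_xchg_*_asm() helpers, and tas() is simply an exchange with 1 (the macro above discards the old value with a void cast; a classic test-and-set returns it). A user-space model of that semantics using a GCC builtin (demo_tas is illustrative, not part of this patch):

#include <stdio.h>

/* Atomically store 1 and return the previous value. */
static int demo_tas(int *lock)
{
	return __sync_lock_test_and_set(lock, 1);
}

int main(void)
{
	int lock = 0;

	printf("first  tas: %d\n", demo_tas(&lock));	/* 0: acquired */
	printf("second tas: %d\n", demo_tas(&lock));	/* 1: already held */
	return 0;
}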
@@ -10,3 +10,4 @@ obj-$(CONFIG_BFIN_ICACHE_LOCK) += lock.o
obj-$(CONFIG_PM)	 += pm.o dpmc_modes.o
obj-$(CONFIG_CPU_FREQ)	 += cpufreq.o
obj-$(CONFIG_CPU_VOLTAGE) += dpmc.o
obj-$(CONFIG_SMP) += smp.o
@@ -97,3 +97,39 @@ ENTRY(_blackfin_dflush_page)
	P1 = 1 << (PAGE_SHIFT - L1_CACHE_SHIFT);
	jump .Ldfr;
ENDPROC(_blackfin_dflush_page)
/* Invalidate the Entire Data cache by
* clearing DMC[1:0] bits
*/
ENTRY(_blackfin_invalidate_entire_dcache)
[--SP] = ( R7:5);
P0.L = LO(DMEM_CONTROL);
P0.H = HI(DMEM_CONTROL);
R7 = [P0];
R5 = R7; /* Save DMEM_CNTR */
/* Clear the DMC[1:0] bits, All valid bits in the data
* cache are set to the invalid state
*/
BITCLR(R7,DMC0_P);
BITCLR(R7,DMC1_P);
CLI R6;
SSYNC; /* SSYNC required before writing to DMEM_CONTROL. */
.align 8;
[P0] = R7;
SSYNC;
STI R6;
/* Configures the data cache again */
CLI R6;
SSYNC; /* SSYNC required before writing to DMEM_CONTROL. */
.align 8;
[P0] = R5;
SSYNC;
STI R6;
( R7:5) = [SP++];
RTS;
ENDPROC(_blackfin_invalidate_entire_dcache)
@@ -36,6 +36,7 @@
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/unistd.h>
#include <linux/threads.h>
#include <asm/blackfin.h>
#include <asm/errno.h>
#include <asm/fixed_code.h>
@@ -75,11 +76,11 @@ ENTRY(_ex_workaround_261)
	 * handle it.
	 */
	P4 = R7;	/* Store EXCAUSE */
-	p5.l = _last_cplb_fault_retx;
-	p5.h = _last_cplb_fault_retx;
-	r7 = [p5];
	GET_PDA(p5, r7);
	r7 = [p5 + PDA_LFRETX];
	r6 = retx;
-	[p5] = r6;
	[p5 + PDA_LFRETX] = r6;
	cc = r6 == r7;
	if !cc jump _bfin_return_from_exception;
	/* fall through */
@@ -324,7 +325,9 @@ ENTRY(_ex_trap_c)
	[p4] = p5;
	csync;

	GET_PDA(p5, r6);
#ifndef CONFIG_DEBUG_DOUBLEFAULT
	/*
	 * Save these registers, as they are only valid in exception context
	 * (where we are now - as soon as we defer to IRQ5, they can change)
@@ -335,29 +338,25 @@ ENTRY(_ex_trap_c)
	p4.l = lo(DCPLB_FAULT_ADDR);
	p4.h = hi(DCPLB_FAULT_ADDR);
	r7 = [p4];
-	p5.h = _saved_dcplb_fault_addr;
-	p5.l = _saved_dcplb_fault_addr;
-	[p5] = r7;
	[p5 + PDA_DCPLB] = r7;

-	r7 = [p4 + (ICPLB_FAULT_ADDR - DCPLB_FAULT_ADDR)];
-	p5.h = _saved_icplb_fault_addr;
-	p5.l = _saved_icplb_fault_addr;
-	[p5] = r7;
	p4.l = lo(ICPLB_FAULT_ADDR);
	p4.h = hi(ICPLB_FAULT_ADDR);
	r6 = [p4];
	[p5 + PDA_ICPLB] = r6;

	r6 = retx;
-	p4.l = _saved_retx;
-	p4.h = _saved_retx;
-	[p4] = r6;
	[p5 + PDA_RETX] = r6;
#endif

	r6 = SYSCFG;
-	[p4 + 4] = r6;
	[p5 + PDA_SYSCFG] = r6;
	BITCLR(r6, 0);
	SYSCFG = r6;

	/* Disable all interrupts, but make sure level 5 is enabled so
	 * we can switch to that level.  Save the old mask. */
	cli r6;
-	[p4 + 8] = r6;
	[p5 + PDA_EXIMASK] = r6;

	p4.l = lo(SAFE_USER_INSTRUCTION);
	p4.h = hi(SAFE_USER_INSTRUCTION);
@@ -424,17 +423,16 @@ ENDPROC(_double_fault)
ENTRY(_exception_to_level5)
	SAVE_ALL_SYS

-	p4.l = _saved_retx;
-	p4.h = _saved_retx;
-	r6 = [p4];
	GET_PDA(p4, r7);	/* Fetch current PDA */
	r6 = [p4 + PDA_RETX];
	[sp + PT_PC] = r6;

-	r6 = [p4 + 4];
	r6 = [p4 + PDA_SYSCFG];
	[sp + PT_SYSCFG] = r6;

	/* Restore interrupt mask.  We haven't pushed RETI, so this
	 * doesn't enable interrupts until we return from this handler.  */
-	r6 = [p4 + 8];
	r6 = [p4 + PDA_EXIMASK];
	sti r6;

	/* Restore the hardware error vector.  */
@@ -478,8 +476,8 @@ ENTRY(_trap) /* Exception: 4th entry into system event table(supervisor mode)*/
	 * scratch register (for want of a better option).
	 */
	EX_SCRATCH_REG = sp;
-	sp.l = _exception_stack_top;
-	sp.h = _exception_stack_top;
	GET_PDA_SAFE(sp);
	sp = [sp + PDA_EXSTACK]
	/* Try to deal with syscalls quickly.  */
	[--sp] = ASTAT;
	[--sp] = (R7:6,P5:4);
@@ -501,27 +499,22 @@ ENTRY(_trap) /* Exception: 4th entry into system event table(supervisor mode)*/
	 * but they are not very interesting, so don't save them
	 */

	GET_PDA(p5, r7);
	p4.l = lo(DCPLB_FAULT_ADDR);
	p4.h = hi(DCPLB_FAULT_ADDR);
	r7 = [p4];
-	p5.h = _saved_dcplb_fault_addr;
-	p5.l = _saved_dcplb_fault_addr;
-	[p5] = r7;
	[p5 + PDA_DCPLB] = r7;

-	r7 = [p4 + (ICPLB_FAULT_ADDR - DCPLB_FAULT_ADDR)];
-	p5.h = _saved_icplb_fault_addr;
-	p5.l = _saved_icplb_fault_addr;
-	[p5] = r7;
	p4.l = lo(ICPLB_FAULT_ADDR);
	p4.h = hi(ICPLB_FAULT_ADDR);
	r7 = [p4];
	[p5 + PDA_ICPLB] = r7;

	r6 = retx;
-	p4.l = _saved_retx;
-	p4.h = _saved_retx;
-	[p4] = r6;
	[p5 + PDA_RETX] = r6;

	r7 = SEQSTAT;		/* reason code is in bit 5:0 */
-	p4.l = _saved_seqstat;
-	p4.h = _saved_seqstat;
-	[p4] = r7;
	[p5 + PDA_SEQSTAT] = r7;
#else
	r7 = SEQSTAT;		/* reason code is in bit 5:0 */
#endif
@@ -546,11 +539,11 @@ ENTRY(_kernel_execve)
	p0 = sp;
	r3 = SIZEOF_PTREGS / 4;
	r4 = 0(x);
-0:
.Lclear_regs:
	[p0++] = r4;
	r3 += -1;
	cc = r3 == 0;
-	if !cc jump 0b (bp);
	if !cc jump .Lclear_regs (bp);

	p0 = sp;
	sp += -16;
@@ -558,7 +551,7 @@ ENTRY(_kernel_execve)
	call _do_execve;
	SP += 16;
	cc = r0 == 0;
-	if ! cc jump 1f;
	if ! cc jump .Lexecve_failed;

	/* Success.  Copy our temporary pt_regs to the top of the kernel
	 * stack and do a normal exception return.
	 */
@@ -574,12 +567,12 @@ ENTRY(_kernel_execve)
	p0 = fp;
	r4 = [p0--];
	r3 = SIZEOF_PTREGS / 4;
-0:
.Lcopy_regs:
	r4 = [p0--];
	[p1--] = r4;
	r3 += -1;
	cc = r3 == 0;
-	if ! cc jump 0b (bp);
	if ! cc jump .Lcopy_regs (bp);

	r0 = (KERNEL_STACK_SIZE - SIZEOF_PTREGS) (z);
	p1 = r0;
@@ -591,7 +584,7 @@ ENTRY(_kernel_execve)
	RESTORE_CONTEXT;
	rti;
-1:
.Lexecve_failed:
	unlink;
	rts;
ENDPROC(_kernel_execve)
@@ -925,9 +918,14 @@ _schedule_and_signal_from_int:
	p1 = rets;
	[sp + PT_RESERVED] = p1;

#ifdef CONFIG_SMP
	GET_PDA(p0, r0);	/* Fetch current PDA (can't migrate to other CPU here) */
	r0 = [p0 + PDA_IRQFLAGS];
#else
	p0.l = _irq_flags;
	p0.h = _irq_flags;
	r0 = [p0];
#endif
	sti r0;

	r0 = sp;
@@ -1539,12 +1537,6 @@ ENTRY(_sys_call_table)
	.endr
END(_sys_call_table)

-#if ANOMALY_05000261
-/* Used by the assembly entry point to work around an anomaly.  */
-_last_cplb_fault_retx:
-	.long 0;
-#endif
-
#ifdef CONFIG_EXCEPTION_L1_SCRATCH
/* .section .l1.bss.scratch */
.set _exception_stack_top, L1_SCRATCH_START + L1_SCRATCH_LENGTH
@@ -1554,8 +1546,8 @@ _last_cplb_fault_retx:
#else
	.bss
#endif

-_exception_stack:
-	.rept 1024
ENTRY(_exception_stack)
	.rept 1024 * NR_CPUS
	.long 0
	.endr
_exception_stack_top:
......
@@ -13,6 +13,7 @@
#include <asm/blackfin.h>
#include <asm/thread_info.h>
#include <asm/trace.h>
#include <asm/asm-offsets.h>

__INIT
@@ -111,33 +112,26 @@ ENTRY(__start)
	 * This happens here, since L1 gets clobbered
	 * below
	 */
-	p0.l = _saved_retx;
-	p0.h = _saved_retx;
	GET_PDA(p0, r0);
	r7 = [p0 + PDA_RETX];
	p1.l = _init_saved_retx;
	p1.h = _init_saved_retx;
-	r0 = [p0];
-	[p1] = r0;
	[p1] = r7;

-	p0.l = _saved_dcplb_fault_addr;
-	p0.h = _saved_dcplb_fault_addr;
	r7 = [p0 + PDA_DCPLB];
	p1.l = _init_saved_dcplb_fault_addr;
	p1.h = _init_saved_dcplb_fault_addr;
-	r0 = [p0];
-	[p1] = r0;
	[p1] = r7;

-	p0.l = _saved_icplb_fault_addr;
-	p0.h = _saved_icplb_fault_addr;
	r7 = [p0 + PDA_ICPLB];
	p1.l = _init_saved_icplb_fault_addr;
	p1.h = _init_saved_icplb_fault_addr;
-	r0 = [p0];
-	[p1] = r0;
	[p1] = r7;

-	p0.l = _saved_seqstat;
-	p0.h = _saved_seqstat;
	r7 = [p0 + PDA_SEQSTAT];
	p1.l = _init_saved_seqstat;
	p1.h = _init_saved_seqstat;
-	r0 = [p0];
-	[p1] = r0;
	[p1] = r7;
#endif

	/* Initialize stack pointer */
@@ -255,6 +249,9 @@ ENTRY(_real_start)
	sp = sp + p1;
	usp = sp;
	fp = sp;
	sp += -12;
	call _init_pda
	sp += 12;
	jump.l _start_kernel;
ENDPROC(_real_start)
......
@@ -55,6 +55,7 @@
 * -
 */

#ifndef CONFIG_SMP
/* Initialize this to an actual value to force it into the .data
 * section so that we know it is properly initialized at entry into
 * the kernel but before bss is initialized to zero (which is where
@@ -63,6 +64,7 @@
 */
unsigned long irq_flags = 0x1f;
EXPORT_SYMBOL(irq_flags);
#endif

/* The number of spurious interrupts */
atomic_t num_spurious;
@@ -163,6 +165,10 @@ static void bfin_internal_mask_irq(unsigned int irq)
	mask_bit = SIC_SYSIRQ(irq) % 32;
	bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) &
			     ~(1 << mask_bit));
#ifdef CONFIG_SMP
bfin_write_SICB_IMASK(mask_bank, bfin_read_SICB_IMASK(mask_bank) &
~(1 << mask_bit));
#endif
#endif
}
@@ -177,6 +183,10 @@ static void bfin_internal_unmask_irq(unsigned int irq)
	mask_bit = SIC_SYSIRQ(irq) % 32;
	bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) |
			     (1 << mask_bit));
#ifdef CONFIG_SMP
bfin_write_SICB_IMASK(mask_bank, bfin_read_SICB_IMASK(mask_bank) |
(1 << mask_bit));
#endif
#endif
}
@@ -896,7 +906,7 @@ static struct irq_chip bfin_gpio_irqchip = {
#endif
};

-void __init init_exception_vectors(void)
void __cpuinit init_exception_vectors(void)
{
	/* cannot program in software:
	 * evt0 - emulation (jtag)
@@ -935,6 +945,10 @@ int __init init_arch_irq(void)
# ifdef CONFIG_BF54x
	bfin_write_SIC_IMASK2(SIC_UNMASK_ALL);
# endif
# ifdef CONFIG_SMP
bfin_write_SICB_IMASK0(SIC_UNMASK_ALL);
bfin_write_SICB_IMASK1(SIC_UNMASK_ALL);
# endif
#else
	bfin_write_SIC_IMASK(SIC_UNMASK_ALL);
#endif
@@ -995,6 +1009,17 @@ int __init init_arch_irq(void)
			break;
#endif
#ifdef CONFIG_TICK_SOURCE_SYSTMR0
case IRQ_TIMER0:
set_irq_handler(irq, handle_percpu_irq);
break;
#endif
#ifdef CONFIG_SMP
case IRQ_SUPPLE_0:
case IRQ_SUPPLE_1:
set_irq_handler(irq, handle_percpu_irq);
break;
#endif
		default:
			set_irq_handler(irq, handle_simple_irq);
			break;
@@ -1029,7 +1054,7 @@ int __init init_arch_irq(void)
	search_IAR();

	/* Enable interrupts IVG7-15 */
-	irq_flags = irq_flags | IMASK_IVG15 |
	irq_flags |= IMASK_IVG15 |
		IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
		IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
@@ -1070,8 +1095,16 @@ void do_irq(int vec, struct pt_regs *fp)
	    || defined(BF538_FAMILY) || defined(CONFIG_BF51x)
	unsigned long sic_status[3];
if (smp_processor_id()) {
#ifdef CONFIG_SMP
/* This will be optimized out in UP mode. */
sic_status[0] = bfin_read_SICB_ISR0() & bfin_read_SICB_IMASK0();
sic_status[1] = bfin_read_SICB_ISR1() & bfin_read_SICB_IMASK1();
#endif
} else {
		sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0();
		sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1();
	}
#ifdef CONFIG_BF54x
	sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2();
#endif
......
@@ -130,7 +130,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
	mutex_init(&pfmon_lock);

-	dspid = bfin_read_DSPID();
	dspid = bfin_dspid();

	printk(KERN_INFO "Oprofile got the cpu id is 0x%x. \n", dspid);
......