Commit 36e91aa2 authored Jul 07, 2016 by Ingo Molnar
Merge branch 'locking/arch-atomic' into locking/core, because the topic is ready
Signed-off-by: Ingo Molnar <mingo@kernel.org>

parents 03e3c2b7 b7271b9f
Showing 52 changed files with 2449 additions and 620 deletions
arch/alpha/include/asm/atomic.h  +72 -15
arch/arc/include/asm/atomic.h  +87 -12
arch/arm/include/asm/atomic.h  +96 -10
arch/arm64/include/asm/atomic.h  +60 -0
arch/arm64/include/asm/atomic_ll_sc.h  +86 -24
arch/arm64/include/asm/atomic_lse.h  +196 -82
arch/avr32/include/asm/atomic.h  +49 -5
arch/blackfin/include/asm/atomic.h  +8 -0
arch/blackfin/kernel/bfin_ksyms.c  +1 -0
arch/blackfin/mach-bf561/atomic.S  +31 -12
arch/frv/include/asm/atomic.h  +10 -20
arch/frv/include/asm/atomic_defs.h  +2 -0
arch/h8300/include/asm/atomic.h  +23 -6
arch/hexagon/include/asm/atomic.h  +26 -5
arch/ia64/include/asm/atomic.h  +114 -16
arch/m32r/include/asm/atomic.h  +32 -4
arch/m68k/include/asm/atomic.h  +40 -4
arch/metag/include/asm/atomic_lnkget.h  +32 -4
arch/metag/include/asm/atomic_lock1.h  +29 -4
arch/mips/include/asm/atomic.h  +137 -17
arch/mn10300/include/asm/atomic.h  +29 -4
arch/parisc/include/asm/atomic.h  +55 -8
arch/powerpc/include/asm/atomic.h  +74 -9
arch/s390/include/asm/atomic.h  +30 -10
arch/sh/include/asm/atomic-grb.h  +30 -4
arch/sh/include/asm/atomic-irq.h  +27 -4
arch/sh/include/asm/atomic-llsc.h  +28 -4
arch/sparc/include/asm/atomic_32.h  +10 -3
arch/sparc/include/asm/atomic_64.h  +12 -4
arch/sparc/lib/atomic32.c  +17 -12
arch/sparc/lib/atomic_64.S  +50 -11
arch/sparc/lib/ksyms.c  +13 -4
arch/tile/include/asm/atomic.h  +2 -0
arch/tile/include/asm/atomic_32.h  +49 -25
arch/tile/include/asm/atomic_64.h  +76 -39
arch/tile/include/asm/bitops_32.h  +9 -9
arch/tile/include/asm/futex.h  +7 -7
arch/tile/lib/atomic_32.c  +25 -25
arch/tile/lib/atomic_asm_32.S  +16 -11
arch/x86/include/asm/atomic.h  +32 -3
arch/x86/include/asm/atomic64_32.h  +22 -3
arch/x86/include/asm/atomic64_64.h  +32 -3
arch/xtensa/include/asm/atomic.h  +48 -4
include/asm-generic/atomic-long.h  +35 -1
include/asm-generic/atomic.h  +47 -0
include/asm-generic/atomic64.h  +11 -4
include/linux/atomic.h  +466 -161
kernel/locking/qrwlock.c  +1 -1
kernel/locking/qspinlock_paravirt.h  +2 -2
kernel/locking/rwsem-xadd.c  +1 -1
lib/atomic64.c  +28 -4
lib/atomic64_test.c  +34 -0
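For orientation before the per-architecture diffs: the series merged here adds atomic_fetch_*() operations, which return the value the counter held before the update, alongside the existing atomic_*_return() operations, which return the value after the update. A minimal plain-C sketch of just that return-value contract (illustrative only; it ignores atomicity and memory ordering and is not the kernel implementation):

```c
/* Sketch of the return-value contract only -- not atomic, not kernel code. */
typedef struct { int counter; } atomic_t;

/* fetch_add flavour: apply the operation, hand back the OLD value. */
static int sketch_fetch_add(atomic_t *v, int i)
{
	int old = v->counter;		/* value before the update */

	v->counter = old + i;
	return old;
}

/* add_return flavour: apply the operation, hand back the NEW value. */
static int sketch_add_return(atomic_t *v, int i)
{
	v->counter += i;
	return v->counter;
}
```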
arch/alpha/include/asm/atomic.h
...
...
@@ -46,10 +46,9 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
} \
#define ATOMIC_OP_RETURN(op, asm_op) \
static inline int atomic_##op##_return(int i, atomic_t *v)		\
static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)	\
{ \
long temp, result; \
smp_mb(); \
__asm__ __volatile__( \
"1: ldl_l %0,%1\n" \
" " #asm_op " %0,%3,%2\n" \
...
...
@@ -61,7 +60,23 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
".previous" \
:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
:"Ir" (i), "m" (v->counter) : "memory"); \
smp_mb(); \
return result; \
}
#define ATOMIC_FETCH_OP(op, asm_op) \
static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
{ \
long temp, result; \
__asm__ __volatile__( \
"1: ldl_l %2,%1\n" \
" " #asm_op " %2,%3,%0\n" \
" stl_c %0,%1\n" \
" beq %0,2f\n" \
".subsection 2\n" \
"2: br 1b\n" \
".previous" \
:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
:"Ir" (i), "m" (v->counter) : "memory"); \
return result; \
}
...
...
@@ -82,10 +97,9 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \
} \
#define ATOMIC64_OP_RETURN(op, asm_op) \
static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v)	\
{ \
long temp, result; \
smp_mb(); \
__asm__ __volatile__( \
"1: ldq_l %0,%1\n" \
" " #asm_op " %0,%3,%2\n" \
...
...
@@ -97,34 +111,77 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
".previous" \
:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
:"Ir" (i), "m" (v->counter) : "memory"); \
smp_mb(); \
return result; \
}
#define ATOMIC64_FETCH_OP(op, asm_op) \
static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
{ \
long temp, result; \
__asm__ __volatile__( \
"1: ldq_l %2,%1\n" \
" " #asm_op " %2,%3,%0\n" \
" stq_c %0,%1\n" \
" beq %0,2f\n" \
".subsection 2\n" \
"2: br 1b\n" \
".previous" \
:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
:"Ir" (i), "m" (v->counter) : "memory"); \
return result; \
}
#define ATOMIC_OPS(op) \
ATOMIC_OP(op, op##l) \
ATOMIC_OP_RETURN(op, op##l) \
ATOMIC_FETCH_OP(op, op##l) \
ATOMIC64_OP(op, op##q) \
ATOMIC64_OP_RETURN(op, op##q)
ATOMIC64_OP_RETURN(op, op##q) \
ATOMIC64_FETCH_OP(op, op##q)
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
#define atomic_andnot atomic_andnot
#define atomic64_andnot atomic64_andnot
ATOMIC_OP(and, and)
ATOMIC_OP(andnot, bic)
ATOMIC_OP(or, bis)
ATOMIC_OP(xor, xor)
ATOMIC64_OP(and, and)
ATOMIC64_OP(andnot, bic)
ATOMIC64_OP(or, bis)
ATOMIC64_OP(xor, xor)
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm) \
ATOMIC_OP(op, asm) \
ATOMIC_FETCH_OP(op, asm) \
ATOMIC64_OP(op, asm) \
ATOMIC64_FETCH_OP(op, asm)
ATOMIC_OPS(and, and)
ATOMIC_OPS(andnot, bic)
ATOMIC_OPS(or, bis)
ATOMIC_OPS(xor, xor)
#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
#undef ATOMIC_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
...
...
arch/arc/include/asm/atomic.h
...
...
@@ -67,6 +67,33 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
return val; \
}
#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned int val, orig; \
\
/* \
* Explicit full memory barrier needed before/after as \
* LLOCK/SCOND thmeselves don't provide any such semantics \
*/
\
smp_mb(); \
\
__asm__ __volatile__( \
"1: llock %[orig], [%[ctr]] \n" \
" " #asm_op " %[val], %[orig], %[i] \n" \
" scond %[val], [%[ctr]] \n" \
" \n" \
: [val] "=&r" (val), \
[orig] "=&r" (orig) \
: [ctr] "r" (&v->counter), \
[i] "ir" (i) \
: "cc"); \
\
smp_mb(); \
\
return orig; \
}
#else
/* !CONFIG_ARC_HAS_LLSC */
#ifndef CONFIG_SMP
...
...
@@ -129,25 +156,44 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
return temp; \
}
#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
unsigned long orig; \
\
/* \
* spin lock/unlock provides the needed smp_mb() before/after \
*/
\
atomic_ops_lock(flags); \
orig = v->counter; \
v->counter c_op i; \
atomic_ops_unlock(flags); \
\
return orig; \
}
#endif
/* !CONFIG_ARC_HAS_LLSC */
#define ATOMIC_OPS(op, c_op, asm_op) \
ATOMIC_OP(op, c_op, asm_op) \
ATOMIC_OP_RETURN(op, c_op, asm_op)
ATOMIC_OP_RETURN(op, c_op, asm_op) \
ATOMIC_FETCH_OP(op, c_op, asm_op)
ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
#define atomic_andnot atomic_andnot
ATOMIC_OP(and, &=, and)
ATOMIC_OP(andnot, &= ~, bic)
ATOMIC_OP(or, |=, or)
ATOMIC_OP(xor, ^=, xor)
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op) \
ATOMIC_OP(op, c_op, asm_op) \
	ATOMIC_FETCH_OP(op, c_op, asm_op)
#undef SCOND_FAIL_RETRY_VAR_DEF
#undef SCOND_FAIL_RETRY_ASM
#undef SCOND_FAIL_RETRY_VARS
ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, xor)
#else
/* CONFIG_ARC_PLAT_EZNPS */
...
...
@@ -208,22 +254,51 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
return temp; \
}
#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned int temp = i; \
\
/* Explicit full memory barrier needed before/after */
\
smp_mb(); \
\
__asm__ __volatile__( \
" mov r2, %0\n" \
" mov r3, %1\n" \
" .word %2\n" \
" mov %0, r2" \
: "+r"(temp) \
: "r"(&v->counter), "i"(asm_op) \
: "r2", "r3", "memory"); \
\
smp_mb(); \
\
return temp; \
}
#define ATOMIC_OPS(op, c_op, asm_op) \
ATOMIC_OP(op, c_op, asm_op) \
ATOMIC_OP_RETURN(op, c_op, asm_op)
ATOMIC_OP_RETURN(op, c_op, asm_op) \
ATOMIC_FETCH_OP(op, c_op, asm_op)
ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3)
#define atomic_sub(i, v) atomic_add(-(i), (v))
#define atomic_sub_return(i, v) atomic_add_return(-(i), (v))
ATOMIC_OP(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op) \
ATOMIC_OP(op, c_op, asm_op) \
ATOMIC_FETCH_OP(op, c_op, asm_op)
ATOMIC_OPS(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
#define atomic_andnot(mask, v) atomic_and(~(mask), (v))
ATOMIC_OP(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
ATOMIC_OP(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)
ATOMIC_OPS(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)
#endif
/* CONFIG_ARC_PLAT_EZNPS */
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
...
...
arch/arm/include/asm/atomic.h
...
...
@@ -77,8 +77,36 @@ static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
return result; \
}
#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
{ \
unsigned long tmp; \
int result, val; \
\
prefetchw(&v->counter); \
\
__asm__ __volatile__("@ atomic_fetch_" #op "\n" \
"1: ldrex %0, [%4]\n" \
" " #asm_op " %1, %0, %5\n" \
" strex %2, %1, [%4]\n" \
" teq %2, #0\n" \
" bne 1b" \
: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter) \
: "r" (&v->counter), "Ir" (i) \
: "cc"); \
\
return result; \
}
#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
{
...
...
@@ -159,6 +187,20 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
return val; \
}
#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
int val; \
\
raw_local_irq_save(flags); \
val = v->counter; \
v->counter c_op i; \
raw_local_irq_restore(flags); \
\
return val; \
}
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
...
...
@@ -187,19 +229,26 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
#define ATOMIC_OPS(op, c_op, asm_op) \
ATOMIC_OP(op, c_op, asm_op) \
ATOMIC_OP_RETURN(op, c_op, asm_op)
ATOMIC_OP_RETURN(op, c_op, asm_op) \
ATOMIC_FETCH_OP(op, c_op, asm_op)
ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
#define atomic_andnot atomic_andnot
ATOMIC_OP(and, &=, and)
ATOMIC_OP(andnot, &= ~, bic)
ATOMIC_OP(or, |=, orr)
ATOMIC_OP(xor, ^=, eor)
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op) \
ATOMIC_OP(op, c_op, asm_op) \
ATOMIC_FETCH_OP(op, c_op, asm_op)
ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, orr)
ATOMIC_OPS(xor, ^=, eor)
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
...
...
@@ -317,24 +366,61 @@ atomic64_##op##_return_relaxed(long long i, atomic64_t *v) \
return result; \
}
#define ATOMIC64_FETCH_OP(op, op1, op2) \
static inline long long \
atomic64_fetch_##op##_relaxed(long long i, atomic64_t *v) \
{ \
long long result, val; \
unsigned long tmp; \
\
prefetchw(&v->counter); \
\
__asm__ __volatile__("@ atomic64_fetch_" #op "\n" \
"1: ldrexd %0, %H0, [%4]\n" \
" " #op1 " %Q1, %Q0, %Q5\n" \
" " #op2 " %R1, %R0, %R5\n" \
" strexd %2, %1, %H1, [%4]\n" \
" teq %2, #0\n" \
" bne 1b" \
: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter) \
: "r" (&v->counter), "r" (i) \
: "cc"); \
\
return result; \
}
#define ATOMIC64_OPS(op, op1, op2) \
ATOMIC64_OP(op, op1, op2) \
ATOMIC64_OP_RETURN(op, op1, op2)
ATOMIC64_OP_RETURN(op, op1, op2) \
ATOMIC64_FETCH_OP(op, op1, op2)
ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)
#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, op1, op2) \
ATOMIC64_OP(op, op1, op2) \
ATOMIC64_FETCH_OP(op, op1, op2)
#define atomic64_andnot atomic64_andnot
ATOMIC64_OP(and, and, and)
ATOMIC64_OP(andnot, bic, bic)
ATOMIC64_OP(or, orr, orr)
ATOMIC64_OP(xor, eor, eor)
ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or, orr, orr)
ATOMIC64_OPS(xor, eor, eor)
#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
...
...
arch/arm64/include/asm/atomic.h
...
...
@@ -76,6 +76,36 @@
#define atomic_dec_return_release(v) atomic_sub_return_release(1, (v))
#define atomic_dec_return(v) atomic_sub_return(1, (v))
#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#define atomic_fetch_add_acquire atomic_fetch_add_acquire
#define atomic_fetch_add_release atomic_fetch_add_release
#define atomic_fetch_add atomic_fetch_add
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
#define atomic_fetch_sub_acquire atomic_fetch_sub_acquire
#define atomic_fetch_sub_release atomic_fetch_sub_release
#define atomic_fetch_sub atomic_fetch_sub
#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
#define atomic_fetch_and_acquire atomic_fetch_and_acquire
#define atomic_fetch_and_release atomic_fetch_and_release
#define atomic_fetch_and atomic_fetch_and
#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire
#define atomic_fetch_andnot_release atomic_fetch_andnot_release
#define atomic_fetch_andnot atomic_fetch_andnot
#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
#define atomic_fetch_or_acquire atomic_fetch_or_acquire
#define atomic_fetch_or_release atomic_fetch_or_release
#define atomic_fetch_or atomic_fetch_or
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
#define atomic_fetch_xor_acquire atomic_fetch_xor_acquire
#define atomic_fetch_xor_release atomic_fetch_xor_release
#define atomic_fetch_xor atomic_fetch_xor
#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
#define atomic_xchg_acquire(v, new) xchg_acquire(&((v)->counter), (new))
#define atomic_xchg_release(v, new) xchg_release(&((v)->counter), (new))
...
...
@@ -125,6 +155,36 @@
#define atomic64_dec_return_release(v) atomic64_sub_return_release(1, (v))
#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
#define atomic64_fetch_add_acquire atomic64_fetch_add_acquire
#define atomic64_fetch_add_release atomic64_fetch_add_release
#define atomic64_fetch_add atomic64_fetch_add
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
#define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire
#define atomic64_fetch_sub_release atomic64_fetch_sub_release
#define atomic64_fetch_sub atomic64_fetch_sub
#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
#define atomic64_fetch_and_acquire atomic64_fetch_and_acquire
#define atomic64_fetch_and_release atomic64_fetch_and_release
#define atomic64_fetch_and atomic64_fetch_and
#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire
#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release
#define atomic64_fetch_andnot atomic64_fetch_andnot
#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
#define atomic64_fetch_or_acquire atomic64_fetch_or_acquire
#define atomic64_fetch_or_release atomic64_fetch_or_release
#define atomic64_fetch_or atomic64_fetch_or
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
#define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire
#define atomic64_fetch_xor_release atomic64_fetch_xor_release
#define atomic64_fetch_xor atomic64_fetch_xor
#define atomic64_xchg_relaxed atomic_xchg_relaxed
#define atomic64_xchg_acquire atomic_xchg_acquire
#define atomic64_xchg_release atomic_xchg_release
...
...
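The _relaxed/_acquire/_release suffixes declared above select the memory-ordering strength of each fetch op; the unsuffixed name is the fully ordered form. A hedged sketch of how a fully ordered operation can be composed from its _relaxed variant, in the spirit of the generic fallbacks in include/linux/atomic.h (the exact macro names there may differ):

```c
/* Sketch only: build a fully ordered atomic op from its _relaxed form by
 * bracketing it with full barriers, as the generic fallback pattern does. */
#define sketch_op_fence(op, args...)					\
({									\
	typeof(op##_relaxed(args)) __ret;				\
	smp_mb__before_atomic();					\
	__ret = op##_relaxed(args);					\
	smp_mb__after_atomic();						\
	__ret;								\
})
```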
arch/arm64/include/asm/atomic_ll_sc.h
...
...
@@ -77,26 +77,57 @@ __LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v)) \
} \
__LL_SC_EXPORT(atomic_##op##_return##name);
#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \
__LL_SC_INLINE int \
__LL_SC_PREFIX(atomic_fetch_##op##name(int i, atomic_t *v)) \
{ \
unsigned long tmp; \
int val, result; \
\
asm volatile("// atomic_fetch_" #op #name "\n" \
" prfm pstl1strm, %3\n" \
"1: ld" #acq "xr %w0, %3\n" \
" " #asm_op " %w1, %w0, %w4\n" \
" st" #rel "xr %w2, %w1, %3\n" \
" cbnz %w2, 1b\n" \
" " #mb \
: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
: "Ir" (i) \
: cl); \
\
return result; \
} \
__LL_SC_EXPORT(atomic_fetch_##op##name);
#define ATOMIC_OPS(...) \
ATOMIC_OP(__VA_ARGS__) \
ATOMIC_OP_RETURN( , dmb ish, , l, "memory", __VA_ARGS__)
#define ATOMIC_OPS_RLX(...) \
ATOMIC_OPS(__VA_ARGS__) \
ATOMIC_OP_RETURN( , dmb ish, , l, "memory", __VA_ARGS__)\
ATOMIC_OP_RETURN(_relaxed, , , , , __VA_ARGS__)\
ATOMIC_OP_RETURN(_acquire, , a, , "memory", __VA_ARGS__)\
ATOMIC_OP_RETURN(_release, , , l, "memory", __VA_ARGS__)
ATOMIC_OP_RETURN(_release, , , l, "memory", __VA_ARGS__)\
ATOMIC_FETCH_OP ( , dmb ish, , l, "memory", __VA_ARGS__)\
ATOMIC_FETCH_OP (_relaxed, , , , , __VA_ARGS__)\
ATOMIC_FETCH_OP (_acquire, , a, , "memory", __VA_ARGS__)\
ATOMIC_FETCH_OP (_release, , , l, "memory", __VA_ARGS__)
ATOMIC_OPS_RLX(add, add)
ATOMIC_OPS_RLX(sub, sub)
ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, sub)
#undef ATOMIC_OPS
#define ATOMIC_OPS(...) \
ATOMIC_OP(__VA_ARGS__) \
ATOMIC_FETCH_OP ( , dmb ish, , l, "memory", __VA_ARGS__)\
ATOMIC_FETCH_OP (_relaxed, , , , , __VA_ARGS__)\
ATOMIC_FETCH_OP (_acquire, , a, , "memory", __VA_ARGS__)\
ATOMIC_FETCH_OP (_release, , , l, "memory", __VA_ARGS__)
ATOMIC_OP(and, and)
ATOMIC_OP(andnot, bic)
ATOMIC_OP(or, orr)
ATOMIC_OP(xor, eor)
ATOMIC_OPS(and, and)
ATOMIC_OPS(andnot, bic)
ATOMIC_OPS(or, orr)
ATOMIC_OPS(xor, eor)
#undef ATOMIC_OPS_RLX
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
...
...
@@ -140,26 +171,57 @@ __LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v)) \
} \
__LL_SC_EXPORT(atomic64_##op##_return##name);
#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \
__LL_SC_INLINE long \
__LL_SC_PREFIX(atomic64_fetch_##op##name(long i, atomic64_t *v)) \
{ \
long result, val; \
unsigned long tmp; \
\
asm volatile("// atomic64_fetch_" #op #name "\n" \
" prfm pstl1strm, %3\n" \
"1: ld" #acq "xr %0, %3\n" \
" " #asm_op " %1, %0, %4\n" \
" st" #rel "xr %w2, %1, %3\n" \
" cbnz %w2, 1b\n" \
" " #mb \
: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
: "Ir" (i) \
: cl); \
\
return result; \
} \
__LL_SC_EXPORT(atomic64_fetch_##op##name);
#define ATOMIC64_OPS(...) \
ATOMIC64_OP(__VA_ARGS__) \
ATOMIC64_OP_RETURN(, dmb ish, , l, "memory", __VA_ARGS__)
#define ATOMIC64_OPS_RLX(...) \
ATOMIC64_OPS(__VA_ARGS__) \
ATOMIC64_OP_RETURN(, dmb ish, , l, "memory", __VA_ARGS__) \
ATOMIC64_OP_RETURN(_relaxed,, , , , __VA_ARGS__) \
ATOMIC64_OP_RETURN(_acquire,, a, , "memory", __VA_ARGS__) \
ATOMIC64_OP_RETURN(_release,, , l, "memory", __VA_ARGS__)
ATOMIC64_OP_RETURN(_release,, , l, "memory", __VA_ARGS__) \
ATOMIC64_FETCH_OP (, dmb ish, , l, "memory", __VA_ARGS__) \
ATOMIC64_FETCH_OP (_relaxed,, , , , __VA_ARGS__) \
ATOMIC64_FETCH_OP (_acquire,, a, , "memory", __VA_ARGS__) \
ATOMIC64_FETCH_OP (_release,, , l, "memory", __VA_ARGS__)
ATOMIC64_OPS_RLX(add, add)
ATOMIC64_OPS_RLX(sub, sub)
ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, sub)
#undef ATOMIC64_OPS
#define ATOMIC64_OPS(...) \
ATOMIC64_OP(__VA_ARGS__) \
ATOMIC64_FETCH_OP (, dmb ish, , l, "memory", __VA_ARGS__) \
ATOMIC64_FETCH_OP (_relaxed,, , , , __VA_ARGS__) \
ATOMIC64_FETCH_OP (_acquire,, a, , "memory", __VA_ARGS__) \
ATOMIC64_FETCH_OP (_release,, , l, "memory", __VA_ARGS__)
ATOMIC64_OP(and, and)
ATOMIC64_OP(andnot, bic)
ATOMIC64_OP(or, orr)
ATOMIC64_OP(xor, eor)
ATOMIC64_OPS(and, and)
ATOMIC64_OPS(andnot, bic)
ATOMIC64_OPS(or, orr)
ATOMIC64_OPS(xor, eor)
#undef ATOMIC64_OPS_RLX
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
...
...
arch/arm64/include/asm/atomic_lse.h
...
...
@@ -26,54 +26,57 @@
#endif
#define __LL_SC_ATOMIC(op) __LL_SC_CALL(atomic_##op)
static inline void atomic_andnot(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(andnot),
	"	stclr	%w[i], %[v]\n")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
#define ATOMIC_OP(op, asm_op)						\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	register int w0 asm ("w0") = i;					\
	register atomic_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(op),		\
"	" #asm_op "	%w[i], %[v]\n")					\
	: [i] "+r" (w0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS);						\
}
static inline void atomic_or(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;
ATOMIC_OP(andnot, stclr)
ATOMIC_OP(or, stset)
ATOMIC_OP(xor, steor)
ATOMIC_OP(add, stadd)
	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(or),
	"	stset	%w[i], %[v]\n")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}
#undef ATOMIC_OP
static inline void atomic_xor(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(xor),
	"	steor	%w[i], %[v]\n")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
#define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...) \
static inline int atomic_fetch_##op##name(int i, atomic_t *v) \
{ \
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = v; \
\
asm volatile(ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */
\
__LL_SC_ATOMIC(fetch_##op##name), \
/* LSE atomics */
\
" " #asm_op #mb " %w[i], %w[i], %[v]") \
: [i] "+r" (w0), [v] "+Q" (v->counter) \
: "r" (x1) \
: __LL_SC_CLOBBERS, ##cl); \
\
return w0; \
}
static inline void atomic_add(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;
#define ATOMIC_FETCH_OPS(op, asm_op) \
ATOMIC_FETCH_OP(_relaxed, , op, asm_op) \
ATOMIC_FETCH_OP(_acquire, a, op, asm_op, "memory") \
ATOMIC_FETCH_OP(_release, l, op, asm_op, "memory") \
ATOMIC_FETCH_OP( , al, op, asm_op, "memory")
	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(add),
	"	stadd	%w[i], %[v]\n")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}
ATOMIC_FETCH_OPS(andnot, ldclr)
ATOMIC_FETCH_OPS(or, ldset)
ATOMIC_FETCH_OPS(xor, ldeor)
ATOMIC_FETCH_OPS(add, ldadd)
#undef ATOMIC_FETCH_OP
#undef ATOMIC_FETCH_OPS
#define ATOMIC_OP_ADD_RETURN(name, mb, cl...) \
static inline int atomic_add_return##name(int i, atomic_t *v) \
...
...
@@ -119,6 +122,33 @@ static inline void atomic_and(int i, atomic_t *v)
	: __LL_SC_CLOBBERS);
}
#define ATOMIC_FETCH_OP_AND(name, mb, cl...) \
static inline int atomic_fetch_and##name(int i, atomic_t *v) \
{ \
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = v; \
\
asm volatile(ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */
\
" nop\n" \
__LL_SC_ATOMIC(fetch_and##name), \
/* LSE atomics */
\
" mvn %w[i], %w[i]\n" \
" ldclr" #mb " %w[i], %w[i], %[v]") \
: [i] "+r" (w0), [v] "+Q" (v->counter) \
: "r" (x1) \
: __LL_SC_CLOBBERS, ##cl); \
\
return w0; \
}
ATOMIC_FETCH_OP_AND(_relaxed,   )
ATOMIC_FETCH_OP_AND(_acquire,  a, "memory")
ATOMIC_FETCH_OP_AND(_release,  l, "memory")
ATOMIC_FETCH_OP_AND(        , al, "memory")
#undef ATOMIC_FETCH_OP_AND
static inline void atomic_sub(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
...
...
@@ -164,57 +194,87 @@ ATOMIC_OP_SUB_RETURN(_release, l, "memory")
ATOMIC_OP_SUB_RETURN(        , al, "memory")
#undef ATOMIC_OP_SUB_RETURN
#undef __LL_SC_ATOMIC
#define __LL_SC_ATOMIC64(op) __LL_SC_CALL(atomic64_##op)
static inline void atomic64_andnot(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(andnot),
	"	stclr	%[i], %[v]\n")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
#define ATOMIC_FETCH_OP_SUB(name, mb, cl...) \
static inline int atomic_fetch_sub##name(int i, atomic_t *v) \
{ \
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = v; \
\
asm volatile(ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */
\
" nop\n" \
__LL_SC_ATOMIC(fetch_sub##name), \
/* LSE atomics */
\
" neg %w[i], %w[i]\n" \
" ldadd" #mb " %w[i], %w[i], %[v]") \
: [i] "+r" (w0), [v] "+Q" (v->counter) \
: "r" (x1) \
: __LL_SC_CLOBBERS, ##cl); \
\
return w0; \
}
static inline void atomic64_or(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;
ATOMIC_FETCH_OP_SUB(_relaxed,   )
ATOMIC_FETCH_OP_SUB(_acquire,  a, "memory")
ATOMIC_FETCH_OP_SUB(_release,  l, "memory")
ATOMIC_FETCH_OP_SUB(        , al, "memory")
	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(or),
	"	stset	%[i], %[v]\n")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
#undef ATOMIC_FETCH_OP_SUB
#undef __LL_SC_ATOMIC
#define __LL_SC_ATOMIC64(op) __LL_SC_CALL(atomic64_##op)
#define ATOMIC64_OP(op, asm_op) \
static inline void atomic64_##op(long i, atomic64_t *v) \
{ \
register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
\
asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(op), \
" " #asm_op " %[i], %[v]\n") \
: [i] "+r" (x0), [v] "+Q" (v->counter) \
: "r" (x1) \
: __LL_SC_CLOBBERS); \
}
static inline void atomic64_xor(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;
ATOMIC64_OP(andnot, stclr)
ATOMIC64_OP(or, stset)
ATOMIC64_OP(xor, steor)
ATOMIC64_OP(add, stadd)
	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(xor),
	"	steor	%[i], %[v]\n")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
#undef ATOMIC64_OP
#define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...) \
static inline long atomic64_fetch_##op##name(long i, atomic64_t *v) \
{ \
register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
\
asm volatile(ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */
\
__LL_SC_ATOMIC64(fetch_##op##name), \
/* LSE atomics */
\
" " #asm_op #mb " %[i], %[i], %[v]") \
: [i] "+r" (x0), [v] "+Q" (v->counter) \
: "r" (x1) \
: __LL_SC_CLOBBERS, ##cl); \
\
return x0; \
}
static inline void atomic64_add(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;
#define ATOMIC64_FETCH_OPS(op, asm_op) \
ATOMIC64_FETCH_OP(_relaxed, , op, asm_op) \
ATOMIC64_FETCH_OP(_acquire, a, op, asm_op, "memory") \
ATOMIC64_FETCH_OP(_release, l, op, asm_op, "memory") \
ATOMIC64_FETCH_OP( , al, op, asm_op, "memory")
	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(add),
	"	stadd	%[i], %[v]\n")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}
ATOMIC64_FETCH_OPS(andnot, ldclr)
ATOMIC64_FETCH_OPS(or, ldset)
ATOMIC64_FETCH_OPS(xor, ldeor)
ATOMIC64_FETCH_OPS(add, ldadd)
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_FETCH_OPS
#define ATOMIC64_OP_ADD_RETURN(name, mb, cl...) \
static inline long atomic64_add_return##name(long i, atomic64_t *v) \
...
...
@@ -260,6 +320,33 @@ static inline void atomic64_and(long i, atomic64_t *v)
	: __LL_SC_CLOBBERS);
}
#define ATOMIC64_FETCH_OP_AND(name, mb, cl...) \
static inline long atomic64_fetch_and##name(long i, atomic64_t *v) \
{ \
register long x0 asm ("w0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
\
asm volatile(ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */
\
" nop\n" \
__LL_SC_ATOMIC64(fetch_and##name), \
/* LSE atomics */
\
" mvn %[i], %[i]\n" \
" ldclr" #mb " %[i], %[i], %[v]") \
: [i] "+r" (x0), [v] "+Q" (v->counter) \
: "r" (x1) \
: __LL_SC_CLOBBERS, ##cl); \
\
return x0; \
}
ATOMIC64_FETCH_OP_AND(_relaxed,   )
ATOMIC64_FETCH_OP_AND(_acquire,  a, "memory")
ATOMIC64_FETCH_OP_AND(_release,  l, "memory")
ATOMIC64_FETCH_OP_AND(        , al, "memory")
#undef ATOMIC64_FETCH_OP_AND
static inline void atomic64_sub(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
...
...
@@ -306,6 +393,33 @@ ATOMIC64_OP_SUB_RETURN( , al, "memory")
#undef ATOMIC64_OP_SUB_RETURN
#define ATOMIC64_FETCH_OP_SUB(name, mb, cl...) \
static inline long atomic64_fetch_sub##name(long i, atomic64_t *v) \
{ \
register long x0 asm ("w0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
\
asm volatile(ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */
\
" nop\n" \
__LL_SC_ATOMIC64(fetch_sub##name), \
/* LSE atomics */
\
" neg %[i], %[i]\n" \
" ldadd" #mb " %[i], %[i], %[v]") \
: [i] "+r" (x0), [v] "+Q" (v->counter) \
: "r" (x1) \
: __LL_SC_CLOBBERS, ##cl); \
\
return x0; \
}
ATOMIC64_FETCH_OP_SUB(_relaxed,   )
ATOMIC64_FETCH_OP_SUB(_acquire,  a, "memory")
ATOMIC64_FETCH_OP_SUB(_release,  l, "memory")
ATOMIC64_FETCH_OP_SUB(        , al, "memory")
#undef ATOMIC64_FETCH_OP_SUB
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	register long x0 asm ("x0") = (long)v;
...
...
arch/avr32/include/asm/atomic.h
...
...
@@ -41,21 +41,49 @@ static inline int __atomic_##op##_return(int i, atomic_t *v) \
return result; \
}
#define ATOMIC_FETCH_OP(op, asm_op, asm_con) \
static inline int __atomic_fetch_##op(int i, atomic_t *v) \
{ \
int result, val; \
\
asm volatile( \
"
/* atomic_fetch_" #op " */
\n" \
"1: ssrf 5\n" \
" ld.w %0, %3\n" \
" mov %1, %0\n" \
" " #asm_op " %1, %4\n" \
" stcond %2, %1\n" \
" brne 1b" \
: "=&r" (result), "=&r" (val), "=o" (v->counter) \
: "m" (v->counter), #asm_con (i) \
: "cc"); \
\
return result; \
}
ATOMIC_OP_RETURN(sub, sub, rKs21)
ATOMIC_OP_RETURN(add, add, r)
ATOMIC_FETCH_OP (sub, sub, rKs21)
ATOMIC_FETCH_OP (add, add, r)
#define ATOMIC_OP(op, asm_op) \
#define ATOMIC_OPS(op, asm_op) \
ATOMIC_OP_RETURN(op, asm_op, r) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
(void)__atomic_##op##_return(i, v); \
} \
ATOMIC_FETCH_OP(op, asm_op, r) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
return __atomic_fetch_##op(i, v); \
}
ATOMIC_OP(and, and)
ATOMIC_OP(or, or)
ATOMIC_OP(xor, eor)
ATOMIC_OPS(and, and)
ATOMIC_OPS(or, or)
ATOMIC_OPS(xor, eor)
#undef ATOMIC_OP
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
/*
...
...
@@ -87,6 +115,14 @@ static inline int atomic_add_return(int i, atomic_t *v)
	return __atomic_add_return(i, v);
}
static inline int atomic_fetch_add(int i, atomic_t *v)
{
	if (IS_21BIT_CONST(i))
		return __atomic_fetch_sub(-i, v);

	return __atomic_fetch_add(i, v);
}
/*
* atomic_sub_return - subtract the atomic variable
* @i: integer value to subtract
...
...
@@ -102,6 +138,14 @@ static inline int atomic_sub_return(int i, atomic_t *v)
	return __atomic_add_return(-i, v);
}
static inline int atomic_fetch_sub(int i, atomic_t *v)
{
	if (IS_21BIT_CONST(i))
		return __atomic_fetch_sub(i, v);

	return __atomic_fetch_add(-i, v);
}
/*
* __atomic_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
...
...
arch/blackfin/include/asm/atomic.h
...
...
@@ -17,6 +17,7 @@
asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr);
asmlinkage int __raw_atomic_add_asm(volatile int *ptr, int value);
asmlinkage int __raw_atomic_xadd_asm(volatile int *ptr, int value);
asmlinkage int __raw_atomic_and_asm(volatile int *ptr, int value);
asmlinkage int __raw_atomic_or_asm(volatile int *ptr, int value);
...
...
@@ -28,10 +29,17 @@ asmlinkage int __raw_atomic_test_asm(const volatile int *ptr, int value);
#define atomic_add_return(i, v) __raw_atomic_add_asm(&(v)->counter, i)
#define atomic_sub_return(i, v) __raw_atomic_add_asm(&(v)->counter, -(i))
#define atomic_fetch_add(i, v) __raw_atomic_xadd_asm(&(v)->counter, i)
#define atomic_fetch_sub(i, v) __raw_atomic_xadd_asm(&(v)->counter, -(i))
#define atomic_or(i, v) (void)__raw_atomic_or_asm(&(v)->counter, i)
#define atomic_and(i, v) (void)__raw_atomic_and_asm(&(v)->counter, i)
#define atomic_xor(i, v) (void)__raw_atomic_xor_asm(&(v)->counter, i)
#define atomic_fetch_or(i, v) __raw_atomic_or_asm(&(v)->counter, i)
#define atomic_fetch_and(i, v) __raw_atomic_and_asm(&(v)->counter, i)
#define atomic_fetch_xor(i, v) __raw_atomic_xor_asm(&(v)->counter, i)
#endif
#include <asm-generic/atomic.h>
...
...
arch/blackfin/kernel/bfin_ksyms.c
...
...
@@ -84,6 +84,7 @@ EXPORT_SYMBOL(insl_16);
#ifdef CONFIG_SMP
EXPORT_SYMBOL(__raw_atomic_add_asm);
EXPORT_SYMBOL(__raw_atomic_xadd_asm);
EXPORT_SYMBOL(__raw_atomic_and_asm);
EXPORT_SYMBOL(__raw_atomic_or_asm);
EXPORT_SYMBOL(__raw_atomic_xor_asm);
...
...
arch/blackfin/mach-bf561/atomic.S
...
...
@@ -605,6 +605,28 @@ ENTRY(___raw_atomic_add_asm)
	rts;
ENDPROC(___raw_atomic_add_asm)
/*
 * r0 = ptr
 * r1 = value
 *
 * ADD a signed value to a 32bit word and return the old value atomically.
 * Clobbers: r3:0, p1:0
 */
ENTRY(___raw_atomic_xadd_asm)
	p1 = r0;
	r3 = r1;
	[--sp] = rets;
	call _get_core_lock;
	r3 = [p1];
	r2 = r3 + r2;
	[p1] = r2;
	r1 = p1;
	call _put_core_lock;
	r0 = r3;
	rets = [sp++];
	rts;
ENDPROC(___raw_atomic_add_asm)
/*
 * r0 = ptr
 * r1 = mask
...
...
@@ -618,10 +640,9 @@ ENTRY(___raw_atomic_and_asm)
	r3 = r1;
	[--sp] = rets;
	call _get_core_lock;
	r2 = [p1];
	r3 = r2 & r3;
	[p1] = r3;
	r3 = r2;
	r3 = [p1];
	r2 = r2 & r3;
	[p1] = r2;
	r1 = p1;
	call _put_core_lock;
	r0 = r3;
...
...
@@ -642,10 +663,9 @@ ENTRY(___raw_atomic_or_asm)
	r3 = r1;
	[--sp] = rets;
	call _get_core_lock;
	r2 = [p1];
	r3 = r2 | r3;
	[p1] = r3;
	r3 = r2;
	r3 = [p1];
	r2 = r2 | r3;
	[p1] = r2;
	r1 = p1;
	call _put_core_lock;
	r0 = r3;
...
...
@@ -666,10 +686,9 @@ ENTRY(___raw_atomic_xor_asm)
	r3 = r1;
	[--sp] = rets;
	call _get_core_lock;
	r2 = [p1];
	r3 = r2 ^ r3;
	[p1] = r3;
	r3 = r2;
	r3 = [p1];
	r2 = r2 ^ r3;
	[p1] = r2;
	r1 = p1;
	call _put_core_lock;
	r0 = r3;
...
...
arch/frv/include/asm/atomic.h
...
...
@@ -60,16 +60,6 @@ static inline int atomic_add_negative(int i, atomic_t *v)
	return atomic_add_return(i, v) < 0;
}

static inline void atomic_add(int i, atomic_t *v)
{
	atomic_add_return(i, v);
}

static inline void atomic_sub(int i, atomic_t *v)
{
	atomic_sub_return(i, v);
}

static inline void atomic_inc(atomic_t *v)
{
	atomic_inc_return(v);
...
...
@@ -136,16 +126,6 @@ static inline long long atomic64_add_negative(long long i, atomic64_t *v)
	return atomic64_add_return(i, v) < 0;
}

static inline void atomic64_add(long long i, atomic64_t *v)
{
	atomic64_add_return(i, v);
}

static inline void atomic64_sub(long long i, atomic64_t *v)
{
	atomic64_sub_return(i, v);
}

static inline void atomic64_inc(atomic64_t *v)
{
	atomic64_inc_return(v);
...
...
@@ -182,11 +162,19 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
}
#define ATOMIC_OP(op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
return __atomic32_fetch_##op(i, &v->counter); \
} \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
(void)__atomic32_fetch_##op(i, &v->counter); \
} \
\
static inline long long atomic64_fetch_##op(long long i, atomic64_t *v) \
{ \
return __atomic64_fetch_##op(i, &v->counter); \
} \
static inline void atomic64_##op(long long i, atomic64_t *v) \
{ \
(void)__atomic64_fetch_##op(i, &v->counter); \
...
...
@@ -195,6 +183,8 @@ static inline void atomic64_##op(long long i, atomic64_t *v) \
ATOMIC_OP(or)
ATOMIC_OP(and)
ATOMIC_OP(xor)
ATOMIC_OP(add)
ATOMIC_OP(sub)
#undef ATOMIC_OP
...
...
arch/frv/include/asm/atomic_defs.h
...
...
@@ -162,6 +162,8 @@ ATOMIC_EXPORT(__atomic64_fetch_##op);
ATOMIC_FETCH_OP(or)
ATOMIC_FETCH_OP(and)
ATOMIC_FETCH_OP(xor)
ATOMIC_FETCH_OP(add)
ATOMIC_FETCH_OP(sub)
ATOMIC_OP_RETURN(add)
ATOMIC_OP_RETURN(sub)
...
...
arch/h8300/include/asm/atomic.h
...
...
@@ -28,6 +28,19 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
return ret; \
}
#define ATOMIC_FETCH_OP(op, c_op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
h8300flags flags; \
int ret; \
\
flags = arch_local_irq_save(); \
ret = v->counter; \
v->counter c_op i; \
arch_local_irq_restore(flags); \
return ret; \
}
#define ATOMIC_OP(op, c_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
...
...
@@ -41,17 +54,21 @@ static inline void atomic_##op(int i, atomic_t *v) \
ATOMIC_OP_RETURN(add, +=)
ATOMIC_OP_RETURN(sub, -=)
ATOMIC_OP(and, &=)
ATOMIC_OP(or,  |=)
ATOMIC_OP(xor, ^=)
#define ATOMIC_OPS(op, c_op) \
ATOMIC_OP(op, c_op) \
ATOMIC_FETCH_OP(op, c_op)
ATOMIC_OPS(and, &=)
ATOMIC_OPS(or,  |=)
ATOMIC_OPS(xor, ^=)
ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
#define atomic_add(i, v) (void)atomic_add_return(i, v)
#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
#define atomic_sub(i, v) (void)atomic_sub_return(i, v)
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
#define atomic_inc_return(v) atomic_add_return(1, v)
...
...
arch/hexagon/include/asm/atomic.h
...
...
@@ -110,7 +110,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
); \
} \
#define ATOMIC_OP_RETURN(op)						\
#define ATOMIC_OP_RETURN(op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
int output; \
...
...
@@ -127,16 +127,37 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
return output; \
}
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
#define ATOMIC_FETCH_OP(op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
int output, val; \
\
__asm__ __volatile__ ( \
"1: %0 = memw_locked(%2);\n" \
" %1 = "#op "(%0,%3);\n" \
" memw_locked(%2,P3)=%1;\n" \
" if !P3 jump 1b;\n" \
: "=&r" (output), "=&r" (val) \
: "r" (&v->counter), "r" (i) \
: "memory", "p3" \
); \
return output; \
}
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)
#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
...
...
arch/ia64/include/asm/atomic.h
...
...
@@ -42,8 +42,27 @@ ia64_atomic_##op (int i, atomic_t *v) \
return new; \
}
ATOMIC_OP(add, +)
ATOMIC_OP(sub, -)
#define ATOMIC_FETCH_OP(op, c_op) \
static __inline__ int \
ia64_atomic_fetch_##op (int i, atomic_t *v) \
{ \
__s32 old, new; \
CMPXCHG_BUGCHECK_DECL \
\
do { \
CMPXCHG_BUGCHECK(v); \
old = atomic_read(v); \
new = old c_op i; \
} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
return old; \
}
#define ATOMIC_OPS(op, c_op) \
ATOMIC_OP(op, c_op) \
ATOMIC_FETCH_OP(op, c_op)
ATOMIC_OPS(add, +)
ATOMIC_OPS(sub, -)
#define atomic_add_return(i,v) \
({ \
...
...
@@ -69,14 +88,44 @@ ATOMIC_OP(sub, -)
: ia64_atomic_sub(__ia64_asr_i, v); \
})
ATOMIC_OP(and, &)
ATOMIC_OP(or, |)
ATOMIC_OP(xor, ^)
#define atomic_fetch_add(i,v) \
({ \
int __ia64_aar_i = (i); \
(__builtin_constant_p(i) \
&& ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \
|| (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \
|| (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \
|| (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq) \
: ia64_atomic_fetch_add(__ia64_aar_i, v); \
})
#define atomic_fetch_sub(i,v) \
({ \
int __ia64_asr_i = (i); \
(__builtin_constant_p(i) \
&& ( (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \
|| (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \
|| (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \
|| (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq) \
: ia64_atomic_fetch_sub(__ia64_asr_i, v); \
})
ATOMIC_FETCH_OP(and, &)
ATOMIC_FETCH_OP(or, |)
ATOMIC_FETCH_OP(xor, ^)
#define atomic_and(i,v) (void)ia64_atomic_fetch_and(i,v)
#define atomic_or(i,v) (void)ia64_atomic_fetch_or(i,v)
#define atomic_xor(i,v) (void)ia64_atomic_fetch_xor(i,v)
#define atomic_and(i,v)	(void)ia64_atomic_and(i,v)
#define atomic_or(i,v)	(void)ia64_atomic_or(i,v)
#define atomic_xor(i,v)	(void)ia64_atomic_xor(i,v)
#define atomic_fetch_and(i,v)	ia64_atomic_fetch_and(i,v)
#define atomic_fetch_or(i,v)	ia64_atomic_fetch_or(i,v)
#define atomic_fetch_xor(i,v)	ia64_atomic_fetch_xor(i,v)
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP
#define ATOMIC64_OP(op, c_op) \
...
...
@@ -94,8 +143,27 @@ ia64_atomic64_##op (__s64 i, atomic64_t *v) \
return new; \
}
ATOMIC64_OP(add, +)
ATOMIC64_OP(sub, -)
#define ATOMIC64_FETCH_OP(op, c_op) \
static __inline__ long \
ia64_atomic64_fetch_##op (__s64 i, atomic64_t *v) \
{ \
__s64 old, new; \
CMPXCHG_BUGCHECK_DECL \
\
do { \
CMPXCHG_BUGCHECK(v); \
old = atomic64_read(v); \
new = old c_op i; \
} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
return old; \
}
#define ATOMIC64_OPS(op, c_op) \
ATOMIC64_OP(op, c_op) \
ATOMIC64_FETCH_OP(op, c_op)
ATOMIC64_OPS(add, +)
ATOMIC64_OPS(sub, -)
#define atomic64_add_return(i,v) \
({ \
...
...
@@ -121,14 +189,44 @@ ATOMIC64_OP(sub, -)
: ia64_atomic64_sub(__ia64_asr_i, v); \
})
ATOMIC64_OP(and, &)
ATOMIC64_OP(or, |)
ATOMIC64_OP(xor, ^)
#define atomic64_fetch_add(i,v) \
({ \
long __ia64_aar_i = (i); \
(__builtin_constant_p(i) \
&& ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \
|| (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \
|| (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \
|| (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq) \
: ia64_atomic64_fetch_add(__ia64_aar_i, v); \
})
#define atomic64_fetch_sub(i,v) \
({ \
long __ia64_asr_i = (i); \
(__builtin_constant_p(i) \
&& ( (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \
|| (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \
|| (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \
|| (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq) \
: ia64_atomic64_fetch_sub(__ia64_asr_i, v); \
})
ATOMIC64_FETCH_OP(and, &)
ATOMIC64_FETCH_OP(or, |)
ATOMIC64_FETCH_OP(xor, ^)
#define atomic64_and(i,v) (void)ia64_atomic64_fetch_and(i,v)
#define atomic64_or(i,v) (void)ia64_atomic64_fetch_or(i,v)
#define atomic64_xor(i,v) (void)ia64_atomic64_fetch_xor(i,v)
#define atomic64_and(i,v)	(void)ia64_atomic64_and(i,v)
#define atomic64_or(i,v)	(void)ia64_atomic64_or(i,v)
#define atomic64_xor(i,v)	(void)ia64_atomic64_xor(i,v)
#define atomic64_fetch_and(i,v)	ia64_atomic64_fetch_and(i,v)
#define atomic64_fetch_or(i,v)	ia64_atomic64_fetch_or(i,v)
#define atomic64_fetch_xor(i,v)	ia64_atomic64_fetch_xor(i,v)
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP
#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
...
...
arch/m32r/include/asm/atomic.h
...
...
@@ -89,16 +89,44 @@ static __inline__ int atomic_##op##_return(int i, atomic_t *v) \
return result; \
}
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
#define ATOMIC_FETCH_OP(op) \
static __inline__ int atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
int result, val; \
\
local_irq_save(flags); \
__asm__ __volatile__ ( \
"# atomic_fetch_" #op " \n\t" \
DCACHE_CLEAR("%0", "r4", "%2") \
M32R_LOCK" %1, @%2; \n\t" \
"mv %0, %1 \n\t" \
#op " %1, %3; \n\t" \
M32R_UNLOCK" %1, @%2; \n\t" \
: "=&r" (result), "=&r" (val) \
: "r" (&v->counter), "r" (i) \
: "memory" \
__ATOMIC_CLOBBER \
); \
local_irq_restore(flags); \
\
return result; \
}
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)
#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
...
...
arch/m68k/include/asm/atomic.h
...
...
@@ -53,6 +53,21 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
return t; \
}
#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
int t, tmp; \
\
__asm__ __volatile__( \
"1: movel %2,%1\n" \
" " #asm_op "l %3,%1\n" \
" casl %2,%1,%0\n" \
" jne 1b" \
: "+m" (*v), "=&d" (t), "=&d" (tmp) \
: "g" (i), "2" (atomic_read(v))); \
return tmp; \
}
#else
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
...
...
@@ -68,20 +83,41 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
return t; \
}
#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
static inline int atomic_fetch_##op(int i, atomic_t * v) \
{ \
unsigned long flags; \
int t; \
\
local_irq_save(flags); \
t = v->counter; \
v->counter c_op i; \
local_irq_restore(flags); \
\
return t; \
}
#endif
/* CONFIG_RMW_INSNS */
#define ATOMIC_OPS(op, c_op, asm_op) \
ATOMIC_OP(op, c_op, asm_op) \
ATOMIC_OP_RETURN(op, c_op, asm_op)
ATOMIC_OP_RETURN(op, c_op, asm_op) \
ATOMIC_FETCH_OP(op, c_op, asm_op)
ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
ATOMIC_OP(and, &=, and)
ATOMIC_OP(or, |=, or)
ATOMIC_OP(xor, ^=, eor)
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op) \
ATOMIC_OP(op, c_op, asm_op) \
ATOMIC_FETCH_OP(op, c_op, asm_op)
ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, eor)
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
...
...
arch/metag/include/asm/atomic_lnkget.h
...
...
@@ -69,16 +69,44 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
return result; \
}
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
#define ATOMIC_FETCH_OP(op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
int result, temp; \
\
smp_mb(); \
\
asm volatile ( \
"1: LNKGETD %1, [%2]\n" \
" " #op " %0, %1, %3\n" \
" LNKSETD [%2], %0\n" \
" DEFR %0, TXSTAT\n" \
" ANDT %0, %0, #HI(0x3f000000)\n" \
" CMPT %0, #HI(0x02000000)\n" \
" BNZ 1b\n" \
: "=&d" (temp), "=&d" (result) \
: "da" (&v->counter), "bd" (i) \
: "cc"); \
\
smp_mb(); \
\
return result; \
}
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)
#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
...
...
arch/metag/include/asm/atomic_lock1.h
...
...
@@ -64,15 +64,40 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
return result; \
}
#define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)
#define ATOMIC_FETCH_OP(op, c_op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned long result; \
unsigned long flags; \
\
__global_lock1(flags); \
result = v->counter; \
fence(); \
v->counter c_op i; \
__global_unlock1(flags); \
\
return result; \
}
#define ATOMIC_OPS(op, c_op) \
ATOMIC_OP(op, c_op) \
ATOMIC_OP_RETURN(op, c_op) \
ATOMIC_FETCH_OP(op, c_op)
ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)
ATOMIC_OP(and, &=)
ATOMIC_OP(or, |=)
ATOMIC_OP(xor, ^=)
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op) \
ATOMIC_OP(op, c_op) \
ATOMIC_FETCH_OP(op, c_op)
ATOMIC_OPS(and, &=)
ATOMIC_OPS(or, |=)
ATOMIC_OPS(xor, ^=)
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
...
...
arch/mips/include/asm/atomic.h
...
...
@@ -66,7 +66,7 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
" " #asm_op " %0, %2 \n" \
" sc %0, %1 \n" \
" .set mips0 \n" \
: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)
\
: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
: "Ir" (i)); \
} while (unlikely(!temp)); \
} else { \
...
...
@@ -79,12 +79,10 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
}
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static __inline__ int atomic_##op##_return(int i, atomic_t * v)	      \
static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v)      \
{ \
int result; \
\
smp_mb__before_llsc(); \
\
if (kernel_uses_llsc && R10000_LLSC_WAR) { \
int temp; \
\
...
...
@@ -125,23 +123,84 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
raw_local_irq_restore(flags); \
} \
\
smp_llsc_mb(); \
return result; \
}
#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \
{ \
int result; \
\
if (kernel_uses_llsc && R10000_LLSC_WAR) { \
int temp; \
\
__asm__ __volatile__( \
" .set arch=r4000 \n" \
"1: ll %1, %2 # atomic_fetch_" #op " \n" \
" " #asm_op " %0, %1, %3 \n" \
" sc %0, %2 \n" \
" beqzl %0, 1b \n" \
" move %0, %1 \n" \
" .set mips0 \n" \
: "=&r" (result), "=&r" (temp), \
"+" GCC_OFF_SMALL_ASM() (v->counter) \
: "Ir" (i)); \
} else if (kernel_uses_llsc) { \
int temp; \
\
do { \
__asm__ __volatile__( \
" .set "MIPS_ISA_LEVEL" \n" \
" ll %1, %2 # atomic_fetch_" #op " \n" \
" " #asm_op " %0, %1, %3 \n" \
" sc %0, %2 \n" \
" .set mips0 \n" \
: "=&r" (result), "=&r" (temp), \
"+" GCC_OFF_SMALL_ASM() (v->counter) \
: "Ir" (i)); \
} while (unlikely(!result)); \
\
result = temp; \
} else { \
unsigned long flags; \
\
raw_local_irq_save(flags); \
result = v->counter; \
v->counter c_op i; \
raw_local_irq_restore(flags); \
} \
\
return result; \
}
#define ATOMIC_OPS(op, c_op, asm_op) \
ATOMIC_OP(op, c_op, asm_op) \
ATOMIC_OP_RETURN(op, c_op, asm_op)
ATOMIC_OP_RETURN(op, c_op, asm_op) \
ATOMIC_FETCH_OP(op, c_op, asm_op)
ATOMIC_OPS(add, +=, addu)
ATOMIC_OPS(sub, -=, subu)
ATOMIC_OP(and, &=, and)
ATOMIC_OP(or, |=, or)
ATOMIC_OP(xor, ^=, xor)
#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op) \
ATOMIC_OP(op, c_op, asm_op) \
ATOMIC_FETCH_OP(op, c_op, asm_op)
ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, xor)
#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
...
...
@@ -362,12 +421,10 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \
}
#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
static __inline__ long atomic64_##op##_return(long i, atomic64_t * v)	      \
static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
{ \
long result; \
\
smp_mb__before_llsc(); \
\
if (kernel_uses_llsc && R10000_LLSC_WAR) { \
long temp; \
\
...
...
@@ -409,22 +466,85 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
raw_local_irq_restore(flags); \
} \
\
smp_llsc_mb(); \
return result; \
}
#define ATOMIC64_FETCH_OP(op, c_op, asm_op) \
static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
{ \
long result; \
\
if (kernel_uses_llsc && R10000_LLSC_WAR) { \
long temp; \
\
__asm__ __volatile__( \
" .set arch=r4000 \n" \
"1: lld %1, %2 # atomic64_fetch_" #op "\n" \
" " #asm_op " %0, %1, %3 \n" \
" scd %0, %2 \n" \
" beqzl %0, 1b \n" \
" move %0, %1 \n" \
" .set mips0 \n" \
: "=&r" (result), "=&r" (temp), \
"+" GCC_OFF_SMALL_ASM() (v->counter) \
: "Ir" (i)); \
} else if (kernel_uses_llsc) { \
long temp; \
\
do { \
__asm__ __volatile__( \
" .set "MIPS_ISA_LEVEL" \n" \
" lld %1, %2 # atomic64_fetch_" #op "\n" \
" " #asm_op " %0, %1, %3 \n" \
" scd %0, %2 \n" \
" .set mips0 \n" \
: "=&r" (result), "=&r" (temp), \
"=" GCC_OFF_SMALL_ASM() (v->counter) \
: "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \
: "memory"); \
} while (unlikely(!result)); \
\
result = temp; \
} else { \
unsigned long flags; \
\
raw_local_irq_save(flags); \
result = v->counter; \
v->counter c_op i; \
raw_local_irq_restore(flags); \
} \
\
return result; \
}
#define ATOMIC64_OPS(op, c_op, asm_op) \
ATOMIC64_OP(op, c_op, asm_op) \
ATOMIC64_OP_RETURN(op, c_op, asm_op)
ATOMIC64_OP_RETURN(op, c_op, asm_op) \
ATOMIC64_FETCH_OP(op, c_op, asm_op)
ATOMIC64_OPS(add, +=, daddu)
ATOMIC64_OPS(sub, -=, dsubu)
ATOMIC64_OP(and, &=, and)
ATOMIC64_OP(or, |=, or)
ATOMIC64_OP(xor, ^=, xor)
#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op, asm_op) \
ATOMIC64_OP(op, c_op, asm_op) \
ATOMIC64_FETCH_OP(op, c_op, asm_op)
ATOMIC64_OPS(and, &=, and)
ATOMIC64_OPS(or, |=, or)
ATOMIC64_OPS(xor, ^=, xor)
#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
...
...
arch/mn10300/include/asm/atomic.h
...
...
@@ -84,16 +84,41 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
return retval; \
}
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
#define ATOMIC_FETCH_OP(op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
int retval, status; \
\
asm volatile( \
"1: mov %4,(_AAR,%3) \n" \
" mov (_ADR,%3),%1 \n" \
" mov %1,%0 \n" \
" " #op " %5,%0 \n" \
" mov %0,(_ADR,%3) \n" \
" mov (_ADR,%3),%0 \n"
/* flush */
\
" mov (_ASR,%3),%0 \n" \
" or %0,%0 \n" \
" bne 1b \n" \
: "=&r"(status), "=&r"(retval), "=m"(v->counter) \
: "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i) \
: "memory", "cc"); \
return retval; \
}
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)
#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
...
...
arch/parisc/include/asm/atomic.h
...
...
@@ -121,16 +121,39 @@ static __inline__ int atomic_##op##_return(int i, atomic_t *v) \
return ret; \
}
#define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)
#define ATOMIC_FETCH_OP(op, c_op) \
static __inline__ int atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
int ret; \
\
_atomic_spin_lock_irqsave(v, flags); \
ret = v->counter; \
v->counter c_op i; \
_atomic_spin_unlock_irqrestore(v, flags); \
\
return ret; \
}
#define ATOMIC_OPS(op, c_op) \
ATOMIC_OP(op, c_op) \
ATOMIC_OP_RETURN(op, c_op) \
ATOMIC_FETCH_OP(op, c_op)
ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)
ATOMIC_OP(and, &=)
ATOMIC_OP(or, |=)
ATOMIC_OP(xor, ^=)
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op) \
ATOMIC_OP(op, c_op) \
ATOMIC_FETCH_OP(op, c_op)
ATOMIC_OPS(and, &=)
ATOMIC_OPS(or, |=)
ATOMIC_OPS(xor, ^=)
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
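parisc has no fetch-and-modify instruction, so its ATOMIC_FETCH_OP above takes the hashed spinlock, snapshots the counter, applies c_op, and returns the snapshot. A sketch only, in user-space C with POSIX threads (the lock and function names here are illustrative, not kernel API), of the same shape:

```c
/*
 * Lock-protected fetch-op sketch mirroring the parisc ATOMIC_FETCH_OP
 * above: take lock, remember old value, apply op, return old value.
 */
#include <pthread.h>

static pthread_mutex_t counter_lock = PTHREAD_MUTEX_INITIALIZER;

static int locked_fetch_or(int *counter, int mask)
{
	int old;

	pthread_mutex_lock(&counter_lock);
	old = *counter;		/* snapshot before the update */
	*counter |= mask;	/* the c_op step */
	pthread_mutex_unlock(&counter_lock);

	return old;		/* caller sees the pre-update value */
}
```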
...
...
@@ -185,15 +208,39 @@ static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v) \
return ret; \
}
#define ATOMIC64_OPS(op, c_op) ATOMIC64_OP(op, c_op) ATOMIC64_OP_RETURN(op, c_op)
#define ATOMIC64_FETCH_OP(op, c_op) \
static __inline__ s64 atomic64_fetch_##op(s64 i, atomic64_t *v) \
{ \
unsigned long flags; \
s64 ret; \
\
_atomic_spin_lock_irqsave(v, flags); \
ret = v->counter; \
v->counter c_op i; \
_atomic_spin_unlock_irqrestore(v, flags); \
\
return ret; \
}
#define ATOMIC64_OPS(op, c_op) \
ATOMIC64_OP(op, c_op) \
ATOMIC64_OP_RETURN(op, c_op) \
ATOMIC64_FETCH_OP(op, c_op)
ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)
ATOMIC64_OP(and, &=)
ATOMIC64_OP(or, |=)
ATOMIC64_OP(xor, ^=)
#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op) \
ATOMIC64_OP(op, c_op) \
ATOMIC64_FETCH_OP(op, c_op)
ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
...
...
arch/powerpc/include/asm/atomic.h
...
...
@@ -78,21 +78,53 @@ static inline int atomic_##op##_return_relaxed(int a, atomic_t *v) \
return t; \
}
#define ATOMIC_FETCH_OP_RELAXED(op, asm_op) \
static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v) \
{ \
int res, t; \
\
__asm__ __volatile__( \
"1: lwarx %0,0,%4 # atomic_fetch_" #op "_relaxed\n" \
#asm_op " %1,%3,%0\n" \
PPC405_ERR77(0, %4) \
" stwcx. %1,0,%4\n" \
" bne- 1b\n" \
: "=&r" (res), "=&r" (t), "+m" (v->counter) \
: "r" (a), "r" (&v->counter) \
: "cc"); \
\
return res; \
}
#define ATOMIC_OPS(op, asm_op) \
ATOMIC_OP(op, asm_op) \
ATOMIC_OP_RETURN_RELAXED(op, asm_op)
ATOMIC_OP_RETURN_RELAXED(op, asm_op) \
ATOMIC_FETCH_OP_RELAXED(op, asm_op)
ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, subf)
ATOMIC_OP(and, and)
ATOMIC_OP(or, or)
ATOMIC_OP(xor, xor)
#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm_op) \
ATOMIC_OP(op, asm_op) \
ATOMIC_FETCH_OP_RELAXED(op, asm_op)
ATOMIC_OPS(and, and)
ATOMIC_OPS(or, or)
ATOMIC_OPS(xor, xor)
#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP_RELAXED
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP
...
...
@@ -329,20 +361,53 @@ atomic64_##op##_return_relaxed(long a, atomic64_t *v) \
return t; \
}
#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op) \
static inline long \
atomic64_fetch_##op##_relaxed(long a, atomic64_t *v) \
{ \
long res, t; \
\
__asm__ __volatile__( \
"1: ldarx %0,0,%4 # atomic64_fetch_" #op "_relaxed\n" \
#asm_op " %1,%3,%0\n" \
" stdcx. %1,0,%4\n" \
" bne- 1b\n" \
: "=&r" (res), "=&r" (t), "+m" (v->counter) \
: "r" (a), "r" (&v->counter) \
: "cc"); \
\
return res; \
}
#define ATOMIC64_OPS(op, asm_op) \
ATOMIC64_OP(op, asm_op) \
ATOMIC64_OP_RETURN_RELAXED(op, asm_op)
ATOMIC64_OP_RETURN_RELAXED(op, asm_op) \
ATOMIC64_FETCH_OP_RELAXED(op, asm_op)
ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)
ATOMIC64_OP(and, and)
ATOMIC64_OP(or, or)
ATOMIC64_OP(xor, xor)
#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, asm_op) \
ATOMIC64_OP(op, asm_op) \
ATOMIC64_FETCH_OP_RELAXED(op, asm_op)
ATOMIC64_OPS(and, and)
ATOMIC64_OPS(or, or)
ATOMIC64_OPS(xor, xor)
#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP_RELAXED
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP
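The powerpc ldarx/stdcx. and lwarx/stwcx. loops above only provide the _relaxed fetch variants; the acquire, release, and fully ordered forms come from the generic __atomic_op_acquire()/__atomic_op_release()/__atomic_op_fence() wrappers added later in this merge. A sketch of that layering idea (not the kernel macros themselves, names here are illustrative) using C11 atomics and fences:

```c
/*
 * Layering sketch: a relaxed fetch_add plus explicit fences yields the
 * ordered variants, which is what the generic wrappers do for
 * architectures (like powerpc here) that implement only _relaxed ops.
 */
#include <stdatomic.h>

static int fetch_add_relaxed(atomic_int *v, int i)
{
	return atomic_fetch_add_explicit(v, i, memory_order_relaxed);
}

static int fetch_add_acquire(atomic_int *v, int i)
{
	int old = fetch_add_relaxed(v, i);

	atomic_thread_fence(memory_order_acquire);	/* order later accesses */
	return old;
}

static int fetch_add_fence(atomic_int *v, int i)
{
	int old;

	atomic_thread_fence(memory_order_seq_cst);	/* full barrier before */
	old = fetch_add_relaxed(v, i);
	atomic_thread_fence(memory_order_seq_cst);	/* full barrier after */
	return old;
}
```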
...
...
arch/s390/include/asm/atomic.h
...
...
@@ -93,6 +93,11 @@ static inline int atomic_add_return(int i, atomic_t *v)
	return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER) + i;
}
static inline int atomic_fetch_add(int i, atomic_t *v)
{
	return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER);
}
static inline void atomic_add(int i, atomic_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
...
...
@@ -114,22 +119,27 @@ static inline void atomic_add(int i, atomic_t *v)
#define atomic_inc_and_test(_v) (atomic_add_return(1, _v) == 0)
#define atomic_sub(_i, _v) atomic_add(-(int)(_i), _v)
#define atomic_sub_return(_i, _v) atomic_add_return(-(int)(_i), _v)
#define atomic_fetch_sub(_i, _v) atomic_fetch_add(-(int)(_i), _v)
#define atomic_sub_and_test(_i, _v) (atomic_sub_return(_i, _v) == 0)
#define atomic_dec(_v) atomic_sub(1, _v)
#define atomic_dec_return(_v) atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0)
#define ATOMIC_OP(op, OP) \
#define ATOMIC_OPS(op, OP)						\
static inline void atomic_##op(int i, atomic_t *v) \
{ \
__ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_NO_BARRIER); \
} \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
return __ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_BARRIER); \
}
ATOMIC_OP(and, AND)
ATOMIC_OP(or, OR)
ATOMIC_OP(xor, XOR)
ATOMIC_OPS(and, AND)
ATOMIC_OPS(or, OR)
ATOMIC_OPS(xor, XOR)
#undef ATOMIC_OP
#undef ATOMIC_OPS
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
...
...
@@ -236,6 +246,11 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
	return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER) + i;
}
static inline long long atomic64_fetch_add(long long i, atomic64_t *v)
{
	return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER);
}
static inline void atomic64_add(long long i, atomic64_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
...
...
@@ -264,17 +279,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v,
	return old;
}
#define ATOMIC64_OP(op, OP) \
#define ATOMIC64_OPS(op, OP)						\
static inline void atomic64_##op(long i, atomic64_t *v) \
{ \
__ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_NO_BARRIER); \
} \
static inline long atomic64_fetch_##op(long i, atomic64_t *v) \
{ \
return __ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_BARRIER); \
}
ATOMIC64_OP(and, AND)
ATOMIC64_OP(or, OR)
ATOMIC64_OP(xor, XOR)
ATOMIC64_OPS(and, AND)
ATOMIC64_OPS(or, OR)
ATOMIC64_OPS(xor, XOR)
#undef ATOMIC64_OP
#undef ATOMIC64_OPS
#undef __ATOMIC64_LOOP
static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
...
...
@@ -315,6 +334,7 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
#define atomic64_inc_return(_v) atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v) (atomic64_add_return(1, _v) == 0)
#define atomic64_sub_return(_i, _v) atomic64_add_return(-(long long)(_i), _v)
#define atomic64_fetch_sub(_i, _v) atomic64_fetch_add(-(long long)(_i), _v)
#define atomic64_sub(_i, _v) atomic64_add(-(long long)(_i), _v)
#define atomic64_sub_and_test(_i, _v) (atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v) atomic64_sub(1, _v)
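s390 (like the tile and 32-bit x86 atomic64 code later in this merge) derives fetch_sub from fetch_add simply by negating the operand, which is exact for two's-complement integers. A quick user-space check of that equivalence, assuming GCC/Clang __atomic builtins:

```c
/*
 * fetch_sub(i) and fetch_add(-i) are interchangeable, which is why
 * atomic64_fetch_sub() above is defined as atomic64_fetch_add() of
 * the negated operand.
 */
#include <assert.h>

int main(void)
{
	long long a = 100, b = 100, i = 7;

	long long old_sub = __atomic_fetch_sub(&a, i, __ATOMIC_SEQ_CST);
	long long old_add = __atomic_fetch_add(&b, -i, __ATOMIC_SEQ_CST);

	assert(old_sub == old_add);	/* both return the old value, 100 */
	assert(a == b);			/* both leave the counter at 93 */
	return 0;
}
```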
...
...
arch/sh/include/asm/atomic-grb.h
...
...
@@ -43,16 +43,42 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
return tmp; \
}
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
#define ATOMIC_FETCH_OP(op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
int res, tmp; \
\
__asm__ __volatile__ ( \
" .align 2 \n\t" \
" mova 1f, r0 \n\t"
/* r0 = end point */
\
" mov r15, r1 \n\t"
/* r1 = saved sp */
\
" mov #-6, r15 \n\t"
/* LOGIN: r15 = size */
\
" mov.l @%2, %0 \n\t"
/* load old value */
\
" mov %0, %1 \n\t"
/* save old value */
\
" " #op " %3, %0 \n\t"
/* $op */
\
" mov.l %0, @%2 \n\t"
/* store new value */
\
"1: mov r1, r15 \n\t"
/* LOGOUT */
\
: "=&r" (tmp), "=&r" (res), "+r" (v) \
: "r" (i) \
: "memory" , "r0", "r1"); \
\
return res; \
}
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)
#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
...
...
arch/sh/include/asm/atomic-irq.h
...
...
@@ -33,15 +33,38 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
return temp; \
}
#define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)
#define ATOMIC_FETCH_OP(op, c_op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned long temp, flags; \
\
raw_local_irq_save(flags); \
temp = v->counter; \
v->counter c_op i; \
raw_local_irq_restore(flags); \
\
return temp; \
}
#define ATOMIC_OPS(op, c_op) \
ATOMIC_OP(op, c_op) \
ATOMIC_OP_RETURN(op, c_op) \
ATOMIC_FETCH_OP(op, c_op)
ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)
ATOMIC_OP(and, &=)
ATOMIC_OP(or, |=)
ATOMIC_OP(xor, ^=)
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op) \
ATOMIC_OP(op, c_op) \
ATOMIC_FETCH_OP(op, c_op)
ATOMIC_OPS(and, &=)
ATOMIC_OPS(or, |=)
ATOMIC_OPS(xor, ^=)
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
...
...
arch/sh/include/asm/atomic-llsc.h
...
...
@@ -48,15 +48,39 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
return temp; \
}
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
#define ATOMIC_FETCH_OP(op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned long res, temp; \
\
__asm__ __volatile__ ( \
"1: movli.l @%3, %0 ! atomic_fetch_" #op " \n" \
" mov %0, %1 \n" \
" " #op " %2, %0 \n" \
" movco.l %0, @%3 \n" \
" bf 1b \n" \
" synco \n" \
: "=&z" (temp), "=&z" (res) \
: "r" (i), "r" (&v->counter) \
: "t"); \
\
return res; \
}
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)
#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
...
...
arch/sparc/include/asm/atomic_32.h
...
...
@@ -20,9 +20,10 @@
#define ATOMIC_INIT(i) { (i) }
int atomic_add_return(int, atomic_t *);
void atomic_and(int, atomic_t *);
void atomic_or(int, atomic_t *);
void atomic_xor(int, atomic_t *);
int atomic_fetch_add(int, atomic_t *);
int atomic_fetch_and(int, atomic_t *);
int atomic_fetch_or(int, atomic_t *);
int atomic_fetch_xor(int, atomic_t *);
int atomic_cmpxchg(atomic_t *, int, int);
int atomic_xchg(atomic_t *, int);
int __atomic_add_unless(atomic_t *, int, int);
...
...
@@ -35,7 +36,13 @@ void atomic_set(atomic_t *, int);
#define atomic_inc(v) ((void)atomic_add_return( 1, (v)))
#define atomic_dec(v) ((void)atomic_add_return( -1, (v)))
#define atomic_and(i, v) ((void)atomic_fetch_and((i), (v)))
#define atomic_or(i, v) ((void)atomic_fetch_or((i), (v)))
#define atomic_xor(i, v) ((void)atomic_fetch_xor((i), (v)))
#define atomic_sub_return(i, v) (atomic_add_return(-(int)(i), (v)))
#define atomic_fetch_sub(i, v) (atomic_fetch_add (-(int)(i), (v)))
#define atomic_inc_return(v) (atomic_add_return( 1, (v)))
#define atomic_dec_return(v) (atomic_add_return( -1, (v)))
...
...
arch/sparc/include/asm/atomic_64.h
...
...
@@ -28,16 +28,24 @@ void atomic64_##op(long, atomic64_t *);
int atomic_##op##_return(int, atomic_t *); \
long atomic64_##op##_return(long, atomic64_t *);
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
#define ATOMIC_FETCH_OP(op) \
int atomic_fetch_##op(int, atomic_t *); \
long atomic64_fetch_##op(long, atomic64_t *);
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)
#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
...
...
arch/sparc/lib/atomic32.c
...
...
@@ -27,39 +27,44 @@ static DEFINE_SPINLOCK(dummy);
#endif
/* SMP */
#define ATOMIC_OP_RETURN(op, c_op)					\
int atomic_##op##_return(int i, atomic_t *v)				\
#define ATOMIC_FETCH_OP(op, c_op)					\
int atomic_fetch_##op(int i, atomic_t *v)				\
{									\
	int ret;							\
	unsigned long flags;						\
	spin_lock_irqsave(ATOMIC_HASH(v), flags);			\
									\
	ret = (v->counter c_op i);					\
	ret = v->counter;						\
	v->counter c_op i;						\
									\
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);			\
	return ret;							\
}									\
EXPORT_SYMBOL(atomic_##op##_return);
EXPORT_SYMBOL(atomic_fetch_##op);
#define ATOMIC_OP(op, c_op)						\
void atomic_##op(int i, atomic_t *v)					\
#define ATOMIC_OP_RETURN(op, c_op)					\
int atomic_##op##_return(int i, atomic_t *v)				\
{									\
	int ret;							\
	unsigned long flags;						\
	spin_lock_irqsave(ATOMIC_HASH(v), flags);			\
									\
	v->counter c_op i;						\
	ret = (v->counter c_op i);					\
									\
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);			\
	return ret;							\
}									\
EXPORT_SYMBOL(atomic_##op);
EXPORT_SYMBOL(atomic_##op##_return);
ATOMIC_OP_RETURN(add, +=)
ATOMIC_OP(and, &=)
ATOMIC_OP(or, |=)
ATOMIC_OP(xor, ^=)
ATOMIC_FETCH_OP(add, +=)
ATOMIC_FETCH_OP(and, &=)
ATOMIC_FETCH_OP(or, |=)
ATOMIC_FETCH_OP(xor, ^=)
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
int atomic_xchg(atomic_t *v, int new)
{
...
...
arch/sparc/lib/atomic_64.S
...
...
@@ -9,10 +9,11 @@
.
text
/
*
T
wo
versions
of
the
atomic
routines
,
one
that
/
*
T
hree
versions
of
the
atomic
routines
,
one
that
*
does
not
return
a
value
and
does
not
perform
*
memory
barriers
,
and
a
second
which
returns
*
a
value
and
does
the
barriers
.
*
memory
barriers
,
and
a
two
which
return
*
a
value
,
the
new
and
old
value
resp
.
and
does
the
*
barriers
.
*/
#define ATOMIC_OP(op) \
...
...
@@ -43,15 +44,34 @@ ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
2:	BACKOFF_SPIN(%o2, %o3, 1b);					\
ENDPROC(atomic_##op##_return);
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
#define ATOMIC_FETCH_OP(op)						\
ENTRY(atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */	\
	BACKOFF_SETUP(%o2);						\
1:	lduw	[%o1], %g1;						\
	op	%g1, %o0, %g7;						\
	cas	[%o1], %g1, %g7;					\
	cmp	%g1, %g7;						\
	bne,pn	%icc, BACKOFF_LABEL(2f, 1b);				\
	 nop;								\
	retl;								\
	 sra	%g1, 0, %o0;						\
2:	BACKOFF_SPIN(%o2, %o3, 1b);					\
ENDPROC(atomic_fetch_##op);
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)
#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
...
...
@@ -83,15 +103,34 @@ ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
2:	BACKOFF_SPIN(%o2, %o3, 1b);					\
ENDPROC(atomic64_##op##_return);
#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op)
#define ATOMIC64_FETCH_OP(op)						\
ENTRY(atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */	\
	BACKOFF_SETUP(%o2);						\
1:	ldx	[%o1], %g1;						\
	op	%g1, %o0, %g7;						\
	casx	[%o1], %g1, %g7;					\
	cmp	%g1, %g7;						\
	bne,pn	%xcc, BACKOFF_LABEL(2f, 1b);				\
	 nop;								\
	retl;								\
	 mov	%g1, %o0;						\
2:	BACKOFF_SPIN(%o2, %o3, 1b);					\
ENDPROC(atomic64_fetch_##op);
#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op)
ATOMIC64_OPS(add)
ATOMIC64_OPS(sub)
ATOMIC64_OP(and)
ATOMIC64_OP(or)
ATOMIC64_OP(xor)
#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_FETCH_OP(op)
ATOMIC64_OPS(and)
ATOMIC64_OPS(or)
ATOMIC64_OPS(xor)
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
...
...
arch/sparc/lib/ksyms.c
...
...
@@ -107,15 +107,24 @@ EXPORT_SYMBOL(atomic64_##op);
EXPORT_SYMBOL(atomic_##op##_return); \
EXPORT_SYMBOL(atomic64_##op##_return);
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
#define ATOMIC_FETCH_OP(op) \
EXPORT_SYMBOL(atomic_fetch_##op); \
EXPORT_SYMBOL(atomic64_fetch_##op);
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)
#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
...
...
arch/tile/include/asm/atomic.h
...
...
@@ -46,6 +46,8 @@ static inline int atomic_read(const atomic_t *v)
*/
#define atomic_sub_return(i, v) atomic_add_return((int)(-(i)), (v))
#define atomic_fetch_sub(i, v) atomic_fetch_add(-(int)(i), (v))
/**
* atomic_sub - subtract integer from atomic variable
* @i: integer value to subtract
...
...
arch/tile/include/asm/atomic_32.h
...
...
@@ -34,18 +34,29 @@ static inline void atomic_add(int i, atomic_t *v)
	_atomic_xchg_add(&v->counter, i);
}
#define ATOMIC_OP(op) \
unsigned long _atomic_##op(volatile unsigned long *p, unsigned long mask); \
#define ATOMIC_OPS(op)							\
unsigned long _atomic_fetch_##op(volatile unsigned long *p, unsigned long mask); \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
_atomic_##op((unsigned long *)&v->counter, i); \
_atomic_fetch_##op((unsigned long *)&v->counter, i); \
} \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
smp_mb(); \
return _atomic_fetch_##op((unsigned long *)&v->counter, i); \
}
ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)
ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)
#undef ATOMIC_OP
#undef ATOMIC_OPS
static inline int atomic_fetch_add(int i, atomic_t *v)
{
	smp_mb();
	return _atomic_xchg_add(&v->counter, i);
}
/**
* atomic_add_return - add integer and return
...
...
@@ -126,16 +137,29 @@ static inline void atomic64_add(long long i, atomic64_t *v)
	_atomic64_xchg_add(&v->counter, i);
}
#define ATOMIC64_OP(op)							\
long long _atomic64_##op(long long *v, long long n);			\
#define ATOMIC64_OPS(op)						\
long long _atomic64_fetch_##op(long long *v, long long n);		\
static inline void atomic64_##op(long long i, atomic64_t *v) \
{ \
_atomic64_##op(&v->counter, i); \
_atomic64_fetch_##op(&v->counter, i); \
} \
static inline long long atomic64_fetch_##op(long long i, atomic64_t *v) \
{ \
smp_mb(); \
return _atomic64_fetch_##op(&v->counter, i); \
}
ATOMIC64_OP(and)
ATOMIC64_OP(or)
ATOMIC64_OP(xor)
ATOMIC64_OPS(and)
ATOMIC64_OPS(or)
ATOMIC64_OPS(xor)
#undef ATOMIC64_OPS
static inline long long atomic64_fetch_add(long long i, atomic64_t *v)
{
	smp_mb();
	return _atomic64_xchg_add(&v->counter, i);
}
/**
* atomic64_add_return - add integer and return
...
...
@@ -186,6 +210,7 @@ static inline void atomic64_set(atomic64_t *v, long long n)
#define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
#define atomic64_sub_return(i, v) atomic64_add_return(-(i), (v))
#define atomic64_fetch_sub(i, v) atomic64_fetch_add(-(i), (v))
#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
#define atomic64_sub(i, v) atomic64_add(-(i), (v))
#define atomic64_dec(v) atomic64_sub(1LL, (v))
...
...
@@ -193,7 +218,6 @@ static inline void atomic64_set(atomic64_t *v, long long n)
#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
#endif
/* !__ASSEMBLY__ */
/*
...
...
@@ -242,16 +266,16 @@ struct __get_user {
	unsigned long val;
	int err;
};
extern struct __get_user __atomic_cmpxchg(volatile int *p,
extern struct __get_user __atomic32_cmpxchg(volatile int *p,
					  int *lock, int o, int n);
extern struct __get_user __atomic_xchg(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xchg_add(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
extern struct __get_user __atomic32_xchg(volatile int *p, int *lock, int n);
extern struct __get_user __atomic32_xchg_add(volatile int *p, int *lock, int n);
extern struct __get_user __atomic32_xchg_add_unless(volatile int *p,
						    int *lock, int o, int n);
extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_and(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
extern struct __get_user __atomic32_fetch_or(volatile int *p, int *lock, int n);
extern struct __get_user __atomic32_fetch_and(volatile int *p, int *lock, int n);
extern struct __get_user __atomic32_fetch_andn(volatile int *p, int *lock, int n);
extern struct __get_user __atomic32_fetch_xor(volatile int *p, int *lock, int n);
extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
				    long long o, long long n);
extern long long __atomic64_xchg(volatile long long *p, int *lock, long long n);
...
...
@@ -259,9 +283,9 @@ extern long long __atomic64_xchg_add(volatile long long *p, int *lock,
				      long long n);
extern long long __atomic64_xchg_add_unless(volatile long long *p,
					    int *lock, long long o, long long n);
extern long long __atomic64_and(volatile long long *p, int *lock, long long n);
extern long long __atomic64_or(volatile long long *p, int *lock, long long n);
extern long long __atomic64_xor(volatile long long *p, int *lock, long long n);
extern long long __atomic64_fetch_and(volatile long long *p, int *lock, long long n);
extern long long __atomic64_fetch_or(volatile long long *p, int *lock, long long n);
extern long long __atomic64_fetch_xor(volatile long long *p, int *lock, long long n);

/* Return failure from the atomic wrappers. */
struct __get_user __atomic_bad_address(int __user *addr);
...
...
arch/tile/include/asm/atomic_64.h
...
...
@@ -32,11 +32,6 @@
* on any routine which updates memory and returns a value.
*/
static inline void atomic_add(int i, atomic_t *v)
{
	__insn_fetchadd4((void *)&v->counter, i);
}
/*
* Note a subtlety of the locking here. We are required to provide a
* full memory barrier before and after the operation. However, we
...
...
@@ -59,28 +54,39 @@ static inline int atomic_add_return(int i, atomic_t *v)
	return val;
}
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
#define ATOMIC_OPS(op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
int val; \
smp_mb(); \
val = __insn_fetch##op##4((void *)&v->counter, i); \
smp_mb(); \
return val; \
} \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
__insn_fetch##op##4((void *)&v->counter, i); \
}
ATOMIC_OPS(add)
ATOMIC_OPS(and)
ATOMIC_OPS(or)
#undef ATOMIC_OPS
static inline int atomic_fetch_xor(int i, atomic_t *v)
{
	int guess, oldval = v->counter;
	smp_mb();
	do {
		if (oldval == u)
			break;
		guess = oldval;
		oldval = cmpxchg(&v->counter, guess, guess + a);
		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
		oldval = __insn_cmpexch4(&v->counter, guess ^ i);
	} while (guess != oldval);
	smp_mb();
	return oldval;
}
static inline void atomic_and(int i, atomic_t *v)
{
	__insn_fetchand4((void *)&v->counter, i);
}
static inline void atomic_or(int i, atomic_t *v)
{
	__insn_fetchor4((void *)&v->counter, i);
}
static inline void atomic_xor(int i, atomic_t *v)
{
	int guess, oldval = v->counter;
...
...
@@ -91,6 +97,18 @@ static inline void atomic_xor(int i, atomic_t *v)
	} while (guess != oldval);
}
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int guess, oldval = v->counter;
	do {
		if (oldval == u)
			break;
		guess = oldval;
		oldval = cmpxchg(&v->counter, guess, guess + a);
	} while (guess != oldval);
	return oldval;
}
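The add-unless helper reconstructed above is a conditional add built on a compare-and-exchange retry loop. A sketch of the same pattern in portable C11 (illustrative only; the kernel version uses cmpxchg() directly):

```c
/*
 * Add @a to the counter unless it currently equals @u, and report the
 * value that was observed, mirroring __atomic_add_unless() above.
 */
#include <stdatomic.h>

static int add_unless_sketch(atomic_int *v, int a, int u)
{
	int old = atomic_load_explicit(v, memory_order_relaxed);

	do {
		if (old == u)
			break;	/* do not add when the forbidden value is seen */
	} while (!atomic_compare_exchange_weak(v, &old, old + a));

	return old;	/* pre-update value, as in __atomic_add_unless() */
}
```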
/* Now the true 64-bit operations. */
#define ATOMIC64_INIT(i) { (i) }
...
...
@@ -98,11 +116,6 @@ static inline void atomic_xor(int i, atomic_t *v)
#define atomic64_read(v) READ_ONCE((v)->counter)
#define atomic64_set(v, i) WRITE_ONCE((v)->counter, (i))
static inline void atomic64_add(long i, atomic64_t *v)
{
	__insn_fetchadd((void *)&v->counter, i);
}
static inline long atomic64_add_return(long i, atomic64_t *v)
{
	int val;
...
@@ -112,26 +125,37 @@ static inline long atomic64_add_return(long i, atomic64_t *v)
	return val;
}
static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
#define ATOMIC64_OPS(op) \
static inline long atomic64_fetch_##op(long i, atomic64_t *v) \
{ \
long val; \
smp_mb(); \
val = __insn_fetch##op((void *)&v->counter, i); \
smp_mb(); \
return val; \
} \
static inline void atomic64_##op(long i, atomic64_t *v) \
{ \
__insn_fetch##op((void *)&v->counter, i); \
}
ATOMIC64_OPS(add)
ATOMIC64_OPS(and)
ATOMIC64_OPS(or)
#undef ATOMIC64_OPS
static inline long atomic64_fetch_xor(long i, atomic64_t *v)
{
	long guess, oldval = v->counter;
	smp_mb();
	do {
		if (oldval == u)
			break;
		guess = oldval;
		oldval = cmpxchg(&v->counter, guess, guess + a);
		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
		oldval = __insn_cmpexch(&v->counter, guess ^ i);
	} while (guess != oldval);
	return oldval != u;
}
static inline void atomic64_and(long i, atomic64_t *v)
{
	__insn_fetchand((void *)&v->counter, i);
}
static inline void atomic64_or(long i, atomic64_t *v)
{
	__insn_fetchor((void *)&v->counter, i);
	smp_mb();
	return oldval;
}
static inline void atomic64_xor(long i, atomic64_t *v)
...
@@ -144,7 +168,20 @@ static inline void atomic64_xor(long i, atomic64_t *v)
	} while (guess != oldval);
}
static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long guess, oldval = v->counter;
	do {
		if (oldval == u)
			break;
		guess = oldval;
		oldval = cmpxchg(&v->counter, guess, guess + a);
	} while (guess != oldval);
	return oldval != u;
}
#define atomic64_sub_return(i, v) atomic64_add_return(-(i), (v))
#define atomic64_fetch_sub(i, v) atomic64_fetch_add(-(i), (v))
#define atomic64_sub(i, v) atomic64_add(-(i), (v))
#define atomic64_inc_return(v) atomic64_add_return(1, (v))
#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
...
...
arch/tile/include/asm/bitops_32.h
...
...
@@ -19,9 +19,9 @@
#include <asm/barrier.h>
/* Tile-specific routines to support <asm/bitops.h>. */
unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask);
unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask);
unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask);
unsigned long _atomic_fetch_or(volatile unsigned long *p, unsigned long mask);
unsigned long _atomic_fetch_andn(volatile unsigned long *p, unsigned long mask);
unsigned long _atomic_fetch_xor(volatile unsigned long *p, unsigned long mask);
/**
* set_bit - Atomically set a bit in memory
...
...
@@ -35,7 +35,7 @@ unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask);
*/
static inline void set_bit(unsigned nr, volatile unsigned long *addr)
{
	_atomic_or(addr + BIT_WORD(nr), BIT_MASK(nr));
	_atomic_fetch_or(addr + BIT_WORD(nr), BIT_MASK(nr));
}
/**
...
...
@@ -54,7 +54,7 @@ static inline void set_bit(unsigned nr, volatile unsigned long *addr)
*/
static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
{
	_atomic_andn(addr + BIT_WORD(nr), BIT_MASK(nr));
	_atomic_fetch_andn(addr + BIT_WORD(nr), BIT_MASK(nr));
}
/**
...
...
@@ -69,7 +69,7 @@ static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
*/
static inline void change_bit(unsigned nr, volatile unsigned long *addr)
{
	_atomic_xor(addr + BIT_WORD(nr), BIT_MASK(nr));
	_atomic_fetch_xor(addr + BIT_WORD(nr), BIT_MASK(nr));
}
/**
...
...
@@ -85,7 +85,7 @@ static inline int test_and_set_bit(unsigned nr, volatile unsigned long *addr)
	unsigned long mask = BIT_MASK(nr);
	addr += BIT_WORD(nr);
	smp_mb();  /* barrier for proper semantics */
	return (_atomic_or(addr, mask) & mask) != 0;
	return (_atomic_fetch_or(addr, mask) & mask) != 0;
}
/**
...
...
@@ -101,7 +101,7 @@ static inline int test_and_clear_bit(unsigned nr, volatile unsigned long *addr)
	unsigned long mask = BIT_MASK(nr);
	addr += BIT_WORD(nr);
	smp_mb();  /* barrier for proper semantics */
	return (_atomic_andn(addr, mask) & mask) != 0;
	return (_atomic_fetch_andn(addr, mask) & mask) != 0;
}
/**
...
...
@@ -118,7 +118,7 @@ static inline int test_and_change_bit(unsigned nr,
	unsigned long mask = BIT_MASK(nr);
	addr += BIT_WORD(nr);
	smp_mb();  /* barrier for proper semantics */
	return (_atomic_xor(addr, mask) & mask) != 0;
	return (_atomic_fetch_xor(addr, mask) & mask) != 0;
}
#include <asm-generic/bitops/ext2-atomic.h>
...
...
arch/tile/include/asm/futex.h
...
...
@@ -80,16 +80,16 @@
ret = gu.err; \
}
#define __futex_set() __futex_call(__atomic_xchg)
#define __futex_add() __futex_call(__atomic_xchg_add)
#define __futex_or() __futex_call(__atomic_or)
#define __futex_andn() __futex_call(__atomic_andn)
#define __futex_xor() __futex_call(__atomic_xor)
#define __futex_set() __futex_call(__atomic32_xchg)
#define __futex_add() __futex_call(__atomic32_xchg_add)
#define __futex_or() __futex_call(__atomic32_fetch_or)
#define __futex_andn() __futex_call(__atomic32_fetch_andn)
#define __futex_xor() __futex_call(__atomic32_fetch_xor)
#define __futex_cmpxchg()						\
	{								\
		struct __get_user gu = __atomic_cmpxchg((u32 __force *)uaddr, \
							lock, oldval, oparg); \
		struct __get_user gu = __atomic32_cmpxchg((u32 __force *)uaddr, \
							  lock, oldval, oparg); \
		val = gu.val;						\
		ret = gu.err;						\
	}
...
...
arch/tile/lib/atomic_32.c
...
...
@@ -61,13 +61,13 @@ static inline int *__atomic_setup(volatile void *v)
int _atomic_xchg(int *v, int n)
{
	return __atomic_xchg(v, __atomic_setup(v), n).val;
	return __atomic32_xchg(v, __atomic_setup(v), n).val;
}
EXPORT_SYMBOL(_atomic_xchg);
int _atomic_xchg_add(int *v, int i)
{
	return __atomic_xchg_add(v, __atomic_setup(v), i).val;
	return __atomic32_xchg_add(v, __atomic_setup(v), i).val;
}
EXPORT_SYMBOL(_atomic_xchg_add);
...
...
@@ -78,39 +78,39 @@ int _atomic_xchg_add_unless(int *v, int a, int u)
* to use the first argument consistently as the "old value"
* in the assembly, as is done for _atomic_cmpxchg().
*/
	return __atomic_xchg_add_unless(v, __atomic_setup(v), u, a).val;
	return __atomic32_xchg_add_unless(v, __atomic_setup(v), u, a).val;
}
EXPORT_SYMBOL(_atomic_xchg_add_unless);
int _atomic_cmpxchg(int *v, int o, int n)
{
	return __atomic_cmpxchg(v, __atomic_setup(v), o, n).val;
	return __atomic32_cmpxchg(v, __atomic_setup(v), o, n).val;
}
EXPORT_SYMBOL(_atomic_cmpxchg);
unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask)
unsigned long _atomic_fetch_or(volatile unsigned long *p, unsigned long mask)
{
	return __atomic_or((int *)p, __atomic_setup(p), mask).val;
	return __atomic32_fetch_or((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_or);
EXPORT_SYMBOL(_atomic_fetch_or);
unsigned long _atomic_and(volatile unsigned long *p, unsigned long mask)
unsigned long _atomic_fetch_and(volatile unsigned long *p, unsigned long mask)
{
	return __atomic_and((int *)p, __atomic_setup(p), mask).val;
	return __atomic32_fetch_and((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_and);
EXPORT_SYMBOL(_atomic_fetch_and);
unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask)
unsigned long _atomic_fetch_andn(volatile unsigned long *p, unsigned long mask)
{
	return __atomic_andn((int *)p, __atomic_setup(p), mask).val;
	return __atomic32_fetch_andn((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_andn);
EXPORT_SYMBOL(_atomic_fetch_andn);
unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask)
unsigned long _atomic_fetch_xor(volatile unsigned long *p, unsigned long mask)
{
	return __atomic_xor((int *)p, __atomic_setup(p), mask).val;
	return __atomic32_fetch_xor((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_xor);
EXPORT_SYMBOL(_atomic_fetch_xor);
long long _atomic64_xchg(long long *v, long long n)
...
...
@@ -142,23 +142,23 @@ long long _atomic64_cmpxchg(long long *v, long long o, long long n)
}
EXPORT_SYMBOL(_atomic64_cmpxchg);
long long _atomic64_and(long long *v, long long n)
long long _atomic64_fetch_and(long long *v, long long n)
{
	return __atomic64_and(v, __atomic_setup(v), n);
	return __atomic64_fetch_and(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_and);
EXPORT_SYMBOL(_atomic64_fetch_and);
long long _atomic64_or(long long *v, long long n)
long long _atomic64_fetch_or(long long *v, long long n)
{
	return __atomic64_or(v, __atomic_setup(v), n);
	return __atomic64_fetch_or(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_or);
EXPORT_SYMBOL(_atomic64_fetch_or);
long long _atomic64_xor(long long *v, long long n)
long long _atomic64_fetch_xor(long long *v, long long n)
{
	return __atomic64_xor(v, __atomic_setup(v), n);
	return __atomic64_fetch_xor(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_xor);
EXPORT_SYMBOL(_atomic64_fetch_xor);
/*
* If any of the atomic or futex routines hit a bad address (not in
...
...
arch/tile/lib/atomic_asm_32.S
...
...
@@ -172,15 +172,20 @@ STD_ENTRY_SECTION(__atomic\name, .text.atomic)
	.endif
	.endm
atomic_op _cmpxchg, 32, "seq r26, r22, r2; { bbns r26, 3f; move r24, r3 }"
atomic_op _xchg, 32, "move r24, r2"
atomic_op _xchg_add, 32, "add r24, r22, r2"
atomic_op _xchg_add_unless, 32, \
/*
 * Use __atomic32 prefix to avoid collisions with GCC builtin __atomic functions.
 */
atomic_op 32_cmpxchg, 32, "seq r26, r22, r2; { bbns r26, 3f; move r24, r3 }"
atomic_op 32_xchg, 32, "move r24, r2"
atomic_op 32_xchg_add, 32, "add r24, r22, r2"
atomic_op 32_xchg_add_unless, 32, \
	"sne r26, r22, r2; { bbns r26, 3f; add r24, r22, r3 }"
atomic_op _or, 32, "or r24, r22, r2"
atomic_op _and, 32, "and r24, r22, r2"
atomic_op _andn, 32, "nor r2, r2, zero; and r24, r22, r2"
atomic_op _xor, 32, "xor r24, r22, r2"
atomic_op 32_fetch_or, 32, "or r24, r22, r2"
atomic_op 32_fetch_and, 32, "and r24, r22, r2"
atomic_op 32_fetch_andn, 32, "nor r2, r2, zero; and r24, r22, r2"
atomic_op 32_fetch_xor, 32, "xor r24, r22, r2"
atomic_op 64_cmpxchg, 64, "{ seq r26, r22, r2; seq r27, r23, r3 }; \
	{ bbns r26, 3f; move r24, r4 }; { bbns r27, 3f; move r25, r5 }"
...
...
@@ -192,9 +197,9 @@ atomic_op 64_xchg_add_unless, 64, \
	{ bbns r26, 3f; add r24, r22, r4 }; \
	{ bbns r27, 3f; add r25, r23, r5 }; \
	slt_u r26, r24, r22; add r25, r25, r26"
atomic_op 64_or, 64, "{ or r24, r22, r2; or r25, r23, r3 }"
atomic_op 64_and, 64, "{ and r24, r22, r2; and r25, r23, r3 }"
atomic_op 64_xor, 64, "{ xor r24, r22, r2; xor r25, r23, r3 }"
atomic_op 64_fetch_or, 64, "{ or r24, r22, r2; or r25, r23, r3 }"
atomic_op 64_fetch_and, 64, "{ and r24, r22, r2; and r25, r23, r3 }"
atomic_op 64_fetch_xor, 64, "{ xor r24, r22, r2; xor r25, r23, r3 }"
	jrp     lr              /* happy backtracer */
...
...
arch/x86/include/asm/atomic.h
...
...
@@ -171,6 +171,16 @@ static __always_inline int atomic_sub_return(int i, atomic_t *v)
#define atomic_inc_return(v) (atomic_add_return(1, v))
#define atomic_dec_return(v) (atomic_sub_return(1, v))
static __always_inline int atomic_fetch_add(int i, atomic_t *v)
{
	return xadd(&v->counter, i);
}
static __always_inline int atomic_fetch_sub(int i, atomic_t *v)
{
	return xadd(&v->counter, -i);
}
static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return cmpxchg(&v->counter, old, new);
...
...
@@ -190,10 +200,29 @@ static inline void atomic_##op(int i, atomic_t *v) \
: "memory"); \
}
ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)
#define ATOMIC_FETCH_OP(op, c_op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
int old, val = atomic_read(v); \
for (;;) { \
old = atomic_cmpxchg(v, val, val c_op i); \
if (old == val) \
break; \
val = old; \
} \
return old; \
}
#define ATOMIC_OPS(op, c_op) \
ATOMIC_OP(op) \
ATOMIC_FETCH_OP(op, c_op)
ATOMIC_OPS(and, &)
ATOMIC_OPS(or, |)
ATOMIC_OPS(xor, ^)
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP
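x86 has no single fetch-and-logic instruction, so the ATOMIC_FETCH_OP above builds the old-value return from an atomic_cmpxchg() retry loop. A sketch of the same loop shape in portable C11 (shown for "or"; not the kernel macro itself):

```c
/*
 * C11 sketch of the cmpxchg retry loop used by ATOMIC_FETCH_OP above:
 * read the current value, try to install (val | i), and retry if
 * another CPU raced in.
 */
#include <stdatomic.h>

static int fetch_or_cmpxchg(atomic_int *v, int i)
{
	int val = atomic_load_explicit(v, memory_order_relaxed);

	/* On failure, compare_exchange reloads the current value into val. */
	while (!atomic_compare_exchange_weak(v, &val, val | i))
		;

	return val;	/* the value observed just before our update won */
}
```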
/**
...
...
arch/x86/include/asm/atomic64_32.h
...
...
@@ -320,10 +320,29 @@ static inline void atomic64_##op(long long i, atomic64_t *v) \
c = old; \
}
ATOMIC64_OP(and, &)
ATOMIC64_OP(or, |)
ATOMIC64_OP(xor, ^)
#define ATOMIC64_FETCH_OP(op, c_op) \
static inline long long atomic64_fetch_##op(long long i, atomic64_t *v) \
{ \
long long old, c = 0; \
while ((old = atomic64_cmpxchg(v, c, c c_op i)) != c) \
c = old; \
return old; \
}
ATOMIC64_FETCH_OP(add, +)
#define atomic64_fetch_sub(i, v) atomic64_fetch_add(-(i), (v))
#define ATOMIC64_OPS(op, c_op) \
ATOMIC64_OP(op, c_op) \
ATOMIC64_FETCH_OP(op, c_op)
ATOMIC64_OPS(and, &)
ATOMIC64_OPS(or, |)
ATOMIC64_OPS(xor, ^)
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP
#endif
/* _ASM_X86_ATOMIC64_32_H */
arch/x86/include/asm/atomic64_64.h
...
...
@@ -158,6 +158,16 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
	return atomic64_add_return(-i, v);
}
static inline long atomic64_fetch_add(long i, atomic64_t *v)
{
	return xadd(&v->counter, i);
}
static inline long atomic64_fetch_sub(long i, atomic64_t *v)
{
	return xadd(&v->counter, -i);
}
#define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
#define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
...
...
@@ -229,10 +239,29 @@ static inline void atomic64_##op(long i, atomic64_t *v) \
: "memory"); \
}
ATOMIC64_OP(and)
ATOMIC64_OP(or)
ATOMIC64_OP(xor)
#define ATOMIC64_FETCH_OP(op, c_op) \
static inline long atomic64_fetch_##op(long i, atomic64_t *v) \
{ \
long old, val = atomic64_read(v); \
for (;;) { \
old = atomic64_cmpxchg(v, val, val c_op i); \
if (old == val) \
break; \
val = old; \
} \
return old; \
}
#define ATOMIC64_OPS(op, c_op) \
ATOMIC64_OP(op) \
ATOMIC64_FETCH_OP(op, c_op)
ATOMIC64_OPS(and, &)
ATOMIC64_OPS(or, |)
ATOMIC64_OPS(xor, ^)
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP
#endif
/* _ASM_X86_ATOMIC64_64_H */
arch/xtensa/include/asm/atomic.h
...
...
@@ -98,6 +98,26 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
return result; \
}
#define ATOMIC_FETCH_OP(op) \
static inline int atomic_fetch_##op(int i, atomic_t * v) \
{ \
unsigned long tmp; \
int result; \
\
__asm__ __volatile__( \
"1: l32i %1, %3, 0\n" \
" wsr %1, scompare1\n" \
" " #op " %0, %1, %2\n" \
" s32c1i %0, %3, 0\n" \
" bne %0, %1, 1b\n" \
: "=&a" (result), "=&a" (tmp) \
: "a" (i), "a" (v) \
: "memory" \
); \
\
return result; \
}
#else
/* XCHAL_HAVE_S32C1I */
#define ATOMIC_OP(op) \
...
...
@@ -138,18 +158,42 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
return vval; \
}
#define ATOMIC_FETCH_OP(op) \
static inline int atomic_fetch_##op(int i, atomic_t * v) \
{ \
unsigned int tmp, vval; \
\
__asm__ __volatile__( \
" rsil a15,"__stringify(TOPLEVEL)"\n" \
" l32i %0, %3, 0\n" \
" " #op " %1, %0, %2\n" \
" s32i %1, %3, 0\n" \
" wsr a15, ps\n" \
" rsync\n" \
: "=&a" (vval), "=&a" (tmp) \
: "a" (i), "a" (v) \
: "a15", "memory" \
); \
\
return vval; \
}
#endif
/* XCHAL_HAVE_S32C1I */
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op) ATOMIC_OP_RETURN(op)
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)
#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
...
...
include/asm-generic/atomic-long.h
...
...
@@ -112,6 +112,40 @@ static __always_inline void atomic_long_dec(atomic_long_t *l)
	ATOMIC_LONG_PFX(_dec)(v);
}
#define ATOMIC_LONG_FETCH_OP(op, mo) \
static inline long \
atomic_long_fetch_##op##mo(long i, atomic_long_t *l) \
{ \
ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
\
return (long)ATOMIC_LONG_PFX(_fetch_##op##mo)(i, v); \
}
ATOMIC_LONG_FETCH_OP(add, )
ATOMIC_LONG_FETCH_OP(add, _relaxed)
ATOMIC_LONG_FETCH_OP(add, _acquire)
ATOMIC_LONG_FETCH_OP(add, _release)
ATOMIC_LONG_FETCH_OP(sub, )
ATOMIC_LONG_FETCH_OP(sub, _relaxed)
ATOMIC_LONG_FETCH_OP(sub, _acquire)
ATOMIC_LONG_FETCH_OP(sub, _release)
ATOMIC_LONG_FETCH_OP(and, )
ATOMIC_LONG_FETCH_OP(and, _relaxed)
ATOMIC_LONG_FETCH_OP(and, _acquire)
ATOMIC_LONG_FETCH_OP(and, _release)
ATOMIC_LONG_FETCH_OP(andnot, )
ATOMIC_LONG_FETCH_OP(andnot, _relaxed)
ATOMIC_LONG_FETCH_OP(andnot, _acquire)
ATOMIC_LONG_FETCH_OP(andnot, _release)
ATOMIC_LONG_FETCH_OP(or, )
ATOMIC_LONG_FETCH_OP(or, _relaxed)
ATOMIC_LONG_FETCH_OP(or, _acquire)
ATOMIC_LONG_FETCH_OP(or, _release)
ATOMIC_LONG_FETCH_OP(xor, )
ATOMIC_LONG_FETCH_OP(xor, _relaxed)
ATOMIC_LONG_FETCH_OP(xor, _acquire)
ATOMIC_LONG_FETCH_OP(xor, _release)
#define ATOMIC_LONG_OP(op) \
static __always_inline void \
atomic_long_##op(long i, atomic_long_t *l) \
...
...
@@ -124,9 +158,9 @@ atomic_long_##op(long i, atomic_long_t *l) \
ATOMIC_LONG_OP(add)
ATOMIC_LONG_OP(sub)
ATOMIC_LONG_OP(and)
ATOMIC_LONG_OP(andnot)
ATOMIC_LONG_OP(or)
ATOMIC_LONG_OP(xor)
ATOMIC_LONG_OP(andnot)
#undef ATOMIC_LONG_OP
...
...
include/asm-generic/atomic.h
...
...
@@ -61,6 +61,18 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
return c c_op i; \
}
#define ATOMIC_FETCH_OP(op, c_op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
int c, old; \
\
c = v->counter; \
while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \
c = old; \
\
return c; \
}
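For reference, expanding the generic SMP ATOMIC_FETCH_OP above for the add case produces roughly the function below. This is a hand expansion only, shown in kernel context (it depends on atomic_t and on whatever cmpxchg() the architecture provides, so it is not standalone):

```c
/* Hand expansion of ATOMIC_FETCH_OP(add, +) from the header above. */
static inline int atomic_fetch_add(int i, atomic_t *v)
{
	int c, old;

	c = v->counter;
	while ((old = cmpxchg(&v->counter, c, c + i)) != c)
		c = old;	/* lost the race: retry with the fresher value */

	return c;		/* the value the counter held before the add */
}
```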
#else
#include <linux/irqflags.h>
...
...
@@ -88,6 +100,20 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
return ret; \
}
#define ATOMIC_FETCH_OP(op, c_op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
int ret; \
\
raw_local_irq_save(flags); \
ret = v->counter; \
v->counter = v->counter c_op i; \
raw_local_irq_restore(flags); \
\
return ret; \
}
#endif
/* CONFIG_SMP */
#ifndef atomic_add_return
...
...
@@ -98,6 +124,26 @@ ATOMIC_OP_RETURN(add, +)
ATOMIC_OP_RETURN(sub, -)
#endif
#ifndef atomic_fetch_add
ATOMIC_FETCH_OP(add, +)
#endif
#ifndef atomic_fetch_sub
ATOMIC_FETCH_OP(sub, -)
#endif
#ifndef atomic_fetch_and
ATOMIC_FETCH_OP(and, &)
#endif
#ifndef atomic_fetch_or
ATOMIC_FETCH_OP(or, |)
#endif
#ifndef atomic_fetch_xor
ATOMIC_FETCH_OP(xor, ^)
#endif
#ifndef atomic_and
ATOMIC_OP(and, &)
#endif
#endif
...
...
@@ -110,6 +156,7 @@ ATOMIC_OP(or, |)
ATOMIC_OP(xor, ^)
#endif
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
...
...
include/asm-generic/atomic64.h
...
...
@@ -27,16 +27,23 @@ extern void atomic64_##op(long long a, atomic64_t *v);
#define ATOMIC64_OP_RETURN(op) \
extern long long atomic64_##op##_return(long long a, atomic64_t *v);
#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op)
#define ATOMIC64_FETCH_OP(op) \
extern long long atomic64_fetch_##op(long long a, atomic64_t *v);
#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op)
ATOMIC64_OPS(add)
ATOMIC64_OPS(sub)
ATOMIC64_OP(and)
ATOMIC64_OP(or)
ATOMIC64_OP(xor)
#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_FETCH_OP(op)
ATOMIC64_OPS(and)
ATOMIC64_OPS(or)
ATOMIC64_OPS(xor)
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
...
...
include/linux/atomic.h
...
...
@@ -163,206 +163,201 @@
#endif
#endif
/* atomic_dec_return_relaxed */
/* atomic_xchg_relaxed */
#ifndef atomic_xchg_relaxed
#define atomic_xchg_relaxed atomic_xchg
#define atomic_xchg_acquire atomic_xchg
#define atomic_xchg_release atomic_xchg
#else
/* atomic_xchg_relaxed */
/* atomic_fetch_add_relaxed */
#ifndef atomic_fetch_add_relaxed
#define atomic_fetch_add_relaxed atomic_fetch_add
#define atomic_fetch_add_acquire atomic_fetch_add
#define atomic_fetch_add_release atomic_fetch_add
#ifndef atomic_xchg_acquire
#define atomic_xchg_acquire(...) \
__atomic_op_acquire(atomic_xchg, __VA_ARGS__)
#endif
#else
/* atomic_fetch_add_relaxed */
#ifndef atomic_
xchg_releas
e
#define
atomic_xchg_releas
e(...) \
__atomic_op_
release(atomic_xchg
, __VA_ARGS__)
#ifndef atomic_
fetch_add_acquir
e
#define
atomic_fetch_add_acquir
e(...) \
__atomic_op_
acquire(atomic_fetch_add
, __VA_ARGS__)
#endif
#ifndef atomic_
xchg
#define
atomic_xchg(...)
\
__atomic_op_
fence(atomic_xchg
, __VA_ARGS__)
#ifndef atomic_
fetch_add_release
#define
atomic_fetch_add_release(...)
\
__atomic_op_
release(atomic_fetch_add
, __VA_ARGS__)
#endif
#endif
/* atomic_xchg_relaxed */
/* atomic_cmpxchg_relaxed */
#ifndef atomic_cmpxchg_relaxed
#define atomic_cmpxchg_relaxed atomic_cmpxchg
#define atomic_cmpxchg_acquire atomic_cmpxchg
#define atomic_cmpxchg_release atomic_cmpxchg
#else
/* atomic_cmpxchg_relaxed */
#ifndef atomic_cmpxchg_acquire
#define atomic_cmpxchg_acquire(...) \
__atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__)
#ifndef atomic_fetch_add
#define atomic_fetch_add(...) \
__atomic_op_fence(atomic_fetch_add, __VA_ARGS__)
#endif
#endif
/* atomic_fetch_add_relaxed */
#ifndef atomic_cmpxchg_release
#define atomic_cmpxchg_release(...) \
__atomic_op_release(atomic_cmpxchg, __VA_ARGS__)
#endif
/* atomic_fetch_sub_relaxed */
#ifndef atomic_fetch_sub_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub
#define atomic_fetch_sub_acquire atomic_fetch_sub
#define atomic_fetch_sub_release atomic_fetch_sub
#ifndef atomic_cmpxchg
#define atomic_cmpxchg(...) \
__atomic_op_fence(atomic_cmpxchg, __VA_ARGS__)
#else
/* atomic_fetch_sub_relaxed */
#ifndef atomic_fetch_sub_acquire
#define atomic_fetch_sub_acquire(...) \
__atomic_op_acquire(atomic_fetch_sub, __VA_ARGS__)
#endif
#endif
/* atomic_cmpxchg_relaxed */
#ifndef atomic64_read_acquire
#define atomic64_read_acquire(v) smp_load_acquire(&(v)->counter)
#ifndef atomic_fetch_sub_release
#define atomic_fetch_sub_release(...) \
__atomic_op_release(atomic_fetch_sub, __VA_ARGS__)
#endif
#ifndef atomic64_set_release
#define atomic64_set_release(v, i) smp_store_release(&(v)->counter, (i))
#ifndef atomic_fetch_sub
#define atomic_fetch_sub(...) \
__atomic_op_fence(atomic_fetch_sub, __VA_ARGS__)
#endif
#endif
/* atomic_fetch_sub_relaxed */
/* atomic
64_add_return
_relaxed */
#ifndef atomic
64_add_return
_relaxed
#define
atomic64_add_return_relaxed atomic64_add_return
#define
atomic64_add_return_acquire atomic64_add_return
#define
atomic64_add_return_release atomic64_add_return
/* atomic
_fetch_or
_relaxed */
#ifndef atomic
_fetch_or
_relaxed
#define
atomic_fetch_or_relaxed atomic_fetch_or
#define
atomic_fetch_or_acquire atomic_fetch_or
#define
atomic_fetch_or_release atomic_fetch_or
#else
/* atomic
64_add_return
_relaxed */
#else
/* atomic
_fetch_or
_relaxed */
#ifndef atomic
64_add_return
_acquire
#define
atomic64_add_return_acquire(...)
\
__atomic_op_acquire(atomic
64_add_return
, __VA_ARGS__)
#ifndef atomic
_fetch_or
_acquire
#define
atomic_fetch_or_acquire(...)
\
__atomic_op_acquire(atomic
_fetch_or
, __VA_ARGS__)
#endif
#ifndef atomic
64_add_return
_release
#define
atomic64_add_return_release(...)
\
__atomic_op_release(atomic
64_add_return
, __VA_ARGS__)
#ifndef atomic
_fetch_or
_release
#define
atomic_fetch_or_release(...)
\
__atomic_op_release(atomic
_fetch_or
, __VA_ARGS__)
#endif
#ifndef atomic
64_add_return
#define
atomic64_add_return(...)
\
__atomic_op_fence(atomic
64_add_return
, __VA_ARGS__)
#ifndef atomic
_fetch_or
#define
atomic_fetch_or(...)
\
__atomic_op_fence(atomic
_fetch_or
, __VA_ARGS__)
#endif
#endif
/* atomic
64_add_return
_relaxed */
#endif
/* atomic
_fetch_or
_relaxed */
/* atomic
64_inc_return
_relaxed */
#ifndef atomic
64_inc_return
_relaxed
#define
atomic64_inc_return_relaxed atomic64_inc_return
#define
atomic64_inc_return_acquire atomic64_inc_return
#define
atomic64_inc_return_release atomic64_inc_return
/* atomic
_fetch_and
_relaxed */
#ifndef atomic
_fetch_and
_relaxed
#define
atomic_fetch_and_relaxed atomic_fetch_and
#define
atomic_fetch_and_acquire atomic_fetch_and
#define
atomic_fetch_and_release atomic_fetch_and
#else
/* atomic
64_inc_return
_relaxed */
#else
/* atomic
_fetch_and
_relaxed */
#ifndef atomic
64_inc_return
_acquire
#define
atomic64_inc_return_acquire(...)
\
__atomic_op_acquire(atomic
64_inc_return
, __VA_ARGS__)
#ifndef atomic
_fetch_and
_acquire
#define
atomic_fetch_and_acquire(...)
\
__atomic_op_acquire(atomic
_fetch_and
, __VA_ARGS__)
#endif
#ifndef atomic
64_inc_return
_release
#define
(left column of the side-by-side diff, old include/linux/atomic.h: the atomic64_* ordering wrappers, which the new layout moves further down:)

atomic64_inc_return_release(...) \
__atomic_op_release(atomic64_inc_return, __VA_ARGS__)
#endif

#ifndef atomic64_inc_return
#define atomic64_inc_return(...) \
__atomic_op_fence(atomic64_inc_return, __VA_ARGS__)
#endif
#endif /* atomic64_inc_return_relaxed */

/* atomic64_sub_return_relaxed */
#ifndef atomic64_sub_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return
#define atomic64_sub_return_acquire atomic64_sub_return
#define atomic64_sub_return_release atomic64_sub_return

#else /* atomic64_sub_return_relaxed */

#ifndef atomic64_sub_return_acquire
#define atomic64_sub_return_acquire(...) \
__atomic_op_acquire(atomic64_sub_return, __VA_ARGS__)
#endif

#ifndef atomic64_sub_return_release
#define atomic64_sub_return_release(...) \
__atomic_op_release(atomic64_sub_return, __VA_ARGS__)
#endif

#ifndef atomic64_sub_return
#define atomic64_sub_return(...) \
__atomic_op_fence(atomic64_sub_return, __VA_ARGS__)
#endif
#endif /* atomic64_sub_return_relaxed */

/* atomic64_dec_return_relaxed */
#ifndef atomic64_dec_return_relaxed
#define atomic64_dec_return_relaxed atomic64_dec_return
#define atomic64_dec_return_acquire atomic64_dec_return
#define atomic64_dec_return_release atomic64_dec_return

#else /* atomic64_dec_return_relaxed */

#ifndef atomic64_dec_return_acquire
#define atomic64_dec_return_acquire(...) \
__atomic_op_acquire(atomic64_dec_return, __VA_ARGS__)
#endif

#ifndef atomic64_dec_return_release
#define atomic64_dec_return_release(...) \
__atomic_op_release(atomic64_dec_return, __VA_ARGS__)
#endif

#ifndef atomic64_dec_return
#define atomic64_dec_return(...) \
__atomic_op_fence(atomic64_dec_return, __VA_ARGS__)
#endif
#endif /* atomic64_dec_return_relaxed */

/* atomic64_xchg_relaxed */
#ifndef atomic64_xchg_relaxed
#define atomic64_xchg_relaxed atomic64_xchg
#define atomic64_xchg_acquire atomic64_xchg
#define atomic64_xchg_release atomic64_xchg

#else /* atomic64_xchg_relaxed */

#ifndef atomic64_xchg_acquire
#define atomic64_xchg_acquire(...) \
__atomic_op_acquire(atomic64_xchg, __VA_ARGS__)
#endif

#ifndef atomic64_xchg_release
#define atomic64_xchg_release(...) \
__atomic_op_release(atomic64_xchg, __VA_ARGS__)
#endif

#ifndef atomic64_xchg
#define atomic64_xchg(...) \
__atomic_op_fence(atomic64_xchg, __VA_ARGS__)
#endif
#endif /* atomic64_xchg_relaxed */

/* atomic64_cmpxchg_relaxed */
#ifndef atomic64_cmpxchg_relaxed
#define atomic64_cmpxchg_relaxed atomic64_cmpxchg
#define atomic64_cmpxchg_acquire atomic64_cmpxchg
#define atomic64_cmpxchg_release atomic64_cmpxchg

#else /* atomic64_cmpxchg_relaxed */

#ifndef atomic64_cmpxchg_acquire
#define atomic64_cmpxchg_acquire(...) \
__atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic64_cmpxchg_release
#define atomic64_cmpxchg_release(...) \
__atomic_op_release(atomic64_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic64_cmpxchg
#define atomic64_cmpxchg(...) \
__atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__)
#endif
#endif /* atomic64_cmpxchg_relaxed */

(right column, new include/linux/atomic.h: the atomic_fetch_*() wrappers added by this series, plus the atomic_xchg()/atomic_cmpxchg() blocks:)

#ifndef atomic_fetch_and_release
#define atomic_fetch_and_release(...) \
__atomic_op_release(atomic_fetch_and, __VA_ARGS__)
#endif

#ifndef atomic_fetch_and
#define atomic_fetch_and(...) \
__atomic_op_fence(atomic_fetch_and, __VA_ARGS__)
#endif
#endif /* atomic_fetch_and_relaxed */

#ifdef atomic_andnot

/* atomic_fetch_andnot_relaxed */
#ifndef atomic_fetch_andnot_relaxed
#define atomic_fetch_andnot_relaxed atomic_fetch_andnot
#define atomic_fetch_andnot_acquire atomic_fetch_andnot
#define atomic_fetch_andnot_release atomic_fetch_andnot

#else /* atomic_fetch_andnot_relaxed */

#ifndef atomic_fetch_andnot_acquire
#define atomic_fetch_andnot_acquire(...) \
__atomic_op_acquire(atomic_fetch_andnot, __VA_ARGS__)
#endif

#ifndef atomic_fetch_andnot_release
#define atomic_fetch_andnot_release(...) \
__atomic_op_release(atomic_fetch_andnot, __VA_ARGS__)
#endif

#ifndef atomic_fetch_andnot
#define atomic_fetch_andnot(...) \
__atomic_op_fence(atomic_fetch_andnot, __VA_ARGS__)
#endif
#endif /* atomic_fetch_andnot_relaxed */

#endif /* atomic_andnot */

/* atomic_fetch_xor_relaxed */
#ifndef atomic_fetch_xor_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor
#define atomic_fetch_xor_acquire atomic_fetch_xor
#define atomic_fetch_xor_release atomic_fetch_xor

#else /* atomic_fetch_xor_relaxed */

#ifndef atomic_fetch_xor_acquire
#define atomic_fetch_xor_acquire(...) \
__atomic_op_acquire(atomic_fetch_xor, __VA_ARGS__)
#endif

#ifndef atomic_fetch_xor_release
#define atomic_fetch_xor_release(...) \
__atomic_op_release(atomic_fetch_xor, __VA_ARGS__)
#endif

#ifndef atomic_fetch_xor
#define atomic_fetch_xor(...) \
__atomic_op_fence(atomic_fetch_xor, __VA_ARGS__)
#endif
#endif /* atomic_fetch_xor_relaxed */

/* atomic_xchg_relaxed */
#ifndef atomic_xchg_relaxed
#define atomic_xchg_relaxed atomic_xchg
#define atomic_xchg_acquire atomic_xchg
#define atomic_xchg_release atomic_xchg

#else /* atomic_xchg_relaxed */

#ifndef atomic_xchg_acquire
#define atomic_xchg_acquire(...) \
__atomic_op_acquire(atomic_xchg, __VA_ARGS__)
#endif

#ifndef atomic_xchg_release
#define atomic_xchg_release(...) \
__atomic_op_release(atomic_xchg, __VA_ARGS__)
#endif

#ifndef atomic_xchg
#define atomic_xchg(...) \
__atomic_op_fence(atomic_xchg, __VA_ARGS__)
#endif
#endif /* atomic_xchg_relaxed */

/* atomic_cmpxchg_relaxed */
#ifndef atomic_cmpxchg_relaxed
#define atomic_cmpxchg_relaxed atomic_cmpxchg
#define atomic_cmpxchg_acquire atomic_cmpxchg
#define atomic_cmpxchg_release atomic_cmpxchg

#else /* atomic_cmpxchg_relaxed */

#ifndef atomic_cmpxchg_acquire
#define atomic_cmpxchg_acquire(...) \
__atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic_cmpxchg_release
#define atomic_cmpxchg_release(...) \
__atomic_op_release(atomic_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic_cmpxchg
#define atomic_cmpxchg(...) \
__atomic_op_fence(atomic_cmpxchg, __VA_ARGS__)
#endif
#endif /* atomic_cmpxchg_relaxed */
/* cmpxchg_relaxed */
#ifndef cmpxchg_relaxed
...
...
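For orientation: an architecture only has to supply each operation in its _relaxed form; the _acquire, _release and fully ordered variants are generated by the __atomic_op_acquire()/__atomic_op_release()/__atomic_op_fence() helpers defined earlier in this header. A minimal sketch of those helpers, reproduced from memory rather than from this hunk (an architecture may override any of them):

#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret  = op##_relaxed(args);		\
	smp_mb__after_atomic();						\
	__ret;								\
})

#define __atomic_op_release(op, args...)				\
({									\
	smp_mb__before_atomic();					\
	op##_relaxed(args);						\
})

#define __atomic_op_fence(op, args...)					\
({									\
	typeof(op##_relaxed(args)) __ret;				\
	smp_mb__before_atomic();					\
	__ret = op##_relaxed(args);					\
	smp_mb__after_atomic();						\
	__ret;								\
})

So, for example, atomic_fetch_and_acquire(i, v) falls back to atomic_fetch_and_relaxed(i, v) followed by a barrier when the architecture only provides the relaxed flavour.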
@@ -463,17 +458,27 @@ static inline void atomic_andnot(int i, atomic_t *v)
{
	atomic_and(~i, v);
}
-#endif
-
-static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-	atomic_andnot(mask, v);
-}
-
-static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-	atomic_or(mask, v);
-}
+
+static inline int atomic_fetch_andnot(int i, atomic_t *v)
+{
+	return atomic_fetch_and(~i, v);
+}
+
+static inline int atomic_fetch_andnot_relaxed(int i, atomic_t *v)
+{
+	return atomic_fetch_and_relaxed(~i, v);
+}
+
+static inline int atomic_fetch_andnot_acquire(int i, atomic_t *v)
+{
+	return atomic_fetch_and_acquire(~i, v);
+}
+
+static inline int atomic_fetch_andnot_release(int i, atomic_t *v)
+{
+	return atomic_fetch_and_release(~i, v);
+}
+#endif
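The atomic_fetch_andnot() family added here returns the value the counter held before the AND-NOT, which turns bit test-and-clear patterns into a single atomic step. A hypothetical usage sketch (MY_FLAG and test_and_clear_my_flag are illustrative names, not part of the patch):

#define MY_FLAG		0x01	/* illustrative flag bit */

static inline bool test_and_clear_my_flag(atomic_t *state)
{
	/* clear MY_FLAG and report whether it was set beforehand */
	return atomic_fetch_andnot(MY_FLAG, state) & MY_FLAG;
}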
/**
* atomic_inc_not_zero_hint - increment if not null
...
...
@@ -558,36 +563,336 @@ static inline int atomic_dec_if_positive(atomic_t *v)
}
#endif
/**
 * atomic_fetch_or - perform *p |= mask and return old value of *p
 * @mask: mask to OR on the atomic_t
 * @p: pointer to atomic_t
 */
#ifndef atomic_fetch_or
static inline int atomic_fetch_or(int mask, atomic_t *p)
{
	int old, val = atomic_read(p);

	for (;;) {
		old = atomic_cmpxchg(p, val, val | mask);
		if (old == val)
			break;
		val = old;
	}

	return old;
}
#endif

#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif

#ifndef atomic64_read_acquire
#define atomic64_read_acquire(v) smp_load_acquire(&(v)->counter)
#endif

#ifndef atomic64_set_release
#define atomic64_set_release(v, i) smp_store_release(&(v)->counter, (i))
#endif
/* atomic64_add_return_relaxed */
#ifndef atomic64_add_return_relaxed
#define atomic64_add_return_relaxed atomic64_add_return
#define atomic64_add_return_acquire atomic64_add_return
#define atomic64_add_return_release atomic64_add_return
#else
/* atomic64_add_return_relaxed */
#ifndef atomic64_add_return_acquire
#define atomic64_add_return_acquire(...) \
__atomic_op_acquire(atomic64_add_return, __VA_ARGS__)
#endif
#ifndef atomic64_add_return_release
#define atomic64_add_return_release(...) \
__atomic_op_release(atomic64_add_return, __VA_ARGS__)
#endif
#ifndef atomic64_add_return
#define atomic64_add_return(...) \
__atomic_op_fence(atomic64_add_return, __VA_ARGS__)
#endif
#endif
/* atomic64_add_return_relaxed */
/* atomic64_inc_return_relaxed */
#ifndef atomic64_inc_return_relaxed
#define atomic64_inc_return_relaxed atomic64_inc_return
#define atomic64_inc_return_acquire atomic64_inc_return
#define atomic64_inc_return_release atomic64_inc_return
#else
/* atomic64_inc_return_relaxed */
#ifndef atomic64_inc_return_acquire
#define atomic64_inc_return_acquire(...) \
__atomic_op_acquire(atomic64_inc_return, __VA_ARGS__)
#endif
#ifndef atomic64_inc_return_release
#define atomic64_inc_return_release(...) \
__atomic_op_release(atomic64_inc_return, __VA_ARGS__)
#endif
#ifndef atomic64_inc_return
#define atomic64_inc_return(...) \
__atomic_op_fence(atomic64_inc_return, __VA_ARGS__)
#endif
#endif
/* atomic64_inc_return_relaxed */
/* atomic64_sub_return_relaxed */
#ifndef atomic64_sub_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return
#define atomic64_sub_return_acquire atomic64_sub_return
#define atomic64_sub_return_release atomic64_sub_return
#else
/* atomic64_sub_return_relaxed */
#ifndef atomic64_sub_return_acquire
#define atomic64_sub_return_acquire(...) \
__atomic_op_acquire(atomic64_sub_return, __VA_ARGS__)
#endif
#ifndef atomic64_sub_return_release
#define atomic64_sub_return_release(...) \
__atomic_op_release(atomic64_sub_return, __VA_ARGS__)
#endif
#ifndef atomic64_sub_return
#define atomic64_sub_return(...) \
__atomic_op_fence(atomic64_sub_return, __VA_ARGS__)
#endif
#endif
/* atomic64_sub_return_relaxed */
/* atomic64_dec_return_relaxed */
#ifndef atomic64_dec_return_relaxed
#define atomic64_dec_return_relaxed atomic64_dec_return
#define atomic64_dec_return_acquire atomic64_dec_return
#define atomic64_dec_return_release atomic64_dec_return
#else
/* atomic64_dec_return_relaxed */
#ifndef atomic64_dec_return_acquire
#define atomic64_dec_return_acquire(...) \
__atomic_op_acquire(atomic64_dec_return, __VA_ARGS__)
#endif
#ifndef atomic64_dec_return_release
#define atomic64_dec_return_release(...) \
__atomic_op_release(atomic64_dec_return, __VA_ARGS__)
#endif
#ifndef atomic64_dec_return
#define atomic64_dec_return(...) \
__atomic_op_fence(atomic64_dec_return, __VA_ARGS__)
#endif
#endif
/* atomic64_dec_return_relaxed */
/* atomic64_fetch_add_relaxed */
#ifndef atomic64_fetch_add_relaxed
#define atomic64_fetch_add_relaxed atomic64_fetch_add
#define atomic64_fetch_add_acquire atomic64_fetch_add
#define atomic64_fetch_add_release atomic64_fetch_add
#else
/* atomic64_fetch_add_relaxed */
#ifndef atomic64_fetch_add_acquire
#define atomic64_fetch_add_acquire(...) \
__atomic_op_acquire(atomic64_fetch_add, __VA_ARGS__)
#endif
#ifndef atomic64_fetch_add_release
#define atomic64_fetch_add_release(...) \
__atomic_op_release(atomic64_fetch_add, __VA_ARGS__)
#endif
#ifndef atomic64_fetch_add
#define atomic64_fetch_add(...) \
__atomic_op_fence(atomic64_fetch_add, __VA_ARGS__)
#endif
#endif
/* atomic64_fetch_add_relaxed */
/* atomic64_fetch_sub_relaxed */
#ifndef atomic64_fetch_sub_relaxed
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub
#define atomic64_fetch_sub_acquire atomic64_fetch_sub
#define atomic64_fetch_sub_release atomic64_fetch_sub
#else
/* atomic64_fetch_sub_relaxed */
#ifndef atomic64_fetch_sub_acquire
#define atomic64_fetch_sub_acquire(...) \
__atomic_op_acquire(atomic64_fetch_sub, __VA_ARGS__)
#endif
#ifndef atomic64_fetch_sub_release
#define atomic64_fetch_sub_release(...) \
__atomic_op_release(atomic64_fetch_sub, __VA_ARGS__)
#endif
#ifndef atomic64_fetch_sub
#define atomic64_fetch_sub(...) \
__atomic_op_fence(atomic64_fetch_sub, __VA_ARGS__)
#endif
#endif
/* atomic64_fetch_sub_relaxed */
/* atomic64_fetch_or_relaxed */
#ifndef atomic64_fetch_or_relaxed
#define atomic64_fetch_or_relaxed atomic64_fetch_or
#define atomic64_fetch_or_acquire atomic64_fetch_or
#define atomic64_fetch_or_release atomic64_fetch_or
#else
/* atomic64_fetch_or_relaxed */
#ifndef atomic64_fetch_or_acquire
#define atomic64_fetch_or_acquire(...) \
__atomic_op_acquire(atomic64_fetch_or, __VA_ARGS__)
#endif
#ifndef atomic64_fetch_or_release
#define atomic64_fetch_or_release(...) \
__atomic_op_release(atomic64_fetch_or, __VA_ARGS__)
#endif
#ifndef atomic64_fetch_or
#define atomic64_fetch_or(...) \
__atomic_op_fence(atomic64_fetch_or, __VA_ARGS__)
#endif
#endif
/* atomic64_fetch_or_relaxed */
/* atomic64_fetch_and_relaxed */
#ifndef atomic64_fetch_and_relaxed
#define atomic64_fetch_and_relaxed atomic64_fetch_and
#define atomic64_fetch_and_acquire atomic64_fetch_and
#define atomic64_fetch_and_release atomic64_fetch_and
#else
/* atomic64_fetch_and_relaxed */
#ifndef atomic64_fetch_and_acquire
#define atomic64_fetch_and_acquire(...) \
__atomic_op_acquire(atomic64_fetch_and, __VA_ARGS__)
#endif
#ifndef atomic64_fetch_and_release
#define atomic64_fetch_and_release(...) \
__atomic_op_release(atomic64_fetch_and, __VA_ARGS__)
#endif
#ifndef atomic64_fetch_and
#define atomic64_fetch_and(...) \
__atomic_op_fence(atomic64_fetch_and, __VA_ARGS__)
#endif
#endif
/* atomic64_fetch_and_relaxed */
#ifdef atomic64_andnot
/* atomic64_fetch_andnot_relaxed */
#ifndef atomic64_fetch_andnot_relaxed
#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot
#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot
#define atomic64_fetch_andnot_release atomic64_fetch_andnot
#else
/* atomic64_fetch_andnot_relaxed */
#ifndef atomic64_fetch_andnot_acquire
#define atomic64_fetch_andnot_acquire(...) \
__atomic_op_acquire(atomic64_fetch_andnot, __VA_ARGS__)
#endif
#ifndef atomic64_fetch_andnot_release
#define atomic64_fetch_andnot_release(...) \
__atomic_op_release(atomic64_fetch_andnot, __VA_ARGS__)
#endif
#ifndef atomic64_fetch_andnot
#define atomic64_fetch_andnot(...) \
__atomic_op_fence(atomic64_fetch_andnot, __VA_ARGS__)
#endif
#endif
/* atomic64_fetch_andnot_relaxed */
#endif
/* atomic64_andnot */
/* atomic64_fetch_xor_relaxed */
#ifndef atomic64_fetch_xor_relaxed
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor
#define atomic64_fetch_xor_acquire atomic64_fetch_xor
#define atomic64_fetch_xor_release atomic64_fetch_xor
#else
/* atomic64_fetch_xor_relaxed */
#ifndef atomic64_fetch_xor_acquire
#define atomic64_fetch_xor_acquire(...) \
__atomic_op_acquire(atomic64_fetch_xor, __VA_ARGS__)
#endif
#ifndef atomic64_fetch_xor_release
#define atomic64_fetch_xor_release(...) \
__atomic_op_release(atomic64_fetch_xor, __VA_ARGS__)
#endif
#ifndef atomic64_fetch_xor
#define atomic64_fetch_xor(...) \
__atomic_op_fence(atomic64_fetch_xor, __VA_ARGS__)
#endif
#endif
/* atomic64_fetch_xor_relaxed */
/* atomic64_xchg_relaxed */
#ifndef atomic64_xchg_relaxed
#define atomic64_xchg_relaxed atomic64_xchg
#define atomic64_xchg_acquire atomic64_xchg
#define atomic64_xchg_release atomic64_xchg
#else
/* atomic64_xchg_relaxed */
#ifndef atomic64_xchg_acquire
#define atomic64_xchg_acquire(...) \
__atomic_op_acquire(atomic64_xchg, __VA_ARGS__)
#endif
#ifndef atomic64_xchg_release
#define atomic64_xchg_release(...) \
__atomic_op_release(atomic64_xchg, __VA_ARGS__)
#endif
#ifndef atomic64_xchg
#define atomic64_xchg(...) \
__atomic_op_fence(atomic64_xchg, __VA_ARGS__)
#endif
#endif
/* atomic64_xchg_relaxed */
/* atomic64_cmpxchg_relaxed */
#ifndef atomic64_cmpxchg_relaxed
#define atomic64_cmpxchg_relaxed atomic64_cmpxchg
#define atomic64_cmpxchg_acquire atomic64_cmpxchg
#define atomic64_cmpxchg_release atomic64_cmpxchg
#else
/* atomic64_cmpxchg_relaxed */
#ifndef atomic64_cmpxchg_acquire
#define atomic64_cmpxchg_acquire(...) \
__atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__)
#endif
#ifndef atomic64_cmpxchg_release
#define atomic64_cmpxchg_release(...) \
__atomic_op_release(atomic64_cmpxchg, __VA_ARGS__)
#endif
#ifndef atomic64_cmpxchg
#define atomic64_cmpxchg(...) \
__atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__)
#endif
#endif
/* atomic64_cmpxchg_relaxed */
#ifndef atomic64_andnot
static inline void atomic64_andnot(long long i, atomic64_t *v)
{
	atomic64_and(~i, v);
}

static inline long long atomic64_fetch_andnot(long long i, atomic64_t *v)
{
	return atomic64_fetch_and(~i, v);
}

static inline long long atomic64_fetch_andnot_relaxed(long long i, atomic64_t *v)
{
	return atomic64_fetch_and_relaxed(~i, v);
}

static inline long long atomic64_fetch_andnot_acquire(long long i, atomic64_t *v)
{
	return atomic64_fetch_and_acquire(~i, v);
}

static inline long long atomic64_fetch_andnot_release(long long i, atomic64_t *v)
{
	return atomic64_fetch_and_release(~i, v);
}
#endif
#include <asm-generic/atomic-long.h>
...
...
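Worth noting: the generic atomic_fetch_or() kept above builds a fetch-style operation out of atomic_cmpxchg() plus a retry loop. The same pattern would serve as a fallback for any other fetch operation on an architecture that only provides cmpxchg; a sketch for a hypothetical fetch-xor (my_atomic_fetch_xor is not a kernel symbol):

static inline int my_atomic_fetch_xor(int mask, atomic_t *p)
{
	int old, val = atomic_read(p);

	for (;;) {
		old = atomic_cmpxchg(p, val, val ^ mask);
		if (old == val)		/* no concurrent update, done */
			break;
		val = old;		/* raced, retry against the fresh value */
	}

	return old;			/* value observed before the XOR */
}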
kernel/locking/qrwlock.c
...
...
@@ -93,7 +93,7 @@ void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts)
* that accesses can't leak upwards out of our subsequent critical
* section in the case that the lock is currently held for write.
*/
-cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts) - _QR_BIAS;
+cnts = atomic_fetch_add_acquire(_QR_BIAS, &lock->cnts);
rspin_until_writer_unlock(lock, cnts);
/*
...
...
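The qrwlock change above rests on a simple identity: atomic_fetch_add_acquire() returns the counter value from before the addition, so the old "add_return ... - _QR_BIAS" correction becomes unnecessary. Illustrative helpers only, not taken from the patch:

static inline int old_count_via_add_return(atomic_t *v, int bias)
{
	return atomic_add_return_acquire(bias, v) - bias;	/* new value, minus the bias */
}

static inline int old_count_via_fetch_add(atomic_t *v, int bias)
{
	return atomic_fetch_add_acquire(bias, v);		/* old value directly */
}

Both helpers return the same number; the second saves the subtraction and reads more directly.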
kernel/locking/qspinlock_paravirt.h
...
...
@@ -112,12 +112,12 @@ static __always_inline int trylock_clear_pending(struct qspinlock *lock)
#else /* _Q_PENDING_BITS == 8 */
static __always_inline void set_pending(struct qspinlock *lock)
{
-	atomic_set_mask(_Q_PENDING_VAL, &lock->val);
+	atomic_or(_Q_PENDING_VAL, &lock->val);
}

static __always_inline void clear_pending(struct qspinlock *lock)
{
-	atomic_clear_mask(_Q_PENDING_VAL, &lock->val);
+	atomic_andnot(_Q_PENDING_VAL, &lock->val);
}

static __always_inline int trylock_clear_pending(struct qspinlock *lock)
...
...
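The qspinlock_paravirt.h hunk above swaps the deprecated mask helpers for their direct bitwise counterparts; the operations are equivalent. A sketch of the mapping (the function names here are illustrative only):

static inline void set_bits_sketch(atomic_t *v, unsigned int mask)
{
	atomic_or(mask, v);		/* was atomic_set_mask(mask, v): *v |= mask */
}

static inline void clear_bits_sketch(atomic_t *v, unsigned int mask)
{
	atomic_andnot(mask, v);		/* was atomic_clear_mask(mask, v): *v &= ~mask */
}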
kernel/locking/rwsem-xadd.c
...
...
@@ -153,7 +153,7 @@ __rwsem_mark_wake(struct rw_semaphore *sem,
if (wake_type != RWSEM_WAKE_READ_OWNED) {
	adjustment = RWSEM_ACTIVE_READ_BIAS;
 try_reader_grant:
-	oldcount = atomic_long_add_return(adjustment, &sem->count) - adjustment;
+	oldcount = atomic_long_fetch_add(adjustment, &sem->count);
	if (unlikely(oldcount < RWSEM_WAITING_BIAS)) {
/*
...
...
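The rwsem change above applies the same idea as the qrwlock one: atomic_long_fetch_add() already hands back the pre-add count, so the "- adjustment" correction disappears. As background (an assumption about asm-generic/atomic-long.h, which this diff does not show), the atomic_long_* helpers simply route to the 32-bit or 64-bit atomics depending on the word size, roughly:

#if BITS_PER_LONG == 64
typedef atomic64_t atomic_long_t;
#define atomic_long_fetch_add(i, v)	atomic64_fetch_add((i), (v))
#else
typedef atomic_t atomic_long_t;
#define atomic_long_fetch_add(i, v)	atomic_fetch_add((i), (v))
#endif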
lib/atomic64.c
...
...
@@ -96,17 +96,41 @@ long long atomic64_##op##_return(long long a, atomic64_t *v) \
} \
EXPORT_SYMBOL(atomic64_##op##_return);
#define ATOMIC64_FETCH_OP(op, c_op) \
long long atomic64_fetch_##op(long long a, atomic64_t *v) \
{ \
unsigned long flags; \
raw_spinlock_t *lock = lock_addr(v); \
long long val; \
\
raw_spin_lock_irqsave(lock, flags); \
val = v->counter; \
v->counter c_op a; \
raw_spin_unlock_irqrestore(lock, flags); \
return val; \
} \
EXPORT_SYMBOL(atomic64_fetch_##op);
#define ATOMIC64_OPS(op, c_op) \
ATOMIC64_OP(op, c_op) \
-ATOMIC64_OP_RETURN(op, c_op)
+ATOMIC64_OP_RETURN(op, c_op) \
+ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)
-ATOMIC64_OP(and, &=)
-ATOMIC64_OP(or, |=)
-ATOMIC64_OP(xor, ^=)
#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op) \
ATOMIC64_OP(op, c_op) \
ATOMIC64_OP_RETURN(op, c_op) \
ATOMIC64_FETCH_OP(op, c_op)
ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
...
...
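For clarity, here is roughly what one instantiation of the new ATOMIC64_FETCH_OP() template above expands to, written out by hand with EXPORT_SYMBOL omitted: the generic library takes the hashed per-counter spinlock, remembers the old value, applies the operation and hands the old value back.

long long atomic64_fetch_add(long long a, atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);	/* hashed per-counter spinlock */
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;			/* value before the operation */
	v->counter += a;			/* apply the "+=" body */
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}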
lib/atomic64_test.c
...
...
@@ -53,11 +53,25 @@ do { \
BUG_ON(atomic##bit##_read(&v) != r); \
} while (0)
#define TEST_FETCH(bit, op, c_op, val) \
do { \
atomic##bit##_set(&v, v0); \
r = v0; \
r c_op val; \
BUG_ON(atomic##bit##_##op(val, &v) != v0); \
BUG_ON(atomic##bit##_read(&v) != r); \
} while (0)
#define RETURN_FAMILY_TEST(bit, op, c_op, val) \
do { \
FAMILY_TEST(TEST_RETURN, bit, op, c_op, val); \
} while (0)
#define FETCH_FAMILY_TEST(bit, op, c_op, val) \
do { \
FAMILY_TEST(TEST_FETCH, bit, op, c_op, val); \
} while (0)
#define TEST_ARGS(bit, op, init, ret, expect, args...) \
do { \
atomic##bit##_set(&v, init); \
...
...
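As a reading aid for the hunk below: FETCH_FAMILY_TEST(, fetch_add, +=, onestwos) runs TEST_FETCH over atomic_fetch_add() and its _relaxed/_acquire/_release variants; for the plain variant the macro expands to roughly the following.

do {
	atomic_set(&v, v0);				/* start from the reference value */
	r = v0;
	r += onestwos;					/* expected value after the op */
	BUG_ON(atomic_fetch_add(onestwos, &v) != v0);	/* fetch op returns the old value */
	BUG_ON(atomic_read(&v) != r);			/* counter now holds the new value */
} while (0);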
@@ -114,6 +128,16 @@ static __init void test_atomic(void)
RETURN_FAMILY_TEST(, sub_return, -=, onestwos);
RETURN_FAMILY_TEST(, sub_return, -=, -one);

FETCH_FAMILY_TEST(, fetch_add, +=, onestwos);
FETCH_FAMILY_TEST(, fetch_add, +=, -one);
FETCH_FAMILY_TEST(, fetch_sub, -=, onestwos);
FETCH_FAMILY_TEST(, fetch_sub, -=, -one);

FETCH_FAMILY_TEST(, fetch_or, |=, v1);
FETCH_FAMILY_TEST(, fetch_and, &=, v1);
FETCH_FAMILY_TEST(, fetch_andnot, &= ~, v1);
FETCH_FAMILY_TEST(, fetch_xor, ^=, v1);

INC_RETURN_FAMILY_TEST(, v0);
DEC_RETURN_FAMILY_TEST(, v0);
...
...
@@ -154,6 +178,16 @@ static __init void test_atomic64(void)
RETURN_FAMILY_TEST(64, sub_return, -=, onestwos);
RETURN_FAMILY_TEST(64, sub_return, -=, -one);

FETCH_FAMILY_TEST(64, fetch_add, +=, onestwos);
FETCH_FAMILY_TEST(64, fetch_add, +=, -one);
FETCH_FAMILY_TEST(64, fetch_sub, -=, onestwos);
FETCH_FAMILY_TEST(64, fetch_sub, -=, -one);

FETCH_FAMILY_TEST(64, fetch_or, |=, v1);
FETCH_FAMILY_TEST(64, fetch_and, &=, v1);
FETCH_FAMILY_TEST(64, fetch_andnot, &= ~, v1);
FETCH_FAMILY_TEST(64, fetch_xor, ^=, v1);

INIT(v0);
atomic64_inc(&v);
r += one;
...
...