Commit f7c34874 authored by Max Filippov

xtensa: add exclusive atomics support

Implement atomic primitives using the exclusive access opcodes available in
recent xtensa cores.
Since l32ex/s32ex don't provide any memory ordering guarantees, don't define
__smp_mb__before_atomic/__smp_mb__after_atomic, so that the generic fallbacks,
which use memw, are picked up instead.
Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
parent d065fcf1
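
Every primitive added below follows the same exclusive-access retry loop:
l32ex loads the word and opens an exclusive monitor on it, s32ex stores only
if the monitor is still held, and getex retrieves the result flag; the loops
retry while it reads back zero (store not committed). A minimal C sketch of
that shape, using hypothetical load_exclusive()/store_exclusive() helpers to
stand in for the opcodes (the helpers are not part of the patch):

/* Hypothetical helpers modeling the opcodes; declarations only. */
int load_exclusive(int *addr);              /* l32ex: load, open monitor */
int store_exclusive(int *addr, int value);  /* s32ex + getex: 1 = committed */

/* Sketch of the retry loop every primitive below implements in inline asm. */
static inline void atomic_add_sketch(int i, int *v)
{
        int old;

        do {
                old = load_exclusive(v);
        } while (!store_exclusive(v, old + i));
}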
@@ -56,7 +56,67 @@
  */
 #define atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
 
-#if XCHAL_HAVE_S32C1I
+#if XCHAL_HAVE_EXCLUSIVE
+#define ATOMIC_OP(op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+        unsigned long tmp; \
+        int result; \
+ \
+        __asm__ __volatile__( \
+                "1:     l32ex   %1, %3\n" \
+                "       " #op " %0, %1, %2\n" \
+                "       s32ex   %0, %3\n" \
+                "       getex   %0\n" \
+                "       beqz    %0, 1b\n" \
+                : "=&a" (result), "=&a" (tmp) \
+                : "a" (i), "a" (v) \
+                : "memory" \
+                ); \
+} \
+
+#define ATOMIC_OP_RETURN(op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+        unsigned long tmp; \
+        int result; \
+ \
+        __asm__ __volatile__( \
+                "1:     l32ex   %1, %3\n" \
+                "       " #op " %0, %1, %2\n" \
+                "       s32ex   %0, %3\n" \
+                "       getex   %0\n" \
+                "       beqz    %0, 1b\n" \
+                "       " #op " %0, %1, %2\n" \
+                : "=&a" (result), "=&a" (tmp) \
+                : "a" (i), "a" (v) \
+                : "memory" \
+                ); \
+ \
+        return result; \
+}
+
+#define ATOMIC_FETCH_OP(op) \
+static inline int atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+        unsigned long tmp; \
+        int result; \
+ \
+        __asm__ __volatile__( \
+                "1:     l32ex   %1, %3\n" \
+                "       " #op " %0, %1, %2\n" \
+                "       s32ex   %0, %3\n" \
+                "       getex   %0\n" \
+                "       beqz    %0, 1b\n" \
+                : "=&a" (result), "=&a" (tmp) \
+                : "a" (i), "a" (v) \
+                : "memory" \
+                ); \
+ \
+        return tmp; \
+}
+
+#elif XCHAL_HAVE_S32C1I
 #define ATOMIC_OP(op) \
 static inline void atomic_##op(int i, atomic_t * v) \
 { \
......
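
A subtlety in ATOMIC_OP_RETURN above: #op appears twice. s32ex takes the new
value in %0 and getex then overwrites %0 with the success flag, so by the time
the loop exits the computed result is gone; the trailing #op rebuilds it from
the old value still held in %1. The same register pressure is why
ATOMIC_FETCH_OP returns tmp (the loaded old value) rather than result. In C
terms, a sketch with the same hypothetical helpers as above:

static inline int atomic_add_return_sketch(int i, int *v)
{
        int old;

        do {
                old = load_exclusive(v);         /* old survives (%1) */
        } while (!store_exclusive(v, old + i));  /* status clobbers %0 */

        return old + i;                          /* the second #op */
}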
@@ -9,12 +9,16 @@
 #ifndef _XTENSA_SYSTEM_H
 #define _XTENSA_SYSTEM_H
 
+#include <asm/core.h>
+
 #define mb()  ({ __asm__ __volatile__("memw" : : : "memory"); })
 #define rmb() barrier()
 #define wmb() mb()
 
+#if XCHAL_HAVE_S32C1I
 #define __smp_mb__before_atomic() barrier()
 #define __smp_mb__after_atomic() barrier()
+#endif
 
 #include <asm-generic/barrier.h>
......
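
This is where the ordering point from the commit message lands: the
barrier()-only hooks are now provided only for the s32c1i case, which the
existing code already treated as not needing memw. When the hooks are left
undefined, asm-generic/barrier.h supplies defaults that expand to a full
smp_mb(), i.e. memw via mb() above. The generic fallback looks roughly like
this (paraphrased from asm-generic/barrier.h; treat as approximate):

#ifndef __smp_mb__before_atomic
#define __smp_mb__before_atomic()       __smp_mb()
#endif

#ifndef __smp_mb__after_atomic
#define __smp_mb__after_atomic()        __smp_mb()
#endif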
@@ -96,7 +96,126 @@ static inline unsigned long __fls(unsigned long word)
 #include <asm-generic/bitops/fls64.h>
 
-#if XCHAL_HAVE_S32C1I
+#if XCHAL_HAVE_EXCLUSIVE
+
+static inline void set_bit(unsigned int bit, volatile unsigned long *p)
+{
+        unsigned long tmp;
+        unsigned long mask = 1UL << (bit & 31);
+
+        p += bit >> 5;
+
+        __asm__ __volatile__(
+                "1:     l32ex   %0, %2\n"
+                "       or      %0, %0, %1\n"
+                "       s32ex   %0, %2\n"
+                "       getex   %0\n"
+                "       beqz    %0, 1b\n"
+                : "=&a" (tmp)
+                : "a" (mask), "a" (p)
+                : "memory");
+}
+
+static inline void clear_bit(unsigned int bit, volatile unsigned long *p)
+{
+        unsigned long tmp;
+        unsigned long mask = 1UL << (bit & 31);
+
+        p += bit >> 5;
+
+        __asm__ __volatile__(
+                "1:     l32ex   %0, %2\n"
+                "       and     %0, %0, %1\n"
+                "       s32ex   %0, %2\n"
+                "       getex   %0\n"
+                "       beqz    %0, 1b\n"
+                : "=&a" (tmp)
+                : "a" (~mask), "a" (p)
+                : "memory");
+}
+
+static inline void change_bit(unsigned int bit, volatile unsigned long *p)
+{
+        unsigned long tmp;
+        unsigned long mask = 1UL << (bit & 31);
+
+        p += bit >> 5;
+
+        __asm__ __volatile__(
+                "1:     l32ex   %0, %2\n"
+                "       xor     %0, %0, %1\n"
+                "       s32ex   %0, %2\n"
+                "       getex   %0\n"
+                "       beqz    %0, 1b\n"
+                : "=&a" (tmp)
+                : "a" (mask), "a" (p)
+                : "memory");
+}
+
+static inline int
+test_and_set_bit(unsigned int bit, volatile unsigned long *p)
+{
+        unsigned long tmp, value;
+        unsigned long mask = 1UL << (bit & 31);
+
+        p += bit >> 5;
+
+        __asm__ __volatile__(
+                "1:     l32ex   %1, %3\n"
+                "       or      %0, %1, %2\n"
+                "       s32ex   %0, %3\n"
+                "       getex   %0\n"
+                "       beqz    %0, 1b\n"
+                : "=&a" (tmp), "=&a" (value)
+                : "a" (mask), "a" (p)
+                : "memory");
+
+        return value & mask;
+}
+
+static inline int
+test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
+{
+        unsigned long tmp, value;
+        unsigned long mask = 1UL << (bit & 31);
+
+        p += bit >> 5;
+
+        __asm__ __volatile__(
+                "1:     l32ex   %1, %3\n"
+                "       and     %0, %1, %2\n"
+                "       s32ex   %0, %3\n"
+                "       getex   %0\n"
+                "       beqz    %0, 1b\n"
+                : "=&a" (tmp), "=&a" (value)
+                : "a" (~mask), "a" (p)
+                : "memory");
+
+        return value & mask;
+}
+
+static inline int
+test_and_change_bit(unsigned int bit, volatile unsigned long *p)
+{
+        unsigned long tmp, value;
+        unsigned long mask = 1UL << (bit & 31);
+
+        p += bit >> 5;
+
+        __asm__ __volatile__(
+                "1:     l32ex   %1, %3\n"
+                "       xor     %0, %1, %2\n"
+                "       s32ex   %0, %3\n"
+                "       getex   %0\n"
+                "       beqz    %0, 1b\n"
+                : "=&a" (tmp), "=&a" (value)
+                : "a" (mask), "a" (p)
+                : "memory");
+
+        return value & mask;
+}
+
+#elif XCHAL_HAVE_S32C1I
+
 static inline void set_bit(unsigned int bit, volatile unsigned long *p)
 {
......
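
All of the test_and_* helpers above return the bit's previous state as
value & mask (nonzero rather than strictly 0/1), which is what makes them
suitable for ownership claims. An illustrative caller, not part of the patch:

/* Illustrative only: atomically claim one slot in a bitmap. */
static int claim_slot(volatile unsigned long *bitmap, unsigned int slot)
{
        if (test_and_set_bit(slot, bitmap))
                return -1;      /* bit was already set: lost the race */
        return 0;               /* we set it: the slot is ours */
}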
@@ -23,7 +23,24 @@
 static inline unsigned long
 __cmpxchg_u32(volatile int *p, int old, int new)
 {
-#if XCHAL_HAVE_S32C1I
+#if XCHAL_HAVE_EXCLUSIVE
+        unsigned long tmp, result;
+
+        __asm__ __volatile__(
+                "1:     l32ex   %0, %3\n"
+                "       bne     %0, %4, 2f\n"
+                "       mov     %1, %2\n"
+                "       s32ex   %1, %3\n"
+                "       getex   %1\n"
+                "       beqz    %1, 1b\n"
+                "2:\n"
+                : "=&a" (result), "=&a" (tmp)
+                : "a" (new), "a" (p), "a" (old)
+                : "memory"
+                );
+
+        return result;
+#elif XCHAL_HAVE_S32C1I
         __asm__ __volatile__(
                 "       wsr     %2, scompare1\n"
                 "       s32c1i  %0, %1, 0\n"
@@ -108,7 +125,22 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
 static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
 {
-#if XCHAL_HAVE_S32C1I
+#if XCHAL_HAVE_EXCLUSIVE
+        unsigned long tmp, result;
+
+        __asm__ __volatile__(
+                "1:     l32ex   %0, %3\n"
+                "       mov     %1, %2\n"
+                "       s32ex   %1, %3\n"
+                "       getex   %1\n"
+                "       beqz    %1, 1b\n"
+                : "=&a" (result), "=&a" (tmp)
+                : "a" (val), "a" (m)
+                : "memory"
+                );
+
+        return result;
+#elif XCHAL_HAVE_S32C1I
         unsigned long tmp, result;
         __asm__ __volatile__(
                 "1:     l32i    %1, %2, 0\n"
......
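
__cmpxchg_u32 returns the value observed at *p: on a mismatch the exclusive
variant branches straight to 2: with the loaded value in result, and on
success result holds the matching old value. That supports the usual
compare-exchange retry idiom; a usage sketch (illustrative, not from the
patch):

/* Illustrative retry loop: add i to *p but never past cap. */
static void add_capped(volatile int *p, int i, int cap)
{
        int old, new;

        do {
                old = *p;
                new = old + i > cap ? cap : old + i;
        } while (__cmpxchg_u32(p, old, new) != old);
}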
@@ -6,6 +6,10 @@
 #include <variant/core.h>
 
+#ifndef XCHAL_HAVE_EXCLUSIVE
+#define XCHAL_HAVE_EXCLUSIVE 0
+#endif
+
 #ifndef XCHAL_SPANNING_WAY
 #define XCHAL_SPANNING_WAY 0
 #endif
......
@@ -19,6 +19,31 @@
 #include <linux/uaccess.h>
 #include <linux/errno.h>
 
+#if XCHAL_HAVE_EXCLUSIVE
+#define __futex_atomic_op(insn, ret, old, uaddr, arg) \
+        __asm__ __volatile( \
+        "1:     l32ex   %[oldval], %[addr]\n" \
+        insn "\n" \
+        "2:     s32ex   %[newval], %[addr]\n" \
+        "       getex   %[newval]\n" \
+        "       beqz    %[newval], 1b\n" \
+        "       movi    %[newval], 0\n" \
+        "3:\n" \
+        "       .section .fixup,\"ax\"\n" \
+        "       .align 4\n" \
+        "       .literal_position\n" \
+        "5:     movi    %[oldval], 3b\n" \
+        "       movi    %[newval], %[fault]\n" \
+        "       jx      %[oldval]\n" \
+        "       .previous\n" \
+        "       .section __ex_table,\"a\"\n" \
+        "       .long 1b, 5b, 2b, 5b\n" \
+        "       .previous\n" \
+        : [oldval] "=&r" (old), [newval] "=&r" (ret) \
+        : [addr] "r" (uaddr), [oparg] "r" (arg), \
+          [fault] "I" (-EFAULT) \
+        : "memory")
+#elif XCHAL_HAVE_S32C1I
 #define __futex_atomic_op(insn, ret, old, uaddr, arg) \
         __asm__ __volatile( \
         "1:     l32i    %[oldval], %[addr], 0\n" \
@@ -42,11 +67,12 @@
         : [addr] "r" (uaddr), [oparg] "r" (arg), \
           [fault] "I" (-EFAULT) \
         : "memory")
+#endif
 
 static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
                         u32 __user *uaddr)
 {
-#if XCHAL_HAVE_S32C1I
+#if XCHAL_HAVE_S32C1I || XCHAL_HAVE_EXCLUSIVE
         int oldval = 0, ret;
 
         pagefault_disable();
@@ -91,7 +117,7 @@ static inline int
 futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
                               u32 oldval, u32 newval)
 {
-#if XCHAL_HAVE_S32C1I
+#if XCHAL_HAVE_S32C1I || XCHAL_HAVE_EXCLUSIVE
         unsigned long tmp;
         int ret = 0;
@@ -100,9 +126,19 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
         __asm__ __volatile__ (
         "       # futex_atomic_cmpxchg_inatomic\n"
+#if XCHAL_HAVE_EXCLUSIVE
+        "1:     l32ex   %[tmp], %[addr]\n"
+        "       s32i    %[tmp], %[uval], 0\n"
+        "       bne     %[tmp], %[oldval], 2f\n"
+        "       mov     %[tmp], %[newval]\n"
+        "3:     s32ex   %[tmp], %[addr]\n"
+        "       getex   %[tmp]\n"
+        "       beqz    %[tmp], 1b\n"
+#elif XCHAL_HAVE_S32C1I
         "       wsr     %[oldval], scompare1\n"
         "1:     s32c1i  %[newval], %[addr], 0\n"
         "       s32i    %[newval], %[uval], 0\n"
+#endif
         "2:\n"
         "       .section .fixup,\"ax\"\n"
         "       .align 4\n"
@@ -113,6 +149,9 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
         "       .previous\n"
         "       .section __ex_table,\"a\"\n"
         "       .long 1b, 4b\n"
+#if XCHAL_HAVE_EXCLUSIVE
+        "       .long 3b, 4b\n"
+#endif
         "       .previous\n"
         : [ret] "+r" (ret), [newval] "+r" (newval), [tmp] "=&r" (tmp)
         : [addr] "r" (uaddr), [oldval] "r" (oldval), [uval] "r" (uval),
......
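
Because the futex paths touch user memory, every instruction that can fault
needs an __ex_table entry pairing it with fixup code: that is why the
exclusive __futex_atomic_op lists two pairs (1b and 2b both mapping to 5b),
and why futex_atomic_cmpxchg_inatomic gains the extra .long 3b, 4b entry for
the s32ex at label 3. Conceptually each entry is a pair of addresses (a
sketch of the layout, not the kernel's exact representation):

struct ex_table_entry_sketch {
        unsigned long insn;     /* address that may fault: 1b, 2b, 3b */
        unsigned long fixup;    /* where to resume: 4b or 5b */
};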
@@ -650,6 +650,9 @@ c_show(struct seq_file *f, void *slot)
 #endif
 #if XCHAL_HAVE_S32C1I
             "s32c1i "
 #endif
+#if XCHAL_HAVE_EXCLUSIVE
+            "exclusive "
+#endif
             "\n");
......
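
The last hunk only extends the feature list that c_show() prints through
/proc/cpuinfo; on a core configured with both option sets the printed list
would contain, illustratively (surrounding fields depend on the
configuration):

        ... s32c1i exclusive ...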