Commit 24b44a66 authored by Will Deacon, committed by Russell King

ARM: 5889/1: Add atomic64 routines for ARMv6k and above.

In preparation for perf-events support, ARM needs to support atomic64_t
operations. v6k and above support the ldrexd and strexd instructions to
do just that.

This patch adds atomic64 support to the ARM architecture. v6k and above
make use of the new instructions, whilst older cores fall back on the generic
solution using spinlocks. If and when v7-M cores are supported by Linux,
they will need to fall back on the spinlock implementation too.
Signed-off-by: Will Deacon <will.deacon@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent a9221de6
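
For context, a minimal usage sketch of the interface this patch adds; the
counter and function names here are illustrative, not part of the patch:

	#include <asm/atomic.h>

	static atomic64_t nr_events = ATOMIC64_INIT(0);

	/* lock-free ldrexd/strexd on v6k+, spinlock-backed elsewhere */
	static void count_event(void)
	{
		atomic64_inc(&nr_events);
	}

	static u64 total_events(void)
	{
		return atomic64_read(&nr_events);
	}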
arch/arm/Kconfig
@@ -12,6 +12,7 @@ config ARM
 	select HAVE_IDE
 	select RTC_LIB
 	select SYS_SUPPORTS_APM_EMULATION
+	select GENERIC_ATOMIC64 if (!CPU_32v6K)
 	select HAVE_OPROFILE
 	select HAVE_ARCH_KGDB
 	select HAVE_KPROBES if (!XIP_KERNEL)
arch/arm/include/asm/atomic.h
@@ -235,6 +235,234 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
#define smp_mb__before_atomic_inc() smp_mb()
#define smp_mb__after_atomic_inc() smp_mb()

#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
u64 __aligned(8) counter;
} atomic64_t;
#define ATOMIC64_INIT(i) { (i) }
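
/*
 * ldrexd/strexd operate on a doubleword whose address must be 64-bit
 * aligned, hence the explicit __aligned(8) on the counter above.
 */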
static inline u64 atomic64_read(atomic64_t *v)
{
u64 result;
__asm__ __volatile__("@ atomic64_read\n"
" ldrexd %0, %H0, [%1]"
: "=&r" (result)
: "r" (&v->counter)
);
return result;
}
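
/*
 * A plain strd is not guaranteed to be single-copy atomic here, so the
 * 64-bit store is performed with an ldrexd/strexd loop that retries
 * until the exclusive store succeeds.
 */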
static inline void atomic64_set(atomic64_t *v, u64 i)
{
u64 tmp;
__asm__ __volatile__("@ atomic64_set\n"
"1: ldrexd %0, %H0, [%1]\n"
" strexd %0, %2, %H2, [%1]\n"
" teq %0, #0\n"
" bne 1b"
: "=&r" (tmp)
: "r" (&v->counter), "r" (i)
: "cc");
}
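
/*
 * The %H operand modifier selects the high register of a 64-bit
 * register pair; adds/adc (and subs/sbc in the sub variants below)
 * propagate the carry/borrow between the two 32-bit halves.
 */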
static inline void atomic64_add(u64 i, atomic64_t *v)
{
u64 result;
unsigned long tmp;
__asm__ __volatile__("@ atomic64_add\n"
"1: ldrexd %0, %H0, [%2]\n"
" adds %0, %0, %3\n"
" adc %H0, %H0, %H3\n"
" strexd %1, %0, %H0, [%2]\n"
" teq %1, #0\n"
" bne 1b"
: "=&r" (result), "=&r" (tmp)
: "r" (&v->counter), "r" (i)
: "cc");
}
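
/*
 * The value-returning variants are fully ordered: an smp_mb() is
 * issued on both sides of the ldrexd/strexd loop, whereas the void
 * operations (atomic64_add, atomic64_sub) give no ordering guarantees.
 */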
static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
{
u64 result;
unsigned long tmp;
smp_mb();
__asm__ __volatile__("@ atomic64_add_return\n"
"1: ldrexd %0, %H0, [%2]\n"
" adds %0, %0, %3\n"
" adc %H0, %H0, %H3\n"
" strexd %1, %0, %H0, [%2]\n"
" teq %1, #0\n"
" bne 1b"
: "=&r" (result), "=&r" (tmp)
: "r" (&v->counter), "r" (i)
: "cc");
smp_mb();
return result;
}

static inline void atomic64_sub(u64 i, atomic64_t *v)
{
u64 result;
unsigned long tmp;
__asm__ __volatile__("@ atomic64_sub\n"
"1: ldrexd %0, %H0, [%2]\n"
" subs %0, %0, %3\n"
" sbc %H0, %H0, %H3\n"
" strexd %1, %0, %H0, [%2]\n"
" teq %1, #0\n"
" bne 1b"
: "=&r" (result), "=&r" (tmp)
: "r" (&v->counter), "r" (i)
: "cc");
}

static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
{
u64 result;
unsigned long tmp;
smp_mb();
__asm__ __volatile__("@ atomic64_sub_return\n"
"1: ldrexd %0, %H0, [%2]\n"
" subs %0, %0, %3\n"
" sbc %H0, %H0, %H3\n"
" strexd %1, %0, %H0, [%2]\n"
" teq %1, #0\n"
" bne 1b"
: "=&r" (result), "=&r" (tmp)
: "r" (&v->counter), "r" (i)
: "cc");
smp_mb();
return result;
}
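
/*
 * res is preloaded with 0 and strexdeq executes only when both words
 * match 'old', so the do/while loop retries only when a store was
 * attempted and lost its exclusive reservation.
 */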
static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
{
u64 oldval;
unsigned long res;
smp_mb();
do {
__asm__ __volatile__("@ atomic64_cmpxchg\n"
"ldrexd %1, %H1, [%2]\n"
"mov %0, #0\n"
"teq %1, %3\n"
"teqeq %H1, %H3\n"
"strexdeq %0, %4, %H4, [%2]"
: "=&r" (res), "=&r" (oldval)
: "r" (&ptr->counter), "r" (old), "r" (new)
: "cc");
} while (res);
smp_mb();
return oldval;
}

static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
{
u64 result;
unsigned long tmp;
smp_mb();
__asm__ __volatile__("@ atomic64_xchg\n"
"1: ldrexd %0, %H0, [%2]\n"
" strexd %1, %3, %H3, [%2]\n"
" teq %1, #0\n"
" bne 1b"
: "=&r" (result), "=&r" (tmp)
: "r" (&ptr->counter), "r" (new)
: "cc");
smp_mb();
return result;
}
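
/*
 * The decrement is computed in registers and the store is skipped
 * (bmi) when the result goes negative; the decremented value is
 * returned either way.
 */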
static inline u64 atomic64_dec_if_positive(atomic64_t *v)
{
u64 result;
unsigned long tmp;
smp_mb();
__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1: ldrexd %0, %H0, [%2]\n"
" subs %0, %0, #1\n"
" sbc %H0, %H0, #0\n"
" teq %H0, #0\n"
" bmi 2f\n"
" strexd %1, %0, %H0, [%2]\n"
" teq %1, #0\n"
" bne 1b\n"
"2:"
: "=&r" (result), "=&r" (tmp)
: "r" (&v->counter)
: "cc");
smp_mb();
return result;
}
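
/*
 * ret is preset to 1 and conditionally cleared inside the asm, so it
 * must use a read-write ("+r") constraint rather than a write-only one.
 */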
static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
{
u64 val;
unsigned long tmp;
int ret = 1;
smp_mb();
__asm__ __volatile__("@ atomic64_add_unless\n"
"1: ldrexd %0, %H0, [%3]\n"
" teq %0, %4\n"
" teqeq %H0, %H4\n"
" moveq %1, #0\n"
" beq 2f\n"
" adds %0, %0, %5\n"
" adc %H0, %H0, %H5\n"
" strexd %2, %0, %H0, [%3]\n"
" teq %2, #0\n"
" bne 1b\n"
"2:"
: "=&r" (val), "=&r" (ret), "=&r" (tmp)
: "r" (&v->counter), "r" (u), "r" (a)
: "cc");
if (ret)
smp_mb();
return ret;
}
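
/* The remaining 64-bit operations are derived from the primitives above. */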
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v) atomic64_add(1LL, (v))
#define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v) atomic64_sub(1LL, (v))
#define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)

#else /* !CONFIG_GENERIC_ATOMIC64 */
#include <asm-generic/atomic64.h>
#endif

#include <asm-generic/atomic-long.h>
#endif
#endif
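
Where ldrexd/strexd are unavailable, GENERIC_ATOMIC64 selects the kernel's
spinlock-based fallback (lib/atomic64.c). A rough sketch of the idea, with
the simplification of a single global lock where the in-tree code hashes
across a small array of locks:

	/* illustrative sketch, not the in-tree implementation */
	static DEFINE_SPINLOCK(atomic64_lock);

	u64 generic_atomic64_add_return(u64 a, atomic64_t *v)
	{
		unsigned long flags;
		u64 val;

		spin_lock_irqsave(&atomic64_lock, flags);
		val = (v->counter += a);
		spin_unlock_irqrestore(&atomic64_lock, flags);
		return val;
	}

Every operation, including atomic64_read, must take the lock, which is why
the ldrexd/strexd version above is preferred wherever the hardware allows.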