Commit 90612373 authored by Leonardo Bras, committed by Palmer Dabbelt

riscv/atomic.h: Deduplicate arch_atomic.*

Some functions use mostly the same asm for their 32-bit and 64-bit versions.

Introduce macros that take the operand-size suffix ("w" or "d") as a parameter, so each asm body is written once and shared by both widths, avoiding the code duplication.

(This did not cause any change in the generated asm.)
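As an illustrative sketch (not part of the patch itself), the 32-bit and 64-bit callers now differ only in the suffix string they pass; C string-literal concatenation splices it into the lr/sc mnemonics of the shared asm template:

	/* "w" yields lr.w / sc.w.rl, "d" yields lr.d / sc.d.rl */
	_arch_atomic_fetch_add_unless(prev, rc, v->counter, a, u, "w");
	_arch_atomic_fetch_add_unless(prev, rc, v->counter, a, u, "d");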
Signed-off-by: Leonardo Bras <leobras@redhat.com>
Reviewed-by: Guo Ren <guoren@kernel.org>
Reviewed-by: Andrea Parri <parri.andrea@gmail.com>
Tested-by: Guo Ren <guoren@kernel.org>
Link: https://lore.kernel.org/r/20240103163203.72768-5-leobras@redhat.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
parent 07a0a41c
@@ -196,22 +196,28 @@ ATOMIC_OPS(xor, xor, i)
 #undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 
+#define _arch_atomic_fetch_add_unless(_prev, _rc, counter, _a, _u, sfx) \
+({ \
+	__asm__ __volatile__ ( \
+		"0:	lr." sfx " %[p], %[c]\n" \
+		"	beq %[p], %[u], 1f\n" \
+		"	add %[rc], %[p], %[a]\n" \
+		"	sc." sfx ".rl %[rc], %[rc], %[c]\n" \
+		"	bnez %[rc], 0b\n" \
+		"	fence rw, rw\n" \
+		"1:\n" \
+		: [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter) \
+		: [a]"r" (_a), [u]"r" (_u) \
+		: "memory"); \
+})
+
 /* This is required to provide a full barrier on success. */
 static __always_inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int prev, rc;
 
-	__asm__ __volatile__ (
-		"0:	lr.w %[p], %[c]\n"
-		"	beq %[p], %[u], 1f\n"
-		"	add %[rc], %[p], %[a]\n"
-		"	sc.w.rl %[rc], %[rc], %[c]\n"
-		"	bnez %[rc], 0b\n"
-		"	fence rw, rw\n"
-		"1:\n"
-		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
-		: [a]"r" (a), [u]"r" (u)
-		: "memory");
+	_arch_atomic_fetch_add_unless(prev, rc, v->counter, a, u, "w");
+
 	return prev;
 }
 #define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
@@ -222,77 +228,86 @@ static __always_inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a,
 	s64 prev;
 	long rc;
 
-	__asm__ __volatile__ (
-		"0:	lr.d %[p], %[c]\n"
-		"	beq %[p], %[u], 1f\n"
-		"	add %[rc], %[p], %[a]\n"
-		"	sc.d.rl %[rc], %[rc], %[c]\n"
-		"	bnez %[rc], 0b\n"
-		"	fence rw, rw\n"
-		"1:\n"
-		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
-		: [a]"r" (a), [u]"r" (u)
-		: "memory");
+	_arch_atomic_fetch_add_unless(prev, rc, v->counter, a, u, "d");
+
 	return prev;
 }
 #define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
 #endif
 
+#define _arch_atomic_inc_unless_negative(_prev, _rc, counter, sfx) \
+({ \
+	__asm__ __volatile__ ( \
+		"0:	lr." sfx " %[p], %[c]\n" \
+		"	bltz %[p], 1f\n" \
+		"	addi %[rc], %[p], 1\n" \
+		"	sc." sfx ".rl %[rc], %[rc], %[c]\n" \
+		"	bnez %[rc], 0b\n" \
+		"	fence rw, rw\n" \
+		"1:\n" \
+		: [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter) \
+		: \
+		: "memory"); \
+})
+
 static __always_inline bool arch_atomic_inc_unless_negative(atomic_t *v)
 {
 	int prev, rc;
 
-	__asm__ __volatile__ (
-		"0:	lr.w %[p], %[c]\n"
-		"	bltz %[p], 1f\n"
-		"	addi %[rc], %[p], 1\n"
-		"	sc.w.rl %[rc], %[rc], %[c]\n"
-		"	bnez %[rc], 0b\n"
-		"	fence rw, rw\n"
-		"1:\n"
-		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
-		:
-		: "memory");
+	_arch_atomic_inc_unless_negative(prev, rc, v->counter, "w");
+
 	return !(prev < 0);
 }
 #define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative
 
+#define _arch_atomic_dec_unless_positive(_prev, _rc, counter, sfx) \
+({ \
+	__asm__ __volatile__ ( \
+		"0:	lr." sfx " %[p], %[c]\n" \
+		"	bgtz %[p], 1f\n" \
+		"	addi %[rc], %[p], -1\n" \
+		"	sc." sfx ".rl %[rc], %[rc], %[c]\n" \
+		"	bnez %[rc], 0b\n" \
+		"	fence rw, rw\n" \
+		"1:\n" \
+		: [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter) \
+		: \
+		: "memory"); \
+})
+
 static __always_inline bool arch_atomic_dec_unless_positive(atomic_t *v)
 {
 	int prev, rc;
 
-	__asm__ __volatile__ (
-		"0:	lr.w %[p], %[c]\n"
-		"	bgtz %[p], 1f\n"
-		"	addi %[rc], %[p], -1\n"
-		"	sc.w.rl %[rc], %[rc], %[c]\n"
-		"	bnez %[rc], 0b\n"
-		"	fence rw, rw\n"
-		"1:\n"
-		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
-		:
-		: "memory");
+	_arch_atomic_dec_unless_positive(prev, rc, v->counter, "w");
+
 	return !(prev > 0);
 }
 #define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive
 
+#define _arch_atomic_dec_if_positive(_prev, _rc, counter, sfx) \
+({ \
+	__asm__ __volatile__ ( \
+		"0:	lr." sfx " %[p], %[c]\n" \
+		"	addi %[rc], %[p], -1\n" \
+		"	bltz %[rc], 1f\n" \
+		"	sc." sfx ".rl %[rc], %[rc], %[c]\n" \
+		"	bnez %[rc], 0b\n" \
+		"	fence rw, rw\n" \
+		"1:\n" \
+		: [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter) \
+		: \
+		: "memory"); \
+})
+
 static __always_inline int arch_atomic_dec_if_positive(atomic_t *v)
 {
 	int prev, rc;
 
-	__asm__ __volatile__ (
-		"0:	lr.w %[p], %[c]\n"
-		"	addi %[rc], %[p], -1\n"
-		"	bltz %[rc], 1f\n"
-		"	sc.w.rl %[rc], %[rc], %[c]\n"
-		"	bnez %[rc], 0b\n"
-		"	fence rw, rw\n"
-		"1:\n"
-		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
-		:
-		: "memory");
+	_arch_atomic_dec_if_positive(prev, rc, v->counter, "w");
+
 	return prev - 1;
 }
@@ -304,17 +319,8 @@ static __always_inline bool arch_atomic64_inc_unless_negative(atomic64_t *v)
 	s64 prev;
 	long rc;
 
-	__asm__ __volatile__ (
-		"0:	lr.d %[p], %[c]\n"
-		"	bltz %[p], 1f\n"
-		"	addi %[rc], %[p], 1\n"
-		"	sc.d.rl %[rc], %[rc], %[c]\n"
-		"	bnez %[rc], 0b\n"
-		"	fence rw, rw\n"
-		"1:\n"
-		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
-		:
-		: "memory");
+	_arch_atomic_inc_unless_negative(prev, rc, v->counter, "d");
 
 	return !(prev < 0);
 }
@@ -325,17 +331,8 @@ static __always_inline bool arch_atomic64_dec_unless_positive(atomic64_t *v)
 	s64 prev;
 	long rc;
 
-	__asm__ __volatile__ (
-		"0:	lr.d %[p], %[c]\n"
-		"	bgtz %[p], 1f\n"
-		"	addi %[rc], %[p], -1\n"
-		"	sc.d.rl %[rc], %[rc], %[c]\n"
-		"	bnez %[rc], 0b\n"
-		"	fence rw, rw\n"
-		"1:\n"
-		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
-		:
-		: "memory");
+	_arch_atomic_dec_unless_positive(prev, rc, v->counter, "d");
 
 	return !(prev > 0);
 }
@@ -346,17 +343,8 @@ static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
 	s64 prev;
 	long rc;
 
-	__asm__ __volatile__ (
-		"0:	lr.d %[p], %[c]\n"
-		"	addi %[rc], %[p], -1\n"
-		"	bltz %[rc], 1f\n"
-		"	sc.d.rl %[rc], %[rc], %[c]\n"
-		"	bnez %[rc], 0b\n"
-		"	fence rw, rw\n"
-		"1:\n"
-		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
-		:
-		: "memory");
+	_arch_atomic_dec_if_positive(prev, rc, v->counter, "d");
 
 	return prev - 1;
 }