Commit 0aa7be05 authored by Uros Bizjak, committed by Peter Zijlstra

locking/atomic: Add generic try_cmpxchg64 support

Add generic support for try_cmpxchg64{,_acquire,_release,_relaxed}
and their fallbacks involving cmpxchg64.
Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20220515184205.103089-2-ubizjak@gmail.com
parent 68290613
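The try_cmpxchg() family differs from plain cmpxchg() in its calling convention: it returns a boolean instead of the observed value, and on failure it writes the observed value back through the old-value pointer, so retry loops need neither an explicit re-read nor a second comparison. The userspace sketch below demonstrates that convention with the GCC/Clang __atomic builtins, which follow the same rules; try_cmpxchg64_demo() is a made-up name for this demo, and kernel code would use try_cmpxchg64() from <linux/atomic.h> instead.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for try_cmpxchg64(): returns true on success and,
 * on failure, updates *oldp with the value actually observed at *ptr. */
static bool try_cmpxchg64_demo(uint64_t *ptr, uint64_t *oldp, uint64_t new)
{
	return __atomic_compare_exchange_n(ptr, oldp, new, false,
					   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

int main(void)
{
	uint64_t v = 5, old = 3;

	/* Mismatch: fails and refreshes old to the observed value (5). */
	if (!try_cmpxchg64_demo(&v, &old, 9))
		printf("failed, old refreshed to %llu\n", (unsigned long long)old);

	/* Match: succeeds and stores the new value. */
	if (try_cmpxchg64_demo(&v, &old, 9))
		printf("succeeded, v = %llu\n", (unsigned long long)v);

	return 0;
}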
@@ -147,6 +147,76 @@
#endif /* arch_try_cmpxchg_relaxed */
#ifndef arch_try_cmpxchg64_relaxed
#ifdef arch_try_cmpxchg64
#define arch_try_cmpxchg64_acquire arch_try_cmpxchg64
#define arch_try_cmpxchg64_release arch_try_cmpxchg64
#define arch_try_cmpxchg64_relaxed arch_try_cmpxchg64
#endif /* arch_try_cmpxchg64 */
#ifndef arch_try_cmpxchg64
#define arch_try_cmpxchg64(_ptr, _oldp, _new) \
({ \
typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
___r = arch_cmpxchg64((_ptr), ___o, (_new)); \
if (unlikely(___r != ___o)) \
*___op = ___r; \
likely(___r == ___o); \
})
#endif /* arch_try_cmpxchg64 */
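Written out as a function, the generic fallback above latches the caller's expected value, performs a plain arch_cmpxchg64(), and on a mismatch hands the freshly observed value back through the old-value pointer so the caller can retry without re-reading. A sketch, with arch_cmpxchg64_sketch() as a hypothetical stand-in for the architecture's native operation:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-in for arch_cmpxchg64(): returns the value that
 * was observed at *ptr, storing new only if it matched old. */
static uint64_t arch_cmpxchg64_sketch(uint64_t *ptr, uint64_t old, uint64_t new)
{
	__atomic_compare_exchange_n(ptr, &old, new, false,
				    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	return old;	/* the builtin wrote the observed value into old */
}

/* Function form of what arch_try_cmpxchg64(_ptr, _oldp, _new) expands to. */
static bool arch_try_cmpxchg64_sketch(uint64_t *ptr, uint64_t *oldp, uint64_t new)
{
	uint64_t o = *oldp;
	uint64_t r = arch_cmpxchg64_sketch(ptr, o, new);

	if (r != o)
		*oldp = r;	/* failure: report what was actually there */
	return r == o;
}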
#ifndef arch_try_cmpxchg64_acquire
#define arch_try_cmpxchg64_acquire(_ptr, _oldp, _new) \
({ \
typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
___r = arch_cmpxchg64_acquire((_ptr), ___o, (_new)); \
if (unlikely(___r != ___o)) \
*___op = ___r; \
likely(___r == ___o); \
})
#endif /* arch_try_cmpxchg64_acquire */
#ifndef arch_try_cmpxchg64_release
#define arch_try_cmpxchg64_release(_ptr, _oldp, _new) \
({ \
typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
___r = arch_cmpxchg64_release((_ptr), ___o, (_new)); \
if (unlikely(___r != ___o)) \
*___op = ___r; \
likely(___r == ___o); \
})
#endif /* arch_try_cmpxchg64_release */
#ifndef arch_try_cmpxchg64_relaxed
#define arch_try_cmpxchg64_relaxed(_ptr, _oldp, _new) \
({ \
typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
___r = arch_cmpxchg64_relaxed((_ptr), ___o, (_new)); \
if (unlikely(___r != ___o)) \
*___op = ___r; \
likely(___r == ___o); \
})
#endif /* arch_try_cmpxchg64_relaxed */
#else /* arch_try_cmpxchg64_relaxed */
#ifndef arch_try_cmpxchg64_acquire
#define arch_try_cmpxchg64_acquire(...) \
__atomic_op_acquire(arch_try_cmpxchg64, __VA_ARGS__)
#endif
#ifndef arch_try_cmpxchg64_release
#define arch_try_cmpxchg64_release(...) \
__atomic_op_release(arch_try_cmpxchg64, __VA_ARGS__)
#endif
#ifndef arch_try_cmpxchg64
#define arch_try_cmpxchg64(...) \
__atomic_op_fence(arch_try_cmpxchg64, __VA_ARGS__)
#endif
#endif /* arch_try_cmpxchg64_relaxed */
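The two halves of this block mirror the existing try_cmpxchg logic: if an architecture provides only a fully ordered arch_try_cmpxchg64(), the acquire/release/relaxed names alias it, since stronger ordering than requested is always safe; if it provides arch_try_cmpxchg64_relaxed(), the #else branch derives the ordered variants by wrapping the relaxed one in fences. For reference, the __atomic_op_*() helpers used here are built roughly along these lines in <linux/atomic.h> (a paraphrased sketch, not the verbatim kernel source):

/* Paraphrased sketch of the ordering helpers: each builds a stronger
 * ordering from op##_relaxed() plus the appropriate fence. */
#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	__atomic_acquire_fence();					\
	__ret;								\
})

#define __atomic_op_release(op, args...)				\
({									\
	__atomic_release_fence();					\
	op##_relaxed(args);						\
})

#define __atomic_op_fence(op, args...)					\
({									\
	typeof(op##_relaxed(args)) __ret;				\
	__atomic_pre_full_fence();					\
	__ret = op##_relaxed(args);					\
	__atomic_post_full_fence();					\
	__ret;								\
})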
#ifndef arch_atomic_read_acquire
static __always_inline int
arch_atomic_read_acquire(const atomic_t *v)
@@ -2386,4 +2456,4 @@ arch_atomic64_dec_if_positive(atomic64_t *v)
#endif
#endif /* _LINUX_ATOMIC_FALLBACK_H */
-// 8e2cc06bc0d2c0967d2f8424762bd48555ee40ae
+// b5e87bdd5ede61470c29f7a7e4de781af3770f09
@@ -2006,6 +2006,44 @@ atomic_long_dec_if_positive(atomic_long_t *v)
	arch_try_cmpxchg_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \
})
#define try_cmpxchg64(ptr, oldp, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
typeof(oldp) __ai_oldp = (oldp); \
kcsan_mb(); \
instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
arch_try_cmpxchg64(__ai_ptr, __ai_oldp, __VA_ARGS__); \
})
#define try_cmpxchg64_acquire(ptr, oldp, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
typeof(oldp) __ai_oldp = (oldp); \
instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
arch_try_cmpxchg64_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \
})
#define try_cmpxchg64_release(ptr, oldp, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
typeof(oldp) __ai_oldp = (oldp); \
kcsan_release(); \
instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
arch_try_cmpxchg64_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \
})
#define try_cmpxchg64_relaxed(ptr, oldp, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
typeof(oldp) __ai_oldp = (oldp); \
instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
arch_try_cmpxchg64_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \
})
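These instrumented wrappers follow the established pattern for the try_cmpxchg() family: each records the accesses for KCSAN/KASAN via instrument_atomic_write() on both the target and the old-value slot, and the fully ordered and release forms additionally model their ordering for KCSAN with kcsan_mb() and kcsan_release() before delegating to the arch_ implementation. A kernel-style usage sketch of the new wrapper (not compilable outside the kernel tree; record_max() and its arguments are hypothetical):

/* Lock-free "record the maximum" built on the new try_cmpxchg64().
 * A failed attempt refreshes old, so the loop needs no re-read. */
static void record_max(u64 *max, u64 sample)
{
	u64 old = READ_ONCE(*max);

	do {
		if (sample <= old)
			return;
	} while (!try_cmpxchg64(max, &old, sample));
}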
#define cmpxchg_local(ptr, ...) \
({ \
	typeof(ptr) __ai_ptr = (ptr); \
@@ -2045,4 +2083,4 @@ atomic_long_dec_if_positive(atomic_long_t *v)
})
#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
-// 87c974b93032afd42143613434d1a7788fa598f9
+// 764f741eb77a7ad565dc8d99ce2837d5542e8aee
@@ -164,41 +164,44 @@ gen_xchg_fallbacks()

gen_try_cmpxchg_fallback()
{
+	local cmpxchg="$1"; shift;
	local order="$1"; shift;

cat <<EOF
-#ifndef arch_try_cmpxchg${order}
-#define arch_try_cmpxchg${order}(_ptr, _oldp, _new) \\
+#ifndef arch_try_${cmpxchg}${order}
+#define arch_try_${cmpxchg}${order}(_ptr, _oldp, _new) \\
({ \\
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \\
-	___r = arch_cmpxchg${order}((_ptr), ___o, (_new)); \\
+	___r = arch_${cmpxchg}${order}((_ptr), ___o, (_new)); \\
	if (unlikely(___r != ___o)) \\
		*___op = ___r; \\
	likely(___r == ___o); \\
})
-#endif /* arch_try_cmpxchg${order} */
+#endif /* arch_try_${cmpxchg}${order} */
EOF
}

gen_try_cmpxchg_fallbacks()
{
-	printf "#ifndef arch_try_cmpxchg_relaxed\n"
-	printf "#ifdef arch_try_cmpxchg\n"
+	local cmpxchg="$1"; shift;

-	gen_basic_fallbacks "arch_try_cmpxchg"
+	printf "#ifndef arch_try_${cmpxchg}_relaxed\n"
+	printf "#ifdef arch_try_${cmpxchg}\n"

-	printf "#endif /* arch_try_cmpxchg */\n\n"
+	gen_basic_fallbacks "arch_try_${cmpxchg}"
+
+	printf "#endif /* arch_try_${cmpxchg} */\n\n"

	for order in "" "_acquire" "_release" "_relaxed"; do
-		gen_try_cmpxchg_fallback "${order}"
+		gen_try_cmpxchg_fallback "${cmpxchg}" "${order}"
	done

-	printf "#else /* arch_try_cmpxchg_relaxed */\n"
+	printf "#else /* arch_try_${cmpxchg}_relaxed */\n"

-	gen_order_fallbacks "arch_try_cmpxchg"
+	gen_order_fallbacks "arch_try_${cmpxchg}"

-	printf "#endif /* arch_try_cmpxchg_relaxed */\n\n"
+	printf "#endif /* arch_try_${cmpxchg}_relaxed */\n\n"
}
cat << EOF
@@ -218,7 +221,9 @@ for xchg in "arch_xchg" "arch_cmpxchg" "arch_cmpxchg64"; do
	gen_xchg_fallbacks "${xchg}"
done

-gen_try_cmpxchg_fallbacks
+for cmpxchg in "cmpxchg" "cmpxchg64"; do
+	gen_try_cmpxchg_fallbacks "${cmpxchg}"
+done

grep '^[a-z]' "$1" | while read name meta args; do
	gen_proto "${meta}" "${name}" "atomic" "int" ${args}
...
@@ -166,7 +166,7 @@ grep '^[a-z]' "$1" | while read name meta args; do
done

-for xchg in "xchg" "cmpxchg" "cmpxchg64" "try_cmpxchg"; do
+for xchg in "xchg" "cmpxchg" "cmpxchg64" "try_cmpxchg" "try_cmpxchg64"; do
	for order in "" "_acquire" "_release" "_relaxed"; do
		gen_xchg "${xchg}" "${order}" ""
		printf "\n"
...
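The point of adding try_cmpxchg64() is to let 64-bit compare-and-swap loops shed their duplicated compare, just as earlier work did for the 32-bit try_cmpxchg(). A sketch of the conversion this enables (kernel-style, assuming <linux/atomic.h>; the functions and their arguments are hypothetical):

/* Before: open-coded cmpxchg64() loop; the caller re-reads and
 * compares the returned value itself on every iteration. */
static void set_flag_cmpxchg(u64 *ptr, u64 flag)
{
	u64 old, new;

	do {
		old = READ_ONCE(*ptr);
		new = old | flag;
	} while (cmpxchg64(ptr, old, new) != old);
}

/* After: try_cmpxchg64() loop; a failed attempt refreshes old, so
 * there is no explicit re-read and no second comparison. */
static void set_flag_try_cmpxchg(u64 *ptr, u64 flag)
{
	u64 old = READ_ONCE(*ptr);

	do {
	} while (!try_cmpxchg64(ptr, &old, old | flag));
}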