author     Mark Rutland <mark.rutland@arm.com>      2023-06-05 08:01:17 +0100
committer  Peter Zijlstra <peterz@infradead.org>    2023-06-05 09:57:21 +0200
commit     9257959a6e5b4fca6fc8e985790bff62c2046f20 (patch)
tree       1b12bc04830f3339277701a6406e555db6163128
parent     1815da1718aa4c062b94cf3fc09432f552e25768 (diff)
locking/atomic: scripts: restructure fallback ifdeffery
Currently the various ordering variants of an atomic operation are
defined in groups of full/acquire/release/relaxed ordering variants
with some shared ifdeffery and several potential definitions of each
ordering variant in different branches of the shared ifdeffery.

As an ordering variant can have several potential definitions down
different branches of the shared ifdeffery, it can be painful for a
human to find a relevant definition, and we don't have a good location
to place anything common to all definitions of an ordering variant
(e.g. kerneldoc).

Historically the grouping of full/acquire/release/relaxed ordering
variants was necessary as we filled in the missing atomics in the same
namespace as the architecture used. It would be easy to accidentally
define one ordering fallback in terms of another ordering fallback
with redundant barriers, and avoiding that would otherwise require a
lot of baroque ifdeffery.

With recent changes we no longer need to fill in the missing atomics
in the arch_atomic*_<op>() namespace, and only need to fill in the
raw_atomic*_<op>() namespace. Due to this, there's no risk of a
namespace collision, and we can define each raw_atomic*_<op> ordering
variant with its own ifdeffery checking for the arch_atomic*_<op>
ordering variants.

Restructure the fallbacks in this way, with each ordering variant
having its own ifdeffery of the form:

| #if defined(arch_atomic_fetch_andnot_acquire)
| #define raw_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
| #elif defined(arch_atomic_fetch_andnot_relaxed)
| static __always_inline int
| raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
| {
| 	int ret = arch_atomic_fetch_andnot_relaxed(i, v);
| 	__atomic_acquire_fence();
| 	return ret;
| }
| #elif defined(arch_atomic_fetch_andnot)
| #define raw_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot
| #else
| static __always_inline int
| raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
| {
| 	return raw_atomic_fetch_and_acquire(~i, v);
| }
| #endif

Note that where there's no relevant arch_atomic*_<op>() ordering
variant, we'll define the operation in terms of a distinct
raw_atomic*_<otherop>(), as this itself might have been filled in with
a fallback.

As we now generate the raw_atomic*_<op>() implementations directly, we
no longer need the trivial wrappers, so they are removed.

This makes the ifdeffery easier to follow, and will allow for further
improvements in subsequent patches.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20230605070124.3741859-21-mark.rutland@arm.com
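For reference, the __atomic_op_acquire(), __atomic_op_release() and
__atomic_op_fence() helpers that the generated fallbacks below expand
to are defined in include/linux/atomic.h; a rough sketch of their
shape (paraphrased, details may differ between kernel versions):

| /* Run the relaxed op, then upgrade to ACQUIRE ordering with a fence. */
| #define __atomic_op_acquire(op, args...)				\
| ({									\
| 	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
| 	__atomic_acquire_fence();					\
| 	__ret;								\
| })
|
| /* Issue a RELEASE fence, then run the relaxed op. */
| #define __atomic_op_release(op, args...)				\
| ({									\
| 	__atomic_release_fence();					\
| 	op##_relaxed(args);						\
| })
|
| /* Bracket the relaxed op with fences for fully-ordered semantics. */
| #define __atomic_op_fence(op, args...)					\
| ({									\
| 	typeof(op##_relaxed(args)) __ret;				\
| 	__atomic_pre_full_fence();					\
| 	__ret = op##_relaxed(args);					\
| 	__atomic_post_full_fence();					\
| 	__ret;								\
| })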
-rw-r--r--  include/linux/atomic.h                        1
-rw-r--r--  include/linux/atomic/atomic-arch-fallback.h   3166
-rw-r--r--  include/linux/atomic/atomic-raw.h             1135
-rwxr-xr-x  scripts/atomic/fallbacks/acquire              2
-rwxr-xr-x  scripts/atomic/fallbacks/add_negative         4
-rwxr-xr-x  scripts/atomic/fallbacks/add_unless           4
-rwxr-xr-x  scripts/atomic/fallbacks/andnot               4
-rw-r--r--  scripts/atomic/fallbacks/cmpxchg              4
-rwxr-xr-x  scripts/atomic/fallbacks/dec                  4
-rwxr-xr-x  scripts/atomic/fallbacks/dec_and_test         4
-rwxr-xr-x  scripts/atomic/fallbacks/dec_if_positive      6
-rwxr-xr-x  scripts/atomic/fallbacks/dec_unless_positive  6
-rwxr-xr-x  scripts/atomic/fallbacks/fence                2
-rwxr-xr-x  scripts/atomic/fallbacks/fetch_add_unless     6
-rwxr-xr-x  scripts/atomic/fallbacks/inc                  4
-rwxr-xr-x  scripts/atomic/fallbacks/inc_and_test         4
-rwxr-xr-x  scripts/atomic/fallbacks/inc_not_zero         4
-rwxr-xr-x  scripts/atomic/fallbacks/inc_unless_negative  6
-rwxr-xr-x  scripts/atomic/fallbacks/read_acquire         4
-rwxr-xr-x  scripts/atomic/fallbacks/release              2
-rwxr-xr-x  scripts/atomic/fallbacks/set_release          4
-rwxr-xr-x  scripts/atomic/fallbacks/sub_and_test         4
-rwxr-xr-x  scripts/atomic/fallbacks/try_cmpxchg          4
-rw-r--r--  scripts/atomic/fallbacks/xchg                 4
-rwxr-xr-x  scripts/atomic/gen-atomic-fallback.sh         236
-rw-r--r--  scripts/atomic/gen-atomic-raw.sh              80
-rwxr-xr-x  scripts/atomic/gen-atomics.sh                 1
27 files changed, 1860 insertions(+), 2845 deletions(-)
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 296cfae0389f..8dd57c3a99e9 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -78,7 +78,6 @@
})
#include <linux/atomic/atomic-arch-fallback.h>
-#include <linux/atomic/atomic-raw.h>
#include <linux/atomic/atomic-long.h>
#include <linux/atomic/atomic-instrumented.h>
diff --git a/include/linux/atomic/atomic-arch-fallback.h b/include/linux/atomic/atomic-arch-fallback.h
index 1a2d81dbc2e4..99bc1a871dc1 100644
--- a/include/linux/atomic/atomic-arch-fallback.h
+++ b/include/linux/atomic/atomic-arch-fallback.h
@@ -8,2749 +8,2911 @@
#include <linux/compiler.h>
-#ifndef arch_xchg_relaxed
-#define arch_xchg_acquire arch_xchg
-#define arch_xchg_release arch_xchg
-#define arch_xchg_relaxed arch_xchg
-#else /* arch_xchg_relaxed */
-
-#ifndef arch_xchg_acquire
-#define arch_xchg_acquire(...) \
- __atomic_op_acquire(arch_xchg, __VA_ARGS__)
+#if defined(arch_xchg)
+#define raw_xchg arch_xchg
+#elif defined(arch_xchg_relaxed)
+#define raw_xchg(...) \
+ __atomic_op_fence(arch_xchg, __VA_ARGS__)
+#else
+extern void raw_xchg_not_implemented(void);
+#define raw_xchg(...) raw_xchg_not_implemented()
#endif
-#ifndef arch_xchg_release
-#define arch_xchg_release(...) \
- __atomic_op_release(arch_xchg, __VA_ARGS__)
+#if defined(arch_xchg_acquire)
+#define raw_xchg_acquire arch_xchg_acquire
+#elif defined(arch_xchg_relaxed)
+#define raw_xchg_acquire(...) \
+ __atomic_op_acquire(arch_xchg, __VA_ARGS__)
+#elif defined(arch_xchg)
+#define raw_xchg_acquire arch_xchg
+#else
+extern void raw_xchg_acquire_not_implemented(void);
+#define raw_xchg_acquire(...) raw_xchg_acquire_not_implemented()
#endif
-#ifndef arch_xchg
-#define arch_xchg(...) \
- __atomic_op_fence(arch_xchg, __VA_ARGS__)
+#if defined(arch_xchg_release)
+#define raw_xchg_release arch_xchg_release
+#elif defined(arch_xchg_relaxed)
+#define raw_xchg_release(...) \
+ __atomic_op_release(arch_xchg, __VA_ARGS__)
+#elif defined(arch_xchg)
+#define raw_xchg_release arch_xchg
+#else
+extern void raw_xchg_release_not_implemented(void);
+#define raw_xchg_release(...) raw_xchg_release_not_implemented()
+#endif
+
+#if defined(arch_xchg_relaxed)
+#define raw_xchg_relaxed arch_xchg_relaxed
+#elif defined(arch_xchg)
+#define raw_xchg_relaxed arch_xchg
+#else
+extern void raw_xchg_relaxed_not_implemented(void);
+#define raw_xchg_relaxed(...) raw_xchg_relaxed_not_implemented()
+#endif
+
+#if defined(arch_cmpxchg)
+#define raw_cmpxchg arch_cmpxchg
+#elif defined(arch_cmpxchg_relaxed)
+#define raw_cmpxchg(...) \
+ __atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
+#else
+extern void raw_cmpxchg_not_implemented(void);
+#define raw_cmpxchg(...) raw_cmpxchg_not_implemented()
#endif
-#endif /* arch_xchg_relaxed */
-
-#ifndef arch_cmpxchg_relaxed
-#define arch_cmpxchg_acquire arch_cmpxchg
-#define arch_cmpxchg_release arch_cmpxchg
-#define arch_cmpxchg_relaxed arch_cmpxchg
-#else /* arch_cmpxchg_relaxed */
-
-#ifndef arch_cmpxchg_acquire
-#define arch_cmpxchg_acquire(...) \
+#if defined(arch_cmpxchg_acquire)
+#define raw_cmpxchg_acquire arch_cmpxchg_acquire
+#elif defined(arch_cmpxchg_relaxed)
+#define raw_cmpxchg_acquire(...) \
__atomic_op_acquire(arch_cmpxchg, __VA_ARGS__)
+#elif defined(arch_cmpxchg)
+#define raw_cmpxchg_acquire arch_cmpxchg
+#else
+extern void raw_cmpxchg_acquire_not_implemented(void);
+#define raw_cmpxchg_acquire(...) raw_cmpxchg_acquire_not_implemented()
#endif
-#ifndef arch_cmpxchg_release
-#define arch_cmpxchg_release(...) \
+#if defined(arch_cmpxchg_release)
+#define raw_cmpxchg_release arch_cmpxchg_release
+#elif defined(arch_cmpxchg_relaxed)
+#define raw_cmpxchg_release(...) \
__atomic_op_release(arch_cmpxchg, __VA_ARGS__)
+#elif defined(arch_cmpxchg)
+#define raw_cmpxchg_release arch_cmpxchg
+#else
+extern void raw_cmpxchg_release_not_implemented(void);
+#define raw_cmpxchg_release(...) raw_cmpxchg_release_not_implemented()
+#endif
+
+#if defined(arch_cmpxchg_relaxed)
+#define raw_cmpxchg_relaxed arch_cmpxchg_relaxed
+#elif defined(arch_cmpxchg)
+#define raw_cmpxchg_relaxed arch_cmpxchg
+#else
+extern void raw_cmpxchg_relaxed_not_implemented(void);
+#define raw_cmpxchg_relaxed(...) raw_cmpxchg_relaxed_not_implemented()
+#endif
+
+#if defined(arch_cmpxchg64)
+#define raw_cmpxchg64 arch_cmpxchg64
+#elif defined(arch_cmpxchg64_relaxed)
+#define raw_cmpxchg64(...) \
+ __atomic_op_fence(arch_cmpxchg64, __VA_ARGS__)
+#else
+extern void raw_cmpxchg64_not_implemented(void);
+#define raw_cmpxchg64(...) raw_cmpxchg64_not_implemented()
#endif
-#ifndef arch_cmpxchg
-#define arch_cmpxchg(...) \
- __atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
-#endif
-
-#endif /* arch_cmpxchg_relaxed */
-
-#ifndef arch_cmpxchg64_relaxed
-#define arch_cmpxchg64_acquire arch_cmpxchg64
-#define arch_cmpxchg64_release arch_cmpxchg64
-#define arch_cmpxchg64_relaxed arch_cmpxchg64
-#else /* arch_cmpxchg64_relaxed */
-
-#ifndef arch_cmpxchg64_acquire
-#define arch_cmpxchg64_acquire(...) \
+#if defined(arch_cmpxchg64_acquire)
+#define raw_cmpxchg64_acquire arch_cmpxchg64_acquire
+#elif defined(arch_cmpxchg64_relaxed)
+#define raw_cmpxchg64_acquire(...) \
__atomic_op_acquire(arch_cmpxchg64, __VA_ARGS__)
+#elif defined(arch_cmpxchg64)
+#define raw_cmpxchg64_acquire arch_cmpxchg64
+#else
+extern void raw_cmpxchg64_acquire_not_implemented(void);
+#define raw_cmpxchg64_acquire(...) raw_cmpxchg64_acquire_not_implemented()
#endif
-#ifndef arch_cmpxchg64_release
-#define arch_cmpxchg64_release(...) \
+#if defined(arch_cmpxchg64_release)
+#define raw_cmpxchg64_release arch_cmpxchg64_release
+#elif defined(arch_cmpxchg64_relaxed)
+#define raw_cmpxchg64_release(...) \
__atomic_op_release(arch_cmpxchg64, __VA_ARGS__)
+#elif defined(arch_cmpxchg64)
+#define raw_cmpxchg64_release arch_cmpxchg64
+#else
+extern void raw_cmpxchg64_release_not_implemented(void);
+#define raw_cmpxchg64_release(...) raw_cmpxchg64_release_not_implemented()
+#endif
+
+#if defined(arch_cmpxchg64_relaxed)
+#define raw_cmpxchg64_relaxed arch_cmpxchg64_relaxed
+#elif defined(arch_cmpxchg64)
+#define raw_cmpxchg64_relaxed arch_cmpxchg64
+#else
+extern void raw_cmpxchg64_relaxed_not_implemented(void);
+#define raw_cmpxchg64_relaxed(...) raw_cmpxchg64_relaxed_not_implemented()
+#endif
+
+#if defined(arch_cmpxchg128)
+#define raw_cmpxchg128 arch_cmpxchg128
+#elif defined(arch_cmpxchg128_relaxed)
+#define raw_cmpxchg128(...) \
+ __atomic_op_fence(arch_cmpxchg128, __VA_ARGS__)
+#else
+extern void raw_cmpxchg128_not_implemented(void);
+#define raw_cmpxchg128(...) raw_cmpxchg128_not_implemented()
#endif
-#ifndef arch_cmpxchg64
-#define arch_cmpxchg64(...) \
- __atomic_op_fence(arch_cmpxchg64, __VA_ARGS__)
-#endif
-
-#endif /* arch_cmpxchg64_relaxed */
-
-#ifndef arch_cmpxchg128_relaxed
-#define arch_cmpxchg128_acquire arch_cmpxchg128
-#define arch_cmpxchg128_release arch_cmpxchg128
-#define arch_cmpxchg128_relaxed arch_cmpxchg128
-#else /* arch_cmpxchg128_relaxed */
-
-#ifndef arch_cmpxchg128_acquire
-#define arch_cmpxchg128_acquire(...) \
+#if defined(arch_cmpxchg128_acquire)
+#define raw_cmpxchg128_acquire arch_cmpxchg128_acquire
+#elif defined(arch_cmpxchg128_relaxed)
+#define raw_cmpxchg128_acquire(...) \
__atomic_op_acquire(arch_cmpxchg128, __VA_ARGS__)
+#elif defined(arch_cmpxchg128)
+#define raw_cmpxchg128_acquire arch_cmpxchg128
+#else
+extern void raw_cmpxchg128_acquire_not_implemented(void);
+#define raw_cmpxchg128_acquire(...) raw_cmpxchg128_acquire_not_implemented()
#endif
-#ifndef arch_cmpxchg128_release
-#define arch_cmpxchg128_release(...) \
+#if defined(arch_cmpxchg128_release)
+#define raw_cmpxchg128_release arch_cmpxchg128_release
+#elif defined(arch_cmpxchg128_relaxed)
+#define raw_cmpxchg128_release(...) \
__atomic_op_release(arch_cmpxchg128, __VA_ARGS__)
-#endif
-
-#ifndef arch_cmpxchg128
-#define arch_cmpxchg128(...) \
- __atomic_op_fence(arch_cmpxchg128, __VA_ARGS__)
-#endif
-
-#endif /* arch_cmpxchg128_relaxed */
-
-#ifndef arch_try_cmpxchg_relaxed
-#ifdef arch_try_cmpxchg
-#define arch_try_cmpxchg_acquire arch_try_cmpxchg
-#define arch_try_cmpxchg_release arch_try_cmpxchg
-#define arch_try_cmpxchg_relaxed arch_try_cmpxchg
-#endif /* arch_try_cmpxchg */
-
-#ifndef arch_try_cmpxchg
-#define arch_try_cmpxchg(_ptr, _oldp, _new) \
+#elif defined(arch_cmpxchg128)
+#define raw_cmpxchg128_release arch_cmpxchg128
+#else
+extern void raw_cmpxchg128_release_not_implemented(void);
+#define raw_cmpxchg128_release(...) raw_cmpxchg128_release_not_implemented()
+#endif
+
+#if defined(arch_cmpxchg128_relaxed)
+#define raw_cmpxchg128_relaxed arch_cmpxchg128_relaxed
+#elif defined(arch_cmpxchg128)
+#define raw_cmpxchg128_relaxed arch_cmpxchg128
+#else
+extern void raw_cmpxchg128_relaxed_not_implemented(void);
+#define raw_cmpxchg128_relaxed(...) raw_cmpxchg128_relaxed_not_implemented()
+#endif
+
+#if defined(arch_try_cmpxchg)
+#define raw_try_cmpxchg arch_try_cmpxchg
+#elif defined(arch_try_cmpxchg_relaxed)
+#define raw_try_cmpxchg(...) \
+ __atomic_op_fence(arch_try_cmpxchg, __VA_ARGS__)
+#else
+#define raw_try_cmpxchg(_ptr, _oldp, _new) \
({ \
typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
- ___r = arch_cmpxchg((_ptr), ___o, (_new)); \
+ ___r = raw_cmpxchg((_ptr), ___o, (_new)); \
if (unlikely(___r != ___o)) \
*___op = ___r; \
likely(___r == ___o); \
})
-#endif /* arch_try_cmpxchg */
+#endif
-#ifndef arch_try_cmpxchg_acquire
-#define arch_try_cmpxchg_acquire(_ptr, _oldp, _new) \
+#if defined(arch_try_cmpxchg_acquire)
+#define raw_try_cmpxchg_acquire arch_try_cmpxchg_acquire
+#elif defined(arch_try_cmpxchg_relaxed)
+#define raw_try_cmpxchg_acquire(...) \
+ __atomic_op_acquire(arch_try_cmpxchg, __VA_ARGS__)
+#elif defined(arch_try_cmpxchg)
+#define raw_try_cmpxchg_acquire arch_try_cmpxchg
+#else
+#define raw_try_cmpxchg_acquire(_ptr, _oldp, _new) \
({ \
typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
- ___r = arch_cmpxchg_acquire((_ptr), ___o, (_new)); \
+ ___r = raw_cmpxchg_acquire((_ptr), ___o, (_new)); \
if (unlikely(___r != ___o)) \
*___op = ___r; \
likely(___r == ___o); \
})
-#endif /* arch_try_cmpxchg_acquire */
+#endif
-#ifndef arch_try_cmpxchg_release
-#define arch_try_cmpxchg_release(_ptr, _oldp, _new) \
+#if defined(arch_try_cmpxchg_release)
+#define raw_try_cmpxchg_release arch_try_cmpxchg_release
+#elif defined(arch_try_cmpxchg_relaxed)
+#define raw_try_cmpxchg_release(...) \
+ __atomic_op_release(arch_try_cmpxchg, __VA_ARGS__)
+#elif defined(arch_try_cmpxchg)
+#define raw_try_cmpxchg_release arch_try_cmpxchg
+#else
+#define raw_try_cmpxchg_release(_ptr, _oldp, _new) \
({ \
typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
- ___r = arch_cmpxchg_release((_ptr), ___o, (_new)); \
+ ___r = raw_cmpxchg_release((_ptr), ___o, (_new)); \
if (unlikely(___r != ___o)) \
*___op = ___r; \
likely(___r == ___o); \
})
-#endif /* arch_try_cmpxchg_release */
+#endif
-#ifndef arch_try_cmpxchg_relaxed
-#define arch_try_cmpxchg_relaxed(_ptr, _oldp, _new) \
+#if defined(arch_try_cmpxchg_relaxed)
+#define raw_try_cmpxchg_relaxed arch_try_cmpxchg_relaxed
+#elif defined(arch_try_cmpxchg)
+#define raw_try_cmpxchg_relaxed arch_try_cmpxchg
+#else
+#define raw_try_cmpxchg_relaxed(_ptr, _oldp, _new) \
({ \
typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
- ___r = arch_cmpxchg_relaxed((_ptr), ___o, (_new)); \
+ ___r = raw_cmpxchg_relaxed((_ptr), ___o, (_new)); \
if (unlikely(___r != ___o)) \
*___op = ___r; \
likely(___r == ___o); \
})
-#endif /* arch_try_cmpxchg_relaxed */
-
-#else /* arch_try_cmpxchg_relaxed */
-
-#ifndef arch_try_cmpxchg_acquire
-#define arch_try_cmpxchg_acquire(...) \
- __atomic_op_acquire(arch_try_cmpxchg, __VA_ARGS__)
#endif
-#ifndef arch_try_cmpxchg_release
-#define arch_try_cmpxchg_release(...) \
- __atomic_op_release(arch_try_cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef arch_try_cmpxchg
-#define arch_try_cmpxchg(...) \
- __atomic_op_fence(arch_try_cmpxchg, __VA_ARGS__)
-#endif
-
-#endif /* arch_try_cmpxchg_relaxed */
-
-#ifndef arch_try_cmpxchg64_relaxed
-#ifdef arch_try_cmpxchg64
-#define arch_try_cmpxchg64_acquire arch_try_cmpxchg64
-#define arch_try_cmpxchg64_release arch_try_cmpxchg64
-#define arch_try_cmpxchg64_relaxed arch_try_cmpxchg64
-#endif /* arch_try_cmpxchg64 */
-
-#ifndef arch_try_cmpxchg64
-#define arch_try_cmpxchg64(_ptr, _oldp, _new) \
+#if defined(arch_try_cmpxchg64)
+#define raw_try_cmpxchg64 arch_try_cmpxchg64
+#elif defined(arch_try_cmpxchg64_relaxed)
+#define raw_try_cmpxchg64(...) \
+ __atomic_op_fence(arch_try_cmpxchg64, __VA_ARGS__)
+#else
+#define raw_try_cmpxchg64(_ptr, _oldp, _new) \
({ \
typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
- ___r = arch_cmpxchg64((_ptr), ___o, (_new)); \
+ ___r = raw_cmpxchg64((_ptr), ___o, (_new)); \
if (unlikely(___r != ___o)) \
*___op = ___r; \
likely(___r == ___o); \
})
-#endif /* arch_try_cmpxchg64 */
+#endif
-#ifndef arch_try_cmpxchg64_acquire
-#define arch_try_cmpxchg64_acquire(_ptr, _oldp, _new) \
+#if defined(arch_try_cmpxchg64_acquire)
+#define raw_try_cmpxchg64_acquire arch_try_cmpxchg64_acquire
+#elif defined(arch_try_cmpxchg64_relaxed)
+#define raw_try_cmpxchg64_acquire(...) \
+ __atomic_op_acquire(arch_try_cmpxchg64, __VA_ARGS__)
+#elif defined(arch_try_cmpxchg64)
+#define raw_try_cmpxchg64_acquire arch_try_cmpxchg64
+#else
+#define raw_try_cmpxchg64_acquire(_ptr, _oldp, _new) \
({ \
typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
- ___r = arch_cmpxchg64_acquire((_ptr), ___o, (_new)); \
+ ___r = raw_cmpxchg64_acquire((_ptr), ___o, (_new)); \
if (unlikely(___r != ___o)) \
*___op = ___r; \
likely(___r == ___o); \
})
-#endif /* arch_try_cmpxchg64_acquire */
+#endif
-#ifndef arch_try_cmpxchg64_release
-#define arch_try_cmpxchg64_release(_ptr, _oldp, _new) \
+#if defined(arch_try_cmpxchg64_release)
+#define raw_try_cmpxchg64_release arch_try_cmpxchg64_release
+#elif defined(arch_try_cmpxchg64_relaxed)
+#define raw_try_cmpxchg64_release(...) \
+ __atomic_op_release(arch_try_cmpxchg64, __VA_ARGS__)
+#elif defined(arch_try_cmpxchg64)
+#define raw_try_cmpxchg64_release arch_try_cmpxchg64
+#else
+#define raw_try_cmpxchg64_release(_ptr, _oldp, _new) \
({ \
typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
- ___r = arch_cmpxchg64_release((_ptr), ___o, (_new)); \
+ ___r = raw_cmpxchg64_release((_ptr), ___o, (_new)); \
if (unlikely(___r != ___o)) \
*___op = ___r; \
likely(___r == ___o); \
})
-#endif /* arch_try_cmpxchg64_release */
+#endif
-#ifndef arch_try_cmpxchg64_relaxed
-#define arch_try_cmpxchg64_relaxed(_ptr, _oldp, _new) \
+#if defined(arch_try_cmpxchg64_relaxed)
+#define raw_try_cmpxchg64_relaxed arch_try_cmpxchg64_relaxed
+#elif defined(arch_try_cmpxchg64)
+#define raw_try_cmpxchg64_relaxed arch_try_cmpxchg64
+#else
+#define raw_try_cmpxchg64_relaxed(_ptr, _oldp, _new) \
({ \
typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
- ___r = arch_cmpxchg64_relaxed((_ptr), ___o, (_new)); \
+ ___r = raw_cmpxchg64_relaxed((_ptr), ___o, (_new)); \
if (unlikely(___r != ___o)) \
*___op = ___r; \
likely(___r == ___o); \
})
-#endif /* arch_try_cmpxchg64_relaxed */
-
-#else /* arch_try_cmpxchg64_relaxed */
-
-#ifndef arch_try_cmpxchg64_acquire
-#define arch_try_cmpxchg64_acquire(...) \
- __atomic_op_acquire(arch_try_cmpxchg64, __VA_ARGS__)
-#endif
-
-#ifndef arch_try_cmpxchg64_release
-#define arch_try_cmpxchg64_release(...) \
- __atomic_op_release(arch_try_cmpxchg64, __VA_ARGS__)
-#endif
-
-#ifndef arch_try_cmpxchg64
-#define arch_try_cmpxchg64(...) \
- __atomic_op_fence(arch_try_cmpxchg64, __VA_ARGS__)
#endif
-#endif /* arch_try_cmpxchg64_relaxed */
-
-#ifndef arch_try_cmpxchg128_relaxed
-#ifdef arch_try_cmpxchg128
-#define arch_try_cmpxchg128_acquire arch_try_cmpxchg128
-#define arch_try_cmpxchg128_release arch_try_cmpxchg128
-#define arch_try_cmpxchg128_relaxed arch_try_cmpxchg128
-#endif /* arch_try_cmpxchg128 */
-
-#ifndef arch_try_cmpxchg128
-#define arch_try_cmpxchg128(_ptr, _oldp, _new) \
+#if defined(arch_try_cmpxchg128)
+#define raw_try_cmpxchg128 arch_try_cmpxchg128
+#elif defined(arch_try_cmpxchg128_relaxed)
+#define raw_try_cmpxchg128(...) \
+ __atomic_op_fence(arch_try_cmpxchg128, __VA_ARGS__)
+#else
+#define raw_try_cmpxchg128(_ptr, _oldp, _new) \
({ \
typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
- ___r = arch_cmpxchg128((_ptr), ___o, (_new)); \
+ ___r = raw_cmpxchg128((_ptr), ___o, (_new)); \
if (unlikely(___r != ___o)) \
*___op = ___r; \
likely(___r == ___o); \
})
-#endif /* arch_try_cmpxchg128 */
+#endif
-#ifndef arch_try_cmpxchg128_acquire
-#define arch_try_cmpxchg128_acquire(_ptr, _oldp, _new) \
+#if defined(arch_try_cmpxchg128_acquire)
+#define raw_try_cmpxchg128_acquire arch_try_cmpxchg128_acquire
+#elif defined(arch_try_cmpxchg128_relaxed)
+#define raw_try_cmpxchg128_acquire(...) \
+ __atomic_op_acquire(arch_try_cmpxchg128, __VA_ARGS__)
+#elif defined(arch_try_cmpxchg128)
+#define raw_try_cmpxchg128_acquire arch_try_cmpxchg128
+#else
+#define raw_try_cmpxchg128_acquire(_ptr, _oldp, _new) \
({ \
typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
- ___r = arch_cmpxchg128_acquire((_ptr), ___o, (_new)); \
+ ___r = raw_cmpxchg128_acquire((_ptr), ___o, (_new)); \
if (unlikely(___r != ___o)) \
*___op = ___r; \
likely(___r == ___o); \
})
-#endif /* arch_try_cmpxchg128_acquire */
+#endif
-#ifndef arch_try_cmpxchg128_release
-#define arch_try_cmpxchg128_release(_ptr, _oldp, _new) \
+#if defined(arch_try_cmpxchg128_release)
+#define raw_try_cmpxchg128_release arch_try_cmpxchg128_release
+#elif defined(arch_try_cmpxchg128_relaxed)
+#define raw_try_cmpxchg128_release(...) \
+ __atomic_op_release(arch_try_cmpxchg128, __VA_ARGS__)
+#elif defined(arch_try_cmpxchg128)
+#define raw_try_cmpxchg128_release arch_try_cmpxchg128
+#else
+#define raw_try_cmpxchg128_release(_ptr, _oldp, _new) \
({ \
typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
- ___r = arch_cmpxchg128_release((_ptr), ___o, (_new)); \
+ ___r = raw_cmpxchg128_release((_ptr), ___o, (_new)); \
if (unlikely(___r != ___o)) \
*___op = ___r; \
likely(___r == ___o); \
})
-#endif /* arch_try_cmpxchg128_release */
+#endif
-#ifndef arch_try_cmpxchg128_relaxed
-#define arch_try_cmpxchg128_relaxed(_ptr, _oldp, _new) \
+#if defined(arch_try_cmpxchg128_relaxed)
+#define raw_try_cmpxchg128_relaxed arch_try_cmpxchg128_relaxed
+#elif defined(arch_try_cmpxchg128)
+#define raw_try_cmpxchg128_relaxed arch_try_cmpxchg128
+#else
+#define raw_try_cmpxchg128_relaxed(_ptr, _oldp, _new) \
({ \
typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
- ___r = arch_cmpxchg128_relaxed((_ptr), ___o, (_new)); \
+ ___r = raw_cmpxchg128_relaxed((_ptr), ___o, (_new)); \
if (unlikely(___r != ___o)) \
*___op = ___r; \
likely(___r == ___o); \
})
-#endif /* arch_try_cmpxchg128_relaxed */
-
-#else /* arch_try_cmpxchg128_relaxed */
-
-#ifndef arch_try_cmpxchg128_acquire
-#define arch_try_cmpxchg128_acquire(...) \
- __atomic_op_acquire(arch_try_cmpxchg128, __VA_ARGS__)
#endif
-#ifndef arch_try_cmpxchg128_release
-#define arch_try_cmpxchg128_release(...) \
- __atomic_op_release(arch_try_cmpxchg128, __VA_ARGS__)
-#endif
+#define raw_cmpxchg_local arch_cmpxchg_local
-#ifndef arch_try_cmpxchg128
-#define arch_try_cmpxchg128(...) \
- __atomic_op_fence(arch_try_cmpxchg128, __VA_ARGS__)
+#ifdef arch_try_cmpxchg_local
+#define raw_try_cmpxchg_local arch_try_cmpxchg_local
+#else
+#define raw_try_cmpxchg_local(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg_local((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
#endif
-#endif /* arch_try_cmpxchg128_relaxed */
+#define raw_cmpxchg64_local arch_cmpxchg64_local
-#ifndef arch_try_cmpxchg_local
-#define arch_try_cmpxchg_local(_ptr, _oldp, _new) \
+#ifdef arch_try_cmpxchg64_local
+#define raw_try_cmpxchg64_local arch_try_cmpxchg64_local
+#else
+#define raw_try_cmpxchg64_local(_ptr, _oldp, _new) \
({ \
typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
- ___r = arch_cmpxchg_local((_ptr), ___o, (_new)); \
+ ___r = raw_cmpxchg64_local((_ptr), ___o, (_new)); \
if (unlikely(___r != ___o)) \
*___op = ___r; \
likely(___r == ___o); \
})
-#endif /* arch_try_cmpxchg_local */
+#endif
+
+#define raw_cmpxchg128_local arch_cmpxchg128_local
-#ifndef arch_try_cmpxchg64_local
-#define arch_try_cmpxchg64_local(_ptr, _oldp, _new) \
+#ifdef arch_try_cmpxchg128_local
+#define raw_try_cmpxchg128_local arch_try_cmpxchg128_local
+#else
+#define raw_try_cmpxchg128_local(_ptr, _oldp, _new) \
({ \
typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
- ___r = arch_cmpxchg64_local((_ptr), ___o, (_new)); \
+ ___r = raw_cmpxchg128_local((_ptr), ___o, (_new)); \
if (unlikely(___r != ___o)) \
*___op = ___r; \
likely(___r == ___o); \
})
-#endif /* arch_try_cmpxchg64_local */
+#endif
+
+#define raw_sync_cmpxchg arch_sync_cmpxchg
+
+#define raw_atomic_read arch_atomic_read
-#ifndef arch_atomic_read_acquire
+#if defined(arch_atomic_read_acquire)
+#define raw_atomic_read_acquire arch_atomic_read_acquire
+#elif defined(arch_atomic_read)
+#define raw_atomic_read_acquire arch_atomic_read
+#else
static __always_inline int
-arch_atomic_read_acquire(const atomic_t *v)
+raw_atomic_read_acquire(const atomic_t *v)
{
int ret;
if (__native_word(atomic_t)) {
ret = smp_load_acquire(&(v)->counter);
} else {
- ret = arch_atomic_read(v);
+ ret = raw_atomic_read(v);
__atomic_acquire_fence();
}
return ret;
}
-#define arch_atomic_read_acquire arch_atomic_read_acquire
#endif
-#ifndef arch_atomic_set_release
+#define raw_atomic_set arch_atomic_set
+
+#if defined(arch_atomic_set_release)
+#define raw_atomic_set_release arch_atomic_set_release
+#elif defined(arch_atomic_set)
+#define raw_atomic_set_release arch_atomic_set
+#else
static __always_inline void
-arch_atomic_set_release(atomic_t *v, int i)
+raw_atomic_set_release(atomic_t *v, int i)
{
if (__native_word(atomic_t)) {
smp_store_release(&(v)->counter, i);
} else {
__atomic_release_fence();
- arch_atomic_set(v, i);
+ raw_atomic_set(v, i);
}
}
-#define arch_atomic_set_release arch_atomic_set_release
#endif
-#ifndef arch_atomic_add_return_relaxed
-#define arch_atomic_add_return_acquire arch_atomic_add_return
-#define arch_atomic_add_return_release arch_atomic_add_return
-#define arch_atomic_add_return_relaxed arch_atomic_add_return
-#else /* arch_atomic_add_return_relaxed */
+#define raw_atomic_add arch_atomic_add
+
+#if defined(arch_atomic_add_return)
+#define raw_atomic_add_return arch_atomic_add_return
+#elif defined(arch_atomic_add_return_relaxed)
+static __always_inline int
+raw_atomic_add_return(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_add_return_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#else
+#error "Unable to define raw_atomic_add_return"
+#endif
-#ifndef arch_atomic_add_return_acquire
+#if defined(arch_atomic_add_return_acquire)
+#define raw_atomic_add_return_acquire arch_atomic_add_return_acquire
+#elif defined(arch_atomic_add_return_relaxed)
static __always_inline int
-arch_atomic_add_return_acquire(int i, atomic_t *v)
+raw_atomic_add_return_acquire(int i, atomic_t *v)
{
int ret = arch_atomic_add_return_relaxed(i, v);
__atomic_acquire_fence();
return ret;
}
-#define arch_atomic_add_return_acquire arch_atomic_add_return_acquire
+#elif defined(arch_atomic_add_return)
+#define raw_atomic_add_return_acquire arch_atomic_add_return
+#else
+#error "Unable to define raw_atomic_add_return_acquire"
#endif
-#ifndef arch_atomic_add_return_release
+#if defined(arch_atomic_add_return_release)
+#define raw_atomic_add_return_release arch_atomic_add_return_release
+#elif defined(arch_atomic_add_return_relaxed)
static __always_inline int
-arch_atomic_add_return_release(int i, atomic_t *v)
+raw_atomic_add_return_release(int i, atomic_t *v)
{
__atomic_release_fence();
return arch_atomic_add_return_relaxed(i, v);
}
-#define arch_atomic_add_return_release arch_atomic_add_return_release
+#elif defined(arch_atomic_add_return)
+#define raw_atomic_add_return_release arch_atomic_add_return
+#else
+#error "Unable to define raw_atomic_add_return_release"
+#endif
+
+#if defined(arch_atomic_add_return_relaxed)
+#define raw_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#elif defined(arch_atomic_add_return)
+#define raw_atomic_add_return_relaxed arch_atomic_add_return
+#else
+#error "Unable to define raw_atomic_add_return_relaxed"
#endif
-#ifndef arch_atomic_add_return
+#if defined(arch_atomic_fetch_add)
+#define raw_atomic_fetch_add arch_atomic_fetch_add
+#elif defined(arch_atomic_fetch_add_relaxed)
static __always_inline int
-arch_atomic_add_return(int i, atomic_t *v)
+raw_atomic_fetch_add(int i, atomic_t *v)
{
int ret;
__atomic_pre_full_fence();
- ret = arch_atomic_add_return_relaxed(i, v);
+ ret = arch_atomic_fetch_add_relaxed(i, v);
__atomic_post_full_fence();
return ret;
}
-#define arch_atomic_add_return arch_atomic_add_return
+#else
+#error "Unable to define raw_atomic_fetch_add"
#endif
-#endif /* arch_atomic_add_return_relaxed */
-
-#ifndef arch_atomic_fetch_add_relaxed
-#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add