atomic: Switch atomic.h to builtin atomics

Switch to the standard builtin atomics by removing the fallback defines
for the !USE_ATOMIC_COMPILER_BUILTINS case.

Reviewed-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>
Author: Wilco Dijkstra
Date: 2025-09-09 10:36:45 +00:00
parent b568af853b
commit c7b7c80b39
1 changed file with 0 additions and 165 deletions
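For context, "builtin atomics" means the GCC/Clang __atomic builtins that implement the C11 memory model; the kept code path maps each atomic_OP_MO macro onto one of them, as the retained hunk context below shows for __atomic_fetch_xor. A minimal sketch of that mapping (illustrative macro names, not glibc's):

    /* Illustrative only: GCC/Clang provide __atomic_load_n, __atomic_store_n
       and __atomic_fetch_add with exactly these signatures.  */
    #define my_load_acquire(mem) \
      __atomic_load_n ((mem), __ATOMIC_ACQUIRE)
    #define my_store_release(mem, val) \
      __atomic_store_n ((mem), (val), __ATOMIC_RELEASE)
    #define my_fetch_add_relaxed(mem, operand) \
      __atomic_fetch_add ((mem), (operand), __ATOMIC_RELAXED)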

@@ -563,10 +563,6 @@
    C11. Usually, a function named atomic_OP_MO(args) is equivalent to C11's
    atomic_OP_explicit(args, memory_order_MO); exceptions noted below. */
 
-/* Each arch can request to use compiler built-ins for C11 atomics.  If it
-   does, all atomics will be based on these.  */
-#if USE_ATOMIC_COMPILER_BUILTINS
-
 /* We require 32b atomic operations; some archs also support 64b atomic
    operations. */
 void __atomic_link_error (void);
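The surviving declaration above is the link-time size check: __atomic_link_error is declared but never defined, so a call to it that survives constant folding becomes a link failure. The file's real check macro is __atomic_check_size (visible in the next hunk's context); a paraphrased sketch of the idiom, with an illustrative macro name:

    /* Sketch of the link-error idiom: if the operand size is neither 4 nor 8
       (the sizes the comment above requires/permits), the call cannot be
       optimized away and linking fails.  */
    void __atomic_link_error (void);
    #define check_size(mem) \
      if (sizeof (*(mem)) != 4 && sizeof (*(mem)) != 8) \
        __atomic_link_error ()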
@@ -693,167 +689,6 @@ void __atomic_link_error (void);
   ({ __atomic_check_size((mem)); \
      __atomic_fetch_xor ((mem), (operand), __ATOMIC_RELEASE); })
 
-#else /* !USE_ATOMIC_COMPILER_BUILTINS */
-
-/* By default, we assume that read, write, and full barriers are equivalent
-   to acquire, release, and seq_cst barriers.  Archs for which this does not
-   hold have to provide custom definitions of the fences.  */
-# ifndef atomic_thread_fence_acquire
-#  define atomic_thread_fence_acquire() atomic_read_barrier ()
-# endif
-# ifndef atomic_thread_fence_release
-#  define atomic_thread_fence_release() atomic_write_barrier ()
-# endif
-# ifndef atomic_thread_fence_seq_cst
-#  define atomic_thread_fence_seq_cst() atomic_full_barrier ()
-# endif
-
-# ifndef atomic_load_relaxed
-#  define atomic_load_relaxed(mem) \
-   ({ __typeof ((__typeof (*(mem))) *(mem)) __atg100_val; \
-   __asm ("" : "=r" (__atg100_val) : "0" (*(mem))); \
-   __atg100_val; })
-# endif
-# ifndef atomic_load_acquire
-#  define atomic_load_acquire(mem) \
-   ({ __typeof (*(mem)) __atg101_val = atomic_load_relaxed (mem); \
-   atomic_thread_fence_acquire (); \
-   __atg101_val; })
-# endif
-
-# ifndef atomic_store_relaxed
-/* XXX Use inline asm here?  */
-#  define atomic_store_relaxed(mem, val) do { *(mem) = (val); } while (0)
-# endif
-# ifndef atomic_store_release
-#  define atomic_store_release(mem, val) \
-   do { \
-     atomic_thread_fence_release (); \
-     atomic_store_relaxed ((mem), (val)); \
-   } while (0)
-# endif
-
-/* On failure, this CAS has memory_order_relaxed semantics.  */
-/* XXX This potentially has one branch more than necessary, but archs
-   currently do not define a CAS that returns both the previous value and
-   the success flag.  */
-# ifndef atomic_compare_exchange_weak_acquire
-#  define atomic_compare_exchange_weak_acquire(mem, expected, desired) \
-   ({ typeof (*(expected)) __atg102_expected = *(expected); \
-   *(expected) = \
-     atomic_compare_and_exchange_val_acq ((mem), (desired), *(expected)); \
-   *(expected) == __atg102_expected; })
-# endif
-# ifndef atomic_compare_exchange_weak_relaxed
-/* XXX Fall back to CAS with acquire MO because archs do not define a weaker
-   CAS.  */
-#  define atomic_compare_exchange_weak_relaxed(mem, expected, desired) \
-   atomic_compare_exchange_weak_acquire ((mem), (expected), (desired))
-# endif
-# ifndef atomic_compare_exchange_weak_release
-#  define atomic_compare_exchange_weak_release(mem, expected, desired) \
-   ({ typeof (*(expected)) __atg103_expected = *(expected); \
-   *(expected) = \
-     atomic_compare_and_exchange_val_rel ((mem), (desired), *(expected)); \
-   *(expected) == __atg103_expected; })
-# endif
-
-/* XXX Fall back to acquire MO because archs do not define a weaker
-   atomic_exchange.  */
-# ifndef atomic_exchange_relaxed
-#  define atomic_exchange_relaxed(mem, val) \
-   atomic_exchange_acq ((mem), (val))
-# endif
-# ifndef atomic_exchange_acquire
-#  define atomic_exchange_acquire(mem, val) \
-   atomic_exchange_acq ((mem), (val))
-# endif
-# ifndef atomic_exchange_release
-#  define atomic_exchange_release(mem, val) \
-   atomic_exchange_rel ((mem), (val))
-# endif
-
-# ifndef atomic_fetch_add_acquire
-#  define atomic_fetch_add_acquire(mem, operand) \
-   atomic_exchange_and_add_acq ((mem), (operand))
-# endif
-# ifndef atomic_fetch_add_relaxed
-/* XXX Fall back to acquire MO because the MO semantics of
-   atomic_exchange_and_add are not documented; the generic version falls back
-   to atomic_exchange_and_add_acq if atomic_exchange_and_add is not defined,
-   and vice versa.  */
-#  define atomic_fetch_add_relaxed(mem, operand) \
-   atomic_fetch_add_acquire ((mem), (operand))
-# endif
-# ifndef atomic_fetch_add_release
-#  define atomic_fetch_add_release(mem, operand) \
-   atomic_exchange_and_add_rel ((mem), (operand))
-# endif
-# ifndef atomic_fetch_add_acq_rel
-#  define atomic_fetch_add_acq_rel(mem, operand) \
-   ({ atomic_thread_fence_release (); \
-   atomic_exchange_and_add_acq ((mem), (operand)); })
-# endif
-
-/* XXX Fall back to acquire MO because archs do not define a weaker
-   atomic_and_val.  */
-# ifndef atomic_fetch_and_relaxed
-#  define atomic_fetch_and_relaxed(mem, operand) \
-   atomic_fetch_and_acquire ((mem), (operand))
-# endif
-/* XXX The default for atomic_and_val has acquire semantics, but this is not
-   documented.  */
-# ifndef atomic_fetch_and_acquire
-#  define atomic_fetch_and_acquire(mem, operand) \
-   atomic_and_val ((mem), (operand))
-# endif
-# ifndef atomic_fetch_and_release
-/* XXX This unnecessarily has acquire MO.  */
-#  define atomic_fetch_and_release(mem, operand) \
-   ({ atomic_thread_fence_release (); \
-   atomic_and_val ((mem), (operand)); })
-# endif
-
-/* XXX The default for atomic_or_val has acquire semantics, but this is not
-   documented.  */
-# ifndef atomic_fetch_or_acquire
-#  define atomic_fetch_or_acquire(mem, operand) \
-   atomic_or_val ((mem), (operand))
-# endif
-/* XXX Fall back to acquire MO because archs do not define a weaker
-   atomic_or_val.  */
-# ifndef atomic_fetch_or_relaxed
-#  define atomic_fetch_or_relaxed(mem, operand) \
-   atomic_fetch_or_acquire ((mem), (operand))
-# endif
-/* XXX Contains an unnecessary acquire MO because archs do not define a weaker
-   atomic_or_val.  */
-# ifndef atomic_fetch_or_release
-#  define atomic_fetch_or_release(mem, operand) \
-   ({ atomic_thread_fence_release (); \
-   atomic_fetch_or_acquire ((mem), (operand)); })
-# endif
-
-# ifndef atomic_fetch_xor_release
-/* Failing the atomic_compare_exchange_weak_release reloads the value in
-   __atg104_expected, so we need only do the XOR again and retry.  */
-#  define atomic_fetch_xor_release(mem, operand) \
-   ({ __typeof (mem) __atg104_memp = (mem); \
-   __typeof (*(mem)) __atg104_expected = (*__atg104_memp); \
-   __typeof (*(mem)) __atg104_desired; \
-   __typeof (*(mem)) __atg104_op = (operand); \
-   \
-   do \
-     __atg104_desired = __atg104_expected ^ __atg104_op; \
-   while (__glibc_unlikely \
-          (atomic_compare_exchange_weak_release ( \
-             __atg104_memp, &__atg104_expected, __atg104_desired) \
-           == 0)); \
-   __atg104_expected; })
-# endif
-#endif /* !USE_ATOMIC_COMPILER_BUILTINS */
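Taken together, the removed branch built the C11-style operations from two idioms: plain accesses combined with explicit fences, and a weak-CAS loop for read-modify-write operations that had no dedicated primitive. A standalone C11 rendering of both, for reference (illustrative code, not the glibc macros):

    #include <stdatomic.h>

    /* Idiom 1: acquire load = relaxed load, then acquire fence;
       release store = release fence, then relaxed store.  */
    static inline int
    load_acquire_via_fence (atomic_int *mem)
    {
      int val = atomic_load_explicit (mem, memory_order_relaxed);
      atomic_thread_fence (memory_order_acquire);
      return val;
    }

    static inline void
    store_release_via_fence (atomic_int *mem, int val)
    {
      atomic_thread_fence (memory_order_release);
      atomic_store_explicit (mem, val, memory_order_relaxed);
    }

    /* Idiom 2: fetch-xor emulated by a weak-CAS loop, as in the removed
       atomic_fetch_xor_release above; a failed weak CAS writes the freshly
       observed value back into 'expected', so the loop just re-XORs.  */
    static inline unsigned int
    fetch_xor_release_via_cas (atomic_uint *mem, unsigned int op)
    {
      unsigned int expected = atomic_load_explicit (mem, memory_order_relaxed);
      while (!atomic_compare_exchange_weak_explicit (mem, &expected,
                                                     expected ^ op,
                                                     memory_order_release,
                                                     memory_order_relaxed))
        ;  /* 'expected' was refreshed by the failed CAS; retry.  */
      return expected;
    }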
 /* This operation does not affect synchronization semantics but can be used
    in the body of a spin loop to potentially improve its efficiency. */
 #ifndef atomic_spin_nop
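The hunk ends at the atomic_spin_nop guard; as its comment says, the hint only affects efficiency, never ordering. A sketch of typical use in a spin-wait loop, assuming an x86 target where GCC's __builtin_ia32_pause emits the PAUSE hint (other targets substitute their own hint or a no-op):

    #include <stdatomic.h>

    /* Illustrative spin-wait: poll with relaxed loads plus a pause hint,
       then establish acquire ordering with a single fence on exit.  */
    static inline void
    spin_until_zero (atomic_int *flag)
    {
      while (atomic_load_explicit (flag, memory_order_relaxed) != 0)
        __builtin_ia32_pause ();  /* efficiency hint only; no ordering */
      atomic_thread_fence (memory_order_acquire);
    }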