atomic: Switch m68k to builtin atomics

Switch m68k to builtin atomics.

Reviewed-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>
Author: Wilco Dijkstra
Date:   2025-09-08 17:16:03 +00:00
parent 2a035debbb
commit 1c48da52bc
3 changed files with 4 additions and 249 deletions
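
Background for the change: when USE_ATOMIC_COMPILER_BUILTINS is 1, glibc's generic atomic layer defines the old macro names on top of GCC's __atomic builtins, so the hand-written per-port versions below become dead code. A minimal sketch of that mapping (illustrative only; the real definitions in glibc's generic headers are more elaborate):

    /* Sketch, not the actual glibc definition.  */
    #define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
      ({ __typeof (*(mem)) __expected = (oldval); \
         __atomic_compare_exchange_n ((mem), &__expected, (newval), 0, \
                                      __ATOMIC_ACQUIRE, __ATOMIC_RELAXED); \
         /* Old value of *mem, whether or not the swap happened.  */ \
         __expected; })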

sysdeps/m68k/coldfire/atomic-machine.h

@@ -20,25 +20,9 @@
 /* If we have just non-atomic operations, we can as well make them wide.  */
 #define __HAVE_64B_ATOMICS 1
-#define USE_ATOMIC_COMPILER_BUILTINS 0
+#define USE_ATOMIC_COMPILER_BUILTINS 1
 
 /* XXX Is this actually correct?  */
 #define ATOMIC_EXCHANGE_USES_CAS 1
 
-/* The only basic operation needed is compare and exchange.  */
-#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
-  ({ __typeof (mem) __gmemp = (mem); \
-     __typeof (*mem) __gret = *__gmemp; \
-     __typeof (*mem) __gnewval = (newval); \
- \
-     if (__gret == (oldval)) \
-       *__gmemp = __gnewval; \
-     __gret; })
-
-#define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
-  ({ __typeof (mem) __gmemp = (mem); \
-     __typeof (*mem) __gnewval = (newval); \
- \
-     *__gmemp == (oldval) ? (*__gmemp = __gnewval, 0) : 1; })
-
 #endif
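
Callers are unaffected by the deletion: the macro names stay the same, only the implementation now comes from the builtins. For example, a try-lock written against this interface keeps its 0-on-success convention (hypothetical caller, not part of this commit):

    /* Hypothetical example.  */
    static int
    try_lock (int *lock)
    {
      /* Returns 0 if the lock word was 0 and is now 1, nonzero otherwise,
         the same convention the deleted _bool_acq macro implemented by hand.  */
      return atomic_compare_and_exchange_bool_acq (lock, 1, 0);
    }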

sysdeps/m68k/m680x0/m68020/atomic-machine.h

@@ -15,218 +15,8 @@
    License along with the GNU C Library.  If not, see
    <https://www.gnu.org/licenses/>.  */
 
-#define __HAVE_64B_ATOMICS 1
-#define USE_ATOMIC_COMPILER_BUILTINS 0
+#define __HAVE_64B_ATOMICS 0
+#define USE_ATOMIC_COMPILER_BUILTINS 1
 
 /* XXX Is this actually correct?  */
 #define ATOMIC_EXCHANGE_USES_CAS 1
 
-#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \
-  ({ __typeof (*(mem)) __ret; \
-     __asm __volatile ("cas%.b %0,%2,%1" \
-                       : "=d" (__ret), "+m" (*(mem)) \
-                       : "d" (newval), "0" (oldval)); \
-     __ret; })
-
-#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
-  ({ __typeof (*(mem)) __ret; \
-     __asm __volatile ("cas%.w %0,%2,%1" \
-                       : "=d" (__ret), "+m" (*(mem)) \
-                       : "d" (newval), "0" (oldval)); \
-     __ret; })
-
-#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
-  ({ __typeof (*(mem)) __ret; \
-     __asm __volatile ("cas%.l %0,%2,%1" \
-                       : "=d" (__ret), "+m" (*(mem)) \
-                       : "d" (newval), "0" (oldval)); \
-     __ret; })
-
-# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
-  ({ __typeof (*(mem)) __ret; \
-     __typeof (mem) __memp = (mem); \
-     __asm __volatile ("cas2%.l %0:%R0,%1:%R1,(%2):(%3)" \
-                       : "=d" (__ret) \
-                       : "d" ((__typeof (*(mem))) (newval)), "r" (__memp), \
-                         "r" ((char *) __memp + 4), "0" (oldval) \
-                       : "memory"); \
-     __ret; })
-
-#define atomic_exchange_acq(mem, newvalue) \
-  ({ __typeof (*(mem)) __result = *(mem); \
-     if (sizeof (*(mem)) == 1) \
-       __asm __volatile ("1: cas%.b %0,%2,%1;" \
-                         " jbne 1b" \
-                         : "=d" (__result), "+m" (*(mem)) \
-                         : "d" (newvalue), "0" (__result)); \
-     else if (sizeof (*(mem)) == 2) \
-       __asm __volatile ("1: cas%.w %0,%2,%1;" \
-                         " jbne 1b" \
-                         : "=d" (__result), "+m" (*(mem)) \
-                         : "d" (newvalue), "0" (__result)); \
-     else if (sizeof (*(mem)) == 4) \
-       __asm __volatile ("1: cas%.l %0,%2,%1;" \
-                         " jbne 1b" \
-                         : "=d" (__result), "+m" (*(mem)) \
-                         : "d" (newvalue), "0" (__result)); \
-     else \
-       { \
-         __typeof (mem) __memp = (mem); \
-         __asm __volatile ("1: cas2%.l %0:%R0,%1:%R1,(%2):(%3);" \
-                           " jbne 1b" \
-                           : "=d" (__result) \
-                           : "d" ((__typeof (*(mem))) (newvalue)), \
-                             "r" (__memp), "r" ((char *) __memp + 4), \
-                             "0" (__result) \
-                           : "memory"); \
-       } \
-     __result; })
-
-#define atomic_exchange_and_add(mem, value) \
-  ({ __typeof (*(mem)) __result = *(mem); \
-     __typeof (*(mem)) __temp; \
-     if (sizeof (*(mem)) == 1) \
-       __asm __volatile ("1: move%.b %0,%2;" \
-                         " add%.b %3,%2;" \
-                         " cas%.b %0,%2,%1;" \
-                         " jbne 1b" \
-                         : "=d" (__result), "+m" (*(mem)), \
-                           "=&d" (__temp) \
-                         : "d" (value), "0" (__result)); \
-     else if (sizeof (*(mem)) == 2) \
-       __asm __volatile ("1: move%.w %0,%2;" \
-                         " add%.w %3,%2;" \
-                         " cas%.w %0,%2,%1;" \
-                         " jbne 1b" \
-                         : "=d" (__result), "+m" (*(mem)), \
-                           "=&d" (__temp) \
-                         : "d" (value), "0" (__result)); \
-     else if (sizeof (*(mem)) == 4) \
-       __asm __volatile ("1: move%.l %0,%2;" \
-                         " add%.l %3,%2;" \
-                         " cas%.l %0,%2,%1;" \
-                         " jbne 1b" \
-                         : "=d" (__result), "+m" (*(mem)), \
-                           "=&d" (__temp) \
-                         : "d" (value), "0" (__result)); \
-     else \
-       { \
-         __typeof (mem) __memp = (mem); \
-         __asm __volatile ("1: move%.l %0,%1;" \
-                           " move%.l %R0,%R1;" \
-                           " add%.l %R2,%R1;" \
-                           " addx%.l %2,%1;" \
-                           " cas2%.l %0:%R0,%1:%R1,(%3):(%4);" \
-                           " jbne 1b" \
-                           : "=d" (__result), "=&d" (__temp) \
-                           : "d" ((__typeof (*(mem))) (value)), "r" (__memp), \
-                             "r" ((char *) __memp + 4), "0" (__result) \
-                           : "memory"); \
-       } \
-     __result; })
-
-#define atomic_add(mem, value) \
-  (void) ({ if (sizeof (*(mem)) == 1) \
-              __asm __volatile ("add%.b %1,%0" \
-                                : "+m" (*(mem)) \
-                                : "id" (value)); \
-            else if (sizeof (*(mem)) == 2) \
-              __asm __volatile ("add%.w %1,%0" \
-                                : "+m" (*(mem)) \
-                                : "id" (value)); \
-            else if (sizeof (*(mem)) == 4) \
-              __asm __volatile ("add%.l %1,%0" \
-                                : "+m" (*(mem)) \
-                                : "id" (value)); \
-            else \
-              { \
-                __typeof (mem) __memp = (mem); \
-                __typeof (*(mem)) __oldval = *__memp; \
-                __typeof (*(mem)) __temp; \
-                __asm __volatile ("1: move%.l %0,%1;" \
-                                  " move%.l %R0,%R1;" \
-                                  " add%.l %R2,%R1;" \
-                                  " addx%.l %2,%1;" \
-                                  " cas2%.l %0:%R0,%1:%R1,(%3):(%4);" \
-                                  " jbne 1b" \
-                                  : "=d" (__oldval), "=&d" (__temp) \
-                                  : "d" ((__typeof (*(mem))) (value)), \
-                                    "r" (__memp), "r" ((char *) __memp + 4), \
-                                    "0" (__oldval) \
-                                  : "memory"); \
-              } \
-  })
-
-#define atomic_increment_and_test(mem) \
-  ({ char __result; \
-     if (sizeof (*(mem)) == 1) \
-       __asm __volatile ("addq%.b %#1,%1; seq %0" \
-                         : "=dm" (__result), "+m" (*(mem))); \
-     else if (sizeof (*(mem)) == 2) \
-       __asm __volatile ("addq%.w %#1,%1; seq %0" \
-                         : "=dm" (__result), "+m" (*(mem))); \
-     else if (sizeof (*(mem)) == 4) \
-       __asm __volatile ("addq%.l %#1,%1; seq %0" \
-                         : "=dm" (__result), "+m" (*(mem))); \
-     else \
-       { \
-         __typeof (mem) __memp = (mem); \
-         __typeof (*(mem)) __oldval = *__memp; \
-         __typeof (*(mem)) __temp; \
-         __asm __volatile ("1: move%.l %1,%2;" \
-                           " move%.l %R1,%R2;" \
-                           " addq%.l %#1,%R2;" \
-                           " addx%.l %5,%2;" \
-                           " seq %0;" \
-                           " cas2%.l %1:%R1,%2:%R2,(%3):(%4);" \
-                           " jbne 1b" \
-                           : "=&dm" (__result), "=d" (__oldval), \
-                             "=&d" (__temp) \
-                           : "r" (__memp), "r" ((char *) __memp + 4), \
-                             "d" (0), "1" (__oldval) \
-                           : "memory"); \
-       } \
-     __result; })
-
-#define atomic_decrement_and_test(mem) \
-  ({ char __result; \
-     if (sizeof (*(mem)) == 1) \
-       __asm __volatile ("subq%.b %#1,%1; seq %0" \
-                         : "=dm" (__result), "+m" (*(mem))); \
-     else if (sizeof (*(mem)) == 2) \
-       __asm __volatile ("subq%.w %#1,%1; seq %0" \
-                         : "=dm" (__result), "+m" (*(mem))); \
-     else if (sizeof (*(mem)) == 4) \
-       __asm __volatile ("subq%.l %#1,%1; seq %0" \
-                         : "=dm" (__result), "+m" (*(mem))); \
-     else \
-       { \
-         __typeof (mem) __memp = (mem); \
-         __typeof (*(mem)) __oldval = *__memp; \
-         __typeof (*(mem)) __temp; \
-         __asm __volatile ("1: move%.l %1,%2;" \
-                           " move%.l %R1,%R2;" \
-                           " subq%.l %#1,%R2;" \
-                           " subx%.l %5,%2;" \
-                           " seq %0;" \
-                           " cas2%.l %1:%R1,%2:%R2,(%3):(%4);" \
-                           " jbne 1b" \
-                           : "=&dm" (__result), "=d" (__oldval), \
-                             "=&d" (__temp) \
-                           : "r" (__memp), "r" ((char *) __memp + 4), \
-                             "d" (0), "1" (__oldval) \
-                           : "memory"); \
-       } \
-     __result; })
-
-#define atomic_bit_set(mem, bit) \
-  __asm __volatile ("bfset %0{%1,#1}" \
-                    : "+m" (*(mem)) \
-                    : "di" (sizeof (*(mem)) * 8 - (bit) - 1))
-
-#define atomic_bit_test_set(mem, bit) \
-  ({ char __result; \
-     __asm __volatile ("bfset %1{%2,#1}; sne %0" \
-                       : "=dm" (__result), "+m" (*(mem)) \
-                       : "di" (sizeof (*(mem)) * 8 - (bit) - 1)); \
-     __result; })
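
Every deleted loop above is the standard build-anything-from-CAS idiom ("1: cas ...; jbne 1b" retries until the compare-and-swap sticks); once the builtins are in use the compiler generates equivalent cas-based loops itself. As a sketch, the exchange and fetch-and-add cases collapse to single builtin calls (illustrative helpers, not from this patch):

    /* Sketch of the builtin equivalents of the deleted asm loops.  */
    static inline unsigned int
    exchange_acq (unsigned int *mem, unsigned int newval)
    {
      return __atomic_exchange_n (mem, newval, __ATOMIC_ACQUIRE);
    }

    static inline unsigned int
    exchange_and_add (unsigned int *mem, unsigned int value)
    {
      /* Returns the value *mem held before the addition.  */
      return __atomic_fetch_add (mem, value, __ATOMIC_ACQUIRE);
    }

Note the flip of __HAVE_64B_ATOMICS from 1 to 0 in this file: the 64-bit cases were only reachable through the hand-written cas2 sequences, and the builtins are presumably not expanded inline for 8-byte operands here, so 64-bit atomics are no longer advertised (an inference from the diff, not stated in the commit message).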

sysdeps/unix/sysv/linux/m68k/coldfire/atomic-machine.h

@@ -24,30 +24,11 @@
    kernel provides userspace atomicity operations.  Use them.  */
 
 #define __HAVE_64B_ATOMICS 0
-#define USE_ATOMIC_COMPILER_BUILTINS 0
+#define USE_ATOMIC_COMPILER_BUILTINS 1
 
 /* XXX Is this actually correct?  */
 #define ATOMIC_EXCHANGE_USES_CAS 1
 
-/* The only basic operation needed is compare and exchange.  */
-#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
-  ({ \
-     /* Use temporary variables to workaround call-clobberness of \
-        the registers.  */ \
-     __typeof (mem) _mem = mem; \
-     __typeof (oldval) _oldval = oldval; \
-     __typeof (newval) _newval = newval; \
-     register uint32_t _d0 asm ("d0") = SYS_ify (atomic_cmpxchg_32); \
-     register uint32_t *_a0 asm ("a0") = (uint32_t *) _mem; \
-     register uint32_t _d2 asm ("d2") = (uint32_t) _oldval; \
-     register uint32_t _d1 asm ("d1") = (uint32_t) _newval; \
- \
-     asm ("trap #0" \
-          : "+d" (_d0), "+m" (*_a0) \
-          : "a" (_a0), "d" (_d2), "d" (_d1)); \
-     (__typeof (oldval)) _d0; \
-  })
-
 # define atomic_full_barrier() \
   (INTERNAL_SYSCALL_CALL (atomic_barrier), (void) 0)
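
The kernel-assisted compare-and-exchange is dropped, but the kernel-assisted full barrier stays: ColdFire has no barrier instruction, so a full barrier is still a trip through the kernel. Presumably the builtins reach the same kernel cmpxchg helper via the compiler's atomic library calls on this target, so behaviour is expected to be unchanged (an expectation, not something the patch states). A hypothetical internal user of the retained barrier:

    /* Hypothetical publish pattern; payload and ready are illustrative.  */
    extern int payload;
    extern int ready;

    static void
    publish (int value)
    {
      payload = value;
      atomic_full_barrier ();   /* Still a kernel round-trip on ColdFire.  */
      __atomic_store_n (&ready, 1, __ATOMIC_RELAXED);
    }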