atomic: Remove atomic_forced_read
Remove the odd atomic_forced_read, which is neither atomic nor forced. Some uses are completely redundant, so simply remove them. In other cases the intended use is to force a memory ordering, so use an acquire load for those. In yet other cases the purpose is unclear; for example, __nscd_cache_search appears to allow concurrent accesses to the cache while it is being garbage collected by another thread! Use relaxed atomic loads there so the compiler cannot spill a value and then reload it from memory that may be concurrently modified.

Passes regress on AArch64. OK for commit?
parent 9da624a183
commit adbd3ba137
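
For context, the macro being deleted from the internal atomic.h header later in this patch is just an empty asm that forces a value through a register: it is neither an atomic load nor a barrier. The standalone sketch below is not part of the patch; it uses the GCC __atomic builtins rather than glibc's internal atomic_load_acquire/atomic_load_relaxed macros, and shared_counter is a made-up variable. It contrasts the old idiom with the two replacements used here.

#include <stddef.h>

/* The macro removed by this patch: copy X into a register via an empty asm
   so the compiler cannot later re-read the original memory location.  It
   provides no atomicity and no ordering against other memory accesses.  */
#define old_forced_read(x) \
  ({ __typeof (x) __x; __asm ("" : "=r" (__x) : "0" (x)); __x; })

extern size_t shared_counter;   /* hypothetical: written by other threads */

size_t
read_idioms (void)
{
  /* Old style: one read forced into a register, but unordered and with no
     atomicity guarantee from the language.  */
  size_t a = old_forced_read (shared_counter);

  /* Relaxed atomic load: a single non-torn read, still no ordering.  This
     is what the patch uses where only "read it exactly once" matters.  */
  size_t b = __atomic_load_n (&shared_counter, __ATOMIC_RELAXED);

  /* Acquire load: a single read that also orders everything after it.
     Used where the old code relied on barriers or asm tricks.  */
  size_t c = __atomic_load_n (&shared_counter, __ATOMIC_ACQUIRE);

  return a + b + c;
}
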
@@ -342,12 +342,12 @@ do_lookup_x (const char *undef_name, unsigned int new_hash,
             const struct r_found_version *const version, int flags,
             struct link_map *skip, int type_class, struct link_map *undef_map)
 {
-  size_t n = scope->r_nlist;
-  /* Make sure we read the value before proceeding.  Otherwise we
+  /* Make sure we read r_nlist before r_list, or otherwise we
      might use r_list pointing to the initial scope and r_nlist being
      the value after a resize.  That is the only path in dl-open.c not
-     protected by GSCOPE.  A read barrier here might be to expensive.  */
-  __asm volatile ("" : "+r" (n), "+m" (scope->r_list));
+     protected by GSCOPE.  This works if all updates also use a store-
+     release or release barrier.  */
+  size_t n = atomic_load_acquire (&scope->r_nlist);
   struct link_map **list = scope->r_list;
 
   do
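
The new comment requires that every update of r_nlist pair a store-release (or a release barrier) with this acquire load. Below is a minimal sketch of that pairing, assuming a made-up scope_model type and using GCC __atomic builtins instead of glibc's internal wrappers; it is a model of the pattern, not the real dl-open.c/dl-lookup.c code.

#include <stddef.h>

struct scope_model
{
  void **list;    /* entry array; a resize keeps the existing prefix valid */
  size_t nlist;   /* number of valid entries */
};

/* Writer: install the (possibly reallocated) array first, then publish the
   new count with a store-release, as the updated comment requires.  */
void
scope_publish (struct scope_model *s, void **new_list, size_t new_n)
{
  s->list = new_list;
  __atomic_store_n (&s->nlist, new_n, __ATOMIC_RELEASE);
}

/* Reader: acquire-load the count, then read the pointer.  If the enlarged
   count is observed, the acquire guarantees the new array is observed too,
   so the bad combination (new count, old array) cannot happen.  */
size_t
scope_snapshot (struct scope_model *s, void ***out_list)
{
  size_t n = __atomic_load_n (&s->nlist, __ATOMIC_ACQUIRE);
  *out_list = s->list;
  return n;
}
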
@@ -541,15 +541,13 @@ add_dependency (struct link_map *undef_map, struct link_map *map, int flags)
   if (is_nodelete (map, flags))
     return 0;
 
-  struct link_map_reldeps *l_reldeps
-    = atomic_forced_read (undef_map->l_reldeps);
-
   /* Make sure l_reldeps is read before l_initfini.  */
-  atomic_read_barrier ();
+  struct link_map_reldeps *l_reldeps
+    = atomic_load_acquire (&undef_map->l_reldeps);
 
   /* Determine whether UNDEF_MAP already has a reference to MAP.  First
      look in the normal dependencies.  */
-  struct link_map **l_initfini = atomic_forced_read (undef_map->l_initfini);
+  struct link_map **l_initfini = undef_map->l_initfini;
   if (l_initfini != NULL)
     {
       for (i = 0; l_initfini[i] != NULL; ++i)
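
This hunk folds the plain read plus atomic_read_barrier sequence into a single acquire load. Roughly, the two shapes below give the same ordering for the reads that matter here; the struct and field names are illustrative (not the real struct link_map), and GCC builtins stand in for glibc's internal macros.

struct deps_model { void *reldeps; void *initfini; };

/* Old shape: plain read, then a read barrier so the next read cannot be
   reordered before it.  */
void *
deps_old_style (struct deps_model *m, void **fini)
{
  void *deps = m->reldeps;                   /* plain read */
  __atomic_thread_fence (__ATOMIC_ACQUIRE);  /* roughly atomic_read_barrier */
  *fini = m->initfini;
  return deps;
}

/* New shape: a single acquire load provides the same guarantee and makes
   clear which load the ordering hangs off.  */
void *
deps_new_style (struct deps_model *m, void **fini)
{
  void *deps = __atomic_load_n (&m->reldeps, __ATOMIC_ACQUIRE);
  *fini = m->initfini;
  return deps;
}
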
@@ -583,7 +581,7 @@ add_dependency (struct link_map *undef_map, struct link_map *map, int flags)
 	 it can e.g. point to unallocated memory.  So avoid the optimizer
 	 treating the above read from MAP->l_serial as ensurance it
 	 can safely dereference it.  */
-      map = atomic_forced_read (map);
+      __asm ("" : "=r" (map) : "0" (map));
 
       /* From this point on it is unsafe to dereference MAP, until it
 	 has been found in one of the lists.  */
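
This is the one spot where the old macro's real effect, pushing a value through a register so the optimizer forgets what it knew about it, is still wanted, so the asm is open-coded. A self-contained illustration of the idiom follows; the helper and its arguments are hypothetical, not from the patch.

/* P may point to memory another thread has freed, so we only compare the
   pointer value and never dereference it.  The empty asm forces P through
   a register, so the optimizer cannot treat earlier dereferences of P as
   proof that *P is still valid here.  */
int
pointer_is_in_list (void *p, void **list, int len)
{
  __asm ("" : "=r" (p) : "0" (p));

  for (int i = 0; i < len; i++)
    if (list[i] == p)
      return 1;
  return 0;
}
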
@@ -117,11 +117,6 @@
 #endif
 
 
-#ifndef atomic_forced_read
-# define atomic_forced_read(x) \
-  ({ __typeof (x) __x; __asm ("" : "=r" (__x) : "0" (x)); __x; })
-#endif
-
 /* This is equal to 1 iff the architecture supports 64b atomic operations.  */
 #ifndef __HAVE_64B_ATOMICS
 #error Unable to determine if 64-bit atomics are present.
@@ -155,7 +155,7 @@ static size_t pagesize;
 static void *
 __debug_malloc (size_t bytes)
 {
-  void *(*hook) (size_t, const void *) = atomic_forced_read (__malloc_hook);
+  void *(*hook) (size_t, const void *) = __malloc_hook;
   if (__glibc_unlikely (hook != NULL))
     return (*hook)(bytes, RETURN_ADDRESS (0));
 
@@ -179,7 +179,7 @@ strong_alias (__debug_malloc, malloc)
 static void
 __debug_free (void *mem)
 {
-  void (*hook) (void *, const void *) = atomic_forced_read (__free_hook);
+  void (*hook) (void *, const void *) = __free_hook;
   if (__glibc_unlikely (hook != NULL))
     {
       (*hook)(mem, RETURN_ADDRESS (0));
@@ -201,8 +201,7 @@ strong_alias (__debug_free, free)
 static void *
 __debug_realloc (void *oldmem, size_t bytes)
 {
-  void *(*hook) (void *, size_t, const void *) =
-    atomic_forced_read (__realloc_hook);
+  void *(*hook) (void *, size_t, const void *) = __realloc_hook;
   if (__glibc_unlikely (hook != NULL))
     return (*hook)(oldmem, bytes, RETURN_ADDRESS (0));
 
@@ -230,8 +229,7 @@ strong_alias (__debug_realloc, realloc)
 static void *
 _debug_mid_memalign (size_t alignment, size_t bytes, const void *address)
 {
-  void *(*hook) (size_t, size_t, const void *) =
-    atomic_forced_read (__memalign_hook);
+  void *(*hook) (size_t, size_t, const void *) = __memalign_hook;
   if (__glibc_unlikely (hook != NULL))
     return (*hook)(alignment, bytes, address);
 
@@ -330,7 +328,7 @@ __debug_calloc (size_t nmemb, size_t size)
       return NULL;
     }
 
-  void *(*hook) (size_t, const void *) = atomic_forced_read (__malloc_hook);
+  void *(*hook) (size_t, const void *) = __malloc_hook;
   if (__glibc_unlikely (hook != NULL))
     {
       void *mem = (*hook)(bytes, RETURN_ADDRESS (0));
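
The five malloc-debug hunks above are the same mechanical change: the hook function pointer is copied into a local with a plain load, and both the NULL test and the call go through that local. A standalone sketch of the pattern follows, with a made-up alloc_hook standing in for the real hook variables and __builtin_return_address in place of glibc's RETURN_ADDRESS wrapper.

#include <stdlib.h>

/* Hypothetical hook: if set, it overrides the default allocation path.  */
void *(*alloc_hook) (size_t size, const void *caller);

void *
debug_alloc (size_t bytes)
{
  /* Copy the hook into a local so the test and the call use one value.  */
  void *(*hook) (size_t, const void *) = alloc_hook;
  if (hook != NULL)
    return hook (bytes, __builtin_return_address (0));

  return malloc (bytes);   /* default path */
}
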
@@ -33,7 +33,7 @@ __pthread_sigqueue (pthread_t threadid, int signo, const union sigval value)
   /* Force load of pd->tid into local variable or register.  Otherwise
      if a thread exits between ESRCH test and tgkill, we might return
      EINVAL, because pd->tid would be cleared by the kernel.  */
-  pid_t tid = atomic_forced_read (pd->tid);
+  pid_t tid = atomic_load_relaxed (&pd->tid);
   if (__glibc_unlikely (tid <= 0))
     /* Not a valid thread handle.  */
     return ESRCH;
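
Here only a stable snapshot is needed: the kernel may clear pd->tid when the thread exits, and the ESRCH check and the later tgkill must see the same value. A reduced sketch follows, with a hypothetical descriptor type in place of struct pthread and a plain int in place of pid_t.

/* TID is cleared (to 0) by the kernel when the thread exits, so take one
   relaxed-atomic snapshot and use it for both the validity check and the
   later syscall.  */
struct tid_model { int tid; };

int
snapshot_tid (struct tid_model *pd)
{
  int tid = __atomic_load_n (&pd->tid, __ATOMIC_RELAXED);
  if (tid <= 0)
    return -1;          /* stands in for returning ESRCH */
  return tid;           /* caller would pass this snapshot to tgkill */
}
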
@@ -454,7 +454,6 @@ __nscd_cache_search (request_type type, const char *key, size_t keylen,
   size_t datasize = mapped->datasize;
 
   ref_t trail = mapped->head->array[hash];
-  trail = atomic_forced_read (trail);
   ref_t work = trail;
   size_t loop_cnt = datasize / (MINIMUM_HASHENTRY_SIZE
 				+ offsetof (struct datahead, data) / 2);
@@ -468,17 +467,18 @@ __nscd_cache_search (request_type type, const char *key, size_t keylen,
       /* Although during garbage collection when moving struct hashentry
 	 records around we first copy from old to new location and then
 	 adjust pointer from previous hashentry to it, there is no barrier
-	 between those memory writes.  It is very unlikely to hit it,
-	 so check alignment only if a misaligned load can crash the
-	 application.  */
+	 between those memory writes!!!  This is extremely risky on any
+	 modern CPU which can reorder memory accesses very aggressively.
+	 Check alignment, both as a partial consistency check and to avoid
+	 crashes on targets which require atomic loads to be aligned.  */
       if ((uintptr_t) here & (__alignof__ (*here) - 1))
 	return NULL;
 
       if (type == here->type
 	  && keylen == here->len
-	  && (here_key = atomic_forced_read (here->key)) + keylen <= datasize
+	  && (here_key = atomic_load_relaxed (&here->key)) + keylen <= datasize
 	  && memcmp (key, mapped->data + here_key, keylen) == 0
-	  && ((here_packet = atomic_forced_read (here->packet))
+	  && ((here_packet = atomic_load_relaxed (&here->packet))
 	      + sizeof (struct datahead) <= datasize))
 	{
 	  /* We found the entry.  Increment the appropriate counter.  */
@@ -497,7 +497,7 @@ __nscd_cache_search (request_type type, const char *key, size_t keylen,
 	  return dh;
 	}
 
-      work = atomic_forced_read (here->next);
+      work = atomic_load_relaxed (&here->next);
       /* Prevent endless loops.  This should never happen but perhaps
 	 the database got corrupted, accidentally or deliberately.  */
       if (work == trail || loop_cnt-- == 0)
@@ -514,7 +514,7 @@ __nscd_cache_search (request_type type, const char *key, size_t keylen,
 	  if (trail + MINIMUM_HASHENTRY_SIZE > datasize)
 	    return NULL;
 
-	  trail = atomic_forced_read (trailelem->next);
+	  trail = atomic_load_relaxed (&trailelem->next);
 	}
       tick = 1 - tick;
     }
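
The nscd hunks keep the lock-free walk over a cache that another process may be garbage-collecting, but every link field is now an explicit relaxed load, so the compiler cannot legally re-read a field between the bounds check and its use; the size checks and the loop counter already guard against corrupt or moving chains. Below is a reduced sketch of that traversal shape, with made-up types in place of struct hashentry and the mapped database; it is an illustration of the pattern, not the nscd code.

#include <stdint.h>
#include <stddef.h>

struct entry_model { uint32_t key_off; uint32_t next; };

/* Walk a chain of offsets inside DATA (DATASIZE bytes of shared memory that
   another process may be rewriting).  Every link is read exactly once with a
   relaxed load, every offset is bounds- and alignment-checked before use,
   and LOOP_CNT bounds the walk so a corrupted chain cannot loop forever.  */
const struct entry_model *
chain_search (const char *data, size_t datasize, uint32_t head,
              uint32_t wanted_key_off, size_t loop_cnt)
{
  uint32_t work = head;

  while (work != 0 && loop_cnt-- > 0)
    {
      if (work + sizeof (struct entry_model) > datasize
          || (work & (__alignof__ (struct entry_model) - 1)) != 0)
        return NULL;                    /* out of bounds or misaligned */

      const struct entry_model *here
        = (const struct entry_model *) (data + work);

      /* One snapshot per field; a plain read could legally be reloaded by
         the compiler and yield two different values.  */
      uint32_t key_off = __atomic_load_n (&here->key_off, __ATOMIC_RELAXED);
      if (key_off == wanted_key_off)
        return here;

      work = __atomic_load_n (&here->next, __ATOMIC_RELAXED);
    }

  return NULL;
}
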